r3gm committed on
Commit
82f5f40
verified
1 Parent(s): 160919a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -10
app.py CHANGED
@@ -3,26 +3,35 @@ import spaces
3
  import torch
4
  from diffusers import AuraFlowPipeline
5
  import random
 
6
 
7
  pipe = AuraFlowPipeline.from_pretrained("purplesmartai/pony-v7-base", torch_dtype=torch.float16)
8
  pipe = pipe.to("cuda")
9
 
10
  @spaces.GPU()
11
- def generate_image(prompt, negative_prompt, height, width, num_inference_steps, guidance_scale, seed, progress=gr.Progress(track_tqdm=True)):
12
  if seed < 0:
13
  seed = random.randint(0, 2**32 - 1)
14
 
15
  generator = torch.Generator("cuda").manual_seed(int(seed))
16
 
17
- image = pipe(
18
- prompt=prompt,
19
- negative_prompt=negative_prompt,
20
- height=int(height),
21
- width=int(width),
22
- num_inference_steps=int(num_inference_steps),
23
- guidance_scale=guidance_scale,
24
- generator=generator,
25
- ).images[0]
 
 
 
 
 
 
 
 
26
  return image, seed
27
 
28
  iface = gr.Interface(
@@ -34,6 +43,7 @@ iface = gr.Interface(
34
  gr.Slider(label="Width", minimum=256, maximum=2048, step=64, value=1024),
35
  gr.Slider(label="Number of Inference Steps", minimum=1, maximum=100, step=1, value=30),
36
  gr.Slider(label="Guidance Scale", minimum=1.0, maximum=20.0, step=0.1, value=3.5),
 
37
  gr.Number(label="Seed (set to -1 for random)", value=-1, minimum=-1)
38
  ],
39
  outputs=[
 
3
  import torch
4
  from diffusers import AuraFlowPipeline
5
  import random
6
+ import numpy as np
7
 
# Load the Pony v7 AuraFlow base model in half precision and move it to the GPU.
# AuraFlowPipeline.to() returns the pipeline itself, so the two steps chain.
pipe = AuraFlowPipeline.from_pretrained(
    "purplesmartai/pony-v7-base", torch_dtype=torch.float16
).to("cuda")
10
 
11
@spaces.GPU()
def generate_image(prompt, negative_prompt, height, width, num_inference_steps, guidance_scale, sigmas_factor, seed, progress=gr.Progress(track_tqdm=True)):
    """Run the AuraFlow pipeline once and return (image, seed).

    A negative *seed* requests a random one; the seed actually used is
    returned so the result can be reproduced. When *sigmas_factor* is not
    1.0, a uniformly scaled linear sigma schedule is passed to the pipeline.
    """
    # Negative seed means "pick one for me" — report the real seed back.
    if seed < 0:
        seed = random.randint(0, 2**32 - 1)

    generator = torch.Generator("cuda").manual_seed(int(seed))

    call_kwargs = dict(
        prompt=prompt,
        negative_prompt=negative_prompt,
        height=int(height),
        width=int(width),
        num_inference_steps=int(num_inference_steps),
        guidance_scale=guidance_scale,
        generator=generator,
    )

    if sigmas_factor != 1.0:
        # Linear schedule from 1.0 down to 1/steps, scaled by the factor.
        step_count = int(num_inference_steps)
        schedule = np.linspace(1.0, 1 / step_count, step_count) * sigmas_factor
        call_kwargs["sigmas"] = schedule.tolist()

    image = pipe(**call_kwargs).images[0]
    return image, seed
36
 
37
  iface = gr.Interface(
 
43
  gr.Slider(label="Width", minimum=256, maximum=2048, step=64, value=1024),
44
  gr.Slider(label="Number of Inference Steps", minimum=1, maximum=100, step=1, value=30),
45
  gr.Slider(label="Guidance Scale", minimum=1.0, maximum=20.0, step=0.1, value=3.5),
46
+ gr.Slider(label="Sigmas Factor", minimum=0.1, maximum=2.0, step=0.01, value=1.0),
47
  gr.Number(label="Seed (set to -1 for random)", value=-1, minimum=-1)
48
  ],
49
  outputs=[