Update app.py
app.py CHANGED
@@ -22,7 +22,7 @@ MAX_SEED = np.iinfo(np.int32).max
 pipe = FluxKontextPipeline.from_pretrained("black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16).to("cuda")
 optimize_pipeline_(pipe, image=Image.new("RGB", (512, 512)), prompt='prompt')
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=21)
 def infer(input_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5, steps=28, progress=gr.Progress(track_tqdm=True)):
     start_load = time.time()
     """
@@ -89,7 +89,7 @@ def infer(input_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5
     print(f"Time Elapsed: {td(seconds=int(time.time() - start_load))}")
     return image, seed, gr.Button(visible=True)
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=21)
 def infer_example(input_image, prompt):
     start_load = time.time()
     image, seed, _ = infer(input_image, prompt)
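For context, on a ZeroGPU Space the `duration` argument of `@spaces.GPU` declares roughly how many seconds the decorated call is expected to hold the GPU, and shorter declared durations are generally scheduled sooner, which is presumably why the value is lowered to 21 here. Below is a minimal sketch of the pattern under stated assumptions: the `quick_infer` name and its body are illustrative simplifications of the real `infer`/`infer_example` above, and the `diffusers` import path for the pipeline is assumed rather than taken from app.py.

import time
import torch
import spaces  # Hugging Face ZeroGPU helper used in app.py
from diffusers import FluxKontextPipeline  # assumed import path for the pipeline class above

# Pipeline construction mirrors the context lines in the diff.
pipe = FluxKontextPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16
).to("cuda")

@spaces.GPU(duration=21)  # request the ZeroGPU allocation for ~21 s per call
def quick_infer(input_image, prompt, guidance_scale=2.5, steps=28):
    # Illustrative body: run one Kontext edit and report wall-clock time,
    # echoing the timing prints in the real infer()/infer_example().
    start = time.time()
    image = pipe(
        image=input_image,
        prompt=prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=steps,
    ).images[0]
    print(f"Time Elapsed: {int(time.time() - start)}s")
    return image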