Update app.py
app.py CHANGED
@@ -38,7 +38,8 @@ torch.backends.cudnn.benchmark = False
 
 hftoken = os.getenv("HF_TOKEN")
 
-image_encoder_path = "google/siglip-so400m-patch14-384"
+#image_encoder_path = "google/siglip-so400m-patch14-384"
+#image_encoder_path_b = "laion/CLIP-ViT-H-14-laion2B-s32B-b79K"
 ipadapter_path = hf_hub_download(repo_id="InstantX/SD3.5-Large-IP-Adapter", filename="ip-adapter.bin")
 model_path = 'ford442/stable-diffusion-3.5-large-bf16'
 
@@ -100,6 +101,7 @@ def infer(
     num_inference_steps,
     latent_file, # Add latents file input
     ip_scale,
+    image_encoder_path,
     progress=gr.Progress(track_tqdm=True),
 ):
     upscaler_2.to(torch.device('cpu'))
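The commit threads image_encoder_path into infer(), but this hunk does not show where the argument is consumed. A minimal sketch of one way the selected checkpoint could be loaded, assuming the transformers Auto* classes; encode_image_prompt is a hypothetical helper, not a function from app.py:

# Sketch only, not part of this commit. Both dropdown choices resolve via
# AutoModel (to SiglipModel and CLIPModel respectively), and both returned
# models expose get_image_features() for pooled image embeddings.
from transformers import AutoImageProcessor, AutoModel
import torch

def encode_image_prompt(image, image_encoder_path):
    processor = AutoImageProcessor.from_pretrained(image_encoder_path)
    model = AutoModel.from_pretrained(image_encoder_path).eval()
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        return model.get_image_features(**inputs)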
@@ -226,6 +228,10 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
         result = gr.Image(label="Result", show_label=False)
         with gr.Accordion("Advanced Settings", open=True):
             latent_file = gr.File(label="Image File (optional)") # Add latents file input
+            image_encoder_path = gr.Dropdown(
+                ["google/siglip-so400m-patch14-384", "laion/CLIP-ViT-H-14-laion2B-s32B-b79K"],
+                label="CLIP Model",
+            )
             ip_scale = gr.Slider(
                 label="Image Prompt Scale",
                 minimum=0.0,
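As added, the dropdown passes no value=, so it may start unselected and hand None to infer(). A sketch of the same component with an explicit default; value= is a real gr.Dropdown parameter, and the particular default chosen here is an assumption:

image_encoder_path = gr.Dropdown(
    ["google/siglip-so400m-patch14-384", "laion/CLIP-ViT-H-14-laion2B-s32B-b79K"],
    value="google/siglip-so400m-patch14-384",  # assumed default, matching the old hard-coded path
    label="CLIP Model",
)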
@@ -298,6 +304,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
                 num_inference_steps,
                 latent_file, # Add latent_file to the inputs
                 ip_scale,
+                image_encoder_path,
             ],
             outputs=[result, expanded_prompt_output],
         )