fixing bug
app.py CHANGED
@@ -43,13 +43,13 @@ n_sample_image = 1
 
 controlnet_path = OrderedDict([
     ['canny'          , ('canny'   , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_sd15_canny_slimmed.safetensors'))],
-    ['canny_v11p'     , ('canny'   , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_v11p_sd15_canny_slimmed.safetensors'))],
+    # ['canny_v11p'   , ('canny'   , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_v11p_sd15_canny_slimmed.safetensors'))],
     ['depth'          , ('depth'   , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_sd15_depth_slimmed.safetensors'))],
-    ['hed'            , ('hed'     , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_sd15_hed_slimmed.safetensors'))],
+    # ['hed'          , ('hed'     , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_sd15_hed_slimmed.safetensors'))],
     ['mlsd'           , ('mlsd'    , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_sd15_mlsd_slimmed.safetensors'))],
-    ['mlsd_v11p'      , ('mlsd'    , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_v11p_sd15_mlsd_slimmed.safetensors'))],
-    ['normal'         , ('normal'  , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_sd15_normal_slimmed.safetensors'))],
-    ['openpose'       , ('openpose', hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_sd15_openpose_slimmed.safetensors'))],
+    # ['mlsd_v11p'    , ('mlsd'    , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_v11p_sd15_mlsd_slimmed.safetensors'))],
+    # ['normal'       , ('normal'  , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_sd15_normal_slimmed.safetensors'))],
+    # ['openpose'     , ('openpose', hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_sd15_openpose_slimmed.safetensors'))],
     ['openpose_v11p'  , ('openpose', hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_v11p_sd15_openpose_slimmed.safetensors'))],
     ['scribble'       , ('scribble', hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_sd15_scribble_slimmed.safetensors'))],
     ['softedge_v11p'  , ('scribble', hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_v11p_sd15_softedge_slimmed.safetensors'))],
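
Note: the lines struck out above are module-level hf_hub_download calls, so commenting them out means those ControlNet checkpoints are no longer fetched when app.py is imported. A minimal sketch of a lazier alternative, assuming the rest of app.py stays unchanged (deferred_controlnet_path and resolve_controlnet are illustrative names, not part of this repo):

from collections import OrderedDict
from huggingface_hub import hf_hub_download

# Store repo-relative filenames only; download (or hit the local cache)
# the first time a given ControlNet is actually selected.
deferred_controlnet_path = OrderedDict([
    ['canny', ('canny', 'pretrained/controlnet/control_sd15_canny_slimmed.safetensors')],
    ['depth', ('depth', 'pretrained/controlnet/control_sd15_depth_slimmed.safetensors')],
])

def resolve_controlnet(tag):
    ctype, filename = deferred_controlnet_path[tag]
    return ctype, hf_hub_download('shi-labs/prompt-free-diffusion', filename)
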
@@ -59,15 +59,15 @@ controlnet_path = OrderedDict([
 ])
 
 preprocess_method = [
-    'canny'                ,
-    'depth'                ,
-    'hed'                  ,
-    'mlsd'                 ,
-    'normal'               ,
-    'openpose'             ,
-    'openpose_withface'    ,
-    'openpose_withfacehand',
-    'scribble'             ,
+    # 'canny'                ,
+    # 'depth'                ,
+    # 'hed'                  ,
+    # 'mlsd'                 ,
+    # 'normal'               ,
+    # 'openpose'             ,
+    # 'openpose_withface'    ,
+    # 'openpose_withfacehand',
+    # 'scribble'             ,
     'none'                 ,
 ]
 
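
Note: with every preprocessor except 'none' commented out, the dropdown built from preprocess_method only offers the pass-through option. A hedged sketch of a guard the run path could use, where resolve_preprocess is a hypothetical helper and not a function in app.py:

def resolve_preprocess(method, available=frozenset(['none'])):
    # In app.py, `available` would be frozenset(preprocess_method).
    # Fall back to 'none' whenever the requested preprocessor is unavailable.
    return method if method in available else 'none'

assert resolve_preprocess('canny') == 'none'
assert resolve_preprocess('none') == 'none'
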
@@ -146,7 +146,7 @@ class prompt_free_diffusion(object):
         self.net = get_model()(cfgm)
         sdvae = hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/pfd/vae/sd-v2-0-base-autokl.pth')
         sdvae = torch.load(sdvae)
-        self.net.vae.load_state_dict(sdvae)
+        self.net.vae['image'].load_state_dict(sdvae)
 
         self.action_load_ctx(tag_ctx)
         self.action_load_diffuser(tag_diffuser)
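
Note: this one-line change is the actual bug fix. Loading the SD-2.0 AutoKL weights into self.net.vae['image'] instead of self.net.vae suggests that vae is a modality-keyed container rather than a single module. A minimal sketch of why the key layout matters, assuming an nn.ModuleDict-like container (TinyAutoKL is illustrative only):

import torch.nn as nn

class TinyAutoKL(nn.Module):
    # Stand-in for the real autoencoder; only here to produce a state dict.
    def __init__(self):
        super().__init__()
        self.encoder = nn.Linear(4, 4)

vae = nn.ModuleDict({'image': TinyAutoKL()})
checkpoint = TinyAutoKL().state_dict()      # keys look like 'encoder.weight'

# vae.load_state_dict(checkpoint)           # fails: the container expects 'image.encoder.*' keys
vae['image'].load_state_dict(checkpoint)    # key layout matches the checkpoint
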
@@ -414,9 +414,9 @@ def interface():
                 button = gr.Button("Run")
             with gr.Column():
                 ctl_input = gr.Image(label='Control Input', type='pil', elem_id='customized_imbox')
-                do_preprocess = gr.Checkbox(label='Preprocess', value=False)
+                do_preprocess = gr.Checkbox(label='Preprocess (Disabled)', value=False)
                 with gr.Row():
-                    ctl_method = gr.Dropdown(label='Preprocess Type', choices=preprocess_method, value='canny')
+                    ctl_method = gr.Dropdown(label='Preprocess Type (Fixed to none)', choices=preprocess_method, value='canny')
                     tag_ctl = gr.Dropdown(label='ControlNet', choices=[pi for pi in controlnet_path.keys()], value='canny')
             with gr.Column():
                 img_output = gr.Gallery(label="Image Result", elem_id='customized_imbox').style(grid=n_sample_image+1)
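
Note: the relabeled controls tell users that preprocessing is switched off while the components themselves stay in the layout. As a sketch only (not part of this commit), Gradio's interactive flag could additionally grey the two controls out:

import gradio as gr

with gr.Blocks() as demo:
    do_preprocess = gr.Checkbox(label='Preprocess (Disabled)', value=False,
                                interactive=False)
    ctl_method = gr.Dropdown(label='Preprocess Type (Fixed to none)',
                             choices=['none'], value='none', interactive=False)
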