Spaces: envs (Runtime error)
app.py CHANGED
@@ -86,7 +86,6 @@ os.system(f'wget https://huggingface.co/TencentARC/ImageConductor/blob/main/imag
 os.system(f'wget https://huggingface.co/TencentARC/ImageConductor/blob/main/unet.ckpt -P models/')
 os.system(f'wget https://huggingface.co/TencentARC/ImageConductor/blob/main/helloobjects_V12c.safetensors -P models/personalized')
 os.system(f'wget https://huggingface.co/TencentARC/ImageConductor/blob/main/TUSUN.safetensors -P models/personalized')
-os.system(f'git clone https://huggingface.co/runwayml/stable-diffusion-v1-5')
 
 
 
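An aside on the wget lines kept by this hunk: on huggingface.co, /blob/main/... URLs point at the web view of a file, while the raw file is served from /resolve/main/..., so a download step usually targets the latter or goes through the hub client. A minimal sketch of the same downloads via huggingface_hub, with the repo ID and filenames taken from the URLs above and the target directories mirroring the -P arguments:

# Sketch: fetch the ImageConductor checkpoints with huggingface_hub instead of wget.
# Repo ID and filenames come from the URLs above; local_dir mirrors the -P targets.
from huggingface_hub import hf_hub_download

hf_hub_download(repo_id="TencentARC/ImageConductor", filename="unet.ckpt",
                local_dir="models")
for name in ("helloobjects_V12c.safetensors", "TUSUN.safetensors"):
    hf_hub_download(repo_id="TencentARC/ImageConductor", filename=name,
                    local_dir="models/personalized")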
@@ -191,11 +190,11 @@ def points_to_flows(track_points, model_length, height, width):
 class ImageConductor:
     def __init__(self, device, unet_path, image_controlnet_path, flow_controlnet_path, height, width, model_length, lora_rank=64):
         self.device = device
-        tokenizer = CLIPTokenizer.from_pretrained("stable-diffusion-v1-5", subfolder="tokenizer")
-        text_encoder = CLIPTextModel.from_pretrained("stable-diffusion-v1-5", subfolder="text_encoder").cuda()
-        vae = AutoencoderKL.from_pretrained("stable-diffusion-v1-5", subfolder="vae").cuda()
+        tokenizer = CLIPTokenizer.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="tokenizer")
+        text_encoder = CLIPTextModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="text_encoder").cuda()
+        vae = AutoencoderKL.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="vae").cuda()
         inference_config = OmegaConf.load("configs/inference/inference.yaml")
-        unet = UNet3DConditionFlowModel.from_pretrained_2d("stable-diffusion-v1-5", subfolder="unet", unet_additional_kwargs=OmegaConf.to_container(inference_config.unet_additional_kwargs))
+        unet = UNet3DConditionFlowModel.from_pretrained_2d("runwayml/stable-diffusion-v1-5", subfolder="unet", unet_additional_kwargs=OmegaConf.to_container(inference_config.unet_additional_kwargs))
 
         self.vae = vae
 
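With the local clone removed, the from_pretrained calls in the second hunk now receive "runwayml/stable-diffusion-v1-5" as a Hub repo ID rather than a local directory name. A standalone sketch of that loading pattern, assuming transformers and diffusers are installed (the project-specific UNet3DConditionFlowModel is omitted, and .cuda() is replaced by an explicit device check):

# Sketch: load Stable Diffusion v1-5 sub-modules directly from the Hub repo ID.
# The subfolder argument selects each component inside the multi-folder model repo.
import torch
from transformers import CLIPTokenizer, CLIPTextModel
from diffusers import AutoencoderKL

repo_id = "runwayml/stable-diffusion-v1-5"
device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = CLIPTokenizer.from_pretrained(repo_id, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(repo_id, subfolder="text_encoder").to(device)
vae = AutoencoderKL.from_pretrained(repo_id, subfolder="vae").to(device)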