import os
import imageio
import numpy as np
from typing import Union

import cv2
import torch
import torchvision
import torch.distributed as dist

from safetensors import safe_open
from tqdm import tqdm
from einops import rearrange

from animatediff.utils.convert_from_ckpt import convert_ldm_unet_checkpoint, convert_ldm_clip_checkpoint, convert_ldm_vae_checkpoint
from animatediff.utils.convert_lora_safetensor_to_diffusers import convert_lora, load_diffusers_lora

def zero_rank_print(s):
    # Print only from the rank-0 process, or always when torch.distributed is not initialized.
    if (not dist.is_initialized()) or dist.get_rank() == 0:
        print("### " + s)

from typing import List

import PIL

def export_to_video(
    video_frames: Union[List[np.ndarray], List[PIL.Image.Image]], output_video_path: str = None, fps: int = 8
) -> str:
    # if output_video_path is None:
    #     output_video_path = tempfile.NamedTemporaryFile(suffix=".webm").name
    if isinstance(video_frames[0], PIL.Image.Image):
        video_frames = [np.array(frame) for frame in video_frames]
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    # fourcc = cv2.VideoWriter_fourcc(*'VP90')
    h, w, c = video_frames[0].shape
    video_writer = cv2.VideoWriter(output_video_path, fourcc, fps=fps, frameSize=(w, h))
    for i in range(len(video_frames)):
        # OpenCV expects BGR channel order, so convert from RGB before writing.
        img = cv2.cvtColor(video_frames[i], cv2.COLOR_RGB2BGR)
        video_writer.write(img)
    video_writer.release()
    return output_video_path

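# Illustrative use of export_to_video (a sketch, not part of the original module;
# the frame contents and output path are placeholders):
#     frames = [np.zeros((256, 256, 3), dtype=np.uint8) for _ in range(16)]
#     export_to_video(frames, output_video_path="sample.mp4", fps=8)
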
def save_videos_grid(videos: torch.Tensor, path: str, rescale=False, n_rows=6, fps=9):
    videos = rearrange(videos, "b c t h w -> t b c h w")
    outputs = []
    for x in videos:
        x = torchvision.utils.make_grid(x, nrow=n_rows)
        x = x.transpose(0, 1).transpose(1, 2).squeeze(-1)
        if rescale:
            x = (x + 1.0) / 2.0  # -1,1 -> 0,1
        x = (x * 255).numpy().astype(np.uint8)
        outputs.append(x)

    os.makedirs(os.path.dirname(path), exist_ok=True)
    # export_to_video(outputs, output_video_path=path, fps=fps)
    imageio.mimsave(path, outputs, fps=fps)

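# Illustrative use of save_videos_grid (a sketch; the tensor shape and path are placeholders):
#     videos = torch.rand(2, 3, 16, 256, 256)  # (batch, channels, frames, height, width) in [0, 1]
#     save_videos_grid(videos, "samples/grid.gif", fps=8)
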
# DDIM Inversion
def init_prompt(prompt, pipeline):
    # Encode the unconditional ("") and conditional prompts; callers split the result
    # back apart with context.chunk(2).
    uncond_input = pipeline.tokenizer(
        [""], padding="max_length", max_length=pipeline.tokenizer.model_max_length,
        return_tensors="pt"
    )
    uncond_embeddings = pipeline.text_encoder(uncond_input.input_ids.to(pipeline.device))[0]
    text_input = pipeline.tokenizer(
        [prompt],
        padding="max_length",
        max_length=pipeline.tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    text_embeddings = pipeline.text_encoder(text_input.input_ids.to(pipeline.device))[0]
    context = torch.cat([uncond_embeddings, text_embeddings])
    return context

def next_step(model_output: Union[torch.FloatTensor, np.ndarray], timestep: int,
              sample: Union[torch.FloatTensor, np.ndarray], ddim_scheduler):
    # One reversed DDIM step: map the sample at `timestep` to the next (noisier) timestep.
    timestep, next_timestep = min(
        timestep - ddim_scheduler.config.num_train_timesteps // ddim_scheduler.num_inference_steps, 999), timestep
    alpha_prod_t = ddim_scheduler.alphas_cumprod[timestep] if timestep >= 0 else ddim_scheduler.final_alpha_cumprod
    alpha_prod_t_next = ddim_scheduler.alphas_cumprod[next_timestep]
    beta_prod_t = 1 - alpha_prod_t
    # Predicted x_0: (x_t - sqrt(1 - alpha_t) * eps) / sqrt(alpha_t)
    next_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    # Re-noise towards the next timestep: sqrt(alpha_next) * x_0 + sqrt(1 - alpha_next) * eps
    next_sample_direction = (1 - alpha_prod_t_next) ** 0.5 * model_output
    next_sample = alpha_prod_t_next ** 0.5 * next_original_sample + next_sample_direction
    return next_sample

def get_noise_pred_single(latents, t, context, unet):
    noise_pred = unet(latents, t, encoder_hidden_states=context)["sample"]
    return noise_pred

def ddim_loop(pipeline, ddim_scheduler, latent, num_inv_steps, prompt):
    context = init_prompt(prompt, pipeline)
    # Only the conditional embeddings are used for inversion; uncond_embeddings is unused here.
    uncond_embeddings, cond_embeddings = context.chunk(2)
    all_latent = [latent]
    latent = latent.clone().detach()
    for i in tqdm(range(num_inv_steps)):
        # Walk the scheduler timesteps in reverse order (clean -> noisy).
        t = ddim_scheduler.timesteps[len(ddim_scheduler.timesteps) - i - 1]
        noise_pred = get_noise_pred_single(latent, t, cond_embeddings, pipeline.unet)
        latent = next_step(noise_pred, t, latent, ddim_scheduler)
        all_latent.append(latent)
    return all_latent

def ddim_inversion(pipeline, ddim_scheduler, video_latent, num_inv_steps, prompt=""):
    ddim_latents = ddim_loop(pipeline, ddim_scheduler, video_latent, num_inv_steps, prompt)
    return ddim_latents

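# Illustrative DDIM-inversion call (a sketch; `pipeline`, `ddim_scheduler` and `video_latent`
# are assumed to come from an AnimateDiff-style setup and are not defined in this module;
# the scheduler is assumed to have its timesteps set before the call):
#     ddim_scheduler.set_timesteps(50)
#     inv_latents = ddim_inversion(pipeline, ddim_scheduler, video_latent, num_inv_steps=50, prompt="")
#     noisy_latent = inv_latents[-1]  # most-noised latent, usable as a starting point for sampling
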
def load_weights(
    animation_pipeline,
    # motion module
    motion_module_path = "",
    motion_module_lora_configs = [],
    # domain adapter
    adapter_lora_path = "",
    adapter_lora_scale = 1.0,
    # image layers
    dreambooth_model_path = "",
    lora_model_path = "",
    lora_alpha = 0.8,
):
    # motion module
    unet_state_dict = {}
    if motion_module_path != "":
        print(f"load motion module from {motion_module_path}")
        motion_module_state_dict = torch.load(motion_module_path, map_location="cpu")
        motion_module_state_dict = motion_module_state_dict["state_dict"] if "state_dict" in motion_module_state_dict else motion_module_state_dict
        unet_state_dict.update({name: param for name, param in motion_module_state_dict.items() if "motion_modules." in name})
        unet_state_dict.pop("animatediff_config", "")

    missing, unexpected = animation_pipeline.unet.load_state_dict(unet_state_dict, strict=False)
    print("motion_module missing:", len(missing))
    print("motion_module unexpected:", len(unexpected))
    assert len(unexpected) == 0
    del unet_state_dict

    # base model
    # if dreambooth_model_path != "":
    #     print(f"load dreambooth model from {dreambooth_model_path}")
    #     # if dreambooth_model_path.endswith(".safetensors"):
    #     #     dreambooth_state_dict = {}
    #     #     with safe_open(dreambooth_model_path, framework="pt", device="cpu") as f:
    #     #         for key in f.keys():
    #     #             dreambooth_state_dict[key] = f.get_tensor(key)
    #     # elif dreambooth_model_path.endswith(".ckpt"):
    #     #     dreambooth_state_dict = torch.load(dreambooth_model_path, map_location="cpu")
    #     # # 1. vae
    #     # converted_vae_checkpoint = convert_ldm_vae_checkpoint(dreambooth_state_dict, animation_pipeline.vae.config)
    #     # animation_pipeline.vae.load_state_dict(converted_vae_checkpoint)
    #     # # 2. unet
    #     # converted_unet_checkpoint = convert_ldm_unet_checkpoint(dreambooth_state_dict, animation_pipeline.unet.config)
    #     # animation_pipeline.unet.load_state_dict(converted_unet_checkpoint, strict=False)
    #     # # 3. text_model
    #     # animation_pipeline.text_encoder = convert_ldm_clip_checkpoint(dreambooth_state_dict)
    #     # del dreambooth_state_dict
    #
    #     dreambooth_state_dict = {}
    #     with safe_open(dreambooth_model_path, framework="pt", device="cpu") as f:
    #         for key in f.keys():
    #             dreambooth_state_dict[key] = f.get_tensor(key)
    #
    #     converted_vae_checkpoint = convert_ldm_vae_checkpoint(dreambooth_state_dict, animation_pipeline.vae.config)
    #     # print(vae)
    #     # vae -> to_q, to_k, to_v
    #     # print(converted_vae_checkpoint)
    #     convert_vae_keys = list(converted_vae_checkpoint.keys())
    #     for key in convert_vae_keys:
    #         if "encoder.mid_block.attentions" in key or "decoder.mid_block.attentions" in key:
    #             new_key = None
    #             if "key" in key:
    #                 new_key = key.replace("key", "to_k")
    #             elif "query" in key:
    #                 new_key = key.replace("query", "to_q")
    #             elif "value" in key:
    #                 new_key = key.replace("value", "to_v")
    #             elif "proj_attn" in key:
    #                 new_key = key.replace("proj_attn", "to_out.0")
    #             if new_key:
    #                 converted_vae_checkpoint[new_key] = converted_vae_checkpoint.pop(key)
    #     animation_pipeline.vae.load_state_dict(converted_vae_checkpoint)
    #
    #     converted_unet_checkpoint = convert_ldm_unet_checkpoint(dreambooth_state_dict, animation_pipeline.unet.config)
    #     animation_pipeline.unet.load_state_dict(converted_unet_checkpoint, strict=False)
    #
    #     animation_pipeline.text_encoder = convert_ldm_clip_checkpoint(dreambooth_state_dict)
    #     del dreambooth_state_dict

    # lora layers
    if lora_model_path != "":
        print(f"load lora model from {lora_model_path}")
        assert lora_model_path.endswith(".safetensors")
        lora_state_dict = {}
        with safe_open(lora_model_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                lora_state_dict[key] = f.get_tensor(key)

        animation_pipeline = convert_lora(animation_pipeline, lora_state_dict, alpha=lora_alpha)
        del lora_state_dict

    # domain adapter lora
    if adapter_lora_path != "":
        print(f"load domain lora from {adapter_lora_path}")
        domain_lora_state_dict = torch.load(adapter_lora_path, map_location="cpu")
        domain_lora_state_dict = domain_lora_state_dict["state_dict"] if "state_dict" in domain_lora_state_dict else domain_lora_state_dict
        domain_lora_state_dict.pop("animatediff_config", "")

        animation_pipeline = load_diffusers_lora(animation_pipeline, domain_lora_state_dict, alpha=adapter_lora_scale)

    # motion module lora
    for motion_module_lora_config in motion_module_lora_configs:
        path, alpha = motion_module_lora_config["path"], motion_module_lora_config["alpha"]
        print(f"load motion LoRA from {path}")
        motion_lora_state_dict = torch.load(path, map_location="cpu")
        motion_lora_state_dict = motion_lora_state_dict["state_dict"] if "state_dict" in motion_lora_state_dict else motion_lora_state_dict
        motion_lora_state_dict.pop("animatediff_config", "")

        animation_pipeline = load_diffusers_lora(animation_pipeline, motion_lora_state_dict, alpha)

    return animation_pipeline
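
# Illustrative load_weights call (a sketch; the checkpoint paths below are placeholders
# and must point to real files for the call to succeed):
#     pipeline = load_weights(
#         pipeline,
#         motion_module_path="path/to/motion_module.ckpt",
#         motion_module_lora_configs=[],
#         adapter_lora_path="",
#         dreambooth_model_path="",
#         lora_model_path="",
#         lora_alpha=0.8,
#     )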