# app.py (FINAL AND CORRECTED VERSION)
import spaces
import subprocess
import os
import torch
import mediapy
from einops import rearrange
from omegaconf import OmegaConf
import datetime
from tqdm import tqdm
import gc
import uuid
import mimetypes
import torchvision.transforms as T
from PIL import Image
from pathlib import Path
import gradio as gr

# --- SeedVR modules (now that they are in the environment, we can import them) ---
from data.image.transforms.divisible_crop import DivisibleCrop
from data.image.transforms.na_resize import NaResize
from data.video.transforms.rearrange import Rearrange
from projects.video_diffusion_sr.color_fix import wavelet_reconstruction
from torchvision.transforms import Compose, Lambda, Normalize
from torchvision.io.video import read_video
from common.distributed import init_torch
from common.distributed.advanced import init_sequence_parallel
from projects.video_diffusion_sr.infer import VideoDiffusionInfer
from common.config import load_config
from common.distributed.ops import sync_data
from common.seed import set_seed
from common.partition import partition_by_size

# --- ENVIRONMENT SETUP (REMOVED) ---
# REMOVED: flash-attn and apex installation is already done in the Dockerfile.
# REMOVED: the model checkpoints are already downloaded in the Dockerfile.
# REMOVED: torch.distributed configuration is handled in a simpler way.

# Check to make sure we are in the right directory
print(f"Current working directory: {os.getcwd()}")
if not os.path.exists('./projects'):
    print("WARNING: The script does not appear to be running from inside the /app/SeedVR directory. Check the WORKDIR in the Dockerfile.")

# Check whether color correction is available
use_colorfix = os.path.exists("./projects/video_diffusion_sr/color_fix.py")
if not use_colorfix:
    print('Warning: color correction (color_fix.py) is not available!')
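# Note (assumption, not verified against this repo): wavelet_reconstruction is a StableSR-style
# color fix that transfers the low-frequency color content of the input frames onto the restored
# frames, counteracting color drift introduced by the diffusion model.
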
def configure_sequence_parallel(sp_size):
    if sp_size > 1:
        init_sequence_parallel(sp_size)
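
# Note: the Gradio UI below only wires up (input_file, seed, fps), so generation_loop keeps its
# default sp_size=1 and sequence parallelism is effectively disabled in this demo.
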
# The @spaces.GPU decorator ensures the function runs on the GPU and manages the allocation duration
@spaces.GPU(duration=120)
def configure_runner(sp_size):
    config_path = os.path.join('./configs_3b', 'main.yaml')
    config = load_config(config_path)
    runner = VideoDiffusionInfer(config)
    OmegaConf.set_readonly(runner.config, False)
    # Initialize torch for a single process
    os.environ["MASTER_ADDR"] = "127.0.0.1"
    os.environ["MASTER_PORT"] = "12355"
    if "RANK" not in os.environ:
        os.environ["RANK"] = "0"
    if "WORLD_SIZE" not in os.environ:
        os.environ["WORLD_SIZE"] = "1"
    init_torch(cudnn_benchmark=False, timeout=datetime.timedelta(seconds=3600))
    configure_sequence_parallel(sp_size)
    # The checkpoints are in the ckpts directory, as downloaded by the Dockerfile
    runner.configure_dit_model(device="cuda", checkpoint='./ckpts/seedvr2_ema_3b.pth')
    runner.configure_vae_model(checkpoint_path='./ckpts/ema_vae.pth')
    if hasattr(runner.vae, "set_memory_limit"):
        runner.vae.set_memory_limit(**runner.config.vae.memory_limit)
    return runner

@spaces.GPU(duration=120)
def generation_step(runner, text_embeds_dict, cond_latents):
    def _move_to_cuda(x):
        return [i.to(torch.device("cuda")) for i in x]

    noises = [torch.randn_like(latent) for latent in cond_latents]
    aug_noises = [torch.randn_like(latent) for latent in cond_latents]
    noises, aug_noises, cond_latents = sync_data((noises, aug_noises, cond_latents), 0)
    noises, aug_noises, cond_latents = list(map(_move_to_cuda, (noises, aug_noises, cond_latents)))
    cond_noise_scale = 0.1

    def _add_noise(x, aug_noise):
        t = (torch.tensor([1000.0], device=torch.device("cuda")) * cond_noise_scale)
        shape = torch.tensor(x.shape[1:], device=torch.device("cuda"))[None]
        t = runner.timestep_transform(t, shape)
        x = runner.schedule.forward(x, aug_noise, t)
        return x

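    # Conditioning-noise arithmetic from the code above: cond_noise_scale = 0.1, so the raw
    # timestep is 1000.0 * 0.1 = 100 before timestep_transform rescales it for the latent shape;
    # the degraded latent is therefore only lightly noised before being used as the SR condition.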
    conditions = [
        runner.get_condition(noise, task="sr", latent_blur=_add_noise(latent_blur, aug_noise))
        for noise, aug_noise, latent_blur in zip(noises, aug_noises, cond_latents)
    ]
    with torch.no_grad(), torch.autocast("cuda", torch.bfloat16, enabled=True):
        video_tensors = runner.inference(noises=noises, conditions=conditions, dit_offload=False, **text_embeds_dict)
    samples = [rearrange(video, "c t h w -> t c h w") for video in video_tensors]
    del video_tensors
    return samples

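# generation_step returns frame-major ("t c h w") samples, which the post-processing in
# generation_loop below consumes frame by frame (color fix, clipping, writing).
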
@spaces.GPU(duration=120)
def generation_loop(video_path, seed, fps_out, batch_size=1, cfg_scale=1.0, cfg_rescale=0.0, sample_steps=1, res_h=720, res_w=1280, sp_size=1):
    # Gradio passes the path of the temporary uploaded file
    if video_path is None:
        raise gr.Error("Please upload a video or image file.")
    runner = configure_runner(sp_size)

    def _extract_text_embeds():
        text_pos_embeds = torch.load('pos_emb.pt')
        text_neg_embeds = torch.load('neg_emb.pt')
        return [{"texts_pos": [text_pos_embeds], "texts_neg": [text_neg_embeds]}]
    def cut_videos(videos, sp_size):
        if videos.size(1) > 121:
            videos = videos[:, :121]
        t = videos.size(1)
        if t <= 4 * sp_size:
            padding = torch.cat([videos[:, -1].unsqueeze(1)] * (4 * sp_size - t + 1), dim=1)
            return torch.cat([videos, padding], dim=1)
        if (t - 1) % (4 * sp_size) == 0:
            return videos
        padding = torch.cat([videos[:, -1].unsqueeze(1)] * (4 * sp_size - ((t - 1) % (4 * sp_size))), dim=1)
        return torch.cat([videos, padding], dim=1)
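
    # cut_videos pads (by repeating the last frame) to a length of the form 4 * sp_size * k + 1,
    # capped at 121 frames; with the default sp_size=1 that means lengths 5, 9, ..., 121.
    # Worked example: a 30-frame clip gives (30 - 1) % 4 = 1, so 3 padding frames are added -> 33.
    # (Assumption: the 4x factor matches the VAE's temporal compression.)
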
    runner.config.diffusion.cfg.scale = cfg_scale
    runner.config.diffusion.cfg.rescale = cfg_rescale
    runner.config.diffusion.timesteps.sampling.steps = sample_steps
    runner.configure_diffusion()
    # gr.Number may deliver the seed as a float, so cast before taking the modulus
    set_seed(int(seed) % (2**32), same_across_ranks=True)
    os.makedirs('output/', exist_ok=True)
    original_videos_local = [[os.path.basename(video_path)]]
    positive_prompts_embeds = _extract_text_embeds()

    video_transform = Compose([
        NaResize(resolution=(res_h * res_w) ** 0.5, mode="area", downsample_only=False),
        Lambda(lambda x: torch.clamp(x, 0.0, 1.0)),
        DivisibleCrop((16, 16)),
        Normalize(0.5, 0.5),
        Rearrange("t c h w -> c t h w"),
    ])
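    # With the defaults res_h=720, res_w=1280 the NaResize target is sqrt(720 * 1280) = 960;
    # assuming NaResize scales frames so that sqrt(H * W) hits that target, inputs end up at roughly
    # 0.92 megapixels at their original aspect ratio, cropped to multiples of 16, normalized from
    # [0, 1] to [-1, 1], and laid out as "c t h w".
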
    for videos, text_embeds in tqdm(zip(original_videos_local, positive_prompts_embeds)):
        cond_latents = []
        media_type, _ = mimetypes.guess_type(video_path)
        is_image = media_type and media_type.startswith("image")
        is_video = media_type and media_type.startswith("video")
        if is_video:
            video_frames = read_video(video_path, output_format="TCHW")[0] / 255.0
            if video_frames.size(0) > 121:
                video_frames = video_frames[:121]
            output_filename = str(uuid.uuid4()) + '.mp4'
        elif is_image:
            img = Image.open(video_path).convert("RGB")
            video_frames = T.ToTensor()(img).unsqueeze(0)
            output_filename = str(uuid.uuid4()) + '.png'
        else:
            raise gr.Error("Unsupported file format. Please use a video or an image.")
        output_dir = os.path.join('output', output_filename)
        cond_latents.append(video_transform(video_frames.to(torch.device("cuda"))))
        ori_lengths = [v.size(1) for v in cond_latents]
        input_videos = cond_latents
        if is_video:
            cond_latents = [cut_videos(v, sp_size) for v in cond_latents]
        cond_latents = runner.vae_encode(cond_latents)
        for i, emb in enumerate(text_embeds["texts_pos"]):
            text_embeds["texts_pos"][i] = emb.to(torch.device("cuda"))
        for i, emb in enumerate(text_embeds["texts_neg"]):
            text_embeds["texts_neg"][i] = emb.to(torch.device("cuda"))
        samples = generation_step(runner, text_embeds, cond_latents=cond_latents)
        del cond_latents
        for path, input_vid, sample, ori_length in zip(videos, input_videos, samples, ori_lengths):
            if ori_length < sample.shape[0]:
                sample = sample[:ori_length]
            input_vid = rearrange(input_vid, "c t h w -> t c h w")
            if use_colorfix:
                sample = wavelet_reconstruction(sample.cpu(), input_vid[:sample.size(0)].cpu())
            else:
                sample = sample.cpu()
            sample = rearrange(sample, "t c h w -> t h w c")
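            # Map values from [-1, 1] back to [0, 255]: x * 0.5 + 0.5, then * 255, rounded to uint8.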
            sample = sample.clip(-1, 1).mul_(0.5).add_(0.5).mul_(255).round().to(torch.uint8).numpy()
            if is_image:
                mediapy.write_image(output_dir, sample[0])
            else:
                mediapy.write_video(output_dir, sample, fps=fps_out)

    gc.collect()
    torch.cuda.empty_cache()

    # Return the values to the correct UI components
    if is_image:
        return output_dir, None, output_dir
    else:
        return None, output_dir, output_dir

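# generation_loop returns (image_path, video_path, download_path); the slot that does not apply
# to the uploaded media type is None, so only the matching output component is populated.
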
# --- Gradio Interface ---
with gr.Blocks(title="SeedVR2: One-Step Video Restoration") as demo:
    gr.HTML(...)  # Kept as in the original
    with gr.Row():
        # FIXED: gr.File changed to gr.Video, which passes a 'filepath' by default
        input_file = gr.Video(label="Upload image or video")
        seed = gr.Number(label="Seeds", value=666)
        fps = gr.Number(label="fps", value=24)
    with gr.Row():
        output_image = gr.Image(label="Output_Image")
        output_video = gr.Video(label="Output_Video")
        download_link = gr.File(label="Download the output")
    run_button = gr.Button("Run")
    run_button.click(
        fn=generation_loop,
        inputs=[input_file, seed, fps],
        outputs=[output_image, output_video, download_link]
    )
    gr.Examples(...)  # Kept as in the original
    gr.HTML(...)  # Kept as in the original

demo.queue(max_size=10)
demo.launch()
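
# Local-run sketch (assumption: a machine with its own GPU rather than Hugging Face Spaces):
# demo.launch(server_name="0.0.0.0", server_port=7860)  # would expose the demo on the local network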