Update api/seedvr_server.py

api/seedvr_server.py (+122 -131)
@@ -9,46 +9,50 @@ import multiprocessing as mp
 from pathlib import Path
 from typing import Optional, Callable

+# --- 1. Import dos Módulos Compartilhados ---
+# É crucial que estes imports venham antes dos imports pesados (torch, etc.)
+# para que o ambiente de multiprocessing seja configurado corretamente.
+try:
+    # Importa o gerenciador de GPUs que centraliza a lógica de alocação
+    from api.gpu_manager import gpu_manager
+    # Importa o serviço do LTX para podermos comandá-lo a liberar a VRAM
+    from api.ltx_server_refactored import video_generation_service
+except ImportError:
+    print("ERRO FATAL: Não foi possível importar `gpu_manager` ou `video_generation_service`.")
+    print("Certifique-se de que os arquivos `gpu_manager.py` e `ltx_server_refactored.py` existem em `api/`.")
+    sys.exit(1)
+
+# --- 2. Configuração de Ambiente e CUDA ---
 if mp.get_start_method(allow_none=True) != 'spawn':
     mp.set_start_method('spawn', force=True)

-# Configuração de alocação de memória da VRAM
 os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "backend:cudaMallocAsync")

-# Adiciona
+# Adiciona o caminho do repositório SeedVR
 SEEDVR_REPO_PATH = Path(os.getenv("SEEDVR_ROOT", "/data/SeedVR"))
 if str(SEEDVR_REPO_PATH) not in sys.path:
     sys.path.insert(0, str(SEEDVR_REPO_PATH))

+# Imports pesados
 import torch
 import cv2
 import numpy as np
 from datetime import datetime

+# --- 3. Funções Auxiliares de Processamento (Workers e I/O) ---
+# (Estas funções não precisam de alteração)

 def extract_frames_from_video(video_path, debug=False, skip_first_frames=0, load_cap=None):
-    if debug: print(f"🎬 Extraindo frames de: {video_path}")
+    if debug: print(f"🎬 [SeedVR] Extraindo frames de: {video_path}")
     if not os.path.exists(video_path): raise FileNotFoundError(f"Arquivo de vídeo não encontrado: {video_path}")
     cap = cv2.VideoCapture(video_path)
-    if not cap.isOpened(): raise ValueError(f"Não foi possível abrir o
+    if not cap.isOpened(): raise ValueError(f"Não foi possível abrir o vídeo: {video_path}")

     fps = cap.get(cv2.CAP_PROP_FPS)
     frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
-    if debug: print(f"📊 Info do vídeo: {frame_count} frames, {fps:.2f} FPS")
     frames = []
     frames_loaded = 0
     for i in range(frame_count):
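The new try-import block at the top of this hunk assumes an `api/gpu_manager.py` module that centralizes device allocation; that file is not part of this commit. A minimal sketch of the interface this server relies on (`get_seedvr_devices`, `requires_memory_swap`, `get_ltx_device`) might look like the following — the names come from the calls below, but the allocation policy is an assumption, not the actual implementation:

# Hypothetical sketch of api/gpu_manager.py -- NOT part of this commit.
# The real module may allocate devices differently.
import torch

class GPUManager:
    def __init__(self):
        self.total = torch.cuda.device_count()

    def get_seedvr_devices(self):
        # Assumption: SeedVR gets every GPU except the one reserved for LTX.
        return list(range(1, self.total)) if self.total > 1 else [0]

    def get_ltx_device(self):
        # Assumption: LTX lives on the first GPU.
        return "cuda:0"

    def requires_memory_swap(self):
        # Swap is only needed when both services share a single GPU.
        return self.total <= 1

gpu_manager = GPUManager()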
@@ -56,32 +60,26 @@ def extract_frames_from_video(video_path, debug=False, skip_first_frames=0, load
         if not ret: break
         if i < skip_first_frames: continue
         if load_cap and frames_loaded >= load_cap: break
         frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
         frames.append(frame.astype(np.float32) / 255.0)
         frames_loaded += 1
     cap.release()
-    if debug: print(f"✅ {len(frames)} frames extraídos com sucesso.")
+    if not frames: raise ValueError(f"Nenhum frame extraído de: {video_path}")
+    if debug: print(f"✅ [SeedVR] {len(frames)} frames extraídos com sucesso.")
     return torch.from_numpy(np.stack(frames)).to(torch.float16), fps

 def save_frames_to_video(frames_tensor, output_path, fps=30.0, debug=False):
-    if debug: print(f"🎬 Salvando {frames_tensor.shape[0]} frames em: {output_path}")
+    if debug: print(f"💾 [SeedVR] Salvando {frames_tensor.shape[0]} frames em: {output_path}")
     os.makedirs(os.path.dirname(output_path), exist_ok=True)
     frames_np = (frames_tensor.cpu().numpy() * 255.0).astype(np.uint8)
     T, H, W, _ = frames_np.shape
     fourcc = cv2.VideoWriter_fourcc(*'mp4v')
     out = cv2.VideoWriter(output_path, fourcc, fps, (W, H))
-    if not out.isOpened(): raise ValueError(f"Não foi possível criar o
+    if not out.isOpened(): raise ValueError(f"Não foi possível criar o vídeo: {output_path}")
     for frame in frames_np:
         out.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
     out.release()
-    if debug: print(f"✅ Vídeo salvo com sucesso: {output_path}")
+    if debug: print(f"✅ [SeedVR] Vídeo salvo com sucesso: {output_path}")

 def _worker_process(proc_idx, device_id, frames_np, shared_args, return_queue, progress_queue=None):
     """Processo filho (worker) que executa o upscaling em uma GPU dedicada."""
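Taken together, the two helpers above round-trip a clip through a (T, H, W, C) float16 tensor in [0, 1]. A small usage sketch, assuming a local `input.mp4` exists (file names here are illustrative):

# Round-trip sketch: decode, inspect, re-encode.
frames, fps = extract_frames_from_video("input.mp4", debug=True, load_cap=48)
print(frames.shape, frames.dtype)  # e.g. torch.Size([48, 720, 1280, 3]) torch.float16
save_frames_to_video(frames, "/tmp/out/roundtrip.mp4", fps=fps, debug=True)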
@@ -94,7 +92,6 @@ def _worker_process(proc_idx, device_id, frames_np, shared_args, return_queue, p

     try:
         frames_tensor = torch.from_numpy(frames_np).to(torch.float16)
-
         callback = (lambda b, t, _, m: progress_queue.put((proc_idx, b, t, m))) if progress_queue else None

         runner = configure_runner(shared_args["model"], shared_args["model_dir"], shared_args["preserve_vram"], shared_args["debug"])
@@ -107,14 +104,12 @@ def _worker_process(proc_idx, device_id, frames_np, shared_args, return_queue, p
         return_queue.put((proc_idx, result_tensor.cpu().numpy()))
     except Exception as e:
         import traceback
-        error_msg = f"ERRO no worker {proc_idx}: {e}\n{traceback.format_exc()}"
+        error_msg = f"ERRO no worker {proc_idx} (GPU {device_id}): {e}\n{traceback.format_exc()}"
         print(error_msg)
         if progress_queue: progress_queue.put((proc_idx, -1, -1, error_msg))
         return_queue.put((proc_idx, error_msg))

-#
-# 3. CLASSE DO SERVIDOR PRINCIPAL
-# -------------------------------------------------------------
+# --- 4. CLASSE DO SERVIDOR PRINCIPAL ---

 class SeedVRServer:
     def __init__(self, **kwargs):
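The worker above reports progress as `(proc_idx, batch_idx, batch_total, message)` tuples and signals failure with `batch_idx == -1`; results come back on `return_queue` as `(proc_idx, ndarray)` on success or `(proc_idx, str)` on error. A minimal, standalone consumer of that progress protocol (a sketch, independent of the server class below):

import queue

def drain_progress(progress_queue, num_workers):
    # Consume pending progress tuples without blocking.
    done = [0.0] * num_workers
    while True:
        try:
            p_idx, b_idx, b_total, msg = progress_queue.get_nowait()
        except queue.Empty:
            break
        if b_idx == -1:
            # Workers use batch_idx == -1 as the error sentinel.
            raise RuntimeError(f"worker {p_idx} failed: {msg}")
        if b_total > 0:
            done[p_idx] = b_idx / b_total
    return sum(done) / num_workers  # overall fraction in [0, 1]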
@@ -122,13 +117,16 @@ class SeedVRServer:
         print("⚙️ SeedVRServer inicializando...")
         self.SEEDVR_ROOT = SEEDVR_REPO_PATH
         self.CKPTS_ROOT = Path("/data/seedvr_models_fp16")
-        self.OUTPUT_ROOT = Path(os.getenv("OUTPUT_ROOT", "/app/
-        self.INPUT_ROOT = Path(os.getenv("INPUT_ROOT", "/app/inputs"))
+        self.OUTPUT_ROOT = Path(os.getenv("OUTPUT_ROOT", "/app/output"))
         self.HF_HOME_CACHE = Path(os.getenv("HF_HOME", "/data/.cache/huggingface"))
         self.REPO_URL = os.getenv("SEEDVR_GIT_URL", "https://github.com/numz/ComfyUI-SeedVR2_VideoUpscaler")
+
+        # OBTÉM AS GPUS ALOCADAS PELO GERENCIADOR CENTRAL
+        self.device_list = gpu_manager.get_seedvr_devices()
+        self.num_gpus = len(self.device_list)
+        print(f"[SeedVR] Alocado para usar {self.num_gpus} GPU(s): {self.device_list}")

-        for p in [self.CKPTS_ROOT, self.OUTPUT_ROOT, self.
+        for p in [self.CKPTS_ROOT, self.OUTPUT_ROOT, self.HF_HOME_CACHE]:
             p.mkdir(parents=True, exist_ok=True)

         self.setup_dependencies()
@@ -136,15 +134,10 @@ class SeedVRServer:

     def setup_dependencies(self):
         """Garante que o repositório e os modelos estão presentes."""
-        # Clona o repositório do SeedVR se não existir
         if not (self.SEEDVR_ROOT / ".git").exists():
-            print(f"[
+            print(f"[SeedVR] Clonando repositório para {self.SEEDVR_ROOT}...")
             subprocess.run(["git", "clone", "--depth", "1", self.REPO_URL, str(self.SEEDVR_ROOT)], check=True)
-        else:
-            print("[SeedVRServer] Repositório SeedVR já existe.")

-        # Baixa os checkpoints do Hugging Face se não existirem
-        print(f"[SeedVRServer] Verificando checkpoints em {self.CKPTS_ROOT}...")
         model_files = {
             "seedvr2_ema_7b_sharp_fp16.safetensors": "MonsterMMORPG/SeedVR2_SECourses",
             "ema_vae_fp16.safetensors": "MonsterMMORPG/SeedVR2_SECourses"
@@ -157,7 +150,7 @@ class SeedVRServer:
                 repo_id=repo_id, filename=filename, local_dir=str(self.CKPTS_ROOT),
                 cache_dir=str(self.HF_HOME_CACHE), token=os.getenv("HF_TOKEN")
             )
-        print("[
+        print("[SeedVR] Checkpoints verificados.")

     def run_inference(
         self,
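The loop body between hunks (new lines 144-149) is not shown in this diff. The visible keyword arguments (`repo_id`, `filename`, `local_dir`, `cache_dir`, `token`) are consistent with `huggingface_hub.hf_hub_download`; a sketch of what the elided section likely does, under that assumption:

# Sketch of the elided download loop -- the real code may differ.
# Assumes the huggingface_hub package is installed.
from huggingface_hub import hf_hub_download

for filename, repo_id in model_files.items():
    if not (self.CKPTS_ROOT / filename).exists():
        print(f"[SeedVR] Baixando {filename}...")  # hypothetical log line
        hf_hub_download(
            repo_id=repo_id, filename=filename, local_dir=str(self.CKPTS_ROOT),
            cache_dir=str(self.HF_HOME_CACHE), token=os.getenv("HF_TOKEN")
        )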
@@ -172,106 +165,104 @@ class SeedVRServer:
         progress: Optional[Callable] = None
     ) -> str:
         """
-        Executa o pipeline completo de upscaling de vídeo
+        Executa o pipeline completo de upscaling de vídeo, gerenciando a memória da GPU.
         """
-        if progress: progress(0.01, "⌛ Inicializando...")
+        if progress: progress(0.01, "⌛ Inicializando inferência SeedVR...")

+        # --- NÓ 1: GERENCIAMENTO DE MEMÓRIA (SWAP) ---
+        if gpu_manager.requires_memory_swap():
+            print("[SWAP] SeedVR precisa da GPU. Movendo LTX para a CPU...")
+            if progress: progress(0.02, "🔄 Liberando VRAM para o SeedVR...")
+            video_generation_service.move_to_cpu()
+            print("[SWAP] LTX movido para a CPU. VRAM liberada.")
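`move_to_cpu` and `move_to_device` belong to `video_generation_service` in `ltx_server_refactored.py`, which is not in this diff. Conceptually they relocate the pipeline's weights and flush the CUDA allocator; a hedged, hypothetical sketch of their shape:

# Hypothetical shape of the LTX service's swap methods -- NOT in this commit.
import torch

class VideoGenerationService:
    def __init__(self, pipeline, device="cuda:0"):
        self.pipeline = pipeline  # assumed to be an nn.Module-like object with .to()
        self.device = device

    def move_to_cpu(self):
        # Push weights to system RAM and release cached VRAM blocks.
        self.pipeline.to("cpu")
        torch.cuda.empty_cache()

    def move_to_device(self, device):
        self.device = device
        self.pipeline.to(device)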
-        # --- 1. Extração de Frames ---
-        if progress: progress(0.05, "🎬 Extraindo frames do vídeo...")
-        frames_tensor, original_fps = extract_frames_from_video(file_path, debug)
-        return_queue = manager.Queue()
-        progress_queue = manager.Queue() if progress else None
-        shared_args = {
-            "model": model, "model_dir": str(self.CKPTS_ROOT), "preserve_vram": preserve_vram,
-            "debug": debug, "seed": seed, "resolution": resolution, "batch_size": batch_size
-        }
+        try:
+            # --- NÓ 2: EXTRAÇÃO DE FRAMES ---
+            if progress: progress(0.05, "🎬 Extraindo frames do vídeo...")
+            frames_tensor, original_fps = extract_frames_from_video(file_path, debug)
+
+            # --- NÓ 3: DIVISÃO PARA MULTI-GPU ---
+            if self.num_gpus == 0:
+                raise RuntimeError("SeedVR requer pelo menos 1 GPU alocada, mas não encontrou nenhuma.")
+
+            print(f"[SeedVR] Dividindo {frames_tensor.shape[0]} frames em {self.num_gpus} chunks para processamento paralelo.")
+            chunks = torch.chunk(frames_tensor, self.num_gpus, dim=0)
+
+            manager = mp.Manager()
+            return_queue = manager.Queue()
+            progress_queue = manager.Queue() if progress else None
+
+            shared_args = {
+                "model": model, "model_dir": str(self.CKPTS_ROOT), "preserve_vram": preserve_vram,
+                "debug": debug, "seed": seed, "resolution": resolution, "batch_size": batch_size
+            }
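Note that `torch.chunk(frames_tensor, self.num_gpus, dim=0)` may return uneven chunks, and for very short inputs it can return fewer chunks than requested — the worker loop below indexes `chunks[idx]` per device, so the code implicitly assumes the frame count is at least the GPU count. A worked example of the chunking semantics:

import torch

t = torch.arange(10)
print([c.tolist() for c in torch.chunk(t, 3)])
# [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]] -- earlier chunks are larger
print(len(torch.chunk(torch.arange(2), 3)))
# 2 -- fewer chunks than requested when the tensor is too small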
-            while not progress_queue.empty():
-                try:
-                    p_idx, b_idx, b_total, msg = progress_queue.get_nowait()
-                    if b_idx == -1: raise RuntimeError(f"Erro no Worker {p_idx}: {msg}")
-                    if b_total > 0: worker_progress[p_idx] = b_idx / b_total
-                    total_progress = sum(worker_progress) / num_devices
-                    progress(0.1 + total_progress * 0.85, desc=f"GPU {p_idx+1}/{num_devices}: {msg}")
-                except queue.Empty: pass
-            results_np[proc_idx] = result
-            worker_progress[proc_idx] = 1.0
-            finished_workers += 1
-        except queue.Empty: pass
+            # --- NÓ 4: INÍCIO DOS WORKERS ---
+            if progress: progress(0.1, f"🚀 Iniciando geração em {self.num_gpus} GPU(s)...")
+            workers = []
+            for idx, device_id in enumerate(self.device_list):
+                p = mp.Process(target=_worker_process, args=(idx, device_id, chunks[idx].cpu().numpy(), shared_args, return_queue, progress_queue))
+                p.start()
+                workers.append(p)
+
+            # --- NÓ 5: COLETA DE RESULTADOS E MONITORAMENTO ---
+            results_np = [None] * self.num_gpus
+            finished_workers = 0
+            worker_progress = [0.0] * self.num_gpus
+            while finished_workers < self.num_gpus:
+                if progress_queue:
+                    while not progress_queue.empty():
+                        try:
+                            p_idx, b_idx, b_total, msg = progress_queue.get_nowait()
+                            if b_idx == -1: raise RuntimeError(f"Erro no Worker {p_idx}: {msg}")
+                            if b_total > 0: worker_progress[p_idx] = b_idx / b_total
+                            total_progress = sum(worker_progress) / self.num_gpus
+                            progress(0.1 + total_progress * 0.85, desc=f"GPU {p_idx+1}/{self.num_gpus}: {msg}")
+                        except queue.Empty: pass
+
+                try:
+                    proc_idx, result = return_queue.get(timeout=0.2)
+                    if isinstance(result, str): raise RuntimeError(f"Worker {proc_idx} falhou: {result}")
+                    results_np[proc_idx] = result
+                    worker_progress[proc_idx] = 1.0
+                    finished_workers += 1
+                except queue.Empty: pass

+            for p in workers: p.join()
-        raise RuntimeError("Um ou mais workers falharam ao retornar um resultado.")
+            # --- NÓ 6: FINALIZAÇÃO ---
+            if any(r is None for r in results_np):
+                raise RuntimeError("Um ou mais workers falharam ao retornar um resultado.")
+
+            result_tensor = torch.from_numpy(np.concatenate(results_np, axis=0)).to(torch.float16)
+            if progress: progress(0.95, "💾 Salvando o vídeo final...")
+
+            out_dir = self.OUTPUT_ROOT / f"run_{int(time.time())}_{Path(file_path).stem}"
+            out_dir.mkdir(parents=True, exist_ok=True)
+            output_filepath = out_dir / f"result_{Path(file_path).stem}.mp4"
+
+            final_fps = fps if fps and fps > 0 else original_fps
+            save_frames_to_video(result_tensor, str(output_filepath), final_fps, debug)
+
+            print(f"✅ Vídeo salvo com sucesso em: {output_filepath}")
+            return str(output_filepath)
+
+        finally:
+            # --- NÓ 7: RESTAURAÇÃO DE MEMÓRIA (SWAP BACK) ---
+            if gpu_manager.requires_memory_swap():
+                print("[SWAP] Inferência do SeedVR concluída. Movendo LTX de volta para a GPU...")
+                if progress: progress(0.99, "🔄 Restaurando o ambiente LTX...")
+                ltx_device = gpu_manager.get_ltx_device()
+                video_generation_service.move_to_device(ltx_device)
+                print(f"[SWAP] LTX de volta em {ltx_device}.")
+# --- PONTO DE ENTRADA ---
 if __name__ == "__main__":
-    print("🚀 Executando o servidor SeedVR em modo autônomo...")
+    print("🚀 Executando o servidor SeedVR em modo autônomo para inicialização...")
     try:
         server = SeedVRServer()
         print("✅ Servidor inicializado com sucesso. Pronto para receber chamadas.")
-        # Exemplo de como chamar a inferência (requer um arquivo de vídeo):
-        # input_video = "caminho/para/seu/video.mp4"
-        # if os.path.exists(input_video):
-        #     server.run_inference(
-        #         file_path=input_video,
-        #         seed=42,
-        #         resolution=1072,
-        #         batch_size=4,
-        #         progress=lambda p, desc: print(f"Progresso: {p*100:.1f}% - {desc}")
-        #     )
-        # else:
-        #     print(f"Vídeo de teste não encontrado em '{input_video}'. Pulei a execução da inferência.")
     except Exception as e:
-        print(f"❌ Falha ao inicializar o servidor: {e}")
-        import traceback
+        print(f"❌ Falha ao inicializar o servidor SeedVR: {e}")
         traceback.print_exc()
         sys.exit(1)
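The commit removes the commented usage example from the `__main__` block. Calling the server from other code still follows that pattern; a sketch adapted from the removed example, assuming a test clip exists at the given path:

# Usage sketch (adapted from the example this commit removes).
server = SeedVRServer()
output = server.run_inference(
    file_path="caminho/para/seu/video.mp4",
    seed=42,
    resolution=1072,
    batch_size=4,
    # The callback must accept desc both positionally and as a keyword,
    # since run_inference calls progress(p, "...") and progress(p, desc=...).
    progress=lambda p, desc=None: print(f"Progresso: {p*100:.1f}% - {desc}")
)
print(output)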