Eueuiaa committed · verified
Commit 5a3bb22 · 1 Parent(s): 36ce571

Update api/ltx_server_refactored.py

Files changed (1)
  1. api/ltx_server_refactored.py +28 -28
api/ltx_server_refactored.py CHANGED
@@ -303,17 +303,17 @@ class VideoService:
         self.finalize(keep_paths=[])
 
     def generate_upscale_denoise(self, latents_path, prompt, negative_prompt, guidance_scale, seed):
-        used_seed = random.randint(0, 2**32 - 1) if seed is None else int(seed)
-        seed_everething(used_seed)
-        temp_dir = tempfile.mkdtemp(prefix="ltxv_up_"); self._register_tmp_dir(temp_dir)
-        results_dir = "/app/output"; os.makedirs(results_dir, exist_ok=True)
-        latents_low = torch.load(latents_path).to(self.device)
         try:
+            used_seed = random.randint(0, 2**32 - 1) if seed is None else int(seed)
+            seed_everething(used_seed)
+            temp_dir = tempfile.mkdtemp(prefix="ltxv_up_"); self._register_tmp_dir(temp_dir)
+            results_dir = "/app/output"; os.makedirs(results_dir, exist_ok=True)
+            latents_low = torch.load(latents_path).to(self.device)
             with torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype, enabled=self.device == 'cuda'):
                 upsampled_latents = self._upsample_latents_internal(latents_low)
                 upsampled_latents = adain_filter_latent(latents=upsampled_latents, reference_latents=latents_low)
                 del latents_low; torch.cuda.empty_cache()
-
+
             # --- SIMPLE SPLIT LOGIC WITH OVERLAP ---
             total_frames = upsampled_latents.shape[2]
             # Ensure mid_point is at least 1 to avoid an empty second chunk when there are few frames
@@ -321,42 +321,42 @@ class VideoService:
             chunk1 = upsampled_latents[:, :, :mid_point, :, :]
             # The second chunk starts one frame earlier to create the overlap
             chunk2 = upsampled_latents[:, :, mid_point - 1:, :, :]
-
+
             final_latents_list = []
             for i, chunk in enumerate([chunk1, chunk2]):
-                if chunk.shape[2] <= 1: continue  # Skip invalid or empty chunks
-                second_pass_height = chunk.shape[3] * self.pipeline.vae_scale_factor
-                second_pass_width = chunk.shape[4] * self.pipeline.vae_scale_factor
-                second_pass_kwargs = {
-                    "prompt": prompt, "negative_prompt": negative_prompt, "height": second_pass_height, "width": second_pass_width,
-                    "num_frames": chunk.shape[2], "latents": chunk, "guidance_scale": float(guidance_scale),
-                    "output_type": "latent", "generator": torch.Generator(device=self.device).manual_seed(used_seed),
-                    **(self.config.get("second_pass", {}))
-                }
-                refined_chunk = self.pipeline(**second_pass_kwargs).images
-                # Remove the overlap from the first refined chunk before concatenating
+                if chunk.shape[2] <= 1: continue  # Skip invalid or empty chunks
+                second_pass_height = chunk.shape[3] * self.pipeline.vae_scale_factor
+                second_pass_width = chunk.shape[4] * self.pipeline.vae_scale_factor
+                second_pass_kwargs = {
+                    "prompt": prompt, "negative_prompt": negative_prompt, "height": second_pass_height, "width": second_pass_width,
+                    "num_frames": chunk.shape[2], "latents": chunk, "guidance_scale": float(guidance_scale),
+                    "output_type": "latent", "generator": torch.Generator(device=self.device).manual_seed(used_seed),
+                    **(self.config.get("second_pass", {}))
+                }
+                refined_chunk = self.pipeline(**second_pass_kwargs).images
+                # Remove the overlap from the first refined chunk before concatenating
                 if i == 0:
                     final_latents_list.append(refined_chunk[:, :, :-1, :, :])
                 else:
                     final_latents_list.append(refined_chunk)
-
+
             final_latents = torch.cat(final_latents_list, dim=2)
             log_tensor_info(final_latents, "Final upscaled/refined latents")
-
-            latents_cpu = final_latents.detach().to("cpu")
-            tensor_path = os.path.join(results_dir, f"latents_refined_{used_seed}.pt")
-            torch.save(latents_cpu, tensor_path)
-            pixel_tensor = vae_manager_singleton.decode(final_latents, decode_timestep=float(self.config.get("decode_timestep", 0.05)))
-            video_path = self._save_and_log_video(pixel_tensor, "refined_video", 24.0, temp_dir, results_dir, used_seed)
-            return video_path, tensor_path
-        except Exception as e:
+
+            latents_cpu = final_latents.detach().to("cpu")
+            tensor_path = os.path.join(results_dir, f"latents_refined_{used_seed}.pt")
+            torch.save(latents_cpu, tensor_path)
+            pixel_tensor = vae_manager_singleton.decode(final_latents, decode_timestep=float(self.config.get("decode_timestep", 0.05)))
+            video_path = self._save_and_log_video(pixel_tensor, "refined_video", 24.0, temp_dir, results_dir, used_seed)
+            return video_path, tensor_path
+        except Exception as e:
             pass
         finally:
             torch.cuda.empty_cache()
             torch.cuda.ipc_collect()
             self.finalize(keep_paths=[])
 
-    def encode_mp4(self, latents_path: str, fps: int = 24):
+    def encode_mp4(self, latents_path: str, fps: int = 24):
         latents = torch.load(latents_path)
         seed = random.randint(0, 99999)
         temp_dir = tempfile.mkdtemp(prefix="ltxv_enc_"); self._register_tmp_dir(temp_dir)
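
The net effect of the diff is small but meaningful: seeding, temp-dir creation, and latent loading move from before the try: into its body, so the finally clause releases GPU memory even when setup fails (a bad latents_path, for instance). A minimal sketch of that pattern, with hypothetical setup and execute callables standing in for the method's real phases:

import torch

def run_with_gpu_cleanup(setup, execute):
    """Sketch of the commit's structure; setup/execute are hypothetical stand-ins."""
    try:
        state = setup()        # now inside try: a failure here still reaches finally
        return execute(state)
    except Exception:
        pass  # the committed code swallows the error; re-raising is the usual alternative
    finally:
        # Mirrors the method's cleanup: free cached CUDA memory on every exit path.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            torch.cuda.ipc_collect()

run_with_gpu_cleanup(lambda: torch.zeros(4), lambda t: t.sum().item())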
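
The split-with-overlap section the hunk touches is also worth seeing in isolation: the second chunk starts one frame before mid_point, and the duplicated frame is trimmed from the first refined chunk so concatenation restores the original frame count. Below is a runnable sketch on a dummy latent tensor; the max(1, ...) clamp is an assumption inferred from the comment (line 320 falls outside the hunks), and refine stands in for the pipeline's second pass:

import torch

def split_refine_merge(latents: torch.Tensor, refine) -> torch.Tensor:
    # latents: (batch, channels, frames, height, width)
    total_frames = latents.shape[2]
    # Assumed clamp, per the comment: keep mid_point >= 1 so chunk2 is never the whole tensor.
    mid_point = max(1, total_frames // 2)
    chunk1 = latents[:, :, :mid_point, :, :]
    # chunk2 starts one frame earlier, duplicating frame mid_point - 1.
    chunk2 = latents[:, :, mid_point - 1:, :, :]

    refined = []
    for i, chunk in enumerate([chunk1, chunk2]):
        if chunk.shape[2] <= 1:
            continue  # skip invalid or empty chunks
        out = refine(chunk)
        # Drop the duplicated frame from the first chunk before concatenating.
        refined.append(out[:, :, :-1, :, :] if i == 0 else out)
    return torch.cat(refined, dim=2)

# Identity "refinement" just exercises the plumbing: shape and content survive the round trip.
x = torch.randn(1, 4, 9, 8, 8)
y = split_refine_merge(x, lambda c: c)
assert y.shape == x.shape and torch.equal(y, x)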