eeuuia committed on
Commit
78b5cca
·
verified ·
1 Parent(s): f8baf05

Update api/ltx/ltx_aduc_pipeline.py

Browse files
Files changed (1) hide show
  1. api/ltx/ltx_aduc_pipeline.py +5 -5
api/ltx/ltx_aduc_pipeline.py CHANGED
@@ -552,7 +552,7 @@ def generate_narrative_low(self, prompt: str, **kwargs) -> Tuple[Optional[str],
552
 
553
  # (O resto das funções de _finalize_generation, _save_and_log_video, etc., permanecem as mesmas)
554
  @log_function_io
555
- def _finalize_generation(self, final_latents_cpu: torch.Tensor, base_filename: str, seed: int) -> Tuple[str, str]:
556
  final_latents_path = RESULTS_DIR / f"latents_{base_filename}_{seed}.pt"
557
  torch.save(final_latents_cpu, final_latents_path)
558
  logging.info(f"Final latents saved to: {final_latents_path}")
@@ -577,19 +577,19 @@ def generate_narrative_low(self, prompt: str, **kwargs) -> Tuple[Optional[str],
577
  logging.info(f"Video saved successfully to: {final_path}")
578
  return final_path
579
 
580
- def _apply_precision_policy(self):
581
  precision = str(self.config.get("precision", "bfloat16")).lower()
582
  if precision in ["float8_e4m3fn", "bfloat16"]: self.runtime_autocast_dtype = torch.bfloat16
583
  elif precision == "mixed_precision": self.runtime_autocast_dtype = torch.float16
584
  else: self.runtime_autocast_dtype = torch.float32
585
  logging.info(f"Runtime precision policy set for autocast: {self.runtime_autocast_dtype}")
586
 
587
def _align(self, dim: int, alignment: int = FRAMES_ALIGNMENT, alignment_rule: str = 'default') -> int:
    """Align a dimension to the configured alignment grid.

    With the default rule the value is rounded up to the nearest
    multiple of ``alignment`` (exact multiples are preserved). With the
    ``'n*8+1'`` rule it is rounded down to the nearest value of the
    form ``n * alignment + 1``.
    """
    completed_steps = (dim - 1) // alignment
    if alignment_rule == 'n*8+1':
        return completed_steps * alignment + 1
    return (completed_steps + 1) * alignment
591
 
592
def _calculate_aligned_frames(self, duration_s: float, min_frames: int = 1) -> int:
    """Convert a duration in seconds into an aligned frame count.

    The duration is converted to frames at ``DEFAULT_FPS``, aligned via
    ``self._align`` with ``FRAMES_ALIGNMENT``, and clamped to at least
    ``min_frames``.
    """
    raw_frames = int(round(duration_s * DEFAULT_FPS))
    return max(self._align(raw_frames, alignment=FRAMES_ALIGNMENT), min_frames)
@@ -598,7 +598,7 @@ def generate_narrative_low(self, prompt: str, **kwargs) -> Tuple[Optional[str],
598
  return random.randint(0, 2**32 - 1)
599
 
600
 
601
- def _finalize_generation(self, latents_paths: List[Path], base_filename: str, seed: int) -> Tuple[str, str, int]:
602
  """Loads latents, concatenates, decodes to video, and saves both."""
603
  logging.info("Finalizing generation: decoding latents to video.")
604
  all_tensors_cpu = [torch.load(p) for p in latents_paths]
 
552
 
553
  # (O resto das funções de _finalize_generation, _save_and_log_video, etc., permanecem as mesmas)
554
  @log_function_io
555
+ def _finalize_generation1(self, final_latents_cpu: torch.Tensor, base_filename: str, seed: int) -> Tuple[str, str]:
556
  final_latents_path = RESULTS_DIR / f"latents_{base_filename}_{seed}.pt"
557
  torch.save(final_latents_cpu, final_latents_path)
558
  logging.info(f"Final latents saved to: {final_latents_path}")
 
577
  logging.info(f"Video saved successfully to: {final_path}")
578
  return final_path
579
 
580
+ def _apply_precision_policy1(self):
581
  precision = str(self.config.get("precision", "bfloat16")).lower()
582
  if precision in ["float8_e4m3fn", "bfloat16"]: self.runtime_autocast_dtype = torch.bfloat16
583
  elif precision == "mixed_precision": self.runtime_autocast_dtype = torch.float16
584
  else: self.runtime_autocast_dtype = torch.float32
585
  logging.info(f"Runtime precision policy set for autocast: {self.runtime_autocast_dtype}")
586
 
587
def _align1(self, dim: int, alignment: int = FRAMES_ALIGNMENT, alignment_rule: str = 'default') -> int:
    """Snap ``dim`` onto the alignment grid.

    ``'n*8+1'`` rule: the largest value of the form
    ``n * alignment + 1`` not exceeding ``dim`` (for ``dim >= 1``).
    Default rule: the smallest multiple of ``alignment`` that is
    greater than or equal to ``dim``.
    """
    steps_below = (dim - 1) // alignment
    on_plus_one_grid = alignment_rule == 'n*8+1'
    return steps_below * alignment + 1 if on_plus_one_grid else (steps_below + 1) * alignment
591
 
592
def _calculate_aligned_frames1(self, duration_s: float, min_frames: int = 1) -> int:
    """Return the frame count for ``duration_s`` seconds at ``DEFAULT_FPS``,
    aligned via ``self._align`` and floored at ``min_frames``."""
    frame_estimate = int(round(duration_s * DEFAULT_FPS))
    aligned = self._align(frame_estimate, alignment=FRAMES_ALIGNMENT)
    if aligned < min_frames:
        return min_frames
    return aligned
 
598
  return random.randint(0, 2**32 - 1)
599
 
600
 
601
+ def _finalize_generation(self, latents_paths: List[Path], base_filename: str, seed: int) -> Tuple[str, str, int]:
602
  """Loads latents, concatenates, decodes to video, and saves both."""
603
  logging.info("Finalizing generation: decoding latents to video.")
604
  all_tensors_cpu = [torch.load(p) for p in latents_paths]