Eueuiaa committed on
Commit 99efd77 · verified · 1 Parent(s): 6658b64

Update api/ltx_server_refactored.py

Files changed (1)
  1. api/ltx_server_refactored.py +22 -23
api/ltx_server_refactored.py CHANGED
@@ -283,7 +283,7 @@ class VideoService:
     def _generate_single_chunk_low(
         self, prompt, negative_prompt,
         height, width, num_frames, guidance_scale,
-        seed, itens_conditions=None,
+        seed, itens_conditions_itens=None,
         ltx_configs_override=None):
         """
         [GENERATION NODE]
@@ -333,7 +333,7 @@ class VideoService:
         first_pass_kwargs = {
             "prompt": prompt, "negative_prompt": negative_prompt, "height": downscaled_height, "width": downscaled_width,
             "num_frames": num_frames, "frame_rate": 24, "generator": generator, "output_type": "latent",
-            #"conditioning_items": itens_conditions
+            "conditioning_items": itens_conditions_itens,
             **first_pass_config
         }
 
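A note on the `first_pass_kwargs` construction above: because the dict literal ends with a `**first_pass_config` unpacking, the uncommented `"conditioning_items"` entry must be followed by a comma, otherwise Python evaluates `itens_conditions_itens ** first_pass_config` as exponentiation and fails at runtime. A minimal, self-contained sketch of the pattern; the `first_pass_config` contents and the `num_inference_steps` key are hypothetical stand-ins for values built earlier in the method:

    # Hypothetical stand-ins for values assembled earlier in the method.
    first_pass_config = {"num_inference_steps": 8}
    itens_conditions_itens = None

    first_pass_kwargs = {
        "output_type": "latent",
        "conditioning_items": itens_conditions_itens,  # comma required before the unpacking below
        **first_pass_config,                           # merged last, so its keys win on a clash
    }
    print(first_pass_kwargs)
    # {'output_type': 'latent', 'conditioning_items': None, 'num_inference_steps': 8}

    # Without that comma, `itens_conditions_itens ** first_pass_config` is evaluated as a
    # power expression and raises TypeError (unsupported operand type(s) for **).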
 
@@ -371,7 +371,7 @@ class VideoService:
     def generate_narrative_low(
         self, prompt: str, negative_prompt,
         height, width, duration, guidance_scale,
-        seed, initial_image_conditions=None, overlap_frames: int = 8,
+        seed, initial_conditions=None, overlap_frames: int = 8,
         ltx_configs_override: dict = None):
         """
         [NARRATIVE ORCHESTRATOR]
@@ -443,24 +443,26 @@ class VideoService:
             latentes_bruto_r = self._generate_single_chunk_low(
                 prompt=chunk_prompt, negative_prompt=negative_prompt, height=height, width=width,
                 num_frames=frames_per_chunk, guidance_scale=guidance_scale, seed=used_seed + i,
-                itens_conditions=None,
+                itens_conditions_itens=None,
                 ltx_configs_override=ltx_configs_override
             )
 
 
-
-
-
             print(f"[DEBUG] generate_narrative_low.frames_per_chunk: {frames_per_chunk}")
-
-
             log_tensor_info(latentes_bruto_r, f"latentes_bruto_r recebidk: {i}...'")
 
             #latent_path_bufer = load_tensor(latent_path)
             #final_latents = torch.cat(lista_tensores, dim=2).to(self.device)
 
-
 
+            if i == 0:
+                initial_conditions = None
+            else:
+                initial_conditions = initial_conditions
+
+
+
+
             # trim the overlap at the start of the chunk
             if i > 0 and poda_latents_num > 0 and latentes_bruto_r.shape[2] > poda_latents_num:
                 latentes_bruto = latentes_bruto_r[:, :, poda_latents_num:, :, :].clone()
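The trimming above drops `poda_latents_num` latent frames from the start of every chunk after the first, so that the frames generated for continuity are not duplicated when the chunks are concatenated along the time axis (the commented-out `torch.cat(..., dim=2)` line). A small sketch with dummy tensors; the [batch, channels, frames, height, width] layout, the tensor shapes, and the `trim_overlap` helper are assumptions for illustration only:

    import torch

    def trim_overlap(chunk_latents: torch.Tensor, i: int, poda_latents_num: int) -> torch.Tensor:
        # Drop the leading overlap frames for every chunk after the first one.
        if i > 0 and poda_latents_num > 0 and chunk_latents.shape[2] > poda_latents_num:
            return chunk_latents[:, :, poda_latents_num:, :, :].clone()
        return chunk_latents

    # Three dummy chunks of 9 latent frames each, with a 2-frame overlap between neighbours.
    chunks = [torch.randn(1, 8, 9, 16, 16) for _ in range(3)]
    final_latents = torch.cat([trim_overlap(c, i, 2) for i, c in enumerate(chunks)], dim=2)
    print(final_latents.shape)  # torch.Size([1, 8, 23, 16, 16]) -> 9 + 7 + 7 frames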
@@ -517,7 +519,7 @@ class VideoService:
     def generate_single_low(
         self, prompt: str, negative_prompt,
         height, width, duration, guidance_scale,
-        seed, initial_image_conditions=None,
+        seed, initial_conditions=None,
         ltx_configs_override: dict = None):
         """
         [SIMPLE ORCHESTRATOR]
@@ -537,28 +539,25 @@ class VideoService:
         results_dir = "/app/output"; os.makedirs(results_dir, exist_ok=True)
 
         # Call the single-chunk generation function to do all the work
-        latent_path = self._generate_single_chunk_low(
+        final_latents = self._generate_single_chunk_low(
             prompt=prompt, negative_prompt=negative_prompt, height=height, width=width,
             num_frames=total_actual_frames, guidance_scale=guidance_scale, seed=used_seed,
-            itens_conditions=initial_image_conditions,
+            itens_conditions_itens=initial_conditions,
             ltx_configs_override=ltx_configs_override
         )
 
-        print(f"[DEBUG] generate_single_low.total_actual_frames: {total_actual_frames}")
-
-
-        final_latents = torch.load(latent_path).to(self.device)
         print("\n--- Finalizando Geração Simples: Salvando e decodificando ---")
         log_tensor_info(final_latents, "Tensor de Latentes Final")
 
         try:
             with torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype, enabled=self.device.type == 'cuda'):
-                pixel_tensor = vae_manager_singleton.decode(final_latents.clone(), decode_timestep=float(self.config.get("decode_timestep", 0.05)))
-                video_path = self._save_and_log_video(pixel_tensor, "single_video", FPS, temp_dir, results_dir, used_seed)
-                latents_cpu = final_latents.detach().to("cpu")
-                tensor_path = os.path.join(results_dir, f"latents_single.pt")
-                torch.save(latents_cpu, tensor_path)
-                return video_path, tensor_path, used_seed
+                pixel_tensor = vae_manager_singleton.decode(final_latents, decode_timestep=float(self.config.get("decode_timestep", 0.05)))
+                pixel_tensor_cpu = pixel_tensor.detach().to("cpu")
+                video_path = self._save_and_log_video(pixel_tensor_cpu, "narrative_video", FPS, temp_dir, results_dir, used_seed)
+                final_latents_cpu = final_latents.detach().to("cpu")
+                final_latents_patch = os.path.join(results_dir, f"latents_low_fim.pt")
+                torch.save(final_latents_cpu, final_latents_patch)
+                return video_path, final_latents_patch, used_seed
         except Exception as e:
             print("-" * 20 + " ERRO: generate_single_low --------------------")
             traceback.print_exc()
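With this change, `generate_single_low` keeps the latents in memory instead of reloading them from disk, and on success returns the decoded video path, the path of the saved latents file, and the seed that was used. A hypothetical caller sketch; how `VideoService` is constructed is not shown in this diff, and the argument values are illustrative only:

    service = VideoService()  # assumption: construction details are outside this diff

    result = service.generate_single_low(
        prompt="a slow pan across a foggy pine forest at dawn",
        negative_prompt="blurry, low quality",
        height=512, width=768, duration=5, guidance_scale=3.0,
        seed=42,
        initial_conditions=None,        # parameter renamed in this commit
        ltx_configs_override=None,
    )
    if result is not None:              # the except branch above returns None implicitly
        video_path, latents_path, used_seed = result
        print(video_path, latents_path, used_seed)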
 