EuuIia committed on
Commit
c884640
·
verified ·
1 Parent(s): 690d65c

Update LTX-Video/ltx_video/pipelines/pipeline_ltx_video.py

Browse files
LTX-Video/ltx_video/pipelines/pipeline_ltx_video.py CHANGED
@@ -977,7 +977,7 @@ class LTXVideoPipeline(DiffusionPipeline):
977
  print(f"[2ADUC DEBUG LTX *causal_video_autoencoder.py*]=======")
978
  print(f"skip_initial_inference_steps {skip_initial_inference_steps}")
979
  print(f"skip_final_inference_steps {skip_final_inference_steps}")
980
- #print(f"latents {latents.shape}")
981
 
982
  if self.allowed_inference_steps is not None:
983
  for timestep in [round(x, 4) for x in timesteps.tolist()]:
@@ -1121,7 +1121,7 @@ class LTXVideoPipeline(DiffusionPipeline):
1121
  print(f"[5ADUC DEBUG LTX *causal_video_autoencoder.py*]=======")
1122
  print(f"skip_initial_inference_steps {skip_initial_inference_steps}")
1123
  print(f"skip_final_inference_steps {skip_final_inference_steps}")
1124
- #print(f"latents {latents.shape}")
1125
 
1126
 
1127
  # Update the latents with the conditioning items and patchify them into (b, n, c)
@@ -1143,7 +1143,7 @@ class LTXVideoPipeline(DiffusionPipeline):
1143
  print(f"[6ADUC DEBUG LTX *causal_video_autoencoder.py*]=======")
1144
  print(f"skip_initial_inference_steps {skip_initial_inference_steps}")
1145
  print(f"skip_final_inference_steps {skip_final_inference_steps}")
1146
- #print(f"latents {latents.shape}")
1147
  # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1148
  extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1149
 
@@ -1151,7 +1151,7 @@ class LTXVideoPipeline(DiffusionPipeline):
1151
  print(f"[7ADUC DEBUG LTX *causal_video_autoencoder.py*]=======")
1152
  print(f"skip_initial_inference_steps {skip_initial_inference_steps}")
1153
  print(f"skip_final_inference_steps {skip_final_inference_steps}")
1154
- #print(f"latents {latents.shape}")
1155
  # 7. Denoising loop
1156
  num_warmup_steps = max(
1157
  len(timesteps) - num_inference_steps * self.scheduler.order, 0
@@ -1347,9 +1347,9 @@ class LTXVideoPipeline(DiffusionPipeline):
1347
  print(f"[8ADUC DEBUG LTX *causal_video_autoencoder.py*]=======")
1348
  print(f"skip_initial_inference_steps {skip_initial_inference_steps}")
1349
  print(f"skip_final_inference_steps {skip_final_inference_steps}")
1350
- #print(f"latents {latents.shape}")
1351
 
1352
- if offload_to_cpu:
1353
  self.transformer = self.transformer.cpu()
1354
  if self._execution_device == "cuda":
1355
  torch.cuda.empty_cache()
 
977
  print(f"[2ADUC DEBUG LTX *causal_video_autoencoder.py*]=======")
978
  print(f"skip_initial_inference_steps {skip_initial_inference_steps}")
979
  print(f"skip_final_inference_steps {skip_final_inference_steps}")
980
+ print(f"latents {latents.shape}")
981
 
982
  if self.allowed_inference_steps is not None:
983
  for timestep in [round(x, 4) for x in timesteps.tolist()]:
 
1121
  print(f"[5ADUC DEBUG LTX *causal_video_autoencoder.py*]=======")
1122
  print(f"skip_initial_inference_steps {skip_initial_inference_steps}")
1123
  print(f"skip_final_inference_steps {skip_final_inference_steps}")
1124
+ print(f"latents {latents.shape}")
1125
 
1126
 
1127
  # Update the latents with the conditioning items and patchify them into (b, n, c)
 
1143
  print(f"[6ADUC DEBUG LTX *causal_video_autoencoder.py*]=======")
1144
  print(f"skip_initial_inference_steps {skip_initial_inference_steps}")
1145
  print(f"skip_final_inference_steps {skip_final_inference_steps}")
1146
+ print(f"latents {latents.shape}")
1147
  # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1148
  extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1149
 
 
1151
  print(f"[7ADUC DEBUG LTX *causal_video_autoencoder.py*]=======")
1152
  print(f"skip_initial_inference_steps {skip_initial_inference_steps}")
1153
  print(f"skip_final_inference_steps {skip_final_inference_steps}")
1154
+ print(f"latents {latents.shape}")
1155
  # 7. Denoising loop
1156
  num_warmup_steps = max(
1157
  len(timesteps) - num_inference_steps * self.scheduler.order, 0
 
1347
  print(f"[8ADUC DEBUG LTX *causal_video_autoencoder.py*]=======")
1348
  print(f"skip_initial_inference_steps {skip_initial_inference_steps}")
1349
  print(f"skip_final_inference_steps {skip_final_inference_steps}")
1350
+ print(f"latents {latents.shape}")
1351
 
1352
+ if offload_to_cpu:
1353
  self.transformer = self.transformer.cpu()
1354
  if self._execution_device == "cuda":
1355
  torch.cuda.empty_cache()