Eueuiaa committed on
Commit
cedf791
·
verified ·
1 Parent(s): e371fb6

Update api/ltx_server.py

Browse files
Files changed (1) hide show
  1. api/ltx_server.py +2 -4
api/ltx_server.py CHANGED
@@ -486,9 +486,7 @@ class VideoService:
486
  print(f"[DEBUG] Aplicando política de precisão: {prec}")
487
  if prec == "float8_e4m3fn":
488
  self.runtime_autocast_dtype = torch.bfloat16
489
-
490
- LTXV_FORCE_BF16_ON_FP8 = 1
491
- force_promote = 1 #os.getenv("LTXV_FORCE_BF16_ON_FP8", "0") == "1"
492
  print(f"[DEBUG] FP8 detectado. force_promote={force_promote}")
493
  if force_promote and hasattr(torch, "float8_e4m3fn"):
494
  try:
@@ -791,7 +789,7 @@ class VideoService:
791
  "mixed_precision": (self.config["precision"] == "mixed_precision"),
792
  "offload_to_cpu": False,
793
  "enhance_prompt": False,
794
- "skip_layer_strategy": None #SkipLayerStrategy.AttentionValues,
795
  }
796
  print(f"[DEBUG] output_type={call_kwargs['output_type']} skip_layer_strategy={call_kwargs['skip_layer_strategy']}")
797
 
 
486
  print(f"[DEBUG] Aplicando política de precisão: {prec}")
487
  if prec == "float8_e4m3fn":
488
  self.runtime_autocast_dtype = torch.bfloat16
489
+ force_promote = os.getenv("LTXV_FORCE_BF16_ON_FP8", "0") == "1"
 
 
490
  print(f"[DEBUG] FP8 detectado. force_promote={force_promote}")
491
  if force_promote and hasattr(torch, "float8_e4m3fn"):
492
  try:
 
789
  "mixed_precision": (self.config["precision"] == "mixed_precision"),
790
  "offload_to_cpu": False,
791
  "enhance_prompt": False,
792
+ "skip_layer_strategy": SkipLayerStrategy.AttentionValues,
793
  }
794
  print(f"[DEBUG] output_type={call_kwargs['output_type']} skip_layer_strategy={call_kwargs['skip_layer_strategy']}")
795