Update api/ltx/vae_aduc_pipeline.py
api/ltx/vae_aduc_pipeline.py
CHANGED
@@ -87,12 +87,14 @@ class VaeAducPipeline:
         self._initialized = True
         logging.info(f"✅ VaeServer ready. VAE model is 'hot' on {self.device} with dtype {self.dtype}. Startup time: {time.time() - t0:.2f}s")
 
+    @log_function_io
     def _cleanup_gpu(self):
         """Frees the GPU VRAM used by the VAE."""
         if torch.cuda.is_available():
             with torch.cuda.device(self.device):
                 torch.cuda.empty_cache()
 
+    @log_function_io
     def _preprocess_input(self, item: Union[Image.Image, torch.Tensor], target_resolution: Tuple[int, int]) -> torch.Tensor:
         """Prepares a PIL image or a tensor into the pixel format the VAE expects for encoding."""
         if isinstance(item, Image.Image):
@@ -112,7 +114,7 @@ class VaeAducPipeline:
         tensor_5d = tensor.unsqueeze(0).unsqueeze(2)
         return (tensor_5d * 2.0) - 1.0
 
-    @
+    @log_function_io
     def generate_conditioning_items(
         self,
         media_items: List[Union[Image.Image, torch.Tensor]],
@@ -143,7 +145,7 @@ class VaeAducPipeline:
         finally:
             self._cleanup_gpu()
 
-    @
+    @log_function_io
     def decode_to_pixels(self, latent_tensor: torch.Tensor, decode_timestep: float = 0.05) -> torch.Tensor:
         """Decodes a latent tensor into a pixel tensor, returning it on the CPU."""
         t0 = time.time()
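
The only functional change in this commit is applying the `@log_function_io` decorator to `_cleanup_gpu`, `_preprocess_input`, `generate_conditioning_items`, and `decode_to_pixels`. The decorator itself is defined elsewhere in the repository and is not part of this diff, so the snippet below is only a minimal sketch of what a function I/O-logging decorator of this kind usually does (log the call's arguments, its return type, and the elapsed time); the body and log format are assumptions, not code from the repository.

import functools
import logging
import time

def log_function_io(func):
    """Sketch: log a function's arguments, its return type, and how long the call took."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Record entry with a summary of the arguments (illustrative format).
        t0 = time.time()
        logging.info(f"→ {func.__qualname__}: {len(args)} positional args, kwargs={list(kwargs)}")
        result = func(*args, **kwargs)
        # Record exit with the return type and wall-clock duration.
        logging.info(f"← {func.__qualname__}: returned {type(result).__name__} in {time.time() - t0:.2f}s")
        return result
    return wrapper

Keeping the logging in a decorator leaves the VAE methods themselves unchanged, which is consistent with how the file already times its startup and decode paths with time.time() and reports through logging.info.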
|