Update aduc_framework/engineers/deformes3D.py
aduc_framework/engineers/deformes3D.py
CHANGED
@@ -67,24 +67,16 @@ class Deformes3DEngine:
         general_ref_paths = [media["caminho"] for media in generation_state.get("midias_referencia", [])]
 
         keyframe_resolution = params.get('resolution', 480)
-        initial_ref_path = general_ref_paths[0]
-
-
-            raise ValueError("Não há imagem de referência inicial para começar a geração de keyframes.")
-
-        current_base_image_path = initial_ref_path
-        previous_prompt = "N/A (imagem de referência inicial)"
+        initial_ref_path = general_ref_paths[0]
+
+        previous_prompt = ""
         all_keyframes_data: List[Dict[str, Any]] = []
         width, height = keyframe_resolution, keyframe_resolution
         target_resolution_tuple = (width, height)
 
-
-        if num_keyframes_to_generate <= 0:
-            logger.warning("Storyboard vazio. Nenhum keyframe a ser gerado.")
-            return []
+
 
         logger.info(f"IMAGE SPECIALIST: Ordem para gerar {num_keyframes_to_generate} keyframes (versões LTX).")
-
         ltx_conditioning_items0 = []
 
 
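This hunk removes the in-method guards (the ValueError for a missing initial reference image and the early return for an empty storyboard) and initializes previous_prompt to an empty string instead of the old "N/A" placeholder. If callers still depend on those checks, a minimal caller-side sketch is below; the wrapper and the engine entry-point name are assumptions, not part of this file.

# Hypothetical caller-side guard reproducing the checks removed in this hunk.
# Only the two checks themselves are taken from the removed lines; the wrapper
# and the `generate_keyframes` entry-point name are assumed.
import logging
from typing import Any, Dict, List

logger = logging.getLogger(__name__)

def generate_keyframes_guarded(engine, generation_state: Dict[str, Any],
                               storyboard: List[str], params: Dict[str, Any]) -> List[Dict[str, Any]]:
    ref_paths = [m["caminho"] for m in generation_state.get("midias_referencia", [])]
    if not ref_paths:
        raise ValueError("Não há imagem de referência inicial para começar a geração de keyframes.")
    if len(storyboard) <= 0:
        logger.warning("Storyboard vazio. Nenhum keyframe a ser gerado.")
        return []
    return engine.generate_keyframes(generation_state, storyboard, params)  # assumed method name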
@@ -93,20 +85,20 @@ class Deformes3DEngine:
         pixel_tensor0 = self._pil_to_pixel_tensor(img_processed0)
 
         ltx_conditioning_items0.append(LatentConditioningItem(pixel_tensor0, 0, 0.05))
-        ltx_conditioning_items0.append(LatentConditioningItem(pixel_tensor0,
+        ltx_conditioning_items0.append(LatentConditioningItem(pixel_tensor0, 23, 0.05))
 
         latent_tensorY = pixel_tensor0
         latent_tensorX = latent_tensorY
+
 
         current_base_image_path = initial_ref_path
         past_base_image_path = initial_ref_path
-
+
 
         for i in range(num_keyframes_to_generate):
             ltx_conditioning_items = ltx_conditioning_items0
-
-
             scene_index = i + 1
+
             current_scene = storyboard[i]
             future_scene = storyboard[i + 1] if (i + 1) < len(storyboard) else "A cena final."
             logger.info(f"--> Gerando Keyframe {scene_index}/{num_keyframes_to_generate}...")
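The second conditioning anchor is now fully specified: the same initial reference tensor is registered at frame 0 and again at frame 23, both with strength 0.05. A minimal sketch of that setup, assuming the positional signature LatentConditioningItem(tensor, frame_index, strength) seen in the calls above; the import path and helper name are assumptions.

# Sketch only: builds the two low-strength anchors used above. The
# (tensor, frame_index, strength) argument order is taken from the calls in
# this diff; everything else here is assumed for illustration.
from typing import List, Sequence

import torch

from aduc_framework.types import LatentConditioningItem  # assumed import path


def build_initial_conditioning(pixel_tensor0: torch.Tensor,
                               anchor_frames: Sequence[int] = (0, 23),
                               strength: float = 0.05) -> List[LatentConditioningItem]:
    # The same reference tensor is pinned at each anchor frame with a low strength.
    return [LatentConditioningItem(pixel_tensor0, frame, strength) for frame in anchor_frames]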
@@ -149,7 +141,7 @@
             torch.save(final_latent.cpu(), latent_path)
 
             latent_tensorY = latent_path
-
+            past_base_image_path = current_base_image_path
 
             keyframe_data = {
                 "id": scene_index,
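The added line preserves the previous base image path before current_base_image_path is advanced to the freshly generated pixel_path at the end of the loop body (see the last hunk), so each iteration can see both its past and current base image. A small self-contained sketch of that rollover, with purely illustrative file names:

# Illustrative only: the per-iteration rollover introduced by this hunk.
keyframe_paths = ["ref.png", "kf_1.png", "kf_2.png", "kf_3.png"]  # made-up file names

current_base_image_path = keyframe_paths[0]
past_base_image_path = keyframe_paths[0]

for new_pixel_path in keyframe_paths[1:]:
    past_base_image_path = current_base_image_path   # the line this hunk adds
    current_base_image_path = new_pixel_path          # already done at the end of the loop body
    print(f"past={past_base_image_path} current={current_base_image_path}")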
@@ -160,10 +152,7 @@
 
             all_keyframes_data.append(keyframe_data)
 
-
-            progress_fraction = 0.2 + ((scene_index / num_keyframes_to_generate) * 0.8)
-            progress_callback(progress_fraction, f"Keyframe {scene_index}/{num_keyframes_to_generate} gerado.")
-
+
             current_base_image_path = pixel_path
             previous_prompt = img_prompt
 
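The per-keyframe progress reporting is dropped from the loop. For reference, the removed formula mapped keyframe k of N onto the 0.2–1.0 range; below is a standalone sketch of that mapping (the function name is an assumption) in case a caller still wants to report it.

# Standalone sketch of the removed progress mapping; the function name is assumed.
def keyframe_progress(scene_index: int, num_keyframes: int) -> float:
    # Keyframe generation is mapped onto the 0.2..1.0 slice of the overall progress.
    return 0.2 + (scene_index / num_keyframes) * 0.8


print(keyframe_progress(2, 4))  # ~0.6: the second of four keyframes reports about 60%
print(keyframe_progress(4, 4))  # 1.0 when the final keyframe completes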