Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -374,8 +374,8 @@ css="""
 """
 
 with gr.Blocks(css=css) as demo:
-    gr.Markdown("# LTX Video 0.9.8 Distilled")
-    gr.Markdown("Fast high quality video generation
+    gr.Markdown("# LTX Video 0.9.8 13B Distilled")
+    gr.Markdown("Fast high quality video generation. [Model](https://huggingface.co/Lightricks/LTX-Video/blob/main/ltxv-13b-0.9.8-distilled.safetensors) [GitHub](https://github.com/Lightricks/LTX-Video) [Diffusers](https://huggingface.co/Lightricks/LTX-Video-0.9.8-13B-distilled#diffusers-🧨)")
 
     with gr.Row():
         with gr.Column():
@@ -404,7 +404,7 @@ with gr.Blocks(css=css) as demo:
                 step=0.1,
                 info=f"Target video duration (0.3s to 8.5s)"
             )
-            improve_texture = gr.Checkbox(label="Improve Texture (multi-scale)", value=True, info="Uses a two-pass generation for better quality, but is slower. Recommended for final output.")
+            improve_texture = gr.Checkbox(label="Improve Texture (multi-scale)", value=True,visible=False, info="Uses a two-pass generation for better quality, but is slower. Recommended for final output.")
 
         with gr.Column():
             output_video = gr.Video(label="Generated Video", interactive=False)
@@ -416,7 +416,7 @@ with gr.Blocks(css=css) as demo:
             with gr.Row():
                 seed_input = gr.Number(label="Seed", value=42, precision=0, minimum=0, maximum=2**32-1)
                 randomize_seed_input = gr.Checkbox(label="Randomize Seed", value=True)
-            with gr.Row():
+            with gr.Row(visible=False):
                 guidance_scale_input = gr.Slider(label="Guidance Scale (CFG)", minimum=1.0, maximum=10.0, value=PIPELINE_CONFIG_YAML.get("first_pass", {}).get("guidance_scale", 1.0), step=0.1, info="Controls how much the prompt influences the output. Higher values = stronger influence.")
             with gr.Row():
                 height_input = gr.Slider(label="Height", value=512, step=32, minimum=MIN_DIM_SLIDER, maximum=MAX_IMAGE_SIZE, info="Must be divisible by 32.")