init
app.py
CHANGED
@@ -200,6 +200,7 @@ def generate_image(prompt_scene,
                    roll=0.1,
                    pitch=0.1,
                    fov=1.0,
+                   thinking_gen=False,
                    progress=gr.Progress(track_tqdm=True)):
     # Clear CUDA cache and avoid tracking gradients
     torch.cuda.empty_cache()
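This hunk threads a new `thinking_gen` flag through the generation handler; defaulting it to False keeps existing callers working unchanged. The comment in the context lines mentions avoiding gradient tracking, but only the cache clear is visible here. A minimal sketch of the usual pattern, assuming a seed parameter just above the visible lines and a `torch.no_grad()` block that the diff does not show:

import gradio as gr
import torch

def generate_image(prompt_scene,
                   seed=42,             # assumption: a seed parameter precedes roll off-screen
                   roll=0.1,
                   pitch=0.1,
                   fov=1.0,
                   thinking_gen=False,  # new flag; False preserves the previous behaviour
                   progress=gr.Progress(track_tqdm=True)):
    # Release cached GPU memory before a fresh generation pass.
    torch.cuda.empty_cache()
    # Inference only: disable autograd so no gradients are tracked.
    with torch.no_grad():
        ...  # model call elided in the diff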
@@ -354,7 +355,7 @@ with gr.Blocks(css=custom_css) as demo:
             fov = gr.Slider(minimum=0.3491, maximum=1.8326, value=1.5000, step=0.1000, label="fov value")
             with gr.Accordion("Settings", open=True):
                 with gr.Row():
-
+                    thinking_gen = gr.Checkbox(label="Thinking", value=False)
                     seed_input = gr.Number(label="Seed (Optional)", precision=0, value=42)
 
             generation_button = gr.Button("Generate Images")
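For reference, nested `with` blocks in gr.Blocks map directly to the rendered hierarchy, so the new checkbox sits next to the seed field inside an expanded, collapsible panel. A self-contained sketch of just this layout pattern (component names mirror the diff; the rest of the page is omitted):

import gradio as gr

with gr.Blocks() as demo:
    with gr.Accordion("Settings", open=True):  # collapsible panel, open by default
        with gr.Row():  # children are laid out horizontally
            thinking_gen = gr.Checkbox(label="Thinking", value=False)
            seed_input = gr.Number(label="Seed (Optional)", precision=0, value=42)

demo.launch()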
@@ -377,15 +378,17 @@ with gr.Blocks(css=custom_css) as demo:
         with gr.Tab("Camera Understanding"):
             gr.Markdown(value="## Camera Understanding")
             image_input = gr.Image()
+
+            with gr.Accordion("Settings", open=True):
+                with gr.Row():
+                    thinking_und = gr.Checkbox(label="Thinking", value=False)
+                    und_seed_input = gr.Number(label="Seed (Optional)", precision=0, value=42)
 
             understanding_button = gr.Button("Chat")
             understanding_output = gr.Textbox(label="Response")
 
             camera_map = gr.Image(label="Camera maps (up vector and latitude)")
 
-            with gr.Accordion("Advanced options", open=False):
-                und_seed_input = gr.Number(label="Seed", precision=0, value=42)
-
             examples_inpainting = gr.Examples(
                 label="Examples",
                 examples=[
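The understanding tab drops its old "Advanced options" accordion and adopts the same Settings panel, adding a `thinking_und` checkbox and relabeling the seed field. The hunk ends mid-call in `gr.Examples(`, so the example rows are not visible; a hedged sketch of how such a call is typically completed (the row contents and the `inputs` binding below are illustrative assumptions, not the app's real data):

import gradio as gr

with gr.Blocks() as demo:
    image_input = gr.Image()
    thinking_und = gr.Checkbox(label="Thinking", value=False)
    und_seed_input = gr.Number(label="Seed (Optional)", precision=0, value=42)

    # Each inner list supplies one value per bound input component;
    # clicking a row populates those components.
    examples_inpainting = gr.Examples(
        label="Examples",
        examples=[
            ["example.jpg", False, 42],  # hypothetical row; the real rows are truncated in the diff
        ],
        inputs=[image_input, thinking_und, und_seed_input],
    )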
@@ -401,13 +404,13 @@ with gr.Blocks(css=custom_css) as demo:
 
     generation_button.click(
         fn=generate_image,
-        inputs=[prompt_input, seed_input, roll, pitch, fov],
+        inputs=[prompt_input, seed_input, roll, pitch, fov, thinking_gen],
         outputs=image_output
     )
 
     understanding_button.click(
         camera_understanding,
-        inputs=[image_input, und_seed_input],
+        inputs=[image_input, thinking_und, und_seed_input],
         outputs=[understanding_output, camera_map]
     )
 
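Component values are passed to a click handler positionally, in the order of the `inputs` list, so the checkbox arrives as a plain bool. A runnable sketch of this wiring with a stub handler (only the wiring mirrors the diff; the body of `camera_understanding` here is a stand-in):

import gradio as gr

def camera_understanding(image, thinking, seed):
    # Stand-in body: echoes the mode and seed, passes the image through.
    mode = "thinking" if thinking else "direct"
    return f"mode={mode}, seed={seed}", image

with gr.Blocks() as demo:
    image_input = gr.Image()
    thinking_und = gr.Checkbox(label="Thinking", value=False)
    und_seed_input = gr.Number(label="Seed (Optional)", precision=0, value=42)
    understanding_button = gr.Button("Chat")
    understanding_output = gr.Textbox(label="Response")
    camera_map = gr.Image(label="Camera maps (up vector and latitude)")

    understanding_button.click(
        camera_understanding,
        inputs=[image_input, thinking_und, und_seed_input],
        outputs=[understanding_output, camera_map],
    )

demo.launch()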