KangLiao committed
Commit f203857 · 1 Parent(s): 46b59da
Files changed (1)
  1. app.py +11 -6
app.py CHANGED
@@ -142,16 +142,22 @@ def camera_understanding(image_src, question, seed, progress=gr.Progress(track_t

     img_up = Image.open(saved_paths[0]).convert("RGB")
     img_lat = Image.open(saved_paths[1]).convert("RGB")
-
     w, h = img_up.size
     left = max(0, w - h)
     img_up = img_up.crop((left, 0, w, h))
-
     w, h = img_lat.size
     left = max(0, w - h)
     img_lat = img_lat.crop((left, 0, w, h))
+    img_up = img_up.resize((512, 512))
+    img_lat = img_lat.resize((512, 512))
+
+    gap = 10
+    W, H = img_up.size
+    combined = Image.new("RGB", (W * 2 + gap, H), (255, 255, 255))
+    combined.paste(img_up, (0, 0))
+    combined.paste(img_lat, (W + gap, 0))

-    return text, [img_up], [img_lat]
+    return text, combined


 @torch.inference_mode()
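The added lines resize each cropped map to 512 × 512 and paste the pair onto one white canvas with a 10-pixel gap, so the handler can return a single image instead of two. A minimal standalone sketch of that compositing step (the combine_maps helper and its arguments are illustrative, not part of the app):

from PIL import Image

def combine_maps(up_path, lat_path, size=512, gap=10):
    """Crop each map to a square anchored at its right edge, resize to
    size x size, and paste the pair side by side on a white canvas."""
    panels = []
    for path in (up_path, lat_path):
        img = Image.open(path).convert("RGB")
        w, h = img.size
        left = max(0, w - h)  # keep the rightmost h x h square when wider than tall
        panels.append(img.crop((left, 0, w, h)).resize((size, size)))

    combined = Image.new("RGB", (size * 2 + gap, size), (255, 255, 255))
    combined.paste(panels[0], (0, 0))
    combined.paste(panels[1], (size + gap, 0))
    return combined

With the defaults, the canvas comes out to 512 * 2 + 10 = 1034 pixels wide by 512 high.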
@@ -249,8 +255,7 @@ with gr.Blocks(css=css) as demo:
     understanding_button = gr.Button("Chat")
     understanding_output = gr.Textbox(label="Response")

-    camera1 = gr.Gallery(label="Camera Maps (Up Vector)", columns=1, rows=1)
-    camera2 = gr.Gallery(label="Camera Maps (Latitude)", columns=1, rows=1)
+    camera_map = gr.Image(label="Camera Maps")

     with gr.Accordion("Advanced options", open=False):
         und_seed_input = gr.Number(label="Seed", precision=0, value=42)
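The component swap works because gr.Gallery expects a list of images as its value, which is why the old handler returned [img_up] and [img_lat], while gr.Image takes a single value (a PIL image, NumPy array, or file path), so the combined canvas can be returned directly. A minimal contrast of the two patterns, using the names from the diff:

import gradio as gr

# Old pattern: one Gallery per map; each handler return value is a one-item list.
camera1 = gr.Gallery(label="Camera Maps (Up Vector)", columns=1, rows=1)
camera2 = gr.Gallery(label="Camera Maps (Latitude)", columns=1, rows=1)

# New pattern: a single Image component; the handler returns one PIL image.
camera_map = gr.Image(label="Camera Maps")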
@@ -277,7 +282,7 @@ with gr.Blocks(css=css) as demo:
     understanding_button.click(
         camera_understanding,
         inputs=[image_input, und_seed_input],
-        outputs=[understanding_output, camera1, camera2]
+        outputs=[understanding_output, camera_map]
     )

 demo.launch(share=True)
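Gradio matches a handler's return values to the outputs list by position, so dropping the second gallery means the three-value return had to shrink to two, which is what the first hunk's return text, combined does. A self-contained sketch of the resulting wiring, with the model call stubbed out (the stub body is a stand-in, not the app's real pipeline):

import gradio as gr
from PIL import Image

def camera_understanding_stub(image_src, seed):
    # Stand-in for the real pipeline: answer text plus one combined map.
    text = f"stub response (seed={seed})"
    combined = Image.new("RGB", (512 * 2 + 10, 512), (255, 255, 255))
    return text, combined

with gr.Blocks() as demo:
    image_input = gr.Image(label="Input Image")
    und_seed_input = gr.Number(label="Seed", precision=0, value=42)
    understanding_button = gr.Button("Chat")
    understanding_output = gr.Textbox(label="Response")
    camera_map = gr.Image(label="Camera Maps")

    understanding_button.click(
        camera_understanding_stub,
        inputs=[image_input, und_seed_input],
        outputs=[understanding_output, camera_map],
    )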
 