Update app.py
app.py CHANGED

@@ -11,7 +11,7 @@ def predict(image_in_img, image_in_video):
     if image_in_video == None and image_in_img == None:  # If both inputs are None, raise an error
         raise gr.Error("Please upload an image.")
     if image_in_video or image_in_img:  # If either input is not None,
-        image = image_in_video or image_in_img
+        image = image_in_video or image_in_img  # set the image variable to the non-None input
         return model(image).render()[0]  # Use the YOLOv5 model to predict objects in the image and return the rendered output

 # Define a function to toggle between webcam and file inputs
@@ -21,6 +21,9 @@ def toggle(choice):
     else:  # Otherwise, if "file" is selected,
         return gr.update(visible=False, value=None), gr.update(visible=True, value=None)  # Show file input and hide webcam input

+# Examples to test
+ex = [["img1.jpeg"], ["img2.jpeg"], ["img3.jpeg"]]
+
 # Create Gradio UI blocks
 with gr.Blocks() as blocks:
     gr.Markdown("# CiclopeIA: Imaginando tu futuro")  # Display text in Markdown format
@@ -29,24 +32,32 @@
     with gr.Row():  # Create a row of UI elements
         with gr.Column():  # Create a column of UI elements
             # Create a radio button to choose between webcam and file inputs
-            image_or_file_opt = gr.Radio(["
+            image_or_file_opt = gr.Radio(["file", "webcam"], value="file",
                                          label="How would you like to upload your image?")
-            # Create an image input for the webcam
-            image_in_video = gr.Image(source="webcam", type="filepath")
             # Create an image input for a file, initially hidden
             image_in_img = gr.Image(
-                source="upload",
-
+                source="upload", type="filepath")
+            # Create an image input for the webcam
+            image_in_video = gr.Image(source="webcam", visible=False, type="filepath")
+
             # Bind the toggle function to the radio button to switch between webcam and file inputs
             image_or_file_opt.change(fn=toggle, inputs=[image_or_file_opt],
                                      outputs=[image_in_video, image_in_img], queue=False)
         with gr.Column():  # Create another column of UI elements
             # Create an output image to display the predicted objects
             image_out = gr.Image()
+
             # Create a button to run the prediction function and display the output image
             run_btn = gr.Button("Run")
             run_btn.click(fn=predict, inputs=[
                 image_in_img, image_in_video], outputs=[image_out])

+    gr.Examples(
+        examples = ex,
+        inputs = [image_in_img, image_in_video],
+        outputs = image_out,
+    )
+
 # Launch the Gradio UI blocks
 blocks.launch()
+
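For context, the hunks above use names defined elsewhere in app.py that the diff does not show: the gradio import, the YOLOv5 model object, and the "webcam" branch of toggle(). Below is a minimal sketch of that surrounding code, assuming the model is loaded via torch.hub with the standard yolov5s weights; the repo path and weights name are assumptions for illustration, not confirmed by this commit.

# Sketch of the parts of app.py the diff assumes but does not show.
# The torch.hub repo and "yolov5s" weights are assumptions; only model(image) appears in the diff.
import gradio as gr
import torch

# Load a pretrained YOLOv5 model (assumed load method)
model = torch.hub.load("ultralytics/yolov5", "yolov5s")

# Define a function to toggle between webcam and file inputs
def toggle(choice):
    if choice == "webcam":  # Show webcam input and hide file input
        return gr.update(visible=True, value=None), gr.update(visible=False, value=None)
    else:  # Otherwise, if "file" is selected, show file input and hide webcam input
        return gr.update(visible=False, value=None), gr.update(visible=True, value=None)

The two gr.update values are returned in the same order as the outputs list in image_or_file_opt.change, i.e. [image_in_video, image_in_img], which matches the else branch shown in the diff.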