import gradio as gr
import numpy as np
import torch

# Load the pretrained YOLOv5s model from the PyTorch Hub
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
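
# Optional tuning knobs exposed by the hub wrapper (values shown are the
# YOLOv5 defaults); adjust to trade recall against false positives.
# model.conf = 0.25  # minimum confidence for a detection to be kept
# model.iou = 0.45   # IoU threshold used by non-max suppression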

# Function to run inference on an image and return the annotated result
def run_inference(image):
    # Convert the PIL image to a numpy array (RGB) for the model
    image = np.array(image)
    # Run YOLOv5 inference
    results = model(image)
    # results.render() draws the detections onto the input image in place
    # and returns it; the array is already RGB (it came from PIL), so no
    # BGR-to-RGB conversion is needed before handing it back to Gradio.
    annotated_image = results.render()[0]
    return annotated_image
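
# Note: the hub model's AutoShape wrapper also accepts PIL images, file
# paths, and torch tensors directly, so the np.array() conversion above is
# a convenience rather than a requirement.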

# Function to generate a text summary of the detected objects
def generate_summary(image):
    results = model(image)
    detected_objects = results.pandas().xyxy[0]
    summary = "Detected objects:\n\n"
    for idx, obj in detected_objects.iterrows():
        summary += f"- {obj['name']} with confidence {obj['confidence']:.2f}\n"
    return summary
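
# For reference, results.pandas().xyxy[0] is a pandas DataFrame with one
# row per detection and columns xmin, ymin, xmax, ymax, confidence,
# class, name; the loop above uses only name and confidence.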

# Create the Gradio interface with a custom-styled UI
with gr.Blocks(css="""
body {
    font-family: 'Poppins', sans-serif;
    background-color: #2B3D41;
    color: #F9B9D2;
}
header {
    background-color: #83A0A0;
    padding: 20px;
    text-align: center;
    border-radius: 10px;
    color: white;
    box-shadow: 0 4px 10px rgba(0, 0, 0, 0.3);
}
footer {
    background-color: #4C5F6B;
    padding: 10px;
    text-align: center;
    border-radius: 10px;
    color: white;
    margin-top: 20px;
    box-shadow: 0 4px 10px rgba(0, 0, 0, 0.3);
}
.btn-primary {
    background-color: #BCA0BC;
    color: #2B3D41;
    padding: 10px 20px;
    border-radius: 5px;
    font-weight: bold;
    border: none;
    cursor: pointer;
    transition: all 0.3s;
}
.btn-primary:hover {
    background-color: #F9B9D2;
    color: #2B3D41;
}
.gr-box {
    background-color: #4C5F6B;
    border-radius: 10px;
    padding: 20px;
    color: #F9B9D2;
    box-shadow: 0 4px 10px rgba(0, 0, 0, 0.3);
}
.gr-input {
    background-color: #BCA0BC;
    border-radius: 5px;
    border: none;
    padding: 10px;
    color: #2B3D41;
}
""") as demo:
    with gr.Row():
        gr.Markdown("<h1 style='text-align:center; color:#F9B9D2;'>✨ InsightVision: Detect, Analyze, Summarize ✨</h1>")
    with gr.Row():
        with gr.Column(scale=2):
            image_input = gr.Image(label="Upload Image", type="pil", elem_classes="gr-input")
            with gr.Row():
                detect_button = gr.Button("Run Detection", elem_classes="btn-primary")
        with gr.Column(scale=3):
            # type="numpy" matches what run_inference actually returns
            annotated_image_output = gr.Image(label="Detected Image", type="numpy", elem_classes="gr-box")
            summary_output = gr.Textbox(label="Detection Summary", lines=10, interactive=False, elem_classes="gr-box")
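
    # elem_classes attaches the CSS classes defined in the css=... string
    # above to individual components, which is how the custom palette is
    # applied per widget.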

    # Wire the button: produce the annotated image and the text summary.
    # Note that this runs inference twice (once per helper); see the
    # single-pass sketch below for a cheaper alternative.
    detect_button.click(
        fn=lambda image: (run_inference(image), generate_summary(np.array(image))),
        inputs=[image_input],
        outputs=[annotated_image_output, summary_output]
    )
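
    # A minimal single-pass sketch (an assumption, not part of the original
    # app): running the model once and deriving both outputs from the same
    # results object halves the per-click inference cost.
    #
    # def detect_and_summarize(image):
    #     results = model(np.array(image))   # one forward pass
    #     annotated = results.render()[0]    # RGB array with boxes drawn
    #     df = results.pandas().xyxy[0]      # one row per detection
    #     summary = "Detected objects:\n\n" + "".join(
    #         f"- {row['name']} with confidence {row['confidence']:.2f}\n"
    #         for _, row in df.iterrows()
    #     )
    #     return annotated, summary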

    gr.Markdown("<footer>Made with ❤️ using Gradio and YOLOv5 | © 2024 InsightVision</footer>")

# Launch the interface
demo.launch()
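
# launch() also accepts options such as share=True (public tunnel link) or
# server_name="0.0.0.0" (listen on all interfaces); the plain call above
# is what Hugging Face Spaces expects.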