Spaces:
Build error
```python
import gradio as gr
from ultralytics import YOLO
from PIL import ImageDraw, ImageEnhance, ImageFilter, Image
import numpy as np
import pandas as pd
from datetime import datetime
import os

# Load the model
model = YOLO('best.pt')


def preprocess_image(image, blur_amount, brightness_level, rotation_angle):
    # Convert to PIL Image if needed
    if isinstance(image, np.ndarray):
        image = Image.fromarray(image)

    # Apply brightness adjustment
    enhancer = ImageEnhance.Brightness(image)
    image = enhancer.enhance(brightness_level)

    # Apply blur
    if blur_amount > 0:
        image = image.filter(ImageFilter.GaussianBlur(radius=blur_amount))

    # Apply rotation with white background
    if rotation_angle != 0:
        # Convert to RGBA to handle transparency
        if image.mode != 'RGBA':
            image = image.convert('RGBA')
        rotated = image.rotate(rotation_angle, expand=True, fillcolor=(255, 255, 255, 255))
        # Convert back to RGB, compositing onto white via the alpha channel
        image = Image.new('RGB', rotated.size, (255, 255, 255))
        image.paste(rotated, mask=rotated.split()[3])

    return image


def preview_update(image, blur_amount, brightness_level, rotation_angle):
    if image is None:
        return None
    return preprocess_image(image, blur_amount, brightness_level, rotation_angle)


def detect_objects(image, confidence, blur_amount, brightness_level, rotation_angle):
    if image is None:
        return None, None

    # First preprocess the image
    processed_image = preprocess_image(image, blur_amount, brightness_level, rotation_angle)

    # Perform object detection
    results = model(processed_image, conf=confidence)

    # Draw bounding boxes
    img = processed_image.copy()
    draw = ImageDraw.Draw(img)

    # Prepare detection data for the table
    detection_data = []
    for result in results:
        for box in result.boxes:
            xmin, ymin, xmax, ymax = [int(val) for val in box.xyxy[0]]
            confidence_score = float(box.conf[0])

            # Draw rectangle with thicker border
            draw.rectangle([(xmin, ymin), (xmax, ymax)], outline='green', width=8)
            draw.text((xmin, ymin - 20), f'{confidence_score:.2f}', fill='green', font_size=20)

            # Add to detection data
            detection_data.append([
                f"{confidence_score:.2f}",
                f"({xmin}, {ymin})",
                f"({xmax}, {ymax})"
            ])

    # If no detections, return an empty list so the table renders properly
    if not detection_data:
        detection_data = []

    return img, detection_data


def process_multiple_images(images, confidence, blur_amount, brightness_level, rotation_angle):
    if not images:
        return None, None

    all_results = []
    all_data = []
    for idx, file in enumerate(images):
        # Open image from file
        img = Image.open(file.name)
        result, data = detect_objects(img, confidence, blur_amount, brightness_level, rotation_angle)
        if result is not None:
            all_results.append(result)
            # Add image number to each detection in data
            image_data = [[f"Image {idx + 1}"] + row for row in data]
            all_data.extend(image_data)

    return all_results, all_data


def preview_multiple(files, blur_amount, brightness_level, rotation_angle):
    if not files:
        return None
    previews = []
    for file in files:
        img = Image.open(file.name)
        preview = preview_update(img, blur_amount, brightness_level, rotation_angle)
        if preview is not None:
            previews.append(preview)
    return previews


def export_to_csv(data):
    if data is None or data.empty:
        return None
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"detection_results_{timestamp}.csv"
    temp_path = os.path.join(os.getcwd(), filename)
    data.to_csv(temp_path, index=False)
    return temp_path


def cleanup_temp_files():
    for file in os.listdir():
        if file.startswith("detection_results_") and file.endswith(".csv"):
            try:
                os.remove(file)
            except:
                pass


# Create the Gradio interface with Monochrome theme
with gr.Blocks(
    theme=gr.themes.Monochrome(primary_hue="pink", secondary_hue="blue"),
    title="Object Detection with YOLO",
    css="""
    .fixed-height-table {
        height: 400px !important;
        position: relative !important;
    }
    .fixed-height-table > div:nth-child(2) {
        max-height: 400px !important;
        overflow-y: auto !important;
    }
    .fixed-height-table table {
        width: 100% !important;
        border-collapse: separate !important;
        border-spacing: 0 !important;
    }
    .fixed-height-table thead {
        position: sticky !important;
        top: 0 !important;
        z-index: 2 !important;
        background: var(--background-fill-primary) !important;
    }
    .fixed-height-table th {
        background: var(--background-fill-primary) !important;
        border-bottom: 2px solid var(--border-color-primary) !important;
        padding: 8px !important;
        color: var(--body-text-color) !important;
    }
    .fixed-height-table td {
        padding: 8px !important;
    }
    /* Add gallery scroll styles */
    .gallery-scroll {
        overflow-y: auto !important;
        max-height: 500px !important;
    }
    .gallery-scroll > div {
        height: auto !important;
    }
    """
) as iface:
    gr.Markdown("# Object Detection with YOLO")
    gr.Markdown("Upload an image to detect objects using YOLO. Adjust controls to see live preview.")

    with gr.Row():
        # Input column
        with gr.Column(scale=1):
            input_image = gr.File(
                file_count="multiple",
                label="Input Images",
                file_types=["image"]
            )

            # Advanced controls
            with gr.Accordion("Advanced Controls", open=True):
                blur = gr.Slider(
                    minimum=0, maximum=10, value=0, step=0.5,
                    label="Blur Amount",
                    info="Adjust image blur (0 = no blur)"
                )
                brightness = gr.Slider(
                    minimum=0.1, maximum=2.0, value=1.0, step=0.1,
                    label="Brightness",
                    info="Adjust image brightness (1 = original)"
                )
                rotation = gr.Slider(
                    minimum=-180, maximum=180, value=0, step=5,
                    label="Rotation Angle",
                    info="Rotate image (degrees)"
                )

            confidence = gr.Slider(
                minimum=0.0, maximum=1.0, value=0.25, step=0.01,
                label="Confidence Threshold",
                info="Adjust detection sensitivity"
            )

            detect_btn = gr.Button("Detect Objects", variant="primary")

        with gr.Column(scale=2) as output_column:
            # Preview container
            with gr.Row(visible=True) as preview_container:
                preview_image = gr.Gallery(
                    label="Live Preview",
                    show_label=True,
                    elem_id="gallery",
                    columns=2,
                    height=500,
                    allow_preview=True,
                    object_fit="contain",
                    elem_classes=["gallery-scroll"]
                )

            # Result container (initially hidden)
            with gr.Row(visible=False) as result_container:
                with gr.Column(scale=1):
                    final_output = gr.Gallery(
                        label="Results with Detections",
                        show_label=True,
                        elem_id="result_gallery",
                        columns=2,
                        height=500,
                        allow_preview=True,
                        object_fit="contain",
                        elem_classes=["gallery-scroll"]
                    )
                    with gr.Accordion("Detection Details", open=False):
                        detection_table = gr.Dataframe(
                            headers=["Image", "Confidence", "Top-Left", "Bottom-Right"],
                            wrap=True,
                            value=[],
                            interactive=False,
                            elem_classes=["fixed-height-table"]
                        )
                    with gr.Column():
                        export_btn = gr.Button("Export Results", variant="secondary")
                        download_file = gr.File(
                            label="Download Results",
                            show_label=False
                        )
    # Clear preview container on new image uploads
    def on_new_image_upload(files, blur, brightness, rotation):
        # Get new previews
        previews = preview_multiple(files, blur, brightness, rotation)
        # Reset the results container and table
        return [
            previews,
            gr.Row(visible=True),
            gr.Row(visible=False),
            None,
            None
        ]

    # Update the event connections
    input_image.change(
        fn=on_new_image_upload,
        inputs=[input_image, blur, brightness, rotation],
        outputs=[
            preview_image,
            preview_container,
            result_container,
            detection_table,
            download_file
        ]
    )

    # Component change events for live preview
    for component in [blur, brightness, rotation]:
        component.change(
            fn=preview_multiple,
            inputs=[input_image, blur, brightness, rotation],
            outputs=preview_image
        )

    # Connect the components
    def on_detect_click(*args):
        # Show "Processing" notification
        gr.Info("Detection in progress...")
        # Clear previous results
        yield [None, gr.Row(visible=False), gr.Row(visible=False), None]
        # Run detection
        results, data = process_multiple_images(*args)
        # Show completion notification
        gr.Info("Detection complete!")
        # Show new results
        yield [results, gr.Row(visible=False), gr.Row(visible=True), data]

    # Update the click handler to use streaming outputs
    detect_btn.click(
        fn=on_detect_click,
        inputs=[input_image, confidence, blur, brightness, rotation],
        outputs=[
            final_output,
            preview_container,
            result_container,
            detection_table
        ],
        queue=True  # Enable queuing for streaming
    )

    # Add export button click handler with cleanup
    def on_export_click(data):
        cleanup_temp_files()
        return export_to_csv(data)

    export_btn.click(
        fn=on_export_click,
        inputs=[detection_table],
        outputs=[download_file]
    )

iface.launch()
```
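If the build failure comes from dependency installation (a common cause of a Spaces "Build error", though nothing in the snippet above confirms it), a `requirements.txt` in the repository root needs to cover the third-party imports used by the script. A minimal sketch, with entries inferred purely from the import lines rather than taken from the actual Space, would be:

```
# inferred from the imports: gradio, ultralytics (YOLO), PIL, numpy, pandas
gradio
ultralytics
pillow
numpy
pandas
```

The `best.pt` weights referenced by `YOLO('best.pt')` would also need to be uploaded to the repository root, although a missing weights file normally surfaces as a runtime error rather than a build error.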