|
|
|
|
|
import gradio as gr |
|
|
from ultralytics import YOLO |
|
|
from huggingface_hub import hf_hub_download |
|
|
import cv2, tempfile |
|
|
import numpy as np |
|
|
from PIL import Image |
|
|
|
|
|
|
|
|
|
|
|
# Download the fine-tuned YOLOv8m weights from the Hugging Face Hub.
# hf_hub_download caches the file locally, so repeated launches do not
# re-download; it returns the local filesystem path to the checkpoint.
model_path = hf_hub_download(
    repo_id="utkarsh-23/yolov8m-garbage-pothole-detector",
    filename="best.pt",
)

# Load the detector once at import time; this single instance is shared by
# both the image and video handlers below.
model = YOLO(model_path)
|
|
|
|
|
|
|
|
# The ten labels emitted by the fine-tuned YOLOv8 head, index-aligned with
# the integer class ids the model predicts.
class_names = ['Container', 'Garbage', 'crocodile crack', 'longitudinal crack', 'pothole',
               'HV-switch', 'crossarm', 'streetlight', 'traffic-light', 'transformer']

# Department -> detectable classes, written grouped so the routing policy is
# obvious at a glance.  Order matters only for dict insertion order, which is
# kept identical to the flat literal it replaces.
_DEPT_TO_CLASSES = {
    'Garbage': ('Container', 'Garbage'),
    'Pothole': ('crocodile crack', 'longitudinal crack', 'pothole'),
    'Streetlight': ('HV-switch', 'crossarm', 'streetlight', 'traffic-light', 'transformer'),
}

# Flattened class -> department lookup used by the detection handlers.
department_mapping = {
    cls: dept
    for dept, classes in _DEPT_TO_CLASSES.items()
    for cls in classes
}
|
|
|
|
|
|
|
|
def detect_image(image):
    """Run the detector on a single image and classify it by department.

    Args:
        image: Image as a numpy array from the Gradio component, or None
            when nothing has been uploaded.

    Returns:
        A (annotated_image, markdown_text) tuple; the image slot is None
        when the input is missing or inference fails.
    """
    if image is None:
        return None, "β οΈ Please upload an image first!"

    try:
        prediction = model(image)[0]

        labels = []          # "name (confidence)" strings for the report
        departments = set()  # unique departments among all detections

        boxes = prediction.boxes
        if boxes is not None:
            for detection in boxes:
                cls_idx = int(detection.cls[0])
                score = float(detection.conf[0])
                # Guard against a class id outside the known label list.
                if cls_idx < len(class_names):
                    name = class_names[cls_idx]
                else:
                    name = f"Class {cls_idx}"
                labels.append(f"{name} ({score:.2f})")
                departments.add(department_mapping.get(name, "Unknown"))

        if departments:
            if len(departments) == 1:
                dept = next(iter(departments))
                emoji = {"Garbage": "ποΈ", "Pothole": "π³οΈ", "Streetlight": "π‘"}.get(dept, "π")
                report = f"{emoji} **This image is classified under the {dept} department**"
            else:
                joined = ", ".join(sorted(departments))
                report = f"π **This image is classified under multiple departments:** {joined}"

            report += "\n\n### π Detected Objects:\n"
            report += "".join(f"β’ {label}\n" for label in labels)
        else:
            report = "β **No objects detected**\n\nPlease try with a different image containing garbage, potholes, or streetlight infrastructure."

        return prediction.plot(), report

    except Exception as e:
        return None, f"β **Error processing image:** {str(e)}"
|
|
|
|
|
|
|
|
def detect_video(video_path):
    """Run the detector on every frame of a video and return an annotated copy.

    Args:
        video_path: Filesystem path to the uploaded video, or None when the
            user has not uploaded anything.

    Returns:
        Path to a temporary .mp4 file with detection boxes drawn on each
        frame, or None when the input is missing or processing fails.
    """
    if video_path is None:
        return None

    cap = None
    out = None
    try:
        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            return None

        fps = cap.get(cv2.CAP_PROP_FPS)
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        # Some containers report 0 FPS; fall back to a sane default so the
        # writer does not produce a broken/zero-length file.
        if not fps or fps <= 0:
            fps = 25.0

        # NamedTemporaryFile replaces the deprecated, race-prone
        # tempfile.mktemp(): the file is created atomically, and delete=False
        # keeps it on disk so Gradio can serve it after we return.
        with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmp:
            out_path = tmp.name

        fourcc = cv2.VideoWriter_fourcc(*"mp4v")
        out = cv2.VideoWriter(out_path, fourcc, fps, (width, height))

        while True:
            ret, frame = cap.read()
            if not ret:
                break
            results = model(frame)        # per-frame inference
            out.write(results[0].plot())  # frame with boxes drawn

        return out_path

    except Exception as e:
        print(f"Error processing video: {e}")
        return None

    finally:
        # Always release the capture/writer handles, including on error
        # paths — the original leaked both when an exception was raised.
        if cap is not None:
            cap.release()
        if out is not None:
            out.release()
|
|
|
|
|
|
|
|
# Custom stylesheet injected into the Gradio app.  The bulk of these rules
# force dark text (#333 / #2c3e50) onto light backgrounds because several
# Gradio themes render upload widgets and markdown output with low-contrast
# colors.  The .svelte-* selectors target generated Gradio internals and are
# fragile across Gradio versions — presumably pinned to the version this app
# was built against (TODO confirm on upgrade).
custom_css = """
.gradio-container {
    max-width: 1200px !important;
    margin: auto !important;
}

.main-header {
    text-align: center;
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    color: white;
    padding: 2rem;
    border-radius: 10px;
    margin-bottom: 2rem;
    box-shadow: 0 4px 15px rgba(0,0,0,0.1);
}

.department-info {
    background: #f8f9fa;
    border-left: 4px solid #007bff;
    padding: 1rem;
    margin: 1rem 0;
    border-radius: 5px;
    color: #333 !important;
}

.department-info h3 {
    color: #2c3e50 !important;
    margin-bottom: 1rem !important;
    font-weight: 600 !important;
}

.department-info div {
    color: #333 !important;
}

.department-info strong {
    color: #2c3e50 !important;
    font-weight: 600 !important;
}

.upload-area {
    border: 2px dashed #007bff;
    border-radius: 10px;
    padding: 2rem;
    text-align: center;
    background: #f8f9fa;
    transition: all 0.3s ease;
    color: #333 !important;
}

.upload-area:hover {
    border-color: #0056b3;
    background: #e3f2fd;
}

/* Upload area text styling */
.upload-area .upload-text {
    color: #333 !important;
    font-weight: 500 !important;
}

/* Fix for file upload component text */
.file-upload {
    color: #333 !important;
}

.file-upload .upload-text {
    color: #333 !important;
}

/* Gradio file upload specific styling */
.gr-file-upload {
    color: #333 !important;
}

.gr-file-upload .upload-text,
.gr-file-upload .file-preview,
.gr-file-upload .file-name {
    color: #333 !important;
}

/* Additional upload component fixes */
[data-testid="upload-button"] {
    color: #333 !important;
}

.upload-container {
    color: #333 !important;
}

.upload-container * {
    color: #333 !important;
}

/* Specific targeting for upload text */
.svelte-1nausj1 {
    color: #333 !important;
}

.svelte-1nausj1 * {
    color: #333 !important;
}

.classification-result {
    background: #ffffff !important;
    border: 1px solid #e0e0e0 !important;
    border-radius: 8px !important;
    padding: 1.5rem !important;
    color: #333333 !important;
    font-size: 14px !important;
    line-height: 1.6 !important;
    box-shadow: 0 2px 4px rgba(0,0,0,0.1) !important;
}

.classification-result h3 {
    color: #2c3e50 !important;
    margin-top: 1rem !important;
    margin-bottom: 0.5rem !important;
}

.classification-result p {
    color: #333333 !important;
    margin-bottom: 0.8rem !important;
}

.classification-result strong {
    color: #2c3e50 !important;
    font-weight: 600 !important;
}

.classification-result ul, .classification-result li {
    color: #444444 !important;
}

/* Fix for markdown content */
.markdown {
    background: #ffffff !important;
    color: #333333 !important;
}

.markdown h1, .markdown h2, .markdown h3, .markdown h4, .markdown h5, .markdown h6 {
    color: #2c3e50 !important;
}

.markdown p, .markdown li, .markdown span {
    color: #333333 !important;
}

.markdown strong {
    color: #2c3e50 !important;
}

footer {
    text-align: center;
    margin-top: 2rem;
    padding: 1rem;
    color: #666;
}

/* Additional text contrast fixes */
.block.svelte-90oupt {
    background: #ffffff !important;
}

.prose {
    color: #333333 !important;
}

.prose h1, .prose h2, .prose h3 {
    color: #2c3e50 !important;
}

.prose p, .prose li {
    color: #333333 !important;
}

/* Upload component text color fixes */
.image-container,
.video-container {
    color: #333 !important;
}

.image-container *,
.video-container * {
    color: #333 !important;
}

/* More specific upload text targeting */
div[data-testid*="upload"] {
    color: #333 !important;
}

div[data-testid*="upload"] * {
    color: #333 !important;
}

/* Force text visibility in upload areas */
.block.svelte-1t38q2d {
    color: #333 !important;
}

.block.svelte-1t38q2d * {
    color: #333 !important;
}

/* Additional upload text fixes */
.uploading,
.upload-instructions,
.drop-zone {
    color: #333 !important;
}

.uploading *,
.upload-instructions *,
.drop-zone * {
    color: #333 !important;
}
"""
|
|
|
|
|
|
|
|
# Static HTML banner rendered at the top of the app: title, tagline, and a
# legend of the three detection categories with the department each routes to.
# Inline style attributes duplicate the custom CSS on purpose, as a fallback
# when theme styles win the specificity battle.
header_html = """
<div class="main-header">
    <h1>π SAMADHAN </h1>
    <p>AI-Powered Classification for Urban Infrastructure Management</p>
    <div class="department-info">
        <h3 style="color: #2c3e50 !important; margin-bottom: 1rem;">π Detection Categories:</h3>
        <div style="display: flex; justify-content: center; gap: 2rem; margin-top: 1rem; flex-wrap: wrap;">
            <div style="color: #333 !important; font-weight: 500;"><strong style="color: #2c3e50 !important;">ποΈ Garbage Department:</strong> Container, Garbage</div>
            <div style="color: #333 !important; font-weight: 500;"><strong style="color: #2c3e50 !important;">π³οΈ Pothole Department:</strong> Cracks, Potholes</div>
            <div style="color: #333 !important; font-weight: 500;"><strong style="color: #2c3e50 !important;">π‘ Streetlight Department:</strong> Electrical Infrastructure</div>
        </div>
    </div>
</div>
"""

# Static footer credit line rendered below the tabs.
footer_html = """
<div style="text-align: center; margin-top: 2rem; padding: 1rem; color: #666;">
    <p>Built with β€οΈ using YOLOv8 and Gradio | Powered by AI for Smart City Management</p>
</div>
"""
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
# Gradio UI: two tabs (image / video), each a two-column layout with inputs
# on the left and results on the right, wired to the handlers defined above.
# ---------------------------------------------------------------------------
with gr.Blocks(css=custom_css, title="Infrastructure Detection System", theme=gr.themes.Soft()) as demo:
    gr.HTML(header_html)

    with gr.Tabs() as tabs:
        with gr.TabItem("πΈ Image Detection", elem_id="image-tab"):
            with gr.Row():
                with gr.Column(scale=1):
                    # Upload widget; type="numpy" delivers the image as an
                    # array, which is what detect_image passes to the model.
                    image_input = gr.Image(
                        label="Upload Image",
                        type="numpy",
                        elem_classes="upload-area"
                    )

                    # Placeholder for curated sample images (none bundled yet).
                    gr.Examples(
                        examples=[],
                        inputs=image_input,
                        label="Example Images"
                    )

                    image_btn = gr.Button(
                        "π Analyze Image",
                        variant="primary",
                        size="lg"
                    )

                with gr.Column(scale=1):
                    image_output = gr.Image(
                        label="Detection Results",
                        type="numpy"
                    )

                    # Markdown panel for the department classification report.
                    classification_output = gr.Markdown(
                        label="Department Classification",
                        elem_classes="classification-result"
                    )

        with gr.TabItem("π₯ Video Detection", elem_id="video-tab"):
            with gr.Row():
                with gr.Column(scale=1):
                    video_input = gr.Video(
                        label="Upload Video",
                        elem_classes="upload-area"
                    )

                    video_btn = gr.Button(
                        "π¬ Process Video",
                        variant="primary",
                        size="lg"
                    )

                    gr.Markdown("""
                    ### π Video Processing Notes:
                    - Supports common video formats (MP4, AVI, MOV)
                    - Processing time depends on video length
                    - Large videos may take several minutes
                    """)

                with gr.Column(scale=1):
                    video_output = gr.Video(
                        label="Processed Video with Detections"
                    )

    # Explicit button-triggered runs.
    image_btn.click(
        fn=detect_image,
        inputs=image_input,
        outputs=[image_output, classification_output],
        show_progress=True
    )

    video_btn.click(
        fn=detect_video,
        inputs=video_input,
        outputs=video_output,
        show_progress=True
    )

    # Also run image detection automatically on upload, so the button is
    # optional for the image tab.
    image_input.change(
        fn=detect_image,
        inputs=image_input,
        outputs=[image_output, classification_output],
        show_progress=True
    )

    gr.HTML(footer_html)
|
|
|
|
|
if __name__ == "__main__":
    print("π Starting Infrastructure Detection System...")
    print("π Loading AI model...")
    # Local launch: no public share link, open a browser tab automatically,
    # surface tracebacks in the UI, and disable the FastAPI /docs endpoint.
    demo.launch(
        share=False,
        inbrowser=True,
        show_error=True,
        favicon_path=None,
        app_kwargs={"docs_url": None}
    )
|
|
|