Spaces: Runtime error
import gradio as gr
import cv2
import numpy as np
from PIL import Image
from ultralytics import YOLO

# Load the custom-trained YOLOv8 weights. torch.hub has no 'ultralytics/yolov8'
# entry point; YOLOv8 models are loaded through the ultralytics package.
model = YOLO('best.pt')
def process_image(image):
    # Convert PIL image to a numpy array if necessary
    if isinstance(image, Image.Image):
        image = np.array(image)
    # Perform detection
    results = model(image)
    # Draw the detections; plot() returns the annotated frame as a BGR array
    annotated_image = results[0].plot()
    # Flip BGR to RGB before handing the array back to PIL
    return Image.fromarray(annotated_image[..., ::-1])
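
# Quick local sanity check ('sample.jpg' is a hypothetical path, not part of the Space):
# sample_out = process_image(Image.open('sample.jpg'))
# sample_out.save('sample_annotated.jpg')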
def process_video(video_path):
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS) or 30  # fall back to 30 fps if the container doesn't report one
    frames = []
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        # Perform detection on the BGR frame
        results = model(frame)
        # plot() returns the annotated frame, still in BGR order
        frames.append(results[0].plot())
    cap.release()
    if not frames:
        return None  # nothing decoded; Gradio will show an empty output
    # Re-encode the annotated frames as an MP4
    height, width, _ = frames[0].shape
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    video_out = cv2.VideoWriter('output.mp4', fourcc, fps, (width, height))
    for frame in frames:
        video_out.write(frame)  # VideoWriter expects BGR, so no conversion needed
    video_out.release()
    return 'output.mp4'
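
# Likewise for video ('sample.mp4' is a hypothetical path, not part of the Space):
# print(process_video('sample.mp4'))  # should print 'output.mp4'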
# Create the Gradio interface: one tab per media type, since Interface
# takes a single fn rather than a dict of handlers
image_iface = gr.Interface(fn=process_image,
                           inputs=gr.Image(type="pil", label="Upload an image"),
                           outputs=gr.Image(type="pil", label="Detected image"),
                           description="Upload an image to detect objects using YOLOv8.")
video_iface = gr.Interface(fn=process_video,
                           inputs=gr.Video(label="Upload a video"),
                           outputs=gr.Video(label="Detected video"),
                           description="Upload a video to detect objects using YOLOv8.")
iface = gr.TabbedInterface([image_iface, video_iface],
                           tab_names=["Image", "Video"],
                           title="YOLOv8 Object Detection")
if __name__ == "__main__":
    iface.launch()
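
Spaces install dependencies from requirements.txt, so a missing package is a common cause of the "Runtime error" badge above. A minimal sketch of that file for this app (the exact package set is an assumption, not taken from the Space):

ultralytics
gradio
opencv-python-headless
numpy
Pillow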