Update app.py
app.py CHANGED
@@ -37,6 +37,7 @@ def generate_journal_with_images(video_path):
     journal_entries = {}
     saved_images = []
     frame_count = 0
+    last_processed_frame = None
     output_folder = "detected_frames"
     os.makedirs(output_folder, exist_ok=True)  # Create folder to store images
 
@@ -45,33 +46,37 @@ def generate_journal_with_images(video_path):
         if not ret:
             break
 
-        ... (27 removed lines, old lines 48-74; their content is collapsed in this view)
+        # Process every Nth frame, or any frame that differs from the last processed one
+        if frame_count % frame_interval == 0 or (last_processed_frame is not None and is_frame_different(last_processed_frame, frame)):
+            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+
+            # Make predictions using YOLOv10 on the current frame
+            results = model.predict(source=frame_rgb, device=device)
+
+            # Plot bounding boxes and labels on the first result
+            annotated_frame = results[0].plot()
+
+            # Save the annotated image
+            frame_filename = os.path.join(output_folder, f"frame_{frame_count}.jpg")
+            cv2.imwrite(frame_filename, annotated_frame[:, :, ::-1])  # Convert back to BGR for saving
+            saved_images.append(frame_filename)
+
+            # Extract labels (class indices) and map them to class names
+            detected_objects = [model.names[int(box.cls)] for box in results[0].boxes]
+
+            # Get the current timestamp in the video
+            timestamp = cap.get(cv2.CAP_PROP_POS_MSEC) / 1000  # Convert ms to seconds
+
+            # Categorize the detected objects into activities
+            activity_summary = categorize_activity(detected_objects)
+
+            # Store each activity with its timestamp and annotated image
+            for activity, objects in activity_summary.items():
+                if activity not in journal_entries:
+                    journal_entries[activity] = []
+                journal_entries[activity].append((f"At {timestamp:.2f} seconds: {', '.join(objects)}", frame_filename))
+
+            last_processed_frame = frame  # Update the last processed frame
 
         frame_count += 1
 
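The new condition calls an is_frame_different helper that is not part of this diff. A minimal sketch of what such a helper could look like, assuming a mean-absolute-difference test on downscaled grayscale frames; the signature and the threshold value are illustrative assumptions, not the repository's actual implementation:

import cv2
import numpy as np

def is_frame_different(prev_frame, frame, threshold=25.0):
    # Hypothetical helper: the real implementation is not shown in this commit.
    # Downscale and convert to grayscale so the comparison is cheap and tolerant
    # of sensor noise and small color shifts.
    prev_small = cv2.cvtColor(cv2.resize(prev_frame, (64, 64)), cv2.COLOR_BGR2GRAY)
    curr_small = cv2.cvtColor(cv2.resize(frame, (64, 64)), cv2.COLOR_BGR2GRAY)
    # Mean absolute pixel difference; larger values mean more scene change.
    diff = float(np.mean(cv2.absdiff(prev_small, curr_small)))
    return diff > threshold

With a test like this, mostly static footage is skipped while cuts and large motions still trigger detection, and the frame_interval branch guarantees the video is sampled at least once every N frames regardless.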