Upload 6 files
- .gitattributes +1 -0
- agent_tools.py +415 -0
- app.py +291 -0
- cookies.txt +106 -0
- efficientdet_lite0.tflite +3 -0
- mobilenet_v3_small_075_224_embedder.tflite +3 -0
- nn4.small2.v1.t7 +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+nn4.small2.v1.t7 filter=lfs diff=lfs merge=lfs -text
agent_tools.py
ADDED
@@ -0,0 +1,415 @@
# -*- coding: utf-8 -*-
from phi.agent import Agent
from phi.model.google import Gemini
from phi.tools.duckduckgo import DuckDuckGo
import google.generativeai as genai
from google.generativeai import upload_file, get_file

import os
import numpy as np
import time
import uuid

import yt_dlp
import cv2
import mediapipe as mp

#==========================================================================================================
# Load a pre-trained face embedding model (OpenCV's FaceNet). This model performs better than the mp embedder for faces.
face_embedder = cv2.dnn.readNetFromTorch("nn4.small2.v1.t7")  # Download the model from OpenCV's GitHub

# Define an embedder with MediaPipe -- performs worse for face matching, but is used for object embeddings below.
# Download the model from https://storage.googleapis.com/mediapipe-tasks/image_embedder
BaseOptions = mp.tasks.BaseOptions
ImageEmbedder = mp.tasks.vision.ImageEmbedder
ImageEmbedderOptions = mp.tasks.vision.ImageEmbedderOptions
VisionRunningMode = mp.tasks.vision.RunningMode

options = ImageEmbedderOptions(
    base_options=BaseOptions(model_asset_path='mobilenet_v3_small_075_224_embedder.tflite'),
    quantize=True,
    running_mode=VisionRunningMode.IMAGE)

mp_embedder = ImageEmbedder.create_from_options(options)

#================================================================================================================
def initialize_agent():
    return Agent(
        name="Video AI summarizer",
        model=Gemini(id="gemini-2.0-flash-exp"),
        tools=[DuckDuckGo()],
        show_tool_calls=True,
        markdown=True,
    )

# Based on the cv2 FaceNet embedder
def get_face_embedding(face_image):
    """
    Generate a face embedding using the pre-trained model.
    """
    # Preprocess the face image with cv2
    blob = cv2.dnn.blobFromImage(face_image, 1.0 / 255, (96, 96), (0, 0, 0), swapRB=True, crop=False)
    face_embedder.setInput(blob)
    embedding = face_embedder.forward()

    return embedding.flatten()

# Based on the MediaPipe embedder
def get_mp_embedding(face_image):
    """
    Generate an image embedding using the pre-trained MediaPipe model.
    """
    # Load the input image from a numpy array.
    mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=np.array(face_image))
    embedding_result = mp_embedder.embed(mp_image)

    return embedding_result.embeddings[0]

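Example (not part of agent_tools.py): roughly how the trackers below decide that two crops show the same face. A minimal sketch assuming nn4.small2.v1.t7 sits in the working directory; face_a.jpg and face_b.jpg are placeholder crops.

```python
import cv2
import numpy as np

# Assumes nn4.small2.v1.t7 is in the working directory, as in agent_tools.py.
embedder = cv2.dnn.readNetFromTorch("nn4.small2.v1.t7")

def embed(face_bgr):
    # Same preprocessing as get_face_embedding()
    blob = cv2.dnn.blobFromImage(face_bgr, 1.0 / 255, (96, 96), (0, 0, 0), swapRB=True, crop=False)
    embedder.setInput(blob)
    return embedder.forward().flatten()

# Placeholder face crops; any two BGR face images will do.
emb_a = embed(cv2.imread("face_a.jpg"))
emb_b = embed(cv2.imread("face_b.jpg"))

# Cosine similarity -- the same test face_detection_embed() applies with a 0.5 threshold.
similarity = float(np.dot(emb_a, emb_b) / (np.linalg.norm(emb_a) * np.linalg.norm(emb_b)))
print(f"cosine similarity: {similarity:.3f}")
```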
# Advanced face tracking with MediaPipe and face embeddings
def face_detection_embed(video_path):
    # Initialize MediaPipe Face Detection
    mp_face_detection = mp.solutions.face_detection
    mp_drawing = mp.solutions.drawing_utils

    # Load a pre-trained face embedding model (OpenCV's FaceNet)
    #embedder = cv2.dnn.readNetFromTorch("nn4.small2.v1.t7")  # Download the model from OpenCV's GitHub; moved out of this function

    # Open the video file
    video_capture = cv2.VideoCapture(video_path)

    # Dictionary to store face embeddings and their corresponding IDs, number of matches, normalized images
    face_tracker = {}  # Format: {face_id: {"embedding": face_embedding, "number_matched": number_matched, "image": normalized_face}}
    face_id_counter = 0
    similarity_threshold = 0.5  # Threshold for considering two faces the same
    frame_number = 0

    # Define the target size for normalization
    target_width = 100   # Desired width for all faces
    target_height = 100  # Desired height for all faces

    with mp_face_detection.FaceDetection(min_detection_confidence=0.5) as face_detection:
        while True:
            # Grab a single frame of video
            ret, frame = video_capture.read()
            if not ret:
                break

            if frame_number % 30 == 0:
                # Convert the frame to RGB for MediaPipe
                rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

                # Process the frame with MediaPipe Face Detection
                results = face_detection.process(rgb_frame)

                if results.detections:
                    for detection in results.detections:
                        # Get the bounding box of the face
                        bboxC = detection.location_data.relative_bounding_box
                        ih, iw, _ = frame.shape
                        x = int(bboxC.xmin * iw)
                        y = int(bboxC.ymin * ih)
                        w = int(bboxC.width * iw)
                        h = int(bboxC.height * ih)
                        score = detection.score[0]

                        # Extract the face region
                        face_image = frame[y:y+h, x:x+w]
                        if face_image.size == 0:
                            continue  # Skip empty face regions

                        #yield face_image  # Yield the frame for streaming

                        # Generate the face embedding
                        face_embedding = get_face_embedding(face_image)  # This model performs better than the mp embedder
                        #face_embedding = get_mp_embedding(face_image)

                        # Check if this face matches any previously tracked face, and find the face_id with maximum similarity
                        matched_id = None
                        max_similarity = 0
                        for face_id, data in face_tracker.items():
                            # Calculate the cosine similarity between embeddings
                            similarity = np.dot(face_embedding, data["embedding"]) / (
                                np.linalg.norm(face_embedding) * np.linalg.norm(data["embedding"])
                            )
                            '''
                            # Compute cosine similarity with MediaPipe; commented out because of worse performance
                            similarity = ImageEmbedder.cosine_similarity(
                                face_embedding, data["embedding"])
                            '''
                            if similarity > max_similarity:
                                max_similarity = similarity
                                max_face_id = face_id

                        # Define a larger bounding box for the output face
                        xb = int(x * 0.8)
                        yb = int(y * 0.8)
                        xe = int(x * 1.2 + w)
                        ye = int(y * 1.2 + h)

                        if max_similarity > similarity_threshold:
                            matched_id = max_face_id
                            number_matched = face_tracker[matched_id]["number_matched"] + 1
                            face_tracker[matched_id]["number_matched"] = number_matched
                            if score > face_tracker[matched_id]["score"]:  # switch to the higher-score image
                                face_image_b = frame[yb:ye, xb:xe]
                                normalized_face = cv2.resize(face_image_b, (target_width, target_height))
                                face_tracker[matched_id] = {"embedding": face_embedding, "number_matched": number_matched, "image": normalized_face, "score": score}

                        # If the face is not matched, assign a new ID
                        if matched_id is None:
                            face_id_counter += 1
                            matched_id = face_id_counter

                            # Update the face tracker with the new embedding and frame number
                            face_image_b = frame[yb:ye, xb:xe]
                            normalized_face = cv2.resize(face_image_b, (target_width, target_height))
                            face_tracker[matched_id] = {"embedding": face_embedding, "number_matched": 0, "image": normalized_face, "score": score}

                        # Draw a larger bounding box and face ID
                        cv2.rectangle(frame, (xb, yb), (xe, ye), (0, 255, 0), 2)
                        cv2.putText(frame, f"ID: {matched_id}", (xb, yb - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)

                        # Display the resulting frame, for debug purposes
                        #yield frame  # Yield the frame for streaming
                        #time.sleep(2)  # simulate a delay

            # Increment the frame number
            frame_number += 1

    # finished reading the video
    if len(face_tracker) == 0:
        return None

    sorted_data = sorted(face_tracker, key=lambda x: face_tracker[x]['number_matched'], reverse=True)

    # find the top N faces among all detected faces
    number_faces = len(face_tracker)
    if number_faces >= 3:
        center_top1 = [sorted_data[1], sorted_data[0], sorted_data[2]]  # Top 1 takes the center position
    else:
        center_top1 = sorted_data

    images = []
    contents = []
    for face_id in center_top1:
        #yield face_tracker[face_id]["image"]  # Yield the frame for streaming
        #time.sleep(2)  # simulate a delay
        face_image = face_tracker[face_id]["image"]
        images.append(face_image)

    # Release the video capture object
    video_capture.release()
    cv2.destroyAllWindows()

    return images

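Example (not part of agent_tools.py): a quick way to exercise face_detection_embed on its own. sample.mp4 is a placeholder file name; importing agent_tools also loads the embedding models and constructs the Gemini agent, so those dependencies must be installed.

```python
import cv2

from agent_tools import face_detection_embed

faces = face_detection_embed("sample.mp4")   # placeholder path to a short local clip

if faces is None:
    print("No faces detected.")
else:
    # Each entry is a 100x100 BGR crop of one recurring face, with the most frequent face in the middle.
    for i, crop in enumerate(faces):
        cv2.imwrite(f"face_{i}.jpg", crop)
    print(f"Saved {len(faces)} face crops.")
```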
# Advanced object tracking with MediaPipe object detection
def object_detection_embed(video_path):
    # Initialize the MediaPipe object detector
    BaseOptions = mp.tasks.BaseOptions
    ObjectDetector = mp.tasks.vision.ObjectDetector
    ObjectDetectorOptions = mp.tasks.vision.ObjectDetectorOptions
    VisionRunningMode = mp.tasks.vision.RunningMode

    options = ObjectDetectorOptions(
        base_options=BaseOptions(model_asset_path='efficientdet_lite0.tflite'),
        max_results=3,
        score_threshold=0.5,
        running_mode=VisionRunningMode.IMAGE,
    )

    mp_drawing = mp.solutions.drawing_utils

    # Load a pre-trained face embedding model (OpenCV's FaceNet)
    #embedder = cv2.dnn.readNetFromTorch("nn4.small2.v1.t7")  # Download the model from OpenCV's GitHub; moved out of this function

    # Open the video file
    video_capture = cv2.VideoCapture(video_path)

    # Dictionary to store object embeddings and their corresponding IDs, number of matches, normalized images
    object_tracker = {}  # Format: {object_id: {"embedding": obj_embedding, "number_matched": number_matched, "image": normalized_obj, "score": score, "category": category}}
    object_id_counter = 0
    similarity_threshold = 0.5  # Threshold for considering two objects the same
    frame_number = 0

    # Define the target size for normalization; only the height is fixed
    #target_width = 100  # Desired width for all objects
    target_height = 100  # Desired height for all objects

    with ObjectDetector.create_from_options(options) as obj_detection:
        while True:
            # Grab a single frame of video
            ret, frame = video_capture.read()
            if not ret:
                break

            if frame_number % 30 == 0:
                # Convert the frame to RGB for MediaPipe
                rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

                # Get the frame dimensions
                frame_height, frame_width, _ = rgb_frame.shape

                # Load the input image from a numpy array.
                mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=rgb_frame)

                # Process the frame with MediaPipe object detection
                results = obj_detection.detect(mp_image)

                if results.detections:
                    for detection in results.detections:
                        #print("detection:", detection)
                        '''
                        sample output:
                        Detection(bounding_box=BoundingBox(origin_x=84, origin_y=168, width=272, height=448),
                        categories=[Category(index=None, score=0.81640625, display_name=None, category_name='person')], keypoints=[])
                        '''
                        # Get the bounding box of the object; note that x is treated as the height direction (h) below
                        bboxC = detection.bounding_box
                        x = int(bboxC.origin_x)
                        y = int(bboxC.origin_y)
                        w = int(bboxC.width)
                        h = int(bboxC.height)
                        score = detection.categories[0].score
                        category = detection.categories[0].category_name

                        # Extract the object region
                        obj_image = frame[y:y+w, x:x+h]
                        if obj_image.size == 0:
                            continue  # Skip empty object regions

                        #yield obj_image  # Yield the frame for streaming

                        # Generate the object embedding
                        #face_embedding = get_face_embedding(face_image)  # This model performs better than the mp embedder for faces
                        obj_embedding = get_mp_embedding(obj_image)

                        # Check if this object matches any previously tracked object, and find the obj_id with maximum similarity
                        matched_id = None
                        max_similarity = 0
                        for obj_id, data in object_tracker.items():
                            '''
                            # Cosine similarity between numpy embeddings, as used in the face tracker above
                            similarity = np.dot(face_embedding, data["embedding"]) / (
                                np.linalg.norm(face_embedding) * np.linalg.norm(data["embedding"])
                            )
                            '''
                            # Compute cosine similarity with the MediaPipe helper
                            similarity = ImageEmbedder.cosine_similarity(
                                obj_embedding, data["embedding"])

                            if similarity > max_similarity:
                                max_similarity = similarity
                                max_obj_id = obj_id

                        # Define a larger bounding box for the output object
                        xb = int(x * 0.8)
                        yb = int(y * 0.8)
                        xe = int(x * 1.2 + h)
                        ye = int(y * 1.2 + w)

                        scale = target_height / (x * 0.4 + w)
                        target_width = int((y * 0.4 + w) * scale)

                        if max_similarity > similarity_threshold:
                            matched_id = max_obj_id
                            number_matched = object_tracker[matched_id]["number_matched"] + 1
                            object_tracker[matched_id]["number_matched"] = number_matched
                            if score > object_tracker[matched_id]["score"]:  # switch to the higher-score image
                                obj_image_b = frame[yb:ye, xb:xe]
                                normalized_obj = cv2.resize(obj_image_b, (target_width, target_height))
                                object_tracker[matched_id] = {"embedding": obj_embedding, "number_matched": number_matched, "image": normalized_obj, "score": score, "category": category}

                        # If the object is not matched, assign a new ID
                        if matched_id is None:
                            object_id_counter += 1
                            matched_id = object_id_counter

                            # Update the object tracker with the new embedding and frame number
                            obj_image_b = frame[yb:ye, xb:xe]
                            normalized_obj = cv2.resize(obj_image_b, (target_width, target_height))
                            object_tracker[matched_id] = {"embedding": obj_embedding, "number_matched": 0, "image": normalized_obj, "score": score, "category": category}

                        # Draw a larger bounding box and object ID
                        #cv2.rectangle(frame, (xb, yb), (xe, ye), (0, 255, 0), 2)
                        #cv2.putText(frame, f"ID: {matched_id}", (xb, yb - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)

                        # Display the resulting frame, for debug purposes
                        #yield frame  # Yield the frame for streaming
                        #time.sleep(2)  # simulate a delay

            # Increment the frame number
            frame_number += 1

    # finished reading the video
    if len(object_tracker) == 0:
        return None

    sorted_data = sorted(object_tracker, key=lambda x: object_tracker[x]['number_matched'], reverse=True)

    # find the top N objects among all detected objects
    number_objs = len(object_tracker)
    if number_objs >= 3:
        center_top1 = [sorted_data[1], sorted_data[0], sorted_data[2]]  # Top 1 takes the center position
    else:
        center_top1 = sorted_data

    images = []
    contents = []
    #center_top1 = [sorted_data[1], sorted_data[0], sorted_data[2]]  # Top 1 takes the center position
    for obj_id in center_top1:
        #yield object_tracker[obj_id]["image"]  # Yield the frame for streaming
        #time.sleep(2)  # simulate a delay
        obj_image = object_tracker[obj_id]["image"]
        images.append(obj_image)

    # Release the video capture object
    video_capture.release()
    cv2.destroyAllWindows()

    return images

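Example (not part of agent_tools.py): what the EfficientDet-Lite0 detector alone returns before any tracking, mirroring the setup above on a single still image. frame.jpg is a placeholder path.

```python
import cv2
import mediapipe as mp

BaseOptions = mp.tasks.BaseOptions
ObjectDetector = mp.tasks.vision.ObjectDetector
ObjectDetectorOptions = mp.tasks.vision.ObjectDetectorOptions
VisionRunningMode = mp.tasks.vision.RunningMode

options = ObjectDetectorOptions(
    base_options=BaseOptions(model_asset_path="efficientdet_lite0.tflite"),
    max_results=3,
    score_threshold=0.5,
    running_mode=VisionRunningMode.IMAGE,
)

bgr = cv2.imread("frame.jpg")                     # placeholder still image
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=rgb)

with ObjectDetector.create_from_options(options) as detector:
    for det in detector.detect(mp_image).detections:
        box, cat = det.bounding_box, det.categories[0]
        print(cat.category_name, round(cat.score, 3), box.origin_x, box.origin_y, box.width, box.height)
```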
#=========================================================================================================
# Summarize the video using a phi Agent

def summarize_video(video_path, user_prompt, out_lang='Original'):
    # Upload and process the video
    processed_video = upload_file(video_path)

    # Extract the video info into a dictionary
    video_info = str(processed_video).split('File(')[1]
    video_info = video_info.replace(")", "")
    video_dic = eval(video_info)
    print("display_name, sha256_hash:", video_dic['display_name'], video_dic['sha256_hash'])

    while processed_video.state.name == "PROCESSING":
        time.sleep(1)
        processed_video = get_file(processed_video.name)

    # detect the language
    lang_prompt = (f'''Give language name''')
    lang_response = multimodal_Agent.run(lang_prompt, videos=[processed_video]).content
    language = str(lang_response).split(' ')[-1]
    print('Video language is:', language)
    if out_lang == 'Original':
        out_lang = language

    # Analysis prompt
    analysis_prompt = (f'''
        First analyze the video and then answer the following questions using the video analysis, questions:
        {user_prompt}
        Provide a comprehensive response focusing on practical, actionable information with the original questions.
        Answer the questions in {out_lang}. Limit the total output to 30 lines.'''
    )

    # AI agent processing
    response = multimodal_Agent.run(analysis_prompt, videos=[processed_video])

    markdown_text = response.content

    return out_lang, str(markdown_text)

#=======================================================================================

# Initialize the agent
multimodal_Agent = initialize_agent()
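Example (not part of agent_tools.py): calling summarize_video directly. The GOOGLE_API_KEY environment variable name is an assumption about how the Gemini/google-generativeai clients are configured here, and clip.mp4 is a placeholder path.

```python
import os

assert os.environ.get("GOOGLE_API_KEY"), "set GOOGLE_API_KEY before running (assumed key name)"

from agent_tools import summarize_video

out_lang, summary_md = summarize_video(
    "clip.mp4",                                    # placeholder local video
    "Summarize this video in five bullet points.",
    out_lang="English",
)
print(out_lang)
print(summary_md)
```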
app.py
ADDED
@@ -0,0 +1,291 @@
# -*- coding: utf-8 -*-
import gradio as gr
from typing import Optional, List

from pydantic import BaseModel, Field

from phi.agent import Agent
from phi.model.google import Gemini
from phi.workflow import Workflow, RunResponse, RunEvent
from phi.storage.workflow.sqlite import SqlWorkflowStorage
from phi.tools.duckduckgo import DuckDuckGo
from phi.utils.pprint import pprint_run_response
from phi.utils.log import logger

from agent_tools import (
    object_detection_embed,
    summarize_video,
)
from utils import (
    create_poster,
    download_youtube_video,
    generate_tmp_filename,
    pdf_to_jpg,
)

import os
from PIL import Image
import numpy as np

# Output language
LANG_OPTIONS = [
    "Original",
    "Chinese",
    "English",
]

#====================================================================================
class Video(BaseModel):
    name: str = Field(..., description="File name of the video.")
    url: str = Field(..., description="Link to the video.")
    summary: Optional[str] = Field(..., description="Summary of the video.")
    hash_value: Optional[str] = Field(..., description="sha256_hash value of the video.")

class VideoCache:
    def __init__(self):
        self.session_state = gr.State({
            'metadata': {},    # For summaries
            'frame_data': {}   # For image arrays (serialized)
        })

    def add_to_cache(self, basename: str, out_lang: str, summary: str, frames: list[np.ndarray]):
        """Store both summary and frames properly"""
        key = basename + '_' + out_lang
        # Convert numpy arrays to bytes
        serialized_frames = [self._array_to_bytes(arr) for arr in frames]

        # Update the cache
        current_state = self.session_state.value
        current_state['metadata'][key] = summary
        current_state['frame_data'][key] = serialized_frames
        self.session_state.value = current_state

    def get_from_cache(self, basename: str, out_lang: str) -> tuple:
        """Retrieve both summary and frames"""
        key = basename + '_' + out_lang
        cache = self.session_state.value
        summary = cache['metadata'].get(key)
        frame_bytes = cache['frame_data'].get(key, [])

        # Convert bytes back to arrays
        frames = [self._bytes_to_array(*b) for b in frame_bytes]
        return summary, frames

    @staticmethod
    def _array_to_bytes(arr: np.ndarray) -> tuple:
        """Convert an array to (bytes, shape)"""
        return arr.tobytes(), arr.shape

    @staticmethod
    def _bytes_to_array(b: bytes, shape: tuple) -> np.ndarray:
        """Reconstruct an array from (bytes, shape)"""
        return np.frombuffer(b, dtype=np.uint8).reshape(shape)

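Example (not part of app.py): the cache round-trips frames as raw bytes plus shape, so only uint8 arrays survive reconstruction. A minimal sketch assuming VideoCache from above is in scope; the key values are placeholders.

```python
import gradio as gr
import numpy as np

# VideoCache is the class defined above; constructing it inside a Blocks context
# mirrors how app.py uses it.
with gr.Blocks():
    cache = VideoCache()
    frames = [np.zeros((100, 100, 3), dtype=np.uint8)]   # e.g. crops from object_detection_embed

    cache.add_to_cache(basename="AE5HZsZOlkY", out_lang="English", summary="demo summary", frames=frames)
    summary, restored = cache.get_from_cache("AE5HZsZOlkY", "English")

    assert summary == "demo summary"
    assert restored[0].shape == (100, 100, 3) and restored[0].dtype == np.uint8
```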
class VideoPosterGenerator(Workflow):
    # Define an Agent that will load the video clip
    loader: Agent = Agent(
        tools=[download_youtube_video],
        show_tool_calls=True,
        description="Given a url_link, load the video to process.",
    )

    # Define an Agent that will summarize the video
    summarizer: Agent = Agent(
        tools=[summarize_video],
        show_tool_calls=True,
        markdown=True,
        description="Given a video, answer the prompt questions.",
    )

    # Define an Agent that will extract the top three object images
    detector: Agent = Agent(
        tools=[object_detection_embed],
        show_tool_calls=True,
        structured_outputs=True,
        description="Given a video, extract the top three object images.",
    )

    # Define an Agent that will generate a poster
    poster: Agent = Agent(
        tools=[create_poster],
        show_tool_calls=True,
        structured_outputs=True,
        description="Given a summary and images, generate a one-page poster.",
    )

    def run(self, url: str, user_prompt: str, out_lang: str, use_cache: bool = True) -> RunResponse:
        """This is where the main logic of the workflow is implemented."""

        logger.info(f"Generating a poster for video: {url}")
        basename = os.path.basename(url)
        pdf_name = generate_tmp_filename(basename, ".pdf")

        # Step 1: Use the cached video content if use_cache is True
        if use_cache:
            summary, objects = video_cache.get_from_cache(basename, out_lang)

            if summary is not None and objects is not None:
                logger.info(f"found cached_video_content: {url}")
                poster_response: Optional[poster] = create_poster(pdf_name, objects, out_lang, summary, url)

                if poster_response is None:
                    return RunResponse(
                        event=RunEvent.workflow_completed,
                        content=f"Failed to generate video poster, please try again!",
                    )
                else:
                    logger.info(f"Poster generated successfully.")

                return RunResponse(
                    event=RunEvent.workflow_completed,
                    content=[None, poster_response],
                )

        # Step 2: load the video for the given url
        video_response: Optional[loader] = download_youtube_video(url)
        # If the video is not loaded successfully, end the workflow
        if video_response is None:
            return RunResponse(
                event=RunEvent.workflow_completed,
                content=f"Sorry, could not load the video: {url}",
            )
        else:
            logger.info(f"Video {url} is loaded.")
            video_path = video_response

        # Step 3: summarize the video for the given questions
        summary_response: Optional[summarizer] = summarize_video(video_path, user_prompt, out_lang)
        # If the summary is not generated, end the workflow
        if summary_response is None:
            return RunResponse(
                event=RunEvent.workflow_completed,
                content=f"Failed to get summary, please try again!",
            )
        else:
            logger.info(f"Video summary is generated.")
            lang, summary = summary_response

        # Step 4: extract the top 3 object (person or other) images
        images_response: Optional[detector] = object_detection_embed(video_path)
        # If objects are not detected successfully, end the workflow
        if images_response is None:
            return RunResponse(
                event=RunEvent.workflow_completed,
                content=f"Failed to extract images, please try again!",
            )
        else:
            logger.info(f"Objects extracted successfully.")
            objects = images_response

        # Step 5: generate the video poster
        poster_response: Optional[poster] = create_poster(pdf_name, objects, lang, summary, url)

        if poster_response is None:
            return RunResponse(
                event=RunEvent.workflow_completed,
                content=f"Failed to generate video poster, please try again!",
            )
        else:
            logger.info(f"Poster generated successfully.")

            # Store in the cache
            video_cache.add_to_cache(basename=basename, out_lang=out_lang, summary=summary, frames=objects)

            return RunResponse(
                event=RunEvent.workflow_completed,
                content=[video_path, poster_response],
            )
#=====================================================================================
# Combine the outputs of object detection and video summarization to generate a single-page poster
def generate_poster_2(url, user_prompt, out_lang):
    url_base_name = os.path.basename(url)
    jpg_name = generate_tmp_filename(url_base_name, ".jpg")

    # Initialize the poster generator workflow
    # - Creates a unique session ID based on the video url
    # - Sets up SQLite storage for caching results
    poster = VideoPosterGenerator(
        session_id=f"generate-poster-on-{url}",
        storage=SqlWorkflowStorage(
            table_name="generate_poster_workflows",
            db_file="tmp/workflows.db",
        ),
    )

    # Execute the workflow with caching enabled
    # The RunResponse content carries the downloaded video path and the generated poster
    video_path, video_poster = poster.run(url=url, user_prompt=user_prompt, out_lang=out_lang, use_cache=True).content
    logger.info(f"video_poster: {video_poster}")

    poster_jpg = pdf_to_jpg(video_poster, jpg_name)

    return video_path, video_poster, jpg_name
#==================================================================================
# Gradio interface
print("Setting up Gradio interface...")
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    video_cache = VideoCache()

    gr.Markdown(
        """
        # 🎥 Video Smart Summary - From Video to Poster with Multimodal Agent

        Provide a YouTube or other video URL to get an AI-generated summary poster.
        """
    )

    with gr.Row():
        with gr.Column(scale=5, variant="compact"):
            url_input = gr.Textbox(label="Paste YouTube URL here",
                                   placeholder="https://www.youtube.com/shorts/AE5HZsZOlkY",
                                   value="https://www.youtube.com/shorts/AE5HZsZOlkY")
            video_input = gr.Video(label="Downloaded Video", height=300, scale=5)

        with gr.Column(scale=5, variant="compact"):
            lang_name = gr.Dropdown(
                choices=LANG_OPTIONS,
                value=LANG_OPTIONS[0],
                label="Output Language",
                interactive=True,
            )

            user_prompt = gr.Textbox(label="📊 User Prompt",
                                     value=
                                     f'''0. **Title**: Summarize this video in one sentence with no more than 8 words.
1. **Story**: How the scene is introduced and the tone is set. What is happening in the scene? Describe key visuals and actions.
2. **Characters**: Identify the top three characters, noting their expressions, attire, actions, and interactions. Highlight emotional nuances and gestures.
3. **Narration or Voiceover**: Describe what types of narration or voiceover are used in the video.
4. **Mood and Tone**: Capture the overall mood and tone of each scene, mentioning any music or sound effects that enhance these elements.''',
                                     placeholder="Ask anything about the video - the AI Agent will analyze everything and search the web if needed",
                                     info="You can ask questions about the video content",
                                     max_lines=30,
                                     interactive=True)

    with gr.Row():
        poster_button = gr.Button("🚀 Generate Poster", variant="primary")

    with gr.Row():
        with gr.Column(scale=6, variant="compact"):
            jpg_file = gr.Image(label="Generated Poster Image", type="filepath")
        with gr.Column(scale=4, variant="compact"):
            pdf_file = gr.File(label="Generated Poster PDF", file_types=[".pdf"])


    gr.Markdown(
        """
        ### How to use:
        1. Paste a YouTube link into the URL input textbox;
        2. Select the output language you want; currently only Original (default, no translation), English and Chinese are supported;
        3. Modify the prompt questions if you want (optional);
        4. Click the primary task button "Generate Poster";
        5. Download the generated poster (JPG or PDF) from the 'Generated Poster ...' blocks.

        *Note: Processing may take a few minutes depending on the video length.*
        *If you get an error for some reason, retry before debugging!*
        """
    )

    # actions
    poster_button.click(generate_poster_2, inputs=[url_input, user_prompt, lang_name], outputs=[video_input, pdf_file, jpg_file])

demo.launch(share=True)
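Note: utils.py is imported above but is not included in this upload. Judging only from the call sites in app.py, its helpers are assumed to look roughly like the stubs below; the signatures and behavior are inferred, not the actual implementation.

```python
# Hypothetical stubs inferred from app.py's call sites; the real utils.py may differ.
import uuid

def generate_tmp_filename(basename: str, ext: str) -> str:
    """Return a unique temporary file name such as 'tmp/<basename>-<uuid><ext>'."""
    return f"tmp/{basename}-{uuid.uuid4().hex}{ext}"

def download_youtube_video(url: str):
    """Download the video (e.g. with yt_dlp) and return the local file path, or None on failure."""
    ...

def create_poster(pdf_name: str, images, lang: str, summary: str, url: str):
    """Lay out the summary text and object images into a one-page PDF and return its path, or None on failure."""
    ...

def pdf_to_jpg(pdf_path: str, jpg_name: str):
    """Render the first page of the PDF to a JPG at jpg_name and return the path."""
    ...
```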
cookies.txt
ADDED
@@ -0,0 +1,106 @@
# Netscape HTTP Cookie File
# http://curl.haxx.se/rfc/cookie_spec.html
# This is a generated file! Do not edit.
.youtube.com TRUE / TRUE 1737011390 GPS 1
.youtube.com TRUE / TRUE 1771569697 PREF f4=4000000&tz=Asia.Shanghai
.youtube.com TRUE / TRUE 1737010197 CONSISTENCY AKreu9vCfSs6pP6NbKMCvU1eYGcbBAg1tgQNnfkLoXJbJ7kHkErV_NV5RDuUQpFQFj71zvR8mS3w3aDOEBB6nL_vghwpUrX4DImcN8EQ1sSj1yjmZIkCskc
.google.com TRUE / TRUE 1752561661 AEC AZ6Zc-UXh8GpKVCy-OKF2pR_aElqmDmK4dWp2g-a44axMfB7G6mOtN1bEw
www.google.com FALSE / FALSE 1737010262 DV I-ER0WmhH2AfMMNiN6YW-gY0cYDdRhk
accounts.google.com FALSE / TRUE 1739601671 OTZ 7911761_24_24__24_
.google.com TRUE / TRUE 1752820863 NID 520=cuBAAmJ9ow9FZASusJgrrrOm9K93cjVcXVkAf_C2TaX2nO_JMArVty1oMP3yClxf90gTsOOtzG9iXkmQMPJ5iYPABT9AmV7Q1rQjCwC8RmWpe9jhKFTyxrFKusdtllARWnbWkt_BeivTmEPuE7-3nJgLKBKmYfLtYqsmFtpc9J6455fi10_as96jJvGQmTem4QF-W8oJoQaaDyoZ0K6nr-jGWJlhnPHAwgpIxWMGDPteJO0IBWkpyfvGDWf2_nqlf--2w2A61HQRHnbDkhM74a0RGSNuCUZQdjI_eSHYRKM5YtQixy2FQiOLmy7vwu8CB03RRw0poqgXNPaIExvyvhZ84dMYv1OysiraLfFnSDd2DG6F4TxjhUzeltoDuT45Gm4vq4xG8ohtIbOAw07XNJYYRYEFp82uC8btol5AL6PSBRBq76MSLzZmjkaLkyEeSUJETPROctxfk8jt07KZSZ7VPyW5yAlUAxrlkEs5fWKwievSkOa0ScSQ8MWNUnLKbMjESE1a_osLLiGBZ4t_aUMl79jotuHHb7ghhxfmJ97lHnMSqZgYfWjm18n-hxRmLYVtG1MQVgvjqdWLjoZik0h7JD_X6Fcq6fx6iIEFvaFsorECqBo2WSMM0vwkC01jWZGGL5popWOtUZrEgS3_EU09yCzkmAzxsmPe0q3WFa24bQYedaqAkFGebDyii_ETHrMtNIzbgu8C-qrE6d18lWsGry0dW893qGGfzOPJNajeY1fwA7187F7ZiRuCQZCSfZhjKQo
.google.com TRUE / FALSE 1771569694 SID g.a000sghTt3-obfaPZOkmJ-CDJ1U6Ei1Uq-8W0L1LCRwKhKsL2XK8vhKnjLqFPUE_JJBtTK6wRwACgYKASgSARESFQHGX2MieMk4vxy7FKpitLRDwDB9VxoVAUF8yKqbjHQ4UlA5HQJCEI3L8oNn0076
.google.com TRUE / TRUE 1771569694 __Secure-1PSID g.a000sghTt3-obfaPZOkmJ-CDJ1U6Ei1Uq-8W0L1LCRwKhKsL2XK8IZdcQGawqkTsrPnT4rKe6AACgYKAbcSARESFQHGX2MiV_toN7C6I0hFB1-I0u0fgBoVAUF8yKoEUGy6VBRVRzxRZSNZN4On0076
.google.com TRUE / TRUE 1771569694 __Secure-3PSID g.a000sghTt3-obfaPZOkmJ-CDJ1U6Ei1Uq-8W0L1LCRwKhKsL2XK8I1OfYMDUSZr0bBTgzee0nQACgYKAR0SARESFQHGX2Mi1uHS1DY--s5-A-rae7-XWxoVAUF8yKo6FbT_APdR1RYPFpEOV6BN0076
.google.com TRUE / FALSE 1771569694 HSID AXCj2ZxFZYUeEmGKG
.google.com TRUE / TRUE 1771569694 SSID ARPosjI5U2j0EUZjn
.google.com TRUE / FALSE 1771569694 APISID NQo2BYHF9ulg8byP/AYDJ1Kma5Z56PpGgm
.google.com TRUE / TRUE 1771569694 SAPISID CUOKDfcJr5fsAuGB/A_rgQNjYhdnyijGwj
.google.com TRUE / TRUE 1771569694 __Secure-1PAPISID CUOKDfcJr5fsAuGB/A_rgQNjYhdnyijGwj
.google.com TRUE / TRUE 1771569694 __Secure-3PAPISID CUOKDfcJr5fsAuGB/A_rgQNjYhdnyijGwj
accounts.google.com FALSE / TRUE 1771569694 __Host-GAPS 1:RQBy5k5B_C5jAqAL-t7uioJGgXhjv7tr1erNCC_HSqO0mtLH04k0Jd6rXSX1g4NGa3_NUWC0JdI0BKf5EJ9_PapUHrXuoA:iejfuy8Yi2CXMrR5
accounts.google.com FALSE / TRUE 1771569694 LSID s.HK|s.youtube:g.a000sghTt4qHecL8VzIlofS3hyqQ9G36KeWl6kbej3QDkAeo-jUtkV72v3IOfySf7VQAoQgqZQACgYKAZUSARESFQHGX2Mi9XEPjf8DPezaAwhlHT7BOxoVAUF8yKq-bMPqx1SIgKrOeByS95TU0076
accounts.google.com FALSE / TRUE 1771569694 __Host-1PLSID s.HK|s.youtube:g.a000sghTt4qHecL8VzIlofS3hyqQ9G36KeWl6kbej3QDkAeo-jUtJwSmGJavnt2MVNOj2T4R9gACgYKATgSARESFQHGX2Mi0h-fm59mmVUt9CHO60aFnxoVAUF8yKpr4pWJllra6NcEAhozO72L0076
accounts.google.com FALSE / TRUE 1771569694 __Host-3PLSID s.HK|s.youtube:g.a000sghTt4qHecL8VzIlofS3hyqQ9G36KeWl6kbej3QDkAeo-jUtmS6j1SYGHxzLdhLpMl-OYQACgYKAVcSARESFQHGX2Mi2y89TCXJ6pkWk4ymz526SBoVAUF8yKrrGH0NhgZzD9fwr_Gilgrc0076
accounts.google.com FALSE / TRUE 1771569694 ACCOUNT_CHOOSER AFx_qI69XMKHVPBGm02RHkj0v9Nbnj4daVNtFekXGZUC9oUgqVEJzQ2H_1zKHWVTyb12fqmDuYTZv8In5c0ioe6HvNeuM6POTK9UImhuaYVLP_LZrcb02Bzd35wZZZ1qsno0uPCd7Z-s
.google.com TRUE / FALSE 1768545694 SIDCC AKEyXzVCtpM15wm98Z86_gKZMwkZ6i-tkR3Bknxmnzn-aOEMMJ1ugtwaIaLNBZnWulfiSUpIag
.google.com TRUE / TRUE 1768545694 __Secure-1PSIDCC AKEyXzUFkd_8y4XIRSAmLUOf3KHhaebAzXJl4rR9nQfgS18D681OWOviXnjUOHVw3x236NsSnw
.google.com TRUE / TRUE 1768545694 __Secure-3PSIDCC AKEyXzV8z6bF8CniAzfyuY_jjLJh8LSpaoSxf3Mbkcndp7Pxc-K-6bj-ardKxx0_IFo1tPE8Ig
.youtube.com TRUE / FALSE 1771569694 SID g.a000sghTt3-obfaPZOkmJ-CDJ1U6Ei1Uq-8W0L1LCRwKhKsL2XK8vhKnjLqFPUE_JJBtTK6wRwACgYKASgSARESFQHGX2MieMk4vxy7FKpitLRDwDB9VxoVAUF8yKqbjHQ4UlA5HQJCEI3L8oNn0076
.youtube.com TRUE / TRUE 1768545694 __Secure-1PSIDTS sidts-CjEBmiPuTRvrWXjcJY8WRvjcW-qZV1AMGYiTLMpAW9P9x_hrbHS1YQV3xveoWURPE0d1EAA
.youtube.com TRUE / TRUE 1768545694 __Secure-3PSIDTS sidts-CjEBmiPuTRvrWXjcJY8WRvjcW-qZV1AMGYiTLMpAW9P9x_hrbHS1YQV3xveoWURPE0d1EAA
.youtube.com TRUE / TRUE 1771569694 __Secure-1PSID g.a000sghTt3-obfaPZOkmJ-CDJ1U6Ei1Uq-8W0L1LCRwKhKsL2XK8IZdcQGawqkTsrPnT4rKe6AACgYKAbcSARESFQHGX2MiV_toN7C6I0hFB1-I0u0fgBoVAUF8yKoEUGy6VBRVRzxRZSNZN4On0076
.youtube.com TRUE / TRUE 1771569694 __Secure-3PSID g.a000sghTt3-obfaPZOkmJ-CDJ1U6Ei1Uq-8W0L1LCRwKhKsL2XK8I1OfYMDUSZr0bBTgzee0nQACgYKAR0SARESFQHGX2Mi1uHS1DY--s5-A-rae7-XWxoVAUF8yKo6FbT_APdR1RYPFpEOV6BN0076
.youtube.com TRUE / FALSE 1771569694 HSID AUX6oStcLvGkMrlC_
.youtube.com TRUE / TRUE 1771569694 SSID AoEYReBFJeow6fu4S
.youtube.com TRUE / FALSE 1771569694 APISID NQo2BYHF9ulg8byP/AYDJ1Kma5Z56PpGgm
.youtube.com TRUE / TRUE 1771569694 SAPISID CUOKDfcJr5fsAuGB/A_rgQNjYhdnyijGwj
.youtube.com TRUE / TRUE 1771569694 __Secure-1PAPISID CUOKDfcJr5fsAuGB/A_rgQNjYhdnyijGwj
.youtube.com TRUE / TRUE 1771569694 __Secure-3PAPISID CUOKDfcJr5fsAuGB/A_rgQNjYhdnyijGwj
.google.com.hk TRUE / FALSE 1771569694 SID g.a000sghTt3-obfaPZOkmJ-CDJ1U6Ei1Uq-8W0L1LCRwKhKsL2XK8vhKnjLqFPUE_JJBtTK6wRwACgYKASgSARESFQHGX2MieMk4vxy7FKpitLRDwDB9VxoVAUF8yKqbjHQ4UlA5HQJCEI3L8oNn0076
.google.com.hk TRUE / TRUE 1771569694 __Secure-1PSID g.a000sghTt3-obfaPZOkmJ-CDJ1U6Ei1Uq-8W0L1LCRwKhKsL2XK8IZdcQGawqkTsrPnT4rKe6AACgYKAbcSARESFQHGX2MiV_toN7C6I0hFB1-I0u0fgBoVAUF8yKoEUGy6VBRVRzxRZSNZN4On0076
.google.com.hk TRUE / TRUE 1771569694 __Secure-3PSID g.a000sghTt3-obfaPZOkmJ-CDJ1U6Ei1Uq-8W0L1LCRwKhKsL2XK8I1OfYMDUSZr0bBTgzee0nQACgYKAR0SARESFQHGX2Mi1uHS1DY--s5-A-rae7-XWxoVAUF8yKo6FbT_APdR1RYPFpEOV6BN0076
.google.com.hk TRUE / FALSE 1771569694 HSID AUX6oStcLvGkMrlC_
.google.com.hk TRUE / TRUE 1771569694 SSID AoEYReBFJeow6fu4S
.google.com.hk TRUE / FALSE 1771569694 APISID NQo2BYHF9ulg8byP/AYDJ1Kma5Z56PpGgm
.google.com.hk TRUE / TRUE 1771569694 SAPISID CUOKDfcJr5fsAuGB/A_rgQNjYhdnyijGwj
.google.com.hk TRUE / TRUE 1771569694 __Secure-1PAPISID CUOKDfcJr5fsAuGB/A_rgQNjYhdnyijGwj
.google.com.hk TRUE / TRUE 1771569694 __Secure-3PAPISID CUOKDfcJr5fsAuGB/A_rgQNjYhdnyijGwj
.google.com.hk TRUE / TRUE 1752820894 NID 520=V3_ibbSwhB5XjPIb_r-giJ1TsxRxIrJap8-ckGYzb3d7erWYQFXG-_kcB45hYfCAjvUICu9AJ3-46nlvcydookbhYOy5F9zYuiye8VLei0YoTXtIl_TcGyGJwaiOPBQIfp_JW1wMes5BpFJpDKmW3MbzLsIbBmmTnAVrQNvz8hpqzrU-I13ZjBz62E_YIeNdfbu0Hw
.youtube.com TRUE / TRUE 1771569694 LOGIN_INFO AFmmF2swRQIgVQxiog3h49cohLMh0fXuYNqw1eaBJQqGtnLjweOg3CgCIQCKS2rtwXWygfI5KuetNhlZZwJ66H8jCqIs-DQEM_Z_ng:QUQ3MjNmdzVoa0l6aGFhY2hpQXd3SndQTkxDbDllLTJKSzJ1UHpnZEg1WWlFYVZ3LU5lcTk1X2VraHVFeGdwaTN3V0l1dlhidVczbEpfMWh3SG9tMGdmbVRQaERXTVI0UDZGZWpuTFdmVnh4TGhzLVhaQml3eG5iZVdBV0tLWVc3RXRxWFROSkhRM0h0akh5S2tmOTVYS1dZLVQ4TnNzVm5n
.youtube.com TRUE / FALSE 1768545726 SIDCC AKEyXzXdo-vHKWTsyNosCG9hsnTUxON2C8bnZIeebixNhNoAsyg_pllH7fnuFBKhuTIHebrgiQ
.youtube.com TRUE / TRUE 1768545726 __Secure-1PSIDCC AKEyXzWr6tiO2G7V4n14bVr1ldXDeqSKJxuTPjEiReciP8fycHZeg9JCWtD7pGl2Zssmq2bRgw
.youtube.com TRUE / TRUE 1768545726 __Secure-3PSIDCC AKEyXzXWqjqwFaWOuiR0zCNx6KMQWjlWM3NRmPzk9_2fUHTji1gsf2IBOQFdZ4rScooVBBwX
.youtube.com TRUE / TRUE 1737011390 GPS 1
.youtube.com TRUE / TRUE 0 YSC NKrtaeYEjFI
.youtube.com TRUE / TRUE 1752561590 __Secure-ROLLOUT_TOKEN CJO2r_ft2JTQnAEQhLzR_ND5igMYhLzR_ND5igM%3D
.youtube.com TRUE / TRUE 1752561700 VISITOR_INFO1_LIVE nKhwD8k_TP8
.youtube.com TRUE / TRUE 1752561700 VISITOR_PRIVACY_METADATA CgJISxIEGgAgaA%3D%3D
.youtube.com TRUE / TRUE 1771569697 PREF f4=4000000&tz=Asia.Shanghai
.youtube.com TRUE / TRUE 1737010197 CONSISTENCY AKreu9vCfSs6pP6NbKMCvU1eYGcbBAg1tgQNnfkLoXJbJ7kHkErV_NV5RDuUQpFQFj71zvR8mS3w3aDOEBB6nL_vghwpUrX4DImcN8EQ1sSj1yjmZIkCskc
.google.com TRUE / TRUE 1752561661 AEC AZ6Zc-UXh8GpKVCy-OKF2pR_aElqmDmK4dWp2g-a44axMfB7G6mOtN1bEw
www.google.com FALSE / FALSE 1737010262 DV I-ER0WmhH2AfMMNiN6YW-gY0cYDdRhk
accounts.google.com FALSE / TRUE 1739601671 OTZ 7911761_24_24__24_
.google.com TRUE / TRUE 1752820863 NID 520=cuBAAmJ9ow9FZASusJgrrrOm9K93cjVcXVkAf_C2TaX2nO_JMArVty1oMP3yClxf90gTsOOtzG9iXkmQMPJ5iYPABT9AmV7Q1rQjCwC8RmWpe9jhKFTyxrFKusdtllARWnbWkt_BeivTmEPuE7-3nJgLKBKmYfLtYqsmFtpc9J6455fi10_as96jJvGQmTem4QF-W8oJoQaaDyoZ0K6nr-jGWJlhnPHAwgpIxWMGDPteJO0IBWkpyfvGDWf2_nqlf--2w2A61HQRHnbDkhM74a0RGSNuCUZQdjI_eSHYRKM5YtQixy2FQiOLmy7vwu8CB03RRw0poqgXNPaIExvyvhZ84dMYv1OysiraLfFnSDd2DG6F4TxjhUzeltoDuT45Gm4vq4xG8ohtIbOAw07XNJYYRYEFp82uC8btol5AL6PSBRBq76MSLzZmjkaLkyEeSUJETPROctxfk8jt07KZSZ7VPyW5yAlUAxrlkEs5fWKwievSkOa0ScSQ8MWNUnLKbMjESE1a_osLLiGBZ4t_aUMl79jotuHHb7ghhxfmJ97lHnMSqZgYfWjm18n-hxRmLYVtG1MQVgvjqdWLjoZik0h7JD_X6Fcq6fx6iIEFvaFsorECqBo2WSMM0vwkC01jWZGGL5popWOtUZrEgS3_EU09yCzkmAzxsmPe0q3WFa24bQYedaqAkFGebDyii_ETHrMtNIzbgu8C-qrE6d18lWsGry0dW893qGGfzOPJNajeY1fwA7187F7ZiRuCQZCSfZhjKQo
.google.com TRUE / FALSE 1771569694 SID g.a000sghTt3-obfaPZOkmJ-CDJ1U6Ei1Uq-8W0L1LCRwKhKsL2XK8vhKnjLqFPUE_JJBtTK6wRwACgYKASgSARESFQHGX2MieMk4vxy7FKpitLRDwDB9VxoVAUF8yKqbjHQ4UlA5HQJCEI3L8oNn0076
.google.com TRUE / TRUE 1771569694 __Secure-1PSID g.a000sghTt3-obfaPZOkmJ-CDJ1U6Ei1Uq-8W0L1LCRwKhKsL2XK8IZdcQGawqkTsrPnT4rKe6AACgYKAbcSARESFQHGX2MiV_toN7C6I0hFB1-I0u0fgBoVAUF8yKoEUGy6VBRVRzxRZSNZN4On0076
.google.com TRUE / TRUE 1771569694 __Secure-3PSID g.a000sghTt3-obfaPZOkmJ-CDJ1U6Ei1Uq-8W0L1LCRwKhKsL2XK8I1OfYMDUSZr0bBTgzee0nQACgYKAR0SARESFQHGX2Mi1uHS1DY--s5-A-rae7-XWxoVAUF8yKo6FbT_APdR1RYPFpEOV6BN0076
.google.com TRUE / FALSE 1771569694 HSID AXCj2ZxFZYUeEmGKG
.google.com TRUE / TRUE 1771569694 SSID ARPosjI5U2j0EUZjn
.google.com TRUE / FALSE 1771569694 APISID NQo2BYHF9ulg8byP/AYDJ1Kma5Z56PpGgm
.google.com TRUE / TRUE 1771569694 SAPISID CUOKDfcJr5fsAuGB/A_rgQNjYhdnyijGwj
.google.com TRUE / TRUE 1771569694 __Secure-1PAPISID CUOKDfcJr5fsAuGB/A_rgQNjYhdnyijGwj
.google.com TRUE / TRUE 1771569694 __Secure-3PAPISID CUOKDfcJr5fsAuGB/A_rgQNjYhdnyijGwj
accounts.google.com FALSE / TRUE 1771569694 __Host-GAPS 1:RQBy5k5B_C5jAqAL-t7uioJGgXhjv7tr1erNCC_HSqO0mtLH04k0Jd6rXSX1g4NGa3_NUWC0JdI0BKf5EJ9_PapUHrXuoA:iejfuy8Yi2CXMrR5
accounts.google.com FALSE / TRUE 1771569694 LSID s.HK|s.youtube:g.a000sghTt4qHecL8VzIlofS3hyqQ9G36KeWl6kbej3QDkAeo-jUtkV72v3IOfySf7VQAoQgqZQACgYKAZUSARESFQHGX2Mi9XEPjf8DPezaAwhlHT7BOxoVAUF8yKq-bMPqx1SIgKrOeByS95TU0076
accounts.google.com FALSE / TRUE 1771569694 __Host-1PLSID s.HK|s.youtube:g.a000sghTt4qHecL8VzIlofS3hyqQ9G36KeWl6kbej3QDkAeo-jUtJwSmGJavnt2MVNOj2T4R9gACgYKATgSARESFQHGX2Mi0h-fm59mmVUt9CHO60aFnxoVAUF8yKpr4pWJllra6NcEAhozO72L0076
accounts.google.com FALSE / TRUE 1771569694 __Host-3PLSID s.HK|s.youtube:g.a000sghTt4qHecL8VzIlofS3hyqQ9G36KeWl6kbej3QDkAeo-jUtmS6j1SYGHxzLdhLpMl-OYQACgYKAVcSARESFQHGX2Mi2y89TCXJ6pkWk4ymz526SBoVAUF8yKrrGH0NhgZzD9fwr_Gilgrc0076
accounts.google.com FALSE / TRUE 1771569694 ACCOUNT_CHOOSER AFx_qI69XMKHVPBGm02RHkj0v9Nbnj4daVNtFekXGZUC9oUgqVEJzQ2H_1zKHWVTyb12fqmDuYTZv8In5c0ioe6HvNeuM6POTK9UImhuaYVLP_LZrcb02Bzd35wZZZ1qsno0uPCd7Z-s
.google.com TRUE / FALSE 1768545694 SIDCC AKEyXzVCtpM15wm98Z86_gKZMwkZ6i-tkR3Bknxmnzn-aOEMMJ1ugtwaIaLNBZnWulfiSUpIag
.google.com TRUE / TRUE 1768545694 __Secure-1PSIDCC AKEyXzUFkd_8y4XIRSAmLUOf3KHhaebAzXJl4rR9nQfgS18D681OWOviXnjUOHVw3x236NsSnw
.google.com TRUE / TRUE 1768545694 __Secure-3PSIDCC AKEyXzV8z6bF8CniAzfyuY_jjLJh8LSpaoSxf3Mbkcndp7Pxc-K-6bj-ardKxx0_IFo1tPE8Ig
.youtube.com TRUE / FALSE 1771569694 SID g.a000sghTt3-obfaPZOkmJ-CDJ1U6Ei1Uq-8W0L1LCRwKhKsL2XK8vhKnjLqFPUE_JJBtTK6wRwACgYKASgSARESFQHGX2MieMk4vxy7FKpitLRDwDB9VxoVAUF8yKqbjHQ4UlA5HQJCEI3L8oNn0076
.youtube.com TRUE / TRUE 1768545694 __Secure-1PSIDTS sidts-CjEBmiPuTRvrWXjcJY8WRvjcW-qZV1AMGYiTLMpAW9P9x_hrbHS1YQV3xveoWURPE0d1EAA
.youtube.com TRUE / TRUE 1768545694 __Secure-3PSIDTS sidts-CjEBmiPuTRvrWXjcJY8WRvjcW-qZV1AMGYiTLMpAW9P9x_hrbHS1YQV3xveoWURPE0d1EAA
.youtube.com TRUE / TRUE 1771569694 __Secure-1PSID g.a000sghTt3-obfaPZOkmJ-CDJ1U6Ei1Uq-8W0L1LCRwKhKsL2XK8IZdcQGawqkTsrPnT4rKe6AACgYKAbcSARESFQHGX2MiV_toN7C6I0hFB1-I0u0fgBoVAUF8yKoEUGy6VBRVRzxRZSNZN4On0076
.youtube.com TRUE / TRUE 1771569694 __Secure-3PSID g.a000sghTt3-obfaPZOkmJ-CDJ1U6Ei1Uq-8W0L1LCRwKhKsL2XK8I1OfYMDUSZr0bBTgzee0nQACgYKAR0SARESFQHGX2Mi1uHS1DY--s5-A-rae7-XWxoVAUF8yKo6FbT_APdR1RYPFpEOV6BN0076
.youtube.com TRUE / FALSE 1771569694 HSID AUX6oStcLvGkMrlC_
.youtube.com TRUE / TRUE 1771569694 SSID AoEYReBFJeow6fu4S
.youtube.com TRUE / FALSE 1771569694 APISID NQo2BYHF9ulg8byP/AYDJ1Kma5Z56PpGgm
.youtube.com TRUE / TRUE 1771569694 SAPISID CUOKDfcJr5fsAuGB/A_rgQNjYhdnyijGwj
.youtube.com TRUE / TRUE 1771569694 __Secure-1PAPISID CUOKDfcJr5fsAuGB/A_rgQNjYhdnyijGwj
.youtube.com TRUE / TRUE 1771569694 __Secure-3PAPISID CUOKDfcJr5fsAuGB/A_rgQNjYhdnyijGwj
.google.com.hk TRUE / FALSE 1771569694 SID g.a000sghTt3-obfaPZOkmJ-CDJ1U6Ei1Uq-8W0L1LCRwKhKsL2XK8vhKnjLqFPUE_JJBtTK6wRwACgYKASgSARESFQHGX2MieMk4vxy7FKpitLRDwDB9VxoVAUF8yKqbjHQ4UlA5HQJCEI3L8oNn0076
.google.com.hk TRUE / TRUE 1771569694 __Secure-1PSID g.a000sghTt3-obfaPZOkmJ-CDJ1U6Ei1Uq-8W0L1LCRwKhKsL2XK8IZdcQGawqkTsrPnT4rKe6AACgYKAbcSARESFQHGX2MiV_toN7C6I0hFB1-I0u0fgBoVAUF8yKoEUGy6VBRVRzxRZSNZN4On0076
.google.com.hk TRUE / TRUE 1771569694 __Secure-3PSID g.a000sghTt3-obfaPZOkmJ-CDJ1U6Ei1Uq-8W0L1LCRwKhKsL2XK8I1OfYMDUSZr0bBTgzee0nQACgYKAR0SARESFQHGX2Mi1uHS1DY--s5-A-rae7-XWxoVAUF8yKo6FbT_APdR1RYPFpEOV6BN0076
.google.com.hk TRUE / FALSE 1771569694 HSID AUX6oStcLvGkMrlC_
.google.com.hk TRUE / TRUE 1771569694 SSID AoEYReBFJeow6fu4S
.google.com.hk TRUE / FALSE 1771569694 APISID NQo2BYHF9ulg8byP/AYDJ1Kma5Z56PpGgm
.google.com.hk TRUE / TRUE 1771569694 SAPISID CUOKDfcJr5fsAuGB/A_rgQNjYhdnyijGwj
.google.com.hk TRUE / TRUE 1771569694 __Secure-1PAPISID CUOKDfcJr5fsAuGB/A_rgQNjYhdnyijGwj
.google.com.hk TRUE / TRUE 1771569694 __Secure-3PAPISID CUOKDfcJr5fsAuGB/A_rgQNjYhdnyijGwj
.google.com.hk TRUE / TRUE 1752820894 NID 520=V3_ibbSwhB5XjPIb_r-giJ1TsxRxIrJap8-ckGYzb3d7erWYQFXG-_kcB45hYfCAjvUICu9AJ3-46nlvcydookbhYOy5F9zYuiye8VLei0YoTXtIl_TcGyGJwaiOPBQIfp_JW1wMes5BpFJpDKmW3MbzLsIbBmmTnAVrQNvz8hpqzrU-I13ZjBz62E_YIeNdfbu0Hw
.youtube.com TRUE / TRUE 1771569694 LOGIN_INFO AFmmF2swRQIgVQxiog3h49cohLMh0fXuYNqw1eaBJQqGtnLjweOg3CgCIQCKS2rtwXWygfI5KuetNhlZZwJ66H8jCqIs-DQEM_Z_ng:QUQ3MjNmdzVoa0l6aGFhY2hpQXd3SndQTkxDbDllLTJKSzJ1UHpnZEg1WWlFYVZ3LU5lcTk1X2VraHVFeGdwaTN3V0l1dlhidVczbEpfMWh3SG9tMGdmbVRQaERXTVI0UDZGZWpuTFdmVnh4TGhzLVhaQml3eG5iZVdBV0tLWVc3RXRxWFROSkhRM0h0akh5S2tmOTVYS1dZLVQ4TnNzVm5n
.youtube.com TRUE / FALSE 1768545726 SIDCC AKEyXzXdo-vHKWTsyNosCG9hsnTUxON2C8bnZIeebixNhNoAsyg_pllH7fnuFBKhuTIHebrgiQ
.youtube.com TRUE / TRUE 1768545726 __Secure-1PSIDCC AKEyXzWr6tiO2G7V4n14bVr1ldXDeqSKJxuTPjEiReciP8fycHZeg9JCWtD7pGl2Zssmq2bRgw
.youtube.com TRUE / TRUE 1768545726 __Secure-3PSIDCC AKEyXzXWqjqwFaWOuiR0zCNx6KMQWjlWM3NRmPzk9_2fUHTji1gsf2IBOQFdZ4rScooVBBwX
efficientdet_lite0.tflite
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0720bf247bd76e6594ea28fa9c6f7c5242be774818997dbbeffc4da460c723bb
size 4602795
mobilenet_v3_small_075_224_embedder.tflite
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0f786f8eb8bcecd1f9c5bc50f00a5e6b9f62600edb503ad3bfff5f51035b0ea5
size 4117698
nn4.small2.v1.t7
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9b72d54aeb24a64a8135dca8e792f7cc675c99a884a6940350a6cedcf7b7ba08
size 31510785
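The three model entries above are Git LFS pointers; only the hash and size are stored in the repo, and the binaries are fetched on checkout. A quick sketch to confirm that the checked-out files match the recorded hashes:

```python
import hashlib

# (file name, sha256 from the LFS pointer, size in bytes)
EXPECTED = [
    ("efficientdet_lite0.tflite", "0720bf247bd76e6594ea28fa9c6f7c5242be774818997dbbeffc4da460c723bb", 4602795),
    ("mobilenet_v3_small_075_224_embedder.tflite", "0f786f8eb8bcecd1f9c5bc50f00a5e6b9f62600edb503ad3bfff5f51035b0ea5", 4117698),
    ("nn4.small2.v1.t7", "9b72d54aeb24a64a8135dca8e792f7cc675c99a884a6940350a6cedcf7b7ba08", 31510785),
]

for name, sha, size in EXPECTED:
    with open(name, "rb") as f:
        data = f.read()
    ok = hashlib.sha256(data).hexdigest() == sha and len(data) == size
    print(f"{name}: {'OK' if ok else 'MISMATCH (still an LFS pointer?)'}")
```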