Update app.py
app.py CHANGED

@@ -1,531 +1,62 @@
 import os
-
-
-
-
-import io
-import requests
-import uuid
-import numpy as np
-from typing import List, Dict, Any, Optional, Union
-from fastapi import FastAPI, HTTPException, Depends, Request
-from fastapi.responses import HTMLResponse, JSONResponse
-from fastapi.staticfiles import StaticFiles
-from fastapi.templating import Jinja2Templates
-from pydantic import BaseModel, Field, field_validator
-from dotenv import load_dotenv
-from huggingface_hub import snapshot_download
-from transformers import AutoTokenizer, AutoModelForCausalLM
-from collections import deque
-from PIL import Image
-from tensorflow.keras.models import load_model
-from urllib.request import urlretrieve
-import uvicorn
-import logging
-
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-
-load_dotenv()
-
-os.makedirs("templates", exist_ok=True)
-os.makedirs("static", exist_ok=True)
-os.makedirs("image_model", exist_ok=True)
-
-app = FastAPI(
-    title="Multimodal AI Content Moderation API",
-    description="An advanced, multilingual, and multimodal content moderation API.",
-    version="1.0.0"
-)
-
-request_times = deque(maxlen=100)
-concurrent_requests = 0
-request_lock = threading.Lock()
-
-@app.middleware("http")
-async def track_metrics(request: Request, call_next):
-    global concurrent_requests
-    with request_lock:
-        concurrent_requests += 1
-
-    start_time = time.time()
-    response = await call_next(request)
-    process_time = time.time() - start_time
-    request_times.append(process_time)
-
-    with request_lock:
-        concurrent_requests -= 1
-
-    return response
-
-app.mount("/static", StaticFiles(directory="static"), name="static")
-templates = Jinja2Templates(directory="templates")
-
-device = "cuda" if torch.cuda.is_available() else "cpu"
-logger.info(f"Using device: {device}")
-
-def download_file(url, path):
-    if not os.path.exists(path):
-        logger.info(f"Downloading {os.path.basename(path)}...")
-        urlretrieve(url, path)
-
-logger.info("Downloading and loading models...")
-
-MODELS = {}
-
-logger.info("Loading text moderation model: detoxify-multilingual")
-from detoxify import Detoxify
-MODELS['detoxify-multilingual'] = Detoxify('multilingual', device=device)
-logger.info("Detoxify model loaded.")
-
-GEMMA_REPO = "daniel-dona/gemma-3-270m-it"
-LOCAL_GEMMA_DIR = os.path.join(os.getcwd(), "gemma_model")
-os.environ.setdefault("HF_HUB_ENABLE_HF_TRANSFER", "1")
-
-def ensure_local_model(repo_id: str, local_dir: str) -> str:
-    os.makedirs(local_dir, exist_ok=True)
-    snapshot_download(
-        repo_id=repo_id,
-        local_dir=local_dir,
-        local_dir_use_symlinks=False,
-        resume_download=True,
-    )
-    return local_dir
-
-logger.info("Loading text moderation model: gemma-3-270m-it")
-gemma_path = ensure_local_model(GEMMA_REPO, LOCAL_GEMMA_DIR)
-gemma_tokenizer = AutoTokenizer.from_pretrained(gemma_path, local_files_only=True)
-gemma_model = AutoModelForCausalLM.from_pretrained(
-    gemma_path,
-    local_files_only=True,
-    torch_dtype=torch.float32,
-    device_map=device
-)
-gemma_model.eval()
-MODELS['gemma-3-270m-it'] = (gemma_model, gemma_tokenizer)
-logger.info("Gemma model loaded.")
-
-NSFW_MODEL_URL = "https://teachablemachine.withgoogle.com/models/gJOADmf_u/keras_model.h5"
-NSFW_LABELS_URL = "https://teachablemachine.withgoogle.com/models/gJOADmf_u/labels.txt"
-NSFW_MODEL_PATH = "image_model/keras_model.h5"
-NSFW_LABELS_PATH = "image_model/labels.txt"
-
-download_file(NSFW_MODEL_URL, NSFW_MODEL_PATH)
-download_file(NSFW_LABELS_URL, NSFW_LABELS_PATH)
-
-logger.info("Loading image moderation model: nsfw-image-classifier")
-nsfw_model = load_model(NSFW_MODEL_PATH, compile=False)
-with open(NSFW_LABELS_PATH, "r") as f:
-    nsfw_labels = [line.strip().split(' ')[1] for line in f]
-MODELS['nsfw-image-classifier'] = (nsfw_model, nsfw_labels)
-logger.info("NSFW image model loaded.")
-
-class InputItem(BaseModel):
-    text: Optional[str] = None
-    image_url: Optional[str] = None
-    image_base64: Optional[str] = None
-
-    @field_validator('*')
-    @classmethod
-    def check_one_field(cls, v, info):
-        if sum(1 for value in info.data.values() if value is not None) > 1:
-            raise ValueError("Only one of text, image_url, or image_base64 can be provided.")
-        return v
-
-class ModerationRequest(BaseModel):
-    input: Union[str, List[Union[str, InputItem]]] = Field(..., max_length=10)
-    model: str = "auto"
-
-class ModerationResponse(BaseModel):
-    id: str
-    model: str
-    results: List[Dict[str, Any]]
-
-def format_openai_result(flagged: bool, categories: Dict[str, bool], scores: Dict[str, float]):
-    return {
-        "flagged": flagged,
-        "categories": categories,
-        "category_scores": scores
-    }
-
-def classify_text_detoxify(text: str):
-    predictions = MODELS['detoxify-multilingual'].predict(text)
-
-    categories = {
-        "hate": predictions['identity_attack'] > 0.5 or predictions['toxicity'] > 0.7,
-        "hate/threatening": predictions['threat'] > 0.5,
-        "harassment": predictions['insult'] > 0.5,
-        "harassment/threatening": predictions['threat'] > 0.5,
-        "self-harm": predictions['severe_toxicity'] > 0.6,
-        "sexual": predictions['sexual_explicit'] > 0.5,
-        "sexual/minors": False,
-        "violence": predictions['toxicity'] > 0.8,
-        "violence/graphic": predictions['severe_toxicity'] > 0.8,
-    }
-    scores = {
-        "hate": float(max(predictions.get('identity_attack', 0), predictions.get('toxicity', 0))),
-        "hate/threatening": float(predictions.get('threat', 0)),
-        "harassment": float(predictions.get('insult', 0)),
-        "harassment/threatening": float(predictions.get('threat', 0)),
-        "self-harm": float(predictions.get('severe_toxicity', 0)),
-        "sexual": float(predictions.get('sexual_explicit', 0)),
-        "sexual/minors": 0.0,
-        "violence": float(predictions.get('toxicity', 0)),
-        "violence/graphic": float(predictions.get('severe_toxicity', 0)),
-    }
-    flagged = any(categories.values())
-    return format_openai_result(flagged, categories, scores)
-
-def process_image(image_data: bytes) -> np.ndarray:
-    image = Image.open(io.BytesIO(image_data)).convert("RGB")
-    image = image.resize((224, 224))
-    image_array = np.asarray(image)
-    normalized_image_array = (image_array.astype(np.float32) / 127.5) - 1
-    return np.expand_dims(normalized_image_array, axis=0)
-
-def classify_image(image_data: bytes):
-    model, labels = MODELS['nsfw-image-classifier']
-    processed_image = process_image(image_data)
-    prediction = model.predict(processed_image, verbose=0)
-
-    scores = {label: float(score) for label, score in zip(labels, prediction[0])}
-    is_nsfw = scores.get('nsfw', 0.0) > 0.7
-
-    categories = {
-        "hate": False, "hate/threatening": False, "harassment": False, "harassment/threatening": False,
-        "self-harm": False, "sexual": is_nsfw, "sexual/minors": is_nsfw, "violence": False, "violence/graphic": is_nsfw,
-    }
-
-    category_scores = {
-        "hate": 0.0, "hate/threatening": 0.0, "harassment": 0.0, "harassment/threatening": 0.0,
-        "self-harm": 0.0, "sexual": scores.get('nsfw', 0.0), "sexual/minors": scores.get('nsfw', 0.0),
-        "violence": 0.0, "violence/graphic": scores.get('nsfw', 0.0),
-    }
-
-    return format_openai_result(is_nsfw, categories, category_scores)
-
-def get_api_key(request: Request):
-    api_key = request.headers.get("Authorization")
-    if not api_key or not api_key.startswith("Bearer "):
-        raise HTTPException(status_code=401, detail="API key is missing or invalid.")
-
-    api_key = api_key.split(" ")[1]
-    env_api_key = os.getenv("API_KEY")
-    if not env_api_key or api_key != env_api_key:
-        raise HTTPException(status_code=401, detail="Invalid API key.")
-    return api_key
-
-@app.get("/", response_class=HTMLResponse)
-async def get_home(request: Request):
-    return templates.TemplateResponse("index.html", {"request": request})
-
-@app.get("/v1/metrics", response_class=JSONResponse)
-async def get_metrics(api_key: str = Depends(get_api_key)):
-    avg_time = sum(request_times) / len(request_times) if request_times else 0
-    return {
-        "concurrent_requests": concurrent_requests,
-        "average_response_time_ms_last_100": avg_time * 1000,
-        "tracked_request_count": len(request_times)
-    }
-
-@app.post("/v1/moderations", response_model=ModerationResponse)
-async def moderate_content(
-    request: ModerationRequest,
-    api_key: str = Depends(get_api_key)
-):
-    inputs = request.input
-    if isinstance(inputs, str):
-        inputs = [inputs]
-
-    if len(inputs) > 10:
-        raise HTTPException(status_code=400, detail="Maximum of 10 items per request is allowed.")
-
-    results = []
-
-    for item in inputs:
-        result = None
-        if isinstance(item, str):
-            result = classify_text_detoxify(item)
-        elif isinstance(item, InputItem):
-            if item.text:
-                result = classify_text_detoxify(item.text)
-            elif item.image_url:
-                try:
-                    response = requests.get(item.image_url, stream=True, timeout=10)
-                    response.raise_for_status()
-                    image_bytes = response.content
-                    result = classify_image(image_bytes)
-                except requests.RequestException as e:
-                    raise HTTPException(status_code=400, detail=f"Could not fetch image from URL: {e}")
-            elif item.image_base64:
-                try:
-                    image_bytes = base64.b64decode(item.image_base64)
-                    result = classify_image(image_bytes)
-                except Exception as e:
-                    raise HTTPException(status_code=400, detail=f"Invalid base64 image data: {e}")
-
-        if result:
-            results.append(result)
-        else:
-            raise HTTPException(status_code=400, detail="Invalid input item format provided.")
-
-    model_name = request.model if request.model != "auto" else "multimodal-moderator"
-
-    response_data = {
-        "id": f"modr-{uuid.uuid4().hex}",
-        "model": model_name,
-        "results": results,
-    }
-
-    return response_data
-
-with open("templates/index.html", "w") as f:
-    f.write("""
-<!DOCTYPE html>
-<html lang="en">
-<head>
-    <meta charset="UTF-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1.0">
-    <title>Multimodal AI Content Moderator</title>
-    <script src="https://cdn.tailwindcss.com"></script>
-    <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.4.0/css/all.min.css">
-    <style>
-        .gradient-bg { background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); }
-        .glass-effect {
-            background: rgba(255, 255, 255, 0.1);
-            backdrop-filter: blur(10px);
-            border-radius: 10px;
-            border: 1px solid rgba(255, 255, 255, 0.2);
-        }
-    </style>
-</head>
-<body class="min-h-screen gradient-bg text-white font-sans">
-    <div class="container mx-auto px-4 py-8">
-        <header class="text-center mb-10">
-            <h1 class="text-4xl md:text-5xl font-bold mb-4">Multimodal AI Content Moderator</h1>
-            <p class="text-xl text-gray-200 max-w-3xl mx-auto">
-                Advanced, multilingual, and multimodal content analysis for text and images.
-            </p>
-        </header>
-
-        <main class="max-w-6xl mx-auto">
-            <div class="grid grid-cols-1 lg:grid-cols-5 gap-8">
-                <div class="lg:col-span-2">
-                    <div class="glass-effect p-6 rounded-xl h-full flex flex-col">
-                        <h2 class="text-2xl font-bold mb-4 flex items-center">
-                            <i class="fas fa-cogs mr-3"></i>Configuration & Status
-                        </h2>
-                        <div class="mb-4">
-                            <label class="block text-sm font-medium mb-2">API Key</label>
-                            <input type="password" id="apiKey" placeholder="Enter your API key"
-                                class="w-full px-4 py-3 rounded-lg bg-white/10 border border-white/20 focus:outline-none focus:ring-2 focus:ring-indigo-400 text-white">
-                        </div>
-                        <div class="mt-4 border-t border-white/20 pt-4">
-                            <h3 class="text-lg font-semibold mb-3">Server Metrics</h3>
-                            <div class="space-y-3 text-sm">
-                                <div class="flex justify-between"><span>Concurrent Requests:</span> <span id="concurrentRequests" class="font-mono">0</span></div>
-                                <div class="flex justify-between"><span>Avg. Response (last 100):</span> <span id="avgResponseTime" class="font-mono">0.00 ms</span></div>
-                            </div>
-                        </div>
-                        <div class="mt-auto pt-4">
-                            <h3 class="text-lg font-semibold mb-2">API Endpoint</h3>
-                            <div class="bg-black/20 p-3 rounded-lg text-xs font-mono">
-                                POST /v1/moderations
-                            </div>
-                        </div>
-                    </div>
-                </div>
-
-                <div class="lg:col-span-3">
-                    <div class="glass-effect p-6 rounded-xl">
-                        <h2 class="text-2xl font-bold mb-4 flex items-center">
-                            <i class="fas fa-vial mr-3"></i>Live Tester
-                        </h2>
-
-                        <div id="input-container" class="space-y-3 mb-4">
-                            <div class="input-item">
-                                <textarea name="text" rows="2" placeholder="Enter text to analyze..." class="w-full p-2 rounded bg-white/10 border border-white/20 focus:outline-none focus:ring-2 focus:ring-indigo-400"></textarea>
-                            </div>
-                        </div>
-
-                        <div class="flex space-x-2 mb-6">
-                            <button id="add-text" class="text-sm bg-white/10 hover:bg-white/20 py-1 px-3 rounded"><i class="fas fa-plus mr-1"></i> Text</button>
-                            <button id="add-image-url" class="text-sm bg-white/10 hover:bg-white/20 py-1 px-3 rounded"><i class="fas fa-link mr-1"></i> Image URL</button>
-                            <button id="add-image-file" class="text-sm bg-white/10 hover:bg-white/20 py-1 px-3 rounded"><i class="fas fa-upload mr-1"></i> Image File</button>
-                        </div>
-
-                        <input type="file" id="image-file-input" class="hidden" accept="image/*">
-
-                        <button id="analyzeBtn" class="w-full bg-indigo-600 hover:bg-indigo-700 text-white font-bold py-3 px-6 rounded-lg transition duration-300">
-                            <i class="fas fa-search mr-2"></i> Analyze Content
-                        </button>
-                    </div>
-                </div>
-            </div>
-
-            <div id="resultsSection" class="mt-8 hidden">
-                <h3 class="text-xl font-bold mb-4">Analysis Results</h3>
-                <div id="resultsContainer" class="space-y-4"></div>
-            </div>
-        </main>
-    </div>
-
-    <script>
-        const apiKeyInput = document.getElementById('apiKey');
-        const inputContainer = document.getElementById('input-container');
-        const analyzeBtn = document.getElementById('analyzeBtn');
-        const resultsSection = document.getElementById('resultsSection');
-        const resultsContainer = document.getElementById('resultsContainer');
-        const concurrentRequestsEl = document.getElementById('concurrentRequests');
-        const avgResponseTimeEl = document.getElementById('avgResponseTime');
-        const imageFileInput = document.getElementById('image-file-input');
-
-        document.getElementById('add-text').addEventListener('click', () => addInput('text'));
-        document.getElementById('add-image-url').addEventListener('click', () => addInput('image_url'));
-        document.getElementById('add-image-file').addEventListener('click', () => imageFileInput.click());
-
-        imageFileInput.addEventListener('change', (event) => {
-            if (event.target.files && event.target.files[0]) {
-                const file = event.target.files[0];
-                const reader = new FileReader();
-                reader.onload = (e) => {
-                    addInput('image_base64', e.target.result);
-                };
-                reader.readAsDataURL(file);
-            }
-        });
-
-        function addInput(type, value = '') {
-            if (inputContainer.children.length >= 10) {
-                alert('Maximum of 10 items per request.');
-                return;
-            }
-            const itemDiv = document.createElement('div');
-            itemDiv.className = 'input-item relative';
-            let inputHtml = '';
-            if (type === 'text') {
-                inputHtml = `<textarea name="text" rows="2" placeholder="Enter text..." class="w-full p-2 rounded bg-white/10 border border-white/20 focus:outline-none focus:ring-2 focus:ring-indigo-400">${value}</textarea>`;
-            } else if (type === 'image_url') {
-                inputHtml = `<input type="text" name="image_url" placeholder="Enter image URL..." value="${value}" class="w-full p-2 rounded bg-white/10 border border-white/20 focus:outline-none focus:ring-2 focus:ring-indigo-400">`;
-            } else if (type === 'image_base64') {
-                inputHtml = `
-                    <div class="flex items-center space-x-2 p-2 rounded bg-white/10 border border-white/20">
-                        <img src="${value}" class="h-10 w-10 object-cover rounded">
-                        <span class="text-sm truncate">Image File Uploaded</span>
-                        <input type="hidden" name="image_base64" value="${value.split(',')[1]}">
-                    </div>
-                `;
-            }
-            const removeBtn = `<button class="absolute -top-1 -right-1 text-red-400 hover:text-red-200 bg-gray-800 rounded-full h-5 w-5 flex items-center justify-center text-xs" onclick="this.parentElement.remove()"><i class="fas fa-times"></i></button>`;
-            itemDiv.innerHTML = inputHtml + removeBtn;
-            inputContainer.appendChild(itemDiv);
-        }
-
-        analyzeBtn.addEventListener('click', async () => {
-            const apiKey = apiKeyInput.value.trim();
-            if (!apiKey) {
-                alert('Please enter your API key.');
-                return;
-            }
-
-            const inputs = [];
-            document.querySelectorAll('.input-item').forEach(item => {
-                const text = item.querySelector('textarea[name="text"]');
-                const imageUrl = item.querySelector('input[name="image_url"]');
-                const imageBase64 = item.querySelector('input[name="image_base64"]');
-                if (text && text.value.trim()) inputs.push({ text: text.value.trim() });
-                if (imageUrl && imageUrl.value.trim()) inputs.push({ image_url: imageUrl.value.trim() });
-                if (imageBase64 && imageBase64.value) inputs.push({ image_base64: imageBase64.value });
-            });
-
-            if (inputs.length === 0) {
-                alert('Please add at least one item to analyze.');
-                return;
-            }
-
-            analyzeBtn.disabled = true;
-            analyzeBtn.innerHTML = '<i class="fas fa-spinner fa-spin mr-2"></i> Analyzing...';
-
-            try {
-                const response = await fetch('/v1/moderations', {
-                    method: 'POST',
-                    headers: {
-                        'Content-Type': 'application/json',
-                        'Authorization': `Bearer ${apiKey}`
-                    },
-                    body: JSON.stringify({ input: inputs })
-                });
-
-                const data = await response.json();
-                if (!response.ok) {
-                    throw new Error(data.detail || 'An error occurred.');
+                    document.getElementById('avgResponseTime').textContent = data.avg_request_time_ms.toFixed(0) + 'ms';
+                    document.getElementById('concurrentRequests').textContent = data.concurrent_requests;
+                    document.getElementById('requestsPerMinute').textContent = data.requests_per_minute;
+                    document.getElementById('todayRequests').textContent = data.today_requests;
                 }
-                displayResults(data.results);
             } catch (error) {
-
-                resultsSection.classList.add('hidden');
-            } finally {
-                analyzeBtn.disabled = false;
-                analyzeBtn.innerHTML = '<i class="fas fa-search mr-2"></i> Analyze Content';
+                console.error('Error updating metrics:', error);
             }
-        });
-
-        function displayResults(results) {
-            resultsContainer.innerHTML = '';
-            results.forEach((result, index) => {
-                const flagged = result.flagged;
-                const card = document.createElement('div');
-                card.className = `glass-effect p-4 rounded-lg border-l-4 ${flagged ? 'border-red-400' : 'border-green-400'}`;
-
-                let flaggedCategories = Object.entries(result.categories)
-                    .filter(([_, value]) => value === true)
-                    .map(([key]) => key)
-                    .join(', ');
-
-                let scoresHtml = Object.entries(result.category_scores).map(([key, score]) => `
-                    <div class="flex justify-between text-xs my-1">
-                        <span>${key.replace(/_/g, ' ')}</span>
-                        <span class="font-mono">${(score * 100).toFixed(2)}%</span>
-                    </div>
-                    <div class="w-full bg-white/10 rounded-full h-1.5">
-                        <div class="h-1.5 rounded-full ${score > 0.5 ? 'bg-red-400' : 'bg-green-400'}" style="width: ${score * 100}%"></div>
-                    </div>
-                `).join('');
-
-                card.innerHTML = `
-                    <div class="flex justify-between items-center mb-2">
-                        <h4 class="font-bold">Item ${index + 1} - ${flagged ? 'FLAGGED' : 'SAFE'}</h4>
-                        ${flagged ? `<span class="text-xs text-red-300">${flaggedCategories}</span>` : ''}
-                    </div>
-                    <div>${scoresHtml}</div>
-                `;
-                resultsContainer.appendChild(card);
-            });
-            resultsSection.classList.remove('hidden');
         }
-
-
-
-
-
-
-                    headers: { 'Authorization': `Bearer ${apiKey}` }
-                });
-                if (response.ok) {
-                    const data = await response.json();
-                    concurrentRequestsEl.textContent = data.concurrent_requests;
-                    avgResponseTimeEl.textContent = `${data.average_response_time_ms_last_100.toFixed(2)} ms`;
-                }
-            } catch (error) {
-                console.error("Failed to fetch metrics");
+
+        function showLoading(show) {
+            if (show) {
+                loadingModal.classList.remove('hidden');
+            } else {
+                loadingModal.classList.add('hidden');
             }
         }
-
+
+        function showNotification(message, type = 'info') {
+            const notification = document.createElement('div');
+            notification.className = `fixed top-4 right-4 p-4 rounded-lg shadow-lg z-50 ${
+                type === 'error' ? 'bg-red-500' : 'bg-indigo-500'
+            } text-white`;
+            notification.innerHTML = `
+                <div class="flex items-center">
+                    <i class="fas ${type === 'error' ? 'fa-exclamation-circle' : 'fa-info-circle'} mr-2"></i>
+                    <span>${message}</span>
+                </div>
+            `;
+
+            document.body.appendChild(notification);
+
+            setTimeout(() => {
+                notification.style.opacity = '0';
+                notification.style.transition = 'opacity 0.5s';
+                setTimeout(() => {
+                    document.body.removeChild(notification);
+                }, 500);
+            }, 3000);
+        }
+
+        document.addEventListener('DOMContentLoaded', () => {
+            addMixedItem();
+            updateMetrics();
+            setInterval(updateMetrics, 30000);
+        });
     </script>
 </body>
-</html>
-
+</html>""")
+
+print("Initializing models...")
+with torch.inference_mode():
+    _ = model.generate(
+        **tokenizer(["Hello"], return_tensors="pt").to(model.device),
+        max_new_tokens=1, do_sample=False, use_cache=True
+    )
+
+print("🚀 Starting AI Content Moderator API...")
 
 if __name__ == "__main__":
-
-    uvicorn.run(app, host="0.0.0.0", port=int(os.getenv("PORT", 7860)))
+    uvicorn.run(app, host="0.0.0.0", port=7860)
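For reference, a minimal client-side sketch of how the /v1/moderations endpoint defined in the pre-change app.py could be exercised. This is an illustration only: the base URL, the key value, and the sample inputs below are assumptions, and the key must match the API_KEY environment variable the server checks in get_api_key().

import os
import requests

# Illustrative values; substitute the real Space URL and the API_KEY configured on the server.
BASE_URL = "http://localhost:7860"
API_KEY = os.environ.get("API_KEY", "changeme")

resp = requests.post(
    f"{BASE_URL}/v1/moderations",
    headers={"Authorization": f"Bearer {API_KEY}"},
    # ModerationRequest accepts a string or a list of strings / {text | image_url | image_base64} items (max 10).
    json={"input": ["an example sentence to score", {"image_url": "https://example.com/sample.jpg"}]},
    timeout=60,
)
resp.raise_for_status()
for item in resp.json()["results"]:
    # Each result uses the OpenAI-style shape built by format_openai_result():
    # {"flagged": bool, "categories": {...}, "category_scores": {...}}
    print(item["flagged"], item["category_scores"])

Note that the 62-line replacement introduced by this commit no longer defines that route, so the sketch only applies to the original 531-line version shown on the left side of the diff.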