Princeaka committed (verified) · Commit dbc3943 · 1 Parent(s): 262eccd

Update app.py

Files changed (1)
  1. app.py +44 -29
app.py CHANGED
@@ -1,22 +1,25 @@
  #!/usr/bin/env python3
  """
- JusticeAI Backend — merged app.py with Image Editor endpoint
+ JusticeAI Backend — merged app.py

- This is a consolidated FastAPI backend that:
+ This is a consolidated, ready-to-run FastAPI backend that:
  - Keeps two databases: DATABASE_URL (personal user memory) and KNOWLEDGEDATABASE_URL (global knowledge).
- - Exposes /chat, /response, /add, /add-bulk, /leaderboard, /reembed, /model-status, /health, /metrics_stream,
+ - Exposes /chat, /response, /add, /add-bulk, /leaderboard, /reembed, /model-status, /health, /metrics, /metrics_stream,
  /metrics_recent, /verify-admin, /cleardatabase.
- - Adds TTS endpoints (/speak and /speak_json) with preloading and optimizations when Coqui TTS is installed.
- - Adds image editor endpoints:
- - /image_edit_json (JSON input with base64 image) always available
- - /image_edit (multipart/form-data with file upload and JSON "operations") — available only if python-multipart is installed
- The editor supports operations: resize, rotate, crop, flip, flop, grayscale, blur, overlay_text.
- - Ensures user chats are stored only in DATABASE_URL.user_memory and never used to modify global knowledge.
+ - Provides TTS voice-cloning via Coqui TTS with optimizations: preloaded model, GPU/half precision (if available),
+ speaker-sample caching, and a JSON fallback endpoint so the app can run without python-multipart installed.
+ - Adds image editing endpoints: /image_edit_json (JSON with base64 image) and /image_edit (multipart if python-multipart installed).
+ - Performs topic inference (Ollama-first if available), uses embeddings when available, and translates replies
+ into the user's detected language (language.py preferred; Helsinki transformers fallback).
+ - Ensures user chats are stored only in DATABASE_URL.user_memory and never used to influence global knowledge.
  - Keeps last 10 chats per user (pruned on insert).
+ - Adds /language.bin and /metrics endpoints to reduce 404 noise.

  Notes:
- - For multipart endpoints install python-multipart: pip install python-multipart
+ - If you want multipart file upload support for /speak or /image_edit (form+file), install python-multipart:
+ pip install python-multipart
  - For image editing the Pillow package is required: pip install pillow
+ - For TTS: install Coqui TTS and (optionally) torch for GPU/half precision speedups.
  """

  from sqlalchemy.pool import NullPool
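For orientation while reviewing the docstring change, here is a minimal client sketch for /image_edit_json. The payload fields (image_b64, operations, format) and the operation names listed in the old docstring (resize, rotate, crop, flip, flop, grayscale, blur, overlay_text) come from this diff; the base URL, the keys inside each operation dict, and the assumption that the endpoint returns raw image bytes are not confirmed here.

# Hypothetical client for /image_edit_json; see the hedges in the note above.
import base64
import requests

with open("photo.jpg", "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode("ascii")

payload = {
    "image_b64": image_b64,
    "format": "png",
    "operations": [
        {"op": "resize", "width": 640, "height": 480},   # operation-dict keys are assumptions
        {"op": "grayscale"},
        {"op": "overlay_text", "text": "JusticeAI", "anchor": "lt", "size": 32},
    ],
}

resp = requests.post("http://localhost:8000/image_edit_json", json=payload, timeout=60)
resp.raise_for_status()
with open("edited.png", "wb") as out:
    out.write(resp.content)   # assumes the edited image comes back as raw bytes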
@@ -36,7 +39,7 @@ import base64
  import io
  from datetime import datetime, timezone
  from collections import deque
- from typing import Optional, Dict, Any, List, Tuple
+ from typing import Optional, Dict, Any, List

  from fastapi import (
  FastAPI, Request, Body, Query, Header, BackgroundTasks,
@@ -679,8 +682,7 @@ def _save_upload_file_tmp(upload_file: UploadFile) -> str:
  if TTS_AVAILABLE:
  threading.Thread(target=lambda: (get_tts_model_blocking()), daemon=True).start()

- # /speak_json and /speak endpoints handled earlier in previous merge; they remain present below.
-
+ # /speak_json and /speak endpoints
  @app.post("/speak_json")
  async def speak_json(background_tasks: BackgroundTasks, payload: dict = Body(...)):
  text = payload.get("text", "")
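A correspondingly small sketch for calling /speak_json. Only the "text" field is visible in this hunk; the base URL and the assumption that the endpoint streams audio bytes back are guesses.

# Hypothetical /speak_json client; response handling is an assumption.
import requests

resp = requests.post(
    "http://localhost:8000/speak_json",
    json={"text": "Hello from JusticeAI"},
    timeout=120,
)
resp.raise_for_status()
with open("speech_output.bin", "wb") as f:   # output format unknown, so .bin
    f.write(resp.content)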
@@ -883,7 +885,6 @@ def _perform_image_operations(input_path: str, operations: List[Dict[str, Any]],
  anchor = op.get("anchor", "lt")
  draw = ImageDraw.Draw(img)
  try:
- # try to use a truetype font if available
  font_path = op.get("font_path") or "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf"
  font = ImageFont.truetype(font_path, size=size)
  except Exception:
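The try/except around ImageFont.truetype above is a common Pillow pattern: prefer a TrueType font if one exists on disk, otherwise fall back to the built-in bitmap font. A standalone sketch of the same pattern (the font path is only an example):

from PIL import ImageFont

def load_font(font_path: str, size: int = 24):
    """Return a TrueType font if available, else Pillow's default bitmap font."""
    try:
        return ImageFont.truetype(font_path, size=size)
    except Exception:
        return ImageFont.load_default()

font = load_font("/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf", size=32)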
@@ -894,7 +895,6 @@ def _perform_image_operations(input_path: str, operations: List[Dict[str, Any]],
  if mode:
  img = img.convert(mode)
  # ignore unknown ops
- # Save result. Determine format from out_path extension
  ext = os.path.splitext(out_path)[1].lower()
  fmt = None
  if ext in (".jpg", ".jpeg"):
@@ -905,7 +905,6 @@ def _perform_image_operations(input_path: str, operations: List[Dict[str, Any]],
  fmt = "WEBP"
  else:
  fmt = "PNG"
- # If saving JPEG, convert to RGB
  save_img = img
  if fmt == "JPEG":
  save_img = img.convert("RGB")
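The JPEG branch above converts to RGB because Pillow refuses to encode an alpha channel into JPEG. A standalone illustration of why the conversion is needed:

from PIL import Image

img = Image.new("RGBA", (64, 64), (255, 0, 0, 128))   # image with transparency
try:
    img.save("out.jpg", format="JPEG")                # raises: cannot write mode RGBA as JPEG
except OSError as exc:
    print(f"direct save fails: {exc}")
img.convert("RGB").save("out.jpg", format="JPEG")     # flattening to RGB first succeeds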
@@ -938,7 +937,6 @@ async def image_edit_json(background_tasks: BackgroundTasks, payload: dict = Bod
  if not image_b64 and not image_url:
  raise HTTPException(status_code=400, detail="Provide either image_b64 or image_url")

- # Save input image to temp file
  in_fd, in_path = tempfile.mkstemp(suffix=".input")
  os.close(in_fd)
  try:
@@ -950,7 +948,6 @@ async def image_edit_json(background_tasks: BackgroundTasks, payload: dict = Bod
  with open(in_path, "wb") as f:
  f.write(data)
  else:
- # download image_url (quick fetch)
  try:
  resp = requests.get(image_url, timeout=10)
  if resp.status_code != 200:
@@ -972,14 +969,12 @@ async def image_edit_json(background_tasks: BackgroundTasks, payload: dict = Bod
  pass
  raise HTTPException(status_code=500, detail="Failed to save input image")

- # Prepare output path
  ext = "." + out_format if not out_format.startswith(".") else out_format
  out_fd, out_path = tempfile.mkstemp(suffix=ext, prefix="img_edit_out_")
  os.close(out_fd)
  background_tasks.add_task(lambda p: os.path.exists(p) and os.remove(p), out_path)
  background_tasks.add_task(lambda p: os.path.exists(p) and os.remove(p), in_path)

- # Run ops in threadpool
  try:
  await _run_image_ops_in_thread(in_path, operations, out_path)
  except Exception as e:
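The add_task calls above rely on FastAPI running background tasks only after the response has been sent, which makes them a convenient place to delete the temporary input and output files. A self-contained sketch of that pattern (the /tmp-demo route is hypothetical):

import os
import tempfile

from fastapi import BackgroundTasks, FastAPI
from fastapi.responses import FileResponse

app = FastAPI()

def _remove_if_exists(path: str) -> None:
    if os.path.exists(path):
        os.remove(path)

@app.get("/tmp-demo")
async def tmp_demo(background_tasks: BackgroundTasks):
    fd, path = tempfile.mkstemp(suffix=".txt")
    os.close(fd)
    with open(path, "w") as f:
        f.write("generated on demand")
    background_tasks.add_task(_remove_if_exists, path)   # runs after the response is sent
    return FileResponse(path, media_type="text/plain")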
@@ -1001,12 +996,6 @@ if HAVE_MULTIPART:
  image_url: Optional[str] = Form(None),
  format: Optional[str] = Form("png"),
  ):
- """
- Multipart endpoint to edit uploaded image with form-data.
- operations: JSON string of operations (see image_edit_json)
- image: uploaded file
- image_url: alternative to upload
- """
  if not PIL_AVAILABLE:
  raise HTTPException(status_code=503, detail="Image editing requires Pillow. Install with pip install pillow")

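A client-side sketch for the multipart /image_edit endpoint whose docstring is removed above. The field names (image, image_url, operations, format) come from the signature and the removed docstring; the base URL and the keys inside each operation dict are assumptions, and the server only exposes this route when python-multipart is installed.

# Hypothetical multipart client for /image_edit.
import json
import requests

operations = json.dumps([{"op": "rotate", "angle": 90}])   # "op"/"angle" keys are assumptions

with open("photo.jpg", "rb") as f:
    resp = requests.post(
        "http://localhost:8000/image_edit",
        data={"operations": operations, "format": "png"},
        files={"image": ("photo.jpg", f, "image/jpeg")},
        timeout=60,
    )
resp.raise_for_status()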
@@ -1073,7 +1062,7 @@ else:
  )

  # -------------------------
- # Metrics & small helpers
+ # Metrics, language.bin, and small helpers
  # -------------------------
  recent_request_times = deque()
  recent_learning_timestamps = deque()
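These deques implement a rolling one-hour window: each event appends a timestamp and anything older than 3600 seconds is dropped from the left, as the pruning loop in the next hunk shows. A standalone sketch of the idea:

import time
from collections import deque

events = deque()

def record_event() -> None:
    now = time.time()
    events.append(now)
    while events and events[0] < now - 3600:   # keep only the last hour
        events.popleft()

def events_last_hour() -> int:
    return len(events)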
@@ -1097,13 +1086,40 @@ def record_learn_event():
  while recent_learning_timestamps and recent_learning_timestamps[0] < ts - 3600:
  recent_learning_timestamps.popleft()

+ @app.get("/metrics")
+ async def metrics():
+ try:
+ with engine_knowledge.connect() as c:
+ k = c.execute(sql_text("SELECT COUNT(*) FROM knowledge")).scalar() or 0
+ except Exception:
+ k = -1
+ try:
+ with engine_user.connect() as c:
+ u = c.execute(sql_text("SELECT COUNT(*) FROM user_memory")).scalar() or 0
+ except Exception:
+ u = -1
+ reqs_last_hour = sum(1 for ts, _ in recent_request_times if ts >= time.time() - 3600) if 'recent_request_times' in globals() else 0
+ return {
+ "ok": True,
+ "uptime_s": round(time.time() - app_start_time, 2) if 'app_start_time' in globals() else None,
+ "knowledge_count": int(k),
+ "user_memory_count": int(u),
+ "requests_last_hour": int(reqs_last_hour)
+ }
+
+ @app.get("/language.bin")
+ async def language_bin():
+ path = "language.bin"
+ if os.path.exists(path):
+ return FileResponse(path, media_type="application/octet-stream")
+ return JSONResponse(status_code=404, content={"error": "language.bin not found", "hint": "Place file at ./language.bin or upload it"})
+
  # -------------------------
  # Startup warmups
  # -------------------------
  @app.on_event("startup")
  async def startup_event():
  logger.info("[JusticeAI] startup: warming optional components")
- # Warm embeddings in background
  if SentenceTransformer is not None:
  def warm_embed():
  try:
@@ -1111,7 +1127,6 @@ async def startup_event():
  except Exception as e:
  logger.debug(f"[startup] embed warmup error: {e}")
  threading.Thread(target=warm_embed, daemon=True).start()
- # Attempt Ollama pull if requested (best-effort)
  if OLLAMA_AUTO_PULL and ollama_cli_available():
  try:
  subprocess.run(["ollama", "pull", OLLAMA_MODEL], timeout=300)
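Finally, a quick smoke test for the two endpoints this commit adds, /metrics and /language.bin. The response keys match the dict returned in the hunk above; the base URL is an assumption.

import requests

base = "http://localhost:8000"

m = requests.get(f"{base}/metrics", timeout=10).json()
print(m["ok"], m["knowledge_count"], m["user_memory_count"], m["requests_last_hour"])

r = requests.get(f"{base}/language.bin", timeout=10)
if r.status_code == 200:
    with open("language.bin", "wb") as f:
        f.write(r.content)
else:
    print(r.json().get("hint"))   # "Place file at ./language.bin or upload it"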
 