nixaut-codelabs committed
Commit 2c60529 · verified · 1 Parent(s): eaee42c

Update app.py

Files changed (1):
  1. app.py +19 -33
app.py CHANGED
@@ -15,10 +15,9 @@ from fastapi.templating import Jinja2Templates
  from pydantic import BaseModel, Field
  from dotenv import load_dotenv
  from huggingface_hub import snapshot_download
- from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
+ from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer, pipeline
  from detoxify import Detoxify
  from PIL import Image
- from tensorflow.keras.models import load_model
  import uvicorn
  from datetime import datetime, timedelta
  from collections import defaultdict, deque
@@ -91,24 +90,10 @@ model.eval()

  detoxify_model = Detoxify('multilingual')

- teachable_machine_url = "https://teachablemachine.withgoogle.com/models/gJOADmf_u/"
- model_url = teachable_machine_url + "model.json"
- weights_url = teachable_machine_url + "weights.bin"
-
- model_path = os.path.join(os.getcwd(), "teachable_machine_model")
- os.makedirs(model_path, exist_ok=True)
-
- if not os.path.exists(os.path.join(model_path, "model.json")):
-     response = requests.get(model_url)
-     with open(os.path.join(model_path, "model.json"), "wb") as f:
-         f.write(response.content)
-
- if not os.path.exists(os.path.join(model_path, "weights.bin")):
-     response = requests.get(weights_url)
-     with open(os.path.join(model_path, "weights.bin"), "wb") as f:
-         f.write(response.content)
-
- image_model = load_model(model_path)
+ # Use a Hugging Face pipeline for NSFW image detection
+ print("Loading NSFW image classification model...")
+ image_classifier = pipeline("image-classification", model="Falconsai/nsfw_image_detection")
+ print("NSFW image classification model loaded.")

  MODERATION_SYSTEM_PROMPT = (
      "You are a multilingual content moderation classifier. "
@@ -320,22 +305,23 @@ def classify_text_with_detoxify(text):
  def classify_image(image_data):
      try:
          img = Image.open(io.BytesIO(image_data)).convert("RGB")
-         img = img.resize((224, 224))
-         img_array = np.array(img) / 255.0
-         img_array = np.expand_dims(img_array, axis=0)
+         results = image_classifier(img)
+
+         # Extract the top result
+         top_result = results[0]
+         label = top_result['label']
+         score = top_result['score']

-         predictions = image_model.predict(img_array)
-         class_idx = np.argmax(predictions[0])
-         classes = ["nothing", "nsfw"]
-         class_name = classes[class_idx]
-         confidence = float(predictions[0][class_idx])
+         # Map the label: 'normal' -> 's', 'nsfw' -> 'u'
+         classification = 'u' if label == 'nsfw' else 's'
+         nsfw_score = score if label == 'nsfw' else 1.0 - score

          return {
-             "classification": "u" if class_name == "nsfw" else "s",
-             "label": "NSFW" if class_name == "nsfw" else "SFW",
-             "description": "Content may contain inappropriate or harmful material." if class_name == "nsfw" else "Content appears to be safe and appropriate.",
-             "confidence": confidence,
-             "nsfw_score": confidence if class_name == "nsfw" else 1.0 - confidence
+             "classification": classification,
+             "label": "NSFW" if classification == 'u' else "SFW",
+             "description": "Content may contain inappropriate or harmful material." if classification == 'u' else "Content appears to be safe and appropriate.",
+             "confidence": score,
+             "nsfw_score": nsfw_score
          }
      except Exception as e:
          return {
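For reference, the new classification path can be exercised outside the FastAPI app. The sketch below is a minimal standalone version of the updated classify_image() logic, assuming transformers (with a PyTorch backend) and Pillow are installed; classify_image_file and the example.jpg path are hypothetical names used only for illustration. The 'normal'/'nsfw' labels match the mapping comment added in this commit.

    from PIL import Image
    from transformers import pipeline

    # Same checkpoint the commit switches to; weights download from the Hub on first use.
    image_classifier = pipeline("image-classification", model="Falconsai/nsfw_image_detection")

    def classify_image_file(path):
        # Mirrors the updated classify_image() logic, minus the error handling.
        img = Image.open(path).convert("RGB")
        results = image_classifier(img)          # list of {'label', 'score'}, best first
        top_result = results[0]
        is_nsfw = top_result["label"] == "nsfw"  # model labels: 'normal' / 'nsfw'
        return {
            "classification": "u" if is_nsfw else "s",
            "label": "NSFW" if is_nsfw else "SFW",
            "confidence": top_result["score"],
            "nsfw_score": top_result["score"] if is_nsfw else 1.0 - top_result["score"],
        }

    print(classify_image_file("example.jpg"))    # hypothetical local image file

Compared with the removed Teachable Machine code, this drops the manual resize/normalize steps: the pipeline applies the model's own preprocessing before inference.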