ccabrerafreepik committed on
Commit 45eaad4 · verified · 1 Parent(s): b4c9ac7

Update app.py


Improve app look and capabilities

Files changed (1)
  1. app.py +40 -9
app.py CHANGED
@@ -1,24 +1,55 @@
  from transformers import pipeline
  from PIL import Image
  import gradio as gr
+ from nsfw_image_detector import NSFWDetector
+ import torch

  # Load your model from the Hub
- classifier = pipeline("image-classification", model="Freepik/nsfw_image_detector")
+ classifier_pipe = pipeline("image-classification", model="Freepik/nsfw_image_detector")
+ classifier_nsfw = NSFWDetector(dtype=torch.bfloat16, device="cpu")

  # Define the inference function
- def classify_image(image):
-     result = classifier(image)
-     # Format the result into a string with label and score (2 decimals)
-     formatted_result = "\n".join([f"{entry['label']}: {entry['score']:.2f}" for entry in result])
-     return formatted_result
+ def classify_image(image, confidence_level):
+     # Get predictions from both models
+     result_nsfw_proba = classifier_nsfw.predict_proba(image)
+     is_nsfw_method = result_nsfw_proba[0][confidence_level] >= 0.5
+     result_pipe = classifier_pipe(image)
+
+     # Format NSFW probability scores
+     proba_dict = result_nsfw_proba[0]
+     nsfw_proba_str = "NSFW Probability Scores:\n"
+     for level, score in proba_dict.items():
+         nsfw_proba_str += f"{level.value.title()}: {score:.4f}\n"
+
+     # Format NSFW classification
+     is_nsfw_str = f"NSFW Classification ({confidence_level.title()}):\n"
+     is_nsfw_str += "🔴 True" if is_nsfw_method else "🟢 False"
+
+     # Format pipeline results
+     pipe_str = "Pipeline Results:\n"
+     for result in result_pipe:
+         pipe_str += f"{result['label']}: {result['score']:.4f}\n"
+
+     return nsfw_proba_str, is_nsfw_str, pipe_str

  # Create Gradio interface
  demo = gr.Interface(
      fn=classify_image,
-     inputs=gr.Image(type="pil", label="Upload an image"),
-     outputs=gr.Textbox(label="Prediction"),
+     inputs=[
+         gr.Image(type="pil", label="Upload an image"),
+         gr.Dropdown(
+             choices=["low", "medium", "high"],
+             value="medium",
+             label="Low is the most restrictive, high is the least restrictive"
+         )
+     ],
+     outputs=[
+         gr.Textbox(label="NSFW Probability Scores (recommended)", lines=3),
+         gr.Textbox(label="NSFW Classification (recommended)", lines=2),
+         gr.Textbox(label="Pipeline Results (not recommended, specially in production)", lines=3)
+     ],
      title="NSFW Image Classifier",
-     description="Upload an image and get a prediction using the Freepik/nsfw_image_detector model."
+     description="Upload an image and select a confidence level to get a prediction using the Freepik/nsfw_image_detector model."
  )

  # Launch app
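
For reviewers who want to exercise the updated inference path outside the Space, here is a minimal smoke-test sketch. It assumes the new app.py is importable from the working directory and that a local file named sample.jpg exists; both are illustrative choices, and the confidence-level string "medium" is one of the dropdown values shown in the diff above.

# Minimal smoke test of the updated classify_image (sketch, not part of the commit).
# Assumes app.py is on the import path and "sample.jpg" is a local image file.
from PIL import Image
import app  # importing app loads both classifiers, exactly as the diff does

img = Image.open("sample.jpg").convert("RGB")
proba_str, is_nsfw_str, pipe_str = app.classify_image(img, "medium")
print(proba_str)    # per-level probabilities from NSFWDetector.predict_proba
print(is_nsfw_str)  # True/False at the chosen confidence level (0.5 threshold)
print(pipe_str)     # raw transformers pipeline scores

# To serve the Gradio UI locally instead (what the Space runner does after "# Launch app"):
# app.demo.launch()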