Update app.py
app.py CHANGED
@@ -1,90 +1,66 @@
- import gradio as gr
- from transformers import AutoTokenizer, AutoModelForSequenceClassification
- import torch
  import re
- import
- from
- from
-
- # ---- Model and Tokenizer Setup ----
- model1_path = "modernbert.bin"
- model2_path = "https://huggingface.co/mihalykiss/modernbert_2/resolve/main/Model_groups_3class_seed12"
- model3_path = "https://huggingface.co/mihalykiss/modernbert_2/resolve/main/Model_groups_3class_seed22"
-
- tokenizer = AutoTokenizer.from_pretrained("answerdotai/ModernBERT-base")
-
- # Load models
- model_1 = AutoModelForSequenceClassification.from_pretrained("answerdotai/ModernBERT-base", num_labels=41)
- model_1.load_state_dict(torch.load(model1_path, map_location=device))
- model_1.to(device).eval()
-
- model_2 = AutoModelForSequenceClassification.from_pretrained("answerdotai/ModernBERT-base", num_labels=41)
- model_2.load_state_dict(torch.hub.load_state_dict_from_url(model2_path, map_location=device))
- model_2.to(device).eval()
-
- # ---- Label Mapping ----
  label_mapping = {
-     0:
-     6: 'bloomz', 7: 'cohere', 8: 'davinci', 9: 'dolly', 10: 'dolly-v2-12b',
-     11: 'flan_t5_base', 12: 'flan_t5_large', 13: 'flan_t5_small',
-     14: 'flan_t5_xl', 15: 'flan_t5_xxl', 16: 'gemma-7b-it', 17: 'gemma2-9b-it',
-     18: 'gpt-3.5-turbo', 19: 'gpt-35', 20: 'gpt4', 21: 'gpt4o',
-     22: 'gpt_j', 23: 'gpt_neox', 24: 'human', 25: 'llama3-70b', 26: 'llama3-8b',
-     27: 'mixtral-8x7b', 28: 'opt_1.3b', 29: 'opt_125m', 30: 'opt_13b',
-     31: 'opt_2.7b', 32: 'opt_30b', 33: 'opt_350m', 34: 'opt_6.7b',
-     35: 'opt_iml_30b', 36: 'opt_iml_max_1.3b', 37: 't0_11b', 38: 't0_3b',
-     39: 'text-davinci-002', 40: 'text-davinci-003'
  }

  def clean_text(text: str) -> str:
-     text = re.sub(r'\s
-     text
-
-         newline_to_space,
-         Strip()
-     ])
-
- #
-
      if not cleaned_text.strip():
-         return "
-
-     # Split into paragraphs
      paragraphs = [p.strip() for p in re.split(r'\n{2,}', cleaned_text) if p.strip()]
      chunk_scores = []
-
-     for
          inputs = tokenizer(paragraph, return_tensors="pt", truncation=True, padding=True).to(device)
          with torch.no_grad():
              logits_1 = model_1(**inputs).logits
              logits_2 = model_2(**inputs).logits
              logits_3 = model_3(**inputs).logits
-
          softmax_1 = torch.softmax(logits_1, dim=1)
          softmax_2 = torch.softmax(logits_2, dim=1)
          softmax_3 = torch.softmax(logits_3, dim=1)
-
-         probs = avg_probs[0]
-         all_probabilities.append(probs.cpu())
-
-         human_prob = probs[24].item()
          ai_probs_clone = probs.clone()
          ai_probs_clone[24] = 0
          ai_total = ai_probs_clone.sum().item()
@@ -94,86 +70,30 @@ def classify_text(text):
          ai_model = label_mapping[torch.argmax(ai_probs_clone).item()]

          chunk_scores.append({
-             "human": human_pct,
-             "ai": ai_pct,
              "model": ai_model,
-             "
          })

-     # ----
      avg_human = sum(c["human"] for c in chunk_scores) / len(chunk_scores)
      avg_ai = sum(c["ai"] for c in chunk_scores) / len(chunk_scores)

-     if
-
      else:
-
-     top_5_probs, top_5_idx = torch.topk(mean_probs, 5)
-     top_5_probs = top_5_probs.cpu().numpy()
-     top_5_labels = [label_mapping[i.item()] for i in top_5_idx]
-
-     fig, ax = plt.subplots(figsize=(10, 5))
-     bars = ax.barh(top_5_labels, top_5_probs, color='#4CAF50')
-     ax.set_xlabel('Probability')
-     ax.set_title('Top 5 Model Predictions')
-     ax.invert_yaxis()
-     for bar in bars:
-         width = bar.get_width()
-         ax.text(width + 0.005, bar.get_y() + bar.get_height() / 2, f'{width:.2%}', va='center')
-     plt.tight_layout()
-
-     return result_message + "\n\n" + paragraph_text, fig
-
- # ---- UI Setup ----
- title = "AI Text Detector"
- description = """
- This tool uses **ModernBERT** to detect AI-generated text.
- Each paragraph is analyzed separately to show which parts are likely AI-generated.
- """
- bottom_text = "**Developed by SzegedAI – Extended by Saber**"
-
- AI_texts = [
-     "Artificial intelligence (AI) is reshaping industries by automating tasks, enhancing decision-making, and driving innovation. From predictive analytics in finance to autonomous vehicles in transportation, AI technologies are becoming integral to daily operations. The future of AI lies not only in technological advancement but also in ensuring ethical use, transparency, and accountability."
- ]
-
- Human_texts = [
-     "Mathematics has always been a cornerstone of scientific discovery. It provides a precise language for describing natural phenomena, from the orbit of planets to the behavior of subatomic particles. The beauty of mathematics lies in its universality—its principles hold true regardless of context or culture."
- ]
-
- iface = gr.Blocks(css="""
- @import url('https://fonts.googleapis.com/css2?family=Roboto+Mono:wght@400;700&display=swap');
- #text_input_box { border-radius: 10px; border: 2px solid #4CAF50; font-size: 18px; padding: 15px; margin-bottom: 20px; width: 60%; margin: auto; }
- #result_output_box { border-radius: 10px; border: 2px solid #4CAF50; font-size: 16px; padding: 15px; margin-top: 20px; width: 80%; margin: auto; }
- body { font-family: 'Roboto Mono', sans-serif !important; padding: 20px; }
- .gradio-container { border: 1px solid #4CAF50; border-radius: 15px; padding: 30px; box-shadow: 0px 0px 10px rgba(0,255,0,0.4); max-width: 900px; margin: auto; }
- .highlight-human { color: #4CAF50; font-weight: bold; }
- .highlight-ai { color: #FF5733; font-weight: bold; }
- """)
-
- with iface:
-     gr.Markdown(f"# {title}")
-     gr.Markdown(description)
-     text_input = gr.Textbox(label="", placeholder="Paste your article here...", elem_id="text_input_box", lines=10)
-     result_output = gr.HTML("", elem_id="result_output_box")
-     plot_output = gr.Plot(label="Model Probability Distribution")
-     text_input.change(classify_text, inputs=text_input, outputs=[result_output, plot_output])
-     with gr.Tab("AI Examples"):
-         gr.Examples(AI_texts, inputs=text_input)
-     with gr.Tab("Human Examples"):
-         gr.Examples(Human_texts, inputs=text_input)
-     gr.Markdown(bottom_text)
-
- iface.launch(share=True)
  import re
+ import torch
+ from fastapi import FastAPI, Request
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
+ from pydantic import BaseModel
+ from typing import List
+ import uvicorn

+ # ========== CONFIG ==========
+ MODEL_PATH = "roberta-base-openai-detector"  # or your preferred detector
+ device = "cuda" if torch.cuda.is_available() else "cpu"

+ tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
+ model_1 = AutoModelForSequenceClassification.from_pretrained(MODEL_PATH).to(device)
+ model_2 = AutoModelForSequenceClassification.from_pretrained(MODEL_PATH).to(device)
+ model_3 = AutoModelForSequenceClassification.from_pretrained(MODEL_PATH).to(device)

  label_mapping = {
+     0: "gpt2", 1: "gpt3", 2: "gpt4", 3: "chatgpt", 4: "dolly", 5: "human", 24: "human"
  }

+ app = FastAPI(title="AI Text Classifier API", version="1.0.0")


+ # ========== HELPERS ==========
  def clean_text(text: str) -> str:
+     text = re.sub(r'\s+', ' ', text)
+     return text.strip()


+ # ========== INPUT MODEL ==========
+ class TextInput(BaseModel):
+     text: str


+ # ========== MAIN LOGIC ==========
+ @app.post("/analyze")
+ async def analyze_text(data: TextInput):
+     cleaned_text = clean_text(data.text)
      if not cleaned_text.strip():
+         return {"success": False, "error": "Empty text provided"}

      paragraphs = [p.strip() for p in re.split(r'\n{2,}', cleaned_text) if p.strip()]
+     if not paragraphs:
+         paragraphs = [cleaned_text]

      chunk_scores = []
+     all_probs = []

+     for paragraph in paragraphs:
          inputs = tokenizer(paragraph, return_tensors="pt", truncation=True, padding=True).to(device)
          with torch.no_grad():
              logits_1 = model_1(**inputs).logits
              logits_2 = model_2(**inputs).logits
              logits_3 = model_3(**inputs).logits
          softmax_1 = torch.softmax(logits_1, dim=1)
          softmax_2 = torch.softmax(logits_2, dim=1)
          softmax_3 = torch.softmax(logits_3, dim=1)
+         averaged = (softmax_1 + softmax_2 + softmax_3) / 3
+         probs = averaged[0]
+         all_probs.append(probs.cpu())

+         human_prob = probs[24].item() if 24 in label_mapping else probs[-1].item()
          ai_probs_clone = probs.clone()
          ai_probs_clone[24] = 0
          ai_total = ai_probs_clone.sum().item()

          ai_model = label_mapping[torch.argmax(ai_probs_clone).item()]

          chunk_scores.append({
+             "human": round(human_pct, 2),
+             "ai": round(ai_pct, 2),
              "model": ai_model,
+             "text_preview": paragraph[:250].replace('\n', ' ') + ("..." if len(paragraph) > 250 else "")
          })

+     # ---- OVERALL ----
      avg_human = sum(c["human"] for c in chunk_scores) / len(chunk_scores)
      avg_ai = sum(c["ai"] for c in chunk_scores) / len(chunk_scores)

+     if avg_ai > avg_human:
+         top_model = max(chunk_scores, key=lambda c: c["ai"])["model"]
+         overall = {"result": f"{avg_ai:.2f}% AI-generated", "model": top_model}
      else:
+         overall = {"result": f"{avg_human:.2f}% Human-written", "model": "human"}

+     return {
+         "success": True,
+         "overall": overall,
+         "paragraphs": chunk_scores,
+         "total_paragraphs": len(chunk_scores)
+     }


+ # ========== RUN LOCALLY ==========
+ if __name__ == "__main__":
+     uvicorn.run("app:app", host="0.0.0.0", port=8000)
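For reference, here is a minimal client sketch showing how the new /analyze endpoint could be exercised once the FastAPI app above is running locally on port 8000. The use of the requests package, the localhost URL, and the sample text are illustrative assumptions and are not part of this commit; the response fields it reads ("success", "overall", "paragraphs", "model", "text_preview", "error") mirror the dictionaries returned by analyze_text() above.

# Illustrative client sketch (assumes the server above is running on
# localhost:8000 and that the `requests` package is installed).
import requests

sample = (
    "First paragraph of the article to analyze.\n\n"
    "Second paragraph, scored separately by the endpoint."
)

resp = requests.post("http://localhost:8000/analyze", json={"text": sample})
resp.raise_for_status()
result = resp.json()

if result.get("success"):
    # "overall" and "paragraphs" follow the shape returned by analyze_text()
    print(result["overall"])
    for chunk in result["paragraphs"]:
        print(chunk["model"], "-", chunk["text_preview"])
else:
    print("error:", result.get("error"))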