# Spaces: Sleeping  (Hugging Face Spaces page-status header — scrape artifact, not code)
# Third-party dependencies (scrape artifacts removed; grouped per convention).
import torch
import streamlit as st
from transformers import BertTokenizer, BertForSequenceClassification
# Load the model and tokenizer from Hugging Face.
@st.cache_resource  # Streamlit reruns the whole script per interaction; cache so the model downloads/loads only once.
def load_model():
    """Load the AI-text-detector checkpoint and its tokenizer.

    Returns:
        tuple: ``(model, tokenizer)`` where ``model`` is a
        ``BertForSequenceClassification`` and ``tokenizer`` a
        ``BertTokenizer``, both from the
        ``pritamdeb68/BERTAIDetector`` Hub checkpoint.
    """
    model_name = "pritamdeb68/BERTAIDetector"
    model = BertForSequenceClassification.from_pretrained(model_name)
    tokenizer = BertTokenizer.from_pretrained(model_name)
    # Inference-only app: switch off dropout for deterministic predictions.
    model.eval()
    return model, tokenizer
# Load the model and tokenizer once at startup.
model, tokenizer = load_model()

# Create a Streamlit app
st.title("AI Text Detector")

# Get user input
user_input = st.text_area(
    "Enter your text:",
    height=300,
    placeholder="Enter your text here (10-100 words). Example: 'This is a sample text for analysis.'",
)

# Make predictions
if st.button("Detect"):
    # Process user input
    words = user_input.split()
    if len(words) < 10:
        # BUG FIX: the original fell through to inference even after this
        # error; too-short input must stop here.
        st.error("Please enter at least 10 words.")
    else:
        if len(words) > 100:
            # Keep only the first 100 words so predictions stay comparable.
            user_input = ' '.join(words[:100])
            st.warning("Text truncated to 100 words.")
        # truncation=True guards against exceeding BERT's 512-token limit.
        inputs = tokenizer(user_input, return_tensors="pt", truncation=True)
        with torch.no_grad():  # no gradients needed for inference
            logits = model(**inputs).logits
        predicted_class = torch.argmax(logits).item()
        confidence = torch.nn.functional.softmax(logits, dim=1)
        max_confidence = confidence.max().item()
        # Update predictions based on confidence levels:
        # class 0 = human, class 1 = AI; mid-confidence is reported as mixed.
        if max_confidence > 0.50 and predicted_class == 0:
            st.write(f"This text is likely **Human-written** content. {max_confidence}")
        elif max_confidence < 0.90:
            st.write("This text is likely a mix of **AI-generated** and **Human-written** content.")
        elif predicted_class == 1:
            st.write(f"This text is likely **AI-generated**. {max_confidence}")
        else:
            st.write("This text is likely **Human Written**.")