import gradio as gr
import re
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from nltk.corpus import stopwords
import nltk

# Download NLTK stopwords (only needed the first time this runs)
nltk.download('stopwords')
stop_words = set(stopwords.words('english'))

# Lightweight summarization models
model_choices = {
    "DistilBART CNN (sshleifer/distilbart-cnn-12-6)": "sshleifer/distilbart-cnn-12-6",
    "T5 Small (t5-small)": "t5-small",
    "T5 Base (t5-base)": "t5-base",
    "Flan-T5 Base (google/flan-t5-base)": "google/flan-t5-base",
    "DistilBART XSum (sshleifer/distilbart-xsum-12-6)": "sshleifer/distilbart-xsum-12-6"
}

model_cache = {}

# Clean input text (remove stopwords, SKU/product codes, and other non-meaningful tokens)
def clean_text(input_text):
    # Step 1: Keep only letters, digits, and whitespace, then collapse repeated spaces
    cleaned_text = re.sub(r'[^A-Za-z0-9\s]', ' ', input_text)
    cleaned_text = re.sub(r'\s+', ' ', cleaned_text)

    # Step 2: Tokenize and drop stopwords and words too short to be meaningful
    words = cleaned_text.split()
    filtered_words = [word for word in words if word.lower() not in stop_words and len(word) > 2]

    # Step 3: Rebuild the text from the remaining words
    filtered_text = " ".join(filtered_words)

    # Step 4: Remove product/SKU codes (letters followed by digits, e.g. ST1642, AB1234)
    filtered_text = re.sub(r'\b[A-Za-z]{2,}[0-9]{3,}\b', '', filtered_text)

    # Collapse any double spaces left by the code removal and strip the ends
    filtered_text = re.sub(r'\s+', ' ', filtered_text).strip()
    return filtered_text
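
# Illustrative behaviour of clean_text (a sketch assuming NLTK's English stopword
# list; the exact output depends on that list):
#   clean_text("The item AB1234 arrived broken and the box was damaged")
#   -> "item arrived broken box damaged"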

# Load model and tokenizer (cached so each checkpoint is only loaded once)
def load_model(model_name):
    if model_name not in model_cache:
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForSeq2SeqLM.from_pretrained(
            model_name,
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32
        )
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        model.to(device)

        # Warm-up generation so the first real request is not slow
        dummy_input = tokenizer("summarize: warm up", return_tensors="pt").input_ids.to(device)
        model.generate(dummy_input, max_length=10)

        model_cache[model_name] = (tokenizer, model)
    return model_cache[model_name]

# Summarize the text using the selected model
def summarize_text(input_text, model_label, char_limit):
    if not input_text.strip():
        return "Please enter some text."

    input_text = clean_text(input_text)
    model_name = model_choices[model_label]
    tokenizer, model = load_model(model_name)

    # T5-style models expect a task prefix
    if "t5" in model_name.lower() or "flan" in model_name.lower():
        input_text = "summarize: " + input_text

    device = model.device
    inputs = tokenizer(input_text, return_tensors="pt", truncation=True)
    input_ids = inputs["input_ids"].to(device)

    # Length constraints are in tokens (not characters); keep min_length < max_length
    max_len = 30
    min_len = 5

    # Generate the summary (greedy decoding, no sampling)
    summary_ids = model.generate(
        input_ids,
        max_length=max_len,
        min_length=min_len,
        do_sample=False
    )

    # Decode, then truncate to the requested character limit
    summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
    return summary[:char_limit].strip()
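
# Illustrative call (hypothetical input text; assumes the selected checkpoint can be
# downloaded on first use):
#   summarize_text("The package arrived two weeks late and the outer box was torn ...",
#                  "T5 Small (t5-small)", 80)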

# Gradio UI
iface = gr.Interface(
    fn=summarize_text,
    inputs=[
        gr.Textbox(lines=6, label="Enter text to summarize"),
        gr.Dropdown(
            choices=list(model_choices.keys()),
            label="Choose summarization model",
            value="DistilBART CNN (sshleifer/distilbart-cnn-12-6)"
        ),
        gr.Slider(minimum=30, maximum=200, value=80, step=1, label="Max Character Limit")
    ],
    outputs=gr.Textbox(lines=3, label="Summary (truncated to character limit)"),
    title="🚀 Fast Lightweight Summarizer (GPU Optimized)",
    description="Summarize text quickly using compact models ideal for low-latency and ZeroGPU Spaces."
)

iface.launch()
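
# Note: a minimal requirements.txt for this Space would likely include (inferred
# from the imports above, not confirmed): gradio, transformers, torch, nltk.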