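"""English Grammar Polisher: a small Gradio demo that polishes an English sentence
either with google/flan-t5-base (instruction-style prompt) or with a Helsinki-NLP
OPUS-MT round-trip translation (en→es→en)."""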
import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, MarianMTModel, MarianTokenizer

# Run on GPU when available, otherwise fall back to CPU.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

MODEL_OPTIONS = [
    "FLAN-T5-base (Google en→en)",
    "Round-trip OPUS-MT en→es→en (Helsinki-NLP)",
]

# Lazily populated cache so each model/tokenizer pair is loaded at most once.
CACHE = {}


def load_flan():
    """Load and cache the FLAN-T5-base model and tokenizer."""
    if "flan" not in CACHE:
        tok = AutoTokenizer.from_pretrained("google/flan-t5-base")
        mdl = AutoModelForSeq2SeqLM.from_pretrained(
            "google/flan-t5-base",
            low_cpu_mem_usage=True,
            torch_dtype="auto",
        ).to(DEVICE)
        CACHE["flan"] = (mdl, tok)
    return CACHE["flan"]


def run_flan(sentence: str) -> str:
    """Rewrite a sentence with FLAN-T5 using an instruction-style prompt."""
    model, tok = load_flan()
    prompt = f"Correct grammar and rewrite in fluent British English: {sentence}"
    inputs = tok(prompt, return_tensors="pt").to(DEVICE)
    with torch.no_grad():
        out = model.generate(**inputs, max_new_tokens=96, num_beams=4)
    return tok.decode(out[0], skip_special_tokens=True).strip()


def load_marian():
    """Load and cache the OPUS-MT en→es and es→en models and tokenizers."""
    if "en_es" not in CACHE:
        tok1 = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-es")
        mdl1 = MarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-es").to(DEVICE)
        tok2 = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-es-en")
        mdl2 = MarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-es-en").to(DEVICE)
        CACHE["en_es"] = (mdl1, tok1, mdl2, tok2)
    return CACHE["en_es"]


def run_roundtrip(sentence: str) -> str:
    """Paraphrase a sentence by translating en→es and back es→en."""
    mdl1, tok1, mdl2, tok2 = load_marian()

    with torch.no_grad():  # inference only, no gradients needed
        # English → Spanish
        inputs = tok1(sentence, return_tensors="pt").to(DEVICE)
        es_tokens = mdl1.generate(**inputs, max_length=128, num_beams=4)
        spanish = tok1.decode(es_tokens[0], skip_special_tokens=True)

        # Spanish → English
        inputs2 = tok2(spanish, return_tensors="pt").to(DEVICE)
        en_tokens = mdl2.generate(**inputs2, max_length=128, num_beams=4)
        english = tok2.decode(en_tokens[0], skip_special_tokens=True)

    return english.strip()


def polish(sentence: str, choice: str) -> str:
    """Dispatch the input sentence to the selected polishing method."""
    if not sentence.strip():
        return ""
    if choice.startswith("FLAN"):
        return run_flan(sentence)
    elif choice.startswith("Round-trip"):
        return run_roundtrip(sentence)
    else:
        return "Unknown option."


with gr.Blocks(title="English Grammar Polisher") as demo:
    gr.Markdown("### English Grammar Polisher\nChoose FLAN-T5 (Google) or OPUS-MT round-trip (Helsinki-NLP).")
    inp = gr.Textbox(lines=3, label="Input (English)", placeholder="Type a sentence…")
    choice = gr.Dropdown(choices=MODEL_OPTIONS, value=MODEL_OPTIONS[0], label="Method")
    btn = gr.Button("Polish")
    out = gr.Textbox(label="Output")
    btn.click(polish, inputs=[inp, choice], outputs=out)


if __name__ == "__main__":
    demo.launch()
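# Rough usage sketch (assumes this file is saved as app.py; the filename is not
# specified in the script itself):
#   pip install torch transformers sentencepiece accelerate gradio
#   python app.py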