|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer |
|
|
import torch |
|
|
from gtts import gTTS |
|
|
import gradio as gr |
|
|
import tempfile |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Fine-tuned M2M100 checkpoint for English -> Sanskrit translation.
# (An earlier, unused Helsinki-NLP en->hi checkpoint reference was removed.)
model_name = "SweUmaVarsh/m2m100-en-sa-translation"

tokenizer = M2M100Tokenizer.from_pretrained(model_name)
model = M2M100ForConditionalGeneration.from_pretrained(model_name)

# Prefer the GPU when one is available. The model must be loaded BEFORE it
# can be moved (the original called model.to(device) before `model` existed,
# raising NameError at import time), and it must live on the same device the
# encoded inputs are sent to inside translate_and_speak().
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
model.eval()  # inference only; disables dropout for deterministic output
|
|
|
|
|
|
|
|
|
|
|
def translate_and_speak(text):
    """Translate an English sentence to Sanskrit and synthesize speech.

    Parameters
    ----------
    text : str
        English sentence entered by the user.

    Returns
    -------
    tuple[str, str]
        The Sanskrit translation, and the filesystem path of an MP3 file
        containing its spoken rendition.
    """
    # NOTE(review): this checkpoint appears to expect a plain "en " source
    # prefix rather than the stock M2M100 src_lang/forced_bos_token_id API —
    # preserved as-is; confirm against the model card.
    input_text = "en " + text.strip()

    # Inputs must be on the same device as the model.
    encoded = tokenizer(
        input_text, return_tensors="pt", truncation=True, padding=True
    ).to(device)

    generated_tokens = model.generate(
        **encoded, max_length=128, num_beams=5, early_stopping=True
    )

    output = tokenizer.decode(generated_tokens[0], skip_special_tokens=True)

    # Strip leftover language markers — but only at the edges of the string.
    # The original looped output.replace(tag, "") over "en"/"sa", which
    # deleted those letter pairs ANYWHERE in the output and could corrupt
    # legitimate translated text.
    sanskrit_text = output.strip()
    for tag in ("__en__", "__sa__", "en", "sa"):
        if sanskrit_text.startswith(tag):
            sanskrit_text = sanskrit_text[len(tag):].lstrip()
        if sanskrit_text.endswith(tag):
            sanskrit_text = sanskrit_text[: -len(tag)].rstrip()

    # gTTS has no Sanskrit voice; Hindi shares the Devanagari script and is
    # the closest available approximation.
    tts = gTTS(sanskrit_text, lang='hi')

    # Allocate a temp path and CLOSE the handle before gTTS writes to it:
    # saving into a still-open NamedTemporaryFile fails on Windows.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as fp:
        audio_path = fp.name
    tts.save(audio_path)

    return sanskrit_text, audio_path
|
|
|
|
|
# Build and launch the Gradio UI: one text input, paired text + audio outputs.
iface = gr.Interface(
    fn=translate_and_speak,
    inputs=gr.Textbox(label="Enter English Text"),
    outputs=[
        gr.Textbox(label="Sanskrit Translation"),
        gr.Audio(label="Sanskrit Speech"),
    ],
    # "2021β2025" in the original was a mojibake'd UTF-8 en dash (U+2013);
    # restored to the intended year range.
    title="Final Year Project: English to Sanskrit Translator (IT 'A' 2021\u20132025)",
    description="Enter a sentence in English to get its Sanskrit translation and audio output.",
)

iface.launch()
|
|
|