Update app.py
app.py CHANGED
@@ -8,7 +8,7 @@ import os
 import nltk
 nltk.download('punkt')
 from nltk.tokenize import sent_tokenize
-
+import os
 # Additions for file processing
 import fitz # PyMuPDF for PDF
 import docx
@@ -18,21 +18,21 @@ import chardet
 
 # --- Device selection ---
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-
+HF_TOKEN = os.getenv("HF_TOKEN")
 # --- Load translation models ---
 def load_models():
     en_dar_model_path = "LocaleNLP/english_hausa"
     en_wol_model_path = "LocaleNLP/eng_wolof"
     en_hau_model_path = "LocaleNLP/english_darija"
 
-    en_dar_model = AutoModelForSeq2SeqLM.from_pretrained(en_dar_model_path).to(device)
-    en_dar_tokenizer = MarianTokenizer.from_pretrained(en_dar_model_path)
+    en_dar_model = AutoModelForSeq2SeqLM.from_pretrained(en_dar_model_path, token=HF_TOKEN).to(device)
+    en_dar_tokenizer = MarianTokenizer.from_pretrained(en_dar_model_path, token=HF_TOKEN)
 
-    en_wol_model = AutoModelForSeq2SeqLM.from_pretrained(en_wol_model_path).to(device)
-    en_wol_tokenizer = MarianTokenizer.from_pretrained(en_wol_model_path)
+    en_wol_model = AutoModelForSeq2SeqLM.from_pretrained(en_wol_model_path, token=HF_TOKEN).to(device)
+    en_wol_tokenizer = MarianTokenizer.from_pretrained(en_wol_model_path, token=HF_TOKEN)
 
-    en_hau_model = AutoModelForSeq2SeqLM.from_pretrained(en_hau_model_path).to(device)
-    en_hau_tokenizer = MarianTokenizer.from_pretrained(en_hau_model_path)
+    en_hau_model = AutoModelForSeq2SeqLM.from_pretrained(en_hau_model_path, token=HF_TOKEN).to(device)
+    en_hau_tokenizer = MarianTokenizer.from_pretrained(en_hau_model_path, token=HF_TOKEN)
 
     en_dar_translator = pipeline("translation", model=en_dar_model, tokenizer=en_dar_tokenizer, device=0 if device.type == 'cuda' else -1)
     en_wol_translator = pipeline("translation", model=en_wol_model, tokenizer=en_wol_tokenizer, device=0 if device.type == 'cuda' else -1)