Update src/translate/Translate.py
src/translate/Translate.py
CHANGED
@@ -83,12 +83,8 @@ def gemma_direct(requestValue: str, model: str = 'Gargaz/gemma-2b-romanian-bette
     # Estimate output length (e.g., 50% longer)
     max_new_tokens = int(num_tokens * 1.5)
     max_new_tokens += max_new_tokens % 2  # ensure it's even
-
-
-    # max_new_tokens = int(len(request_value) * 1.5)
-    # max_new_tokens += max_new_tokens % 2  # ensure it's even
-
-    messages = [{"role": "user", "content": prompt]
+
+    messages = [{"role": "user", "content": prompt}]
     tokenizer = AutoTokenizer.from_pretrained("Gargaz/gemma-2b-romanian-better")
     model = AutoModelForCausalLM.from_pretrained("Gargaz/gemma-2b-romanian-better").to(device)
 
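For context, the commit fixes a syntax error: the dict literal inside `messages` was missing its closing `}`. Below is a minimal sketch of the fixed generation path. Only the lines shown in the diff hunk come from the source; the surrounding setup (the `device` selection, the example `prompt`, deriving `num_tokens` from the tokenizer, and the `apply_chat_template`/`generate` call at the end) is an assumption added to make the snippet self-contained and runnable.

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

device = "cuda" if torch.cuda.is_available() else "cpu"  # assumed setup, not in the diff
prompt = "Translate to Romanian: Hello, world!"          # assumed example input

tokenizer = AutoTokenizer.from_pretrained("Gargaz/gemma-2b-romanian-better")
model = AutoModelForCausalLM.from_pretrained("Gargaz/gemma-2b-romanian-better").to(device)

# Estimate output length (e.g., 50% longer than the input), as in the diff.
num_tokens = len(tokenizer(prompt)["input_ids"])  # assumed source of num_tokens
max_new_tokens = int(num_tokens * 1.5)
max_new_tokens += max_new_tokens % 2  # ensure it's even

# The fixed line: the dict now closes with "}" before the list's "]".
messages = [{"role": "user", "content": prompt}]

# Assumed continuation: format the chat messages and generate.
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(device)
output = model.generate(inputs, max_new_tokens=max_new_tokens)
print(tokenizer.decode(output[0][inputs.shape[-1]:], skip_special_tokens=True))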