Update src/translate/Translate.py
src/translate/Translate.py  CHANGED  (+13 -3)
@@ -53,9 +53,19 @@ def paraphraseTranslateMethod(requestValue: str, model: str):
 
     return " ".join(result_value).strip(), model
 
-def gemma(requestValue: str, model: str = 'Gargaz/gemma-2b-romanian-better'):
-
+def gemma(requestValue: str, model: str = 'Gargaz/gemma-2b-romanian-better'):
+    prompt = f"Translate this to Romanian using a formal tone. Only return the translation:\n{requestValue}"
     messages = [
         {"role": "user", "content": f"Translate this to Romanian using a formal tone. Only return the translated text: {requestValue}"},
     ]
-
+    pipe = pipeline(
+        "text-generation",
+        model=model,
+        device=-1,
+        max_new_tokens=512,  # Keep short to reduce verbosity
+        do_sample=False,     # Use greedy decoding for determinism
+        temperature=0.7      # Raise slightly to avoid dull output
+    )
+
+    output = pipe(prompt, num_return_sequences=1, return_full_text=False)
+    return output[0]["generated_text"].strip(), model
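The hunk does not show the top of Translate.py, so it presumably already has `from transformers import pipeline`; without that import the new gemma() would raise a NameError. A minimal caller sketch for the updated function could look like the following, where the import path and the sample sentence are illustrative assumptions, not taken from the repository:

    # Hypothetical caller for the updated gemma(); assumes the Space's
    # src/ directory is importable as a package and transformers is installed.
    from src.translate.Translate import gemma

    translated_text, used_model = gemma("Good morning, how are you today?")
    print(used_model)        # 'Gargaz/gemma-2b-romanian-better'
    print(translated_text)   # the model's formal Romanian translation

Two details of the new version, as reconstructed from this hunk: messages is still built but the pipeline is called with the plain prompt string, and since do_sample=False selects greedy decoding, transformers ignores the temperature=0.7 value (recent releases typically warn that the flag is only used in sample-based generation).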