Spaces:
Running
Running
Update src/translate/Translate.py
Browse files
src/translate/Translate.py
CHANGED
|
@@ -55,7 +55,7 @@ def paraphraseTranslateMethod(requestValue: str, model: str):
|
|
| 55 |
def gemma(requestValue: str, model: str = 'Gargaz/gemma-2b-romanian-better'):
|
| 56 |
requestValue = requestValue.replace('\n', ' ')
|
| 57 |
prompt = f"Translate this to Romanian using a formal tone. Only return the translation: {requestValue}"
|
| 58 |
-
messages = [{"role": "user", "content": f"Translate this text to Romanian using a formal tone: {requestValue}"}]
|
| 59 |
if '/' not in model:
|
| 60 |
model = 'Gargaz/gemma-2b-romanian-better'
|
| 61 |
# limit max_new_tokens to 150% of the requestValue
|
|
@@ -64,11 +64,10 @@ def gemma(requestValue: str, model: str = 'Gargaz/gemma-2b-romanian-better'):
|
|
| 64 |
"text-generation",
|
| 65 |
model=model,
|
| 66 |
device=-1,
|
| 67 |
-
max_new_tokens=max_new_tokens,
|
| 68 |
-
do_sample=False
|
| 69 |
)
|
| 70 |
output = pipe(messages, num_return_sequences=1, return_full_text=False)
|
| 71 |
generated_text = output[0]["generated_text"]
|
| 72 |
result = generated_text.split('\n', 1)[0].strip()
|
| 73 |
-
return result, model
|
| 74 |
-
# return output, model
|
|
|
|
| 55 |
def gemma(requestValue: str, model: str = 'Gargaz/gemma-2b-romanian-better'):
|
| 56 |
requestValue = requestValue.replace('\n', ' ')
|
| 57 |
prompt = f"Translate this to Romanian using a formal tone. Only return the translation: {requestValue}"
|
| 58 |
+
messages = [{"role": "user", "content": f"Translate this text to Romanian using a formal tone, responding only with the translated text: {requestValue}"}]
|
| 59 |
if '/' not in model:
|
| 60 |
model = 'Gargaz/gemma-2b-romanian-better'
|
| 61 |
# limit max_new_tokens to 150% of the requestValue
|
|
|
|
| 64 |
"text-generation",
|
| 65 |
model=model,
|
| 66 |
device=-1,
|
| 67 |
+
max_new_tokens=max_new_tokens, # Keep short to reduce verbosity
|
| 68 |
+
do_sample=False # Use greedy decoding for determinism
|
| 69 |
)
|
| 70 |
output = pipe(messages, num_return_sequences=1, return_full_text=False)
|
| 71 |
generated_text = output[0]["generated_text"]
|
| 72 |
result = generated_text.split('\n', 1)[0].strip()
|
| 73 |
+
return result, model
|
|
|