xuebi
committed on
Commit
·
8f96eee
1
Parent(s):
30a4a95
update: fix generation_config in docs
Browse files
Signed-off-by: xuebi <xuebi@minimaxi.com>
docs/transformers_deploy_guide.md
CHANGED
|
@@ -57,7 +57,7 @@ messages = [
|
|
| 57 |
|
| 58 |
model_inputs = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True).to("cuda")
|
| 59 |
|
| 60 |
-
generated_ids = model.generate(model_inputs, max_new_tokens=100, generation_config=generation_config)
|
| 61 |
|
| 62 |
response = tokenizer.batch_decode(generated_ids)[0]
|
| 63 |
|
|
|
|
| 57 |
|
| 58 |
model_inputs = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True).to("cuda")
|
| 59 |
|
| 60 |
+
generated_ids = model.generate(model_inputs, max_new_tokens=100, generation_config=model.generation_config)
|
| 61 |
|
| 62 |
response = tokenizer.batch_decode(generated_ids)[0]
|
| 63 |
|
docs/transformers_deploy_guide_cn.md
CHANGED
|
@@ -57,7 +57,7 @@ messages = [
|
|
| 57 |
|
| 58 |
model_inputs = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True).to("cuda")
|
| 59 |
|
| 60 |
-
generated_ids = model.generate(model_inputs, max_new_tokens=100, generation_config=generation_config)
|
| 61 |
|
| 62 |
response = tokenizer.batch_decode(generated_ids)[0]
|
| 63 |
|
|
|
|
| 57 |
|
| 58 |
model_inputs = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True).to("cuda")
|
| 59 |
|
| 60 |
+
generated_ids = model.generate(model_inputs, max_new_tokens=100, generation_config=model.generation_config)
|
| 61 |
|
| 62 |
response = tokenizer.batch_decode(generated_ids)[0]
|
| 63 |
|