Commit af9c7a0
Parent(s): 6311bc5

Adjust GPU memory utilization default and add max model length argument in generate-responses.py

Files changed: generate-responses.py (+8, -2)

generate-responses.py CHANGED
@@ -397,8 +397,13 @@ Examples:
     parser.add_argument(
         "--gpu-memory-utilization",
         type=float,
-        default=0.
-        help="GPU memory utilization factor (default: 0.
+        default=0.90,
+        help="GPU memory utilization factor (default: 0.90)",
+    )
+    parser.add_argument(
+        "--max-model-len",
+        type=int,
+        help="Maximum model context length (default: model's default)",
     )
     parser.add_argument(
         "--tensor-parallel-size",
@@ -426,6 +431,7 @@ Examples:
         max_tokens=args.max_tokens,
         repetition_penalty=args.repetition_penalty,
         gpu_memory_utilization=args.gpu_memory_utilization,
+        max_model_len=args.max_model_len,
         tensor_parallel_size=args.tensor_parallel_size,
         hf_token=args.hf_token,
     )
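The parameter names forwarded in the second hunk (gpu_memory_utilization, max_model_len, tensor_parallel_size) match vLLM's offline LLM API, so the new flag presumably just gets passed through to the engine. Below is a minimal sketch of that plumbing, assuming vLLM is the backend; the function name build_engine and the model id are illustrative placeholders, not the script's actual code.

    # Hypothetical sketch, not the script's actual code: how --max-model-len
    # would typically reach a vLLM engine.
    from vllm import LLM, SamplingParams

    def build_engine(model_id, gpu_memory_utilization=0.90,
                     max_model_len=None, tensor_parallel_size=1):
        # max_model_len=None lets vLLM fall back to the model's own context
        # length, matching the help text "default: model's default" above.
        return LLM(
            model=model_id,
            gpu_memory_utilization=gpu_memory_utilization,
            max_model_len=max_model_len,
            tensor_parallel_size=tensor_parallel_size,
        )

    # Example: cap the context at 8192 tokens to fit a smaller GPU.
    llm = build_engine("my-org/my-model", max_model_len=8192)
    outputs = llm.generate(["Hello"], SamplingParams(max_tokens=512,
                                                     repetition_penalty=1.1))

With the change, an invocation such as "python generate-responses.py --gpu-memory-utilization 0.90 --max-model-len 8192 ..." (other arguments elided) caps the context length explicitly; leaving --max-model-len unset keeps the model's own maximum, and the GPU memory utilization default is now 0.90.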