Commit c2b4647
Parent: bdbcbee
update

generate-responses.py CHANGED (+59 -56)
@@ -2,18 +2,12 @@
 # requires-python = ">=3.10"
 # dependencies = [
 #     "datasets",
-#     "flashinfer-python",
 #     "huggingface-hub[hf_transfer]",
 #     "torch",
 #     "transformers",
 #     "vllm",
 # ]
 #
-# [[tool.uv.index]]
-# url = "https://flashinfer.ai/whl/cu126/torch2.6"
-#
-# [[tool.uv.index]]
-# url = "https://wheels.vllm.ai/nightly"
 # ///
 """
 Generate responses for prompts in a dataset using vLLM for efficient GPU inference.
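Note: after this change the script's PEP 723 inline metadata declares only packages from the default index; the `flashinfer-python` dependency and both extra `[[tool.uv.index]]` package indexes are gone. A sketch of the resulting header, reconstructed from the hunk above (the opening `# /// script` marker sits on file line 1, outside the hunk, and is assumed here):

# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "datasets",
#     "huggingface-hub[hf_transfer]",
#     "torch",
#     "transformers",
#     "vllm",
# ]
# ///

uv reads this block on `uv run generate-responses.py` and resolves the listed dependencies into a managed environment before executing the script.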
@@ -60,7 +54,9 @@ from vllm import LLM, SamplingParams
 # Enable HF Transfer for faster downloads
 os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
 
-logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
+logging.basicConfig(
+    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
+)
 logger = logging.getLogger(__name__)
 
 
@@ -68,15 +64,17 @@ def check_gpu_availability() -> int:
     """Check if CUDA is available and return the number of GPUs."""
     if not cuda.is_available():
         logger.error("CUDA is not available. This script requires a GPU.")
-        logger.error("Please run on a machine with NVIDIA GPU or use HF Jobs with GPU flavor.")
+        logger.error(
+            "Please run on a machine with NVIDIA GPU or use HF Jobs with GPU flavor."
+        )
         sys.exit(1)
-
+
     num_gpus = cuda.device_count()
     for i in range(num_gpus):
         gpu_name = cuda.get_device_name(i)
         gpu_memory = cuda.get_device_properties(i).total_memory / 1024**3
         logger.info(f"GPU {i}: {gpu_name} with {gpu_memory:.1f} GB memory")
-
+
     return num_gpus
 
 
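Note: assembled from the hunk above, the post-commit function reads as below. The imports are assumptions based on how the names are used elsewhere in the script (`cuda` from `torch`, plus `logging` and `sys`):

import logging
import sys

from torch import cuda

logger = logging.getLogger(__name__)


def check_gpu_availability() -> int:
    """Check if CUDA is available and return the number of GPUs."""
    if not cuda.is_available():
        logger.error("CUDA is not available. This script requires a GPU.")
        logger.error(
            "Please run on a machine with NVIDIA GPU or use HF Jobs with GPU flavor."
        )
        sys.exit(1)

    num_gpus = cuda.device_count()
    for i in range(num_gpus):
        gpu_name = cuda.get_device_name(i)
        # total_memory is in bytes; divide by 1024**3 for GiB
        gpu_memory = cuda.get_device_properties(i).total_memory / 1024**3
        logger.info(f"GPU {i}: {gpu_name} with {gpu_memory:.1f} GB memory")

    return num_gpus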
@@ -167,7 +165,7 @@ def main(
 ):
     """
     Main generation pipeline.
-
+
     Args:
         src_dataset_hub_id: Input dataset on Hugging Face Hub
         output_dataset_hub_id: Where to save results on Hugging Face Hub
@@ -185,30 +183,34 @@ def main(
         hf_token: Hugging Face authentication token
     """
     generation_start_time = datetime.now().isoformat()
-
+
     # GPU check and configuration
     num_gpus = check_gpu_availability()
     if tensor_parallel_size is None:
         tensor_parallel_size = num_gpus
-        logger.info(f"Auto-detected {num_gpus} GPU(s), using tensor_parallel_size={tensor_parallel_size}")
+        logger.info(
+            f"Auto-detected {num_gpus} GPU(s), using tensor_parallel_size={tensor_parallel_size}"
+        )
     else:
         logger.info(f"Using specified tensor_parallel_size={tensor_parallel_size}")
         if tensor_parallel_size > num_gpus:
-            logger.warning(f"Requested {tensor_parallel_size} GPUs but only {num_gpus} available")
-
+            logger.warning(
+                f"Requested {tensor_parallel_size} GPUs but only {num_gpus} available"
+            )
+
     # Authentication - try multiple methods
     HF_TOKEN = hf_token or os.environ.get("HF_TOKEN") or get_token()
-
+
     if not HF_TOKEN:
         logger.error("No HuggingFace token found. Please provide token via:")
         logger.error(" 1. --hf-token argument")
         logger.error(" 2. HF_TOKEN environment variable")
         logger.error(" 3. Run 'huggingface-cli login' or use login() in Python")
         sys.exit(1)
-
+
     logger.info("HuggingFace token found, authenticating...")
     login(token=HF_TOKEN)
-
+
     # Initialize vLLM
     logger.info(f"Loading model: {model_id}")
     llm = LLM(
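Note: the authentication block falls back through three sources in order. A minimal sketch of the same chain; `resolve_hf_token` is a hypothetical helper name (the script inlines the expression):

import os

from huggingface_hub import get_token, login


def resolve_hf_token(hf_token: str | None = None) -> str | None:
    # Order: explicit argument, then the HF_TOKEN environment variable,
    # then the token cached locally by `huggingface-cli login`.
    return hf_token or os.environ.get("HF_TOKEN") or get_token()


token = resolve_hf_token()
if token:
    login(token=token)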
@@ -216,11 +218,11 @@ def main(
         tensor_parallel_size=tensor_parallel_size,
         gpu_memory_utilization=gpu_memory_utilization,
     )
-
+
     # Load tokenizer for chat template
     logger.info("Loading tokenizer...")
     tokenizer = AutoTokenizer.from_pretrained(model_id)
-
+
     # Create sampling parameters
     sampling_params = SamplingParams(
         temperature=temperature,
@@ -230,18 +232,20 @@ def main(
         max_tokens=max_tokens,
         repetition_penalty=repetition_penalty,
     )
-
+
     # Load dataset
     logger.info(f"Loading dataset: {src_dataset_hub_id}")
     dataset = load_dataset(src_dataset_hub_id, split="train")
     total_examples = len(dataset)
     logger.info(f"Dataset loaded with {total_examples:,} examples")
-
+
     # Validate messages column
     if messages_column not in dataset.column_names:
-        logger.error(f"Column '{messages_column}' not found. Available columns: {dataset.column_names}")
+        logger.error(
+            f"Column '{messages_column}' not found. Available columns: {dataset.column_names}"
+        )
         sys.exit(1)
-
+
     # Process messages and apply chat template
     logger.info("Applying chat template to messages...")
     prompts = []
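Note: for reference, the sampling setup implied by the script's CLI defaults, as a sketch. The diff shows only part of the `SamplingParams(...)` call; passing `top_p`, `top_k`, and `min_p` through is assumed from the matching flags:

from vllm import SamplingParams

sampling_params = SamplingParams(
    temperature=0.7,         # --temperature default
    top_p=0.8,               # --top-p default
    top_k=20,                # --top-k default
    min_p=0.0,               # --min-p default
    max_tokens=16384,        # --max-tokens default
    repetition_penalty=1.0,  # --repetition-penalty default
)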
@@ -249,29 +253,27 @@ def main(
         messages = example[messages_column]
         # Apply chat template
         prompt = tokenizer.apply_chat_template(
-            messages,
-            tokenize=False,
-            add_generation_prompt=True
+            messages, tokenize=False, add_generation_prompt=True
         )
         prompts.append(prompt)
-
+
     # Generate responses - vLLM handles batching internally
     logger.info(f"Starting generation for {len(prompts):,} prompts...")
     logger.info("vLLM will handle batching and scheduling automatically")
-
+
     outputs = llm.generate(prompts, sampling_params)
-
+
     # Extract generated text
     logger.info("Extracting generated responses...")
     responses = []
     for output in outputs:
         response = output.outputs[0].text.strip()
         responses.append(response)
-
+
     # Add responses to dataset
     logger.info("Adding responses to dataset...")
     dataset = dataset.add_column(output_column, responses)
-
+
     # Create dataset card
     logger.info("Creating dataset card...")
     card_content = create_dataset_card(
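Note: a minimal single-prompt walk-through of the same templating-and-generation path. The model ID is the script's default; the example message and the smaller `max_tokens` are illustrative assumptions:

from transformers import AutoTokenizer
from vllm import LLM, SamplingParams

model_id = "Qwen/Qwen3-30B-A3B-Instruct-2507-FP8"
tokenizer = AutoTokenizer.from_pretrained(model_id)

# One chat record in the shape expected in the dataset's messages column.
messages = [{"role": "user", "content": "Explain tensor parallelism in one sentence."}]

# Render the chat into the model's prompt format as a string, appending
# the assistant turn marker so the model starts a fresh reply.
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)

llm = LLM(model=model_id)
outputs = llm.generate([prompt], SamplingParams(temperature=0.7, max_tokens=256))
response = outputs[0].outputs[0].text.strip()
print(response)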
@@ -283,17 +285,19 @@ def main(
         num_examples=total_examples,
         generation_time=generation_start_time,
     )
-
+
     # Push dataset to hub
     logger.info(f"Pushing dataset to: {output_dataset_hub_id}")
     dataset.push_to_hub(output_dataset_hub_id, token=HF_TOKEN)
-
+
     # Push dataset card
     card = DatasetCard(card_content)
     card.push_to_hub(output_dataset_hub_id, token=HF_TOKEN)
-
+
     logger.info("✅ Generation complete!")
-    logger.info(f"Dataset available at: https://huggingface.co/datasets/{output_dataset_hub_id}")
+    logger.info(
+        f"Dataset available at: https://huggingface.co/datasets/{output_dataset_hub_id}"
+    )
 
 
 if __name__ == "__main__":
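Note: the upload happens in two pushes: the data itself, then a card that becomes the repo's README.md. A self-contained sketch with placeholder repo ID, token, and card text (all hypothetical):

from datasets import Dataset
from huggingface_hub import DatasetCard

repo_id = "username/output-dataset"  # placeholder
hf_token = "hf_xxx"  # placeholder

# Stand-in for the real dataset with its generated `response` column.
dataset = Dataset.from_dict({"response": ["example generated text"]})
dataset.push_to_hub(repo_id, token=hf_token)

card = DatasetCard("# Generated responses\n\nPlaceholder card text.")
card.push_to_hub(repo_id, token=hf_token)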
@@ -319,90 +323,89 @@ Examples:
 
     # Using environment variable for token
     HF_TOKEN=hf_xxx uv run generate-responses.py input-dataset output-dataset
-    """
+    """,
 )
-
+
 parser.add_argument(
     "src_dataset_hub_id",
-    help="Input dataset on Hugging Face Hub (e.g., username/dataset-name)"
+    help="Input dataset on Hugging Face Hub (e.g., username/dataset-name)",
 )
 parser.add_argument(
-    "output_dataset_hub_id",
-    help="Output dataset name on Hugging Face Hub"
+    "output_dataset_hub_id", help="Output dataset name on Hugging Face Hub"
 )
 parser.add_argument(
     "--model-id",
     type=str,
     default="Qwen/Qwen3-30B-A3B-Instruct-2507-FP8",
-    help="Model to use for generation (default: Qwen3-30B-A3B-Instruct-2507-FP8)"
+    help="Model to use for generation (default: Qwen3-30B-A3B-Instruct-2507-FP8)",
 )
 parser.add_argument(
     "--messages-column",
     type=str,
     default="messages",
-    help="Column containing chat messages (default: messages)"
+    help="Column containing chat messages (default: messages)",
 )
 parser.add_argument(
     "--output-column",
     type=str,
     default="response",
-    help="Column name for generated responses (default: response)"
+    help="Column name for generated responses (default: response)",
 )
 parser.add_argument(
     "--temperature",
     type=float,
     default=0.7,
-    help="Sampling temperature (default: 0.7)"
+    help="Sampling temperature (default: 0.7)",
 )
 parser.add_argument(
     "--top-p",
     type=float,
     default=0.8,
-    help="Top-p sampling parameter (default: 0.8)"
+    help="Top-p sampling parameter (default: 0.8)",
 )
 parser.add_argument(
     "--top-k",
     type=int,
     default=20,
-    help="Top-k sampling parameter (default: 20)"
+    help="Top-k sampling parameter (default: 20)",
 )
 parser.add_argument(
     "--min-p",
     type=float,
     default=0.0,
-    help="Minimum probability threshold (default: 0.0)"
+    help="Minimum probability threshold (default: 0.0)",
 )
 parser.add_argument(
     "--max-tokens",
     type=int,
     default=16384,
-    help="Maximum tokens to generate (default: 16384)"
+    help="Maximum tokens to generate (default: 16384)",
 )
 parser.add_argument(
     "--repetition-penalty",
     type=float,
     default=1.0,
-    help="Repetition penalty (default: 1.0)"
+    help="Repetition penalty (default: 1.0)",
 )
 parser.add_argument(
     "--gpu-memory-utilization",
     type=float,
     default=0.90,
-    help="GPU memory utilization factor (default: 0.90)"
+    help="GPU memory utilization factor (default: 0.90)",
 )
 parser.add_argument(
     "--tensor-parallel-size",
     type=int,
-    help="Number of GPUs to use (default: auto-detect)"
+    help="Number of GPUs to use (default: auto-detect)",
 )
 parser.add_argument(
     "--hf-token",
     type=str,
-    help="Hugging Face token (can also use HF_TOKEN env var)"
+    help="Hugging Face token (can also use HF_TOKEN env var)",
 )
-
+
 args = parser.parse_args()
-
+
 main(
     src_dataset_hub_id=args.src_dataset_hub_id,
     output_dataset_hub_id=args.output_dataset_hub_id,
@@ -439,4 +442,4 @@ Example HF Jobs command with multi-GPU:
     --model-id Qwen/Qwen3-30B-A3B-Instruct-2507-FP8 \\
     --temperature 0.7 \\
     --max-tokens 16384
-""")
+""")