pretrain_core_model_2
scripts/pretrain_core_model_2.yaml (CHANGED)
@@ -52,7 +52,7 @@ data:
 # Training-related arguments. See ``litgpt.args.TrainArgs`` for details
 train:
   # Number of optimizer steps between saving checkpoints (type: Optional[int], default: 1000)
-  save_interval:
+  save_interval: 25
 
   # Number of iterations between logging calls (type: int, default: 1)
   log_interval: 1
@@ -90,7 +90,7 @@ train:
 # Evaluation-related arguments. See ``litgpt.args.EvalArgs`` for details
 eval:
   # Number of optimizer steps between evaluation calls (type: int, default: 1000)
-  interval:
+  interval: 25
 
   # Number of tokens to generate (type: Optional[int], default: null)
   max_new_tokens:
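The change fills in two previously empty keys: per the inline comments, both are counted in optimizer steps, so after this commit a checkpoint is written (train.save_interval) and an evaluation pass is run (eval.interval) every 25 steps. Assuming this is a standard LitGPT pretraining config, it would typically be consumed by the LitGPT CLI, for example:

    litgpt pretrain --config scripts/pretrain_core_model_2.yaml

The invocation above is an illustrative assumption, not taken from this commit; the exact command depends on the LitGPT version and on the rest of the config (model, data, and optimizer settings are defined elsewhere in the file).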