# llama_2B.yaml
# Nanotron pretraining config (fork of huggingface/nanotron)
checkpoints:
  checkpoint_interval: 2000
  checkpoints_path: /scratch/project_462000353/villekom/checkpoints/nanotron
  checkpoints_path_is_shared_file_system: true
  resume_checkpoint_path: null
  save_initial_state: false
data_stages:
- data:
    dataset:
      dataset_folder: /scratch/project_462000353/data/nanosets/fineweb-edu/350BT
    num_loading_workers: 7
    seed: 42
  name: Stable Training Stage
  start_training_step: 1
general:
  benchmark_csv_path: null
  consumed_train_samples: null
  ignore_sanity_checks: true
  project: fineweb-ablations
  run: llama
  seed: 42
  step: null
lighteval: null
logging:
  iteration_step_info_interval: 10
  log_level: info
  log_level_replica: warning
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    std: 0.02
  make_vocab_size_divisible_by: 1
  model_config:
    is_llama_config: true
    bos_token_id: 1
    eos_token_id: 2
    hidden_act: silu
    hidden_size: 2048
    initializer_range: 0.02
    intermediate_size: 8192
    max_position_embeddings: 2048
    num_attention_heads: 32
    num_hidden_layers: 24
    num_key_value_heads: 32
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_scaling: null
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 50272
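# Back-of-the-envelope parameter count for the model_config above (my estimate, not part of
# the original config; assumes the standard Llama block: SwiGLU MLP, RMSNorm, no biases,
# tied input/output embeddings):
#   embeddings: 50272 * 2048                ~= 0.10B (counted once, since tie_word_embeddings: true)
#   attention per layer: 4 * 2048^2         ~= 16.8M
#   MLP per layer: 3 * 2048 * 8192          ~= 50.3M
#   total: 24 layers * ~67.1M + embeddings  ~= 1.7B parameters, roughly the "2B" in the filename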
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.0003
    lr_decay_starting_step: null
    lr_decay_steps: 10000
    lr_decay_style: cosine
    lr_warmup_steps: 500
    lr_warmup_style: linear
    min_decay_lr: 3.0e-05
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.1
  zero_stage: 0
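# How I read the learning_rate_scheduler above (a sketch of nanotron's usual behavior, hedged):
#   steps 1-500:       linear warmup from 0 to 3.0e-4
#   next 10,000 steps: cosine decay from 3.0e-4 down to min_decay_lr 3.0e-5
#                      (lr_decay_starting_step: null presumably means decay begins right after warmup)
#   afterwards:        held at 3.0e-5 for the remaining ~156k train steps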
parallelism:
  dp: 64 # should equal the total number of GPUs when tp == 1 and pp == 1
  expert_parallel_size: 1
  pp: 1
  pp_engine: 1f1b
  tp: 1
  tp_linear_async_communication: true
  tp_mode: REDUCE_SCATTER
profiler: null
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: HuggingFaceFW/ablation-model-fineweb-edu
  tokenizer_revision: null
tokens:
  batch_accumulation_per_replica: 4
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 4
  sequence_length: 2048
  train_steps: 167000 # ~350B fineweb-edu tokens at a ~2.1M-token global batch (see note below)
  val_check_interval: 100
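# Token-budget sanity check (my arithmetic from the values above, not part of the original config):
#   global batch = dp 64 * micro_batch_size 4 * batch_accumulation_per_replica 4 = 1024 sequences/step
#   tokens/step  = 1024 * sequence_length 2048 = 2,097,152 (~2.1M tokens)
#   total tokens = 167,000 train_steps * ~2.1M tokens/step ~= 350B fineweb-edu tokens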