
Commit

debug
xrsrke committed Jan 27, 2025
1 parent 1e31cb9 commit 5bd9086
Showing 6 changed files with 635 additions and 5 deletions.
105 changes: 105 additions & 0 deletions examples/exp6_elie_original_config.yaml
@@ -0,0 +1,105 @@
checkpoints:
  checkpoint_interval: 10000
  # checkpoints_path: /fsx/elie_bakouch/nanotron/debug-ckpt-cpuoom
  checkpoints_path: checkpoints
  checkpoints_path_is_shared_file_system: false
  resume_checkpoint_path: null
  load_lr_scheduler: false
  load_optimizer: false
  save_final_state: true
  save_initial_state: true
data_stages:
- data:
    dataset:
      dataset_folder:
      - /fsx/elie_bakouch/data/fw-edu-dedup
    num_loading_workers: 0
    seed: 8
  name: stable phase
  start_training_step: 1
general:
  benchmark_csv_path: null
  consumed_train_samples: null
  ignore_sanity_checks: true
  project: llama3-3B-finetune
  run: fwedu-60B-resume
  seed: 6
  step: null
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    std: 0.041666666666666664
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 128000
    eos_token_id: 128001
    hidden_act: silu
    hidden_size: 3072
    initializer_range: 0.02
    intermediate_size: 8192
    is_llama_config: true
    max_position_embeddings: 4096
    num_attention_heads: 24
    num_hidden_layers: 28
    num_key_value_heads: 8
    pad_token_id: null
    pretraining_tp: 2
    rms_norm_eps: 1.0e-05
    rope_interleaved: false
    rope_scaling:
      factor: 32.0
      high_freq_factor: 4.0
      low_freq_factor: 1.0
      original_max_position_embeddings: 8192
      rope_type: llama3
    rope_theta: 500000.0
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 128256
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.00005
    lr_decay_starting_step: 50000
    lr_decay_steps: 10000
    lr_decay_style: linear
    lr_warmup_steps: 1000
    lr_warmup_style: linear
    min_decay_lr: 0
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.01
  zero_stage: 1
parallelism:
  dp: 1
  expert_parallel_size: 1
  pp: 1
  pp_engine: 1f1b
  recompute_layer: false
  tp: 2
  tp_linear_async_communication: true
  tp_mode: REDUCE_SCATTER
  tp_recompute_allgather: true
profiler: null
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: meta-llama/Llama-3.2-3B
  tokenizer_revision: null
tokens:
  batch_accumulation_per_replica: 2
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 4
  sequence_length: 4096
  train_steps: 120000
  val_check_interval: -1
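
Note on batch sizing: with the values above, the effective batch per optimizer step follows the usual data-parallel convention, dp x micro_batch_size x batch_accumulation_per_replica sequences of sequence_length tokens each. A minimal back-of-the-envelope sketch in Python (plain arithmetic on values copied from the config, not a nanotron API call):

# Batch math for exp6_elie_original_config.yaml, using the standard
# dp * micro_batch_size * grad_accum * sequence_length convention.
dp = 1
micro_batch_size = 4
batch_accumulation_per_replica = 2
sequence_length = 4096
train_steps = 120000

sequences_per_step = dp * micro_batch_size * batch_accumulation_per_replica
tokens_per_step = sequences_per_step * sequence_length
total_tokens = tokens_per_step * train_steps

print(f"{sequences_per_step} sequences/step, {tokens_per_step:,} tokens/step")
print(f"~{total_tokens / 1e9:.1f}B tokens over {train_steps:,} steps")

With dp = 1 this works out to 32,768 tokens per optimizer step, roughly 3.9B tokens over the full 120,000 steps.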
106 changes: 106 additions & 0 deletions examples/exp6b0_elie_original_config_but_dp1_and_no_grad_accum.yaml
@@ -0,0 +1,106 @@
checkpoints:
  checkpoint_interval: 10000
  # checkpoints_path: /fsx/elie_bakouch/nanotron/debug-ckpt-cpuoom
  checkpoints_path: checkpoints/exp6b0_elie_original_config_but_dp1_and_no_grad_accum
  checkpoints_path_is_shared_file_system: false
  resume_checkpoint_path: null
  load_lr_scheduler: false
  load_optimizer: false
  save_final_state: true
  save_initial_state: false
data_stages:
- data:
    dataset:
      dataset_folder:
      - /fsx/elie_bakouch/data/fw-edu-dedup
    num_loading_workers: 0
    seed: 8
  name: stable phase
  start_training_step: 1
general:
  benchmark_csv_path: null
  consumed_train_samples: null
  ignore_sanity_checks: true
  project: llama3-3B-finetune
  run: fwedu-60B-resume
  seed: 6
  step: null
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    std: 0.041666666666666664
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 128000
    eos_token_id: 128001
    hidden_act: silu
    hidden_size: 3072
    initializer_range: 0.02
    intermediate_size: 8192
    is_llama_config: true
    max_position_embeddings: 4096
    num_attention_heads: 24
    num_hidden_layers: 28
    num_key_value_heads: 8
    pad_token_id: null
    pretraining_tp: 2
    rms_norm_eps: 1.0e-05
    rope_interleaved: false
    rope_scaling:
      factor: 32.0
      high_freq_factor: 4.0
      low_freq_factor: 1.0
      original_max_position_embeddings: 8192
      rope_type: llama3
    rope_theta: 500000.0
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 128256
optimizer:
  accumulate_grad_in_fp32: false
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.00005
    lr_decay_starting_step: 50000
    lr_decay_steps: 10000
    lr_decay_style: linear
    lr_warmup_steps: 1000
    lr_warmup_style: linear
    min_decay_lr: 0
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.01
  zero_stage: 0
parallelism:
  dp: 1
  expert_parallel_size: 1
  pp: 1
  pp_engine: 1f1b
  recompute_layer: false
  tp: 2
  tp_linear_async_communication: true
  tp_mode: REDUCE_SCATTER
  tp_recompute_allgather: true
profiler: null
tokenizer:
  tokenizer_max_length: null
  # tokenizer_name_or_path: meta-llama/Llama-3.2-3B
  tokenizer_name_or_path: lvwerra/the-tokenizer-v1
  tokenizer_revision: null
tokens:
  batch_accumulation_per_replica: 1
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 4
  sequence_length: 4096
  train_steps: 120000
  val_check_interval: -1
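
This variant differs from exp6_elie_original_config.yaml in only a handful of fields (checkpoint path, save_initial_state, fp32 gradient accumulation, ZeRO stage, tokenizer, and gradient-accumulation steps). A quick way to confirm exactly which keys changed is to flatten both YAML files and diff the leaves; the sketch below assumes both files sit under examples/ in a local checkout and uses plain PyYAML rather than nanotron's own config loader:

import yaml

def flatten(node, prefix=""):
    # Flatten nested dicts into dotted-key leaves; lists are kept as leaf values.
    if not isinstance(node, dict):
        return {prefix: node}
    items = {}
    for key, value in node.items():
        path = f"{prefix}.{key}" if prefix else str(key)
        items.update(flatten(value, path))
    return items

base_path = "examples/exp6_elie_original_config.yaml"
variant_path = "examples/exp6b0_elie_original_config_but_dp1_and_no_grad_accum.yaml"

with open(base_path) as f:
    base = flatten(yaml.safe_load(f))
with open(variant_path) as f:
    variant = flatten(yaml.safe_load(f))

for key in sorted(base.keys() | variant.keys()):
    if base.get(key) != variant.get(key):
        print(f"{key}: {base.get(key)!r} -> {variant.get(key)!r}")

Run against the two files in this commit, it should report changes only under the checkpoints, optimizer, tokenizer, and tokens sections.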
@@ -0,0 +1,106 @@
checkpoints:
  checkpoint_interval: 10000
  # checkpoints_path: /fsx/elie_bakouch/nanotron/debug-ckpt-cpuoom
  checkpoints_path: checkpoints/exp6b0_elie_original_config_but_dp1_and_no_grad_accum
  checkpoints_path_is_shared_file_system: false
  resume_checkpoint_path: null
  load_lr_scheduler: false
  load_optimizer: false
  save_final_state: true
  save_initial_state: false
data_stages:
- data:
    dataset:
      dataset_folder:
      - /fsx/elie_bakouch/data/fw-edu-dedup
    num_loading_workers: 1
    seed: 8
  name: stable phase
  start_training_step: 1
general:
  benchmark_csv_path: null
  consumed_train_samples: null
  ignore_sanity_checks: true
  project: issue1_nanosets_hanging
  run: exp6b1_elie_original_copnfig_but_dp1_and_no_grad_accum_and_num_loading_workers_1
  seed: 6
  step: null
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    std: 0.041666666666666664
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 128000
    eos_token_id: 128001
    hidden_act: silu
    hidden_size: 3072
    initializer_range: 0.02
    intermediate_size: 8192
    is_llama_config: true
    max_position_embeddings: 4096
    num_attention_heads: 24
    num_hidden_layers: 28
    num_key_value_heads: 8
    pad_token_id: null
    pretraining_tp: 2
    rms_norm_eps: 1.0e-05
    rope_interleaved: false
    rope_scaling:
      factor: 32.0
      high_freq_factor: 4.0
      low_freq_factor: 1.0
      original_max_position_embeddings: 8192
      rope_type: llama3
    rope_theta: 500000.0
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 128256
optimizer:
  accumulate_grad_in_fp32: false
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.00005
    lr_decay_starting_step: 50000
    lr_decay_steps: 10000
    lr_decay_style: linear
    lr_warmup_steps: 1000
    lr_warmup_style: linear
    min_decay_lr: 0
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.01
  zero_stage: 0
parallelism:
  dp: 1
  expert_parallel_size: 1
  pp: 1
  pp_engine: 1f1b
  recompute_layer: false
  tp: 2
  tp_linear_async_communication: true
  tp_mode: REDUCE_SCATTER
  tp_recompute_allgather: true
profiler: null
tokenizer:
  tokenizer_max_length: null
  # tokenizer_name_or_path: meta-llama/Llama-3.2-3B
  tokenizer_name_or_path: lvwerra/the-tokenizer-v1
  tokenizer_revision: null
tokens:
  batch_accumulation_per_replica: 1
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 4
  sequence_length: 4096
  train_steps: 120000
  val_check_interval: -1
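
When launching any of these configs, the number of processes has to match the product of the parallelism degrees (dp x pp x tp = 1 x 1 x 2 = 2 here). A sketch of a launcher, assuming nanotron's usual torchrun plus run_train.py --config-file entry point; the script name and flags are an assumption and should be adjusted to whatever your checkout actually provides:

import os
import subprocess
import yaml

config_path = "examples/exp6b0_elie_original_config_but_dp1_and_no_grad_accum.yaml"

with open(config_path) as f:
    cfg = yaml.safe_load(f)

# Process count must equal the product of the parallelism degrees.
par = cfg["parallelism"]
nproc = par["dp"] * par["pp"] * par["tp"]

env = dict(os.environ, CUDA_DEVICE_MAX_CONNECTIONS="1")

# run_train.py and --config-file mirror nanotron's documented entry point;
# change them if your checkout differs.
subprocess.run(
    ["torchrun", f"--nproc_per_node={nproc}", "run_train.py",
     "--config-file", config_path],
    check=True,
    env=env,
)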
