initial prototype for unit mup, but don't get std across outputs
xrsrke committed Sep 17, 2024
1 parent e255511 · commit f6a19cf
Showing 11 changed files with 719 additions and 241 deletions.
109 changes: 109 additions & 0 deletions examples/unit_mup/config_tiny_llama.yaml
@@ -0,0 +1,109 @@
checkpoints:
  checkpoint_interval: 50000
  checkpoints_path: checkpoints
  checkpoints_path_is_shared_file_system: false
  save_initial_state: false
data_stages:
- data:
    dataset:
      dataset_overwrite_cache: false
      dataset_processing_num_proc_per_process: 1
      hf_dataset_config_name: null
      hf_dataset_or_datasets: roneneldan/TinyStories
      hf_dataset_splits: train
      text_column_name: text
    num_loading_workers: 1
    seed: 42
  name: Stable Training Stage
  start_training_step: 1
general:
  benchmark_csv_path: null
  consumed_train_samples: null
  ignore_sanity_checks: true
  project: fp8_for_nanotron
  run: exp202_like_53_200m_kfloat16_mw_and_1stmom_fp8e4m3
  seed: 42
  step: null
lighteval: null
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
  monitor_fwd_states: true
  monitor_optim_states: true
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    # std: 0.25 # sqrt(1/16)
    # std: 0.125 # sqrt(1/64)
    std: 0.04419417382415922 # sqrt(1/512)
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 1
    eos_token_id: 2
    hidden_act: silu
    hidden_size: 512
    initializer_range: 0.02
    intermediate_size: 2048
    is_llama_config: true
    max_position_embeddings: 128
    num_attention_heads: 16
    num_hidden_layers: 14
    num_key_value_heads: 16
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_scaling: null
    tie_word_embeddings: false
    use_cache: true
    vocab_size: 49152

optimizer:
  accumulate_grad_in_fp32: false
  learning_rate_scheduler:
    learning_rate: 0.0006
    lr_decay_starting_step: null
    lr_decay_steps: null
    lr_decay_style: cosine
    lr_warmup_steps: 5000 # ~10% of total training steps
    lr_warmup_style: linear
    min_decay_lr: 0.00006

  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: custom_adamW
    torch_adam_is_fused: true
    weight_decay: 2.0e-13
  zero_stage: 0
  clip_grad: 0
  # update_clipping: true

parallelism:
  dp: 1
  expert_parallel_size: 1
  pp: 1
  pp_engine: 1f1b
  tp: 1
  tp_linear_async_communication: false
  tp_mode: ALL_REDUCE

profiler: null
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: lvwerra/the-tokenizer-v1
  tokenizer_revision: null
tokens:
  # NOTE: tokens per step = micro_batch_size * sequence_length * batch_accumulation_per_replica
  # = 64 * 128 * 1 = 8192
  batch_accumulation_per_replica: 1
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 64 # 256
  # micro_batch_size: 1
  sequence_length: 128
  train_steps: 48000
  val_check_interval: -1
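Two pieces of arithmetic in this config are worth making explicit: the init_method.std values (including the commented-out alternatives) all follow std = sqrt(1/hidden_size), and the NOTE in the tokens section fixes the per-step token count. A small stand-alone check (plain Python, not part of the commit):

```python
import math

# init_method.std = sqrt(1 / hidden_size); the commented-out values in the
# config are the same rule evaluated at hidden sizes 16 and 64.
for hidden_size in (16, 64, 512):
    print(hidden_size, math.sqrt(1 / hidden_size))
# 16  -> 0.25
# 64  -> 0.125
# 512 -> 0.04419417382415922  (the active value, since hidden_size: 512)

# Tokens per optimizer step, per the NOTE in the tokens section:
micro_batch_size, sequence_length, grad_accum = 64, 128, 1
tokens_per_step = micro_batch_size * sequence_length * grad_accum
print(tokens_per_step)            # 8192
print(tokens_per_step * 48_000)   # 393,216,000 tokens over train_steps
```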
34 changes: 18 additions & 16 deletions src/nanotron/helpers.py
@@ -305,22 +305,24 @@ def init_optimizer_and_grad_accumulator(
     # Fix the root_model
     module_id_to_prefix[id(unwrapped_model)] = ""
 
-    named_parameters = list(unwrapped_model.get_named_params_with_correct_tied())
-
-    named_param_groups_with_lr = get_custom_lr_for_named_parameters(
-        parametrization_method=parametrization_method,
-        named_parameters=named_parameters,
-        model=unwrapped_model,
-        lr=optimizer_args.learning_rate_scheduler.learning_rate,
-    )
-    named_param_groups_with_weight_decay = get_custom_weight_decay_for_named_parameters(
-        named_parameters=named_parameters,
-        model=unwrapped_model,
-        module_id_to_prefix=module_id_to_prefix,
-        weight_decay=optimizer_args.weight_decay,
-    )
-
-    named_param_groups = merge_named_param_groups(named_param_groups_with_lr, named_param_groups_with_weight_decay)
+    # named_parameters = list(unwrapped_model.get_named_params_with_correct_tied())
+
+    # named_param_groups_with_lr = get_custom_lr_for_named_parameters(
+    #     parametrization_method=parametrization_method,
+    #     named_parameters=named_parameters,
+    #     model=unwrapped_model,
+    #     lr=optimizer_args.learning_rate_scheduler.learning_rate,
+    # )
+    # named_param_groups_with_weight_decay = get_custom_weight_decay_for_named_parameters(
+    #     named_parameters=named_parameters,
+    #     model=unwrapped_model,
+    #     module_id_to_prefix=module_id_to_prefix,
+    #     weight_decay=optimizer_args.weight_decay,
+    # )
+
+    # named_param_groups = merge_named_param_groups(named_param_groups_with_lr, named_param_groups_with_weight_decay)
+    named_parameters = unwrapped_model.named_parameters()
+    named_param_groups = named_parameters
 
     # Basic optimizer builder
     def basic_optimizer_builder(named_param_groups):
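In effect, this hunk bypasses the per-parameter learning-rate and weight-decay grouping (get_custom_lr_for_named_parameters / get_custom_weight_decay_for_named_parameters) and feeds named_parameters() straight to the optimizer builder, so every parameter now receives the global lr and weight decay. A rough sketch of that behavioral difference in plain PyTorch; the 1/fan_in learning-rate rule is an illustrative stand-in for whatever scaling the bypassed helpers apply, not nanotron's actual implementation:

```python
import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(512, 2048), nn.Linear(2048, 512))
base_lr = 6e-4  # optimizer.learning_rate_scheduler.learning_rate in the config

# Before: one group per parameter, so lr (and weight decay) can vary per layer,
# e.g. muP-style scaling of matrix weights by 1/fan_in (illustrative only).
grouped = [
    {"params": [p], "lr": base_lr / p.shape[1] if p.ndim >= 2 else base_lr}
    for _, p in model.named_parameters()
]
opt_before = torch.optim.AdamW(grouped, lr=base_lr, betas=(0.9, 0.95), weight_decay=2e-13)

# After this commit: a single uniform group; every parameter gets the
# global lr and weight decay from the optimizer config.
opt_after = torch.optim.AdamW(model.parameters(), lr=base_lr, betas=(0.9, 0.95), weight_decay=2e-13)
```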
(diffs for the remaining 9 changed files not shown)
