diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index c7401cd84e..f432c7f210 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -14,8 +14,16 @@ variables: &VARS TESTS_TO_RUN_AFTER_MERGING: "MR_TESTS NIGHTLY_TESTS" # Can specify levels TESTS_TO_RUN_ON_THIS_COMMIT: unit_tests TEST_REGEX_ON_THIS_COMMIT: NONE #https://github.com/google/re2/wiki/Syntax (Can define regex as in this spec) e.g /.*gpt3.*/ + JET_CUSTOM_FILTER: "" DISPLAY_OUTPUT: "True" # Set to true for new tests to copy the logs for creating golden truth file TIME_LIMIT: "10:00" # Default time limit for all jobs + MOE_GROUPED_GEMM: 0 # Set to 1 to enable grouped gemm for MoE + JET_CLUSTER_BRANCH: + value: "mcore/draco-oci" + options: + - "mcore/draco-oci" + - "mcore/eos" + description: '"mcore/draco-oci" for OCI-IAD, "mcore/eos" for EOS' include: @@ -32,6 +40,7 @@ unit_tests: - pip install nltk - pip install wrapt - pip install zarr "tensorstore==0.1.45" # for distributed checkpointing tests + - pip install git+https://github.com/fanshiqing/grouped_gemm@main # for grouped gemm tests - torchrun --nproc_per_node=8 -m pytest --cov-report=term --cov-report=html --cov=megatron/core tests/unit_tests coverage: '/(?i)total.*? (100(?:\.0+)?\%|[1-9]?\d(?:\.\d+)?\%)$/' artifacts: @@ -67,29 +76,6 @@ formatting: rules: - when: always -.selene_test_resume_checkpoint_launcher: &selene-test-resume-checkpoint-launcher - tags: - - ssh_selene_runner - stage: test - script: &selene-test-resume-launcher-script - - echo "Running selene resume from checkpoint test. " - - pwd - - run_cmd="bash tests/functional_tests/shell_test_utils/run_selene_test_resume_checkpoint_launcher_script.sh RUN_MODEL=$RUN_MODEL TP_SIZE=$TP_SIZE PP_SIZE=$PP_SIZE VP_SIZE=$VP_SIZE NUM_NODES=$NUM_NODES SELENE_ADLR_CI_PATH=$SELENE_ADLR_CI_PATH CI_PIPELINE_ID=$CI_PIPELINE_ID RUN_NAME=$RUN_NAME PYTORCH_IMAGE=$PYTORCH_IMAGE DATA_DIR=$DATA_DIR TIME_LIMIT=$TIME_LIMIT" - - echo "$run_cmd" - - ${run_cmd} - - echo "Completed the job" - rules: - - if: $TEST_LEVEL =~ $TESTS_TO_RUN_ON_THIS_COMMIT || $CI_JOB_NAME =~ $TESTS_TO_RUN_ON_THIS_COMMIT || $CI_JOB_NAME =~ $TEST_REGEX_ON_THIS_COMMIT - when: always - - if: '$CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGING' - when: always - - if: $CI_MERGE_REQUEST_APPROVED && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED - when: always - - if: '$CI_MERGE_REQUEST_LABELS == "READY FOR REVIEW" && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED' - when: always - allow_failure: false - retry: 2 - .selene_test_launcher: &selene-test-launcher tags: - ssh_selene_runner @@ -97,7 +83,7 @@ formatting: script: &selene-test-launcher-script - echo "Running selene test" - pwd - - run_cmd="bash tests/functional_tests/shell_test_utils/run_selene_test_launcher_script.sh RUN_MODEL=$RUN_MODEL TP_SIZE=$TP_SIZE PP_SIZE=$PP_SIZE VP_SIZE=$VP_SIZE NUM_NODES=$NUM_NODES SELENE_ADLR_CI_PATH=$SELENE_ADLR_CI_PATH CI_PIPELINE_ID=$CI_PIPELINE_ID RUN_NAME=$RUN_NAME MAX_STEPS=$MAX_STEPS PYTORCH_IMAGE=$PYTORCH_IMAGE DATA_DIR=$DATA_DIR USE_CORE=$USE_CORE USE_TE=$USE_TE TIME_LIMIT=$TIME_LIMIT" + - run_cmd="bash tests/functional_tests/shell_test_utils/run_selene_test_launcher_script.sh RUN_MODEL=$RUN_MODEL TP_SIZE=$TP_SIZE PP_SIZE=$PP_SIZE VP_SIZE=$VP_SIZE NUM_NODES=$NUM_NODES SELENE_ADLR_CI_PATH=$SELENE_ADLR_CI_PATH CI_PIPELINE_ID=$CI_PIPELINE_ID RUN_NAME=$RUN_NAME MAX_STEPS=$MAX_STEPS PYTORCH_IMAGE=$PYTORCH_IMAGE DATA_DIR=$DATA_DIR USE_CORE=$USE_CORE USE_TE=$USE_TE MOE_GROUPED_GEMM=$MOE_GROUPED_GEMM TIME_LIMIT=$TIME_LIMIT" - echo "$run_cmd" 
- ${run_cmd} - echo "Completed the job" @@ -106,748 +92,9 @@ formatting: when: always - if: '$CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGING' when: always - - if: $CI_MERGE_REQUEST_APPROVED && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED + - if: $CI_PIPELINE_SOURCE == "merge_request_event" && $CI_MERGE_REQUEST_APPROVED && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED when: always - - if: '$CI_MERGE_REQUEST_LABELS == "READY FOR REVIEW" && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED' + - if: $CI_PIPELINE_SOURCE == "merge_request_event" && $CI_MERGE_REQUEST_LABELS =~ "READY FOR REVIEW" && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED when: always allow_failure: false retry: 2 - -train.te_gpt3.345m_tp2_pp2_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 1 - TP_SIZE: 2 - PP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - TIME_LIMIT: "20:00" - TEST_LEVEL: MR_TESTS - PYTORCH_IMAGE: nvcr.io/nvidia/pytorch:23.07-py3 - -train.gpt3_core.345m_tp4_pp1_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 4 - PP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 1 - TEST_LEVEL: NIGHTLY_TESTS - -train.gpt3_core.345m_tp2_pp2_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 2 - PP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 1 - TEST_LEVEL: MR_TESTS - -train.gpt3_core.345m_tp1_pp2_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 1 - TIME_LIMIT: "10:00" - TEST_LEVEL: NIGHTLY_TESTS - -train.gpt3_core.345m_tp1_pp4_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 4 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 1 - TEST_LEVEL: NIGHTLY_TESTS - -train.gpt3_core.345m_tp1_pp4_interleaved_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 4 - VP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 1 - TEST_LEVEL: MR_TESTS - -train.gpt3_core.345m_tp1_pp2_1node_50steps_rope: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 1 - TEST_LEVEL: MR_TESTS - METADATA: rope_embeddings - ADDITIONAL_PARAMS: "--position-embedding-type rope" - -train.gpt3_core.345m_tp1_pp4_1node_50steps_swiglu: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 4 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 1 - TEST_LEVEL: MR_TESTS - METADATA: swiglu - ADDITIONAL_PARAMS: "--swiglu" - -train.gpt3_core.345m_tp1_pp4_1node_50steps_disable_bias_linear: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 4 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 1 - TEST_LEVEL: MR_TESTS - METADATA: disable_bias_linear - ADDITIONAL_PARAMS: "--disable-bias-linear" - -train.gpt3_core.345m_tp1_pp4_1node_50steps_untie_embeddings_and_outputs: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 4 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 1 - TEST_LEVEL: MR_TESTS - METADATA: untie_embeddings_and_outputs - ADDITIONAL_PARAMS: "--untie-embeddings-and-output-weights" - 
-train.gpt3_core.345m_tp1_pp4_1node_50steps_sequence_parallel: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 4 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 1 - TEST_LEVEL: MR_TESTS - METADATA: sequence_parallel - ADDITIONAL_PARAMS: "--sequence-parallel" - -train.gpt3.345m_tp4_pp1_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 4 - PP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: NIGHTLY_TESTS - -train.gpt3.345m_tp2_pp2_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 2 - PP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: MR_TESTS - -train.gpt3.345m_tp1_pp2_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: NIGHTLY_TESTS - -train.gpt3.345m_tp1_pp4_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 4 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: NIGHTLY_TESTS - -train.gpt3.345m_tp1_pp4_interleaved_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 4 - VP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: MR_TESTS - -resume.checkpoint.gpt3.345m_tp1_pp2_1node: - <<: *selene-test-resume-checkpoint-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - TP_SIZE: 1 - PP_SIZE: 2 - NUM_NODES: 1 - TIME_LIMIT: "15:00" - TEST_LEVEL: MR_TESTS - -train.gpt3.345m_tp1_pp1_1node_50steps_dist_optimizer: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: MR_TESTS - METADATA: dist_optimizer - ADDITIONAL_PARAMS: "--use-distributed-optimizer" - -train.gpt3.345m_tp1_pp1_1node_50steps_overlap_grad_reduce: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: NIGHTLY_TESTS - METADATA: overlap_grad_reduce - ADDITIONAL_PARAMS: "--overlap-grad-reduce" - -train.gpt3.345m_tp1_pp1_1node_50steps_dist_optimizer_overlap_grad_reduce: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: NIGHTLY_TESTS - METADATA: dist_optimizer_overlap_grad_reduce - ADDITIONAL_PARAMS: "--use-distributed-optimizer --overlap-grad-reduce" - -train.gpt3.345m_tp4_pp1_1node_50steps_overlap_grad_reduce: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 4 - PP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: NIGHTLY_TESTS - METADATA: overlap_grad_reduce - ADDITIONAL_PARAMS: "--overlap-grad-reduce" - -train.gpt3.345m_tp4_pp1_1node_50steps_dist_optimizer_overlap_grad_reduce: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 4 - PP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: MR_TESTS - METADATA: dist_optimizer_overlap_grad_reduce - ADDITIONAL_PARAMS: "--use-distributed-optimizer --overlap-grad-reduce" - -train.gpt3.345m_tp1_pp4_1node_50steps_overlap_grad_reduce: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - 
TP_SIZE: 1 - PP_SIZE: 4 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: NIGHTLY_TESTS - METADATA: overlap_grad_reduce - ADDITIONAL_PARAMS: "--overlap-grad-reduce" - -train.gpt3.345m_tp1_pp4_interleaved_1node_50steps_overlap_grad_reduce: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 4 - VP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: NIGHTLY_TESTS - METADATA: overlap_grad_reduce - ADDITIONAL_PARAMS: "--overlap-grad-reduce" - -train.gpt3.345m_tp1_pp4_interleaved_1node_50steps_dist_optimizer_overlap_grad_reduce: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 4 - VP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: MR_TESTS - METADATA: dist_optimizer_overlap_grad_reduce - ADDITIONAL_PARAMS: "--use-distributed-optimizer --overlap-grad-reduce" - -train.gpt3.345m_tp2_pp2_1node_50steps_overlap_grad_reduce: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 2 - PP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: NIGHTLY_TESTS - METADATA: overlap_grad_reduce - ADDITIONAL_PARAMS: "--overlap-grad-reduce" - -train.gpt3_core.345m_cp2_tp2_pp1_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 2 - PP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 1 - TIME_LIMIT: "20:00" - TEST_LEVEL: MR_TESTS - METADATA: "context_parallelism_cp2" - PYTORCH_IMAGE: "/lustre/fsw/adlr/adlr-nlp/adlr_ci/megatron/pytorch_23.10_flash_attn_1.0.9_context_parallelism.sqsh" - ADDITIONAL_PARAMS: "--context-parallel-size 2 --sequence-parallel --hidden-dropout 0.0 --attention-dropout 0.0" - -# Note: Core MoE models currently will run TE by default -train.te_core_moe_gpt3.345m_tp2_pp2_2experts_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 2 - PP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 1 - TEST_LEVEL: NIGHTLY_TESTS - METADATA: "te_2experts" - ADDITIONAL_PARAMS: "--num-experts 2" - -train.te_core_moe_gpt3.345m_tp2_pp2_4experts2parallel_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 2 - PP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 1 - TEST_LEVEL: NIGHTLY_TESTS - METADATA: "te_4experts2parallel" - ADDITIONAL_PARAMS: "--sequence-parallel --num-experts 4 --expert-model-parallel-size 2" - -train.te_core_moe_gpt3.345m_tp2_pp1_8experts2parallel_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 2 - PP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 1 - TEST_LEVEL: MR_TESTS - METADATA: "te_8experts2parallel" - ADDITIONAL_PARAMS: "--sequence-parallel --num-experts 8 --expert-model-parallel-size 2" - -train.moe_gpt3.345m_tp2_pp2_4experts_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 2 - PP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: NIGHTLY_TESTS - METADATA: "4experts" - ADDITIONAL_PARAMS: "--num-experts 4" - -train.bert.345m_tp4_pp1_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: bert - TP_SIZE: 4 - PP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - TIME_LIMIT: "10:00" - TEST_LEVEL: NIGHTLY_TESTS - -train.bert.345m_tp2_pp2_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: bert - TP_SIZE: 2 - 
PP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - TEST_LEVEL: MR_TESTS - -train.bert.345m_tp1_pp2_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: bert - TP_SIZE: 1 - PP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - TEST_LEVEL: NIGHTLY_TESTS - -train.bert.345m_tp1_pp4_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: bert - TP_SIZE: 1 - PP_SIZE: 4 - NUM_NODES: 1 - MAX_STEPS: 50 - TEST_LEVEL: NIGHTLY_TESTS - -train.bert.345m_tp1_pp4_interleaved_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: bert - TP_SIZE: 1 - PP_SIZE: 4 - VP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - TEST_LEVEL: MR_TESTS - -train.bert_core.345m_tp4_pp1_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: bert - TP_SIZE: 4 - PP_SIZE: 1 - NUM_NODES: 1 - USE_CORE: 1 - MAX_STEPS: 50 - TIME_LIMIT: "20:00" - TEST_LEVEL: NIGHTLY_TESTS - -train.bert_core.345m_tp2_pp2_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: bert - TP_SIZE: 2 - PP_SIZE: 2 - NUM_NODES: 1 - USE_CORE: 1 - MAX_STEPS: 50 - TIME_LIMIT: "20:00" - TEST_LEVEL: MR_TESTS - -train.bert_core.345m_tp1_pp2_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: bert - TP_SIZE: 1 - PP_SIZE: 2 - NUM_NODES: 1 - USE_CORE: 1 - MAX_STEPS: 50 - TIME_LIMIT: "20:00" - TEST_LEVEL: NIGHTLY_TESTS - -train.bert_core.345m_tp1_pp4_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: bert - TP_SIZE: 1 - PP_SIZE: 4 - VP_SIZE: 2 - NUM_NODES: 1 - USE_CORE: 1 - MAX_STEPS: 50 - TIME_LIMIT: "20:00" - TEST_LEVEL: NIGHTLY_TESTS - -train.bert_core.345m_tp1_pp2_1node_50steps_rope: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: bert - TP_SIZE: 1 - PP_SIZE: 2 - NUM_NODES: 1 - USE_CORE: 1 - MAX_STEPS: 50 - TIME_LIMIT: "20:00" - TEST_LEVEL: L0 - METADATA: rope_embeddings - ADDITIONAL_PARAMS: "--position-embedding-type rope" - -train.bert_core.345m_tp1_pp2_1node_50steps_sequence_parallel: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: bert - TP_SIZE: 1 - PP_SIZE: 2 - NUM_NODES: 1 - USE_CORE: 1 - MAX_STEPS: 50 - TIME_LIMIT: "20:00" - TEST_LEVEL: L0 - METADATA: sequence_parallel - ADDITIONAL_PARAMS: "--sequence-parallel" - -resume.checkpoint.bert.345m_tp1_pp2_1node: - <<: *selene-test-resume-checkpoint-launcher - variables: - <<: [*VARS] - RUN_MODEL: bert - TP_SIZE: 1 - PP_SIZE: 2 - NUM_NODES: 1 - TEST_LEVEL: MR_TESTS - -train.retro_core.tp1_pp1_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: retro - USE_TE: 0 - USE_CORE: 1 - TP_SIZE: 1 - PP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - TIME_LIMIT: "20:00" - TEST_LEVEL: MONTHLY_TESTS - -train.t5_core.220m_tp1_pp1_1node_100steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: t5 - USE_TE: 0 - USE_CORE: 1 - TP_SIZE: 1 - PP_SIZE: 1 - VP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 100 - TIME_LIMIT: "30:00" - TEST_LEVEL: MONTHLY_TESTS - PYTORCH_IMAGE: nvcr.io/nvidia/pytorch:23.07-py3 - -train.t5_core.220m_tp2_pp1_1node_100steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: t5 - USE_TE: 0 - USE_CORE: 1 - TP_SIZE: 2 - PP_SIZE: 1 - VP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 100 - TIME_LIMIT: "30:00" - TEST_LEVEL: MONTHLY_TESTS - PYTORCH_IMAGE: nvcr.io/nvidia/pytorch:23.07-py3 - -train.t5_core.220m_te_tp1_pp1_1node_100steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: t5 - USE_TE: 1 - USE_CORE: 1 - TP_SIZE: 1 - PP_SIZE: 1 - 
VP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 100 - TIME_LIMIT: "30:00" - TEST_LEVEL: MR_TESTS - PYTORCH_IMAGE: nvcr.io/nvidia/pytorch:23.07-py3 - -train.t5_core.220m_te_tp2_pp1_1node_100steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: t5 - USE_TE: 1 - USE_CORE: 1 - TP_SIZE: 2 - PP_SIZE: 1 - VP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 100 - TIME_LIMIT: "30:00" - TEST_LEVEL: MONTHLY_TESTS - PYTORCH_IMAGE: nvcr.io/nvidia/pytorch:23.07-py3 - -train.t5_core.220m_te_tp2_pp1_sp_1node_100steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: t5 - USE_TE: 1 - USE_CORE: 1 - TP_SIZE: 2 - PP_SIZE: 1 - VP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 100 - TIME_LIMIT: "30:00" - TEST_LEVEL: MONTHLY_TESTS - PYTORCH_IMAGE: nvcr.io/nvidia/pytorch:23.07-py3 - ADDITIONAL_PARAMS: "--sequence-parallel" - -resume.checkpoint.t5_core.220m_tp1_pp1_1node: - <<: *selene-test-resume-checkpoint-launcher - variables: - <<: [*VARS] - RUN_MODEL: t5 - USE_TE: 0 - USE_CORE: 1 - TP_SIZE: 1 - PP_SIZE: 1 - VP_SIZE: 1 - NUM_NODES: 1 - TIME_LIMIT: "30:00" - TEST_LEVEL: MONTHLY_TESTS - PYTORCH_IMAGE: nvcr.io/nvidia/pytorch:23.07-py3 - -resume.checkpoint.t5_core.220m_te_tp1_pp1_1node: - <<: *selene-test-resume-checkpoint-launcher - variables: - <<: [*VARS] - RUN_MODEL: t5 - USE_TE: 1 - USE_CORE: 1 - TP_SIZE: 1 - PP_SIZE: 1 - VP_SIZE: 1 - NUM_NODES: 1 - TIME_LIMIT: "30:00" - TEST_LEVEL: MONTHLY_TESTS - PYTORCH_IMAGE: nvcr.io/nvidia/pytorch:23.07-py3 - -cleanup.selene: - tags: - - ssh_selene_runner - stage: cleanup - variables: - <<: [*VARS] - script: - - set +e - - NUM_CLEANUP=`find ${SELENE_ADLR_CI_PATH}/* -type d -ctime +20 | grep -v data | wc -l` - - find ${SELENE_ADLR_CI_PATH}/* -type d -ctime +20 | grep -v data | xargs rm -rf - - find ${SELENE_ADLR_CI_PATH}/* -type d -name "checkpoints" -ctime +2 | grep -v data | xargs rm -rf - - echo "Finished cleaning $NUM_CLEANUP directories older than 20 days everything in Selene" - allow_failure: true - rules: - - when: always diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000000..b3356b76e1 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1 @@ +include megatron/core/requirements.txt diff --git a/README.md b/README.md index 81b23c9ed3..a7a06c621d 100644 --- a/README.md +++ b/README.md @@ -241,7 +241,7 @@ With full global batch size of 1536 on 1024 A100 GPUs, each iteration takes arou Retro [(Borgeaud et al., 2022)](https://arxiv.org/abs/2112.04426) is an autoregressive decoder-only language model (LM) pretrained with retrieval-augmentation. -Retro features practical scalibility to support large-scale pretraining from scratch by retrieving from trillions of token. +Retro features practical scalability to support large-scale pretraining from scratch by retrieving from trillions of tokens. Pretraining with retrieval provides a more efficient storage mechanism of factual knowledge, when compared to storing factual knowledge implicitly within the network's parameters, thus largely reducing model parameters while achieving lower perplexity than standard GPT. Retro also provides the flexibility to update the knowledge stored in LMs [(Wang et al., 2023a)](https://arxiv.org/abs/2304.06762) @@ -519,6 +519,12 @@ The Llama-2 [family of models](https://ai.meta.com/llama/) are an open-source se The Llama-2 checkpoints can be loaded into Megatron for inference and finetuning. See documentation [here](docs/llama2.md). 
+# Model Optimization and Deployment +Megatron-Core (MCore) `GPTModel` family supports advanced quantization algorithms and high-performance deployment through TensorRT-LLM. + +## Quantization and TensorRT-LLM Deployment +See [Megatron Model Optimization and Deployment](examples/modelopt/README.md) for `llama2` and `nemotron3` examples. + # Datasets We do not host any datasets for GPT or BERT training, however, we detail their collection so that our results may be reproduced. diff --git a/docs/source/api-guide/dist_checkpointing.rst b/docs/source/api-guide/dist_checkpointing.rst index 67c4f6f525..7e384a08a3 100644 --- a/docs/source/api-guide/dist_checkpointing.rst +++ b/docs/source/api-guide/dist_checkpointing.rst @@ -1,6 +1,15 @@ dist\_checkpointing package =========================== +A library for saving and loading the distributed checkpoints. +A "distributed checkpoint" can have various underlying formats (current default format is based on Zarr) +but has a distinctive property - the checkpoint saved in one parallel configuration (tensor/pipeline/data parallelism) +can be loaded in a different parallel configuration. + +Using the library requires defining sharded state_dict dictionaries with functions from *mapping* and *optimizer* modules. +Those state dicts can be saved or loaded with a *serialization* module using strategies from *strategies* module. + + Subpackages ----------- @@ -12,18 +21,10 @@ Subpackages Submodules ---------- -dist\_checkpointing.core module -------------------------------- - -.. automodule:: core.dist_checkpointing.core - :members: - :undoc-members: - :show-inheritance: - -dist\_checkpointing.dict\_utils module --------------------------------------- +dist\_checkpointing.serialization module +---------------------------------------- -.. automodule:: core.dist_checkpointing.dict_utils +.. automodule:: core.dist_checkpointing.serialization :members: :undoc-members: :show-inheritance: @@ -44,14 +45,23 @@ dist\_checkpointing.optimizer module :undoc-members: :show-inheritance: -dist\_checkpointing.serialization module ----------------------------------------- +dist\_checkpointing.core module +------------------------------- -.. automodule:: core.dist_checkpointing.serialization +.. automodule:: core.dist_checkpointing.core + :members: + :undoc-members: + :show-inheritance: + +dist\_checkpointing.dict\_utils module +-------------------------------------- + +.. automodule:: core.dist_checkpointing.dict_utils :members: :undoc-members: :show-inheritance: + dist\_checkpointing.utils module -------------------------------- diff --git a/docs/source/api-guide/dist_checkpointing.strategies.rst b/docs/source/api-guide/dist_checkpointing.strategies.rst index c18d2464c2..41e674c761 100644 --- a/docs/source/api-guide/dist_checkpointing.strategies.rst +++ b/docs/source/api-guide/dist_checkpointing.strategies.rst @@ -1,6 +1,11 @@ dist\_checkpointing.strategies package ====================================== +Package defining different checkpoint formats (backends) and saving/loading algorithms (strategies). + +Strategies can be used for implementing new checkpoint formats or implementing new (more optimal for a given use case) ways of saving/loading of existing formats. +Strategies are passed to `dist_checkpointing.load` and `dist_checkpointing.save` functions and control the actual saving/loading procedure. 
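The mapping/serialization split described above is easiest to see with a small usage sketch. The following example is illustrative only: the tensor key, shapes, and checkpoint path are invented, and the exact signatures of `ShardedTensor.from_rank_offsets`, `dist_checkpointing.save`, and `dist_checkpointing.load` should be checked against the installed Megatron-Core version.

```python
# Minimal sketch (assumed API; verify against your Megatron-Core version).
# Each tensor-parallel rank declares the shard it owns via ShardedTensor, and the
# serialization module saves/loads only that shard, which is what allows the
# checkpoint to be reloaded under a different parallel configuration.
import torch
from megatron.core import dist_checkpointing
from megatron.core.dist_checkpointing import ShardedTensor


def build_sharded_state_dict(local_weight: torch.Tensor, tp_rank: int, tp_size: int):
    # 'decoder.linear.weight' is a hypothetical key; the weight is sharded
    # along dim 0 across tp_size ranks.
    return {
        'decoder.linear.weight': ShardedTensor.from_rank_offsets(
            'decoder.linear.weight', local_weight, (0, tp_rank, tp_size)
        )
    }


def save_and_reload(local_weight, tp_rank, tp_size, ckpt_dir='/tmp/dist_ckpt'):
    # Requires an initialized torch.distributed process group.
    dist_checkpointing.save(build_sharded_state_dict(local_weight, tp_rank, tp_size), ckpt_dir)
    # Loading fills buffers described by a (possibly differently sharded) state dict.
    buffers = build_sharded_state_dict(torch.empty_like(local_weight), tp_rank, tp_size)
    return dist_checkpointing.load(buffers, ckpt_dir)
```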
+ Submodules ---------- diff --git a/docs/source/api-guide/distributed.rst b/docs/source/api-guide/distributed.rst index 37b315303b..737820331c 100644 --- a/docs/source/api-guide/distributed.rst +++ b/docs/source/api-guide/distributed.rst @@ -1,6 +1,14 @@ distributed package =================== +This package contains various utilities to finalize model weight gradients +on each rank before the optimizer step. This includes a distributed data +parallelism wrapper to all-reduce or reduce-scatter the gradients across +data-parallel replicas, and a `finalize\_model\_grads` method to +synchronize gradients across different parallelism modes (e.g., 'tied' +layers on different pipeline stages, or gradients for experts in a MoE on +different ranks due to expert parallelism). + Submodules ---------- @@ -21,10 +29,10 @@ reduce-scatter on each bucket asynchronously. distributed.finalize\_model\_grads ---------------------------------- -Finalize model grads for optimizer step across all used parallelism modes. -Synchronizes the all-reduce / reduce-scatter of model grads across DP replicas, -and all-reduces the layernorm grads for sequence parallelism, embedding grads -across first and last pipeline stages (if not tied), and expert grads for expert +Finalize model gradients for optimizer step across all used parallelism modes. +Synchronizes the all-reduce / reduce-scatter of model gradients across DP replicas, +all-reduces the layernorm gradients for sequence parallelism, embedding gradients +across first and last pipeline stages (if not tied), and expert gradients for expert parallelism. .. automodule:: core.distributed.finalize_model_grads diff --git a/docs/source/api-guide/pipeline_parallel.rst b/docs/source/api-guide/pipeline_parallel.rst index b7f3511f5b..5c67079a70 100644 --- a/docs/source/api-guide/pipeline_parallel.rst +++ b/docs/source/api-guide/pipeline_parallel.rst @@ -1,12 +1,22 @@ pipeline\_parallel package ========================== +This package contains implementations for two different pipeline parallelism +schedules (one without interleaving and one with interleaving, see `Efficient +Large-Scale Language Model Training on GPU Clusters Using Megatron-LM `_ +for details), and a default no-pipelining schedule. It also contains methods +for the point-to-point communication that is needed between pipeline stages. + Submodules ---------- pipeline\_parallel.p2p\_communication module -------------------------------------------- +Contains implementations for the various point-to-point communication needed +(e.g., `recv_forward` and `recv_backward`) in the different pipeline parallelism +schedules. + .. automodule:: core.pipeline_parallel.p2p_communication :members: :undoc-members: @@ -15,6 +25,14 @@ pipeline\_parallel.p2p\_communication module pipeline\_parallel.schedules module ----------------------------------- +Contains implementations for two pipeline parallelism schedules +(`forward_backward_pipelining_with_interleaving`for pipeline parallelism with +interleaving, `forward_backward_pipelining_without_interleaving` for pipeline +parallelism without interleaving) and a default no-pipelining schedule +(`forward_backward_no_pipelining`). `get_forward_backward_func` returns the right +scheduling function to use based on the configuration being trained +(e.g., if pipeline-parallel size is 1, use `forward_backward_no_pipelining`). + .. 
automodule:: core.pipeline_parallel.schedules :members: :undoc-members: diff --git a/docs/source/api-guide/tensor_parallel.rst b/docs/source/api-guide/tensor_parallel.rst index 82b29f7866..d8ae9dea22 100644 --- a/docs/source/api-guide/tensor_parallel.rst +++ b/docs/source/api-guide/tensor_parallel.rst @@ -1,6 +1,12 @@ tensor\_parallel package ======================== +This package contains an implementation for tensor parallelism in transformer +models (see `Megatron-LM: Training Multi-Billion Parameter Language Models +Using Model Parallelism `_ and `Reducing +Activation Recomputation in Large Transformer Models `_ +for details). + Submodules ---------- diff --git a/examples/bert/train_bert_340m_distributed.sh b/examples/bert/train_bert_340m_distributed.sh index b9019fcecf..7d489917e5 100644 --- a/examples/bert/train_bert_340m_distributed.sh +++ b/examples/bert/train_bert_340m_distributed.sh @@ -12,9 +12,9 @@ NUM_NODES=1 NODE_RANK=0 WORLD_SIZE=$(($GPUS_PER_NODE*$NUM_NODES)) -CHECKPOINT_PATH=$0 # -TENSORBOARD_LOGS_PATH=$1 # -VOCAB_FILE=$2 #/bert-vocab.json +CHECKPOINT_PATH=$1 # +TENSORBOARD_LOGS_PATH=$2 # +VOCAB_FILE=$3 #/bert-vocab.json DATA_PATH=$4 #_text_document DISTRIBUTED_ARGS=( diff --git a/examples/deploy/README.md b/examples/deploy/README.md new file mode 100644 index 0000000000..c63993e9ca --- /dev/null +++ b/examples/deploy/README.md @@ -0,0 +1,132 @@ +# Megatron Model Optimization and Deployment + +## Installation +We recommend that users follow TensorRT-LLM's official installation guide to build it from source +and proceed with a containerized environment (`docker.io/tensorrt_llm/release:latest`): + +``` +git clone https://github.com/NVIDIA/TensorRT-LLM.git +cd TensorRT-LLM +git checkout v0.7.1 +make -C docker release_build +``` + +> **TROUBLESHOOTING:** rather than copying each folder separately in `docker/Dockerfile.multi`, +> you may need to copy the entire directory with `COPY ./ /src/tensorrt_llm`, since a `git submodule` command is +> called later and requires `.git` to be present. + +Once the container is built, install `nvidia-ammo` and additional dependencies for sharded checkpoint support: +``` +pip install --no-cache-dir --extra-index-url https://pypi.nvidia.com nvidia-ammo +pip install zarr tensorstore==0.1.45 +``` +TensorRT-LLM quantization functionalities are currently packaged in `nvidia-ammo`. +You can find more documentation about `nvidia-ammo` in [TensorRT-LLM's quantization +examples](https://github.com/NVIDIA/TensorRT-LLM/tree/main/examples/quantization). + +## Support Matrix + +The following matrix shows the current support for the PTQ + TensorRT-LLM export flow. + +| model | fp16 | int8_sq | fp8 | int4_awq | |-----------------------------|------|---------| ----| -------- | | nextllm-2b | x | x | x | | | nemotron3-8b | x | | x | | | nemotron3-15b | x | | x | | | llama2-text-7b | x | x | x | TP2 | | llama2-chat-70b | x | x | x | TP4 | + +Our PTQ + TensorRT-LLM flow has native support on MCore `GPTModel` with a mixed layer spec (native ParallelLinear +and Transformer-Engine Norm (`TENorm`)). Note that this is not the default mcore gpt spec.
You can still load the +following checkpoint formats with some remedy: + +| GPTModel | sharded | remedy arguments | +|-----------------------------------|---------|-----------------------------------------| +| megatron.model | | `--ammo-load-classic-megatron-to-mcore` | +| TE-Fused (default mcore gpt spec) | | `--ammo-convert-te-to-local-spec` | +| TE-Fused (default mcore gpt spec) | x | | + +> **TROUBLESHOOTING:** If you are trying to load an unpacked `.nemo` sharded checkpoint, then typically you will +> need to add `additional_sharded_prefix="model."` to `ammo_load_checkpoint()` since NeMo has an additional +> `model.` wrapper on top of the `GPTModel`. + +> **NOTE:** flag `--ammo-load-classic-megatron-to-mcore` may not work on all legacy checkpoint versions. + +## Examples + +> **NOTE:** We only provide a simple text generation script to test the generated TensorRT-LLM engines. For +> a production-level API server or enterprise support, see [NeMo](https://github.com/NVIDIA/NeMo) and TensorRT-LLM's +> backend for [NVIDIA Triton Inference Server](https://developer.nvidia.com/nvidia-triton-inference-server). + +### nemotron3-8B FP8 Quantization and TensorRT-LLM Deployment +First download the nemotron checkpoint from https://huggingface.co/nvidia/nemotron-3-8b-base-4k, extract the +sharded checkpoint from the `.nemo` tarball, and fix the tokenizer file name. + +> **NOTE:** The following cloning method uses `ssh` and assumes you have registered the `ssh-key` with Hugging Face. +> If you want to clone with `https`, run `git clone https://huggingface.co/nvidia/nemotron-3-8b-base-4k` with an access token. + +```sh +git lfs install +git clone git@hf.co:nvidia/nemotron-3-8b-base-4k +cd nemotron-3-8b-base-4k +tar -xvf Nemotron-3-8B-Base-4k.nemo +mv 586f3f51a9cf43bc9369bd53fa08868c_a934dc7c3e1e46a6838bb63379916563_3feba89c944047c19d5a1d0c07a85c32_mt_nlg_plus_multilingual_ja_zh_the_stack_frac_015_256k.model mt_nlg_plus_multilingual_ja_zh_the_stack_frac_015_256k.model +cd .. +``` + +Now launch the PTQ + TensorRT-LLM export script: +``` +bash examples/deploy/ptq_trtllm_nemotron3_8b.sh ./nemotron-3-8b-base-4k None +``` +By default, `cnn_dailymail` is used for calibration. The `GPTModel` will have quantizers for simulating the +quantization effect. The checkpoint will be saved optionally (with quantizers as additional states) and can +be restored for further evaluation. The TensorRT-LLM engine is exported to `/tmp/ammo` by default. + +The script expects `${CHECKPOINT_DIR}` (`./nemotron-3-8b-base-4k`) to have the following structure: +``` +├── model_weights +│ ├── common.pt +│ ... +│ +├── model_config.yaml +├── mt_nlg_plus_multilingual_ja_zh_the_stack_frac_015_256k.model +``` + +> **NOTE:** The script uses `TP=8`. Change `$TP` in the script if your checkpoint has a different tensor +> model parallelism. + +> **KNOWN ISSUES:** The `mt_nlg_plus_multilingual_ja_zh_the_stack_frac_015_256k.model` in the checkpoint is for +> Megatron-LM's `GPTSentencePiece` tokenizer. +> For TensorRT-LLM, we are trying to load this tokenizer as a Hugging Face `T5Tokenizer` by changing +> some special tokens, `encode`, and `batch_decode`. As a result, the tokenizer behavior in the TensorRT-LLM engine may +> not match exactly. + +> **TROUBLESHOOTING:** If you are loading a `.nemo` sharded checkpoint here, call +> `ammo_load_checkpoint(..., additional_sharded_prefix="model.")` in +> `text_generation_ptq.py` to align the sharded keys.
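For orientation, the PTQ step that the command above drives reduces to a short pattern: pick a quantization config, run a calibration forward loop, and let `atq.quantize` insert and calibrate the quantizers. The sketch below is condensed from `examples/deploy/text_generation_ptq.py` added later in this diff; it assumes Megatron is already initialized and `model` is the unwrapped MCore `GPTModel`, and `run_inference` is a placeholder for whatever forward pass you use for calibration. Treat it as an illustration rather than a drop-in replacement for the script.

```python
# Condensed, illustrative PTQ pattern (not a substitute for text_generation_ptq.py).
import ammo.torch.quantization as atq

QUANT_CFG_CHOICES = {
    "int8_sq": atq.INT8_SMOOTHQUANT_CFG,
    "fp8": atq.FP8_DEFAULT_CFG,
    "int4_awq": atq.INT4_AWQ_CFG,
}


def ptq_calibrate(model, calib_batches, run_inference, quant_cfg_name="fp8"):
    def forward_loop():
        # atq.quantize runs this loop to collect calibration statistics
        # (e.g. activation ranges) for the quantizers it inserts.
        for batch in calib_batches:
            run_inference(model, batch)

    atq.quantize(model, QUANT_CFG_CHOICES[quant_cfg_name], forward_loop)
    return model
```

After this step the quantized model can optionally be saved (with quantizer states) and exported to a TensorRT-LLM engine, as the script does with `torch_to_model_config` and `model_config_to_tensorrt_llm`.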
+ +### llama2-text-7b INT8 SmoothQuant and TensorRT-LLM Deployment +> **NOTE:** Due to licensing restrictions, we do not provide an MCore checkpoint to download. Users can follow +> the instructions in `docs/llama2.md` to convert the checkpoint to the classic Megatron `GPTModel` format and +> use the `--ammo-load-classic-megatron-to-mcore` flag, which will remap the checkpoint to the MCore `GPTModel` spec +> that we support. + +```sh +bash examples/deploy/ptq_trtllm_llama_7b.sh ${CHECKPOINT_DIR} +``` + +The script expects `${CHECKPOINT_DIR}` to have the following structure: +``` +├── hf +│ ├── tokenizer.config +│ ├── tokenizer.model +│ ... +│ +├── iter_0000001 +│ ├── mp_rank_00 +│ ... +│ +├── latest_checkpointed_iteration.txt +``` +In short, in addition to the converted llama Megatron checkpoint, also place the Hugging Face checkpoint inside as +the source of the tokenizer. diff --git a/examples/deploy/ptq_trtllm_llama_7b.sh b/examples/deploy/ptq_trtllm_llama_7b.sh new file mode 100644 index 0000000000..dc936c82ac --- /dev/null +++ b/examples/deploy/ptq_trtllm_llama_7b.sh @@ -0,0 +1,79 @@ +#!/bin/bash +DEFAULT_NAME="/checkpoints/llama2-text-7b_v0.2.0" +NAME="${1:-$DEFAULT_NAME}" + +DEFAULT_QUANT_CFG="int8_sq" +QUANT_CFG="${2:-$DEFAULT_QUANT_CFG}" + +# CHANGE THE FOLLOWING IF YOU MOUNT YOUR DATA AND CHECKPOINTS DIFFERENTLY IN THE CONTAINER. +TP="8" +PP=1 +INFERENCE_TP=${TP} +DECODER_TYPE="llama" +CHECKPOINT_LOAD_DIR="${NAME}" +TOKENIZER_MODEL="${CHECKPOINT_LOAD_DIR}/hf/tokenizer.model" + +# LLaMA2 text 7b has ffn_hidden_size 11008. int4_awq requires a block_size of 128, so TP can be at most 2. +if [ "$QUANT_CFG" = "int4_awq" ]; then + INFERENCE_TP="2" +fi + +additional_options=" \ + --ammo-quant-cfg ${QUANT_CFG} \ + --ammo-load-classic-megatron-to-mcore \ + --decoder ${DECODER_TYPE} \ + --engine-dir /tmp/ammo \ + --max-input-len 2048 \ + --max-output-len 512 \ + --max-batch-size 8 \ + --inference-tensor-parallel ${INFERENCE_TP} " + +trtllm_options=" \ + --engine-dir /tmp/ammo \ + --tokenizer ${CHECKPOINT_LOAD_DIR}/hf \ + --max-output-len 512 " + +# DO NOT CHANGE THE SETTING BELOW UNLESS YOU KNOW WHAT YOU ARE DOING!!! +export CUDA_DEVICE_MAX_CONNECTIONS=1 + +options=" \ + --disable-bias-linear \ + --swiglu \ + --untie-embeddings-and-output-weights \ + --use-rotary-position-embeddings \ + --normalization RMSNorm \ + --norm-epsilon 1e-5 \ + --no-position-embedding \ + --no-masked-softmax-fusion \ + --no-bias-gelu-fusion \ + --no-bias-dropout-fusion \ + --no-async-tensor-model-parallel-allreduce \ + --tensor-model-parallel-size ${TP} \ + --pipeline-model-parallel-size 1 \ + --num-layers 32 \ + --hidden-size 4096 \ + --ffn-hidden-size 11008 \ + --num-attention-heads 32 \ + --seq-length 2048 \ + --max-position-embeddings 4096 \ + --micro-batch-size 1 \ + --make-vocab-size-divisible-by 1 \ + --tokenizer-type Llama2Tokenizer \ + --tokenizer-model ${TOKENIZER_MODEL} \ + --save-interval 1000000 \ + --bf16 \ + --use-mcore-models " + +set +x + +# Precompile CUDA extensions +python -c "import ammo.torch.quantization.extensions as ext; print(ext.cuda_ext); print(ext.cuda_ext_fp8)" + +# Acquire launch configuration where variable launch_config will be set +launch_config="--nproc_per_node=${TP}" + +# Launch multi-process with torchrun +torchrun ${launch_config} examples/deploy/text_generation_ptq.py ${options} ${additional_options} --load ${CHECKPOINT_LOAD_DIR} + +# This script uses mpi4py, which will fork multiple processes.
+python examples/deploy/trtllm_text_generation.py ${trtllm_options} diff --git a/examples/deploy/ptq_trtllm_nemotron3_8b.sh b/examples/deploy/ptq_trtllm_nemotron3_8b.sh new file mode 100644 index 0000000000..418021b102 --- /dev/null +++ b/examples/deploy/ptq_trtllm_nemotron3_8b.sh @@ -0,0 +1,75 @@ +#!/bin/bash +DEFAULT_NAME="/checkpoints/nemotron3-8b_v0.2.0" +NAME="${1:-$DEFAULT_NAME}" + +DEFAULT_QUANT_CFG="fp8" +QUANT_CFG="${2:-$DEFAULT_QUANT_CFG}" + +# CHANGE THE FOLLOWING IF YOU MOUNT YOUR DATA AND CHECKPOINTS DIFFERENTLY IN THE CONTAINER. +TP="8" +INFERENCE_TP=${TP} +DECODER_TYPE="gptnext" +CHECKPOINT_LOAD_DIR="${NAME}" +TOKENIZER_MODEL="${CHECKPOINT_LOAD_DIR}/mt_nlg_plus_multilingual_ja_zh_the_stack_frac_015_256k.model" + +if [ "$QUANT_CFG" = "int4_awq" ]; then + INFERENCE_TP="1" +fi + +additional_options=" \ + --ammo-quant-cfg ${QUANT_CFG} \ + --ammo-load-classic-megatron-to-mcore \ + --decoder ${DECODER_TYPE} \ + --engine-dir /tmp/ammo \ + --max-input-len 2048 \ + --max-output-len 512 \ + --max-batch-size 8 \ + --inference-tensor-parallel ${INFERENCE_TP} " + +trtllm_options=" \ + --engine-dir /tmp/ammo \ + --tokenizer ${TOKENIZER_MODEL} \ + --max-output-len 512 " + +# DO NOT CHANGE THE SETTING BELOW UNLESS YOU KNOW WHAT YOU ARE DOING!!! +export CUDA_DEVICE_MAX_CONNECTIONS=1 + +options=" \ + --apply-layernorm-1p \ + --untie-embeddings-and-output-weights \ + --disable-bias-linear \ + --no-position-embedding \ + --use-rotary-position-embeddings \ + --rotary-percent 0.5 \ + --squared-relu \ + --attention-dropout 0.0 \ + --hidden-dropout 0.0 \ + --tensor-model-parallel-size ${TP} \ + --pipeline-model-parallel-size 1 \ + --num-layers 32 \ + --hidden-size 4096 \ + --num-attention-heads 32 \ + --seq-length 4096 \ + --max-position-embeddings 4096 \ + --micro-batch-size 1 \ + --tokenizer-type GPTSentencePieceTokenizer \ + --tokenizer-model ${TOKENIZER_MODEL} \ + --save-interval 1000000 \ + --load ${CHECKPOINT_LOAD_DIR} \ + --bf16 \ + --use-mcore-models " + +set +x + +# Precompile CUDA extentions +python -c "import ammo.torch.quantization.extensions as ext; print(ext.cuda_ext); print(ext.cuda_ext_fp8)" + +# Acquire launch configuration where variable launch_config will be set +launch_config="--nproc_per_node=${TP}" + +# Launch multi-process with torchrun +torchrun ${launch_config} examples/deploy/text_generation_ptq.py ${options} ${additional_options} --load ${CHECKPOINT_LOAD_DIR} + +# This script is using mpi4py which will fork multiple processes. +python examples/deploy/trtllm_text_generation.py ${trtllm_options} + diff --git a/examples/deploy/text_generation_ptq.py b/examples/deploy/text_generation_ptq.py new file mode 100644 index 0000000000..db25a5a4c7 --- /dev/null +++ b/examples/deploy/text_generation_ptq.py @@ -0,0 +1,273 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. 
+ +"""Sample Generate GPT.""" +import functools +import os +import sys +from pathlib import Path + +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../../"))) + +import ammo.torch.quantization as atq +import torch +from datasets import load_dataset + +# [ModelOpt]: changing the default model provider to the AMMO version +from megatron import get_args, print_rank_0 +from megatron.checkpointing import load_checkpoint, save_checkpoint +from megatron.core import mpu +from megatron.core.dist_checkpointing import load +from megatron.deploy.arguments import add_ammo_args +from megatron.deploy.gpt.model_provider import model_provider +from megatron.initialize import initialize_megatron +from megatron.text_generation import generate_and_post_process +from megatron.training import get_model +from megatron.utils import unwrap_model + +QUANT_CFG_CHOICES = { + "int8": atq.INT8_DEFAULT_CFG, + "int8_sq": atq.INT8_SMOOTHQUANT_CFG, + "fp8": atq.FP8_DEFAULT_CFG, + "int4_awq": atq.INT4_AWQ_CFG, + "w4a8_awq": atq.W4A8_AWQ_BETA_CFG, +} + + +def add_trtllm_args(parser): + """Add additional arguments for TensorRT-LLM.""" + group = parser.add_argument_group(title="trtllm") + + group.add_argument( + "--engine-dir", type=str, help="The output TensorRT-LLM engine dir.", + ) + group.add_argument( + "--decoder", type=str, choices=["gptnext", 'llama'], help="The decoder type of the model.", + ) + group.add_argument("--max-input-len", type=int, help="Max input sequence length.", default=2048) + group.add_argument( + "--max-output-len", type=int, help="Max output sequence length.", default=512 + ) + group.add_argument("--max-batch-size", type=int, help="Max batch size.", default=32) + group.add_argument( + "--inference-tensor-parallel", + type=int, + help="Tensor parallel for the inference time, can be different from the training config.", + default=1, + ) + + +def add_text_generate_ptq_args(parser): + """Add additional arguments for AMMO text generation PTQ.""" + group = parser.add_argument_group(title='AMMO text generation ptq') + group.add_argument( + "--calib-dataset", + type=str, + default="cnn_dailymail", + help="Calibration datasets from HuggingFace datasets.", + ) + group.add_argument( + "--calib-steps", type=int, default=512, help="Steps to perform atq.quantize calibration." + ) + parser.add_argument( + "--prompts", + type=str, + default=( + "Born in north-east France, Soyer trained as a|Born in California, Soyer trained as a" + ), + help="Input texts. Please use | to separate different batches.", + ) + add_ammo_args(parser) + add_trtllm_args(parser) + return parser + + +def get_calib_dataloader( + data="cnn_dailymail", batch_size=4, calib_size=512, max_sequence_length=512 +): + if data == "wikitext": + dataset = load_dataset("wikitext", "wikitext-103-v1", split="train") + text_column = "text" + elif data == "cnn_dailymail": + dataset = load_dataset("cnn_dailymail", name="3.0.0", split="train") + text_column = "article" + + calib_size = max(min(len(dataset), calib_size), batch_size) + for i in range(calib_size // batch_size): + batch = dataset[i * batch_size : (i + 1) * batch_size][text_column] + for j in range(len(batch)): + batch[j] = batch[j][:max_sequence_length] + yield batch + + +def ammo_load_checkpoint( + model, optimizer=None, opt_param_scheduler=None, strict=True, additional_sharded_prefix="" +): + """Load a megatron checkpoint depending its format. 
+ + Args: + model: MCoreGPTModel instance + optimizer: Megatron optimizer instance + opt_param_scheduler: Megatron scheduler instance + strict: if True, no extra or missing keys are allowed while loading the state_dict + additional_sharded_prefix (str): Append additional prefix to align the sharded checkpoint keys. When loading + an .nemo sharded checkpoint, this is usually `model.`. Otherwise, this is typically an empty string. + """ + + def _remove_prefix_state_dict_pre_hook( + state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs, + ): + """Pytorch _load_state_dict_pre_hook to remap the state_dict with the additional sharded prefix.""" + if additional_sharded_prefix is None: + return + key_rewrite_list = [] + for key, _ in state_dict.items(): + if key.startswith(additional_sharded_prefix): + key_rewrite_list.append(key) + for old_key in key_rewrite_list: + new_key = old_key[len(additional_sharded_prefix) :] + state_dict[new_key] = state_dict.pop(old_key) + + args = get_args() + load_dir = args.load + + shared_model_state_dir = "model_weights" + sharded_load_dir = Path(load_dir + "/" + shared_model_state_dir) + + if sharded_load_dir.exists() and optimizer is None and opt_param_scheduler is None: + unwrapped_model = unwrap_model(model) + shareded_state_dict = unwrapped_model[0].sharded_state_dict( + prefix=additional_sharded_prefix + ) + if additional_sharded_prefix: + unwrapped_model[0]._register_load_state_dict_pre_hook( + _remove_prefix_state_dict_pre_hook + ) + unwrapped_model[0].load_state_dict(load(shareded_state_dict, sharded_load_dir)) + else: + _ = load_checkpoint(model, optimizer, opt_param_scheduler, strict=strict) + + +if __name__ == "__main__": + initialize_megatron( + extra_args_provider=add_text_generate_ptq_args, + args_defaults={ + 'tokenizer_type': 'GPT2BPETokenizer', + 'no_load_rng': True, + 'no_load_optim': True, + }, + ) + + args = get_args() + if args.num_layers_per_virtual_pipeline_stage is not None: + print("Interleaved pipeline schedule is not yet supported for text generation.") + exit() + + text_generation_model_provider = functools.partial(model_provider, parallel_output=False) + model = get_model(text_generation_model_provider, wrap_with_ddp=False) + assert len(model) == 1, "Above condition should have caught this" + + if args.load is not None: + _ = ammo_load_checkpoint( + model, + None, + None, + strict=not args.untie_embeddings_and_output_weights, + additional_sharded_prefix="model.", + ) + else: + print_rank_0("WARNING: No checkpoint is loaded for PTQ! 
The process will still continue.") + + all_prompts = args.prompts.split("|") + + def custom_prompt_forward_loop_func(): + for prompt in all_prompts: + if mpu.is_pipeline_first_stage() and mpu.get_tensor_model_parallel_rank() == 0: + ( + prompts_plus_generations, + prompts_plus_generations_segments, + logprobs, + _, + ) = generate_and_post_process( + model[0], + prompts=[prompt], + tokens_to_generate=128, + return_output_log_probs=True, + temperature=1.0, + ) + print_rank_0(prompts_plus_generations) + else: + generate_and_post_process(model[0]) + + def hf_dataset_forword_loop_func(): + dataloader = get_calib_dataloader(args.calib_dataset, calib_size=args.calib_steps) + for prompts in dataloader: + if mpu.is_pipeline_first_stage() and mpu.get_tensor_model_parallel_rank() == 0: + ( + prompts_plus_generations, + prompts_plus_generations_segments, + logprobs, + _, + ) = generate_and_post_process( + model[0], + prompts=prompts, + tokens_to_generate=0, + return_output_log_probs=True, + temperature=1.0, + ) + else: + generate_and_post_process(model[0]) + + ptq_forward_loop_func = custom_prompt_forward_loop_func + if args.calib_dataset is not None: + ptq_forward_loop_func = hf_dataset_forword_loop_func + + if args.ammo_quant_cfg in QUANT_CFG_CHOICES: + atq_config = QUANT_CFG_CHOICES[args.ammo_quant_cfg] + if "awq" in args.ammo_quant_cfg: + weight_quantizer = atq_config["quant_cfg"]["*weight_quantizer"] # type: ignore + if isinstance(weight_quantizer, list): + weight_quantizer = weight_quantizer[0] + weight_quantizer["block_sizes"][-1] = 128 + atq_config["quant_cfg"]["*.output_layer.*"] = {"enable": False} + print_rank_0("atq.quantize: output_layer quantization is disable") + atq.quantize(model[0], atq_config, ptq_forward_loop_func) + custom_prompt_forward_loop_func() + if args.save: + save_checkpoint(1, model, None, None) + else: + custom_prompt_forward_loop_func() + + if args.engine_dir: + from ammo.deploy.llm import model_config_to_tensorrt_llm + from ammo.torch.export import torch_to_model_config + + assert args.decoder in ["gptnext", "llama"], f"Decoder type {args.decoder} not supported." + + Path(args.engine_dir).mkdir(parents=True, exist_ok=True) + + print_rank_0("Exporting model_configs for TRT LLM.") + model = unwrap_model(model) + model = model[0] + + # In TRT LLM, squared relu activation does not support bf16. So we use fp16 by default. + model_configs = torch_to_model_config( + model, + args.decoder, + torch.float16, + inference_tensor_parallel=args.inference_tensor_parallel, + ) + + print_rank_0("Building TRT LLM engines.") + for model_config in model_configs: + model_config_to_tensorrt_llm( + model_config, + args.engine_dir, + max_input_len=args.max_input_len, + max_output_len=args.max_output_len, + max_batch_size=args.max_batch_size, + max_beam_width=1, + num_build_workers=1, + inflight_batching=False, + enable_sparsity=False, + ) + print_rank_0(f"TRT LLM engines saved to {args.engine_dir}") diff --git a/examples/deploy/trtllm_text_generation.py b/examples/deploy/trtllm_text_generation.py new file mode 100644 index 0000000000..c6c0098f20 --- /dev/null +++ b/examples/deploy/trtllm_text_generation.py @@ -0,0 +1,93 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. 
+ +"""An example script to run the tensorrt_llm engine.""" + +import argparse +from pathlib import Path + +import numpy as np +import torch +from ammo.deploy.llm import generate, load, unload +from transformers import AutoTokenizer, T5Tokenizer + + +class CustomSentencePieceTokenizer(T5Tokenizer): + """This is a custom GPTSentencePiece Tokenizer modified from the T5Tokenizer. + + Note: + The modification is kept minimal to make `encode` and `batch_decode` working + properly (used in TensorRT-LLM engine). Other functions have not been tested. + """ + + def __init__(self, model): + super().__init__(model, extra_ids=0, bos_token="", pad_token="") + + def encode(self, text, add_special_tokens: bool = True, **kwargs): + return self.sp_model.encode_as_ids(text) + + def batch_decode(self, sequences, skip_special_tokens: bool = False, **kwargs): + if isinstance(sequences, np.ndarray) or torch.is_tensor(sequences): + sequences = sequences.tolist() + return self.sp_model.decode(sequences) + + +def parse_arguments(): + parser = argparse.ArgumentParser() + parser.add_argument("--tokenizer", type=str, default="") + parser.add_argument("--max-output-len", type=int, default=100) + parser.add_argument("--engine-dir", type=str, default="/tmp/ammo") + parser.add_argument( + "--input-texts", + type=str, + default=( + "Born in north-east France, Soyer trained as a|Born in California, Soyer trained as a" + ), + help="Input texts. Please use | to separate different batches.", + ) + parser.add_argument("--max-num-beams", type=int, default=1) + parser.add_argument("--profiler-output", type=str, default="") + return parser.parse_args() + + +def run(args): + tokenizer_path = Path(args.tokenizer) + + if tokenizer_path.is_dir(): + # For llama models, use local HF tokenizer which is a folder. + tokenizer = AutoTokenizer.from_pretrained(args.tokenizer, trust_remote_code=True) + elif tokenizer_path.is_file(): + # For nextllm and nemotron models, use local Megatron GPTSentencePiece tokenizer which is a model file. 
+ tokenizer = CustomSentencePieceTokenizer(args.tokenizer) + else: + raise ValueError( + "arg.tokenizer must be a dir to a hf tokenizer checkpoint for llama or a SentencePiece .model file for gptnext" + ) + + if not hasattr(args, "profiler_output"): + args.profiler_output = "" + + input_texts = args.input_texts.split("|") + assert input_texts, "input_text not specified" + print(input_texts) + + free_memory_before = torch.cuda.mem_get_info() + + host_context = load( + tokenizer=tokenizer, engine_dir=args.engine_dir, num_beams=args.max_num_beams + ) + torch.cuda.cudart().cudaProfilerStart() + outputs = generate(input_texts, args.max_output_len, host_context, None, args.profiler_output) + print(outputs) + torch.cuda.cudart().cudaProfilerStop() + + free_memory_after = torch.cuda.mem_get_info() + print( + f"Use GPU memory: {(free_memory_before[0] - free_memory_after[0]) / 1024 / 1024 / 1024} GB" + ) + + unload(host_context) + + +if __name__ == "__main__": + args = parse_arguments() + run(args) diff --git a/examples/detxoify_lm/generate_samples_gpt.py b/examples/detxoify_lm/generate_samples_gpt.py index 47e1590ea5..da12bbd7dc 100644 --- a/examples/detxoify_lm/generate_samples_gpt.py +++ b/examples/detxoify_lm/generate_samples_gpt.py @@ -18,14 +18,78 @@ from megatron.model import GPTModel from megatron.training import get_model from megatron.text_generation import generate_and_post_process +from megatron.arguments import core_transformer_config_from_args +from megatron.core.models.gpt import GPTModel +from typing import Union +import megatron.model +from megatron.core.transformer.spec_utils import import_module +from megatron.arguments import core_transformer_config_from_args +from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec, get_gpt_layer_local_spec +def model_provider(pre_process=True, post_process=True) -> Union[GPTModel, megatron.model.GPTModel]: + """Builds the model. -def model_provider(pre_process=True, post_process=True): - """Build the model.""" + If you set the use_mcore_models to True, it will return the mcore GPT model and if not the legacy GPT model. + + Args: + pre_process (bool, optional): Set to true if you need to compute embedings. Defaults to True. + post_process (bool, optional): Set to true if you need to want to compute output logits/loss. Defaults to True. 
+ + + Returns: + Union[GPTModel, megatron.model.GPTModel]: The returned model + """ + args = get_args() print_rank_0('building GPT model ...') - model = GPTModel(num_tokentypes=0, parallel_output=False, - pre_process=pre_process, post_process=post_process) + config = core_transformer_config_from_args(args) + + if args.use_mcore_models: + + if args.spec is None: + if args.transformer_impl == 'local': + transformer_layer_spec = get_gpt_layer_local_spec( + num_experts=args.num_experts, + moe_grouped_gemm=args.moe_grouped_gemm + ) + elif args.transformer_impl == 'transformer_engine': + transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec( + num_experts=args.num_experts, + moe_grouped_gemm=args.moe_grouped_gemm + ) + else: + raise ValueError(f"Invalid transformer_impl {args.transformer_impl}") + elif args.spec[0] == 'local': + transformer_layer_spec = get_gpt_layer_local_spec( + num_experts=args.num_experts, + moe_grouped_gemm=args.moe_grouped_gemm + ) + else: + transformer_layer_spec = import_module(args.spec) + + model = GPTModel( + config=config, + transformer_layer_spec=transformer_layer_spec, + vocab_size=args.padded_vocab_size, + max_sequence_length=args.max_position_embeddings, + pre_process=pre_process, + post_process=post_process, + fp16_lm_cross_entropy=args.fp16_lm_cross_entropy, + parallel_output=True, + share_embeddings_and_output_weights=not args.untie_embeddings_and_output_weights, + position_embedding_type=args.position_embedding_type, + rotary_percent=args.rotary_percent + ) + else: + assert(args.context_parallel_size == 1), "Context parallelism is only supported with Megatron Core!" + + model = megatron.model.GPTModel( + config, + num_tokentypes=0, + parallel_output=True, + pre_process=pre_process, + post_process=post_process + ) return model diff --git a/examples/gpt3/gpt_config.yaml b/examples/gpt3/gpt_config.yaml new file mode 100644 index 0000000000..652cd4d43e --- /dev/null +++ b/examples/gpt3/gpt_config.yaml @@ -0,0 +1,303 @@ +# WARNING: Yaml configs is currently an experimental feature +language_model: + # model architecture + num_layers: 24 + hidden_size: 1024 + num_attention_heads: 16 + num_query_groups: null + + ffn_hidden_size: null + kv_channels: null + hidden_dropout: 0.0 + attention_dropout: 0.0 + fp32_residual_connection: False + + apply_residual_connection_post_layernorm: False + layernorm_epsilon: 1.e-5 + layernorm_zero_centered_gamma: True + add_bias_linear: False + bias_activation_fusion: False + add_qkv_bias: False + gated_linear_unit: False + activation_func: swiglu + num_moe_experts: null + rotary_interleaved: False + window_size: null + + # initialization + init_method: null + init_method_std: 0.02 + output_layer_init_method: null + + # mixed-precision + apply_query_key_layer_scaling: False + attention_softmax_in_fp32: False + + # fusion + bias_swiglu_fusion: True + masked_softmax_fusion: True + persist_layer_norm: False + memory_efficient_layer_norm: False + bias_dropout_fusion: True + apply_rope_fusion: True + + # activation recomputation + recompute_granularity: null + recompute_method: null + recompute_num_layers: null + distribute_saved_activations: null + + # fp8 related + fp8: null + fp8_margin: 0 + fp8_interval: 1 + fp8_amax_history_len: 1 + fp8_amax_compute_algo: "most_recent" + fp8_wgrad: True + + # miscellaneous + clone_scatter_output_in_embedding: True + + normalization: "LayerNorm" # alt value supported by TE: "RMSNorm" + + # MoE related + moe_router_load_balancing_type: "aux_loss" + moe_router_topk: 2 + moe_grouped_gemm: 
False + moe_aux_loss_coeff: 0 # 1e-2 would be a good start value for load balance loss. + moe_z_loss_coeff: null # 1e-3 would be a good start value for z-loss + moe_input_jitter_eps: null + moe_token_dropping: False + +model_parallel: + # Model parallelism + tensor_model_parallel_size: 1 + context_parallel_size: 1 + pipeline_model_parallel_size: 1 + virtual_pipeline_model_parallel_size: null + sequence_parallel: True + expert_model_parallel_size: 1 + + # Initialization + perform_initialization: True + use_cpu_initialization: null + + # Training + fp16: False + bf16: True + params_dtype: null # Set from above arguments for core + timers: null + + # Optimizations + gradient_accumulation_fusion: True + async_tensor_model_parallel_allreduce: True + tp_comm_overlap: False + + # Debug Options + tp_comm_split_ag: True + tp_comm_atomic_ag: True + tp_comm_split_rs: True + tp_comm_atomic_rs: True + tp_comm_bulk_wgrad: True + tp_comm_bulk_dgrad: True + + # Parallelism + finalize_model_grads_func: null + + # Pipeline Parallel + pipeline_dtype: null + grad_scale_func: null + enable_autocast: False + autocast_dtype: null + variable_seq_lengths: False + num_microbatches_with_partial_activation_checkpoints: null + overlap_p2p_comm: False + batch_p2p_comm: True + batch_p2p_sync: True + use_ring_exchange_p2p: False + deallocate_pipeline_outputs: False + no_sync_func: null + grad_sync_func: null + param_sync_func: null + pipeline_model_parallel_split_rank: null + + # CPU Offloading + cpu_offloading: False + cpu_offloading_num_layers: 0 + _cpu_offloading_context: null + cpu_offloading_weights: False + cpu_offloading_activations: True + + # Timing + barrier_with_L1_time: True + +# training: +use_mcore_models: True +spec: null +micro_batch_size: 2 +global_batch_size: 128 +rampup_batch_size: [32, 32, 65324160] +check_for_nan_in_loss_and_grad: True +num_layers_per_virtual_pipeline_stage: null + +encoder_num_layers: null +decoder_num_layers: null +rotary_seq_len_interpolation_factor: null +add_position_embedding: False +make_vocab_size_divisible_by: 128 +group_query_attention: False + + +exit_signal_handler: False +exit_duration_in_mins: null +exit_interval: null + +untie_embeddings_and_output_weights: True +position_embedding_type: rope +rotary_percent: 0.5 +openai_gelu: False +squared_relu: False +swiglu: True +onnx_safe: null +bert_binary_head: True +max_position_embeddings: 4096 + +transformer_impl: local +use_flash_attn: False +seed: 1234 +data_parallel_random_init: False + +# Optimizer +optimizer: adam +lr: 2.5e-4 +lr_decay_style: cosine +lr_decay_iters: null +lr_decay_samples: 255126953 +lr_warmup_fraction: null +lr_warmup_iters: 0 +lr_warmup_samples: 81381 +lr_warmup_init: 0.0 +min_lr: 2.5e-5 +weight_decay: 0.1 +start_weight_decay: null +end_weight_decay: null +weight_decay_incr_style: constant +clip_grad: 1.0 +adam_beta1: 0.9 +adam_beta2: 0.95 +adam_eps: 1.e-08 +sgd_momentum: 0.9 +override_opt_param_scheduler: False +use_checkpoint_opt_param_scheduler: False + +# checkpointing arguments +save: null +save_interval: 20000 +no_save_optim: null +no_save_rng: null +load: null +no_load_optim: null +no_load_rng: null +finetune: False +use_checkpoint_args: False +exit_on_missing_checkpoint: False + +# loss arguments +loss_scale: null +initial_loss_scale: 4294967296 +min_loss_scale: 1.0 +loss_scale_window: 1000 +hysteresis: 2 +accumulate_allreduce_grads_in_fp32: False +fp16_lm_cross_entropy: False + +# distributed arguments +distributed_backend: nccl +distributed_timeout_minutes: 10 +overlap_grad_reduce: False 
+delay_grad_reduce: True +overlap_param_gather: False +delay_param_gather: False +scatter_gather_tensors_in_pipeline: True +local_rank: null +lazy_mpu_init: null +empty_unused_memory_level: 0 +standalone_embedding_stage: False +use_distributed_optimizer: False +nccl_communicator_config_path: null + +train_iters: null +eval_iters: 32 +eval_interval: 2000 +skip_train: False + +adlr_autoresume: False +adlr_autoresume_interval: 1000 + +# garbage collection +manual_gc: False +manual_gc_interval: 0 +manual_gc_eval: True + +tp_comm_overlap_cfg: null + +#data +data_path: null +split: '99,1,0' +train_data_path: null +valid_data_path: null +test_data_path: null +data_cache_path: null +mock_data: False +vocab_size: null +vocab_file: null +merge_file: null +vocab_extra_ids: 0 +seq_length: 4096 +encoder_seq_length: null +decoder_seq_length: null +retriever_seq_length: 256 +sample_rate: 1.0 +mask_prob: 0.15 +short_seq_prob: 0.1 +num_workers: 2 +tokenizer_type: GPTSentencePieceTokenizer +tokenizer_model: null +reset_position_ids: False +reset_attention_mask: False +eod_mask_loss: False +train_samples: 268554688 +dataloader_type: null + +#profile: +profile: False +profile_ranks: [0] +profile_step_end: 12 +profile_step_start: 10 + +#logging: +log_params_norm: True +log_num_zeros_in_grad: True +log_throughput: False +log_progress: False +timing_log_level: 0 +timing_log_option: minmax +tensorboard_log_interval: 1 +tensorboard_queue_size: 1000 +log_timers_to_tensorboard: False +log_batch_size_to_tensorboard: False +log_learning_rate_to_tensorboard: True +log_learning_rate_to_tensorboard: True +log_validation_ppl_to_tensorboard: False +log_memory_to_tensorboard: False +log_world_size_to_tensorboard: False +log_loss_scale_to_tensorboard: True +wandb_project: '' +wandb_exp_name: '' +wandb_save_dir: '' +enable_one_logger: False +one_logger_project: e2e-tracking +one_logger_entity: hwinf_dcm +one_logger_run_name: null +log_interval: 100 +tensorboard_dir: null diff --git a/examples/gpt3/train_gpt3_175b_distributed.sh b/examples/gpt3/train_gpt3_175b_distributed.sh index 01ca2e0309..ccba78784b 100755 --- a/examples/gpt3/train_gpt3_175b_distributed.sh +++ b/examples/gpt3/train_gpt3_175b_distributed.sh @@ -12,11 +12,11 @@ NUM_NODES=1 NODE_RANK=0 WORLD_SIZE=$(($GPUS_PER_NODE*$NUM_NODES)) -CHECKPOINT_PATH=$0 # -TENSORBOARD_LOGS_PATH=$1 # -VOCAB_FILE=$2 #/gpt2-vocab.json -MERGE_FILE=$3 #/gpt2-merges.txt -DATA_PATH=$4 #_text_document +CHECKPOINT_PATH=$1 # +TENSORBOARD_LOGS_PATH=$2 # +VOCAB_FILE=$3 #/gpt2-vocab.json +MERGE_FILE=$4 #/gpt2-merges.txt +DATA_PATH=$5 #_text_document DISTRIBUTED_ARGS=( --nproc_per_node $GPUS_PER_NODE diff --git a/examples/t5/train_t5_220m_distributed.sh b/examples/t5/train_t5_220m_distributed.sh index 9385e390ed..4a55bb6e95 100755 --- a/examples/t5/train_t5_220m_distributed.sh +++ b/examples/t5/train_t5_220m_distributed.sh @@ -12,10 +12,10 @@ NUM_NODES=1 NODE_RANK=0 WORLD_SIZE=$(($GPUS_PER_NODE*$NUM_NODES)) -CHECKPOINT_PATH=$0 # -TENSORBOARD_DIR=$1 # -VOCAB_FILE=$2 #/bert-large-cased-vocab.txt -DATA_PATH=$3 #_text_document +CHECKPOINT_PATH=$1 # +TENSORBOARD_DIR=$2 # +VOCAB_FILE=$3 #/bert-large-cased-vocab.txt +DATA_PATH=$4 #_text_document DISTRIBUTED_ARGS=" --nproc_per_node $GPUS_PER_NODE \ diff --git a/jet-tests.yml b/jet-tests.yml index 02d441354a..e23f9cc98f 100644 --- a/jet-tests.yml +++ b/jet-tests.yml @@ -1,71 +1,87 @@ .jet_common: stage: jet rules: - - if: '"JET" =~ $TESTS_TO_RUN_ON_THIS_COMMIT' - - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH && "JET" =~ $TESTS_TO_RUN_AFTER_MERGING - - 
if: $CI_MERGE_REQUEST_APPROVED && "JET" =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED - - if: '$CI_MERGE_REQUEST_LABELS == "READY FOR REVIEW" && "JET" =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED' + - if: $CI_PIPELINE_SOURCE == "merge_request_event" && $CI_MERGE_REQUEST_APPROVED + - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && $CI_MERGE_REQUEST_LABELS =~ /READY FOR REVIEW/' + - if: $JET_CUSTOM_FILTER != "" && $CI_PIPELINE_SOURCE != 'merge_request_event' + - when: never -jet-generate: - extends: .jet_common +include: + - project: dl/jet/gitlab-templates + ref: main + file: downstreams.yml + +jet-setup: + extends: [ .jet_common ] + tags: + - os/linux + script: + - set -x + - | + if [[ $CI_PIPELINE_SOURCE == "merge_request_event" ]] && [[ $CI_MERGE_REQUEST_APPROVED || $CI_MERGE_REQUEST_LABELS =~ "READY FOR REVIEW" ]]; then + JET_FILTER="type == 'build' or 'merge-request' in spec.scope" + elif [[ -n $JET_CUSTOM_FILTER && $CI_PIPELINE_SOURCE != 'merge_request_event' ]]; then + JET_FILTER=$JET_CUSTOM_FILTER + else + JET_FILTER="False" + fi + echo "_JET_FILTER=$JET_FILTER" | tee -a config.env + artifacts: + reports: + dotenv: config.env + +jet-configure: + extends: [.jet_common, .jet-configure] tags: - - docker_local_runner - variables: - JET_WORKLOADS_REF_MAIN: megatron-core - JET_WORKLOADS_REF_EPHEMERAL: ephemeral/${CI_PROJECT_PATH_SLUG}/${CI_PIPELINE_ID} + - os/linux script: - wget https://github.com/mikefarah/yq/releases/download/v4.35.2/yq_linux_amd64.tar.gz -O - | tar xz && mv yq_linux_amd64 /usr/local/bin/yq - - git clone https://gitlab-ci-token:${JET_WORKLOADS_TOKEN}@gitlab-master.nvidia.com/dl/jet/workloads-registry jet-workloads-registry - - - cd jet-workloads-registry - - git config user.name "Megatron-LM CI" - - git config user.email "megatron-lm@ci.nvidia.com" - - - git checkout -f "$JET_WORKLOADS_REF_MAIN" - - git checkout -b "$JET_WORKLOADS_REF_EPHEMERAL" - + - cd tests/functional_tests/jet_recipes - | if [[ $CI_PIPELINE_SOURCE == "merge_request_event" ]]; then - yq e ".spec.source.ref = \"merge-requests/${CI_MERGE_REQUEST_IID}/head\"" -i recipes/build-pyt.yaml + yq e ".spec.source.ref = \"merge-requests/${CI_MERGE_REQUEST_IID}/head\"" -i build-pyt.yaml else - yq e ".spec.source.ref = \"${CI_COMMIT_REF_NAME}\"" -i recipes/build-pyt.yaml + yq e ".spec.source.ref = \"${CI_COMMIT_REF_NAME}\"" -i build-pyt.yaml fi - - - git add recipes/build-pyt.yaml - - git commit -m "Dynamic configuration - ${CI_PIPELINE_ID}" - - git push origin "$JET_WORKLOADS_REF_EPHEMERAL" + artifacts: + paths: + - tests/functional_tests/jet_recipes jet-trigger: - extends: .jet_common - needs: [ jet-generate ] - when: on_success - inherit: - variables: - - CI_PROJECT_PATH_SLUG - - CI_PIPELINE_ID - - TESTS_TO_RUN_ON_THIS_COMMIT - - TESTS_TO_RUN_AFTER_MERGING - - TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED - variables: - JET_WORKLOADS_REF: ephemeral/${CI_PROJECT_PATH_SLUG}/${CI_PIPELINE_ID} - JET_WORKLOADS_FILTER: "True" + stage: jet + extends: [.jet_common, .jet-trigger] + needs: [ jet-configure, jet-setup ] trigger: project: dl/jet/ci - branch: megatron-core + branch: $JET_CLUSTER_BRANCH strategy: depend + inherit: + variables: + - JET_CUSTOM_FILTER + - JET_CLUSTER_BRANCH + variables: + JET_WORKLOADS_FILTER: "$_JET_FILTER" + jet-functional-results: - extends: .jet_common + stage: jet tags: - docker_local_runner image: gitlab-master.nvidia.com:5005/dl/jet/api:latest needs: [ jet-trigger ] - when: on_success before_script: - jet secrets jwt-login jwt/nvidia/gitlab-master adlr-megatron-lm-ci $CI_JOB_JWT script: - python 
-m pip install -U --no-cache-dir prettytable - - python tests/functional_tests/python_test_utils/jet_test_pipeline.py "ephemeral/${CI_PROJECT_PATH_SLUG}/${CI_PIPELINE_ID}" --test exit + - python tests/functional_tests/python_test_utils/jet_test_pipeline.py ${CI_PIPELINE_ID} --test exit + rules: + - if: $CI_PIPELINE_SOURCE == "merge_request_event" && $CI_MERGE_REQUEST_APPROVED + when: always + - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && $CI_MERGE_REQUEST_LABELS =~ /READY FOR REVIEW/' + when: always + - if: $JET_CUSTOM_FILTER != "" && $CI_PIPELINE_SOURCE != 'merge_request_event' + when: always + - when: never jet-compare-metrics: extends: .jet_common @@ -73,9 +89,8 @@ jet-compare-metrics: - docker_local_runner image: gitlab-master.nvidia.com:5005/dl/jet/api:latest needs: [ jet-functional-results ] - when: on_success before_script: - jet secrets jwt-login jwt/nvidia/gitlab-master adlr-megatron-lm-ci $CI_JOB_JWT script: - python -m pip install -U --no-cache-dir pytest tensorboard - - python tests/functional_tests/python_test_utils/jet_test_pipeline.py "ephemeral/${CI_PROJECT_PATH_SLUG}/${CI_PIPELINE_ID}" --test metrics + - python tests/functional_tests/python_test_utils/jet_test_pipeline.py ${CI_PIPELINE_ID} --test metrics diff --git a/megatron/__init__.py b/megatron/__init__.py index c35de282a2..e9faa069ed 100644 --- a/megatron/__init__.py +++ b/megatron/__init__.py @@ -10,6 +10,7 @@ from .global_vars import get_tokenizer from .global_vars import get_tensorboard_writer from .global_vars import get_wandb_writer +from .global_vars import get_one_logger from .global_vars import get_adlr_autoresume from .global_vars import get_timers from .initialize import initialize_megatron diff --git a/megatron/arguments.py b/megatron/arguments.py index fff5bbeb5b..b7487074bf 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -11,12 +11,16 @@ import torch.nn.functional as F from megatron.global_vars import set_retro_args, get_retro_args -from tools.retro.utils import get_args_path as get_retro_args_path from megatron.core.models.retro import RetroConfig from megatron.core.transformer import TransformerConfig +def get_args_path(workdir): + '''Argument copy stored within retro workdir.''' + return os.path.join(workdir, "args.json") + + def parse_args(extra_args_provider=None, ignore_unknown_args=False): """Parse all arguments.""" parser = argparse.ArgumentParser(description='Megatron-LM Arguments', @@ -36,6 +40,7 @@ def parse_args(extra_args_provider=None, ignore_unknown_args=False): parser = _add_autoresume_args(parser) parser = _add_biencoder_args(parser) parser = _add_vision_args(parser) + parser = _add_moe_args(parser) parser = _add_logging_args(parser) parser = _add_inference_args(parser) parser = _add_transformer_engine_args(parser) @@ -45,13 +50,20 @@ def parse_args(extra_args_provider=None, ignore_unknown_args=False): # Custom arguments. if extra_args_provider is not None: parser = extra_args_provider(parser) - + # Parse. 
if ignore_unknown_args: args, _ = parser.parse_known_args() else: args = parser.parse_args() + # Experimental yaml + if args.yaml_cfg is not None: + from .yaml_arguments import load_yaml + assert args.yaml_cfg and args.use_mcore_models, "To use yaml, mcore must be enabled" + args = load_yaml(args.yaml_cfg) + + # Args from environment args.rank = int(os.getenv('RANK', '0')) args.world_size = int(os.getenv("WORLD_SIZE", '1')) @@ -170,12 +182,23 @@ def validate_args(args, defaults={}): if args.overlap_param_gather: assert args.use_distributed_optimizer, \ '--overlap-param-gather only supported with distributed optimizer' + assert args.overlap_grad_reduce, \ + '--overlap-grad-reduce should be turned on when using --overlap-param-gather' + assert args.use_mcore_models, \ + '--overlap-param-gather only supported with MCore models' # Parameters dtype. args.params_dtype = torch.float if args.fp16: assert not args.bf16 args.params_dtype = torch.half + # Turn off checking for NaNs in loss and grads if using dynamic loss scaling, + # where NaNs in grads / loss are signal to the loss scaler. + if not args.loss_scale: + args.check_for_nan_in_loss_and_grad = False + if args.rank == 0: + print('WARNING: Setting args.check_for_nan_in_loss_and_grad to False since ' + 'dynamic loss scaling is being used') if args.bf16: assert not args.fp16 args.params_dtype = torch.bfloat16 @@ -194,6 +217,9 @@ def validate_args(args, defaults={}): if args.dataloader_type is None: args.dataloader_type = 'single' + if args.valid_num_workers is None: + args.valid_num_workers = args.num_workers + # Consumed tokens. args.consumed_train_samples = 0 args.consumed_valid_samples = 0 @@ -289,6 +315,11 @@ def validate_args(args, defaults={}): assert args.fp16 or args.bf16, \ 'residual connection in fp32 only supported when using fp16 or bf16.' + if args.moe_grouped_gemm: + assert args.bf16, 'Currently GroupedGEMM for MoE only supports bf16 dtype.' + dc = torch.cuda.get_device_capability() + assert dc[0] >= 8, "Unsupported compute capability for GroupedGEMM kernels." + if args.weight_decay_incr_style == 'constant': assert args.start_weight_decay is None assert args.end_weight_decay is None @@ -340,6 +371,9 @@ def validate_args(args, defaults={}): if args.sequence_parallel: args.async_tensor_model_parallel_allreduce = False + if not args.use_flash_attn: + assert args.window_size is None + if os.environ.get('CUDA_DEVICE_MAX_CONNECTIONS') != "1": if args.sequence_parallel: raise RuntimeError( @@ -380,6 +414,10 @@ def validate_args(args, defaults={}): # Legacy RoPE arguments if args.use_rotary_position_embeddings: args.position_embedding_type = 'rope' + if args.rotary_interleaved and args.apply_rope_fusion: + raise RuntimeError('--rotary-interleaved does not work with rope_fusion.') + if args.rotary_interleaved and not args.use_mcore_models: + raise RuntimeError('--rotary-interleaved only support Megatron Core, please add --use-mcore-models.') # Would just need to add 'NoPE' as a position_embedding_type to support this, but for now # don't allow it to keep things simple @@ -389,19 +427,17 @@ def validate_args(args, defaults={}): # MoE Spec check if args.num_experts is not None: assert args.spec is None, "Model Spec must be None when using MoEs" + if args.tensor_model_parallel_size > 1: + assert args.sequence_parallel, \ + "When using MoE and tensor parallelism, sequence parallelism must be used." 
# Expert parallelism check if args.expert_model_parallel_size > 1: assert args.num_experts is not None, "num_experts must be non None to use expert model parallelism" assert args.num_experts % args.expert_model_parallel_size == 0, \ "Number of experts should be a multiple of expert model parallel_size." - assert not args.use_distributed_optimizer, \ - "Expert parallelism is not suppored with distributed optimizer." assert not args.fp16, \ "Expert parallelism is not supported with fp16 training." - if args.tensor_model_parallel_size > 1: - assert args.sequence_parallel, \ - "When using expert parallelism and tensor parallelism, sequence parallelism must be used." # Print arguments. _print_args("arguments", args) @@ -442,12 +478,15 @@ def core_transformer_config_from_args(args): kw_args['layernorm_epsilon'] = args.norm_epsilon kw_args['deallocate_pipeline_outputs'] = True kw_args['pipeline_dtype'] = args.params_dtype - kw_args['batch_p2p_comm'] = not args.overlap_p2p_comm + kw_args['batch_p2p_comm'] = not args.overlap_p2p_comm kw_args['num_moe_experts'] = args.num_experts + kw_args['rotary_interleaved'] = args.rotary_interleaved if args.swiglu: kw_args['activation_func'] = F.silu kw_args['gated_linear_unit'] = True - kw_args['bias_gelu_fusion'] = False + kw_args['bias_activation_fusion'] = args.bias_swiglu_fusion + else: + kw_args['bias_activation_fusion'] = args.bias_gelu_fusion if args.squared_relu: assert not args.swiglu def squared_relu(x): @@ -611,6 +650,10 @@ def _add_network_size_args(parser): 'Deprecated: use --position-embedding-type') group.add_argument('--rotary-percent', type=float, default=1.0, help='Percent of rotary dimension to use, default 100%%') + group.add_argument('--rotary-theta', type=int, default=10000, + help='Theta/frequency value for rotary positional embeddings') + group.add_argument('--rotary-interleaved', action='store_true', + help='Use interleaved rotary embedding.') group.add_argument('--rotary-seq-len-interpolation-factor', type=int, default=None, help='Sequence length interpolation factor for rotary embeddings.') group.add_argument('--no-position-embedding', @@ -646,10 +689,9 @@ def _add_network_size_args(parser): group.add_argument('--bert-no-binary-head', action='store_false', help='Disable BERT binary head.', dest='bert_binary_head') - group.add_argument('--num-experts', type=int, default=None, - help='Number of Experts in Switch Transformer (None means no Switch)') group.add_argument('--untie-embeddings-and-output-weights', action='store_true', help='Untie embeddings and output weights.'), + group.add_argument('--window-size', type=int, default=None) return parser @@ -662,6 +704,10 @@ def _add_logging_args(parser): help='If set, calculate and log the number of zeros in gradient.') group.add_argument('--log-throughput', action='store_true', help='If set, calculate and log throughput per GPU.') + group.add_argument('--log-progress', action='store_true', + help='If set, log progress (in terms of number of processed tokens and ' + 'number of floating-point operations) to progress.txt file in checkpoint ' + 'directory.') group.add_argument('--timing-log-level', type=int, default=0, choices=range(0,3), help='Granularity level to measure and report timing. 
' @@ -716,12 +762,41 @@ def _add_logging_args(parser): group.add_argument('--log-world-size-to-tensorboard', action='store_true', help='Enable world size logging to tensorboard.') - group.add_argument('--wandb-project', type=str, default='', + group.add_argument('--wandb-project', '--wandb-project-name', type=str, default='', help='The wandb project name. Ignore wandb by default.') group.add_argument('--wandb-exp-name', type=str, default='', help='The wandb experiment name.') group.add_argument('--wandb-save-dir', type=str, default='', help='Path to save the wandb results locally.') + group.add_argument('--wandb-group-name', type=str, default="default") + group.add_argument('--wandb-entity-name', type=str, default=None, + help="Name of wandb entity for reporting") + group.add_argument('--structured-logs', action="store_true", + help='Add timestamp and worker name to stdout and stderr.') + group.add_argument('--structured-logs-dir', type=str, default=None, + help='Directory to save the logs.') + group.add_argument('--debug_layer_outputs', '--debug-layer-outputs', type=int, default=0) + group.add_argument('--debug_layer_gradients', '--debug-layer-gradients', type=int, default=0) + group.add_argument('--debug_all_param_gradients', '--debug-all-param-gradients', type=int, default=0) + group.add_argument('--debug_param_init', '--debug-param-init', type=int, default=0) + group.add_argument('--debug_param_update', '--debug-param-update', type=int, default=0) + group.add_argument('--debug_transformer', '--debug-transformer', type=int, default=0) + group.add_argument('--enable-one-logger', action='store_true', + help='If set, use one_logger to track E2E metrics' + 'Note that one_logger is an internal tool and not available externally. ' + 'For installation, please try command: `pip install ' + '--index-url=https://sc-hw-artf.nvidia.com/api/pypi/hwinf-ml-pypi/simple' + ' one_logger` or go to https://gitlab-master.nvidia.com/hwinf-dcm/onelogger ' + 'for more details') + group.add_argument('--one-logger-project', type=str, default='e2e-tracking', + help='The one-logger project name. Will ignore if ' + '--enable-one-logger is not set') + group.add_argument('--one-logger-entity', type=str, default='hwinf_dcm', + help='The one-logger username or team name. Will ignore if ' + '--enable-one-logger is not set') + group.add_argument('--one-logger-run-name', type=str, default=None, + help='The one-logger run name displayed. 
Will ignore if ' + '--enable-one-logger is not set') return parser @@ -834,6 +909,7 @@ def _add_training_args(parser): help='Global step to stop profiling.') group.add_argument('--profile-ranks', nargs='+', type=int, default=[0], help='Global ranks to profile.') + group.add_argument('--torch-profile-dir', type=str, default=None) group.add_argument('--tp-comm-overlap', action='store_true', help = 'Enables the ' ' overlap of Tensor parallel communication and GEMM kernels.') group.add_argument('--tp-comm-overlap-cfg', type=str, default=None, @@ -884,15 +960,26 @@ def _add_training_args(parser): group.add_argument('--no-bias-gelu-fusion', action='store_false', help='Disable bias and gelu fusion.', dest='bias_gelu_fusion') + group.add_argument('--no-bias-swiglu-fusion', action='store_false', + help='Disable bias and swiglu fusion, the fusion is ' + 'available only when using megatron-core.', + dest='bias_swiglu_fusion') group.add_argument('--no-bias-dropout-fusion', action='store_false', help='Disable bias and dropout fusion.', dest='bias_dropout_fusion') + group.add_argument('--no-rope-fusion', action='store_false', + help='Disable rope fusion, the fusion is available ' + 'only when using megatron-core.', + dest='apply_rope_fusion') group.add_argument('--use-flash-attn', action='store_true', help='use FlashAttention implementation of attention. ' 'https://arxiv.org/abs/2205.14135') group.add_argument('--disable-bias-linear', action='store_false', help='Disable bias in the linear layers', dest='add_bias_linear') + group.add_argument('--add-qkv-bias', action='store_true', + help='Enable bias only in the QKV linear layers', + dest='add_qkv_bias') group.add_argument('--optimizer', type=str, default='adam', choices=['adam', 'sgd'], help='Optimizer function') @@ -962,7 +1049,7 @@ def _add_learning_rate_args(parser): group.add_argument('--lr', type=float, default=None, help='Initial learning rate. 
Depending on decay style ' - 'and initial warmup, the learing rate at each ' + 'and initial warmup, the learning rate at each ' 'iteration would be different.') group.add_argument('--lr-decay-style', type=str, default='linear', choices=['constant', 'linear', 'cosine', 'inverse-square-root'], @@ -1057,7 +1144,7 @@ def _add_mixed_precision_args(parser): group.add_argument('--initial-loss-scale', type=float, default=2**32, help='Initial loss-scale for dynamic loss scaling.') group.add_argument('--min-loss-scale', type=float, default=1.0, - help='Minimum loss scale for dynamic loss scale.') + help='Minimum loss scale for dynamic loss scaling.') group.add_argument('--loss-scale-window', type=float, default=1000, help='Window over which to raise/lower dynamic scale.') group.add_argument('--hysteresis', type=int, default=2, @@ -1143,8 +1230,6 @@ def _add_distributed_args(parser): 'affects the encoder embedding.)') group.add_argument('--use-distributed-optimizer', action='store_true', help='Use distributed optimizer.') - group.add_argument('--expert-model-parallel-size', type=int, default=1, - help='Degree of expert model parallelism.') group.add_argument('--context-parallel-size', type=int, default=1, help='Degree of context parallelism.') group.add_argument('--nccl-communicator-config-path', type=str, default=None, @@ -1204,6 +1289,9 @@ def _add_data_args(parser): 'dataset2-path ...') group.add_argument('--data-cache-path', default=None, help='Path to a directory to hold cached index files.') + group.add_argument('--mock-data', action='store_true', + help='Skip data loading and validation and opt for artificial ' + 'generation of mock data when an implementation is available.') group.add_argument('--vocab-size', type=int, default=None, help='Size of vocab before EOD or padding.') @@ -1211,6 +1299,8 @@ def _add_data_args(parser): help='Path to the vocab file.') group.add_argument('--merge-file', type=str, default=None, help='Path to the BPE merge file.') + group.add_argument('--tokenizer-file', type=str, default=None, + help='Path to the tokenizer.json file. Used for the TokenizerFromFile[...] tokenizers') group.add_argument('--vocab-extra-ids', type=int, default=0, help='Number of additional vocabulary tokens. ' 'They are used for span masking in the T5 model') @@ -1233,11 +1323,16 @@ def _add_data_args(parser): help='Probability of producing a short sequence.') group.add_argument('--num-workers', type=int, default=2, help="Dataloader number of workers.") + group.add_argument('--valid-num-workers', type=int, default=None, + help="Dataloader number of workers for validation.") group.add_argument('--tokenizer-type', type=str, default=None, choices=['BertWordPieceLowerCase', 'BertWordPieceCase', 'GPT2BPETokenizer', + 'GPT2BPETokenizerWithFIM', + 'TokenizerFromFile', + 'TokenizerFromFileWithFIM', 'SentencePieceTokenizer', 'GPTSentencePieceTokenizer', 'Llama2Tokenizer', @@ -1252,7 +1347,15 @@ def _add_data_args(parser): 'end-of-document token.') group.add_argument('--eod-mask-loss', action='store_true', help='Mask loss for the end of document tokens.') - + group.add_argument('--fim-rate', type=float, default=0., + help='Probability to convert a training sample into a "Fill-in-the-Middle" format. Must be between 0 and 1.') + group.add_argument('--fim-spm-rate', type=float, default=0.5, + help='Probability that the a FIM sample uses the SPM format over the PSM format. ' + 'At 1, exclusively train with SPM. 
At 0, exclusively train with PSM') + group.add_argument('--fim-split-sample', type=str, default=None, + help='String around which to split the sample for FIM. If None (default), FIM is applied on the sample-level') + group.add_argument('--fragment-fim-rate', type=float, default=0.5, + help='Rate of FIM on each fragment when fim_split_sample is not None.') return parser @@ -1361,7 +1464,6 @@ def _add_vision_args(parser): group.add_argument('--swin-backbone-type', type=str, default='tiny', choices=['tiny', 'base', 'h3'], help='pretraining objectives') - # inpainting arguments group.add_argument('--mask-type', type=str, default='random', choices=['random', 'row'], @@ -1393,14 +1495,42 @@ def _add_vision_args(parser): return parser +def _add_moe_args(parser): + group = parser.add_argument_group(title="moe") + group.add_argument('--expert-model-parallel-size', type=int, default=1, + help='Degree of expert model parallelism.') + group.add_argument('--num-experts', type=int, default=None, + help='Number of Experts in MoE (None means no MoE)') + group.add_argument('--moe-router-load-balancing-type', type=str, + choices=['aux_loss', 'sinkhorn', "none"], + default='aux_loss', + help='Determines the load balancing strategy for the router. "aux_loss" corresponds to the load balancing loss used in GShard and SwitchTransformer, "sinkhorn" corresponds to the balancing algorithm used in S-BASE, and "none" implies no load balancing. The default is "aux_loss".') + group.add_argument('--moe-router-topk', type=int, default=2, + help='Number of experts to route to for each token. The default is 2.') + group.add_argument('--moe-grouped-gemm', action='store_true', + help='When there are multiple experts per rank, compress multiple local (potentially small) gemms in a single kernel launch to improve the utilization and performance by leveraging the Grouped GEMM feature introduced since CUTLASS 2.8 (https://github.com/fanshiqing/grouped_gemm).') + group.add_argument('--moe-aux-loss-coeff', type=float, default=0.0, + help='Scaling coefficient for the aux loss: a starting value of 1e-2 is recommended.') + group.add_argument('--moe-z-loss-coeff', type=float, default=None, + help='Scaling coefficient for the z-loss: a starting value of 1e-3 is recommended.') + group.add_argument('--moe-input-jitter-eps', type=float, default=None, + help='Add noise to the input tensor by applying jitter with a specified epsilon value.') + group.add_argument('--moe-token-dropping', action='store_true', + help='This feature involves selectively dropping and padding tokens for each expert to achieve a specified capacity, similar to GShard, Switch-Transformer, and DeepSpeed-MoE. Note: Currently unsupported.') + + return parser + def _add_experimental_args(parser): group = parser.add_argument_group(title='experimental') - group.add_argument('--spec', type=str, default=None, nargs=2, + group.add_argument('--spec', type=str, default=None, nargs='*', help='Specify the pair ' 'that returns a spec to customize a model, transformer ' - 'block, or transformer layer, depending on the use case. ' + 'block, or transformer layer, depending on the use case.' + 'To use local spec specify local as the argument.' 
'For more details, see the model class, ' '`transformer_block.py`, or `transformer_layer.py`') + group.add_argument('--yaml-cfg', type=str, default=None, + help = 'Config file to add additional arguments') return parser diff --git a/megatron/checkpointing.py b/megatron/checkpointing.py index a3303229a0..79b6c7d920 100644 --- a/megatron/checkpointing.py +++ b/megatron/checkpointing.py @@ -56,7 +56,7 @@ def _compare(arg_name, old_arg_name=None, default=None): _compare('hidden_size') _compare('num_attention_heads') _compare('add_position_embedding', default=True) - if args.vocab_file: + if args.vocab_file or args.tokenizer_file: _compare('max_position_embeddings') _compare('make_vocab_size_divisible_by') _compare('padded_vocab_size') @@ -238,7 +238,8 @@ def get_rng_state(): return rng_state_list -def save_checkpoint(iteration, model, optimizer, opt_param_scheduler): +def save_checkpoint(iteration, model, optimizer, opt_param_scheduler, + num_floating_point_operations_so_far): """Save a model checkpoint.""" args = get_args() @@ -270,6 +271,7 @@ def save_checkpoint(iteration, model, optimizer, opt_param_scheduler): state_dict['args'] = args state_dict['checkpoint_version'] = 3.0 state_dict['iteration'] = iteration + state_dict['num_floating_point_operations_so_far'] = num_floating_point_operations_so_far if len(model) == 1: state_dict['model'] = model[0].state_dict_for_save_checkpoint() else: @@ -504,7 +506,9 @@ def _set_arg(arg_name, old_arg_name=None, force=False): _set_arg('add_position_embedding', force=True) _set_arg('use_rotary_position_embeddings', force=True) _set_arg('rotary_percent', force=True) + _set_arg('rotary_interleaved', force=True) _set_arg('add_bias_linear', force=True) + _set_arg('add_qkv_bias', force=True) _set_arg('swiglu', force=True) _set_arg('untie_embeddings_and_output_weights', force=True) _set_arg('apply_layernorm_1p', force=True) @@ -544,8 +548,8 @@ def load_checkpoint(model, optimizer, opt_param_scheduler, load_arg='load', stri torch.distributed.barrier() sys.exit() - # Iteration defaults to 0. - return 0 + # Iteration and num_floating_point_operations_so_far default to 0. + return 0, 0 # Set checkpoint version. set_checkpoint_version(state_dict.get('checkpoint_version', 0)) @@ -564,6 +568,7 @@ def load_checkpoint(model, optimizer, opt_param_scheduler, load_arg='load', stri 'iteration from checkpoint {}, exiting'.format( checkpoint_name)) sys.exit() + num_floating_point_operations_so_far = state_dict.get('num_floating_point_operations_so_far', 0) # Check arguments. assert args.consumed_train_samples == 0 @@ -580,7 +585,7 @@ def load_checkpoint(model, optimizer, opt_param_scheduler, load_arg='load', stri print_rank_0('could not find arguments in the checkpoint ...') # Model. 
- strict = False if args.retro_add_retriever else strict + strict = False if args.retro_add_retriever or args.transformer_impl == 'transformer_engine' else strict if len(model) == 1: model[0].load_state_dict(state_dict['model'], strict=strict) else: @@ -669,7 +674,7 @@ def load_checkpoint(model, optimizer, opt_param_scheduler, load_arg='load', stri print_rank_0(f' successfully loaded checkpoint from {args.load} ' f'at iteration {iteration}') - return iteration + return iteration, num_floating_point_operations_so_far def load_biencoder_checkpoint(model, only_query_model=False, diff --git a/megatron/core/__init__.py b/megatron/core/__init__.py index 2858dc692d..b4165eb23d 100644 --- a/megatron/core/__init__.py +++ b/megatron/core/__init__.py @@ -4,6 +4,7 @@ from megatron.core.distributed import DistributedDataParallel from megatron.core.inference_params import InferenceParams from megatron.core.model_parallel_config import ModelParallelConfig +from megatron.core.timers import Timers # Alias parallel_state as mpu, its legacy name mpu = parallel_state @@ -15,4 +16,5 @@ "DistributedDataParallel", "InferenceParams", "ModelParallelConfig", + "Timers", ] diff --git a/megatron/core/datasets/bert_dataset.py b/megatron/core/datasets/bert_dataset.py new file mode 100644 index 0000000000..1168ca239a --- /dev/null +++ b/megatron/core/datasets/bert_dataset.py @@ -0,0 +1,207 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + +from dataclasses import dataclass +from typing import Dict, List, Optional, Union + +import numpy + +from megatron.core.datasets.indexed_dataset import MMapIndexedDataset +from megatron.core.datasets.masked_dataset import ( + MaskedWordPieceDataset, + MaskedWordPieceDatasetConfig, +) +from megatron.core.datasets.utils import Split + + +@dataclass +class BERTMaskedWordPieceDatasetConfig(MaskedWordPieceDatasetConfig): + """Configuration object for Megatron Core BERT WordPiece datasets + + Attributes: + classification_head (bool): Option to perform the next sequence prediction during + sampling + """ + + classification_head: bool = None + + def __post_init__(self) -> None: + """Do asserts and set fields post init + """ + super().__post_init__() + + assert self.classification_head is not None + + +class BERTMaskedWordPieceDataset(MaskedWordPieceDataset): + """The BERT dataset that assumes WordPiece tokenization + + Args: + indexed_dataset (MMapIndexedDataset): The MMapIndexedDataset around which to build the + MegatronDataset + + dataset_path (str): The real path on disk to the dataset, for bookkeeping + + indexed_indices (numpy.ndarray): The set of the documents indices to expose + + num_samples (int): The number of samples to draw from the indexed dataset + + index_split (Split): The indexed_indices Split + + config (BERTMaskedWordPieceDatasetConfig): The config + """ + + def __init__( + self, + indexed_dataset: MMapIndexedDataset, + dataset_path: str, + indexed_indices: numpy.ndarray, + num_samples: int, + index_split: Split, + config: BERTMaskedWordPieceDatasetConfig, + ) -> None: + super().__init__( + indexed_dataset, dataset_path, indexed_indices, num_samples, index_split, config + ) + + def _finalize(self) -> None: + """Abstract method implementation + """ + self.token_lookup = list(self.config.tokenizer.inv_vocab.keys()) + # Account for the single and two token ids + self.sample_index = self._build_sample_index( + self.config.sequence_length - 3, 2 if self.config.classification_head else 1 + ) + + @staticmethod + def _key_config_attributes() -> List[str]: + 
"""Inherited method implementation + + Returns: + List[str]: The key config attributes + """ + return super( + BERTMaskedWordPieceDataset, BERTMaskedWordPieceDataset + )._key_config_attributes() + ["classification_head",] + + def __getitem__(self, idx: int) -> Dict[str, Union[int, numpy.ndarray]]: + """Abstract method implementation + + Args: + idx (int): The index into the dataset + + Returns: + Dict[str, Union[int, numpy.ndarray]]: The + """ + idx_beg, idx_end, target_sequence_length = self.sample_index[idx] + sample = [self.dataset[i] for i in range(idx_beg, idx_end)] + numpy_random_state = numpy.random.RandomState( + seed=(self.config.random_seed + idx) % 2 ** 32 + ) + + assert target_sequence_length <= self.config.sequence_length + + # Split the sample into contiguous subsegments A and B + pivot = len(sample) + is_next_random = False + if self.config.classification_head: + assert len(sample) > 1, "the sample must contain at least two sentences" + pivot = 1 + if len(sample) >= 3: + pivot = numpy_random_state.randint(low=1, high=len(sample)) + is_next_random = numpy_random_state.random() < 0.5 + split_A = [] + for sample_a in sample[:pivot]: + split_A.extend(sample_a) + split_B = [] + for sample_b in sample[pivot:]: + split_B.extend(sample_b) + if is_next_random: + split_A, split_B = split_B, split_A + + # Trim the subsegments from either end to a desired joint length + length_A = len(split_A) + length_B = len(split_B) + if length_A + length_B <= target_sequence_length: + truncated = False + else: + while length_A + length_B > target_sequence_length: + split = split_A if length_A > length_B else split_B + if numpy_random_state.random() < 0.5: + del split[0] + else: + del split[-1] + length_A = len(split_A) + length_B = len(split_B) + truncated = True + + # Merge the subsegments and create the token assignment labels + tokens = [ + self.config.tokenizer.cls, + *split_A, + self.config.tokenizer.sep, + ] + assignments = [0 for _ in range(1 + len(split_A) + 1)] + if split_B: + tokens += [*split_B, self.config.tokenizer.sep] + assignments += [1 for _ in range(len(split_B) + 1)] + + # Masking + tokens, masked_positions, masked_labels, _, _ = self._create_masked_lm_predictions( + tokens, target_sequence_length, numpy_random_state + ) + + # Pad the sequences and convert to NumPy + length_toks = len(tokens) + length_pads = self.config.sequence_length - length_toks + assert length_pads >= 0 + + tokens = numpy.array(tokens, dtype=numpy.int64) + tokens = numpy.pad(tokens, (0, length_pads), constant_values=self.config.tokenizer.pad) + + assignments = numpy.array(assignments, dtype=numpy.int64) + assignments = numpy.pad( + assignments, (0, length_pads), constant_values=self.config.tokenizer.pad + ) + + # Get the padding mask + mask_pads = numpy.ones(length_toks, dtype=numpy.int64) + mask_pads = numpy.pad( + mask_pads, (0, length_pads), constant_values=self.config.tokenizer.pad + ) + + # Mask the labels + labels = numpy.zeros(self.config.sequence_length, dtype=numpy.int64) - 1 + labels[masked_positions] = masked_labels + + # Get the loss mask + mask_loss = numpy.zeros(self.config.sequence_length, dtype=numpy.int64) + mask_loss[masked_positions] = 1 + + return { + "text": tokens, + "types": assignments, + "labels": labels, + "is_random": int(is_next_random), + "padding_mask": mask_pads, + "loss_mask": mask_loss, + "truncated": int(truncated), + } + + def _get_token_mask(self, numpy_random_state: numpy.random.RandomState) -> Optional[int]: + """Abstract method implementation + + 80% of the time, replace 
the token id with mask token id. 10% of the time, replace token id + with a random token id from the vocabulary. 10% of the time, do nothing. + + Args: + numpy_random_state (RandomState): The NumPy random state + + Returns: + Optional[int]: The replacement token id or None + """ + if numpy_random_state.random() < 0.8: + return self.config.tokenizer.mask + else: + if numpy_random_state.random() >= 0.5: + return self.token_lookup[numpy_random_state.randint(0, len(self.token_lookup))] + return None diff --git a/megatron/core/datasets/blended_dataset.py b/megatron/core/datasets/blended_dataset.py index 421d193c3b..a21fe02202 100644 --- a/megatron/core/datasets/blended_dataset.py +++ b/megatron/core/datasets/blended_dataset.py @@ -30,7 +30,7 @@ class BlendedDataset(torch.utils.data.Dataset): size (int): The number of samples to draw from the blend - config (BlendedMegatronDatasetConfig): The config object which informs dataset creation + config (BlendedMegatronDatasetConfig): The config Raises: RuntimeError: When the dataset has fewer or more samples than 'size' post-initialization @@ -68,7 +68,9 @@ def __init__( unique_identifiers["weights"] = self.weights unique_identifiers["size"] = self.size - self.unique_description = json.dumps(unique_identifiers, indent=4) + self.unique_description = json.dumps( + unique_identifiers, indent=4, default=lambda obj: obj.unique_identifiers + ) self.unique_description_hash = hashlib.md5( self.unique_description.encode("utf-8") ).hexdigest() diff --git a/megatron/core/datasets/blended_megatron_dataset_builder.py b/megatron/core/datasets/blended_megatron_dataset_builder.py index c5c509ea7c..af540fd7fb 100644 --- a/megatron/core/datasets/blended_megatron_dataset_builder.py +++ b/megatron/core/datasets/blended_megatron_dataset_builder.py @@ -2,21 +2,24 @@ import logging import math -from typing import Any, Callable, List, Optional, Tuple, Type, Union +from typing import Any, Callable, Iterable, List, Optional, Tuple, Type, Union import numpy import torch from megatron.core.datasets.blended_dataset import BlendedDataset from megatron.core.datasets.blended_megatron_dataset_config import BlendedMegatronDatasetConfig -from megatron.core.datasets.indexed_dataset import MMapIndexedDataset -from megatron.core.datasets.megatron_dataset import MegatronDataset +from megatron.core.datasets.megatron_dataset import LowLevelDataset, MegatronDataset, MockDataset from megatron.core.datasets.utils import Split, normalize logger = logging.getLogger(__name__) +MidLevelDataset = Union[MegatronDataset, MockDataset] + +TopLevelDataset = Union[BlendedDataset, MidLevelDataset] + DistributedDataset = Union[ - BlendedDataset, MegatronDataset, MMapIndexedDataset, torch.utils.data.Dataset + TopLevelDataset, MidLevelDataset, LowLevelDataset, torch.utils.data.Dataset ] @@ -33,13 +36,15 @@ class BlendedMegatronDatasetBuilder(object): """ def __init__( - self, cls: Type[MegatronDataset], sizes: List[int], config: BlendedMegatronDatasetConfig, + self, cls: Type[MidLevelDataset], sizes: List[int], config: BlendedMegatronDatasetConfig, ): self.cls = cls self.sizes = sizes self.config = config - def build(self) -> List[Optional[Union[BlendedDataset, MegatronDataset]]]: + assert not self.config.mock or issubclass(self.cls, MockDataset) + + def build(self) -> List[Optional[TopLevelDataset]]: """Build all dataset splits according to the provided blend(s) This method is distributed-aware and must be called on all ranks. 
@@ -50,24 +55,28 @@ def build(self) -> List[Optional[Union[BlendedDataset, MegatronDataset]]]: splits from separate distributions. Returns: - List[Optional[Union[BlendedDataset, MegatronDataset]]]: A list of either - MegatronDataset or BlendedDataset (or None) per split + List[Optional[TopLevelDataset]]: A list containing a dataset instance (or None) per + split """ return self._build_blended_dataset_splits() - def _build_blended_dataset_splits( - self, - ) -> List[Optional[Union[BlendedDataset, MegatronDataset]]]: + def _build_blended_dataset_splits(self,) -> List[Optional[TopLevelDataset]]: """Build all dataset splits according to the provided blend(s) See the BlendedMegatronDatasetBuilder.build alias for more information. Returns: - List[Optional[Union[BlendedDataset, MegatronDataset]]]: A list of either - MegatronDataset or BlendedDataset (or None) per split + List[Optional[TopLevelDataset]]: A list containing a dataset instance (or None) per + split """ - if self.config.blend: + # Return fake "mock" datasets + if self.config.mock: + + return self._build_megatron_dataset_splits(None, None, self.sizes) + + # All splits come from the same distribution + elif self.config.blend: blend = self.config.blend split = self.config.split_matrix @@ -117,6 +126,7 @@ def _build_blended_dataset_splits( return blended_datasets + # Each split comes from a separate distribution else: blended_datasets = [] for i in range(len(Split)): @@ -170,30 +180,33 @@ def _build_blended_dataset_splits( return blended_datasets def _build_megatron_dataset_splits( - self, path_prefix: str, split: List[float], sizes: List[int], - ) -> List[Optional[MegatronDataset]]: - """Build each MegatronDataset split from a single MMapIndexedDataset + self, dataset_path: Optional[str], split: List[float], sizes: List[int], + ) -> List[Optional[MidLevelDataset]]: + """Build each MidLevelDataset split from a single LowLevelDataset Args: - path_prefix (str): The MMapIndexedDataset .bin and .idx file prefix + dataset_path (Optional[str]): The path on disk which defines the underlying + LowLevelDataset, e.g. 
the .bin and .idx file prefix when self.cls is of type + IndexedMegatronDataset or None when self.cls is of type MockDataset split (List[Tuple[float, float]]): The dataset split matrix sizes (List[int]): The number of total samples to draw from each split Returns: - List[Optional[MegatronDataset]]: The MegatronDatset (or None) per split + List[Optional[MidLevelDataset]]: The MidLevelDataset (or None) per split """ - indexed_dataset = self.build_generic_dataset( - MMapIndexedDataset, self.config.is_built_on_rank, path_prefix, self.cls.is_multimodal(), - ) - - if indexed_dataset is not None: - if self.cls.is_split_by_sequence(): - num_elements = indexed_dataset.sequence_lengths.shape[0] - else: - num_elements = indexed_dataset.document_indices.shape[0] - 1 + # Build the low level dataset + if issubclass(self.cls, MockDataset): + low_level_dataset = None + elif issubclass(self.cls, MegatronDataset): + low_level_dataset = self.cls.build_low_level_dataset(dataset_path, self.config) + else: + raise NotImplementedError + # Build the split indices for the low level dataset + if low_level_dataset is not None: + num_elements = self.cls.numel_low_level_dataset(low_level_dataset) split_indices = [] for i, _ in enumerate(Split): if split[i] is not None: @@ -207,16 +220,18 @@ def _build_megatron_dataset_splits( else: split_indices = [None for _ in Split] - megatron_datasets = [] + # Build the mid level dataset + mid_level_datasets = [] for i, _split in enumerate(Split): - if split[i] is None: - megatron_datasets.append(None) + if not self.config.mock and (split[i] is None or sizes[i] == 0): + mid_level_datasets.append(None) else: - megatron_datasets.append( + mid_level_datasets.append( self.build_generic_dataset( self.cls, self.config.is_built_on_rank, - indexed_dataset, + low_level_dataset, + dataset_path, split_indices[i], sizes[i], _split, @@ -224,19 +239,21 @@ def _build_megatron_dataset_splits( ) ) - return megatron_datasets + return mid_level_datasets @staticmethod def build_generic_dataset( - cls: Type[DistributedDataset], is_built_on_rank: Callable, *args: Any - ) -> Optional[DistributedDataset]: + cls: Union[Type[DistributedDataset], Callable], is_built_on_rank: Callable, *args: Any + ) -> Optional[Union[DistributedDataset, Iterable]]: """Build the DistributedDataset - Return None if and only if the underlying MegatronDataset class is not built on the current - rank and torch.distributed is initialized. + Return None if and only if the underlying dataset class is not built on the current rank + and torch.distributed is initialized. Args: - cls (Type[DistributedDataset]): The DistributedDataset class to be built + cls (Union[Type[DistributedDataset], Callable]): The DistributedDataset class to be + built. In special cases, e.g. when we are building the low level dataset for a + RawMegatronDataset instance, we can accept a Callable which returns an Iterable. 
args (Tuple[Any]): The positional arguments used to build the provided DistributedDataset class @@ -245,7 +262,8 @@ def build_generic_dataset( Exception: When the dataset constructor raises an OSError Returns: - Optional[DistributedDataset]: The DistributedDataset instantion or None + Optional[Union[DistributedDataset, Iterable]]: The DistributedDataset instantion, the + Iterable instantiation, or None """ if torch.distributed.is_initialized(): rank = torch.distributed.get_rank() diff --git a/megatron/core/datasets/blended_megatron_dataset_config.py b/megatron/core/datasets/blended_megatron_dataset_config.py index 9f8344e791..60ecdf190b 100644 --- a/megatron/core/datasets/blended_megatron_dataset_config.py +++ b/megatron/core/datasets/blended_megatron_dataset_config.py @@ -8,6 +8,7 @@ import torch +from megatron.core.datasets.megatron_tokenizer import MegatronTokenizer from megatron.core.datasets.utils import Split, log_single_rank, normalize from megatron.core.parallel_state import get_virtual_pipeline_model_parallel_rank @@ -16,8 +17,8 @@ @dataclass class BlendedMegatronDatasetConfig: - """Configuration object for megatron-core blended and megatron datasets - + """Configuration object for Megatron Core datasets + Attributes: is_built_on_rank (Callable): A callable which returns True if the dataset should be built on the current rank. It should be Megatron Core parallelism aware i.e. global rank, group @@ -46,6 +47,12 @@ class BlendedMegatronDatasetConfig: passed in to the constructor. path_to_cache (str): Where all re-useable dataset indices are to be cached. + + mock (bool): Whether to bypass real data loading and validation in favor of mock data + generation. + + tokenizer (Optional[MegatronTokenizer]): The MegatronTokenizer instance or None. Required + for datasets which do online tokenization. 
""" is_built_on_rank: Callable @@ -62,9 +69,15 @@ class BlendedMegatronDatasetConfig: split_matrix: Optional[List[Tuple[float, float]]] = field(init=False, default=None) - path_to_cache: str = None + path_to_cache: Optional[str] = None + + mock: bool = False - def __post_init__(self): + tokenizer: Optional[MegatronTokenizer] = None + + def __post_init__(self) -> None: + """Do asserts and set fields post init + """ if torch.distributed.is_initialized(): gb_rank = torch.distributed.get_rank() vp_rank = get_virtual_pipeline_model_parallel_rank() @@ -73,20 +86,23 @@ def __post_init__(self): self.is_built_on_rank() ), "is_built_on_rank must return True when global rank = 0 and vp rank = 0" - if self.blend_per_split is not None and any(self.blend_per_split): - assert self.blend is None, "blend and blend_per_split are incompatible" - assert len(self.blend_per_split) == len( - Split - ), f"blend_per_split must contain {len(Split)} blends" - if self.split is not None: - self.split = None - log_single_rank(logger, logging.WARNING, f"Let split = {self.split}") - else: - assert self.blend is not None, "one of either blend or blend_per_split must be provided" - assert self.split is not None, "both blend and split must be provided" - split_vector = parse_and_normalize_split(self.split) - self.split_matrix = convert_split_vector_to_split_matrix(split_vector) - log_single_rank(logger, logging.INFO, f"Let split_matrix = {self.split_matrix}") + log_single_rank(logger, logging.INFO, f"mock = {self.mock}") + + if not self.mock: + if self.blend_per_split is not None and any(self.blend_per_split): + assert self.blend is None, "blend and blend_per_split are incompatible" + assert self.split is None, "split and blend_per_split are incompatible" + assert len(self.blend_per_split) == len( + Split + ), f"blend_per_split must contain {len(Split)} blends" + else: + assert ( + self.blend is not None + ), "one of either blend or blend_per_split must be provided" + assert self.split is not None, "both blend and split must be provided" + split_vector = parse_and_normalize_split(self.split) + self.split_matrix = convert_split_vector_to_split_matrix(split_vector) + log_single_rank(logger, logging.INFO, f"Let split_matrix = {self.split_matrix}") def parse_and_normalize_split(split: str) -> List[float]: diff --git a/megatron/core/datasets/gpt_dataset.py b/megatron/core/datasets/gpt_dataset.py index c52fe3abfc..4e31d8037f 100644 --- a/megatron/core/datasets/gpt_dataset.py +++ b/megatron/core/datasets/gpt_dataset.py @@ -1,7 +1,8 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. 
import logging import os +import sys import time from dataclasses import dataclass from typing import Dict, Tuple @@ -9,10 +10,12 @@ import numpy import torch +from megatron import get_args, get_tokenizer from megatron.core.datasets.blended_megatron_dataset_config import BlendedMegatronDatasetConfig from megatron.core.datasets.indexed_dataset import MMapIndexedDataset -from megatron.core.datasets.megatron_dataset import MegatronDataset +from megatron.core.datasets.megatron_dataset import MegatronDataset, MockDataset from megatron.core.datasets.utils import Split, log_single_rank +from megatron.tokenizer.tokenizer import FIM_MIDDLE, FIM_PAD, FIM_PREFIX, FIM_SUFFIX logger = logging.getLogger(__name__) @@ -20,9 +23,84 @@ @dataclass class GPTDatasetConfig(BlendedMegatronDatasetConfig): """Configuration object for Megatron Core GPT datasets + + Attributes: + reset_position_ids (bool): Option to reset the position IDs in the dataset at an interval + + reset_attention_mask (bool): Option to reset the attention mask from the dataset + + eod_mask_loss (bool): Option to enable the EOD mask loss + + vocab_size (int): Size of vocabulary + """ - pass + reset_position_ids: bool = None + + reset_attention_mask: bool = None + + eod_mask_loss: bool = None + + vocab_size: int = sys.maxsize + + def __post_init__(self) -> None: + """Do asserts and set fields post init + """ + super().__post_init__() + + assert self.tokenizer is not None + + assert self.reset_position_ids is not None + assert self.reset_attention_mask is not None + assert self.eod_mask_loss is not None + + +class MockGPTDataset(MockDataset): + """The mock GPT dataset + """ + + def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]: + """Return a sequence_length + 1 token sequence consisting of the following: + - (1) S, the RNG length-sentinel in the range [0, sequence_length) + - (S) tokens + - (1) end of document token + - (sequence_length - S - 1) padding tokens + + Args: + idx (int): The integer seed for mock data generation + + Returns: + Dict[str, numpy.ndarray]: The mock data + """ + tok = 1 + pad = 2 + eod = 0 + + rng = numpy.random.default_rng(seed=[self.split.value, idx]) + length = rng.integers(low=0, high=self.config.sequence_length) + sample_toks = numpy.zeros(length) + tok + sample_pads = numpy.zeros(self.config.sequence_length - length - 1) + pad + sample = numpy.int64(numpy.concatenate([[length], sample_toks, [eod], sample_pads])) + + text = torch.from_numpy(sample).long() + labels = text[1:].contiguous() + tokens = text[:-1].contiguous() + + attention_mask, loss_mask, position_ids = _get_ltor_masks_and_position_ids( + tokens, + eod, + self.config.reset_position_ids, + self.config.reset_attention_mask, + self.config.eod_mask_loss, + ) + + return { + "tokens": tokens, + "labels": labels, + "attention_mask": attention_mask, + "loss_mask": loss_mask, + "position_ids": position_ids, + } class GPTDataset(MegatronDataset): @@ -32,75 +110,129 @@ class GPTDataset(MegatronDataset): indexed_dataset (MMapIndexedDataset): The MMapIndexedDataset around which to build the MegatronDataset + dataset_path (str): The real path on disk to the dataset, for bookkeeping + indexed_indices (numpy.ndarray): The set of the documents indices to expose num_samples (int): The number of samples to draw from the indexed dataset index_split (Split): The indexed_indices Split - config (GPTDatasetConfig): The GPT-specific container for all config sourced parameters + config (GPTDatasetConfig): The config """ def __init__( self, indexed_dataset: 
MMapIndexedDataset, + dataset_path: str, indexed_indices: numpy.ndarray, num_samples: int, index_split: Split, config: GPTDatasetConfig, ) -> None: - super().__init__(indexed_dataset, indexed_indices, num_samples, index_split, config) + super().__init__( + indexed_dataset, dataset_path, indexed_indices, num_samples, index_split, config + ) + self.args = get_args() + self.tokenizer = get_tokenizer() + self.np_rng = numpy.random.RandomState(seed=self.config.random_seed) # rng state for FIM + + self.use_fim = self.args.fim_rate!=0 + if self.use_fim: + self.fim_rate = self.args.fim_rate + self.fim_spm_rate = self.args.fim_spm_rate + self.fragment_fim_rate = self.args.fragment_fim_rate + self.fim_split_sample = self.tokenizer.vocab[self.args.fim_split_sample] if self.args.fim_split_sample is not None else None + + try: + self.suffix_tok_id, self.prefix_tok_id, self.middle_tok_id, self.pad_tok_id = (self.tokenizer.special_tokens[tok] for tok in [FIM_SUFFIX, FIM_PREFIX, FIM_MIDDLE, FIM_PAD]) + except KeyError: + self.suffix_tok_id, self.prefix_tok_id, self.middle_tok_id, self.pad_tok_id = (self.tokenizer.vocab[tok] for tok in [FIM_SUFFIX, FIM_PREFIX, FIM_MIDDLE, FIM_PAD]) + + self.vocab_size = config.vocab_size def _finalize(self) -> None: """Abstract method implementation Load or build/cache the document, sample, and shuffle indices """ - assert isinstance(self.config, GPTDatasetConfig) - ( self.document_index, self.sample_index, self.shuffle_index, ) = self._build_document_sample_shuffle_indices() - def __len__(self) -> int: + @staticmethod + def numel_low_level_dataset(low_level_dataset: MMapIndexedDataset) -> int: """Abstract method implementation + For GPT, the underlying MMapIndexedDataset should be split by sequence, as opposed to, say, + BERT, which should be split by document + + Args: + low_level_dataset (MMapIndexedDataset): The underlying MMapIndexedDataset + Returns: - int: The length of the dataset + int: The number of unique elements in the underlying MMapIndexedDataset """ - return self.sample_index.shape[0] - 1 + return low_level_dataset.sequence_lengths.shape[0] - def __getitem__(self, idx: int) -> Dict[str, numpy.ndarray]: + @staticmethod + def build_low_level_dataset(dataset_path: str, config: GPTDatasetConfig) -> MMapIndexedDataset: """Abstract method implementation Args: - idx (int): The index into the dataset + dataset_path (str): The real path prefix to the MMapIndexedDataset .bin and .idx files + + config (BlendedMegatronDatasetConfig): The dataset config Returns: - Dict[str, numpy.ndarray]: The text ids wrapped in a dictionary + MMapIndexedDataset: The underlying MMapIndexedDataset """ - text, _ = self._query_document_sample_shuffle_indices(idx) - return {"text": text} + return MMapIndexedDataset(dataset_path, False) - @staticmethod - def is_multimodal() -> bool: + def __len__(self) -> int: """Abstract method implementation Returns: - bool: False + int: The length of the dataset """ - return False + return self.sample_index.shape[0] - 1 - @staticmethod - def is_split_by_sequence() -> bool: + def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]: """Abstract method implementation + Args: + idx (int): The index into the dataset + Returns: - bool: True + Dict[str, torch.Tensor]: The text ids wrapped in a dictionary """ - return True + text, _ = self._query_document_sample_shuffle_indices(idx) + + text = torch.from_numpy(text).long() + labels = text[1:].contiguous() + tokens = text[:-1].contiguous() + + assert not torch.any( + tokens >= self.vocab_size + ), "An input 
token is out of bounds of the tokenizer vocabulary" + + attention_mask, loss_mask, position_ids = _get_ltor_masks_and_position_ids( + tokens, + self.config.tokenizer.eod, + self.config.reset_position_ids, + self.config.reset_attention_mask, + self.config.eod_mask_loss, + ) + + return { + "tokens": tokens, + "labels": labels, + "attention_mask": attention_mask, + "loss_mask": loss_mask, + "position_ids": position_ids, + } def _query_document_sample_shuffle_indices( self, idx: int @@ -130,7 +262,7 @@ def _query_document_sample_shuffle_indices( # Add the entire sample sample_parts.append( - self.indexed_dataset.get( + self.dataset.get( self.document_index[doc_index_beg], offset=doc_index_beg_offset, length=doc_index_end_offset - doc_index_beg_offset + 1, @@ -147,11 +279,104 @@ def _query_document_sample_shuffle_indices( offset = 0 if i > doc_index_beg else doc_index_beg_offset length = None if i < doc_index_end else doc_index_end_offset + 1 sample_parts.append( - self.indexed_dataset.get(self.document_index[i], offset=offset, length=length) + self.dataset.get(self.document_index[i], offset=offset, length=length) ) + sample=numpy.concatenate(sample_parts) + + # Code from: https://github.com/EleutherAI/gpt-neox/blob/FIM-clean/megatron/data/gpt2_dataset.py#L109 + # TODO(Hailey): can merge the code below this line with code above this line. + # TODO(Hailey), cont: above already iterates through loop, so just add the permuting in there? + sample = numpy.array(sample, dtype=numpy.int64) + sample_len = sample.shape[0] + # # print(sample, sample.shape) + # # do FIM here, if enabled + # TODO: Do we handle the following point from FIM paper? + # To transform data in the character space for context-level FIM, the tokenized documents have to be decoded back into strings before FIM augmentation. Depending on the vocabulary, some care has to be given to ensure decoding does not introduce any spurious characters into training. For example, utf-8 characters are encoded as multiple tokens with a BPE vocabulary; they can result in fragments from chunking and fail to decode. To prevent unforeseen errors midway through training, we encourage checking for these fragments at the beginning or end of a context and removing them. + eod = self.tokenizer.eod + segment_breaks = numpy.argwhere(sample == eod) # split sample by document + + if not self.use_fim: + return ( + numpy.array(sample, dtype=numpy.int64), + numpy.array(document_ids, dtype=numpy.int64), + ) + + def fim_permute_sequence(sequence, rate): + return permute( + sequence, + self.np_rng, + rate, + self.fim_spm_rate, + self.tokenizer, + truncate_or_pad=False, + suffix_tok_id=self.suffix_tok_id, + prefix_tok_id=self.prefix_tok_id, + middle_tok_id=self.middle_tok_id, + pad_tok_id=self.pad_tok_id, + ) + + def fim_split_and_permute_sequence(sequence): + """ + If self.fim_split_sample is not None, split the sequence. + Then apply FIM on the fragments, or the whole sequence if self.fim_split_sample is None. + """ + if self.fim_split_sample is None: + return fim_permute_sequence(sequence, self.fim_rate) + # fim_split_sample is set: split the sample on this token and permute each fragment separately. + # Typically, if each sample is a repository, then we split again on the file level. + # Each fragment is a file, and we permute the files. 
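+            # Illustrative walk-through (token values are made up):
+            #   sequence        = [a, b, SPLIT, c, d, SPLIT, e]
+            #   fragment_breaks = [[2], [5]]
+            #   -> after the sample-level fim_rate draw succeeds, FIM is applied to
+            #      [a, b], [c, d] and [e] independently (each gated by fragment_fim_rate)
+            #      and the pieces are re-joined with the split token in between.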
+ fragment_breaks = numpy.argwhere(sequence == self.fim_split_sample) + if fragment_breaks.shape == (0, 1): + # no split token in this sample + return fim_permute_sequence(sequence, self.fim_rate) + if not self.np_rng.binomial(1, self.fim_rate): + # don't do FIM preproc + return sequence + # Do FIM on each fragment + curr_start_position = 0 + new_samples = [] + for loc in numpy.nditer(fragment_breaks): + if loc - curr_start_position > 0: + permuted = fim_permute_sequence(sequence[curr_start_position:loc], self.fragment_fim_rate) + new_samples += [permuted, [self.fim_split_sample]] + curr_start_position = loc + 1 # Jump over the split token + # Permute the segment after the last split token + permuted = fim_permute_sequence(sequence[curr_start_position:], self.fragment_fim_rate) + new_samples.append(permuted) + return numpy.concatenate(new_samples) + + if segment_breaks.shape != (0, 1): # then there is an EOD token in this example + curr_start_position = 0 + new_samples = [] + for loc in numpy.nditer(segment_breaks): + # Only permute non-empty segments. + if loc - curr_start_position > 0: + # permute {prefix, suffix, middle} or {suffix, prefix, middle} + permuted = fim_split_and_permute_sequence(sample[curr_start_position:loc]) + new_samples += [permuted, [eod]] + + curr_start_position = loc + 1 # jump over the EOD token + # Permute the segment after the last EOD + permuted = fim_split_and_permute_sequence(sample[curr_start_position:]) + new_samples.append(permuted) + + sample = numpy.concatenate(new_samples) + else: + sample = fim_split_and_permute_sequence(sample) + + # Truncate or pad sequence to max-length + diff = sample.shape[0] - sample_len + if diff > 0: # too long + sample = sample[:sample_len] + elif diff < 0: # too short + sample = numpy.concatenate([sample, numpy.full((-1 * diff), self.pad_tok_id)]) + + assert sample.shape[0] == sample_len + # end FIM-specific code + return ( - numpy.array(numpy.concatenate(sample_parts), dtype=numpy.int64), + numpy.array(sample, dtype=numpy.int64), numpy.array(document_ids, dtype=numpy.int64), ) @@ -181,7 +406,7 @@ def _build_document_sample_shuffle_indices( path_to_cache = self.config.path_to_cache if path_to_cache is None: path_to_cache = os.path.join( - self.indexed_dataset.path_prefix, "cache", f"{type(self).__name__}_indices" + self.dataset.path_prefix, "cache", f"{type(self).__name__}_indices" ) get_path_to = lambda suffix: os.path.join( @@ -267,7 +492,7 @@ def _build_document_sample_shuffle_indices( ) t_beg = time.time() document_index = _build_document_index( - self.indexed_indices, num_epochs, numpy_random_state, separate_final_epoch + self.indices, num_epochs, numpy_random_state, separate_final_epoch ) numpy.save(path_to_document_index, document_index, allow_pickle=True) t_end = time.time() @@ -283,9 +508,9 @@ def _build_document_sample_shuffle_indices( from megatron.core.datasets import helpers assert document_index.dtype == numpy.int32 - assert self.indexed_dataset.sequence_lengths.dtype == numpy.int32 + assert self.dataset.sequence_lengths.dtype == numpy.int32 sample_index = helpers.build_sample_idx( - self.indexed_dataset.sequence_lengths, + self.dataset.sequence_lengths, document_index, sequence_length, num_epochs, @@ -368,7 +593,7 @@ def _get_num_tokens_per_epoch(self) -> int: Returns: int: The number of tokens in a single epoch """ - return int(numpy.sum(self.indexed_dataset.sequence_lengths[self.indexed_indices])) + return int(numpy.sum(self.dataset.sequence_lengths[self.indices])) def _get_num_epochs(self, 
num_tokens_per_epoch: int) -> int: """Calculate the number of epochs @@ -456,3 +681,138 @@ def _build_shuffle_index( numpy_random_state.shuffle(shuffle_idx_last) return numpy.concatenate((shuffle_idx_first, shuffle_idx_last)) + + +# From https://github.com/EleutherAI/gpt-neox/blob/FIM-clean/megatron/data/gpt2_dataset.py#L339 +def permute(sample, np_rng, fim_rate, fim_spm_rate, tokenizer, truncate_or_pad=True, + suffix_tok_id=None, prefix_tok_id=None, middle_tok_id=None, pad_tok_id=None): + """ + Take in a sample (np array w/ size (0,chunklength)) and perform a FIM transformation on it. + Maintain the same sample length (if transform creates a few extra tokens, drop them). + """ + if np_rng.binomial(1, fim_rate): # sample bernoulli dist + + contents = tokenizer.detokenize(sample) + + try: + # A boundary can be =0 (prefix will be empty) + # a boundary can be =len(contents) (suffix will be empty) + # The two boundaries can be equal (middle will be empty) + boundaries = list(np_rng.randint(low=0, high=len(contents) + 1, size=2)) + boundaries.sort() + except ValueError as e: + print(len(contents), contents) + print(e) + raise e + + prefix = contents[:boundaries[0]] + middle = contents[boundaries[0]:boundaries[1]] + suffix = contents[boundaries[1]:] + + prefix = numpy.array([*tokenizer.tokenize(prefix)], dtype=numpy.int64) + middle = numpy.array([*tokenizer.tokenize(middle)], dtype=numpy.int64) + suffix = numpy.array([*tokenizer.tokenize(suffix)], dtype=numpy.int64) + + # here we truncate each given segment to fit the same length as it was before + # A consequence is that we never reach the end of a file? + # we should rather truncate at the context-level + if truncate_or_pad: + # need to make same length as the input. Take the 3 sentinel tokens into account + new_length = suffix.shape[0] + prefix.shape[0] + middle.shape[0] + 3 + diff = new_length - sample.shape[0] + if diff > 0: # too long + if suffix.shape[0] <= diff: # if there's no space to truncate the suffix: stop and report it. atm i should have stopped this from happening + return sample, np_rng + suffix = suffix[:suffix.shape[0] - diff] + elif diff < 0: # too short + suffix = numpy.concatenate([suffix, numpy.full((-1 * diff), pad_tok_id)]) + + if np_rng.binomial(1, fim_spm_rate): + # SPM (variant 2 from FIM paper) + new_sample = numpy.concatenate([ + [prefix_tok_id, suffix_tok_id], suffix, + [middle_tok_id], prefix, middle + ]) + else: + # PSM + new_sample = numpy.concatenate([ + [prefix_tok_id], prefix, + [suffix_tok_id], suffix, + [middle_tok_id], middle + ]) + + else: + # don't do FIM preproc + new_sample = sample + + return new_sample + + +def _get_ltor_masks_and_position_ids( + data: torch.Tensor, + eod_token: int, + reset_position_ids: bool, + reset_attention_mask: bool, + eod_mask_loss: bool, +): + """Build masks and position id for left to right model. 
+
+    Args:
+        data (torch.Tensor): The data tensor that holds the tokens from the dataset
+
+        eod_token (int): ID of the token that is considered the EOD
+
+        reset_position_ids (bool): Switch to reset the document position IDs
+
+        reset_attention_mask (bool): Switch to reset the attention mask
+
+        eod_mask_loss (bool): Switch to enable the EOD mask loss
+
+    Returns:
+        torch.Tensor: The attention mask used for attention
+
+        torch.Tensor: The loss mask used during training
+
+        torch.Tensor: The position IDs of the tokens
+    """
+    seq_length = data.numel()
+
+    attention_mask = torch.tril(torch.ones((seq_length, seq_length), device=data.device)).unsqueeze(
+        0
+    )
+
+    # Loss mask.
+    loss_mask = torch.ones(seq_length, dtype=torch.float, device=data.device)
+    if eod_mask_loss:
+        loss_mask[data == eod_token] = 0.0
+
+    # Position ids.
+    position_ids = torch.arange(seq_length, dtype=torch.long, device=data.device)
+    # We need to clone as the ids will be modified based on batch index.
+    if reset_position_ids:
+        position_ids = position_ids.clone()
+
+    if reset_position_ids or reset_attention_mask:
+        # Find indices where EOD token is.
+        eod_index = position_ids[data == eod_token]
+        # Detach indices from positions if going to modify positions.
+        if reset_position_ids:
+            eod_index = eod_index.clone()
+
+        # Loop through EOD indices:
+        prev_index = 0
+        for j in range(eod_index.numel()):
+            i = eod_index[j]
+            # Mask the attention across document boundaries.
+            if reset_attention_mask:
+                attention_mask[0, (i + 1) :, : (i + 1)] = 0
+            # Reset positions.
+            if reset_position_ids:
+                position_ids[(i + 1) :] -= i + 1 - prev_index
+            prev_index = i + 1
+
+    # Convert attention mask to binary:
+    attention_mask = attention_mask < 0.5
+
+    return attention_mask, loss_mask, position_ids
+
diff --git a/megatron/core/datasets/masked_dataset.py b/megatron/core/datasets/masked_dataset.py
new file mode 100644
index 0000000000..03c922b9d5
--- /dev/null
+++ b/megatron/core/datasets/masked_dataset.py
@@ -0,0 +1,430 @@
+# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
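To make the mask semantics concrete, here is a small illustrative sketch of calling the helper defined just above in gpt_dataset.py on a toy sequence. The token values and the EOD id 0 are made up, and the function is assumed to be importable from that module.

import torch

# assumes _get_ltor_masks_and_position_ids from the hunk above is in scope
tokens = torch.tensor([5, 6, 7, 0, 8, 9])   # 0 stands in for the EOD token id

attention_mask, loss_mask, position_ids = _get_ltor_masks_and_position_ids(
    tokens,
    eod_token=0,
    reset_position_ids=True,
    reset_attention_mask=True,
    eod_mask_loss=True,
)

# attention_mask: shape (1, 6, 6), boolean, True where attention is NOT allowed
#                 (note the final `attention_mask < 0.5` inversion), so positions
#                 after the EOD cannot attend back across the document boundary.
# loss_mask:      tensor([1., 1., 1., 0., 1., 1.])  -> the EOD position is dropped from the loss
# position_ids:   tensor([0, 1, 2, 3, 0, 1])        -> positions restart after the EOD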
+ +import logging +import os +import time +from abc import abstractmethod +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import numpy +import torch + +from megatron.core.datasets.blended_megatron_dataset_config import BlendedMegatronDatasetConfig +from megatron.core.datasets.indexed_dataset import MMapIndexedDataset +from megatron.core.datasets.megatron_dataset import MegatronDataset +from megatron.core.datasets.utils import Split, log_single_rank + +logger = logging.getLogger(__name__) + + +@dataclass +class MaskedWordPieceDatasetConfig(BlendedMegatronDatasetConfig): + """Configuration object for Megatron Core Masked WordPiece datasets + + Attributes: + masking_probability (float): The probability we mask a candidate N-gram + + short_sequence_probability (float): The probability we return a sequence shorter than the + target sequence length + + masking_max_ngram (int): The maximum length N-gram to consider masking or permuting + + masking_do_full_word (bool): Whether we mask the the whole word or its component parts + + masking_do_permutation (bool): Whether we shuffle a subset of candidate N-grams in addition + to masking + + masking_use_longer_ngrams (bool): Wehther to favor longer N-grams over shorter N-grams + + masking_use_geometric_distribution (bool): Whether to draw the size of the N-gram from a + geometric distribution according to SpanBERT https://arxiv.org/abs/1907.10529 (Section 3.1) + """ + + masking_probability: float = None + + short_sequence_probability: float = None + + masking_max_ngram: int = None + + masking_do_full_word: bool = None + + masking_do_permutation: bool = None + + masking_use_longer_ngrams: bool = None + + masking_use_geometric_distribution: bool = None + + def __post_init__(self) -> None: + """Do asserts and set fields post init + """ + super().__post_init__() + + assert self.tokenizer is not None + + assert self.masking_probability is not None + assert self.short_sequence_probability is not None + assert self.masking_max_ngram is not None + assert self.masking_do_full_word is not None + assert self.masking_do_permutation is not None + assert self.masking_use_longer_ngrams is not None + assert self.masking_use_geometric_distribution is not None + + assert self.masking_probability > 0 and self.masking_probability < 1.0 + assert self.short_sequence_probability >= 0 and self.short_sequence_probability <= 1.0 + assert self.masking_max_ngram > 0 + assert not (self.masking_use_geometric_distribution and self.masking_do_permutation) + + if self.masking_use_geometric_distribution and self.masking_use_longer_ngrams: + log_single_rank( + logger, + logging.WARNING, + "The use of a geometric distribution overrides the default distribution", + ) + + +class MaskedWordPieceDataset(MegatronDataset): + """The semi-abstract base class for masked WordPiece datasets + + This implementation makes the rigid assumption that all inheritor datasets are built upon the + MMapIndexedDataset class. This assumption may be pushed down to the inheritors in future if + necessary. + + NB: WordPiece tokenization prepends a double hash "##" to all tokens/pieces in a word, save the + first token/piece. 
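+
+    For example, a word like "unaffable" is stored as the pieces ["un", "##aff", "##able"];
+    only the first piece lacks the "##" marker.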
+ + Args: + indexed_dataset (MMapIndexedDataset): The MMapIndexedDataset around which to build the + MegatronDataset + + dataset_path (str): The real path on disk to the dataset, for bookkeeping + + indexed_indices (numpy.ndarray): The set of the documents indices to expose + + num_samples (int): The number of samples to draw from the indexed dataset + + index_split (Split): The indexed_indices Split + + config (MaskedWordPieceDatasetConfig): The config + """ + + def __init__( + self, + indexed_dataset: MMapIndexedDataset, + dataset_path: str, + indexed_indices: numpy.ndarray, + num_samples: int, + index_split: Split, + config: MaskedWordPieceDatasetConfig, + ) -> None: + super().__init__( + indexed_dataset, dataset_path, indexed_indices, num_samples, index_split, config + ) + + @staticmethod + def numel_low_level_dataset(low_level_dataset: MMapIndexedDataset) -> int: + return low_level_dataset.document_indices.shape[0] - 1 + + @staticmethod + def build_low_level_dataset( + dataset_path: str, config: MaskedWordPieceDatasetConfig + ) -> MMapIndexedDataset: + return MMapIndexedDataset(dataset_path) + + @staticmethod + def _key_config_attributes() -> List[str]: + """Inherited method implementation + + Returns: + List[str]: The key config attributes + """ + return super(MaskedWordPieceDataset, MaskedWordPieceDataset)._key_config_attributes() + [ + "masking_probability", + "short_sequence_probability", + "masking_max_ngram", + "masking_do_full_word", + "masking_do_permutation", + "masking_use_longer_ngrams", + "masking_use_geometric_distribution", + ] + + def __len__(self) -> int: + return self.sample_index.shape[0] + + def _build_sample_index( + self, sequence_length: int, min_sentences_per_sample: int + ) -> numpy.ndarray: + path_to_cache = self.config.path_to_cache + if path_to_cache is None: + path_to_cache = os.path.join( + self.dataset.path_prefix, "cache", f"{type(self).__name__}_indices" + ) + + get_path_to = lambda suffix: os.path.join( + path_to_cache, f"{self.unique_description_hash}-{type(self).__name__}-{suffix}" + ) + path_to_description = get_path_to("description.txt") + path_to_sample_index = get_path_to("sample_index.npy") + cache_hit = all(map(os.path.isfile, [path_to_description, path_to_sample_index,],)) + + num_epochs = numpy.iinfo(numpy.int32).max - 1 + + if not cache_hit and torch.distributed.get_rank() == 0: + log_single_rank( + logger, + logging.INFO, + f"Build and save the {type(self).__name__} {self.index_split.name} indices", + ) + + os.makedirs(path_to_cache, exist_ok=True) + + # Write the description + with open(path_to_description, "wt") as writer: + writer.write(self.unique_description) + + # Build the sample index + log_single_rank( + logger, + logging.INFO, + f"\tBuild and save the sample index to {os.path.basename(path_to_sample_index)}", + ) + t_beg = time.time() + from megatron.core.datasets import helpers + + # Add +1 for access to document upper bound + indices = numpy.append(self.indices, self.indices[-1] + 1) + + sample_index = helpers.build_mapping( + self.dataset.document_indices[indices], + self.dataset.sequence_lengths, + num_epochs, + self.num_samples, + sequence_length, + self.config.short_sequence_probability, + self.config.random_seed, + False, + min_sentences_per_sample, + ) + numpy.save(path_to_sample_index, sample_index, allow_pickle=True) + t_end = time.time() + log_single_rank(logger, logging.DEBUG, f"\t> time elapsed: {t_end - t_beg:4f} seconds") + + log_single_rank( + logger, logging.INFO, f"> total number of samples: 
{sample_index.shape[0]}" + ) + log_single_rank(logger, logging.INFO, f"> total number of epochs: {num_epochs}") + + return sample_index + + log_single_rank( + logger, logging.INFO, f"Load the {type(self).__name__} {self.index_split.name} indices" + ) + + log_single_rank( + logger, + logging.INFO, + f"\tLoad the sample index from {os.path.basename(path_to_sample_index)}", + ) + t_beg = time.time() + sample_index = numpy.load(path_to_sample_index, allow_pickle=True, mmap_mode="r") + t_end = time.time() + log_single_rank(logger, logging.DEBUG, f"\t> time elapsed: {t_end - t_beg:4f} seconds") + + return sample_index + + def _create_masked_lm_predictions( + self, + token_ids: List[int], + target_sequence_length: int, + numpy_random_state: numpy.random.RandomState, + ) -> Tuple[List[int], List[int], List[int], List[int], List[Tuple[List[int], List[int]]]]: + """Creates the predictions for the masked LM objective + + Args: + token_ids (List[int]): The token ids + target_sequence_length (int): The target sequence length + numpy_random_state (numpy.random.RandomState): The NumPy random state + + Returns: + Tuple[List[int], List[int], List[int], List[int], List[Tuple[List[int], List[int]]]]: + 1. masked_token_ids -> The masked sequence + 2. masked_positions -> The indices for the masked token ids + 3. masked_labels -> The original token ids for the masked token ids + 4. boundaries -> The sentence and word boundaries for the sequence + 4. masked_spans -> The masked positions and labels with N-gram info intact + """ + # Build the token sentence and word boundaries and the masking candidates + # e.g. [cls, id, ##id, ##id, id, ##id, sep, id, ##id, sep] + # -> boundaries: [1, 1, 0, 0, 1, 0, 1, 1, 0, 1] + # -> candidates with whole word masking: [[1, 2, 3], [4, 5], [7, 8]] + # -> candidates sans whole word masking: [[1], [2], [3], [4], [5], [7], [8]] + boundaries = [] + candidates = [] + for i, token_id in enumerate(token_ids): + if token_id == self.config.tokenizer.cls or token_id == self.config.tokenizer.sep: + boundaries.append(1) + else: + if not self.config.tokenizer.inv_vocab[token_id].startswith("##"): + boundaries.append(1) + candidates.append([i]) + else: + boundaries.append(0) + if self.config.masking_do_full_word and len(candidates) > 0: + candidates[-1].append(i) + else: + candidates.append([i]) + + n_maskings = min( + self.config.masking_probability * target_sequence_length, + max(1, int(round(len(token_ids) * self.config.masking_probability))), + ) + + ngram_nvals = numpy.arange(self.config.masking_max_ngram, dtype=numpy.int64) + 1 + + # By default, the N-gram probabilites are inversely proportional to N + # e.g. 
N = 3 + # -> P = array([0.54545455, 0.27272727, 0.18181818]) + nprobs = 1.0 / ngram_nvals + nprobs = nprobs / nprobs.sum(keepdims=True) + if self.config.masking_use_longer_ngrams: + nprobs = nprobs[::-1] + + # Create a nested list of depth 3 + # layer 1: the candidate dimension + # layer 2: the N-gram dimension + # layer 3: the token dimension + candidate_ngrams = [ + [candidates[idx : idx + n] for n in ngram_nvals] for idx in range(len(candidates)) + ] + numpy_random_state.shuffle(candidate_ngrams) + + masked_token_ids = list(token_ids) + masked_positions_and_labels = [] + masked_spans = [] + masked_indices = set() + for candidate_idx in range(len(candidate_ngrams)): + n_ngrams = len(candidate_ngrams[candidate_idx]) + + # Stop when we hit our desired number of maskings + if len(masked_positions_and_labels) >= n_maskings: + break + + # Do nothing for candidates with no ngrams + if not candidate_ngrams[candidate_idx]: + continue + + # Choose the initial value of N + if self.config.masking_use_geometric_distribution: + # Sample N from a geometric distribution with p = 0.2 and clip + # i.e. SpanBERT + # -> https://arxiv.org/abs/1907.10529 (Section 3.1) + p = 0.2 + n = min(numpy_random_state.geometric(p), self.config.masking_max_ngram) + else: + p = nprobs[:n_ngrams] / nprobs[:n_ngrams].sum(keepdims=True) + n = numpy_random_state.choice(ngram_nvals[:n_ngrams], p=p) + + while True: + ngram_indices = sum(candidate_ngrams[candidate_idx][n - 1], []) + n = n - 1 + # Success: masking this N-gram puts us below the desired number of maskings + if n_maskings >= len(masked_positions_and_labels) + len(ngram_indices): + skip_candidate = False + break + # Failure: no N-grams remain for this candidate + if n == 0: + skip_candidate = True + break + + # Do nothing for candidates whose 1-gram is too long + if skip_candidate: + continue + + # Do nothing for candidate indices which have already been masked + if any(map(lambda idx: idx in masked_indices, ngram_indices)): + continue + + # Mask the tokens and record their original positions and values + for index in ngram_indices: + masked_indices.add(index) + mask = self._get_token_mask(numpy_random_state) + if mask is None: + masked_token_ids[index] = token_ids[index] + else: + masked_token_ids[index] = mask + masked_positions_and_labels.append((index, token_ids[index])) + + masked_spans.append((ngram_indices, [token_ids[index] for index in ngram_indices])) + + assert len(masked_positions_and_labels) <= n_maskings + + numpy_random_state.shuffle(candidate_ngrams) + + if self.config.masking_do_permutation: + + n_swappings = n_maskings + + permuted_indices = set() + for candidate_idx in range(len(candidate_ngrams)): + n_ngrams = len(candidate_ngrams[candidate_idx]) + + if len(permuted_indices) >= n_swappings: + break + + # Do nothing for candidates with no ngrams + if not candidate_ngrams[candidate_idx]: + continue + + p = nprobs[:n_ngrams] / nprobs[:n_ngrams].sum(keepdims=True) + n = numpy.random.choice(ngram_nvals[:n_ngrams], p=p) + + while True: + ngram_indices = sum(candidate_ngrams[candidate_idx][n - 1], []) + n = n - 1 + # Success: swapping this N-gram puts us below the desired number of swappings + if n_swappings >= len(permuted_indices) + len(ngram_indices): + skip_candidate = False + break + # Failure: no N-grams remain for this candidate + if n == 0: + skip_candidate = True + break + + # Do nothing for candidates whose 1-gram is too long + if skip_candidate: + continue + + # Do nothing for candidate indices which have already been masked or permuted + if 
any( + map(lambda idx: idx in masked_indices or idx in permuted_indices, ngram_indices) + ): + continue + + for index in ngram_indices: + permuted_indices.add(index) + + assert len(permuted_indices) <= n_swappings + + permuted_indices = sorted(permuted_indices) + permuted_indices_copy = list(permuted_indices) + numpy_random_state.shuffle(permuted_indices_copy) + masked_token_ids_copy = list(masked_token_ids) + + for idx, idx_copy in zip(permuted_indices, permuted_indices_copy): + masked_token_ids[idx] = masked_token_ids_copy[idx_copy] + masked_positions_and_labels.append((idx, masked_token_ids_copy[idx])) + + masked_positions_and_labels = sorted(masked_positions_and_labels, key=lambda x: x[0]) + masked_positions = [] + masked_labels = [] + for position, label in masked_positions_and_labels: + masked_positions.append(position) + masked_labels.append(label) + + masked_spans = sorted(masked_spans, key=lambda x: x[0][0]) + + return masked_token_ids, masked_positions, masked_labels, boundaries, masked_spans + + @abstractmethod + def _get_token_mask(self, numpy_random_state: numpy.random.RandomState) -> Optional[int]: + pass diff --git a/megatron/core/datasets/megatron_dataset.py b/megatron/core/datasets/megatron_dataset.py index 21170afa4e..4c8b962c89 100644 --- a/megatron/core/datasets/megatron_dataset.py +++ b/megatron/core/datasets/megatron_dataset.py @@ -2,9 +2,9 @@ import hashlib import json -from abc import ABC, abstractmethod, abstractstaticmethod +from abc import ABC, abstractmethod from collections import OrderedDict -from typing import Dict, List +from typing import Any, Dict, Iterable, List, Union import numpy import torch @@ -13,63 +13,115 @@ from megatron.core.datasets.indexed_dataset import MMapIndexedDataset from megatron.core.datasets.utils import Split +LowLevelDataset = Union[MMapIndexedDataset, Iterable] + class MegatronDataset(ABC, torch.utils.data.Dataset): - """The wrapper class from which dataset classes should inherit e.g. GPTDataset + """The highest level wrapper class from which all dataset classes should inherit Args: - indexed_dataset (MMapIndexedDataset): The MMapIndexedDataset around which to build the - MegatronDataset + dataset (LowLevelDataset): The dataset around which to build the MegatronDataset + + dataset_path (str): The real path on disk to the dataset, for bookkeeping. TODO: subsume + this argument by enforcing auto-bookkeeping in the dataset class type. 
- indexed_indices (numpy.ndarray): The set of the documents indices to expose + indices (numpy.ndarray): The set of the documents indices to expose num_samples (int): The number of samples to draw from the indexed dataset - index_split (Split): The indexed_indices Split + index_split (Split): The indices Split - config (BlendedMegatronDatasetConfig): The container for all config sourced parameters + config (BlendedMegatronDatasetConfig): The config """ def __init__( self, - indexed_dataset: MMapIndexedDataset, - indexed_indices: numpy.ndarray, + dataset: LowLevelDataset, + dataset_path: str, + indices: numpy.ndarray, num_samples: int, index_split: Split, config: BlendedMegatronDatasetConfig, ) -> None: - assert indexed_indices.size > 0 - assert num_samples > 0 - assert self.is_multimodal() == indexed_dataset.multimodal - assert self.is_split_by_sequence() != self.is_split_by_document() - - self.indexed_dataset = indexed_dataset - self.indexed_indices = indexed_indices + self.dataset = dataset + self.dataset_path = dataset_path + self.indices = indices self.num_samples = num_samples self.index_split = index_split self.config = config self.unique_identifiers = OrderedDict() self.unique_identifiers["class"] = type(self).__name__ - self.unique_identifiers["path_prefix"] = self.indexed_dataset.path_prefix + self.unique_identifiers["dataset_path"] = self.dataset_path self.unique_identifiers["num_samples"] = self.num_samples self.unique_identifiers["index_split"] = self.index_split.name for attr in self._key_config_attributes(): self.unique_identifiers[attr] = getattr(self.config, attr) - self.unique_description = json.dumps(self.unique_identifiers, indent=4) + self.unique_description = json.dumps( + self.unique_identifiers, indent=4, default=lambda obj: obj.unique_identifiers + ) self.unique_description_hash = hashlib.md5( self.unique_description.encode("utf-8") ).hexdigest() self._finalize() - @abstractmethod def _finalize(self) -> None: """Build the dataset and assert any subclass-specific conditions """ pass + @staticmethod + def numel_low_level_dataset(low_level_dataset: LowLevelDataset) -> int: + """Return the number of elements in the underlying low level dataset for the purpose of + segregating the train/valid/test split indices + + It may be that the low level dataset can be split any number of ways, depending on the mid + level dataset it supports, which is why we define the "number of elements" function + separately from the __len__ function here in the mid level dataset class + + Args: + low_level_dataset (LowLevelDataset): The underlying low level dataset + + Returns: + int: The number of elements in the underlying low level dataset + """ + raise NotImplementedError + + @staticmethod + def build_low_level_dataset( + dataset_path: str, config: BlendedMegatronDatasetConfig + ) -> LowLevelDataset: + """Build the low level dataset via a function to be called from within + BlendedMegatronDatasetBuilder.build_generic_dataset + + It may be that the low level dataset spans any subset of train/valid/test splits, which is + why we define a static "build" function separately from the constructor in the mid level + dataset class + + Args: + dataset_path (str): The real path on disk to the dataset + + config (BlendedMegatronDatasetConfig): The dataset config + + Returns: + LowLevelDataset: The low level dataset + """ + raise NotImplementedError + + @staticmethod + def _key_config_attributes() -> List[str]: + """Return all config attributes which contribute to uniquely identifying the dataset. 
+ + These attributes will be used to build a uniquely identifying string and MD5 hash which + will be used to cache/load dataset resources from run to run. + + Returns: + List[str]: The key config attributes + """ + return ["random_seed", "sequence_length", "split", "split_matrix", "tokenizer"] + @abstractmethod def __len__(self) -> int: """Return the length of the dataset @@ -80,56 +132,56 @@ def __len__(self) -> int: pass @abstractmethod - def __getitem__(self, idx: int) -> Dict[str, numpy.ndarray]: + def __getitem__(self, idx: int) -> Dict[str, Union[torch.Tensor, numpy.ndarray]]: """Return from the dataset Args: idx (int): The index into the dataset Returns: - Dict[str, numpy.ndarray]: See abstract implementation + Dict[str, Union[torch.Tensor, numpy.ndarray]]: See abstract implementation """ pass - @abstractstaticmethod - def is_multimodal() -> bool: - """Return True if the inheritor class and its internal MMapIndexedDataset are multimodal - Returns: - bool: See abstract implementation - """ - pass +class MockDataset(MegatronDataset): + """The highest level wrapper class from which all dataset classes should inherit - @abstractstaticmethod - def is_split_by_sequence() -> bool: - """Return whether the dataset is split by sequence + The MockDataset is a special, one-off class that should not serve as a precedent for developers + seeking to extend the MegatronDataset. This class is incompatible with BlendedDataset - For example, the GPT train/valid/test split is document agnostic + This class cannibalizes the constructor of the parent class. As such, we do not need to + enumerate the constructor parameters. They may be populated, but most are superfluous and can + be None. Only the split and the config are required. - Returns: - bool: See abstract implementation - """ - pass + Args: + args (Tuple[Any]): The positional arguments used to build an arbitrary MegatronDataset + """ - @classmethod - def is_split_by_document(cls) -> bool: - """Return whether the dataset is split by document + def __init__(self, *args: Any) -> None: + self.split = None + self.config = None - For example, the BERT train/valid/test split is document aware + # Extract a select few parameters + for arg in args: + # Extract the split for RNG parameterization + if issubclass(type(arg), Split): + assert self.split is None + self.split = arg + # Extract the config for sequence_length and mock attribute values + if issubclass(type(arg), BlendedMegatronDatasetConfig): + assert self.config is None + self.config = arg - Returns: - bool: The negation of cls.is_split_by_sequence - """ - return not cls.is_split_by_sequence() + assert self.split is not None + assert self.config is not None - @staticmethod - def _key_config_attributes() -> List[str]: - """Return all config attributes which contribute to uniquely identifying the dataset. + assert self.config.mock - These attributes will be used to build a uniquely identifying string and MD5 hash which - will be used to cache/load the dataset from run to run. 
+ def __len__(self) -> int: + """Return an arbitrary length Returns: - List[str]: The key config attributes + int: The torch.int16 max representable value """ - return ["random_seed", "sequence_length", "split", "split_matrix"] + return torch.iinfo(torch.int16).max diff --git a/megatron/core/datasets/megatron_tokenizer.py b/megatron/core/datasets/megatron_tokenizer.py new file mode 100644 index 0000000000..fbea419969 --- /dev/null +++ b/megatron/core/datasets/megatron_tokenizer.py @@ -0,0 +1,141 @@ +import json +from abc import ABC, abstractmethod +from collections import OrderedDict +from typing import Any + +import numpy + + +class MegatronTokenizer(ABC): + """Abstract class for tokenizer + + Absent a config or class-specific tracking of which objects are uniquely identifying, we must + include all key word arguments as unique identifiers + + Args: + tokenizer_paths (Tuple[str]): All tokenizer source paths or prefixes + + kwargs (Dict[str, Any]): All tokenizer options + """ + + def __init__(self, *tokenizer_paths: str, **tokenizer_options: Any): + + self.unique_identifiers = OrderedDict() + self.unique_identifiers["class"] = type(self).__name__ + self.unique_identifiers["tokenizer_path"] = list(tokenizer_paths) + for option in tokenizer_options: + self.unique_identifiers[option] = str(tokenizer_options[option]) + + self.unique_description = json.dumps(self.unique_identifiers, indent=4) + + super().__init__() + + @abstractmethod + def tokenize(self, text: str) -> numpy.ndarray: + """Convert text to embedding ids + + Args: + text (str): The text to convert + + Returns: + numpy.ndarray: The converted embedding ids + """ + pass + + def detokenize(self, ids: numpy.ndarray) -> str: + """Convert embedding ids to text + + Args: + ids (numpy.ndarray): The ids to convert + + Returns: + str: The converted text + + Raises: + NotImplementedError: Non-abstract, optional method + """ + raise NotImplementedError("{} has no method 'detokenize'".format(type(self).__name__)) + + @property + @abstractmethod + def vocab(self): + """Dictionary from vocab text token to id token + """ + pass + + @property + @abstractmethod + def inv_vocab(self): + """Dictionary from vocab id token to text token + """ + pass + + @property + @abstractmethod + def vocab_size(self): + """The vocabulary size + """ + pass + + @property + def cls(self): + """The CLS token id + + Raises: + NotImplementedError: Non-abstract, optional attribute + """ + raise NotImplementedError("{} has no attribute 'cls'".format(type(self).__name__)) + + @property + def sep(self): + """The SEP token id + + Raises: + NotImplementedError: Non-abstract, optional attribute + """ + raise NotImplementedError("{} has no attribute 'sep'".format(type(self).__name__)) + + @property + def pad(self): + """The PAD token id + + Raises: + NotImplementedError: Non-abstract, optional attribute + """ + raise NotImplementedError("{} has no attribute 'pad'".format(type(self).__name__)) + + @property + def eod(self): + """The EOD token id + + Raises: + NotImplementedError: Non-abstract, optional attribute + """ + raise NotImplementedError("{} has no attribute 'eod'".format(type(self).__name__)) + + @property + def bos(self): + """The BOS token id + + Raises: + NotImplementedError: Non-abstract, optional attribute + """ + raise NotImplementedError("{} has no attribute 'bos'".format(type(self).__name__)) + + @property + def eos(self): + """The EOS token id + + Raises: + NotImplementedError: Non-abstract, optional attribute + """ + raise NotImplementedError("{} has no attribute 
'eos'".format(type(self).__name__)) + + @property + def mask(self): + """The MASK token id + + Raises: + NotImplementedError: Non-abstract, optional attribute + """ + raise NotImplementedError("{} has no attribute 'mask'".format(type(self).__name__)) diff --git a/megatron/core/datasets/multimodal_dataset.py b/megatron/core/datasets/multimodal_dataset.py new file mode 100644 index 0000000000..3cfd011c77 --- /dev/null +++ b/megatron/core/datasets/multimodal_dataset.py @@ -0,0 +1,58 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + +from dataclasses import dataclass +from typing import Dict + +import numpy +import torch + +from megatron.core.datasets.gpt_dataset import GPTDatasetConfig, MockGPTDataset + + +@dataclass +class MultimodalDatasetConfig(GPTDatasetConfig): + """Configuration object for Megatron Core Multimodal datasets. + + + Note: This is unused at the moment and may be missing features. Follow-up changes will use this. + + Attributes: + image_h (int): Image height. + image_w (int): Image width. + """ + + image_h: int = None + image_w: int = None + + def __post_init__(self) -> None: + super().__post_init__() + + assert self.image_h is not None + assert self.image_w is not None + + +class MockMultimodalDataset(MockGPTDataset): + """Mock multimodal dataset. + + + This is unused at the moment and may be missing features. Follow-up changes will use this. + """ + + def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]: + """Return a sample that contains a dummy image, text sequence and the associated labels and cost and attention masks. + + Args: + idx (int): The integer seed for mock data generation. + + Returns: + Dict[str, numpy.ndarray]: The mock data. + """ + # Get a text sample. + sample = super().__getitem__(idx) + + # Add mock input image. + sample["image"] = torch.zeros( + (3, self.config.image_h, self.config.image_w), dtype=torch.float32 + ) + + return sample diff --git a/megatron/core/datasets/t5_dataset.py b/megatron/core/datasets/t5_dataset.py new file mode 100644 index 0000000000..9baa16368c --- /dev/null +++ b/megatron/core/datasets/t5_dataset.py @@ -0,0 +1,239 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + +from collections import deque +from dataclasses import dataclass, field +from typing import Dict, List, Optional, Union + +import numpy + +from megatron.core.datasets.indexed_dataset import MMapIndexedDataset +from megatron.core.datasets.masked_dataset import ( + MaskedWordPieceDataset, + MaskedWordPieceDatasetConfig, +) +from megatron.core.datasets.utils import Split + + +@dataclass +class T5MaskedWordPieceDatasetConfig(MaskedWordPieceDatasetConfig): + """Configuration object for Megatron Core T5 WordPiece datasets + + NB: As a temporary holdover from Megatron-LM. The T5 tokenizer has an attribute which defines + a number of special sentinel tokens used during sampling. The assert in __post_init__ serves to + preserve compatibility with Megatron-LM until the T5 tokenizer is in Megatron Core. 
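+
+    The additional special tokens checked by the assert below are the span sentinels that are
+    consumed, one per masked span, when the encoder and decoder sequences are assembled in
+    T5MaskedWordPieceDataset.__getitem__.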
+ + Attributes: + sequence_length_encoder (Optional[int]): A sequence_length alias and the sequence length + for the encoder + + sequence_length_decoder (int): The sequence length for the decoder + """ + + sequence_length_encoder: Optional[int] = field(init=False, default=None) + + sequence_length_decoder: int = None + + def __post_init__(self) -> None: + """Do asserts and set fields post init + """ + super().__post_init__() + + self.sequence_length_encoder = self.sequence_length + + assert self.sequence_length_encoder is not None + assert self.sequence_length_decoder is not None + + assert len(self.tokenizer.additional_special_tokens_ids) > 0 + + +class T5MaskedWordPieceDataset(MaskedWordPieceDataset): + """The T5 dataset that assumes WordPiece tokenization + + Args: + indexed_dataset (MMapIndexedDataset): The MMapIndexedDataset around which to build the + MegatronDataset + + dataset_path (str): The real path on disk to the dataset, for bookkeeping + + indexed_indices (numpy.ndarray): The set of the documents indices to expose + + num_samples (int): The number of samples to draw from the indexed dataset + + index_split (Split): The indexed_indices Split + + config (T5MaskedWordPieceDatasetConfig): The config + """ + + def __init__( + self, + indexed_dataset: MMapIndexedDataset, + dataset_path: str, + indexed_indices: numpy.ndarray, + num_samples: int, + index_split: Split, + config: T5MaskedWordPieceDatasetConfig, + ) -> None: + super().__init__( + indexed_dataset, dataset_path, indexed_indices, num_samples, index_split, config + ) + + def _finalize(self) -> None: + """Abstract method implementation + """ + self.token_lookup = list(self.config.tokenizer.inv_vocab.keys()) + # Account for the single and single token ids + self.sample_index = self._build_sample_index(self.config.sequence_length - 2, 1) + + @staticmethod + def _key_config_attributes() -> List[str]: + """Inherited method implementation + + Returns: + List[str]: The key config attributes + """ + return super( + T5MaskedWordPieceDataset, T5MaskedWordPieceDataset + )._key_config_attributes() + ["sequence_length_decoder",] + + def __getitem__(self, idx: int) -> Dict[str, Union[int, numpy.ndarray]]: + """Abstract method implementation + + Args: + idx (int): The index into the dataset + + Returns: + Dict[str, Union[int, numpy.ndarray]]: The + """ + idx_beg, idx_end, target_sequence_length = self.sample_index[idx] + sample = [self.dataset[i] for i in range(idx_beg, idx_end)] + + numpy_random_state = numpy.random.RandomState( + seed=(self.config.random_seed + idx) % 2 ** 32 + ) + + assert target_sequence_length <= self.config.sequence_length + + # Flatten the sample into a list of tokens + tokens = [token for sentence in sample for token in sentence] + + # Truncate the list of tokens to a desired length + truncated = len(tokens) > target_sequence_length + tokens = tokens[:target_sequence_length] + + # Masking + (tokens, _, _, _, masked_spans,) = self._create_masked_lm_predictions( + tokens, target_sequence_length, numpy_random_state + ) + + # Prepare the encoder input and decoder input and output + sentinels = deque(self.config.tokenizer.additional_special_tokens_ids) + encoder_input = [] + decoder_input = [self.config.tokenizer.bos] + decoder_output = [] + idx_beg = 0 + for indices, labels in masked_spans: + sentinel = sentinels.popleft() + + # set the end index + idx_end = indices[0] + + encoder_input.extend(tokens[idx_beg:idx_end]) + encoder_input.append(sentinel) + + decoder_input.append(sentinel) + decoder_input.extend(labels) + 
+ decoder_output.append(sentinel) + decoder_output.extend(labels) + + # set the start index + idx_beg = indices[-1] + 1 + + encoder_input.extend(tokens[idx_beg:]) + decoder_output.append(self.config.tokenizer.eos) + + # Pad the sequences and convert to NumPy + length_toks_encoder = len(encoder_input) + length_toks_decoder = len(decoder_input) + length_pads_encoder = self.config.sequence_length_encoder - length_toks_encoder + length_pads_decoder = self.config.sequence_length_decoder - length_toks_decoder + assert length_pads_encoder >= 0 + assert length_pads_decoder >= 0 + + encoder_input = numpy.array(encoder_input, dtype=numpy.int64) + encoder_input = numpy.pad( + encoder_input, (0, length_pads_encoder), constant_values=self.config.tokenizer.pad + ) + + decoder_input = numpy.array(decoder_input, dtype=numpy.int64) + decoder_input = numpy.pad( + decoder_input, (0, length_pads_decoder), constant_values=self.config.tokenizer.pad + ) + + # Create attention and history masks + mask_encoder = self._make_attention_mask(encoder_input, encoder_input) + mask_encoder_decoder = self._make_attention_mask(decoder_input, encoder_input) + mask_decoder = self._make_attention_mask(decoder_input, decoder_input) + mask_decoder = mask_decoder * self._make_history_mask(decoder_input) + + # Mask the labels + decoder_output = numpy.array(decoder_output, dtype=numpy.int64) + decoder_output = numpy.pad(decoder_output, (0, length_pads_decoder), constant_values=-1) + + # Get the loss mask + loss_mask = numpy.zeros(self.config.sequence_length_decoder, dtype=numpy.int64) + loss_mask[:length_toks_decoder] = 1 + + return { + "text_enc": encoder_input, + "text_dec": decoder_input, + "labels": decoder_output, + "loss_mask": loss_mask, + "truncated": int(truncated), + "enc_mask": mask_encoder, + "dec_mask": mask_decoder, + "enc_dec_mask": mask_encoder_decoder, + } + + @staticmethod + def _make_attention_mask( + source_block: numpy.ndarray, target_block: numpy.ndarray + ) -> numpy.ndarray: + """Return a 2-D attention mask + + Args: + source_block (numpy.ndarray): A 1-D array + target_block (numpy.ndarray): A 1-D array + + Returns: + numpy.ndarray: The 2-D attention mask + """ + mask = (target_block[None, :] >= 1) * (source_block[:, None] >= 1) + return mask.astype(numpy.int64) + + @staticmethod + def _make_history_mask(block: numpy.ndarray) -> numpy.ndarray: + """Return a 2-D history (lower-left-triangular) mask + + Args: + block (numpy.ndarray): A 1-D array + + Returns: + numpy.ndarray: The 2-D history (lower-left-triangular) mask + """ + arange = numpy.arange(block.shape[0]) + mask = arange[None,] <= arange[:, None] + return mask.astype(numpy.int64) + + def _get_token_mask(self, numpy_random_state: numpy.random.RandomState) -> int: + """Abstract method implementation + + 100% of the time, replace the token id with mask token id. 
+ + Args: + numpy_random_state (RandomState): The NumPy random state + + Returns: + int: The mask token id + """ + return self.config.tokenizer.mask diff --git a/megatron/core/datasets/utils.py b/megatron/core/datasets/utils.py index 8a3279b5f4..def0fb7611 100644 --- a/megatron/core/datasets/utils.py +++ b/megatron/core/datasets/utils.py @@ -2,7 +2,7 @@ import logging from enum import Enum -from typing import List +from typing import Any, List import numpy import torch @@ -30,13 +30,17 @@ def compile_helpers(): sys.exit(1) -def log_single_rank(logger: logging.Logger, *args, rank=0, **kwargs): +def log_single_rank(logger: logging.Logger, *args: Any, rank: int = 0, **kwargs: Any): """If torch distributed is initialized, log only on rank Args: logger (logging.Logger): The logger to write the logs + args (Tuple[Any]): All logging.Logger.log positional arguments + rank (int, optional): The rank to write on. Defaults to 0. + + kwargs (Dict[str, Any]): All logging.Logger.log keyword arguments """ if torch.distributed.is_initialized(): if torch.distributed.get_rank() == rank: diff --git a/megatron/core/deploy/__init__.py b/megatron/core/deploy/__init__.py new file mode 100644 index 0000000000..f8011007a5 --- /dev/null +++ b/megatron/core/deploy/__init__.py @@ -0,0 +1 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. diff --git a/megatron/core/deploy/gpt/__init__.py b/megatron/core/deploy/gpt/__init__.py new file mode 100644 index 0000000000..f8011007a5 --- /dev/null +++ b/megatron/core/deploy/gpt/__init__.py @@ -0,0 +1 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. diff --git a/megatron/core/deploy/gpt/model_specs.py b/megatron/core/deploy/gpt/model_specs.py new file mode 100644 index 0000000000..50467ef414 --- /dev/null +++ b/megatron/core/deploy/gpt/model_specs.py @@ -0,0 +1,50 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + +from megatron.core.fusions.fused_bias_dropout import get_bias_dropout_add +from megatron.core.tensor_parallel.layers import ColumnParallelLinear, RowParallelLinear +from megatron.core.transformer.attention import SelfAttention, SelfAttentionSubmodules +from megatron.core.transformer.custom_layers.transformer_engine import TENorm +from megatron.core.transformer.dot_product_attention import DotProductAttention +from megatron.core.transformer.enums import AttnMaskType +from megatron.core.transformer.mlp import MLP, MLPSubmodules +from megatron.core.transformer.spec_utils import ModuleSpec +from megatron.core.transformer.transformer_layer import TransformerLayer, TransformerLayerSubmodules + + +# Use this spec for AMMO PTQ and TensorRT-LLM export +def get_gpt_layer_ammo_spec() -> ModuleSpec: + """Mix the native spec with TENorm. + + This is essentially the native local spec except for the layernorm implementation + is using TENorm from Transformer-Engine. This TENorm supports both FusedLayerNorm and RMSNorm and + prevents the apex dependency. 
+ """ + return ModuleSpec( + module=TransformerLayer, + submodules=TransformerLayerSubmodules( + input_layernorm=TENorm, + self_attention=ModuleSpec( + module=SelfAttention, + params={"attn_mask_type": AttnMaskType.causal}, + submodules=SelfAttentionSubmodules( + linear_qkv=ColumnParallelLinear, + core_attention=DotProductAttention, + linear_proj=RowParallelLinear, + ), + ), + self_attn_bda=get_bias_dropout_add, + pre_mlp_layernorm=TENorm, + mlp=ModuleSpec( + module=MLP, + submodules=MLPSubmodules( + linear_fc1=ColumnParallelLinear, linear_fc2=RowParallelLinear, + ), + ), + mlp_bda=get_bias_dropout_add, + # Map TE-layernorm-fusion keys back + sharded_state_dict_keys_map={ + 'input_layernorm.': 'self_attention.linear_qkv.layer_norm_', + 'pre_mlp_layernorm.': 'mlp.linear_fc1.layer_norm_', + }, + ), + ) diff --git a/megatron/core/deploy/gpt/state_dict_hooks.py b/megatron/core/deploy/gpt/state_dict_hooks.py new file mode 100644 index 0000000000..cf1565af89 --- /dev/null +++ b/megatron/core/deploy/gpt/state_dict_hooks.py @@ -0,0 +1,126 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + +from megatron import print_rank_0 + + +def mcore_gpt_load_classic_state_dict_pre_hook( + state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs, +): + """Register a pre-hook to fix the state_dict key difference. + + This prehook is used when trying to load the classic Megatron-LM GPTModel into its + megatron/core variant that uses native ParallelLinear and Transformer-Engine Norm. + Only this particular spec supports post-training quantization and TensorRT-LLM + config export through `nvidia-ammo` package. + + Args: + state_dict: state dictionary + prefix: module name prefix + local_metadata: local metatdata + strict: whether is in strict mode + missing_keys: missing state dict keys + unexpected_keys: unexpected state dict keys + error_msgs: error messages + """ + if "modelopt_state" in state_dict: + state_dict.pop("modelopt_state") + + if "language_model" in state_dict: + language_model_state_dict = state_dict.pop("language_model") + if "embedding" in language_model_state_dict: + if "word_embeddings" in language_model_state_dict["embedding"]: + for key, param in language_model_state_dict["embedding"]["word_embeddings"].items(): + state_dict.update({"embedding.word_embeddings." + key: param}) + if "position_embeddings" in language_model_state_dict["embedding"]: + for key, param in language_model_state_dict["embedding"][ + "position_embeddings" + ].items(): + state_dict.update({"embedding.position_embeddings." + key: param}) + if "transformer" in language_model_state_dict: + for key, param in language_model_state_dict["transformer"].items(): + state_dict.update({"decoder." + key: param}) + else: + for key, param in language_model_state_dict["encoder"].items(): + state_dict.update({"decoder." + key: param}) + if "output_layer" in language_model_state_dict: + for key, param in language_model_state_dict["output_layer"].items(): + state_dict.update({"output_layer." 
+ key: param}) + + print_rank_0("ModelOptGPTModel {}".format(state_dict.keys())) + + module_name_rewrite_list = [ + ("input_norm", "input_layernorm"), + (".attention.query_key_value", ".self_attention.linear_qkv"), + (".attention.dense", ".self_attention.linear_proj"), + ("self_attention.query_key_value", "self_attention.linear_qkv"), + ("self_attention.dense", "self_attention.linear_proj"), + ("post_attention_layernorm", "pre_mlp_layernorm"), + ("post_attention_norm", "pre_mlp_layernorm"), + ("dense_h_to_4h", "linear_fc1"), + ("dense_4h_to_h", "linear_fc2"), + ("final_norm", "final_layernorm"), + ] + + key_rewrite_list = [] + + for key, _ in state_dict.items(): + for old_name, new_name in module_name_rewrite_list: + if old_name in key: + key_rewrite_list += [(key, key.replace(old_name, new_name))] + + for old_key, new_key in key_rewrite_list: + print_rank_0("replace {} with {}".format(old_key, new_key)) + state_dict[new_key] = state_dict[old_key] + state_dict.pop(old_key) + + +def mcore_gpt_load_te_state_dict_pre_hook( + state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs, +): + """Register a pre-hook to fix the state_dict key difference of. + + This prehook is used when trying to load the megatron/core GPTModel that uses a + fused Transformer-Engine ParallelLinear into the variant that uses native ParallelLinear + and Transformer-Engine Norm (effectively to restore the fusion). + Only this particular spec supports post-training quantization and TensorRT-LLM + config export through `nvidia-ammo` package. + + Args: + state_dict: state dictionary + prefix: module name prefix + local_metadata: local metatdata + strict: whether is in strict mode + missing_keys: missing state dict keys + unexpected_keys: unexpected state dict keys + error_msgs: error messages + """ + if "modelopt_state" in state_dict: + state_dict.pop("modelopt_state") + + key_with_te_extra_state_to_pop = [] + + for key, _ in state_dict.items(): + if "_extra_state" in key: + key_with_te_extra_state_to_pop += [key] + + for key in key_with_te_extra_state_to_pop: + state_dict.pop(key) + + module_name_rewrite_list = [ + ("self_attention.linear_qkv.layer_norm_weight", "input_layernorm.weight"), + ("self_attention.linear_qkv.layer_norm_bias", "input_layernorm.bias"), + ("mlp.linear_fc1.layer_norm_weight", "pre_mlp_layernorm.weight"), + ("mlp.linear_fc1.layer_norm_bias", "pre_mlp_layernorm.bias"), + ] + + key_rewrite_list = [] + + for key, _ in state_dict.items(): + for old_name, new_name in module_name_rewrite_list: + if old_name in key: + key_rewrite_list += [(key, key.replace(old_name, new_name))] + + for old_key, new_key in key_rewrite_list: + print_rank_0("replace {} with {}".format(old_key, new_key)) + state_dict[new_key] = state_dict[old_key] + state_dict.pop(old_key) diff --git a/megatron/core/dist_checkpointing/core.py b/megatron/core/dist_checkpointing/core.py index f20a0c3a2d..50384e661b 100644 --- a/megatron/core/dist_checkpointing/core.py +++ b/megatron/core/dist_checkpointing/core.py @@ -1,5 +1,7 @@ # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. +""" Module for managing distributed checkpoints metadata. """ + import json from dataclasses import asdict, dataclass from pathlib import Path @@ -9,12 +11,21 @@ class CheckpointingException(Exception): + """ Base checkpointing related exception """ + pass @dataclass class CheckpointingConfig: - """ Documents backends used in the checkpoint. """ + """ Documents backends used in the checkpoint. 
+ + Checkpoint config keeps track of formats used for storing the sharded tensors + (sharded_backend) and other objects (common_backend). + + Note that versioning is not for the checkpoint content (which is application specific), + but for the checkpoint format itself. + """ sharded_backend: str sharded_backend_version: int = 1 @@ -23,10 +34,26 @@ class CheckpointingConfig: def check_is_distributed_checkpoint(checkpoint_dir): + """ Checks if `metadata.json` exists in the checkpoint and is a valid config. + + Args: + checkpoint_dir: checkpoint directory + + Returns: + bool: True if `metadata.json` exists in the checkpoint and is a valid config. + """ return maybe_load_config(checkpoint_dir) is not None def maybe_load_config(checkpoint_dir: str) -> Optional[CheckpointingConfig]: + """ Returns checkpoint config if `checkpoint_dir` is a distributed checkpoint and None otherwise + + Args: + checkpoint_dir: checkpoint directory + + Returns: + CheckpointingConfig (optional): None if checkpoint is not a valid distributed checkpoint + """ config_path = Path(checkpoint_dir, CONFIG_FNAME) if not config_path.exists(): return None @@ -36,6 +63,15 @@ def maybe_load_config(checkpoint_dir: str) -> Optional[CheckpointingConfig]: def save_config(config: CheckpointingConfig, checkpoint_dir: str): + """ Save given config to checkpoint directory. + + Args: + config: checkpoint config + checkpoint_dir: checkpoint directory + + Returns: + None + """ config_path = Path(checkpoint_dir, CONFIG_FNAME) with config_path.open('w') as f: json.dump(asdict(config), f) diff --git a/megatron/core/dist_checkpointing/dict_utils.py b/megatron/core/dist_checkpointing/dict_utils.py index 493a61c91a..95591cd99e 100644 --- a/megatron/core/dist_checkpointing/dict_utils.py +++ b/megatron/core/dist_checkpointing/dict_utils.py @@ -1,6 +1,10 @@ # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. -""" Utilities for operating with dicts and lists. """ +""" Utilities for operating with dicts and lists. + +All functions in this module handle nesting of dicts and lists. +Other objects (e.g. tuples) are treated as atomic leaf types that cannot be traversed. +""" from collections import defaultdict from typing import Any, Callable, Iterable, Optional, Tuple, Union @@ -13,7 +17,7 @@ def extract_matching_values( ) -> Tuple[Union[dict, list], Union[dict, list]]: """ Return matching and nonmatching values. Keeps hierarchy. - Arguments: + Args: x (Union[dict, list]) : state dict to process. Top-level argument must be a dict or list predicate (object -> bool): determines matching values return_lists_as_dicts (bool): if True, matching lists will be turned @@ -60,6 +64,21 @@ def _set_elem(target, k, v): def diff(x1: Any, x2: Any, prefix: Tuple = ()) -> Tuple[list, list, list]: + """ Recursive diff of dicts. + + Args: + x1 (object): left dict + x2 (object): right dict + prefix (tuple): tracks recursive calls. Used for reporting differing keys. + + Returns: + Tuple[list, list, list]: tuple of: + - only_left: Prefixes present only in left dict + - only_right: Prefixes present only in right dict + - mismatch: values present in both dicts but not equal across dicts. + For tensors equality of all elems is checked. + Each element is a tuple (prefix, type of left value, type of right value). 
+ """ mismatch = [] if isinstance(x1, dict) and isinstance(x2, dict): only_left = [prefix + (k,) for k in x1.keys() - x2.keys()] @@ -94,22 +113,8 @@ def diff(x1: Any, x2: Any, prefix: Tuple = ()) -> Tuple[list, list, list]: return only_left, only_right, mismatch -def inspect_keys_types(d: dict, prefix: Tuple = (), indent: int = 4): - print_indent = lambda: print(' ' * indent * len(prefix), end='') - for k, v in d.items(): - if isinstance(v, dict): - print_indent() - print(f'> {k}:') - inspect_keys_types(v, prefix + (k,), indent) - else: - print_indent() - if isinstance(v, torch.Tensor): - print(f'> {k}: {type(v)} of shape {v.shape}') - else: - print(f'> {k}: {type(v)}') - - def inspect_types(x: Any, prefix: Tuple = (), indent: int = 4): + """ Helper to print types of (nested) dict values. """ print_indent = lambda: print(' ' * indent * len(prefix), end='') if isinstance(x, dict): print() @@ -137,6 +142,7 @@ def inspect_types(x: Any, prefix: Tuple = (), indent: int = 4): def nested_values(x: Union[dict, list]): + """ Returns iterator over (nested) values of a given dict or list. """ x_iter = x.values() if isinstance(x, dict) else x for v in x_iter: if isinstance(v, (dict, list)): @@ -146,6 +152,7 @@ def nested_values(x: Union[dict, list]): def nested_items_iter(x: Union[dict, list]): + """ Returns iterator over (nested) tuples (container, key, value) of a given dict or list. """ x_iter = x.items() if isinstance(x, dict) else enumerate(x) for k, v in x_iter: if isinstance(v, (dict, list)): @@ -155,16 +162,19 @@ def nested_items_iter(x: Union[dict, list]): def dict_map(f: Callable, d: dict): + """ `map` equivalent for dicts. """ for sub_d, k, v in nested_items_iter(d): sub_d[k] = f(v) def dict_map_with_key(f: Callable, d: dict): + """ `map` equivalent for dicts with a function that accepts tuple (key, value). """ for sub_d, k, v in nested_items_iter(d): sub_d[k] = f(k, v) def dict_list_map_inplace(f: Callable, x: Union[dict, list]): + """ Maps dicts and lists *in-place* with a given function. """ if isinstance(x, dict): for k, v in x.items(): x[k] = dict_list_map_inplace(f, v) @@ -176,6 +186,7 @@ def dict_list_map_inplace(f: Callable, x: Union[dict, list]): def dict_list_map_outplace(f: Callable, x: Union[dict, list]): + """ Maps dicts and lists *out-of-place* with a given function. """ if isinstance(x, dict): return {k: dict_list_map_outplace(f, v) for k, v in x.items()} elif isinstance(x, list): @@ -185,6 +196,7 @@ def dict_list_map_outplace(f: Callable, x: Union[dict, list]): def merge(x1: dict, x2: dict, key: Tuple[str, ...] = ()): + """ Merges dicts and lists recursively. """ if isinstance(x1, dict) and isinstance(x2, dict): for k, v2 in x2.items(): if k not in x1: @@ -211,6 +223,7 @@ def map_reduce( value_fn: Callable = lambda x: x, reduce_fn: Callable = lambda x: x, ) -> dict: + """ Simple map-reduce implementation following `more_itertools.map_reduce` interface. """ res = defaultdict(list) for x in xs: res[key_fn(x)].append(value_fn(x)) diff --git a/megatron/core/dist_checkpointing/mapping.py b/megatron/core/dist_checkpointing/mapping.py index a8307b7c24..362ffd4a8e 100644 --- a/megatron/core/dist_checkpointing/mapping.py +++ b/megatron/core/dist_checkpointing/mapping.py @@ -1,6 +1,11 @@ # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. -""" Core library classes. """ +""" Core library classes for representing sharding of tensors and objects. 
+ +The main expected usage is wrapping torch.Tensors in state dicts with +ShardedTensor class (mostly with the ShardedTensor.from_rank_offsets classmethod). +""" + import logging from dataclasses import dataclass, replace from itertools import chain @@ -172,7 +177,21 @@ def __str__(self): return f'{self.__class__.__name__}(key=\'{self.key}\')' -def is_main_replica(replica_id): +def is_main_replica(replica_id: ReplicaId): + """ Checks if given `replica_id` is considered as main. + + "Main" replica is: + - integer 0 + - or an iterable with all 0 elements + + It is the application responsibility to set correct replicas for sharded tensors. + + Args: + replica_id (Union[int, Tuple[int, ...]]): replica id + + Returns: + (bool): True for a "main" replica + """ if isinstance(replica_id, int): return replica_id == 0 return all(r == 0 for r in replica_id) @@ -239,18 +258,35 @@ class ShardedTensorFactory: Builder creates a sub-state-dict out of a tensor before saving, and merger merges the corresponding state dict after loading. + + Args: + key (str): unique identifier of the factory + data (torch.Tensor): original model parameter that will be further transformed by this factory + build_fn (callable): function that transforms the original tensor to a sharded state dict + merge_fn (callable): function that transforms loaded subtree back into a single tensor (inverse of `build_fn`) + replica_id (ReplicaId): indicates factory replication wrt. factories in different processes """ key: str data: torch.Tensor - build_fn: Callable[[str, torch.Tensor], ShardedStateDict] + build_fn: Callable[[str, torch.Tensor, ReplicaId], ShardedStateDict] merge_fn: Callable[[StateDict], torch.Tensor] + replica_id: ReplicaId = 0 def build(self): - return self.build_fn(self.key, self.data) + return self.build_fn(self.key, self.data, self.replica_id) def apply_factories(sharded_state_dict: ShardedStateDict): + """ Turn ShardedTensorFactories into ShardedTensors *in-place*. + + Args: + sharded_state_dict (ShardedStateDict): state dict possibly containing ShardedTensorFactory objects + + Returns: + None: state dict is modified in place + """ + def apply(x): if isinstance(x, ShardedTensorFactory): x = x.build() @@ -259,7 +295,20 @@ def apply(x): dict_list_map_inplace(apply, sharded_state_dict) -def apply_factory_merges(x1: StateDict, x2: ShardedStateDict, key: Tuple[str, ...] = ()): +def apply_factory_merges( + x1: StateDict, x2: ShardedStateDict, key: Tuple[str, ...] = () +) -> StateDict: + """ Apply merges defined by ShardedTensorFactories *in-place*. + + Args: + x1 (StateDict): state dict loaded from the checkpoint + x2 (ShardedStateDict): subset of `x1` (in terms of dict keys) with ShardedTensorFactory + as (possibly nested) values that define how to merge objects from the `x1` state dict + key (Tuple[str, ...]): current key in a recursive call. Used only for reporting meaningful errors + + Returns: + StateDict: `x1` modified in-place + """ if isinstance(x2, ShardedTensorFactory): return x2.merge_fn(x1) diff --git a/megatron/core/dist_checkpointing/optimizer.py b/megatron/core/dist_checkpointing/optimizer.py index d1c698787c..bec174209e 100644 --- a/megatron/core/dist_checkpointing/optimizer.py +++ b/megatron/core/dist_checkpointing/optimizer.py @@ -1,6 +1,6 @@ # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. -""" Optimizer related helpers. """ +""" Helpers for defining sharding for optimizer states based on existing sharding for model parameters. 
""" import logging from copy import deepcopy @@ -20,7 +20,7 @@ ShardedTensorFactory, StateDict, ) -from .utils import extract_sharded_tensors, extract_sharded_tensors_and_factories +from .utils import extract_sharded_tensors_and_factories def get_optim_param_to_id_map(optim_params_iter: Iterable[torch.nn.Parameter]) -> Dict[int, int]: @@ -34,6 +34,17 @@ def get_optim_param_to_id_map(optim_params_iter: Iterable[torch.nn.Parameter]) - def get_param_id_to_sharded_param_map( model_sharded_state_dict: ShardedStateDict, optim_params_iter: Iterable[torch.nn.Parameter] ) -> Dict[int, Union[ShardedTensor, ShardedTensorFactory]]: + """ Generate mapping from optimizer state ids to model sharded parameters. + + Args: + model_sharded_state_dict: sharded state dict with all model sharded tensors (can have any structure) + optim_params_iter: iterable which iterates over model parameters tracked by the optimizer. + The iteration must be in the same order as in the optimizer parameters. + + Returns: + Dict[int, Union[ShardedTensor, ShardedTensorFactory]]: mapping from optimizer state ids + to model sharded parameters. + """ model_sharded_state_dict, _ = extract_sharded_tensors_and_factories(model_sharded_state_dict) id_to_sharded_param_map = {} param_to_id_map = get_optim_param_to_id_map(optim_params_iter) @@ -55,6 +66,16 @@ def get_param_id_to_sharded_param_map( def make_sharded_optimizer_tensor( model_param: Union[ShardedTensor, ShardedTensorFactory], optim_param: torch.Tensor, prefix: str ) -> Union[ShardedTensor, ShardedTensorFactory]: + """ Build a ShardedTensor or ShardedTensorFactory for optimizer param based on model param + + Args: + model_param (Union[ShardedTensor, ShardedTensorFactory]): model param + optim_param (torch.Tensor): corresponding optimizer param + prefix (str): optimizer prefix for the ShardedTensor or ShardedTensorFactory + + Returns: + Union[ShardedTensor, ShardedTensorFactory]: wrapped optimizer parameter + """ if isinstance(model_param, ShardedTensorFactory): return replace(model_param, key=f'{prefix}.{model_param.key}', data=optim_param) @@ -71,6 +92,22 @@ def optim_state_to_sharding_state( id_to_sharded_param_map: Dict[int, ShardedTensor], exclude_keys: Tuple[str] = (), ): + """ Turn optimizer state dict to sharded state dict based on model state dict *in-place*. + + Can be used to add sharding information to most common optimizer state dict. + Creates separate ShardedTensors for each key in `optim_state_dict['state']` + (e.g. for torch.optim.Adam there will be separate tensors for `exp_avg` and `exp_avg_sq`) + + Args: + optim_state_dict (StateDict): optimizer state dict with + state parameters under `state` key and group hyperparameters under `param_groups` -> `params` key. + id_to_sharded_param_map (Dict[int, ShardedTensor]): mapping from optimizer param ids to model sharded tensors. + Can be generated with `get_param_id_to_sharded_param_map` function + exclude_keys (Tuple[str]): optimizer state keys to exclude from the final state dict. + + Returns: + None: state dict is modified in place + """ sharded_state = {} for param_id, param_state in optim_state_dict['state'].items(): sharded_state[param_id] = {} diff --git a/megatron/core/dist_checkpointing/serialization.py b/megatron/core/dist_checkpointing/serialization.py index dfc710a559..96eb54b977 100644 --- a/megatron/core/dist_checkpointing/serialization.py +++ b/megatron/core/dist_checkpointing/serialization.py @@ -1,5 +1,12 @@ # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. 
+""" Entrypoints for saving and loading the distributed checkpoints. + +Functions `load` and `save` are equivalents of `torch.load` and `torch.save` +but expect torch.Tensors to be wrapped with classes from the `mapping module`. +Additionally, `load` expects the sharded state dict argument as a guidance for loading the sharded tensors. +""" + import logging import os from collections import Counter, defaultdict @@ -131,7 +138,15 @@ def _verify_checkpoint_and_load_strategy( # TODO: implement it as common torch strategy -def load_common_state_dict(checkpoint_dir: Path): +def load_common_state_dict(checkpoint_dir: Path) -> StateDict: + """ Load common (non-sharded) objects state dict from the checkpoint. + + Args: + checkpoint_dir (Path): checkpoint directory + + Returns: + StateDict: state dict with non-sharded objects from the checkpoint + """ load_path = Path(checkpoint_dir) / COMMON_STATE_FNAME try: return torch.load(load_path, map_location='cpu') @@ -143,6 +158,15 @@ def load_common_state_dict(checkpoint_dir: Path): def load_sharded_objects(sharded_state_dict: ShardedStateDict, checkpoint_dir: Path): + """ Replaces all ShardedObject from a given state dict with values loaded from the checkpoint. + + Args: + sharded_state_dict (ShardedStateDict): sharded state dict defining what objects should be loaded. + checkpoint_dir (Path): checkpoint directory + + Returns: + None: state dict is modified in place + """ sharded_objects, sharded_state_dict = extract_matching_values( sharded_state_dict, lambda v: isinstance(v, ShardedObject) ) @@ -292,6 +316,22 @@ def _extract_and_save_sharded_objects( def validate_sharding_integrity(sharded_tensors: Iterable[ShardedTensor]): + """ Validate if the ShardedTensors from multiple processes define correct sharding of a global tensor. + + Local ShardedTensors metadata is exchanged with `torch.distributed.all_gather_object` + and then process with global rank 0 checks if main replicas of the shards: + - cover the whole global tensors + - don't overlap + + Args: + sharded_tensors (Iterable[ShardedTensor]): sharded tensors local to this process + + Returns: + None + + Raises: + CheckpointingException for invalid access pattern + """ sharding = [ten.without_data() for ten in sharded_tensors] all_sharding = [None] * torch.distributed.get_world_size() torch.distributed.all_gather_object(all_sharding, sharding) diff --git a/megatron/core/dist_checkpointing/strategies/base.py b/megatron/core/dist_checkpointing/strategies/base.py index 3989ea74a2..3af945900f 100644 --- a/megatron/core/dist_checkpointing/strategies/base.py +++ b/megatron/core/dist_checkpointing/strategies/base.py @@ -1,5 +1,7 @@ # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. +""" Strategies base interfaces. """ + from abc import ABC, abstractmethod from collections import defaultdict from enum import Enum @@ -20,6 +22,7 @@ class StrategyAction(Enum): def get_default_strategy(action: StrategyAction, backend: str, version: int): + """ Retrieves a default strategy for a given action, backend and version. """ try: return default_strategies[action.value][(backend, version)] except KeyError as e: @@ -36,6 +39,8 @@ def get_default_strategy(action: StrategyAction, backend: str, version: int): class LoadStrategyBase(ABC): + """ Base class for a load strategy. Requires implementing checks for compatibility with a given checkpoint version. 
""" + @abstractmethod def check_backend_compatibility(self, loaded_version): raise NotImplementedError @@ -46,18 +51,24 @@ def check_version_compatibility(self, loaded_version): class SaveStrategyBase(ABC): + """ Base class for a save strategy. Requires defining a backend type and version of the saved format. """ + def __init__(self, backend: str, version: int): self.backend = backend self.version = version class LoadCommonStrategy(LoadStrategyBase): + """ Load strategy for common (non-sharded) objects """ + @abstractmethod def load(self, checkpoint_dir: Path): raise NotImplementedError class LoadShardedStrategy(LoadStrategyBase): + """ Load strategy for sharded tensors """ + @abstractmethod def load(self, sharded_state_dict: ShardedStateDict, checkpoint_dir: Path): raise NotImplementedError @@ -79,12 +90,16 @@ def load_tensors_metadata(self, checkpoint_dir: Path): class SaveCommonStrategy(SaveStrategyBase): + """ Save strategy for common (non-sharded) objects """ + @abstractmethod def save(self, common_state_dict: StateDict, checkpoint_dir: Path): raise NotImplementedError class SaveShardedStrategy(SaveStrategyBase): + """ Save strategy for sharded tensors """ + @abstractmethod def save(self, sharded_tensors: List[ShardedTensor], checkpoint_dir: Path): raise NotImplementedError diff --git a/megatron/core/dist_checkpointing/utils.py b/megatron/core/dist_checkpointing/utils.py index f7976f0074..ad22fe77b9 100644 --- a/megatron/core/dist_checkpointing/utils.py +++ b/megatron/core/dist_checkpointing/utils.py @@ -1,10 +1,13 @@ # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. -from typing import Tuple +""" Helpers for manipulating sharded tensors and sharded state dicts. """ + +from typing import Dict, Tuple from .dict_utils import dict_list_map_inplace, extract_matching_values from .mapping import ( LocalNonpersitentObject, + ShardedObject, ShardedStateDict, ShardedTensor, ShardedTensorFactory, @@ -15,12 +18,32 @@ def extract_sharded_tensors( sharded_state_dict: ShardedStateDict, ) -> Tuple[ShardedStateDict, StateDict]: + """ Extract a dict consisting of only ShardedTensor objects from a given state dict with any objects. + + Args: + sharded_state_dict: state dict possibly containing ShardedTensor objects + + Returns: + Tuple[ShardedStateDict, StateDict]: tuple of: + - state dict with all ShardedTensor (keeping the original state dict structure) + - state dict with all objects other than ShardedTensor (keeping the original state dict structure) + """ return extract_matching_values(sharded_state_dict, lambda v: isinstance(v, ShardedTensor)) def extract_sharded_tensors_and_factories( sharded_state_dict: ShardedStateDict, ) -> Tuple[ShardedStateDict, StateDict]: + """ Extract a dict consisting of only ShardedTensor and ShardedTensorFactory objects from a given state dict with any objects. 
+ + Args: + sharded_state_dict: state dict possibly containing ShardedTensor and ShardedTensorFactory objects + + Returns: + Tuple[ShardedStateDict, StateDict]: tuple of: + - state dict with all ShardedTensor and ShardedTensorFactory (keeping the original state dict structure) + - state dict with all other objects (keeping the original state dict structure) + """ return extract_matching_values( sharded_state_dict, lambda v: isinstance(v, (ShardedTensor, ShardedTensorFactory)) ) @@ -29,6 +52,17 @@ def extract_sharded_tensors_and_factories( def extract_sharded_tensors_or_nonpersistent( sharded_state_dict: ShardedStateDict, ) -> Tuple[ShardedStateDict, StateDict]: + """ Extract a dict consisting of only ShardedTensor, ShardedTensorFactory and LocalNonpersitentObject + objects from a given state dict with any objects. + + Args: + sharded_state_dict: state dict possibly containing ShardedTensor, ShardedTensorFactory and LocalNonpersitentObject objects + + Returns: + Tuple[ShardedStateDict, StateDict]: tuple of: + - state dict with all ShardedTensor, ShardedTensorFactory and LocalNonpersitentObject (keeping the original state dict structure) + - state dict with all other objects (keeping the original state dict structure) + """ return extract_matching_values( sharded_state_dict, lambda v: isinstance(v, (ShardedTensor, LocalNonpersitentObject, ShardedTensorFactory)), @@ -36,9 +70,70 @@ def extract_sharded_tensors_or_nonpersistent( def add_prefix_for_sharding(sharded_state_dict: ShardedStateDict, prefix: str): + """ Prepend a given prefix to all ShardedTensor objects in a given state dict *in-place*. + + Args: + sharded_state_dict (ShardedStateDict): sharded state dict + prefix (str): prefix to be prepended + + Returns: + None: state dict is modified in-place + """ + def add_prefix(t): if isinstance(t, ShardedTensor): t.key = f'{prefix}.{t.key}' return t dict_list_map_inplace(add_prefix, sharded_state_dict) + + +def replace_prefix_for_sharding( + sharded_state_dict: ShardedStateDict, old_prefix: str, new_prefix: str +): + """ Replaces the given prefix in *all* sharded keys in a given state dict. + + Errors out if some key does not begin with a given prefix. + + Args: + sharded_state_dict (ShardedStateDict): sharded state dict to replace keys in + old_prefix (str): prefix to be replaced in each key + new_prefix (str): new prefix + + Returns: + None: state dict is modified in place + """ + + def _replace_prefix(x): + if isinstance(x, (ShardedTensor, ShardedTensorFactory, ShardedObject)): + if not x.key.startswith(old_prefix): + raise ValueError(f'Expected {x.key} to begin with prefix {old_prefix}') + x.key = f'{new_prefix}{x.key[len(old_prefix):]}' # str.removeprefix in Python >= 3.9 + return x + + dict_list_map_inplace(_replace_prefix, sharded_state_dict) + + +def apply_prefix_mapping(sharded_state_dict: ShardedStateDict, prefix_map: Dict[str, str]): + """ Replaces prefixes *only in keys matching* with one of prefixes in the map. + + Args: + sharded_state_dict (ShardedStateDict): sharded state dict to replace keys in + prefix_map (Dict[str, str]): map of old->new prefixes. 
The first matching prefix for each key is used + + Returns: + None: state dict is modified in place + """ + + def _replace_prefixes(x): + if not isinstance(x, (ShardedTensor, ShardedTensorFactory, ShardedObject)): + return x + for old_prefix, new_prefix in prefix_map.items(): + if x.key.startswith(old_prefix): + x.key = ( + f'{new_prefix}{x.key[len(old_prefix):]}' # str.removeprefix in Python >= 3.9 + ) + break + return x + + dict_list_map_inplace(_replace_prefixes, sharded_state_dict) diff --git a/megatron/core/distributed/__init__.py b/megatron/core/distributed/__init__.py index 34c7209a27..328c3101eb 100644 --- a/megatron/core/distributed/__init__.py +++ b/megatron/core/distributed/__init__.py @@ -1,2 +1,5 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + from .distributed_data_parallel import DistributedDataParallel from .finalize_model_grads import finalize_model_grads +from .grad_buffer import shard_buffer diff --git a/megatron/core/distributed/distributed_data_parallel.py b/megatron/core/distributed/distributed_data_parallel.py index e09564b396..d8cc637236 100644 --- a/megatron/core/distributed/distributed_data_parallel.py +++ b/megatron/core/distributed/distributed_data_parallel.py @@ -1,7 +1,7 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. from contextlib import contextmanager -from typing import Dict +from typing import Dict, Optional import torch @@ -33,6 +33,7 @@ class DistributedDataParallel(MegatronModule): disable_bucketing: If true, force assign all parameters to a single bucket. If false, use standard bucketing policy: assign parameters to smaller buckets and all-reduce per bucket _if_ overlap_grad_reduce is True and pp_rank is 0. + check_for_nan_in_grad: If true, check if local grad norm is NaN. """ @@ -44,7 +45,9 @@ def __init__( accumulate_allreduce_grads_in_fp32: bool, overlap_grad_reduce: bool, use_distributed_optimizer: bool, + expert_data_parallel_group: Optional[torch.distributed.ProcessGroup] = None, disable_bucketing: bool = False, + check_for_nan_in_grad: bool = False, bucket_size: int = 40000000, ): super().__init__(config=config) @@ -65,56 +68,81 @@ def __init__( bucket_size = None if disable_bucketing: bucket_size = None + + self.check_for_nan_in_grad = check_for_nan_in_grad self.bucket_size = bucket_size self.module = module - self.grad_buffers = {} - self.expert_grads = [] - self.grad_buffer_param_index_map = {} self.param_to_grad_buffer = {} # Group parameters by their gradient type. - grad_dtype_to_params = {} param_to_name = {} + dense_params = [] + expert_parallel_params = [] for name, param in self.module.named_parameters(): - if param.requires_grad and getattr(param, 'allreduce', True): - param.grad_added_to_main_grad = False - param_to_name[param] = name + if not param.requires_grad: + continue + + param.grad_added_to_main_grad = False + param_to_name[param] = name + + if getattr(param, 'allreduce', True): + dense_params.append(param) + else: + expert_parallel_params.append(param) + + def allocate_grad_buffers_for_parameters( + input_params, data_parallel_group, gradient_scaling_factor=1.0, + ): + grad_dtype_to_params = {} + + # Group parameters by their gradient type. 
+ for param in input_params: + if not param.requires_grad: + continue + dtype = torch.float if accumulate_allreduce_grads_in_fp32 else param.dtype params = grad_dtype_to_params.get(dtype, []) params.append(param) grad_dtype_to_params[dtype] = params - # Allocate the grad buffers and map the grads. - # The grad buffer under the hood creates buckets as appropriate based on bucket_size. - self.data_parallel_world_size = torch.distributed.get_world_size(group=data_parallel_group) - for dtype, params in grad_dtype_to_params.items(): - self.grad_buffers[dtype] = GradBuffer( - dtype, - params, - data_parallel_group, - bucket_size, - param_to_name, - self.overlap_grad_reduce, - self.use_distributed_optimizer, - ) - self.grad_buffer_param_index_map[dtype] = self.grad_buffers[dtype].param_index_map - for param in params: - self.param_to_grad_buffer[param] = self.grad_buffers[dtype] - - # Allocate separate buffer for MoE params' grads. - for param in self.module.parameters(): - if param.requires_grad and not getattr(param, 'allreduce', True): - param.grad_added_to_main_grad = False - dtype = torch.float if accumulate_allreduce_grads_in_fp32 else param.dtype - param.main_grad = torch.zeros( - param.data.shape, - dtype=dtype, - device=torch.cuda.current_device(), - requires_grad=False, + # Allocate the grad buffers and map the grads. + grad_buffers = [] + for dtype, params in grad_dtype_to_params.items(): + grad_buffers.append( + GradBuffer( + dtype, + params, + data_parallel_group, + bucket_size, + param_to_name, + self.overlap_grad_reduce, + self.use_distributed_optimizer, + gradient_scaling_factor, + self.check_for_nan_in_grad, + ) ) - self.expert_grads.append(param.main_grad) + for param in params: + self.param_to_grad_buffer[param] = grad_buffers[-1] + + return grad_buffers + + data_parallel_world_size = torch.distributed.get_world_size(data_parallel_group) + + # Allocate the grad buffers for dense params' grads. + self.grad_buffers = allocate_grad_buffers_for_parameters( + dense_params, + data_parallel_group, + gradient_scaling_factor=1.0 / data_parallel_world_size, + ) + + # Allocate separate grad buffers for expert parallel params' grads. + self.expert_parallel_grad_buffers = allocate_grad_buffers_for_parameters( + expert_parallel_params, + expert_data_parallel_group, + gradient_scaling_factor=1.0 / data_parallel_world_size, + ) # Register backward hook. # Accumulation function for the gradients need to be stored so they @@ -153,6 +181,7 @@ def param_hook(*unused): ): param.main_grad.add_(param.grad.data) param.grad = None + if self.overlap_grad_reduce: param_to_grad_buffer[param].register_grad_ready(param) @@ -163,12 +192,12 @@ def no_sync(self): """ Context manager that turns off gradient synchronization. """ - for grad_buffer in self.grad_buffers.values(): + for grad_buffer in self.grad_buffers + self.expert_parallel_grad_buffers: grad_buffer.is_last_microbatch = False try: yield finally: - for grad_buffer in self.grad_buffers.values(): + for grad_buffer in self.grad_buffers + self.expert_parallel_grad_buffers: grad_buffer.is_last_microbatch = True def start_grad_sync(self, *unused): @@ -180,7 +209,7 @@ def start_grad_sync(self, *unused): calls. When overlap_grad_reduce is set to False, calls synchronous communication ops. """ - for grad_buffer in self.grad_buffers.values(): + for grad_buffer in self.grad_buffers + self.expert_parallel_grad_buffers: grad_buffer.start_grad_sync() def finish_grad_sync(self): @@ -192,12 +221,9 @@ def finish_grad_sync(self): calls to complete. 
When overlap_grad_reduce is set to False, calls synchronous communication ops. """ - for grad_buffer in self.grad_buffers.values(): + for grad_buffer in self.grad_buffers + self.expert_parallel_grad_buffers: grad_buffer.finish_grad_sync() - for expert_grad in self.expert_grads: - expert_grad /= self.data_parallel_world_size - def zero_grad_buffer(self, zero_buffer): """ Zeros out all grad buffers. Needs to be called at the beginning of each @@ -208,21 +234,28 @@ def zero_grad_buffer(self, zero_buffer): for param in self.module.parameters(): if param.requires_grad: param.grad_added_to_main_grad = False - for grad_buffer in self.grad_buffers.values(): + for grad_buffer in self.grad_buffers + self.expert_parallel_grad_buffers: grad_buffer.reset(zero_buffer) - for expert_grad in self.expert_grads: - expert_grad.zero_() def broadcast_params(self): """ Syncs parameters across all DP ranks. """ for param in self.module.parameters(): - torch.distributed.broadcast( - param.data, - src=parallel_state.get_data_parallel_src_rank(with_context_parallel=True), - group=parallel_state.get_data_parallel_group(with_context_parallel=True), - ) + is_expert_parallel = not getattr(param, 'allreduce', True) + + if is_expert_parallel: + torch.distributed.broadcast( + param.data, + src=torch.distributed.get_process_group_ranks(self.expert_data_parallel_group), + group=self.expert_data_parallel_group, + ) + else: + torch.distributed.broadcast( + param.data, + src=torch.distributed.get_process_group_ranks(self.data_parallel_group), + group=self.data_parallel_group, + ) def state_dict(self, prefix='', keep_vars=False): """ diff --git a/megatron/core/distributed/finalize_model_grads.py b/megatron/core/distributed/finalize_model_grads.py index 916e4f3ecb..f6387b85c4 100644 --- a/megatron/core/distributed/finalize_model_grads.py +++ b/megatron/core/distributed/finalize_model_grads.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. from typing import List @@ -89,35 +89,10 @@ def _allreduce_layernorm_grads(model: List[torch.nn.Module], config: Transformer buf.copy_(synced) -def _allreduce_expert_grads(model: List[torch.nn.Module], config: TransformerConfig): - """ - All-reduce expert grads (for expert parallelism). - """ - - # All-reduce switchmlp parameters across data modulo expert parallel nodes - if ( - config.expert_model_parallel_size > 1 - and config.expert_model_parallel_size < parallel_state.get_data_parallel_world_size() - ): - grads = [] - for model_chunk in model: - for param in get_attr_wrapped_model(model_chunk, 'parameters')(): - if not getattr(param, 'allreduce', True): - grad = param.main_grad - grads.append(grad.data) - coalesced = _flatten_dense_tensors(grads) - torch.distributed.all_reduce( - coalesced, group=parallel_state.get_data_modulo_expert_parallel_group() - ) - for buf, synced in zip(grads, _unflatten_dense_tensors(coalesced, grads)): - buf.copy_(synced) - - def finalize_model_grads(model: List[torch.nn.Module]): """ All-reduce all model grads across DP replicas, layernorm grads for sequence parallelism, - embedding grads across first and last pipeline stages (if not tied), and expert grads - for expert parallelism. + embedding grads across first and last pipeline stages (if not tied). 
""" config = get_model_config(model[0]) @@ -147,12 +122,3 @@ def finalize_model_grads(model: List[torch.nn.Module]): _allreduce_embedding_grads(model, config) if config.timers is not None: config.timers('embedding-grads-all-reduce').stop() - - # All-reduce expert grads (for expert parallelism). - if config.timers is not None: - config.timers('expert-grads-all-reduce', log_level=1).start( - barrier=config.barrier_with_L1_time - ) - _allreduce_expert_grads(model, config) - if config.timers is not None: - config.timers('expert-grads-all-reduce').stop() diff --git a/megatron/core/distributed/grad_buffer.py b/megatron/core/distributed/grad_buffer.py index 8bc88a8e71..9b4202596b 100644 --- a/megatron/core/distributed/grad_buffer.py +++ b/megatron/core/distributed/grad_buffer.py @@ -1,6 +1,7 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. import math +import os from logging import getLogger from typing import Dict, List @@ -33,6 +34,7 @@ class Bucket: params: List of parameters whose gradients are collated in this bucket. data: View in larger GradBuffer that this bucket is responsible for. offset: Offset of this bucket's view in the larger GradBuffer. + numel_unpadded: Number of unpadded elements in bucket. data_parallel_group: Data-parallel process group. data_parallel_world_size: World size using the data-parallel group group. overlap_grad_reduce: If true, overlap communication with backprop computation by @@ -40,6 +42,10 @@ class Bucket: is used instead. use_distributed_optimizer: If true, issue reduce-scatter communication calls as part of distributed optimizer. If false, issue all-reduce communication calls. + gradient_scaling_factor: This factor is utilized to scale gradients prior to their + communication. Its application is twofold: it facilitates the averaging of gradients + and the scaling of gradients in the context of the Mixture of Experts (MoE) model. + check_for_nan_in_grad: If true, check if local grad norm is NaN. """ def __init__( @@ -47,10 +53,13 @@ def __init__( params: List[torch.nn.Parameter], data: torch.Tensor, offset: int, + numel_unpadded: int, data_parallel_group: torch.distributed.ProcessGroup, data_parallel_world_size: int, overlap_grad_reduce: bool, use_distributed_optimizer: bool, + gradient_scaling_factor: float, + check_for_nan_in_grad: bool, ): # State for bookkeeping: params is the set of parameters this bucket is # responsible for, params_with_grad is the set of parameters with grads @@ -63,11 +72,14 @@ def __init__( # The distributed optimizer needs to keep track of this bucket's offset # within the full grad_buffer. self.offset = offset + self.numel_unpadded = numel_unpadded self.data_parallel_group = data_parallel_group self.data_parallel_world_size = data_parallel_world_size self.data_parallel_rank = torch.distributed.get_rank(group=data_parallel_group) self.overlap_grad_reduce = overlap_grad_reduce self.use_distributed_optimizer = use_distributed_optimizer + self.gradient_scaling_factor = gradient_scaling_factor + self.check_for_nan_in_grad = check_for_nan_in_grad self.reset() @@ -92,7 +104,18 @@ def start_grad_sync(self): self.communication_handle is None and not self.communication_issued ), 'Should not have multiple communication calls in flight at once' - self.data /= self.data_parallel_world_size + # Make sure norm of grads in bucket are not NaN + # prior to data-parallel all-reduce / reduce-scatter. 
+ if self.check_for_nan_in_grad: + global_rank = torch.distributed.get_rank() + norm = self.data.norm(p=2) + assert not norm.isnan(), ( + f'Rank {global_rank}: found NaN in local grad norm in ' + f'backward pass before data-parallel communication collective. ' + f'Device: {torch.cuda.current_device()}, node: {os.uname()[1]}' + ) + + self.data *= self.gradient_scaling_factor # Use async_op only when overlap_grad_reduce is True. if self.use_distributed_optimizer: local_data_view = shard_buffer(self.data, self.data_parallel_world_size)[ @@ -162,6 +185,10 @@ class GradBuffer: is used instead. use_distributed_optimizer: If true, issue reduce-scatter communication calls as part of distributed optimizer. If false, issue all-reduce communication calls. + gradient_scaling_factor: This factor is utilized to scale gradients prior to their + communication. Its application is twofold: it facilitates the averaging of gradients + and the scaling of gradients in the context of the Mixture of Experts (MoE) model. + check_for_nan_in_grad: If true, check if local grad norm is NaN. """ def __init__( @@ -173,6 +200,8 @@ def __init__( param_to_name: Dict[torch.nn.Parameter, str], overlap_grad_reduce: bool, use_distributed_optimizer: bool, + gradient_scaling_factor: float, + check_for_nan_in_grad: bool, ): # Check that params are unique. @@ -190,6 +219,8 @@ def __init__( ) self.overlap_grad_reduce = overlap_grad_reduce self.use_distributed_optimizer = use_distributed_optimizer + self.gradient_scaling_factor = gradient_scaling_factor + self.check_for_nan_in_grad = check_for_nan_in_grad self.is_last_microbatch = True # Data structures to store underlying buckets and relevant indexing data. @@ -197,8 +228,10 @@ def __init__( self.param_to_bucket = {} # Param -> bucket mapping. self.param_index_map = {} # Param -> location in buffer mapping (used in dist. optimizer). - def _pad_if_needed(data_index: int): - """Pads data indices if using distributed optimizer (to ensure uniform sharding).""" + def _pad_if_needed(data_index: int) -> int: + """ + Pads data indices if using distributed optimizer (to ensure uniform sharding). + """ if use_distributed_optimizer: return ( int(math.ceil(data_index / self.data_parallel_world_size)) @@ -213,7 +246,26 @@ def _pad_if_needed(data_index: int): bucket_data_start_index = data_start_index bucket_params = set() self.bucket_indices = [] + per_bucket_numel_unpadded = [] bucket_id = 0 + + def _create_new_bucket(data_end_index: int) -> int: + """ + Create the bucket_id'th bucket with collected bucket_params, starting at + bucket_data_start_index. + """ + nonlocal bucket_data_start_index, bucket_params, bucket_id + per_bucket_numel_unpadded.append(data_end_index - bucket_data_start_index) + data_end_index = _pad_if_needed(data_end_index) + # Update bucket metadata. + self.bucket_indices.append((bucket_data_start_index, data_end_index)) + bucket_data_start_index = data_end_index + # Re-set bucket_params and increment bucket_id for next bucket. + bucket_params = set() + bucket_id += 1 + # Return the potentially padded data_end_index. + return data_end_index + for param in params[::-1]: # Iterate through parameters in reverse order to roughly follow backprop order, # and skip parameters that don't require gradients. 
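With the distributed optimizer, `_pad_if_needed` rounds every bucket boundary up to a multiple of the data-parallel world size so the flat buffer shards evenly across ranks, and the new `per_bucket_numel_unpadded` bookkeeping records how many of those elements are real gradients. A self-contained sketch of that arithmetic (mirroring, not reproducing, `_pad_if_needed` / `_create_new_bucket`):

```python
import math
from typing import Tuple


def pad_if_needed(index: int, dp_world_size: int, use_distributed_optimizer: bool) -> int:
    """Round a buffer index up to a multiple of dp_world_size, as GradBuffer._pad_if_needed does."""
    if use_distributed_optimizer:
        return int(math.ceil(index / dp_world_size)) * dp_world_size
    return index


def close_bucket(start: int, end: int, dp_world_size: int,
                 use_distributed_optimizer: bool) -> Tuple[Tuple[int, int], int, int]:
    """Sketch of _create_new_bucket: record the unpadded numel, then pad the end index."""
    numel_unpadded = end - start  # real gradient elements in this bucket
    padded_end = pad_if_needed(end, dp_world_size, use_distributed_optimizer)
    return (start, padded_end), numel_unpadded, padded_end


# A bucket whose parameters span elements [0, 1001) with 8 data-parallel ranks:
indices, numel_unpadded, next_start = close_bucket(0, 1001, 8, use_distributed_optimizer=True)
print(indices, numel_unpadded, next_start)  # (0, 1008) 1001 1008 -> 126 elements per rank
```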
@@ -221,6 +273,26 @@ def _pad_if_needed(data_index: int): continue this_numel = param.data.nelement() data_end_index = data_start_index + this_numel + + def _does_param_require_new_bucket(param): + """ + Split shared embedding parameters into separate bucket if using distributed + optimizer that makes use of reduce-scatters instead of all-reduces. + This ensures that the first and last pipeline stage partition optimizer state + for the shared embedding parameters the same way across DP replicas, allowing + the DP reduce-scatter to be before the embedding all-reduce. + """ + return getattr(param, "shared_embedding", False) and self.use_distributed_optimizer + + # Create bucket with already collected parameters if current param needs its own bucket. + if _does_param_require_new_bucket(param) and len(bucket_params) > 0: + # We are creating a bucket for the already accumulated parameters, whose params + # end at the current data_start_index. + if use_distributed_optimizer: + # data_start_index should already be padded. + assert data_start_index % self.data_parallel_world_size == 0 + _create_new_bucket(data_start_index) + self.param_index_map[param] = ( data_start_index, data_end_index, @@ -228,31 +300,18 @@ def _pad_if_needed(data_index: int): ) bucket_params.add(param) - # If we have enough elements already, form a new bucket. - # If bucket_size is None, accumulate everything into a single bucket. - - # TODO: Remove len(bucket_params) > 1 when the final head that transforms token - # representations from hidden space to vocabulary space is in a PyTorch module - # whose forward method is called. If it is not and a bucket contains only this - # one parameter, we get incorrect behavior (i.e., higher losses) since we do not - # call the wait function on the bucket's all_gather_handle (we use forward pre- - # hooks on PyTorch modules to do this when --overlap-param-gather is used). - # As a temporary workaround, we make sure that no bucket has only one parameter. - if bucket_size is not None: - if (data_end_index - bucket_data_start_index) >= bucket_size and len( - bucket_params - ) > 1: - data_end_index = _pad_if_needed(data_end_index) - self.bucket_indices.append((bucket_data_start_index, data_end_index)) - bucket_data_start_index = data_end_index - bucket_params = set() - bucket_id += 1 + # If we have enough elements already or the current param is part of the shared embedding + # layer and needs a separate bucket, form a new bucket. + if ( + bucket_size is not None + and (data_end_index - bucket_data_start_index) >= bucket_size + ) or _does_param_require_new_bucket(param): + data_end_index = _create_new_bucket(data_end_index) data_start_index = data_end_index # Add remaining params to a new bucket. if len(bucket_params) > 0: - data_end_index = _pad_if_needed(data_end_index) - self.bucket_indices.append((bucket_data_start_index, data_end_index)) + data_end_index = _create_new_bucket(data_end_index) # Next, create underlying storage for buffer (with numel elements that includes # padding as necessary). 
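Putting the pieces above together, the revised bucketing policy is: walk parameters in reverse (roughly backprop) order, cut a new bucket once the accumulated element count reaches `bucket_size`, and force a shared-embedding parameter into its own bucket when the distributed optimizer is used. Below is a simplified, hypothetical restatement of that policy over plain (name, numel, shared_embedding) records instead of real `torch.nn.Parameter`s, with padding and the alignment assert omitted:

```python
from typing import List, NamedTuple


class ParamInfo(NamedTuple):
    name: str
    numel: int
    shared_embedding: bool = False


def partition_into_buckets(params: List[ParamInfo], bucket_size: int,
                           use_distributed_optimizer: bool) -> List[List[str]]:
    """Simplified sketch of GradBuffer's bucket assignment (no padding shown)."""
    buckets, current, current_numel = [], [], 0
    for p in reversed(params):  # reverse order to roughly follow backprop order
        needs_own_bucket = p.shared_embedding and use_distributed_optimizer
        if needs_own_bucket and current:
            buckets.append(current)  # close the bucket accumulated so far
            current, current_numel = [], 0
        current.append(p.name)
        current_numel += p.numel
        if current_numel >= bucket_size or needs_own_bucket:
            buckets.append(current)
            current, current_numel = [], 0
    if current:
        buckets.append(current)
    return buckets


params = [ParamInfo('embedding.weight', 5000, shared_embedding=True),
          ParamInfo('layer0.weight', 3000),
          ParamInfo('layer1.weight', 3000)]
print(partition_into_buckets(params, bucket_size=4000, use_distributed_optimizer=True))
# [['layer1.weight', 'layer0.weight'], ['embedding.weight']]
```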
@@ -275,7 +334,11 @@ def _pad_if_needed(data_index: int): if bucket_id != cur_bucket_id: bucket_data_end_index = _pad_if_needed(data_start_index) self._set_bucket( - bucket_params, bucket_data_start_index, bucket_data_end_index, cur_bucket_id + bucket_params=bucket_params, + start_index=bucket_data_start_index, + end_index=bucket_data_end_index, + numel_unpadded=per_bucket_numel_unpadded[cur_bucket_id], + bucket_id=cur_bucket_id, ) bucket_data_start_index = bucket_data_end_index bucket_params = set() @@ -288,14 +351,13 @@ def _pad_if_needed(data_index: int): if len(bucket_params) > 0: bucket_data_end_index = _pad_if_needed(data_end_index) self._set_bucket( - bucket_params, bucket_data_start_index, bucket_data_end_index, cur_bucket_id + bucket_params=bucket_params, + start_index=bucket_data_start_index, + end_index=bucket_data_end_index, + numel_unpadded=per_bucket_numel_unpadded[cur_bucket_id], + bucket_id=cur_bucket_id, ) - if not overlap_grad_reduce: - assert len(bucket_params) == len( - params - ), 'All params should be in one bucket when overlap_grad_reduce is False' - # Log buckets for all PP stages. if ( parallel_state.get_data_parallel_rank(with_context_parallel=True) == 0 @@ -328,6 +390,7 @@ def _set_bucket( bucket_params: List[torch.nn.Parameter], start_index: int, end_index: int, + numel_unpadded: int, bucket_id: int, ): """ @@ -348,10 +411,13 @@ def _set_bucket( params=bucket_params, data=bucket_data, offset=start_index, + numel_unpadded=numel_unpadded, data_parallel_group=self.data_parallel_group, data_parallel_world_size=self.data_parallel_world_size, overlap_grad_reduce=self.overlap_grad_reduce, use_distributed_optimizer=self.use_distributed_optimizer, + gradient_scaling_factor=self.gradient_scaling_factor, + check_for_nan_in_grad=self.check_for_nan_in_grad, ) self.buckets.append(bucket) for bucket_param in bucket_params: diff --git a/megatron/core/fusions/fused_bias_dropout.py b/megatron/core/fusions/fused_bias_dropout.py index 14c1fe0d71..08af02b099 100644 --- a/megatron/core/fusions/fused_bias_dropout.py +++ b/megatron/core/fusions/fused_bias_dropout.py @@ -3,6 +3,8 @@ import torch +from megatron.core.jit import jit_fuser + def _bias_dropout_add_func(x_with_bias, residual, prob, training): # type: (Tuple[Tensor, Optional[Tensor]], Tensor, float, bool) -> Tensor @@ -43,14 +45,14 @@ def _bias_dropout_add(x_with_bias, residual, prob): return _bias_dropout_add -@torch.jit.script +@jit_fuser def bias_dropout_add_fused_train( x_with_bias: Tuple[torch.Tensor, Optional[torch.Tensor]], residual: torch.Tensor, prob: float, ) -> torch.Tensor: return _bias_dropout_add_func(x_with_bias, residual, prob, True) -@torch.jit.script +@jit_fuser def bias_dropout_add_fused_inference( x_with_bias: Tuple[torch.Tensor, Optional[torch.Tensor]], residual: torch.Tensor, prob: float, ) -> torch.Tensor: diff --git a/megatron/core/fusions/fused_bias_gelu.py b/megatron/core/fusions/fused_bias_gelu.py index 9c791c1807..2b5467467c 100644 --- a/megatron/core/fusions/fused_bias_gelu.py +++ b/megatron/core/fusions/fused_bias_gelu.py @@ -2,6 +2,8 @@ import torch +from megatron.core.jit import jit_fuser + ###### BIAS GELU FUSION/ NO AUTOGRAD ################ # 1/sqrt(2*pi)-> 0.3989423 # 1/sqrt(2) -> 0.70710678 @@ -11,7 +13,7 @@ # x * 0.5 * (1.0 + torch.erf(x * 0.70710678)) -@torch.jit.script +@jit_fuser def bias_gelu(bias, y): x = bias + y return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))) @@ -20,7 +22,7 @@ def bias_gelu(bias, y): # gradient of tanh approximation of gelu # gradient of 
actual gelu is: # 0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x) -@torch.jit.script +@jit_fuser def bias_gelu_back(g, bias, y): x = bias + y tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)) diff --git a/megatron/core/fusions/fused_bias_swiglu.py b/megatron/core/fusions/fused_bias_swiglu.py new file mode 100644 index 0000000000..710a5e1ff7 --- /dev/null +++ b/megatron/core/fusions/fused_bias_swiglu.py @@ -0,0 +1,81 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + +import torch +import torch.nn.functional as F + +from megatron.core.jit import jit_fuser + +###### BIAS SWIGLU FUSION/ NO AUTOGRAD ################ + + +@jit_fuser +def swiglu(y): + y_1, y_2 = torch.chunk(y, 2, -1) + return F.silu(y_1) * y_2 + + +@jit_fuser +def bias_swiglu(y, bias): + y = y + bias + return swiglu(y) + + +# gradient of tanh approximation of gelu +# gradient of actual gelu is: +# 0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x) +@jit_fuser +def swiglu_back(g, y): + y_1, y_2 = torch.chunk(y, 2, -1) + return torch.cat( + (g * torch.sigmoid(y_1) * (1 + y_1 * (1 - torch.sigmoid(y_1))) * y_2, g * F.silu(y_1)), -1 + ) + + +@jit_fuser +def bias_swiglu_back(g, y, bias): + y = y + bias + return swiglu_back(g, y) + + +class BiasSwiGLUFunction(torch.autograd.Function): + @staticmethod + # bias is an optional argument + def forward(ctx, input, bias): + ctx.save_for_backward(input, bias) + return bias_swiglu(input, bias) + + @staticmethod + def backward(ctx, grad_output): + input, bias = ctx.saved_tensors + tmp = bias_swiglu_back(grad_output, input, bias) + return tmp, tmp + + +class SwiGLUFunction(torch.autograd.Function): + @staticmethod + # bias is an optional argument + def forward(ctx, input): + ctx.save_for_backward(input) + return swiglu(input) + + @staticmethod + def backward(ctx, grad_output): + input = ctx.saved_tensors + tmp = swiglu_back(grad_output, input[0]) + return tmp + + +def bias_swiglu_impl(input, bias): + ori_shape = input.shape + assert len(ori_shape) in [2, 3] + input = input.view(-1, ori_shape[-1]) + if bias is not None: + output = BiasSwiGLUFunction.apply(input, bias) + else: + output = SwiGLUFunction.apply(input) + + return output if len(ori_shape) == 2 else output.view(ori_shape[0], ori_shape[1], -1) + + +# bias_swiglu_impl = BiasSwiGLUFunction.apply +# swiglu_impl = SwiGLUFunction.apply diff --git a/megatron/core/fusions/fused_layer_norm.py b/megatron/core/fusions/fused_layer_norm.py index c12ec173d0..82b4b75b0d 100644 --- a/megatron/core/fusions/fused_layer_norm.py +++ b/megatron/core/fusions/fused_layer_norm.py @@ -1,6 +1,7 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. 
import importlib +import inspect import numbers import torch @@ -63,10 +64,12 @@ def __init__( ): super().__init__() - self.zero_centered_gamma = config.layernorm_zero_centered_gamma + self.config = config + + self.zero_centered_gamma = self.config.layernorm_zero_centered_gamma assert ( - config.normalization == "LayerNorm" - ), f'({config.normalization}) is not supported in FusedLayerNorm' + self.config.normalization == "LayerNorm" + ), f'({self.config.normalization}) is not supported in FusedLayerNorm' # List of hiddens sizes supported in the persistent layer norm kernel # If the hidden size is not supported, fall back to the non-persistent @@ -97,7 +100,7 @@ def __init__( 49152, 65536, ] - persist_layer_norm = config.persist_layer_norm + persist_layer_norm = self.config.persist_layer_norm if hidden_size not in persist_ln_hidden_sizes or not HAVE_PERSIST_LAYER_NORM: persist_layer_norm = False @@ -113,7 +116,7 @@ def __init__( self.bias = Parameter(torch.Tensor(*hidden_size)) self.reset_parameters() self.persist_layer_norm = persist_layer_norm - self.sequence_parallel = config.sequence_parallel + self.sequence_parallel = self.config.sequence_parallel # set sequence parallelism flag on weight and bias parameters setattr(self.weight, 'sequence_parallel', self.sequence_parallel) @@ -133,7 +136,12 @@ def forward(self, input: Tensor) -> Tensor: weight = self.weight + 1 if self.zero_centered_gamma else self.weight if self.persist_layer_norm: - output = FastLayerNormFN.apply(input, weight, self.bias, self.eps) + if 'memory_efficient' in inspect.getfullargspec(FastLayerNormFN.forward).args: + output = FastLayerNormFN.apply( + input, weight, self.bias, self.eps, self.config.memory_efficient_layer_norm + ) + else: + output = FastLayerNormFN.apply(input, weight, self.bias, self.eps) # Apex's fast layer norm function outputs a 'view' tensor (i.e., has # a populated '_base' field). This will result in schedule.py's @@ -144,8 +152,21 @@ def forward(self, input: Tensor) -> Tensor: ) else: - output = FusedLayerNormAffineFunction.apply( - input, weight, self.bias, self.hidden_size, self.eps - ) + if ( + 'memory_efficient' + in inspect.getfullargspec(FusedLayerNormAffineFunction.forward).args + ): + return FusedLayerNormAffineFunction.apply( + input, + weight, + self.bias, + self.hidden_size, + self.eps, + self.config.memory_efficient_layer_norm, + ) + else: + return FusedLayerNormAffineFunction.apply( + input, weight, self.bias, self.hidden_size, self.eps + ) return output diff --git a/megatron/core/fusions/fused_softmax.py b/megatron/core/fusions/fused_softmax.py index 56eb2e8011..c9c0baef09 100644 --- a/megatron/core/fusions/fused_softmax.py +++ b/megatron/core/fusions/fused_softmax.py @@ -1,10 +1,12 @@ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +from typing import Optional import torch import torch.nn as nn from megatron.core.transformer.enums import AttnMaskType +from megatron.core.transformer.utils import get_default_causal_mask class ScaledUpperTriangMaskedSoftmax(torch.autograd.Function): @@ -131,7 +133,12 @@ def __init__( assert self.scale is None or softmax_in_fp32, "softmax should be in fp32 when scaled" - def forward(self, input, mask): + def forward(self, input: torch.Tensor, mask: Optional[torch.Tensor]): + """Forward pass of softmax with masked input. + + In case attn_mask_type is causal the mask is generated and None can be passed. + A user-defined mask is only needed when attn_mask_type is not causal. 
+ """ # [b, np, sq, sk] assert input.dim() == 4 @@ -186,6 +193,15 @@ def forward_torch_softmax(self, input, mask): if self.scale is not None: input = input * self.scale + + # Generate causal mask if not given + sq, sk = input.size(2), input.size(3) + if self.attn_mask_type == AttnMaskType.causal and mask is None and sq > 1: + # If sq == 1 then either KV cache is used or one-element context is passed + # so keeping mask=None in this case; subsequent code should handle it + assert sq == sk, "causal mask is only for self attention" + mask = get_default_causal_mask(sq) + mask_output = self.mask_func(input, mask) if mask is not None else input probs = torch.nn.Softmax(dim=-1)(mask_output) diff --git a/megatron/core/jit.py b/megatron/core/jit.py new file mode 100644 index 0000000000..8bb18d393c --- /dev/null +++ b/megatron/core/jit.py @@ -0,0 +1,11 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + +import torch + +TORCH_MAJOR = int(torch.__version__.split(".")[0]) +TORCH_MINOR = int(torch.__version__.split(".")[1]) + +jit_fuser = torch.jit.script +# nvFuser is deprecated in PyTorch JIT starting from 2.2 +if (TORCH_MAJOR > 2) or (TORCH_MAJOR == 2 and TORCH_MINOR >= 2): + jit_fuser = torch.compile diff --git a/megatron/core/model_parallel_config.py b/megatron/core/model_parallel_config.py index 22d34da921..edcfd3ea3c 100644 --- a/megatron/core/model_parallel_config.py +++ b/megatron/core/model_parallel_config.py @@ -1,7 +1,7 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. from dataclasses import dataclass -from typing import Callable, Optional +from typing import Callable, ContextManager, Optional import torch @@ -35,10 +35,10 @@ class ModelParallelConfig: Initialization -------------- - perform_initialization (bool, default=True): If true, weights are initialized. This option can be useful when you - know you are going to load values from a checkpoint. + perform_initialization (bool, optional): If true, weights are initialized. This option can be useful when you + know you are going to load values from a checkpoint. Defaults to True. - use_cpu_initialization: (bool, default=False): When set to False, we initialize the weights directly on the GPU. + use_cpu_initialization: (bool, optional): When set to False, we initialize the weights directly on the GPU. Transferring weights from CPU to GPU can take a significant amount of time for large models. Defaults to False. Training @@ -48,9 +48,10 @@ class ModelParallelConfig: bf16 (bool): If true, train with bf16 mixed precision training. Defaults to False. - params_dtype (torch.dtype): dtype used when intializing the weights. Defaults to torch.float32 + params_dtype (torch.dtype): dtype used when intializing the weights. Defaults to torch.float32. + + timers (optional, default=None): TODO. - timers (optional, default=None): TODO Optimizations ------------- @@ -61,24 +62,30 @@ class ModelParallelConfig: ". Note that the extension requires CUDA>=11. Otherwise, you must turn off gradient accumulation fusion. Defaults to False. - async_tensor_model_parallel_allreduce (bool, default=True): If true, enables asynchronous execution of - tensor-model-parallel all-reduce with weight gradient compuation of a column-linear layer. Defaults to False. + async_tensor_model_parallel_allreduce (bool, optional): If true, enables asynchronous execution of + tensor-model-parallel all-reduce with weight gradient compuation of a column-linear layer. Defaults to True. 
+ + tp_comm_overlap (bool, optional): If true, allows overlapping of Linear layer execution with tensor parallel + communication collectives like AllGather/ReduceScatter. Overlapping is done for the linear layers wherever + possible during the forward and the backward pass. Defaults to False. + + tp_comm_split_ag (bool, optional): If true, allows All-Gather overlap with Fprop GEMM by pipelining the GEMM + and All-Gather splits. Don't care if tp_comm_overlap is False. Defaults to True. - tp_comm_overlap (bool, default=False): If true, allows overlapping of Linear layer execution with tensor parallel - communication collectives like AllGather/ReduceScatter. Overlapping is done for the linear layers wherever possible - during the forward and the backward pass. Defaults to False. + tp_comm_atomic_ag (bool, optional): If true, allows All-Gather overlap with Fprop GEMM by pipelining the GEMM + and All-Gather both done atomically. Don't care if tp_comm_overlap is False. Defaults to False. - tp_comm_split_ag (bool, default=True): If true, allows All-Gather overlap with Fprop GEMM. Don't care if tp_comm_overlap - is False. + tp_comm_split_rs (bool, optional): If true, allows Reduce-Scatter overlap with Fprop GEMM by pipelining the + GEMM and Reduce-Scatter splits. Don't care if tp_comm_overlap is False. Defaults to True. - tp_comm_split_rs (bool, default=True): If true, allows Reduce-Scatter overlap with Fprop GEMM. Don't care if - tp_comm_overlap is False. + tp_comm_atomic_rs (bool, optional): If true, allows Reduce-Scatter overlap with Fprop GEMM by pipelining the + GEMM and Reduce-Scatter both done atomically. Don't care if tp_comm_overlap is False. Defaults to False. - tp_comm_bulk_dgrad (bool, default=True): If true, allows All-Gather overlap with Bprop activation gradient GEMM. Don't - care if tp_comm_overlap is False. + tp_comm_bulk_dgrad (bool, optional): If true, allows All-Gather overlap with Bprop activation gradient GEMM. Don't + care if tp_comm_overlap is False. Defaults to True. - tp_comm_bulk_wgrad (bool, default=True): If true, allows Reduce-Scatter overlap with Bprop weight gradient GEMM. Don't - care if tp_comm_overlap is False. + tp_comm_bulk_wgrad (bool, optional): If true, allows Reduce-Scatter overlap with Bprop weight gradient GEMM. Don't + care if tp_comm_overlap is False. Defaults to True. Parallelism ----------- @@ -91,36 +98,38 @@ class ModelParallelConfig: pipeline_dtype (required): dtype used in p2p communication, usually params_dtype - grad_scale_func (optional, default=None): If using loss scaling, this function should take the loss and return the - scaled loss. If None, no function is called on the loss. + grad_scale_func (optional): If using loss scaling, this function should take the loss and return the + scaled loss. If None, no function is called on the loss. Defaults to None. enable_autocast (bool): If true runs the forward step function inside torch.autocast context. Default is False. autocast_dtype (torch.dtype): dtype to pass to torch.amp.autocast when enabled. Default is pipeline_dtype. - variable_seq_lengths (bool, default=False): Support for variable sequence lengths across microbatches. Setting this + variable_seq_lengths (bool, optional): Support for variable sequence lengths across microbatches. Setting this communicates the size of tensors during pipeline parallelism communication, because of this extra overhead it - should only be set if the sequence length varies by microbatch within a global batch. 
+ should only be set if the sequence length varies by microbatch within a global batch. Defaults to False. - num_microbatches_with_partial_activation_checkpoints (int, default=None): If int, set the number of microbatches + num_microbatches_with_partial_activation_checkpoints (int, optional): If int, set the number of microbatches where not all of the layers will be checkpointed and recomputed. The rest of the microbatches within the window of maximum outstanding microbatches will recompute all layers (either full recompute or selective recompute). If - None, the checkpoint and recompute will be left up to the forward_step function. + None, the checkpoint and recompute will be left up to the forward_step function. Defaults to None. - overlap_p2p_comm (bool, optional, default=False): When True some of the peer to peer communication for pipeline - parallelism will overlap with computation. Must be False if batch_p2p_comm is true. + overlap_p2p_comm (bool, optional): When True some of the peer to peer communication for pipeline + parallelism will overlap with computation. Must be False if batch_p2p_comm is true. Defaults to False. - batch_p2p_comm (bool, default=True): Use batch_isend_irecv instead of individual isend/irecv calls. Must be False - if overlap_p2p_comm is True. + batch_p2p_comm (bool, optional): Use batch_isend_irecv instead of individual isend/irecv calls. Must be False + if overlap_p2p_comm is True. Defaults to True. - batch_p2p_sync (bool, default=True): When using batch_isend_irecv, do a cuda.device.synchronize afterward to work - around a bug in older version of PyTorch. + batch_p2p_sync (bool, optional): When using batch_isend_irecv, do a cuda.device.synchronize afterward to work + around a bug in older version of PyTorch. Defaults to True. - use_ring_exchange_p2p (bool, default=False): Use custom ring_exchange kernel instead of + use_ring_exchange_p2p (bool, optional): Use custom ring_exchange kernel instead of torch.distributed.batch_isend_irecv(). Requires custom built torch with torch.distributed.ring_exchange. + Defaults to False. - deallocate_pipeline_outputs (optional, default=False): If True, output data is deallocated after the tensor is sent + deallocate_pipeline_outputs (optional): If True, output data is deallocated after the tensor is sent to the next pipeline stage. Helps with saving memory, does nothing when pipeline parallel is not used. + Defaults to False. no_sync_func (optional): Function that creates a context that suppresses asynchronous data-parallel communication. If the model is an instance of core.distributed.DistributedDataParallel, the default is to use @@ -134,12 +143,20 @@ class ModelParallelConfig: optimizer parameter all-gathers). The function should take one argument: an iterable of parameters to be synchronized. - pipeline_model_parallel_split_rank (int, default=None): If int, rank where encoder and decoder should be split in - cases where the model has both an encoder and decoder (e.g., T5). Ignored if None. + pipeline_model_parallel_split_rank (int, optional): If int, rank where encoder and decoder should be split in + cases where the model has both an encoder and decoder (e.g., T5). Ignored if None. Defaults to None. - barrier_with_L1_time (bool, default=True): If true, use barrier with level 1 time measurements. It is up to the user + barrier_with_L1_time (bool, optional): If true, use barrier with level 1 time measurements. It is up to the user to make sure calling barrier with their timers will not result in hangs. 
This can happen if for example the user - adds a level 1 timer that is not called by all ranks. + adds a level 1 timer that is not called by all ranks. Defaults to True. + + CPU Offloading + -------------- + + cpu_offloading (bool): When set to True, all the activations are offloaded to the CPU asynchronously. Defaults to True. + cpu_offloading_num_layers (int): Tells the number of transformer layers for which activations has to be offloaded. Defaults to 0. + cpu_offloading_activations (bool): If True, offloads the activations to CPU. Defaults to True. + cpu_offloading_weights (bool): If True, offloads the weights to CPU. Defaults to True. """ @@ -168,7 +185,9 @@ class ModelParallelConfig: # Debug Options tp_comm_split_ag: bool = True + tp_comm_atomic_ag: bool = False tp_comm_split_rs: bool = True + tp_comm_atomic_rs: bool = False tp_comm_bulk_wgrad: bool = True tp_comm_bulk_dgrad: bool = True @@ -192,6 +211,13 @@ class ModelParallelConfig: param_sync_func: Callable = None pipeline_model_parallel_split_rank: Optional[int] = None + # CPU Offloading + cpu_offloading: bool = False + cpu_offloading_num_layers: int = 0 + _cpu_offloading_context: ContextManager = None # Used for internal use only, not to be set by the user. TODO: Need to move to the 'right' place when possible. + cpu_offloading_activations: bool = True + cpu_offloading_weights: bool = True + # Timing barrier_with_L1_time: bool = True diff --git a/megatron/core/models/T5/t5_model.py b/megatron/core/models/T5/t5_model.py index feaed27413..d6010a116f 100644 --- a/megatron/core/models/T5/t5_model.py +++ b/megatron/core/models/T5/t5_model.py @@ -1,12 +1,13 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. import logging -from typing import List, Literal, Optional +from typing import List, Literal, Optional, Tuple import torch from torch import Tensor from megatron.core import InferenceParams, parallel_state, tensor_parallel +from megatron.core.dist_checkpointing.mapping import ShardedStateDict from megatron.core.models.common.embeddings.language_model_embedding import LanguageModelEmbedding from megatron.core.models.common.embeddings.rotary_pos_embedding import RotaryEmbedding from megatron.core.models.common.language_module.language_module import LanguageModule @@ -77,7 +78,7 @@ class T5Model(LanguageModule): transformer_encoder_layer_spec (ModuleSpec): transformer layer customization specs for encoder transformer_decoder_layer_spec (ModuleSpec): transformer layer customization specs for decoder - + vocab_size (int): vocabulary size max_sequence_length (int): maximum size of sequence. 
This is used for positional embedding @@ -150,7 +151,10 @@ def __init__( # Rotary Position Embeddings if self.position_embedding_type == 'rope': self.rotary_pos_emb = RotaryEmbedding( - self.config.kv_channels, rotary_percent, seq_len_interpolation_factor + kv_channels=self.config.kv_channels, + rotary_percent=rotary_percent, + rotary_interleaved=self.config.rotary_interleaved, + seq_len_interpolation_factor=seq_len_interpolation_factor, ) # Transformer encoder @@ -332,7 +336,8 @@ def shared_embedding_or_output_weight(self) -> Tensor: return self.lm_head.output_layer.weight return None - def sharded_state_dict(self, prefix: str = ''): + def sharded_state_dict(self, prefix: str = '', sharded_offsets: tuple = ()) -> ShardedStateDict: + assert not sharded_offsets, "Unexpected sharded offsets" sharded_state_dict = {} if self.pre_process: diff --git a/megatron/core/models/T5/t5_spec.py b/megatron/core/models/T5/t5_spec.py index 60f33dbd98..f32f1193f0 100644 --- a/megatron/core/models/T5/t5_spec.py +++ b/megatron/core/models/T5/t5_spec.py @@ -116,6 +116,10 @@ def encoder_model_with_local_spec() -> ModuleSpec: ), ), mlp_bda=get_bias_dropout_add, + sharded_state_dict_keys_map={ + 'input_layernorm.': 'self_attention.linear_qkv.layer_norm_', + 'pre_mlp_layernorm.': 'mlp.linear_fc1.layer_norm_', + }, ), ) @@ -156,6 +160,10 @@ def decoder_model_with_local_spec() -> ModuleSpec: ), ), mlp_bda=get_bias_dropout_add, + sharded_state_dict_keys_map={ + 'input_layernorm.': 'self_attention.linear_qkv.layer_norm_', + 'pre_mlp_layernorm.': 'mlp.linear_fc1.layer_norm_', + }, ), ) diff --git a/megatron/core/models/bert/bert_model.py b/megatron/core/models/bert/bert_model.py index a556ac8ea5..15c49d2a50 100644 --- a/megatron/core/models/bert/bert_model.py +++ b/megatron/core/models/bert/bert_model.py @@ -1,9 +1,11 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +import os from typing import Literal, Optional import torch from torch import Tensor +from megatron.core import parallel_state from megatron.core.models.bert.bert_lm_head import BertLMHead from megatron.core.models.bert.pooler import Pooler from megatron.core.models.common.embeddings.language_model_embedding import LanguageModelEmbedding @@ -58,6 +60,11 @@ def __init__( if return_embeddings: assert self.post_process and self.add_binary_head + assert ( + os.getenv('NVTE_ALLOW_NONDETERMINISTIC_ALGO') == '0' + or os.getenv('NVTE_FLASH_ATTN') == '0' + ), "Bert currently does not support flash attention. Please set env variable NVTE_FLASH_ATTN=0 or set NVTE_ALLOW_NONDETERMINISTIC_ALGO=0" + self.config: TransformerConfig = config self.transformer_layer_spec: ModuleSpec = transformer_layer_spec self.vocab_size = vocab_size @@ -86,7 +93,10 @@ def __init__( if self.position_embedding_type == 'rope': self.rotary_pos_emb = RotaryEmbedding( - self.config.kv_channels, rotary_percent, seq_len_interpolation_factor + kv_channels=self.config.kv_channels, + rotary_percent=rotary_percent, + rotary_interleaved=self.config.rotary_interleaved, + seq_len_interpolation_factor=seq_len_interpolation_factor, ) # Transformer. @@ -193,7 +203,12 @@ def forward( """ extended_attention_mask = self.bert_extended_attention_mask(attention_mask) - position_ids = self.bert_position_ids(input_ids) + if parallel_state.is_pipeline_first_stage(): + input_ids = input_ids + position_ids = self.bert_position_ids(input_ids) + else: + position_ids = None + input_ids = None # Encoder embedding. 
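# --- Illustrative sketch (editor's note, not part of the patch) ---
# T5 and BERT above now pass the rotary options by keyword so that the new
# rotary_interleaved flag reaches RotaryEmbedding. The flag changes how the head
# dimension is rotated: adjacent channel pairs instead of the two halves. A
# self-contained comparison of the two layouts, mirroring the _rotate_half
# change later in this diff:
import torch


def rotate_half(x: torch.Tensor, interleaved: bool) -> torch.Tensor:
    if not interleaved:
        x1, x2 = torch.chunk(x, 2, dim=-1)
        return torch.cat((-x2, x1), dim=-1)
    x1, x2 = x[..., ::2], x[..., 1::2]
    return torch.stack((-x2, x1), dim=-1).flatten(start_dim=-2)


x = torch.arange(8.0).view(1, 1, 1, 8)
print(rotate_half(x, interleaved=False))  # [-4., -5., -6., -7.,  0.,  1.,  2.,  3.]
print(rotate_half(x, interleaved=True))   # [-1.,  0., -3.,  2., -5.,  4., -7.,  6.]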
if self.pre_process: diff --git a/megatron/core/models/common/embeddings/language_model_embedding.py b/megatron/core/models/common/embeddings/language_model_embedding.py index 40d679d7b1..3e1e2114c0 100644 --- a/megatron/core/models/common/embeddings/language_model_embedding.py +++ b/megatron/core/models/common/embeddings/language_model_embedding.py @@ -1,6 +1,6 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. -from typing import Literal, Optional +from typing import Literal import torch from torch import Tensor @@ -8,10 +8,6 @@ from megatron.core import tensor_parallel from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.transformer_config import TransformerConfig -from megatron.core.utils import ( - make_sharded_tensor_for_checkpoint, - make_tp_sharded_tensor_for_checkpoint, -) class LanguageModelEmbedding(MegatronModule): @@ -130,34 +126,3 @@ def forward(self, input_ids: Tensor, position_ids: Tensor, tokentype_ids: int = embeddings = self.embedding_dropout(embeddings) return embeddings - - def sharded_state_dict(self, prefix=''): - - sharded_state_dict = {} - - word_embeddings_prefix = f'{prefix}word_embeddings.' - word_embeddings_state_dict = self.word_embeddings.state_dict( - prefix=word_embeddings_prefix, keep_vars=True - ) - - sharded_word_embeddings_key = f'{word_embeddings_prefix}weight' - sharded_word_embeddings_tensor = make_tp_sharded_tensor_for_checkpoint( - tensor=word_embeddings_state_dict[sharded_word_embeddings_key], - key=sharded_word_embeddings_key, - allow_shape_mismatch=True, - ) - sharded_state_dict[sharded_word_embeddings_key] = sharded_word_embeddings_tensor - - if self.add_position_embedding: - position_embeddings_prefix = f'{prefix}position_embeddings.' - position_embeddings_state_dict = self.position_embeddings.state_dict( - prefix=position_embeddings_prefix, keep_vars=True - ) - sharded_position_embeddings_key = f'{position_embeddings_prefix}weight' - sharded_position_embeddings_tensor = make_sharded_tensor_for_checkpoint( - tensor=position_embeddings_state_dict[sharded_position_embeddings_key], - key=sharded_position_embeddings_key, - ) - sharded_state_dict[sharded_position_embeddings_key] = sharded_position_embeddings_tensor - - return sharded_state_dict diff --git a/megatron/core/models/common/embeddings/rotary_pos_embedding.py b/megatron/core/models/common/embeddings/rotary_pos_embedding.py index ee2260e3ae..238838fa6b 100644 --- a/megatron/core/models/common/embeddings/rotary_pos_embedding.py +++ b/megatron/core/models/common/embeddings/rotary_pos_embedding.py @@ -2,17 +2,32 @@ from __future__ import annotations -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from megatron.core.transformer.transformer_config import TransformerConfig from megatron.core.transformer.transformer_block import TransformerBlock +import logging + import torch from torch import Tensor, nn from megatron.core import parallel_state +logger = logging.getLogger(__name__) + +try: + from apex.transformer.functional import ( + fused_apply_rotary_pos_emb, + fused_apply_rotary_pos_emb_thd, + ) + + HAVE_APPLY_ROPE_FUSION = True +except: + HAVE_APPLY_ROPE_FUSION = False + + __all__ = ['RotaryEmbedding', 'apply_rotary_pos_emb'] @@ -42,6 +57,7 @@ def __init__( self, kv_channels: int, rotary_percent: float, + rotary_interleaved: bool = False, seq_len_interpolation_factor: float = None, rotary_base: int = 10000, ) -> None: @@ -50,6 +66,7 @@ def __init__( dim = kv_channels if rotary_percent < 1.0: 
dim = int(dim * rotary_percent) + self.rotary_interleaved = rotary_interleaved self.seq_len_interpolation_factor = seq_len_interpolation_factor self.inv_freq = 1.0 / ( @@ -81,7 +98,12 @@ def forward(self, max_seq_len: int, offset: int = 0) -> Tensor: freqs = torch.outer(seq, self.inv_freq) # first part even vector components, second part odd vector components, # 2 * dim in dimension size - emb = torch.cat((freqs, freqs), dim=-1) + if not self.rotary_interleaved: + emb = torch.cat((freqs, freqs), dim=-1) + else: + emb = torch.stack((freqs.view(-1, 1), freqs.view(-1, 1)), dim=-1).view( + freqs.shape[0], -1 + ) # emb [seq_length, .., dim] emb = emb[:, None, None, :] if parallel_state.get_context_parallel_world_size() > 1: @@ -127,7 +149,7 @@ def get_rotary_seq_len( return rotary_seq_len -def _rotate_half(x: Tensor) -> Tensor: +def _rotate_half(x: Tensor, rotary_interleaved: bool) -> Tensor: """Change sign so the last dimension becomes [-odd, +even] Args: @@ -136,12 +158,17 @@ def _rotate_half(x: Tensor) -> Tensor: Returns: Tensor: Tensor rotated half """ - - x1, x2 = torch.chunk(x, 2, dim=-1) - return torch.cat((-x2, x1), dim=-1) + if not rotary_interleaved: + x1, x2 = torch.chunk(x, 2, dim=-1) + return torch.cat((-x2, x1), dim=-1) + else: + x1 = x[:, :, :, ::2] + x2 = x[:, :, :, 1::2] + x_new = torch.stack((-x2, x1), dim=-1) + return x_new.view(x_new.shape[0], x_new.shape[1], x_new.shape[2], -1) -def apply_rotary_pos_emb(t: Tensor, freqs: Tensor) -> Tensor: +def apply_rotary_pos_emb_bshd(t: Tensor, freqs: Tensor, rotary_interleaved: bool = False) -> Tensor: """Apply rotary positional embedding to input tensor T. check https://kexue.fm/archives/8265 for detailed formulas @@ -163,5 +190,60 @@ def apply_rotary_pos_emb(t: Tensor, freqs: Tensor) -> Tensor: cos_ = torch.cos(freqs).to(t.dtype) sin_ = torch.sin(freqs).to(t.dtype) - t = (t * cos_) + (_rotate_half(t) * sin_) + t = (t * cos_) + (_rotate_half(t, rotary_interleaved) * sin_) return torch.cat((t, t_pass), dim=-1) + + +def apply_rotary_pos_emb_thd( + t: Tensor, cu_seqlens: Tensor, freqs: Tensor, rotary_interleaved: bool = False +) -> Tensor: + + """A baseline implementation of applying RoPE for `thd` format. + + Args: + t (Tensor): Input tensor T is of shape [t, h, d] + cu_seqlens(Tensor): Cumulative sum of sequence lengths in a batch for `t`, + with shape [b + 1] and dtype torch.int32. + freqs (Tensor): Rotary Positional embedding tensor freq is of shape [max_s, 1, 1, d] + + Returns: + Tensor: Shape [t, h, d]. The input tensor after applying RoPE. + """ + + seqlens = (cu_seqlens[1:] - cu_seqlens[:-1]).tolist() + return torch.cat( + [ + apply_rotary_pos_emb_bshd(x.unsqueeze(1), freqs[: x.size(0)]) + for x in torch.split(t, seqlens) + ] + ).squeeze(1) + + +def apply_rotary_pos_emb( + t: Tensor, freqs: Tensor, config: TransformerConfig, cu_seqlens: Optional[Tensor] = None, +): + """ + Reroute to the appropriate apply_rotary_pos_emb function depending on + fused/unfused kernels, or bshd (conventional) / thd (packed seq) format + """ + if config.apply_rope_fusion and not HAVE_APPLY_ROPE_FUSION: + # setting apply_rope_fusion in config to False so that subsequent queries to this config also return False + config.apply_rope_fusion = False + if not getattr(apply_rotary_pos_emb, "printed_fused_warning", False): + logger.warning( + "Setting apply_rope_fusion to false because its implementation" + " is not included in Apex. 
Try upgrading to the latest version" + ) + apply_rotary_pos_emb.printed_fused_warning = True + if config.apply_rope_fusion: + if cu_seqlens is None: + return fused_apply_rotary_pos_emb(t, freqs, transpose_output_memory=True) + else: + return fused_apply_rotary_pos_emb_thd(t, cu_seqlens, freqs) + else: + if cu_seqlens is None: + return apply_rotary_pos_emb_bshd(t, freqs, rotary_interleaved=config.rotary_interleaved) + else: + return apply_rotary_pos_emb_thd( + t, cu_seqlens, freqs, rotary_interleaved=config.rotary_interleaved + ) diff --git a/megatron/core/models/common/language_module/language_module.py b/megatron/core/models/common/language_module/language_module.py index 3883b7acd1..1e8b510824 100644 --- a/megatron/core/models/common/language_module/language_module.py +++ b/megatron/core/models/common/language_module/language_module.py @@ -53,12 +53,17 @@ def initialize_last_stage_with_word_embeddings(self) -> None: self.shared_embedding_or_output_weight().zero_out_wgrad = True return + if self.pre_process and not self.post_process: + assert parallel_state.is_pipeline_first_stage() + self.shared_embedding_or_output_weight().shared_embedding = True + if self.post_process and not self.pre_process: assert not parallel_state.is_pipeline_first_stage() # set word_embeddings weights to 0 here, then copy first # stage's weights using all_reduce below. self.output_layer.weight.data.fill_(0) self.output_layer.weight.shared = True + self.output_layer.weight.shared_embedding = True # Parameters are shared between the word embeddings layers, and the # heads at the end of the model. In a pipelined setup with more than diff --git a/megatron/core/models/common/vision_module/__init__.py b/megatron/core/models/common/vision_module/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/megatron/core/models/common/vision_module/vision_module.py b/megatron/core/models/common/vision_module/vision_module.py new file mode 100644 index 0000000000..5dc51873a4 --- /dev/null +++ b/megatron/core/models/common/vision_module/vision_module.py @@ -0,0 +1,17 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +"""Megatron Vision Module.""" + +from megatron.core.transformer.module import MegatronModule +from megatron.core.transformer.transformer_config import TransformerConfig + + +# Note: This is only a stub at the moment. This will be expanded in follow-up changes. +class VisionModule(MegatronModule): + """Base vision module that has common helper functions used across CLIP, ViT, etc. 
+ + Args: + config (TransformerConfig): Input transformer config for the model + """ + + def __init__(self, config: TransformerConfig) -> None: + super().__init__(config=config) diff --git a/megatron/core/models/gpt/gpt_layer_specs.py b/megatron/core/models/gpt/gpt_layer_specs.py index aace1590d8..ef9b5a5184 100755 --- a/megatron/core/models/gpt/gpt_layer_specs.py +++ b/megatron/core/models/gpt/gpt_layer_specs.py @@ -7,18 +7,26 @@ from megatron.core.transformer.custom_layers.transformer_engine import ( TEDotProductAttention, TELayerNormColumnParallelLinear, + TENorm, TERowParallelLinear, ) from megatron.core.transformer.dot_product_attention import DotProductAttention from megatron.core.transformer.enums import AttnMaskType +from megatron.core.transformer.identity_op import IdentityOp from megatron.core.transformer.mlp import MLP, MLPSubmodules +from megatron.core.transformer.moe.moe_layer import MoELayer from megatron.core.transformer.spec_utils import ModuleSpec -from megatron.core.transformer.switch_mlp import SwitchMLP +from megatron.core.transformer.transformer_block import TransformerBlockSubmodules from megatron.core.transformer.transformer_layer import TransformerLayer, TransformerLayerSubmodules # Use this spec to use lower level Transformer Engine modules (required for fp8 training) -def get_gpt_layer_with_transformer_engine_spec() -> ModuleSpec: +def get_gpt_layer_with_transformer_engine_spec( + num_experts: int = None, moe_grouped_gemm: bool = False +) -> ModuleSpec: + mlp = _get_mlp_module_spec( + use_te=True, num_experts=num_experts, moe_grouped_gemm=moe_grouped_gemm + ) return ModuleSpec( module=TransformerLayer, submodules=TransformerLayerSubmodules( @@ -32,19 +40,18 @@ def get_gpt_layer_with_transformer_engine_spec() -> ModuleSpec: ), ), self_attn_bda=get_bias_dropout_add, - mlp=ModuleSpec( - module=MLP, - submodules=MLPSubmodules( - linear_fc1=TELayerNormColumnParallelLinear, linear_fc2=TERowParallelLinear, - ), - ), + pre_mlp_layernorm=TENorm if num_experts else IdentityOp, + mlp=mlp, mlp_bda=get_bias_dropout_add, ), ) # Use this spec for an implementation using only modules in megatron core -def get_gpt_layer_local_spec() -> ModuleSpec: +def get_gpt_layer_local_spec(num_experts: int = None, moe_grouped_gemm: bool = False) -> ModuleSpec: + mlp = _get_mlp_module_spec( + use_te=False, num_experts=num_experts, moe_grouped_gemm=moe_grouped_gemm + ) return ModuleSpec( module=TransformerLayer, submodules=TransformerLayerSubmodules( @@ -60,64 +67,34 @@ def get_gpt_layer_local_spec() -> ModuleSpec: ), self_attn_bda=get_bias_dropout_add, pre_mlp_layernorm=FusedLayerNorm, - mlp=ModuleSpec( - module=MLP, - submodules=MLPSubmodules( - linear_fc1=ColumnParallelLinear, linear_fc2=RowParallelLinear, - ), - ), + mlp=mlp, mlp_bda=get_bias_dropout_add, + sharded_state_dict_keys_map={ + 'input_layernorm.': 'self_attention.linear_qkv.layer_norm_', + 'pre_mlp_layernorm.': 'mlp.linear_fc1.layer_norm_', + }, ), ) -# Use this spec to use lower level Transformer Engine modules and SwitchMLP based MoE -gpt_layer_with_transformer_engine_spec_moe = ModuleSpec( - module=TransformerLayer, - submodules=TransformerLayerSubmodules( - self_attention=ModuleSpec( - module=SelfAttention, - params={"attn_mask_type": AttnMaskType.causal}, - submodules=SelfAttentionSubmodules( - linear_qkv=TELayerNormColumnParallelLinear, - core_attention=TEDotProductAttention, - linear_proj=TERowParallelLinear, - ), - ), - self_attn_bda=get_bias_dropout_add, - pre_mlp_layernorm=FusedLayerNorm, - mlp=ModuleSpec( - 
module=SwitchMLP, # MOE +# Helper function to get module spec for MLP/MoE +def _get_mlp_module_spec( + use_te: bool = True, num_experts: int = None, moe_grouped_gemm: bool = False +) -> ModuleSpec: + if num_experts is None: + # Dense MLP w/ or w/o TE modules. + return ModuleSpec( + module=MLP, submodules=MLPSubmodules( - linear_fc1=ColumnParallelLinear, linear_fc2=RowParallelLinear, + linear_fc1=TELayerNormColumnParallelLinear if use_te else ColumnParallelLinear, + linear_fc2=TERowParallelLinear if use_te else RowParallelLinear, ), - ), - mlp_bda=get_bias_dropout_add, - ), -) - -# Use this spec for an implementation using only modules in megatron core for MoE models -gpt_layer_local_spec_moe = ModuleSpec( - module=TransformerLayer, - submodules=TransformerLayerSubmodules( - input_layernorm=FusedLayerNorm, - self_attention=ModuleSpec( - module=SelfAttention, - params={"attn_mask_type": AttnMaskType.causal}, - submodules=SelfAttentionSubmodules( - linear_qkv=ColumnParallelLinear, - core_attention=DotProductAttention, - linear_proj=RowParallelLinear, - ), - ), - self_attn_bda=get_bias_dropout_add, - pre_mlp_layernorm=FusedLayerNorm, - mlp=ModuleSpec( - module=SwitchMLP, # MOE - submodules=MLPSubmodules( - linear_fc1=ColumnParallelLinear, linear_fc2=RowParallelLinear, - ), - ), - mlp_bda=get_bias_dropout_add, - ), -) + ) + else: + # Mixture of experts with modules in megatron core. + return ModuleSpec( + module=MoELayer, + submodules=MLPSubmodules(linear_fc1=ColumnParallelLinear, linear_fc2=RowParallelLinear,) + if not moe_grouped_gemm + else None, + ) diff --git a/megatron/core/models/gpt/gpt_model.py b/megatron/core/models/gpt/gpt_model.py index 2cf26bacac..ba3dbc9eb7 100644 --- a/megatron/core/models/gpt/gpt_model.py +++ b/megatron/core/models/gpt/gpt_model.py @@ -1,20 +1,24 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. import logging -from typing import Literal, Optional, Union +from typing import Literal, Optional, Tuple, Union import torch from torch import Tensor from megatron.core import InferenceParams, parallel_state, tensor_parallel +from megatron.core.dist_checkpointing.mapping import ShardedStateDict from megatron.core.models.common.embeddings.language_model_embedding import LanguageModelEmbedding from megatron.core.models.common.embeddings.rotary_pos_embedding import RotaryEmbedding from megatron.core.models.common.language_module.language_module import LanguageModule +from megatron.core.packed_seq_params import PackedSeqParams from megatron.core.transformer.enums import AttnMaskType, ModelType from megatron.core.transformer.spec_utils import ModuleSpec from megatron.core.transformer.transformer_block import TransformerBlock from megatron.core.transformer.transformer_config import TransformerConfig from megatron.core.utils import make_tp_sharded_tensor_for_checkpoint +from megatron.tensor_logging import log_tensor +from megatron import get_args class GPTModel(LanguageModule): @@ -68,6 +72,10 @@ def __init__( # TODO: remove this dependency ? self.model_type = ModelType.encoder_or_decoder + # These 2 attributes are needed for TensorRT-LLM export. 
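# --- Illustrative sketch (editor's note, not part of the patch) ---
# With gpt_layer_with_transformer_engine_spec_moe and gpt_layer_local_spec_moe
# removed above, callers request an MoE layer through the same two spec
# functions (assuming the usual TE/Apex dependencies are installed):
from megatron.core.models.gpt.gpt_layer_specs import (
    get_gpt_layer_local_spec,
    get_gpt_layer_with_transformer_engine_spec,
)

dense_spec = get_gpt_layer_local_spec()                      # plain MLP
moe_spec = get_gpt_layer_local_spec(num_experts=8)           # MoELayer-based MLP
te_moe_spec = get_gpt_layer_with_transformer_engine_spec(
    num_experts=8, moe_grouped_gemm=True
)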
+ self.max_position_embeddings = max_sequence_length + self.rotary_percent = rotary_percent + if self.pre_process: self.embedding = LanguageModelEmbedding( config=self.config, @@ -80,6 +88,7 @@ def __init__( self.rotary_pos_emb = RotaryEmbedding( kv_channels=self.config.kv_channels, rotary_percent=rotary_percent, + rotary_interleaved=self.config.rotary_interleaved, seq_len_interpolation_factor=seq_len_interpolation_factor, rotary_base=rotary_base, ) @@ -109,6 +118,58 @@ def __init__( if self.share_embeddings_and_output_weights and (self.pre_process or self.post_process): self.initialize_last_stage_with_word_embeddings() + from megatron import print_rank_0 + print_rank_0(self) + for i, (key, value) in enumerate(self.named_parameters()): + # Store standardized parameter names for debug purposes. + key=key.split(".") + if key[0]=="decoder": + # Remove "encoder" prefix. + key=key[1:] + if key[0]=="layers": + # Shift layer index. + key[1]=str(int(key[1])+1) + if key[2]=="input_layernorm": + key[2]="norm_1" + elif key[2]=="pre_mlp_layernorm": + key[2]="norm_2" + elif key[2]=="self_attention": + if "layer_norm" in key[-1]: + key=[*key[:2], "norm_1", key[-1].split("_")[-1]] + else: + key[2]="self_attn" + if key[3]=="linear_qkv": + key[3]="query_key_value" + elif key[3]=="linear_proj": + key[3]="dense" + + elif key[2]=="mlp": + mlp_key=3 + if "layer_norm" in key[-1]: + key=[*key[:2], "norm_2", key[-1].split("_")[-1]] + else: + if key[3]=="experts": + key.pop(4) + mlp_key=5 + if key[mlp_key]=="linear_fc1": + key[mlp_key]="layer_1" + elif key[mlp_key]=="linear_fc2": + key[mlp_key]="layer_2" + else: + assert key[0]=="final_layernorm", key[0] + key=["layers",str(self.config.num_layers+1), "final_norm"]+key[1:] + elif key[0]=="embedding": + key=["layers", "0", "_".join(key[1:])] + elif key[0] == "output_layer": + key = ["layers", str(self.config.num_layers+1), "output_weights"] + else: + # Not implemented but still ok + pass + + value.param_name = ".".join(key) + value.param_idx = i + + def set_input_tensor(self, input_tensor: Tensor) -> None: """Sets input tensor to the model. 
@@ -133,6 +194,7 @@ def forward( decoder_input: Tensor = None, labels: Tensor = None, inference_params: InferenceParams = None, + packed_seq_params: PackedSeqParams = None, extra_block_kwargs: dict = None, ) -> Tensor: """Forward function of the GPT Model This function passes the input tensors @@ -149,6 +211,14 @@ def forward( pass elif self.pre_process: decoder_input = self.embedding(input_ids=input_ids, position_ids=position_ids) + args = get_args() + if args.debug_layer_outputs: + log_tensor(f"Global layer 0 fw: Embedding output", decoder_input.transpose(0, 1), level=args.debug_layer_outputs) + if args.debug_layer_gradients: + decoder_input.register_hook(lambda grad: log_tensor( + f"Global layer 1 bw: Embedding output", + grad.transpose(0, 1), level=args.debug_layer_gradients + )) else: # intermediate stage of pipeline # decoder will get hidden_states from encoder.input_tensor @@ -168,6 +238,7 @@ def forward( attention_mask=attention_mask, inference_params=inference_params, rotary_pos_emb=rotary_pos_emb, + packed_seq_params=packed_seq_params, **(extra_block_kwargs or {}), ) @@ -188,7 +259,8 @@ def forward( return loss - def sharded_state_dict(self, prefix: str = '') -> dict: + def sharded_state_dict(self, prefix: str = '', sharded_offsets: tuple = ()) -> ShardedStateDict: + assert not sharded_offsets, "Unexpected sharded offsets" sharded_state_dict = {} if self.pre_process: @@ -214,7 +286,7 @@ def sharded_state_dict(self, prefix: str = '') -> dict: last_stage_word_emb_replica_id = ( 1, # copy of first stage embedding 0, - parallel_state.get_data_parallel_rank(), + parallel_state.get_data_parallel_rank(with_context_parallel=True), ) sharded_output_layer_tensor = make_tp_sharded_tensor_for_checkpoint( diff --git a/megatron/core/models/multimodal/__init__.py b/megatron/core/models/multimodal/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/megatron/core/models/multimodal/llava_model.py b/megatron/core/models/multimodal/llava_model.py new file mode 100644 index 0000000000..3ab4d1a98c --- /dev/null +++ b/megatron/core/models/multimodal/llava_model.py @@ -0,0 +1,119 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + +import torch + +from megatron.core import parallel_state, tensor_parallel +from megatron.core.models.gpt import GPTModel +from megatron.core.models.vision.clip_vit_model import CLIPViTModel +from megatron.core.transformer import MegatronModule +from megatron.core.transformer.spec_utils import ModuleSpec +from megatron.core.transformer.transformer_config import TransformerConfig + + +# Note: This is unused at the moment and may be missing features. Follow-up changes will use this. +class LLaVAModel(MegatronModule): + """LLaVA multi-modal model. + + Args: + language_transformer_config (TransformerConfig): Transformer config for the language model. + language_transformer_layer_spec (ModuleSpec): Specifies module to use for transformer layers of the language model. + vocab_size (int): Vocabulary size. + max_sequence_length (int): maximum sequence length. This is used for positional embedding. + vision_transformer_config (TransformerConfig): Transformer config for the vision model. + vision_transformer_layer_spec (ModuleSpec): Specifies module to use for transformer layers of the vision model. 
+ """ + + def __init__( + self, + language_transformer_config: TransformerConfig, + language_transformer_layer_spec: ModuleSpec, + vocab_size: int, + max_sequence_length: int, + vision_transformer_config: TransformerConfig, + vision_transformer_layer_spec: ModuleSpec, + ) -> None: + super().__init__(config=language_transformer_config) + + if parallel_state.get_pipeline_model_parallel_world_size() > 1: + raise NotImplementedError("pipeline parallelism is not supported in this model yet.") + + self.language_model = GPTModel( + language_transformer_config, + language_transformer_layer_spec, + vocab_size, + max_sequence_length, + ) + + self.vision_model = CLIPViTModel(vision_transformer_config, vision_transformer_layer_spec) + + # Map (intermediate) vision model outputs to the language model input dimension. + # TODO: Separate work is adding a configurable multimodal projection layer. Replace this with that one. + self._vision_projection = tensor_parallel.ColumnParallelLinear( + vision_transformer_config.hidden_size, + language_transformer_config.hidden_size, + config=vision_transformer_config, + init_method=vision_transformer_config.init_method, + bias=False, + skip_bias_add=True, + gather_output=True, + ) + + def set_input_tensor(self, input_tensor: torch.Tensor) -> None: + """Sets input tensor to the model. + + NOTE: Pipeline parallelism is not supported in this model yet. This is just a placeholder implementation. + + Args: + input_tensor (Tensor): Sets the input tensor for the model. + """ + self.vision_model.set_input_tensor(input_tensor) + + def forward( + self, + image: torch.Tensor, + input_ids: torch.Tensor, + position_ids: torch.Tensor, + attention_mask: torch.Tensor, + labels: torch.Tensor = None, + ) -> torch.Tensor: + """Forward function of the LLaVA model. + + Args: + image (torch.Tensor): input image of shape [batch, img_h, img_w]. + input_ids (torch.Tensor): input text ids [batch, text_seq_len]. + position_ids (torch.Tensor): input text position ids [batch, text_seq_len]. + attention_mask (torch.Tensor): attention mask for the language model [batch, 1, combined_seq_len, combined_seq_len]. + labels (torch.Tensor): Optional target text labels [batch, combined_seq_len]. + + Returns: + output (torch.Tensor): Loss of shape [b, s] if labels are provided, otherwise logits of shape [b, s, vocab_size]. + """ + image_embeddings = self.vision_model(image) # [b, img_seq_len, h_vision] + + # map vision model output size to language model input size. + image_embeddings, _ = self._vision_projection( + image_embeddings + ) # [b, img_seq_len, h_language] + + image_embeddings = image_embeddings.permute(1, 0, 2) # [img_seq_len, b, h_language] + language_embeddings = self.language_model.embedding( + input_ids=input_ids, position_ids=position_ids + ) # [text_seq_len, b, h_language] + combined_embeddings = torch.cat( + [image_embeddings, language_embeddings], dim=0 + ) # [combined_seq_len, b, h_language] + + # Embedding is computed above so we can discard input and position ids. + input_ids = None + position_ids = None + + # Note: This returns loss if labels are provided, otherwise logits. 
+ output = self.language_model( + input_ids, + position_ids, + attention_mask, + decoder_input=combined_embeddings, + labels=labels, + ) + + return output diff --git a/megatron/core/models/vision/__init__.py b/megatron/core/models/vision/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/megatron/core/models/vision/clip_vit_model.py b/megatron/core/models/vision/clip_vit_model.py new file mode 100644 index 0000000000..f898f1e54a --- /dev/null +++ b/megatron/core/models/vision/clip_vit_model.py @@ -0,0 +1,139 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + +from typing import Optional + +import torch + +from megatron.core import tensor_parallel +from megatron.core.models.common.vision_module.vision_module import VisionModule +from megatron.core.transformer.custom_layers.transformer_engine import TENorm +from megatron.core.transformer.enums import ModelType +from megatron.core.transformer.spec_utils import ModuleSpec +from megatron.core.transformer.transformer_block import TransformerBlock +from megatron.core.transformer.transformer_config import TransformerConfig + + +# Note: This is unused at the moment and is missing features like position embedding interpolation. +# Follow-up changes will use this and expand the functionality. +class CLIPViTModel(VisionModule): + """CLIP ViT vision model. + + Args: + transformer_config (TransformerConfig): Transformer config + transformer_layer_spec (ModuleSpec): Specifies module to use for transformer layers + patch_dim (int): Image patch size. + img_h (int): Input image height. + img_w (int): Input image width. + add_class_token (bool, optional): Include a class token. Defaults to True. + class_token_len (int): Class token length. Defaults to 1 but 8 may be faster. + """ + + def __init__( + self, + transformer_config: TransformerConfig, + transformer_layer_spec: ModuleSpec, + patch_dim: int = 14, + img_h: int = 336, + img_w: int = 336, + add_class_token: bool = True, + class_token_len: int = 1, + ) -> None: + super().__init__(config=transformer_config) + + self.visual_hidden_size = transformer_config.hidden_size + self.patch_dim = patch_dim + self.img_h = img_h + self.img_w = img_w + assert self.img_h % self.patch_dim == 0 + assert self.img_w % self.patch_dim == 0 + self.num_patches_per_dim_h = self.img_h // self.patch_dim + self.num_patches_per_dim_w = self.img_w // self.patch_dim + self.num_patches = self.num_patches_per_dim_h * self.num_patches_per_dim_w + + self.add_class_token = add_class_token + self.class_token_len = class_token_len + + self.seq_length = self.num_patches + (self.class_token_len if self.add_class_token else 0) + + self.conv1 = torch.nn.Conv2d( + in_channels=3, + out_channels=self.visual_hidden_size, + kernel_size=self.patch_dim, + stride=self.patch_dim, + bias=False, + ) + + self.position_ids = torch.arange(self.seq_length).expand(1, -1).cuda() + + self.position_embeddings = torch.nn.Embedding(self.seq_length, self.visual_hidden_size) + + self.add_class_token = add_class_token + if self.add_class_token: + self.class_token = torch.nn.Parameter( + torch.randn(1, self.class_token_len, self.visual_hidden_size) + ) + + self.ln_pre = TENorm( + config=self.config, + hidden_size=self.visual_hidden_size, + eps=self.config.layernorm_epsilon, + ) + + self.model_type = ModelType.encoder_or_decoder + + # Transformer + final layer norm (via post_process) + # TODO: Follow-up changes will make pre and post_process configurable. They are needed for supporting pipeline parallelism. 
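# --- Illustrative sketch (editor's note, not part of the patch) ---
# With the constructor defaults above (336x336 images, 14x14 patches, one class
# token), the vision sequence length works out as:
img_h = img_w = 336
patch_dim = 14
class_token_len = 1
num_patches = (img_h // patch_dim) * (img_w // patch_dim)  # 24 * 24 = 576
seq_length = num_patches + class_token_len                 # 577
assert (num_patches, seq_length) == (576, 577)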
+ self.transformer = TransformerBlock( + config=transformer_config, + spec=transformer_layer_spec, + pre_process=True, + post_process=True, + ) + + # Note: a final linear layer present in some implementations is omitted here. It can be added separately where needed. + + def set_input_tensor(self, input_tensor: torch.Tensor) -> None: + """Sets input tensor to the model. + + Args: + input_tensor (Tensor): Sets the input tensor for the model. + """ + self.transformer.set_input_tensor(input_tensor) + + def forward( + self, x: torch.Tensor, attention_mask: Optional[torch.Tensor] = None + ) -> torch.Tensor: + """Forward function of the CLIP ViT Model. This function passes the input tensors + through the embedding layer and then the transformer. + + Args: + x (torch.Tensor): input data of shape [batch, img_h, img_w] + attention_mask (torch.Tensor with dtype=bool): Attention mask to use. If none, all ones. + + Returns: + x (torch.Tensor): output after final transformer block of shape [b, s, h]. + """ + x = self.conv1(x) # shape = [batch, hidden_size, grid, grid] + x = x.reshape(x.shape[0], x.shape[1], -1) # [batch, hidden_size, grid ** 2] + x = x.permute(0, 2, 1) # [batch, grid ** 2, hidden_size] + + if self.add_class_token: + class_token = self.class_token.expand( + x.shape[0], -1, -1 + ) # [batch, class_token_len, hidden_size] + x = torch.cat( + [class_token, x], dim=1 + ) # [batch, grid ** 2 + class_token_len, hidden_size] + + x = x + self.position_embeddings(self.position_ids) + x = self.ln_pre(x) + + x = x.permute(1, 0, 2) # [b, s, h] -> [s, b, h] + if attention_mask is None: + attention_mask = torch.ones(1, 1, x.shape[0], x.shape[0]).cuda() # [1, 1, s, s] + attention_mask = attention_mask < 0.5 # to bool + x = self.transformer(x.contiguous(), attention_mask) + x = x.permute(1, 0, 2) # [s, b, h] -> [b, s, h] + x = x.contiguous() + + return x diff --git a/megatron/core/optimizer/__init__.py b/megatron/core/optimizer/__init__.py new file mode 100644 index 0000000000..231d986fb7 --- /dev/null +++ b/megatron/core/optimizer/__init__.py @@ -0,0 +1,245 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + +from apex.optimizers import FusedAdam as Adam +from apex.optimizers import FusedSGD as SGD + +from megatron.core import mpu + +from .distrib_optimizer import DistributedOptimizer +from .grad_scaler import ConstantGradScaler, DynamicGradScaler +from .optimizer import ChainedOptimizer, Float16OptimizerWithFloat16Params, FP32Optimizer +from .optimizer_config import OptimizerConfig + + +def get_param_groups(model_chunks, no_weight_decay_cond, scale_lr_cond, lr_mult): + """Create parameter groups for optimizer. + + Creates parameter groups based on weight decay condition (regularized vs + non regularized), learning rate scale condition (lr vs lr_mult * lr), + and whether it is expert parameters. scale_lr_cond is used during finetuning + where head of the network requires a scaled version of the base learning rate. + + Args: + model_chunks (List[MegatronModule]): model chunks to create parameter + groups for. + no_weight_decay_cond (func): function to determine whether a parameter + should not perform weight decay. + scale_lr_cond (func): function to determine whether a parameter + should have a scaled learning rate. + lr_mult (float): learning rate multiplier for parameters that + satisfy scale_lr_cond. 
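# --- Illustrative sketch (editor's note, not part of the patch) ---
# Example predicates that could be passed to get_param_groups /
# get_megatron_optimizer. The first mirrors the default rule applied inside
# get_param_groups (no weight decay for biases and 1-D norm parameters); the
# second is a hypothetical finetuning rule that scales the head's learning rate
# by lr_mult.
import torch


def no_weight_decay_cond(name: str, param: torch.Tensor) -> bool:
    return name.endswith(".bias") or param.ndim == 1


def scale_lr_cond(name: str, param: torch.Tensor) -> bool:
    return "output_layer" in name  # hypothetical: only the LM head

# opt = get_megatron_optimizer(config, model_chunks,
#                              no_weight_decay_cond, scale_lr_cond, lr_mult=0.1)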
+ """ + # map (wd_mult, lr_mult, is_expert_parallel) to params + params_map = { + (1.0, 1.0, False): [], + (1.0, 1.0, True): [], + (1.0, lr_mult, False): [], + (1.0, lr_mult, True): [], + (0.0, 1.0, False): [], + (0.0, 1.0, True): [], + (0.0, lr_mult, False): [], + (0.0, lr_mult, True): [], + } + + for model_chunk in model_chunks: + for name, param in model_chunk.named_parameters(): + if not param.requires_grad: + continue + + is_expert_parallel = not getattr(param, 'allreduce', True) + + if no_weight_decay_cond is not None: + no_wd = no_weight_decay_cond(name, param) + else: + # do not regularize biases nor Norm parameters + no_wd = name.endswith(".bias") or len(param.shape) == 1 + + if scale_lr_cond is not None: + scale_lr = scale_lr_cond(name, param) + else: + scale_lr = False + + if not no_wd and not scale_lr: + wd_mult, lr_mult = 1.0, 1.0 + elif not no_wd and scale_lr: + wd_mult, lr_mult = 1.0, lr_mult + elif no_wd and not scale_lr: + wd_mult, lr_mult = 0.0, 1.0 + else: + wd_mult, lr_mult = 0.0, lr_mult + + params_map[(wd_mult, lr_mult, is_expert_parallel)].append(param) + + param_groups = [] + for (wd_mult, lr_mult, is_expert_parallel), params in params_map.items(): + if len(params) == 0: + continue + param_groups.append( + { + 'params': params, + 'wd_mult': wd_mult, + 'lr_mult': lr_mult, + 'is_expert_parallel': is_expert_parallel, + } + ) + + return param_groups + + +def get_megatron_optimizer_based_on_param_groups( + config, + param_groups, + per_model_grad_buffers=None, + data_parallel_group=None, + data_parallel_group_gloo=None, +): + """Get megatron optimizer based on parameter groups. + + For distributed optimizer, we need the parameter gradients to be stored in a + contiguous grad_buffer. + + Args: + param_groups (list): list of parameter groups. + per_model_grad_buffers (list, optional): list of gradient buffers for + distributed optimizer. Defaults to None. + data_parallel_group (ProcessGroup, optional): data parallel group for + distributed optimizer. Defaults to None. + data_parallel_group_gloo (ProcessGroup, optional): data parallel + group-gloo for distributed optimizer. Defaults to None. + """ + if config.optimizer == 'adam': + optimizer = Adam( + param_groups, + lr=config.lr, + weight_decay=config.weight_decay, + betas=(config.adam_beta1, config.adam_beta2), + eps=config.adam_eps, + ) + elif config.optimizer == 'sgd': + optimizer = SGD( + param_groups, + lr=config.lr, + weight_decay=config.weight_decay, + momentum=config.sgd_momentum, + ) + else: + raise Exception('{} optimizer is not supported.'.format(config.optimizer)) + + # Determine whether the params have main-grad field. + params_have_main_grad = True + + # Mixed precision optimizer. + # - Note: both the Float16Optimizer and the DistributedOptimizer inherit + # from the MixedPrecisionOptimizer, which manages any optimizer where + # the model params and main params are distinct. + if config.fp16 or config.bf16 or config.use_distributed_optimizer: + + # Grad scaler: + # if loss-scale is provided, instantiate the constant scaler. + # if we are using fp16 and loss-scale is not present, use a + # dynamic scaler. + # otherwise we are running in bf16 with no loss-scale so + # leave it as None. + grad_scaler = None + + # Constant loss scale. + if config.loss_scale: + grad_scaler = ConstantGradScaler(config.loss_scale) + + # Dynamic loss scale. 
+ else: + if config.fp16: + grad_scaler = DynamicGradScaler( + initial_scale=config.initial_loss_scale, + min_scale=config.min_loss_scale, + growth_factor=2.0, + backoff_factor=0.5, + growth_interval=config.loss_scale_window, + hysteresis=config.hysteresis, + ) + + optimizer_args = [ + optimizer, + config.clip_grad, + config.log_num_zeros_in_grad, + params_have_main_grad, + config.fp16, + config.bf16, + config.params_dtype, + grad_scaler, + ] + if config.use_distributed_optimizer: + optimizer = DistributedOptimizer( + *optimizer_args, + per_model_grad_buffers=per_model_grad_buffers, + data_parallel_group=data_parallel_group, + data_parallel_group_gloo=data_parallel_group_gloo, + overlap_param_gather=config.overlap_param_gather, + ) + else: + optimizer = Float16OptimizerWithFloat16Params(*optimizer_args) + + return optimizer + + # FP32. + return FP32Optimizer( + optimizer, config.clip_grad, config.log_num_zeros_in_grad, params_have_main_grad, + ) + + +def get_megatron_optimizer( + config, model_chunks, no_weight_decay_cond=None, scale_lr_cond=None, lr_mult=1.0 +): + """Retrieve the Megatron optimizer for model chunks. + + We use separate optimizers for expert parameters and non-expert parameters. + + Args: + model_chunks (List[MegatronModule]): model chunks to get optimizer for. + no_weight_decay_cond (func, optional): function to determine whether a parameter + should not perform weight decay. Defaults to None. + scale_lr_cond (func, optional): function to determine whether a parameter + should have a scaled learning rate. Defaults to None. + lr_mult (float, optional): learning rate multiplier for parameters that + satisfy scale_lr_cond. Defaults to 1.0. + """ + # Collect param groups. + param_groups = get_param_groups(model_chunks, no_weight_decay_cond, scale_lr_cond, lr_mult) + + # Collect grad buffers for distributed optimizer. + per_model_grad_buffers = {} + per_model_ep_grad_buffers = {} + for model_idx, model_chunk in enumerate(model_chunks): + if hasattr(model_chunk, 'grad_buffers'): + per_model_grad_buffers[model_idx] = model_chunk.grad_buffers + per_model_ep_grad_buffers[model_idx] = model_chunk.expert_parallel_grad_buffers + + # Split param groups into dense and moe. + dense_param_groups = list(filter(lambda g: not g['is_expert_parallel'], param_groups)) + moe_param_groups = list(filter(lambda g: g['is_expert_parallel'], param_groups)) + + # Create optimizers. 
+ optimizers = [ + get_megatron_optimizer_based_on_param_groups( + config, + param_groups=dense_param_groups, + per_model_grad_buffers=per_model_grad_buffers, + data_parallel_group=mpu.get_data_parallel_group(with_context_parallel=True), + data_parallel_group_gloo=mpu.get_data_parallel_group_gloo(with_context_parallel=True), + ) + ] + if len(moe_param_groups): + optimizers.append( + get_megatron_optimizer_based_on_param_groups( + config, + param_groups=moe_param_groups, + per_model_grad_buffers=per_model_ep_grad_buffers, + data_parallel_group=mpu.get_data_modulo_expert_parallel_group(), + data_parallel_group_gloo=mpu.get_data_modulo_expert_parallel_group_gloo(), + ) + ) + + if len(optimizers) == 1: + return optimizers[0] + + return ChainedOptimizer(optimizers) diff --git a/megatron/optimizer/clip_grads.py b/megatron/core/optimizer/clip_grads.py similarity index 71% rename from megatron/optimizer/clip_grads.py rename to megatron/core/optimizer/clip_grads.py index a6a3d294e5..0f94754c9d 100644 --- a/megatron/optimizer/clip_grads.py +++ b/megatron/core/optimizer/clip_grads.py @@ -1,22 +1,21 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. """Gradient clipping.""" import os +import amp_C import torch -from torch import inf - from apex.multi_tensor_apply import multi_tensor_applier -import amp_C +from torch import inf -from megatron.model.module import param_is_not_shared -from megatron.core.tensor_parallel import param_is_not_tensor_parallel_duplicate +from ..tensor_parallel import param_is_not_tensor_parallel_duplicate +from ..transformer.module import param_is_not_shared -def clip_grad_norm_fp32(parameters, grads_for_norm, - max_norm, check_for_nan_in_grad, - norm_type=2, model_parallel_group=None): +def clip_grad_norm_fp32( + parameters, grads_for_norm, max_norm, norm_type=2, model_parallel_group=None, +): """Clips gradient norm of an iterable of parameters whose gradients are in fp32. @@ -30,7 +29,6 @@ def clip_grad_norm_fp32(parameters, grads_for_norm, grads_for_norm (Iterable[Tensor]): an iterable of Tensors or a single Tensor that will be used for calculating the grad norm. max_norm (float or int): max norm of the gradients. - check_for_nan_in_grad (bool): check if gradients have a NaN. norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for infinity norm. model_parallel_group (group): given the nature of the distributed @@ -62,9 +60,9 @@ def clip_grad_norm_fp32(parameters, grads_for_norm, total_norm = max(grad.abs().max() for grad in grads_for_norm) total_norm_cuda = torch.tensor([float(total_norm)], dtype=torch.float, device='cuda') # Take max across all model-parallel GPUs. - torch.distributed.all_reduce(total_norm_cuda, - op=torch.distributed.ReduceOp.MAX, - group=model_parallel_group) + torch.distributed.all_reduce( + total_norm_cuda, op=torch.distributed.ReduceOp.MAX, group=model_parallel_group + ) total_norm = total_norm_cuda[0].item() else: @@ -78,7 +76,7 @@ def clip_grad_norm_fp32(parameters, grads_for_norm, amp_C.multi_tensor_l2norm, dummy_overflow_buf, [grads_for_norm], - False # no per-parameter norm + False, # no per-parameter norm ) else: grad_norm = torch.tensor([0], dtype=torch.float, device='cuda') @@ -91,30 +89,19 @@ def clip_grad_norm_fp32(parameters, grads_for_norm, grad_norm = torch.norm(grad, norm_type) total_norm += grad_norm ** norm_type - # Check individual rank grad norms are not NaN - # prior to model-parallel all-reduce. 
- if check_for_nan_in_grad: - global_rank = torch.distributed.get_rank() - assert not total_norm.isnan(), ( - f'Rank {global_rank}: found NaN in local grad norm in ' - f'backwards pass. Device: {torch.cuda.current_device()}, ' - f'node: {os.uname()[1]}' - ) - # Sum across all model-parallel GPUs. - torch.distributed.all_reduce(total_norm, - op=torch.distributed.ReduceOp.SUM, - group=model_parallel_group) + torch.distributed.all_reduce( + total_norm, op=torch.distributed.ReduceOp.SUM, group=model_parallel_group + ) total_norm = total_norm.item() ** (1.0 / norm_type) # Scale. clip_coeff = max_norm / (total_norm + 1.0e-6) if clip_coeff < 1.0: dummy_overflow_buf = torch.tensor([0], dtype=torch.int, device='cuda') - multi_tensor_applier(amp_C.multi_tensor_scale, - dummy_overflow_buf, - [grads, grads], - clip_coeff) + multi_tensor_applier( + amp_C.multi_tensor_scale, dummy_overflow_buf, [grads, grads], clip_coeff + ) return total_norm @@ -139,9 +126,9 @@ def count_zeros_fp32(parameters, model_parallel_group): total_num_zeros = num_zeros + total_num_zeros # Sum across all model-parallel GPUs. - torch.distributed.all_reduce(total_num_zeros, - op=torch.distributed.ReduceOp.SUM, - group=model_parallel_group) + torch.distributed.all_reduce( + total_num_zeros, op=torch.distributed.ReduceOp.SUM, group=model_parallel_group + ) total_num_zeros = total_num_zeros.item() diff --git a/megatron/optimizer/distrib_optimizer.py b/megatron/core/optimizer/distrib_optimizer.py similarity index 59% rename from megatron/optimizer/distrib_optimizer.py rename to megatron/core/optimizer/distrib_optimizer.py index bb133aa42b..3eb66d7b90 100644 --- a/megatron/optimizer/distrib_optimizer.py +++ b/megatron/core/optimizer/distrib_optimizer.py @@ -1,20 +1,19 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. """Megatron distributed optimizer.""" -from apex.optimizers import FusedAdam as Adam -import math -import torch +import itertools +from logging import getLogger -from megatron import get_args -from megatron import get_timers -from megatron import print_rank_0 -from megatron.core import mpu, tensor_parallel +import torch +from apex.optimizers import FusedAdam as Adam +from .. import tensor_parallel +from ..distributed import shard_buffer from .optimizer import MixedPrecisionOptimizer, _zero_grad_group_helper -from .utils import shard_buffer +logger = getLogger(__name__) class Range: @@ -22,14 +21,18 @@ class Range: A range represents a start and end points for indexing a shard from a full tensor. """ + def __init__(self, start, end): self.start = start self.end = end self.size = end - start - def normalize(self, start = 0): + + def normalize(self, start=0): return Range(start, start + self.size) + def __str__(self): return "%d,%d [%d]" % (self.start, self.end, self.size) + def __len__(self): return self.end - self.start @@ -42,7 +45,6 @@ class DistributedOptimizer(MixedPrecisionOptimizer): clip_grad: clip gradeints with this global L2 norm. Note that clipping is ignored if clip_grad == 0 log_num_zeros_in_grad: return number of zeros in the gradients. - check_for_nan_in_grad: check if gradients have a NaN. params_have_main_grad: flag indicating if parameters have a `main_grad` field. If this is set, we are assuming that the model parameters are store in the `main_grad` @@ -59,12 +61,16 @@ class DistributedOptimizer(MixedPrecisionOptimizer): use any loss scale. Note that for `bf16 = True`, we can have a constnat gradient scaler. 
Also for `bf16 = False`, we always require a grad scaler. - models: list of models (i.e., the virtual pipelining models). This - is used by the distributed optimizer for mapping parameters. + grad_buffers: the implementation of the distributed optimizer is + centered on using the contiguous grad buffer for communicating + grads & params between the model state and the optimizer state. + You can find a more detailed description in this document + https://github.com/NVIDIA/Megatron-LM/blob/main/docs/source/distrib_optimizer.md + . """ @classmethod - def build_model_gbuf_param_range_map(cls, model, dtype, gbuf_world_range, bucket_offset): + def build_model_gbuf_param_range_map(cls, grad_buffer, gbuf_world_range, bucket_offset): """ Build mapping from param reference to grad buffer shard ranges. @@ -92,40 +98,37 @@ def build_model_gbuf_param_range_map(cls, model, dtype, gbuf_world_range, bucket """ # Param range map. - param_world_index_map = model.grad_buffer_param_index_map[dtype] + param_world_index_map = grad_buffer.param_index_map param_range_map = {} for param, param_world_indexes in param_world_index_map.items(): # Param range. param_world_start, param_world_end, _ = param_world_indexes - param_local_start = max( - 0, - param_world_start - gbuf_world_range.start) - param_local_end = min( - gbuf_world_range.size, - param_world_end - gbuf_world_range.start) + param_local_start = max(0, param_world_start - gbuf_world_range.start) + param_local_end = min(gbuf_world_range.size, param_world_end - gbuf_world_range.start) # Add param, if within local gbuf range. if param_local_end > param_local_start: param_local_range = Range(param_local_start, param_local_end) param_world_range = param_local_range.normalize( - param_local_start + gbuf_world_range.start) - param_world_range_in_bucket = Range(param_world_range.start-bucket_offset, - param_world_range.end-bucket_offset) - sub_param_start = max(0, gbuf_world_range.start-param_world_start) + param_local_start + gbuf_world_range.start + ) + param_world_range_in_bucket = Range( + param_world_range.start - bucket_offset, param_world_range.end - bucket_offset + ) + sub_param_start = max(0, gbuf_world_range.start - param_world_start) sub_param_range = param_local_range.normalize(sub_param_start) param_range_map[param] = { - "gbuf_world" : param_world_range, + "gbuf_world": param_world_range, "gbuf_world_in_bucket": param_world_range_in_bucket, - "gbuf_local" : param_local_range, - "param" : sub_param_range, + "gbuf_local": param_local_range, + "param": sub_param_range, } return param_range_map - @classmethod - def build_model_gbuf_range(cls, model, dtype, bucket_index): + def build_model_gbuf_range(cls, grad_buffer, bucket_index): """ Build mapping between params and their grad buffers. @@ -136,14 +139,15 @@ def build_model_gbuf_range(cls, model, dtype, bucket_index): reduce-scatter and all-gather. 
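# --- Illustrative sketch (editor's note, not part of the patch) ---
# Each bucket buffer must be divisible by the data-parallel world size, and
# rank r owns the r-th contiguous 1/dp_world_size shard, as build_model_gbuf_range
# computes below. For a hypothetical 1024-element bucket with dp_world_size = 4:
gbuf_size, dp_world_size = 1024, 4
shard = gbuf_size // dp_world_size
owned = [(r * shard, min(gbuf_size, (r + 1) * shard)) for r in range(dp_world_size)]
assert owned == [(0, 256), (256, 512), (512, 768), (768, 1024)]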
""" - data_parallel_rank = mpu.get_data_parallel_rank(with_context_parallel=True) - data_parallel_world_size = mpu.get_data_parallel_world_size(with_context_parallel=True) + data_parallel_rank = torch.distributed.get_rank(grad_buffer.data_parallel_group) + data_parallel_world_size = grad_buffer.data_parallel_group.size() - bucket = model.grad_buffers[dtype].buckets[bucket_index] + bucket = grad_buffer.buckets[bucket_index] bucket_buffer = bucket.data gbuf_size = bucket_buffer.numel() - assert gbuf_size % data_parallel_world_size == 0, \ - f"Each bucket's buffer size should be divisible by {data_parallel_world_size}" + assert ( + gbuf_size % data_parallel_world_size == 0 + ), f"Each bucket's buffer size should be divisible by {data_parallel_world_size}" max_gbuf_range_size = gbuf_size // data_parallel_world_size # All world ranges (i.e., across all data parallel ranks). @@ -151,64 +155,67 @@ def build_model_gbuf_range(cls, model, dtype, bucket_index): for r in range(data_parallel_world_size): # Compute start of chunk in this bucket. gbuf_world_start = r * max_gbuf_range_size - gbuf_world_end = min(gbuf_size, gbuf_world_start+max_gbuf_range_size) + gbuf_world_end = min(gbuf_size, gbuf_world_start + max_gbuf_range_size) # Add bucket's offset in grad buffer. - gbuf_world_range = Range(gbuf_world_start + bucket.offset, - gbuf_world_end + bucket.offset) + gbuf_world_range = Range( + gbuf_world_start + bucket.offset, gbuf_world_end + bucket.offset + ) gbuf_world_all_ranges.append(gbuf_world_range) # Local DP's ranges. gbuf_world_range = gbuf_world_all_ranges[data_parallel_rank] # Get each param's ranges. - param_range_map = cls.build_model_gbuf_param_range_map(model, - dtype, - gbuf_world_range, - bucket.offset) + param_range_map = cls.build_model_gbuf_param_range_map( + grad_buffer, gbuf_world_range, bucket.offset + ) # Group into dict. data = { - "param_map" : param_range_map, + "param_map": param_range_map, } return data - @classmethod - def build_model_gbuf_range_map(cls, model): + def build_gbuf_range_map(cls, grad_buffer): """ - Create param-to-grad-buffer mappings, for grad buffer data types - within a specific virtual model. + Build mapping between params and their grad buffers. These mappings are + partitioned according to data type. + + Iterate through all buckets of grad buffer to construct param ranges + that this rank "owns" (the dp_rank'th shard of each bucket, where each + shard is 1/dp_world_size of the bucket). + + Args: + grad_buffer (GradBuffer): grad buffer to build mapping for. """ - # Iterate through all buckets to construct param ranges that this rank "owns" - # (the dp_rank'th shard of each bucket, where each shard is 1/dp_world_size - # of the bucket). return { - dtype : [cls.build_model_gbuf_range(model, dtype, bucket_index) - for bucket_index in range(len(model.grad_buffers[dtype].buckets))] - for dtype in model.grad_buffers + grad_buffer.dtype: [ + cls.build_model_gbuf_range(grad_buffer, bucket_index) + for bucket_index in range(len(grad_buffer.buckets)) + ] } - @classmethod - def build_model_param_gbuf_map(cls, model_gbuf_ranges): + def build_model_param_gbuf_map(cls, gbuf_ranges): """ - Create a reverse of the model_gbuf_ranges, for referencing in + Create a reverse of the gbuf_ranges, for referencing in opposite direction. 
""" param_gbuf_map = {} - for model_index, model_gbuf_range_map in enumerate(model_gbuf_ranges): - for dtype, gbuf_range_map_for_all_buckets in model_gbuf_range_map.items(): + for gbuf_index, gbuf_range_map in enumerate(gbuf_ranges): + for dtype, gbuf_range_map_for_all_buckets in gbuf_range_map.items(): for bucket_index, gbuf_range_map in enumerate(gbuf_range_map_for_all_buckets): for param, _ in gbuf_range_map["param_map"].items(): - assert param not in param_gbuf_map, \ - "Param should not be in param_gbuf_map; each param only belongs to a single bucket" - param_gbuf_map[param] = (model_index, dtype, bucket_index) + assert ( + param not in param_gbuf_map + ), "Param should not be in param_gbuf_map; each param only belongs to a single bucket" + param_gbuf_map[param] = (gbuf_index, dtype, bucket_index) return param_gbuf_map - @classmethod - def build_optimizer_group_ranges(cls, param_groups, model_gbuf_ranges): + def build_optimizer_group_ranges(cls, param_groups, gbuf_ranges): """ Create optimizer groups. @@ -239,16 +246,15 @@ def build_optimizer_group_ranges(cls, param_groups, model_gbuf_ranges): # the group. The group index and order are particularly important for # saving and loading checkpoints. local_param_group_map = {} - group_ranges = [ {"params": []} for _ in param_groups ] - for model_gbuf_range_map in model_gbuf_ranges: - for dtype, gbuf_range_map_for_all_buckets in model_gbuf_range_map.items(): + group_ranges = [{"params": []} for _ in param_groups] + for gbuf_range_map in gbuf_ranges: + for dtype, gbuf_range_map_for_all_buckets in gbuf_range_map.items(): for gbuf_range_map in gbuf_range_map_for_all_buckets: for param in gbuf_range_map["param_map"]: group_index = world_param_group_map[param] group_range = group_ranges[group_index] group_range["params"].append(param) - local_param_group_map[param] = \ - (group_index, len(group_range["params"]) - 1) + local_param_group_map[param] = (group_index, len(group_range["params"]) - 1) # Squeeze zero-size group ranges. for group_index, group_range in enumerate(group_ranges): @@ -257,12 +263,8 @@ def build_optimizer_group_ranges(cls, param_groups, model_gbuf_ranges): return local_param_group_map, group_ranges - @classmethod - def build_model_and_main_param_groups(cls, - model_gbuf_ranges, - param_gbuf_map, - opt_group_ranges): + def build_model_and_main_param_groups(cls, gbuf_ranges, param_gbuf_map, opt_group_ranges): """ Create main parameter groups needed for the optimizer step. @@ -299,29 +301,30 @@ def build_model_and_main_param_groups(cls, model_fp32_groups.append(model_fp32_params_this_group) shard_float16_groups.append(shard_float16_params_this_group) shard_fp32_groups.append(shard_fp32_params_this_group) - shard_fp32_from_float16_groups.append( - shard_fp32_from_float16_params_this_group) + shard_fp32_from_float16_groups.append(shard_fp32_from_float16_params_this_group) for model_param in group_range["params"]: assert model_param.requires_grad - model_index, dtype, bucket_index = param_gbuf_map[model_param] - gbuf_range = model_gbuf_ranges[model_index][dtype][bucket_index] + gbuf_index, dtype, bucket_index = param_gbuf_map[model_param] + gbuf_range = gbuf_ranges[gbuf_index][dtype][bucket_index] param_range = gbuf_range["param_map"][model_param]["param"] # fp16, bf16 params. - if model_param.type() in ['torch.cuda.HalfTensor', - 'torch.cuda.BFloat16Tensor']: + if model_param.type() in ['torch.cuda.HalfTensor', 'torch.cuda.BFloat16Tensor']: # Clone model -> main. 
- shard_model_param = model_param.detach().view(-1) \ - [param_range.start:param_range.end] + shard_model_param = model_param.detach().view(-1)[ + param_range.start : param_range.end + ] shard_main_param = shard_model_param.clone().float() tensor_parallel.copy_tensor_model_parallel_attributes( - shard_model_param, model_param) + shard_model_param, model_param + ) tensor_parallel.copy_tensor_model_parallel_attributes( - shard_main_param, model_param) + shard_main_param, model_param + ) if hasattr(model_param, 'shared'): shard_model_param.shared = model_param.shared shard_main_param.shared = model_param.shared @@ -333,21 +336,23 @@ def build_model_and_main_param_groups(cls, # fp32 params. elif model_param.type() == 'torch.cuda.FloatTensor': - shard_model_param = model_param.view(-1) \ - [param_range.start:param_range.end] + shard_model_param = model_param.view(-1)[param_range.start : param_range.end] model_fp32_params_this_group.append(model_param) shard_fp32_params_this_group.append(shard_model_param) tensor_parallel.copy_tensor_model_parallel_attributes( - shard_model_param, model_param) + shard_model_param, model_param + ) if hasattr(model_param, 'shared'): shard_model_param.shared = model_param.shared else: - raise TypeError('Wrapped parameters must be one of ' - 'torch.cuda.FloatTensor, ' - 'torch.cuda.HalfTensor, or ' - 'torch.cuda.BFloat16Tensor. ' - 'Received {}'.format(model_param.type())) + raise TypeError( + 'Wrapped parameters must be one of ' + 'torch.cuda.FloatTensor, ' + 'torch.cuda.HalfTensor, or ' + 'torch.cuda.BFloat16Tensor. ' + 'Received {}'.format(model_param.type()) + ) # Update optimizer's params. group_range["orig_group"]["params"] = [ @@ -363,10 +368,21 @@ def build_model_and_main_param_groups(cls, shard_fp32_from_float16_groups, ) - - def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, - check_for_nan_in_grad, params_have_main_grad, fp16, - bf16, params_dtype, grad_scaler, models): + def __init__( + self, + optimizer, + clip_grad, + log_num_zeros_in_grad, + params_have_main_grad, + fp16, + bf16, + params_dtype, + grad_scaler, + per_model_grad_buffers, + overlap_param_gather, + data_parallel_group, + data_parallel_group_gloo, + ): """ See top of class definition for argument descriptions. @@ -378,28 +394,49 @@ def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, """ super().__init__( - optimizer, clip_grad, log_num_zeros_in_grad, - check_for_nan_in_grad, params_have_main_grad, - fp16, bf16, params_dtype, grad_scaler, models) + optimizer, + clip_grad, + log_num_zeros_in_grad, + params_have_main_grad, + fp16, + bf16, + params_dtype, + grad_scaler, + ) - assert isinstance(optimizer, Adam), \ - "Only Adam currently supported, due to checkpointing requirements." + assert isinstance( + optimizer, Adam + ), "Only Adam currently supported, due to checkpointing requirements." # Model grad buffer ranges. 
- self.model_gbuf_ranges = [] + assert per_model_grad_buffers, "grad_buffers must be provided" + self.grad_buffers = list(itertools.chain(*per_model_grad_buffers.values())) + self.per_model_grad_buffers = per_model_grad_buffers + self.data_parallel_group = data_parallel_group + self.data_parallel_group_gloo = data_parallel_group_gloo + self.gbuf_idx_to_model_idx_map = {} + gbuf_idx = 0 + for model_idx, grad_buffers in self.per_model_grad_buffers.items(): + for _ in grad_buffers: + self.gbuf_idx_to_model_idx_map[gbuf_idx] = model_idx + gbuf_idx += 1 + self.gbuf_ranges = [] self.per_bucket_numel = [] - for _, model_chunk in enumerate(self.models): + self.per_bucket_numel_unpadded = [] + for grad_buffer in self.grad_buffers: self.per_bucket_numel.append( - {dtype: [bucket.data.numel() for bucket in model_chunk.grad_buffers[dtype].buckets] - for dtype in model_chunk.grad_buffers}) - self.model_gbuf_ranges.append(self.build_model_gbuf_range_map(model_chunk)) - self.model_param_gbuf_map = \ - self.build_model_param_gbuf_map(self.model_gbuf_ranges) + {grad_buffer.dtype: [bucket.data.numel() for bucket in grad_buffer.buckets]} + ) + self.per_bucket_numel_unpadded.append( + {grad_buffer.dtype: [bucket.numel_unpadded for bucket in grad_buffer.buckets]} + ) + self.gbuf_ranges.append(self.build_gbuf_range_map(grad_buffer)) + self.model_param_gbuf_map = self.build_model_param_gbuf_map(self.gbuf_ranges) # Optimizer ranges. - self.model_param_group_index_map, self.opt_group_ranges = \ - self.build_optimizer_group_ranges(self.optimizer.param_groups, - self.model_gbuf_ranges) + self.model_param_group_index_map, self.opt_group_ranges = self.build_optimizer_group_ranges( + self.optimizer.param_groups, self.gbuf_ranges + ) # Allocate main param shards. ( @@ -408,105 +445,99 @@ def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, self.shard_float16_groups, self.shard_fp32_groups, self.shard_fp32_from_float16_groups, - ) = self.build_model_and_main_param_groups(self.model_gbuf_ranges, - self.model_param_gbuf_map, - self.opt_group_ranges) + ) = self.build_model_and_main_param_groups( + self.gbuf_ranges, self.model_param_gbuf_map, self.opt_group_ranges + ) # Initialize param buffers. # - These are views on the DDP model's grad buffers, that share # storage & have their own dtype. This is safe because the param # dtype size is always <= grad dtype size. self.param_buffers = [] - for model_index, model in enumerate(self.models): - current_param_buffers = {} - for dtype, grad_buffer in model.grad_buffers.items(): - size_ratio = torch.finfo(dtype).bits // torch.finfo(params_dtype).bits - current_param_buffers[dtype] = [] - for bucket in grad_buffer.buckets: - - # Handle older/newer method for getting untyped storage. - try: - storage = bucket.data.untyped_storage() - except: - try: - storage = bucket.data.storage()._untyped() - except: - storage = bucket.data.storage().untyped() - - # Typed param buffer. - param_buffer = torch.tensor( - storage, - dtype = params_dtype, - device = bucket.data.device) - - # .storage() ignores views / slices, so param_buffer now points to the start - # of the grad_buffer instead of to the start of each bucket. As a result, - # add bucket.offset to make sure param_buffers point to the right region of - # memory. 
- # Since we want the start of each bucket's param_buffer to coincide with the - # start of the same bucket's grad_buffer (this ensures that zeroing the grad - # buffer does not zero out params in the param_buffer before they are copied - # into the model_params), multiply the offset by the size ratio of grads and - # params. - offset = bucket.offset * size_ratio - param_buffer = param_buffer[offset:offset+bucket.data.numel()] - assert param_buffer.data_ptr() == bucket.data.data_ptr(), \ - "param_buffer and grad_buffer for same bucket should start at the same byte address" - assert param_buffer.numel() == bucket.data.numel(), \ - "param_buffer and grad_buffer for same bucket should have the same number of elements" - current_param_buffers[dtype].append(param_buffer) + for gbuf_index, grad_buffer in enumerate(self.grad_buffers): + size_ratio = torch.finfo(grad_buffer.dtype).bits // torch.finfo(params_dtype).bits + assert ( + size_ratio >= 1 + ), "param_dtype size should be smaller than or equal to grad_dtype size" + current_param_buffers = [] + for bucket in grad_buffer.buckets: + param_buffer = bucket.data.view(dtype=params_dtype) + param_buffer = param_buffer[: bucket.data.numel()] + assert ( + param_buffer.data_ptr() == bucket.data.data_ptr() + ), "param_buffer and grad_buffer for same bucket should start at the same byte address" + assert ( + param_buffer.numel() == bucket.data.numel() + ), "param_buffer and grad_buffer for same bucket should have the same number of elements" + current_param_buffers.append(param_buffer) self.param_buffers.append(current_param_buffers) # Now construct data structures to manage all-gather handles. self.all_gather_handles = [] self.all_gather_handle_index_to_bucket_index_map = [] self.model_index_to_all_gather_handle_index_map = {} + self.all_gather_handle_indices = [] self.param_to_all_gather_handle_index_map = {} self.param_buffer_copied = [] self.pbuf_view_items = self.get_model_param_buffer_dp_views() - for (model_index, dtype, bucket_index, _, _) in self.pbuf_view_items: - self.all_gather_handle_index_to_bucket_index_map.append((model_index, dtype, bucket_index)) + for (gbuf_index, dtype, bucket_index, _, _) in self.pbuf_view_items: + self.all_gather_handle_index_to_bucket_index_map.append( + (gbuf_index, dtype, bucket_index) + ) all_gather_handle_index = len(self.all_gather_handle_index_to_bucket_index_map) - 1 + self.all_gather_handles.append(None) + + # Store all all_gather_handle_indices. + model_idx = self.gbuf_idx_to_model_idx_map[gbuf_index] + if model_idx not in self.model_index_to_all_gather_handle_index_map: + self.model_index_to_all_gather_handle_index_map[model_idx] = [] + self.model_index_to_all_gather_handle_index_map[model_idx].append( + all_gather_handle_index + ) - # Store all all_gather_handle_indices relevant to a particular model chunk. 
- if model_index not in self.model_index_to_all_gather_handle_index_map: - self.model_index_to_all_gather_handle_index_map[model_index] = [] - self.model_index_to_all_gather_handle_index_map[model_index].append(all_gather_handle_index) - - for param in self.models[model_index].grad_buffers[dtype].buckets[bucket_index].params_list: + for param in self.grad_buffers[gbuf_index].buckets[bucket_index].params_list: self.param_to_all_gather_handle_index_map[param] = all_gather_handle_index self.param_buffer_copied.append(False) self.num_all_gather_handles = len(self.all_gather_handle_index_to_bucket_index_map) - self.overlap_param_gather = get_args().overlap_param_gather + self.overlap_param_gather = overlap_param_gather + self.remove_pre_hook_handle = None if self.overlap_param_gather: - self.remove_pre_hook_handle = torch.nn.modules.module.register_module_forward_pre_hook( - self._make_forward_pre_hook()) - else: - self.remove_pre_hook_handle = None + self.enable_pre_hook() self.update_successful = False # Update optimizer groups. # - Also, leverage state_dict() and load_state_dict() to # recast preexisting per-param state tensors. - self.optimizer.param_groups = \ - [ g["orig_group"] for g in self.opt_group_ranges ] + self.optimizer.param_groups = [g["orig_group"] for g in self.opt_group_ranges] self.optimizer.load_state_dict(self.optimizer.state_dict()) + def disable_pre_hook(self): + assert self.remove_pre_hook_handle is not None + self.remove_pre_hook_handle.remove() + self.remove_pre_hook_handle = None + + # Make sure all-gathers are completed as needed. + self._reset_metadata_and_sync_gather_all_model_params(force_sync=True) + + def enable_pre_hook(self): + assert self.remove_pre_hook_handle is None + self.remove_pre_hook_handle = torch.nn.modules.module.register_module_forward_pre_hook( + self._make_forward_pre_hook() + ) def get_model_param_range_map(self, param): """ Given a model param, get the index sub-range of the param that this data-parallel rank owns. """ - model_index, dtype, bucket_index = self.model_param_gbuf_map[param] - gbuf_range_map = self.model_gbuf_ranges[model_index][dtype][bucket_index] + gbuf_index, dtype, bucket_index = self.model_param_gbuf_map[param] + gbuf_range_map = self.gbuf_ranges[gbuf_index][dtype][bucket_index] param_range_map = gbuf_range_map["param_map"][param] return param_range_map - def get_model_parallel_group(self): """ With the distributed optimizer, the model parallel group is the @@ -514,7 +545,6 @@ def get_model_parallel_group(self): """ return None - def state_dict(self): """ The state dict contains all non-DP-rank-dependent (i.e., non-parameter- @@ -528,9 +558,7 @@ def state_dict(self): # Optimizer state (do not store parameter state here). state_dict['optimizer'] = { - k : v - for k, v in self.optimizer.state_dict().items() - if k != "state" + k: v for k, v in self.optimizer.state_dict().items() if k != "state" } for param_group in state_dict["optimizer"]["param_groups"]: del param_group["params"] @@ -541,7 +569,6 @@ def state_dict(self): return state_dict - def load_state_dict(self, state_dict): """Load the state dict. @@ -578,65 +605,63 @@ def load_state_dict(self, state_dict): # the ordering of parameters within its flattened parameter state # list. 
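# A small standalone illustration (hypothetical toy code, not part of the
# patch) of the ordering the comment above relies on: torch optimizers key
# per-parameter state by each parameter's integer position across the
# flattened param_groups, which is what turns (group_index, group_order)
# into a state_order below.
import torch

p0, p1, p2 = (torch.nn.Parameter(torch.randn(4)) for _ in range(3))
opt = torch.optim.Adam([{"params": [p0, p1]}, {"params": [p2]}])

sd = opt.state_dict()
# "params" holds integer indices into the flattened parameter list, so the
# parameter at group_index=1, group_order=0 maps to state key 2.
state_order = sd["param_groups"][1]["params"][0]
assert state_order == 2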
inner_state_dict = self.optimizer.state_dict() - state_dict_param_groups = [{ - **group, - "params" : list(inner_state_dict["param_groups"][idx]["params"]), - } for idx, group in enumerate(state_dict["optimizer"]["param_groups"])] + state_dict_param_groups = [ + {**group, "params": list(inner_state_dict["param_groups"][idx]["params"]),} + for idx, group in enumerate(state_dict["optimizer"]["param_groups"]) + ] # Allocate 'dummy' data for optimizer state (i.e., torch.empty() below) # - Real data is overwritten during load_parameter_state(). state_dict_state = [] - for gbuf_range_maps in self.model_gbuf_ranges: + for gbuf_range_maps in self.gbuf_ranges: for gbuf_range_map_for_all_buckets in gbuf_range_maps.values(): for gbuf_range_map in gbuf_range_map_for_all_buckets: - for model_param, param_range_map in \ - gbuf_range_map["param_map"].items(): + for model_param, param_range_map in gbuf_range_map["param_map"].items(): # Get parameter ordering information (see method docstring # for details). - group_index, group_order = \ - self.model_param_group_index_map[model_param] - state_order = inner_state_dict["param_groups"] \ - [group_index]["params"][group_order] + group_index, group_order = self.model_param_group_index_map[model_param] + state_order = inner_state_dict["param_groups"][group_index]["params"][ + group_order + ] # Allocate dummy tensors. numel = len(param_range_map["gbuf_world"]) - init_shard = lambda : torch.empty( - (numel,), - dtype=torch.float32, - device=torch.cuda.current_device()) + init_shard = lambda: torch.empty( + (numel,), dtype=torch.float32, device=torch.cuda.current_device() + ) - state_dict_state.append((state_order, { - "exp_avg" : init_shard(), - "exp_avg_sq" : init_shard(), - })) + state_dict_state.append( + (state_order, {"exp_avg": init_shard(), "exp_avg_sq": init_shard(),}) + ) # Sort by state order (see method docstring for details). - state_dict_state.sort(key = lambda s : s[0]) - state_dict_state = {s[0]:s[1] for s in state_dict_state} + state_dict_state.sort(key=lambda s: s[0]) + state_dict_state = {s[0]: s[1] for s in state_dict_state} # Optimizer. - self.optimizer.load_state_dict({ - "state" : state_dict_state, - "param_groups" : state_dict_param_groups, - }) + self.optimizer.load_state_dict( + {"state": state_dict_state, "param_groups": state_dict_param_groups,} + ) # Grad scaler. if 'grad_scaler' not in state_dict: if self.fp16: - print_rank_0('***WARNING*** found an old checkpoint, will not ' - 'load grad scaler ...') + logger.info( + '***WARNING*** found an old checkpoint, will not ' 'load grad scaler ...' + ) else: if self.grad_scaler: self.grad_scaler.load_state_dict(state_dict['grad_scaler']) else: - print_rank_0('***WARNING*** fould the grad scaler in the ' - 'checkpoint but it is None in the class. ' - 'Skipping loading grad scaler ...') - + logger.info( + '***WARNING*** fould the grad scaler in the ' + 'checkpoint but it is None in the class. ' + 'Skipping loading grad scaler ...' + ) - def save_parameter_state(self, filename): - """Save parameter state (i.e., parameter & optimizer tensors). + def get_parameter_state(self): + """Get parameter state (i.e., parameter & optimizer tensors). This method performs three steps: - For each DP rank, copy param & optimizer shards to contiguous CPU @@ -644,18 +669,22 @@ def save_parameter_state(self, filename): exp_avg_sq). - Gather contiguous buffers on DP rank 0 and concatenate to world buffers. - - Save world buffers to disk (i.e., distrib_opt.pt). """ # Data parallelism variables. 
- data_parallel_world_size = mpu.get_data_parallel_world_size(with_context_parallel=True) - data_parallel_rank = mpu.get_data_parallel_rank(with_context_parallel=True) - data_parallel_group_gloo = mpu.get_data_parallel_group_gloo(with_context_parallel=True) - data_parallel_global_ranks = list(mpu._DATA_PARALLEL_GLOBAL_RANKS_WITH_CP) + data_parallel_world_size = self.data_parallel_group_gloo.size() + data_parallel_rank = torch.distributed.get_rank(self.data_parallel_group_gloo) + data_parallel_group_gloo = self.data_parallel_group_gloo + data_parallel_global_ranks = torch.distributed.get_process_group_ranks( + self.data_parallel_group_gloo + ) # Collect param states. - state = {"per_bucket_numel": self.per_bucket_numel} - for model_idx, gbuf_range_maps in enumerate(self.model_gbuf_ranges): + state = { + "per_bucket_numel": self.per_bucket_numel, + "per_bucket_numel_unpadded": self.per_bucket_numel_unpadded, + } + for gbuf_idx, gbuf_range_maps in enumerate(self.gbuf_ranges): # Iterate grad buffers (by data type). dtype_state = {} @@ -665,28 +694,24 @@ def save_parameter_state(self, filename): for bucket_idx, gbuf_range_map in enumerate(gbuf_range_map_for_all_buckets): # Compute local DP contiguous shard's size. - model = self.models[model_idx] - gbuf_world_numel = model.grad_buffers[dtype].buckets[bucket_idx].data.numel() + gbuf_world_numel = self.grad_buffers[gbuf_idx].buckets[bucket_idx].data.numel() assert gbuf_world_numel % data_parallel_world_size == 0 gbuf_local_numel = gbuf_world_numel // data_parallel_world_size - local_shards = {key: torch.empty((gbuf_local_numel,), - dtype=torch.float32, - device="cpu") - for key in ("param", "exp_avg", "exp_avg_sq")} + local_shards = { + key: torch.empty((gbuf_local_numel,), dtype=torch.float32, device="cpu") + for key in ("param", "exp_avg", "exp_avg_sq") + } # Build contiguous DP rank shards (for param + optim states). - for model_param, param_range_map in \ - gbuf_range_map["param_map"].items(): + for model_param, param_range_map in gbuf_range_map["param_map"].items(): # Main param & optimizer states. - group_index, group_order = \ - self.model_param_group_index_map[model_param] - main_param = self.optimizer.param_groups \ - [group_index]["params"][group_order] + group_index, group_order = self.model_param_group_index_map[model_param] + main_param = self.optimizer.param_groups[group_index]["params"][group_order] optim_state = self.optimizer.state[main_param] tensors = { - "param" : main_param, + "param": main_param, **optim_state, } @@ -694,18 +719,19 @@ def save_parameter_state(self, filename): gbuf_local_start = param_range_map["gbuf_local"].start gbuf_local_end = param_range_map["gbuf_local"].end for key in local_shards: - local_shards[key][gbuf_local_start:gbuf_local_end] \ - .data.copy_(tensors[key].detach().cpu()) + local_shards[key][gbuf_local_start:gbuf_local_end].data.copy_( + tensors[key].detach().cpu() + ) # Gather contiguous shards on DP rank 0. for key, send_tensor in local_shards.items(): # Gather tensor list. if data_parallel_rank == 0: - recv_tensors = [torch.empty((gbuf_local_numel,), - dtype=torch.float32, - device="cpu") - for _ in range(data_parallel_world_size)] + recv_tensors = [ + torch.empty((gbuf_local_numel,), dtype=torch.float32, device="cpu") + for _ in range(data_parallel_world_size) + ] else: recv_tensors = None @@ -725,18 +751,25 @@ def save_parameter_state(self, filename): # Collect world state. 
dtype_state[dtype] = world_tensors - state[model_idx] = dtype_state + state[gbuf_idx] = dtype_state - # Save param state. - if data_parallel_rank == 0: - torch.save(state, filename) + return state + def save_parameter_state(self, filename): + """Save the distributed parameter state on DP rank 0. - def load_parameter_state(self, filename): + Args: + filename (str): path to save parameter state to. + """ + + state_dict = self.get_parameter_state() + if torch.distributed.get_rank(self.data_parallel_group) == 0: + torch.save(state_dict, filename) + + def load_parameter_state_from_state_dict(self, state_dict): """Load parameter state (i.e., parameter & optimizer tensors). - This method performs the reverse of save_parameter_state(): - - Load world buffers from disk (i.e., distrib_opt.pt). + This method performs the reverse of get_parameter_state(): - Scatter contiguous buffers from DP rank 0 to each DP rank (each DP rank receives its relevant subset of the world buffers). - For each DP rank, copy param & optimizer shards from contiguous CPU @@ -745,54 +778,88 @@ def load_parameter_state(self, filename): """ # Data parallelism variables. - data_parallel_world_size = mpu.get_data_parallel_world_size(with_context_parallel=True) - data_parallel_rank = mpu.get_data_parallel_rank(with_context_parallel=True) - data_parallel_group_gloo = mpu.get_data_parallel_group_gloo(with_context_parallel=True) - data_parallel_global_ranks = list(mpu._DATA_PARALLEL_GLOBAL_RANKS_WITH_CP) - - # Load on DP rank 0. - if data_parallel_rank == 0: - loaded_state = torch.load(filename) - if "per_bucket_numel" in loaded_state: - per_bucket_numel_in_checkpoint = loaded_state["per_bucket_numel"] - assert self.per_bucket_numel == per_bucket_numel_in_checkpoint, \ - (f"Number of elements in each bucket need to be the same in current run " - f"({self.per_bucket_numel}) and checkpoint ({per_bucket_numel_in_checkpoint})") + data_parallel_world_size = self.data_parallel_group_gloo.size() + data_parallel_rank = torch.distributed.get_rank(self.data_parallel_group_gloo) + data_parallel_group_gloo = self.data_parallel_group_gloo + data_parallel_global_ranks = torch.distributed.get_process_group_ranks( + self.data_parallel_group_gloo + ) # Scatter tensors to all DP ranks. - for model_idx, gbuf_range_maps in enumerate(self.model_gbuf_ranges): + for gbuf_idx, gbuf_range_maps in enumerate(self.gbuf_ranges): for dtype, gbuf_range_map_for_all_buckets in gbuf_range_maps.items(): for bucket_idx, gbuf_range_map in enumerate(gbuf_range_map_for_all_buckets): # Compute local DP contiguous shard's size. - model = self.models[model_idx] - gbuf_world_numel = model.grad_buffers[dtype].buckets[bucket_idx].data.numel() + gbuf_world_numel = self.grad_buffers[gbuf_idx].buckets[bucket_idx].data.numel() + assert gbuf_world_numel == self.per_bucket_numel[gbuf_idx][dtype][bucket_idx] assert gbuf_world_numel % data_parallel_world_size == 0 gbuf_local_numel = gbuf_world_numel // data_parallel_world_size # Contiguous local shards (received from DP rank 0). - local_shards = {key: torch.empty((gbuf_local_numel,), - dtype=torch.float32, - device="cpu") - for key in ("param", "exp_avg", "exp_avg_sq")} + local_shards = { + key: torch.empty((gbuf_local_numel,), dtype=torch.float32, device="cpu") + for key in ("param", "exp_avg", "exp_avg_sq") + } # Scatter local shards from DP rank 0. for key, recv_tensor in local_shards.items(): # Scatter tensor list. 
if data_parallel_rank == 0: - world_tensor_for_all_buckets = loaded_state[model_idx][dtype][key] + world_tensor_for_all_buckets = state_dict[gbuf_idx][dtype][key] if not isinstance(world_tensor_for_all_buckets, list): world_tensor_for_all_buckets = [world_tensor_for_all_buckets] - assert bucket_idx < len(world_tensor_for_all_buckets), \ - (f"Trying to load state for bucket_id {bucket_idx} (out of " - f"{len(gbuf_range_map_for_all_buckets)} buckets) from checkpoint; " - f"checkpoint only has {len(world_tensor_for_all_buckets)} bucket(s)") + assert bucket_idx < len(world_tensor_for_all_buckets), ( + f"Trying to load state for bucket_id {bucket_idx} (out of " + f"{len(gbuf_range_map_for_all_buckets)} buckets) from checkpoint; " + f"checkpoint only has {len(world_tensor_for_all_buckets)} bucket(s)" + ) + # This tensor might be bigger or smaller than expected (depending on + # relative sizes of per_bucket_numel_in_checkpoint and self.per_bucket_numel). world_tensor = world_tensor_for_all_buckets[bucket_idx] - gbuf_start_idxs = \ - list(range(0, gbuf_world_numel, gbuf_local_numel)) - send_tensors = [world_tensor[i:(i+gbuf_local_numel)] - for i in gbuf_start_idxs] + if "per_bucket_numel" in state_dict: + numel_in_checkpoint = state_dict["per_bucket_numel"][gbuf_idx][ + dtype + ][bucket_idx] + numel = self.per_bucket_numel[gbuf_idx][dtype][bucket_idx] + numel_unpadded = self.per_bucket_numel_unpadded[gbuf_idx][dtype][ + bucket_idx + ] + assert world_tensor.numel() == numel_in_checkpoint + assert numel_unpadded <= world_tensor.numel(), ( + "True number of elements should be fewer than number of elements in " + "checkpoint tensor" + ) + if world_tensor.numel() > numel: + # Truncate extra values, which are padding anyway. + logger.info( + f"Truncating extra values from checkpoint (numel_in_checkpoint={numel_in_checkpoint}, " + f"numel={numel}, numel_unpadded={numel_unpadded})" + ) + world_tensor = world_tensor[:numel] + elif world_tensor.numel() < numel: + # In this case, numel > world_tensor.numel() (which is numel_in_checkpoint). + # Create new tensor with right number of values, then copy and use new tensor. + logger.info( + f"Expanding tensor from checkpoint (numel_in_checkpoint={numel_in_checkpoint}, " + f"numel={numel}, numel_unpadded={numel_unpadded})" + ) + world_tensor_reshaped = torch.empty( + (numel,), + dtype=world_tensor.dtype, + device=world_tensor.device, + ) + world_tensor_reshaped[:numel_in_checkpoint].copy_(world_tensor) + world_tensor = world_tensor_reshaped + else: + logger.info( + "***WARNING*** Using older checkpoint so skipping padding checks" + ) + gbuf_start_idxs = list(range(0, gbuf_world_numel, gbuf_local_numel)) + send_tensors = [ + world_tensor[i : (i + gbuf_local_numel)] for i in gbuf_start_idxs + ] else: send_tensors = None @@ -805,18 +872,15 @@ def load_parameter_state(self, filename): ) # Copy local contiguous shards to param/optim shards. - for model_param, param_range_map in \ - gbuf_range_map["param_map"].items(): + for model_param, param_range_map in gbuf_range_map["param_map"].items(): # Main param & optimizer states. 
- group_index, group_order = \ - self.model_param_group_index_map[model_param] - main_param = self.optimizer.param_groups \ - [group_index]["params"][group_order] + group_index, group_order = self.model_param_group_index_map[model_param] + main_param = self.optimizer.param_groups[group_index]["params"][group_order] optim_state = self.optimizer.state[main_param] tensors = { - "param" : main_param, + "param": main_param, **optim_state, } @@ -825,8 +889,27 @@ def load_parameter_state(self, filename): gbuf_local_end = param_range_map["gbuf_local"].end for key in local_shards: tensors[key].data.copy_( - local_shards[key][gbuf_local_start:gbuf_local_end]) + local_shards[key][gbuf_local_start:gbuf_local_end] + ) + def load_parameter_state(self, filename): + """Load the distributed parameter state from disk. + + Args: + filename (str): path to load parameter state from. + """ + state_dict = None + if torch.distributed.get_rank(self.data_parallel_group) == 0: + state_dict = torch.load(filename) + if "per_bucket_numel_unpadded" in state_dict: + per_bucket_numel_unpadded_in_checkpoint = state_dict["per_bucket_numel_unpadded"] + assert self.per_bucket_numel_unpadded == per_bucket_numel_unpadded_in_checkpoint, ( + f"Number of unpadded elements in each bucket need to be the same in current run " + f"({self.per_bucket_numel_unpadded}) and checkpoint " + f"({per_bucket_numel_unpadded_in_checkpoint})" + ) + + self.load_parameter_state_from_state_dict(state_dict) def zero_grad(self, set_to_none=True): """ @@ -839,11 +922,12 @@ def zero_grad(self, set_to_none=True): used by this field can be safely deallocated at this point. """ for groups in ( - self.model_float16_groups, - self.model_fp32_groups, - self.shard_float16_groups, # grad empty/unused here? - self.shard_fp32_groups, # throws grad-access warning - self.shard_fp32_from_float16_groups): + self.model_float16_groups, + self.model_fp32_groups, + self.shard_float16_groups, # grad empty/unused here? + self.shard_fp32_groups, # throws grad-access warning + self.shard_fp32_from_float16_groups, + ): for group in groups: _zero_grad_group_helper(group, set_to_none) @@ -856,7 +940,6 @@ def zero_grad(self, set_to_none=True): if self.overlap_param_gather: self._dispatch_gather_model_params(all_gather_handle_index=0) - def get_model_param_buffer_dp_views(self): """ Get shard views of each of the param buffers. @@ -881,18 +964,22 @@ def get_model_param_buffer_dp_views(self): # In all cases, we want all_gather and all_gather_handle.wait() to be called in the same order, # and all_gather_handle.wait() needs to be called just before the corresponding forward pass. 
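# A minimal sketch (not part of the patch) of the view/all-gather pattern the
# comment above describes: split a padded 1-D param buffer into equal per-rank
# views and all-gather the local view back into the full buffer. The real code
# goes through shard_buffer() and torch.distributed._all_gather_base.
import torch

def gather_param_buffer_sketch(param_buffer, data_parallel_group, async_op=False):
    world_size = torch.distributed.get_world_size(data_parallel_group)
    rank = torch.distributed.get_rank(data_parallel_group)

    assert param_buffer.numel() % world_size == 0
    shard_size = param_buffer.numel() // world_size
    views = [
        param_buffer[r * shard_size : (r + 1) * shard_size] for r in range(world_size)
    ]

    handle = torch.distributed.all_gather_into_tensor(
        param_buffer, views[rank], group=data_parallel_group, async_op=async_op
    )
    # With async_op=True the caller keeps `handle` and calls handle.wait()
    # just before the corresponding forward pass, matching the ordering rule above.
    return handle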
view_items = [] - for model_index, buffers in enumerate(self.param_buffers): + for gbuf_index, buffers in enumerate(self.param_buffers): view_items_per_model_chunk = [] - for dtype, buf_for_all_buckets in buffers.items(): - for bucket_index, buf in enumerate(buf_for_all_buckets): - buf_views = shard_buffer(buf) - view_items_per_model_chunk.insert(0, (model_index, dtype, bucket_index, buf, buf_views)) + dtype = self.grad_buffers[gbuf_index].dtype + for bucket_index, buf in enumerate(buffers): + data_parallel_world_size = torch.distributed.get_world_size( + self.data_parallel_group + ) + buf_views = shard_buffer(buf, data_parallel_world_size) + view_items_per_model_chunk.insert( + 0, (gbuf_index, dtype, bucket_index, buf, buf_views) + ) view_items.extend(view_items_per_model_chunk) return view_items - - def _dispatch_gather_model_params(self, all_gather_handle_index): + def _dispatch_gather_model_params(self, all_gather_handle_index, force_sync=False): """ All-gather updated model params. @@ -900,33 +987,33 @@ def _dispatch_gather_model_params(self, all_gather_handle_index): tensors are dynamically allocated. After the all-gather, the params can be copied from the param buffer to the param. """ + async_op = self.overlap_param_gather and not force_sync if self.update_successful: - data_parallel_rank = mpu.get_data_parallel_rank(with_context_parallel=True) - data_parallel_group = mpu.get_data_parallel_group(with_context_parallel=True) + data_parallel_group = self.data_parallel_group + data_parallel_rank = torch.distributed.get_rank(data_parallel_group) # All-gather updated main params. # All param_buf views are guaranteed to have the same number of elements # across all data-parallel ranks, due to padding (done in grad_buffer.py), # and extended to the param_bufs. Thus, all sub-views will have consistent # start / end indexes across data-parallel ranks. - (model_index, dtype, bucket_index, pbuf, pbuf_views) = self.pbuf_view_items[all_gather_handle_index] - assert all_gather_handle_index == len(self.all_gather_handles) + (gbuf_index, dtype, bucket_index, pbuf, pbuf_views) = self.pbuf_view_items[ + all_gather_handle_index + ] + assert all_gather_handle_index < len(self.all_gather_handles) all_gather_handle = torch.distributed._all_gather_base( - pbuf, - pbuf_views[data_parallel_rank], - group = data_parallel_group, - async_op = self.overlap_param_gather + pbuf, pbuf_views[data_parallel_rank], group=data_parallel_group, async_op=async_op, + ) + self.all_gather_handles[all_gather_handle_index] = all_gather_handle + assert self.all_gather_handle_index_to_bucket_index_map[all_gather_handle_index] == ( + gbuf_index, + dtype, + bucket_index, ) - self.all_gather_handles.append(all_gather_handle) - assert self.all_gather_handle_index_to_bucket_index_map[all_gather_handle_index] == \ - (model_index, dtype, bucket_index) - self.param_buffer_copied.append(False) - if not self.overlap_param_gather: + if not async_op: self._copy_params_from_param_buffer(all_gather_handle_index) - - def _make_forward_pre_hook(self): """ Create a forward pre-hook to wait on all-gather handles when necessary (i.e., @@ -935,7 +1022,9 @@ def _make_forward_pre_hook(self): """ def hook(module, *unused): - assert self.overlap_param_gather, "Should use pre-hook only when overlap_param_gather is True" + assert ( + self.overlap_param_gather + ), "Should use pre-hook only when overlap_param_gather is True" # Make sure all parameters in this module have been all-gathered as necessary. 
for param in module.parameters(recurse=False): @@ -949,16 +1038,17 @@ def hook(module, *unused): return hook - def finish_param_sync(self, model_index, *unused): """ Finishes all necessary param syncs for the model_index'th model chunk. """ + if model_index not in self.model_index_to_all_gather_handle_index_map: + return + all_gather_handle_indices = self.model_index_to_all_gather_handle_index_map[model_index] for all_gather_handle_index in all_gather_handle_indices: self._finish_param_sync_helper(all_gather_handle_index) - def _finish_param_sync_helper(self, all_gather_handle_index): """ Waits on all_gather_handle if necessary, then copies params from param_buffer @@ -967,9 +1057,7 @@ def _finish_param_sync_helper(self, all_gather_handle_index): # First check if there is an outstanding all-gather handle for this param. # If so, wait on the handle to ensure the communication is finished. - if all_gather_handle_index >= len(self.all_gather_handles): - return - + assert all_gather_handle_index < len(self.all_gather_handles) all_gather_handle = self.all_gather_handles[all_gather_handle_index] if all_gather_handle is not None: all_gather_handle.wait() @@ -990,62 +1078,59 @@ def _finish_param_sync_helper(self, all_gather_handle_index): self._copy_params_from_param_buffer(all_gather_handle_index) self.param_buffer_copied[all_gather_handle_index] = True - def _copy_params_from_param_buffer(self, all_gather_handle_index): """ Copy params from param_buffer to model_params. """ - (model_index, dtype, bucket_index) = self.all_gather_handle_index_to_bucket_index_map[ - all_gather_handle_index] - model = self.models[model_index] + (gbuf_index, dtype, bucket_index) = self.all_gather_handle_index_to_bucket_index_map[ + all_gather_handle_index + ] + grad_buffer = self.grad_buffers[gbuf_index] + if self.update_successful: # Copy from param buffer to each param. - param_map = model.grad_buffer_param_index_map[dtype] + param_map = grad_buffer.param_index_map for param, (buf_start, buf_end, bucket_index_in_param_map) in param_map.items(): if bucket_index == bucket_index_in_param_map: - bucket_offset = model.grad_buffers[dtype].buckets[bucket_index].offset - param_buf = self.param_buffers[model_index][dtype][bucket_index] + bucket_offset = grad_buffer.buckets[bucket_index].offset + param_buf = self.param_buffers[gbuf_index][bucket_index] # buf_start and buf_end store position of this parameter in the full grad_buffer, # so need to adjust these indices (by subtracting out bucket_offset) since we # have independent param_bufs for each bucket. - param_buf_shard = param_buf[buf_start-bucket_offset:buf_end-bucket_offset] + param_buf_shard = param_buf[buf_start - bucket_offset : buf_end - bucket_offset] assert param.data.nelement() == param_buf_shard.nelement() param.view(-1).detach().copy_(param_buf_shard) # Zero out the grad buffer in preparation for next set of fwd / bwd passes after copy # completes (since param_buffer and grad_buffer are shared for each bucket). - param_buf = self.param_buffers[model_index][dtype][bucket_index] - grad_buf = model.grad_buffers[dtype].buckets[bucket_index].data + param_buf = self.param_buffers[gbuf_index][bucket_index] + grad_buf = grad_buffer.buckets[bucket_index].data assert param_buf.data_ptr() == grad_buf.data_ptr() grad_buf.zero_() - def _collect_main_grad_data_for_unscaling(self): """ Note: this should be equivalent to the float-16 optimizer's method, but writtent differently, so the two should be combined. 
""" return [ - param.grad.data - for group in self.optimizer.param_groups - for param in group["params"] + param.grad.data for group in self.optimizer.param_groups for param in group["params"] ] - def _get_model_and_main_params_data_float16(self): """ Get aligned list of model and main params. """ model_data = [] main_data = [] - for model_group, main_group in zip(self.shard_float16_groups, - self.shard_fp32_from_float16_groups): + for model_group, main_group in zip( + self.shard_float16_groups, self.shard_fp32_from_float16_groups + ): for model_param, main_param in zip(model_group, main_group): model_data.append(model_param.data) main_data.append(main_param.data) return model_data, main_data - def _copy_model_grads_to_main_grads(self): """ Copy model grads to main grads. @@ -1057,26 +1142,20 @@ def _copy_model_grads_to_main_grads(self): # Utility method for copying group grads. def copy_group_grads(model_groups, shard_main_groups): - for model_group, shard_main_group in zip(model_groups, - shard_main_groups): - for model_param, shard_main_param in zip(model_group, - shard_main_group): + for model_group, shard_main_group in zip(model_groups, shard_main_groups): + for model_param, shard_main_param in zip(model_group, shard_main_group): param_range_map = self.get_model_param_range_map(model_param) param_range = param_range_map["param"] assert param_range.size == shard_main_param.nelement() model_grad = model_param.main_grad - shard_model_grad = model_grad.view(-1) \ - [param_range.start:param_range.end] + shard_model_grad = model_grad.view(-1)[param_range.start : param_range.end] shard_main_param.grad = shard_model_grad.float() # Copy model groups to shard groups. - copy_group_grads(self.model_float16_groups, - self.shard_fp32_from_float16_groups) - copy_group_grads(self.model_fp32_groups, - self.shard_fp32_groups) - + copy_group_grads(self.model_float16_groups, self.shard_fp32_from_float16_groups) + copy_group_grads(self.model_fp32_groups, self.shard_fp32_groups) def _copy_main_params_to_model_params(self): """ @@ -1089,30 +1168,26 @@ def _copy_main_params_to_model_params(self): # Utility method for copying group params. def copy_group_params(shard_main_groups, model_groups): - for shard_main_group, model_group in zip(shard_main_groups, - model_groups): - for shard_main_param, model_param in zip(shard_main_group, - model_group): + for shard_main_group, model_group in zip(shard_main_groups, model_groups): + for shard_main_param, model_param in zip(shard_main_group, model_group): param_range_map = self.get_model_param_range_map(model_param) world_range = param_range_map["gbuf_world_in_bucket"] assert world_range.size == shard_main_param.nelement() - model_id, dtype, bucket_id = self.model_param_gbuf_map[model_param] - model_param_buffer = self.param_buffers[model_id][dtype][bucket_id] + gbuf_index, dtype, bucket_id = self.model_param_gbuf_map[model_param] + model_param_buffer = self.param_buffers[gbuf_index][bucket_id] - shard_model_param = model_param_buffer.view(-1) \ - [world_range.start:world_range.end] + shard_model_param = model_param_buffer.view(-1)[ + world_range.start : world_range.end + ] shard_model_param.data.copy_(shard_main_param) # Copy shard groups to model groups. 
- copy_group_params(self.shard_fp32_from_float16_groups, - self.model_float16_groups) - copy_group_params(self.shard_fp32_groups, - self.model_fp32_groups) - + copy_group_params(self.shard_fp32_from_float16_groups, self.model_float16_groups) + copy_group_params(self.shard_fp32_groups, self.model_fp32_groups) def _copy_model_params_to_main_params(self): """ @@ -1125,40 +1200,43 @@ def _copy_model_params_to_main_params(self): # Utility method for copying group params. def copy_group_params(model_groups, shard_main_groups): - for model_group, shard_main_group in zip(model_groups, - shard_main_groups): - for model_param, shard_main_param in zip(model_group, - shard_main_group): + for model_group, shard_main_group in zip(model_groups, shard_main_groups): + for model_param, shard_main_param in zip(model_group, shard_main_group): param_range_map = self.get_model_param_range_map(model_param) param_range = param_range_map["param"] assert param_range.size == shard_main_param.nelement() - shard_model_param = model_param.view(-1) \ - [param_range.start:param_range.end] + shard_model_param = model_param.view(-1)[param_range.start : param_range.end] shard_main_param.data.copy_(shard_model_param) # Copy model groups to shard groups. - copy_group_params(self.model_float16_groups, - self.shard_fp32_from_float16_groups) - copy_group_params(self.model_fp32_groups, - self.shard_fp32_groups) + copy_group_params(self.model_float16_groups, self.shard_fp32_from_float16_groups) + copy_group_params(self.model_fp32_groups, self.shard_fp32_groups) + def _reset_metadata_and_sync_gather_all_model_params(self, force_sync): + # Reset metadata needed to track results of all-gathers. + self.all_gather_handles = [None for _ in range(len(self.all_gather_handles))] + self.param_buffer_copied = [False for _ in range(len(self.param_buffer_copied))] + + # Launch synchronous all-gather if --overlap-param-gather is turned on or if force_sync + # is explicitly set to True (e.g., if we are going to turn off all-gather overlapping for + # validation / test iterations). + if not self.overlap_param_gather or force_sync: + for all_gather_handle_index in range(self.num_all_gather_handles): + self._dispatch_gather_model_params(all_gather_handle_index, force_sync=force_sync) @torch.no_grad() def step(self, args, timers): self.update_successful, grad_norm, num_zeros_in_grad = super().step(args, timers) - # Reset metadata needed to track results of all-gathers. - self.all_gather_handles = [] - self.param_buffer_copied = [] - # If not overlapping all-gather for parameters, launch synchronous all-gather - # communication calls here. - if not self.overlap_param_gather: - timers('params-all-gather', log_level=1).start(barrier=args.barrier_with_L1_time) - for all_gather_handle_index in range(self.num_all_gather_handles): - self._dispatch_gather_model_params(all_gather_handle_index) - timers('params-all-gather').stop() + # communication calls here. If overlapping all-gather for parameters, the following + # call to _gather_all_model_params is a no-op: the first all-gather is launched + # asynchronously in the next optimizer.zero_grad() call and subsequent all-gathers + # are launched in the forward pre-hook. 
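# A simplified sketch (not part of the patch) of the overlap scheme described
# in the comment above: gathers are launched asynchronously and a global
# forward pre-hook waits on the matching handle right before the params are
# first needed. The real bookkeeping is per bucket and lives on the optimizer.
import torch

class OverlappedGatherSketch:
    def __init__(self):
        self.param_to_handle_index = {}  # as in param_to_all_gather_handle_index_map
        self.handles = []                # one entry per dispatched all-gather

    def install_hook(self):
        # Fires before every module's forward pass.
        return torch.nn.modules.module.register_module_forward_pre_hook(self._hook)

    def _hook(self, module, *unused):
        for param in module.parameters(recurse=False):
            idx = self.param_to_handle_index.get(param)
            if idx is None:
                continue
            handle = self.handles[idx]
            if handle is not None:
                # Block only if this module's bucket is still in flight.
                handle.wait()
                self.handles[idx] = None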
+ timers('params-all-gather', log_level=1).start(barrier=args.barrier_with_L1_time) + self._reset_metadata_and_sync_gather_all_model_params(force_sync=False) + timers('params-all-gather').stop() return self.update_successful, grad_norm, num_zeros_in_grad diff --git a/megatron/optimizer/grad_scaler.py b/megatron/core/optimizer/grad_scaler.py similarity index 91% rename from megatron/optimizer/grad_scaler.py rename to megatron/core/optimizer/grad_scaler.py index f77da3fc69..d9ef633b23 100644 --- a/megatron/optimizer/grad_scaler.py +++ b/megatron/core/optimizer/grad_scaler.py @@ -1,15 +1,13 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. """Megatron grad scaler.""" -from abc import ABC -from abc import abstractmethod +from abc import ABC, abstractmethod import torch class MegatronGradScaler(ABC): - def __init__(self, initial_scale): """Initialize scale value with the input initial scale.""" assert initial_scale > 0.0 @@ -36,9 +34,7 @@ def load_state_dict(self, state_dict): pass - class ConstantGradScaler(MegatronGradScaler): - def update(self, found_inf): pass @@ -49,12 +45,10 @@ def load_state_dict(self, state_dict): pass - class DynamicGradScaler(MegatronGradScaler): - - def __init__(self, initial_scale, min_scale, - growth_factor, backoff_factor, - growth_interval, hysteresis): + def __init__( + self, initial_scale, min_scale, growth_factor, backoff_factor, growth_interval, hysteresis + ): """"Grad scaler with dynamic scale that gets adjusted during training.""" super(DynamicGradScaler, self).__init__(initial_scale) @@ -82,7 +76,6 @@ def __init__(self, initial_scale, min_scale, self._growth_tracker = 0 self._hysteresis_tracker = self.hysteresis - def update(self, found_inf): # If we have an inf/nan, growth tracker is set to 0 @@ -92,8 +85,7 @@ def update(self, found_inf): self._hysteresis_tracker -= 1 # Now if we are out of hysteresis count, scale down the loss. if self._hysteresis_tracker <= 0: - self._scale = torch.max(self._scale * self.backoff_factor, - self.min_scale) + self._scale = torch.max(self._scale * self.backoff_factor, self.min_scale) else: # If there is no nan/inf, increment the growth tracker. self._growth_tracker += 1 @@ -105,7 +97,6 @@ def update(self, found_inf): # and scale up the loss scale. self._scale = self._scale * self.growth_factor - def state_dict(self): state_dict = {} state_dict['scale'] = self._scale @@ -113,7 +104,6 @@ def state_dict(self): state_dict['hysteresis_tracker'] = self._hysteresis_tracker return state_dict - def load_state_dict(self, state_dict): self._scale = state_dict['scale'].cuda(torch.cuda.current_device()) self._growth_tracker = state_dict['growth_tracker'] diff --git a/megatron/optimizer/optimizer.py b/megatron/core/optimizer/optimizer.py similarity index 64% rename from megatron/optimizer/optimizer.py rename to megatron/core/optimizer/optimizer.py index 47d2001dbb..5ad9a903f7 100644 --- a/megatron/optimizer/optimizer.py +++ b/megatron/core/optimizer/optimizer.py @@ -1,21 +1,25 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. 
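# The grad_scaler.py hunks above are purely cosmetic; the dynamic scaling
# policy is unchanged. A condensed sketch of that policy (plain floats here,
# whereas the real class keeps the scale as a CUDA tensor; constants are
# illustrative, not the repository defaults).
class DynamicScaleSketch:
    def __init__(self, initial_scale=2.0**16, min_scale=1.0, growth_factor=2.0,
                 backoff_factor=0.5, growth_interval=1000, hysteresis=2):
        self.scale = initial_scale
        self.min_scale = min_scale
        self.growth_factor = growth_factor
        self.backoff_factor = backoff_factor
        self.growth_interval = growth_interval
        self.hysteresis = hysteresis
        self._growth_tracker = 0
        self._hysteresis_tracker = hysteresis

    def update(self, found_inf):
        if found_inf:
            # Overflow: reset the growth streak; back off only once `hysteresis`
            # overflows have accumulated since the last growth event.
            self._growth_tracker = 0
            self._hysteresis_tracker -= 1
            if self._hysteresis_tracker <= 0:
                self.scale = max(self.scale * self.backoff_factor, self.min_scale)
        else:
            # Clean step: after `growth_interval` of them in a row, grow the
            # scale and reset both trackers.
            self._growth_tracker += 1
            if self._growth_tracker == self.growth_interval:
                self._growth_tracker = 0
                self._hysteresis_tracker = self.hysteresis
                self.scale = self.scale * self.growth_factor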
"""Megatron optimizer.""" -from abc import ABC -from abc import abstractmethod -from apex.multi_tensor_apply import multi_tensor_applier +import math +from abc import ABC, abstractmethod +from logging import getLogger + import amp_C import torch +from apex.multi_tensor_apply import multi_tensor_applier -from megatron import get_timers -from megatron import print_rank_0 -from megatron.core import mpu, tensor_parallel -from megatron.model import Float16Module +from megatron import get_args from megatron.model.module import param_is_not_shared +from megatron.tensor_logging import log_tensor, log_generator +from .. import parallel_state, tensor_parallel +from ..transformer.module import param_is_not_shared from .clip_grads import clip_grad_norm_fp32, count_zeros_fp32 +logger = getLogger(__name__) + def _zero_grad_group_helper(group, set_to_none): """Zero out the gradient for a group of parameters. @@ -40,24 +44,16 @@ def _multi_tensor_copy_this_to_that(this, that, overflow_buf=None): if overflow_buf: overflow_buf.fill_(0) # Scaling with factor `1.0` is equivalent to copy. - multi_tensor_applier(amp_C.multi_tensor_scale, - overflow_buf, - [this, that], - 1.0) + multi_tensor_applier(amp_C.multi_tensor_scale, overflow_buf, [this, that], 1.0) else: for this_, that_ in zip(this, that): that_.copy_(this_) - class MegatronOptimizer(ABC): - - - def __init__(self, optimizer, clip_grad, - log_num_zeros_in_grad, - check_for_nan_in_grad, - params_have_main_grad, - models): + def __init__( + self, optimizer, clip_grad, log_num_zeros_in_grad, params_have_main_grad, + ): """Input optimizer is the base optimizer for example Adam.""" self.optimizer = optimizer @@ -65,13 +61,17 @@ def __init__(self, optimizer, clip_grad, # Set gradient clipping and logging params. self.clip_grad = clip_grad self.log_num_zeros_in_grad = log_num_zeros_in_grad - self.check_for_nan_in_grad = check_for_nan_in_grad self.params_have_main_grad = params_have_main_grad - # 'models' are retained for access to the contiguous grad buffers. 
- # (see distributed optimizer) - self.models = models + args=get_args() + if args.debug_param_init: + log_generator("CPU generator after reset", torch.random.default_generator) + log_generator("PP init generator after reset") + with tensor_parallel.get_cuda_rng_tracker().fork(): + log_generator("TP init generator after reset") + for param in sorted(self.get_parameters(), key=lambda p: p.param_idx): + log_tensor(f"Global param: {param.param_name}", param, level=args.debug_param_init) def get_parameters(self): params = [] @@ -80,7 +80,6 @@ def get_parameters(self): params.append(param) return params - def get_main_grads_for_grad_norm(self): # Filter parameters based on: @@ -99,43 +98,34 @@ def get_main_grads_for_grad_norm(self): return grads_for_norm - def get_model_parallel_group(self): """Default returned here, but the distributed optimizer overrides this.""" - return mpu.get_model_parallel_group() - + return parallel_state.get_model_parallel_group() - def clip_grad_norm(self, clip_grad, check_for_nan_in_grad): + def clip_grad_norm(self, clip_grad): params = self.get_parameters() grads_for_norm = self.get_main_grads_for_grad_norm() return clip_grad_norm_fp32( - params, grads_for_norm, clip_grad, - check_for_nan_in_grad, - model_parallel_group=self.get_model_parallel_group()) - + params, grads_for_norm, clip_grad, model_parallel_group=self.get_model_parallel_group(), + ) def count_zeros(self): params = self.get_parameters() - return count_zeros_fp32(params, - model_parallel_group=self.get_model_parallel_group()) - + return count_zeros_fp32(params, model_parallel_group=self.get_model_parallel_group()) @abstractmethod def zero_grad(self, set_to_none=True): pass - @abstractmethod def get_loss_scale(self): """The output should be a cuda tensor of size 1.""" pass - def scale_loss(self, loss): """Simple scaling.""" return self.get_loss_scale() * loss - @abstractmethod def reload_model_params(self): """Refreshes any internal state from the current model parameters. @@ -145,17 +135,14 @@ def reload_model_params(self): with main parameters, the main parameters need to also be updated.""" pass - @abstractmethod def state_dict(self): pass - @abstractmethod def load_state_dict(self, state_dict): pass - # Promote state so it can be retrieved or set via # "optimizer_instance.state" def _get_state(self): @@ -166,7 +153,6 @@ def _set_state(self, value): state = property(_get_state, _set_state) - # Promote param_groups so it can be retrieved or set via # "optimizer_instance.param_groups" # (for example, to adjust the learning rate) @@ -178,13 +164,11 @@ def _set_param_groups(self, value): param_groups = property(_get_param_groups, _set_param_groups) - @abstractmethod def step(self, args, timers): pass - class MixedPrecisionOptimizer(MegatronOptimizer): """Base class for both the float-16 and the distributed optimizer. @@ -193,7 +177,6 @@ class MixedPrecisionOptimizer(MegatronOptimizer): clip_grad: clip gradeints with this global L2 norm. Note that clipping is ignored if clip_grad == 0 log_num_zeros_in_grad: return number of zeros in the gradients. - check_for_nan_in_grad: check if gradients have a NaN. params_have_main_grad: flag indicating if parameters have a `main_grad` field. If this is set, we are assuming that the model parameters are store in the `main_grad` @@ -211,18 +194,23 @@ class MixedPrecisionOptimizer(MegatronOptimizer): use any loss scale. Note that for `bf16 = True`, we can have a constnat gradient scaler. Also for `bf16 = False`, we always require a grad scaler. 
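# A minimal sketch (not part of the patch) of the inf/nan handling that
# MixedPrecisionOptimizer.step performs below: unscale the main grads, reduce
# the overflow flag across the model-parallel group, and skip the update when
# anything non-finite is found. The real code uses the fused
# torch._amp_foreach_non_finite_check_and_unscale_ kernel shown in the diff.
import torch

def unscale_and_check_sketch(main_grads, inv_scale, model_parallel_group):
    found_inf = torch.zeros(1, dtype=torch.float, device='cuda')
    for grad in main_grads:
        grad.mul_(inv_scale)
        if not torch.isfinite(grad).all():
            found_inf.fill_(1.0)

    # Any rank seeing an overflow forces every rank to skip, keeping the
    # model-parallel group in lockstep.
    torch.distributed.all_reduce(
        found_inf, op=torch.distributed.ReduceOp.MAX, group=model_parallel_group
    )
    return found_inf.item() > 0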
- models: list of models (i.e., the virtual pipelining models). This - is used by the distributed optimizer for mapping parameters. """ - def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, - check_for_nan_in_grad, params_have_main_grad, - fp16, bf16, params_dtype, grad_scaler, models): + def __init__( + self, + optimizer, + clip_grad, + log_num_zeros_in_grad, + params_have_main_grad, + fp16, + bf16, + params_dtype, + grad_scaler, + ): super().__init__( - optimizer, clip_grad, log_num_zeros_in_grad, - check_for_nan_in_grad, params_have_main_grad, - models) + optimizer, clip_grad, log_num_zeros_in_grad, params_have_main_grad, + ) self.fp16 = fp16 self.bf16 = bf16 @@ -252,17 +240,14 @@ def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, if self.grad_scaler is None: self._scale_one = torch.tensor([1.0], dtype=torch.float, device='cuda') - def get_loss_scale(self): if self.grad_scaler is None: return self._scale_one return self.grad_scaler.scale - def reload_model_params(self): self._copy_model_params_to_main_params() - def _unscale_main_grads_and_check_for_nan(self): # Collect main grads. @@ -273,25 +258,24 @@ def _unscale_main_grads_and_check_for_nan(self): # Unscale and set found inf/nan torch._amp_foreach_non_finite_check_and_unscale_( - main_grads, self.found_inf, self.grad_scaler.inv_scale) + main_grads, self.found_inf, self.grad_scaler.inv_scale + ) # Update across all model parallel instances. - torch.distributed.all_reduce(self.found_inf, - op=torch.distributed.ReduceOp.MAX, - group=self.get_model_parallel_group()) + torch.distributed.all_reduce( + self.found_inf, op=torch.distributed.ReduceOp.MAX, group=self.get_model_parallel_group() + ) # Check for nan. - found_inf_flag = (self.found_inf.item() > 0) + found_inf_flag = self.found_inf.item() > 0 return found_inf_flag - @torch.no_grad() def step(self, args, timers): # Copy gradients from model params to main params. - timers('optimizer-copy-to-main-grad', log_level=1).start( - barrier=args.barrier_with_L1_time) + timers('optimizer-copy-to-main-grad', log_level=1).start(barrier=args.barrier_with_L1_time) self._copy_model_grads_to_main_grads() timers('optimizer-copy-to-main-grad').stop() @@ -301,7 +285,8 @@ def step(self, args, timers): # Unscale and check for inf/nan. timers('optimizer-unscale-and-check-inf', log_level=1).start( - barrier=args.barrier_with_L1_time) + barrier=args.barrier_with_L1_time + ) found_inf_flag = self._unscale_main_grads_and_check_for_nan() timers('optimizer-unscale-and-check-inf').stop() @@ -313,34 +298,42 @@ def step(self, args, timers): if found_inf_flag: return False, None, None + if args.debug_all_param_gradients: + params=[] + for param in self.get_parameters(): + if param.grad is not None: + params.append(param) + for param in sorted(params, key=lambda p: p.param_idx): + log_tensor(f"Global gradient: {param.param_name}", param.grad, level=args.debug_all_param_gradients) + # Clip the main gradients. - timers('optimizer-clip-main-grad', log_level=1).start( - barrier=args.barrier_with_L1_time) + timers('optimizer-clip-main-grad', log_level=1).start(barrier=args.barrier_with_L1_time) grad_norm = None if self.clip_grad > 0.0: - grad_norm = self.clip_grad_norm(self.clip_grad, - self.check_for_nan_in_grad) + grad_norm = self.clip_grad_norm(self.clip_grad) timers('optimizer-clip-main-grad').stop() # Count the zeros in the grads. 
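The `_unscale_main_grads_and_check_for_nan` step above follows the usual dynamic-loss-scaling recipe: unscale the fp32 main grads in place, record whether any of them went non-finite, and max-reduce that flag over the model-parallel group so every rank makes the same skip decision. A hedged stand-alone sketch, where `inv_scale` is assumed to be a one-element float tensor holding 1/loss_scale:

import torch

def unscale_and_check(main_grads, inv_scale, model_parallel_group):
    found_inf = torch.zeros(1, dtype=torch.float32, device='cuda')
    # Fused unscale + non-finite check; sets found_inf to 1.0 if any grad has inf/NaN.
    torch._amp_foreach_non_finite_check_and_unscale_(main_grads, found_inf, inv_scale)
    # Agree on the result across tensor/pipeline-parallel ranks.
    torch.distributed.all_reduce(
        found_inf, op=torch.distributed.ReduceOp.MAX, group=model_parallel_group
    )
    return found_inf.item() > 0   # True means the optimizer step is skipped this iteration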
- timers('optimizer-count-zeros', log_level=1).start( - barrier=args.barrier_with_L1_time) - num_zeros_in_grad = self.count_zeros() if \ - self.log_num_zeros_in_grad else None + timers('optimizer-count-zeros', log_level=1).start(barrier=args.barrier_with_L1_time) + num_zeros_in_grad = self.count_zeros() if self.log_num_zeros_in_grad else None timers('optimizer-count-zeros').stop() # Step the optimizer. - timers('optimizer-inner-step', log_level=1).start( - barrier=args.barrier_with_L1_time) + timers('optimizer-inner-step', log_level=1).start(barrier=args.barrier_with_L1_time) self.optimizer.step() timers('optimizer-inner-step').stop() # Update params from main params. timers('optimizer-copy-main-to-model-params', log_level=1).start( - barrier=args.barrier_with_L1_time) + barrier=args.barrier_with_L1_time + ) self._copy_main_params_to_model_params() timers('optimizer-copy-main-to-model-params').stop() + if args.debug_param_update: + for param in sorted(self.get_parameters(), key=lambda p: p.param_idx): + log_tensor(f"Global param: {param.param_name}", param, level=args.debug_param_init) + # Successful update. return True, grad_norm, num_zeros_in_grad @@ -353,7 +346,6 @@ class Float16OptimizerWithFloat16Params(MixedPrecisionOptimizer): clip_grad: clip gradeints with this global L2 norm. Note that clipping is ignored if clip_grad == 0 log_num_zeros_in_grad: return number of zeros in the gradients. - check_for_nan_in_grad: check if gradients have a NaN. params_have_main_grad: flag indicating if parameters have a `main_grad` field. If this is set, we are assuming that the model parameters are store in the `main_grad` @@ -370,18 +362,30 @@ class Float16OptimizerWithFloat16Params(MixedPrecisionOptimizer): use any loss scale. Note that for `bf16 = True`, we can have a constnat gradient scaler. Also for `bf16 = False`, we always require a grad scaler. - models: list of models (i.e., the virtual pipelining models). This - is used by the distributed optimizer for mapping parameters. """ - def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, - check_for_nan_in_grad, params_have_main_grad, fp16, bf16, - params_dtype, grad_scaler, models): + def __init__( + self, + optimizer, + clip_grad, + log_num_zeros_in_grad, + params_have_main_grad, + fp16, + bf16, + params_dtype, + grad_scaler, + ): super().__init__( - optimizer, clip_grad, log_num_zeros_in_grad, - check_for_nan_in_grad, params_have_main_grad, - fp16, bf16, params_dtype, grad_scaler, models) + optimizer, + clip_grad, + log_num_zeros_in_grad, + params_have_main_grad, + fp16, + bf16, + params_dtype, + grad_scaler, + ) # ====================== # main parameter stuff @@ -405,42 +409,42 @@ def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, if param.requires_grad: # float16 params: - if param.type() in ['torch.cuda.HalfTensor', - 'torch.cuda.BFloat16Tensor']: + if param.type() in ['torch.cuda.HalfTensor', 'torch.cuda.BFloat16Tensor']: float16_params_this_group.append(param) # Create a copy main_param = param.detach().clone().float() # Copy tensor model parallel attributes. - tensor_parallel.copy_tensor_model_parallel_attributes(main_param, - param) + tensor_parallel.copy_tensor_model_parallel_attributes(main_param, param) if hasattr(param, 'shared'): main_param.shared = param.shared + if hasattr(param, 'param_name'): + main_param.param_name=param.param_name + main_param.param_idx=param.param_idx # Replace the optimizer params with the new fp32 copy. 
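The construction loop that continues below builds an fp32 "main" copy of every fp16/bf16 parameter and swaps that copy into the optimizer's param group, so the base optimizer only ever holds fp32 state. A reduced sketch of that setup (the optimizer-state migration and the fp32 passthrough branch from the source are omitted):

def build_main_params(optimizer):
    float16_groups, fp32_from_float16_groups = [], []
    for param_group in optimizer.param_groups:
        f16_this_group, main_this_group = [], []
        for i, param in enumerate(param_group['params']):
            if param.type() in ('torch.cuda.HalfTensor', 'torch.cuda.BFloat16Tensor'):
                main_param = param.detach().clone().float()   # fp32 master copy
                param_group['params'][i] = main_param         # the optimizer steps the fp32 copy
                f16_this_group.append(param)
                main_this_group.append(main_param)
        float16_groups.append(f16_this_group)
        fp32_from_float16_groups.append(main_this_group)
    return float16_groups, fp32_from_float16_groups

Each step then copies model grads into the main params, runs the base optimizer in fp32, and copies the updated main params back into the fp16/bf16 model params, which is what `_copy_model_grads_to_main_grads` and `_copy_main_params_to_model_params` below do.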
param_group['params'][i] = main_param fp32_from_float16_params_this_group.append(main_param) # Reset existing state dict key to the new main param. if param in self.optimizer.state: - self.optimizer.state[main_param] \ - = self.optimizer.state.pop(param) + self.optimizer.state[main_param] = self.optimizer.state.pop(param) # fp32 params. elif param.type() == 'torch.cuda.FloatTensor': fp32_params_this_group.append(param) param_group['params'][i] = param else: - raise TypeError('Wrapped parameters must be one of ' - 'torch.cuda.FloatTensor, ' - 'torch.cuda.HalfTensor, or ' - 'torch.cuda.BFloat16Tensor. ' - 'Received {}'.format(param.type())) + raise TypeError( + 'Wrapped parameters must be one of ' + 'torch.cuda.FloatTensor, ' + 'torch.cuda.HalfTensor, or ' + 'torch.cuda.BFloat16Tensor. ' + 'Received {}'.format(param.type()) + ) self.float16_groups.append(float16_params_this_group) - self.fp32_from_float16_groups.append( - fp32_from_float16_params_this_group) + self.fp32_from_float16_groups.append(fp32_from_float16_params_this_group) self.fp32_from_fp32_groups.append(fp32_params_this_group) - def zero_grad(self, set_to_none=True): """We only need to zero the model related parameters, i.e., float16_groups & fp32_from_fp32_groups. We additionally zero @@ -454,7 +458,6 @@ def zero_grad(self, set_to_none=True): for group in self.fp32_from_fp32_groups: _zero_grad_group_helper(group, set_to_none) - def _collect_main_grad_data_for_unscaling(self): main_grads = [] @@ -470,25 +473,21 @@ def _collect_main_grad_data_for_unscaling(self): for main_param in main_group: if main_param.grad is not None: main_grads.append(main_param.grad.data) - - return main_grads + return main_grads def _get_model_and_main_params_data_float16(self): model_data = [] main_data = [] - for model_group, main_group in zip(self.float16_groups, - self.fp32_from_float16_groups): + for model_group, main_group in zip(self.float16_groups, self.fp32_from_float16_groups): for model_param, main_param in zip(model_group, main_group): model_data.append(model_param.data) main_data.append(main_param.data) return model_data, main_data - def _copy_model_grads_to_main_grads(self): # This only needs to be done for the float16 group. - for model_group, main_group in zip(self.float16_groups, - self.fp32_from_float16_groups): + for model_group, main_group in zip(self.float16_groups, self.fp32_from_float16_groups): for model_param, main_param in zip(model_group, main_group): if self.params_have_main_grad and hasattr(model_param, 'main_grad'): main_param.grad = model_param.main_grad.float() @@ -507,20 +506,19 @@ def _copy_model_grads_to_main_grads(self): for model_param in model_group: model_param.grad = model_param.main_grad - def _copy_main_params_to_model_params(self): # Only needed for the float16 params. model_data, main_data = self._get_model_and_main_params_data_float16() - _multi_tensor_copy_this_to_that(this=main_data, that=model_data, - overflow_buf=self._dummy_overflow_buf) - + _multi_tensor_copy_this_to_that( + this=main_data, that=model_data, overflow_buf=self._dummy_overflow_buf + ) def _copy_model_params_to_main_params(self): # Only needed for the float16 params. 
model_data, main_data = self._get_model_and_main_params_data_float16() - _multi_tensor_copy_this_to_that(this=model_data, that=main_data, - overflow_buf=self._dummy_overflow_buf) - + _multi_tensor_copy_this_to_that( + this=model_data, that=main_data, overflow_buf=self._dummy_overflow_buf + ) def state_dict(self): state_dict = {} @@ -530,75 +528,68 @@ def state_dict(self): state_dict['fp32_from_fp16_params'] = self.fp32_from_float16_groups return state_dict - def load_state_dict(self, state_dict): # Optimizer. optimizer_key = 'optimizer' if optimizer_key not in state_dict: optimizer_key = 'optimizer_state_dict' - print_rank_0('***WARNING*** loading optimizer from ' - 'an old checkpoint ...') + logger.info('***WARNING*** loading optimizer from ' 'an old checkpoint ...') self.optimizer.load_state_dict(state_dict[optimizer_key]) # Grad scaler. if 'grad_scaler' not in state_dict: if self.fp16: - print_rank_0('***WARNING*** found an old checkpoint, will not ' - 'load grad scaler ...') + logger.info( + '***WARNING*** found an old checkpoint, will not ' 'load grad scaler ...' + ) else: if self.grad_scaler: self.grad_scaler.load_state_dict(state_dict['grad_scaler']) else: - print_rank_0('***WARNING*** fould the grad scaler in the ' - 'checkpoint but it is None in the class. ' - 'Skipping loading grad scaler ...') + logger.info( + '***WARNING*** fould the grad scaler in the ' + 'checkpoint but it is None in the class. ' + 'Skipping loading grad scaler ...' + ) # Copy data for the main params. fp32_from_float16_params_key = 'fp32_from_fp16_params' if fp32_from_float16_params_key not in state_dict: fp32_from_float16_params_key = 'fp32_from_fp16' for current_group, saved_group in zip( - self.fp32_from_float16_groups, - state_dict[fp32_from_float16_params_key]): + self.fp32_from_float16_groups, state_dict[fp32_from_float16_params_key] + ): for current_param, saved_param in zip(current_group, saved_group): current_param.data.copy_(saved_param.data) class FP32Optimizer(MegatronOptimizer): - - def __init__(self, optimizer, clip_grad, - log_num_zeros_in_grad, - check_for_nan_in_grad, - params_have_main_grad, - models): + def __init__( + self, optimizer, clip_grad, log_num_zeros_in_grad, params_have_main_grad, + ): super(FP32Optimizer, self).__init__( - optimizer, clip_grad, log_num_zeros_in_grad, - check_for_nan_in_grad, params_have_main_grad, - models) + optimizer, clip_grad, log_num_zeros_in_grad, params_have_main_grad, + ) self._scale = torch.tensor([1.0], dtype=torch.float, device='cuda') - def zero_grad(self, set_to_none=True): """Copied from torch.optim.optimizer""" for group in self.optimizer.param_groups: _zero_grad_group_helper(group['params'], set_to_none) - def get_loss_scale(self): """FP32 optimizer does not do any scaling.""" return self._scale - @torch.no_grad() def step(self, args, timers): """Clip gradients (if needed) and step the base optimizer. Always return successful since there is no overflow.""" # Copy main_grads to grads. 
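The `load_state_dict` implementation above stays compatible with older checkpoints by probing for legacy key names before reading. A condensed illustration of the fallback; the `ckpt` dict here is a made-up old-format checkpoint, not real data:

# An old-format checkpoint only carries the legacy key names.
ckpt = {'optimizer_state_dict': {}, 'fp32_from_fp16': []}

def resolve_key(state_dict, preferred, legacy):
    # Prefer the current key; fall back to the name used by older checkpoints.
    return preferred if preferred in state_dict else legacy

optimizer_key = resolve_key(ckpt, 'optimizer', 'optimizer_state_dict')           # 'optimizer_state_dict'
main_params_key = resolve_key(ckpt, 'fp32_from_fp16_params', 'fp32_from_fp16')   # 'fp32_from_fp16'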
- timers('optimizer-copy-to-main-grad', log_level=1).start( - barrier=args.barrier_with_L1_time) + timers('optimizer-copy-to-main-grad', log_level=1).start(barrier=args.barrier_with_L1_time) if self.params_have_main_grad: for param_group in self.optimizer.param_groups: for param in param_group['params']: @@ -606,39 +597,146 @@ def step(self, args, timers): timers('optimizer-copy-to-main-grad').stop() + if args.debug_all_param_gradients: + params=[] + for param in self.get_parameters(): + if param.grad is not None: + params.append(param) + for param in sorted(params, key=lambda p: p.param_idx): + log_tensor(f"Global gradient: {param.param_name}", param.grad, level=args.debug_all_param_gradients) + # Clip gradients. - timers('optimizer-clip-main-grad', log_level=1).start( - barrier=args.barrier_with_L1_time) + timers('optimizer-clip-main-grad', log_level=1).start(barrier=args.barrier_with_L1_time) grad_norm = None if self.clip_grad > 0.0: - grad_norm = self.clip_grad_norm(self.clip_grad, - self.check_for_nan_in_grad) + grad_norm = self.clip_grad_norm(self.clip_grad) timers('optimizer-clip-main-grad').stop() # count the zeros in the grads - timers('optimizer-count-zeros', log_level=1).start( - barrier=args.barrier_with_L1_time) - num_zeros_in_grad = self.count_zeros() if \ - self.log_num_zeros_in_grad else None + timers('optimizer-count-zeros', log_level=1).start(barrier=args.barrier_with_L1_time) + num_zeros_in_grad = self.count_zeros() if self.log_num_zeros_in_grad else None timers('optimizer-count-zeros').stop() # Update parameters. - timers('optimizer-inner-step', log_level=1).start( - barrier=args.barrier_with_L1_time) + timers('optimizer-inner-step', log_level=1).start(barrier=args.barrier_with_L1_time) self.optimizer.step() timers('optimizer-inner-step').stop() # No overflow for FP32 optimizer. return True, grad_norm, num_zeros_in_grad - def reload_model_params(self): pass - def state_dict(self): return self.optimizer.state_dict() - def load_state_dict(self, state_dict): self.optimizer.load_state_dict(state_dict) + + +class ChainedOptimizer(MegatronOptimizer): + """ChainedOptimizer is designed for chain of multiple optimizers. + + These optimizers are responsible for different parts of multiple models for + a training task and will be executed one by one when the model is updated. + + Args: + chained_optimizers: a list of optimizers. + """ + + # Remove these attributes which inherits from MegatronOptimizer. + state = None + param_groups = None + + def __init__(self, chained_optimizers): + self.chained_optimizers = chained_optimizers + self.param_groups = [] + for optimizer in self.chained_optimizers: + self.param_groups += optimizer.param_groups + + def zero_grad(self, set_to_none=True): + for optimizer in self.chained_optimizers: + optimizer.zero_grad(set_to_none) + + def get_loss_scale(self): + return self.chained_optimizers[0].get_loss_scale() + + def reload_model_params(self): + for optimizer in self.chained_optimizers: + optimizer.reload_model_params() + + def state_dict(self): + return [optimizer.state_dict() for optimizer in self.chained_optimizers] + + def load_state_dict(self, state_dict): + for optimizer, state in zip(self.chained_optimizers, state_dict): + optimizer.load_state_dict(state) + + def step(self, args, timers): + """ChainedOptimizer will step all optimizers one by one. + + Args: + args (argparse.Namespace): command-line arguments. + timers (Timers): timers used for profiling. 
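`ChainedOptimizer`, whose `step` body continues below, simply fans each call out to its members, so parameters split across several optimizers (for example dense weights and expert weights) can still be driven through the single-optimizer interface the training loop expects. A hedged usage sketch; `dense_optimizer`, `expert_optimizer`, `args`, and `timers` are placeholders for objects the training loop already has, not names from the source:

# dense_optimizer and expert_optimizer are MegatronOptimizer instances
# built over disjoint parameter sets.
chained = ChainedOptimizer([dense_optimizer, expert_optimizer])

chained.zero_grad()                                   # zeroes grads in every member
loss_scale = chained.get_loss_scale()                 # taken from the first member
success, grad_norm, num_zeros = chained.step(args, timers)
checkpoint = chained.state_dict()                     # a list, one entry per member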
+ """ + + update_successful, grad_norm, num_zeros_in_grad = True, 0, 0 + grad_norms = [] + for optimizer in self.chained_optimizers: + _update_successful, _grad_norm, _num_zeros_in_grad = optimizer.step(args, timers) + update_successful &= _update_successful + grad_norms += [_grad_norm if _grad_norm else 0.0] + num_zeros_in_grad += _num_zeros_in_grad if _num_zeros_in_grad else 0 + grad_norm = math.sqrt(sum([x ** 2 for x in grad_norms])) + + return update_successful, grad_norm, num_zeros_in_grad + + def save_parameter_state(self, filename): + """Save the distributed parameter states of all optimizers to a file. + + Args: + filename (str): path to save parameter state to. + """ + save_states = False + states = [] + for optimizer in self.chained_optimizers: + if hasattr(optimizer, 'get_parameter_state'): + state_dict = optimizer.get_parameter_state() + + # Save checkpoint economically, only when DP rank = 0, state dict + # needs to be saved. + if torch.distributed.get_rank(optimizer.data_parallel_group) == 0: + states.append(state_dict) + save_states = True + else: + states.append(None) + else: + states.append(None) + + if save_states: + torch.save(states, filename) + + def load_parameter_state(self, filename): + """Load the distributed parameter states of all optimizers from a file. + + Args: + filename (str): path to load parameter state from. + """ + states = None + for idx, optimizer in enumerate(self.chained_optimizers): + if not hasattr(optimizer, 'load_parameter_state_from_state_dict'): + continue + + # Lazy loading checkpoint, state dict is needed only when DP rank = 0. + if torch.distributed.get_rank(optimizer.data_parallel_group) == 0 and states is None: + states = torch.load(filename) + + state_dict = states[idx] if states else None + optimizer.load_parameter_state_from_state_dict(state_dict) + + def finish_param_sync(self, model_index): + """Finish parameter synchronization for all optimizers. + """ + for optimizer in self.chained_optimizers: + optimizer.finish_param_sync(model_index) diff --git a/megatron/core/optimizer/optimizer_config.py b/megatron/core/optimizer/optimizer_config.py new file mode 100644 index 0000000000..664e7c9036 --- /dev/null +++ b/megatron/core/optimizer/optimizer_config.py @@ -0,0 +1,113 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + +from dataclasses import dataclass +from typing import Optional + +import torch + + +@dataclass +class OptimizerConfig: + """ + Configuration for optimizer. + + + Precision + --------- + + fp16 (bool): If true, train with fp16 mixed precision training. Defaults to False. + + bf16 (bool): If true, train with bf16 mixed precision training. Defaults to False. + + params_dtype (torch.dtype): dtype used when intializing the weights. Defaults to torch.float32. + + + General Optimizer + ----------------- + + optimizer (str): Optimizer to use (one of Adam or SGD). + + lr (float, optional): Initial learning rate. Depending on decay style and initial warmup, the learning + rate at each iteration would be different. + + + Loss Scaler + ----------- + + loss_scale (float, optional): Static loss scaling, positive power of 2 values can improve fp16 convergence. + If None, dynamic loss scaling is used. + + initial_loss_scale (float): Initial loss-scale for dynamic loss scaling. + + min_loss_scale (float): Minimum loss scale for dynamic loss scaling. + + loss_scale_window (float): Window over which to raise/lower dynamic scale. + + hysteresis (int): Hysteresis for dynamic loss scaling. 
+ + + Weight Decay + ------------ + + weight_decay (float): Weight decay coefficient for L2 regularization. + + + Base Optimizer + -------------- + + adam_beta1 (float): First coefficient for computing running averages of gradient and its square in Adam optimizer. + + adam_beta2 (float): Second coefficient for computing running averages of gradient and its square in Adam optimizer. + + adam_eps (float): Term added to the denominator to improve numerical stability in Adam optimizer. + + sgd_momentum (float): Momentum factor for SGD optimizer. + + + Distributed Optimizer + --------------------- + + use_distributed_optimizer (bool): Distribute optimizer state over data-parallel replicas. + + overlap_param_gather (bool): If true, overlap param all-gather with forward compute in distributed optimizer. + + + Miscellaneous + ------------- + + clip_grad (float): Gradient clipping based on global L2 norm. + + log_num_zeros_in_grad (bool): If true, calculate and log the number of zeros in gradient. + """ + + # Precision. + fp16: bool = False + bf16: bool = False + params_dtype: torch.dtype = torch.float32 + + optimizer: str = 'adam' + lr: Optional[float] = None + + # Loss scaling. + loss_scale: Optional[float] = None + initial_loss_scale: float = 2 ** 32 + min_loss_scale: float = 1.0 + loss_scale_window: float = 1000 + hysteresis: int = 2 + + weight_decay: float = 0.01 + + # Adam. + adam_beta1: float = 0.9 + adam_beta2: float = 0.999 + adam_eps: float = 1e-08 + # SGD. + sgd_momentum: float = 0.9 + + # Distributed optimizer. + use_distributed_optimizer: bool = False + overlap_param_gather: bool = False + + # Miscellaneous. + clip_grad: float = 1.0 + log_num_zeros_in_grad: bool = False diff --git a/megatron/core/package_info.py b/megatron/core/package_info.py index 55c49b1785..07de3fba41 100644 --- a/megatron/core/package_info.py +++ b/megatron/core/package_info.py @@ -2,7 +2,7 @@ MAJOR = 0 -MINOR = 4 +MINOR = 5 PATCH = 0 PRE_RELEASE = 'rc0' diff --git a/megatron/core/packed_seq_params.py b/megatron/core/packed_seq_params.py new file mode 100644 index 0000000000..478c17265f --- /dev/null +++ b/megatron/core/packed_seq_params.py @@ -0,0 +1,13 @@ +from dataclasses import dataclass + +from torch import Tensor + + +@dataclass +class PackedSeqParams: + # parameters to TEDotProductAttention and fused rope kernels for the `thd` (packed) sequence format, + qkv_format: str = None + cu_seqlens_q: Tensor = None + cu_seqlens_kv: Tensor = None + max_seqlen_q: Tensor = None + max_seqlen_kv: Tensor = None diff --git a/megatron/core/parallel_state.py b/megatron/core/parallel_state.py index 5652b20846..45cccc6463 100644 --- a/megatron/core/parallel_state.py +++ b/megatron/core/parallel_state.py @@ -28,6 +28,7 @@ # Expert parallel group that the current rank belongs to. _TENSOR_AND_EXPERT_PARALLEL_GROUP = None _DATA_MODULO_EXPERT_PARALLEL_GROUP = None +_DATA_MODULO_EXPERT_PARALLEL_GROUP_GLOO = None _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK = None @@ -37,8 +38,10 @@ # These values enable us to change the mpu sizes on the fly. _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE = None _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = None +_MPU_EXPERT_MODEL_PARALLEL_WORLD_SIZE = None _MPU_TENSOR_MODEL_PARALLEL_RANK = None _MPU_PIPELINE_MODEL_PARALLEL_RANK = None +_MPU_EXPERT_MODEL_PARALLEL_RANK = None # A list of ranks that have a copy of the embedding. 
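Returning to the new `OptimizerConfig` dataclass introduced above: since it is a plain dataclass, a typical bf16 + distributed-optimizer setup can be spelled out directly. Every field below exists in the definition; the values themselves are only an illustration:

import torch

from megatron.core.optimizer.optimizer_config import OptimizerConfig

config = OptimizerConfig(
    optimizer='adam',
    lr=3.0e-4,
    bf16=True,
    params_dtype=torch.bfloat16,
    weight_decay=0.1,
    adam_beta1=0.9,
    adam_beta2=0.95,
    clip_grad=1.0,
    use_distributed_optimizer=True,
    overlap_param_gather=True,
)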
_EMBEDDING_GLOBAL_RANKS = None @@ -456,6 +459,7 @@ def initialize_model_parallel( assert ( _DATA_MODULO_EXPERT_PARALLEL_GROUP is None ), 'Data modulo expert group is already initialized' + global _DATA_MODULO_EXPERT_PARALLEL_GROUP_GLOO tensor_and_data_group_size: int = tensor_model_parallel_size * data_parallel_size num_tensor_and_data_groups: int = world_size // tensor_and_data_group_size tensor_and_expert_group_size: int = tensor_model_parallel_size * expert_model_parallel_size @@ -479,8 +483,10 @@ def initialize_model_parallel( group = torch.distributed.new_group( ranks, pg_options=get_nccl_options('dp_modulo_exp', nccl_comm_cfgs) ) + group_gloo = torch.distributed.new_group(ranks, backend="gloo") if rank in ranks: _DATA_MODULO_EXPERT_PARALLEL_GROUP = group + _DATA_MODULO_EXPERT_PARALLEL_GROUP_GLOO = group_gloo # Initialize global memory buffer # This isn't really "parallel state" but there isn't another good place to @@ -622,6 +628,18 @@ def get_data_modulo_expert_parallel_group(): return _DATA_MODULO_EXPERT_PARALLEL_GROUP +def get_data_modulo_expert_parallel_group_gloo(): + assert ( + _DATA_MODULO_EXPERT_PARALLEL_GROUP_GLOO is not None + ), 'data modulo expert parallel group-gloo is not initialized' + return _DATA_MODULO_EXPERT_PARALLEL_GROUP_GLOO + + +def set_expert_model_parallel_world_size(world_size): + global _MPU_EXPERT_MODEL_PARALLEL_WORLD_SIZE + _MPU_EXPERT_MODEL_PARALLEL_WORLD_SIZE = world_size + + def set_tensor_model_parallel_world_size(world_size): """Set the tensor model parallel size""" global _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE @@ -656,6 +674,12 @@ def get_pipeline_model_parallel_world_size(): return torch.distributed.get_world_size(group=get_pipeline_model_parallel_group()) +def set_expert_model_parallel_rank(rank): + """Set expert model parallel rank.""" + global _MPU_EXPERT_MODEL_PARALLEL_RANK + _MPU_EXPERT_MODEL_PARALLEL_RANK = rank + + def set_tensor_model_parallel_rank(rank): """Set tensor model parallel rank.""" global _MPU_TENSOR_MODEL_PARALLEL_RANK @@ -888,7 +912,9 @@ def get_context_parallel_rank(): def get_expert_model_parallel_world_size(): - """Return my rank for the expert parallel group""" + """Return world size for the expert model parallel group""" + if _MPU_EXPERT_MODEL_PARALLEL_WORLD_SIZE: + return _MPU_EXPERT_MODEL_PARALLEL_WORLD_SIZE if torch.distributed.is_available() and torch.distributed.is_initialized(): tensor_and_expert_parallel_world_size = torch.distributed.get_world_size( group=get_tensor_and_expert_parallel_group() @@ -898,8 +924,23 @@ def get_expert_model_parallel_world_size(): return 0 +def get_tensor_and_expert_parallel_world_size(): + """Return world size for the expert model parallel group times model parallel group. + Currently, each expert will also be distributed across TP group by default. 
+ """ + if torch.distributed.is_available() and torch.distributed.is_initialized(): + tensor_and_expert_parallel_world_size = torch.distributed.get_world_size( + group=get_tensor_and_expert_parallel_group() + ) + return tensor_and_expert_parallel_world_size + else: + return 0 + + def get_expert_model_parallel_rank(): """Return my rank for the expert parallel group""" + if _MPU_EXPERT_MODEL_PARALLEL_RANK: + return _MPU_EXPERT_MODEL_PARALLEL_RANK if torch.distributed.is_available() and torch.distributed.is_initialized(): tensor_and_expert_parallel_rank = torch.distributed.get_rank( group=get_tensor_and_expert_parallel_group() @@ -978,3 +1019,7 @@ def destroy_model_parallel(): _MPU_PIPELINE_MODEL_PARALLEL_RANK = None global _GLOBAL_MEMORY_BUFFER _GLOBAL_MEMORY_BUFFER = None + global _MPU_EXPERT_MODEL_PARALLEL_WORLD_SIZE + _MPU_EXPERT_MODEL_PARALLEL_WORLD_SIZE = None + global _MPU_EXPERT_MODEL_PARALLEL_RANK + _MPU_EXPERT_MODEL_PARALLEL_RANK = None diff --git a/megatron/core/pipeline_parallel/schedules.py b/megatron/core/pipeline_parallel/schedules.py index 992da78127..6dc4011fe2 100644 --- a/megatron/core/pipeline_parallel/schedules.py +++ b/megatron/core/pipeline_parallel/schedules.py @@ -9,6 +9,7 @@ from megatron.core import parallel_state from megatron.core.enums import ModelType from megatron.core.pipeline_parallel import p2p_communication +from megatron.core.transformer.moe.router import MoEAuxLossAutoScaler from megatron.core.utils import get_attr_wrapped_model, get_model_config, get_model_type # Types @@ -88,6 +89,9 @@ def forward_step(data_iterator, model): collect_non_loss_data (optional, bool, default=False): TODO + first_val_step (bool, optional): Is the first step of the validation phase. Used by + Transformer Engine modules to only update their fp8 weights only on the first validation step. + """ pipeline_model_parallel_size = parallel_state.get_pipeline_model_parallel_world_size() if pipeline_model_parallel_size > 1: @@ -156,7 +160,9 @@ def forward_step( config, collect_non_loss_data=False, checkpoint_activations_microbatch=None, + is_first_microbatch=False, ): + """Forward step for passed-in model. If first stage, input tensor is obtained from data_iterator, otherwise @@ -166,6 +172,9 @@ def forward_step( if config.timers is not None: config.timers('forward-compute', log_level=2).start() + if is_first_microbatch and hasattr(model, 'set_is_first_microbatch'): + model.set_is_first_microbatch() + unwrap_output_tensor = False if not isinstance(input_tensor, list): input_tensor = [input_tensor] @@ -199,6 +208,18 @@ def forward_step( if config.timers is not None: config.timers('forward-compute').stop() + # Set the loss scale for the auxiliary loss of the MoE layer. + # Since we use a trick to do backward on the auxiliary loss, we need to set the scale explicitly. + if hasattr(config, 'num_moe_experts') and config.num_moe_experts is not None: + # Calculate the loss scale based on the grad_scale_func if available, else default to 1. + loss_scale = ( + config.grad_scale_func(torch.tensor(1.0, device=loss.device)) + if config.grad_scale_func is not None + else torch.tensor(1.0) + ) + # Set the loss scale + MoEAuxLossAutoScaler.set_loss_scale(loss_scale / num_microbatches) + # If T5 model (or other model with encoder and decoder) # and in decoder stack, then send encoder_hidden_state # downstream as well. 
@@ -280,6 +301,13 @@ def backward_step(input_tensor, output_tensor, output_tensor_grad, model_type, c return input_tensor_grad +def check_first_val_step(first_val_step, forward_only, cond): + if (first_val_step is not None) and forward_only: + return first_val_step and cond + else: + return cond + + def forward_backward_no_pipelining( *, forward_step_func, @@ -291,6 +319,7 @@ def forward_backward_no_pipelining( decoder_seq_length: int = None, # unused forward_only: bool = False, collect_non_loss_data: bool = False, + first_val_step: bool = None, ): """Run forward and backward passes with no pipeline parallelism (no inter-stage communication). @@ -333,6 +362,7 @@ def forward_backward_no_pipelining( forward_data_store, config, collect_non_loss_data, + is_first_microbatch=check_first_val_step(first_val_step, forward_only, i == 0), ) if not forward_only: backward_step(input_tensor, output_tensor, output_tensor_grad, model_type, config) @@ -348,6 +378,9 @@ def forward_backward_no_pipelining( forward_data_store, config, collect_non_loss_data, + is_first_microbatch=check_first_val_step( + first_val_step, forward_only, num_microbatches == 1 + ), ) if not forward_only: @@ -375,6 +408,7 @@ def forward_backward_pipelining_with_interleaving( decoder_seq_length: int = None, forward_only: bool = False, collect_non_loss_data: bool = False, + first_val_step: bool = None, ): """Run interleaved 1F1B schedule (model split into model chunks), with communication between pipeline stages as needed. @@ -458,6 +492,7 @@ def enable_grad_sync(): ) tensor_shape = [seq_length, micro_batch_size, config.hidden_size] + tensor_shape[0] = tensor_shape[0] // parallel_state.get_context_parallel_world_size() if config.sequence_parallel: tensor_shape[0] = tensor_shape[0] // parallel_state.get_tensor_model_parallel_world_size() @@ -559,6 +594,7 @@ def forward_step_helper(microbatch_id, checkpoint_activations_microbatch): if len(input_tensors[model_chunk_id]) == len(output_tensors[model_chunk_id]): input_tensors[model_chunk_id].append(None) input_tensor = input_tensors[model_chunk_id][-1] + output_tensor = forward_step( forward_step_func, data_iterator[model_chunk_id], @@ -569,6 +605,9 @@ def forward_step_helper(microbatch_id, checkpoint_activations_microbatch): config, collect_non_loss_data, checkpoint_activations_microbatch, + check_first_val_step( + first_val_step, forward_only, is_first_microbatch_for_model_chunk(microbatch_id), + ), ) output_tensors[model_chunk_id].append(output_tensor) @@ -958,6 +997,10 @@ def get_tensor_shapes( # Otherwise, send one tensor (pre-transpose). tensor_shapes = [] + seq_length = seq_length // parallel_state.get_context_parallel_world_size() + if model_type == ModelType.encoder_and_decoder: + decoder_seq_length = decoder_seq_length // parallel_state.get_context_parallel_world_size() + if config.sequence_parallel: seq_length = seq_length // parallel_state.get_tensor_model_parallel_world_size() if model_type == ModelType.encoder_and_decoder: @@ -1055,6 +1098,7 @@ def forward_backward_pipelining_without_interleaving( decoder_seq_length: int = None, forward_only: bool = False, collect_non_loss_data: bool = False, + first_val_step: bool = None, ): """Run non-interleaved 1F1B schedule, with communication between pipeline stages. 
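With the context-parallel split added above, the activation shape communicated between pipeline stages shrinks along the sequence dimension by the context-parallel size, and again by the tensor-parallel size when sequence parallelism is enabled. A quick numeric illustration with assumed sizes:

seq_length = 4096
micro_batch_size = 2
hidden_size = 4096
context_parallel_size = 2
tensor_model_parallel_size = 4
sequence_parallel = True

tensor_shape = [seq_length, micro_batch_size, hidden_size]
tensor_shape[0] //= context_parallel_size              # 4096 -> 2048
if sequence_parallel:
    tensor_shape[0] //= tensor_model_parallel_size     # 2048 -> 512

print(tensor_shape)   # [512, 2, 4096]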
@@ -1174,6 +1218,7 @@ def enable_grad_sync(): config, collect_non_loss_data, checkpoint_activations_microbatch, + check_first_val_step(first_val_step, forward_only, i == 0), ) send_forward(output_tensor, send_tensor_shapes, config) @@ -1210,6 +1255,9 @@ def enable_grad_sync(): config, collect_non_loss_data, checkpoint_activations_microbatch, + check_first_val_step( + first_val_step, forward_only, (i == 0) and (num_warmup_microbatches == 0) + ), ) if forward_only: diff --git a/megatron/core/tensor_parallel/layers.py b/megatron/core/tensor_parallel/layers.py index 38379cb34d..a73803a5a3 100644 --- a/megatron/core/tensor_parallel/layers.py +++ b/megatron/core/tensor_parallel/layers.py @@ -3,10 +3,11 @@ # Parts of the code here are adapted from PyTorch # repo: https://github.com/pytorch/pytorch +import io import math import os import warnings -from typing import Callable, Optional +from typing import Any, Callable, Optional, Tuple import torch import torch.nn.functional as F @@ -22,6 +23,9 @@ get_tensor_model_parallel_world_size, ) +from ..dist_checkpointing.mapping import ShardedStateDict +from ..transformer.utils import make_sharded_tensors_for_checkpoint +from ..utils import make_tp_sharded_tensor_for_checkpoint from .mappings import ( copy_to_tensor_model_parallel_region, gather_from_sequence_parallel_region, @@ -202,9 +206,6 @@ def __init__( _initialize_affine_weight_gpu(self.weight, init_method, partition_dim=0, stride=1) def forward(self, input_): - assert not torch.any( - (input_ < 0) | (input_ >= self.num_embeddings) - ), "An input token is out of bounds of the embedding table" if self.tensor_model_parallel_size > 1: # Build the mask. input_mask = (input_ < self.vocab_start_index) | (input_ >= self.vocab_end_index) @@ -222,6 +223,22 @@ def forward(self, input_): output = reduce_from_tensor_model_parallel_region(output_parallel) return output + def sharded_state_dict( + self, prefix: str = '', sharded_offsets: Tuple[Tuple[int, int, int]] = () + ) -> ShardedStateDict: + """ Non-default implementation for embeddings due to `allow_shape_mismatch` param """ + state_dict = self.state_dict(prefix='', keep_vars=True) + + weight_prefix = f'{prefix}weight' + return { + weight_prefix: make_tp_sharded_tensor_for_checkpoint( + tensor=state_dict['weight'], + key=weight_prefix, + allow_shape_mismatch=True, + prepend_offsets=sharded_offsets, + ) + } + class LinearWithFrozenWeight(torch.autograd.Function): """Linear operator that does not calculate gradient for weight. 
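The `VocabParallelEmbedding.forward` shown above relies on each tensor-parallel rank owning a contiguous vocabulary slice: foreign token ids are masked to a safe local index, their embedding rows are zeroed, and the reduce across the tensor-parallel group sums the partial lookups into the full result. A small sketch of that masking arithmetic with an assumed vocabulary of 8 entries split over two ranks (the zeroing and reduce steps are paraphrased from how the layer behaves, not copied from the hunk above):

import torch

vocab_size, tp_size, rank = 8, 2, 1
per_rank = vocab_size // tp_size                                              # 4
vocab_start_index, vocab_end_index = rank * per_rank, (rank + 1) * per_rank   # [4, 8)

input_ = torch.tensor([1, 5, 7, 2])
input_mask = (input_ < vocab_start_index) | (input_ >= vocab_end_index)       # [True, False, False, True]
masked_input = input_.clone() - vocab_start_index
masked_input[input_mask] = 0                                                  # safe local indices: [0, 1, 3, 0]

local_table = torch.randn(per_rank, 16)                                       # this rank's slice of the weight
output_parallel = local_table[masked_input]
output_parallel[input_mask, :] = 0.0                                          # foreign tokens contribute nothing
# reduce_from_tensor_model_parallel_region would now sum the per-rank partial outputs.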
@@ -370,12 +387,13 @@ def backward(ctx, grad_output): # https://github.com/pytorch/pytorch/blob/c47cf9bc7f9e02f649ab4ed53fe4d35732c92ab6/torch/_refs/__init__.py#L2761 grad_output = grad_output.contiguous() # Convert the tensor shapes to 2D for execution compatibility - grad_output = grad_output.view( - grad_output.shape[0] * grad_output.shape[1], grad_output.shape[2] - ) - total_input = total_input.view( - total_input.shape[0] * total_input.shape[1], total_input.shape[2] - ) + if grad_output.dim() == 3: + grad_output = grad_output.view( + grad_output.shape[0] * grad_output.shape[1], grad_output.shape[2] + ) + total_input = total_input.view( + total_input.shape[0] * total_input.shape[1], total_input.shape[2] + ) if ctx.async_grad_allreduce: # Asynchronous all-reduce @@ -691,6 +709,13 @@ def __init__( self.sequence_parallel or self.expert_parallel ) + # Hook adding a default empty _extra_state for state dict + self._register_load_state_dict_pre_hook( + lambda state_dict, prefix, *args, **kwargs: state_dict.setdefault( + f'{prefix}_extra_state' + ) + ) + def forward(self, input_: torch.Tensor, weight: Optional[torch.Tensor] = None): """Forward of ColumnParallelLinear @@ -721,6 +746,12 @@ def forward(self, input_: torch.Tensor, weight: Optional[torch.Tensor] = None): f"not {expected_shape} as expected" ) + if self.config._cpu_offloading_context is not None: + if self.config._cpu_offloading_context.inside_context == True: + assert ( + self.config.cpu_offloading == False + ), "CPU Offloading cannot be enabled while using non-TE modules" + bias = self.bias if not self.skip_bias_add else None if ( @@ -756,6 +787,20 @@ def forward(self, input_: torch.Tensor, weight: Optional[torch.Tensor] = None): output_bias = self.bias if self.skip_bias_add else None return output, output_bias + def sharded_state_dict(self, prefix='', sharded_offsets=()): + """ Sharding along axis 0, bias sharded """ + state_dict = self.state_dict(prefix='', keep_vars=True) + return make_sharded_tensors_for_checkpoint( + state_dict, prefix, {'weight': 0, 'bias': 0}, sharded_offsets + ) + + def set_extra_state(self, state: Any): + """ Extra state is ignored """ + + def get_extra_state(self) -> None: + """ Keep compatibility with TE state dict. """ + return None + class RowParallelLinear(torch.nn.Module): """Linear layer with row parallelism. @@ -878,6 +923,13 @@ def __init__( self.sequence_parallel or self.expert_parallel ) + # Hook adding a default empty _extra_state for state dict + self._register_load_state_dict_pre_hook( + lambda state_dict, prefix, *args, **kwargs: state_dict.setdefault( + f'{prefix}_extra_state' + ) + ) + def forward(self, input_): """Forward of RowParallelLinear @@ -888,6 +940,13 @@ def forward(self, input_): - output - bias """ + + if self.config._cpu_offloading_context is not None: + if self.config._cpu_offloading_context.inside_context == True: + assert ( + self.config.cpu_offloading == False + ), "CPU Offloading cannot be enabled while using non-TE modules" + # Set up backprop all-reduce. 
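The `_register_load_state_dict_pre_hook` calls added to both linear layers above smooth over a state-dict mismatch: Transformer Engine layers store an `_extra_state` entry while these plain-PyTorch layers do not, so the hook injects a default `None` entry before loading and `set_extra_state`/`get_extra_state` simply ignore it. A minimal, self-contained sketch of the same trick on a toy module (`CompatLinear` is an illustrative name, not from the source):

import torch

class CompatLinear(torch.nn.Linear):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Insert a default (None) `_extra_state` key so checkpoints written without
        # one still load cleanly even though get/set_extra_state are defined.
        self._register_load_state_dict_pre_hook(
            lambda state_dict, prefix, *a, **kw: state_dict.setdefault(f'{prefix}_extra_state')
        )

    def set_extra_state(self, state):
        pass          # extra state is ignored

    def get_extra_state(self):
        return None   # keeps the key present for compatibility with TE-style checkpoints


layer = CompatLinear(4, 4)
plain = torch.nn.Linear(4, 4)
layer.load_state_dict(plain.state_dict())   # no `_extra_state` in the source dict; the hook fills it in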
if self.input_is_parallel: input_parallel = input_ @@ -923,3 +982,17 @@ def forward(self, input_): output = output_ output_bias = self.bias return output, output_bias + + def sharded_state_dict(self, prefix='', sharded_offsets=()): + """ Sharding along axis 1, bias not sharded """ + state_dict = self.state_dict(prefix='', keep_vars=True) + return make_sharded_tensors_for_checkpoint( + state_dict, prefix, {'weight': 1}, sharded_offsets + ) + + def set_extra_state(self, state: Any): + """ Extra state is ignored """ + + def get_extra_state(self) -> None: + """ Keep compatibility with TE state dict. """ + return None diff --git a/megatron/timers.py b/megatron/core/timers.py similarity index 54% rename from megatron/timers.py rename to megatron/core/timers.py index a9478fa014..b61eb4ed22 100644 --- a/megatron/timers.py +++ b/megatron/core/timers.py @@ -2,16 +2,14 @@ """Megatron timers.""" -from abc import ABC -from abc import abstractmethod import time +from abc import ABC, abstractmethod +from typing import List import torch - class TimerBase(ABC): - def __init__(self, name): self.name = name @@ -32,9 +30,7 @@ def elapsed(self, reset=True, barrier=False): pass - class DummyTimer(TimerBase): - def __init__(self): super().__init__('dummy timer') @@ -48,13 +44,13 @@ def reset(self): return def elapsed(self, reset=True, barrier=False): - raise Exception('dummy timer should not be used to ' - 'calculate elapsed time') - + raise Exception('dummy timer should not be used to calculate elapsed time') class Timer(TimerBase): """ + Timer class with ability to start/stop. + Comment on using `barrier`: If this flag is passed, then all the caller processes will wait till all reach the timing routine. It is up to the user to make sure all the ranks in `barrier_group` @@ -64,20 +60,33 @@ class Timer(TimerBase): """ def __init__(self, name): + """Initialize Timer. + + Args: + name (str): Name of the timer. + """ super().__init__(name) self._elapsed = 0.0 + self._active_time = 0.0 self._started = False # Note that None will default to the global process group self._barrier_group = None self._start_time = time.time() - def set_barrier_group(self, barrier_group): - self._barrier_group = barrier_group + """Sets barrier group. + Args: + barrier_group (ProcessGroup): Torch ProcessGroup for barrier. + """ + self._barrier_group = barrier_group def start(self, barrier=False): - """Start the timer.""" + """Start the timer. + + Args: + barrier (bool, optional): Synchronizes ranks before starting. Defaults to False. + """ assert not self._started, 'timer has already been started' if barrier: torch.distributed.barrier(group=self._barrier_group) @@ -85,25 +94,38 @@ def start(self, barrier=False): self._start_time = time.time() self._started = True - def stop(self, barrier=False): - """Stop the timer.""" + """Stop the timer. + + Args: + barrier (bool, optional): Synchronizes ranks before stopping. Defaults to False. + """ assert self._started, 'timer is not started' if barrier: torch.distributed.barrier(group=self._barrier_group) torch.cuda.synchronize() - self._elapsed += (time.time() - self._start_time) + elapsed = time.time() - self._start_time + self._elapsed += elapsed + self._active_time += elapsed self._started = False - def reset(self): - """Reset timer.""" + """Reset timer. + """ + # Don't reset _active_time self._elapsed = 0.0 self._started = False - def elapsed(self, reset=True, barrier=False): - """Calculate the elapsed time.""" + """Calculates the elapsed time and restarts timer. 
+ + Args: + reset (bool, optional): Resets timer before restarting. Defaults to True. + barrier (bool, optional): Synchronizes ranks before stopping. Defaults to False. + + Returns: + float: Elapsed time. + """ _started = self._started # If the timing in progress, end it first. if self._started: @@ -118,37 +140,54 @@ def elapsed(self, reset=True, barrier=False): self.start(barrier=barrier) return _elapsed + def active_time(self): + return self._active_time class Timers: - """Group of timers.""" + """Class for a group of Timers. + """ def __init__(self, log_level, log_option): + """Initialize group of timers. + + Args: + log_level (int): Log level to control what timers are enabled. + log_option (str): Setting for logging statistics over ranks for all the timers. Allowed: ['max', 'minmax', 'all']. + """ self._log_level = log_level + allowed_log_options = set(['max', 'minmax', 'all']) + assert ( + log_option in allowed_log_options + ), 'input log option {} is invalid. It must be one of {}'.format( + log_option, allowed_log_options + ) self._log_option = log_option self._timers = {} self._log_levels = {} self._dummy_timer = DummyTimer() self._max_log_level = 2 - def __call__(self, name, log_level=None): + """Call timer with name and log level.""" # If the timer has already been set, then check if the log-level # is provided, it matches the one that the timer was created with. if name in self._timers: if log_level is not None: - assert log_level == self._log_levels[name], \ - 'input log level {} does not match already existing '\ - 'log level {} for {} timer'.format( - log_level, self._log_levels[name], name) + assert log_level == self._log_levels[name], ( + 'input log level {} does not match already existing ' + 'log level {} for {} timer'.format(log_level, self._log_levels[name], name) + ) return self._timers[name] # If timer does not exist and no log level is provided, # set it to the max log level which is 2. if log_level is None: log_level = self._max_log_level - assert log_level <= self._max_log_level, \ - 'log level {} is larger than max supported log level {}'.format( - log_level, self._max_log_level) + assert ( + log_level <= self._max_log_level + ), 'log level {} is larger than max supported log level {}'.format( + log_level, self._max_log_level + ) # Now if the input log level is larger than the one set for # the timers class, just ignore it and return a dummy timer. if log_level > self._log_level: @@ -158,18 +197,21 @@ def __call__(self, name, log_level=None): self._log_levels[name] = log_level return self._timers[name] - def _get_elapsed_time_all_ranks(self, names, reset, barrier): - """ + """Returns elapsed times of timers in names. Assumptions: - All the ranks call this function. - `names` are identical on all ranks. If the above assumptions are not met, calling this function will result in hang. - Arguments: - - names: list of timer names - - reset: reset the timer after recording the elapsed time - - barrier: if set, do a global barrier before time measurments + + Args: + names (List[str]): list of timer names + reset (bool): reset the timer after recording the elapsed time + barrier (bool): if set, do a global barrier before time measurments + + Returns: + torch.tensor: Tensor of size [world_size, len(names)] with times in float. """ # First make sure all the callers are in sync. @@ -184,30 +226,28 @@ def _get_elapsed_time_all_ranks(self, names, reset, barrier): # pytorch yet. 
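Taken together, the reworked `Timers` container works like this: the constructor fixes a log level and a per-rank reporting mode, calling the instance registers (or fetches) a named `Timer`, and any timer requested above the configured log level silently becomes the no-op `DummyTimer`. A hedged usage sketch; it assumes a CUDA device and an initialized torch.distributed process group, since stopping synchronizes the device and logging gathers times across ranks:

from megatron.core.timers import Timers

timers = Timers(log_level=1, log_option='minmax')

timers('forward-compute', log_level=1).start()
# ... run the forward pass ...
timers('forward-compute').stop()

timers('optimizer-inner-step', log_level=2).start()   # above log_level=1, so this is a DummyTimer no-op
timers('optimizer-inner-step').stop()

# Prints "(min, max) time across ranks (ms)" for the named timers on the last rank.
timers.log(['forward-compute'])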
It is simpler to deal with a single tensor # and since we are only gathering a small amount of data, # it should be ok to use all-gather instead of gather. - rank_name_to_time = torch.zeros((world_size, len(names)), - dtype=torch.float, - device=torch.cuda.current_device()) + rank_name_to_time = torch.zeros( + (world_size, len(names)), dtype=torch.float, device=torch.cuda.current_device() + ) for i, name in enumerate(names): if name in self._timers: # Here we don't need to pass the barrier flag as all # the processes are already in sync. This avoids the # issue of different timers having different barrier # groups inside their class. - rank_name_to_time[rank, i] = self._timers[name].elapsed( - reset=reset) + rank_name_to_time[rank, i] = self._timers[name].elapsed(reset=reset) # See the note above for why we are not using gather. - torch.distributed._all_gather_base(rank_name_to_time.view(-1), - rank_name_to_time[rank, :].view(-1)) + torch.distributed._all_gather_base( + rank_name_to_time.view(-1), rank_name_to_time[rank, :].view(-1) + ) return rank_name_to_time - def _get_global_min_max_time(self, names, reset, barrier, normalizer): """Report only min and max times across all ranks.""" - rank_name_to_time = self._get_elapsed_time_all_ranks(names, reset, - barrier) + rank_name_to_time = self._get_elapsed_time_all_ranks(names, reset, barrier) name_to_min_max_time = {} for i, name in enumerate(names): rank_to_time = rank_name_to_time[:, i] @@ -217,32 +257,32 @@ def _get_global_min_max_time(self, names, reset, barrier, normalizer): if rank_to_time.numel() > 0: name_to_min_max_time[name] = ( rank_to_time.min().item() / normalizer, - rank_to_time.max().item() / normalizer) + rank_to_time.max().item() / normalizer, + ) return name_to_min_max_time - - def _get_global_min_max_time_string(self, names, reset, barrier, - normalizer, max_only): - name_to_min_max_time = self._get_global_min_max_time( - names, reset, barrier, normalizer) + def _get_global_min_max_time_string(self, names, reset, barrier, normalizer, max_only): + """Report strings for max/minmax times across all ranks.""" + name_to_min_max_time = self._get_global_min_max_time(names, reset, barrier, normalizer) if not name_to_min_max_time: return None - output_string = '(min, max) time across ranks (ms):' + if max_only: + output_string = 'max time across ranks (ms):' + else: + output_string = '(min, max) time across ranks (ms):' for name in name_to_min_max_time: min_time, max_time = name_to_min_max_time[name] if max_only: - output_string += '\n {}: {:.2f}'.format( - (name+' ').ljust(48, '.'), max_time) + output_string += '\n {}: {:.2f}'.format((name + ' ').ljust(48, '.'), max_time) else: output_string += '\n {}: ({:.2f}, {:.2f})'.format( - (name+' ').ljust(48, '.'), min_time, max_time) + (name + ' ').ljust(48, '.'), min_time, max_time + ) return output_string - def _get_all_ranks_time_string(self, names, reset, barrier, normalizer): """Report times across all ranks.""" - rank_name_to_time = self._get_elapsed_time_all_ranks(names, reset, - barrier) + rank_name_to_time = self._get_elapsed_time_all_ranks(names, reset, barrier) output_string = 'times across ranks (ms):' no_reported_timing = True @@ -255,49 +295,103 @@ def _get_all_ranks_time_string(self, names, reset, barrier, normalizer): not_yet_found = False output_string += '\n {}:'.format(name) output_string += '\n rank {:2d}: {:.2f}'.format( - rank, rank_name_to_time[rank, i] / normalizer) + rank, rank_name_to_time[rank, i] / normalizer + ) if no_reported_timing: return None return 
output_string + def get_all_timers_string( + self, + names: List[str] = None, + normalizer: float = 1.0, + reset: bool = True, + barrier: bool = False, + ): + """Returns the output string with logged timer values according to configured options. + + Args: + names (List[str]): Names of the timers to log. If None, all registered timers are fetched. Defaults to None. + normalizer (float, optional): Normalizes the timer values by the factor. Defaults to 1.0. + reset (bool, optional): Whether to reset timer values after logging. Defaults to True. + barrier (bool, optional): Whether to do a global barrier before time measurments. Defaults to False. + + Raises: + Exception: Raises if log option is invalid. + + Returns: + str: Formatted string with the timer values. + """ - def log(self, names, rank=None, normalizer=1.0, reset=True, barrier=False): - """Log a group of timers.""" + if names == None: # get all registered timers + names = self._timers.keys() - # Print. assert normalizer > 0.0 if self._log_option in ['max', 'minmax']: max_only = False if self._log_option == 'max': max_only = True output_string = self._get_global_min_max_time_string( - names, reset, barrier, normalizer/1000.0, max_only) + names, reset, barrier, normalizer / 1000.0, max_only + ) elif self._log_option == 'all': - output_string = self._get_all_ranks_time_string(names, - reset, barrier, - normalizer/1000.0) + output_string = self._get_all_ranks_time_string( + names, reset, barrier, normalizer / 1000.0 + ) else: - raise Exception('unknown timing log option {}'.format( - self._log_option)) + raise Exception('unknown timing log option {}'.format(self._log_option)) + return output_string + def log( + self, + names: List[str], + rank: int = None, + normalizer: float = 1.0, + reset: bool = True, + barrier: bool = False, + ): + """logs the timers passed in names to stdout. Example usage is to log average per step value for timer 'foo', + this function can be called with normalizer factor set to logging interval. + + Args: + names (List[str]): Names of the timers to log. + rank (int, optional): logs the timers to a specific rank. If set to None, logs to the last rank. Defaults to None. + normalizer (float, optional): Normalizes the timer values by the factor. Defaults to 1.0. + reset (bool, optional): Whether to reset timer values after logging. Defaults to True. + barrier (bool, optional): Whether to do a global barrier before time measurments. Defaults to False. + """ + + output_string = self.get_all_timers_string(names, normalizer, reset, barrier) # If no input rank is provided, log on last rank. if rank is None: rank = torch.distributed.get_world_size() - 1 if rank == torch.distributed.get_rank() and output_string is not None: print(output_string, flush=True) - - def write(self, names, writer, iteration, normalizer=1.0, - reset=False, barrier=False): - """Write timers to a tensorboard writer - Note that we only report maximum time across ranks to tensorboard. + def write( + self, + names: List[str], + writer, + iteration: int, + normalizer: float = 1.0, + reset: bool = True, + barrier: bool = False, + ): + """Write timers to a tensorboard writer. Note that we only report maximum time across ranks to tensorboard. + + Args: + names (List[str]): Names of the timers to log. + writer (SummaryWriter): Tensorboard SummaryWriter object + iteration (int): Current iteration. + normalizer (float, optional): Normalizes the timer values by the factor. Defaults to 1.0. + reset (bool, optional): Whether to reset timer values after logging. 
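`write` reports only the per-timer maximum across ranks, one scalar per timer, which keeps the TensorBoard run list clean. A short continuation of the sketch above, reusing the same `timers` object (the log directory and normalizer are illustrative):

from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter(log_dir='runs/timing')
timers.write(
    ['forward-compute', 'optimizer-inner-step'],
    writer,
    iteration=100,
    normalizer=10.0,   # e.g. report a per-step average over a 10-iteration logging interval
)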
Defaults to True. + barrier (bool, optional): Whether to do a global barrier before time measurments. Defaults to False. """ # currently when using add_scalars, # torch.utils.add_scalars makes each timer its own run, which # polutes the runs list, so we just add each as a scalar assert normalizer > 0.0 - name_to_min_max_time = self._get_global_min_max_time( - names, reset, barrier, normalizer) + name_to_min_max_time = self._get_global_min_max_time(names, reset, barrier, normalizer) if writer is not None: for name in name_to_min_max_time: _, max_time = name_to_min_max_time[name] diff --git a/megatron/core/transformer/attention.py b/megatron/core/transformer/attention.py index c725c7f3a2..fba2c7af29 100644 --- a/megatron/core/transformer/attention.py +++ b/megatron/core/transformer/attention.py @@ -1,13 +1,15 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. - from abc import ABC, abstractmethod from dataclasses import dataclass +from importlib.metadata import version from typing import Union import torch +from pkg_resources import packaging from megatron.core import parallel_state, tensor_parallel from megatron.core.models.common.embeddings.rotary_pos_embedding import apply_rotary_pos_emb +from megatron.core.transformer.custom_layers.transformer_engine import SplitAlongDim from megatron.core.transformer.enums import AttnMaskType from megatron.core.transformer.identity_op import IdentityFuncOp, IdentityOp from megatron.core.transformer.module import MegatronModule @@ -17,8 +19,7 @@ from .enums import AttnMaskType from .transformer_config import TransformerConfig -from .utils import make_sharded_tensors_for_checkpoint - +from megatron.tensor_logging import log_tensor @dataclass class SelfAttentionSubmodules: @@ -57,6 +58,10 @@ def __init__( self.attn_mask_type = attn_mask_type self.attention_type = attention_type + from megatron import get_args + args = get_args() + self._debug_transformer=args.debug_transformer + # For normal attention without groups, num_query_groups == num_attention_heads, # so these two will be the same self.query_projection_size = self.config.kv_channels * self.config.num_attention_heads @@ -95,7 +100,14 @@ def __init__( ) def _checkpointed_attention_forward( - self, query, key, value, attention_mask, rotary_pos_emb=None, attn_mask_type=None + self, + query, + key, + value, + attention_mask, + rotary_pos_emb=None, + attn_mask_type=None, + packed_seq_params=None, ): """Forward method with selective activation checkpointing.""" @@ -107,7 +119,12 @@ def custom_forward(*inputs): attn_mask_type = inputs[5] attn_mask_type = AttnMaskType(attn_mask_type.item()) output_ = self.core_attention( - query, key, value, attention_mask, attn_mask_type=attn_mask_type + query, + key, + value, + attention_mask, + attn_mask_type=attn_mask_type, + packed_seq_params=packed_seq_params, ) return output_ @@ -115,7 +132,14 @@ def custom_forward(*inputs): attn_mask_type = self.attn_mask_type attn_mask_type = torch.tensor([attn_mask_type.value], dtype=torch.int) hidden_states = tensor_parallel.checkpoint( - custom_forward, False, query, key, value, attention_mask, rotary_pos_emb, attn_mask_type + custom_forward, + False, + query, + key, + value, + attention_mask, + rotary_pos_emb, + attn_mask_type, ) return hidden_states @@ -218,6 +242,7 @@ def forward( key_value_states=None, inference_params=None, rotary_pos_emb=None, + packed_seq_params=None, ): # hidden_states: [sq, b, h] @@ -239,13 +264,29 @@ def forward( inference_params, key, value, rotary_pos_emb ) + if packed_seq_params is 
not None: + query = query.squeeze(1) + key = key.squeeze(1) + value = value.squeeze(1) + # ================================================ # relative positional embedding (rotary embedding) # ================================================ if rotary_pos_emb is not None: q_pos_emb, k_pos_emb = rotary_pos_emb - query = apply_rotary_pos_emb(query, q_pos_emb) - key = apply_rotary_pos_emb(key, k_pos_emb) + + if packed_seq_params is not None: + cu_seqlens_q = packed_seq_params.cu_seqlens_q + cu_seqlens_kv = packed_seq_params.cu_seqlens_kv + else: + cu_seqlens_q = cu_seqlens_kv = None + query = apply_rotary_pos_emb( + query, q_pos_emb, config=self.config, cu_seqlens=cu_seqlens_q, + ) + key = apply_rotary_pos_emb( + key, k_pos_emb, config=self.config, cu_seqlens=cu_seqlens_kv, + ) + # TODO, can apply positional embedding to value_layer so it has # absolute positional embedding. # otherwise, only relative positional embedding takes effect @@ -255,21 +296,44 @@ def forward( # core attention computation # ================================== - if self.checkpoint_core_attention: + if self.checkpoint_core_attention and self.training: core_attn_out = self._checkpointed_attention_forward( - query, key, value, attention_mask, attn_mask_type=attn_mask_type + query, + key, + value, + attention_mask, + attn_mask_type=attn_mask_type, + packed_seq_params=packed_seq_params, ) else: core_attn_out = self.core_attention( - query, key, value, attention_mask, attn_mask_type=attn_mask_type + query, + key, + value, + attention_mask, + attn_mask_type=attn_mask_type, + packed_seq_params=packed_seq_params, ) + if packed_seq_params is not None: + # reshape to same output shape as unpacked case + # (t, np, hn) -> (t, b=1, h=np*hn) + # t is the pack size = sum (sq_i) + # note that batch is a dummy dimension in the packed case + core_attn_out = core_attn_out.reshape(core_attn_out.size(0), 1, -1) + # ================= # Output. 
[sq, b, h] # ================= output, bias = self.linear_proj(core_attn_out) + if self._debug_transformer: + log_tensor(f"Layer {self.layer_number} Query", query.transpose(0,1), level=self._debug_transformer) + log_tensor(f"Layer {self.layer_number} Key", key.transpose(0,1), level=self._debug_transformer) + log_tensor(f"Layer {self.layer_number} Value", value.transpose(0,1), level=self._debug_transformer) + log_tensor(f"Layer {self.layer_number} Attn context", core_attn_out.transpose(0,1), level=self._debug_transformer) + return output, bias @@ -302,7 +366,7 @@ def __init__( config=self.config, init_method=self.config.init_method, gather_output=False, - bias=self.config.add_bias_linear, + bias=self.config.add_bias_linear or self.config.add_qkv_bias, skip_bias_add=False, is_expert=False, tp_comm_buffer_name='qkv', @@ -325,40 +389,30 @@ def get_query_key_value_tensors(self, hidden_states, key_value_states=None): ) mixed_qkv = mixed_qkv.view(*new_tensor_shape) - # [sq, b, ng, (np/ng + 2) * hn] --> [sq, b, ng, np/ng * hn], [sq, b, ng, hn], [sq, b, ng, hn] - (query, key, value) = torch.split( - mixed_qkv, - [ - ( - self.num_attention_heads_per_partition - // self.num_query_groups_per_partition - * self.hidden_size_per_attention_head - ), - self.hidden_size_per_attention_head, - self.hidden_size_per_attention_head, - ], - dim=3, - ) + split_arg_list = [ + ( + self.num_attention_heads_per_partition + // self.num_query_groups_per_partition + * self.hidden_size_per_attention_head + ), + self.hidden_size_per_attention_head, + self.hidden_size_per_attention_head, + ] + + if SplitAlongDim is not None: + + # [sq, b, ng, (np/ng + 2) * hn] --> [sq, b, ng, np/ng * hn], [sq, b, ng, hn], [sq, b, ng, hn] + (query, key, value) = SplitAlongDim(mixed_qkv, 3, split_arg_list,) + else: + + # [sq, b, ng, (np/ng + 2) * hn] --> [sq, b, ng, np/ng * hn], [sq, b, ng, hn], [sq, b, ng, hn] + (query, key, value) = torch.split(mixed_qkv, split_arg_list, dim=3,) + # [sq, b, ng, np/ng * hn] -> [sq, b, np, hn] query = query.reshape(query.size(0), query.size(1), -1, self.hidden_size_per_attention_head) return query, key, value - def sharded_state_dict(self, prefix='', sharded_key_prefix=None, sharded_offsets=()): - sharded_key_prefix = prefix if sharded_key_prefix is None else sharded_key_prefix - sharded_state_dict = {} - for name, module in ( - ('linear_qkv', self.linear_qkv), - ('linear_proj', self.linear_proj), - ): - sub_sd = module.sharded_state_dict( - prefix=f'{prefix}{name}.', - sharded_key_prefix=f'{sharded_key_prefix}{name}.', - sharded_offsets=sharded_offsets, - ) - sharded_state_dict.update(sub_sd) - return sharded_state_dict - class CrossAttention(Attention): """Cross-attention layer class diff --git a/megatron/core/transformer/custom_layers/transformer_engine.py b/megatron/core/transformer/custom_layers/transformer_engine.py index d784184623..eb4b917227 100644 --- a/megatron/core/transformer/custom_layers/transformer_engine.py +++ b/megatron/core/transformer/custom_layers/transformer_engine.py @@ -1,3 +1,4 @@ +import dataclasses import os from importlib.metadata import version from typing import Callable @@ -8,6 +9,7 @@ from torch import Tensor from megatron.core import ModelParallelConfig +from megatron.core.packed_seq_params import PackedSeqParams from megatron.core.parallel_state import ( get_context_parallel_global_ranks, get_context_parallel_group, @@ -33,6 +35,10 @@ def _get_extra_te_kwargs(config: TransformerConfig): return extra_transformer_engine_kwargs +def condition_init_method(config, init_method): + 
return init_method if config.perform_initialization else (lambda w: None) + + class TENorm: """ A conditional wrapper to initialize an instance of Transformer-Engine's @@ -98,7 +104,7 @@ def __init__( # ourselves. This way our forward always returns two values # and we don't have to deal with the zero length Tensor. self.te_return_bias = skip_bias_add and bias - + self.is_first_microbatch = True if skip_weight_param_allocation: raise ValueError( 'Transformer Engine linear layers do not support skip_weight_param_allocation' @@ -110,7 +116,9 @@ def __init__( if te_version >= packaging.version.Version("0.8.0"): if self.config.tp_comm_overlap: extra_kwargs["ub_split_ag"] = self.config.tp_comm_split_ag + extra_kwargs["ub_atomic_gemm_ag"] = self.config.tp_comm_atomic_ag extra_kwargs["ub_split_rs"] = self.config.tp_comm_split_rs + extra_kwargs["ub_atomic_gemm_rs"] = self.config.tp_comm_atomic_rs if te_version > packaging.version.Version("1.0.0"): assert ( tp_comm_buffer_name is not None @@ -125,7 +133,7 @@ def __init__( tp_group=get_tensor_model_parallel_group(check_initialized=False), tp_size=self.config.tensor_model_parallel_size, get_rng_state_tracker=get_cuda_rng_tracker, - init_method=init_method, + init_method=condition_init_method(config, init_method), bias=bias, return_bias=self.te_return_bias, parallel_mode=parallel_mode, @@ -133,7 +141,8 @@ def __init__( ) def forward(self, x): - out = super().forward(x) + out = super().forward(x, is_first_microbatch=self.is_first_microbatch) + self.is_first_microbatch = False # TE only returns a tuple when return_bias is True, otherwise # it returns a single Tensor, we always want to return two @@ -182,7 +191,7 @@ def __init__( # ourselves. This way our forward always returns two values # and we don't have to deal with the zero length Tensor. 
self.te_return_bias = skip_bias_add and bias - + self.is_first_microbatch = True extra_kwargs = _get_extra_te_kwargs(config) # Only Transformer-Engine version >= 0.11.0 supports `RMSNorm` @@ -198,6 +207,7 @@ def __init__( if self.config.tp_comm_overlap: extra_kwargs["ub_bulk_wgrad"] = self.config.tp_comm_bulk_wgrad extra_kwargs["ub_bulk_dgrad"] = self.config.tp_comm_bulk_dgrad + extra_kwargs["ub_atomic_gemm_ag"] = self.config.tp_comm_atomic_ag extra_kwargs["ub_split_ag"] = self.config.tp_comm_split_ag if te_version > packaging.version.Version("1.0.0"): assert ( @@ -214,7 +224,7 @@ def __init__( tp_group=get_tensor_model_parallel_group(check_initialized=False), tp_size=self.config.tensor_model_parallel_size, get_rng_state_tracker=get_cuda_rng_tracker, - init_method=init_method, + init_method=condition_init_method(config, init_method), bias=bias, return_bias=self.te_return_bias, parallel_mode="column", @@ -224,7 +234,8 @@ def __init__( ) def forward(self, x): - out = super().forward(x) + out = super().forward(x, is_first_microbatch=self.is_first_microbatch) + self.is_first_microbatch = False # TE only returns a tuple when return_bias is True, otherwise # it returns a single Tensor, we always want to return two @@ -233,11 +244,11 @@ def forward(self, x): return out return out, None - def sharded_state_dict(self, prefix='', sharded_key_prefix=None, sharded_offsets=()): + def sharded_state_dict(self, prefix='', sharded_offsets=()): """ Sharding along axis 0, bias sharded """ state_dict = self.state_dict(prefix='', keep_vars=True) return make_sharded_tensors_for_checkpoint( - state_dict, prefix, sharded_key_prefix, {'weight': 0, 'bias': 0}, sharded_offsets + state_dict, prefix, {'weight': 0, 'bias': 0}, sharded_offsets ) @@ -272,18 +283,18 @@ def __init__( output_size=output_size, parallel_mode="column", config=config, - init_method=init_method, + init_method=condition_init_method(config, init_method), bias=bias, skip_bias_add=skip_bias_add, skip_weight_param_allocation=skip_weight_param_allocation, tp_comm_buffer_name=tp_comm_buffer_name, ) - def sharded_state_dict(self, prefix='', sharded_key_prefix=None, sharded_offsets=()): + def sharded_state_dict(self, prefix='', sharded_offsets=()): """ Sharding along axis 0, bias sharded """ state_dict = self.state_dict(prefix='', keep_vars=True) return make_sharded_tensors_for_checkpoint( - state_dict, prefix, sharded_key_prefix, {'weight': 0, 'bias': 0}, sharded_offsets + state_dict, prefix, {'weight': 0, 'bias': 0}, sharded_offsets ) @@ -319,18 +330,18 @@ def __init__( output_size=output_size, parallel_mode="row", config=config, - init_method=init_method, + init_method=condition_init_method(config, init_method), bias=bias, skip_bias_add=skip_bias_add, skip_weight_param_allocation=False, # We don't currently use this for row parallel layers tp_comm_buffer_name=tp_comm_buffer_name, ) - def sharded_state_dict(self, prefix='', sharded_key_prefix=None, sharded_offsets=()): + def sharded_state_dict(self, prefix='', sharded_offsets=()): """ Sharding along axis 1, bias not sharded """ state_dict = self.state_dict(prefix='', keep_vars=True) return make_sharded_tensors_for_checkpoint( - state_dict, prefix, sharded_key_prefix, {'weight': 1}, sharded_offsets + state_dict, prefix, {'weight': 1}, sharded_offsets ) @@ -356,6 +367,7 @@ def __init__( ): self.config = config self.te_forward_mask_type = False + self.qkv_format: str = 'sbhd' if self.config.apply_query_key_layer_scaling != bool( int(os.getenv('NVTE_APPLY_QK_LAYER_SCALING', '0')) @@ -400,6 +412,13 @@ def 
__init__( self.config.context_parallel_size == 1 ), "Only Transformer-Engine version >= 1.0.0 supports context parallelism!" + if config.window_size is not None: + # Check version + assert te_version >= packaging.version.Version( + "1.2.0" + ), f"Transformer-Engine version ({str(te_version)}) must be >= 1.2.0 to support sliding window attention." + extra_kwargs['window_size'] = config.window_size + super().__init__( num_attention_heads=self.config.num_attention_heads, kv_channels=self.config.kv_channels, @@ -422,10 +441,59 @@ def forward( value: Tensor, attention_mask: Tensor, attn_mask_type: AttnMaskType, + packed_seq_params: PackedSeqParams = None, ): + packed_seq_kwargs = ( + dataclasses.asdict(packed_seq_params) if packed_seq_params is not None else {} + ) + te_version = packaging.version.Version(version("transformer-engine")) + # overwrite self.qkv_format depending on self.config.apply_rope_fusion, which can be set after init + if self.config.apply_rope_fusion and te_version > packaging.version.Version("0.13.0"): + self.qkv_format = 'bshd' + + qkv_format = packed_seq_kwargs.get('qkv_format', self.qkv_format) + + if te_version < packaging.version.Version("1.3.0"): + # TE 1.3.0 introduces precomputing max_seqlen to remove unnecessary kernels and D2H copies (#555) + # These two arguments did not exist prior to 1.3.0 + packed_seq_kwargs.pop("max_seqlen_q", None) + packed_seq_kwargs.pop("max_seqlen_kv", None) + + if self.config.apply_rope_fusion and qkv_format == 'bshd': + query, key, value = [x.transpose(0, 1).contiguous() for x in (query, key, value)] + if self.te_forward_mask_type: - return super().forward( - query, key, value, attention_mask, attn_mask_type=attn_mask_type.name + core_attn_out = super().forward( + query, + key, + value, + attention_mask, + attn_mask_type=attn_mask_type.name, + **packed_seq_kwargs, ) else: - return super().forward(query, key, value, attention_mask) + core_attn_out = super().forward(query, key, value, attention_mask, **packed_seq_kwargs,) + + if self.config.apply_rope_fusion and qkv_format == 'bshd': + return core_attn_out.transpose(0, 1) + else: + return core_attn_out + + +try: + + from transformer_engine.pytorch.attention import _SplitAlongDim + + SplitAlongDim = _SplitAlongDim.apply + +except ImportError: + + SplitAlongDim = None + +try: + + from transformer_engine.pytorch.cpu_offload import get_cpu_offload_context + +except ImportError: + + get_cpu_offload_context = None diff --git a/megatron/core/transformer/dot_product_attention.py b/megatron/core/transformer/dot_product_attention.py index 7eab478bd0..967d0ce8d8 100644 --- a/megatron/core/transformer/dot_product_attention.py +++ b/megatron/core/transformer/dot_product_attention.py @@ -8,6 +8,7 @@ from megatron.core import parallel_state, tensor_parallel from megatron.core.fusions.fused_softmax import FusedScaleMaskSoftmax +from megatron.core.packed_seq_params import PackedSeqParams from megatron.core.transformer.enums import AttnMaskType from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.transformer_config import TransformerConfig @@ -46,6 +47,10 @@ def __init__( self.config.context_parallel_size == 1 ), "Context parallelism is only supported by TEDotProductAttention!" + assert ( + self.config.window_size is None + ), "Sliding Window Attention is only supported by TEDotProductAttention!" 
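+        # Note (descriptive): the asserts above are capability guards rather than runtime logic.
+        # Context parallelism and sliding window attention (and, further down in forward(),
+        # packed sequences) are only implemented by the Transformer-Engine backed
+        # TEDotProductAttention, so configs that enable those features must select that
+        # implementation instead of this local DotProductAttention.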
+ self.layer_number = max(1, layer_number) self.attn_mask_type = attn_mask_type self.attention_type = attention_type # unused for now @@ -89,7 +94,12 @@ def forward( value: Tensor, attention_mask: Tensor, attn_mask_type: AttnMaskType = None, + packed_seq_params: PackedSeqParams = None, ): + assert packed_seq_params is None, ( + "Packed sequence is not supported by DotProductAttention." + "Please use TEDotProductAttention instead." + ) # =================================== # Raw attention scores. [b, n/p, s, s] diff --git a/megatron/core/transformer/mlp.py b/megatron/core/transformer/mlp.py index 8f5575b724..a7df9caa45 100644 --- a/megatron/core/transformer/mlp.py +++ b/megatron/core/transformer/mlp.py @@ -8,8 +8,13 @@ from megatron.core import parallel_state from megatron.core.dist_checkpointing import ShardedTensor -from megatron.core.dist_checkpointing.mapping import ShardedTensorFactory +from megatron.core.dist_checkpointing.mapping import ( + ReplicaId, + ShardedStateDict, + ShardedTensorFactory, +) from megatron.core.fusions.fused_bias_gelu import bias_gelu_impl +from megatron.core.fusions.fused_bias_swiglu import bias_swiglu_impl from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.spec_utils import ModuleSpec, build_module from megatron.core.transformer.transformer_config import TransformerConfig @@ -64,15 +69,7 @@ def __init__( tp_comm_buffer_name='fc1', ) - if self.config.gated_linear_unit: - - def glu(x): - x = torch.chunk(x, 2, dim=-1) - return self.config.activation_func(x[0]) * x[1] - - self.activation_func = glu - else: - self.activation_func = self.config.activation_func + self.activation_func = self.config.activation_func self.linear_fc2 = build_module( submodules.linear_fc2, @@ -92,33 +89,40 @@ def forward(self, hidden_states): # [s, b, 4 * h/p] intermediate_parallel, bias_parallel = self.linear_fc1(hidden_states) - if self.config.bias_gelu_fusion: - assert self.config.add_bias_linear is True - assert self.activation_func == F.gelu - intermediate_parallel = bias_gelu_impl(intermediate_parallel, bias_parallel) + if self.config.bias_activation_fusion: + if self.activation_func == F.gelu: + assert self.config.add_bias_linear is True + intermediate_parallel = bias_gelu_impl(intermediate_parallel, bias_parallel) + elif self.activation_func == F.silu and self.config.gated_linear_unit: + intermediate_parallel = bias_swiglu_impl(intermediate_parallel, bias_parallel) + else: + raise ValueError("Only support fusion of gelu and swiglu") else: if bias_parallel is not None: intermediate_parallel = intermediate_parallel + bias_parallel - intermediate_parallel = self.activation_func(intermediate_parallel) + if self.config.gated_linear_unit: + + def glu(x): + x = torch.chunk(x, 2, dim=-1) + return self.config.activation_func(x[0]) * x[1] + + intermediate_parallel = glu(intermediate_parallel) + else: + intermediate_parallel = self.activation_func(intermediate_parallel) # [s, b, h] output, output_bias = self.linear_fc2(intermediate_parallel) return output, output_bias - def sharded_state_dict(self, prefix='', sharded_key_prefix=None, sharded_offsets=()): - sharded_key_prefix = prefix if sharded_key_prefix is None else sharded_key_prefix + def sharded_state_dict(self, prefix: str = '', sharded_offsets: tuple = ()) -> ShardedStateDict: sharded_state_dict = {} for name, module in self._modules.items(): if name == 'linear_fc1' and self.config.gated_linear_unit: - sub_sd = self._sharded_state_dict_for_glu( - name, module, prefix, sharded_key_prefix, 
sharded_offsets - ) + sub_sd = self._sharded_state_dict_for_glu(name, module, prefix, sharded_offsets) else: sub_sd = module.sharded_state_dict( - prefix=f'{prefix}{name}.', - sharded_key_prefix=f'{sharded_key_prefix}{name}.', - sharded_offsets=sharded_offsets, + prefix=f'{prefix}{name}.', sharded_offsets=sharded_offsets, ) sharded_state_dict.update(sub_sd) return sharded_state_dict @@ -128,14 +132,11 @@ def _sharded_state_dict_for_glu( module_name: str, module: torch.nn.Module, prefix: str, - sharded_key_prefix: str, sharded_offsets: Tuple[Tuple[int, int, int]], ): assert module_name == 'linear_fc1', module_name sharded_state_dict = module.sharded_state_dict( - prefix=f'{prefix}{module_name}.', - sharded_key_prefix=f'{sharded_key_prefix}{module_name}.', - sharded_offsets=sharded_offsets, + prefix=f'{prefix}{module_name}.', sharded_offsets=sharded_offsets, ) weight_key = f'{prefix}{module_name}.weight' prev_sh_ten = sharded_state_dict[weight_key] @@ -147,10 +148,9 @@ def _sharded_state_dict_for_glu( tp_size = parallel_state.get_tensor_model_parallel_world_size() tp_shard_axis = 0 - replica_id = prev_sh_ten.replica_id prepend_axis_num = len(sharded_offsets) - def sh_ten_build_fn(key: str, t: torch.Tensor): + def sh_ten_build_fn(key: str, t: torch.Tensor, replica_id: ReplicaId): offset_w = (tp_shard_axis + prepend_axis_num, tp_rank, tp_size * 2) offset_v = (tp_shard_axis + prepend_axis_num, tp_size + tp_rank, tp_size * 2) with torch.no_grad(): @@ -162,7 +162,7 @@ def sh_ten_build_fn(key: str, t: torch.Tensor): *sharded_offsets, offset_w, replica_id=replica_id, - prepend_axis_num=1, + prepend_axis_num=prepend_axis_num, ), ShardedTensor.from_rank_offsets( key, @@ -170,7 +170,7 @@ def sh_ten_build_fn(key: str, t: torch.Tensor): *sharded_offsets, offset_v, replica_id=replica_id, - prepend_axis_num=1, + prepend_axis_num=prepend_axis_num, ), ] @@ -179,6 +179,10 @@ def sh_ten_merge_fn(sub_state_dict): return torch.cat(sub_state_dict) sharded_state_dict[weight_key] = ShardedTensorFactory( - prev_sh_ten.key, prev_sh_ten.data, sh_ten_build_fn, sh_ten_merge_fn + prev_sh_ten.key, + prev_sh_ten.data, + sh_ten_build_fn, + sh_ten_merge_fn, + prev_sh_ten.replica_id, ) return sharded_state_dict diff --git a/megatron/core/transformer/module.py b/megatron/core/transformer/module.py index d20074aa07..4a7301376a 100644 --- a/megatron/core/transformer/module.py +++ b/megatron/core/transformer/module.py @@ -1,12 +1,18 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. """Megatron Module.""" +from typing import Tuple import torch from torch.autograd import Variable from torch.nn.parameter import Parameter from megatron.core import parallel_state +from megatron.core.dist_checkpointing.mapping import ShardedStateDict from megatron.core.transformer.transformer_config import TransformerConfig +from megatron.core.transformer.utils import ( + make_sharded_tensors_for_checkpoint, + sharded_state_dict_default, +) _FLOAT_TYPES = (torch.FloatTensor, torch.cuda.FloatTensor) _HALF_TYPES = (torch.HalfTensor, torch.cuda.HalfTensor) @@ -46,18 +52,43 @@ def state_dict_for_save_checkpoint(self, prefix: str = '', keep_vars: bool = Fal return self.state_dict(prefix=prefix, keep_vars=keep_vars) - def sharded_state_dict(self, prefix: str = ''): - """Override sharded state dict with Dist Checkpointing. + def sharded_state_dict( + self, prefix: str = '', sharded_offsets: Tuple[Tuple[int, int, int]] = () + ) -> ShardedStateDict: + """Default implementation for sharded state dict for distributed checkpointing. 
- Override sharded_state_dict when using distributed checkpointing. keep_vars must always be set to True so that optimizer states can be sharded. + General definition of sharded_state_dict simply calls `sharded_state_dict_default` + (which call sharded_state_dict method if possible or a default implementation otherwise) + recursively on all submodules. Args: - prefix (str, optional): _description_. Defaults to ''. + prefix (str): prefix for the state dict keys + sharded_offsets (Tuple[Tuple[int, int, int]], optional): sharding already + applied (e.g. PP related) by sup-modules. Passed along to ShardedTensor Returns: - _type_: _description_ + dict: dictionary of state dict keys mapped to ShardedTensors + """ + sharded_state_dict = {} + # Save parameters + self._save_to_state_dict(sharded_state_dict, '', keep_vars=True) + sharded_state_dict = make_sharded_tensors_for_checkpoint( + sharded_state_dict, prefix, sharded_offsets=sharded_offsets + ) + # Recurse into submodules + for name, module in self.named_children(): + sharded_state_dict.update( + sharded_state_dict_default(module, f'{prefix}{name}.', sharded_offsets) + ) + return sharded_state_dict + + def set_is_first_microbatch(self): + """Sets the is_first_microbatch flag if it exists. When this flag is set, TE modules will update their fp8 parameter cache. + """ - return self.state_dict(prefix=prefix, keep_vars=True) + for m in self.modules(): + if hasattr(m, "is_first_microbatch"): + m.is_first_microbatch = True def conversion_helper(val, conversion): @@ -146,12 +177,9 @@ def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False): """Retrieve state_dict from the module being wrapped.""" return self.module.state_dict_for_save_checkpoint(prefix=prefix, keep_vars=keep_vars) - def sharded_state_dict(self, prefix=''): - """Retrieve state_dict from the module being wrapped. - - When using distributed checkpointing, keep_vars must always be set to True. - """ - return self.module.sharded_state_dict(prefix=prefix) + def sharded_state_dict(self, prefix='', *args, **kwargs): + """Retrieve sharded_state_dict from the module being wrapped.""" + return self.module.sharded_state_dict(prefix, *args, **kwargs) def load_state_dict(self, state_dict, strict=True): self.module.load_state_dict(state_dict, strict=strict) diff --git a/megatron/core/transformer/moe/README.md b/megatron/core/transformer/moe/README.md new file mode 100644 index 0000000000..8e53c723e5 --- /dev/null +++ b/megatron/core/transformer/moe/README.md @@ -0,0 +1,194 @@ +# Megatron Core MoE Key Features + +### Parallelism + +- **Expert Parallel** + - A specific method of parallelism for MoE models, where experts are partitioned onto different workers and each worker processes a different batch of training samples, each worker process one or more experts for each MoE layer. +- **3D Parallel**: Data Parallel , Tensor Parallel, Pipeline Parallel, Sequence Parallel + - Note: When using MoE with expert parallelism and tensor parallelism, sequence parallelism must be used. +- **Richer parallel mappings**: EP can be combined with DP/TP/PP/SP for handling larger MoE variants. +- **Distributed optimizer.** + +### Router and Load Balancing + +- Router type: + - Top-K MLP router + - Expert Choice router (coming soon) +- Load Balancing algorithms: + - Sinkhorn (S-BASE) + - Aux loss / Load balancing loss + +### Performance Optimizations + +- GroupedGEMM when num local experts > 1 + - Supported dtype: bf16 + +### Token Dispatch Mechanism + +- Dropless / No token drop. +- Token drop. 
(coming soon) + +### Ease of use +- Checkpoint converter (coming soon) + +## Upcoming features + +- Enhanced cutlass GroupedGEMM kernels + - Reduced host-device syncs. + - More supported dtype: fp32/bf16/fp16 + - Kernel heuristics tuned for A100/A10/L40S + - BWD cutlass GroupedGEMM kernels supported +- Token permutation / unpermutation fusion +- Fused Sinkhorn Kernel +- Context Parallel with MoE +- FP8 training support +- Enable ’--tp-comm-overlap‘ for MoE +- Distributed optimizer for MoE params. + +# User Guide + +### MoE Related Arguments + +| Item | Description | +| --- | --- | +| num-experts | Number of Experts in MoE (None means no MoE) | +| expert-model-parallel-size | Degree of expert model parallelism. | +| moe-grouped-gemm | When there are multiple experts per rank, compress multiple local gemms into a single kernel launch to improve the utilization and performance by leveraging the Grouped GEMM feature introduced since CUTLASS 2.8 | +| moe-router-load-balancing-type | Determines the load balancing strategy for the router. "aux_loss" corresponds to the load balancing loss used in GShard and SwitchTransformer, "sinkhorn" corresponds to the balancing algorithm used in S-BASE, and "none" implies no load balancing. The default is "aux_loss". | +| moe-router-topk | Number of experts to route to for each token. The default is 2. | +| moe-aux-loss-coeff | Scaling coefficient for the aux loss: a starting value of 1e-2 is recommended. | +| moe-z-loss-coeff | Scaling coefficient for the z-loss: a starting value of 1e-3 is recommended. | +| moe-input-jitter-eps | Add noise to the input tensor by applying jitter with a specified epsilon value. | +| moe-token-dropping | This feature involves selectively dropping and padding tokens for each expert to achieve a specified capacity, similar to GShard, Switch-Transformer, and DeepSpeed-MoE. Note: Currently unsupported. | + +### Example + +To train a top-2 MoE model with an auxiliary loss, include the following arguments: + +```python +--num-experts 8 +--expert-model-parallel-size 8 +--moe-grouped-gemm +--moe-router-load-balancing-type aux_loss # options: aux_loss, sinkhorn, none. Default is aux_loss. +--moe-router-topk 2 +--moe-aux-loss-coeff 1e-2 +--use-distributed-optimizer +``` +## A detailed MoE script: +
+Click here. + +```python +#!/bin/bash + +# Runs Mixtral 8x7B model on 16 A100 GPUs + +export CUDA_DEVICE_MAX_CONNECTIONS=1 + +GPUS_PER_NODE=8 +# Change for multinode config +MASTER_ADDR=${MASTER_ADDR:-"localhost"} +MASTER_PORT=${MASTER_PORT:-"6000"} +NNODES=${NNODES:-"1"} +NODE_RANK=${RANK:-"0"} +WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES)) + +CHECKPOINT_PATH=$1 +TOKENIZER_MODEL=$2 +DATA_PATH=$3 + +DISTRIBUTED_ARGS=( + --nproc_per_node $GPUS_PER_NODE + --nnodes $NNODES + --node_rank $NODE_RANK + --master_addr $MASTER_ADDR + --master_port $MASTER_PORT +) + +MODEL_ARGS=( + --use-mcore-models + --disable-bias-linear + --seq-length 2048 + --max-position-embeddings 32768 + --num-layers 32 + --hidden-size 4096 + --ffn-hidden-size 14336 + --num-attention-heads 32 + --init-method-std 0.01 + --attention-dropout 0.0 + --hidden-dropout 0.0 + --normalization RMSNorm + --position-embedding-type rope + --swiglu + --untie-embeddings-and-output-weights + --group-query-attention + --num-query-groups 8 + --no-masked-softmax-fusion + --no-position-embedding +) + +MOE_ARGS=( + --num-experts 8 + --expert-model-parallel-size 4 + --moe-router-load-balancing-type aux_loss # options: aux_loss, sinkhorn, None. Default is aux_loss. + --moe-router-topk 2 + --moe-aux-loss-coeff 1e-2 + --moe-grouped-gemm +) + +DATA_ARGS=( + --tokenizer-type Llama2Tokenizer + --tokenizer-model ${TOKENIZER_MODEL} + --data-path $DATA_PATH + --split 99990,8,2 +) + +TRAINING_ARGS=( + --micro-batch-size 1 + --global-batch-size 128 + --lr 1e-4 + --train-iters 500000 + --lr-decay-iters 320000 + --lr-decay-style cosine + --min-lr 1.0e-5 + --weight-decay 0.1 + --lr-warmup-iters 500 + --clip-grad 1.0 + --bf16 +) + +MODEL_PARALLEL_ARGS=( + --tensor-model-parallel-size 4 + --pipeline-model-parallel-size 1 + --sequence-parallel + --use-distributed-optimizer +) + +LOGGING_ARGS=( + --log-interval 1 \ + --save-interval 10000 \ + --eval-interval 1000 \ + --eval-iters 10 \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --tensorboard-dir "${CHECKPOINT_PATH}/tensorboard" \ + --no-load-optim \ + --no-load-rng +) + +if [ -n "${WANDB_API_KEY}" ]; then + LOGGING_ARGS+=( + --wandb-project ${WANDB_PROJECT:-"Mixtral-Finetuning"} + --wandb-exp-name ${WANDB_NAME:-"Mixtral_8x7B"} + ) +fi + +torchrun ${DISTRIBUTED_ARGS[@]} pretrain_gpt.py \ + ${MODEL_ARGS[@]} \ + ${MOE_ARGS[@]} \ + ${DATA_ARGS[@]} \ + ${TRAINING_ARGS[@]} \ + ${MODEL_PARALLEL_ARGS[@]} \ + ${LOGGING_ARGS[@]} +``` +
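+
+### Aux loss example (illustrative)
+
+The snippet below is a minimal, self-contained sketch of the Switch-style load balancing loss that
+`--moe-aux-loss-coeff` scales; it mirrors `switch_load_balancing_loss_func` in
+`megatron/core/transformer/moe/moe_utils.py`, with the function and variable names here chosen only
+for illustration.
+
+```python
+import torch
+
+
+def aux_loss_sketch(router_probs: torch.Tensor, expert_mask: torch.Tensor, coeff: float):
+    # router_probs: [num_tokens, num_experts], softmax over all experts
+    # expert_mask:  [num_tokens, num_experts], one-hot of the top-k selections summed over k
+    num_experts = expert_mask.size(-1)
+    top_k = expert_mask[0].count_nonzero()
+    mean_probs = router_probs.mean(dim=0)  # average router mass per expert
+    mean_selection = expert_mask.float().mean(dim=0) / top_k  # fraction of tokens routed per expert
+    return torch.sum(mean_probs * mean_selection) * num_experts * coeff
+
+
+probs = torch.softmax(torch.randn(8, 4), dim=-1)
+mask = torch.nn.functional.one_hot(probs.topk(2, dim=-1).indices, num_classes=4).sum(dim=1)
+print(aux_loss_sketch(probs, mask, coeff=1e-2))
+```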
\ No newline at end of file diff --git a/megatron/core/transformer/moe/__init__.py b/megatron/core/transformer/moe/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/megatron/core/transformer/moe/experts.py b/megatron/core/transformer/moe/experts.py new file mode 100644 index 0000000000..1f0ea46cb5 --- /dev/null +++ b/megatron/core/transformer/moe/experts.py @@ -0,0 +1,235 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + +from typing import Tuple + +import numpy as np +import torch +from torch.nn.parameter import Parameter + +from megatron.core import parallel_state +from megatron.core.dist_checkpointing.mapping import ShardedStateDict +from megatron.core.dist_checkpointing.utils import replace_prefix_for_sharding +from megatron.core.tensor_parallel.layers import ( + _initialize_affine_weight_cpu, + _initialize_affine_weight_gpu, +) +from megatron.core.tensor_parallel.utils import divide +from megatron.core.transformer.mlp import MLP, MLPSubmodules +from megatron.core.transformer.module import MegatronModule +from megatron.core.transformer.moe import grouped_gemm_util as gg +from megatron.core.transformer.transformer_config import TransformerConfig + + +class GroupedMLP(MegatronModule): + """An efficient implementation of the Experts layer using CUTLASS GroupedGEMM. + + This class is designed to execute multiple experts in parallel, thereby maximizing computational efficiency. + """ + + def __init__(self, num_local_experts: int, config: TransformerConfig): + super().__init__(config=config) + self.config: TransformerConfig = config + self.num_local_experts = num_local_experts + gg.assert_grouped_gemm_is_available() + assert ( + config.add_bias_linear == False + ), "bias in the expert layer is not supported in Grouped GEMM yet, please set '--disable-bias-linear' instead." + + self.expert_parallel = config.expert_model_parallel_size > 1 + if self.config.gated_linear_unit: + + def glu(x): + x = torch.chunk(x, 2, dim=-1) + return self.config.activation_func(x[0]) * x[1] + + self.activation_func = glu + else: + self.activation_func = self.config.activation_func + + # How many feature each rank holds for fc1 and fc2, respectively. + tp_size = parallel_state.get_tensor_model_parallel_world_size() + fc1_output_size = self.config.ffn_hidden_size * self.num_local_experts + if config.gated_linear_unit: + # Project to 4h. If using swiglu double the output width, + # see https://arxiv.org/pdf/2002.05202.pdf + fc1_output_size *= 2 + fc1_output_size_per_partition = divide(fc1_output_size, tp_size) + + fc2_input_size = self.config.ffn_hidden_size * self.num_local_experts + fc2_input_size_per_partition = divide(fc2_input_size, tp_size) + + # Note: The current kernel implementations of grouped_gemm + # does not support transposition with CUTLASS grouped GEMM + # (https://github.com/fanshiqing/grouped_gemm/blob/main/csrc/grouped_gemm.cu#L355-L358) + # and as a result we avoid allocate the transpose of weights. + # Initialize weight. 
+ if config.use_cpu_initialization: + self.weight1 = Parameter( + torch.empty( + self.config.hidden_size, + fc1_output_size_per_partition, + dtype=config.params_dtype, + ) + ) + self.weight2 = Parameter( + torch.empty( + fc2_input_size_per_partition, + self.config.hidden_size, + dtype=config.params_dtype, + ) + ) + if config.perform_initialization: + _initialize_affine_weight_cpu( + self.weight1, + self.config.hidden_size, + fc1_output_size, + fc1_output_size_per_partition, + partition_dim=1, + init_method=config.init_method, + params_dtype=config.params_dtype, + ) + _initialize_affine_weight_cpu( + self.weight2, + fc2_input_size, + self.config.hidden_size, + fc2_input_size_per_partition, + partition_dim=0, + init_method=config.output_layer_init_method, + params_dtype=config.params_dtype, + ) + else: + self.weight1 = Parameter( + torch.empty( + self.config.hidden_size, + fc1_output_size_per_partition, + device=torch.cuda.current_device(), + dtype=config.params_dtype, + ) + ) + self.weight2 = Parameter( + torch.empty( + fc2_input_size_per_partition, + self.config.hidden_size, + device=torch.cuda.current_device(), + dtype=config.params_dtype, + ) + ) + if config.perform_initialization: + _initialize_affine_weight_gpu( + self.weight1, + config.init_method, + partition_dim=1, + expert_parallel=self.expert_parallel, + ) + _initialize_affine_weight_gpu( + self.weight2, + config.output_layer_init_method, + partition_dim=0, + expert_parallel=self.expert_parallel, + ) + setattr(self.weight1, 'allreduce', not self.expert_parallel) + setattr(self.weight2, 'allreduce', not self.expert_parallel) + + def forward(self, permuted_local_hidden_states, tokens_per_expert): + if permuted_local_hidden_states.nelement() != 0: + # Reshape the weights for the grouped GEMMs. + w1 = self.weight1.view(self.num_local_experts, self.config.hidden_size, -1) + w2 = self.weight2.view(self.num_local_experts, -1, self.config.hidden_size) + + fc1_output = gg.ops.gmm( + permuted_local_hidden_states, w1, tokens_per_expert, trans_b=False + ) + + intermediate_parallel = self.activation_func(fc1_output) + + fc2_output = gg.ops.gmm(intermediate_parallel, w2, tokens_per_expert, trans_b=False) + else: + # None token is allocated for local experts. + assert torch.count_nonzero(tokens_per_expert) == 0 + fc2_output = permuted_local_hidden_states + + return fc2_output, None + + def sharded_state_dict(self, prefix='', sharded_offsets=()): + raise NotImplementedError( + 'Currently distributed checkpointing is not supported for GroupedMLP' + ) + + +class SequentialMLP(MegatronModule): + """An implementation of the Experts layer using a sequence of MLP layers. + + This class executes each expert sequentially. 
+ """ + + def __init__(self, num_local_experts, config: TransformerConfig, submodules: MLPSubmodules): + super().__init__(config=config) + self.add_bias = config.add_bias_linear + self.num_local_experts = num_local_experts + self.local_experts = torch.nn.ModuleList() + for _ in range(self.num_local_experts): + expert = MLP(self.config, submodules, is_expert=True) + self.local_experts.append(expert) + + def forward(self, permuted_local_hidden_states, tokens_per_expert): + output_local = torch.zeros_like(permuted_local_hidden_states) + output_bias_local = None + if self.add_bias: + output_bias_local = torch.zeros_like(permuted_local_hidden_states) + + cumsum_num_tokens = torch.cumsum(tokens_per_expert, dim=0) + # Insert zero at the begining for offset index's convenience + zero_tensor = torch.zeros(1, dtype=torch.long) + cumsum_num_tokens = torch.cat((zero_tensor, cumsum_num_tokens)) + for expert_num, expert in enumerate(self.local_experts): + start = cumsum_num_tokens[expert_num] + end = cumsum_num_tokens[expert_num + 1] + hidden = permuted_local_hidden_states[start:end] + output, output_bias = expert(hidden) + + output_local[start:end] = output + if self.add_bias: + output_bias = output_bias.expand_as(output) + output_bias_local[start:end, :] = output_bias + + return output_local, output_bias_local + + def sharded_state_dict(self, prefix='', sharded_offsets=()): + """ Maps local expert to global experts. """ + sharded_state_dict = {} + num_global_experts = ( + parallel_state.get_expert_model_parallel_world_size() * self.num_local_experts + ) + local_expert_indices_offset = ( + parallel_state.get_expert_model_parallel_rank() * self.num_local_experts + ) + + expert_sharded_prefix = f'{prefix}experts.' + for expert_local_idx, expert in enumerate(self.local_experts): + expert_global_idx = local_expert_indices_offset + expert_local_idx + expert_state_dict_prefix = f'{prefix}local_experts.{expert_local_idx}.' + expert_sharded_offsets = ( + *sharded_offsets, + (len(sharded_offsets), expert_global_idx, num_global_experts), + ) + + expert_state_dict = expert.sharded_state_dict( + expert_state_dict_prefix, expert_sharded_offsets + ) + # Remove expert layers indexing from sharded keys + replace_prefix_for_sharding( + expert_state_dict, expert_state_dict_prefix, expert_sharded_prefix + ) + # Adjust replica ids - replication along DP modulo EP + for k, sh_ten in expert_state_dict.items(): + replica_id = sh_ten.replica_id + assert ( + len(replica_id) == 3 + ), f'Expected replica_id for {k} to be in (PP, TP, DP) format, got: {replica_id}' + sh_ten.replica_id = ( + *replica_id[:2], + parallel_state.get_data_modulo_expert_parallel_rank(), + ) + + sharded_state_dict.update(expert_state_dict) + return sharded_state_dict diff --git a/megatron/core/transformer/moe/grouped_gemm_util.py b/megatron/core/transformer/moe/grouped_gemm_util.py new file mode 100644 index 0000000000..07c576c24b --- /dev/null +++ b/megatron/core/transformer/moe/grouped_gemm_util.py @@ -0,0 +1,20 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + +try: + import grouped_gemm +except ImportError: + grouped_gemm = None + + +def grouped_gemm_is_available(): + return grouped_gemm is not None + + +def assert_grouped_gemm_is_available(): + assert grouped_gemm_is_available(), ( + "Grouped GEMM is not available. Please run " + "`pip install git+https://github.com/fanshiqing/grouped_gemm@main`." 
+ ) + + +ops = grouped_gemm.ops if grouped_gemm_is_available() else None diff --git a/megatron/core/transformer/moe/moe_layer.py b/megatron/core/transformer/moe/moe_layer.py new file mode 100644 index 0000000000..6b10f6c4b0 --- /dev/null +++ b/megatron/core/transformer/moe/moe_layer.py @@ -0,0 +1,80 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + +from abc import ABC, abstractmethod + +import torch + +from megatron.core import parallel_state +from megatron.core.transformer.mlp import MLPSubmodules +from megatron.core.transformer.module import MegatronModule +from megatron.core.transformer.moe.experts import GroupedMLP, SequentialMLP +from megatron.core.transformer.moe.router import TopKRouter +from megatron.core.transformer.moe.token_dispatcher import MoEDroplessTokenDispatcher +from megatron.core.transformer.transformer_config import TransformerConfig + + +class BaseMoELayer(MegatronModule, ABC): + """Base class for a mixture of experts layer. + + Args: + config (TransformerConfig): Configuration object for the transformer model. + """ + + def __init__(self, config: TransformerConfig): + super(BaseMoELayer, self).__init__(config) + self.config = config + self.expert_parallel_size = parallel_state.get_expert_model_parallel_world_size() + assert self.expert_parallel_size > 0, "Expected non-negative expert parallel size" + assert self.config.num_moe_experts % self.expert_parallel_size == 0 + self.num_local_experts = self.config.num_moe_experts // self.expert_parallel_size + local_expert_indices_offset = ( + parallel_state.get_expert_model_parallel_rank() * self.num_local_experts + ) + self.local_expert_indices = [ + local_expert_indices_offset + i for i in range(self.num_local_experts) + ] + assert all(map(lambda x: x < self.config.num_moe_experts, self.local_expert_indices)) + self.router = None + self.experts = None + self.token_dispatcher = None + + @abstractmethod + def forward(self, hidden_states): + pass + + +class MoELayer(BaseMoELayer): + """Mixture of experts Layer **currently only supports no token dropping**. + + Args: + BaseMoELayer (MegatronModule): Base class for MoE layers + """ + + def __init__(self, config: TransformerConfig, submodules: MLPSubmodules = None): + self.submodules = submodules + super(MoELayer, self).__init__(config=config) + self.router = TopKRouter(config=self.config) + if self.config.moe_grouped_gemm: + self.experts = GroupedMLP(self.num_local_experts, self.config) + else: + assert isinstance(self.submodules, MLPSubmodules) + self.experts = SequentialMLP(self.num_local_experts, self.config, self.submodules) + self.token_dispatcher = MoEDroplessTokenDispatcher( + self.num_local_experts, self.local_expert_indices, config=self.config + ) + + def forward(self, hidden_states: torch.Tensor): + # process MoE + scores, indices = self.router(hidden_states) + ( + dispatched_input, + tokens_per_expert, + scores, + indices, + global_local_map, + ) = self.token_dispatcher.token_permutation(hidden_states, scores, indices) + expert_output, mlp_bias = self.experts(dispatched_input, tokens_per_expert) + output, mlp_bias = self.token_dispatcher.token_unpermutation( + expert_output, scores, indices, global_local_map, mlp_bias + ) + return output, mlp_bias diff --git a/megatron/core/transformer/moe/moe_utils.py b/megatron/core/transformer/moe/moe_utils.py new file mode 100644 index 0000000000..3e42151642 --- /dev/null +++ b/megatron/core/transformer/moe/moe_utils.py @@ -0,0 +1,101 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. 
+
+import torch
+
+
+def switch_load_balancing_loss_func(gates, mask, moe_aux_loss_coeff):
+    """Calculate the auxiliary loss for better load balancing.
+    Please refer to the Switch Transformer paper (https://arxiv.org/abs/2101.03961) for details.
+
+    Args:
+        gates (torch.Tensor): The gates tensor representing the routing probabilities for each expert.
+        mask (torch.Tensor): The 2D mask tensor indicating which experts are selected.
+        moe_aux_loss_coeff (float): The scaling coefficient applied to the auxiliary loss.
+
+    Returns:
+        torch.Tensor: The auxiliary loss for load balancing.
+    """
+    num_experts = mask.size(-1)
+    gates_mean = gates.mean(dim=0)
+    top_k = mask[0].count_nonzero()
+    selection_mean = mask.float().mean(dim=0) / top_k
+    aux_loss = torch.sum(gates_mean * selection_mean) * num_experts
+    aux_loss *= moe_aux_loss_coeff
+    return aux_loss
+
+
+def z_loss_func(logits, z_loss_coeff):
+    """Encourages the router's logits to remain small to enhance stability.
+    Please refer to the ST-MoE paper (https://arxiv.org/pdf/2202.08906.pdf) for details.
+
+    Args:
+        logits (torch.Tensor): The logits of the router.
+        z_loss_coeff (float): The scaling coefficient applied to the z-loss.
+
+    Returns:
+        torch.Tensor: The computed z-loss.
+    """
+
+    z_loss = torch.mean(torch.square(torch.logsumexp(logits, dim=-1))) * z_loss_coeff
+    return z_loss
+
+
+def sinkhorn(cost: torch.Tensor, tol: float = 0.0001):
+    """Sinkhorn-based MoE routing function"""
+    cost = torch.exp(cost)
+    d0 = torch.ones(cost.size(0), device=cost.device, dtype=cost.dtype)
+    d1 = torch.ones(cost.size(1), device=cost.device, dtype=cost.dtype)
+
+    eps = 0.00000001
+    error = 1e9
+    d1_old = d1
+    while error > tol:
+        d0 = (1 / d0.size(0)) * 1 / (torch.sum(d1 * cost, 1) + eps)
+        d1 = (1 / d1.size(0)) * 1 / (torch.sum(d0.unsqueeze(1) * cost, 0) + eps)
+        error = torch.mean(torch.abs(d1_old - d1))
+        d1_old = d1
+    return d1 * cost * d0.unsqueeze(1)
+
+
+class MoEAuxLossAutoScaler(torch.autograd.Function):
+    """An AutoScaler that computes and scales the gradient for the auxiliary loss.
+
+    """
+
+    main_loss_backward_scale: torch.Tensor = torch.tensor(1.0)
+
+    @staticmethod
+    def forward(ctx, output: torch.Tensor, aux_loss: torch.Tensor):
+        """Preserve the aux_loss by storing it in the context to avoid garbage collection.
+
+        Args:
+            output (torch.Tensor): The output tensor.
+            aux_loss (torch.Tensor): The auxiliary loss tensor.
+
+        Returns:
+            torch.Tensor: The output tensor.
+        """
+        ctx.save_for_backward(aux_loss)
+        return output
+
+    @staticmethod
+    def backward(ctx, grad_output: torch.Tensor):
+        """Compute and scale the gradient for the auxiliary loss.
+
+        Args:
+            grad_output (torch.Tensor): The gradient of the output.
+
+        Returns:
+            Tuple[torch.Tensor, torch.Tensor]: The gradient of the output, scaled auxiliary loss gradient.
+        """
+        (aux_loss,) = ctx.saved_tensors
+        aux_loss_backward_scale = MoEAuxLossAutoScaler.main_loss_backward_scale
+        scaled_aux_loss_grad = torch.ones_like(aux_loss) * aux_loss_backward_scale
+        return grad_output, scaled_aux_loss_grad
+
+    @staticmethod
+    def set_loss_scale(scale: torch.Tensor):
+        """Set the scale of the aux loss.
+
+        Args:
+            scale (torch.Tensor): The scale value to set. Please ensure that the scale passed in matches the scale of the main_loss.
+        """
+        MoEAuxLossAutoScaler.main_loss_backward_scale = scale
diff --git a/megatron/core/transformer/moe/router.py b/megatron/core/transformer/moe/router.py
new file mode 100644
index 0000000000..672565192f
--- /dev/null
+++ b/megatron/core/transformer/moe/router.py
@@ -0,0 +1,242 @@
+# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
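+
+# Shape conventions in this module (illustrative, inferred from the code below):
+#   hidden states [*, hidden_size] -> gating() -> logits, flattened to
+#   [num_tokens, num_moe_experts] -> routing() (optional z-loss / input jitter, then
+#   sinkhorn, aux-loss, or plain top-k balancing) -> scores and expert indices of
+#   shape [num_tokens, moe_router_topk].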
+ +import math +from abc import ABC, abstractmethod +from typing import Callable, List + +import torch + +from megatron.core import parallel_state, tensor_parallel +from megatron.core.parallel_state import get_tensor_and_expert_parallel_group +from megatron.core.tensor_parallel import get_cuda_rng_tracker, get_data_parallel_rng_tracker_name +from megatron.core.tensor_parallel.random import ( + get_cuda_rng_tracker, + get_data_parallel_rng_tracker_name, +) +from megatron.core.transformer.module import MegatronModule +from megatron.core.transformer.moe.moe_utils import ( + MoEAuxLossAutoScaler, + sinkhorn, + switch_load_balancing_loss_func, + z_loss_func, +) +from megatron.core.transformer.transformer_config import TransformerConfig + + +class Router(ABC, MegatronModule): + """Base Router class""" + + def __init__(self, config: TransformerConfig) -> None: + """ + Initialize the Router module. + + Args: + config (TransformerConfig): Configuration object for the Transformer model. + """ + super().__init__(config) + self.config = config + self.num_experts = self.config.num_moe_experts + self.moe_aux_loss_func = None + + # Initialize the gate weights. + self.weight = torch.nn.Parameter( + torch.empty((self.config.num_moe_experts, self.config.hidden_size)) + ) + with get_cuda_rng_tracker().fork(get_data_parallel_rng_tracker_name()): + config.init_method(self.weight) + setattr(self.weight, 'sequence_parallel', config.sequence_parallel) + + def gating(self, input: torch.Tensor): + """Forward pass of the router gate. + + Args: + input (torch.Tensor): Input tensor. + + Returns: + torch.Tensor: Logits tensor. + """ + logits = torch.nn.functional.linear(input, self.weight) + return logits + + @abstractmethod + def routing(self, logits: torch.Tensor): + """Routing function. + + Args: + logits (torch.Tensor): Logits tensor. + + Returns: + Tuple[torch.Tensor, torch.Tensor]: Tuple of tensors representing max probs and the indices. + """ + raise NotImplementedError("Routing function not implemented.") + + def forward(self, input: torch.Tensor): + """ + Forward pass of the router. + + Args: + input (torch.Tensor): Input tensor. + + Returns: + Tuple[torch.Tensor, torch.Tensor]: scores and indices. + """ + self.hidden = input.shape[-1] + + logits = self.gating(input) + logits = logits.view(-1, self.config.num_moe_experts) + + scores, indices = self.routing(logits) + + return scores, indices + + +class TopKRouter(Router): + """Route each token to the top-k experts.""" + + def __init__(self, config: TransformerConfig,) -> None: + """Initialize the zero token dropping router. + + Args: + config (TransformerConfig): The configuration for the transformer model. + """ + super().__init__(config=config) + assert config.moe_token_dropping is False + self.topk = self.config.moe_router_topk + self.routing_type = self.config.moe_router_load_balancing_type + self.moe_aux_loss_func = switch_load_balancing_loss_func + self.input_jitter = None + + def sinkhorn_load_balancing(self, logits: torch.Tensor): + """Apply sinkhorn routing to the logits tensor. + + Args: + logits (torch.Tensor): The logits tensor. + + Returns: + torch.Tensor: The logits tensor after applying sinkhorn routing. + """ + + def _sinkhorn_activation(logits): + if self.topk == 1: + logits = torch.sigmoid(logits) + else: # k > 1 + logits = torch.softmax(logits, dim=-1, dtype=torch.float32).type_as(logits) + return logits + + assert self.config.moe_aux_loss_coeff == 0, "Sinkhorn routing does not support aux loss." 
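+        # Summary of the branch below (descriptive): during training the expert assignment is
+        # taken from the sinkhorn-balanced matrix, computed in fp32 under no_grad for stability,
+        # while the gradient-carrying scores are gathered from the activated raw logits; at
+        # inference the activation is applied directly and top-k is taken from it.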
+ if self.training: + with torch.no_grad(): + norm_logits = sinkhorn( + logits.to(dtype=torch.float32) + ) # explicit fp32 conversion for stability + _, indices = torch.topk(norm_logits, k=self.topk, dim=1) + logits = _sinkhorn_activation(logits) + scores = torch.gather(logits, 1, indices) + else: + logits = _sinkhorn_activation(logits) + scores, indices = torch.topk(logits, k=self.topk, dim=1) + return scores, indices + + def aux_loss_load_balancing(self, logits: torch.Tensor): + """Apply loss-based load balancing to the logits tensor. + + Args: + logits (torch.Tensor): The logits tensor. + + Returns: + Tuple[torch.Tensor, torch.Tensor]: The scores and the indices tensor after applying load balancing. + """ + top_logits, indices = torch.topk(logits, k=self.topk, dim=1) + scores = torch.softmax(top_logits, dim=-1, dtype=torch.float32).type_as(logits) + # Apply load balancing loss + probs = torch.softmax(logits, dim=-1, dtype=torch.float32) + scores = self.apply_aux_loss(self.moe_aux_loss_func, probs, indices, activation=scores) + return scores, indices + + def apply_aux_loss( + self, + loss_func: Callable, + probs: torch.Tensor, + indices: torch.Tensor, + activation: torch.Tensor, + ): + """Applies auxiliary loss to the MoE layer. + + Args: + loss_func (callable): The loss function to be used. + probs (torch.Tensor): The probabilities output by the MoE layer. + indices (torch.Tensor): The indices of the selected experts. + activation (torch.Tensor): The activation tensor to attach the gradient function to. + + Returns: + torch.Tensor: The activation tensor with the attached gradient function. + """ + mask = torch.nn.functional.one_hot(indices, num_classes=self.num_experts).sum(dim=1) + aux_loss = loss_func(probs, mask, self.config.moe_aux_loss_coeff) + activation = MoEAuxLossAutoScaler.apply(activation, aux_loss) + return activation + + def apply_z_loss(self, logits): + """Encourages the router's logits to remain small to enhance stability. + Please refer to the ST-MoE paper (https://arxiv.org/pdf/2202.08906.pdf) for details. + + Args: + logits (torch.Tensor): The logits of the router. + + Returns: + torch.Tensor: The logits after applying the z-loss. + """ + if self.config.moe_z_loss_coeff is not None: + z_loss = z_loss_func(logits, self.config.moe_z_loss_coeff) + logits = MoEAuxLossAutoScaler.apply(logits, z_loss) + return logits + + def apply_input_jitter(self, input: torch.Tensor): + """Add noise to the input tensor. + Refer to https://arxiv.org/abs/2101.03961. + + Args: + input (Tensor): Input tensor. + + Returns: + Tensor: Jittered input. + """ + if self.config.moe_input_jitter_eps is not None: + eps = self.config.moe_input_jitter_eps + if self.input_jitter is None: + self.input_jitter = torch.distributions.uniform.Uniform( + torch.tensor(1.0 - eps, device=input.device), + torch.tensor(1.0 + eps, device=input.device), + ).rsample + return input * self.input_jitter(input.shape) + else: + return input + + def routing(self, logits: torch.Tensor): + """Top-k routing function + + Args: + logits (torch.Tensor): Logits tensor. + + Returns: + Tuple[torch.Tensor, torch.Tensor]: Probs and the indices tensor. 
+ """ + logits = logits.view(-1, self.config.num_moe_experts) + + # Apply Z-Loss + logits = self.apply_z_loss(logits) + # Apply input jitter + logits = self.apply_input_jitter(logits) + + if self.routing_type == "sinkhorn": + scores, indices = self.sinkhorn_load_balancing(logits) + elif self.routing_type == "aux_loss": + scores, indices = self.aux_loss_load_balancing(logits) + elif self.routing_type == "none": + # A naive top-k routing without load balancing + top_logits, indices = torch.topk(logits, k=self.topk, dim=1) + scores = torch.softmax(top_logits, dim=-1, dtype=torch.float32).type_as(logits) + else: + raise ValueError(f"Unsupported MoE routing type: {self.routing_type}") + + return scores, indices diff --git a/megatron/core/transformer/moe/token_dispatcher.py b/megatron/core/transformer/moe/token_dispatcher.py new file mode 100644 index 0000000000..69bace767e --- /dev/null +++ b/megatron/core/transformer/moe/token_dispatcher.py @@ -0,0 +1,279 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + +from abc import abstractmethod +from typing import List + +import torch + +from megatron.core import parallel_state, tensor_parallel +from megatron.core.parallel_state import get_tensor_and_expert_parallel_group +from megatron.core.transformer.transformer_config import TransformerConfig + + +class MoETokenDispatcher: + """ + MoE Token Dispatcher + """ + + def __init__(self, config: TransformerConfig) -> None: + """ + Initialize the MoE Token Dispatcher. + """ + self.config = config + + @abstractmethod + def token_permutation( + self, tokens: torch.Tensor, indices: torch.Tensor, + ): + """Dispatch tokens to experts. + + Args: + tokens (torch.Tensor): Input tokens. + indices (torch.Tensor): indices tensor. + + Returns: + torch.Tensor: Tokens tensor. + """ + raise NotImplementedError("Dispatch function not implemented.") + + @abstractmethod + def token_unpermutation( + self, expert_output: torch.Tensor, scores: torch.Tensor, indices: torch.Tensor, + ): + """Restores the expert output to its original ordering. + + Args: + expert_output (torch.Tensor): The output tensor from the expert models. + scores (torch.Tensor): Each token's score with each expert. + indices (torch.Tensor): The indices used to reorder the expert output. + + Returns: + (torch.Tensor, torch.Tensor): Unpermuted activation and optional bias. + """ + raise NotImplementedError("Restore function not implemented.") + + +class MoEDroplessTokenDispatcher(MoETokenDispatcher): + """ + Token dispatcher without token dropping. + """ + + def __init__( + self, num_local_experts: int, local_expert_indices: List[int], config: TransformerConfig, + ) -> None: + """ + Initialize the zero token dropping router. + """ + super().__init__(config=config) + self.num_local_experts = num_local_experts + assert self.num_local_experts > 0, "Expected at least one expert" + self.local_expert_indices = local_expert_indices + assert len(self.local_expert_indices) > 0, "Expected at least one local expert index" + self.router_topk = config.moe_router_topk + self.add_bias = config.add_bias_linear + + def token_permutation( + self, hidden_states: torch.Tensor, max_prob: torch.Tensor, max_ind: torch.Tensor + ): + """Dispatch tokens to local experts. It's composed of two stages: + (1) Permute the tokens across the expert parallel devices. After this stage, + each device receives all of the tokens assigned to its local set of experts + in its local HBM. + (2) Permute the tokens locally so that they are grouped by their expert + assignment. 
After the stage (1), the tokens are grouped by which device + they came from. We re-order them locally for subsequent efficient computation. + + Args: + hidden_states: input tokens of shape [SeqLen/TP, MBS, HiddenSize] + max_prob: probs of token assignment to local experts. + max_ind: token assignment to local experts. + + Returns: + permuted_local_hidden_states: Permutation of tokens to local experts group. + tokens_per_expert: the number of tokens each local expert to process. + indices: The indices of `local_indices` (which holds the un-sorted expert + indices of tokens that local expert can process) that give its sorted order along dim 0. + global_local_map (optional): 2D tensor. A mask of mapping between global and local tokens where each + element is True if it's between the local_expert_indices. Only useful + when cross device token permutation is enabled and **AllGahter** is performed. + """ + self.hidden_shape = hidden_states.shape + # [S/TP, B, H] -> [S*B/TP, H] + hidden_states = hidden_states.view(-1, self.hidden_shape[-1]) + + # Permute the tokens across the expert parallel devices. + if self.config.sequence_parallel or (self.config.expert_model_parallel_size > 1): + # [S*B/TP, H] -> [S*B, H] + global_hidden_states = tensor_parallel.gather_from_sequence_parallel_region_to_moe( + hidden_states + ) + with torch.no_grad(): + global_indices = tensor_parallel.gather_from_sequence_parallel_region_to_moe( + max_ind + ) + # Create a mask of mapping between global and local tokens where each + # element is True if it's between the local_expert_indices + global_local_mask = (global_indices >= self.local_expert_indices[0]) & ( + global_indices <= self.local_expert_indices[-1] + ) + local_indices = global_indices.masked_select(global_local_mask) + + if self.router_topk > 1: # k > 1 + global_probs = tensor_parallel.gather_from_sequence_parallel_region_to_moe(max_prob) + local_probs = global_probs.masked_select(global_local_mask) + else: + local_probs = max_prob + + # Reshape global_local_mask to be compatible with Tensor.gather + global_local_map = global_local_mask.nonzero()[:, 0] + global_local_map = global_local_map.view(-1, 1).expand(-1, hidden_states.shape[-1]) + local_hidden_states = torch.gather(global_hidden_states, 0, global_local_map) + else: + if self.router_topk > 1: + global_local_map = torch.ones_like(max_ind).bool() + local_indices = max_ind.masked_select(global_local_map) + local_probs = max_prob.masked_select(global_local_map) + global_local_map = global_local_map.nonzero()[:, 0] + global_local_map = global_local_map.view(-1, 1).expand(-1, hidden_states.shape[-1]) + local_hidden_states = torch.gather(hidden_states, 0, global_local_map) + else: + local_indices = max_ind + local_probs = max_prob + local_hidden_states = hidden_states + global_local_map = None + + with torch.no_grad(): + # The indices of local_indices that give its sorted order along dim 0. 
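+            # For example (illustrative): local_indices = [2, 0, 1] gives
+            # indices = argsort(local_indices) = [1, 2, 0]; gathering rows of
+            # local_hidden_states in that order groups tokens by expert id (0, 1, 2),
+            # which is the layout the expert MLPs below consume.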
+ indices = torch.argsort(local_indices, dim=0) + tokens_per_expert = torch.histc( + local_indices, + bins=self.num_local_experts, + min=self.local_expert_indices[0], + max=self.local_expert_indices[-1], + ) + tokens_per_expert = tokens_per_expert.cpu().to(torch.long) + + # Stage2: permute the tokens locally so that they are grouped by their expert assignment + # Reshape indices to be compatible with Tensor.gather + indices = indices.view(-1, 1).expand(-1, hidden_states.shape[-1]) + permuted_local_hidden_states = torch.gather(local_hidden_states, 0, indices) + return ( + permuted_local_hidden_states, + tokens_per_expert, + local_probs, + indices, + global_local_map, + ) + + def token_unpermutation( + self, + hidden_states: torch.Tensor, + scores: torch.Tensor, + indices: torch.Tensor, + global_local_map: torch.Tensor = None, + bias: torch.Tensor = None, + ): + """ + Reverse process of `dispatch()` which permutes the ouput of local + experts locallay and across expert parallel rank into the original order to + produce the final output. + + Args: + hidden_states: 2D tensor of shape [sum_tokens_of_all_local_experts, HiddenSize], + ouput of local experts. + scores: 2D tensor of the probs of token assignment to local experts. + indices: 2D tensor of the indices of `local_indices` (which holds the un-sorted expert + indices of tokens that local expert can process) that give its sorted order along dim 0. + global_local_map (optional): 2D tensor, a mask of mapping between global and local tokens where each + element is True if it's between the local_expert_indices. Only useful + when cross device token permutation is enabled and **AllGather** is performed. + bias (optional): The bias tensor. + + Returns: + output_total: un-permuted updated hidden states output from all local experts + with shape of [SeqLen/TP, MBS, HiddenSize] + """ + # Stage1: unpermute the tokens and bias locally respectively. + scores = scores.to(dtype=hidden_states.dtype) + unpermuted_local_hidden = torch.zeros_like(hidden_states) + assert indices.shape == hidden_states.shape + unpermuted_local_hidden = unpermuted_local_hidden.scatter(0, indices, hidden_states) + + # Scale the expert output prior to reduction and subsequent to local unpermutation if k > 1. + if self.router_topk > 1: + unpermuted_local_hidden = unpermuted_local_hidden * scores.view(-1, 1) + + unpermuted_local_bias = None + if self.add_bias: + assert bias is not None + unpermuted_local_bias = torch.zeros_like(hidden_states) + assert indices.shape == bias.shape + unpermuted_local_bias = unpermuted_local_bias.scatter(0, indices, bias) + if self.router_topk > 1: + unpermuted_local_bias = unpermuted_local_bias * scores.view(-1, 1) + + output_total = unpermuted_local_hidden + output_bias_total = unpermuted_local_bias + + # Unpermute the tokens across expert parallel devices. + if self.config.sequence_parallel or (self.config.expert_model_parallel_size > 1): + assert global_local_map is not None, "global_local_map is necessary for `AllGather`." 
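# Minimal single-device sketch of the local permute/unpermute round trip used by
# MoEDroplessTokenDispatcher: sort tokens by expert id with argsort/gather, then restore
# the original order with scatter. Helper names are illustrative, not the module's API.
import torch

def local_permute(hidden, expert_ids, num_experts):
    order = torch.argsort(expert_ids, dim=0)          # group tokens by expert
    tokens_per_expert = torch.histc(
        expert_ids.float(), bins=num_experts, min=0, max=num_experts - 1
    ).long()
    gather_idx = order.view(-1, 1).expand(-1, hidden.shape[-1])
    return torch.gather(hidden, 0, gather_idx), tokens_per_expert, gather_idx

def local_unpermute(expert_output, gather_idx):
    # Scatter the (expert-grouped) outputs back to the original token positions.
    return torch.zeros_like(expert_output).scatter(0, gather_idx, expert_output)

hidden = torch.randn(8, 4)                            # 8 tokens, hidden size 4
expert_ids = torch.randint(0, 2, (8,))                # top-1 assignment to 2 experts
permuted, counts, idx = local_permute(hidden, expert_ids, num_experts=2)
assert torch.equal(local_unpermute(permuted, idx), hidden)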
+ ep_group_size = parallel_state.get_tensor_and_expert_parallel_world_size() + # hidden_shape: [SeqLen/TP, MBS, HiddenSize], glboal_num_tokens = SeqLen/TP*MBS*(TP*EP) + global_num_tokens = self.hidden_shape[0] * self.hidden_shape[1] * ep_group_size + global_hidden_shape = [global_num_tokens, hidden_states.shape[-1]] + unpermuted_global_hidden = torch.zeros( + global_hidden_shape, dtype=hidden_states.dtype, device=torch.cuda.current_device() + ) + # Reshape global_local_map to be compatible with Tensor.scatter + assert global_local_map.shape == unpermuted_local_hidden.shape + unpermuted_global_hidden = unpermuted_global_hidden.scatter_add( + 0, global_local_map, unpermuted_local_hidden + ) + output_total = tensor_parallel.reduce_scatter_to_sequence_parallel_region_from_moe( + unpermuted_global_hidden + ) + if self.add_bias: + # Unpermute the bias across expert parallel devices. + unpermuted_global_bias = torch.zeros_like(unpermuted_global_hidden) + unpermuted_global_bias = unpermuted_global_bias.scatter_add( + 0, global_local_map, unpermuted_local_bias + ) + output_bias_total = tensor_parallel.reduce_scatter_to_sequence_parallel_region_from_moe( + unpermuted_global_bias + ) + # bias is duplicated across tensor parallelism ranks; + # reduce scatter reduces bias across tensor parallel_ranks + output_bias_total = ( + output_bias_total / parallel_state.get_tensor_model_parallel_world_size() + ) + else: + if self.router_topk > 1: + global_num_tokens = self.hidden_shape[0] * self.hidden_shape[1] + global_hidden_shape = [global_num_tokens, hidden_states.shape[-1]] + unpermuted_global_hidden = torch.zeros( + global_hidden_shape, + dtype=hidden_states.dtype, + device=torch.cuda.current_device(), + ) + output_total = unpermuted_global_hidden.scatter_add( + 0, global_local_map, unpermuted_local_hidden + ) + if self.add_bias: + unpermuted_global_bias = torch.zeros_like(unpermuted_global_hidden) + output_bias_total = unpermuted_global_bias.scatter_add( + 0, global_local_map, unpermuted_local_bias + ) + + if self.router_topk == 1: + output_total = output_total * scores + output_total = output_total.view(self.hidden_shape) + if self.add_bias: + assert output_bias_total is not None + if self.router_topk == 1: + output_bias_total = output_bias_total * scores + output_bias_total = output_bias_total.view(self.hidden_shape) + else: + output_bias_total = None + + return output_total, output_bias_total diff --git a/megatron/core/transformer/switch_mlp.py b/megatron/core/transformer/switch_mlp.py deleted file mode 100644 index 092c6c6402..0000000000 --- a/megatron/core/transformer/switch_mlp.py +++ /dev/null @@ -1,158 +0,0 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. 
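# Small numeric check of the k>1 combine in token_unpermutation above: each token's k
# expert outputs are scaled by their router scores and summed back into a single row via
# scatter_add. Values below are made up for illustration (2 tokens, k=2, hidden size 2).
import torch

# token 0 went to two experts with scores (0.7, 0.3); token 1 with scores (0.5, 0.5)
rows = torch.tensor([0, 0, 1, 1])                     # original token id of each expert row
expert_out = torch.tensor([[1., 1.], [3., 3.], [2., 2.], [4., 4.]])
scores = torch.tensor([0.7, 0.3, 0.5, 0.5]).view(-1, 1)

index = rows.view(-1, 1).expand(-1, expert_out.shape[-1])
combined = torch.zeros(2, 2).scatter_add(0, index, expert_out * scores)
print(combined)                                       # [[1.6, 1.6], [3.0, 3.0]]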
- -import torch - -from megatron.core import parallel_state, tensor_parallel -from megatron.core.parallel_state import ( - get_tensor_and_expert_parallel_group, - get_tensor_model_parallel_group, -) -from megatron.core.tensor_parallel import get_cuda_rng_tracker, get_data_parallel_rng_tracker_name -from megatron.core.transformer.module import MegatronModule -from megatron.core.transformer.transformer_config import TransformerConfig - -from .mlp import MLP, MLPSubmodules - - -def sinkhorn(cost, tol=0.0001): - "Sinkhorn based MoE routing function" - cost = torch.exp(cost) - d0 = torch.ones(cost.size(0), device=cost.device, dtype=cost.dtype) - d1 = torch.ones(cost.size(1), device=cost.device, dtype=cost.dtype) - - eps = 0.00000001 - error = 1e9 - d1_old = d1 - while error > tol: - d0 = (1 / d0.size(0)) * 1 / (torch.sum(d1 * cost, 1) + eps) - d1 = (1 / d1.size(0)) * 1 / (torch.sum(d0.unsqueeze(1) * cost, 0) + eps) - error = torch.mean(torch.abs(d1_old - d1)) - d1_old = d1 - return d1 * cost * d0.unsqueeze(1) - - -def get_router_linear_layer(config): - router = torch.nn.Linear(config.hidden_size, config.num_moe_experts, bias=False) - with get_cuda_rng_tracker().fork(get_data_parallel_rng_tracker_name()): - config.init_method(router.weight) - setattr(router.weight, 'sequence_parallel', config.sequence_parallel) - return router - - -class SwitchMLP(MegatronModule): - """ - Top-1 Mixture of Experts Layer. Routes input to one of N MLP "experts" - Curently supports Sinkhorn based expert routing. - """ - - def __init__(self, config: TransformerConfig, submodules: MLPSubmodules): - super().__init__(config=config) - - self.config: TransformerConfig = config - - self.router = get_router_linear_layer(self.config) - self.add_bias = config.add_bias_linear - self.sequence_parallel = config.sequence_parallel - self.route_algo = sinkhorn - self.router_activation = torch.sigmoid - self.expert_parallel_size = parallel_state.get_expert_model_parallel_world_size() - - assert self.config.num_moe_experts % self.expert_parallel_size == 0 - self.num_local_experts = self.config.num_moe_experts // self.expert_parallel_size - local_expert_indices_offset = ( - parallel_state.get_expert_model_parallel_rank() * self.num_local_experts - ) - self.local_expert_indices = [ - local_expert_indices_offset + i for i in range(self.num_local_experts) - ] - - self.local_experts = torch.nn.ModuleList() - for _ in range(self.num_local_experts): - expert = MLP(self.config, submodules, is_expert=True) - self.local_experts.append(expert) - - def gather_indices(self, local_indices): - """ Gather tensors and concatenate along the first dimension.""" - group = get_tensor_and_expert_parallel_group() - world_size = torch.distributed.get_world_size(group=group) - # Bypass the function if we are using only 1 GPU. 
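# Quick numerical demo of the Sinkhorn routine defined in the (now deleted) switch_mlp.py
# above, which the new "sinkhorn" routing type builds on: after normalization every expert
# column receives roughly the same routing mass. The function body is the same iteration,
# lightly condensed; the check at the end is only illustrative.
import torch

def sinkhorn(cost, tol=1e-4):
    cost = torch.exp(cost)
    d0 = torch.ones(cost.size(0), dtype=cost.dtype)
    d1 = torch.ones(cost.size(1), dtype=cost.dtype)
    eps, error, d1_old = 1e-8, 1e9, d1
    while error > tol:
        d0 = (1 / d0.size(0)) / (torch.sum(d1 * cost, 1) + eps)
        d1 = (1 / d1.size(0)) / (torch.sum(d0.unsqueeze(1) * cost, 0) + eps)
        error = torch.mean(torch.abs(d1_old - d1))
        d1_old = d1
    return d1 * cost * d0.unsqueeze(1)

logits = torch.randn(16, 4)                  # 16 tokens, 4 experts
balanced = sinkhorn(logits.float())
print(balanced.sum(dim=0))                   # each column sums to ~1/4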
- if world_size == 1: - return local_indices - - dim_size = list(local_indices.size()) - dim_size[0] = dim_size[0] * world_size - - # TODO pre allocate memory - output = torch.empty( - dim_size, dtype=local_indices.dtype, device=torch.cuda.current_device() - ) - torch.distributed._all_gather_base(output, local_indices.contiguous(), group=group) - return output - - def forward(self, hidden_states): - hidden_shape = hidden_states.shape - route = self.router(hidden_states) - route = route.view(-1, self.config.num_moe_experts) - - if self.training: - with torch.no_grad(): - norm_route = self.route_algo( - route.detach().to(dtype=torch.float32) - ) # explicit fp32 conversion for stability - _, max_ind = torch.max(norm_route, dim=1) - route = self.router_activation(route) - max_prob = route[torch.arange(route.size(0)), max_ind] - else: - route = self.router_activation(route) - max_prob, max_ind = torch.max(route, dim=1) - - max_prob = torch.unsqueeze(max_prob, 1) - hidden_states = hidden_states.view(-1, hidden_shape[-1]) - - if self.sequence_parallel or (self.expert_parallel_size > 1): - global_hidden_states = tensor_parallel.gather_from_sequence_parallel_region_to_moe( - hidden_states - ) - global_indices = self.gather_indices(max_ind) - else: - global_hidden_states = hidden_states - global_indices = max_ind - - output_total = torch.zeros_like(global_hidden_states) - if self.add_bias: - output_bias_total = torch.zeros_like(global_hidden_states) - - for expert_num, expert in enumerate(self.local_experts): - local_expert_index = self.local_expert_indices[expert_num] - local_indices = (global_indices == local_expert_index).nonzero() - hidden = global_hidden_states[local_indices, :] - output, output_bias = expert(hidden) - - output_total[local_indices, :] = output - if self.add_bias: - output_bias = output_bias.expand_as(output) - output_bias_total[local_indices, :] = output_bias - - if self.sequence_parallel or (self.expert_parallel_size > 1): - output_total = tensor_parallel.reduce_scatter_to_sequence_parallel_region_from_moe( - output_total - ) - if self.add_bias: - output_bias_total = tensor_parallel.reduce_scatter_to_sequence_parallel_region_from_moe( - output_bias_total - ) - # bias is duplicated across tensor parallelism ranks; - # reduce scatter reduces bias across tensor parallel_ranks - output_bias_total = ( - output_bias_total / parallel_state.get_tensor_model_parallel_world_size() - ) - - output_total = output_total * max_prob - output_total = output_total.view(hidden_shape) - if self.add_bias: - output_bias_total = output_bias_total * max_prob - output_bias_total = output_bias_total.view(hidden_shape) - else: - output_bias_total = None - - return output_total, output_bias_total diff --git a/megatron/core/transformer/transformer_block.py b/megatron/core/transformer/transformer_block.py old mode 100644 new mode 100755 index 74bf29c859..a4d4da760c --- a/megatron/core/transformer/transformer_block.py +++ b/megatron/core/transformer/transformer_block.py @@ -3,20 +3,28 @@ import re from contextlib import nullcontext from dataclasses import dataclass -from typing import List, Union +from typing import List, Tuple, Union import torch from torch import Tensor from megatron.core import InferenceParams, parallel_state, tensor_parallel +from megatron.core.dist_checkpointing.mapping import ShardedStateDict +from megatron.core.dist_checkpointing.utils import replace_prefix_for_sharding from megatron.core.fusions.fused_layer_norm import FusedLayerNorm -from 
megatron.core.transformer.custom_layers.transformer_engine import TENorm +from megatron.core.packed_seq_params import PackedSeqParams +from megatron.core.transformer.custom_layers.transformer_engine import ( + TENorm, + get_cpu_offload_context, +) from megatron.core.transformer.enums import AttnMaskType from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.spec_utils import ModuleSpec, build_module from megatron.core.transformer.transformer_config import TransformerConfig -from megatron.core.transformer.transformer_layer import TransformerLayer +from megatron.core.transformer.transformer_layer import BaseTransformerLayer, TransformerLayer +from megatron.core.transformer.utils import sharded_state_dict_default from megatron.core.utils import make_sharded_tensor_for_checkpoint, make_viewless_tensor +from megatron.tensor_logging import log_tensor def get_num_layers_to_build(config: TransformerConfig) -> int: @@ -66,11 +74,13 @@ def _get_block_submodules( if isinstance(spec, TransformerBlockSubmodules): return spec - # ModuleSpec here is generally assumed to be for a transformer layer. + # ModuleSpec here is generally assumed to be for a transformer layer that + # is implemented in `transformer_layer.py` or if it subclasses + # `BaseTransformerLayer` from the `transformer_layer.py` file. elif isinstance(spec, ModuleSpec): if issubclass(spec.module, TransformerBlock): return spec.submodules - elif issubclass(spec.module, TransformerLayer): + elif issubclass(spec.module, BaseTransformerLayer): num_layers = get_num_layers_to_build(config) return TransformerBlockSubmodules(layer_specs=[spec] * num_layers) else: @@ -92,6 +102,11 @@ def __init__( ): super().__init__(config=config) + from megatron import get_args + args = get_args() + self._debug_layer_outputs = args.debug_layer_outputs + self._debug_layer_gradients = args.debug_layer_gradients + self.submodules = _get_block_submodules(config, spec) self.post_layer_norm = post_layer_norm self.pre_process = pre_process @@ -102,6 +117,27 @@ def __init__( self.checkpoint_core_attention = self.config.recompute_granularity == 'selective' + if get_cpu_offload_context is not None: + ( + self.offload_context, + self.group_prefetch_offload_commit_async, + ) = get_cpu_offload_context( + self.config.cpu_offloading, + self.config.cpu_offloading_num_layers, + self.config.cpu_offloading_activations, + self.config.cpu_offloading_weights, + ) + self.config._cpu_offloading_context = ( + self.offload_context if self.config.cpu_offloading else None + ) + else: + assert ( + self.config.cpu_offloading == False + ), "CPU Offloading is enabled when TE is not present" + + self.offload_context, self.group_prefetch_offload_commit_async = nullcontext(), None + self.config._cpu_offloading_context = None + self._build_layers() self.num_layers_per_pipeline_rank = len(self.layers) @@ -156,12 +192,18 @@ def _checkpointed_forward( context: Tensor, context_mask: Tensor, rotary_pos_emb: Tensor, + packed_seq_params: PackedSeqParams, ): """Forward method with activation checkpointing.""" def custom(start: int, end: int): def custom_forward( - hidden_states, attention_mask, context, context_mask, rotary_pos_emb, + hidden_states, + attention_mask, + context, + context_mask, + rotary_pos_emb, + packed_seq_params, ): for index in range(start, end): layer = self._get_layer(index) @@ -172,25 +214,48 @@ def custom_forward( context_mask=context_mask, rotary_pos_emb=rotary_pos_emb, inference_params=None, + packed_seq_params=packed_seq_params, ) return 
hidden_states, context return custom_forward - if self.config.recompute_method == 'uniform': - # Uniformly divide the total number of Transformer layers and checkpoint - # the input activation of each divided chunk. - # A method to further reduce memory usage reducing checkpoints. - l = 0 - while l < self.num_layers_per_pipeline_rank: - hidden_states, context = tensor_parallel.checkpoint( - custom(l, l + self.config.recompute_num_layers), + def checkpoint_handler(forward_func): + if self.config.fp8: + from transformer_engine.pytorch.distributed import checkpoint as te_checkpoint + + return te_checkpoint( + forward_func, self.config.distribute_saved_activations, + tensor_parallel.random.get_cuda_rng_tracker, + parallel_state.get_tensor_model_parallel_group(), hidden_states, attention_mask, context, context_mask, rotary_pos_emb, + packed_seq_params, + ) + else: + return tensor_parallel.checkpoint( + forward_func, + self.config.distribute_saved_activations, + hidden_states, + attention_mask, + context, + context_mask, + rotary_pos_emb, + packed_seq_params, + ) + + if self.config.recompute_method == 'uniform': + # Uniformly divide the total number of Transformer layers and checkpoint + # the input activation of each divided chunk. + # A method to further reduce memory usage reducing checkpoints. + l = 0 + while l < self.num_layers_per_pipeline_rank: + hidden_states, context = checkpoint_handler( + custom(l, l + self.config.recompute_num_layers) ) l += self.config.recompute_num_layers @@ -201,18 +266,15 @@ def custom_forward( # A method fully use the device memory removing redundant re-computation. for l in range(self.num_layers_per_pipeline_rank): if l < self.config.recompute_num_layers: - hidden_states, context = tensor_parallel.checkpoint( - custom(l, l + 1), - self.config.distribute_saved_activations, + hidden_states, context = checkpoint_handler(custom(l, l + 1)) + else: + hidden_states, context = custom(l, l + 1)( hidden_states, attention_mask, context, context_mask, rotary_pos_emb, - ) - else: - hidden_states, context = custom(l, l + 1)( - hidden_states, attention_mask, context, context_mask, rotary_pos_emb, + packed_seq_params, ) else: raise ValueError("Invalid activation recompute method.") @@ -237,6 +299,7 @@ def forward( context_mask: Tensor = None, rotary_pos_emb: Tensor = None, inference_params: InferenceParams = None, + packed_seq_params: PackedSeqParams = None, ): # hidden_states (float): [s, b, h] # attention_mask (bool): [1, 1, s, s] @@ -298,24 +361,45 @@ def forward( with rng_context and fp8_context: # Forward pass. 
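# Plain-PyTorch sketch (no Megatron or Transformer Engine dependencies) of the 'uniform'
# recompute pattern that checkpoint_handler drives above: checkpoint chunks of
# recompute_num_layers layers so only the chunk inputs are kept for backward.
import torch
from torch.utils.checkpoint import checkpoint

layers = torch.nn.ModuleList(torch.nn.Linear(16, 16) for _ in range(8))
recompute_num_layers = 2

def custom(start, end):
    def custom_forward(x):
        for i in range(start, end):
            x = torch.relu(layers[i](x))
        return x
    return custom_forward

x = torch.randn(4, 16, requires_grad=True)
l = 0
while l < len(layers):
    # Intermediate activations inside the chunk are recomputed during backward.
    x = checkpoint(custom(l, l + recompute_num_layers), x, use_reentrant=False)
    l += recompute_num_layers
x.sum().backward()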
- if self.config.recompute_granularity == 'full': + if self.config.recompute_granularity == 'full' and self.training: hidden_states = self._checkpointed_forward( hidden_states=hidden_states, attention_mask=attention_mask, context=context, context_mask=context_mask, rotary_pos_emb=rotary_pos_emb, + packed_seq_params=packed_seq_params, ) else: - for layer in self.layers: - hidden_states, context = layer( - hidden_states=hidden_states, - attention_mask=attention_mask, - context=context, - context_mask=context_mask, - rotary_pos_emb=rotary_pos_emb, - inference_params=inference_params, - ) + for index, layer in enumerate(self.layers): + with self.offload_context: + hidden_states, context = layer( + hidden_states=hidden_states, + attention_mask=attention_mask, + context=context, + context_mask=context_mask, + rotary_pos_emb=rotary_pos_emb, + inference_params=inference_params, + packed_seq_params=packed_seq_params, + ) + if self._debug_layer_outputs: + log_tensor( + f"Global layer {index + 1} fw: Transformer layer {index+1} output", + hidden_states.transpose(0, 1), level=self._debug_layer_outputs + ) + if self._debug_layer_gradients: + fn=lambda idx:(lambda grad: log_tensor( + f"Global layer {idx + 2} bw: Transformer layer {idx+1} output", + grad.transpose(0, 1), level=self._debug_layer_gradients + )) + hidden_states.register_hook(fn(index)) + + if ( + torch.is_grad_enabled() + and self.config.cpu_offloading + and self.group_prefetch_offload_commit_async is not None + ): + hidden_states = self.group_prefetch_offload_commit_async(hidden_states) # Final layer norm. if self.post_process and self.post_layer_norm: @@ -323,27 +407,31 @@ def forward( return hidden_states - def sharded_state_dict(self, prefix: str = ''): - + def sharded_state_dict(self, prefix: str = '', sharded_offsets: tuple = ()) -> ShardedStateDict: + assert not sharded_offsets, "Unexpected sharded offsets" sharded_state_dict = {} layer_prefix = f'{prefix}layers.' + num_layers = self.config.num_layers for layer in self.layers: - sharded_state_dict.update(layer.sharded_state_dict(prefix=layer_prefix)) - - if self.post_process and self.post_layer_norm: - state_dict = self.state_dict(keep_vars=True) - - tensor = state_dict['final_layernorm.weight'] - layer_name = f'{prefix}final_layernorm.weight' - sharded_state_dict[layer_name] = make_sharded_tensor_for_checkpoint(tensor, layer_name) - - # RMSNorm doesn't have bias. - if 'final_layernorm.bias' in state_dict.keys(): - tensor = state_dict['final_layernorm.bias'] - layer_name = f'{prefix}final_layernorm.bias' - sharded_state_dict[layer_name] = make_sharded_tensor_for_checkpoint( - tensor, layer_name + offset = layer._get_layer_offset() + + global_layer_offset = layer.layer_number - 1 # self.layer_number starts at 1 + state_dict_prefix = f'{layer_prefix}{global_layer_offset - offset}.' 
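# Standalone sketch of the gradient-logging hooks registered in the layer loop above:
# Tensor.register_hook on a layer's output fires during backward with that output's
# gradient, and the hook factory avoids Python's late-binding-of-loop-variables trap
# (the same reason the diff wraps its lambda in fn=lambda idx: ...).
import torch

layers = torch.nn.ModuleList(torch.nn.Linear(4, 4) for _ in range(3))

def make_hook(idx):
    return lambda grad: print(f"layer {idx + 1} output grad norm: {grad.norm():.4f}")

h = torch.randn(2, 4, requires_grad=True)
for i, layer in enumerate(layers):
    h = torch.relu(layer(h))
    h.register_hook(make_hook(i))     # one hook per layer output
h.sum().backward()                    # hooks print in reverse layer order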
# module list index in TransformerBlock + sharded_pp_offset = [ + (0, global_layer_offset, num_layers) + ] # PP sharding offset for ShardedTensors + layer_sharded_state_dict = layer.sharded_state_dict( + prefix=state_dict_prefix, sharded_offsets=sharded_pp_offset + ) + replace_prefix_for_sharding(layer_sharded_state_dict, state_dict_prefix, layer_prefix) + sharded_state_dict.update(layer_sharded_state_dict) + + # Add modules other than self.layers + for name, module in self.named_children(): + if not module is self.layers: + sharded_state_dict.update( + sharded_state_dict_default(module, f'{prefix}{name}.', sharded_offsets) ) return sharded_state_dict diff --git a/megatron/core/transformer/transformer_config.py b/megatron/core/transformer/transformer_config.py index 47647e657a..d85473c948 100644 --- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -2,7 +2,7 @@ import types from dataclasses import dataclass -from typing import Callable +from typing import Callable, Optional, Tuple import torch import torch.nn.functional as F @@ -29,9 +29,11 @@ class TransformerConfig(ModelParallelConfig): layernorm_epsilon (float): Layernorm epsilon. Defaults to 1e-5. layernorm_zero_centered_gamma (bool): if set to 'True', the LayerNorm is adjusted to center the gamma values around 0. This improves numerical stability. Defaults to False. add_bias_linear (bool): Include a bias term in all linear layers (QKV projections, after core attention, and two in MLP layer). Default is True. + add_qkv_bias (bool): Add a bias term only for QKV projections. Default is False. gated_linear_unit (bool): Use a gated linear unit for the first linear layer in the MLP. Defaults to False. activation_func (Callable): Activation function to use for the non-linearity in the MLP. Defaults to F.gelu. - num_moe_experts (int): Number of experts to use for Mixture of Experts. When set, it replaces MLP with Switch MLP. Defaults to None (no MoE). + num_moe_experts (int): Number of experts to use for MoE layer. When set, it replaces MLP with MoE layer. Defaults to None (no MoE). + rotary_interleaved (bool): True is rotate pairs of even and odd dimensions (RoFormer style), False is rotate pairs of first half and second half (LLaMa style). Default to False. init_method (Callable): Method to initialize weights. Note that bias is always set to zero. Should be a function that takes a single Tensor and initializes it. Defaults to megatron.core.utils.init_method_normal(init_method_std) which is torch nn init normal with mean=0.0 and std=init_method_Std. output_layer_init_method (Callable): Method to initialize weights of the output layer of both attention and MLP blocks. Defaults to megatron.core.utils.scaled_init_method_normal(init_method_std) which is torch nn init normal with mean=0.0 and std=init_method_std / math.sqrt(2.0 * num_layers). init_method_std (float): Standard deviation of the zero mean normal for the default initialization method, not used if init_method and output_layer_init_method are provided. Defaults to 0.02. @@ -40,6 +42,7 @@ class TransformerConfig(ModelParallelConfig): bias_gelu_fustion (bool): If true, fuses bias and gelu. Defaults to False. masked_softmax_fusion (bool): If true, uses softmax fusion. persist_layer_norm (bool): If true, uses the persistent fused layer norm kernel. This kernel only supports a fixed set of hidden sizes. Defaults to False. 
+ memory_efficient_layer_norm(bool): If True, and using local layers (not from TransformerEngine), tells Apex to use the memory efficient fused LayerNorm kernel. Ignored if not using LayerNorm. Defaults to False. bias_dropout_fusion (bool): If true, uses bias dropout fusion. recompute_granularity (str): megatron-core supports 'selective' activation checkpointing where only the memory intensive part of attention is checkpointed. These memory intensive activations are also less compute intensive which makes activation checkpointing more efficient for LLMs (20B+). See Reducing Activation Recomputation in Large Transformer Models: https://arxiv.org/abs/2205.05198 for more details. 'full' will checkpoint the entire transformer layer. Must be 'selective' or 'full'. 'selective' always uses all layers. Defaults to None. recompute_method (str): uniform will uniformly divide the total number of transformer layers in a transformer block and recompute the input activation of each divided chunk at the specified granularity. block will recompute the input activations for only a set number of transformer layers per pipeline stage. The rest of the layers in the pipeline stage will not have any activations recomputed. Must be 'uniform' or 'block'. Defaults to None. @@ -53,6 +56,15 @@ class TransformerConfig(ModelParallelConfig): fp8_wgrad (bool): When set to False, override FP8 config options and do the wgrad computation in higher precision. Defaults to True. clone_scatter_output_in_embedding (bool): When set to true, clone the output of scatter_to_sequence_parallel_region in embedding layer to facilitate garbage collection of input. normalization (str): Swtich b/w `LayerNorm` and `RMSNorm` as normalization layers. For now, these are primarily used by Transformer-Engine's layers like `LayerNormLinear`. Default value is `LayerNorm`. + window_size ((int,int) or None): If not None, then will use sliding window attention. The size of the window is specified by the numbers inside the tuple; -1 is special value meaning "infinite window size". + moe_router_load_balancing_type (str): Determines the load balancing strategy for the router. "aux_loss" corresponds to the load balancing loss used in GShard and SwitchTransformer, "sinkhorn" corresponds to the balancing algorithm used in S-BASE, and "none" implies no load balancing. The default is "aux_loss". + moe_router_topk (int): Number of experts to route to for each token. The default is 2. + moe_grouped_gemm (bool): When there are multiple experts per rank, compress multiple local (potentially small) + gemms in a single kernel launch to improve the utilization and performance by leveraging the Grouped GEMM feature introduced since CUTLASS 2.8 (https://github.com/fanshiqing/grouped_gemm). + moe_aux_loss_coeff (float): Scaling coefficient for the aux loss: a starting value of 1e-2 is recommended. + moe_z_loss_coeff (float): Scaling coefficient for the z-loss: a starting value of 1e-3 is recommended. + moe_input_jitter_eps (float): Add noise to the input tensor by applying jitter with a specified epsilon value. + moe_token_dropping (bool): This feature involves selectively dropping and padding tokens for each expert to achieve a specified capacity, similar to GShard, Switch-Transformer, and DeepSpeed-MoE. Note: Currently unsupported. 
""" # model architecture @@ -71,9 +83,12 @@ class TransformerConfig(ModelParallelConfig): layernorm_epsilon: float = 1e-5 layernorm_zero_centered_gamma: bool = False add_bias_linear: bool = True + add_qkv_bias: bool = False gated_linear_unit: bool = False activation_func: Callable = F.gelu num_moe_experts: int = None + rotary_interleaved: bool = False + window_size: Optional[Tuple[int, int]] = None # initialization init_method: Callable = None @@ -87,10 +102,12 @@ class TransformerConfig(ModelParallelConfig): # communication # fusion - bias_gelu_fusion: bool = False # TODO: this should be bias_activation_fusion ? + bias_activation_fusion: bool = False masked_softmax_fusion: bool = False persist_layer_norm: bool = False + memory_efficient_layer_norm: bool = False bias_dropout_fusion: bool = False # TODO: this should be bias_dropout_add_fusion? + apply_rope_fusion: bool = False # activation recomputation recompute_granularity: str = None @@ -112,6 +129,19 @@ class TransformerConfig(ModelParallelConfig): # experimental section (TODO: move to apt. section above once stable) normalization: bool = "LayerNorm" # alt value supported by TE: "RMSNorm" + # MoE related + moe_router_load_balancing_type: str = "aux_loss" + moe_router_topk: int = 2 + moe_grouped_gemm: bool = False + moe_aux_loss_coeff: float = 0 # 1e-2 would be a good start value for load balance loss. + moe_z_loss_coeff: float = None # 1e-3 would be a good start value for z-loss + moe_input_jitter_eps: float = None + moe_token_dropping: bool = False # TODO: Support token dropping. + + # These 2 attributes are WAR for TRTLLM export. DO NOT USE!! WILL BE DEPRECATED SOON!! + max_position_embeddings: int = 0 + rotary_percent: float = 0 + def __post_init__(self): """ Python dataclass method that is used to modify attributes after initialization. See https://docs.python.org/3/library/dataclasses.html#post-init-processing for more details. @@ -149,6 +179,26 @@ def __post_init__(self): if self.expert_model_parallel_size > 1 and self.num_moe_experts is None: raise ValueError(f'num_moe_experts must be non None to use expert-parallel.') + if self.num_moe_experts is not None and self.num_moe_experts <= 0: + raise ValueError(f'num_moe_experts must be non-negative.') + + if self.cpu_offloading and ( + self.cpu_offloading_num_layers < 0 or self.cpu_offloading_num_layers >= self.num_layers + ): + raise ValueError( + f'CPU offloading can be done only for layers less than {self.num_layers}' + ) + + if self.cpu_offloading and self.pipeline_model_parallel_size > 1: + raise ValueError( + f'Currently there is no support for Pipeline parallelism with CPU offloading' + ) + + if self.cpu_offloading and self.recompute_granularity is not None: + raise ValueError( + f'CPU offloading does not work when activation recomputation is enabled' + ) + if self.recompute_granularity is not None: if not self.recompute_granularity in ['full', 'selective']: raise ValueError( @@ -191,14 +241,17 @@ def __post_init__(self): if self.apply_query_key_layer_scaling: self.attention_softmax_in_fp32 = True - if self.bias_gelu_fusion: - if not self.add_bias_linear: + if self.bias_activation_fusion: + if self.activation_func not in [F.gelu, F.silu]: raise ValueError( - "When bias_gelu_fusion is True, add_bias_linear must also be True." 
+ "When bias_activation_fusion is True, activation function should be either gelu or swiglu" ) - - if self.activation_func != F.gelu: - raise ValueError(f'When bias_gelu_fusion is True, activation_func must be F.gelu.') + if self.activation_func == F.gelu and not self.add_bias_linear: + raise ValueError( + "When bias_activation_fusion is True and activation function is gelu, add_bias_linear must also be True." + ) + if self.apply_rope_fusion and self.rotary_interleaved: + raise ValueError(f'rotary_interleaved does not work with apply_rope_fusion.') if self.init_method is None: self.init_method = init_method_normal(self.init_method_std) diff --git a/megatron/core/transformer/transformer_layer.py b/megatron/core/transformer/transformer_layer.py index b9951d4347..75f26b3f0c 100644 --- a/megatron/core/transformer/transformer_layer.py +++ b/megatron/core/transformer/transformer_layer.py @@ -1,18 +1,21 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. -from dataclasses import dataclass -from typing import Union +from abc import ABC +from dataclasses import dataclass, field +from typing import Dict, Union import torch from megatron.core import parallel_state -from megatron.core.dist_checkpointing.mapping import ShardedObject, ShardedTensor +from megatron.core.dist_checkpointing.mapping import ShardedStateDict +from megatron.core.dist_checkpointing.utils import apply_prefix_mapping from megatron.core.transformer.enums import AttnMaskType from megatron.core.transformer.identity_op import IdentityFuncOp, IdentityOp from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.spec_utils import ModuleSpec, build_module from megatron.core.transformer.transformer_config import TransformerConfig from megatron.core.utils import make_viewless_tensor +from megatron.tensor_logging import log_tensor @dataclass @@ -29,8 +32,27 @@ class TransformerLayerSubmodules: mlp: Union[ModuleSpec, type] = IdentityOp mlp_bda: Union[ModuleSpec, type] = IdentityFuncOp + # Mapping for sharded tensor keys to be applied in `sharded_state_dict` method + sharded_state_dict_keys_map: Dict[str, str] = field(default_factory=dict) -class TransformerLayer(MegatronModule): + +class BaseTransformerLayer(ABC): + """ A common parent class for `TransformerLayer` like implementations. + + A dummy class that is subclassed by similar `TransformerLayer`s e.g. the + `TransformerLayer` in this file and possibly other `TransformerLayer` + implementations that aim to use `TransformerBlock` as the base module. + The main purpose is to check if any layer (or module) provided in the spec + is a subclass of this class to allow fanning-out of that spec for all the + layers in the `TransformerBlock`. See `_get_block_submodules` method + implementation in `transformer_block.py` file for more details. + """ + + def __init__(self): + pass + + +class TransformerLayer(MegatronModule, BaseTransformerLayer): """A single transformer layer. 
Transformer layer takes input with size [s, b, h] and returns an @@ -45,6 +67,11 @@ def __init__( hidden_dropout: float = None, ): super().__init__(config=config) + self.submodules_config = submodules + + from megatron import get_args + args = get_args() + self._debug_transformer=args.debug_transformer self.layer_number = layer_number + self._get_layer_offset() self.hidden_dropout = config.hidden_dropout if hidden_dropout is None else hidden_dropout @@ -92,7 +119,7 @@ def __init__( ## [Module 8: MLP block] # TODO how to set the gpt_layer_spec.py when we have moe_frequency > 1, - # where MLP and SwitchMLP both appear alternately? + # where MLP and MoE layer both appear alternately? self.mlp = build_module(submodules.mlp, config=self.config) ## [Module 9: BiasDropoutFusion] @@ -140,6 +167,7 @@ def forward( context_mask=None, rotary_pos_emb=None, inference_params=None, + packed_seq_params=None, ): # hidden_states: [s, b, h] @@ -155,6 +183,7 @@ def forward( attention_mask=attention_mask, inference_params=inference_params, rotary_pos_emb=rotary_pos_emb, + packed_seq_params=packed_seq_params, ) # TODO: could we move `bias_dropout_add_exec_handler` itself @@ -167,6 +196,19 @@ def forward( # Residual connection. residual = hidden_states + if self._debug_transformer: + attention_output, attention_bias = attention_output_with_bias + log_tensor( + f"Layer {self.layer_number} norm 1", + input_layernorm_output.transpose(0,1), + level=self._debug_transformer + ) + log_tensor( + f"Layer {self.layer_number} Attn output", + (attention_output if attention_bias is None else attention_output + attention_bias).transpose(0,1), + level=self._debug_transformer + ) + log_tensor(f"Layer {self.layer_number} Attn residual", residual.transpose(0,1), level=self._debug_transformer) # Optional Layer norm after self-attention pre_cross_attn_layernorm_output = self.pre_cross_attn_layernorm(hidden_states) @@ -214,32 +256,27 @@ def forward( inp=hidden_states, requires_grad=hidden_states.requires_grad, keep_graph=True ) - return output, context - - def sharded_state_dict(self, prefix=''): - offset = self._get_layer_offset() - num_layers = self.config.num_layers - - global_layer_offset = self.layer_number - 1 # self.layer_number starts at 1 - state_dict_prefix = ( - f'{prefix}{global_layer_offset - offset}.' 
# module list index in TransformerBlock - ) - sharded_pp_offset = [ - (0, global_layer_offset, num_layers) - ] # PP sharding offset for ShardedTensors - - attn_state_dict = self.self_attention.sharded_state_dict( - prefix=f'{state_dict_prefix}self_attention.', - sharded_key_prefix=f'{prefix}self_attention.', - sharded_offsets=sharded_pp_offset, - ) - - mlp_state_dict = self.mlp.sharded_state_dict( - prefix=f'{state_dict_prefix}mlp.', - sharded_key_prefix=f'{prefix}mlp.', - sharded_offsets=sharded_pp_offset, - ) + if self._debug_transformer: + mlp_output, mlp_bias = mlp_output_with_bias if isinstance(mlp_output_with_bias, tuple) else (mlp_output_with_bias, None) + log_tensor( + f"Layer {self.layer_number} norm 2", + pre_mlp_layernorm_output.transpose(0,1), + level=self._debug_transformer + ) + log_tensor( + f"Layer {self.layer_number} MLP output", + (mlp_output if mlp_bias is None else mlp_output + mlp_bias).transpose(0,1), + level=self._debug_transformer + ) - sharded_state_dict = {**mlp_state_dict, **attn_state_dict} + return output, context + def sharded_state_dict(self, prefix: str = '', sharded_offsets: tuple = ()) -> ShardedStateDict: + sharded_state_dict = super().sharded_state_dict(prefix, sharded_offsets) + prefixed_map = { + f'{prefix}{k}': f'{prefix}{v}' + for k, v in self.submodules_config.sharded_state_dict_keys_map.items() + } + if prefixed_map: + apply_prefix_mapping(sharded_state_dict, prefixed_map) return sharded_state_dict diff --git a/megatron/core/transformer/utils.py b/megatron/core/transformer/utils.py index d7d002734f..d128255aa8 100644 --- a/megatron/core/transformer/utils.py +++ b/megatron/core/transformer/utils.py @@ -1,13 +1,15 @@ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. """Utilities for transformer layers.""" +from functools import lru_cache from operator import itemgetter -from typing import Any, Dict, Iterable, Optional, Tuple, Union +from typing import Any, Dict, Iterable, Iterator, Optional, Tuple, Union import torch from megatron.core import parallel_state -from megatron.core.dist_checkpointing.mapping import ShardedObject, StateDict +from megatron.core.dist_checkpointing.mapping import ShardedObject, ShardedStateDict, StateDict +from megatron.core.jit import jit_fuser from megatron.core.utils import ( make_sharded_tensor_for_checkpoint, make_tp_sharded_tensor_for_checkpoint, @@ -24,12 +26,18 @@ def get_linear_layer(rows, columns, init_method, perform_initialization=True): return layer +@lru_cache(maxsize=32) +def get_default_causal_mask(sq: int) -> torch.Tensor: + """Return the causal upper triangular mask for softmax input.""" + return torch.triu(torch.ones(sq, sq, device="cuda"), diagonal=1).bool() + + def attention_mask_func(attention_scores, attention_mask): attention_scores.masked_fill_(attention_mask, -10000.0) return attention_scores -@torch.jit.script +@jit_fuser def gelu_impl(x): """OpenAI's gelu implementation.""" return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x * (1.0 + 0.044715 * x * x))) @@ -40,7 +48,7 @@ def openai_gelu(x): # This is actually Python equivalent of torch.nn.functional.gelu(), also with type hints for ONNX exporter -@torch.jit.script +@jit_fuser def erf_gelu(x): return ( x * 0.5 * (torch.erf(x / 1.41421).to(dtype=x.dtype) + torch.ones_like(x).to(dtype=x.dtype)) @@ -49,8 +57,7 @@ def erf_gelu(x): def make_sharded_tensors_for_checkpoint( state_dict: StateDict, - state_dict_prefix: str, - sharded_key_prefix: Optional[str] = None, + prefix: str, tensor_parallel_layers_axis_map: Optional[Dict[str, int]] 
= None, sharded_offsets: Iterable[Tuple[int, int, int]] = (), extra_state_suffix: str = '_extra_state', @@ -64,8 +71,7 @@ def make_sharded_tensors_for_checkpoint( Args: state_dict (StateDict): state_dict to convert - state_dict_prefix (str): prefix appended to keys in final state dict - sharded_key_prefix (str, optional): prefix appended to ShardedTensor keys + prefix (str): prefix appended to keys in final state dict tensor_parallel_layers_axis_map (Dict[str, int], optional): dict mapping layer names to the axis for TP sharding sharded_offsets (Iterable[Tuple[int, int, int]], optional): sharding already @@ -74,8 +80,6 @@ def make_sharded_tensors_for_checkpoint( suffix will be wrapped with ShardedObject instead of ShardedTensor. """ - if sharded_key_prefix is None: - sharded_key_prefix = state_dict_prefix if tensor_parallel_layers_axis_map is None: tensor_parallel_layers_axis_map = {} @@ -83,23 +87,22 @@ def make_sharded_tensors_for_checkpoint( sharded_state_dict = {} for layer_name in state_dict.keys(): tensor = state_dict[layer_name] - layer_key = f'{state_dict_prefix}{layer_name}' - sharded_key = f'{sharded_key_prefix}{layer_name}' + layer_key = f'{prefix}{layer_name}' if layer_name.endswith(extra_state_suffix): sharded_state_dict[layer_key] = make_sharded_object_for_checkpoint( - tensor, sharded_key, sharded_offsets + tensor, layer_key, sharded_offsets ) elif layer_name in tensor_parallel_layers_axis_map: tp_axis = tensor_parallel_layers_axis_map[layer_name] sharded_state_dict[layer_key] = make_tp_sharded_tensor_for_checkpoint( - tensor, sharded_key, tp_axis, prepend_offsets=sharded_offsets, + tensor, layer_key, tp_axis, prepend_offsets=sharded_offsets, ) else: sharded_state_dict[layer_key] = make_sharded_tensor_for_checkpoint( - tensor, sharded_key, prepend_offsets=sharded_offsets, + tensor, layer_key, prepend_offsets=sharded_offsets, ) return sharded_state_dict @@ -126,7 +129,7 @@ def make_sharded_object_for_checkpoint( replica_id = ( 0, parallel_state.get_tensor_model_parallel_rank(), - parallel_state.get_data_parallel_rank(), + parallel_state.get_data_parallel_rank(with_context_parallel=True), ) return ShardedObject(key, obj, *_get_extra_state_offsets(sharded_offsets), replica_id, **kwargs) @@ -146,3 +149,36 @@ def _get_extra_state_offsets( extra_state_shape = (1,) extra_state_offset = (0,) return extra_state_shape, extra_state_offset + + +def sharded_state_dict_default( + module: torch.nn.Module, prefix: str = '', sharded_offsets: Tuple[Tuple[int, int, int]] = () +) -> ShardedStateDict: + """Provides implementation for sharded_state_dict method for non-MegatronModules. + + Tries to call `module.sharded_state_dict` when possible, + otherwise uses regular state dict and assumes tensors are replicated across TP and DP. + + `keep_vars=True` is passed to module.state_dict so that optimizer states + can be sharded later on. + + Args: + module (torch.nn.Module): module which sharded state dict we want to obtain + prefix (str): prefix for the state dict keys + sharded_offsets (Tuple[Tuple[int, int, int]], optional): sharding already + applied (e.g. PP related) by sup-modules. 
Passed along to ShardedTensor + + Returns: + dict: dictionary of state dict keys mapped to ShardedTensors + """ + + if hasattr(module, 'sharded_state_dict'): + module_sharded_sd = module.sharded_state_dict( + prefix=prefix, sharded_offsets=sharded_offsets, + ) + else: + module_sd = module.state_dict(prefix='', keep_vars=True) + module_sharded_sd = make_sharded_tensors_for_checkpoint( + module_sd, prefix, {}, sharded_offsets, + ) + return module_sharded_sd diff --git a/megatron/core/utils.py b/megatron/core/utils.py index d4e042b2d4..bcf9cab55a 100644 --- a/megatron/core/utils.py +++ b/megatron/core/utils.py @@ -194,7 +194,7 @@ def make_tp_sharded_tensor_for_checkpoint( prepend_axis_num = len(prepend_offsets) if replica_id is None: - replica_id = (0, 0, parallel_state.get_data_parallel_rank()) + replica_id = (0, 0, parallel_state.get_data_parallel_rank(with_context_parallel=True)) return ShardedTensor.from_rank_offsets( key, @@ -223,7 +223,7 @@ def make_sharded_tensor_for_checkpoint(tensor, key, prepend_offsets=(), replica_ replica_id = ( 0, parallel_state.get_tensor_model_parallel_rank(), - parallel_state.get_data_parallel_rank(), + parallel_state.get_data_parallel_rank(with_context_parallel=True), ) return ShardedTensor.from_rank_offsets( diff --git a/megatron/data/bert_dataset.py b/megatron/data/bert_dataset.py deleted file mode 100644 index 036e6bccc9..0000000000 --- a/megatron/data/bert_dataset.py +++ /dev/null @@ -1,183 +0,0 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. - -"""BERT Style dataset.""" - -import numpy as np -import torch - -from megatron import ( - get_args, - get_tokenizer, - mpu, - print_rank_0 -) -from megatron.data.dataset_utils import ( - get_samples_mapping, - get_a_and_b_segments, - truncate_segments, - create_tokens_and_tokentypes, - create_masked_lm_predictions -) - -class BertDataset(torch.utils.data.Dataset): - - def __init__(self, name, indexed_dataset, data_prefix, - num_epochs, max_num_samples, masked_lm_prob, - max_seq_length, short_seq_prob, seed, binary_head): - - # Params to store. - self.name = name - self.seed = seed - self.masked_lm_prob = masked_lm_prob - self.max_seq_length = max_seq_length - self.binary_head = binary_head - - # Dataset. - self.indexed_dataset = indexed_dataset - - # Build the samples mapping. - self.samples_mapping = get_samples_mapping(self.indexed_dataset, - data_prefix, - num_epochs, - max_num_samples, - self.max_seq_length - 3, # account for added tokens - short_seq_prob, - self.seed, - self.name, - self.binary_head) - - # Vocab stuff. - tokenizer = get_tokenizer() - self.vocab_id_list = list(tokenizer.inv_vocab.keys()) - self.vocab_id_to_token_dict = tokenizer.inv_vocab - self.cls_id = tokenizer.cls - self.sep_id = tokenizer.sep - self.mask_id = tokenizer.mask - self.pad_id = tokenizer.pad - - def __len__(self): - return self.samples_mapping.shape[0] - - def __getitem__(self, idx): - start_idx, end_idx, seq_length = self.samples_mapping[idx] - sample = [self.indexed_dataset[i] for i in range(start_idx, end_idx)] - # Note that this rng state should be numpy and not python since - # python randint is inclusive whereas the numpy one is exclusive. 
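# CPU-runnable sketch of the cached causal mask added as get_default_causal_mask in
# megatron/core/transformer/utils.py above; the real helper allocates on "cuda", but the
# idea is identical: memoize one upper-triangular mask per sequence length.
from functools import lru_cache
import torch

@lru_cache(maxsize=32)
def causal_mask(sq: int) -> torch.Tensor:
    # True above the diagonal marks future positions to be masked out.
    return torch.triu(torch.ones(sq, sq), diagonal=1).bool()

scores = torch.randn(4, 4)
masked = scores.masked_fill(causal_mask(4), -10000.0)
assert causal_mask(4) is causal_mask(4)   # repeated calls reuse the cached tensor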
- # We % 2**32 since numpy requres the seed to be between 0 and 2**32 - 1 - np_rng = np.random.RandomState(seed=((self.seed + idx) % 2**32)) - return build_training_sample(sample, seq_length, - self.max_seq_length, # needed for padding - self.vocab_id_list, - self.vocab_id_to_token_dict, - self.cls_id, self.sep_id, - self.mask_id, self.pad_id, - self.masked_lm_prob, np_rng, - self.binary_head) - - - - -def build_training_sample(sample, - target_seq_length, max_seq_length, - vocab_id_list, vocab_id_to_token_dict, - cls_id, sep_id, mask_id, pad_id, - masked_lm_prob, np_rng, binary_head): - """Biuld training sample. - - Arguments: - sample: A list of sentences in which each sentence is a list token ids. - target_seq_length: Desired sequence length. - max_seq_length: Maximum length of the sequence. All values are padded to - this length. - vocab_id_list: List of vocabulary ids. Used to pick a random id. - vocab_id_to_token_dict: A dictionary from vocab ids to text tokens. - cls_id: Start of example id. - sep_id: Separator id. - mask_id: Mask token id. - pad_id: Padding token id. - masked_lm_prob: Probability to mask tokens. - np_rng: Random number genenrator. Note that this rng state should be - numpy and not python since python randint is inclusive for - the opper bound whereas the numpy one is exclusive. - """ - - if binary_head: - # We assume that we have at least two sentences in the sample - assert len(sample) > 1 - assert target_seq_length <= max_seq_length - - # Divide sample into two segments (A and B). - if binary_head: - tokens_a, tokens_b, is_next_random = get_a_and_b_segments(sample, - np_rng) - else: - tokens_a = [] - for j in range(len(sample)): - tokens_a.extend(sample[j]) - tokens_b = [] - is_next_random = False - - # Truncate to `target_sequence_length`. - max_num_tokens = target_seq_length - truncated = truncate_segments(tokens_a, tokens_b, len(tokens_a), - len(tokens_b), max_num_tokens, np_rng) - - # Build tokens and toketypes. - tokens, tokentypes = create_tokens_and_tokentypes(tokens_a, tokens_b, - cls_id, sep_id) - - # Masking. - max_predictions_per_seq = masked_lm_prob * max_num_tokens - (tokens, masked_positions, masked_labels, _, _) = create_masked_lm_predictions( - tokens, vocab_id_list, vocab_id_to_token_dict, masked_lm_prob, - cls_id, sep_id, mask_id, max_predictions_per_seq, np_rng) - - # Padding. - tokens_np, tokentypes_np, labels_np, padding_mask_np, loss_mask_np \ - = pad_and_convert_to_numpy(tokens, tokentypes, masked_positions, - masked_labels, pad_id, max_seq_length) - - train_sample = { - 'text': tokens_np, - 'types': tokentypes_np, - 'labels': labels_np, - 'is_random': int(is_next_random), - 'loss_mask': loss_mask_np, - 'padding_mask': padding_mask_np, - 'truncated': int(truncated)} - return train_sample - - -def pad_and_convert_to_numpy(tokens, tokentypes, masked_positions, - masked_labels, pad_id, max_seq_length): - """Pad sequences and convert them to numpy.""" - - # Some checks. - num_tokens = len(tokens) - padding_length = max_seq_length - num_tokens - assert padding_length >= 0, \ - f"num_tokens ({num_tokens}) is greater than " \ - "max_seq_length ({max_seq_length})." - assert len(tokentypes) == num_tokens - assert len(masked_positions) == len(masked_labels) - - # Tokens and token types. - filler = [pad_id] * padding_length - tokens_np = np.array(tokens + filler, dtype=np.int64) - tokentypes_np = np.array(tokentypes + filler, dtype=np.int64) - - # Padding mask. 
- padding_mask_np = np.array([1] * num_tokens + [0] * padding_length, - dtype=np.int64) - - # Lables and loss mask. - labels = [-1] * max_seq_length - loss_mask = [0] * max_seq_length - for i in range(len(masked_positions)): - assert masked_positions[i] < num_tokens - labels[masked_positions[i]] = masked_labels[i] - loss_mask[masked_positions[i]] = 1 - labels_np = np.array(labels, dtype=np.int64) - loss_mask_np = np.array(loss_mask, dtype=np.int64) - - return tokens_np, tokentypes_np, labels_np, padding_mask_np, loss_mask_np diff --git a/megatron/data/data_samplers.py b/megatron/data/data_samplers.py index 8dec2c1922..e9622fe3d0 100644 --- a/megatron/data/data_samplers.py +++ b/megatron/data/data_samplers.py @@ -11,7 +11,7 @@ from megatron.core import mpu -def build_pretraining_data_loader(dataset, consumed_samples): +def build_pretraining_data_loader(dataset, consumed_samples, num_workers=None): """Buld dataloader given an input dataset.""" if dataset is None: @@ -39,11 +39,14 @@ def build_pretraining_data_loader(dataset, consumed_samples): raise Exception('{} dataloader type is not supported.'.format( args.dataloader_type)) + num_workers = args.num_workers if num_workers is None else num_workers # Torch dataloader. return torch.utils.data.DataLoader(dataset, batch_sampler=batch_sampler, - num_workers=args.num_workers, - pin_memory=True) + num_workers=num_workers, + pin_memory=True, + persistent_workers=True if args.num_workers > 0 else False, + ) class MegatronPretrainingSampler: diff --git a/megatron/data/dataset_utils.py b/megatron/data/dataset_utils.py index e8e5855db4..a7f45f5b32 100644 --- a/megatron/data/dataset_utils.py +++ b/megatron/data/dataset_utils.py @@ -535,11 +535,12 @@ def build_dataset(name, data_prefix, max_num_samples, max_seq_length_dec, dataset_type='standard_bert', indexed_dataset=None): - from megatron.data.bert_dataset import BertDataset from megatron.data.ict_dataset import ICTDataset - from megatron.data.t5_dataset import T5Dataset from megatron.data.multimodal_dataset import MultiModalDataset + if dataset_type == DSET_TYPE_BERT or dataset_type == DSET_TYPE_T5: + raise ValueError("The Megatron-LM BERT and T5 datasets are deprecated.") + if dataset_type not in DSET_TYPES: raise ValueError("Invalid dataset_type: ", dataset_type) @@ -571,24 +572,6 @@ def build_dataset(name, data_prefix, max_num_samples, binary_head=binary_head, **kwargs ) - elif dataset_type == DSET_TYPE_T5: - args = get_args() - dataset = T5Dataset( - indexed_dataset=indexed_dataset, - masked_lm_prob=args.mask_prob, - max_seq_length_dec=max_seq_length_dec, - short_seq_prob=args.short_seq_prob, - **kwargs - ) - elif dataset_type == DSET_TYPE_BERT: - args = get_args() - dataset = BertDataset( - indexed_dataset=indexed_dataset, - masked_lm_prob=args.mask_prob, - short_seq_prob=args.short_seq_prob, - binary_head=binary_head, - **kwargs - ) elif dataset_type == DSET_TYPE_MULTIMODAL: args = get_args() dataset = MultiModalDataset( diff --git a/megatron/data/t5_dataset.py b/megatron/data/t5_dataset.py deleted file mode 100644 index 075b089f8e..0000000000 --- a/megatron/data/t5_dataset.py +++ /dev/null @@ -1,258 +0,0 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
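# Minimal, self-contained illustration of the data_samplers.py change above:
# persistent_workers keeps worker processes alive across epochs, and is only valid when
# num_workers > 0, hence the conditional. Dataset and sizes here are made up.
import torch
from torch.utils.data import DataLoader, TensorDataset

if __name__ == "__main__":
    dataset = TensorDataset(torch.arange(8, dtype=torch.float32))
    num_workers = 2
    loader = DataLoader(
        dataset,
        batch_size=4,
        num_workers=num_workers,
        pin_memory=True,
        persistent_workers=num_workers > 0,
    )
    for epoch in range(2):                 # workers survive between these epochs
        for (batch,) in loader:
            print(epoch, batch)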
- -"""T5 Style dataset.""" - -import collections - -import numpy as np -import torch - -from megatron import get_tokenizer -from megatron.data.dataset_utils import ( - create_masked_lm_predictions, - get_samples_mapping -) - -class T5Dataset(torch.utils.data.Dataset): - - def __init__(self, name, indexed_dataset, data_prefix, - num_epochs, max_num_samples, masked_lm_prob, - max_seq_length, max_seq_length_dec, - short_seq_prob, seed): - - # Params to store. - self.name = name - self.desc = name - self.seed = seed - self.masked_lm_prob = masked_lm_prob - self.max_seq_length = max_seq_length - self.max_seq_length_dec = max_seq_length_dec - - # Dataset. - self.indexed_dataset = indexed_dataset - - # Build the samples mapping. - self.samples_mapping = get_samples_mapping(self.indexed_dataset, - data_prefix, - num_epochs, - max_num_samples, - self.max_seq_length - 2, # account for added tokens - short_seq_prob, - self.seed, - self.name, - False) - - # Vocab stuff. - tokenizer = get_tokenizer() - self.vocab_id_list = list(tokenizer.inv_vocab.keys()) - self.vocab_id_to_token_dict = tokenizer.inv_vocab - self.cls_id = tokenizer.cls - self.sep_id = tokenizer.sep - self.mask_id = tokenizer.mask - self.pad_id = tokenizer.pad - self.bos_id = tokenizer.bos_token_id - self.eos_id = tokenizer.eos_token_id - self.sentinel_tokens = tokenizer.additional_special_tokens_ids - assert len(self.sentinel_tokens) > 0, "Provide the argument --vocab-extra-ids 100 to the script" - - def __len__(self): - return self.samples_mapping.shape[0] - - def __getitem__(self, idx): - - start_index, end_index, seq_length = self.samples_mapping[idx] - sample = [] - for index in range(start_index, end_index): - sample.append(self.indexed_dataset[index]) - # Note that this rng state should be numpy and not python since - # python randint is inclusive whereas the numpy one is exclusive. - np_rng = np.random.RandomState(seed=(self.seed + idx)) - return build_training_sample(sample, seq_length, - self.max_seq_length, # needed for padding - self.max_seq_length_dec, - self.vocab_id_list, - self.vocab_id_to_token_dict, - self.cls_id, self.sep_id, - self.mask_id, self.pad_id, - self.masked_lm_prob, np_rng, - self.bos_id, self.eos_id, - self.sentinel_tokens) - - -def build_training_sample(sample, target_seq_length, - max_seq_length, max_seq_length_dec, - vocab_id_list, vocab_id_to_token_dict, - cls_id, sep_id, mask_id, pad_id, - masked_lm_prob, np_rng, bos_id=None, - eos_id=None, sentinel_tokens=None): - """Build training sample. - - Arguments: - sample: A list of sentences in which each sentence is a list token ids. - target_seq_length: Desired sequence length. - max_seq_length: Maximum length of the sequence. All values are padded to - this length. - vocab_id_list: List of vocabulary ids. Used to pick a random id. - vocab_id_to_token_dict: A dictionary from vocab ids to text tokens. - cls_id: Start of example id. - sep_id: Separator id. - mask_id: Mask token id. - pad_id: Padding token id. - masked_lm_prob: Probability to mask tokens. - np_rng: Random number genenrator. Note that this rng state should be - numpy and not python since python randint is inclusive for - the opper bound whereas the numpy one is exclusive. 
- bos_id: start of decoder example id - eos_id: end of generation id - sentinel_tokens: unique value to be substituted for every replaced span - """ - - assert target_seq_length <= max_seq_length - - # flatten sentences into one list - tokens = [token for sentence in sample for token in sentence] - - # Truncate to `target_sequence_length`. - max_num_tokens = target_seq_length - truncated = len(tokens) > max_num_tokens - tokens = tokens[:max_num_tokens] - - # Masking. - max_predictions_per_seq = masked_lm_prob * max_num_tokens - (tokens, masked_positions, masked_labels, _, masked_spans) = create_masked_lm_predictions( - tokens, vocab_id_list, vocab_id_to_token_dict, masked_lm_prob, - cls_id, sep_id, mask_id, max_predictions_per_seq, np_rng, - max_ngrams=10, geometric_dist=True, masking_style="t5") - - # Padding. - tokens_enc, tokens_dec_in, labels, enc_mask, \ - dec_mask, enc_dec_mask, loss_mask \ - = pad_and_convert_to_numpy(tokens, masked_positions, - masked_labels, pad_id, max_seq_length, - max_seq_length_dec, masked_spans, - bos_id, eos_id, sentinel_tokens) - - train_sample = { - 'text_enc': tokens_enc, - 'text_dec': tokens_dec_in, - 'labels': labels, - 'loss_mask': loss_mask, - 'truncated': int(truncated), - 'enc_mask': enc_mask, - 'dec_mask': dec_mask, - 'enc_dec_mask': enc_dec_mask, - } - return train_sample - - -def pad_and_convert_to_numpy(tokens, masked_positions, - masked_labels, pad_id, - max_seq_length, max_seq_length_dec, - masked_spans=None, bos_id=None, - eos_id=None, sentinel_tokens=None): - """Pad sequences and convert them to numpy.""" - - sentinel_tokens = collections.deque(sentinel_tokens) - t5_input = [] - (t5_decoder_in, t5_decoder_out) = ([bos_id], []) - (start_index, end_index) = (0, None) - for span in masked_spans: - flag = sentinel_tokens.popleft() - - # Append the same tokens in decoder input and output - t5_decoder_in.append(flag) - t5_decoder_in.extend(span.label) - t5_decoder_out.append(flag) - t5_decoder_out.extend(span.label) - - end_index = span.index[0] - t5_input.extend(tokens[start_index: end_index]) - t5_input.append(flag) - - # the next start index is the token after the last span token - start_index = span.index[-1] + 1 - - # Add token to the t5_decoder_out - t5_decoder_out.append(eos_id) - - # Add the remaining tokens to the t5 input - t5_input.extend(tokens[start_index:]) - - # assert (len(t5_input) - len(masked_spans)) + \ - # (len(t5_decoder_in) - (len(masked_spans) + 1)) == len(tokens) - - # Some checks. - - # Encoder-side padding mask. - num_tokens = len(t5_input) - padding_length = max_seq_length - num_tokens - assert padding_length >= 0 - assert len(masked_positions) == len(masked_labels) - - # Tokens.. - filler = [pad_id] * padding_length - tokens_enc = np.array(t5_input + filler, dtype=np.int64) - - # Decoder-side padding mask. - num_tokens_dec = len(t5_decoder_in) - padding_length_dec = max_seq_length_dec - num_tokens_dec - assert padding_length_dec >= 0 - filler_dec = [pad_id] * padding_length_dec - tokens_dec_in = np.array(t5_decoder_in + filler_dec, dtype=np.int64) - - # Create attention masks - enc_mask = make_attention_mask(tokens_enc, tokens_enc) - enc_dec_mask = make_attention_mask(tokens_dec_in, tokens_enc) - dec_mask = make_attention_mask(tokens_dec_in, tokens_dec_in) - dec_mask = dec_mask * make_history_mask(tokens_dec_in) - - # Labels mask. 
- labels = t5_decoder_out + ([-1] * padding_length_dec) - labels = np.array(labels, dtype=np.int64) - - # Loss mask - loss_mask = ([1] * num_tokens_dec) + ([0] * padding_length_dec) - loss_mask = np.array(loss_mask, dtype=np.int64) - - return tokens_enc, tokens_dec_in, labels, enc_mask, \ - dec_mask, enc_dec_mask, loss_mask - - -def make_attention_mask(source_block, target_block): - """ - Returns a 2-dimensional (2-D) attention mask - :param source_block: 1-D array - :param target_block: 1-D array - """ - mask = (target_block[None, :] >= 1) * (source_block[:, None] >= 1) - mask = mask.astype(np.int64) - # (source_length, target_length) - return mask - - -def make_attention_mask_3d(source_block, target_block): - """ - Returns a 3-dimensional (3-D) attention mask - :param source_block: 1-D array - :param target_block: 1-D array - """ - mask = (target_block[:, None, :] >= 1) * (source_block[:, :, None] >= 1) - # (batch, source_length, target_length) - # mask = mask.astype(np.int64) - return mask - - -def make_history_mask(block): - length = block.shape[0] - arange = np.arange(length) - history_mask = (arange[None, ] <= arange[:, None]) - history_mask = history_mask.astype(np.int64) - return history_mask - - -def make_history_mask_3d(block): - batch, length = block.shape - arange = torch.arange(length, device=block.device) - history_mask = (arange[None, ] <= arange[:, None])[None, ] - history_mask = history_mask.expand(batch, length, length) - return history_mask diff --git a/megatron/deploy/__init__.py b/megatron/deploy/__init__.py new file mode 100644 index 0000000000..f8011007a5 --- /dev/null +++ b/megatron/deploy/__init__.py @@ -0,0 +1 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. diff --git a/megatron/deploy/arguments.py b/megatron/deploy/arguments.py new file mode 100644 index 0000000000..c03e70cdb6 --- /dev/null +++ b/megatron/deploy/arguments.py @@ -0,0 +1,25 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + +def add_ammo_args(parser): + """Add additional arguments for ammo.""" + group = parser.add_argument_group(title="ammo-generic") + + group.add_argument( + "--ammo-load-classic-megatron-to-mcore", + action="store_true", + help="Load a classic megatron-lm checkpoint to a new megatron-core model.", + ) + group.add_argument( + "--ammo-convert-te-to-local-spec", + action="store_true", + help="Load a megatron-core transformer-engine checkpoint to a model with local spec.", + ) + group.add_argument( + "--ammo-quant-cfg", + type=str, + default=None, + choices=["int8_sq", "fp8", "int4_awq", "None"], + help="Algorithms supported by atq.quantize.", + ) + + return parser diff --git a/megatron/deploy/gpt/__init__.py b/megatron/deploy/gpt/__init__.py new file mode 100644 index 0000000000..f8011007a5 --- /dev/null +++ b/megatron/deploy/gpt/__init__.py @@ -0,0 +1 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. diff --git a/megatron/deploy/gpt/model_provider.py b/megatron/deploy/gpt/model_provider.py new file mode 100644 index 0000000000..39fb49f8c3 --- /dev/null +++ b/megatron/deploy/gpt/model_provider.py @@ -0,0 +1,73 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. 
+
+"""ModelOpt GPT model provider."""
+
+from typing import Union
+
+from megatron import get_args, print_rank_0
+from megatron.arguments import core_transformer_config_from_args
+from megatron.core.deploy.gpt.model_specs import get_gpt_layer_ammo_spec
+from megatron.core.deploy.gpt.state_dict_hooks import (
+    mcore_gpt_load_classic_state_dict_pre_hook,
+    mcore_gpt_load_te_state_dict_pre_hook,
+)
+from megatron.core.models.gpt import GPTModel as MCoreGPTModel
+
+
+def model_provider(
+    pre_process=True, post_process=True, parallel_output=True,
+) -> Union[MCoreGPTModel]:
+    """Builds the GPT model.
+
+    This model_provider only supports use_mcore_models=True.
+
+    Args:
+        pre_process (bool, optional): Set to true if you need to compute embeddings. Defaults to True.
+        post_process (bool, optional): Set to true if you want to compute output logits/loss. Defaults to True.
+        parallel_output (bool): Whether to allgather the output logits. This must be
+            True if `model_provider` is called in text_generation_server.
+
+    Returns:
+        Union[MCoreGPTModel]: The returned model
+    """
+    args = get_args()
+
+    print_rank_0("building GPT model ...")
+    config = core_transformer_config_from_args(get_args())
+
+    if args.use_mcore_models:
+        if args.spec is not None:
+            raise ValueError("Custom layer specs are not supported!")
+        else:
+            if args.num_experts is None:
+                transformer_layer_spec = get_gpt_layer_ammo_spec()
+            else:
+                raise ValueError("MoE is not supported for now!")
+
+        model_type = MCoreGPTModel
+        model_kwargs = {
+            "config": config,
+            "transformer_layer_spec": transformer_layer_spec,
+            "vocab_size": args.padded_vocab_size,
+            "max_sequence_length": args.max_position_embeddings,
+            "pre_process": pre_process,
+            "post_process": post_process,
+            "fp16_lm_cross_entropy": args.fp16_lm_cross_entropy,
+            "parallel_output": parallel_output,
+            "share_embeddings_and_output_weights": not args.untie_embeddings_and_output_weights,
+            "position_embedding_type": args.position_embedding_type,
+            "rotary_percent": args.rotary_percent,
+        }
+    else:
+        raise ValueError("Classic Megatron-LM models are not supported!")
+
+    model = model_type(**model_kwargs)
+    print_rank_0(str(model))
+
+    if args.use_mcore_models:
+        if args.ammo_load_classic_megatron_to_mcore:
+            model._register_load_state_dict_pre_hook(mcore_gpt_load_classic_state_dict_pre_hook)
+        elif args.ammo_convert_te_to_local_spec:
+            model._register_load_state_dict_pre_hook(mcore_gpt_load_te_state_dict_pre_hook)
+
+    return model
diff --git a/megatron/global_vars.py b/megatron/global_vars.py
index b1b4b043e8..50517657d2 100644
--- a/megatron/global_vars.py
+++ b/megatron/global_vars.py
@@ -7,9 +7,9 @@
 import torch
 
 from megatron import dist_signal_handler
+from megatron.core import Timers
 from megatron.tokenizer import build_tokenizer
 from .microbatches import build_num_microbatches_calculator
-from .timers import Timers
 
 _GLOBAL_ARGS = None
 _GLOBAL_RETRO_ARGS = None
@@ -17,6 +17,7 @@
 _GLOBAL_TOKENIZER = None
 _GLOBAL_TENSORBOARD_WRITER = None
 _GLOBAL_WANDB_WRITER = None
+_GLOBAL_ONE_LOGGER = None
 _GLOBAL_ADLR_AUTORESUME = None
 _GLOBAL_TIMERS = None
 _GLOBAL_SIGNAL_HANDLER = None
@@ -63,6 +64,11 @@ def get_wandb_writer():
     return _GLOBAL_WANDB_WRITER
 
 
+def get_one_logger():
+    """Return one logger. It can be None so no need
+    to check if it is initialized."""
+    return _GLOBAL_ONE_LOGGER
+
 def get_adlr_autoresume():
     """ADLR autoresume object.
It can be None so no need to check if it is initialized.""" @@ -100,6 +106,7 @@ def set_global_variables(args, build_tokenizer=True): _ = _build_tokenizer(args) _set_tensorboard_writer(args) _set_wandb_writer(args) + _set_one_logger(args) _set_adlr_autoresume(args) _set_timers(args) @@ -166,8 +173,13 @@ def _set_wandb_writer(args): _ensure_var_is_not_initialized(_GLOBAL_WANDB_WRITER, 'wandb writer') if getattr(args, 'wandb_project', '') and args.rank == (args.world_size - 1): + # Wandb login from file + api_key_path = os.environ.get("WANDB_API_KEY_PATH") + if api_key_path: + os.environ["WANDB_API_KEY"]=open(api_key_path,"r").read().strip() if args.wandb_exp_name == '': - raise ValueError("Please specify the wandb experiment name!") + name=os.path.basename(args.save) + print(f"Setting wandb experiment name to \"{name}\"") import wandb if args.wandb_save_dir: @@ -179,12 +191,35 @@ def _set_wandb_writer(args): 'dir': save_dir, 'name': args.wandb_exp_name, 'project': args.wandb_project, - 'config': vars(args)} + 'entity': args.wandb_entity_name, + 'group': args.wandb_group_name, + 'config': vars(args), + } os.makedirs(wandb_kwargs['dir'], exist_ok=True) wandb.init(**wandb_kwargs) _GLOBAL_WANDB_WRITER = wandb +def _set_one_logger(args): + global _GLOBAL_ONE_LOGGER + _ensure_var_is_not_initialized(_GLOBAL_ONE_LOGGER, 'one logger') + + if args.enable_one_logger and args.rank == (args.world_size - 1): + try: + from one_logger.core import OneLogger + config = { + 'project': args.one_logger_project, + 'entity': args.one_logger_entity, + 'name': args.one_logger_run_name + } + one_logger = OneLogger(config=config) + _GLOBAL_ONE_LOGGER = one_logger + except BaseException: + print('WARNING: one_logger package is required to enable e2e metrics ' + 'tracking. Try pip install ' + '--index-url=https://sc-hw-artf.nvidia.com/api/pypi/hwinf-ml-pypi/simple' + ' one_logger to install it') + def _set_adlr_autoresume(args): """Initialize ADLR autoresume.""" global _GLOBAL_ADLR_AUTORESUME @@ -220,4 +255,3 @@ def _ensure_var_is_not_initialized(var, name): assert var is None, '{} is already initialized.'.format(name) - diff --git a/megatron/initialize.py b/megatron/initialize.py index fb7866ab03..80d0953d16 100644 --- a/megatron/initialize.py +++ b/megatron/initialize.py @@ -2,8 +2,12 @@ """Megatron initialization.""" +import logging +import logging.config +import math import random import os +import sys import time import numpy as np @@ -16,6 +20,7 @@ from megatron import get_tensorboard_writer from megatron.core import mpu, tensor_parallel from megatron.arguments import parse_args, validate_args +from megatron.yaml_arguments import validate_yaml from megatron.checkpointing import load_args_from_checkpoint from megatron.global_vars import set_global_variables from megatron.model.transformer import bias_dropout_add_fused_train @@ -47,7 +52,11 @@ def initialize_megatron( assert args.load is not None, "--use-checkpoints-args requires --load argument" load_args_from_checkpoint(args) - validate_args(args, args_defaults) + if args.yaml_cfg is not None: + args = validate_yaml(args, args_defaults) + else: + validate_args(args, args_defaults) + # set global args, build tokenizer, and set adlr-autoresume, # tensorboard-writer, and timers. @@ -58,6 +67,7 @@ def finish_mpu_init(): args = get_args() # Pytorch distributed. _initialize_distributed() + _configure_logging() # Random seeds for reproducibility. 
if args.rank == 0: @@ -95,6 +105,58 @@ def finish_mpu_init(): return None + +def _configure_logging(): + args=get_args() + rank = torch.distributed.get_rank() + if args.structured_logs: + world_size=torch.distributed.get_world_size() + rank_str = str(rank).zfill(math.ceil(math.log10(world_size))) + format = f"%(asctime)s {'' if world_size==1 else f'[Rank {rank_str}] '}%(message)s" + else: + format=None + + logging_config = { + "version": 1, + "disable_existing_loggers": False, + "formatters": { + "default": { + "format": format, + "use_colors": True, + } + }, + "handlers": { + "default": { + "level": "INFO", + "formatter": "default", + "class": "logging.StreamHandler", + "stream": "ext://sys.stdout", + } + }, + "loggers": {"default": {"level": "DEBUG", "handlers": ["default"]}}, + "root": {"handlers": ["default"], "level": "INFO"}, + } + if args.structured_logs_dir is not None: + log_dir=args.structured_logs_dir + os.makedirs(log_dir, exist_ok=True) + logging_config["handlers"]["file"] = { + "level": "INFO", + "formatter": "default", + "class": "logging.FileHandler", + "filename": os.path.join(log_dir, f"logs_rank_{rank}.txt"), + } + logging_config["root"]["handlers"].append("file") + logging_config["loggers"]["default"]["handlers"].append("file") + logging.config.dictConfig(logging_config) + + if args.structured_logs: + # Add these methods so that stdout can be redirected to logging. + logging.write = lambda msg: logging.info(msg) if msg != '\n' else None + logging.flush = lambda : None + + sys.stdout=logging + sys.stderr=logging + def _compile_dependencies(): args = get_args() @@ -115,6 +177,15 @@ def _compile_dependencies(): flush=True, ) + try: + # Skip the rest if the kernels are unnecessary or already available (ex. from apex) + if args.use_flash_attn or args.masked_softmax_fusion: + import scaled_upper_triang_masked_softmax_cuda + import scaled_masked_softmax_cuda + return + except ImportError: + pass + # ================== # Load fused kernels # ================== @@ -318,7 +389,10 @@ def set_jit_fusion_options(): torch._C._jit_override_can_fuse_on_cpu(True) torch._C._jit_override_can_fuse_on_gpu(True) - _warmup_jit_function() + # Prevent the function from messing up the random state. + tensor_parallel.get_cuda_rng_tracker().add("Warmup jit", 0) + with tensor_parallel.get_cuda_rng_tracker().fork("Warmup jit"): + _warmup_jit_function() def _warmup_jit_function(): diff --git a/megatron/model/fused_bias_gelu.py b/megatron/model/fused_bias_gelu.py index 29222db024..e00e63148b 100644 --- a/megatron/model/fused_bias_gelu.py +++ b/megatron/model/fused_bias_gelu.py @@ -1,6 +1,7 @@ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. import torch +from megatron.core.jit import jit_fuser ###### BIAS GELU FUSION/ NO AUTOGRAD ################ @@ -11,7 +12,7 @@ # actual gelu is: # x * 0.5 * (1.0 + torch.erf(x * 0.70710678)) -@torch.jit.script +@jit_fuser def bias_gelu(bias, y): x = bias + y return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))) @@ -19,7 +20,7 @@ def bias_gelu(bias, y): # gradient of tanh approximation of gelu # gradient of actual gelu is: # 0.5 * (1. 
+ torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x) -@torch.jit.script +@jit_fuser def bias_gelu_back(g, bias, y): x = bias + y tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)) diff --git a/megatron/model/fused_layer_norm.py b/megatron/model/fused_layer_norm.py index c91a674e8c..2b90a78d30 100644 --- a/megatron/model/fused_layer_norm.py +++ b/megatron/model/fused_layer_norm.py @@ -5,6 +5,7 @@ with some changes. """ import numbers +import inspect import torch from torch.nn.parameter import Parameter from torch.nn import init @@ -15,13 +16,16 @@ try: from apex.contrib.layer_norm.layer_norm import FastLayerNormFN HAVE_PERSIST_LAYER_NORM = True + _fast_layer_norm_has_mem_efficient = ( + "memory_efficient" in inspect.signature(FastLayerNormFN.forward).parameters + ) except: HAVE_PERSIST_LAYER_NORM = False try: - from apex.normalization.fused_layer_norm import FusedLayerNormAffineFunction + from apex.normalization.fused_layer_norm import fused_layer_norm_affine except: - FusedLayerNormAffineFunction = None + fused_layer_norm_affine = None global fused_layer_norm_cuda fused_layer_norm_cuda = None @@ -79,11 +83,15 @@ def forward(self, input): weight = self.weight + 1 if self.apply_layernorm_1p else self.weight if self.no_persist_layer_norm: - assert FusedLayerNormAffineFunction is not None, \ - "FusedLayerNormAffineFunction is not available, please install apex from https://github.com/NVIDIA/apex" - return FusedLayerNormAffineFunction.apply(input, weight, self.bias, self.normalized_shape, self.eps) + assert fused_layer_norm_affine is not None, \ + "fused_layer_norm_affine is not available, please install apex from https://github.com/NVIDIA/apex" + return fused_layer_norm_affine(input, weight, self.bias, self.normalized_shape, eps=self.eps) else: - output = FastLayerNormFN.apply(input, weight, self.bias, self.eps) + if _fast_layer_norm_has_mem_efficient: + output = FastLayerNormFN.apply(input, weight, self.bias, self.eps, False) + else: + output = FastLayerNormFN.apply(input, weight, self.bias, self.eps) + # Apex's fast layer norm function outputs a 'view' tensor (i.e., has # a populated '_base' field). This will result in schedule.py's diff --git a/megatron/model/language_model.py b/megatron/model/language_model.py index 69bfa2e801..9786502b13 100644 --- a/megatron/model/language_model.py +++ b/megatron/model/language_model.py @@ -376,9 +376,10 @@ def __init__(self, # Wang and Komatsuzaki et al # https://github.com/kingoflolz/mesh-transformer-jax/ self.rotary_pos_emb = RotaryEmbedding( - rotary_dim, - args.rotary_percent, - seq_len_interpolation_factor=args.rotary_seq_len_interpolation_factor + kv_channels=rotary_dim, + rotary_percent=args.rotary_percent, + seq_len_interpolation_factor=args.rotary_seq_len_interpolation_factor, + rotary_base=args.rotary_theta, ) # Encoder (usually set to True, False if part of an encoder-decoder @@ -425,6 +426,47 @@ def __init__(self, bias=False) # Setting bias to False always to keep it consistent with embedding tying that also does not have a bias. self._output_layer_key = 'output_layer' + for i, (key, value) in enumerate(self.named_parameters()): + # Store standardized parameter names for debug purposes. + args=get_args() + key=key.split(".") + if key[0]=="encoder": + # Remove "encoder" prefix. + key=key[1:] + if key[0]=="layers": + # Shift layer index. 
+ key[1]=str(int(key[1])+1) + if key[2]=="input_norm": + key[2]="norm_1" + elif key[2]=="post_attention_norm": + key[2]="norm_2" + elif key[2]=="self_attention": + key[2]="self_attn" + elif key[2]=="mlp": + mlp_key=3 + if key[3] in ("local_experts","router"): + key[2]="mixture_of_experts" + if key[3]=="local_experts": + key[3]="experts" + mlp_key=5 + if key[mlp_key]=="dense_h_to_4h": + key[mlp_key]="layer_1" + elif key[mlp_key]=="dense_4h_to_h": + key[mlp_key]="layer_2" + else: + assert key[0]=="final_norm", key[0] + key=["layers",str(args.encoder_num_layers+1), "final_norm"]+key[1:] + elif key[0]=="embedding": + key=["layers", "0", "_".join(key[1:])] + elif key[0] == "output_layer": + key = ["layers", str(args.encoder_num_layers+1), "output_weights"] + else: + # Not implemented but still ok + pass + + value.param_name = ".".join(key) + value.param_idx = i + def set_input_tensor(self, input_tensor): """ See megatron.model.transformer.set_input_tensor()""" diff --git a/megatron/model/module.py b/megatron/model/module.py index dfd01f5667..1741d4b850 100644 --- a/megatron/model/module.py +++ b/megatron/model/module.py @@ -63,6 +63,9 @@ def initialize_word_embeddings(self): self.shared_embedding_or_output_weight().zero_out_wgrad = True return + if mpu.is_pipeline_first_stage() and self.pre_process and not self.post_process: + self.shared_embedding_or_output_weight().shared_embedding = True + # Parameters are shared between the word embeddings layers, and the # heads at the end of the model. In a pipelined setup with more than # one stage, the initial embedding layer and the head are on different @@ -85,6 +88,7 @@ def initialize_word_embeddings(self): config=self.config, init_method=self.config.init_method) self.word_embeddings.weight.data.fill_(0) self.word_embeddings.weight.shared = True + self.word_embeddings.weight.shared_embedding = True # Zero out initial weights for decoder embedding. # NOTE: We don't currently support T5 with the interleaved schedule. diff --git a/megatron/model/transformer.py b/megatron/model/transformer.py index 1b4011eebc..09963fcc3d 100644 --- a/megatron/model/transformer.py +++ b/megatron/model/transformer.py @@ -2,6 +2,7 @@ """Transformer.""" from contextlib import nullcontext +import os import math import numpy as np import torch @@ -24,6 +25,8 @@ get_data_parallel_rng_tracker_name ) from megatron.core.parallel_state import get_tensor_model_parallel_group, get_tensor_and_expert_parallel_group +from megatron.tensor_logging import log_tensor +from megatron.core.jit import jit_fuser try: from einops import rearrange @@ -441,6 +444,7 @@ class FlashSelfAttention(torch.nn.Module): def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0, device=None, dtype=None): super().__init__() + self.window_size=get_args().window_size assert flash_attn_unpadded_func is not None, ('Please install FlashAttention first, ' 'e.g., with pip install flash-attn') assert rearrange is not None, 'Please install einops first, e.g., with pip install einops' @@ -480,10 +484,14 @@ def forward(self, q, k, v): device=q.device) dropout_p = 0 + # Older versions don't support the argument. + window_arg={} if self.window_size is None else {"window_size":(self.window_size - 1, 0)} + output = flash_attn_unpadded_func( q, k, v, cu_seqlens_q, cu_seqlens_k, seqlen_q, seqlen_k, dropout_p, - softmax_scale=self.softmax_scale, causal=is_causal + softmax_scale=self.softmax_scale, causal=is_causal, + **window_arg, ) output = rearrange(output, '(b s) ... 
-> b s ...', b=batch_size) @@ -507,7 +515,9 @@ def __init__(self, config, layer_number, self.attn_mask_type = attn_mask_type self.params_dtype = config.params_dtype self.sequence_parallel = config.sequence_parallel + self._debug_transformer=args.debug_transformer + self.config = config self.group_query_attention = args.group_query_attention self.num_query_groups = args.num_query_groups @@ -554,7 +564,7 @@ def __init__(self, config, layer_number, query_projection_size + 2 * kv_projection_size, config=config, init_method=config.init_method, - bias=args.add_bias_linear, + bias=args.add_bias_linear or args.add_qkv_bias, gather_output=False) else: assert attention_type == AttnType.cross_attn @@ -781,8 +791,8 @@ def forward(self, hidden_states, attention_mask, # apply relative positional encoding (rotary embedding) if rotary_pos_emb is not None: q_pos_emb, k_pos_emb = rotary_pos_emb - query_layer = apply_rotary_pos_emb(query_layer, q_pos_emb) - key_layer = apply_rotary_pos_emb(key_layer, k_pos_emb) + query_layer = apply_rotary_pos_emb(query_layer, q_pos_emb,self.config) + key_layer = apply_rotary_pos_emb(key_layer, k_pos_emb,self.config) # TODO, can apply positional embedding to value_layer so it has # absolute positional embedding. # otherwise, only relative positional embedding takes effect @@ -805,6 +815,12 @@ def forward(self, hidden_states, attention_mask, context_layer = self.core_attention_flash(q, k, v) context_layer = rearrange(context_layer, 'b s h d -> s b (h d)').contiguous() + if self._debug_transformer: + log_tensor(f"Layer {self.layer_number} Query", query_layer.transpose(0,1), level=self._debug_transformer) + log_tensor(f"Layer {self.layer_number} Key", key_layer.transpose(0,1), level=self._debug_transformer) + log_tensor(f"Layer {self.layer_number} Value", value_layer.transpose(0,1), level=self._debug_transformer) + log_tensor(f"Layer {self.layer_number} Attn context", context_layer.transpose(0,1), level=self._debug_transformer) + # ================= # Output. [sq, b, h] # ================= @@ -829,7 +845,7 @@ def _bias_dropout_add(x, bias, residual, prob): return _bias_dropout_add -@torch.jit.script +@jit_fuser def bias_dropout_add_fused_train(x: torch.Tensor, bias: Optional[torch.Tensor], residual: torch.Tensor, @@ -837,7 +853,7 @@ def bias_dropout_add_fused_train(x: torch.Tensor, return bias_dropout_add(x, bias, residual, prob, True) -@torch.jit.script +@jit_fuser def bias_dropout_add_fused_inference(x: torch.Tensor, bias: Optional[torch.Tensor], residual: torch.Tensor, @@ -861,6 +877,7 @@ def __init__(self, config, super(ParallelTransformerLayer, self).__init__() self.layer_number = layer_number self.layer_type = layer_type + self._debug_transformer=args.debug_transformer self.apply_residual_connection_post_norm \ = config.apply_residual_connection_post_layernorm @@ -1156,6 +1173,13 @@ def forward(self, hidden_states, attention_mask, # Layer norm at the beginning of the transformer layer. norm_output = self.input_norm(hidden_states) + if self._debug_transformer: + log_tensor( + f"Layer {self.layer_number} norm 1", + norm_output.transpose(0,1), + level=self._debug_transformer + ) + # Self attention. 
attention_output, attention_bias = \ self.self_attention( @@ -1164,6 +1188,13 @@ def forward(self, hidden_states, attention_mask, inference_params=inference_params, rotary_pos_emb=rotary_pos_emb) + if self._debug_transformer: + log_tensor( + f"Layer {self.layer_number} Attn output", + (hidden_states if attention_bias is None else hidden_states + attention_bias).transpose(0,1), + level=self._debug_transformer + ) + # Residual connection. if self.apply_residual_connection_post_norm: residual = norm_output @@ -1197,9 +1228,18 @@ def forward(self, hidden_states, attention_mask, training=self.training) norm_input = residual + self.drop_path(out) + if self._debug_transformer: + log_tensor(f"Layer {self.layer_number} Attn residual", norm_input.transpose(0,1), level=self._debug_transformer) + # Layer norm post the self attention. norm_output = self.post_attention_norm(norm_input) + if self._debug_transformer: + log_tensor( + f"Layer {self.layer_number} norm 2", + norm_output.transpose(0,1), + level=self._debug_transformer + ) # Cross attention. if self.layer_type == LayerType.encoder: pass @@ -1236,6 +1276,13 @@ def forward(self, hidden_states, attention_mask, # MLP. mlp_output, mlp_bias = self.mlp(norm_output) + if self._debug_transformer: + log_tensor( + f"Layer {self.layer_number} MLP output", + (mlp_output if mlp_bias is None else mlp_output + mlp_bias).transpose(0,1), + level=self._debug_transformer + ) + # Second residual connection. if self.apply_residual_connection_post_norm: residual = norm_output @@ -1497,6 +1544,10 @@ def build_layer(layer_number): extra_transformer_engine_kwargs["activation"] = "swiglu" if args.swiglu else "gelu" if self.transformer_engine_v_0_11: extra_transformer_engine_kwargs["normalization"] = args.normalization + assert config.attention_softmax_in_fp32, "TransformerEngine only supports softmax compute in FP32." + assert ( + (bool(int(os.getenv("NVTE_APPLY_QK_LAYER_SCALING", "0"))) and args.fp16) == config.apply_query_key_layer_scaling + ), "Unsupported config for apply_query_key_layer_scaling in TransformerEngine." return transformer_engine.pytorch.TransformerLayer( config.hidden_size, config.ffn_hidden_size, @@ -1512,8 +1563,6 @@ def build_layer(layer_number): tp_group=mpu.get_tensor_model_parallel_group(), get_rng_state_tracker=tensor_parallel.get_cuda_rng_tracker, fuse_wgrad_accumulation=config.gradient_accumulation_fusion, - apply_query_key_layer_scaling=config.apply_query_key_layer_scaling, - attention_softmax_in_fp32=config.attention_softmax_in_fp32, seq_length=args.seq_length, micro_batch_size=args.micro_batch_size, sequence_parallel=config.sequence_parallel, @@ -1689,6 +1738,15 @@ def forward(self, hidden_states, attention_mask, rotary_pos_emb=None): # hidden_states: [s, b, h] + args = get_args() + if args.debug_layer_outputs: + log_tensor(f"Global layer 0 fw: Embedding output", hidden_states.transpose(0, 1), level=args.debug_layer_outputs) + if args.debug_layer_gradients: + hidden_states.register_hook(lambda grad: log_tensor( + f"Global layer 1 bw: Embedding output", + grad.transpose(0, 1), level=args.debug_layer_gradients + )) + # Checks. 
if inference_params: assert self.recompute_granularity is None, \ @@ -1774,6 +1832,18 @@ def forward(self, hidden_states, attention_mask, attention_mask, **forward_kwargs) + if args.debug_layer_outputs: + log_tensor( + f"Global layer {index + 1} fw: Transformer layer {index+1} output", + hidden_states.transpose(0, 1), level=args.debug_layer_outputs + ) + if args.debug_layer_gradients: + fn=lambda idx:(lambda grad: log_tensor( + f"Global layer {idx + 2} bw: Transformer layer {idx+1} output", + grad.transpose(0, 1), level=args.debug_layer_gradients + )) + hidden_states.register_hook(fn(index)) + # First Retro decoder layer returns both hidden_states # and retriever_output. Make retriever_output available # to subsequence Retro layers. diff --git a/megatron/model/utils.py b/megatron/model/utils.py index 15fbe9ad9e..ace7f346c4 100644 --- a/megatron/model/utils.py +++ b/megatron/model/utils.py @@ -8,6 +8,7 @@ from megatron import get_args from megatron.model import LayerNorm, RMSNorm +from megatron.core.jit import jit_fuser def init_method_normal(sigma): """Init method based on N(0, sigma).""" @@ -42,7 +43,7 @@ def get_linear_layer(rows, columns, init_method): return layer -@torch.jit.script +@jit_fuser def gelu_impl(x): """OpenAI's gelu implementation.""" return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x * @@ -53,7 +54,7 @@ def openai_gelu(x): #This is actually Python equivalent of torch.nn.functional.gelu(), also with type hints for ONNX exporter -@torch.jit.script +@jit_fuser def erf_gelu(x): return x * 0.5 * (torch.erf(x / 1.41421).to(dtype=x.dtype)+torch.ones_like(x).to(dtype=x.dtype)) diff --git a/megatron/optimizer/__init__.py b/megatron/optimizer/__init__.py deleted file mode 100644 index 33744a2f3a..0000000000 --- a/megatron/optimizer/__init__.py +++ /dev/null @@ -1,141 +0,0 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. - -from apex.optimizers import FusedAdam as Adam -from apex.optimizers import FusedSGD as SGD - -from megatron import get_args - -from .distrib_optimizer import DistributedOptimizer -from .grad_scaler import ConstantGradScaler, DynamicGradScaler -from .optimizer import Float16OptimizerWithFloat16Params, FP32Optimizer - -def get_param_groups(modules, - no_weight_decay_cond, - scale_lr_cond, - lr_mult): - """creates param groups based on weight decay condition (regularized vs non regularized) - and learning rate scale condition (args.lr vs lr_mult * args.lr) - scale_lr_cond is used during finetuning where head of the network requires a scaled - version of the base learning rate. 
- """ - wd_no_scale_lr = [] - wd_scale_lr = [] - no_wd_no_scale_lr = [] - no_wd_scale_lr = [] - for module in modules: - for name, param in module.named_parameters(): - if not param.requires_grad: - continue - - if no_weight_decay_cond is not None: - no_wd = no_weight_decay_cond(name, param) - else: - # do not regularize biases nor Norm parameters - no_wd = name.endswith(".bias") or len(param.shape) == 1 - - if scale_lr_cond is not None: - scale_lr = scale_lr_cond(name, param) - else: - scale_lr = False - - if not no_wd and not scale_lr: - wd_no_scale_lr.append(param) - elif not no_wd and scale_lr: - wd_scale_lr.append(param) - elif no_wd and not scale_lr: - no_wd_no_scale_lr.append(param) - else: - no_wd_scale_lr.append(param) - - param_groups = [] - if len(wd_no_scale_lr): - param_groups.append({'params': wd_no_scale_lr, 'wd_mult': 1.0, 'lr_mult': 1.0}) - if len(wd_scale_lr): - param_groups.append({'params': wd_scale_lr, 'wd_mult': 1.0, 'lr_mult': lr_mult}) - if len(no_wd_no_scale_lr): - param_groups.append({'params': no_wd_no_scale_lr, 'wd_mult': 0.0, 'lr_mult': 1.0}) - if len(no_wd_scale_lr): - param_groups.append({'params': no_wd_scale_lr, 'wd_mult': 0.0, 'lr_mult': lr_mult}) - - return param_groups - -def get_megatron_optimizer(model, - no_weight_decay_cond=None, - scale_lr_cond=None, - lr_mult=1.0): - args = get_args() - - # Base optimizer. - param_groups = get_param_groups(model, - no_weight_decay_cond, - scale_lr_cond, - lr_mult) - - if args.optimizer == 'adam': - optimizer = Adam(param_groups, - lr=args.lr, - weight_decay=args.weight_decay, - betas=(args.adam_beta1, args.adam_beta2), - eps=args.adam_eps) - elif args.optimizer == 'sgd': - optimizer = SGD(param_groups, - lr=args.lr, - weight_decay=args.weight_decay, - momentum=args.sgd_momentum) - else: - raise Exception('{} optimizer is not supported.'.format( - args.optimizer)) - - # Determine whether the params have main-grad field. - params_have_main_grad = True - - # Mixed precision optimizer. - # - Note: both the Float16Optimizer and the DistributedOptimizer inherit - # from the MixedPrecisionOptimizer, which manages any optimizer where - # the model params and main params are distinct. - if args.fp16 or args.bf16 or args.use_distributed_optimizer: - - # Grad scaler: - # if loss-scale is provided, instantiate the constant scaler. - # if we are using fp16 and loss-scale is not present, use a - # dynamic scaler. - # otherwise we are running in bf16 with no loss-scale so - # leave it as None. - grad_scaler = None - - # Constant loss scale. - if args.loss_scale: - grad_scaler = ConstantGradScaler(args.loss_scale) - - # Dynamic loss scale. - else: - if args.fp16: - grad_scaler = DynamicGradScaler( - initial_scale=args.initial_loss_scale, - min_scale=args.min_loss_scale, - growth_factor=2.0, - backoff_factor=0.5, - growth_interval=args.loss_scale_window, - hysteresis=args.hysteresis) - - # Megatron optimizer. - opt_ty = DistributedOptimizer \ - if args.use_distributed_optimizer else \ - Float16OptimizerWithFloat16Params - return opt_ty(optimizer, - args.clip_grad, - args.log_num_zeros_in_grad, - args.check_for_nan_in_loss_and_grad, - params_have_main_grad, - args.fp16, - args.bf16, - args.params_dtype, - grad_scaler, - model) - - # FP32. 
- return FP32Optimizer(optimizer, args.clip_grad, - args.log_num_zeros_in_grad, - args.check_for_nan_in_loss_and_grad, - params_have_main_grad, - model) diff --git a/megatron/optimizer/utils.py b/megatron/optimizer/utils.py deleted file mode 100644 index f4b7cbd634..0000000000 --- a/megatron/optimizer/utils.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. - -"""Utility functions for Megatron optimizer.""" - - -from megatron.core import mpu - - -def shard_buffer(buffer): - """ - Shard buffer into dp_size chunks of equal size. - """ - data_parallel_world_size = mpu.get_data_parallel_world_size(with_context_parallel=True) - assert buffer.numel() % data_parallel_world_size == 0 - shard_size = buffer.numel() // data_parallel_world_size - sharded_buffer = [buffer[(r*shard_size):((r+1)*shard_size)] - for r in range(data_parallel_world_size)] - return sharded_buffer - diff --git a/megatron/tensor_logging.py b/megatron/tensor_logging.py new file mode 100644 index 0000000000..44b9b90794 --- /dev/null +++ b/megatron/tensor_logging.py @@ -0,0 +1,135 @@ +import contextlib +import logging +import math +import sys +import time +import traceback +import typing + +import torch + + +logger = logging.getLogger(__name__) + + +# A global buffer for holding logged tensor stats. +_tensor_log_stats: list | None = None + + +@contextlib.contextmanager +def run_and_log_exception(): + try: + yield + except Exception: + logger.critical(traceback.format_exc()) + # TODO: This is needed because ngc crops the logs. + time.sleep(10) + sys.exit(1) + + +def reset_tensor_stats_logging(enabled=True): + global _tensor_log_stats + _tensor_log_stats = [] if enabled else None + + +def get_logged_tensor_stats(): + return _tensor_log_stats + + +def format_number(x, prec=4, exp_threshold=3): + digits = 0 if x == 0 else math.log10(abs(x)) + if math.isfinite(digits) and -exp_threshold < math.floor(digits) < prec + exp_threshold: + return f"{x:.{prec}f}" + else: + return f"{x:.{prec-1}e}" + + +def log_tensor( + name: str, + tensor: torch.Tensor, + *, + scale: float = 1.0, + level: int = 2, + storage: bool = False, + log_fn: typing.Callable[[str], typing.Any] | None = logger.info, +): + if level < 1: + return + save_stats = _tensor_log_stats is not None + shape = tuple(tensor.shape) + _, dtype = str(tensor.dtype).split("torch.") + txt = [ + (None, name, 50), + ("shape", shape, 18), + ("dtype", dtype, 9), + ("device", tensor.device, 7), + ] + stats = dict( + name=name, + shape=list(shape), + dtype=dtype, + device=str(tensor.device), + ) + if level >= 2 and tensor.device.type != "meta": + v_float = tensor.float() + + stats.update( + mu=v_float.mean().item(), + std=v_float.std().item(), + stride=tensor.stride(), + min=v_float.min().item(), + max=v_float.max().item(), + ) + txt.extend( + [ + ("mu", format_number(stats["mu"] * scale), 10), + ("std", format_number(stats["std"] * scale), 10), + ("stride", stats["stride"], 20), + ] + ) + if storage: + storage = tensor.untyped_storage() + storage_float = torch.tensor(storage, dtype=tensor.dtype, device=tensor.device).float() + stats.update( + storage=str(storage.data_ptr())[-8:], + storage_size=storage.size(), + storage_mu=storage_float.mean().item() * scale, + storage_std=storage_float.std().item() * scale, + ) + txt.extend( + [ + (f"storage", stats["storage"], 8), + (f"s size", f"{stats['storage_size']:,d}", 12), + (f"s mu", format_number(stats["storage_mu"]), 10), + (f"s std", format_number(stats["storage_std"]), 10), + ] + ) + if level >= 3: + 
target_samples = 2 ** (level - 3) + step = max(tensor.numel() // target_samples, 1) + while step > 1 and any(step % s == 0 and s > 1 for s in shape): + step -= 1 + samples = tensor.flatten()[: target_samples * step : step].cpu() + stats.update(samples=samples, step=step) + samples = [format_number(x) for x in samples.tolist()] + samples = ",".join(f"{sample:10s}" for sample in samples) + txt.append((f"{f'samples (step={step})':21s}", f" ({samples})", target_samples * 11 + 3)) + out, len_ = "", 0 + if save_stats: + _tensor_log_stats.append(stats) + for prefix, val, col_len in txt: + prefix = "" if prefix is None else f" {prefix}=" + len_ += col_len + len(prefix) + 1 + out = f"{f'{out}{prefix}{str(val)}':{len_}s}" + return log_fn(out) + + +def log_generator( + name, + generator: torch.Tensor | torch.Generator | None = None, + log_fn: typing.Callable[[str], typing.Any] | None = logger.info, +): + if generator is None: + generator = torch.cuda.default_generators[torch.cuda.current_device()] + tensor = generator.get_state() if isinstance(generator, torch.Generator) else generator + return log_fn(f"{name} {tensor.view(dtype=torch.int64)[-8:].tolist()}") diff --git a/megatron/text_generation/tokenization.py b/megatron/text_generation/tokenization.py index 441add74f9..2e1627c726 100644 --- a/megatron/text_generation/tokenization.py +++ b/megatron/text_generation/tokenization.py @@ -36,6 +36,8 @@ def detokenize_generations(tokens_gpu_tensor, word = tokenizer.decoder[token] elif args.tokenizer_type == 'NullTokenizer': word = str(token) + elif args.tokenizer_type in ['TokenizerFromFile', 'TokenizerFromFileWithFIM']: + word = tokenizer.detokenize([token]) else: word = tokenizer.tokenizer.decoder[token] word = bytearray( diff --git a/megatron/theoretical_memory_usage.py b/megatron/theoretical_memory_usage.py index 1a6fb6b5b3..445a14561c 100644 --- a/megatron/theoretical_memory_usage.py +++ b/megatron/theoretical_memory_usage.py @@ -18,7 +18,7 @@ def compute_weight_and_optimizer_memory(args, verbose=False): * args.hidden_size * args.hidden_size * ( - 1 + ((1 + (args.ffn_hidden_size / args.hidden_size)) / 5.0) + (args.num_query_groups / (5.0 * args.num_attention_heads)) + (2 / (5 * args.hidden_size)) + (1 / (5 * args.num_layers * args.hidden_size)) @@ -26,15 +26,18 @@ def compute_weight_and_optimizer_memory(args, verbose=False): ) embedding_size = args.hidden_size * args.padded_vocab_size if args.untie_embeddings_and_output_weights: - num_total_parameters_with_embeddings = num_parameters_in_transformer_layers + ( - 2 * embedding_size - ) + num_parameters_in_embedding_layers = 2 * embedding_size else: - num_total_parameters_with_embeddings = num_parameters_in_transformer_layers + embedding_size + num_parameters_in_embedding_layers = embedding_size + num_total_parameters = num_parameters_in_transformer_layers + num_parameters_in_embedding_layers if verbose: print( - f"Number of parameters in billions: {num_total_parameters_with_embeddings / 10**9:.2f}" + f"Number of parameters in transformer layers in billions: {num_parameters_in_transformer_layers / 10**9: .2f}" + ) + print( + f"Number of parameters in embedding layers in billions: {num_parameters_in_embedding_layers / 10**9:.2f}" ) + print(f"Total number of parameters in billions: {num_total_parameters / 10**9:.2f}") # Most loaded model shard has (1/pp_size transformer layers + 1 embedding layer) / tp_size. 
     num_parameters_on_most_loaded_model_shard = (
@@ -75,7 +78,9 @@ def compute_activation_memory(args, num_microbatches, verbose=False):
     # are for the first pipeline stage.
 
     # Memory footprint from transformer layer (self-attention and MLP).
-    activation_memory = (args.seq_length * args.micro_batch_size * args.hidden_size) * 34
+    activation_memory = (args.seq_length * args.micro_batch_size * args.hidden_size) * (
+        18 + (4 * (args.ffn_hidden_size / args.hidden_size))
+    )
     if verbose:
         print(
             f"Activation memory footprint per transformer layer: "
diff --git a/megatron/tokenizer/gpt2_tokenization.py b/megatron/tokenizer/gpt2_tokenization.py
index 3f37e44908..ff89504351 100644
--- a/megatron/tokenizer/gpt2_tokenization.py
+++ b/megatron/tokenizer/gpt2_tokenization.py
@@ -281,7 +281,7 @@ def encode(self, text):
         return self.convert_tokens_to_ids(self.tokenize(text))
 
     def decode(self, tokens):
-        text = ''.join([self.decoder[token] for token in tokens])
+        text = ''.join(self.convert_ids_to_tokens(tokens))
         text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
         return text
 
diff --git a/megatron/tokenizer/tokenizer.py b/megatron/tokenizer/tokenizer.py
index 98643343c5..be75fc7080 100644
--- a/megatron/tokenizer/tokenizer.py
+++ b/megatron/tokenizer/tokenizer.py
@@ -5,9 +5,18 @@
 from abc import ABC
 from abc import abstractmethod
 
+from megatron.core.datasets.megatron_tokenizer import MegatronTokenizer
+
+from transformers import PreTrainedTokenizerFast
 from .bert_tokenization import FullTokenizer as FullBertTokenizer
 from .gpt2_tokenization import GPT2Tokenizer
 
+FIM_PREFIX = "<fim_prefix>"
+FIM_MIDDLE = "<fim_middle>"
+FIM_SUFFIX = "<fim_suffix>"
+FIM_PAD = "<fim_pad>"
+EOD = "<|endoftext|>"
+
 def build_tokenizer(args):
     """Initialize tokenizer."""
     if args.rank == 0:
@@ -29,6 +38,16 @@ def build_tokenizer(args):
         assert args.vocab_file is not None
         assert args.merge_file is not None
         tokenizer = _GPT2BPETokenizer(args.vocab_file, args.merge_file)
+    elif args.tokenizer_type == 'GPT2BPETokenizerWithFIM':
+        assert args.vocab_file is not None
+        assert args.merge_file is not None
+        tokenizer = _GPT2BPETokenizer(args.vocab_file, args.merge_file, special_tokens=[FIM_PREFIX, FIM_MIDDLE, FIM_SUFFIX, FIM_PAD])
+    elif args.tokenizer_type == "TokenizerFromFile":
+        assert args.tokenizer_file is not None
+        tokenizer = _HFTokenizer(args.tokenizer_file, special_tokens=[EOD])
+    elif args.tokenizer_type == "TokenizerFromFileWithFIM":
+        assert args.tokenizer_file is not None
+        tokenizer = _HFTokenizer(args.tokenizer_file, special_tokens=[EOD, FIM_PREFIX, FIM_MIDDLE, FIM_SUFFIX, FIM_PAD])
     elif args.tokenizer_type == 'SentencePieceTokenizer':
         assert args.tokenizer_model is not None
         tokenizer = _SentencePieceTokenizer(args.tokenizer_model, vocab_extra_ids=args.vocab_extra_ids)
@@ -47,6 +66,8 @@ def build_tokenizer(args):
 
     # Add vocab size (if not already set from a checkpoint).
     if getattr(args, "padded_vocab_size", None) is None:
+        # TODO: For most tokenizers, vocab_size does not take special_tokens into account.
+        # Might cause an issue if vocab_size + len(special_tokens) exceeds padded_vocab_size?
args.padded_vocab_size = _vocab_size_with_padding(tokenizer.vocab_size, args) @@ -69,73 +90,11 @@ def _vocab_size_with_padding(orig_vocab_size, args): return after -class AbstractTokenizer(ABC): - """Abstract class for tokenizer.""" - - def __init__(self, name): - self.name = name - super().__init__() - - @property - @abstractmethod - def vocab_size(self): - pass - - @property - @abstractmethod - def vocab(self): - """Dictionary from vocab text token to id token.""" - pass - - @property - @abstractmethod - def inv_vocab(self): - """Dictionary from vocab id token to text token.""" - pass - - @abstractmethod - def tokenize(self, text): - pass - - def detokenize(self, token_ids): - raise NotImplementedError('detokenizer is not implemented for {} ' - 'tokenizer'.format(self.name)) - - @property - def cls(self): - raise NotImplementedError('CLS is not provided for {} ' - 'tokenizer'.format(self.name)) - - @property - def sep(self): - raise NotImplementedError('SEP is not provided for {} ' - 'tokenizer'.format(self.name)) - - @property - def pad(self): - raise NotImplementedError('PAD is not provided for {} ' - 'tokenizer'.format(self.name)) - - @property - def eod(self): - raise NotImplementedError('EOD is not provided for {} ' - 'tokenizer'.format(self.name)) - - @property - def mask(self): - raise NotImplementedError('MASK is not provided for {} ' - 'tokenizer'.format(self.name)) - - -class _BertWordPieceTokenizer(AbstractTokenizer): +class _BertWordPieceTokenizer(MegatronTokenizer): """Original BERT wordpiece tokenizer.""" def __init__(self, vocab_file, lower_case=True, vocab_extra_ids=0): - if lower_case: - name = 'BERT Lower Case' - else: - name = 'BERT Upper Case' - super().__init__(name) + super().__init__(vocab_file, lower_case=lower_case, vocab_extra_ids=vocab_extra_ids) self.tokenizer = FullBertTokenizer(vocab_file, do_lower_case=lower_case) self.cls_id = self.tokenizer.vocab['[CLS]'] self.sep_id = self.tokenizer.vocab['[SEP]'] @@ -223,6 +182,16 @@ def pad(self): def mask(self): return self.mask_id + @property + def bos(self): + """ Id of the beginning of sentence token in the vocabulary.""" + return self._bos_token_id + + @property + def eos(self): + """ Id of the end of sentence token in the vocabulary.""" + return self._eos_token_id + @property def bos_token(self): """ Beginning of sentence token id """ @@ -238,16 +207,6 @@ def additional_special_tokens(self): """ All the additional special tokens you may want to use (list of strings).""" return self._additional_special_tokens - @property - def bos_token_id(self): - """ Id of the beginning of sentence token in the vocabulary.""" - return self._bos_token_id - - @property - def eos_token_id(self): - """ Id of the end of sentence token in the vocabulary.""" - return self._eos_token_id - @property def additional_special_tokens_ids(self): """ Ids of all the additional special tokens in the vocabulary (list of integers).""" @@ -258,16 +217,17 @@ def additional_special_tokens(self, value): self._additional_special_tokens = value -class _GPT2BPETokenizer(AbstractTokenizer): +class _GPT2BPETokenizer(MegatronTokenizer): """Original GPT2 BPE tokenizer.""" - def __init__(self, vocab_file, merge_file): - name = 'GPT2 BPE' - super().__init__(name) + def __init__(self, vocab_file, merge_file, special_tokens=None): + super().__init__(vocab_file, merge_file) + special_tokens = special_tokens if special_tokens is not None else [] self.tokenizer = GPT2Tokenizer(vocab_file, merge_file, errors='replace', - special_tokens=[], max_len=None) + 
special_tokens=special_tokens, max_len=None) self.eod_id = self.tokenizer.encoder['<|endoftext|>'] + self.special_tokens = self.tokenizer.special_tokens @property def vocab_size(self): @@ -292,12 +252,51 @@ def eod(self): return self.eod_id -class _SentencePieceTokenizer(AbstractTokenizer): +class _HFTokenizer(MegatronTokenizer): + """HF Tokenizer.""" + + def __init__(self, tokenizer_file, special_tokens=None): + name = 'HF Tokenizer' + super().__init__(name) + + special_tokens = special_tokens if special_tokens is not None else [] + self.tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file, errors='replace', max_len=None) + self.tokenizer.add_special_tokens({'additional_special_tokens': special_tokens}) + self.eod_id = self.tokenizer.vocab[EOD] + # Token->id mapping for additional special-tokens + self.special_tokens = { + tok: self.tokenizer.vocab[tok] for tok in special_tokens + } + self._inv_vocab = {v: k for k, v in self.tokenizer.vocab.items()} + + @property + def vocab_size(self): + return len(self.tokenizer) + + @property + def vocab(self): + return self.tokenizer.vocab + + @property + def inv_vocab(self): + return self._inv_vocab + + def tokenize(self, text): + return self.tokenizer.encode(text) + + def detokenize(self, token_ids): + return self.tokenizer.decode(token_ids) + + @property + def eod(self): + return self.eod_id + + +class _SentencePieceTokenizer(MegatronTokenizer): """SentencePieceTokenizer-Megatron wrapper""" def __init__(self, model_file, vocab_extra_ids=0): - name = 'SentencePieceTokenizer' - super().__init__(name) + super().__init__(model_file, vocab_extra_ids=vocab_extra_ids) import sentencepiece self.tokenizer = sentencepiece.SentencePieceProcessor(model_file=model_file) @@ -438,10 +437,6 @@ def sep(self): def pad(self): return self._pad_id - @property - def bos_token_id(self): - return self._bos_id - @property def bos(self): return self._bos_id @@ -450,10 +445,6 @@ def bos(self): def eod(self): return self._eod_id - @property - def eos_token_id(self): - return self._eos_id - @property def eos(self): return self._eos_id @@ -466,6 +457,7 @@ def mask(self): def additional_special_tokens_ids(self): return [self.vocab[k] for k in self._t5_tokens] + class _GPTSentencePieceTokenizer(_SentencePieceTokenizer): """SentencePieceTokenizer-Megatron wrapper""" @@ -505,6 +497,7 @@ def eod(self): def additional_special_tokens_ids(self): return None + class _Llama2Tokenizer(_SentencePieceTokenizer): """SentencePieceTokenizer-Megatron wrapper""" @@ -554,6 +547,7 @@ def eod(self): def additional_special_tokens_ids(self): return None + class _NullTokenizer: def __init__(self, vocab_size): vocab_size = int(vocab_size) diff --git a/megatron/training.py b/megatron/training.py index d18d3c3b91..88636085ab 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -2,10 +2,14 @@ """Pretrain utilities.""" +import contextlib import gc +import dataclasses from datetime import datetime import math +import os import logging +import os import sys from .log_handler import CustomHandler # Make default logging level INFO, but filter out all log messages not from MCore. 
@@ -21,6 +25,7 @@ from megatron import get_timers from megatron import get_tensorboard_writer from megatron import get_wandb_writer +from megatron import get_one_logger from megatron import get_current_global_batch_size from megatron import get_num_microbatches from megatron import is_last_rank @@ -36,7 +41,7 @@ from megatron.core.distributed import DistributedDataParallel as DDP from megatron.core.distributed import finalize_model_grads from megatron.core.enums import ModelType -from megatron.optimizer import get_megatron_optimizer +from megatron.core.optimizer import get_megatron_optimizer, OptimizerConfig from megatron.initialize import initialize_megatron from megatron.initialize import write_args_to_tensorboard from megatron.initialize import set_jit_fusion_options @@ -48,7 +53,7 @@ from megatron.core.pipeline_parallel import get_forward_backward_func from megatron.utils import report_memory from megatron.model.vision.knn_monitor import compute_feature_bank - +from megatron.tensor_logging import get_logged_tensor_stats, reset_tensor_stats_logging def print_datetime(string): """Note that this call will sync across all ranks.""" @@ -68,7 +73,7 @@ def num_floating_point_operations(args, batch_size): * args.hidden_size * args.hidden_size * ( - 1 + ((1 + (args.ffn_hidden_size / args.hidden_size)) / 5.0) + (args.num_query_groups / (5 * args.num_attention_heads)) + (args.seq_length / (5 * args.hidden_size)) + (args.padded_vocab_size / (10 * args.num_layers * args.hidden_size)) @@ -76,6 +81,65 @@ def num_floating_point_operations(args, batch_size): ) +def append_to_progress_log(string): + args = get_args() + if args.save is None: + return + progress_log_filename = os.path.join(args.save, "progress.txt") + torch.distributed.barrier() + if torch.distributed.get_rank() == 0: + with open(progress_log_filename, 'a') as f: + job_id = os.getenv('SLURM_JOB_ID', '') + num_gpus = args.world_size + f.write(f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\tJob ID: {job_id}\t" + f"# GPUs: {num_gpus}\t{string}\n") + + +def get_start_time_from_progress_log(): + """ + Gets start time of earliest job with same world size. Also returns the number + of floating-point operations completed in last saved checkpoint. + """ + args = get_args() + assert args.save is not None + progress_log_filename = os.path.join(args.save, "progress.txt") + + # start_time is time when job with same world size started. + # start_num_floating_point_operations is the number of floating-point operations + # completed when this job started. + # latest_num_floating_point_operations is the number of floating-point operations + # completed in most recent saved checkpoint. + start_time = None + start_num_floating_point_operations = None + latest_num_floating_point_operations = 0 + + def _get_field(string, type): + return type(string.split(': ')[1]) + + with open(progress_log_filename, 'r') as f: + for line in f: + line = line.strip() + line_tokens = line.split('\t') + world_size_in_line = _get_field(line_tokens[2], int) + if line_tokens[3] == "Saved checkpoint": + latest_num_floating_point_operations = \ + _get_field(line_tokens[7], float) + if world_size_in_line != args.world_size: + # Re-start search if we see a different world size. 
+ start_time = None + start_num_floating_point_operations = None + continue + if line_tokens[3] == "Starting job": + if start_time is None: + start_time = line_tokens[0] + start_num_floating_point_operations = \ + latest_num_floating_point_operations + assert start_time is not None and start_num_floating_point_operations is not None, \ + "Should have seen at least one 'Starting job' entry with same world_size" + return datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S'), \ + start_num_floating_point_operations + + def pretrain(train_valid_test_dataset_provider, model_provider, model_type, @@ -115,6 +179,13 @@ def pretrain(train_valid_test_dataset_provider, # Initalize and get arguments, timers, and Tensorboard writer. initialize_megatron(extra_args_provider=extra_args_provider, args_defaults=args_defaults) + + args = get_args() + timers = get_timers() + + if args.log_progress: + append_to_progress_log("Starting job") + # Set pytorch JIT layer fusion options and warmup JIT functions. set_jit_fusion_options() @@ -135,10 +206,20 @@ def pretrain(train_valid_test_dataset_provider, args = get_args() timers = get_timers() + if args.structured_logs_dir is not None: + reset_tensor_stats_logging() + + one_logger = get_one_logger() + if one_logger: + one_logger.log_metrics({ + 'train_iterations_warmup': 5 + }) + # Model, optimizer, and learning rate. timers('model-and-optimizer-setup', log_level=0).start(barrier=True) model, optimizer, opt_param_scheduler = setup_model_and_optimizer( model_provider, model_type) + timers('model-and-optimizer-setup').stop() print_datetime('after model, optimizer, and learning rate ' 'scheduler are built') @@ -170,6 +251,8 @@ def pretrain(train_valid_test_dataset_provider, timers.log(['model-and-optimizer-setup', 'train/valid/test-data-iterators-setup'], barrier=True) + save_tensor_logs("init") + if not args.skip_train: print_rank_0('training ...') @@ -179,15 +262,17 @@ def pretrain(train_valid_test_dataset_provider, iteration = 0 if args.do_train and args.train_iters > 0: - iteration = train(forward_step_func, - model, optimizer, opt_param_scheduler, - train_data_iterator, valid_data_iterator, - process_non_loss_data_func, config) + iteration, num_floating_point_operations_so_far = train( + forward_step_func, + model, optimizer, opt_param_scheduler, + train_data_iterator, valid_data_iterator, + process_non_loss_data_func, config) print_datetime('after training is done') if args.save and iteration != 0: - save_checkpoint(iteration, model, optimizer, opt_param_scheduler) + save_checkpoint(iteration, model, optimizer, opt_param_scheduler, + num_floating_point_operations_so_far) else: print_rank_0('skipping training (--skip-train is on) ...') @@ -208,6 +293,14 @@ def pretrain(train_valid_test_dataset_provider, verbose=True, write_to_tensorboard=not args.skip_train) +def save_tensor_logs(step:str): + args=get_args() + if args.structured_logs_dir is not None and (tensor_log_stats:=get_logged_tensor_stats()): + tensor_logs_dir = os.path.join(args.structured_logs_dir, f"runs/0/artifacts/{torch.distributed.get_rank()}") + os.makedirs(tensor_logs_dir, exist_ok=True) + torch.save(tensor_log_stats, os.path.join(tensor_logs_dir, f"tensor_logs_{step}.pt")) + reset_tensor_stats_logging() + def update_train_iters(args): # For iteration-based training, we don't need to do anything @@ -328,12 +421,14 @@ def get_model(model_provider_func, model_type=ModelType.encoder_or_decoder, wrap model = [DDP(config, model_chunk, 
data_parallel_group=mpu.get_data_parallel_group(with_context_parallel=True), + expert_data_parallel_group=mpu.get_data_modulo_expert_parallel_group(), accumulate_allreduce_grads_in_fp32=args.accumulate_allreduce_grads_in_fp32, overlap_grad_reduce=args.overlap_grad_reduce, use_distributed_optimizer=args.use_distributed_optimizer, # Turn off bucketing for model_chunk 2 onwards, since communication for these # model chunks is overlapped with compute anyway. - disable_bucketing=(model_chunk_idx > 0)) + disable_bucketing=(model_chunk_idx > 0), + check_for_nan_in_grad=args.check_for_nan_in_loss_and_grad) for (model_chunk_idx, model_chunk) in enumerate(model)] # Broadcast params from data parallel src rank to other data parallel ranks. @@ -405,18 +500,25 @@ def setup_model_and_optimizer(model_provider_func, model = get_model(model_provider_func, model_type) unwrapped_model = unwrap_model(model) - optimizer = get_megatron_optimizer(model, no_wd_decay_cond, + kwargs = {} + for f in dataclasses.fields(OptimizerConfig): + if hasattr(args, f.name): + kwargs[f.name] = getattr(args, f.name) + config = OptimizerConfig(**kwargs) + optimizer = get_megatron_optimizer(config, model, no_wd_decay_cond, scale_lr_cond, lr_mult) opt_param_scheduler = get_optimizer_param_scheduler(optimizer) if args.load is not None: timers = get_timers() timers('load-checkpoint', log_level=0).start(barrier=True) - args.iteration = load_checkpoint(model, optimizer, opt_param_scheduler) + args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( + model, optimizer, opt_param_scheduler) timers('load-checkpoint').stop(barrier=True) timers.log(['load-checkpoint']) else: args.iteration = 0 + args.num_floating_point_operations_so_far = 0 # get model without FP16 and/or DDP wrappers if args.iteration == 0 and len(unwrapped_model) == 1 \ @@ -461,7 +563,7 @@ def train_step(forward_step_func, data_iterator, torch.cuda.empty_cache() # Vision gradients. - if args.vision_pretraining and args.vision_pretraining_type == "dino": + if getattr(args, 'vision_pretraining', False) and args.vision_pretraining_type == "dino": unwrapped_model = unwrap_model(model[0]) unwrapped_model.cancel_gradients_last_layer(args.curr_iteration) @@ -471,7 +573,7 @@ def train_step(forward_step_func, data_iterator, timers('optimizer').stop() # Vision momentum. - if args.vision_pretraining and args.vision_pretraining_type == "dino": + if getattr(args, 'vision_pretraining', False) and args.vision_pretraining_type == "dino": unwrapped_model = unwrap_model(model[0]) unwrapped_model.update_momentum(args.curr_iteration) @@ -507,6 +609,7 @@ def training_log(loss_dict, total_loss_dict, learning_rate, iteration, timers = get_timers() writer = get_tensorboard_writer() wandb_writer = get_wandb_writer() + one_logger = get_one_logger() # Advanced, skipped, and Nan iterations. 
advanced_iters_key = 'advanced iterations' @@ -568,6 +671,12 @@ def training_log(loss_dict, total_loss_dict, learning_rate, iteration, batch_size = args.micro_batch_size * args.data_parallel_size * \ get_num_microbatches() + # Track app tag & app tag ID + if one_logger: + job_name = os.environ.get('SLURM_JOB_NAME', None) + current_app_tag = f'{job_name}_{batch_size}_{args.world_size}' + one_logger.log_app_tag(current_app_tag) + total_iterations = total_loss_dict[advanced_iters_key] + \ total_loss_dict[skipped_iters_key] @@ -650,8 +759,10 @@ def training_log(loss_dict, total_loss_dict, learning_rate, iteration, if iteration % args.log_interval == 0: elapsed_time = timers('interval-time').elapsed(barrier=True) elapsed_time_per_iteration = elapsed_time / total_iterations + throughput = num_floating_point_operations(args, batch_size) / ( elapsed_time_per_iteration * 10**12 * args.world_size) + tokens_per_sec_per_gpu = (args.seq_length * batch_size) / args.world_size / elapsed_time_per_iteration if args.log_timers_to_tensorboard: if writer: writer.add_scalar('iteration-time', @@ -665,22 +776,25 @@ def training_log(loss_dict, total_loss_dict, learning_rate, iteration, args.consumed_train_samples) log_string += ' elapsed time per iteration (ms): {:.1f} |'.format( elapsed_time_per_iteration * 1000.0) - if args.log_throughput: - log_string += f' throughput per GPU (TFLOP/s/GPU): {throughput:.1f} |' - if args.log_timers_to_tensorboard: - if writer: - writer.add_scalar('throughput', throughput, iteration) - if wandb_writer: - wandb_writer.log({'throughput': throughput}, iteration) + log_string += f' throughput per GPU (TFLOP/s/GPU): {throughput:.1f} |' + if args.log_timers_to_tensorboard: + if writer: + writer.add_scalar('throughput', throughput, iteration) + if wandb_writer: + wandb_writer.log({'throughput': throughput}, iteration) + log_string += ' tokens-per-second-per-gpu: {:.2f} |'.format(tokens_per_sec_per_gpu) + if wandb_writer: + wandb_writer.log({'tokens_per_sec_per_gpu': tokens_per_sec_per_gpu}, iteration) log_string += ' learning rate: {:.3E} |'.format(learning_rate) log_string += ' global batch size: {:5d} |'.format(batch_size) + loss_dict_avg={} for key in total_loss_dict: if key not in [advanced_iters_key, skipped_iters_key, nan_iters_key]: - avg = total_loss_dict[key].item() / \ + loss_dict_avg[key] = total_loss_dict[key].item() / \ float(max(1, total_loss_dict[advanced_iters_key])) - if avg > 0.0: - log_string += ' {}: {:.6E} |'.format(key, avg) + if loss_dict_avg[key] > 0.0: + log_string += ' {}: {:.6E} |'.format(key, loss_dict_avg[key]) total_loss_dict[key] = torch.tensor([0.0], dtype=torch.float, device='cuda') log_string += ' loss scale: {:.1f} |'.format(loss_scale) if grad_norm is not None: @@ -706,18 +820,71 @@ def training_log(loss_dict, total_loss_dict, learning_rate, iteration, report_memory_flag = False timers.log(timers_to_log, normalizer=args.log_interval) + # Weights and biases reporting + if is_last_rank() and wandb_writer is not None: + metrics = { + 'learning_rate': learning_rate, + 'consumed_samples': args.consumed_train_samples, + 'loss_scale': loss_scale, + 'grad_norm': grad_norm, + 'tflops': throughput, + 'tokens_per_sec_per_gpu': tokens_per_sec_per_gpu, + **loss_dict_avg + } + wandb_writer.log({"Training":metrics}, step=iteration) + return report_memory_flag -def save_checkpoint_and_time(iteration, model, optimizer, opt_param_scheduler): +def compute_throughputs_and_append_to_progress_log(iteration, + num_floating_point_operations_so_far): + args = get_args() + if 
args.save is None: + return + + # Compute job throughput. + # args.num_floating_point_operations_so_far keeps track of floating-point operations + # completed at the start of job. + global _TRAIN_START_TIME + job_throughput = \ + (num_floating_point_operations_so_far - + args.num_floating_point_operations_so_far) / ( + (time.time() - _TRAIN_START_TIME) * 10**12 * args.world_size) + + # Compute cumulative throughput since jobs of this world size were launched. + # `get_start_time_from_progress_log` returns start time and number of floating-point + # operations of first job of this world size. + start_time, start_num_floating_point_operations = get_start_time_from_progress_log() + elapsed_time = (datetime.now() - start_time).total_seconds() + cumulative_throughput = \ + (num_floating_point_operations_so_far - + start_num_floating_point_operations) / ( + elapsed_time * 10**12 * args.world_size) + + tokens_so_far = args.consumed_train_samples * args.seq_length + + append_to_progress_log(f"Saved checkpoint\tIteration: {iteration}\t" + f"Job throughput: {job_throughput:.1f} TFLOP/s/GPU\t" + f"Cumulative throughput: {cumulative_throughput:.1f} TFLOP/s/GPU\t" + f"Floating-point operations: {num_floating_point_operations_so_far:.2e}\t" + f"Tokens (in billions): {tokens_so_far / 10**9:.2f}") + + +def save_checkpoint_and_time(iteration, model, optimizer, opt_param_scheduler, + num_floating_point_operations_so_far): + args = get_args() timers = get_timers() - # Extra barrier is added to make sure - # all ranks report the max time. + # Extra barrier is added to make sure all ranks report the max time. timers('save-checkpoint', log_level=0).start(barrier=True) - save_checkpoint(iteration, model, optimizer, opt_param_scheduler) + save_checkpoint(iteration, model, optimizer, opt_param_scheduler, + num_floating_point_operations_so_far) timers('save-checkpoint').stop(barrier=True) timers.log(['save-checkpoint']) + if args.log_progress: + compute_throughputs_and_append_to_progress_log(iteration, + num_floating_point_operations_so_far) + def train(forward_step_func, model, optimizer, opt_param_scheduler, train_data_iterator, valid_data_iterator, @@ -738,6 +905,19 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, # Iterations. 
iteration = args.iteration + one_logger = get_one_logger() + if one_logger: + iteration_start = iteration + train_samples_start = args.consumed_train_samples + train_samples_target = args.train_samples + one_logger.log_metrics({ + 'train_samples_start': args.consumed_train_samples, + 'train_iterations_start': iteration, + 'train_samples_target': train_samples_target, + 'train_iterations_target': args.train_iters, + }) + + num_floating_point_operations_so_far = args.num_floating_point_operations_so_far # Setup some training config params config.grad_scale_func = optimizer.scale_loss @@ -773,14 +953,76 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, gc.disable() gc.collect() - while iteration < args.train_iters: + rank = torch.distributed.get_rank() + if args.torch_profile_dir is not None and rank in args.profile_ranks: + os.makedirs(args.torch_profile_dir, exist_ok=True) + def trace_fn(p: torch.profiler.profile): + path=os.path.join(args.torch_profile_dir, f"profile_rank_{rank}_step_{iteration}") + print(f"Saving trace to {path}") + p.export_chrome_trace(path) + + schedule = torch.profiler.schedule( + skip_first=0, + warmup=args.profile_step_start, + wait=0, + active=args.profile_step_end-args.profile_step_start, + repeat=1, + ) + profiler = torch.profiler.profile( + schedule=schedule, + activities=[torch.profiler.ProfilerActivity.CUDA], + on_trace_ready=trace_fn, + with_modules=True, + ) + else: + profiler = None + + num_microbatches = get_num_microbatches() + eval_duration = 0.0 + eval_iterations = 0 + def track_e2e_metrics(): + # Nested function to track a bunch of E2E APP metrics + if one_logger: + train_duration = timers('interval-time').active_time() # overall_elapsed + train_samples = args.consumed_train_samples - train_samples_start + train_iterations = iteration - iteration_start + train_iterations_time_msecs_avg = (train_duration * 1000.0) / train_iterations + if eval_iterations: + validation_iterations_time_msecs_avg = (eval_duration * 1000.0) / eval_iterations + else: + validation_iterations_time_msecs_avg = None + + one_logger.log_metrics({ + 'train_iterations_end': iteration, + 'train_samples_end': args.consumed_train_samples, + 'train_iterations': train_iterations, + 'train_samples': train_samples, + 'train_iterations_time_msecs_avg': train_iterations_time_msecs_avg, + 'validation_iterations_time_msecs_avg': validation_iterations_time_msecs_avg + }) + + with contextlib.nullcontext() if profiler is None else profiler: + while iteration < args.train_iters: if args.profile and \ iteration == args.profile_step_start and \ torch.distributed.get_rank() in args.profile_ranks: torch.cuda.cudart().cudaProfilerStart() torch.autograd.profiler.emit_nvtx(record_shapes=True).__enter__() - update_num_microbatches(args.consumed_train_samples) + # Update number of microbatches first without consistency check to decide if a + # checkpoint should be saved. If the number of microbatches is different + # from the previous iteration, save a checkpoint. Then run consistency check + # to make sure training configuration is still valid. 
+ update_num_microbatches(args.consumed_train_samples, consistency_check=False) + if get_num_microbatches() != num_microbatches and iteration != 0: + assert get_num_microbatches() > num_microbatches, \ + "number of microbatches should be increasing due to batch size rampup" + save_checkpoint_and_time(iteration, model, optimizer, + opt_param_scheduler, + num_floating_point_operations_so_far) + num_microbatches = get_num_microbatches() + update_num_microbatches(args.consumed_train_samples, consistency_check=True) + args.curr_iteration = iteration loss_dict, skipped_iter, grad_norm, num_zeros_in_grad = \ train_step(forward_step_func, @@ -790,21 +1032,32 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, opt_param_scheduler, config) iteration += 1 - args.consumed_train_samples += mpu.get_data_parallel_world_size() * \ - args.micro_batch_size * \ - get_num_microbatches() + batch_size = mpu.get_data_parallel_world_size() * \ + args.micro_batch_size * \ + get_num_microbatches() + args.consumed_train_samples += batch_size + num_floating_point_operations_so_far += num_floating_point_operations(args, batch_size) # Logging. loss_scale = optimizer.get_loss_scale().item() params_norm = None if args.log_params_norm: params_norm = calc_params_l2_norm(model) + + if iteration % args.log_interval == 0: + track_e2e_metrics() + report_memory_flag = training_log(loss_dict, total_loss_dict, optimizer.param_groups[0]['lr'], iteration, loss_scale, report_memory_flag, skipped_iter, grad_norm, params_norm, num_zeros_in_grad) + if profiler is not None: + profiler.step() + + save_tensor_logs(f"train_{iteration}") + # Autoresume if args.adlr_autoresume and \ (iteration % args.adlr_autoresume_interval == 0): @@ -815,17 +1068,25 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, if args.eval_interval and iteration % args.eval_interval == 0 and \ args.do_valid: timers('interval-time').stop() + if args.use_distributed_optimizer and args.overlap_param_gather: + optimizer.disable_pre_hook() if args.manual_gc and args.manual_gc_eval: # Collect all objects. gc.collect() prefix = 'iteration {}'.format(iteration) + timers('eval-time', log_level=0).start(barrier=True) evaluate_and_print_results(prefix, forward_step_func, valid_data_iterator, model, iteration, process_non_loss_data_func, config, False) + eval_duration += timers('eval-time').elapsed() + eval_iterations += args.eval_iters + timers('eval-time').stop() if args.manual_gc and args.manual_gc_eval: # Collect only the objects created and used in evaluation. 
gc.collect(generation=0) + if args.use_distributed_optimizer and args.overlap_param_gather: + optimizer.enable_pre_hook() timers('interval-time', log_level=0).start(barrier=True) # Checkpointing @@ -834,7 +1095,8 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, signal_handler = get_signal_handler() if any(signal_handler.signals_received()): save_checkpoint_and_time(iteration, model, optimizer, - opt_param_scheduler) + opt_param_scheduler, + num_floating_point_operations_so_far) print_datetime('exiting program after receiving SIGTERM.') exit = True break @@ -843,7 +1105,8 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, iteration % args.save_interval == 0: timers('interval-time').stop() save_checkpoint_and_time(iteration, model, optimizer, - opt_param_scheduler) + opt_param_scheduler, + num_floating_point_operations_so_far) saved_checkpoint = True timers('interval-time', log_level=0).start(barrier=True) @@ -859,7 +1122,8 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, if done: if not saved_checkpoint: save_checkpoint_and_time(iteration, model, optimizer, - opt_param_scheduler) + opt_param_scheduler, + num_floating_point_operations_so_far) print_datetime('exiting program after {} minutes'.format(train_time)) exit = True break @@ -868,7 +1132,8 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, if args.exit_interval and iteration % args.exit_interval == 0: if args.save and not saved_checkpoint: save_checkpoint_and_time(iteration, model, optimizer, - opt_param_scheduler) + opt_param_scheduler, + num_floating_point_operations_so_far) torch.distributed.barrier() print_datetime('exiting program at iteration {}'.format(iteration)) exit = True @@ -883,6 +1148,8 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, if args.manual_gc_interval != 0 and iteration % args.manual_gc_interval == 0: gc.collect() + track_e2e_metrics() + # Flush TensorBoard and WandB writers. writer = get_tensorboard_writer() if writer: @@ -891,11 +1158,15 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, if wandb_writer: wandb_writer.finish() + # Close out pre-hooks if using distributed optimizer and overlapped param gather. + if args.use_distributed_optimizer and args.overlap_param_gather: + optimizer.disable_pre_hook() + # If any exit conditions (signal handler, duration, iterations) have been reached, exit. if exit: sys.exit() - return iteration + return iteration, num_floating_point_operations_so_far def evaluate(forward_step_func, @@ -1111,10 +1382,10 @@ def build_train_valid_test_data_loaders( train_dataloader = build_pretraining_data_loader( train_ds, args.consumed_train_samples) if args.skip_train: - valid_dataloader = build_pretraining_data_loader(valid_ds, 0) + valid_dataloader = build_pretraining_data_loader(valid_ds, 0, num_workers=args.valid_num_workers) else: valid_dataloader = build_pretraining_data_loader( - valid_ds, args.consumed_valid_samples) + valid_ds, args.consumed_valid_samples, num_workers=args.valid_num_workers) test_dataloader = build_pretraining_data_loader(test_ds, 0) # Flags to know if we need to do training/validation/testing. 
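For reference, the two rates that training_log() now prints, and that compute_throughputs_and_append_to_progress_log() writes to the progress log at every checkpoint, reduce to a small amount of arithmetic. Below is a minimal sketch with placeholder names that are not part of the patch: total_flops stands in for whatever num_floating_point_operations() returns over the measured window.

# Minimal sketch, illustrative names only; not part of the patch above.
def per_gpu_tflops(total_flops: float, elapsed_seconds: float, world_size: int) -> float:
    """TFLOP/s per GPU, using the same 10**12 * world_size scaling as training_log()."""
    return total_flops / (elapsed_seconds * 10**12 * world_size)

def per_gpu_tokens_per_sec(seq_length: int, global_batch_size: int,
                           world_size: int, iteration_time_s: float) -> float:
    """Tokens per second per GPU for one iteration of global_batch_size sequences."""
    return (seq_length * global_batch_size) / world_size / iteration_time_s

# Assumed numbers, for illustration only: 1e16 FLOPs of work done in 2 s on 64 GPUs
# gives per_gpu_tflops(1e16, 2.0, 64) == 78.125 TFLOP/s/GPU.

The job and cumulative throughputs recorded in the progress log use the same formula and differ only in the time base: the job figure divides by wall time since this job's _TRAIN_START_TIME, while the cumulative figure divides by the elapsed time since the first "Starting job" entry with the same world size, as returned by get_start_time_from_progress_log().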
diff --git a/megatron/utils.py b/megatron/utils.py index 8f6b18220c..fe284a378a 100644 --- a/megatron/utils.py +++ b/megatron/utils.py @@ -235,17 +235,18 @@ def get_batch_on_this_cp_rank(batch): if cp_size > 1: cp_rank = mpu.get_context_parallel_rank() for key, val in batch.items(): - seq_dim = 1 if key != 'attention_mask' else 2 - val = val.view( - *val.shape[0:seq_dim], - 2 * cp_size, - val.shape[seq_dim] // (2 * cp_size), - *val.shape[(seq_dim + 1) :], - ) - index = torch.tensor([cp_rank, (2 * cp_size - cp_rank - 1)], device=val.device) - val = val.index_select(seq_dim, index) - val = val.view(*val.shape[0:seq_dim], -1, *val.shape[(seq_dim + 2) :]) - batch[key] = val + if val is not None: + seq_dim = 1 if key != 'attention_mask' else 2 + val = val.view( + *val.shape[0:seq_dim], + 2 * cp_size, + val.shape[seq_dim] // (2 * cp_size), + *val.shape[(seq_dim + 1) :], + ) + index = torch.tensor([cp_rank, (2 * cp_size - cp_rank - 1)], device=val.device) + val = val.index_select(seq_dim, index) + val = val.view(*val.shape[0:seq_dim], -1, *val.shape[(seq_dim + 2) :]) + batch[key] = val return batch @@ -269,3 +270,84 @@ def print_rank_last(message): print(message, flush=True) else: print(message, flush=True) + + +def get_batch_on_this_tp_rank(data_iterator): + + args = get_args() + + def _broadcast(item): + torch.distributed.broadcast(item, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) + + if mpu.get_tensor_model_parallel_rank() == 0: + + if data_iterator is not None: + data = next(data_iterator) + else: + data = None + + batch = { + 'tokens': data["tokens"].cuda(non_blocking = True), + 'labels': data["labels"].cuda(non_blocking = True), + 'loss_mask': data["loss_mask"].cuda(non_blocking = True), + 'attention_mask': data["attention_mask"].cuda(non_blocking = True), + 'position_ids': data["position_ids"].cuda(non_blocking = True) + } + + if args.pipeline_model_parallel_size == 1: + _broadcast(batch['tokens']) + _broadcast(batch['labels']) + _broadcast(batch['loss_mask']) + _broadcast(batch['attention_mask']) + _broadcast(batch['position_ids']) + + elif mpu.is_pipeline_first_stage(): + _broadcast(batch['tokens']) + _broadcast(batch['attention_mask']) + _broadcast(batch['position_ids']) + + elif mpu.is_pipeline_last_stage(): + _broadcast(batch['labels']) + _broadcast(batch['loss_mask']) + _broadcast(batch['attention_mask']) + + else: + + tokens=torch.empty((args.micro_batch_size,args.seq_length), dtype = torch.int64 , device = torch.cuda.current_device()) + labels=torch.empty((args.micro_batch_size,args.seq_length), dtype = torch.int64 , device = torch.cuda.current_device()) + loss_mask=torch.empty((args.micro_batch_size,args.seq_length), dtype = torch.float32 , device = torch.cuda.current_device()) + attention_mask=torch.empty((args.micro_batch_size,1,args.seq_length,args.seq_length), dtype = torch.bool , device = torch.cuda.current_device()) + position_ids=torch.empty((args.micro_batch_size,args.seq_length), dtype = torch.int64 , device = torch.cuda.current_device()) + + if args.pipeline_model_parallel_size == 1: + _broadcast(tokens) + _broadcast(labels) + _broadcast(loss_mask) + _broadcast(attention_mask) + _broadcast(position_ids) + + elif mpu.is_pipeline_first_stage(): + labels=None + loss_mask=None + + _broadcast(tokens) + _broadcast(attention_mask) + _broadcast(position_ids) + + elif mpu.is_pipeline_last_stage(): + tokens=None + position_ids=None + + _broadcast(labels) + _broadcast(loss_mask) + _broadcast(attention_mask) + + batch = { + 'tokens': 
tokens, + 'labels': labels, + 'loss_mask': loss_mask, + 'attention_mask': attention_mask, + 'position_ids': position_ids + } + + return batch diff --git a/megatron/yaml_arguments.py b/megatron/yaml_arguments.py new file mode 100644 index 0000000000..f86c74efb7 --- /dev/null +++ b/megatron/yaml_arguments.py @@ -0,0 +1,479 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + +"""Megatron arguments.""" + +import argparse +import dataclasses +import json +import os +import torch +import types + +from itertools import chain, starmap +from types import SimpleNamespace +import yaml, re, os +from types import SimpleNamespace + +import torch.nn.functional as F +from megatron.global_vars import set_retro_args, get_retro_args + +from megatron.core.models.retro import RetroConfig +from megatron.core.transformer import TransformerConfig + +def get_retro_args_path(workdir): + '''Argument copy stored within retro workdir.''' + return os.path.join(workdir, "args.json") + +# Taken from https://stackoverflow.com/questions/65414773/parse-environment-variable-from-yaml-with-pyyaml +# Allows for yaml to use environment variables +env_pattern = re.compile(r".*?\${(.*?)}.*?") +def env_constructor(loader, node): + value = loader.construct_scalar(node) + for group in env_pattern.findall(value): + assert os.environ.get(group) is not None, f"environment variable {group} in yaml not found" + value = value.replace(f"${{{group}}}", os.environ.get(group)) + return value +yaml.add_implicit_resolver("!pathex", env_pattern) +yaml.add_constructor("!pathex", env_constructor) + + +str_dtype_to_torch = { + "float32" : torch.float32, + "float16" : torch.float16, + "bfloat16" : torch.bfloat16 +} + +def validate_yaml(args, defaults={}): + + # This is for legacy script env var setting + if type(args.data_path) is str: + # If no white space its a single path + split_data_path = args.data_path.split() + if len(split_data_path) != 1: + args.data_path = split_data_path + + # Tensor model parallel size. + args.model_parallel.tensor_model_parallel_size = min( + args.model_parallel.tensor_model_parallel_size, args.world_size) + assert args.world_size % args.model_parallel.tensor_model_parallel_size == 0, 'world size'\ + ' ({}) is not divisible by tensor model parallel size ({})'.format( + args.world_size, args.model_parallel.tensor_model_parallel_size) + # Pipeline model parallel size. + args.model_parallel.pipeline_model_parallel_size = min( + args.model_parallel.pipeline_model_parallel_size, + (args.world_size // args.model_parallel.tensor_model_parallel_size)) + args.model_parallel.transformer_pipeline_model_parallel_size = ( + args.model_parallel.pipeline_model_parallel_size - 1 + if args.standalone_embedding_stage else + args.model_parallel.pipeline_model_parallel_size + ) + # Checks. 
+ model_parallel_size = args.model_parallel.pipeline_model_parallel_size * \ + args.model_parallel.tensor_model_parallel_size + assert args.world_size % (model_parallel_size * args.model_parallel.context_parallel_size) == 0, \ + 'world size ({}) is not divisible by tensor parallel size ({}) times ' \ + 'pipeline parallel size ({}) times context parallel size ({})'.format( + args.world_size, args.model_parallel.tensor_model_parallel_size, + args.model_parallel.pipeline_model_parallel_size, args.model_parallel.context_parallel_size) + + # data_parallel_size is not in model parallel config + args.data_parallel_size = args.world_size // (model_parallel_size * args.model_parallel.context_parallel_size) + if args.rank == 0: + print('using world size: {}, data-parallel size: {}, ' + 'context-parallel size: {} ' + 'tensor-model-parallel size: {}, ' + 'pipeline-model-parallel size: {} '.format( + args.world_size, args.data_parallel_size, + args.model_parallel.context_parallel_size, + args.model_parallel.tensor_model_parallel_size, + args.model_parallel.pipeline_model_parallel_size), flush=True) + if args.model_parallel.pipeline_model_parallel_size > 1: + if args.model_parallel.pipeline_model_parallel_split_rank is not None: + assert args.model_parallel.pipeline_model_parallel_split_rank < \ + args.model_parallel.pipeline_model_parallel_size, 'split rank needs'\ + ' to be less than pipeline model parallel size ({})'.format( + args.model_parallel.pipeline_model_parallel_size) + + if args.model_parallel.tp_comm_overlap: + assert args.model_parallel.sequence_parallel == True, 'Tensor parallel communication/GEMM overlap can happen only when sequence parallelism is enabled' + + # Set input defaults. + for key in defaults: + # For default to be valid, it should not be provided in the + # arguments that are passed to the program. We check this by + # ensuring the arg is set to None. + if getattr(args, key, None) is not None: + if args.rank == 0: + print('WARNING: overriding default arguments for {key}:{v} \ + with {key}:{v2}'.format(key=key, v=defaults[key], + v2=getattr(args, key)), + flush=True) + else: + setattr(args, key, defaults[key]) + + # Batch size. 
+ assert args.micro_batch_size is not None + assert args.micro_batch_size > 0 + if args.global_batch_size is None: + args.global_batch_size = args.micro_batch_size * args.data_parallel_size + if args.rank == 0: + print('setting global batch size to {}'.format( + args.global_batch_size), flush=True) + assert args.global_batch_size > 0 + + # num_layers_per_virtual_pipeline_stage is not insde model parallel for checkpointing + if args.num_layers_per_virtual_pipeline_stage is not None: + assert args.model_parallel.pipeline_model_parallel_size > 2, \ + 'pipeline-model-parallel size should be greater than 2 with ' \ + 'interleaved schedule' + assert args.language_model.num_layers % args.model_parallel.transformer_pipeline_model_parallel_size == 0, \ + 'number of layers should be divisible by the pipeline parallel size' + num_layers_per_pipeline_stage = args.language_model.num_layers // args.model_parallel.transformer_pipeline_model_parallel_size + assert num_layers_per_pipeline_stage % args.num_layers_per_virtual_pipeline_stage == 0, \ + 'number of layers per pipeline stage must be divisible number of layers per virtual pipeline stage' + args.model_parallel.virtual_pipeline_model_parallel_size = num_layers_per_pipeline_stage // \ + args.num_layers_per_virtual_pipeline_stage + else: + args.model_parallel.virtual_pipeline_model_parallel_size = None + # Overlap P2P communication is disabled if not using the interleaved schedule. + args.model_parallel.overlap_p2p_comm = False + if args.rank == 0: + print('WARNING: Setting args.overlap_p2p_comm to False since non-interleaved ' + 'schedule does not support overlapping p2p communication') + + if args.overlap_param_gather: + assert args.use_distributed_optimizer, \ + '--overlap-param-gather only supported with distributed optimizer' + assert args.overlap_grad_reduce, \ + '--overlap-grad-reduce should be turned on when using --overlap-param-gather' + + # Parameters dtype. + if args.model_parallel.fp16: + assert not args.model_parallel.bf16 + args.model_parallel.params_dtype = torch.half + if args.model_parallel.bf16: + assert not args.model_parallel.fp16 + args.model_parallel.params_dtype = torch.bfloat16 + # bfloat16 requires gradient accumulation and all-reduce to + # be done in fp32. + if not args.accumulate_allreduce_grads_in_fp32: + args.accumulate_allreduce_grads_in_fp32 = True + if args.rank == 0: + print('accumulate and all-reduce gradients in fp32 for ' + 'bfloat16 data type.', flush=True) + + if args.rank == 0: + print('using {} for parameters ...'.format(args.model_parallel.params_dtype), + flush=True) + + if args.dataloader_type is None: + args.dataloader_type = 'single' + + # Consumed tokens. + args.consumed_train_samples = 0 + args.consumed_valid_samples = 0 + + # Support for variable sequence lengths across batches/microbatches. + # set it if the dataloader supports generation of variable sequence lengths + # across batches/microbatches. Due to additional communication overhead + # during pipeline parallelism, it should not be set if sequence length + # is constant during training. + args.model_parallel.variable_seq_lengths = False + + # Iteration-based training. + if args.train_iters: + # If we use iteration-based training, make sure the + # sample-based options are off. 
+        assert args.train_samples is None, \
+            'expected iteration-based training'
+        assert args.lr_decay_samples is None, \
+            'expected iteration-based learning rate decay'
+        assert args.lr_warmup_samples == 0, \
+            'expected iteration-based learning rate warmup'
+        assert args.rampup_batch_size is None, \
+            'expected no batch-size rampup for iteration-based training'
+        if args.lr_warmup_fraction is not None:
+            assert args.lr_warmup_iters == 0, \
+                'can only specify one of lr-warmup-fraction and lr-warmup-iters'
+
+    # Sample-based training.
+    if args.train_samples:
+        # If we use sample-based training, make sure the
+        # iteration-based options are off.
+        assert args.train_iters is None, \
+            'expected sample-based training'
+        assert args.lr_decay_iters is None, \
+            'expected sample-based learning rate decay'
+        assert args.lr_warmup_iters == 0, \
+            'expected sample-based learning rate warmup'
+        if args.lr_warmup_fraction is not None:
+            assert args.lr_warmup_samples == 0, \
+                'can only specify one of lr-warmup-fraction ' \
+                'and lr-warmup-samples'
+
+    # TODO: How to handle this better?
+    if args.language_model.num_layers is not None:
+        assert args.encoder_num_layers is None, \
+            'cannot have both num-layers and encoder-num-layers specified'
+        args.encoder_num_layers = args.language_model.num_layers
+    else:
+        assert args.encoder_num_layers is not None, \
+            'either num-layers or encoder-num-layers should be specified'
+        args.language_model.num_layers = args.encoder_num_layers
+
+    # Check required arguments.
+    # removed max_position_embeddings from required args
+    required_args = ['num_layers', 'hidden_size', 'num_attention_heads']
+    for req_arg in required_args:
+        _check_arg_is_not_none(args.language_model, req_arg)
+
+    # Checks.
+    if args.language_model.ffn_hidden_size is None:
+        if args.language_model.activation_func == "swiglu":
+            # Reduce the dimension of the MLP since the projection happens on
+            # two linear layers. This keeps the number of parameters in
+            # the same ballpark as the counterpart with 4*h size.
+            # We keep it a multiple of 64, which means the actual tensor size
+            # will be a multiple of 64 / tp_size.
+            args.language_model.ffn_hidden_size = int((4 * args.language_model.hidden_size * 2 / 3) / 64) * 64
+        else:
+            args.language_model.ffn_hidden_size = 4 * args.language_model.hidden_size
+
+    if args.language_model.kv_channels is None:
+        assert args.language_model.hidden_size % args.language_model.num_attention_heads == 0
+        args.language_model.kv_channels = args.language_model.hidden_size // args.language_model.num_attention_heads
+
+    # TODO: Implement arguments for encoder-decoder.
+    if args.seq_length is not None:
+        assert args.encoder_seq_length is None
+        args.encoder_seq_length = args.seq_length
+    else:
+        assert args.encoder_seq_length is not None
+        args.seq_length = args.encoder_seq_length
+
+    if args.seq_length is not None:
+        assert args.max_position_embeddings >= args.seq_length
+    if args.decoder_seq_length is not None:
+        assert args.max_position_embeddings >= args.decoder_seq_length
+    if args.lr is not None:
+        assert args.min_lr <= args.lr
+    if args.save is not None:
+        assert args.save_interval is not None
+    # Mixed precision checks.
+    if args.fp16_lm_cross_entropy:
+        assert args.fp16, 'lm cross entropy in fp16 is only supported in fp16 mode.'
+    if args.language_model.fp32_residual_connection:
+        assert args.model_parallel.fp16 or args.model_parallel.bf16, \
+            'residual connection in fp32 only supported when using fp16 or bf16.'
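To make the swiglu feed-forward sizing above concrete, here is a worked example with an assumed hidden size; 4096 is illustrative and not taken from the patch.

# Worked example of the swiglu sizing rule in validate_yaml(); assumed hidden_size, illustration only.
hidden_size = 4096
ffn_hidden_size = int((4 * hidden_size * 2 / 3) / 64) * 64
# 4*h = 16384; scaled by 2/3 -> 10922.67; floored to a multiple of 64 -> 10880.
assert ffn_hidden_size == 10880

As the comment above notes, the multiple-of-64 rounding keeps the per-tensor-parallel-rank slice (ffn_hidden_size / tp_size) at a well-aligned size.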
+
+    if args.language_model.moe_grouped_gemm:
+        assert args.model_parallel.bf16, 'Currently GroupedGEMM for MoE only supports bf16 dtype.'
+        dc = torch.cuda.get_device_capability()
+        assert dc[0] >= 8, "Unsupported compute capability for GroupedGEMM kernels."
+
+    if args.weight_decay_incr_style == 'constant':
+        assert args.start_weight_decay is None
+        assert args.end_weight_decay is None
+        args.start_weight_decay = args.weight_decay
+        args.end_weight_decay = args.weight_decay
+    else:
+        assert args.start_weight_decay is not None
+        assert args.end_weight_decay is not None
+
+    TORCH_MAJOR = int(torch.__version__.split('.')[0])
+    TORCH_MINOR = int(torch.__version__.split('.')[1])
+    # Persistent fused layer norm.
+    if TORCH_MAJOR < 1 or (TORCH_MAJOR == 1 and TORCH_MINOR < 11):
+        args.language_model.persist_layer_norm = False
+        if args.rank == 0:
+            print('Persistent fused layer norm kernel is supported from '
+                  'pytorch v1.11 (nvidia pytorch container paired with v1.11). '
+                  'Defaulting to no_persist_layer_norm=True')
+
+    # Activation recomputing.
+    if args.language_model.distribute_saved_activations:
+        assert args.model_parallel.tensor_model_parallel_size > 1, 'can distribute ' \
+            'recomputed activations only across tensor model ' \
+            'parallel groups'
+        assert args.language_model.recompute_granularity == 'full', \
+            'distributed recompute activations is only '\
+            'applicable to full recompute granularity'
+        assert args.language_model.recompute_method is not None, \
+            'for distributed recompute activations to work you '\
+            'need to use a recompute method '
+        assert (TORCH_MAJOR, TORCH_MINOR) >= (1, 10), \
+            'distributed recompute activations are supported for pytorch ' \
+            'v1.10 and above (Nvidia Pytorch container >= 21.07). Current ' \
+            'pytorch version is v%s.%s.' % (TORCH_MAJOR, TORCH_MINOR)
+
+    if args.language_model.recompute_granularity == 'selective':
+        assert args.language_model.recompute_method is None, \
+            'recompute method is not yet supported for ' \
+            'selective recompute granularity'
+
+    # Disable sequence parallelism when tp=1
+    # to avoid change in numerics when
+    # sequence_parallelism is enabled.
+    if args.model_parallel.tensor_model_parallel_size == 1:
+        args.model_parallel.sequence_parallel = False
+
+    # Disable async_tensor_model_parallel_allreduce when
+    # model parallel memory optimization is enabled.
+    if args.model_parallel.sequence_parallel:
+        args.model_parallel.async_tensor_model_parallel_allreduce = False
+
+    if os.environ.get('CUDA_DEVICE_MAX_CONNECTIONS') != "1":
+        if args.model_parallel.sequence_parallel:
+            raise RuntimeError(
+                "Using sequence parallelism requires setting the environment variable "
+                "CUDA_DEVICE_MAX_CONNECTIONS to 1")
+        if args.model_parallel.async_tensor_model_parallel_allreduce:
+            raise RuntimeError(
+                "Using async gradient all reduce requires setting the environment "
+                "variable CUDA_DEVICE_MAX_CONNECTIONS to 1")
+
+    # Retro checks.
+    if getattr(args, 'retro_add_retriever', False):
+
+        # Sequence parallelism unsupported.
+        assert not args.sequence_parallel, \
+            "retro currently does not support sequence parallelism."
+
+        # Pipeline parallelism unsupported.
+        assert args.pipeline_model_parallel_size == 1, \
+            "retro currently does not support pipeline parallelism."
+
+    # TODO: Retro args loading not tested.
+    # Load retro args (used by both Retro & GPT).
+ if getattr(args, 'retro_workdir', None) is not None: + retro_args_path = get_retro_args_path(args.retro_workdir) + assert os.path.exists(retro_args_path), "retro workdir missing args.json" + with open(retro_args_path) as f: + retro_args = types.SimpleNamespace(**json.load(f)) + retro_args.retro_return_doc_ids = args.retro_return_doc_ids + retro_args.retro_gpt_retrieved_length = \ + args.retro_num_retrieved_chunks * \ + retro_args.retro_gpt_chunk_length + set_retro_args(retro_args) + + if args.language_model.rotary_interleaved and args.language_model.apply_rope_fusion: + raise RuntimeError('--rotary-interleaved does not work with rope_fusion.') + + # MoE Spec check + if args.language_model.num_moe_experts is not None: + assert args.spec is None, "Model Spec must be None when using MoEs" + if args.model_parallel.tensor_model_parallel_size > 1: + assert args.model_parallel.sequence_parallel, \ + "When using MoE and tensor parallelism, sequence parallelism must be used." + + # Expert parallelism check + if args.model_parallel.expert_model_parallel_size > 1: + assert args.language_model.num_moe_experts is not None, "num_experts must be non None to use expert model parallelism" + assert args.language_model.num_moe_experts % args.model_parallel.expert_model_parallel_size == 0, \ + "Number of experts should be a multiple of expert model parallel_size." + assert not args.model_parallel.fp16, \ + "Expert parallelism is not supported with fp16 training." + + # Print arguments. + _print_args("arguments", args) + retro_args = get_retro_args() + if retro_args and args != retro_args: + _print_args("retro arguments", types.SimpleNamespace(**{k:v for k,v in vars(retro_args).items() if k.startswith("retro")}, rank=args.rank)) + + #TODO: Added as much of the global initialization requires the model parallel arguments + args = SimpleNamespace(**args.__dict__, **args.model_parallel.__dict__) + args = SimpleNamespace(**args.__dict__, **args.language_model.__dict__) + # For GPT Layer spec in pretrain_gpt + args.num_experts = args.language_model.num_moe_experts + + return args + +def _print_args(title, args): + """Print arguments.""" + if args.rank == 0: + print(f'------------------------ {title} ------------------------', + flush=True) + str_list = [] + for arg in vars(args): + dots = '.' 
* (48 - len(arg)) + str_list.append(' {} {} {}'.format(arg, dots, getattr(args, arg))) + for arg in sorted(str_list, key=lambda x: x.lower()): + print(arg, flush=True) + print(f'-------------------- end of {title} ---------------------', + flush=True) + +def core_config_from_args(args, dataclass=TransformerConfig): + """Builds core config object from namespace args from given dataclass + + Raises exception if argument missing in args + + Args: + args(SimpleNamespace, optional): Namespace to pull argument values from + dataclass (dataclass, optional): Core dataclass config to pull argument names from + + + Returns: + SimpleNamespace: The returned namespace to build core config from + """ + kw_args = {} + for f in dataclasses.fields(dataclass): + if hasattr(args, f.name): + kw_args[f.name] = getattr(args, f.name) + else: + raise Exception(f"Missing argument {f.name} for {str(dataclass)} config") + return kw_args + +def _check_arg_is_not_none(args, arg): + assert getattr(args, arg) is not None, '{} argument is None'.format(arg) + +def core_transformer_config_from_yaml(args, transfomer_key = "language_model"): + # Combine transfomer config with model parallel args + args = SimpleNamespace(**vars(getattr(args, transfomer_key)), **vars(args.model_parallel)) + # Translate args to core transformer configuration + kw_args = core_config_from_args(args, TransformerConfig) + + # Hardcoded + kw_args['deallocate_pipeline_outputs'] = True + kw_args['pipeline_dtype'] = kw_args['params_dtype'] + kw_args['batch_p2p_comm'] = not args.overlap_p2p_comm + + assert args.activation_func in ["swiglu","squaredrelu","gelu"], f"{args.activation_func} is not a supported activation function" + if args.activation_func == "swiglu": + kw_args['activation_func'] = F.silu + kw_args['gated_linear_unit'] = True + kw_args['bias_activation_fusion'] = args.bias_swiglu_fusion + elif args.activation_func == "squaredrelu": + def squared_relu(x): + return torch.pow(F.relu(x), 2) + kw_args['activation_func'] = squared_relu + elif args.activation_func == "gelu": + kw_args['activation_func'] = F.gelu + if args.add_bias_linear: + kw_args['bias_activation_fusion'] = False + else: + kw_args['bias_activation_fusion'] = args.bias_activation_fusion + + if args.init_method == "xavier_uniform": + kw_args['init_method'] = torch.nn.init.xavier_uniform_ + kw_args['scaled_init_method'] = torch.nn.init.xavier_uniform_ + + #TODO: untested handling of retro + # If using Retro, return Retro config. + retro_args = get_retro_args() + if retro_args: + kw_args['retro_preprocess'] = retro_args + return RetroConfig(**kw_args) + + # Return Transformer config. 
+ return TransformerConfig(**kw_args) + +def load_yaml(yaml_path): + print(f"warning using experimental yaml arguments feature, argparse arguments will be ignored") + with open(yaml_path, "r") as f: + config = yaml.load(f,Loader=yaml.FullLoader) + # Convert to nested namespace + config_namespace = json.loads(json.dumps(config), object_hook=lambda item: SimpleNamespace(**item)) + # Add config location to namespace + config_namespace.yaml_cfg = yaml_path + return config_namespace + diff --git a/pretrain_bert.py b/pretrain_bert.py index 47db48c2be..e6b2f66896 100644 --- a/pretrain_bert.py +++ b/pretrain_bert.py @@ -8,18 +8,22 @@ import torch.nn.functional as F from megatron import get_args +from megatron import get_tokenizer from megatron import print_rank_0 from megatron import get_timers from megatron.core import tensor_parallel from megatron.core.enums import ModelType -from megatron.data.dataset_utils import build_train_valid_test_datasets import megatron.model from megatron.core.models.bert.bert_model import BertModel from megatron.training import pretrain from megatron.utils import average_losses_across_data_parallel_group from megatron.arguments import core_transformer_config_from_args from megatron.core.transformer.spec_utils import import_module -from megatron.core.models.bert.bert_layer_specs import bert_layer_with_transformer_engine_spec +from megatron.core.models.bert.bert_layer_specs import bert_layer_with_transformer_engine_spec, bert_layer_local_spec +from megatron.core.datasets.blended_megatron_dataset_builder import BlendedMegatronDatasetBuilder +from megatron.core.datasets.bert_dataset import BERTMaskedWordPieceDataset, BERTMaskedWordPieceDatasetConfig +from megatron.core import mpu, tensor_parallel + def model_provider(pre_process=True, post_process=True): """Build the model.""" @@ -32,10 +36,15 @@ def model_provider(pre_process=True, post_process=True): if args.use_mcore_models: - if args.spec is not None: + + if args.spec is None: + transformer_layer_spec = bert_layer_with_transformer_engine_spec #default spec + elif args.spec[0] == 'local': + print_rank_0('Using Local spec for transformer layers') + transformer_layer_spec = bert_layer_local_spec + else : transformer_layer_spec = import_module(args.spec) - else: - transformer_layer_spec = bert_layer_with_transformer_engine_spec + model = BertModel( config=config, @@ -137,15 +146,41 @@ def train_valid_test_datasets_provider(train_val_test_num_samples): """Build train, valid, and test datasets.""" args = get_args() + tokenizer = get_tokenizer() + + config = BERTMaskedWordPieceDatasetConfig( + is_built_on_rank=lambda: mpu.get_tensor_model_parallel_rank() == 0, + random_seed=args.seed, + sequence_length=args.seq_length, + blend=args.data_path, + blend_per_split=[ + args.train_data_path, + args.valid_data_path, + args.test_data_path, + ], + split=args.split, + path_to_cache=args.data_cache_path, + mock=False, + tokenizer=tokenizer, + masking_probability=args.mask_prob, + short_sequence_probability=args.short_seq_prob, + masking_max_ngram=3, + masking_do_full_word=True, + masking_do_permutation=False, + masking_use_longer_ngrams=False, + masking_use_geometric_distribution=False, + classification_head=args.bert_binary_head, + ) + print_rank_0('> building train, validation, and test datasets ' 'for BERT ...') - train_ds, valid_ds, test_ds = build_train_valid_test_datasets( - data_prefix=args.data_path, - splits_string=args.split, - train_valid_test_num_samples=train_val_test_num_samples, - max_seq_length=args.seq_length, - 
seed=args.seed, - binary_head=args.bert_binary_head) + + train_ds, valid_ds, test_ds = BlendedMegatronDatasetBuilder( + BERTMaskedWordPieceDataset, + train_val_test_num_samples, + config, + ).build() + print_rank_0("> finished creating BERT datasets ...") return train_ds, valid_ds, test_ds @@ -153,6 +188,9 @@ def train_valid_test_datasets_provider(train_val_test_num_samples): if __name__ == "__main__": + # Temporary for transition to core datasets + train_valid_test_datasets_provider.is_distributed = True + pretrain(train_valid_test_datasets_provider, model_provider, ModelType.encoder_or_decoder, forward_step, args_defaults={'tokenizer_type': 'BertWordPieceLowerCase'}) diff --git a/pretrain_gpt.py b/pretrain_gpt.py index 47b3e91881..8d424644f9 100644 --- a/pretrain_gpt.py +++ b/pretrain_gpt.py @@ -3,32 +3,31 @@ import os import torch -from torch import Tensor from functools import partial from typing import Union from megatron import get_args from megatron import print_rank_0 from megatron import get_timers from megatron import get_tokenizer -from megatron.core import mpu, tensor_parallel +from megatron.core import mpu from megatron.core.enums import ModelType from megatron.core.datasets.blended_megatron_dataset_builder import BlendedMegatronDatasetBuilder from megatron.core.datasets.gpt_dataset import GPTDatasetConfig -from megatron.core.datasets.gpt_dataset import GPTDataset +from megatron.core.datasets.gpt_dataset import MockGPTDataset, GPTDataset import megatron.model from megatron.core.models.gpt import GPTModel from megatron.training import pretrain from megatron.core.transformer.spec_utils import import_module from megatron.utils import ( - get_ltor_masks_and_position_ids, get_batch_on_this_cp_rank, + get_batch_on_this_tp_rank, average_losses_across_data_parallel_group ) +from megatron.tensor_logging import log_tensor, run_and_log_exception from megatron.arguments import core_transformer_config_from_args -from megatron.core.models.gpt.gpt_layer_specs import ( - get_gpt_layer_with_transformer_engine_spec, - gpt_layer_with_transformer_engine_spec_moe -) +from megatron.yaml_arguments import core_transformer_config_from_yaml +from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec, get_gpt_layer_local_spec + def model_provider(pre_process=True, post_process=True) -> Union[GPTModel, megatron.model.GPTModel]: """Builds the model. 
@@ -46,16 +45,21 @@ def model_provider(pre_process=True, post_process=True) -> Union[GPTModel, megat args = get_args() print_rank_0('building GPT model ...') - config = core_transformer_config_from_args(get_args()) + # Experimental loading arguments from yaml + if args.yaml_cfg is not None: + config = core_transformer_config_from_yaml(args, "language_model") + else: + config = core_transformer_config_from_args(args) if args.use_mcore_models: if args.spec is not None: transformer_layer_spec = import_module(args.spec) + elif args.transformer_impl=="local": + transformer_layer_spec = get_gpt_layer_local_spec(args.num_experts, args.moe_grouped_gemm) + elif args.transformer_impl=="transformer_engine": + transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec(args.num_experts, args.moe_grouped_gemm) else: - if args.num_experts is None: - transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec() - else: - transformer_layer_spec = gpt_layer_with_transformer_engine_spec_moe + raise NotImplementedError() model = GPTModel( config=config, @@ -68,7 +72,7 @@ def model_provider(pre_process=True, post_process=True) -> Union[GPTModel, megat parallel_output=True, share_embeddings_and_output_weights=not args.untie_embeddings_and_output_weights, position_embedding_type=args.position_embedding_type, - rotary_percent=args.rotary_percent + rotary_percent=args.rotary_percent, ) else: assert(args.context_parallel_size == 1), "Context parallelism is only supported with Megatron Core!" @@ -91,52 +95,21 @@ def get_batch(data_iterator): if (not mpu.is_pipeline_first_stage()) and (not mpu.is_pipeline_last_stage()): return None, None, None, None, None - args = get_args() - tokenizer = get_tokenizer() - - # Items and their type. - keys = ['text'] - datatype = torch.int64 + # get batches based on the TP rank you are on + batch = get_batch_on_this_tp_rank(data_iterator) - # Broadcast data. - if data_iterator is not None: - data = next(data_iterator) - else: - data = None - data_b = tensor_parallel.broadcast_data(keys, data, datatype) - - # Unpack. - tokens_ = data_b['text'].long() - labels = tokens_[:, 1:].contiguous() - tokens = tokens_[:, :-1].contiguous() - - # Get the masks and postition ids. - attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids( - tokens, - tokenizer.eod, - args.reset_position_ids, - args.reset_attention_mask, - args.eod_mask_loss) - - batch = { - 'tokens': tokens, - 'labels': labels, - 'loss_mask': loss_mask, - 'attention_mask': attention_mask, - 'position_ids': position_ids - } # slice batch along sequence dimension for context parallelism batch = get_batch_on_this_cp_rank(batch) return batch.values() -def loss_func(loss_mask: Tensor, output_tensor: Tensor): +def loss_func(loss_mask: torch.Tensor, output_tensor: torch.Tensor): """Loss function. Args: - loss_mask (Tensor): Used to mask out some portions of the loss - output_tensor (Tensor): The tensor with the losses - """ + loss_mask (torch.Tensor): Used to mask out some portions of the loss + output_tensor (torch.Tensor): The tensor with the losses + """ args = get_args() losses = output_tensor.float() @@ -156,6 +129,9 @@ def loss_func(loss_mask: Tensor, output_tensor: Tensor): f'Device: {torch.cuda.current_device()}, node: {os.uname()[1]}' ) + args = get_args() + log_tensor(f"Global layer {args.num_layers+1} fw: Loss", loss, level=args.debug_layer_outputs) + # Reduce loss for logging. 
averaged_loss = average_losses_across_data_parallel_group([loss]) @@ -189,6 +165,8 @@ def is_dataset_built_on_rank(): def core_gpt_dataset_config_from_args(args): + tokenizer = get_tokenizer() + return GPTDatasetConfig( is_built_on_rank=is_dataset_built_on_rank, random_seed=args.seed, @@ -197,6 +175,12 @@ def core_gpt_dataset_config_from_args(args): blend_per_split=[args.train_data_path, args.valid_data_path, args.test_data_path], split=args.split, path_to_cache=args.data_cache_path, + mock=args.mock_data, + tokenizer=tokenizer, + reset_position_ids=args.reset_position_ids, + reset_attention_mask=args.reset_attention_mask, + eod_mask_loss=args.eod_mask_loss, + vocab_size=get_tokenizer().vocab_size, ) @@ -208,12 +192,19 @@ def train_valid_test_datasets_provider(train_val_test_num_samples): """ args = get_args() + config = core_gpt_dataset_config_from_args(args) + + if config.mock: + dataset_type = MockGPTDataset + else: + dataset_type = GPTDataset + print_rank_0("> building train, validation, and test datasets for GPT ...") train_ds, valid_ds, test_ds = BlendedMegatronDatasetBuilder( - GPTDataset, + dataset_type, train_val_test_num_samples, - core_gpt_dataset_config_from_args(args) + config ).build() print_rank_0("> finished creating GPT datasets ...") @@ -226,8 +217,9 @@ def train_valid_test_datasets_provider(train_val_test_num_samples): # Temporary for transition to core datasets train_valid_test_datasets_provider.is_distributed = True - pretrain(train_valid_test_datasets_provider, - model_provider, - ModelType.encoder_or_decoder, - forward_step, - args_defaults={'tokenizer_type': 'GPT2BPETokenizer'}) + with run_and_log_exception(): + pretrain(train_valid_test_datasets_provider, + model_provider, + ModelType.encoder_or_decoder, + forward_step, + args_defaults={'tokenizer_type': 'GPT2BPETokenizer'}) diff --git a/pretrain_t5.py b/pretrain_t5.py index 8ad2ca86d8..f6b93cabd5 100644 --- a/pretrain_t5.py +++ b/pretrain_t5.py @@ -5,25 +5,26 @@ from functools import partial import torch -from torch import Tensor from megatron import ( get_args, get_timers, + get_tokenizer, print_rank_0 ) -from megatron.core import tensor_parallel +from megatron.core import mpu, tensor_parallel from megatron.core.enums import ModelType -from megatron.data.dataset_utils import build_train_valid_test_datasets from megatron.core.models.T5 import T5Model from megatron.training import pretrain from megatron.utils import average_losses_across_data_parallel_group from megatron.arguments import core_transformer_config_from_args -from megatron.core.transformer.spec_utils import import_module +from megatron.core.datasets.blended_megatron_dataset_builder import BlendedMegatronDatasetBuilder +from megatron.core.datasets.t5_dataset import T5MaskedWordPieceDataset, T5MaskedWordPieceDatasetConfig from megatron.core.models.T5.t5_spec import (get_t5_encoder_with_transformer_engine_block_spec, get_t5_decoder_with_transformer_engine_block_spec, get_t5_encoder_with_local_block_spec, get_t5_decoder_with_local_block_spec) +from megatron.model import T5Model as NonCoreT5Model """ Pipeline parallelism for T5 @@ -99,7 +100,7 @@ def model_provider(pre_process=True, post_process=True, add_encoder=True, add_de rotary_percent=args.rotary_percent ) else: - model = megatron.model.T5Model(config=config, + model = NonCoreT5Model(config=config, num_tokentypes=0, parallel_output=True, pre_process=pre_process, @@ -137,12 +138,12 @@ def get_batch(data_iterator): enc_mask, dec_mask, enc_dec_mask -def loss_func(loss_mask: Tensor, output_tensor: 
Tensor): +def loss_func(loss_mask: torch.Tensor, output_tensor: torch.Tensor): """Loss function. Args: - loss_mask (Tensor): Used to mask out some portions of the loss - output_tensor (Tensor): The tensor with the losses + loss_mask (torch.Tensor): Used to mask out some portions of the loss + output_tensor (torch.Tensor): The tensor with the losses """ lm_loss_ = output_tensor.float() lm_loss = torch.sum( @@ -190,16 +191,41 @@ def train_valid_test_datasets_provider(train_val_test_num_samples: int): """ args = get_args() + tokenizer = get_tokenizer() + + config = T5MaskedWordPieceDatasetConfig( + is_built_on_rank=lambda: mpu.get_tensor_model_parallel_rank() == 0, + random_seed=args.seed, + sequence_length=args.encoder_seq_length, + sequence_length_decoder=args.decoder_seq_length, + blend=args.data_path, + blend_per_split=[ + args.train_data_path, + args.valid_data_path, + args.test_data_path, + ], + split=args.split, + path_to_cache=args.data_cache_path, + mock=False, + tokenizer=tokenizer, + masking_probability=args.mask_prob, + short_sequence_probability=args.short_seq_prob, + masking_max_ngram=10, + masking_do_full_word=True, + masking_do_permutation=False, + masking_use_longer_ngrams=False, + masking_use_geometric_distribution=True, + ) + print_rank_0('> building train, validation, and test datasets ' 'for T5 ...') - train_ds, valid_ds, test_ds = build_train_valid_test_datasets( - data_prefix=args.data_path, - splits_string=args.split, - train_valid_test_num_samples=train_val_test_num_samples, - max_seq_length=args.encoder_seq_length, - max_seq_length_dec=args.decoder_seq_length, - seed=args.seed, - dataset_type='t5') + + train_ds, valid_ds, test_ds = BlendedMegatronDatasetBuilder( + T5MaskedWordPieceDataset, + train_val_test_num_samples, + config, + ).build() + print_rank_0("> finished creating T5 datasets ...") return train_ds, valid_ds, test_ds @@ -207,5 +233,8 @@ def train_valid_test_datasets_provider(train_val_test_num_samples: int): if __name__ == "__main__": + # Temporary for transition to core datasets + train_valid_test_datasets_provider.is_distributed = True + pretrain(train_valid_test_datasets_provider, model_provider, ModelType.encoder_and_decoder, forward_step, args_defaults={'tokenizer_type': 'BertWordPieceLowerCase'}) \ No newline at end of file diff --git a/tests/functional_tests/jet_recipes/MR-bert.yaml b/tests/functional_tests/jet_recipes/MR-bert.yaml new file mode 100644 index 0000000000..7fb5baf561 --- /dev/null +++ b/tests/functional_tests/jet_recipes/MR-bert.yaml @@ -0,0 +1,60 @@ +type: recipe +format_version: 1 +maintainers: [maanug] +loggers: [stdout] +spec: + model: bert + variant: 345m + build: mcore-pyt + scope: merge-request + nodes: 1 + gpus: 8 + platforms: [dgx_a100] + steps: 50 + use_te: False + use_mcore: True + vp_size: null + extra_args: null + args_meta: null + micro_batch_size: 4 # MBS + batch_size: 128 # GBS, JET schema requires 'batch_size' + precision: bf16 + time_limit: 1200 + artifacts: {/workspace/data/bert_data: text/the_pile/bert_shard00} + checkpoint_resume_test: 0 + script: |- + ls + cd /workspace/megatron-lm + + ./tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_test.sh \ + DATA_PATH=/workspace/data/bert_data/my-bert_00_text_sentence \ + CHECKPOINT_PATH=/workspace/checkpoints \ + TENSORBOARD_DIR={assets_dir} \ + DATA_CACHE=/workspace/data/index-cache \ + USE_TE={"1" if use_te else "0"} \ + TP_SIZE={tp_size} \ + PP_SIZE={pp_size} \ + NUM_NODES={nodes} \ + MAX_STEPS={steps} \ + USE_CORE={"1" if use_mcore else "0"} \ + 
VP_SIZE={vp_size if vp_size is not None else '""'} \ + MBS={micro_batch_size} \ + GBS={batch_size} \ + CHECKPOINT_RESUME_TEST={checkpoint_resume_test} \ + ADDITIONAL_PARAMS={extra_args if extra_args is not None else '""'} && \ + python3 ./tests/functional_tests/python_test_utils/get_test_results_from_tensorboard_logs.py {assets_dir} "" | \ + tee {assets_dir}/results.json +products: + # MCore + - {tp_size: [2], pp_size: [2]} + - {tp_size: [2], pp_size: [2], extra_args: ['"--spec local"'], args_meta: ["local_spec"]} + # Non-MCore + - {use_mcore: [False], tp_size: [2], pp_size: [2]} + - {use_mcore: [False], tp_size: [1], pp_size: [4], vp_size: [2]} + # Checkpoint resume + - {checkpoint_resume_test: [1], scope: [merge-request-resume], steps: [100], use_mcore: [False], tp_size: [1], pp_size: [2]} +key_segments: + vp_size: vp + use_mcore: mcore + use_te: te + args_meta: args diff --git a/tests/functional_tests/jet_recipes/MR-gpt.yaml b/tests/functional_tests/jet_recipes/MR-gpt.yaml new file mode 100644 index 0000000000..81ac77fc28 --- /dev/null +++ b/tests/functional_tests/jet_recipes/MR-gpt.yaml @@ -0,0 +1,82 @@ +type: recipe +format_version: 1 +maintainers: [maanug] +loggers: [stdout] +spec: + model: gpt3 + variant: 345m + build: mcore-pyt + scope: merge-request + nodes: 1 + gpus: 8 + platforms: [dgx_a100] + steps: 50 + use_te: False + use_mcore: True + vp_size: null + extra_args: null + args_meta: null + micro_batch_size: 4 # MBS + batch_size: 32 # GBS, JET schema requires 'batch_size' + moe_grouped_gemm: 0 + precision: bf16 + time_limit: 1200 + artifacts: {/workspace/data/gpt3_data: text/the_pile/shard00} + checkpoint_resume_test: 0 + script: |- + ls + cd /workspace/megatron-lm + + ./tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh \ + DATA_PATH=/workspace/data/gpt3_data/my-gpt3_00_text_document \ + CHECKPOINT_PATH=/workspace/checkpoints \ + TENSORBOARD_DIR={assets_dir} \ + VOCAB_FILE=/workspace/data/gpt3_data/bpe/vocab.json \ + MERGE_FILE=/workspace/data/gpt3_data/bpe/merges.txt \ + DATA_CACHE=/workspace/data/index-cache \ + USE_TE={"1" if use_te else "0"} \ + TP_SIZE={tp_size} \ + PP_SIZE={pp_size} \ + NUM_NODES={nodes} \ + MAX_STEPS={steps} \ + USE_CORE={"1" if use_mcore else "0"} \ + VP_SIZE={vp_size if vp_size is not None else '""'} \ + MBS={micro_batch_size} \ + GBS={batch_size} \ + MOE_GROUPED_GEMM={moe_grouped_gemm} \ + CHECKPOINT_RESUME_TEST={checkpoint_resume_test} \ + ADDITIONAL_PARAMS={extra_args if extra_args is not None else '""'} && \ + python3 ./tests/functional_tests/python_test_utils/get_test_results_from_tensorboard_logs.py {assets_dir} "" | \ + tee {assets_dir}/results.json +products: + # MCore + - {tp_size: [2], pp_size: [2]} + - {tp_size: [1], pp_size: [4], vp_size: [1]} + - {tp_size: [1], pp_size: [2], extra_args: ['"--position-embedding-type rope"'], args_meta: ["rope_embeddings"]} + - {tp_size: [1], pp_size: [2], extra_args: ['"--position-embedding-type rope --rotary-interleaved --no-rope-fusion"'], args_meta: ["rope_embeddings_interleaved_no_fusion"]} + - {tp_size: [1], pp_size: [4], extra_args: ["--swiglu"], args_meta: ["swiglu"]} + - {tp_size: [1], pp_size: [4], extra_args: ["--disable-bias-linear"], args_meta: ["disable_bias_linear"]} + - {tp_size: [1], pp_size: [4], extra_args: ["--untie-embeddings-and-output-weights"], args_meta: ["untie_embeddings_and_outputs"]} + - {tp_size: [1], pp_size: [4], extra_args: ["--sequence-parallel"], args_meta: ["sequence_parallel"]} + - {tp_size: [1], pp_size: [1], extra_args: ['"--recompute-granularity 
full --recompute-method uniform --recompute-num-layers 1"'], args_meta: ["uniform_full_recompute"]} + # - {tp_size: [2], pp_size: [1,2], extra_args: ['"--context-parallel-size 2 --sequence-parallel --hidden-dropout 0.0 --attention-dropout 0.0"']} # TODO: need updated container with TE > 1.0.0 + - {tp_size: [2], pp_size: [1], extra_args: ['"--sequence-parallel --num-experts 8 --expert-model-parallel-size 2 --moe-router-load-balancing-type sinkhorn --moe-router-topk 1"'], args_meta: ["te_8experts2parallel"]} + - {tp_size: [2], pp_size: [1], extra_args: ['"--sequence-parallel --num-experts 8 --expert-model-parallel-size 2 --use-distributed-optimizer --moe-router-load-balancing-type sinkhorn --moe-router-topk 1"'], args_meta: ["te_8experts2parallel_dist_optimizer"]} + - {tp_size: [2], pp_size: [1], extra_args: ['"--moe-grouped-gemm --disable-bias-linear --sequence-parallel --num-experts 8 --expert-model-parallel-size 2 --moe-router-load-balancing-type sinkhorn --moe-router-topk 1"'], moe_grouped_gemm: [1], args_meta: ["te_8experts2parallel_groupedGEMM"]} + - {tp_size: [2], pp_size: [1], extra_args: ['"--disable-bias-linear --sequence-parallel --num-experts 8 --expert-model-parallel-size 2 --moe-router-load-balancing-type aux_loss --moe-router-topk 2 --moe-aux-loss-coeff 1e-2"'], moe_grouped_gemm: [1], args_meta: ["te_8experts2parallel_top2router"]} + - {tp_size: [1], pp_size: [1], extra_args: ["--use-distributed-optimizer"], args_meta: ["dist_optimizer"]} + - {tp_size: [4], pp_size: [1], extra_args: ['"--use-distributed-optimizer --overlap-grad-reduce"'], args_meta: ["dist_optimizer_overlap_grad_reduce"]} + - {tp_size: [4], pp_size: [1], extra_args: ['"--use-distributed-optimizer --overlap-grad-reduce --overlap-param-gather"'], args_meta: ["dist_optimizer_overlap_grad_reduce_param_gather"]} + - {tp_size: [1], pp_size: [4], vp_size: [1], extra_args: ['"--use-distributed-optimizer --overlap-grad-reduce"'], args_meta: ["dist_optimizer_overlap_grad_reduce"]} + - {tp_size: [1], pp_size: [4], vp_size: [1], extra_args: ['"--use-distributed-optimizer --overlap-grad-reduce --untie-embeddings-and-output-weights"'], args_meta: ["dist_optimizer_overlap_grad_reduce_untied"]} + - {tp_size: [1], pp_size: [4], vp_size: [1], extra_args: ['"--use-distributed-optimizer --overlap-grad-reduce --overlap-param-gather"'], args_meta: ["dist_optimizer_overlap_grad_reduce_param_gather"]} + # Non-MCore + - {use_mcore: [False], use_te: [False, True], tp_size: [2], pp_size: [2]} + - {use_mcore: [False], tp_size: [1], pp_size: [4], vp_size: [1]} + # Checkpoint resume + - {checkpoint_resume_test: [1], scope: [merge-request-resume], steps: [100], use_mcore: [False], tp_size: [1], pp_size: [2]} +key_segments: + vp_size: vp + use_mcore: mcore + use_te: te + args_meta: args diff --git a/tests/functional_tests/jet_recipes/MR-t5.yaml b/tests/functional_tests/jet_recipes/MR-t5.yaml new file mode 100644 index 0000000000..adf22b987c --- /dev/null +++ b/tests/functional_tests/jet_recipes/MR-t5.yaml @@ -0,0 +1,51 @@ +type: recipe +format_version: 1 +maintainers: [maanug] +loggers: [stdout] +spec: + model: t5 + variant: 220m + build: mcore-pyt + scope: merge-request + nodes: 1 + gpus: 8 + platforms: [dgx_a100] + steps: 100 + use_te: False + use_mcore: True + vp_size: null + extra_args: null + args_meta: null + micro_batch_size: 4 # MBS + batch_size: 32 # GBS, JET schema requires 'batch_size' + precision: bf16 + time_limit: 1800 + artifacts: {/workspace/data/t5_data: text/the_pile/t5_shard00} + script: |- + ls + cd /workspace/megatron-lm 
+ + ./tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_test.sh \ + DATA_PATH="/workspace/data/t5_data/my-t5_00_text_document" \ + CHECKPOINT_PATH=/workspace/checkpoints \ + TENSORBOARD_DIR={assets_dir} \ + DATA_CACHE=/workspace/data/index-cache \ + USE_TE={"1" if use_te else "0"} \ + TP_SIZE={tp_size} \ + PP_SIZE={pp_size} \ + NUM_NODES={nodes} \ + MAX_STEPS={steps} \ + USE_CORE={"1" if use_mcore else "0"} \ + VP_SIZE={vp_size if vp_size is not None else '""'} \ + MBS={micro_batch_size} \ + GBS={batch_size} \ + ADDITIONAL_PARAMS={extra_args if extra_args is not None else '""'} && \ + python3 ./tests/functional_tests/python_test_utils/get_test_results_from_tensorboard_logs.py {assets_dir} "" | \ + tee {assets_dir}/results.json +products: + - {use_te: [True], tp_size: [1], pp_size: [1], vp_size: [1]} +key_segments: + vp_size: vp + use_mcore: mcore + use_te: te + args_meta: args diff --git a/tests/functional_tests/jet_recipes/build-pyt.yaml b/tests/functional_tests/jet_recipes/build-pyt.yaml new file mode 100644 index 0000000000..5bc86217bc --- /dev/null +++ b/tests/functional_tests/jet_recipes/build-pyt.yaml @@ -0,0 +1,21 @@ +type: build +format_version: 1 +maintainers: [maanug] +spec: + name: pyt + platforms: [linux/amd64] + source: + image: nvcr.io/nvidia/pytorch:23.04-py3 + +--- +type: build +format_version: 1 +maintainers: [maanug] +spec: + name: mcore-pyt + platforms: [linux/amd64] + parent: pyt + source: + repo: https://gitlab-master.nvidia.com/ADLR/megatron-lm.git + ref: main + dockerfile: Dockerfile.ci diff --git a/tests/functional_tests/jet_recipes/monthly-t5.yaml b/tests/functional_tests/jet_recipes/monthly-t5.yaml new file mode 100644 index 0000000000..d99bf92b9c --- /dev/null +++ b/tests/functional_tests/jet_recipes/monthly-t5.yaml @@ -0,0 +1,57 @@ +type: recipe +format_version: 1 +maintainers: [maanug] +loggers: [stdout] +spec: + model: t5 + variant: 220m + build: mcore-pyt + scope: monthly + nodes: 1 + gpus: 8 + platforms: [dgx_h100] + steps: 100 + use_te: False + use_mcore: True + vp_size: 1 + extra_args: null + args_meta: null + micro_batch_size: 4 # MBS + batch_size: 32 # GBS, JET schema requires 'batch_size' + precision: bf16 + time_limit: 1800 + artifacts: {/workspace/data/t5_data: text/the_pile/t5_shard00} + checkpoint_resume_test: 0 + script: |- + ls + cd /workspace/megatron-lm + + ./tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_test.sh \ + DATA_PATH="/workspace/data/t5_data/my-t5_00_text_document" \ + CHECKPOINT_PATH=/workspace/checkpoints \ + TENSORBOARD_DIR={assets_dir} \ + DATA_CACHE=/workspace/data/index-cache \ + USE_TE={"1" if use_te else "0"} \ + TP_SIZE={tp_size} \ + PP_SIZE={pp_size} \ + NUM_NODES={nodes} \ + MAX_STEPS={steps} \ + USE_CORE={"1" if use_mcore else "0"} \ + VP_SIZE={vp_size if vp_size is not None else '""'} \ + MBS={micro_batch_size} \ + GBS={batch_size} \ + CHECKPOINT_RESUME_TEST={checkpoint_resume_test} \ + ADDITIONAL_PARAMS={extra_args if extra_args is not None else '""'} && \ + python3 ./tests/functional_tests/python_test_utils/get_test_results_from_tensorboard_logs.py {assets_dir} "" | \ + tee {assets_dir}/results.json +products: + - { tp_size: [1,2], pp_size: [1], vp_size: [1] } + - {use_te: [True], tp_size: [2], pp_size: [1], vp_size: [1]} + - {use_te: [True], tp_size: [2], pp_size: [1], vp_size: [1], extra_args: ["--sequence-parallel"], args_meta: ["sequence_parallel"]} + # Checkpoint resume + - {checkpoint_resume_test: [1], scope: [monthly-resume], use_te: [False, True], tp_size: [1], pp_size: [1], vp_size: 
[1]} +key_segments: + vp_size: vp + use_mcore: mcore + use_te: te + args_meta: args diff --git a/tests/functional_tests/jet_recipes/nightly-bert.yaml b/tests/functional_tests/jet_recipes/nightly-bert.yaml new file mode 100644 index 0000000000..6641d7926a --- /dev/null +++ b/tests/functional_tests/jet_recipes/nightly-bert.yaml @@ -0,0 +1,53 @@ +type: recipe +format_version: 1 +maintainers: [maanug] +loggers: [stdout] +spec: + model: bert + variant: 345m + build: mcore-pyt + scope: nightly + nodes: 1 + gpus: 8 + platforms: [dgx_h100] + steps: 50 + use_te: False + use_mcore: True + vp_size: null + extra_args: null + args_meta: null + micro_batch_size: 4 # MBS + batch_size: 128 # GBS, JET schema requires 'batch_size' + precision: bf16 + time_limit: 1200 + artifacts: {/workspace/data/bert_data: text/the_pile/bert_shard00} + script: |- + ls + cd /workspace/megatron-lm + + ./tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_test.sh \ + DATA_PATH=/workspace/data/bert_data/my-bert_00_text_sentence \ + CHECKPOINT_PATH=/workspace/checkpoints \ + TENSORBOARD_DIR={assets_dir} \ + DATA_CACHE=/workspace/data/index-cache \ + USE_TE={"1" if use_te else "0"} \ + TP_SIZE={tp_size} \ + PP_SIZE={pp_size} \ + NUM_NODES={nodes} \ + MAX_STEPS={steps} \ + USE_CORE={"1" if use_mcore else "0"} \ + VP_SIZE={vp_size if vp_size is not None else '""'} \ + MBS={micro_batch_size} \ + GBS={batch_size} \ + ADDITIONAL_PARAMS={extra_args if extra_args is not None else '""'} && \ + python3 ./tests/functional_tests/python_test_utils/get_test_results_from_tensorboard_logs.py {assets_dir} "" | \ + tee {assets_dir}/results.json +products: + - {tp_size: [1], pp_size: [4], vp_size: [2]} + - {use_mcore: [True, False], tp_size: [4], pp_size: [1]} + - {use_mcore: [True, False], tp_size: [1], pp_size: [2]} +key_segments: + vp_size: vp + use_mcore: mcore + use_te: te + args_meta: args diff --git a/tests/functional_tests/jet_recipes/nightly-gpt.yaml b/tests/functional_tests/jet_recipes/nightly-gpt.yaml new file mode 100644 index 0000000000..b00de0da54 --- /dev/null +++ b/tests/functional_tests/jet_recipes/nightly-gpt.yaml @@ -0,0 +1,65 @@ +type: recipe +format_version: 1 +maintainers: [maanug] +loggers: [stdout] +spec: + model: gpt3 + variant: 345m + build: mcore-pyt + scope: nightly + nodes: 1 + gpus: 8 + platforms: [dgx_h100] + steps: 50 + use_te: False + use_mcore: True + vp_size: null + extra_args: null + args_meta: null + micro_batch_size: 4 # MBS + batch_size: 32 # GBS, JET schema requires 'batch_size' + moe_grouped_gemm: 0 + precision: bf16 + time_limit: 1200 + artifacts: {/workspace/data/gpt3_data: text/the_pile/shard00} + script: |- + ls + cd /workspace/megatron-lm + + ./tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh \ + DATA_PATH=/workspace/data/gpt3_data/my-gpt3_00_text_document \ + CHECKPOINT_PATH=/workspace/checkpoints \ + TENSORBOARD_DIR={assets_dir} \ + VOCAB_FILE=/workspace/data/gpt3_data/bpe/vocab.json \ + MERGE_FILE=/workspace/data/gpt3_data/bpe/merges.txt \ + DATA_CACHE=/workspace/data/index-cache \ + USE_TE={"1" if use_te else "0"} \ + TP_SIZE={tp_size} \ + PP_SIZE={pp_size} \ + NUM_NODES={nodes} \ + MAX_STEPS={steps} \ + USE_CORE={"1" if use_mcore else "0"} \ + VP_SIZE={vp_size if vp_size is not None else '""'} \ + MBS={micro_batch_size} \ + GBS={batch_size} \ + MOE_GROUPED_GEMM={moe_grouped_gemm} \ + ADDITIONAL_PARAMS={extra_args if extra_args is not None else '""'} && \ + python3 ./tests/functional_tests/python_test_utils/get_test_results_from_tensorboard_logs.py 
{assets_dir} "" | \ + tee {assets_dir}/results.json +products: + - {use_mcore: [True, False], tp_size: [4], pp_size: [1]} + - {use_mcore: [True, False], tp_size: [1], pp_size: [2,4]} + - {tp_size: [2], pp_size: [2], extra_args: ['"--num-experts 2 --sequence-parallel --moe-router-load-balancing-type sinkhorn --moe-router-topk 1"'], args_meta: ["te_2experts"]} + - {tp_size: [2], pp_size: [2], extra_args: ['"--sequence-parallel --num-experts 4 --expert-model-parallel-size 2 --moe-router-load-balancing-type sinkhorn --moe-router-topk 1"'], args_meta: ["te_4experts2parallel"]} +# Non-MCore + - {use_mcore: [False], tp_size: [1,4], pp_size: [1], extra_args: ["--overlap-grad-reduce"], args_meta: ["overlap_grad_reduce"]} + - {use_mcore: [False], tp_size: [1], pp_size: [1], extra_args: ['"--use-distributed-optimizer --overlap-grad-reduce"'], args_meta: ["dist_optimizer_overlap_grad_reduce"]} + - {use_mcore: [False], tp_size: [1], pp_size: [1], extra_args: ['"--use-distributed-optimizer --overlap-grad-reduce --overlap-param-gather"'], args_meta: ["dist_optimizer_overlap_grad_reduce_param_gather"]} + - {use_mcore: [False], tp_size: [1], pp_size: [4], vp_size: [null, 1], extra_args: ["--overlap-grad-reduce"], args_meta: ["overlap_grad_reduce"]} + - {use_mcore: [False], tp_size: [2], pp_size: [2], extra_args: ["--overlap-grad-reduce"], args_meta: ["overlap_grad_reduce"]} + - {use_mcore: [False], tp_size: [2], pp_size: [2], extra_args: ['"--sequence-parallel --num-experts 4 --moe-router-load-balancing-type sinkhorn --moe-router-topk 1"'], args_meta: ["4experts"]} +key_segments: + vp_size: vp + use_mcore: mcore + use_te: te + args_meta: args diff --git a/tests/functional_tests/python_test_utils/jet_test_pipeline.py b/tests/functional_tests/python_test_utils/jet_test_pipeline.py index 6bf2a483e3..27d00df49f 100644 --- a/tests/functional_tests/python_test_utils/jet_test_pipeline.py +++ b/tests/functional_tests/python_test_utils/jet_test_pipeline.py @@ -5,23 +5,38 @@ from jet.logs.queries import JETLogsQuery, Field -def select_asset(assets, prefix): - for asset in assets: - if asset['s_name'].startswith(prefix): - return asset['s_url'] +def select_asset(result_obj, prefix): + if result_obj['obj_ci']['s_job_status'] != "skipped": + assets = result_obj.get('nested_assets', None) + if assets is not None: + for asset in assets: + if asset['s_name'].startswith(prefix): + return asset['s_url'] + return 'not found' -def query_results(ephemeral_branch): +def query_results(triggering_pipeline_id): service = JETInstance().log_service() query = ( JETLogsQuery() - .filter(Field('obj_workloads_registry.s_commit_ref') == ephemeral_branch) + .filter(Field('obj_ci.obj_upstream.l_pipeline_id') == triggering_pipeline_id) .filter(Field('obj_workload.s_type') == 'recipe') - .select('l_exit_code', 'nested_assets', 'obj_workload.s_key', 'obj_workload.obj_spec') - .orderby('-ts_created') # decreasing (most recent in case of timestamp) + .select('l_exit_code', 'nested_assets', 'obj_workload.s_key', 'obj_workload.obj_spec', 'obj_ci', 'ts_created') + .orderby('ts_created') # increasing (least recent in case of timestamp) ) return service.query(query, flatten=False) +def dedupe_results(results): + deduped = {} + for result in results: + key = result['obj_workload']['s_key'] + if key not in deduped: + deduped[key] = result + else: + if result['ts_created'] > deduped[key]['ts_created']: + deduped[key] = result + + return deduped.values() def check_exitcodes(results): from prettytable import PrettyTable @@ -30,49 +45,49 @@ def 
check_exitcodes(results): log_urls = [] names = [] for result in results: - exit_codes.append(result['l_exit_code']) - log_urls.append(select_asset(result['nested_assets'], 'output_script.log')) - name = result['obj_workload']['s_key'].strip('recipe/') - remove_substr = result['obj_workload']['obj_spec']['s_build'] + \ - '_' + result['obj_workload']['obj_spec']['s_scope'] - names.append(''.join(name.split(remove_substr))) + exit_codes.append(result.get('l_exit_code', -1)) + log_urls.append(select_asset(result, 'output_script-0.log')) + names.append(result['obj_workload']['s_key'].lstrip('recipe/')) table = PrettyTable() table.add_column("Job Key", names) table.add_column("Exit Code", exit_codes) table.add_column("Log URL", log_urls) exit_codes_good = [ec == 0 for ec in exit_codes] - if not all(exit_codes_good): + if exit_codes_good == []: + raise Exception("Can't find any jobs, something went wrong.\n" + table.get_string()) + if exit_codes_good == [] or not all(exit_codes_good): raise Exception("Some jobs failed to complete successfully\n" + table.get_string()) else: print(table) print("All jobs completed successfully!") -def check_baselines(results): +def _download_log(url, save_dir): import requests - import pytest - from tempfile import TemporaryDirectory + if not os.path.exists(save_dir): + os.mkdir(save_dir) + filepath = os.path.join(save_dir, url.split('/')[-1]) + + r = requests.get(url) + if r.ok: + with open(filepath, mode='wb') as f: + f.write(r.content) + else: + print(f"WARNING: Unable to download file at {url}. Received status {r.status_code}") - def download_log(url, save_dir): - if not os.path.exists(save_dir): - os.mkdir(save_dir) - filepath = os.path.join(save_dir, url.split('/')[-1]) - r = requests.get(url) - if r.ok: - with open(filepath, mode='wb') as f: - f.write(r.content) - else: - print(f"WARNING: Unable to download file at {url}. 
Received status {r.status_code}") +def check_baselines(results): + import pytest + from tempfile import TemporaryDirectory with TemporaryDirectory() as tmpdir: # Download TB event logs for result in results: - event_log_url = select_asset(result['nested_assets'], 'events.out.tfevents') + event_log_url = select_asset(result, 'events.out.tfevents') target_dir = result['obj_workload']['s_key'].lstrip('recipe/') target_dir = os.path.join(tmpdir, target_dir) - download_log(event_log_url, target_dir) + _download_log(event_log_url, target_dir) # Run pytest on logs os.environ["EXPECTED_METRICS_DIR"] = "tests/functional_tests/test_results/jet" @@ -81,15 +96,33 @@ def download_log(url, save_dir): ['tests/functional_tests/python_test_utils/multitest_ci_pipeline.py::TestBulkCIPipeline'])) +def fetch_metrics_files(results, save_dir): + for result in results: + metrics_url = select_asset(result, 'results.json') + if metrics_url is not None: + cfg = result['obj_workload']['s_key'].lstrip('recipe/') + target_dir = os.path.join(save_dir, cfg) + _download_log(metrics_url, target_dir) + + with open(os.path.join(target_dir, 'results.json'), 'r') as full_results_file: + with open(os.path.join(target_dir, cfg+'.json'), 'w') as golden_file: + golden_file.write(full_results_file.readlines()[-1].strip()) + + if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( - 'eph_branch', help="JET Workloads registry ephemeral branch created by 'jet-generate' job in this pipeline") - parser.add_argument('--test', required=True, choices=[ + 'pipeline_id', help="Pipeline ID for pipeline in MLM repo that triggers the JET CI") + parser.add_argument('--test', required=False, choices=[ 'exit', 'metrics'], help="Check exit status of jobs with 'exit' or perf and loss with 'metrics'") + parser.add_argument('--download_metrics_dir', help="Directory in which to save the results.json files from jobs. Will not save files if not set. 
Set this if you want to update golden values.") args = parser.parse_args() - results = query_results(args.eph_branch) + results = query_results(args.pipeline_id) + results = dedupe_results(results) + + if args.download_metrics_dir: + fetch_metrics_files(results, args.download_metrics_dir) if args.test == 'exit': check_exitcodes(results) diff --git a/tests/functional_tests/python_test_utils/test_resume_checkpoint_pipeline.py b/tests/functional_tests/python_test_utils/test_resume_checkpoint_pipeline.py index 41b7a0e7d8..417297eaff 100644 --- a/tests/functional_tests/python_test_utils/test_resume_checkpoint_pipeline.py +++ b/tests/functional_tests/python_test_utils/test_resume_checkpoint_pipeline.py @@ -1,11 +1,16 @@ import os + os.environ['OPENBLAS_NUM_THREADS'] = '1' -import sys +import glob import json import shutil -import glob +import sys + +import pytest from tensorboard.backend.event_processing import event_accumulator +from tests.functional_tests.python_test_utils.common import TypeOfTest + LOGS_DIR = os.getenv('LOGS_DIR') STEP_INTERVAL = 5 @@ -36,10 +41,11 @@ def collect_train_test_metrics(logs_dir, index): class TestCIPipeline: + margin_loss = 0.05 train_metrics_100 = collect_train_test_metrics(LOGS_DIR, 0) train_metrics_50_to_100 = collect_train_test_metrics(LOGS_DIR, 1) - def _test_helper(self, loss_type): + def _test_helper(self, loss_type, test_type): expected = self.train_metrics_100[loss_type] assert len(expected) == 100 // STEP_INTERVAL, \ f"Train metrics from first run (before checkpoint load) should have {100 // STEP_INTERVAL} elements" @@ -48,14 +54,18 @@ def _test_helper(self, loss_type): assert len(actual) == 50 // STEP_INTERVAL, \ f"Train metrics from second run (after checkpoint load) should have {50 // STEP_INTERVAL} elements" print('actual : ' + str(actual)) - # NOTE : Doing this way because in gpt3 model when I run from 0 - 100 directly, it produces 1 extra element - # i.e expected is [10.84266, 10.89696, 10.90542, 10.87498, 10.86265, 10.83608, 10.64368, 10.62319, 10.53908, 10.25005, 10.20907, 9.96542, 9.96802, 9.92436, 9.79086, 9.26718, 9.61784, 9.19018, 9.45986, 9.62168, 9.73772, 8.85732, 9.43185, 9.27912, 9.6832, 9.5127, 9.5419, 9.02549, 8.55077, 8.91355, 8.83375, 9.17722, 9.22436, 9.19436, 9.11323, 9.09711, 9.04421, 9.36795] - # actual is : [9.73772, 8.85732, 9.43185, 9.27912, 9.6832, 9.5127, 9.5419, 9.02549, 8.55077, 8.91355, 8.83375, 9.17722, 9.22435, 9.19435, 9.11322, 9.09711, 9.04422] - # That extra element in expected is causing some issues. So doing it this way. Need to figure out whats happening - start_idx_expected = expected.index(actual[0]) # First element of actual + start_idx_expected = len(expected) - len(actual) + print('start_idx_expected:', start_idx_expected) # Here we will just be comparing values of actual and second half (50-100) of expected - for i in range(len(actual)): - assert actual[i] == expected[start_idx_expected + i], f"The value at step {i} should be {expected[start_idx_expected + i]} but it is {actual[i]}." + for i, (expected_val, actual_val) in enumerate(zip(expected[start_idx_expected:], actual)): + step = start_idx_expected + i * STEP_INTERVAL + if test_type == TypeOfTest.APPROX: + assert actual_val == pytest.approx(expected=expected_val, rel=self.margin_loss), f"The loss at step {step} should be approximately {expected_val} but it is {actual_val}." + else: + assert actual_val == expected_val, f"The value at step {step} should be {expected_val} but it is {actual_val}." 
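The TypeOfTest values used in _test_helper above come from tests/functional_tests/python_test_utils/common.py, which is not part of this diff. A minimal sketch of what that helper presumably provides, assuming it is a plain Enum, is:

# tests/functional_tests/python_test_utils/common.py (assumed layout; not shown in this diff)
import enum

class TypeOfTest(enum.Enum):
    APPROX = 1         # loss values may drift within a relative tolerance
    DETERMINISTIC = 2  # loss values must match exactly after checkpoint resume

With margin_loss = 0.05, pytest.approx(expected_val, rel=0.05) accepts any actual value within 5% of the expected loss, so an expected loss of 10.0 passes for anything in roughly [9.5, 10.5].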
def test_lm_loss_deterministic(self): - self._test_helper("lm loss") + self._test_helper("lm loss", TypeOfTest.DETERMINISTIC) + + def test_lm_loss_approx(self): + self._test_helper("lm loss", TypeOfTest.APPROX) diff --git a/tests/functional_tests/shell_test_utils/run_selene_test_launcher_script.sh b/tests/functional_tests/shell_test_utils/run_selene_test_launcher_script.sh index e7c8c3c88f..d454932abb 100755 --- a/tests/functional_tests/shell_test_utils/run_selene_test_launcher_script.sh +++ b/tests/functional_tests/shell_test_utils/run_selene_test_launcher_script.sh @@ -44,11 +44,11 @@ export GOTO_NUM_THREADS=2 export OPENBLAS_NUM_THREADS=2 # step 5 : CREATING A COPY OF THE SBATCH SCRIPT THAT WILL BE RUN FOR DEBUGGING -envsubst '$BASE_DIR $PYTORCH_IMAGE $BUILD_DIR $DATA_DIR $MBS $GBS $ADDITIONAL_PARAMS $USE_TE $TP_SIZE $PP_SIZE $VP_SIZE $NUM_NODES $MAX_STEPS $USE_CORE' <$BUILD_DIR/tests/functional_tests/test_scripts/$RUN_MODEL/sbatch_${RUN_MODEL}_distributed_test.sh > $SELENE_ADLR_CI_PATH/$CI_PIPELINE_ID/$RUN_NAME/debug/sbatch_${RUN_MODEL}_distributed_test.sh +envsubst '$BASE_DIR $PYTORCH_IMAGE $BUILD_DIR $DATA_DIR $MBS $GBS $MOE_GROUPED_GEMM $ADDITIONAL_PARAMS $USE_TE $TP_SIZE $PP_SIZE $VP_SIZE $NUM_NODES $MAX_STEPS $USE_CORE' <$BUILD_DIR/tests/functional_tests/test_scripts/$RUN_MODEL/sbatch_${RUN_MODEL}_distributed_test.sh > $SELENE_ADLR_CI_PATH/$CI_PIPELINE_ID/$RUN_NAME/debug/sbatch_${RUN_MODEL}_distributed_test.sh # step 6 : SUBMITTING THE JOB -sbatch_submission=`sbatch -t $TIME_LIMIT $BUILD_DIR/tests/functional_tests/test_scripts/$RUN_MODEL/sbatch_${RUN_MODEL}_distributed_test.sh --export=BASE_DIR,BUILD_DIR,DATA_DIR,USE_TE,TP_SIZE,PP_SIZE,VP_SIZE,NUM_NODES,MAX_STEPS,MBS,GBS,PYTORCH_IMAGE,ADDITIONAL_PARAMS` +sbatch_submission=`sbatch -t $TIME_LIMIT $BUILD_DIR/tests/functional_tests/test_scripts/$RUN_MODEL/sbatch_${RUN_MODEL}_distributed_test.sh --export=BASE_DIR,BUILD_DIR,DATA_DIR,USE_TE,TP_SIZE,PP_SIZE,VP_SIZE,NUM_NODES,MAX_STEPS,MBS,GBS,MOE_GROUPED_GEMM,PYTORCH_IMAGE,ADDITIONAL_PARAMS` export SLURM_JOBID=$(echo $sbatch_submission| grep 'Submitted batch job' | awk '{ print $4 }'); # step 7 : WAITING FOR JOB TO COMPLETE AND PRINTING JOB INFO diff --git a/tests/functional_tests/test_results/bert/bert_tp2_pp2_1nodes_50steps_core_enabled_local_spec.json b/tests/functional_tests/test_results/bert/bert_tp2_pp2_1nodes_50steps_core_enabled_local_spec.json new file mode 100644 index 0000000000..60d32e4938 --- /dev/null +++ b/tests/functional_tests/test_results/bert/bert_tp2_pp2_1nodes_50steps_core_enabled_local_spec.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.49849, 10.48909, 10.48383, 10.45052, 10.4396, 10.34793, 10.13229, 10.03818, 9.86253, 9.67165]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [2210.0, 2505.0, 2330.0, 2235.0, 2290.0, 2400.0, 2866.0, 3249.0, 3522.0, 2958.0]}, "iteration_timing_avg": 0.6923926470588235} diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp1_pp1_1nodes_50steps_dist_optimizer_overlap_grad_reduce_param_gather.json b/tests/functional_tests/test_results/gpt3/gpt3_tp1_pp1_1nodes_50steps_dist_optimizer_overlap_grad_reduce_param_gather.json new file mode 100644 index 0000000000..4ceb167669 --- /dev/null +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp1_pp1_1nodes_50steps_dist_optimizer_overlap_grad_reduce_param_gather.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.87174, 10.89545, 10.88847, 10.88533, 10.893, 
10.84895, 10.70048, 10.64124, 10.53839, 10.3107]}, "num-zeros": {"start_step": 0, "end_step": 32, "step_interval": 5, "values": [1238.0, 1318.0, 1648.0, 1423.0, 1535.0, 1350.0, 1271.0]}, "iteration_timing_avg": 0.06580882352941175} \ No newline at end of file diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp1_pp4_interleaved_1nodes_50steps_dist_optimizer_overlap_grad_reduce_param_gather.json b/tests/functional_tests/test_results/gpt3/gpt3_tp1_pp4_interleaved_1nodes_50steps_dist_optimizer_overlap_grad_reduce_param_gather.json new file mode 100644 index 0000000000..3ad3d83d39 --- /dev/null +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp1_pp4_interleaved_1nodes_50steps_dist_optimizer_overlap_grad_reduce_param_gather.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.7951, 10.84939, 10.87411, 10.83459, 10.82865, 10.78676, 10.56492, 10.57063, 10.48544, 10.19547]}, "num-zeros": {"start_step": 0, "end_step": 34, "step_interval": 5, "values": [2586.0, 2828.0, 2105.0, 2725.0, 2711.0, 2428.0, 2946.0]}, "iteration_timing_avg": 0.12188999999999997} \ No newline at end of file diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel.json b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel.json index 4f0233160c..103f0ef6cd 100644 --- a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel.json +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.80055, 10.86883, 10.86422, 10.80142, 10.71115, 10.63973, 10.2006, 10.30993, 10.21958, 9.92011]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16139.0, 19489.0, 19350.0, 18806.0, 16997.0, 18210.0, 15507.0, 18409.0, 19032.0, 19709.0]}, "iteration_timing_avg": 0.2878829411764705} +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79896, 10.8601, 10.87152, 10.79856, 10.71624, 10.6355, 10.19683, 10.30917, 10.21632, 9.90782]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16152.0, 19202.0, 19645.0, 18594.0, 17375.0, 17768.0, 15576.0, 17888.0, 18387.0, 18810.0]}, "iteration_timing_avg": 0.2777326470588235} \ No newline at end of file diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_groupedGEMM.json b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_groupedGEMM.json new file mode 100644 index 0000000000..93557798a7 --- /dev/null +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_groupedGEMM.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.80961, 10.86075, 10.86755, 10.80331, 10.71906, 10.64746, 10.21053, 10.32037, 10.22013, 9.92389]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16604.0, 19509.0, 19801.0, 18644.0, 17084.0, 17721.0, 14980.0, 17754.0, 18357.0, 18375.0]}, "iteration_timing_avg": 0.18734941176470588} \ No newline at end of file diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_top2router.json 
b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_top2router.json new file mode 100644 index 0000000000..defdb50cec --- /dev/null +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_top2router.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.80682, 10.86725, 10.87968, 10.79328, 10.66888, 10.57819, 10.06276, 10.18504, 10.1014, 9.76741]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [62567.0, 65584.0, 65506.0, 65118.0, 64028.0, 64819.0, 63611.0, 65997.0, 66843.0, 67788.0]}, "iteration_timing_avg": 0.26514323529411765} \ No newline at end of file diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_4experts.json b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_4experts.json index 022dee643b..4bdd9b671d 100644 --- a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_4experts.json +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_4experts.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79931, 10.855, 10.86219, 10.8371, 10.83378, 10.8008, 10.60169, 10.6114, 10.53828, 10.26949]}, "num-zeros": {"start_step": 0, "end_step": 34, "step_interval": 5, "values": [8398.0, 8514.0, 7788.0, 8985.0, 9107.0, 8981.0, 9279.0]}, "iteration_timing_avg": 0.37232617647058813} +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.80342, 10.85864, 10.86188, 10.83807, 10.83268, 10.80489, 10.60813, 10.61632, 10.53669, 10.27118]}, "num-zeros": {"start_step": 0, "end_step": 34, "step_interval": 5, "values": [8302.0, 7865.0, 7784.0, 8919.0, 9202.0, 9007.0, 9274.0]}, "iteration_timing_avg": 0.3891070588235294} \ No newline at end of file diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_context_parallelism_cp2.json b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_context_parallelism_cp2.json new file mode 100644 index 0000000000..8aaab492e2 --- /dev/null +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_context_parallelism_cp2.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.93293, 10.93657, 10.88786, 10.86127, 10.71506, 10.61068, 10.06701, 10.17618, 10.07536, 9.74958]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [599.0, 655.0, 664.0, 679.0, 596.0, 643.0, 577.0, 776.0, 817.0, 805.0]}, "iteration_timing_avg": 0.3355429411764707} diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_te_2experts.json b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_te_2experts.json index f007a01b52..8617eca761 100644 --- a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_te_2experts.json +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_te_2experts.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79116, 10.83954, 10.81173, 10.75983, 10.65557, 10.56982, 10.08268, 10.21338, 10.10761, 9.8191]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [2917.0, 3465.0, 3576.0, 3347.0, 3187.0, 3215.0, 2817.0, 3455.0, 3838.0, 3755.0]}, "iteration_timing_avg": 0.23038411764705882} 
+{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79674, 10.84347, 10.81547, 10.76604, 10.65416, 10.56322, 10.08548, 10.21617, 10.1139, 9.8322]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [2912.0, 3584.0, 3414.0, 3357.0, 3298.0, 3173.0, 2816.0, 3211.0, 3817.0, 3728.0]}, "iteration_timing_avg": 0.2862067647058823} \ No newline at end of file diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_te_4experts2parallel.json b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_te_4experts2parallel.json index fbf3695098..98fc4c9355 100644 --- a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_te_4experts2parallel.json +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_te_4experts2parallel.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.82661, 10.87444, 10.85653, 10.80493, 10.70751, 10.63374, 10.15545, 10.27641, 10.18349, 9.87672]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [6999.0, 8493.0, 8974.0, 8653.0, 7725.0, 8045.0, 7067.0, 8642.0, 8950.0, 9562.0]}, "iteration_timing_avg": 0.24783852941176465} +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.82194, 10.86461, 10.85816, 10.80566, 10.71345, 10.63249, 10.15902, 10.27938, 10.18516, 9.88286]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [7126.0, 8754.0, 8834.0, 8614.0, 7854.0, 8202.0, 7007.0, 8641.0, 9234.0, 9655.0]}, "iteration_timing_avg": 0.30157323529411767} \ No newline at end of file diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp4_pp1_1nodes_50steps_dist_optimizer_overlap_grad_reduce_param_gather.json b/tests/functional_tests/test_results/gpt3/gpt3_tp4_pp1_1nodes_50steps_dist_optimizer_overlap_grad_reduce_param_gather.json new file mode 100644 index 0000000000..b12f79670b --- /dev/null +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp4_pp1_1nodes_50steps_dist_optimizer_overlap_grad_reduce_param_gather.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85921, 10.8797, 10.87381, 10.88658, 10.88912, 10.84826, 10.68571, 10.62947, 10.54289, 10.26918]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2288.0, 2326.0, 2454.0, 2011.0, 2111.0, 2436.0, 2446.0]}, "iteration_timing_avg": 0.20696529411764708} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-100_tp-1_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-100_tp-1_pp-2_mcore-false_te-false.json new file mode 100644 index 0000000000..b1917e084a --- /dev/null +++ b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-100_tp-1_pp-2_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [10.50685, 10.49816, 10.47982, 10.48566, 10.49533, 10.46662, 10.42394, 10.30694, 10.15979, 9.96957, 9.87618, 9.75265, 9.63628, 9.54661, 9.49972, 9.35969, 9.33181, 9.26258, 9.26438, 9.21491]}, "num-zeros": {"start_step": 0, "end_step": 84, "step_interval": 5, "values": [18772.0, 19035.0, 22296.0, 18412.0, 20887.0, 23006.0, 22439.0, 26762.0, 24562.0, 
25459.0, 17508.0, 32488.0, 28332.0, 20718.0, 37258.0, 30914.0, 26407.0]}, "iteration_timing_avg": 0.394903880597015} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-2_mcore-false_te-false.json new file mode 100644 index 0000000000..021bbc8a4b --- /dev/null +++ b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-2_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [10.50685, 10.49817, 10.47983, 10.48565, 10.49536, 10.46664, 10.42393, 10.30694, 10.15981, 9.96956, 9.87619, 9.75265, 9.63628, 9.54659, 9.49972, 9.35968, 9.33181, 9.26259, 9.26438, 9.21492]}, "num-zeros": {"start_step": 0, "end_step": 84, "step_interval": 5, "values": [18721.0, 19240.0, 22286.0, 18535.0, 20820.0, 23201.0, 22673.0, 26963.0, 24453.0, 25622.0, 17093.0, 32342.0, 27958.0, 20877.0, 37551.0, 30594.0, 26468.0]}, "iteration_timing_avg": 0.37912223880597} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-4_mcore-false_te-false_vp-2.json b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-4_mcore-false_te-false_vp-2.json new file mode 100644 index 0000000000..39bb4585d2 --- /dev/null +++ b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-4_mcore-false_te-false_vp-2.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.54837, 10.54636, 10.55694, 10.54151, 10.53088, 10.48503, 10.46275, 10.31499, 10.17122, 9.97326]}, "num-zeros": {"start_step": 0, "end_step": 34, "step_interval": 5, "values": [22606.0, 20619.0, 26292.0, 23607.0, 21666.0, 21672.0, 23313.0]}, "iteration_timing_avg": 0.7795826470588233} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_args-local-spec_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_args-local-spec_mcore-true_te-false.json new file mode 100644 index 0000000000..9afb0ee0df --- /dev/null +++ b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_args-local-spec_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.49849, 10.48909, 10.48383, 10.45052, 10.4396, 10.34793, 10.13229, 10.03818, 9.86253, 9.67165]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [2210.0, 2505.0, 2330.0, 2235.0, 2290.0, 2400.0, 2866.0, 3249.0, 3522.0, 2958.0]}, "iteration_timing_avg": 0.7140176470588235} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-false_te-false.json new file mode 100644 index 
0000000000..5a553ebb81 --- /dev/null +++ b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.44877, 10.43852, 10.44018, 10.44113, 10.45623, 10.44143, 10.39045, 10.25681, 10.13301, 9.95744]}, "num-zeros": {"start_step": 0, "end_step": 34, "step_interval": 5, "values": [27844.0, 20265.0, 28481.0, 26139.0, 24126.0, 21087.0, 21026.0]}, "iteration_timing_avg": 0.7523635294117648} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-true_te-false.json new file mode 100644 index 0000000000..d411d8c1a7 --- /dev/null +++ b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.49838, 10.48932, 10.4839, 10.45043, 10.43933, 10.34765, 10.1322, 10.03809, 9.86242, 9.67174]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [2309.0, 2556.0, 2286.0, 2336.0, 2345.0, 2428.0, 2974.0, 3161.0, 3625.0, 2918.0]}, "iteration_timing_avg": 0.8110379411764704} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-100_tp-1_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-100_tp-1_pp-2_mcore-false_te-false.json new file mode 100644 index 0000000000..bf335a35d0 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-100_tp-1_pp-2_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [10.51554, 10.51032, 10.52063, 10.52247, 10.51818, 10.5092, 10.43695, 10.29864, 10.16893, 9.98643, 9.9146, 9.78576, 9.67452, 9.55758, 9.50388, 9.35033, 9.34043, 9.27911, 9.27768, 9.20722]}, "num-zeros": {"start_step": 0, "end_step": 84, "step_interval": 5, "values": [21174.0, 21615.0, 24124.0, 18698.0, 23551.0, 18803.0, 19627.0, 27198.0, 25001.0, 25778.0, 15220.0, 35074.0, 26410.0, 22075.0, 37860.0, 28583.0, 23027.0]}, "iteration_timing_avg": 0.24888507462686574} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-4_mcore-false_te-false_vp-2.json b/tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-4_mcore-false_te-false_vp-2.json new file mode 100644 index 0000000000..a8886517f5 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-4_mcore-false_te-false_vp-2.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.42108, 10.43552, 10.43934, 10.43349, 10.42826, 10.42499, 10.37549, 10.2337, 10.1091, 9.93972]}, "num-zeros": {"start_step": 0, "end_step": 34, "step_interval": 5, "values": [19496.0, 22201.0, 
23780.0, 21779.0, 22701.0, 20018.0, 22409.0]}, "iteration_timing_avg": 0.5799538235294118} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_args-local-spec_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_args-local-spec_mcore-true_te-false.json new file mode 100644 index 0000000000..163496d61e --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_args-local-spec_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.47903, 10.47213, 10.46828, 10.4513, 10.4294, 10.35818, 10.16921, 10.09081, 9.918, 9.74324]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [2380.0, 1691.0, 2420.0, 2698.0, 2183.0, 2873.0, 2112.0, 3007.0, 1784.0, 2883.0]}, "iteration_timing_avg": 0.48770147058823515} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-false_te-false.json new file mode 100644 index 0000000000..e3733adeb7 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.46209, 10.46586, 10.47036, 10.48285, 10.46953, 10.4551, 10.4144, 10.27757, 10.15408, 9.98652]}, "num-zeros": {"start_step": 0, "end_step": 34, "step_interval": 5, "values": [19468.0, 20366.0, 23078.0, 23209.0, 20501.0, 21956.0, 23051.0]}, "iteration_timing_avg": 0.47122588235294105} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-true_te-false.json new file mode 100644 index 0000000000..2936e747d2 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.4791, 10.47202, 10.4682, 10.45128, 10.42934, 10.35805, 10.16903, 10.0907, 9.91791, 9.7432]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [2250.0, 1699.0, 2376.0, 2808.0, 2117.0, 2783.0, 2170.0, 2896.0, 1835.0, 2867.0]}, "iteration_timing_avg": 0.6237708823529412} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json new file mode 100644 index 0000000000..583d5ed358 --- /dev/null +++ 
b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [10.79022, 10.84034, 10.85603, 10.82319, 10.83355, 10.78173, 10.59641, 10.58331, 10.49545, 10.22799, 10.19949, 9.94816, 9.94997, 9.91997, 9.79865, 9.25223, 9.61408, 9.19153, 9.46281, 9.62472]}, "num-zeros": {"start_step": 0, "end_step": 83, "step_interval": 5, "values": [2130.0, 2531.0, 2368.0, 2204.0, 2141.0, 2068.0, 2772.0, 2715.0, 2831.0, 2384.0, 2870.0, 2893.0, 3396.0, 3064.0, 3136.0, 2916.0, 3917.0]}, "iteration_timing_avg": 0.06181014925373134} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--recompute-granularity-full-recompute-method-uniform-recompute-num-layers-1-_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--recompute-granularity-full-recompute-method-uniform-recompute-num-layers-1-_mcore-true_te-false.json new file mode 100644 index 0000000000..c7c5e0bab9 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--recompute-granularity-full-recompute-method-uniform-recompute-num-layers-1-_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.89293, 10.8995, 10.87875, 10.855, 10.73496, 10.63535, 10.1566, 10.24211, 10.15574, 9.82117]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1653.0, 1779.0, 1911.0, 1928.0, 1880.0, 1881.0, 1618.0, 1983.0, 2375.0, 2352.0]}, "iteration_timing_avg": 0.05425676470588235} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-true_te-false.json new file mode 100644 index 0000000000..8abb3869de --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.89293, 10.89952, 10.87875, 10.85504, 10.73491, 10.63533, 10.15658, 10.2421, 10.15573, 9.82116]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1608.0, 1717.0, 1868.0, 1920.0, 1891.0, 1766.0, 1630.0, 1955.0, 2416.0, 2390.0]}, "iteration_timing_avg": 0.04569411764705883} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-uniform-full-recompute_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-uniform-full-recompute_mcore-true_te-false.json new file mode 100644 index 0000000000..b68287b6eb --- /dev/null +++ 
b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-uniform-full-recompute_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.89293, 10.8995, 10.87875, 10.855, 10.73496, 10.63535, 10.1566, 10.24211, 10.15574, 9.82117]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1653.0, 1779.0, 1911.0, 1928.0, 1880.0, 1881.0, 1618.0, 1983.0, 2375.0, 2352.0]}, "iteration_timing_avg": 0.06516882352941178} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args--position-embedding-type-rope-_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args--position-embedding-type-rope-_mcore-true_te-false.json new file mode 100644 index 0000000000..a4f609529b --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args--position-embedding-type-rope-_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85699, 10.89518, 10.87243, 10.82432, 10.68786, 10.58313, 10.08482, 10.18068, 10.10597, 9.75607]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1858.0, 1946.0, 2096.0, 1900.0, 2011.0, 1803.0, 1737.0, 2092.0, 2335.0, 2201.0]}, "iteration_timing_avg": 0.06518264705882353} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings-interleaved-no-fusion_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings-interleaved-no-fusion_mcore-true_te-false.json new file mode 100644 index 0000000000..345d7fcc5f --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings-interleaved-no-fusion_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.858, 10.89563, 10.87285, 10.8249, 10.68816, 10.58405, 10.08513, 10.18125, 10.1058, 9.75605]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1864.0, 2004.0, 2086.0, 1978.0, 1975.0, 1889.0, 1656.0, 2059.0, 2227.0, 2306.0]}, "iteration_timing_avg": 0.08140323529411765} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings_mcore-true_te-false.json new file mode 100644 index 0000000000..2dcc249220 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85699, 10.89518, 10.87243, 10.82432, 10.68786, 10.58313, 10.08482, 
10.18068, 10.10597, 9.75607]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1858.0, 1946.0, 2096.0, 1900.0, 2011.0, 1803.0, 1737.0, 2092.0, 2335.0, 2201.0]}, "iteration_timing_avg": 0.07560441176470588} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--disable-bias-linear_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--disable-bias-linear_mcore-true_te-false.json new file mode 100644 index 0000000000..ac62b7581a --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--disable-bias-linear_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85535, 10.89042, 10.88142, 10.82973, 10.70858, 10.61199, 10.1184, 10.22418, 10.13702, 9.80781]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1629.0, 1692.0, 1882.0, 1929.0, 1936.0, 1669.0, 1603.0, 1903.0, 2128.0, 2278.0]}, "iteration_timing_avg": 0.07373852941176468} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--sequence-parallel_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--sequence-parallel_mcore-true_te-false.json new file mode 100644 index 0000000000..cfde369603 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--sequence-parallel_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85534, 10.88947, 10.8806, 10.8283, 10.70687, 10.60921, 10.11533, 10.22106, 10.13408, 9.80477]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1669.0, 1816.0, 1897.0, 1831.0, 1824.0, 1649.0, 1484.0, 1877.0, 2140.0, 2202.0]}, "iteration_timing_avg": 0.07589941176470587} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--swiglu_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--swiglu_mcore-true_te-false.json new file mode 100644 index 0000000000..42d4cd72ba --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--swiglu_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.78152, 10.8477, 10.85991, 10.80229, 10.72398, 10.64556, 10.25979, 10.36953, 10.30726, 9.969]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [2441.0, 2962.0, 2986.0, 2963.0, 2701.0, 2657.0, 2300.0, 2619.0, 2655.0, 2484.0]}, "iteration_timing_avg": 0.07880588235294116} \ No newline at end of file diff --git 
a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--untie-embeddings-and-output-weights_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--untie-embeddings-and-output-weights_mcore-true_te-false.json new file mode 100644 index 0000000000..2800068b0b --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--untie-embeddings-and-output-weights_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.91778, 10.93688, 10.92414, 10.85264, 10.74695, 10.66448, 10.16759, 10.27157, 10.17695, 9.86116]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [22728092.0, 23020904.0, 22500632.0, 22830582.0, 22739828.0, 22547742.0, 22955712.0, 22588520.0, 22658932.0, 22885368.0]}, "iteration_timing_avg": 0.07554499999999999} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-disable-bias-linear_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-disable-bias-linear_mcore-true_te-false.json new file mode 100644 index 0000000000..018a6ecd39 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-disable-bias-linear_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85535, 10.89042, 10.88142, 10.82973, 10.70858, 10.61199, 10.1184, 10.22418, 10.13702, 9.80781]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1629.0, 1692.0, 1882.0, 1929.0, 1936.0, 1669.0, 1603.0, 1903.0, 2128.0, 2278.0]}, "iteration_timing_avg": 0.0864920588235294} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false_vp-1.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false_vp-1.json new file mode 100644 index 0000000000..23a753821c --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false_vp-1.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84693, 10.89428, 10.88916, 10.82637, 10.70814, 10.61007, 10.11963, 10.22997, 10.15772, 9.83339]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1636.0, 1747.0, 1801.0, 1862.0, 1827.0, 1711.0, 1708.0, 1954.0, 2328.0, 2335.0]}, "iteration_timing_avg": 0.09368529411764706} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-untied_mcore-true_te-false_vp-1.json 
b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-untied_mcore-true_te-false_vp-1.json new file mode 100644 index 0000000000..4113dfc61d --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-untied_mcore-true_te-false_vp-1.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.92853, 10.937, 10.92943, 10.87789, 10.75133, 10.67044, 10.17418, 10.27899, 10.1883, 9.87023]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [22727964.0, 23020600.0, 22500812.0, 22830580.0, 22739790.0, 22548252.0, 22955676.0, 22589500.0, 22659010.0, 22884684.0]}, "iteration_timing_avg": 0.085995} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false_vp-1.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false_vp-1.json new file mode 100644 index 0000000000..262b2c579e --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false_vp-1.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84693, 10.89428, 10.88916, 10.82637, 10.70814, 10.61007, 10.11963, 10.22997, 10.15772, 9.83339]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1636.0, 1747.0, 1801.0, 1862.0, 1827.0, 1711.0, 1708.0, 1954.0, 2328.0, 2335.0]}, "iteration_timing_avg": 0.08397176470588234} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-sequence-parallel_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-sequence-parallel_mcore-true_te-false.json new file mode 100644 index 0000000000..e4c1262364 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-sequence-parallel_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85534, 10.88947, 10.8806, 10.8283, 10.70687, 10.60921, 10.11533, 10.22106, 10.13408, 9.80477]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1669.0, 1816.0, 1897.0, 1831.0, 1824.0, 1649.0, 1484.0, 1877.0, 2140.0, 2202.0]}, "iteration_timing_avg": 0.0912420588235294} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-swiglu_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-swiglu_mcore-true_te-false.json new file mode 100644 index 0000000000..6775db704b --- /dev/null +++ 
b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-swiglu_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.78152, 10.8477, 10.85991, 10.80229, 10.72398, 10.64556, 10.25979, 10.36953, 10.30726, 9.969]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [2441.0, 2962.0, 2986.0, 2963.0, 2701.0, 2657.0, 2300.0, 2619.0, 2655.0, 2484.0]}, "iteration_timing_avg": 0.09503617647058824} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-untie-embeddings-and-outputs_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-untie-embeddings-and-outputs_mcore-true_te-false.json new file mode 100644 index 0000000000..cc1244e378 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-untie-embeddings-and-outputs_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.91778, 10.93688, 10.92414, 10.85264, 10.74695, 10.66448, 10.16759, 10.27157, 10.17695, 9.86116]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [22728092.0, 23020904.0, 22500632.0, 22830582.0, 22739828.0, 22547742.0, 22955712.0, 22588520.0, 22658932.0, 22885368.0]}, "iteration_timing_avg": 0.09069441176470588} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false_vp-1.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false_vp-1.json new file mode 100644 index 0000000000..61d841b3d7 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false_vp-1.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.8214, 10.8607, 10.8735, 10.85187, 10.84091, 10.80628, 10.6169, 10.59573, 10.50423, 10.22238]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2348.0, 2524.0, 2517.0, 2205.0, 2198.0, 2558.0, 2398.0]}, "iteration_timing_avg": 0.07500764705882351} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false_vp-1.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false_vp-1.json new file mode 100644 index 0000000000..a99307432e --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false_vp-1.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84693, 10.89428, 10.88918, 10.82635, 10.70816, 10.61006, 10.11963, 10.22999, 10.15774, 9.83337]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1636.0, 1747.0, 1846.0, 1868.0, 1856.0, 1652.0, 1638.0, 
1903.0, 2315.0, 2381.0]}, "iteration_timing_avg": 0.08791117647058823} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args--sequence-parallel-num-experts-8-expert-model-parallel-size-2-_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args--sequence-parallel-num-experts-8-expert-model-parallel-size-2-_mcore-true_te-false.json new file mode 100644 index 0000000000..8c98a7e5ab --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args--sequence-parallel-num-experts-8-expert-model-parallel-size-2-_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79006, 10.84111, 10.85509, 10.77861, 10.65335, 10.5612, 10.0453, 10.17548, 10.08263, 9.73342]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [62799.0, 65700.0, 66095.0, 65614.0, 64292.0, 65219.0, 63857.0, 66058.0, 67089.0, 67822.0]}, "iteration_timing_avg": 0.30804088235294114} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-dist-optimizer_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-dist-optimizer_mcore-true_te-false.json new file mode 100644 index 0000000000..04eb336aac --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-dist-optimizer_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79066, 10.83474, 10.85443, 10.77921, 10.69997, 10.61398, 10.15871, 10.27978, 10.19497, 9.86981]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [30950.0, 37387.0, 37772.0, 36424.0, 33230.0, 34567.0, 30132.0, 34960.0, 36224.0, 37476.0]}, "iteration_timing_avg": 0.20243735294117646} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-groupedgemm_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-groupedgemm_mcore-true_te-false.json new file mode 100644 index 0000000000..f464650d3b --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-groupedgemm_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.80426, 10.84849, 10.86146, 10.81012, 10.72201, 10.64589, 10.2092, 10.32252, 10.23908, 9.92465]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16350.0, 19608.0, 19689.0, 19043.0, 17602.0, 17956.0, 15632.0, 18288.0, 18606.0, 19277.0]}, "iteration_timing_avg": 0.13919470588235297} \ No newline at end of file diff --git 
a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-top2router_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-top2router_mcore-true_te-false.json new file mode 100644 index 0000000000..761c53aecb --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-top2router_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.78922, 10.8416, 10.85552, 10.77966, 10.65528, 10.56398, 10.04054, 10.17415, 10.08488, 9.73406]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [13541.0, 16797.0, 17213.0, 16564.0, 15382.0, 15817.0, 14915.0, 17089.0, 17939.0, 18387.0]}, "iteration_timing_avg": 0.21506794117647057} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel_mcore-true_te-false.json new file mode 100644 index 0000000000..f58d4c4ceb --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79066, 10.83467, 10.85342, 10.77851, 10.70005, 10.61316, 10.15957, 10.27971, 10.19511, 9.87028]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16055.0, 19166.0, 19161.0, 18797.0, 17405.0, 17721.0, 15678.0, 18223.0, 18580.0, 19742.0]}, "iteration_timing_avg": 0.20099058823529406} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-false.json new file mode 100644 index 0000000000..a465e34711 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85706, 10.8832, 10.88511, 10.87562, 10.8708, 10.83108, 10.65065, 10.63723, 10.53201, 10.25681]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2479.0, 2534.0, 2786.0, 2310.0, 2385.0, 2586.0, 2472.0]}, "iteration_timing_avg": 0.09594764705882353} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-true.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-true.json new file mode 100644 index 0000000000..c218a0ad40 --- /dev/null +++ 
b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-true.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85899, 10.88286, 10.87687, 10.82429, 10.69664, 10.60784, 10.11662, 10.2347, 10.14673, 9.82429]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1627.0, 1874.0, 1894.0, 1862.0, 1901.0, 1649.0, 1553.0, 1949.0, 2281.0, 2225.0]}, "iteration_timing_avg": 0.10429970588235296} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-true_te-false.json new file mode 100644 index 0000000000..79db29b177 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.86873, 10.891, 10.89716, 10.84022, 10.70435, 10.61599, 10.11661, 10.23183, 10.14875, 9.82429]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1619.0, 1839.0, 1712.0, 1853.0, 1810.0, 1682.0, 1567.0, 1997.0, 2186.0, 2376.0]}, "iteration_timing_avg": 0.1169185294117647} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false.json new file mode 100644 index 0000000000..baf2c64a93 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84429, 10.86285, 10.86177, 10.81156, 10.69316, 10.61799, 10.16498, 10.25035, 10.15231, 9.83669]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1932.0, 2302.0, 2138.0, 2193.0, 2254.0, 2189.0, 1844.0, 2313.0, 2538.0, 2473.0]}, "iteration_timing_avg": 0.16636205882352936} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false.json new file mode 100644 index 0000000000..5db54e4e03 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84429, 10.86285, 10.86177, 10.81156, 10.69316, 10.61799, 10.16498, 10.25035, 10.15231, 9.83669]}, "num-zeros": 
{"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1932.0, 2302.0, 2138.0, 2193.0, 2254.0, 2189.0, 1844.0, 2313.0, 2538.0, 2473.0]}, "iteration_timing_avg": 0.1574994117647059} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--overlap-grad-reduce_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--overlap-grad-reduce_mcore-false_te-false.json new file mode 100644 index 0000000000..a042df661f --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--overlap-grad-reduce_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.83801, 10.8696, 10.87494, 10.85972, 10.85916, 10.81678, 10.65633, 10.6236, 10.52854, 10.29768]}, "num-zeros": {"start_step": 0, "end_step": 32, "step_interval": 5, "values": [1304.0, 1403.0, 1377.0, 1380.0, 1272.0, 1176.0, 1272.0]}, "iteration_timing_avg": 0.04439352941176471} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--use-distributed-optimizer-overlap-grad-reduce-_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--use-distributed-optimizer-overlap-grad-reduce-_mcore-false_te-false.json new file mode 100644 index 0000000000..35f8847c88 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--use-distributed-optimizer-overlap-grad-reduce-_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.83801, 10.8696, 10.87494, 10.85972, 10.85916, 10.81678, 10.65633, 10.6236, 10.52854, 10.29768]}, "num-zeros": {"start_step": 0, "end_step": 32, "step_interval": 5, "values": [1227.0, 1343.0, 1547.0, 1357.0, 1571.0, 1230.0, 1219.0]}, "iteration_timing_avg": 0.03908823529411766} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_mcore-false_te-false.json new file mode 100644 index 0000000000..d1b26c3e5a --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79022, 10.84034, 10.85603, 10.82319, 10.83355, 10.78173, 10.59641, 10.58331, 10.49545, 10.22799]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2130.0, 2531.0, 2368.0, 2204.0, 2141.0, 2068.0, 2772.0]}, "iteration_timing_avg": 0.05724441176470588} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_mcore-true_te-false.json 
b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_mcore-true_te-false.json new file mode 100644 index 0000000000..49c0ec8442 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85892, 10.88861, 10.86994, 10.82442, 10.69985, 10.60452, 10.11465, 10.21649, 10.13247, 9.80078]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1630.0, 1743.0, 1840.0, 1746.0, 1857.0, 1749.0, 1522.0, 1957.0, 2244.0, 2275.0]}, "iteration_timing_avg": 0.05806264705882354} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--overlap-grad-reduce_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--overlap-grad-reduce_mcore-false_te-false.json new file mode 100644 index 0000000000..33edc35038 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--overlap-grad-reduce_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.76735, 10.82061, 10.85176, 10.80762, 10.80235, 10.75942, 10.55108, 10.55646, 10.48053, 10.18986]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2463.0, 2560.0, 2625.0, 2343.0, 2301.0, 2659.0, 2515.0]}, "iteration_timing_avg": 0.07604500000000002} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--overlap-grad-reduce_mcore-false_te-false_vp-1.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--overlap-grad-reduce_mcore-false_te-false_vp-1.json new file mode 100644 index 0000000000..9caed9a476 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--overlap-grad-reduce_mcore-false_te-false_vp-1.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.8214, 10.8607, 10.8735, 10.85187, 10.84091, 10.80628, 10.6169, 10.59573, 10.50423, 10.22238]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2348.0, 2524.0, 2517.0, 2205.0, 2198.0, 2558.0, 2398.0]}, "iteration_timing_avg": 0.07640823529411767} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false.json new file mode 100644 index 0000000000..c9fed16590 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.76735, 10.82061, 10.85176, 10.80762, 10.80235, 10.75942, 10.55108, 10.55646, 10.48053, 10.18986]}, 
"num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2463.0, 2560.0, 2625.0, 2343.0, 2301.0, 2659.0, 2515.0]}, "iteration_timing_avg": 0.07574117647058824} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false.json new file mode 100644 index 0000000000..f78097878b --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85534, 10.88947, 10.8806, 10.8283, 10.70687, 10.60921, 10.11533, 10.22106, 10.13408, 9.80477]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1669.0, 1816.0, 1897.0, 1831.0, 1824.0, 1649.0, 1484.0, 1877.0, 2140.0, 2202.0]}, "iteration_timing_avg": 0.07627117647058825} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--num-experts-2-_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--num-experts-2-_mcore-true_te-false.json new file mode 100644 index 0000000000..198829bc86 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--num-experts-2-_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.78716, 10.84699, 10.85759, 10.78461, 10.67832, 10.57601, 10.12353, 10.23947, 10.14691, 9.8453]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [2854.0, 3564.0, 3434.0, 3325.0, 3414.0, 3098.0, 2890.0, 3447.0, 3763.0, 3722.0]}, "iteration_timing_avg": 0.1694220588235294} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--num-experts-4-_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--num-experts-4-_mcore-false_te-false.json new file mode 100644 index 0000000000..e9f91c3218 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--num-experts-4-_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.83396, 10.86879, 10.87134, 10.85907, 10.8533, 10.82064, 10.63379, 10.6223, 10.54684, 10.28702]}, "num-zeros": {"start_step": 0, "end_step": 34, "step_interval": 5, "values": [8033.0, 8627.0, 7962.0, 8736.0, 9022.0, 8598.0, 9184.0]}, "iteration_timing_avg": 0.24976352941176466} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--overlap-grad-reduce_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--overlap-grad-reduce_mcore-false_te-false.json new file mode 100644 index 
0000000000..66db39da61 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--overlap-grad-reduce_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85706, 10.8832, 10.88511, 10.87562, 10.8708, 10.83108, 10.65065, 10.63723, 10.53201, 10.25681]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2479.0, 2534.0, 2786.0, 2310.0, 2385.0, 2586.0, 2472.0]}, "iteration_timing_avg": 0.08829235294117646} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--sequence-parallel-num-experts-4-expert-model-parallel-size-2-_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--sequence-parallel-num-experts-4-expert-model-parallel-size-2-_mcore-true_te-false.json new file mode 100644 index 0000000000..8406f71c56 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--sequence-parallel-num-experts-4-expert-model-parallel-size-2-_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.82019, 10.86146, 10.84723, 10.80694, 10.71538, 10.62576, 10.19501, 10.29544, 10.20202, 9.89846]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [7232.0, 8819.0, 8924.0, 8402.0, 7411.0, 8004.0, 6922.0, 8255.0, 8761.0, 8825.0]}, "iteration_timing_avg": 0.18263705882352937} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args--overlap-grad-reduce_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args--overlap-grad-reduce_mcore-false_te-false.json new file mode 100644 index 0000000000..241acc5584 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args--overlap-grad-reduce_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84879, 10.88953, 10.88082, 10.88243, 10.86947, 10.8354, 10.64786, 10.63862, 10.52242, 10.23812]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2301.0, 2328.0, 2442.0, 1993.0, 2210.0, 2464.0, 2376.0]}, "iteration_timing_avg": 0.12472558823529412} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_mcore-false_te-false.json new file mode 100644 index 0000000000..cf0bfe8b21 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84879, 10.88953, 10.88082, 10.88243, 10.86947, 10.8354, 10.64786, 10.63862, 10.52242, 10.23812]}, "num-zeros": {"start_step": 0, "end_step": 33, 
"step_interval": 5, "values": [2301.0, 2328.0, 2442.0, 1993.0, 2210.0, 2464.0, 2376.0]}, "iteration_timing_avg": 0.1177205882352941} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_mcore-true_te-false.json new file mode 100644 index 0000000000..65ce4c00d4 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84429, 10.86285, 10.86177, 10.81154, 10.69313, 10.61794, 10.16497, 10.25034, 10.15227, 9.83669]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1932.0, 2302.0, 2138.0, 2132.0, 2358.0, 2122.0, 1902.0, 2296.0, 2565.0, 2589.0]}, "iteration_timing_avg": 0.13276323529411763} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/t5_220m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-1_mcore-true_te-true_vp-1.json b/tests/functional_tests/test_results/jet/dgx_h100/t5_220m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-1_mcore-true_te-true_vp-1.json new file mode 100644 index 0000000000..5b613dea44 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/t5_220m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-1_mcore-true_te-true_vp-1.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [10.34848, 9.45337, 8.89369, 8.56467, 8.28131, 8.12832, 7.82238, 7.55462, 7.42172, 7.28716, 7.32811, 7.22045, 7.11648, 7.03859, 6.87728, 6.94356, 6.94705, 7.02828, 6.71597, 6.9486]}, "num-zeros": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [43307.0, 40999.0, 44043.0, 41749.0, 44811.0, 44001.0, 41304.0, 42490.0, 44698.0, 43956.0, 41137.0, 43230.0, 39726.0, 45427.0, 43358.0, 43930.0, 45426.0, 45701.0, 46301.0, 44734.0]}, "iteration_timing_avg": 0.12808164179104478} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--recompute-granularity-full-recompute-method-uniform-recompute-num-layers-1-.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--recompute-granularity-full-recompute-method-uniform-recompute-num-layers-1-.json deleted file mode 100644 index 33dc6ccf25..0000000000 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--recompute-granularity-full-recompute-method-uniform-recompute-num-layers-1-.json +++ /dev/null @@ -1 +0,0 @@ - {"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.83721, 10.87648, 10.85329, 10.79637, 10.67873, 10.60491, 10.12635, 10.22253, 10.13979, 9.82348]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1589.0, 1913.0, 1924.0, 1876.0, 2005.0, 1749.0, 1631.0, 1981.0, 2346.0, 2380.0]}, "iteration_timing_avg": 0.07807617647058823} diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2.json 
b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2.json deleted file mode 100644 index dbab21195c..0000000000 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2.json +++ /dev/null @@ -1 +0,0 @@ -{"lm loss": {"start_step": 0, "end_step": 36, "step_interval": 5, "values": [10.83273, 10.86849, 10.89112, 10.80713, 10.68491, 10.61253, 10.09319, 10.21393]}, "num-zeros": {"start_step": 0, "end_step": 36, "step_interval": 5, "values": [1551.0, 1809.0, 1799.0, 1862.0, 1872.0, 1643.0, 1596.0, 1880.0]}, "iteration_timing_avg": 0.09391500000000001} diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args--position-embedding-type-rope-.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args--position-embedding-type-rope-.json deleted file mode 100644 index 0e1b686347..0000000000 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args--position-embedding-type-rope-.json +++ /dev/null @@ -1 +0,0 @@ - {"lm loss": {"start_step": 0, "end_step": 49, "step_interval": 5, "values": [10.84608, 10.87634, 10.90424, 10.81754, 10.67579, 10.60283, 10.06667, 10.19261, 10.11413, 9.7617]}, "num-zeros": {"start_step": 0, "end_step": 49, "step_interval": 5, "values": [1709.0, 2192.0, 2059.0, 1960.0, 2164.0, 1846.0, 1614.0, 2074.0, 2176.0, 2249.0]}, "iteration_timing_avg": 0.10411636363636363} diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4.json deleted file mode 100644 index 41ec145eb9..0000000000 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4.json +++ /dev/null @@ -1 +0,0 @@ -{"lm loss": {"start_step": 0, "end_step": 29, "step_interval": 5, "values": [10.79373, 10.86651, 10.89091, 10.78164, 10.66101, 10.58089]}, "num-zeros": {"start_step": 0, "end_step": 29, "step_interval": 5, "values": [1670.0, 1864.0, 1826.0, 1965.0, 1861.0, 1605.0]}, "iteration_timing_avg": 0.12559400000000004} diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--disable-bias-linear.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--disable-bias-linear.json deleted file mode 100644 index 47f6b7f2d7..0000000000 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--disable-bias-linear.json +++ /dev/null @@ -1 +0,0 @@ -{"lm loss": {"start_step": 0, "end_step": 38, "step_interval": 5, "values": [10.79374, 10.86745, 10.89179, 10.78304, 10.66262, 10.58362, 10.08688, 10.19342]}, "num-zeros": {"start_step": 0, "end_step": 38, "step_interval": 5, "values": [1567.0, 1904.0, 1912.0, 1931.0, 1799.0, 1722.0, 1591.0, 1950.0]}, "iteration_timing_avg": 0.12253038461538461} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--sequence-parallel.json 
b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--sequence-parallel.json deleted file mode 100644 index 6f18af2e36..0000000000 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--sequence-parallel.json +++ /dev/null @@ -1 +0,0 @@ -{"lm loss": {"start_step": 0, "end_step": 42, "step_interval": 5, "values": [10.79373, 10.86651, 10.89091, 10.78164, 10.66101, 10.58089, 10.08413, 10.19034, 10.13461]}, "num-zeros": {"start_step": 0, "end_step": 42, "step_interval": 5, "values": [1670.0, 1864.0, 1826.0, 1965.0, 1861.0, 1605.0, 1609.0, 1931.0, 2343.0]}, "iteration_timing_avg": 0.12682214285714286} diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--swiglu.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--swiglu.json deleted file mode 100644 index 610578a37a..0000000000 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--swiglu.json +++ /dev/null @@ -1 +0,0 @@ -{"lm loss": {"start_step": 0, "end_step": 29, "step_interval": 5, "values": [10.73353, 10.81676, 10.83941, 10.7586, 10.70146, 10.62786]}, "num-zeros": {"start_step": 0, "end_step": 28, "step_interval": 5, "values": [2536.0, 2988.0, 2925.0, 2895.0, 2617.0, 2603.0]}, "iteration_timing_avg": 0.1284436842105263} diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--untie-embeddings-and-output-weights.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--untie-embeddings-and-output-weights.json deleted file mode 100644 index c707a0a903..0000000000 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--untie-embeddings-and-output-weights.json +++ /dev/null @@ -1 +0,0 @@ -{"lm loss": {"start_step": 0, "end_step": 28, "step_interval": 5, "values": [10.8968, 10.90735, 10.91688, 10.84693, 10.70699, 10.63243]}, "num-zeros": {"start_step": 0, "end_step": 28, "step_interval": 5, "values": [22727844.0, 23021590.0, 22500488.0, 22830910.0, 22739472.0, 22546526.0]}, "iteration_timing_avg": 0.12624631578947368} diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2.json deleted file mode 100644 index 3b63e1c3d0..0000000000 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2.json +++ /dev/null @@ -1 +0,0 @@ -{"lm loss": {"start_step": 0, "end_step": 40, "step_interval": 5, "values": [10.92392, 10.93645, 10.89657, 10.86919, 10.74782, 10.658, 10.15864, 10.24906]}, "num-zeros": {"start_step": 0, "end_step": 40, "step_interval": 5, "values": [1735.0, 1861.0, 2111.0, 1844.0, 1762.0, 1858.0, 1554.0, 2031.0]}, "iteration_timing_avg": 0.14889185185185186} diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1.json 
b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1.json deleted file mode 100644 index 74da2480d5..0000000000 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1.json +++ /dev/null @@ -1 +0,0 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.86174, 10.88685, 10.87663, 10.83061, 10.71359, 10.60783, 10.13039, 10.23076, 10.15871, 9.83396]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1747.0, 2204.0, 2061.0, 2108.0, 2163.0, 1914.0, 1682.0, 2267.0, 2474.0, 2569.0]}, "iteration_timing_avg": 0.20121235294117648} diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request-resume_16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request-resume_16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json new file mode 100644 index 0000000000..cb29680bfe --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request-resume_16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [10.84009, 10.89053, 10.90905, 10.87933, 10.86561, 10.83752, 10.64582, 10.62396, 10.53554, 10.25187, 10.20873, 9.96714, 9.96605, 9.92368, 9.79178, 9.26741, 9.61926, 9.18974, 9.46019, 9.62277]}, "num-zeros": {"start_step": 0, "end_step": 83, "step_interval": 5, "values": [2118.0, 2371.0, 2498.0, 2225.0, 2122.0, 2090.0, 2315.0, 2784.0, 2701.0, 2324.0, 2745.0, 2871.0, 3475.0, 3095.0, 3249.0, 3160.0, 3877.0]}, "iteration_timing_avg": 0.09977388059701493} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json new file mode 100644 index 0000000000..a7699776dd --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [10.84008, 10.89053, 10.90905, 10.87934, 10.86562, 10.83752, 10.64582, 10.62396, 10.53554, 10.25187, 10.20874, 9.96714, 9.96605, 9.92367, 9.79178, 9.26741, 9.61926, 9.18973, 9.46019, 9.62277]}, "num-zeros": {"start_step": 0, "end_step": 83, "step_interval": 5, "values": [2078.0, 2328.0, 2420.0, 2256.0, 2180.0, 2078.0, 2313.0, 2857.0, 2696.0, 2315.0, 2912.0, 2942.0, 3493.0, 3045.0, 3229.0, 3100.0, 3718.0]}, "iteration_timing_avg": 0.10716462686567164} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-false_te-false.json new file mode 100644 index 0000000000..c92bb929d1 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": 
{"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.87174, 10.89545, 10.88847, 10.88533, 10.893, 10.84895, 10.70048, 10.64124, 10.53839, 10.3107]}, "num-zeros": {"start_step": 0, "end_step": 32, "step_interval": 5, "values": [1238.0, 1318.0, 1648.0, 1423.0, 1535.0, 1350.0, 1271.0]}, "iteration_timing_avg": 0.06317382352941177} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-true_te-false.json new file mode 100644 index 0000000000..633847bc15 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.83721, 10.87648, 10.85327, 10.79634, 10.67874, 10.60491, 10.12636, 10.22252, 10.13977, 9.82346]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1640.0, 1873.0, 1930.0, 1910.0, 1936.0, 1807.0, 1630.0, 1962.0, 2317.0, 2314.0]}, "iteration_timing_avg": 0.06904588235294119} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-uniform-full-recompute_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-uniform-full-recompute_mcore-true_te-false.json new file mode 100644 index 0000000000..2b29a51a27 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-uniform-full-recompute_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.83721, 10.87648, 10.85329, 10.79637, 10.67873, 10.60491, 10.12635, 10.22253, 10.13979, 9.82348]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1589.0, 1913.0, 1924.0, 1876.0, 2005.0, 1749.0, 1631.0, 1981.0, 2346.0, 2380.0]}, "iteration_timing_avg": 0.09164500000000002} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings-interleaved-no-fusion_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings-interleaved-no-fusion_mcore-true_te-false.json new file mode 100644 index 0000000000..4357d8badf --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings-interleaved-no-fusion_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84407, 10.87551, 10.90356, 10.81577, 10.67451, 10.60208, 10.06584, 10.19215, 10.11381, 9.76133]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1717.0, 2136.0, 2046.0, 1923.0, 2052.0, 1910.0, 1717.0, 2008.0, 2269.0, 2231.0]}, "iteration_timing_avg": 0.11052176470588236} \ No newline at end of file diff --git 
a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings_mcore-true_te-false.json new file mode 100644 index 0000000000..b4db7bde9b --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84608, 10.87634, 10.90424, 10.81754, 10.67579, 10.60283, 10.06667, 10.19261, 10.11413, 9.7617]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1709.0, 2192.0, 2059.0, 1960.0, 2164.0, 1846.0, 1614.0, 2074.0, 2176.0, 2249.0]}, "iteration_timing_avg": 0.11051617647058823} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-disable-bias-linear_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-disable-bias-linear_mcore-true_te-false.json new file mode 100644 index 0000000000..eedf2baa8b --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-disable-bias-linear_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79374, 10.86745, 10.89179, 10.78304, 10.66262, 10.58362, 10.08688, 10.19342, 10.13764, 9.81438]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1567.0, 1904.0, 1912.0, 1931.0, 1799.0, 1722.0, 1591.0, 1950.0, 2428.0, 2378.0]}, "iteration_timing_avg": 0.12243558823529416} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-false_te-false_vp-1.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-false_te-false_vp-1.json new file mode 100644 index 0000000000..6362aacb7c --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-false_te-false_vp-1.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.7951, 10.84939, 10.87411, 10.83459, 10.82865, 10.78676, 10.56492, 10.57063, 10.48544, 10.19547]}, "num-zeros": {"start_step": 0, "end_step": 34, "step_interval": 5, "values": [2586.0, 2828.0, 2105.0, 2725.0, 2711.0, 2428.0, 2946.0]}, "iteration_timing_avg": 0.12451529411764707} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false_vp-1.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false_vp-1.json new file mode 100644 index 
0000000000..cd7044ddda --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false_vp-1.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.82096, 10.87269, 10.88192, 10.79677, 10.68633, 10.59654, 10.09782, 10.21295, 10.13917, 9.80682]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1501.0, 1749.0, 1845.0, 1786.0, 1912.0, 1741.0, 1567.0, 1927.0, 2280.0, 2405.0]}, "iteration_timing_avg": 0.12873676470588236} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-untied_mcore-true_te-false_vp-1.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-untied_mcore-true_te-false_vp-1.json new file mode 100644 index 0000000000..d8ea1345ac --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-untied_mcore-true_te-false_vp-1.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.9362, 10.93543, 10.9456, 10.87817, 10.75688, 10.66385, 10.16947, 10.27156, 10.19469, 9.85867]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [22727572.0, 23021722.0, 22500652.0, 22830476.0, 22739252.0, 22547046.0, 22954704.0, 22589164.0, 22659710.0, 22883876.0]}, "iteration_timing_avg": 0.12799705882352944} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-false_te-false_vp-1.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-false_te-false_vp-1.json new file mode 100644 index 0000000000..11b747f2d3 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-false_te-false_vp-1.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.7951, 10.84939, 10.87411, 10.83459, 10.82865, 10.78676, 10.56492, 10.57063, 10.48544, 10.19547]}, "num-zeros": {"start_step": 0, "end_step": 34, "step_interval": 5, "values": [2586.0, 2828.0, 2105.0, 2725.0, 2711.0, 2428.0, 2946.0]}, "iteration_timing_avg": 0.11798852941176469} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false_vp-1.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false_vp-1.json new file mode 100644 index 0000000000..c9e2aa6032 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false_vp-1.json @@ -0,0 +1 @@ +{"lm loss": 
{"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.82096, 10.87269, 10.88192, 10.79677, 10.68633, 10.59654, 10.09782, 10.21295, 10.13917, 9.80682]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1501.0, 1749.0, 1845.0, 1786.0, 1912.0, 1741.0, 1567.0, 1927.0, 2280.0, 2405.0]}, "iteration_timing_avg": 0.12168999999999999} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-sequence-parallel_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-sequence-parallel_mcore-true_te-false.json new file mode 100644 index 0000000000..ac3c1f57f2 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-sequence-parallel_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79373, 10.86651, 10.89091, 10.78164, 10.66101, 10.58089, 10.08413, 10.19034, 10.13461, 9.81138]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1670.0, 1864.0, 1826.0, 1965.0, 1861.0, 1605.0, 1609.0, 1931.0, 2343.0, 2347.0]}, "iteration_timing_avg": 0.12348235294117646} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-swiglu_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-swiglu_mcore-true_te-false.json new file mode 100644 index 0000000000..a2d5ed7952 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-swiglu_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.73353, 10.81676, 10.83941, 10.7586, 10.70146, 10.62786, 10.20836, 10.36754, 10.26496, 9.94346]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [2536.0, 2988.0, 2925.0, 2895.0, 2617.0, 2603.0, 2325.0, 2704.0, 2592.0, 2406.0]}, "iteration_timing_avg": 0.12725500000000006} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-untie-embeddings-and-outputs_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-untie-embeddings-and-outputs_mcore-true_te-false.json new file mode 100644 index 0000000000..e294c75c0f --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-untie-embeddings-and-outputs_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.8968, 10.90735, 10.91688, 10.84693, 10.70699, 10.63243, 10.15516, 10.26078, 10.15949, 9.83311]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [22727844.0, 23021590.0, 22500488.0, 22830910.0, 22739472.0, 22546526.0, 22955764.0, 22588942.0, 22658932.0, 22884080.0]}, "iteration_timing_avg": 0.1246464705882353} \ No newline at end of file diff --git 
a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false_vp-1.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false_vp-1.json new file mode 100644 index 0000000000..c051895065 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false_vp-1.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.7951, 10.84939, 10.87411, 10.83459, 10.82865, 10.78676, 10.56492, 10.57063, 10.48545, 10.19548]}, "num-zeros": {"start_step": 0, "end_step": 34, "step_interval": 5, "values": [2561.0, 2771.0, 2141.0, 2656.0, 2737.0, 2472.0, 2991.0]}, "iteration_timing_avg": 0.12433176470588231} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false_vp-1.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false_vp-1.json new file mode 100644 index 0000000000..3da54b9c18 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false_vp-1.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.82096, 10.87269, 10.88192, 10.79677, 10.68633, 10.59654, 10.09776, 10.21294, 10.13909, 9.80679]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1501.0, 1749.0, 1794.0, 1829.0, 1913.0, 1793.0, 1585.0, 1815.0, 2296.0, 2266.0]}, "iteration_timing_avg": 0.12502588235294115} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-dist-optimizer_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-dist-optimizer_mcore-true_te-false.json new file mode 100644 index 0000000000..1818cb41de --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-dist-optimizer_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79896, 10.8594, 10.87122, 10.79881, 10.71717, 10.6354, 10.19743, 10.30887, 10.2168, 9.90751]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [30665.0, 37001.0, 37644.0, 35953.0, 33382.0, 35191.0, 30525.0, 35253.0, 36653.0, 37931.0]}, "iteration_timing_avg": 0.2890776470588235} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-groupedgemm_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-groupedgemm_mcore-true_te-false.json new file mode 100644 index 0000000000..f45f321721 --- /dev/null +++ 
b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-groupedgemm_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.80961, 10.86075, 10.86755, 10.80331, 10.71906, 10.64746, 10.21053, 10.32037, 10.22013, 9.92387]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16604.0, 19509.0, 19801.0, 18644.0, 17084.0, 17721.0, 14980.0, 17754.0, 18357.0, 18520.0]}, "iteration_timing_avg": 0.19267441176470584} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-top2router_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-top2router_mcore-true_te-false.json new file mode 100644 index 0000000000..ade8011335 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-top2router_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.80682, 10.86708, 10.88001, 10.79339, 10.66648, 10.57654, 10.05866, 10.18464, 10.10235, 9.76286]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [13270.0, 16578.0, 17037.0, 16415.0, 15006.0, 15965.0, 14350.0, 17035.0, 17408.0, 18260.0]}, "iteration_timing_avg": 0.3051714705882352} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel_mcore-true_te-false.json new file mode 100644 index 0000000000..8f14311c51 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79896, 10.8601, 10.87152, 10.79856, 10.71624, 10.6355, 10.19683, 10.30917, 10.21632, 9.90782]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16152.0, 19202.0, 19645.0, 18594.0, 17375.0, 17768.0, 15576.0, 17888.0, 18387.0, 18810.0]}, "iteration_timing_avg": 0.29991823529411765} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-false.json new file mode 100644 index 0000000000..457294168c --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85543, 10.89355, 10.87608, 10.87365, 10.88042, 10.84182, 10.67177, 10.62853, 10.52511, 10.2523]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2472.0, 2462.0, 2480.0, 2235.0, 2268.0, 2619.0, 
2429.0]}, "iteration_timing_avg": 0.14061323529411762} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-true.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-true.json new file mode 100644 index 0000000000..ddd7132a35 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-true.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85632, 10.88791, 10.86527, 10.81439, 10.69842, 10.61079, 10.109, 10.21405, 10.12865, 9.80275]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1714.0, 1877.0, 1928.0, 1863.0, 1960.0, 1646.0, 1648.0, 2023.0, 2318.0, 2333.0]}, "iteration_timing_avg": 0.14203264705882354} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-true_te-false.json new file mode 100644 index 0000000000..e5c571448d --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.92392, 10.93645, 10.89657, 10.86919, 10.74782, 10.658, 10.15864, 10.24906, 10.15088, 9.83933]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1735.0, 1861.0, 2111.0, 1844.0, 1762.0, 1858.0, 1554.0, 2031.0, 2309.0, 2225.0]}, "iteration_timing_avg": 0.15396205882352942} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-false_te-false.json new file mode 100644 index 0000000000..5ead3b3cae --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85921, 10.8797, 10.87381, 10.88658, 10.88912, 10.84826, 10.68571, 10.62947, 10.54289, 10.26918]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2288.0, 2326.0, 2454.0, 2011.0, 2111.0, 2436.0, 2446.0]}, "iteration_timing_avg": 0.2084426470588236} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false.json new file mode 100644 index 0000000000..ef3ee44978 --- /dev/null +++ 
b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.86174, 10.88685, 10.8766, 10.83063, 10.71362, 10.60782, 10.13037, 10.2308, 10.15865, 9.83394]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1747.0, 2204.0, 2128.0, 2098.0, 2033.0, 1943.0, 1761.0, 2152.0, 2427.0, 2590.0]}, "iteration_timing_avg": 0.22043823529411763} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-false_te-false.json new file mode 100644 index 0000000000..9c4d0796ed --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85921, 10.8797, 10.87381, 10.88658, 10.88912, 10.84826, 10.68571, 10.62947, 10.54289, 10.26918]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2288.0, 2326.0, 2454.0, 2011.0, 2111.0, 2436.0, 2446.0]}, "iteration_timing_avg": 0.20483676470588236} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false.json new file mode 100644 index 0000000000..447f6efaf8 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.86174, 10.88685, 10.8766, 10.83063, 10.71362, 10.60782, 10.13037, 10.2308, 10.15865, 9.83394]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1747.0, 2204.0, 2128.0, 2098.0, 2033.0, 1943.0, 1761.0, 2152.0, 2427.0, 2590.0]}, "iteration_timing_avg": 0.2256223529411765} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/t5_220m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-1_mcore-true_te-true_vp-1.json b/tests/functional_tests/test_results/jet/t5_220m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-1_mcore-true_te-true_vp-1.json new file mode 100644 index 0000000000..e0b067d9f2 --- /dev/null +++ b/tests/functional_tests/test_results/jet/t5_220m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-1_mcore-true_te-true_vp-1.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [10.33235, 9.41913, 8.85861, 8.55638, 8.28439, 8.11201, 7.83824, 7.54562, 7.41436, 7.31027, 7.34805, 7.22802, 7.12902, 7.06142, 6.91137, 6.96105, 6.96531, 7.04832, 6.7364, 6.97504]}, "num-zeros": {"start_step": 
0, "end_step": 100, "step_interval": 5, "values": [43300.0, 40964.0, 44028.0, 41739.0, 44792.0, 43949.0, 41300.0, 42529.0, 44700.0, 43963.0, 41174.0, 43285.0, 39762.0, 45371.0, 43317.0, 43929.0, 45404.0, 45705.0, 46310.0, 44692.0]}, "iteration_timing_avg": 0.17640776119402987} \ No newline at end of file diff --git a/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_resume_checkpoint_test.sh b/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_resume_checkpoint_test.sh deleted file mode 100755 index 48dccc39d6..0000000000 --- a/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_resume_checkpoint_test.sh +++ /dev/null @@ -1,104 +0,0 @@ -#! /bin/bash - -echo "------ARGUMENTS LIST --------" -for ARGUMENT in "$@" -do - KEY=$(echo $ARGUMENT | cut -f1 -d=) - - KEY_LENGTH=${#KEY} - VALUE="${ARGUMENT:$KEY_LENGTH+1}" - - export "$KEY"="$VALUE" - echo "$KEY=$VALUE" -done -echo "---------------------------------" - -GPUS_PER_NODE=8 -# Change for multinode config -MASTER_ADDR=localhost -MASTER_PORT=6000 -NODE_RANK=0 -WORLD_SIZE=$(($GPUS_PER_NODE*$NUM_NODES)) -export CUDA_DEVICE_MAX_CONNECTIONS=1 - - -# Runs the "345M" parameter model -DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NUM_NODES" - -# Run for 100 iterations -torchrun $DISTRIBUTED_ARGS \ - pretrain_bert.py \ - --use-checkpoint-args \ - --use-checkpoint-opt_param-scheduler \ - --num-layers 24 \ - --hidden-size 1024 \ - --num-attention-heads 16 \ - --log-params-norm \ - --log-num-zeros-in-grad \ - --log-validation-ppl-to-tensorboard \ - --log-timers-to-tensorboard \ - --tensorboard-dir ${TENSORBOARD_DIR} \ - --micro-batch-size 4 \ - --global-batch-size 128 \ - --seq-length 512 \ - --max-position-embeddings 512 \ - --train-iters 100 \ - --timing-log-level 2 \ - --lr-decay-iters 990000 \ - --save $CHECKPOINT_PATH \ - --load $CHECKPOINT_PATH \ - --data-path $DATA_PATH \ - --vocab-file /workspace/data/bert_data/vocab.txt \ - --split 949,50,1 \ - --distributed-backend nccl \ - --lr 0.0001 \ - --min-lr 0.00001 \ - --lr-warmup-fraction 0.01 \ - --log-interval 1 \ - --save-interval 50 \ - --eval-interval 1000 \ - --eval-iters 10 \ - --tensor-model-parallel-size $TP_SIZE \ - --pipeline-model-parallel-size $PP_SIZE \ - --no-gradient-accumulation-fusion \ - --fp16 - -echo 50 > $CHECKPOINT_PATH/latest_checkpointed_iteration.txt - -# Resume from 50th iteration ckpt and continue to 100 iterations -torchrun $DISTRIBUTED_ARGS \ - pretrain_bert.py \ - --use-checkpoint-args \ - --use-checkpoint-opt_param-scheduler \ - --num-layers 24 \ - --hidden-size 1024 \ - --num-attention-heads 16 \ - --log-params-norm \ - --log-num-zeros-in-grad \ - --log-validation-ppl-to-tensorboard \ - --log-timers-to-tensorboard \ - --tensorboard-dir ${TENSORBOARD_DIR} \ - --micro-batch-size 4 \ - --global-batch-size 128 \ - --seq-length 512 \ - --max-position-embeddings 512 \ - --train-iters 100 \ - --timing-log-level 2 \ - --lr-decay-iters 990000 \ - --save $CHECKPOINT_PATH \ - --load $CHECKPOINT_PATH \ - --data-path $DATA_PATH \ - --vocab-file /workspace/data/bert_data/vocab.txt \ - --split 949,50,1 \ - --distributed-backend nccl \ - --lr 0.0001 \ - --min-lr 0.00001 \ - --lr-warmup-fraction 0.01 \ - --log-interval 1 \ - --save-interval 10000 \ - --eval-interval 1000 \ - --eval-iters 10 \ - --tensor-model-parallel-size $TP_SIZE \ - --pipeline-model-parallel-size $PP_SIZE \ - --no-gradient-accumulation-fusion \ - --fp16 \ No newline at end of file diff --git 
a/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_test.sh b/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_test.sh index 11f427276c..3dbfd683ec 100755 --- a/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_test.sh +++ b/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_test.sh @@ -12,9 +12,10 @@ do done echo "---------------------------------" -set -x +set -exo pipefail if [[ -z $MBS ]]; then MBS=4; fi if [[ -z $GBS ]]; then GBS=128; fi +if [[ -z $VOCAB_FILE ]]; then VOCAB_FILE="/workspace/data/bert_data/vocab.txt" ; fi # Change for multinode config GPUS_PER_NODE=8 @@ -34,7 +35,17 @@ if [[ $USE_CORE -eq 1 ]]; then command="$command export NVTE_ALLOW_NONDETERMINISTIC_ALGO=0;" USE_MCORE=1 fi - +if [[ $CHECKPOINT_RESUME_TEST -eq 1 ]]; then + echo "Running checkpoint resume test..." + __SAVE_INTERVAL=50 + ADDITIONAL_PARAMS+=" --use-checkpoint-args --use-checkpoint-opt_param-scheduler" + if [[ $MAX_STEPS -ne 100 ]]; then + echo "Overriding MAX_STEPS=100" + MAX_STEPS=100 + fi +else + __SAVE_INTERVAL=10000 # inf +fi # Runs the "345M" parameter model DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NUM_NODES" @@ -58,14 +69,14 @@ torch_run_cmd="torchrun $DISTRIBUTED_ARGS \ --save $CHECKPOINT_PATH \ --load $CHECKPOINT_PATH \ --data-path $DATA_PATH \ - --vocab-file /workspace/data/bert_data/vocab.txt \ + --vocab-file $VOCAB_FILE \ --split 949,50,1 \ --distributed-backend nccl \ --lr 0.0001 \ --min-lr 0.00001 \ --lr-warmup-fraction 0.01 \ --log-interval 1 \ - --save-interval 10000 \ + --save-interval $__SAVE_INTERVAL \ --eval-interval 1000 \ --eval-iters 10 \ --tensor-model-parallel-size $TP_SIZE \ @@ -74,6 +85,7 @@ torch_run_cmd="torchrun $DISTRIBUTED_ARGS \ ${USE_MCORE:+--use-mcore-models} \ ${ADDITIONAL_PARAMS:+$ADDITIONAL_PARAMS} \ --no-gradient-accumulation-fusion \ + ${DATA_CACHE:+--data-cache-path "$DATA_CACHE"} \ --${TRAINING_DTYPE}" if [[ "${TRAINING_DTYPE}" == "fp16" ]]; then @@ -81,6 +93,9 @@ if [[ "${TRAINING_DTYPE}" == "fp16" ]]; then fi command="$command $torch_run_cmd" +if [[ $CHECKPOINT_RESUME_TEST -eq 1 ]]; then + command="$command; rm -rf $CHECKPOINT_PATH/iter_0000100; echo 50 > $CHECKPOINT_PATH/latest_checkpointed_iteration.txt; $torch_run_cmd" +fi echo "-------------------- THE FINAL PRETRAIN SCRIPT COMMAND THAT WILL BE RUN ------------" echo "$command" echo "-----------------------------------------------------------------------------" diff --git a/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_resume_checkpoint_test.sh b/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_resume_checkpoint_test.sh deleted file mode 100755 index 83caf3f669..0000000000 --- a/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_resume_checkpoint_test.sh +++ /dev/null @@ -1,112 +0,0 @@ -#! 
/bin/bash -echo "------ARGUMENTS LIST --------" -for ARGUMENT in "$@" -do - KEY=$(echo $ARGUMENT | cut -f1 -d=) - - KEY_LENGTH=${#KEY} - VALUE="${ARGUMENT:$KEY_LENGTH+1}" - - export "$KEY"="$VALUE" - echo "$KEY=$VALUE" -done -echo "---------------------------------" - -GPUS_PER_NODE=8 -# Change for multinode config -MASTER_ADDR=localhost -MASTER_PORT=6000 -NODE_RANK=0 -WORLD_SIZE=$(($GPUS_PER_NODE*$NUM_NODES)) -export CUDA_DEVICE_MAX_CONNECTIONS=1 - - -# Runs the "345M" parameter model -DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NUM_NODES" - -# Run for 100 iterations and save checkpoint at 50 -torchrun $DISTRIBUTED_ARGS \ - pretrain_gpt.py \ - --use-checkpoint-args \ - --use-checkpoint-opt_param-scheduler \ - --num-layers 12 \ - --hidden-size 512 \ - --num-attention-heads 8 \ - --log-params-norm \ - --log-num-zeros-in-grad \ - --log-validation-ppl-to-tensorboard \ - --log-timers-to-tensorboard \ - --tensorboard-dir ${TENSORBOARD_DIR} \ - --micro-batch-size 4 \ - --global-batch-size 32 \ - --seq-length 1024 \ - --max-position-embeddings 1024 \ - --train-iters 100 \ - --timing-log-level 2 \ - --lr-decay-iters 320000 \ - --save $CHECKPOINT_PATH \ - --load $CHECKPOINT_PATH \ - --data-path $DATA_PATH \ - --vocab-file /workspace/data/gpt3_data/gpt2-vocab.json \ - --merge-file /workspace/data/gpt3_data/gpt2-merges.txt \ - --split 949,50,1 \ - --distributed-backend nccl \ - --lr 0.00015 \ - --lr-decay-style cosine \ - --min-lr 1.0e-5 \ - --weight-decay 1e-2 \ - --clip-grad 1.0 \ - --lr-warmup-fraction .01 \ - --log-interval 1 \ - --save-interval 50 \ - --eval-interval 1000 \ - --eval-iters 10 \ - --tensor-model-parallel-size $TP_SIZE \ - --pipeline-model-parallel-size $PP_SIZE \ - --no-gradient-accumulation-fusion \ - --fp16 - -echo 50 > $CHECKPOINT_PATH/latest_checkpointed_iteration.txt - -# Resume from 50th iteration ckpt and continue to 100 iterations -torchrun $DISTRIBUTED_ARGS \ - pretrain_gpt.py \ - --use-checkpoint-args \ - --use-checkpoint-opt_param-scheduler \ - --num-layers 12 \ - --hidden-size 512 \ - --num-attention-heads 8 \ - --log-params-norm \ - --log-num-zeros-in-grad \ - --log-validation-ppl-to-tensorboard \ - --log-timers-to-tensorboard \ - --tensorboard-dir ${TENSORBOARD_DIR} \ - --micro-batch-size 4 \ - --global-batch-size 32 \ - --seq-length 1024 \ - --max-position-embeddings 1024 \ - --train-iters 100 \ - --timing-log-level 2 \ - --lr-decay-iters 320000 \ - --save $CHECKPOINT_PATH \ - --load $CHECKPOINT_PATH \ - --data-path $DATA_PATH \ - --vocab-file /workspace/data/gpt3_data/gpt2-vocab.json \ - --merge-file /workspace/data/gpt3_data/gpt2-merges.txt \ - --split 949,50,1 \ - --distributed-backend nccl \ - --lr 0.00015 \ - --lr-decay-style cosine \ - --min-lr 1.0e-5 \ - --weight-decay 1e-2 \ - --clip-grad 1.0 \ - --lr-warmup-fraction .01 \ - --log-interval 1 \ - --save-interval 10000 \ - --eval-interval 1000 \ - --eval-iters 10 \ - --tensor-model-parallel-size $TP_SIZE \ - --pipeline-model-parallel-size $PP_SIZE \ - --no-gradient-accumulation-fusion \ - --fp16 - diff --git a/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh b/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh index 3cad97cc60..b6ef7f2ce5 100755 --- a/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh +++ b/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh @@ -12,9 +12,10 @@ do done echo "---------------------------------" -set -x +set -exo pipefail if [[ -z $MBS ]]; then MBS=4; fi if [[ -z $GBS 
]]; then GBS=32; fi +if [[ -z $MOE_GROUPED_GEMM ]]; then MOE_GROUPED_GEMM=0; fi if [[ -z $VOCAB_FILE ]]; then VOCAB_FILE="/workspace/data/gpt3_data/vocab.json" ; fi if [[ -z $MERGE_FILE ]]; then MERGE_FILE="/workspace/data/gpt3_data/merges.txt" ; fi @@ -38,13 +39,32 @@ if [[ $USE_CORE -eq 1 ]]; then USE_MCORE=1 fi +if [[ $MOE_GROUPED_GEMM -eq 1 ]]; then + echo "Running MoE with Grouped GEMM" + command="$command pip install git+https://github.com/fanshiqing/grouped_gemm@main;" + TRAINING_DTYPE=bf16 # Currently GroupedGEMM for MoE only supports bf16 dtype +fi + if [[ $USE_TE -eq 1 ]]; then echo "Running with TransformerEngine ..." TRANSFORMER_IMPL=transformer_engine TRAINING_DTYPE=bf16 + ADDITIONAL_PARAMS+=" --attention-softmax-in-fp32" else echo "Running with local transformer implementation ..." fi + +if [[ $CHECKPOINT_RESUME_TEST -eq 1 ]]; then + echo "Running checkpoint resume test..." + __SAVE_INTERVAL=50 + ADDITIONAL_PARAMS+=" --use-checkpoint-args --use-checkpoint-opt_param-scheduler" + if [[ $MAX_STEPS -ne 100 ]]; then + echo "Overriding MAX_STEPS=100" + MAX_STEPS=100 + fi +else + __SAVE_INTERVAL=10000 # inf +fi set +x # Runs the "345M" parameter model DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NUM_NODES" @@ -80,12 +100,14 @@ torch_run_cmd="torchrun $DISTRIBUTED_ARGS \ --clip-grad 1.0 \ --lr-warmup-fraction .01 \ --log-interval 1 \ - --save-interval 10000 \ + --save-interval $__SAVE_INTERVAL \ --eval-interval 1000 \ --eval-iters 10 \ --transformer-impl $TRANSFORMER_IMPL \ --tensor-model-parallel-size $TP_SIZE \ --pipeline-model-parallel-size $PP_SIZE \ + --no-bias-swiglu-fusion \ + --no-rope-fusion \ ${VP_SIZE:+--num-layers-per-virtual-pipeline-stage "$VP_SIZE"} \ ${ADDITIONAL_PARAMS:+$ADDITIONAL_PARAMS} \ ${USE_MCORE:+--use-mcore-models} \ @@ -98,6 +120,9 @@ if [[ "${TRAINING_DTYPE}" == "fp16" ]]; then fi command="$command $torch_run_cmd" +if [[ $CHECKPOINT_RESUME_TEST -eq 1 ]]; then + command="$command; rm -rf $CHECKPOINT_PATH/iter_0000100; echo 50 > $CHECKPOINT_PATH/latest_checkpointed_iteration.txt; $torch_run_cmd" +fi echo "-------------------- THE FINAL PRETRAIN SCRIPT COMMAND THAT WILL BE RUN ------------" echo "$command" echo "-----------------------------------------------------------------------------" diff --git a/tests/functional_tests/test_scripts/gpt3/sbatch_gpt3_distributed_test.sh b/tests/functional_tests/test_scripts/gpt3/sbatch_gpt3_distributed_test.sh index ba2a1b4b62..0319880575 100755 --- a/tests/functional_tests/test_scripts/gpt3/sbatch_gpt3_distributed_test.sh +++ b/tests/functional_tests/test_scripts/gpt3/sbatch_gpt3_distributed_test.sh @@ -16,4 +16,4 @@ echo 'Running tests using $PYTORCH_IMAGE image' srun --output $BASE_DIR/debug/slurm-%j.out --error $BASE_DIR/debug/slurm-%j.out --container-image $PYTORCH_IMAGE --container-mounts $BASE_DIR/tensorboard_logs:/workspace/tensorboard_logs,$BASE_DIR/debug:/workspace/debug,$BASE_DIR/checkpoints:/workspace/checkpoints,$BUILD_DIR:/workspace/megatron-lm,$DATA_DIR:/workspace/data --no-container-mount-home bash -c " ls cd /workspace/megatron-lm - ./tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh DATA_PATH=$DATA_PATH CHECKPOINT_PATH=$CHECKPOINT_PATH TENSORBOARD_DIR=$TENSORBOARD_DIR SCRIPTS_DIR=$SCRIPTS_DIR USE_TE=$USE_TE TP_SIZE=$TP_SIZE PP_SIZE=$PP_SIZE VP_SIZE=$VP_SIZE NUM_NODES=$NUM_NODES MAX_STEPS=$MAX_STEPS USE_CORE=$USE_CORE MBS=$MBS GBS=$GBS ADDITIONAL_PARAMS=\"$ADDITIONAL_PARAMS\"" + ./tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh 
DATA_PATH=$DATA_PATH CHECKPOINT_PATH=$CHECKPOINT_PATH TENSORBOARD_DIR=$TENSORBOARD_DIR SCRIPTS_DIR=$SCRIPTS_DIR USE_TE=$USE_TE TP_SIZE=$TP_SIZE PP_SIZE=$PP_SIZE VP_SIZE=$VP_SIZE NUM_NODES=$NUM_NODES MAX_STEPS=$MAX_STEPS USE_CORE=$USE_CORE MBS=$MBS GBS=$GBS MOE_GROUPED_GEMM=$MOE_GROUPED_GEMM ADDITIONAL_PARAMS=\"$ADDITIONAL_PARAMS\"" diff --git a/tests/functional_tests/test_scripts/retro/pretrain_retro_distributed_resume_checkpoint_test.sh b/tests/functional_tests/test_scripts/retro/pretrain_retro_distributed_resume_checkpoint_test.sh deleted file mode 100755 index c62fea1aad..0000000000 --- a/tests/functional_tests/test_scripts/retro/pretrain_retro_distributed_resume_checkpoint_test.sh +++ /dev/null @@ -1,127 +0,0 @@ -#! /bin/bash - -echo "------ARGUMENTS LIST --------" -for ARGUMENT in "$@" -do - KEY=$(echo $ARGUMENT | cut -f1 -d=) - - KEY_LENGTH=${#KEY} - VALUE="${ARGUMENT:$KEY_LENGTH+1}" - - export "$KEY"="$VALUE" - echo "$KEY=$VALUE" -done -echo "---------------------------------" - -set -x -if [[ -z $MBS ]]; then MBS=4; fi - -GPUS_PER_NODE=8 -# Change for multinode config -MASTER_ADDR=localhost -MASTER_PORT=6000 -NODE_RANK=0 -WORLD_SIZE=$(($GPUS_PER_NODE*$NUM_NODES)) -export CUDA_DEVICE_MAX_CONNECTIONS=1 - -TRANSFORMER_IMPL=local -TRAINING_DTYPE=bf16 - -if [[ $USE_CORE -eq 1 ]]; then - echo "Running using megatron core" - TRANSFORMER_IMPL=local - TRAINING_DTYPE=bf16 - command="$command export NVTE_ALLOW_NONDETERMINISTIC_ALGO=0;" - USE_MCORE=1 - export NVTE_ALLOW_NONDETERMINISTIC_ALGO=0 -fi - -if [[ $USE_TE -eq 1 ]]; then - echo "Running with TransformerEngine ..." - TRANSFORMER_IMPL=transformer_engine - TRAINING_DTYPE=bf16 -else - echo "Running with local transformer implementation ..." -fi -set +x - -# Runs the "345M" parameter model -DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NUM_NODES" - -# Arguments. 
-ARGS=" \ - --recompute-activations \ - --use-flash-attn \ - --apply-layernorm-1p \ - --untie-embeddings-and-output-weights \ - --disable-bias-linear \ - --no-position-embedding \ - --use-rotary-position-embeddings \ - --rotary-percent 0.5 \ - --swiglu \ - --attention-dropout 0.0 \ - --hidden-dropout 0.0 \ - --exit-duration-in-mins 220 \ - --tensor-model-parallel-size $TP_SIZE \ - --pipeline-model-parallel-size 1 \ - --num-layers 24 \ - --hidden-size 1024 \ - --num-attention-heads 16 \ - --seq-length 2048 \ - --max-position-embeddings 2048 \ - --micro-batch-size $MBS \ - --global-batch-size 256 \ - --train-samples 100000 \ - --lr-decay-samples 99000 \ - --lr-warmup-samples 1000 \ - --lr 2.5e-5 \ - --min-lr 2.5e-6 \ - --lr-decay-style cosine \ - --log-interval 5 \ - --eval-iters 100 \ - --eval-interval 2000 \ - --tokenizer-type GPT2BPETokenizer \ - --vocab-file /workspace/data/retro_data/vocab/gpt2-vocab.json \ - --merge-file /workspace/data/retro_data/vocab/gpt2-merges.txt \ - --data-path /workspace/data/retro_data/inputs/wiki-200k_text_document \ - --split 98,2,0 \ - --clip-grad 1.0 \ - --weight-decay 0.1 \ - --adam-beta1 0.9 \ - --adam-beta2 0.95 \ - --init-method-std 0.007 \ - --log-params-norm \ - --log-num-zeros-in-grad \ - --log-validation-ppl-to-tensorboard \ - --log-timers-to-tensorboard \ - --tensorboard-dir ${TENSORBOARD_DIR} \ - --save-interval 50 \ - --save $CHECKPOINT_PATH \ - --load $CHECKPOINT_PATH \ - --bf16 \ - --transformer-impl $TRANSFORMER_IMPL \ - --${TRAINING_DTYPE} \ - ${USE_MCORE:+--use-mcore-models} \ - ${ADDITIONAL_PARAMS:+$ADDITIONAL_PARAMS} \ - --retro-workdir /workspace/data/retro_data/neighbors - --retro-add-retriever \ - --num-workers 32 \ -" - -pip install h5py -pip install transformers -pip install faiss-gpu - -# Run for 100 iterations and save checkpoint at 50 -torchrun $DISTRIBUTED_ARGS \ - pretrain_retro.py \ - $ARGS \ - --exit-interval 100 - -echo 50 > $CHECKPOINT_PATH/latest_checkpointed_iteration.txt - -# Resume from 50th iteration ckpt and continue to 100 iterations -torchrun $DISTRIBUTED_ARGS \ - pretrain_retro.py \ - $ARGS \ - --exit-interval 50 diff --git a/tests/functional_tests/test_scripts/retro/pretrain_retro_distributed_test.sh b/tests/functional_tests/test_scripts/retro/pretrain_retro_distributed_test.sh index fe3271cb46..b06dc336f8 100755 --- a/tests/functional_tests/test_scripts/retro/pretrain_retro_distributed_test.sh +++ b/tests/functional_tests/test_scripts/retro/pretrain_retro_distributed_test.sh @@ -13,7 +13,7 @@ do done echo "---------------------------------" -set -x +set -exo pipefail if [[ -z $MBS ]]; then MBS=4; fi GPUS_PER_NODE=8 @@ -44,11 +44,23 @@ if [[ $USE_TE -eq 1 ]]; then else echo "Running with local transformer implementation ..." fi + +if [[ $CHECKPOINT_RESUME_TEST -eq 1 ]]; then + echo "Running checkpoint resume test..." 
+ __SAVE_INTERVAL=50 + if [[ $MAX_STEPS -ne 100 ]]; then + echo "Overriding MAX_STEPS=100" + MAX_STEPS=100 + fi +else + __SAVE_INTERVAL=10000 # inf +fi set +x # Runs the "345M" parameter model DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NUM_NODES" -ARGS=" \ +build_args() { + ARGS=" \ --exit-interval $MAX_STEPS \ \ --recompute-activations \ @@ -96,7 +108,7 @@ ARGS=" \ --log-validation-ppl-to-tensorboard \ --log-timers-to-tensorboard \ --tensorboard-dir ${TENSORBOARD_DIR} \ - --save-interval 10000 \ + --save-interval $__SAVE_INTERVAL \ --save $CHECKPOINT_PATH \ --load $CHECKPOINT_PATH \ --bf16 \ @@ -108,12 +120,23 @@ ARGS=" \ --retro-add-retriever \ --num-workers 32 \ " +} +build_args torch_run_cmd="torchrun $DISTRIBUTED_ARGS \ pretrain_retro.py \ ${ARGS}" command="$command $torch_run_cmd" + +if [[ $CHECKPOINT_RESUME_TEST -eq 1 ]]; then + MAX_STEPS=50 + build_args + torch_run_cmd="torchrun $DISTRIBUTED_ARGS \ + pretrain_retro.py \ + ${ARGS}" + command="$command; rm -rf $CHECKPOINT_PATH/iter_0000100; echo 50 > $CHECKPOINT_PATH/latest_checkpointed_iteration.txt; $torch_run_cmd" +fi echo "-------------------- THE FINAL PRETRAIN SCRIPT COMMAND THAT WILL BE RUN ------------" echo "$command" echo "-----------------------------------------------------------------------------" diff --git a/tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_resume_checkpoint_test.sh b/tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_resume_checkpoint_test.sh deleted file mode 100755 index fa4d62667a..0000000000 --- a/tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_resume_checkpoint_test.sh +++ /dev/null @@ -1,172 +0,0 @@ -#! /bin/bash -echo "------ARGUMENTS LIST --------" -for ARGUMENT in "$@" -do - KEY=$(echo $ARGUMENT | cut -f1 -d=) - - KEY_LENGTH=${#KEY} - VALUE="${ARGUMENT:$KEY_LENGTH+1}" - - export "$KEY"="$VALUE" - echo "$KEY=$VALUE" -done -echo "---------------------------------" - -set -x -if [[ -z $MBS ]]; then MBS=4; fi -if [[ -z $GBS ]]; then GBS=32; fi - -GPUS_PER_NODE=8 -# Change for multinode config -MASTER_ADDR=localhost -MASTER_PORT=6000 -NODE_RANK=0 -WORLD_SIZE=$(($GPUS_PER_NODE*$NUM_NODES)) - -command="export CUDA_DEVICE_MAX_CONNECTIONS=1;" - -TRANSFORMER_IMPL=local -TRAINING_DTYPE=fp16 - -if [[ $USE_CORE -eq 1 ]]; then - echo "Running using megatron core" - TRANSFORMER_IMPL=local - TRAINING_DTYPE=bf16 - command="$command export NVTE_ALLOW_NONDETERMINISTIC_ALGO=0;" - USE_MCORE=1 - export NVTE_ALLOW_NONDETERMINISTIC_ALGO=0 -fi - -if [[ $NO_FA -eq 1 ]]; then - echo "Turn off flash attention environment variable" - export NVTE_FLASH_ATTN=0 - export NVTE_FUSED_ATTN=0 -fi - -if [[ $USE_TE -eq 1 ]]; then - echo "Running with TransformerEngine ..." - TRANSFORMER_IMPL=transformer_engine - TRAINING_DTYPE=bf16 -else - echo "Running with local transformer implementation ..." 
-fi -set +x - -# install neccessary library -pip install pydantic==2.2.1 - -# Runs the "220M" parameter model -DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NUM_NODES" - -# Run for 100 iterations and save checkpoint at 50 -torch_run_cmd="torchrun $DISTRIBUTED_ARGS \ - pretrain_t5.py \ - --encoder-num-layers 12 \ - --decoder-num-layers 12 \ - --hidden-size 768 \ - --num-attention-heads 12 \ - --kv-channels 64 \ - --ffn-hidden-size 3072 \ - --encoder-seq-length 512 \ - --decoder-seq-length 128 \ - --max-position-embeddings 512 \ - --tensor-model-parallel-size $TP_SIZE \ - --pipeline-model-parallel-size $PP_SIZE \ - --micro-batch-size ${MBS:-4} \ - --global-batch-size ${GBS:-32} \ - --lr 0.0001 \ - --train-iters 100 \ - --lr-decay-iters $MAX_STEPS \ - --lr-decay-style linear \ - --min-lr 0.00001 \ - --weight-decay 1e-2 \ - --lr-warmup-fraction .01 \ - --clip-grad 1.0 \ - --${TRAINING_DTYPE} \ - --vocab-extra-ids 100 \ - --init-method-std 0.015 \ - --transformer-impl $TRANSFORMER_IMPL \ - --use-mcore-models \ - --data-path $DATA_PATH \ - --vocab-file $VOCAB_PATH \ - --tokenizer-type BertWordPieceCase \ - --split 99982,9,9 \ - --save $CHECKPOINT_PATH \ - --load $CHECKPOINT_PATH \ - --tensorboard-dir ${TENSORBOARD_DIR} \ - --log-params-norm \ - --log-num-zeros-in-grad \ - --log-validation-ppl-to-tensorboard \ - --log-timers-to-tensorboard \ - --timing-log-level 2 \ - --log-interval 1 \ - --save-interval 50 \ - --eval-interval 1000 \ - --eval-iters 10 \ - --distributed-backend nccl \ - ${ADDITIONAL_PARAMS:+$ADDITIONAL_PARAMS}" - -command1="$command $torch_run_cmd" -echo "-------------------- THE FINAL PRETRAIN SCRIPT COMMAND THAT WILL BE RUN ------------" -echo "$command1" -echo "-----------------------------------------------------------------------------" -echo "$command1" >> $SCRIPTS_DIR/pretrain_t5_distributed_command.sh -eval $command1 - -echo 50 > $CHECKPOINT_PATH/latest_checkpointed_iteration.txt - -# Resume from 50th iteration ckpt and continue to 100 iterations -torch_run_cmd="torchrun $DISTRIBUTED_ARGS \ - pretrain_t5.py \ - --encoder-num-layers 12 \ - --decoder-num-layers 12 \ - --hidden-size 768 \ - --num-attention-heads 12 \ - --kv-channels 64 \ - --ffn-hidden-size 3072 \ - --encoder-seq-length 512 \ - --decoder-seq-length 128 \ - --max-position-embeddings 512 \ - --tensor-model-parallel-size $TP_SIZE \ - --pipeline-model-parallel-size $PP_SIZE \ - --micro-batch-size ${MBS:-4} \ - --global-batch-size ${GBS:-32} \ - --lr 0.0001 \ - --train-iters 100 \ - --lr-decay-iters $MAX_STEPS \ - --lr-decay-style linear \ - --min-lr 0.00001 \ - --weight-decay 1e-2 \ - --lr-warmup-fraction .01 \ - --clip-grad 1.0 \ - --${TRAINING_DTYPE} \ - --vocab-extra-ids 100 \ - --init-method-std 0.015 \ - --transformer-impl $TRANSFORMER_IMPL \ - --use-mcore-models \ - --data-path $DATA_PATH \ - --vocab-file $VOCAB_PATH \ - --tokenizer-type BertWordPieceCase \ - --split 99982,9,9 \ - --save $CHECKPOINT_PATH \ - --load $CHECKPOINT_PATH \ - --tensorboard-dir ${TENSORBOARD_DIR} \ - --log-params-norm \ - --log-num-zeros-in-grad \ - --log-validation-ppl-to-tensorboard \ - --log-timers-to-tensorboard \ - --timing-log-level 2 \ - --log-interval 1 \ - --save-interval 50 \ - --eval-interval 1000 \ - --eval-iters 10 \ - --distributed-backend nccl \ - ${ADDITIONAL_PARAMS:+$ADDITIONAL_PARAMS}" - -command2="$command $torch_run_cmd" -echo "-------------------- THE FINAL PRETRAIN SCRIPT COMMAND THAT WILL BE RUN ------------" -echo "$command2" -echo 
"-----------------------------------------------------------------------------" - -echo "$command2" >> $SCRIPTS_DIR/pretrain_t5_distributed_command.sh -eval $command2 \ No newline at end of file diff --git a/tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_test.sh b/tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_test.sh index 90d78f4917..241d844839 100755 --- a/tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_test.sh +++ b/tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_test.sh @@ -12,9 +12,10 @@ do done echo "---------------------------------" -set -x +set -exo pipefail if [[ -z $MBS ]]; then MBS=4; fi if [[ -z $GBS ]]; then GBS=32; fi +if [[ -z $VOCAB_PATH ]]; then VOCAB_PATH="/workspace/data/t5_data/bert-large-cased-vocab.txt"; fi GPUS_PER_NODE=8 # Change for multinode config @@ -50,6 +51,17 @@ if [[ $USE_TE -eq 1 ]]; then else echo "Running with local transformer implementation ..." fi + +if [[ $CHECKPOINT_RESUME_TEST -eq 1 ]]; then + echo "Running checkpoint resume test..." + __SAVE_INTERVAL=50 + if [[ $MAX_STEPS -ne 100 ]]; then + echo "Overriding MAX_STEPS=100" + MAX_STEPS=100 + fi +else + __SAVE_INTERVAL=10000 # inf +fi set +x # install neccessary library @@ -99,16 +111,20 @@ torch_run_cmd="torchrun $DISTRIBUTED_ARGS \ --log-timers-to-tensorboard \ --timing-log-level 2 \ --log-interval 1 \ - --save-interval 5000 \ + --save-interval $__SAVE_INTERVAL \ --eval-interval 1000 \ --eval-iters 10 \ --distributed-backend nccl \ + ${DATA_CACHE:+--data-cache-path "$DATA_CACHE"} \ ${ADDITIONAL_PARAMS:+$ADDITIONAL_PARAMS}" command="$command $torch_run_cmd" +if [[ $CHECKPOINT_RESUME_TEST -eq 1 ]]; then + command="$command; rm -rf $CHECKPOINT_PATH/iter_0000100; echo 50 > $CHECKPOINT_PATH/latest_checkpointed_iteration.txt; $torch_run_cmd" +fi echo "-------------------- THE FINAL PRETRAIN SCRIPT COMMAND THAT WILL BE RUN ------------" echo "$command" echo "-----------------------------------------------------------------------------" echo "$command" > $SCRIPTS_DIR/pretrain_t5_distributed_command.sh -eval $command \ No newline at end of file +eval $command diff --git a/tests/unit_tests/data/__init__.py b/tests/unit_tests/data/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/unit_tests/data/test_builder.py b/tests/unit_tests/data/test_builder.py new file mode 100644 index 0000000000..1052c2fdb2 --- /dev/null +++ b/tests/unit_tests/data/test_builder.py @@ -0,0 +1,165 @@ +## +# Compile megatron.core.datasets.helpers dependencies before BlendedDataset import +## + +import torch + +from megatron.core.datasets.utils import compile_helpers +from tests.unit_tests.test_utilities import Utils + +if torch.distributed.is_available(): + Utils.initialize_distributed() + if torch.distributed.get_rank() == 0: + compile_helpers() + torch.distributed.barrier() +else: + compile_helpers() + +## +# Done +## + +import os +import tempfile +from collections import defaultdict +from typing import Dict + +import numpy +import torch + +from megatron.core.datasets.blended_dataset import BlendedDataset +from megatron.core.datasets.blended_megatron_dataset_builder import BlendedMegatronDatasetBuilder +from megatron.core.datasets.blended_megatron_dataset_config import BlendedMegatronDatasetConfig +from megatron.core.datasets.megatron_dataset import LowLevelDataset, MegatronDataset +from megatron.core.datasets.utils import Split + + +_NUM_DATASETS = 10 + +_SEQUENCE_LENGTH = 10 + +_SIZES_PER_SPLIT = { + Split.train: 900, + Split.valid: 90, + 
Split.test: 10, +} + + +def do_setup(odir): + paths = defaultdict(list) + + for i in range(_NUM_DATASETS): + path_to_data = os.path.join(odir, str(i)) + os.mkdir(path_to_data) + + for split in _SIZES_PER_SPLIT: + data = numpy.zeros((_SIZES_PER_SPLIT[split], _SEQUENCE_LENGTH)) + path = os.path.join(path_to_data, f"{split.name}.npy") + numpy.save(path, data) + paths[split].append(path) + + return paths + + +def test_builder(): + + # Define the class here to avoid pytest warnings + + class TestDataset(MegatronDataset): + def _finalize(self) -> None: + self.sample_index = numpy.random.choice(self.indices, size=self.num_samples) + + @staticmethod + def numel_low_level_dataset(low_level_dataset: LowLevelDataset) -> int: + return len(low_level_dataset) + + @staticmethod + def build_low_level_dataset( + dataset_path: str, config: BlendedMegatronDatasetConfig + ) -> LowLevelDataset: + return numpy.load(dataset_path) + + def __len__(self) -> int: + return len(self.sample_index) + + def __getitem__(self, idx: int) -> Dict[str, numpy.ndarray]: + return {"text": self.dataset[self.sample_index[idx]]} + + with tempfile.TemporaryDirectory() as temp_dir: + + paths = do_setup(temp_dir) + + blends = { + split: [ + weight_or_path + for pair in zip(list(range(len(paths[split]))), paths[split]) + for weight_or_path in pair + ] + for split in Split + } + + # one dataset, one split AND multiple datasets, one split + config = BlendedMegatronDatasetConfig( + is_built_on_rank=lambda: True, + random_seed=1234, + sequence_length=_SEQUENCE_LENGTH, + blend_per_split=[[paths[Split.train][0]], blends[Split.valid], None,], + ) + datasets = BlendedMegatronDatasetBuilder(TestDataset, [100, 100, 100], config).build() + assert len(datasets[0]) == 100 and isinstance(datasets[0], TestDataset) + assert len(datasets[1]) >= 100 and isinstance(datasets[1], BlendedDataset) + assert datasets[2] is None + + # blend_per_split, all splits + config = BlendedMegatronDatasetConfig( + is_built_on_rank=lambda: True, + random_seed=1234, + sequence_length=_SEQUENCE_LENGTH, + blend_per_split=[blends[Split.train], blends[Split.valid], blends[Split.test],], + ) + datasets = BlendedMegatronDatasetBuilder(TestDataset, [100, 100, 100], config).build() + assert len(datasets[0]) >= 100 + assert len(datasets[1]) >= 100 + assert len(datasets[2]) >= 100 + + # blend_per_split, one split + config = BlendedMegatronDatasetConfig( + is_built_on_rank=lambda: True, + random_seed=1234, + sequence_length=_SEQUENCE_LENGTH, + blend_per_split=[blends[Split.train], None, None,], + ) + datasets = BlendedMegatronDatasetBuilder(TestDataset, [100, 100, 100], config).build() + assert len(datasets[0]) >= 100 + assert datasets[1] is None + assert datasets[2] is None + + # blend, 90,9,1 split + config = BlendedMegatronDatasetConfig( + is_built_on_rank=lambda: True, + random_seed=1234, + sequence_length=_SEQUENCE_LENGTH, + blend=blends[Split.train], + split="90,9,1", + ) + datasets = BlendedMegatronDatasetBuilder(TestDataset, [100, 100, 100], config).build() + assert len(datasets[0]) >= 100 + assert len(datasets[1]) >= 100 + assert len(datasets[2]) >= 100 + + # blend, 100,0,0 split + config = BlendedMegatronDatasetConfig( + is_built_on_rank=lambda: True, + random_seed=1234, + sequence_length=_SEQUENCE_LENGTH, + blend=blends[Split.train], + split="100,0,0", + ) + datasets = BlendedMegatronDatasetBuilder(TestDataset, [100, 100, 100], config).build() + assert len(datasets[0]) >= 100 + assert datasets[1] is None + assert datasets[2] is None + + +if __name__ == "__main__": + 
test_builder() diff --git a/tests/unit_tests/data/test_mock_gpt_dataset.py b/tests/unit_tests/data/test_mock_gpt_dataset.py new file mode 100644 index 0000000000..4c91569d22 --- /dev/null +++ b/tests/unit_tests/data/test_mock_gpt_dataset.py @@ -0,0 +1,54 @@ +import random +import sys +from types import SimpleNamespace + +import numpy + +from megatron.core.datasets.blended_megatron_dataset_builder import BlendedMegatronDatasetBuilder +from megatron.core.datasets.gpt_dataset import GPTDatasetConfig, MockGPTDataset + + +def sample_N(dataset, N, randomize): + if randomize: + indices = [random.randint(0, sys.maxsize) for _ in range(N)] + else: + indices = list(range(N)) + samples = [dataset[index]["tokens"].numpy() for index in indices] + return samples + + +def test_builder_mock_data(): + config = GPTDatasetConfig( + is_built_on_rank=lambda: True, + random_seed=1234, + sequence_length=1024, + mock=True, + reset_position_ids=True, + reset_attention_mask=True, + eod_mask_loss=True, + tokenizer=SimpleNamespace(), + ) + + datasets = BlendedMegatronDatasetBuilder(MockGPTDataset, [None, None, None], config).build() + + N = 10 + + # Check iso-index split variance + subsets = [sample_N(dataset, N, randomize=False) for dataset in datasets] + assert not numpy.allclose(subsets[0], subsets[1]) + assert not numpy.allclose(subsets[0], subsets[2]) + assert not numpy.allclose(subsets[1], subsets[2]) + + # Check iso-split / iso-index identity + subset_1A = sample_N(datasets[0], N, randomize=False) + subset_1B = sample_N(datasets[0], N, randomize=False) + assert numpy.allclose(subset_1A, subset_1B) + + # Check iso-split index variance + subset_1A = sample_N(datasets[0], N, randomize=True) + subset_1B = sample_N(datasets[0], N, randomize=True) + assert not numpy.allclose(subset_1A, subset_1B) + + +if __name__ == "__main__": + test_builder_mock_data() diff --git a/tests/unit_tests/data/test_multimodal_dataset.py b/tests/unit_tests/data/test_multimodal_dataset.py new file mode 100644 index 0000000000..70c6fbf63c --- /dev/null +++ b/tests/unit_tests/data/test_multimodal_dataset.py @@ -0,0 +1,33 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. 
+ +from types import SimpleNamespace + +import torch + +from megatron.core.datasets.blended_megatron_dataset_builder import BlendedMegatronDatasetBuilder +from megatron.core.datasets.multimodal_dataset import MockMultimodalDataset, MultimodalDatasetConfig + + +def test_mock_multimodal_dataset(): + config = MultimodalDatasetConfig( + is_built_on_rank=lambda: True, + random_seed=1234, + sequence_length=1024, + mock=True, + reset_position_ids=False, + reset_attention_mask=False, + eod_mask_loss=True, + tokenizer=SimpleNamespace(), + image_h=336, + image_w=336, + ) + + datasets = BlendedMegatronDatasetBuilder( + MockMultimodalDataset, [None, None, None], config + ).build() + + for ds in datasets: + sample = ds[0] + assert "image" in sample + assert sample["image"].shape == torch.Size([3, 336, 336]) + assert "tokens" in sample diff --git a/tests/unit_tests/data/test_preprocess_data.py b/tests/unit_tests/data/test_preprocess_data.py index 63dba573fc..06e2be1f4e 100644 --- a/tests/unit_tests/data/test_preprocess_data.py +++ b/tests/unit_tests/data/test_preprocess_data.py @@ -22,6 +22,12 @@ "https://huggingface.co/bert-base-uncased/raw/main/vocab.txt" ) +__LOCAL_BERT_VOCAB = "/home/gitlab-runner/data/bert_data/vocab.txt" + +__LOCAL_GPT2_MERGE = "/home/gitlab-runner/data/gpt3_data/gpt2-merges.txt" + +__LOCAL_GPT2_VOCAB = "/home/gitlab-runner/data/gpt3_data/gpt2-vocab.json" + def dummy_jsonl(odir): # numbers @@ -92,7 +98,7 @@ def tokens_to_string(toks): return getattr(encoder.tokenizer, option)(toks) except: continue - raise RuntimeError(f"{type(encoder.tokenizer)} tokenizer cannot `decode` or `detokenize`.") + raise RuntimeError(f"{type(encoder.tokenizer)} tokenizer cannot decode or detokenize") merged_index = 0 merged_dataset = MMapIndexedDataset(os.path.join(path_to_data, "merge")) @@ -161,6 +167,8 @@ def tokens_to_string(toks): def gpt2_vocab(odir): + if os.path.exists(__LOCAL_GPT2_VOCAB): + return __LOCAL_GPT2_VOCAB path = os.path.join(odir, "vocab.json") with open(path, "wb") as writer: writer.write(requests.get(PRETRAINED_VOCAB_ARCHIVE_MAP['gpt2']).content) @@ -168,6 +176,8 @@ def gpt2_vocab(odir): def gpt2_merge(odir): + if os.path.exists(__LOCAL_GPT2_MERGE): + return __LOCAL_GPT2_MERGE path = os.path.join(odir, "merge.txt") with open(path, "wb") as writer: writer.write(requests.get(PRETRAINED_MERGES_ARCHIVE_MAP['gpt2']).content) @@ -196,6 +206,8 @@ def test_preprocess_data_gpt(): def bert_vocab(odir): + if os.path.exists(__LOCAL_BERT_VOCAB): + return __LOCAL_BERT_VOCAB path = os.path.join(odir, "vocab.txt") with open(path, "wb") as writer: writer.write(requests.get(__HUGGINGFACE_BERT_BASE_UNCASED_VOCAB).content) diff --git a/tests/unit_tests/data/test_preprocess_mmdata.py b/tests/unit_tests/data/test_preprocess_mmdata.py index 34cd441827..08975a3889 100644 --- a/tests/unit_tests/data/test_preprocess_mmdata.py +++ b/tests/unit_tests/data/test_preprocess_mmdata.py @@ -9,7 +9,7 @@ import numpy from megatron.core.datasets.indexed_dataset import MMapIndexedDataset -from tests.unit_tests.data.test_preprocess_data import dummy_jsonl, gpt2_vocab, gpt2_merge +from tests.unit_tests.data.test_preprocess_data import dummy_jsonl, gpt2_merge, gpt2_vocab from tools.merge_datasets import main as merge_main from tools.preprocess_mmdata import Encoder from tools.preprocess_mmdata import get_args as build_args @@ -22,9 +22,11 @@ def dummy_img(odir_txt, odir_img): length = sum(1 for _ in reader_txt) os.makedirs(os.path.join(odir_img, os.path.splitext(name)[0]), exist_ok=False) for i in range(length): - with 
open(os.path.join(odir_img, os.path.splitext(name)[0], f"{str(i).zfill(4)}.img"), "wb") as writer_img: + with open( + os.path.join(odir_img, os.path.splitext(name)[0], f"{str(i).zfill(4)}.img"), "wb" + ) as writer_img: # 32 * 32 - 1 to induce preprocessing 0-index padding - writer_img.write(bytes([random.randint(0 , 255) for _ in range(32 * 32 - 1)])) + writer_img.write(bytes([random.randint(0, 255) for _ in range(32 * 32 - 1)])) def build_datasets(idir_txt, idir_img, odir, extra_args=[]): @@ -42,7 +44,14 @@ def build_datasets(idir_txt, idir_img, odir, extra_args=[]): def merge_datasets(idir): - sys.argv = [sys.argv[0], "--input", idir, "--output-prefix", os.path.join(idir, "merge"), "--multimodal"] + sys.argv = [ + sys.argv[0], + "--input", + idir, + "--output-prefix", + os.path.join(idir, "merge"), + "--multimodal", + ] merge_main() @@ -72,7 +81,15 @@ def do_test_preprocess_mmdata(temp_dir, extra_args=[]): # merge the datasets merge_datasets(path_to_data) - sys.argv = [sys.argv[0], "--input", None, "--input-image", None, "--output-prefix", None,] + extra_args + sys.argv = [ + sys.argv[0], + "--input", + None, + "--input-image", + None, + "--output-prefix", + None, + ] + extra_args encoder = Encoder(build_args()) encoder.initializer() @@ -119,7 +136,13 @@ def tokens_to_string(toks): merged_doc_index_index += len(dataset.document_indices) - 1 with open(realpath_raw_txt, "rt") as reader: - for json_line, image_path in zip(reader, [os.path.join(realpath_raw_img, basename) for basename in os.listdir(realpath_raw_img)]): + for json_line, image_path in zip( + reader, + [ + os.path.join(realpath_raw_img, basename) + for basename in os.listdir(realpath_raw_img) + ], + ): toks, image, length = encoder.encode((json_line, image_path)) raw_text = tokens_to_string(toks) @@ -133,14 +156,14 @@ def tokens_to_string(toks): processed_image = dataset[dataset_index + 1][0] assert dataset[dataset_index + 1][1] == 1 # reverse to account for preprocessing 0-index padding - processed_image = processed_image[::-1][0:raw_image.size] + processed_image = processed_image[::-1][0 : raw_image.size] assert ( raw_text == processed_text ), f"ERROR: {basename.split('_')[:-2]}: raw and processed documents (text) do not match" - assert ( - numpy.allclose(raw_image, processed_image) + assert numpy.allclose( + raw_image, processed_image ), f"ERROR: {basename.split('_')[:-2]}: raw and processed documents (image) do not match" dataset_index += 2 @@ -152,14 +175,14 @@ def tokens_to_string(toks): merged_image = merged_dataset[merged_index + 1][0] assert merged_dataset[merged_index + 1][1] == 1 # reverse to account for preprocessing 0-index padding - merged_image = merged_image[::-1][0:raw_image.size] + merged_image = merged_image[::-1][0 : raw_image.size] assert ( raw_text == merged_text ), f"ERROR: {basename.split('_')[:-2]}: raw and merged documents (text) do not match" - assert ( - numpy.allclose(raw_image, merged_image) + assert numpy.allclose( + raw_image, merged_image ), f"ERROR: {basename.split('_')[:-2]}: raw and merged documents (image) do not match" merged_index += 2 diff --git a/tests/unit_tests/dist_checkpointing/models/__init__.py b/tests/unit_tests/dist_checkpointing/models/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/unit_tests/dist_checkpointing/models/test_gpt_model.py b/tests/unit_tests/dist_checkpointing/models/test_gpt_model.py index 742171f950..6547d44339 100644 --- a/tests/unit_tests/dist_checkpointing/models/test_gpt_model.py +++ 
b/tests/unit_tests/dist_checkpointing/models/test_gpt_model.py @@ -14,10 +14,10 @@ from tests.unit_tests.test_utilities import Utils from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed from megatron.core.models.gpt.gpt_layer_specs import \ - get_gpt_layer_with_transformer_engine_spec, get_gpt_layer_local_spec + get_gpt_layer_with_transformer_engine_spec as gpt_te_spec, get_gpt_layer_local_spec as gpt_local_spec -def initialize_gpt_model(seed, use_te=True, **config_kwargs): +def initialize_gpt_model(seed, layer_spec_fn=gpt_te_spec, **config_kwargs): torch.manual_seed(seed) model_parallel_cuda_manual_seed(seed) @@ -26,8 +26,7 @@ def initialize_gpt_model(seed, use_te=True, **config_kwargs): transformer_config = TransformerConfig(**default_config_kwargs) pre_process = ps.is_pipeline_first_stage() post_process = ps.is_pipeline_last_stage() - layer_spec = get_gpt_layer_with_transformer_engine_spec() if use_te else get_gpt_layer_local_spec() - model = GPTModel(config=transformer_config, transformer_layer_spec=layer_spec, vocab_size=128, max_sequence_length=4, + model = GPTModel(config=transformer_config, transformer_layer_spec=layer_spec_fn(), vocab_size=128, max_sequence_length=4, pre_process=pre_process, post_process=post_process) with torch.no_grad(): @@ -37,58 +36,72 @@ def initialize_gpt_model(seed, use_te=True, **config_kwargs): class TestGPTModel: - - def setup_method(self, method): + @pytest.mark.parametrize('src_layer_spec_fn', [gpt_te_spec, gpt_local_spec]) + @pytest.mark.parametrize('dst_layer_spec_fn', [gpt_te_spec, gpt_local_spec]) + def test_sharded_state_dict_save_load(self, tmp_path_dist_ckpt, + src_layer_spec_fn, dst_layer_spec_fn): Utils.initialize_model_parallel(2,4) - - def teardown_method(self, method): - Utils.destroy_model_parallel() - - @pytest.mark.parametrize('use_te', [True]) # non-TE not supported yet - def test_sharded_state_dict_save_load(self, use_te, tmp_path_dist_ckpt): - gpt_model = initialize_gpt_model(use_te) + gpt_model = initialize_gpt_model(1, src_layer_spec_fn) with TempNamedDir(tmp_path_dist_ckpt / 'test_gpt_model') as ckpt_dir: # Save sharded_state_dict = gpt_model.sharded_state_dict() save(sharded_state_dict, ckpt_dir) # Load + gpt_model = initialize_gpt_model(2, dst_layer_spec_fn) sharded_state_dict = gpt_model.sharded_state_dict() state_dict = load(sharded_state_dict, ckpt_dir) gpt_model.load_state_dict(state_dict) + Utils.destroy_model_parallel() class TestGPTModelReconfiguration: - @pytest.mark.parametrize("src_tp_pp,dest_tp_pp", [ - ((2, 4), (4, 2)), - ((1, 8), (8, 1)), - ((2, 1), (1, 8)), - ((1, 1), (2, 2)), + @pytest.mark.parametrize("src_tp_pp,dest_tp_pp,src_layer_spec_fn,dst_layer_spec_fn", [ + ((2, 4), (4, 2), gpt_te_spec, gpt_te_spec), + ((1, 8), (8, 1), gpt_te_spec, gpt_te_spec), + ((2, 1), (1, 8), gpt_te_spec, gpt_te_spec), + ((1, 1), (2, 2), gpt_te_spec, gpt_te_spec), + ((2, 1), (1, 8), gpt_local_spec, gpt_local_spec), + ((1, 1), (2, 4), gpt_te_spec, gpt_local_spec), + ((1, 8), (2, 1), gpt_local_spec, gpt_te_spec), ]) - def test_parallel_reconfiguration_e2e(self, tmp_path_dist_ckpt, src_tp_pp, dest_tp_pp): + def test_parallel_reconfiguration_e2e(self, tmp_path_dist_ckpt, src_tp_pp, dest_tp_pp, + src_layer_spec_fn, dst_layer_spec_fn): """ Test model saving and loading with different TP/PP """ with TempNamedDir(tmp_path_dist_ckpt / 'test_gpt_model_reconfiguration_model_A') as ckpt_dir_A, \ TempNamedDir(tmp_path_dist_ckpt / 'test_gpt_model_reconfiguration_model_B') as ckpt_dir_B: # Save checkpoint A 
Utils.initialize_model_parallel(*src_tp_pp) - gpt_model_A = initialize_gpt_model(1) + gpt_model_A = initialize_gpt_model(1, src_layer_spec_fn) save(gpt_model_A.sharded_state_dict(), ckpt_dir_A) + regular_state_dict_A = gpt_model_A.state_dict() Utils.destroy_model_parallel() # Load checkpoint A with different TP/PP and save as checkpoint B Utils.initialize_model_parallel(*dest_tp_pp) - gpt_model_B = initialize_gpt_model(2) + gpt_model_B = initialize_gpt_model(2, dst_layer_spec_fn) state_dict = load(gpt_model_B.sharded_state_dict(), ckpt_dir_A) gpt_model_B.load_state_dict(state_dict) save(gpt_model_B.sharded_state_dict(), ckpt_dir_B) + regular_state_dict_B = gpt_model_A.state_dict() Utils.destroy_model_parallel() # Test both checkpoints are equal Utils.initialize_model_parallel(1, 1) - state_dict_A = load_plain_tensors(ckpt_dir_A) - state_dict_B = load_plain_tensors(ckpt_dir_B) - diffs = diff(state_dict_A, state_dict_B) + plain_state_dict_A = load_plain_tensors(ckpt_dir_A) + plain_state_dict_B = load_plain_tensors(ckpt_dir_B) + diffs = diff(plain_state_dict_A, plain_state_dict_B) + assert not any(map(bool, diffs)), diffs + + # Test both regular state dicts are equal, turning FP8 states to bytes first + regular_state_dict_A = {k: v for k, v in regular_state_dict_A.items() + if not k.endswith('_extra_state')} + regular_state_dict_B = {k: v for k, v in regular_state_dict_B.items() + if not k.endswith('_extra_state')} + diffs = diff(regular_state_dict_A, regular_state_dict_B) assert not any(map(bool, diffs)), diffs + Utils.destroy_model_parallel() + def test_state_dict_comparison(self, tmp_path_dist_ckpt): Utils.initialize_model_parallel(2, 4) diff --git a/tests/unit_tests/dist_checkpointing/models/test_sequential_mlp.py b/tests/unit_tests/dist_checkpointing/models/test_sequential_mlp.py new file mode 100644 index 0000000000..663c2bc418 --- /dev/null +++ b/tests/unit_tests/dist_checkpointing/models/test_sequential_mlp.py @@ -0,0 +1,84 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. 
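+# Unit tests for distributed checkpointing of SequentialMLP expert weights: a checkpoint saved
+# under one tensor/pipeline/expert-parallel layout must load and match when restored under another;
+# both GLU and non-GLU expert configurations are covered below.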
+ +import pytest +import torch + +from megatron.core import parallel_state +from megatron.core.dist_checkpointing import save, load, load_plain_tensors +from megatron.core.dist_checkpointing.dict_utils import diff +from megatron.core.models.gpt.gpt_layer_specs import \ + get_gpt_layer_with_transformer_engine_spec +from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed +from megatron.core.transformer.moe.experts import SequentialMLP +from megatron.core.transformer.transformer_config import TransformerConfig +from tests.unit_tests.dist_checkpointing import TempNamedDir +from tests.unit_tests.test_utilities import Utils + + +def initialize_sequential_mlp(seed, glu=True, **config_kwargs): + torch.manual_seed(seed) + model_parallel_cuda_manual_seed(seed) + + pp_size = parallel_state.get_pipeline_model_parallel_world_size() + num_moe_experts = 8 + num_local_experts = num_moe_experts // parallel_state.get_expert_model_parallel_world_size() + default_config_kwargs = dict(num_layers=pp_size, hidden_size=12, num_attention_heads=4, num_moe_experts=num_moe_experts, use_cpu_initialization=True, + gated_linear_unit=glu) + default_config_kwargs.update(**config_kwargs) + transformer_config = TransformerConfig(**default_config_kwargs) + transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec(num_experts=num_moe_experts, moe_grouped_gemm=False) + model = SequentialMLP(num_local_experts, + transformer_config, + transformer_layer_spec.submodules.mlp.submodules) + return model + + +def get_pp_offsets(): + pp_rank = parallel_state.get_pipeline_model_parallel_rank() + pp_size = parallel_state.get_pipeline_model_parallel_world_size() + return ((0, pp_rank, pp_size),) + + +class TestSequentialMLPReconfiguration: + @pytest.mark.parametrize("src_tp_pp_exp,dest_tp_pp_exp,use_glu", [ + # changing PP is impossible because the number of layers must be the same + ((2, 4, 1), (2, 4, 1), False), + ((1, 1, 1), (1, 1, 1), False), + ((1, 1, 1), (1, 1, 4), False), + ((1, 1, 8), (1, 1, 2), False), + ((2, 2, 2), (4, 2, 1), False), + ((1, 1, 4), (8, 1, 1), False), + ((1, 8, 1), (1, 8, 1), False), + ((1, 1, 4), (2, 1, 1), False), + ((1, 1, 1), (1, 1, 1), True), + ((1, 1, 1), (1, 1, 4), True), + ((1, 1, 1), (2, 1, 1), True), + ((1, 1, 4), (8, 1, 1), True), + ]) + def test_parallel_reconfiguration_e2e(self, tmp_path_dist_ckpt, src_tp_pp_exp, dest_tp_pp_exp, use_glu): + """ Test model saving and loading with different TP/PP/expert parallelism """ + src_tp, src_pp, src_exp = src_tp_pp_exp + dest_tp, dest_pp, dest_exp = dest_tp_pp_exp + with TempNamedDir(tmp_path_dist_ckpt / 'test_sequential_mlp_reconfiguration_model_A') as ckpt_dir_A, \ + TempNamedDir(tmp_path_dist_ckpt / 'test_sequential_mlp_reconfiguration_model_B') as ckpt_dir_B: + # Save checkpoint A + Utils.initialize_model_parallel(src_tp, src_pp, expert_model_parallel_size=src_exp) + model_A = initialize_sequential_mlp(1, use_glu) + sharded_state_dict = model_A.sharded_state_dict(sharded_offsets=get_pp_offsets()) + save(sharded_state_dict, ckpt_dir_A) + Utils.destroy_model_parallel() + + # Load checkpoint A with different TP/PP/expert and save as checkpoint B + Utils.initialize_model_parallel(dest_tp, dest_pp, expert_model_parallel_size=dest_exp) + model_B = initialize_sequential_mlp(2, use_glu) + state_dict = load(model_B.sharded_state_dict(sharded_offsets=get_pp_offsets()), ckpt_dir_A) + model_B.load_state_dict(state_dict) + save(model_B.sharded_state_dict(sharded_offsets=get_pp_offsets()), ckpt_dir_B) + Utils.destroy_model_parallel() + + 
# Test both checkpoints are equal + Utils.initialize_model_parallel(1, 1) + state_dict_A = load_plain_tensors(ckpt_dir_A) + state_dict_B = load_plain_tensors(ckpt_dir_B) + diffs = diff(state_dict_A, state_dict_B) + assert not any(map(bool, diffs)), diffs \ No newline at end of file diff --git a/tests/unit_tests/dist_checkpointing/test_mapping.py b/tests/unit_tests/dist_checkpointing/test_mapping.py index 5e55669828..fcd742ee65 100644 --- a/tests/unit_tests/dist_checkpointing/test_mapping.py +++ b/tests/unit_tests/dist_checkpointing/test_mapping.py @@ -38,10 +38,10 @@ def test_from_rank_offsets_constructor(self, dtype=torch.float, device='cuda'): class TestShardedTensorFactory: def test_build_and_merge(self): - def build_fn(key, tensor): + def build_fn(key, tensor, replica_id): return { - 'level2_a': ShardedTensor.from_rank_offsets(key + 'part1', tensor + 1), - 'level2_b': ShardedTensor.from_rank_offsets(key + 'part2', tensor + 2) + 'level2_a': ShardedTensor.from_rank_offsets(key + 'part1', tensor + 1, replica_id=replica_id), + 'level2_b': ShardedTensor.from_rank_offsets(key + 'part2', tensor + 2, replica_id=replica_id) } # state_dict will be modified in-place diff --git a/tests/unit_tests/dist_checkpointing/test_serialization.py b/tests/unit_tests/dist_checkpointing/test_serialization.py index fef536fd89..233215d56a 100644 --- a/tests/unit_tests/dist_checkpointing/test_serialization.py +++ b/tests/unit_tests/dist_checkpointing/test_serialization.py @@ -27,6 +27,7 @@ def test_single_process_save_load(self, tmp_path_dist_ckpt): with TempNamedDir(tmp_path_dist_ckpt / 'test_single_process_save_load') as ckpt_dir: save(sharded_state_dict, ckpt_dir) + torch.distributed.barrier() assert (ckpt_dir / 'keyA').is_dir() assert (ckpt_dir / 'keyB').is_dir() @@ -161,6 +162,7 @@ def test_load_tensors_metadata(self, tmp_path_dist_ckpt): with TempNamedDir(tmp_path_dist_ckpt / 'test_load_tensors_metadata') as ckpt_dir: save(state_dict, ckpt_dir) + torch.distributed.barrier() assert (ckpt_dir / 'keyA').is_dir() del state_dict @@ -190,11 +192,11 @@ def test_load_tensors_metadata(self, tmp_path_dist_ckpt): def test_can_mix_sharded_tensors_and_factories(self, tmp_path_dist_ckpt): Utils.initialize_model_parallel(1, 1) - def _build_fn(key, tensor): + def _build_fn(key, tensor, replica_id): return [ - ShardedTensor.from_rank_offsets(key + 'part1', tensor, replica_id=Utils.rank), - ShardedTensor.from_rank_offsets(key + 'part2', tensor, replica_id=Utils.rank), - ShardedTensor.from_rank_offsets(key + 'part3', tensor, replica_id=Utils.rank), + ShardedTensor.from_rank_offsets(key + 'part1', tensor, replica_id=replica_id), + ShardedTensor.from_rank_offsets(key + 'part2', tensor, replica_id=replica_id), + ShardedTensor.from_rank_offsets(key + 'part3', tensor, replica_id=replica_id), ] # state dict can be modified by dist_checkpointing.save, so two copies @@ -203,7 +205,7 @@ def get_sharded_state_dict(base=0): ShardedTensor.from_rank_offsets('A', torch.arange(2) + base, replica_id=Utils.rank), ShardedTensor.from_rank_offsets('B', torch.arange(3) + base, replica_id=Utils.rank), ShardedTensor.from_rank_offsets('C', torch.arange(4) + base, replica_id=Utils.rank), - ShardedTensorFactory('D', torch.arange(5) + base, _build_fn, sum), + ShardedTensorFactory('D', torch.arange(5) + base, _build_fn, sum, replica_id=Utils.rank), ]} with TempNamedDir(tmp_path_dist_ckpt / 'test_can_mix_sharded_tensors_and_factories') as ckpt_dir: diff --git a/tests/unit_tests/fusions/test_torch_softmax.py 
b/tests/unit_tests/fusions/test_torch_softmax.py new file mode 100644 index 0000000000..e09c08936c --- /dev/null +++ b/tests/unit_tests/fusions/test_torch_softmax.py @@ -0,0 +1,44 @@ +import pytest +import torch + +from megatron.core.fusions.fused_softmax import FusedScaleMaskSoftmax +from megatron.core.transformer.enums import AttnMaskType +from megatron.core.transformer.utils import attention_mask_func + + +class TestTorchSoftmax: + def setup_method(self, method): + # The important settings tested are forward_torch_softmax path + # with locally generated casual mask for attention_mask_func: + self.softmax = FusedScaleMaskSoftmax( + input_in_fp16=False, + input_in_bf16=False, + attn_mask_type=AttnMaskType.causal, + scaled_masked_softmax_fusion=False, + mask_func=attention_mask_func, + softmax_in_fp32=True, + scale=None, + ) + + def test_output_shape(self): + x = torch.randn(8, 2, 4, 4, device="cuda") + y = self.softmax(x, None) + assert x.shape == y.shape + + def test_causal_mask_input_shape_assert(self): + x = torch.randn(1, 1, 4, 16, device="cuda") + with pytest.raises(AssertionError): + self.softmax(x, None) + + def test_causal_mask_equal_scores(self): + # For equal input values (e.g. zero) correctly masked softmax should + # produce equal scores among non-masked elements. For example, in case + # sq == sk == 2 the expected output is (ignoring b and np dimensions): + # [[1.0, 0.0], + # [0.5, 0.5]] + b, np, sq, sk = 8, 2, 32, 32 + x = torch.zeros([b, np, sq, sk]).cuda() + y = self.softmax(x, None) + y_expected = torch.tril(torch.ones(b, np, sq, sk, device="cuda")) + y_expected /= torch.arange(1, sq + 1, device="cuda").reshape((-1, 1)) + assert torch.allclose(y, y_expected, rtol=1e-08, atol=1e-08) diff --git a/tests/unit_tests/models/test_bert_model.py b/tests/unit_tests/models/test_bert_model.py index 00c1becc91..e1d01557dd 100644 --- a/tests/unit_tests/models/test_bert_model.py +++ b/tests/unit_tests/models/test_bert_model.py @@ -3,6 +3,7 @@ import pytest import torch +import os from megatron.core.transformer.transformer_config import TransformerConfig from megatron.core.models.bert.bert_model import BertModel @@ -13,6 +14,7 @@ class TestBertModel: def setup_method(self, method): + os.environ['NVTE_ALLOW_NONDETERMINISTIC_ALGO'] = '0' #Bert does not support flash attention Utils.initialize_model_parallel(1,1) model_parallel_cuda_manual_seed(123) transformer_config = TransformerConfig(num_layers=2, hidden_size=12, num_attention_heads=4, use_cpu_initialization=True, perform_initialization=True) diff --git a/tests/unit_tests/models/test_clip_vit_model.py b/tests/unit_tests/models/test_clip_vit_model.py new file mode 100644 index 0000000000..3c15684fb4 --- /dev/null +++ b/tests/unit_tests/models/test_clip_vit_model.py @@ -0,0 +1,55 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. 
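+# Unit tests for CLIPViTModel: parameter count, set_input_tensor plumbing into the transformer,
+# forward output shape for 336x336 input images, and a plain torch.save/torch.load round trip.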
+ +import pytest +import torch + +from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec +from megatron.core.models.vision.clip_vit_model import CLIPViTModel +from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed +from megatron.core.transformer.transformer_config import TransformerConfig +from tests.unit_tests.test_utilities import Utils + + +class TestCLIPViTModel: + """Test CLIP ViT model.""" + + def setup_method(self, method): + Utils.initialize_model_parallel(1, 1) + model_parallel_cuda_manual_seed(123) + transformer_config = TransformerConfig( + num_layers=2, hidden_size=64, num_attention_heads=4, use_cpu_initialization=True + ) + transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec() + self.model = CLIPViTModel(transformer_config, transformer_layer_spec) + + def teardown_method(self, method): + Utils.destroy_model_parallel() + + def test_constructor(self): + assert isinstance(self.model, CLIPViTModel) + + num_weights = sum([p.numel() for p in self.model.parameters()]) + assert num_weights == 174848 + + def test_set_input_tensor(self): + # [s, b, h] expected to the transformer. + expected_shape = (577, 2, 64) + input_tensor = torch.zeros(expected_shape) + + self.model.set_input_tensor(input_tensor) + + assert self.model.transformer.input_tensor.shape == torch.Size(expected_shape) + + def test_forward(self): + self.model.cuda() + + img = torch.zeros((2, 3, 336, 336)).cuda() + + out = self.model.forward(img) + assert out.shape == torch.Size([2, 577, 64]) + + def test_save_load(self, tmp_path): + path = tmp_path / "model.pt" + torch.save(self.model.state_dict(), path) + + self.model.load_state_dict(torch.load(path)) diff --git a/tests/unit_tests/models/test_llava_model.py b/tests/unit_tests/models/test_llava_model.py new file mode 100644 index 0000000000..4f947ba681 --- /dev/null +++ b/tests/unit_tests/models/test_llava_model.py @@ -0,0 +1,71 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. 
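+# Unit tests for LLaVAModel: combined vision+language parameter count, routing of set_input_tensor
+# to the vision backbone, forward passes with and without labels, and a state_dict save/load round trip.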
+ +import pytest +import torch + +from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec +from megatron.core.models.multimodal.llava_model import LLaVAModel +from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed +from megatron.core.transformer.transformer_config import TransformerConfig +from tests.unit_tests.test_utilities import Utils + + +class TestLLaVAModel: + def setup_method(self, method): + Utils.initialize_model_parallel(1, 1) + model_parallel_cuda_manual_seed(123) + language_config = TransformerConfig( + num_layers=3, hidden_size=128, num_attention_heads=8, use_cpu_initialization=True + ) + vision_config = TransformerConfig( + num_layers=2, hidden_size=64, num_attention_heads=4, use_cpu_initialization=True + ) + layer_spec = get_gpt_layer_with_transformer_engine_spec() + self.model = LLaVAModel( + language_transformer_config=language_config, + language_transformer_layer_spec=layer_spec, + vocab_size=2048, + max_sequence_length=1024, + vision_transformer_config=vision_config, + vision_transformer_layer_spec=layer_spec, + ) + + def teardown_method(self, method): + Utils.destroy_model_parallel() + + def test_constructor(self): + assert isinstance(self.model, LLaVAModel) + + num_weights = sum([p.numel() for p in self.model.parameters()]) + assert num_weights == 1433472 + + def test_set_input_tensor(self): + expected_shape = (1, 2, 3, 4) + input_tensor = torch.zeros(expected_shape) + self.model.set_input_tensor(input_tensor) + assert self.model.vision_model.transformer.input_tensor.shape == expected_shape + + def test_forward(self): + self.model.cuda() + + img = torch.randn((2, 3, 336, 336)).cuda() + input_ids = torch.randint(0, 2048, (2, 1024)).cuda() + position_ids = torch.arange(0, 1024, dtype=torch.int).cuda() + position_ids = position_ids.expand(2, 1024) + # With default image and patch sizes of 336 and 14, respectively, and a class token, the combined sequence length is 1024 + (336/14) ** 2 + 1 = 1601. + attention_mask = torch.tril(torch.ones((2, 1, 1601, 1601))).cuda() + attention_mask = attention_mask < 0.5 + labels = torch.randint(0, 2048, (2, 1601)).cuda() + + # Try with and without labels. 
+ loss = self.model.forward(img, input_ids, position_ids, attention_mask, labels) + assert loss.shape == torch.Size((2, 1601)) + + logits = self.model.forward(img, input_ids, position_ids, attention_mask, labels=None) + assert logits.shape == torch.Size((2, 1601, 2048)) + + def test_save_load(self, tmp_path): + path = tmp_path / "model.pt" + torch.save(self.model.state_dict(), path) + + self.model.load_state_dict(torch.load(path)) diff --git a/tests/unit_tests/test_utilities.py b/tests/unit_tests/test_utilities.py index b35c77b58d..a9d9fe5175 100644 --- a/tests/unit_tests/test_utilities.py +++ b/tests/unit_tests/test_utilities.py @@ -9,13 +9,14 @@ class Utils: @staticmethod def initialize_distributed(): - print(f'Initializing torch.distributed with rank: {Utils.rank}, world_size: {Utils.world_size}') - torch.cuda.set_device(Utils.rank % torch.cuda.device_count()) - init_method = 'tcp://' - master_ip = os.getenv('MASTER_ADDR', 'localhost') - master_port = os.getenv('MASTER_PORT', '6000') - init_method += master_ip + ':' + master_port - torch.distributed.init_process_group(backend='nccl', world_size=Utils.world_size, rank=Utils.rank, init_method=init_method) + if not torch.distributed.is_initialized(): + print(f'Initializing torch.distributed with rank: {Utils.rank}, world_size: {Utils.world_size}') + torch.cuda.set_device(Utils.rank % torch.cuda.device_count()) + init_method = 'tcp://' + master_ip = os.getenv('MASTER_ADDR', 'localhost') + master_port = os.getenv('MASTER_PORT', '6000') + init_method += master_ip + ':' + master_port + torch.distributed.init_process_group(backend='nccl', world_size=Utils.world_size, rank=Utils.rank, init_method=init_method) @staticmethod def destroy_model_parallel(): @@ -23,8 +24,7 @@ def destroy_model_parallel(): torch.distributed.barrier() @staticmethod - def initialize_model_parallel(tensor_model_parallel_size = 1, pipeline_model_parallel_size = 1, virtual_pipeline_model_parallel_size = None, pipeline_model_parallel_split_rank = None): + def initialize_model_parallel(tensor_model_parallel_size = 1, pipeline_model_parallel_size = 1, virtual_pipeline_model_parallel_size = None, pipeline_model_parallel_split_rank = None, **kwargs): ps.destroy_model_parallel() - if not torch.distributed.is_initialized(): - Utils.initialize_distributed() - ps.initialize_model_parallel(tensor_model_parallel_size, pipeline_model_parallel_size, virtual_pipeline_model_parallel_size, pipeline_model_parallel_split_rank) \ No newline at end of file + Utils.initialize_distributed() + ps.initialize_model_parallel(tensor_model_parallel_size, pipeline_model_parallel_size, virtual_pipeline_model_parallel_size, pipeline_model_parallel_split_rank, **kwargs) \ No newline at end of file diff --git a/tests/unit_tests/transformer/moe/__init__.py b/tests/unit_tests/transformer/moe/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/unit_tests/transformer/moe/test_grouped_mlp.py b/tests/unit_tests/transformer/moe/test_grouped_mlp.py new file mode 100644 index 0000000000..e443272db8 --- /dev/null +++ b/tests/unit_tests/transformer/moe/test_grouped_mlp.py @@ -0,0 +1,178 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. 
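+# Unit tests comparing the grouped-GEMM MoE path against the sequential per-expert GEMM path:
+# identical parameter counts, per-expert weight views, GPU forward, and the no-tokens-allocated edge case.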
+ +import pytest + +import torch +import torch.nn.functional as F + +from megatron.arguments import parse_args +from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec +from megatron.core.transformer.moe import grouped_gemm_util as gg +from megatron.core.transformer.moe.moe_layer import MoELayer +from megatron.core.transformer.transformer_config import TransformerConfig +from megatron.initialize import _set_random_seed +from megatron.model import Float16Module +from tests.unit_tests.test_utilities import Utils + +DEVICE_CAPABILITY = None +if torch.cuda.is_available(): + DEVICE_CAPABILITY = torch.cuda.get_device_capability() + + +class TestParallelGroupedMLP: + + def setup_method(self, method, use_cpu_initialization=False, swiglu=True): + print("============") + print("Test for use_cpu_initilization={} and swiglu={}.".format(use_cpu_initialization, swiglu)) + print("============") + Utils.initialize_model_parallel(1,1) + num_layers = 1 # 2 + self.hidden_size = 2 # 12 + self.num_experts = 2 + self.gated_linear_unit = True + self.use_cpu_initialization = use_cpu_initialization + self.gated_linear_unit = False + if swiglu: + self.gated_linear_unit = True + + tf_config = TransformerConfig( + num_layers=num_layers, hidden_size=self.hidden_size, num_attention_heads=4, + num_moe_experts=self.num_experts, use_cpu_initialization=self.use_cpu_initialization, + add_bias_linear=False, gated_linear_unit=self.gated_linear_unit, + bias_activation_fusion=False, + bf16=True, params_dtype=torch.bfloat16, moe_router_load_balancing_type="sinkhorn", moe_router_topk=1) + + self.fc1_ffn_hidden_size = tf_config.ffn_hidden_size + self.fc2_ffn_hidden_size = tf_config.ffn_hidden_size + # If using swiglu double the output width, see https://arxiv.org/pdf/2002.05202.pdf + if self.gated_linear_unit: + self.fc1_ffn_hidden_size *= 2 + + ## Vanilla sequential GEMM + # Set random seed for reproducability + _set_random_seed(seed_=123, data_parallel_random_init=False) + transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec( + self.num_experts, moe_grouped_gemm=False) + self.sequential_mlp = MoELayer(tf_config, + transformer_layer_spec.submodules.mlp.submodules) + + self.args = parse_args(ignore_unknown_args=True) + self.args.bf16=True + # Bias is not supported in grouped gemm currently, thus we disable the + # bias in the linear layer. + self.args.add_bias_linear=False + self.sequential_mlp = Float16Module(self.sequential_mlp, self.args).module + print("done intializing for sequential gemm") + + ## Grouped GEMM + _set_random_seed(seed_=123, data_parallel_random_init=False) + tf_config.moe_grouped_gemm = True + self.grouped_mlp = MoELayer(tf_config) + self.grouped_mlp = Float16Module(self.grouped_mlp, self.args).module + print("done intializing for grouped gemm") + + def teardown_method(self, method): + Utils.destroy_model_parallel() + + def test_constructor(self): + assert isinstance(self.sequential_mlp, MoELayer) + assert isinstance(self.grouped_mlp, MoELayer) + + num_weights_smm = sum([p.numel() for p in self.sequential_mlp.parameters()]) + num_weights_gmm = sum([p.numel() for p in self.grouped_mlp.parameters()]) + + # For the same hyper-parm model configs except the `moe_grouped_gemm`, + # GroupedGEMM and sequential GEMMs should hold the same number of parms. 
+ assert num_weights_smm == num_weights_gmm + # expected num weights: router linear weights+bias + MLP weights(no bias) of all experts + expected_num_weights = \ + self.hidden_size * self.num_experts + \ + self.hidden_size * (self.fc1_ffn_hidden_size + self.fc2_ffn_hidden_size) * self.num_experts + assert num_weights_smm == expected_num_weights + + assert torch.equal(self.sequential_mlp.router.weight, self.grouped_mlp.router.weight) + + # weight1: [h, num_experts*4h] + # weight2: [num_experts*4h, h] + assert self.grouped_mlp.experts.weight1.shape[0] == self.hidden_size + assert self.grouped_mlp.experts.weight1.shape[1] == self.num_experts * self.fc1_ffn_hidden_size + if self.gated_linear_unit: + assert self.grouped_mlp.experts.weight2.shape[0] == self.num_experts * self.fc2_ffn_hidden_size + assert self.grouped_mlp.experts.weight2.shape[1] == self.hidden_size + else: + assert self.grouped_mlp.experts.weight1.shape == self.grouped_mlp.experts.weight2.t().shape + + def test_weight_init_value_the_same(self): + gmm_w1 = self.grouped_mlp.experts.weight1.view(self.num_experts, -1, self.hidden_size) + gmm_w2 = self.grouped_mlp.experts.weight2.view(self.num_experts, self.hidden_size, -1) + gmm_expert1_fc1 = gmm_w1[0] + gmm_expert1_fc2 = gmm_w2[0] + gmm_expert2_fc1 = gmm_w1[1] + gmm_expert2_fc2 = gmm_w2[1] + + smm_expert1_fc1 = self.sequential_mlp.experts.local_experts[0].linear_fc1.weight + smm_expert1_fc2 = self.sequential_mlp.experts.local_experts[0].linear_fc2.weight + smm_expert2_fc1 = self.sequential_mlp.experts.local_experts[1].linear_fc1.weight + smm_expert2_fc2 = self.sequential_mlp.experts.local_experts[1].linear_fc2.weight + + assert torch.equal(gmm_expert1_fc1, smm_expert1_fc1) + if not self.use_cpu_initialization: + assert torch.equal(gmm_expert1_fc2, smm_expert1_fc2) + # the param init value is not exactly the same between gmm and smm (refer to test_weight_init_value_the_same.) + # TODO: is it necessary to keep smm and gmm share exactly the same init params? + # assert torch.equal(gmm_expert2_fc1, smm_expert2_fc1) + if self.use_cpu_initialization: + assert torch.equal(gmm_expert2_fc2, smm_expert2_fc2) + + @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") + @pytest.mark.skipif( + not DEVICE_CAPABILITY or DEVICE_CAPABILITY[0] < 8, reason='GroupedGEMM kernels are not supported on this device.' + ) + def test_gpu_forward(self): + self.sequential_mlp.cuda() + self.grouped_mlp.cuda() + # [sequence length, batch size, hidden size] + seq_len = 3 #32 + batch_size = 2 + hidden_states = torch.rand( + (seq_len, batch_size, self.sequential_mlp.config.hidden_size), + dtype=torch.bfloat16) + hidden_states = hidden_states.cuda() + output_smm, _ = self.sequential_mlp(hidden_states) + output_gmm, _ = self.grouped_mlp(hidden_states) + + # The following assert fails due to the param init value is not exactly + # the same between gmm and smm (refer to test_weight_init_value_the_same.) + # assert torch.equal(output_smm, output_gmm) + + @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") + @pytest.mark.skipif( + not DEVICE_CAPABILITY or DEVICE_CAPABILITY[0] < 8, reason='GroupedGEMM kernels are not supported on this device.' 
+ ) + def test_gpu_forward_with_no_tokens_allocated(self): + """Test the case when no token is allocated for groupedGEMM kernels.""" + w1 = self.grouped_mlp.experts.weight1.view(self.num_experts, -1, self.hidden_size) + num_allocated_tokens = 0 + tokens_per_expert = torch.zeros(self.num_experts) + hidden_states = torch.rand((num_allocated_tokens, self.hidden_size), dtype=torch.bfloat16) + hidden_states = hidden_states.cuda() + try: + gg.ops.gmm(hidden_states, w1, tokens_per_expert, trans_b=False) + except Exception as e: + print("Expected error message from groupedGEMM:", e) + assert str(e) == "Input batch_sizes should not be all zeros!" + + +if __name__ == "__main__": + for use_cpu_unitilization in [True, False]: + for swiglu in [True, False]: + GMLP_test = TestParallelGroupedMLP() + GMLP_test.setup_method( + method=None, + use_cpu_initialization=use_cpu_unitilization, + swiglu=swiglu) + GMLP_test.test_constructor() + GMLP_test.test_weight_init_value_the_same() + GMLP_test.test_gpu_forward() + GMLP_test.test_gpu_forward_with_no_tokens_allocated() + GMLP_test.teardown_method(method=None) diff --git a/tests/unit_tests/transformer/moe/test_routers.py b/tests/unit_tests/transformer/moe/test_routers.py new file mode 100644 index 0000000000..f1db99f371 --- /dev/null +++ b/tests/unit_tests/transformer/moe/test_routers.py @@ -0,0 +1,86 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + +import pytest + +import torch + +from megatron.core.transformer.moe.router import Router +from megatron.initialize import _set_random_seed +from tests.unit_tests.test_utilities import Utils +from megatron.core.transformer.transformer_config import TransformerConfig +from megatron.core.transformer.moe.moe_layer import MoELayer +from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec + + +class TestTop2Router: + def setup_method(self, method): + Utils.initialize_model_parallel(1, 1) + _set_random_seed(seed_=123, data_parallel_random_init=False) + print("done intializing") + num_moe_experts = 4 + self.transformer_config = TransformerConfig( + num_layers=2, + hidden_size=12, + num_attention_heads=4, + num_moe_experts=num_moe_experts, + use_cpu_initialization=True, + moe_router_load_balancing_type="aux_loss", + moe_router_topk=2, + moe_aux_loss_coeff=0, + ) + transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec( + num_experts=num_moe_experts, moe_grouped_gemm=False + ) + self.sequential_mlp = MoELayer( + self.transformer_config, transformer_layer_spec.submodules.mlp.submodules + ) + self.router = self.sequential_mlp.router + + def teardown_method(self, method): + Utils.destroy_model_parallel() + + def test_constructor(self): + assert isinstance(self.router, Router) + + num_weights = sum([p.numel() for p in self.router.parameters()]) + assert num_weights == 12 * 4, num_weights + + @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") + def test_router_forward(self): + with torch.no_grad(): + self.router = self.router.cuda() + # [num tokens, hidden size] + hidden_states = torch.randn((32, 2, self.router.config.hidden_size)) + hidden_states = hidden_states.cuda() + scores, indices = self.router(hidden_states) + print(scores.shape, indices.shape) + assert scores.shape == (64, 2) + assert indices.shape == (64, 2) + print( + (indices == 0).sum(), (indices == 1).sum(), (indices == 2).sum(), (indices == 3).sum() + ) + + @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") + def test_aux_loss(self): + 
self.sequential_mlp = self.sequential_mlp.cuda() + + # Without aux loss + hidden_states = torch.randn((32, 2, self.router.config.hidden_size)) + hidden_states = hidden_states.cuda() + out = self.sequential_mlp(hidden_states)[0] + out.sum().mul_(0).backward() + assert self.sequential_mlp.router.weight.grad.abs().sum() == 0 + + # With aux loss + self.transformer_config.moe_aux_loss_coeff = 1 + out = self.sequential_mlp(hidden_states)[0] + out.sum().mul_(0).backward() + assert self.sequential_mlp.router.weight.grad.abs().sum() > 0 + + # With Z loss + self.transformer_config.moe_aux_loss_coeff = 0 + self.transformer_config.moe_z_loss_coeff = 1 + self.sequential_mlp.router.weight.grad.fill_(0) + out = self.sequential_mlp(hidden_states)[0] + out.sum().mul_(0).backward() + assert self.sequential_mlp.router.weight.grad.abs().sum() > 0 \ No newline at end of file diff --git a/tests/unit_tests/transformer/moe/test_sequential_mlp.py b/tests/unit_tests/transformer/moe/test_sequential_mlp.py new file mode 100644 index 0000000000..0ebb85333e --- /dev/null +++ b/tests/unit_tests/transformer/moe/test_sequential_mlp.py @@ -0,0 +1,61 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + +import pytest + +import torch + +from megatron.core.transformer.moe.moe_layer import MoELayer +from tests.unit_tests.test_utilities import Utils +from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed +from megatron.core.transformer.transformer_config import TransformerConfig +from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec + +class TestParallelSequentialMLP: + + def setup_method(self, method): + Utils.initialize_model_parallel(1,1) + model_parallel_cuda_manual_seed(123) + print("done intializing") + num_moe_experts = 2 + transformer_config = TransformerConfig( + num_layers=2, + hidden_size=12, + num_attention_heads=4, + num_moe_experts=num_moe_experts, + use_cpu_initialization=True, + activation_func=torch.nn.functional.silu, + gated_linear_unit=True, + bias_activation_fusion=True, + moe_router_load_balancing_type="sinkhorn", + moe_router_topk=1 + ) + transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec( + num_experts=num_moe_experts, moe_grouped_gemm=False) + self.sequential_mlp = MoELayer(transformer_config, transformer_layer_spec.submodules.mlp.submodules) + + def teardown_method(self, method): + Utils.destroy_model_parallel() + + def test_constructor(self): + assert isinstance(self.sequential_mlp, MoELayer) + + num_weights = sum([p.numel() for p in self.sequential_mlp.parameters()]) + assert num_weights == 3696 + + + @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") + def test_gpu_forward(self): + sequential_mlp = self.sequential_mlp + sequential_mlp.cuda() + # [sequence length, batch size, hidden size] + hidden_states = torch.ones((32, 2, sequential_mlp.config.hidden_size)) + hidden_states = hidden_states.cuda() + output, output_bias = sequential_mlp(hidden_states) + assert output.shape[0] == 32 + assert output.shape[1] == 2 + assert output.shape[2] == sequential_mlp.config.hidden_size + assert output_bias.shape[2] == sequential_mlp.config.hidden_size + assert output.dtype == torch.float32 + assert output.device.type == 'cuda' + assert output_bias.device.type == 'cuda' + diff --git a/tests/unit_tests/transformer/moe/test_token_dispatcher.py b/tests/unit_tests/transformer/moe/test_token_dispatcher.py new file mode 100644 index 0000000000..ec067a41fb --- /dev/null +++ 
b/tests/unit_tests/transformer/moe/test_token_dispatcher.py @@ -0,0 +1,69 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + +import pytest + +import torch + +from megatron.core.transformer.moe.router import Router, TopKRouter +from megatron.core.transformer.moe.token_dispatcher import MoEDroplessTokenDispatcher +from megatron.initialize import _set_random_seed +from tests.unit_tests.test_utilities import Utils +from megatron.core.transformer.transformer_config import TransformerConfig + + +class TestDroplessDispatcher: + def setup_method(self, method): + Utils.initialize_model_parallel(1, 1) + _set_random_seed(seed_=123, data_parallel_random_init=False) + print("done intializing") + num_moe_experts = 4 + transformer_config = TransformerConfig( + num_layers=2, + hidden_size=12, + num_attention_heads=4, + num_moe_experts=num_moe_experts, + use_cpu_initialization=True, + moe_router_load_balancing_type="aux_loss", + moe_router_topk=2, + ) + self.router = TopKRouter( + config=transformer_config, + ) + self.token_dispatcher = MoEDroplessTokenDispatcher( + num_moe_experts, range(num_moe_experts), config=transformer_config + ) + + def teardown_method(self, method): + Utils.destroy_model_parallel() + + @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") + def test_gpu_forward(self): + self.router = self.router.cuda() + # [bs, seql, hidden size] + hidden_states = torch.randn((32, 8, self.router.config.hidden_size)) + hidden_states = hidden_states.cuda() + scores, indices = self.router(hidden_states) + assert scores.shape == (256, 2), "Scores shape is not correct" + assert indices.shape == (256, 2), "Indices shape is not correct" + print( + (indices == 0).sum(), (indices == 1).sum(), (indices == 2).sum(), (indices == 3).sum() + ) + ( + permuted_local_hidden_states, + tokens_per_expert, + local_probs, + revert_indices, + global_local_map, + ) = self.token_dispatcher.token_permutation(hidden_states, scores, indices) + probs = torch.ones_like(local_probs) / 2 + restored_hidden_states, restored_bias = self.token_dispatcher.token_unpermutation( + permuted_local_hidden_states, + probs, + revert_indices, + global_local_map, + bias=torch.zeros_like(permuted_local_hidden_states), + ) + + assert torch.allclose( + restored_hidden_states, hidden_states + ), "Restored hidden states do not match original hidden states" diff --git a/tests/unit_tests/transformer/test_attention.py b/tests/unit_tests/transformer/test_attention.py index 7fac9d3eda..4a5680ea05 100644 --- a/tests/unit_tests/transformer/test_attention.py +++ b/tests/unit_tests/transformer/test_attention.py @@ -57,6 +57,30 @@ def test_gpu_forward(self): assert output.shape[2] == config.hidden_size assert bias.shape[0] == config.hidden_size + def test_fused_rope_gpu_forward(self): + self.parallel_attention.config.apply_rope_fusion = True + config = self.parallel_attention.config + sequence_length = 32 + micro_batch_size = 2 + + self.parallel_attention.cuda() + + # [sequence length, batch size, hidden size] + hidden_states = torch.ones((sequence_length, micro_batch_size, self.parallel_attention.config.hidden_size)) + hidden_states = hidden_states.cuda() + + attention_mask = torch.ones((1, 1, sequence_length, sequence_length), dtype=bool).cuda() + rotary_pos_emb = torch.ones(sequence_length, 1, 1, self.parallel_attention.config.kv_channels).cuda() + output, bias = self.parallel_attention(hidden_states, attention_mask, rotary_pos_emb=rotary_pos_emb) + + assert config.recompute_granularity is None + assert 
output.shape[0] == sequence_length + assert output.shape[1] == micro_batch_size + assert output.shape[2] == config.hidden_size + assert bias.shape[0] == config.hidden_size + self.parallel_attention.config.apply_rope_fusion = False + + def test_checkpointed_gpu_forward(self): transformer_config = self.transformer_config transformer_config.recompute_granularity='selective' diff --git a/tests/unit_tests/transformer/test_attention_packed_seq.py b/tests/unit_tests/transformer/test_attention_packed_seq.py new file mode 100644 index 0000000000..c8be7dba3d --- /dev/null +++ b/tests/unit_tests/transformer/test_attention_packed_seq.py @@ -0,0 +1,131 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + +import pytest + +import torch + +from megatron.core.packed_seq_params import PackedSeqParams +from megatron.core.transformer.attention import SelfAttention +from megatron.core.transformer.enums import AttnMaskType +from tests.unit_tests.test_utilities import Utils +from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed +from megatron.core.transformer.transformer_config import TransformerConfig +from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec + +# Note: this test requires TE >= 0.13 as well as Flash Attention to run +# FIXME this unit test doesn't work in the current test container. to be fixed soon +""" +def make_test_packed_seq_params(sequence_length): + cu_seqlens = torch.IntTensor([0, 6, 19, 22, sequence_length]).cuda() + seqlens = cu_seqlens[1:] - cu_seqlens[:-1] + max_seqlen, _ = seqlens.max(dim=0, keepdim=True) + packed_seq_params = PackedSeqParams( + cu_seqlens_q=cu_seqlens, + cu_seqlens_kv=cu_seqlens, + max_seqlen_q=max_seqlen, + max_seqlen_kv=max_seqlen, + qkv_format='thd', + ) + return packed_seq_params + + +class TestParallelAttentionWithPackedSequence: + + def setup_method(self, method): + Utils.initialize_model_parallel(1,1) + model_parallel_cuda_manual_seed(123) + # use BF16 and a large enough hidden size to enable FlashAttention for thd format. 
+ self.transformer_config = TransformerConfig(num_layers=2, hidden_size=64, num_attention_heads=4, use_cpu_initialization=True, + bf16=True, params_dtype=torch.bfloat16, + pipeline_dtype=torch.bfloat16, autocast_dtype=torch.bfloat16) + self.parallel_attention = SelfAttention(self.transformer_config, + get_gpt_layer_with_transformer_engine_spec().submodules.self_attention.submodules, + layer_number=1, + attn_mask_type=AttnMaskType.causal) + + def teardown_method(self, method): + Utils.destroy_model_parallel() + + def test_cpu_forward(self): + # we can't currently do this because the global memory buffer is on GPU + pass + + def test_gpu_forward(self): + + config = self.parallel_attention.config + sequence_length = 32 + micro_batch_size = 1 + + self.parallel_attention.cuda() + + # [sequence length, batch size, hidden size] + hidden_states = torch.ones((sequence_length, micro_batch_size, self.parallel_attention.config.hidden_size)) + hidden_states = hidden_states.cuda().to(torch.bfloat16) + + attention_mask = None + + packed_seq_params = make_test_packed_seq_params(sequence_length) + output, bias = self.parallel_attention(hidden_states, attention_mask, packed_seq_params=packed_seq_params) + + assert config.recompute_granularity is None + assert output.shape[0] == sequence_length + assert output.shape[1] == micro_batch_size + assert output.shape[2] == config.hidden_size + assert bias.shape[0] == config.hidden_size + + def test_fused_rope_gpu_forward(self): + self.parallel_attention.config.apply_rope_fusion = True + config = self.parallel_attention.config + sequence_length = 32 + micro_batch_size = 1 + + self.parallel_attention.cuda() + + # [sequence length, batch size, hidden size] + hidden_states = torch.ones((sequence_length, micro_batch_size, self.parallel_attention.config.hidden_size)) + hidden_states = hidden_states.cuda().to(torch.bfloat16) + + attention_mask = None + rotary_pos_emb = torch.ones(sequence_length, 1, 1, self.parallel_attention.config.kv_channels).cuda() + + packed_seq_params = make_test_packed_seq_params(sequence_length) + output, bias = self.parallel_attention(hidden_states, attention_mask, packed_seq_params=packed_seq_params) + + assert config.recompute_granularity is None + assert output.shape[0] == sequence_length + assert output.shape[1] == micro_batch_size + assert output.shape[2] == config.hidden_size + assert bias.shape[0] == config.hidden_size + self.parallel_attention.config.apply_rope_fusion = False + + def test_checkpointed_gpu_forward(self): + transformer_config = self.transformer_config + transformer_config.recompute_granularity='selective' + checkpointed_parallel_attention = SelfAttention(transformer_config, + get_gpt_layer_with_transformer_engine_spec().submodules.self_attention.submodules, + layer_number=1, + attn_mask_type=AttnMaskType.causal) + config = checkpointed_parallel_attention.config + + sequence_length = 32 + micro_batch_size = 1 + + checkpointed_parallel_attention.cuda() + + # [sequence length, batch size, hidden size] + hidden_states = torch.ones( + (sequence_length, micro_batch_size, checkpointed_parallel_attention.config.hidden_size) + ) + hidden_states = hidden_states.cuda().to(torch.bfloat16) + + attention_mask = None + + packed_seq_params = make_test_packed_seq_params(sequence_length) + output, bias = checkpointed_parallel_attention(hidden_states, attention_mask, packed_seq_params=packed_seq_params) + + assert config.recompute_granularity == 'selective' + assert output.shape[0] == sequence_length + assert output.shape[1] == 
micro_batch_size + assert output.shape[2] == config.hidden_size + assert bias.shape[0] == config.hidden_size +""" \ No newline at end of file diff --git a/tests/unit_tests/transformer/test_spec_customization.py b/tests/unit_tests/transformer/test_spec_customization.py index 03c0f1a7a6..ebefe5de5b 100755 --- a/tests/unit_tests/transformer/test_spec_customization.py +++ b/tests/unit_tests/transformer/test_spec_customization.py @@ -1,12 +1,16 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +import sys from dataclasses import dataclass, fields +from importlib.metadata import version import pytest import torch import transformer_engine as te +from pkg_resources import packaging from megatron.core.fusions.fused_bias_dropout import get_bias_dropout_add +from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_local_spec from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed from megatron.core.transformer.attention import SelfAttention, SelfAttentionSubmodules from megatron.core.transformer.custom_layers.transformer_engine import ( @@ -15,11 +19,13 @@ TENorm, TERowParallelLinear, ) +from megatron.core.transformer.dot_product_attention import DotProductAttention from megatron.core.transformer.enums import AttnMaskType from megatron.core.transformer.identity_op import IdentityFuncOp, IdentityOp from megatron.core.transformer.spec_utils import ModuleSpec, build_module, import_module +from megatron.core.transformer.transformer_block import TransformerBlock, TransformerBlockSubmodules from megatron.core.transformer.transformer_config import TransformerConfig -from megatron.core.transformer.transformer_layer import TransformerLayerSubmodules +from megatron.core.transformer.transformer_layer import TransformerLayer, TransformerLayerSubmodules from tests.unit_tests.test_utilities import Utils @@ -41,7 +47,7 @@ def setup_method(self, method): submodules=SelfAttentionSubmodules( linear_qkv=TELayerNormColumnParallelLinear, core_attention=TEDotProductAttention, - linear_proj=TERowParallelLinear + linear_proj=TERowParallelLinear, ), ) @@ -73,6 +79,7 @@ def test_build_module(self): noop_transformer_layer = [ build_module(getattr(self.transformer_layer_spec, field.name)) for field in fields(self.transformer_layer_spec) + if field.name != 'sharded_state_dict_keys_map' ] x = random_input @@ -88,9 +95,7 @@ def test_build_module(self): assert x == random_input # Check SelfAttention - self_attention = build_module( - self.attention_spec, config=self.config, layer_number=1, - ) + self_attention = build_module(self.attention_spec, config=self.config, layer_number=1,) assert isinstance(self_attention, SelfAttention) assert self_attention.layer_number == 1 assert self_attention.attn_mask_type == self.attention_spec.params['attn_mask_type'] @@ -125,3 +130,111 @@ def test_build_module(self): # Check BiasDropoutAdd bda_op = build_module(self.bda_spec) assert id(bda_op) == id(get_bias_dropout_add) + + def test_sliding_window_attention(self): + te_version = packaging.version.Version(version("transformer-engine")) + if te_version < packaging.version.Version("1.2.0"): + print("SWA not tested because TE version is not >= 1.2.0", file=sys.stderr) + return + + config = TransformerConfig( + num_layers=2, + hidden_size=12, + num_attention_heads=4, + use_cpu_initialization=True, + window_size=[10, 0], + ) + # Make sure DotProductAttention throws (swa unsupported). 
+ threw = False + try: + attn = DotProductAttention( + config, layer_number=1, attn_mask_type=AttnMaskType.causal, attention_type='self' + ) + except: + threw = True + finally: + assert threw, 'Expected DotProductAttention to throw exception for SWA' + + # Test TEDotProductAttention + attn = TEDotProductAttention( + config, layer_number=1, attn_mask_type=AttnMaskType.causal, attention_type='self' + ) + # Make sure window-size is what we expect. + assert attn.window_size == config.window_size + + # Single integer window-size unsupported, make sure it throws + threw = False + try: + config.window_size = 11 + attn = TEDotProductAttention( + config, layer_number=1, attn_mask_type=AttnMaskType.causal, attention_type='self' + ) + except: + threw = True + finally: + assert threw, "Expected TEDotProductAttention to throw for integer window-size" + + # `None` makes this causal. + config.window_size = None + attn = TEDotProductAttention( + config, layer_number=1, attn_mask_type=AttnMaskType.causal, attention_type='self' + ) + # Make sure it's causal. + assert attn.window_size == (-1, 0) + + def test_transformer_block_custom(self): + """ + This test checks that the two ways of passing `layer_spec` to a + `TransformerBlock` result in an identical model: + 1. ModuleSpec(module=..., submodules=...) + 2. TransformerBlockSubmodules(layer_specs=...) + """ + + transformer_config = TransformerConfig( + num_layers=2, hidden_size=12, num_attention_heads=4, use_cpu_initialization=True + ) + layer_local_spec = get_gpt_layer_local_spec() + + # The following way can be used to pass a different `TransformerLayer` + # and internally the `TransformerBlock` would fan out the single + # `ModuleSpec` layer spec provided to all the layers of the block. + layer_spec1 = ModuleSpec(module=TransformerLayer, submodules=layer_local_spec.submodules) + model_parallel_cuda_manual_seed(123) + torch.manual_seed(0) + parallel_transformer_block1 = TransformerBlock(transformer_config, layer_spec1) + + layer_spec2 = TransformerBlockSubmodules( + layer_specs=[ + ModuleSpec(module=TransformerLayer, submodules=layer_local_spec.submodules) + ] + * transformer_config.num_layers + ) + # make sure the model init conditions are identical + model_parallel_cuda_manual_seed(123) + torch.manual_seed(0) + parallel_transformer_block2 = TransformerBlock(transformer_config, layer_spec2) + + sequence_length = 32 + micro_batch_size = 2 + parallel_transformer_block1.cuda() + parallel_transformer_block2.cuda() + + # [sequence length, batch size, hidden size] + hidden_states = torch.ones( + (sequence_length, micro_batch_size, transformer_config.hidden_size) + ) + hidden_states = hidden_states.cuda() + + attention_mask = torch.ones((1, 1, sequence_length, sequence_length), dtype=bool).cuda() + + out1 = parallel_transformer_block1( + hidden_states=hidden_states, attention_mask=attention_mask + ) + out2 = parallel_transformer_block2( + hidden_states=hidden_states, attention_mask=attention_mask + ) + + assert torch.all(torch.eq(out1, out2)) + assert out1.shape[0] == sequence_length == out2.shape[0] + assert out1.shape[1] == micro_batch_size == out2.shape[1] + assert out1.shape[2] == transformer_config.hidden_size == out2.shape[2] diff --git a/tests/unit_tests/transformer/test_switch_mlp.py b/tests/unit_tests/transformer/test_switch_mlp.py deleted file mode 100644 index b5f31ca237..0000000000 --- a/tests/unit_tests/transformer/test_switch_mlp.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. 
- -import pytest - -import torch - -from megatron.core.transformer.switch_mlp import SwitchMLP -from tests.unit_tests.test_utilities import Utils -from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed -from megatron.core.transformer.transformer_config import TransformerConfig -from megatron.core.models.gpt.gpt_layer_specs import gpt_layer_with_transformer_engine_spec_moe - -class TestParallelSwitchMLP: - - def setup_method(self, method): - Utils.initialize_model_parallel(1,1) - model_parallel_cuda_manual_seed(123) - print("done intializing") - transformer_config = TransformerConfig(num_layers=2, hidden_size=12, num_attention_heads=4, num_moe_experts= 2, use_cpu_initialization=True) - self.switch_mlp = SwitchMLP(transformer_config, - gpt_layer_with_transformer_engine_spec_moe.submodules.mlp.submodules) - - def teardown_method(self, method): - Utils.destroy_model_parallel() - - def test_constructor(self): - assert isinstance(self.switch_mlp, SwitchMLP) - - num_weights = sum([p.numel() for p in self.switch_mlp.parameters()]) - assert num_weights == 2448 - - - @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") - def test_gpu_forward(self): - switch_mlp = self.switch_mlp - switch_mlp.cuda() - # [sequence length, batch size, hidden size] - hidden_states = torch.ones((32, 2, switch_mlp.config.hidden_size)) - hidden_states = hidden_states.cuda() - output, output_bias = switch_mlp(hidden_states) - assert output.shape[0] == 32 - assert output.shape[1] == 2 - assert output.shape[2] == switch_mlp.config.hidden_size - assert output_bias.shape[2] == switch_mlp.config.hidden_size - assert output.dtype == torch.float32 - assert output.device.type == 'cuda' - assert output_bias.device.type == 'cuda' - diff --git a/tests/unit_tests/transformer/test_transformer_layer.py b/tests/unit_tests/transformer/test_transformer_layer.py index 2836e54484..be51f2cc1f 100644 --- a/tests/unit_tests/transformer/test_transformer_layer.py +++ b/tests/unit_tests/transformer/test_transformer_layer.py @@ -76,13 +76,12 @@ def test_sharded_state_dict(self, tp_pp): # Test all global shapes. 
Prepend num layers in front of expected shapes tensor_global_shapes = {k: v.global_shape for k, v in sharded_tensors.items()} - expected_global_shapes = {k: (transformer_config.num_layers, *v) - for k, v in get_tensor_shapes_for_tp(transformer_config, 1).items()} + expected_global_shapes = get_tensor_shapes_for_tp(transformer_config, 1) assert tensor_global_shapes == expected_global_shapes # Test ShardedTensor keys for state_dict_key, sh_ten in sharded_tensors.items(): - assert state_dict_key == f'0.{sh_ten.key}' + assert state_dict_key == sh_ten.key Utils.destroy_model_parallel() Utils.initialize_model_parallel(1, 1) @@ -91,16 +90,16 @@ def test_sharded_state_dict(self, tp_pp): def get_tensor_shapes_for_tp(transformer_config, tp_size): hs = transformer_config.hidden_size return { - '0.mlp.linear_fc1.layer_norm_weight': (hs,), - '0.mlp.linear_fc1.layer_norm_bias': (hs,), - '0.mlp.linear_fc1.weight': (hs * 4 // tp_size, hs), - '0.mlp.linear_fc1.bias': (hs * 4 // tp_size,), - '0.mlp.linear_fc2.weight': (hs, hs * 4 // tp_size), - '0.mlp.linear_fc2.bias': (hs,), - '0.self_attention.linear_proj.weight': (hs, hs // tp_size), - '0.self_attention.linear_proj.bias': (hs,), - '0.self_attention.linear_qkv.layer_norm_weight': (hs,), - '0.self_attention.linear_qkv.layer_norm_bias': (hs,), - '0.self_attention.linear_qkv.weight': (hs * 3 // tp_size, hs), - '0.self_attention.linear_qkv.bias': (hs * 3 // tp_size,), + 'mlp.linear_fc1.layer_norm_weight': (hs,), + 'mlp.linear_fc1.layer_norm_bias': (hs,), + 'mlp.linear_fc1.weight': (hs * 4 // tp_size, hs), + 'mlp.linear_fc1.bias': (hs * 4 // tp_size,), + 'mlp.linear_fc2.weight': (hs, hs * 4 // tp_size), + 'mlp.linear_fc2.bias': (hs,), + 'self_attention.linear_proj.weight': (hs, hs // tp_size), + 'self_attention.linear_proj.bias': (hs,), + 'self_attention.linear_qkv.layer_norm_weight': (hs,), + 'self_attention.linear_qkv.layer_norm_bias': (hs,), + 'self_attention.linear_qkv.weight': (hs * 3 // tp_size, hs), + 'self_attention.linear_qkv.bias': (hs * 3 // tp_size,), } diff --git a/tools/checkpoint/saver_megatron.py b/tools/checkpoint/saver_megatron.py index a1812682bb..b075e648dc 100644 --- a/tools/checkpoint/saver_megatron.py +++ b/tools/checkpoint/saver_megatron.py @@ -402,5 +402,6 @@ def get_models(count, dtype, pre_process, post_process): for tp_rank in range(args.target_tensor_parallel_size): mpu.set_tensor_model_parallel_rank(tp_rank) - save_checkpoint(md.iteration, [models[tp_rank]], None, None) + save_checkpoint(md.iteration, [models[tp_rank]], None, None, + num_floating_point_operations_so_far=0) print("Done!") diff --git a/tools/preprocess_data.py b/tools/preprocess_data.py index 2ff01ff70e..f1f7fbb98e 100644 --- a/tools/preprocess_data.py +++ b/tools/preprocess_data.py @@ -20,6 +20,7 @@ except ImportError: nltk_available = False +from datasets import load_dataset from megatron.tokenizer import build_tokenizer from megatron.core.datasets import indexed_dataset @@ -81,8 +82,7 @@ def split(self, json_line): output[key] = [tokens for partial in tokens_list for tokens in partial] return json.dumps(output), len(json_line) - def encode(self, json_line): - data = json.loads(json_line) + def _encode_data(self, data): ids = {} lens = {} for key in self.args.json_keys: @@ -103,7 +103,16 @@ def encode(self, json_line): sentence_lens[-1] += 1 ids[key] = doc_ids lens[key] = sentence_lens - return ids, lens, len(json_line) + return ids + + def encode(self, json_line): + data = json.loads(json_line) + ids = self._encode_data(data) + return ids, len(json_line) + + def 
encode_hf(self, sample): + ids = self._encode_data(sample) + return ids, 1 class Partition(object): @@ -143,14 +152,28 @@ def split_sentences(self, file_name): def process_json_file(self, file_name): input_file_name, output_prefix = file_name - print("Opening", input_file_name) - fin = open(input_file_name, 'r', encoding='utf-8') startup_start = time.time() encoder = Encoder(self.args) tokenizer = build_tokenizer(self.args) pool = multiprocessing.Pool(self.workers, initializer=encoder.initializer) - encoded_docs = pool.imap(encoder.encode, fin, 32) + + print("Opening", self.args.input) + + if self.args.input.endswith(".jsonl"): + print("Input is a jsonl file") + assert self.args.subset is None, f"subset argument set to: {self.args.subset}, but loading a jsonl file." + fin = open(self.args.input, 'r', encoding='utf-8') + encoded_docs = pool.imap(encoder.encode, fin, self.args.chunk_size) + #encoded_docs = map(encoder.encode, fin) + else: + # NOTE: this is not recommended for datasets larger than 40-50GB, as iterating through a dataset can be slow. + # Somehow, it seems faster to first dump the dataset to a jsonl file: ds.to_json() and then process the jsonl file. + # NOTE: this will be even slower if the dataset has large objects in other columns. + # In this case, it is recommended to dump as json only the required key: ds = ds.remove_columns(...) then to_json() + print("Input is not a jsonl file, will try to load from HF datasets") + ds = load_dataset(self.args.input, use_auth_token=True, streaming=True, split="train", data_dir=self.args.subset) + encoded_docs = pool.imap(encoder.encode_hf, ds, self.args.chunk_size) level = "document" if self.args.split_sentences: @@ -189,6 +212,8 @@ def get_args(): group = parser.add_argument_group(title='input data') group.add_argument('--input', type=str, required=True, help='Path to input JSON') + group.add_argument('--subset', type=str, default=None, + help='Subset argument when loading input data from a HuggingFace dataset') group.add_argument('--json-keys', nargs='+', default=['text'], help='space separate listed of keys to extract from json') group.add_argument('--split-sentences', action='store_true', @@ -201,7 +226,7 @@ def get_args(): choices=['BertWordPieceLowerCase','BertWordPieceCase', 'GPT2BPETokenizer', 'SentencePieceTokenizer', 'GPTSentencePieceTokenizer', 'Llama2Tokenizer', - 'NullTokenizer'], + 'NullTokenizer', 'TokenizerFromFile'], help='What type of tokenizer to use.') group.add_argument('--tokenizer-model', type=str, default=None, help='YTTM tokenizer model.') @@ -211,6 +236,8 @@ def get_args(): help='size of vocab for use with NullTokenizer') group.add_argument('--merge-file', type=str, default=None, help='Path to the BPE merge file (if necessary).') + group.add_argument('--tokenizer-file', type=str, default=None, + help='Path to the tokenizer file') group.add_argument('--append-eod', action='store_true', help='Append an token to the end of a document.') group.add_argument('--lang', type=str, default='english', diff --git a/tools/retro/README.md b/tools/retro/README.md index c36cb39ce8..f7a38c8a04 100644 --- a/tools/retro/README.md +++ b/tools/retro/README.md @@ -1,34 +1,60 @@ # Retro and InstructRetro -Retro [(Borgeaud et al., 2022)](https://arxiv.org/abs/2112.04426) is an autoregressive decoder-only language model (LM) pretrained with retrieval-augmentation. -Retro features practical scalibility to support large-scale pretraining from scratch by retrieving from trillions of token. 
-Pretraining with retrieval provides a more efficient storage mechanism of factual knowledge, when compared to storing factual knowledge implicitly within the network's parameters, thus largely reducing model parameters while achieving lower perplexity than standard GPT. +Retro [(Borgeaud et al., 2022)](https://arxiv.org/abs/2112.04426) is an autoregressive decoder-only language model (LM) +pretrained with retrieval-augmentation. +Retro features practical scalability to support large-scale pretraining from scratch by retrieving from trillions of +tokens. +Pretraining with retrieval provides a more efficient storage mechanism of factual knowledge, when compared to storing +factual knowledge implicitly within the network's parameters, thus largely reducing model parameters while achieving +lower perplexity than standard GPT. Retro also provides the flexibility to update the knowledge stored in LMs [(Wang et al., 2023a)](https://arxiv.org/abs/2304.06762) by updating the retrieval database without training LMs again. -InstructRetro [(Wang et al., 2023b)](https://arxiv.org/abs/2310.07713) further scales up the size of Retro to 48B, featuring the largest LLM pretrained with retrieval (as of December 2023). +InstructRetro [(Wang et al., 2023b)](https://arxiv.org/abs/2310.07713) further scales up the size of Retro to 48B, +featuring the largest LLM pretrained with retrieval (as of December 2023). The obtained foundation model, Retro 48B, largely outperforms the GPT counterpart in terms of perplexity. -With instruction tuning on Retro, InstructRetro demonstrates significant improvement over the instruction tuned GPT on downstream tasks in the zero-shot setting. Specifically, the average improvement of InstructRetro is 7% over its GPT counterpart across 8 short-form QA tasks, and 10% over GPT across 4 challenging long-form QA tasks. We also find that one can ablate the encoder from InstructRetro architecture and directly use the InstructRetro decoder backbone as GPT, while achieving comparable results. +With instruction tuning on Retro, InstructRetro demonstrates significant improvement over the instruction tuned GPT on +downstream tasks in the zero-shot setting. Specifically, the average improvement of InstructRetro is 7% over its GPT +counterpart across 8 short-form QA tasks, 10% over GPT across 4 challenging long-form QA tasks, and 16% over GPT across +3 summarization tasks. We also find that one can ablate the encoder from InstructRetro architecture and directly use the +InstructRetro decoder backbone as GPT, while achieving comparable results. This README provides an end-to-end tutorial to reproduce Retro and InstructRetro. 
# Contents - * [End-to-end Reproduction Guide](#end-to-end-reproduction-guide) - * [Step 0: Prepare the environment](#step-0-prepare-the-environment) + +* [Checkpoints](#checkpoints) +* [End-to-end Reproduction Guide](#end-to-end-reproduction-guide) + * [Step 0: Prepare the environment](#step-0-prepare-the-environment) * [Docker image](#docker-image) * [Install dependencies](#install-dependencies) - * [Step 1: Build retrieval database](#step-1-build-retrieval-database) - * [Step 2: Pretraining](#step-2-pretraining) - * [Step 3: Perplexity evaluation](#step-3-perplexity-evaluation) - * [Step 4: Instruction tuning](#step-4-instruction-tuning) - * [Step 5: Downstream task evaluation](#step-5-downstream-task-evaluation) - * [Citations](#citations) + * [Step 1: Build retrieval database](#step-1-build-retrieval-database) + * [Step 2: Pretraining](#step-2-pretraining) + * [Step 3: Perplexity evaluation](#step-3-perplexity-evaluation) + * [Step 4: Instruction tuning](#step-4-instruction-tuning) + * [Step 5: Downstream task evaluation](#step-5-downstream-task-evaluation) +* [Citations](#citations) + +# Checkpoints + +We provide the pretrained checkpoints of Retro and InstructRetro in the following table. The checkpoints are available +to download through the following links: + +| Model | Size | Instruction Tuning | Download Link 1 | Download Link 2 | Download Link 3 | +|-------------------------|------|--------------------|--------------------------------------------------------------------|--------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------| +| `retro-8b-base-4k` | 8b | | [Huggingface](https://huggingface.co/nvidia/retro-8b-base-4k) | [NGC](https://catalog.ngc.nvidia.com/orgs/nvidia/models/retro-8b-base-4k) | [Google Drive](https://drive.google.com/drive/folders/1uSQ5DAsuvx_8XcbtnVfs_MGvEOcx0uK_?usp=sharing) | +| `retro-8b-instruct-4k` | 8b | ✅ | [Huggingface](https://huggingface.co/nvidia/retro-8b-instruct-4k) | [NGC](https://catalog.ngc.nvidia.com/orgs/nvidia/models/retro-8b-instruct-4k) | [Google Drive](https://drive.google.com/drive/folders/1v5dKaSN0cm2lwyAWpFaJtlTrLhtMZXsI?usp=sharing) | +| `retro-48b-base-4k` | 48b | | [Huggingface](https://huggingface.co/nvidia/retro-48b-base-4k) | [NGC](https://catalog.ngc.nvidia.com/orgs/nvidia/models/retro-48b-base-4k) | [Google Drive](https://drive.google.com/drive/folders/1rtNpf0CiLElSHQcr3aLI3zgfI3teGTP5?usp=sharing) | +| `retro-48b-instruct-4k` | 48b | ✅ | [Huggingface](https://huggingface.co/nvidia/retro-48b-instruct-4k) | [NGC](https://catalog.ngc.nvidia.com/orgs/nvidia/models/retro-48b-instruct-4k) | [Google Drive](https://drive.google.com/drive/folders/1qdb0AQjSsAPGlWaIu3wgHPjf_nwLeY5h?usp=sharing) | # End-to-end Reproduction Guide -In this README, we provide an end-to-end reproduction guide for InstructRetro, covering from large-scale retrieval construction, pretraining, perplexity evaluation, instruction tuning, to downstream task evaluation. +In this README, we provide an end-to-end reproduction guide for InstructRetro, covering from large-scale retrieval +construction, pretraining, perplexity evaluation, instruction tuning, to downstream task evaluation. +If you are interested in evaluation only, we also [open-sourced our checkpoints](#checkpoints) and you can directly go +to [Step 5](#step-5-downstream-task-evaluation) to evaluate the checkpoints on downstream tasks. 
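If you only need the released models, for example to jump straight to Step 5, the Hugging Face repositories linked in the checkpoint table above can also be fetched programmatically. A minimal sketch using `huggingface_hub` (the repo id comes from the table; the local directory is an arbitrary choice, and a login may be required depending on the repository's access settings):

```python
from huggingface_hub import snapshot_download

# Repo id taken from the "Huggingface" column of the checkpoint table above;
# the local directory is illustrative.
local_path = snapshot_download(
    repo_id="nvidia/retro-8b-base-4k",
    local_dir="checkpoints/retro-8b-base-4k",
)
print("Checkpoint files downloaded to", local_path)
```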
## Step 0: Prepare the environment @@ -36,9 +62,8 @@ We recommend using docker environment to run the code. ### Docker image - -We provide a docker build file in [tools/retro/examples/Dockerfile](examples/Dockerfile) for the reproduction. The docker image is based on `nvcr.io/nvidia/pytorch:23.09-py3`. - +We provide a docker build file in [tools/retro/examples/Dockerfile](examples/Dockerfile) for the reproduction. The +docker image is based on the [NGC docker](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pytorch/tags) `nvcr.io/nvidia/pytorch:23.09-py3`. ### Install dependencies @@ -48,7 +73,8 @@ Clone the Megatron repo: git clone --branch InstructRetro https://github.com/NVIDIA/Megatron-LM.git ``` -If docker is not available, we recommend starting from a clean conda environment with the following runtime dependencies: +If docker is not available, we recommend starting from a clean conda environment with the following runtime +dependencies: - Python 3.10 - NVIDIA CUDA® 12.2.1 @@ -58,6 +84,7 @@ If docker is not available, we recommend starting from a clean conda environment - PyTorch 2.1.0a0+32f93b1 Then install Retro-specific dependencies, including: + ```bash pip install -U faiss-gpu pip install -U transformers @@ -67,36 +94,52 @@ pip install -U nltk pip install -U einops ``` - ## Step 1: Build retrieval database -In this step, we build a large-scale retrieval database for InstructRetro through [Faiss](https://github.com/facebookresearch/faiss) to retrieve from trillions of tokens, and preprocess (and save) the retrieval neighbors for the pretraining step. +In this step, we build a large-scale retrieval database for InstructRetro +through [Faiss](https://github.com/facebookresearch/faiss) to retrieve from trillions of tokens, and preprocess (and +save) the retrieval neighbors for the pretraining step. Please refer to [tools/retro/build_db.md](build_db.md) for more details. ## Step 2: Pretraining -*Please strictly follow Step 1 to build the retrieval database before pretraining to make sure the preprocessed retrieval neighbors match the pretraining corpus.* +*Please strictly follow Step 1 to build the retrieval database before pretraining to make sure the preprocessed +retrieval neighbors match the pretraining corpus.* In the pretraining step, we support both pretraining from scratch and continued pretraining from a pretrained GPT model. -We provide a template pretraining script to pretrain 843M Retro from scratch. Prepare your own arguments and update our templates in [tools/retro/examples/pretrain_model.sh](examples/pretrain_model.sh). Please note that the data path should be exactly matching the one used in Step 1 to make sure the preprocessed retrieval neighbors match the pretraining corpus. +We provide a template pretraining script to pretrain 843M Retro from scratch. Prepare your own arguments and update our +templates in [tools/retro/examples/pretrain_model.sh](examples/pretrain_model.sh). Please note that the data path should +be exactly matching the one used in Step 1 to make sure the preprocessed retrieval neighbors match the pretraining +corpus. [//]: # (Take the example of the Wikipedia corpus) ```bash bash tools/retro/examples/pretrain_model.sh ``` -After pretraining, the model checkpoints will be saved in the `--save` directory if you specified the arg in `pretrain_model.sh`. 
-To continue pretraining with retrieval from a pretrained GPT model, please specify `--load` in `pretrain_model.sh` to load the pretrained GPT model checkpoint (the architecture of GPT, including hidden size, number of layers, and activation methods, should be exactly the same as the one used for Retro). You should also specify `--no-load-optim --finetune` to make sure the optimizer state is not loaded from the pretrained GPT model and the continued pretraining with retrieval is from a clean start. After the first job / the first run, you will continue pretraining with retrieval from your last checkpoint. In the follow-up jobs, you should launch the pretraining without the flags `--no-load-optim --finetune` to make sure the optimizer state is correctly loaded from your last job. +After pretraining, the model checkpoints will be saved in the `--save` directory if you specified the arg +in `pretrain_model.sh`. +To continue pretraining with retrieval from a pretrained GPT model, please specify `--load` in `pretrain_model.sh` to +load the pretrained GPT model checkpoint (the architecture of GPT, including hidden size, number of layers, and +activation methods, should be exactly the same as the one used for Retro). You should also +specify `--no-load-optim --finetune` to make sure the optimizer state is not loaded from the pretrained GPT model and +the continued pretraining with retrieval is from a clean start. After the first job / the first run, you will continue +pretraining with retrieval from your last checkpoint. In the follow-up jobs, you should launch the pretraining without +the flags `--no-load-optim --finetune` to make sure the optimizer state is correctly loaded from your last job. ## Step 3: Perplexity evaluation -During pretraining, we will automatically evaluate the model perplexity on the specified validation corpus every `--eval-interval` steps. The validation corpus should be exactly the same as the one used in Step 1 to make sure the preprocessed retrieval neighbors match the pretraining corpus. +During pretraining, we will automatically evaluate the model perplexity on the specified validation corpus +every `--eval-interval` steps. The validation corpus should be exactly the same as the one used in Step 1 to make sure +the preprocessed retrieval neighbors match the pretraining corpus. -To evaluate the perplexity of a pretrained model, please add `--skip-train` in `pretrain_model.sh` to skip the pretraining step and only evaluate the perplexity of the model specified in `--load` on the validation corpus. Run the above command again to evaluate the perplexity of a pretrained model: +To evaluate the perplexity of a pretrained model, please add `--skip-train` in `pretrain_model.sh` to skip the +pretraining step and only evaluate the perplexity of the model specified in `--load` on the validation corpus. Run the +above command again to evaluate the perplexity of a pretrained model: ```bash bash tools/retro/examples/pretrain_model.sh @@ -104,11 +147,15 @@ bash tools/retro/examples/pretrain_model.sh ## Step 4: Instruction tuning -In this step, we fine-tune the pretrained model on the downstream task with instructions. We provide a template instruction tuning script to fine-tune 843M Retro. +In this step, we fine-tune the pretrained model on the downstream task with instructions. We provide a template +instruction tuning script to fine-tune 843M Retro. -We also provide an open-source blend of instruction tuning datasets. 
The dataset is available to download through [here](https://drive.google.com/file/d/1nzKwwYf8lYb9gN3P4YO8pFNU_B2nMYe1/view?usp=sharing). The blendable dataset consists of the following open-source instruction tuning datasets: +We also provide an open-source blend of instruction tuning datasets. The dataset is available to download +through [here](https://drive.google.com/file/d/1nzKwwYf8lYb9gN3P4YO8pFNU_B2nMYe1/view?usp=sharing). The blendable +dataset consists of the following open-source instruction tuning datasets: ### Instruction Tuning Dataset Breakdown + | Dataset | Samples | Epochs | Sampling Prob | |------------------------------------------------------------|--------:|-------:|--------------:| | [soda](https://arxiv.org/abs/2212.10465) | 2560 | 0.005 | 0.020 | @@ -123,35 +170,55 @@ We also provide an open-source blend of instruction tuning datasets. The dataset Refer to the paper links above for more details about each instruction tuning dataset. -*We note that the provided instruction tuning dataset is all from open-source instruction tuning datasets. It is slightly different from what we use in [InstructRetro](https://arxiv.org/abs/2310.07713), which contains private and proprietary datasets. Thus a 1-2% accuracy difference in downstream tasks may be expected.* +*We note that the provided instruction tuning dataset is all from open-source instruction tuning datasets. It is +slightly different from what we use in [InstructRetro](https://arxiv.org/abs/2310.07713), which contains private and +proprietary datasets. Thus a 1-2% accuracy difference in downstream tasks may be expected.* ### Instruction tuning script -Download the [blended instruction tuning dataset](https://drive.google.com/file/d/1nzKwwYf8lYb9gN3P4YO8pFNU_B2nMYe1/view?usp=sharing) in your data home directory `$DATA_HOME` and update our templates in [tools/retro/sft/sft_retro_lm.sh](sft/sft_retro_lm.sh). + +Download +the [blended instruction tuning dataset](https://drive.google.com/file/d/1nzKwwYf8lYb9gN3P4YO8pFNU_B2nMYe1/view?usp=sharing) +in your data home directory `$DATA_HOME` and update our templates +in [tools/retro/sft/sft_retro_lm.sh](sft/sft_retro_lm.sh). An example command to run instruction tuning on 843M Retro is as follows: + ```bash [blend-dataset-name] [model-size] [batch-size] [lr] [checkpoints] bash tools/retro/sft/sft_retro_lm.sh open_inst 843m 128 5e-6 ``` -The `blend_dataset_name` argument will blend all the datasets within the `$DATA_HOME` following the weights and configurations specified in the `${blend_dataset_name}.sh` ([open_inst.sh](sft/open_inst.sh) in the example above). -The checkpoints will be saved in the `--save` directory. For example, it will be saved to -`/checkpoints/applications/retro-sft_pp1_same_format_ctx1_843m_128_5e-6`. +The `blend_dataset_name` argument will blend all the datasets within the `$DATA_HOME` following the weights and +configurations specified in the `${blend_dataset_name}.sh` ([open_inst.sh](sft/open_inst.sh) in the example above). +The checkpoints will be saved in the `--save` directory. For example, it will be saved to +`/checkpoints/applications/retro-sft_pp1_same_format_ctx1_843m_128_5e-6`. ## Step 5: Downstream task evaluation -In this step, we demonstrate how to run InstructRetro for zero-shot evaluation on downstream question answering (QA) tasks. +In this step, we demonstrate how to run InstructRetro for zero-shot evaluation on downstream question answering (QA) +tasks. 
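Before launching `sft_retro_lm.sh`, it can help to confirm that the downloaded blend is laid out the way the SFT pipeline expects under your data home directory, namely `<name>/<name>_QA_train.json` and `<name>/<name>_QA_dev.json` (this mirrors the paths built in `tools/retro/sft/sft_retro.py` later in this diff). A small sketch with hypothetical dataset names:

```python
import os

# DATA_HOME points at the downloaded blend; "soda" appears in the table above,
# "my_dataset" is a placeholder for any other dataset in the blend.
data_home = os.environ.get("DATA_HOME", "/data/instruction_tuning")
names = ["soda", "my_dataset"]

for name in names:
    for split in ("train", "dev"):
        path = os.path.join(data_home, name, f"{name}_QA_{split}.json")
        status = "ok" if os.path.isfile(path) else "MISSING"
        print(f"{status:8s} {path}")
```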
We provide the pre-processed open-source evaluation datasets with a unified format for different tasks. The +evaluation datasets used in our paper are available to download +through [here](https://drive.google.com/drive/folders/1xw-N0LJR_lIWnH6BKzHIb49quVCS_V72?usp=sharing). Please stick to +the same retro workdir used in Step 0-4 to make sure the preprocessed retrieval neighbors match the pretraining corpus. +If you directly come to Step 5, an example retro workdir with `args.json` for 800M Retro is +provided [here](https://drive.google.com/file/d/121GqAdMvf8bJEBZRt-SD4uhW-SRWgI3s/view?usp=sharing). Note that the args +in the json can be overwritten through the command line. -We present an example command to run retro generation given the InstructRetro checkpoints and the Natural Question (NQ) task. The example command is for the 843m InstructRetro obtained in Step 4. Please specify the directory for the NQ dataset and update the command accordingly for other checkpoints. +We present an example command to run retro generation given the InstructRetro checkpoints and the Natural Question (NQ) +task. The example command is for the 843m InstructRetro obtained in Step 4. Please specify the directory for the NQ +dataset and update the command accordingly for other checkpoints. ```bash bash tools/retro/text_generation/retro_generate.sh nq 843m greedy test 0 20000 1000 5 pp1 /checkpoints/applications/retro-sft_pp1_same_format_ctx1_843m_128_5e-6 2 ``` -The generated responses will be saved in the corresponding checkpoint directory. For example, for the 843m InstructRetro, it will be saved to +The generated responses will be saved in the corresponding checkpoint directory. For example, for the 843m +InstructRetro, it will be saved to `/checkpoints/applications/retro-sft_pp1_same_format_ctx1_843m_128_5e-6/retro-generate-nq_5_2_843m_test_greedy_0_20000_1000.txt`. -To evaluate the F1 / Exact Match (EM) scores of the generated responses, we provide an example script to run the evaluation on the NQ dataset. Please specify the directory for the NQ dataset and update the command accordingly for other checkpoints and downstream tasks. +To evaluate the F1 / Exact Match (EM) scores of the generated responses, we provide an example script to run the +evaluation on the NQ dataset. Please specify the directory for the NQ dataset and update the command accordingly for +other checkpoints and downstream tasks. ```bash python3 tools/retro/text_generation/evaluate.py @@ -163,11 +230,12 @@ See more details from our papers: [Shall we Pretrain Autoregressive Language Models with Retrieval? 
A Comprehensive Study.](https://arxiv.org/abs/2304.06762) -_Boxin Wang, Wei Ping, Peng Xu, Lawrence McAfee, Zihan Liu, Mohammad Shoeybi, Yi Dong, Oleksii Kuchaiev, Bo Li, Chaowei Xiao, Anima Anandkumar, Bryan Catanzaro._ (EMNLP 2023) +_Boxin Wang, Wei Ping, Peng Xu, Lawrence McAfee, Zihan Liu, Mohammad Shoeybi, Yi Dong, Oleksii Kuchaiev, Bo Li, Chaowei +Xiao, Anima Anandkumar, Bryan Catanzaro._ (EMNLP 2023) -[InstructRetro: Instruction Tuning post Retrieval-Augmented Pretraining.](https://arxiv.org/abs/2310.07713) +[InstructRetro: Instruction Tuning post Retrieval-Augmented Pretraining.](https://arxiv.org/abs/2310.07713) -_Boxin Wang, Wei Ping, Lawrence McAfee, Peng Xu, Bo Li, Mohammad Shoeybi, Bryan Catanzaro._ +_Boxin Wang, Wei Ping, Lawrence McAfee, Peng Xu, Bo Li, Mohammad Shoeybi, Bryan Catanzaro._ Please cite the papers as follows if you use the data or code from this repo: diff --git a/tools/retro/query/multi_split_gpt_dataset.py b/tools/retro/query/multi_split_gpt_dataset.py index 502f06d075..7b6d744877 100644 --- a/tools/retro/query/multi_split_gpt_dataset.py +++ b/tools/retro/query/multi_split_gpt_dataset.py @@ -59,6 +59,8 @@ class MultiSplitGPTDataset(GPTDataset): indexed_dataset (MMapIndexedDataset): The MMapIndexedDataset around which to build the MegatronDataset + dataset_path (str): The real path on disk to the dataset, for bookkeeping + indexed_indices (numpy.ndarray): The set of the documents indices to expose num_samples (int): The number of samples to draw from the indexed dataset @@ -71,12 +73,13 @@ class MultiSplitGPTDataset(GPTDataset): def __init__( self, indexed_dataset: MMapIndexedDataset, + dataset_path: str, indexed_indices: numpy.ndarray, num_samples: int, index_split: Split, config: MultiSplitGPTDatasetConfig, ) -> None: - super().__init__(indexed_dataset, indexed_indices, num_samples, index_split, config) + super().__init__(indexed_dataset, dataset_path, indexed_indices, num_samples, index_split, config) def __getitem__(self, idx: int) -> Dict[str, numpy.ndarray]: """Abstract method implementation diff --git a/tools/retro/sft/dataset_conv.py b/tools/retro/sft/dataset_conv.py index cd41748e87..d7bde54f78 100644 --- a/tools/retro/sft/dataset_conv.py +++ b/tools/retro/sft/dataset_conv.py @@ -1,74 +1,167 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +import re import json +import os +from typing import Any, Iterable, Dict + +from numpy import ndarray +from megatron.core.datasets.blended_megatron_dataset_config import BlendedMegatronDatasetConfig +from megatron.core.datasets.utils import Split import torch -import numpy as np +import numpy import glob from collections import OrderedDict -from megatron import get_tokenizer, get_args, get_retro_args +from megatron.core.datasets.blended_megatron_dataset_config import BlendedMegatronDatasetConfig +from megatron.core.datasets.megatron_dataset import LowLevelDataset, MegatronDataset +from megatron.core.datasets.utils import Split +from dataclasses import dataclass + +_DATASET_NAME_PATTERNS = { + Split.train: r"(?P[^\0]+)\/(?P=name)\_QA\_train.json", + Split.valid: r"(?P[^\0]+)\/(?P=name)\_QA\_dev.json", +} -class FtDataset(torch.utils.data.Dataset): + +@dataclass +class JsonQADatasetConfig(BlendedMegatronDatasetConfig): + """Configuration object for the QA finetuning pipeline """ - This class represents a dataset for fine-tuning GPT models using the Megatron framework. 
+ ft_neighbours: int = 1 + + bert_retriever_neighbours: bool = False + + longform_answer: bool = False + + inference_only: bool = False + + retrieved_neighbours: bool = False - Args: - name (str): Name of the dataset equals to data_prefix + fix_newsqa: bool = True - indexed_dataset (IndexedDataset): The dataset object containing the data samples. + def __post_init__(self) -> None: + super().__post_init__() + assert self.blend_per_split is not None - max_seq_length (int): Maximum sequence length for each sample in the dataset. - fewshot_list (list): A list of few-shot learning examples, if applicable. +@dataclass +class RetroJsonQADatasetConfig(JsonQADatasetConfig): + """Configuration object for the Retro QA finetuning pipeline """ - def __init__(self, name, indexed_dataset, max_seq_length, - fewshot_list=None): + retro_num_neighbors: int = None + + retro_gpt_retrieved_length: int = None + + def __post_init__(self) -> None: + super().__post_init__() + assert self.retro_num_neighbors is not None + assert self.retro_gpt_retrieved_length is not None + + +class JsonQADataset(MegatronDataset): + + def __init__(self, dataset: Any, dataset_path: str, indices: ndarray, num_samples: int, index_split: Split, config: BlendedMegatronDatasetConfig) -> None: + super().__init__(dataset, dataset_path, indices, num_samples, index_split, config) + matches = re.findall(_DATASET_NAME_PATTERNS[index_split], dataset_path) + assert len(matches) == 1 + assert len(matches[0]) > 0 + self.dataset_name = matches[0] - # Params to store. - self.dataset_name = name # dataset_name equals to data_prefix in pretrain - self.max_seq_length = max_seq_length - self.desc = name + @staticmethod + def numel_low_level_dataset(low_level_dataset: LowLevelDataset) -> int: + return len(low_level_dataset) - # For compatibility with Megatron Core BlendedDataset - self.unique_identifiers = OrderedDict() - self.unique_identifiers["class"] = type(self).__name__ - self.unique_identifiers["name"] = name + @staticmethod + def build_low_level_dataset(dataset_path: str, config: JsonQADatasetConfig) -> Iterable: + assert os.path.isfile(dataset_path), f"{dataset_path} does not exist on disk" + return preprocess(dataset_path, config) - # Dataset. - self.indexed_dataset = indexed_dataset + def __len__(self) -> int: + return len(self.dataset) - # Vocab stuff. 
- tokenizer = get_tokenizer() - self.eos_id = tokenizer.eod - self.pad_id = tokenizer.eod - self.fewshot_list = fewshot_list + def __getitem__(self, idx: int) -> Dict[str, ndarray]: + sample = self.dataset[idx % len(self.dataset)] - self.args = get_args() + # unpack tokens + query, answer, neighbours = sample - def __len__(self): - return len(list(self.indexed_dataset)) + # tokenization + output_tokens = self.config.tokenizer.tokenize(answer) - def __getitem__(self, idx): + input_tokens = reformat_prompt( + query, + neighbours, + self.dataset_name, + self.config.ft_neighbours, + len(output_tokens), + self.config.tokenizer, + self.config.sequence_length + ) - idx = idx % len(self.indexed_dataset) - sample = self.indexed_dataset[idx] + # padding + tokens, answer_mask = pad_and_convert_to_numpy( + input_tokens, output_tokens, self.config.tokenizer.pad, self.config.sequence_length, self.config.tokenizer.eos + ) - if self.args.retro_add_retriever: - return build_retro_training_sample(sample, - self.max_seq_length, # needed for padding - self.pad_id, self.eos_id, - self.dataset_name, - self.args.ft_neighbours, - self.args.shuffle_topn) - else: - return build_normal_training_sample(sample, - self.max_seq_length, # needed for padding - self.pad_id, self.eos_id, - self.dataset_name, - self.args.ft_neighbours, - self.args.shuffle_topn, - self.fewshot_list) + train_sample = { + 'text': tokens, + 'answer_mask': answer_mask, + } + + return train_sample + + +class RetroJsonQADataset(JsonQADataset): + + def __getitem__(self, idx: int) -> Dict[str, ndarray]: + + sample = self.dataset[idx % len(self.dataset)] + + # unpack tokens + query, answer, neighbours = sample + + # tokenization + output_tokens = self.config.tokenizer.tokenize(answer) + + input_tokens = reformat_prompt_retro( + query, + neighbours, + self.dataset_name, + self.config.ft_neighbours, + len(output_tokens), + self.config.tokenizer, + self.config.sequence_length + ) + + # padding + tokens, answer_mask = pad_and_convert_to_numpy( + input_tokens, + output_tokens, + self.config.tokenizer.pad, + self.config.sequence_length, + self.config.tokenizer.eos + ) + + # get retro neighbors + # context chunk and answer chunk + n_chunks_per_sample = 2 + num_neighbors = self.config.retro_num_neighbors + # disable retro encoder + neighbor_tokens = numpy.zeros( + [n_chunks_per_sample, num_neighbors, self.config.retro_gpt_retrieved_length], + dtype=numpy.int64 + ) + + train_sample = { + 'text': tokens, + 'answer_mask': answer_mask, + 'neighbor_tokens': neighbor_tokens, + 'context_len': len(input_tokens) + } + + return train_sample def format_multichoice(multichoice_options): @@ -85,17 +178,16 @@ def format_answer(answer): return " {}".format(answer) -def preprocess(data_file, inference_only=False, retrieved_neighbours=False, fix_newsqa=True): - args = get_args() - assert args.ft_neighbours > 0 - if args.longform_answer: +def preprocess(dataset_path: str, config: JsonQADatasetConfig): + assert config.ft_neighbours > 0 + if config.longform_answer: nq_examples = [] - with open(data_file, "r") as f: + with open(dataset_path, "r") as f: for fn in f: nq_examples.append(json.loads(fn)) else: nq_examples = [] - for my_data_file in sorted(glob.glob(data_file)): + for my_data_file in sorted(glob.glob(dataset_path)): with open(my_data_file, "r", encoding='utf-8') as f: nq_examples.extend(json.load(f)) @@ -104,11 +196,11 @@ def preprocess(data_file, inference_only=False, retrieved_neighbours=False, fix_ question = instance["question"] if 'qa_type' in instance and 
instance['qa_type'] == "multi_choice_qa": question = format_multichoice_question(question, instance["multichoice_options"]) - if args.bert_retriever_neighbours: + if config.bert_retriever_neighbours: contexts = instance["bert_pretrain_corpus_neighbours"] neighbours = ["source: " + ctx for ctx in contexts] else: - if retrieved_neighbours: + if config.retrieved_neighbours: contexts = instance["ctxs"] neighbours = ["title: " + ctx["title"] + ", source: " + ctx["text"] for ctx in contexts] else: @@ -118,15 +210,15 @@ def preprocess(data_file, inference_only=False, retrieved_neighbours=False, fix_ "title: " + instance["sub-paragraphs"][0] + ", source: " + instance["sub-paragraphs"][1]] else: neighbours = ["title: , source: " + instance["sub-paragraphs"]] - elif fix_newsqa and "sub_paragraph" in instance: + elif config.fix_newsqa and "sub_paragraph" in instance: neighbours = ["title: , source: " + instance["sub_paragraph"]] else: neighbours = ["title: , source: "] - if inference_only: + if config.inference_only: data.append((question, None, neighbours)) else: - if args.longform_answer: + if config.longform_answer: if "longform_answer" in instance: answers = [instance["longform_answer"]] else: @@ -160,28 +252,11 @@ def preprocess(data_file, inference_only=False, retrieved_neighbours=False, fix_ return data -def get_processed_dataset(name, data_folder): - training_file = data_folder + "/{}/{}_QA_train*.json".format(name, name) - validation_file = data_folder + "/{}/{}_QA_dev.json".format(name, name) - - dataset = {} - dataset["train"] = preprocess(training_file) - dataset["valid"] = preprocess(validation_file) - dataset["test"] = preprocess(validation_file) - - print(name, "train", len(dataset["train"])) - print(name, "valid", len(dataset["valid"])) - print(name, "test", len(dataset["test"])) - - return dataset - - -def count_stat(dataset, tokenizer): - args = get_args() +def count_stat(dataset, tokenizer, k): nb_lens = [] for i, d in enumerate(dataset): query, answer, neighbours = d - nb_lens.extend([len(tokenizer.tokenize(neighbour)) for neighbour in neighbours[:args.k]]) + nb_lens.extend([len(tokenizer.tokenize(neighbour)) for neighbour in neighbours[:k]]) print("len of nb", len(nb_lens)) print("max of len nb", max(nb_lens)) @@ -342,75 +417,6 @@ def reformat_prompt_short(query, neighbours, dataset_name, ft_neighbours, \ return input_tokens -def build_normal_training_sample(sample, - max_seq_length, - pad_id, - eos_id, - dataset_name, - ft_neighbours=1, - shuffle_topn=False, - fewshot_list=None): - # unpack tokens - query, answer, neighbours = sample - - # tokenization - tokenizer = get_tokenizer() - output_tokens = tokenizer.tokenize(answer) - - input_tokens = reformat_prompt(query, neighbours, dataset_name, ft_neighbours, len(output_tokens), tokenizer, - max_seq_length) - - # Padding - tokens, answer_mask \ - = pad_and_convert_to_numpy(input_tokens, output_tokens, - pad_id, max_seq_length, eos_id) - - train_sample = { - 'text': tokens, - 'answer_mask': answer_mask, - } - return train_sample - - -def build_retro_training_sample(sample, - max_seq_length, - pad_id, - eos_id, - dataset_name, - ft_neighbours=1, - shuffle_topn=False): - # unpack tokens - query, answer, neighbours = sample - - # tokenization - tokenizer = get_tokenizer() - output_tokens = tokenizer.tokenize(answer) - - input_tokens = reformat_prompt_retro(query, neighbours, dataset_name, ft_neighbours, len(output_tokens), tokenizer, - max_seq_length) - - # Padding - tokens, answer_mask \ - = pad_and_convert_to_numpy(input_tokens, 
output_tokens, - pad_id, max_seq_length, eos_id) - - # get retro neighbors - args = get_args() - retro_args = get_retro_args() - n_chunks_per_sample = 2 # context chunk and answer chunk - num_neighbors = args.retro_num_neighbors - neighbor_tokens = np.zeros([n_chunks_per_sample, num_neighbors, retro_args.retro_gpt_retrieved_length], - dtype=np.int64) # disable retro encoder - - train_sample = { - 'text': tokens, - 'answer_mask': answer_mask, - 'neighbor_tokens': neighbor_tokens, - 'context_len': len(input_tokens) - } - return train_sample - - def pad_and_convert_to_numpy(input_ids, output_ids, pad_id, max_seq_length, eos_id): @@ -431,10 +437,10 @@ def pad_and_convert_to_numpy(input_ids, output_ids, # Tokens. filler = [pad_id] * padding_length - tokens = np.array(tokens + [eos_id] + filler, dtype=np.int64) + tokens = numpy.array(tokens + [eos_id] + filler, dtype=numpy.int64) # answer mask answer_mask = answer_mask + [1] + [0] * padding_length - answer_mask = np.array(answer_mask, dtype=np.int64) + answer_mask = numpy.array(answer_mask, dtype=numpy.int64) return tokens, answer_mask diff --git a/tools/retro/sft/sft_gpt_dataset.py b/tools/retro/sft/sft_gpt_dataset.py deleted file mode 100644 index 72c9ded849..0000000000 --- a/tools/retro/sft/sft_gpt_dataset.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. - -"""GPT style dataset.""" -from types import SimpleNamespace - -from megatron import print_rank_0, get_args -from megatron.core import mpu -from megatron.core.datasets.blended_megatron_dataset_builder import BlendedMegatronDatasetBuilder -from megatron.core.datasets.blended_dataset import BlendedDataset -from megatron.data.dataset_utils import get_datasets_weights_and_num_samples -from tools.retro.sft.dataset_conv import FtDataset as SFTDataset -from tools.retro.sft.dataset_conv import get_processed_dataset - - -def build_train_valid_test_datasets(data_prefix, seq_length): - """Build train, valid, and test datasets.""" - - assert data_prefix - - args = get_args() - - if len(data_prefix) == 1: - processed_datasets = get_processed_dataset(data_prefix[0], args.data_folder) - - train_ds = SFTDataset(data_prefix[0], processed_datasets["train"], seq_length) - valid_ds = SFTDataset(data_prefix[0], processed_datasets["valid"], seq_length) - test_ds = SFTDataset(data_prefix[0], processed_datasets["test"], seq_length) - - return train_ds, valid_ds, test_ds - - prefixes, weights, _ = get_datasets_weights_and_num_samples(data_prefix, train_valid_test_num_samples=0) - train_datasets, valid_datasets, test_datasets = [], [], [] - train_size, valid_size, test_size = 0, 0, 0 - - for i in range(len(prefixes)): - processed_datasets = get_processed_dataset(prefixes[i], args.data_folder) - - train_ds = SFTDataset(prefixes[i], processed_datasets["train"], seq_length) - valid_ds = SFTDataset(prefixes[i], processed_datasets["valid"], seq_length) - test_ds = SFTDataset(prefixes[i], processed_datasets["test"], seq_length) - - if train_ds: - train_datasets.append(train_ds) - train_size += len(train_ds) - if valid_ds: - valid_datasets.append(valid_ds) - valid_size += len(valid_ds) - if test_ds: - test_datasets.append(test_ds) - test_size += len(test_ds) - - # Blend - MEGATRON_CORE_DUMMY_CONFIG = SimpleNamespace( - is_built_on_rank=lambda: mpu.get_tensor_model_parallel_rank() == 0, - path_to_cache=getattr(get_args(), "data_cache_path") - ) - - blending_train_dataset = None - if train_datasets: - blending_train_dataset = 
BlendedMegatronDatasetBuilder.build_generic_dataset( - BlendedDataset, - MEGATRON_CORE_DUMMY_CONFIG.is_built_on_rank, - train_datasets, - weights, - train_size, - MEGATRON_CORE_DUMMY_CONFIG, - ) - blending_valid_dataset = None - if valid_datasets: - blending_valid_dataset = BlendedMegatronDatasetBuilder.build_generic_dataset( - BlendedDataset, - MEGATRON_CORE_DUMMY_CONFIG.is_built_on_rank, - valid_datasets, - weights, - valid_size, - MEGATRON_CORE_DUMMY_CONFIG, - ) - blending_test_dataset = None - if test_datasets: - blending_test_dataset = BlendedMegatronDatasetBuilder.build_generic_dataset( - BlendedDataset, - MEGATRON_CORE_DUMMY_CONFIG.is_built_on_rank, - test_datasets, - weights, - test_size, - MEGATRON_CORE_DUMMY_CONFIG, - ) - - return (blending_train_dataset, blending_valid_dataset, - blending_test_dataset) diff --git a/tools/retro/sft/sft_retro.py b/tools/retro/sft/sft_retro.py index c8d6fb227e..fd95c05586 100644 --- a/tools/retro/sft/sft_retro.py +++ b/tools/retro/sft/sft_retro.py @@ -3,7 +3,7 @@ """Pretrain GPT""" import torch -from functools import partial +from functools import partial, reduce import sys, os sys.path.append(os.path.abspath(os.path.join( @@ -14,11 +14,12 @@ from megatron import get_tokenizer from megatron.core import tensor_parallel from megatron.core.enums import ModelType +from megatron.core.datasets.blended_megatron_dataset_builder import BlendedMegatronDatasetBuilder from megatron.training import pretrain from megatron.utils import get_ltor_masks_and_position_ids from megatron.utils import average_losses_across_data_parallel_group -from pretrain_gpt import model_provider -from tools.retro.sft.sft_gpt_dataset import build_train_valid_test_datasets +from pretrain_gpt import model_provider, is_dataset_built_on_rank +from tools.retro.sft.dataset_conv import JsonQADataset, JsonQADatasetConfig, RetroJsonQADataset, RetroJsonQADatasetConfig def get_tasks_args(parser): @@ -187,12 +188,74 @@ def forward_step(data_iterator, model): def train_valid_test_datasets_provider(train_val_test_num_samples): """Build train, valid, and test datasets.""" args = get_args() + retro_args = get_retro_args() + + tokenizer = get_tokenizer() + + def fix_and_split_blend_pair(pair): + weight, name = pair + return [ + [weight, os.path.join(args.data_folder, name, f"{name}_QA_train.json")], + [weight, os.path.join(args.data_folder, name, f"{name}_QA_dev.json")], + None, + ] + + blend = [args.data_path[i:i+2] for i in range(0, len(args.data_path), 2)] + + if len(blend) == 1: + blend_per_split = [ + os.path.join(args.data_folder, blend[0], f"{blend[0]}_QA_train.json"), + os.path.join(args.data_folder, blend[0], f"{blend[0]}_QA_dev.json"), + None, + ] + else: + blend_per_split = [ + list( + reduce( + lambda x, y: x + y, + list(zip(*map(fix_and_split_blend_pair, blend)))[0] + ) + ), + None, + None, + ] + + extra_kwargs = {} + + if args.retro_add_retriever: + dataset_cls = RetroJsonQADataset + config_cls = RetroJsonQADatasetConfig + extra_kwargs["retro_num_neighbors"] = args.retro_num_neighbors + extra_kwargs["retro_gpt_retrieved_length"] = retro_args.retro_gpt_retrieved_length + else: + dataset_cls = JsonQADataset + config_cls = JsonQADatasetConfig + + config = config_cls( + is_built_on_rank=is_dataset_built_on_rank, + random_seed=args.seed, + sequence_length=args.seq_length, + blend_per_split=blend_per_split, + split=args.split, + path_to_cache=args.data_cache_path, + mock=args.mock_data, + tokenizer=tokenizer, + ft_neighbours=args.ft_neighbours, + 
bert_retriever_neighbours=args.bert_retriever_neighbours, + longform_answer=args.longform_answer, + inference_only=False, + retrieved_neighbours=False, + fix_newsqa=True, + **extra_kwargs + ) print_rank_0('> building train, validation, and test datasets ' 'for GPT ...') - train_ds, valid_ds, test_ds = build_train_valid_test_datasets( - data_prefix=args.data_path, - seq_length=args.seq_length) + train_ds, valid_ds, test_ds = BlendedMegatronDatasetBuilder( + dataset_cls, + train_val_test_num_samples, + config + ).build() print_rank_0("> finished creating GPT datasets ...") return train_ds, valid_ds, test_ds
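Taken together, the `dataset_conv.py` and `sft_retro.py` changes above route the QA JSON files through the core `BlendedMegatronDatasetBuilder`. The sketch below replays the two path-handling pieces with hypothetical values: flattening the `(weight, name)` pairs into a train-only `blend_per_split`, and recovering the dataset name from a file path as `JsonQADataset.__init__` does (the capture group in `_DATASET_NAME_PATTERNS` is assumed to be named `name`, which the `(?P=name)` back-reference implies).

```python
import os
import re
from functools import reduce

# Hypothetical stand-ins for the Megatron args used in sft_retro.py.
data_folder = "/data/sft"
data_path = ["0.7", "nq", "0.3", "tqa"]  # alternating weight / dataset-name values

def fix_and_split_blend_pair(pair):
    """Expand one (weight, name) pair into its train and dev file entries."""
    weight, name = pair
    return [
        [weight, os.path.join(data_folder, name, f"{name}_QA_train.json")],
        [weight, os.path.join(data_folder, name, f"{name}_QA_dev.json")],
        None,
    ]

# Group the flat data_path list into (weight, name) pairs, then keep only the
# train entry of every pair and flatten them into one alternating
# [weight, path, weight, path, ...] list; valid and test stay None.
blend = [data_path[i:i + 2] for i in range(0, len(data_path), 2)]
blend_per_split = [
    list(reduce(lambda x, y: x + y, list(zip(*map(fix_and_split_blend_pair, blend)))[0])),
    None,
    None,
]
print(blend_per_split[0])
# ['0.7', '/data/sft/nq/nq_QA_train.json', '0.3', '/data/sft/tqa/tqa_QA_train.json']

# Dataset-name recovery as done in JsonQADataset.__init__; the named group is
# assumed to be "name" (implied by the "(?P=name)" back-reference).
TRAIN_PATTERN = r"(?P<name>[^\0]+)\/(?P=name)\_QA\_train.json"
matches = re.findall(TRAIN_PATTERN, blend_per_split[0][1])
assert len(matches) == 1
print(matches[0])  # nq
```

Note that in the multi-dataset branch only the training split receives a blend list; the validation and test entries of `blend_per_split` are left as `None`.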