Revert Mcore update since it caused regression (#11791)
* Revert "ci: Test install on macos (#11429)"

This reverts commit e04e345.

* Revert "chore(beep boop 🤖): Bump `MCORE_TAG=4dc8977...` (2025-01-07) (#11768)"

This reverts commit 06cf0dd.

* Revert "Bump mcore (#11740)"

This reverts commit 8b9d6c7.
pablo-garay authored Jan 9, 2025
1 parent f5d77c3 commit 84b2bf0
Showing 27 changed files with 110 additions and 94 deletions.
5 changes: 2 additions & 3 deletions .github/workflows/cicd-main.yml
@@ -2937,7 +2937,7 @@ jobs:
with:
RUNNER: self-hosted-azure-gpus-2-h100
SCRIPT: |
CUDA_DEVICE_MAX_CONNECTIONS=1 python examples/nlp/language_modeling/tuning/megatron_gpt_finetuning.py \
CUDA_DEVICE_MAX_CONNECTIONS=1 NVTE_FLASH_ATTN=0 NVTE_FUSED_ATTN=1 python examples/nlp/language_modeling/tuning/megatron_gpt_finetuning.py \
trainer.devices=2 \
trainer.log_every_n_steps=1 \
trainer.max_epochs=9999 \
@@ -2965,7 +2965,6 @@ jobs:
+model.tp_comm_overlap_ag=False \
+model.tp_comm_overlap_rs=False \
+model.tp_comm_overlap_disable_qkv=True \
+model.attention_backend="unfused" \
model.peft.peft_scheme="lora" \
model.peft.lora_tuning.adapter_dim=16 \
model.peft.lora_tuning.alpha=32 \
@@ -4354,7 +4353,7 @@ jobs:
with:
RUNNER: self-hosted-azure
SCRIPT: |
python3 tests/collections/llm/megatron_mixtral_pretraining.py \
NVTE_FUSED_ATTN=0 NVTE_FLASH_ATTN=0 python3 tests/collections/llm/megatron_mixtral_pretraining.py \
--experiment-dir=/tmp/mixtral_pretrain_results \
--data-path=/home/TestData/nlp/megatron_t5/data/pile_val_small_bert_tokenizer_text_document
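For context on the NVTE_* toggles restored in these two hunks: Transformer Engine reads these environment variables to decide which attention implementation to use, which is why the revert swaps the config-level +model.attention_backend override for shell-level switches. A minimal bash sketch of the assumed precedence (flash, then fused/cuDNN, then unfused; the defaults of "1" are an assumption based on TE's documented behavior, not NeMo source):

    # Sketch only: mirrors how the NVTE_* switches are commonly interpreted.
    if [ "${NVTE_FLASH_ATTN:-1}" = "1" ]; then
      echo "flash attention path enabled"
    elif [ "${NVTE_FUSED_ATTN:-1}" = "1" ]; then
      echo "fused (cuDNN) attention path enabled"
    else
      echo "falling back to unfused attention"
    fi

Under this reading, the fine-tuning job above forces the fused path (flash off, fused on) and the Mixtral job forces unfused attention (both off).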
79 changes: 50 additions & 29 deletions .github/workflows/import-test.yml
@@ -1,52 +1,73 @@
name: CI-Import-Check

on:
push:
pull_request:
paths:
- "**"

# Check https://hub.docker.com/r/pytorch/pytorch/tags for latest tags
jobs:
test-imports:
name: test-${{ matrix.collection }}-import-${{ matrix.os }}-py${{ matrix.python }}
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, macos-latest]
collection:
- asr
# - nlp # Currently broken
- tts
python: ['3.10', '3.11', '3.12']

test-asr-imports:
runs-on: ubuntu-latest
container:
image: pytorch/pytorch:2.4.0-cuda11.8-cudnn9-runtime
steps:
- name: Checkout repo
uses: actions/checkout@v2
- uses: actions/setup-python@v5
with:
python-version: '${{ matrix.python }}'
- name: Build wheel
- name: Update base dependencies
run: |
apt-get update && apt-get install -y build-essential
apt-get install -y libsndfile1 make
- name: Install nemo dependencies
id: nemo-wheel
run: |
pip install Cython
# install test requirements
pip install -r requirements/requirements_test.txt
# Build nemo as a wheel
pip install build
python -m build --wheel
python -m build --no-isolation --wheel
# Preserve wheel location
DIST_FILE=$(find ./dist -name "*.whl" | head -n 1)
echo "DIST_FILE=${DIST_FILE}" | tee -a "$GITHUB_OUTPUT"
- name: Install NeMo + test dependencies
echo "::set-output name=DIST_FILE::${DIST_FILE}"
- name: Test ASR Domain Imports
run: |
# Install NeMo Domain
pip install "${{ steps.nemo-wheel.outputs.DIST_FILE }}[asr]"
# Run import checks
python tests/core_ptl/check_imports.py --domain "asr"
# Uninstall NeMo
pip uninstall -y nemo_toolkit
test-tts-imports:
runs-on: ubuntu-latest
container:
image: pytorch/pytorch:2.4.0-cuda11.8-cudnn9-runtime
steps:
- name: Checkout repo
uses: actions/checkout@v2
- name: Update base dependencies
run: |
apt-get update && apt-get install -y build-essential
apt-get install -y libsndfile1 make
- name: Install nemo dependencies
id: nemo-wheel
run: |
pip install Cython
# install test requirements
pip install -r requirements/requirements_test.txt
# Install NeMo Domain
pip install "${{ steps.nemo-wheel.outputs.DIST_FILE }}[test,${{ matrix.collection }}]"
- name: Run ${{ matrix.collection }} checks
# Build nemo as a wheel
pip install build
python -m build --no-isolation --wheel
# Preserve wheel location
DIST_FILE=$(find ./dist -name "*.whl" | head -n 1)
echo "::set-output name=DIST_FILE::${DIST_FILE}"
- name: Test TTS Domain Imports
run: |
# Install NeMo Domain
pip install "${{ steps.nemo-wheel.outputs.DIST_FILE }}[tts]"
# Run import checks
python tests/core_ptl/check_imports.py --domain "${{ matrix.collection }}"

python tests/core_ptl/check_imports.py --domain "tts"
# Uninstall NeMo
pip uninstall -y nemo_toolkit
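The restored split jobs pass the wheel path between steps with the deprecated ::set-output workflow command, whereas the reverted matrix job wrote to the $GITHUB_OUTPUT file. Both mechanisms side by side, using the DIST_FILE output name from the steps above (sketch):

    DIST_FILE=$(find ./dist -name "*.whl" | head -n 1)
    echo "DIST_FILE=${DIST_FILE}" | tee -a "$GITHUB_OUTPUT"   # current GitHub Actions mechanism
    echo "::set-output name=DIST_FILE::${DIST_FILE}"          # deprecated form used by the restored jobs

Either way, a later step reads the value as ${{ steps.nemo-wheel.outputs.DIST_FILE }}, as both variants of this workflow do.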
30 changes: 19 additions & 11 deletions Dockerfile.ci
@@ -34,12 +34,17 @@ EOF
WORKDIR /workspace

# Install Mamba Dependency
ARG CAUSAL_CONV_TAG=v1.2.2.post1
ARG MAMBA_TAG=v2.2.0
ARG CAUSAL_CONV_TAG=v1.2.2.post1

RUN <<"EOF" bash -ex
# Mamba dependency installation
MAMBA_FORCE_BUILD=TRUE CAUSAL_CONV1D_FORCE_BUILD=TRUE pip3 install --no-cache-dir -v git+https://github.com/Dao-AILab/causal-conv1d.git@${CAUSAL_CONV_TAG} git+https://github.com/state-spaces/mamba.git@${MAMBA_TAG}

git clone --depth 1 --branch ${CAUSAL_CONV_TAG} https://github.com/Dao-AILab/causal-conv1d && \
cd causal-conv1d && \
python setup.py install && \
cd .. && \
rm -rf causal-conv1d

EOF

RUN pip install hatchling # needed to install nemo-run
@@ -49,6 +54,8 @@ RUN pip install nemo_run@git+https://github.com/NVIDIA/NeMo-Run.git@${NEMO_RUN_T
# Install NeMo requirements
ARG TE_TAG=7d576ed25266a17a7b651f2c12e8498f67e0baea
ARG MODELOPT_VERSION=0.21.0
ARG MCORE_TAG=bd677bfb13ac2f19deaa927adc6da6f9201d66aa

ARG APEX_TAG=810ffae374a2b9cb4b5c5e28eaeca7d7998fca0c
RUN \
--mount=type=bind,source=requirements,target=requirements \
@@ -58,22 +65,23 @@ RUN \
--mount=type=bind,source=nemo/__init__.py,target=nemo/__init__.py <<"EOF" bash -ex
pip install --no-cache-dir --no-build-isolation --extra-index-url https://pypi.nvidia.com \
"transformer-engine @ git+https://github.com/NVIDIA/TransformerEngine.git@${TE_TAG}" \
"megatron_core @ git+https://github.com/NVIDIA/Megatron-LM.git@${MCORE_TAG}" \
"nvidia-modelopt[torch]~=${MODELOPT_VERSION}" \
"apex @ git+https://github.com/NVIDIA/apex.git@${APEX_TAG}" \
"unstructured==0.14.9" \
"llama-index==0.10.43" \
"onnxscript @ git+https://github.com/microsoft/onnxscript" \
-r tools/ctc_segmentation/requirements.txt \
".[all]"
EOF

ARG MCORE_TAG=4dc8977167d71f86bdec47a60a98e85c4cfa0031
RUN <<"EOF" bash -ex
# Megatron-LM installation
git clone https://github.com/NVIDIA/Megatron-LM.git
pushd Megatron-LM
git checkout ${MCORE_TAG}
pip install -e .
# Megatron Core installation
git clone https://github.com/NVIDIA/Megatron-LM.git && \
pushd Megatron-LM && \
git checkout ${MCORE_TAG} && \
pushd megatron/core/datasets && \
make && \
popd && \
popd
export PYTHONPATH="${PYTHONPATH}:/workspace/Megatron-LM"

# Install nvidia-resiliency-ext
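The restored block installs Megatron-LM by cloning the repo, compiling the dataset helpers under megatron/core/datasets with make, and exposing the checkout through PYTHONPATH instead of pip install -e .. A quick import check for this kind of path-based install (a hypothetical verification step, not part of the Dockerfile):

    export PYTHONPATH="${PYTHONPATH}:/workspace/Megatron-LM"
    python -c "import megatron.core; print(megatron.core.__file__)"
    # Expect a path under /workspace/Megatron-LM rather than site-packages.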
3 changes: 2 additions & 1 deletion docs/source/nlp/information_retrieval.rst
@@ -70,7 +70,9 @@ Then you can fine-tune the sentence-BERT model using the following script:
VALIDATION_DATASET_PATH= # Path to validation dataset
SAVE_DIR= # where the checkpoint and logs are saved
mkdir -p $SAVE_DIR
export NVTE_FLASH_ATTN=0
export NVTE_ALLOW_NONDETERMINISTIC_ALGO=0
export NVTE_FUSED_ATTN=0
python NeMo/examples/nlp/information_retrieval/megatron_bert_embedding_finetuning.py \
--config-path=${CONFIG_PATH} \
@@ -85,7 +87,6 @@ Then you can fine-tune the sentence-BERT model using the following script:
model.post_process=False \
model.global_batch_size=8 \ # should be NUM_DEVICES * model.micro_batch_size
model.micro_batch_size=8 \
model.attention_backend="unfused" \
model.optim.lr=0.000005 \
model.optim.sched.min_lr=0.00000001 \
model.optim.sched.warmup_steps=100 \
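As the inline comment in the restored snippet notes, model.global_batch_size should equal NUM_DEVICES * model.micro_batch_size (assuming pure data parallelism with no gradient accumulation); with micro_batch_size=8 that implies a single device here. A sketch of the arithmetic, where NUM_DEVICES is a placeholder for your GPU count:

    NUM_DEVICES=1
    MICRO_BATCH_SIZE=8
    GLOBAL_BATCH_SIZE=$((NUM_DEVICES * MICRO_BATCH_SIZE))   # = 8, matching the config above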
1 change: 1 addition & 0 deletions nemo/collections/diffusion/scripts/train.sh
@@ -20,6 +20,7 @@
export WANDB_PROJECT=xxx
export WANDB_RUN_ID=xxx
export WANDB_RESUME=allow
export NVTE_FUSED_ATTN=0
export CUDA_DEVICE_MAX_CONNECTIONS=1
export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True

3 changes: 0 additions & 3 deletions nemo/collections/llm/gpt/model/gemma.py
@@ -18,7 +18,6 @@

import torch
from megatron.core import parallel_state
from megatron.core.transformer.enums import AttnBackend
from torch import nn

from nemo.collections.llm.fn.activation import openai_gelu
@@ -54,8 +53,6 @@ class GemmaConfig(GPTConfig):
# Legacy NeMo does not set layernorm_zero_centered_gamma and instead adds 1 in the HF -> NeMo conversion script
# The present implementation is more in line with the official implementation
layernorm_zero_centered_gamma: bool = True
# Disable cuDNN attention since TE 1.8 does not support head dim > 128
attention_backend: AttnBackend = AttnBackend.flash


@dataclass
2 changes: 2 additions & 0 deletions nemo/collections/llm/recipes/gemma_2b.py
@@ -51,6 +51,8 @@ def model() -> run.Config[pl.LightningModule]:
>>> model_config = model()
>>> print(model_config)
"""
# Disable cuDNN attention since TE 1.8 does not support head dim > 128
os.environ['NVTE_FUSED_ATTN'] = "0"
return run.Config(GemmaModel, config=run.Config(GemmaConfig2B))


4 changes: 4 additions & 0 deletions nemo/collections/llm/recipes/gemma_7b.py
@@ -51,6 +51,8 @@ def model() -> run.Config[pl.LightningModule]:
>>> model_config = model()
>>> print(model_config)
"""
# Disable cuDNN attention since TE 1.8 does not support head dim > 128
os.environ['NVTE_FUSED_ATTN'] = "0"
return run.Config(GemmaModel, config=run.Config(GemmaConfig7B))


@@ -171,6 +173,8 @@ def pretrain_recipe(
For more details on pre-training LLMs with NeMo, see the pre-training
guide in the `examples/llm/pretrain/` directory.
"""
# Disable cuDNN attention since TE 1.8 does not support head dim > 128
os.environ['NVTE_FUSED_ATTN'] = "0"

return run.Partial(
fn,
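Both Gemma recipes set NVTE_FUSED_ATTN inside the process via os.environ, which presumably must run before Transformer Engine picks its attention backend. The shell-level equivalent when launching a recipe yourself (sketch; the script name is illustrative):

    export NVTE_FUSED_ATTN=0   # disable cuDNN fused attention (TE 1.8 head dim > 128 limit)
    python my_gemma_pretrain.py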
(additional changed file; path not shown in this view)
@@ -50,7 +50,6 @@
try:
from megatron.core import ModelParallelConfig, parallel_state
from megatron.core.distributed import DistributedDataParallel as McoreDDP
from megatron.core.transformer.enums import AttnBackend
from megatron.core.transformer.module import Float16Module as MCoreFloat16Module
from megatron.core.transformer.transformer_config import TransformerConfig
from megatron.core.utils import init_method_normal, scaled_init_method_normal
@@ -538,9 +537,6 @@ def build_transformer_config(self) -> TransformerConfig:

tp_only_amax_red = self.cfg.get('tp_only_amax_red', False)

attention_backend = self.cfg.get('attention_backend', "auto")
attention_backend = AttnBackend[attention_backend]

# any configs that are not in the nemo model config will be added here
config_mapping = {
'apply_query_key_layer_scaling': apply_query_key_layer_scaling,
@@ -565,7 +561,6 @@ def build_transformer_config(self) -> TransformerConfig:
'rotary_interleaved': rotary_interleaved,
'deallocate_pipeline_outputs': True,
'tp_only_amax_red': tp_only_amax_red,
'attention_backend': attention_backend,
}

# populate the transformer config dict
(additional changed file; path not shown in this view)
@@ -76,7 +76,6 @@
from megatron.core.models.retro.utils import get_config_path as get_retro_config_path
from megatron.core.models.retro.utils import get_gpt_data_dir as get_retro_data_dir
from megatron.core.pipeline_parallel.schedules import get_forward_backward_func
from megatron.core.transformer.enums import AttnBackend
from megatron.core.transformer.module import Float16Module as MCoreFloat16Module
from megatron.core.transformer.transformer_config import TransformerConfig
from megatron.core.utils import init_method_normal, scaled_init_method_normal
@@ -432,8 +431,6 @@ def build_retro_config(self) -> RetroConfig:

te_version = packaging.version.Version(version("transformer-engine"))
if te_version >= packaging.version.Version("1.3"):
if HAVE_MEGATRON_CORE:
retro_config.attention_backend = AttnBackend.unfused
try:
os.environ["NVTE_FLASH_ATTN"] = "0"
os.environ["NVTE_FUSED_ATTN"] = "0"
4 changes: 2 additions & 2 deletions nemo/collections/vlm/mllama/model/language.py
@@ -390,7 +390,7 @@ def sharded_state_dict(
layer_prefix = f'{prefix}layers.'
num_layers = self.config.num_layers
for layer in self.layers:
offset = layer._get_layer_offset(layer.config)
offset = layer._get_layer_offset()
global_layer_offset = layer.layer_number - 1 # self.layer_number starts at 1
state_dict_prefix = f'{layer_prefix}{global_layer_offset - offset}.' # module list index in TransformerBlock # pylint: disable=line-too-long
sharded_prefix = layer_prefix
@@ -403,7 +403,7 @@ def sharded_state_dict(
for xlayer in self.xattn_layers:
if isinstance(xlayer, DummyCrossAttentionTransformerLayer):
continue
offset = xlayer._get_layer_offset(xlayer.config)
offset = xlayer._get_layer_offset()
global_layer_offset = xlayer.layer_number - 1
state_dict_prefix = f'{xlayer_prefix}{global_layer_offset - offset}.' # module list index in TransformerBlock # pylint: disable=line-too-long
sharded_prefix = f'{xlayer_prefix}{global_layer_offset}.'
2 changes: 1 addition & 1 deletion nemo/lightning/pytorch/callbacks/peft.py
@@ -448,7 +448,7 @@ def load_checkpoint(
if getattr(path, "base_model_path", None):
## PEFT Resume, FIRST TIME
self.adapter_ckpt_path = Path(str(path))
adapter_ckpt = self.checkpoint_io.load_checkpoint(path, sharded_state_dict={}) # Loads only metadata
adapter_ckpt = self.checkpoint_io.load_checkpoint(path) # Loads only metadata
# path is adapter path to restore the training metadata, but switch to loading base model here.
path = self.model_ckpt_path = path.base_model_path
elif adapter_meta_path.exists():
3 changes: 3 additions & 0 deletions pyproject.toml
@@ -60,6 +60,9 @@ classifiers = [
"Topic :: Utilities",
]

[tool.setuptools.dynamic]
dependencies = { file = ["requirements/requirements.txt"] }

[tool.setuptools]
py-modules = ["nemo"]

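The new [tool.setuptools.dynamic] table makes setuptools read the package's install dependencies from requirements/requirements.txt at build time rather than from a static list in pyproject.toml. One way to sanity-check the resulting metadata (sketch; assumes the project builds as nemo_toolkit):

    python -m build --wheel
    pip install dist/*.whl
    pip show nemo_toolkit   # "Requires:" should mirror requirements/requirements.txt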
1 change: 1 addition & 0 deletions requirements/requirements_nlp.txt
@@ -8,6 +8,7 @@ gdown
h5py
ijson
jieba
mamba-ssm==2.2.2; sys_platform == 'linux'
markdown2
matplotlib>=3.3.2
#megatron_core>0.6.0 # add back once mcore on pypi is compatible again
3 changes: 1 addition & 2 deletions scripts/checkpoint_converters/convert_bert_hf_to_nemo.py
@@ -84,8 +84,6 @@ def convert(args):
nemo_config.model = adjust_nemo_config(nemo_config.model, hf_model.config.to_dict(), mcore_bert=args.mcore)

nemo_config.trainer["precision"] = args.precision
# Bert doesn't support FLASH_ATTN
nemo_config.model["attention_backend"] = "fused"
trainer = MegatronTrainerBuilder(nemo_config).create_trainer()
model = MegatronBertModel(nemo_config.model, trainer)

@@ -290,5 +288,6 @@ def convert(args):


if __name__ == '__main__':
os.environ['NVTE_FLASH_ATTN'] = '0' # Bert doesn't support FLASH_ATTN
args = get_args()
convert(args)
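With the export moved into the script's __main__ block, flash attention is disabled before any attention code runs, replacing the removed attention_backend config override. The shell-prefix alternative, had the script not been patched (sketch; remaining arguments elided):

    NVTE_FLASH_ATTN=0 python scripts/checkpoint_converters/convert_bert_hf_to_nemo.py ...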
(additional changed file; path not shown in this view)
@@ -17,7 +17,6 @@

import torch
from megatron.core.distributed import DistributedDataParallelConfig as McoreDDPConfig
from megatron.core.transformer.enums import AttnBackend
from megatron.core.utils import init_method_normal, scaled_init_method_normal

from nemo.collections.llm import MixtralConfig8x7B, MixtralModel, PreTrainingDataModule
@@ -103,7 +102,6 @@ def main(args):
bias_dropout_fusion=True,
apply_rope_fusion=True,
distribute_saved_activations=False,
attention_backend=AttnBackend.unfused,
)

data = PreTrainingDataModule(
4 changes: 2 additions & 2 deletions tests/collections/llm/bitexact/mixtral/run.sh
@@ -8,7 +8,7 @@ MCORE_OUTPUT_PATH="/tmp/bex_mixtral_mcore_output/"
NEMO_OUTPUT_PATH="/tmp/bex_mixtral_nemo_output/"

# Run Mcore
CUDA_DEVICE_MAX_CONNECTIONS=1 CUDA_LAUNCH_BLOCKING=1 TORCH_COMPILE_DISABLE=1 \
CUDA_DEVICE_MAX_CONNECTIONS=1 CUDA_LAUNCH_BLOCKING=1 TORCH_COMPILE_DISABLE=1 NVTE_FLASH_ATTN=0 NVTE_FUSED_ATTN=0 \
torchrun --nproc-per-node 1 --nnodes 1 /workspace/Megatron-LM/pretrain_gpt.py \
--apply-layernorm-1p --rotary-percent 1.0 --rotary-base 1000000 \
--no-position-embedding --position-embedding-type rope \
@@ -30,7 +30,7 @@ torchrun --nproc-per-node 1 --nnodes 1 /workspace/Megatron-LM/pretrain_gpt.py \
--split 99,1,0 --log-interval 10 --save-interval 20000 --eval-interval 1000 --eval-iters 32 \
--save "$MCORE_OUTPUT_PATH" \
--log-num-zeros-in-grad --distributed-timeout-minutes 6000 --moe-router-topk 1 --num-experts 2 \
--moe-router-pre-softmax --expert-model-parallel-size 1 --eval-iters=0 --attention-backend unfused
--moe-router-pre-softmax --expert-model-parallel-size 1 --eval-iters=0

# Run NeMo
CUDA_LAUNCH_BLOCKING=1 TORCH_COMPILE_DISABLE=1 NVTE_FLASH_ATTN=0 NVTE_FUSED_ATTN=0 \
(diff truncated; remaining changed files not loaded)