From fc2cbac9095075b5af094266e90d23370f6ff0d5 Mon Sep 17 00:00:00 2001 From: Selvaraj Anandaraj Date: Tue, 31 Oct 2023 13:39:01 -0700 Subject: [PATCH 001/296] Dataloader optimization to avoid synchronous pageable host to devivce copy --- megatron/core/datasets/gpt_dataset.py | 28 ++++++++++++++-- megatron/training.py | 2 +- megatron/utils.py | 48 +++++++++++---------------- pretrain_gpt.py | 26 ++------------- 4 files changed, 47 insertions(+), 57 deletions(-) diff --git a/megatron/core/datasets/gpt_dataset.py b/megatron/core/datasets/gpt_dataset.py index 1004e649a2..0198fed47d 100644 --- a/megatron/core/datasets/gpt_dataset.py +++ b/megatron/core/datasets/gpt_dataset.py @@ -8,6 +8,10 @@ import numpy import torch +from megatron import get_args +from megatron import get_tokenizer +from megatron.utils import get_ltor_masks_and_position_ids + from megatron.core.datasets.blended_megatron_dataset_config import GPTDatasetConfig from megatron.core.datasets.indexed_dataset import MMapIndexedDataset from megatron.core.datasets.megatron_dataset import MegatronDataset @@ -63,7 +67,7 @@ def __len__(self) -> int: """ return self.sample_index.shape[0] - 1 - def __getitem__(self, idx: int) -> Dict[str, numpy.ndarray]: + def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]: """Abstract method implementation Args: @@ -74,10 +78,28 @@ def __getitem__(self, idx: int) -> Dict[str, numpy.ndarray]: dictionary """ text, document_ids = self._query_document_sample_shuffle_indices(idx) + + text = torch.from_numpy(text) + document_ids = torch.from_numpy(document_ids) + + args = get_args() + tokenizer = get_tokenizer() + + tokens_ = text.long() + labels = tokens_[1:].contiguous() + tokens = tokens_[:-1].contiguous() + + attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids( + tokens, + tokenizer.eod, + args.reset_position_ids, + args.reset_attention_mask, + args.eod_mask_loss) + if getattr(self.config, "return_document_ids"): - return {"text": text, "document_ids": document_ids} + return {"tokens": tokens,"labels": labels,"attention_mask": attention_mask,"loss_mask": loss_mask,"position_ids": position_ids,"document_ids": document_ids} else: - return {"text": text} + return {"tokens": tokens,"labels": labels,"attention_mask": attention_mask,"loss_mask": loss_mask,"position_ids": position_ids} @staticmethod def is_multimodal() -> bool: diff --git a/megatron/training.py b/megatron/training.py index c83f40c048..631568829e 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -1036,7 +1036,7 @@ def build_train_valid_test_data_loaders( is_distributed = getattr(build_train_valid_test_datasets_provider, "is_distributed", False) # Construct the data pipeline - if is_distributed or mpu.get_tensor_model_parallel_rank() == 0: + if is_distributed or mpu.is_pipeline_first_stage() or mpu.is_pipeline_last_stage(): # Build datasets. train_ds, valid_ds, test_ds = build_train_valid_test_datasets( diff --git a/megatron/utils.py b/megatron/utils.py index 717c77ec74..98de5b470e 100644 --- a/megatron/utils.py +++ b/megatron/utils.py @@ -167,51 +167,41 @@ def get_ltor_masks_and_position_ids(data, """Build masks and position id for left to right model.""" # Extract batch size and sequence length. - micro_batch_size, seq_length = data.size() + seq_length = data.numel() - # Attention mask (lower triangular). 
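Patch 001 moves mask and position-id construction out of the training step and into the per-sample data path, which is why the hunk here drops the micro-batch dimension from get_ltor_masks_and_position_ids (seq_length = data.numel() instead of data.size()). A standalone sketch of what the per-sample variant computes, with the EOD-reset options off; illustrative only, not part of the patch:

import torch

def per_sample_ltor_masks(tokens, eod_token, eod_mask_loss=True):
    seq_length = tokens.numel()
    # Causal (lower-triangular) mask: position i may attend to positions <= i.
    attention_mask = torch.tril(torch.ones(seq_length, seq_length)).unsqueeze(0)
    attention_mask = attention_mask < 0.5          # True marks masked-out positions
    # Loss mask: optionally drop EOD tokens from the loss.
    loss_mask = torch.ones(seq_length, dtype=torch.float)
    if eod_mask_loss:
        loss_mask[tokens == eod_token] = 0.0
    position_ids = torch.arange(seq_length, dtype=torch.long)
    return attention_mask, loss_mask, position_ids

masks = per_sample_ltor_masks(torch.tensor([5, 6, 7, 0, 8]), eod_token=0)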
- if reset_attention_mask: - att_mask_batch = micro_batch_size - else: - att_mask_batch = 1 - attention_mask = torch.tril(torch.ones( - (att_mask_batch, seq_length, seq_length), device=data.device)).view( - att_mask_batch, 1, seq_length, seq_length) + attention_mask = torch.tril(torch.ones((seq_length, seq_length),device=data.device)).unsqueeze(0) # Loss mask. - loss_mask = torch.ones(data.size(), dtype=torch.float, device=data.device) + loss_mask = torch.ones(seq_length, dtype=torch.float, device=data.device) if eod_mask_loss: loss_mask[data == eod_token] = 0.0 # Position ids. position_ids = torch.arange(seq_length, dtype=torch.long, device=data.device) - position_ids = position_ids.unsqueeze(0).expand_as(data) # We need to clone as the ids will be modifed based on batch index. if reset_position_ids: position_ids = position_ids.clone() if reset_position_ids or reset_attention_mask: - # Loop through the batches: - for b in range(micro_batch_size): - # Find indecies where EOD token is. - eod_index = position_ids[b, data[b] == eod_token] - # Detach indecies from positions if going to modify positions. + # Find indecies where EOD token is. + eod_index = position_ids[data[b] == eod_token] + # Detach indecies from positions if going to modify positions. + if reset_position_ids: + eod_index = eod_index.clone() + + # Loop through EOD indecies: + prev_index = 0 + for j in range(eod_index.numel()): + i = eod_index[j] + # Mask attention loss. + if reset_attention_mask: + attention_mask[ 0, (i + 1):, :(i + 1)] = 0 + # Reset positions. if reset_position_ids: - eod_index = eod_index.clone() - - # Loop through EOD indecies: - prev_index = 0 - for j in range(eod_index.size()[0]): - i = eod_index[j] - # Mask attention loss. - if reset_attention_mask: - attention_mask[b, 0, (i + 1):, :(i + 1)] = 0 - # Reset positions. - if reset_position_ids: - position_ids[b, (i + 1):] -= (i + 1 - prev_index) - prev_index = i + 1 + position_ids[ (i + 1):] -= (i + 1 - prev_index) + prev_index = i + 1 # Convert attention mask to binary: attention_mask = (attention_mask < 0.5) diff --git a/pretrain_gpt.py b/pretrain_gpt.py index ff3bf6ba98..566010f001 100644 --- a/pretrain_gpt.py +++ b/pretrain_gpt.py @@ -87,34 +87,12 @@ def get_batch(data_iterator): if (not mpu.is_pipeline_first_stage()) and (not mpu.is_pipeline_last_stage()): return None, None, None, None, None - args = get_args() - tokenizer = get_tokenizer() - - # Items and their type. - keys = ['text'] - datatype = torch.int64 - - # Broadcast data. if data_iterator is not None: data = next(data_iterator) else: data = None - data_b = tensor_parallel.broadcast_data(keys, data, datatype) - - # Unpack. - tokens_ = data_b['text'].long() - labels = tokens_[:, 1:].contiguous() - tokens = tokens_[:, :-1].contiguous() - - # Get the masks and postition ids. - attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids( - tokens, - tokenizer.eod, - args.reset_position_ids, - args.reset_attention_mask, - args.eod_mask_loss) - return tokens, labels, loss_mask, attention_mask, position_ids + return data["tokens"].cuda(non_blocking = True), data["labels"].cuda(non_blocking = True), data["loss_mask"].cuda(non_blocking = True), data["attention_mask"].cuda(non_blocking = True), data["position_ids"].cuda(non_blocking = True) def loss_func(loss_mask: Tensor, output_tensor: Tensor): """Loss function. 
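The rewritten get_batch above moves every field to the GPU with .cuda(non_blocking=True). Such a copy only overlaps with queued GPU work when the source host tensor is pinned (page-locked); from pageable memory it degrades to the synchronous copy the commit subject is trying to avoid, which is why the pretraining DataLoader keeps pin_memory=True. A minimal self-contained sketch of the pattern; the dataset and names below are illustrative, not Megatron-LM APIs:

import torch
from torch.utils.data import DataLoader, TensorDataset

fake_tokens = torch.randint(0, 32000, (64, 129))          # stand-in token ids
loader = DataLoader(TensorDataset(fake_tokens), batch_size=4, pin_memory=True)

if torch.cuda.is_available():
    for (tokens,) in loader:
        # Pinned source + non_blocking=True lets this host-to-device copy run
        # asynchronously instead of stalling the training loop.
        tokens = tokens.cuda(non_blocking=True)
        break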
@@ -165,7 +143,7 @@ def forward_step(data_iterator, model: GPTModel): def is_dataset_built_on_rank(): - return (mpu.is_pipeline_first_stage() or mpu.is_pipeline_last_stage()) and mpu.get_tensor_model_parallel_rank() == 0 + return (mpu.is_pipeline_first_stage() or mpu.is_pipeline_last_stage()) def core_gpt_dataset_config_from_args(args): From cb03f3376f800165f849216c9f49bec25974a621 Mon Sep 17 00:00:00 2001 From: Selvaraj Anandaraj Date: Wed, 1 Nov 2023 21:08:50 -0700 Subject: [PATCH 002/296] Added a custom torch.split implementation to avoid a redundant cat operation Signed-off-by: Selvaraj Anandaraj --- megatron/core/transformer/attention.py | 6 ++++-- .../core/transformer/custom_layers/transformer_engine.py | 3 +++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/megatron/core/transformer/attention.py b/megatron/core/transformer/attention.py index a63b9f00a0..2b6f528952 100644 --- a/megatron/core/transformer/attention.py +++ b/megatron/core/transformer/attention.py @@ -12,6 +12,7 @@ from megatron.core.transformer.identity_op import IdentityFuncOp, IdentityOp from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.spec_utils import ModuleSpec, build_module +from megatron.core.transformer.custom_layers.transformer_engine import SplitAlongDim from megatron.core.transformer.transformer_config import TransformerConfig from megatron.core.utils import divide @@ -318,8 +319,9 @@ def get_query_key_value_tensors(self, hidden_states, key_value_states=None): mixed_qkv = mixed_qkv.view(*new_tensor_shape) # [sq, b, ng, (np/ng + 2) * hn] --> [sq, b, ng, np/ng * hn], [sq, b, ng, hn], [sq, b, ng, hn] - (query, key, value) = torch.split( + (query, key, value) = SplitAlongDim( mixed_qkv, + 3, [ ( self.num_attention_heads_per_partition @@ -329,8 +331,8 @@ def get_query_key_value_tensors(self, hidden_states, key_value_states=None): self.hidden_size_per_attention_head, self.hidden_size_per_attention_head, ], - dim=3, ) + # [sq, b, ng, np/ng * hn] -> [sq, b, np, hn] query = query.reshape(query.size(0), query.size(1), -1, self.hidden_size_per_attention_head) diff --git a/megatron/core/transformer/custom_layers/transformer_engine.py b/megatron/core/transformer/custom_layers/transformer_engine.py index 957187645d..6507e75b2d 100644 --- a/megatron/core/transformer/custom_layers/transformer_engine.py +++ b/megatron/core/transformer/custom_layers/transformer_engine.py @@ -3,6 +3,7 @@ import torch import transformer_engine as te +from transformer_engine.pytorch.attention import _SplitAlongDim from pkg_resources import packaging from megatron.core.parallel_state import ( @@ -350,3 +351,5 @@ def forward(self, x): if isinstance(out, (list, tuple)): return out return out, None + +SplitAlongDim = _SplitAlongDim.apply From 8127d2a9d9229d19e3be3bf55cfabc0aa28bf0c7 Mon Sep 17 00:00:00 2001 From: Selvaraj Anandaraj Date: Tue, 7 Nov 2023 17:54:05 -0800 Subject: [PATCH 003/296] Building on TP rank 0 and broadcasting the datasets to other TP ranks Signed-off-by: Selvaraj Anandaraj --- .../blended_megatron_dataset_config.py | 4 + megatron/core/datasets/gpt_dataset.py | 66 +++++++-- megatron/data/data_samplers.py | 4 +- megatron/training.py | 2 +- megatron/utils.py | 133 +++++++++++++++--- pretrain_gpt.py | 24 ++-- 6 files changed, 185 insertions(+), 48 deletions(-) diff --git a/megatron/core/datasets/blended_megatron_dataset_config.py b/megatron/core/datasets/blended_megatron_dataset_config.py index b7e242a4be..390cc50620 100644 --- 
a/megatron/core/datasets/blended_megatron_dataset_config.py +++ b/megatron/core/datasets/blended_megatron_dataset_config.py @@ -97,6 +97,10 @@ class GPTDatasetConfig(BlendedMegatronDatasetConfig): """ return_document_ids: bool = False + reset_position_ids: bool = False + reset_attention_mask: bool = False + eod_mask_loss: bool = False + eod_id: int = 0 def _parse_and_normalize_split(split: str) -> List[float]: diff --git a/megatron/core/datasets/gpt_dataset.py b/megatron/core/datasets/gpt_dataset.py index 0198fed47d..3f03b2e8d3 100644 --- a/megatron/core/datasets/gpt_dataset.py +++ b/megatron/core/datasets/gpt_dataset.py @@ -8,10 +8,6 @@ import numpy import torch -from megatron import get_args -from megatron import get_tokenizer -from megatron.utils import get_ltor_masks_and_position_ids - from megatron.core.datasets.blended_megatron_dataset_config import GPTDatasetConfig from megatron.core.datasets.indexed_dataset import MMapIndexedDataset from megatron.core.datasets.megatron_dataset import MegatronDataset @@ -82,19 +78,16 @@ def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]: text = torch.from_numpy(text) document_ids = torch.from_numpy(document_ids) - args = get_args() - tokenizer = get_tokenizer() - tokens_ = text.long() labels = tokens_[1:].contiguous() tokens = tokens_[:-1].contiguous() - attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids( + attention_mask, loss_mask, position_ids = _get_ltor_masks_and_position_ids( tokens, - tokenizer.eod, - args.reset_position_ids, - args.reset_attention_mask, - args.eod_mask_loss) + getattr(self.config,"eod_id"), + getattr(self.config,"reset_position_ids"), + getattr(self.config,"reset_attention_mask"), + getattr(self.config,"eod_mask_loss")) if getattr(self.config, "return_document_ids"): return {"tokens": tokens,"labels": labels,"attention_mask": attention_mask,"loss_mask": loss_mask,"position_ids": position_ids,"document_ids": document_ids} @@ -480,3 +473,52 @@ def _build_shuffle_index( numpy_random_state.shuffle(shuffle_idx_last) return numpy.concatenate((shuffle_idx_first, shuffle_idx_last)) + +def _get_ltor_masks_and_position_ids(data, + eod_token, + reset_position_ids, + reset_attention_mask, + eod_mask_loss): + """Build masks and position id for left to right model.""" + + # Extract batch size and sequence length. + seq_length = data.numel() + + attention_mask = torch.tril(torch.ones((seq_length, seq_length),device=data.device)).unsqueeze(0) + + # Loss mask. + loss_mask = torch.ones(seq_length, dtype=torch.float, device=data.device) + if eod_mask_loss: + loss_mask[data == eod_token] = 0.0 + + # Position ids. + position_ids = torch.arange(seq_length, dtype=torch.long, + device=data.device) + # We need to clone as the ids will be modifed based on batch index. + if reset_position_ids: + position_ids = position_ids.clone() + + if reset_position_ids or reset_attention_mask: + + # Find indecies where EOD token is. + eod_index = position_ids[data[b] == eod_token] + # Detach indecies from positions if going to modify positions. + if reset_position_ids: + eod_index = eod_index.clone() + + # Loop through EOD indecies: + prev_index = 0 + for j in range(eod_index.numel()): + i = eod_index[j] + # Mask attention loss. + if reset_attention_mask: + attention_mask[ 0, (i + 1):, :(i + 1)] = 0 + # Reset positions. 
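# Editorial worked example, not part of the patch: for one packed sample
# tokens = [a, b, EOD, c, d], the only EOD sits at index 2, so the loop runs
# once with i = 2 and, when the reset flags are set,
#   position_ids change from [0, 1, 2, 3, 4] to [0, 1, 2, 0, 1], and
#   attention_mask[0, 3:, :3] is zeroed,
# i.e. the second packed document restarts its positions and can no longer
# attend to tokens of the first one.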
+ if reset_position_ids: + position_ids[ (i + 1):] -= (i + 1 - prev_index) + prev_index = i + 1 + + # Convert attention mask to binary: + attention_mask = (attention_mask < 0.5) + + return attention_mask, loss_mask, position_ids diff --git a/megatron/data/data_samplers.py b/megatron/data/data_samplers.py index 8dec2c1922..85af2e0872 100644 --- a/megatron/data/data_samplers.py +++ b/megatron/data/data_samplers.py @@ -43,7 +43,9 @@ def build_pretraining_data_loader(dataset, consumed_samples): return torch.utils.data.DataLoader(dataset, batch_sampler=batch_sampler, num_workers=args.num_workers, - pin_memory=True) + pin_memory=True, + persistent_workers=True if args.num_workers > 0 else False, + ) class MegatronPretrainingSampler: diff --git a/megatron/training.py b/megatron/training.py index 30990e9189..7533a9c983 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -1055,7 +1055,7 @@ def build_train_valid_test_data_loaders( is_distributed = getattr(build_train_valid_test_datasets_provider, "is_distributed", False) # Construct the data pipeline - if is_distributed or mpu.is_pipeline_first_stage() or mpu.is_pipeline_last_stage(): + if is_distributed or mpu.get_tensor_model_parallel_rank() == 0: # Build datasets. train_ds, valid_ds, test_ds = build_train_valid_test_datasets( diff --git a/megatron/utils.py b/megatron/utils.py index c5a4774b87..2c585c674e 100644 --- a/megatron/utils.py +++ b/megatron/utils.py @@ -167,41 +167,51 @@ def get_ltor_masks_and_position_ids(data, """Build masks and position id for left to right model.""" # Extract batch size and sequence length. - seq_length = data.numel() + micro_batch_size, seq_length = data.size() - attention_mask = torch.tril(torch.ones((seq_length, seq_length),device=data.device)).unsqueeze(0) + # Attention mask (lower triangular). + if reset_attention_mask: + att_mask_batch = micro_batch_size + else: + att_mask_batch = 1 + attention_mask = torch.tril(torch.ones( + (att_mask_batch, seq_length, seq_length), device=data.device)).view( + att_mask_batch, 1, seq_length, seq_length) # Loss mask. - loss_mask = torch.ones(seq_length, dtype=torch.float, device=data.device) + loss_mask = torch.ones(data.size(), dtype=torch.float, device=data.device) if eod_mask_loss: loss_mask[data == eod_token] = 0.0 # Position ids. position_ids = torch.arange(seq_length, dtype=torch.long, device=data.device) + position_ids = position_ids.unsqueeze(0).expand_as(data) # We need to clone as the ids will be modifed based on batch index. if reset_position_ids: position_ids = position_ids.clone() if reset_position_ids or reset_attention_mask: + # Loop through the batches: + for b in range(micro_batch_size): - # Find indecies where EOD token is. - eod_index = position_ids[data[b] == eod_token] - # Detach indecies from positions if going to modify positions. - if reset_position_ids: - eod_index = eod_index.clone() - - # Loop through EOD indecies: - prev_index = 0 - for j in range(eod_index.numel()): - i = eod_index[j] - # Mask attention loss. - if reset_attention_mask: - attention_mask[ 0, (i + 1):, :(i + 1)] = 0 - # Reset positions. + # Find indecies where EOD token is. + eod_index = position_ids[b, data[b] == eod_token] + # Detach indecies from positions if going to modify positions. if reset_position_ids: - position_ids[ (i + 1):] -= (i + 1 - prev_index) - prev_index = i + 1 + eod_index = eod_index.clone() + + # Loop through EOD indecies: + prev_index = 0 + for j in range(eod_index.size()[0]): + i = eod_index[j] + # Mask attention loss. 
+ if reset_attention_mask: + attention_mask[b, 0, (i + 1):, :(i + 1)] = 0 + # Reset positions. + if reset_position_ids: + position_ids[b, (i + 1):] -= (i + 1 - prev_index) + prev_index = i + 1 # Convert attention mask to binary: attention_mask = (attention_mask < 0.5) @@ -259,3 +269,88 @@ def print_rank_last(message): print(message, flush=True) else: print(message, flush=True) + + +def get_batch_on_this_tp_rank(data_iterator): + + args = get_args() + + if mpu.get_tensor_model_parallel_rank() == 0: + + if data_iterator is not None: + data = next(data_iterator) + else: + data = None + + batch = { + 'tokens': data["tokens"].cuda(non_blocking = True), + 'labels': data["labels"].cuda(non_blocking = True), + 'loss_mask': data["loss_mask"].cuda(non_blocking = True), + 'attention_mask': data["attention_mask"].cuda(non_blocking = True), + 'position_ids': data["position_ids"].cuda(non_blocking = True) + } + + if args.pipeline_model_parallel_size == 1: + torch.distributed.broadcast(batch['tokens'], mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) + torch.distributed.broadcast(batch['labels'], mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) + torch.distributed.broadcast(batch['loss_mask'], mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) + torch.distributed.broadcast(batch['attention_mask'], mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) + torch.distributed.broadcast(batch['position_ids'], mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) + + elif mpu.is_pipeline_first_stage(): + torch.distributed.broadcast(batch['tokens'], mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) + torch.distributed.broadcast(batch['attention_mask'], mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) + torch.distributed.broadcast(batch['position_ids'], mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) + + elif mpu.is_pipeline_last_stage(): + torch.distributed.broadcast(batch['labels'], mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) + torch.distributed.broadcast(batch['loss_mask'], mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) + torch.distributed.broadcast(batch['attention_mask'], mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) + + + else: + + if args.pipeline_model_parallel_size == 1: + tokens=torch.empty((args.micro_batch_size,args.seq_length), dtype = torch.int64 , device = torch.cuda.current_device()) + labels=torch.empty((args.micro_batch_size,args.seq_length), dtype = torch.int64 , device = torch.cuda.current_device()) + loss_mask=torch.empty((args.micro_batch_size,args.seq_length), dtype = torch.float32 , device = torch.cuda.current_device()) + attention_mask=torch.empty((args.micro_batch_size,args.micro_batch_size,args.seq_length,args.seq_length), dtype = torch.bool , device = torch.cuda.current_device()) + position_ids=torch.empty((args.micro_batch_size,args.seq_length), dtype = torch.int64 , device = torch.cuda.current_device()) + + torch.distributed.broadcast(tokens, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) + torch.distributed.broadcast(labels, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) + 
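# Editorial note, not part of the patch: on every TP rank other than rank 0
# there is no DataLoader at all; each field is allocated as an empty GPU
# tensor of the expected shape and dtype and is then filled in place by the
# torch.distributed.broadcast from the tensor-parallel source rank, so the
# whole TP group ends up with an identical batch while only rank 0 reads the
# dataset. Which fields are exchanged depends on the pipeline stage (first
# stages need inputs, last stages need labels and the loss mask).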
torch.distributed.broadcast(loss_mask, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) + torch.distributed.broadcast(attention_mask, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) + torch.distributed.broadcast(position_ids, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) + + elif mpu.is_pipeline_first_stage(): + tokens=torch.empty((args.micro_batch_size,args.seq_length), dtype = torch.int64 , device = torch.cuda.current_device()) + labels=None + loss_mask=None + attention_mask=torch.empty((args.micro_batch_size,args.micro_batch_size,args.seq_length,args.seq_length), dtype = torch.bool , device = torch.cuda.current_device()) + position_ids=torch.empty((args.micro_batch_size,args.seq_length), dtype = torch.int64 , device = torch.cuda.current_device()) + + torch.distributed.broadcast(tokens, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) + torch.distributed.broadcast(attention_mask, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) + torch.distributed.broadcast(position_ids, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) + + elif mpu.is_pipeline_last_stage(): + tokens=None + labels=torch.empty((args.micro_batch_size,args.seq_length), dtype = torch.int64 , device = torch.cuda.current_device()) + loss_mask=torch.empty((args.micro_batch_size,args.seq_length), dtype = torch.float32 , device = torch.cuda.current_device()) + attention_mask=torch.empty((args.micro_batch_size,args.micro_batch_size,args.seq_length,args.seq_length), dtype = torch.bool , device = torch.cuda.current_device()) + position_ids=None + + torch.distributed.broadcast(labels, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) + torch.distributed.broadcast(loss_mask, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) + torch.distributed.broadcast(attention_mask, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) + + batch = { + 'tokens': tokens, + 'labels': labels, + 'loss_mask': loss_mask, + 'attention_mask': attention_mask, + 'position_ids': position_ids + } + + return batch diff --git a/pretrain_gpt.py b/pretrain_gpt.py index 3b0e0f205f..0ef257587b 100644 --- a/pretrain_gpt.py +++ b/pretrain_gpt.py @@ -20,8 +20,8 @@ from megatron.training import pretrain from megatron.core.transformer.spec_utils import import_module from megatron.utils import ( - get_ltor_masks_and_position_ids, get_batch_on_this_cp_rank, + get_batch_on_this_tp_rank, average_losses_across_data_parallel_group ) from megatron.arguments import core_transformer_config_from_args @@ -91,18 +91,8 @@ def get_batch(data_iterator): if (not mpu.is_pipeline_first_stage()) and (not mpu.is_pipeline_last_stage()): return None, None, None, None, None - if data_iterator is not None: - data = next(data_iterator) - else: - data = None - - batch = { - 'tokens': data["tokens"].cuda(non_blocking = True), - 'labels': data["labels"].cuda(non_blocking = True), - 'loss_mask': data["loss_mask"].cuda(non_blocking = True), - 'attention_mask': data["attention_mask"].cuda(non_blocking = True), - 'position_ids': data["position_ids"].cuda(non_blocking = True) - } + # get batches based on the TP rank you are on + batch = get_batch_on_this_tp_rank(data_iterator) # slice batch along sequence dimension for context parallelism batch = 
get_batch_on_this_cp_rank(batch) @@ -164,7 +154,7 @@ def forward_step(data_iterator, model: GPTModel): def is_dataset_built_on_rank(): - return (mpu.is_pipeline_first_stage() or mpu.is_pipeline_last_stage()) + return (mpu.is_pipeline_first_stage() or mpu.is_pipeline_last_stage()) and mpu.get_tensor_model_parallel_rank() == 0 def core_gpt_dataset_config_from_args(args): @@ -176,7 +166,11 @@ def core_gpt_dataset_config_from_args(args): blend_per_split=[args.train_data_path, args.valid_data_path, args.test_data_path], split=args.split, path_to_cache=args.data_cache_path, - return_document_ids=args.retro_return_doc_ids + return_document_ids=args.retro_return_doc_ids, + reset_position_ids=args.reset_position_ids, + reset_attention_mask=args.reset_attention_mask, + eod_mask_loss=args.eod_mask_loss, + eod_id=get_tokenizer().eod ) From 62aad13d98ffa79e906cf9f0675bcdc5b151bded Mon Sep 17 00:00:00 2001 From: Selvaraj Anandaraj Date: Tue, 7 Nov 2023 17:56:01 -0800 Subject: [PATCH 004/296] Added guard and fallback for TE SplitAlongDim Signed-off-by: Selvaraj Anandaraj --- megatron/core/transformer/attention.py | 51 ++++++++++++++++++-------- 1 file changed, 36 insertions(+), 15 deletions(-) diff --git a/megatron/core/transformer/attention.py b/megatron/core/transformer/attention.py index 2b6f528952..f3937dd384 100644 --- a/megatron/core/transformer/attention.py +++ b/megatron/core/transformer/attention.py @@ -12,7 +12,6 @@ from megatron.core.transformer.identity_op import IdentityFuncOp, IdentityOp from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.spec_utils import ModuleSpec, build_module -from megatron.core.transformer.custom_layers.transformer_engine import SplitAlongDim from megatron.core.transformer.transformer_config import TransformerConfig from megatron.core.utils import divide @@ -318,20 +317,42 @@ def get_query_key_value_tensors(self, hidden_states, key_value_states=None): ) mixed_qkv = mixed_qkv.view(*new_tensor_shape) - # [sq, b, ng, (np/ng + 2) * hn] --> [sq, b, ng, np/ng * hn], [sq, b, ng, hn], [sq, b, ng, hn] - (query, key, value) = SplitAlongDim( - mixed_qkv, - 3, - [ - ( - self.num_attention_heads_per_partition - // self.num_query_groups_per_partition - * self.hidden_size_per_attention_head - ), - self.hidden_size_per_attention_head, - self.hidden_size_per_attention_head, - ], - ) + try: + + from megatron.core.transformer.custom_layers.transformer_engine import SplitAlongDim + + # [sq, b, ng, (np/ng + 2) * hn] --> [sq, b, ng, np/ng * hn], [sq, b, ng, hn], [sq, b, ng, hn] + (query, key, value) = SplitAlongDim( + mixed_qkv, + 3, + [ + ( + self.num_attention_heads_per_partition + // self.num_query_groups_per_partition + * self.hidden_size_per_attention_head + ), + self.hidden_size_per_attention_head, + self.hidden_size_per_attention_head, + ], + ) + + except ImportError: + + # [sq, b, ng, (np/ng + 2) * hn] --> [sq, b, ng, np/ng * hn], [sq, b, ng, hn], [sq, b, ng, hn] + (query, key, value) = torch.split( + mixed_qkv, + [ + ( + self.num_attention_heads_per_partition + // self.num_query_groups_per_partition + * self.hidden_size_per_attention_head + ), + self.hidden_size_per_attention_head, + self.hidden_size_per_attention_head, + ], + dim=3, + ) + # [sq, b, ng, np/ng * hn] -> [sq, b, np, hn] query = query.reshape(query.size(0), query.size(1), -1, self.hidden_size_per_attention_head) From 80de44fda8da5ff164ffef37733bf4b469966002 Mon Sep 17 00:00:00 2001 From: Hongbin Liu Date: Wed, 15 Nov 2023 03:24:57 -0800 Subject: [PATCH 005/296] add rope and 
swiglu fusion Signed-off-by: Hongbin Liu --- megatron/core/fusions/fused_bias_swiglu.py | 65 +++++++++++++++++++ megatron/core/transformer/attention.py | 7 +- megatron/core/transformer/mlp.py | 17 +++-- .../core/transformer/transformer_config.py | 11 ++-- 4 files changed, 87 insertions(+), 13 deletions(-) create mode 100644 megatron/core/fusions/fused_bias_swiglu.py diff --git a/megatron/core/fusions/fused_bias_swiglu.py b/megatron/core/fusions/fused_bias_swiglu.py new file mode 100644 index 0000000000..24337aa990 --- /dev/null +++ b/megatron/core/fusions/fused_bias_swiglu.py @@ -0,0 +1,65 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +import torch +import torch.nn.functional as F + +###### BIAS GELU FUSION/ NO AUTOGRAD ################ +# 1/sqrt(2*pi)-> 0.3989423 +# 1/sqrt(2) -> 0.70710678 +# sqrt(2/pi) -> 0.79788456 +# this function is tanh approximation of gelu +# actual gelu is: +# x * 0.5 * (1.0 + torch.erf(x * 0.70710678)) + +@torch.jit.script +def swiglu(y, y_2): + return F.silu(y) * y_2 + +@torch.jit.script +def bias_swiglu(y, bias, y_2, bias_2): + x = bias + y + x_2 = bias_2 + y_2 + return swiglu(x, x_2) + +# gradient of tanh approximation of gelu +# gradient of actual gelu is: +# 0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x) +@torch.jit.script +def swiglu_back(g, y, y_2): + return g * torch.sigmoid(y) * (1 + y * (1 - torch.sigmoid(y))) * y_2, g * F.silu(y) + +@torch.jit.script +def bias_swiglu_back(g, y, bias, y_2, bias_2): + x_1 = bias + y + x_2 = bias_2 + y_2 + return swiglu_back(g, x_1, x_2) + + +class BiasSwiGLUFunction(torch.autograd.Function): + @staticmethod + # bias is an optional argument + def forward(ctx, input, bias, input_2, bias_2): + ctx.save_for_backward(input, bias, input_2, bias_2) + return bias_swiglu(input, bias, input_2, bias_2) + + @staticmethod + def backward(ctx, grad_output): + input, bias, input_2, bias_2 = ctx.saved_tensors + tmp, tmp2 = bias_swiglu_back(grad_output, input, bias, input_2, bias_2) + return tmp, tmp, tmp2, tmp2 + +class SwiGLUFunction(torch.autograd.Function): + @staticmethod + # bias is an optional argument + def forward(ctx, input, input_2): + ctx.save_for_backward(input, input_2) + return swiglu(input, input_2) + + @staticmethod + def backward(ctx, grad_output): + input, input_2 = ctx.saved_tensors + tmp, tmp2 = swiglu_back(grad_output, input, input_2) + return tmp, tmp2 + +bias_swiglu_impl = BiasSwiGLUFunction.apply +swiglu_impl = SwiGLUFunction.apply diff --git a/megatron/core/transformer/attention.py b/megatron/core/transformer/attention.py index 6f862d1ebf..203da79cb0 100644 --- a/megatron/core/transformer/attention.py +++ b/megatron/core/transformer/attention.py @@ -18,6 +18,7 @@ from .enums import AttnMaskType from .transformer_config import TransformerConfig from .utils import make_sharded_tensors_for_checkpoint +from apex.transformer.functional import fused_apply_rotary_pos_emb @dataclass @@ -235,8 +236,10 @@ def forward( # ================================================ if rotary_pos_emb is not None: q_pos_emb, k_pos_emb = rotary_pos_emb - query = apply_rotary_pos_emb(query, q_pos_emb) - key = apply_rotary_pos_emb(key, k_pos_emb) + #query = apply_rotary_pos_emb(query, q_pos_emb) + #key = apply_rotary_pos_emb(key, k_pos_emb) + query = fused_apply_rotary_pos_emb(query, q_pos_emb) + key = fused_apply_rotary_pos_emb(key, k_pos_emb) # TODO, can apply positional embedding to value_layer so it has # absolute positional embedding. 
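The change above swaps Megatron's apply_rotary_pos_emb for Apex's fused_apply_rotary_pos_emb. For reference, the unfused rotary embedding being replaced amounts to the following simplified sketch (not the exact Megatron-LM code); the fused kernel is expected to produce the same result without materializing the rotate_half/cat intermediates:

import torch

def rotate_half(x):
    # Swap the two halves of the head dimension and negate the second one.
    x1, x2 = torch.chunk(x, 2, dim=-1)
    return torch.cat((-x2, x1), dim=-1)

def apply_rope_reference(t, freqs):
    # t: [seq, batch, heads, head_dim]; freqs: broadcastable rotation angles.
    return (t * freqs.cos()) + (rotate_half(t) * freqs.sin())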
# otherwise, only relative positional embedding takes effect diff --git a/megatron/core/transformer/mlp.py b/megatron/core/transformer/mlp.py index 1d4e72e783..27edfebbcb 100644 --- a/megatron/core/transformer/mlp.py +++ b/megatron/core/transformer/mlp.py @@ -7,6 +7,8 @@ import torch.nn.functional as F from megatron.core.fusions.fused_bias_gelu import bias_gelu_impl +from megatron.core.fusions.fused_bias_swiglu import bias_swiglu_impl +from megatron.core.fusions.fused_bias_swiglu import swiglu_impl from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.spec_utils import ModuleSpec, build_module from megatron.core.transformer.transformer_config import TransformerConfig @@ -89,10 +91,17 @@ def forward(self, hidden_states): # [s, b, 4 * h/p] intermediate_parallel, bias_parallel = self.linear_fc1(hidden_states) - if self.config.bias_gelu_fusion: - assert self.config.add_bias_linear is True - assert self.activation_func == F.gelu - intermediate_parallel = bias_gelu_impl(intermediate_parallel, bias_parallel) + if self.config.bias_activation_fusion: + if self.activation_func == F.gelu: + assert self.config.add_bias_linear is True + intermediate_parallel = bias_gelu_impl(intermediate_parallel, bias_parallel) + elif self.activation_func == glu: + x = torch.chunk(intermediate_parallel, 2, dim=-1) + if bias_parallel is not None: + bias = torch.chunk(bias_parallel, 2, dim=-1) + intermediate_parallel = bias_swiglu_impl(x[0], bias[0], x[1], bias[1]) + else: + intermediate_parallel = swiglu_impl(x[0], x[1]) else: if bias_parallel is not None: intermediate_parallel = intermediate_parallel + bias_parallel diff --git a/megatron/core/transformer/transformer_config.py b/megatron/core/transformer/transformer_config.py index 6d2dd5f525..93e5721d96 100644 --- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -71,7 +71,7 @@ class TransformerConfig(ModelParallelConfig): This should be true if apply_query_key_layer_scaling is true. # fusion - bias_gelu_fustion (bool): If true, fuses bias and gelu. Defaults to False. + bias_activation_fustion (bool): If true, fuses bias and activation. Defaults to False. masked_softmax_fusion (bool): If true, uses softmax fusion. persist_layer_norm (bool): If true, uses the persistent fused layer norm kernel. This kernel only supports a fixed set of hidden sizes. @@ -162,7 +162,7 @@ class TransformerConfig(ModelParallelConfig): # communication # fusion - bias_gelu_fusion: bool = False # TODO: this should be bias_activation_fusion ? + bias_activation_fusion: bool = False masked_softmax_fusion: bool = False persist_layer_norm: bool = False bias_dropout_fusion: bool = False # TODO: this should be bias_dropout_add_fusion? @@ -263,15 +263,12 @@ def __post_init__(self): if self.apply_query_key_layer_scaling: self.attention_softmax_in_fp32 = True - if self.bias_gelu_fusion: + if self.bias_activation_fusion and self.activation_func == F.gelu: if not self.add_bias_linear: raise ValueError( - "When bias_gelu_fusion is True, add_bias_linear must also be True." + "When bias_activation_fusion is True and activation function is gelu, add_bias_linear must also be True." 
) - if self.activation_func != F.gelu: - raise ValueError(f'When bias_gelu_fusion is True, activation_func must be F.gelu.') - if self.init_method is None: self.init_method = init_method_normal(self.init_method_std) From 49f4ec27e584bfee72a2edc9f9ea34f01b9b9dce Mon Sep 17 00:00:00 2001 From: Hongbin Liu Date: Wed, 15 Nov 2023 04:34:04 -0800 Subject: [PATCH 006/296] make rope_fusion under bias_activation_fusion knob Signed-off-by: Hongbin Liu --- megatron/core/transformer/attention.py | 13 +++++++++---- megatron/core/transformer/mlp.py | 2 +- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/megatron/core/transformer/attention.py b/megatron/core/transformer/attention.py index 203da79cb0..bf15733d71 100644 --- a/megatron/core/transformer/attention.py +++ b/megatron/core/transformer/attention.py @@ -236,10 +236,15 @@ def forward( # ================================================ if rotary_pos_emb is not None: q_pos_emb, k_pos_emb = rotary_pos_emb - #query = apply_rotary_pos_emb(query, q_pos_emb) - #key = apply_rotary_pos_emb(key, k_pos_emb) - query = fused_apply_rotary_pos_emb(query, q_pos_emb) - key = fused_apply_rotary_pos_emb(key, k_pos_emb) + # use bias_activation_fusion to control the knob here + # just for debug + # the if-else block is not needed in normal PR + if self.config.bias_activation_fusion: + query = fused_apply_rotary_pos_emb(query, q_pos_emb) + key = fused_apply_rotary_pos_emb(key, k_pos_emb) + else: + query = apply_rotary_pos_emb(query, q_pos_emb) + key = apply_rotary_pos_emb(key, k_pos_emb) # TODO, can apply positional embedding to value_layer so it has # absolute positional embedding. # otherwise, only relative positional embedding takes effect diff --git a/megatron/core/transformer/mlp.py b/megatron/core/transformer/mlp.py index 27edfebbcb..cb0c03e840 100644 --- a/megatron/core/transformer/mlp.py +++ b/megatron/core/transformer/mlp.py @@ -95,7 +95,7 @@ def forward(self, hidden_states): if self.activation_func == F.gelu: assert self.config.add_bias_linear is True intermediate_parallel = bias_gelu_impl(intermediate_parallel, bias_parallel) - elif self.activation_func == glu: + else: x = torch.chunk(intermediate_parallel, 2, dim=-1) if bias_parallel is not None: bias = torch.chunk(bias_parallel, 2, dim=-1) From f41b4fd4e56b07943d075a1e66c1284716b3347e Mon Sep 17 00:00:00 2001 From: Hongbin Liu Date: Thu, 16 Nov 2023 23:42:59 -0800 Subject: [PATCH 007/296] refactor code Signed-off-by: Hongbin Liu --- megatron/core/fusions/fused_bias_swiglu.py | 56 ++++++++++++---------- megatron/core/transformer/attention.py | 2 +- megatron/core/transformer/mlp.py | 36 +++++++------- 3 files changed, 51 insertions(+), 43 deletions(-) diff --git a/megatron/core/fusions/fused_bias_swiglu.py b/megatron/core/fusions/fused_bias_swiglu.py index 24337aa990..bf23b6e4ae 100644 --- a/megatron/core/fusions/fused_bias_swiglu.py +++ b/megatron/core/fusions/fused_bias_swiglu.py @@ -11,55 +11,63 @@ # actual gelu is: # x * 0.5 * (1.0 + torch.erf(x * 0.70710678)) + @torch.jit.script -def swiglu(y, y_2): - return F.silu(y) * y_2 +def swiglu(y): + y_1, y_2 = torch.chunk(y, 2, -1) + return F.silu(y_1) * y_2 + @torch.jit.script -def bias_swiglu(y, bias, y_2, bias_2): - x = bias + y - x_2 = bias_2 + y_2 - return swiglu(x, x_2) +def bias_swiglu(y, bias): + y = y + bias + return swiglu(y) + # gradient of tanh approximation of gelu # gradient of actual gelu is: # 0.5 * (1. 
+ torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x) @torch.jit.script -def swiglu_back(g, y, y_2): - return g * torch.sigmoid(y) * (1 + y * (1 - torch.sigmoid(y))) * y_2, g * F.silu(y) +def swiglu_back(g, y): + y_1, y_2 = torch.chunk(y, 2, -1) + return torch.cat( + (g * torch.sigmoid(y_1) * (1 + y_1 * (1 - torch.sigmoid(y_1))) * y_2, g * F.silu(y_1)), -1 + ) + @torch.jit.script -def bias_swiglu_back(g, y, bias, y_2, bias_2): - x_1 = bias + y - x_2 = bias_2 + y_2 - return swiglu_back(g, x_1, x_2) +def bias_swiglu_back(g, y, bias): + y = y + bias + return swiglu_back(g, y) class BiasSwiGLUFunction(torch.autograd.Function): @staticmethod # bias is an optional argument - def forward(ctx, input, bias, input_2, bias_2): - ctx.save_for_backward(input, bias, input_2, bias_2) - return bias_swiglu(input, bias, input_2, bias_2) + def forward(ctx, input, bias): + ctx.save_for_backward(input, bias) + return bias_swiglu(input, bias) @staticmethod def backward(ctx, grad_output): - input, bias, input_2, bias_2 = ctx.saved_tensors - tmp, tmp2 = bias_swiglu_back(grad_output, input, bias, input_2, bias_2) - return tmp, tmp, tmp2, tmp2 + input, bias = ctx.saved_tensors + tmp = bias_swiglu_back(grad_output, input, bias) + return tmp, tmp + class SwiGLUFunction(torch.autograd.Function): @staticmethod # bias is an optional argument - def forward(ctx, input, input_2): - ctx.save_for_backward(input, input_2) - return swiglu(input, input_2) + def forward(ctx, input): + ctx.save_for_backward(input) + return swiglu(input) @staticmethod def backward(ctx, grad_output): - input, input_2 = ctx.saved_tensors - tmp, tmp2 = swiglu_back(grad_output, input, input_2) - return tmp, tmp2 + input = ctx.saved_tensors + tmp = swiglu_back(grad_output, input[0]) + return tmp + bias_swiglu_impl = BiasSwiGLUFunction.apply swiglu_impl = SwiGLUFunction.apply diff --git a/megatron/core/transformer/attention.py b/megatron/core/transformer/attention.py index bf15733d71..9c072e5e60 100644 --- a/megatron/core/transformer/attention.py +++ b/megatron/core/transformer/attention.py @@ -5,6 +5,7 @@ from typing import Union import torch +from apex.transformer.functional import fused_apply_rotary_pos_emb from megatron.core import parallel_state, tensor_parallel from megatron.core.models.common.embeddings.rotary_pos_embedding import apply_rotary_pos_emb @@ -18,7 +19,6 @@ from .enums import AttnMaskType from .transformer_config import TransformerConfig from .utils import make_sharded_tensors_for_checkpoint -from apex.transformer.functional import fused_apply_rotary_pos_emb @dataclass diff --git a/megatron/core/transformer/mlp.py b/megatron/core/transformer/mlp.py index cb0c03e840..02e20fbe9e 100644 --- a/megatron/core/transformer/mlp.py +++ b/megatron/core/transformer/mlp.py @@ -7,8 +7,7 @@ import torch.nn.functional as F from megatron.core.fusions.fused_bias_gelu import bias_gelu_impl -from megatron.core.fusions.fused_bias_swiglu import bias_swiglu_impl -from megatron.core.fusions.fused_bias_swiglu import swiglu_impl +from megatron.core.fusions.fused_bias_swiglu import bias_swiglu_impl, swiglu_impl from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.spec_utils import ModuleSpec, build_module from megatron.core.transformer.transformer_config import TransformerConfig @@ -63,16 +62,6 @@ def __init__( tp_comm_buffer_name='fc1', ) - if self.config.gated_linear_unit: - - def glu(x): - x = torch.chunk(x, 2, dim=-1) - return self.config.activation_func(x[0]) * x[1] - - self.activation_func = glu - 
else: - self.activation_func = self.config.activation_func - self.linear_fc2 = build_module( submodules.linear_fc2, self.config.ffn_hidden_size, @@ -95,17 +84,28 @@ def forward(self, hidden_states): if self.activation_func == F.gelu: assert self.config.add_bias_linear is True intermediate_parallel = bias_gelu_impl(intermediate_parallel, bias_parallel) - else: - x = torch.chunk(intermediate_parallel, 2, dim=-1) + elif self.activation_func == F.silu: + shape = intermediate_parallel.shape + intermediate_parallel = intermediate_parallel.view(-1, shape[2]) if bias_parallel is not None: - bias = torch.chunk(bias_parallel, 2, dim=-1) - intermediate_parallel = bias_swiglu_impl(x[0], bias[0], x[1], bias[1]) + intermediate_parallel = bias_swiglu_impl(intermediate_parallel, bias_parallel) else: - intermediate_parallel = swiglu_impl(x[0], x[1]) + intermediate_parallel = swiglu_impl(intermediate_parallel) + intermediate_parallel = intermediate_parallel.view(shape[0], shape[1], -1) + else: + raise ValueError("Only support fusion of gelu and swiglu") else: if bias_parallel is not None: intermediate_parallel = intermediate_parallel + bias_parallel - intermediate_parallel = self.activation_func(intermediate_parallel) + if self.config.gated_linear_unit: + + def glu(x): + x = torch.chunk(x, 2, dim=-1) + return self.config.activation_func(x[0]) * x[1] + + intermediate_parallel = glu(intermediate_parallel) + else: + intermediate_parallel = self.activation_func(intermediate_parallel) # [s, b, h] output, output_bias = self.linear_fc2(intermediate_parallel) From 2d0218279abb561bdfea91d3287b877d8cb71fbb Mon Sep 17 00:00:00 2001 From: Hongbin Liu Date: Sun, 19 Nov 2023 17:16:19 -0800 Subject: [PATCH 008/296] add knob for rope fusion and fix bug in mlp Signed-off-by: Hongbin Liu --- megatron/core/transformer/attention.py | 13 ++++++++----- megatron/core/transformer/mlp.py | 2 ++ megatron/core/transformer/transformer_config.py | 1 + 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/megatron/core/transformer/attention.py b/megatron/core/transformer/attention.py index 9c072e5e60..aaa7eaf91d 100644 --- a/megatron/core/transformer/attention.py +++ b/megatron/core/transformer/attention.py @@ -5,7 +5,13 @@ from typing import Union import torch -from apex.transformer.functional import fused_apply_rotary_pos_emb +try: + from apex.transformer.functional import fused_apply_rotary_pos_emb + + HAVE_APPLY_ROPE_FUSION = True +except: + HAVE_APPLY_ROPE_FUSION = False + from megatron.core import parallel_state, tensor_parallel from megatron.core.models.common.embeddings.rotary_pos_embedding import apply_rotary_pos_emb @@ -236,10 +242,7 @@ def forward( # ================================================ if rotary_pos_emb is not None: q_pos_emb, k_pos_emb = rotary_pos_emb - # use bias_activation_fusion to control the knob here - # just for debug - # the if-else block is not needed in normal PR - if self.config.bias_activation_fusion: + if self.config.apply_rope_fusion and HAVE_ROPE_FUSION: query = fused_apply_rotary_pos_emb(query, q_pos_emb) key = fused_apply_rotary_pos_emb(key, k_pos_emb) else: diff --git a/megatron/core/transformer/mlp.py b/megatron/core/transformer/mlp.py index 02e20fbe9e..9632979ddd 100644 --- a/megatron/core/transformer/mlp.py +++ b/megatron/core/transformer/mlp.py @@ -62,6 +62,8 @@ def __init__( tp_comm_buffer_name='fc1', ) + self.activation_func = self.config.activation_func + self.linear_fc2 = build_module( submodules.linear_fc2, self.config.ffn_hidden_size, diff --git 
a/megatron/core/transformer/transformer_config.py b/megatron/core/transformer/transformer_config.py index 93e5721d96..5e5e4a1bcf 100644 --- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -166,6 +166,7 @@ class TransformerConfig(ModelParallelConfig): masked_softmax_fusion: bool = False persist_layer_norm: bool = False bias_dropout_fusion: bool = False # TODO: this should be bias_dropout_add_fusion? + apply_rope_fusion: bool = False # activation recomputation recompute_granularity: str = None From e61aa3d59c7f6e048420ddcd82187a194ee7fde7 Mon Sep 17 00:00:00 2001 From: Hongbin Liu Date: Sun, 19 Nov 2023 17:39:19 -0800 Subject: [PATCH 009/296] minor fix Signed-off-by: Hongbin Liu --- megatron/core/transformer/attention.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/megatron/core/transformer/attention.py b/megatron/core/transformer/attention.py index aaa7eaf91d..f4c8f348d6 100644 --- a/megatron/core/transformer/attention.py +++ b/megatron/core/transformer/attention.py @@ -242,7 +242,7 @@ def forward( # ================================================ if rotary_pos_emb is not None: q_pos_emb, k_pos_emb = rotary_pos_emb - if self.config.apply_rope_fusion and HAVE_ROPE_FUSION: + if self.config.apply_rope_fusion and HAVE_APPLY_ROPE_FUSION: query = fused_apply_rotary_pos_emb(query, q_pos_emb) key = fused_apply_rotary_pos_emb(key, k_pos_emb) else: From 8503f75401aa49f735b7b153ba82fd76f2d5cd58 Mon Sep 17 00:00:00 2001 From: Hongbin Liu Date: Wed, 15 Nov 2023 03:24:57 -0800 Subject: [PATCH 010/296] add rope and swiglu fusion Signed-off-by: Hongbin Liu --- megatron/core/fusions/fused_bias_swiglu.py | 65 +++++++++++++++++++ megatron/core/transformer/attention.py | 7 +- megatron/core/transformer/mlp.py | 17 +++-- .../core/transformer/transformer_config.py | 11 ++-- 4 files changed, 87 insertions(+), 13 deletions(-) create mode 100644 megatron/core/fusions/fused_bias_swiglu.py diff --git a/megatron/core/fusions/fused_bias_swiglu.py b/megatron/core/fusions/fused_bias_swiglu.py new file mode 100644 index 0000000000..24337aa990 --- /dev/null +++ b/megatron/core/fusions/fused_bias_swiglu.py @@ -0,0 +1,65 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +import torch +import torch.nn.functional as F + +###### BIAS GELU FUSION/ NO AUTOGRAD ################ +# 1/sqrt(2*pi)-> 0.3989423 +# 1/sqrt(2) -> 0.70710678 +# sqrt(2/pi) -> 0.79788456 +# this function is tanh approximation of gelu +# actual gelu is: +# x * 0.5 * (1.0 + torch.erf(x * 0.70710678)) + +@torch.jit.script +def swiglu(y, y_2): + return F.silu(y) * y_2 + +@torch.jit.script +def bias_swiglu(y, bias, y_2, bias_2): + x = bias + y + x_2 = bias_2 + y_2 + return swiglu(x, x_2) + +# gradient of tanh approximation of gelu +# gradient of actual gelu is: +# 0.5 * (1. 
+ torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x) +@torch.jit.script +def swiglu_back(g, y, y_2): + return g * torch.sigmoid(y) * (1 + y * (1 - torch.sigmoid(y))) * y_2, g * F.silu(y) + +@torch.jit.script +def bias_swiglu_back(g, y, bias, y_2, bias_2): + x_1 = bias + y + x_2 = bias_2 + y_2 + return swiglu_back(g, x_1, x_2) + + +class BiasSwiGLUFunction(torch.autograd.Function): + @staticmethod + # bias is an optional argument + def forward(ctx, input, bias, input_2, bias_2): + ctx.save_for_backward(input, bias, input_2, bias_2) + return bias_swiglu(input, bias, input_2, bias_2) + + @staticmethod + def backward(ctx, grad_output): + input, bias, input_2, bias_2 = ctx.saved_tensors + tmp, tmp2 = bias_swiglu_back(grad_output, input, bias, input_2, bias_2) + return tmp, tmp, tmp2, tmp2 + +class SwiGLUFunction(torch.autograd.Function): + @staticmethod + # bias is an optional argument + def forward(ctx, input, input_2): + ctx.save_for_backward(input, input_2) + return swiglu(input, input_2) + + @staticmethod + def backward(ctx, grad_output): + input, input_2 = ctx.saved_tensors + tmp, tmp2 = swiglu_back(grad_output, input, input_2) + return tmp, tmp2 + +bias_swiglu_impl = BiasSwiGLUFunction.apply +swiglu_impl = SwiGLUFunction.apply diff --git a/megatron/core/transformer/attention.py b/megatron/core/transformer/attention.py index c725c7f3a2..5e91d2e201 100644 --- a/megatron/core/transformer/attention.py +++ b/megatron/core/transformer/attention.py @@ -18,6 +18,7 @@ from .enums import AttnMaskType from .transformer_config import TransformerConfig from .utils import make_sharded_tensors_for_checkpoint +from apex.transformer.functional import fused_apply_rotary_pos_emb @dataclass @@ -244,8 +245,10 @@ def forward( # ================================================ if rotary_pos_emb is not None: q_pos_emb, k_pos_emb = rotary_pos_emb - query = apply_rotary_pos_emb(query, q_pos_emb) - key = apply_rotary_pos_emb(key, k_pos_emb) + #query = apply_rotary_pos_emb(query, q_pos_emb) + #key = apply_rotary_pos_emb(key, k_pos_emb) + query = fused_apply_rotary_pos_emb(query, q_pos_emb) + key = fused_apply_rotary_pos_emb(key, k_pos_emb) # TODO, can apply positional embedding to value_layer so it has # absolute positional embedding. 
# otherwise, only relative positional embedding takes effect diff --git a/megatron/core/transformer/mlp.py b/megatron/core/transformer/mlp.py index 8f5575b724..dbb9ffae38 100644 --- a/megatron/core/transformer/mlp.py +++ b/megatron/core/transformer/mlp.py @@ -10,6 +10,8 @@ from megatron.core.dist_checkpointing import ShardedTensor from megatron.core.dist_checkpointing.mapping import ShardedTensorFactory from megatron.core.fusions.fused_bias_gelu import bias_gelu_impl +from megatron.core.fusions.fused_bias_swiglu import bias_swiglu_impl +from megatron.core.fusions.fused_bias_swiglu import swiglu_impl from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.spec_utils import ModuleSpec, build_module from megatron.core.transformer.transformer_config import TransformerConfig @@ -92,10 +94,17 @@ def forward(self, hidden_states): # [s, b, 4 * h/p] intermediate_parallel, bias_parallel = self.linear_fc1(hidden_states) - if self.config.bias_gelu_fusion: - assert self.config.add_bias_linear is True - assert self.activation_func == F.gelu - intermediate_parallel = bias_gelu_impl(intermediate_parallel, bias_parallel) + if self.config.bias_activation_fusion: + if self.activation_func == F.gelu: + assert self.config.add_bias_linear is True + intermediate_parallel = bias_gelu_impl(intermediate_parallel, bias_parallel) + elif self.activation_func == glu: + x = torch.chunk(intermediate_parallel, 2, dim=-1) + if bias_parallel is not None: + bias = torch.chunk(bias_parallel, 2, dim=-1) + intermediate_parallel = bias_swiglu_impl(x[0], bias[0], x[1], bias[1]) + else: + intermediate_parallel = swiglu_impl(x[0], x[1]) else: if bias_parallel is not None: intermediate_parallel = intermediate_parallel + bias_parallel diff --git a/megatron/core/transformer/transformer_config.py b/megatron/core/transformer/transformer_config.py index adccd4409b..450120b230 100644 --- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -71,7 +71,7 @@ class TransformerConfig(ModelParallelConfig): This should be true if apply_query_key_layer_scaling is true. # fusion - bias_gelu_fustion (bool): If true, fuses bias and gelu. Defaults to False. + bias_activation_fustion (bool): If true, fuses bias and activation. Defaults to False. masked_softmax_fusion (bool): If true, uses softmax fusion. persist_layer_norm (bool): If true, uses the persistent fused layer norm kernel. This kernel only supports a fixed set of hidden sizes. @@ -166,7 +166,7 @@ class TransformerConfig(ModelParallelConfig): # communication # fusion - bias_gelu_fusion: bool = False # TODO: this should be bias_activation_fusion ? + bias_activation_fusion: bool = False masked_softmax_fusion: bool = False persist_layer_norm: bool = False bias_dropout_fusion: bool = False # TODO: this should be bias_dropout_add_fusion? @@ -270,15 +270,12 @@ def __post_init__(self): if self.apply_query_key_layer_scaling: self.attention_softmax_in_fp32 = True - if self.bias_gelu_fusion: + if self.bias_activation_fusion and self.activation_func == F.gelu: if not self.add_bias_linear: raise ValueError( - "When bias_gelu_fusion is True, add_bias_linear must also be True." + "When bias_activation_fusion is True and activation function is gelu, add_bias_linear must also be True." 
) - if self.activation_func != F.gelu: - raise ValueError(f'When bias_gelu_fusion is True, activation_func must be F.gelu.') - if self.init_method is None: self.init_method = init_method_normal(self.init_method_std) From 8f44952c31a315d4af3c558859c4bd36e31182f6 Mon Sep 17 00:00:00 2001 From: Hongbin Liu Date: Wed, 15 Nov 2023 04:34:04 -0800 Subject: [PATCH 011/296] make rope_fusion under bias_activation_fusion knob Signed-off-by: Hongbin Liu --- megatron/core/transformer/attention.py | 13 +++++++++---- megatron/core/transformer/mlp.py | 2 +- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/megatron/core/transformer/attention.py b/megatron/core/transformer/attention.py index 5e91d2e201..a2bbe6c507 100644 --- a/megatron/core/transformer/attention.py +++ b/megatron/core/transformer/attention.py @@ -245,10 +245,15 @@ def forward( # ================================================ if rotary_pos_emb is not None: q_pos_emb, k_pos_emb = rotary_pos_emb - #query = apply_rotary_pos_emb(query, q_pos_emb) - #key = apply_rotary_pos_emb(key, k_pos_emb) - query = fused_apply_rotary_pos_emb(query, q_pos_emb) - key = fused_apply_rotary_pos_emb(key, k_pos_emb) + # use bias_activation_fusion to control the knob here + # just for debug + # the if-else block is not needed in normal PR + if self.config.bias_activation_fusion: + query = fused_apply_rotary_pos_emb(query, q_pos_emb) + key = fused_apply_rotary_pos_emb(key, k_pos_emb) + else: + query = apply_rotary_pos_emb(query, q_pos_emb) + key = apply_rotary_pos_emb(key, k_pos_emb) # TODO, can apply positional embedding to value_layer so it has # absolute positional embedding. # otherwise, only relative positional embedding takes effect diff --git a/megatron/core/transformer/mlp.py b/megatron/core/transformer/mlp.py index dbb9ffae38..ae6b18257c 100644 --- a/megatron/core/transformer/mlp.py +++ b/megatron/core/transformer/mlp.py @@ -98,7 +98,7 @@ def forward(self, hidden_states): if self.activation_func == F.gelu: assert self.config.add_bias_linear is True intermediate_parallel = bias_gelu_impl(intermediate_parallel, bias_parallel) - elif self.activation_func == glu: + else: x = torch.chunk(intermediate_parallel, 2, dim=-1) if bias_parallel is not None: bias = torch.chunk(bias_parallel, 2, dim=-1) From 6e7be2b2484decd4f692736bd7ce7486c2703cc5 Mon Sep 17 00:00:00 2001 From: Hongbin Liu Date: Thu, 16 Nov 2023 23:42:59 -0800 Subject: [PATCH 012/296] refactor code Signed-off-by: Hongbin Liu --- megatron/core/fusions/fused_bias_swiglu.py | 56 ++++++++++++---------- megatron/core/transformer/attention.py | 2 +- megatron/core/transformer/mlp.py | 36 +++++++------- 3 files changed, 51 insertions(+), 43 deletions(-) diff --git a/megatron/core/fusions/fused_bias_swiglu.py b/megatron/core/fusions/fused_bias_swiglu.py index 24337aa990..bf23b6e4ae 100644 --- a/megatron/core/fusions/fused_bias_swiglu.py +++ b/megatron/core/fusions/fused_bias_swiglu.py @@ -11,55 +11,63 @@ # actual gelu is: # x * 0.5 * (1.0 + torch.erf(x * 0.70710678)) + @torch.jit.script -def swiglu(y, y_2): - return F.silu(y) * y_2 +def swiglu(y): + y_1, y_2 = torch.chunk(y, 2, -1) + return F.silu(y_1) * y_2 + @torch.jit.script -def bias_swiglu(y, bias, y_2, bias_2): - x = bias + y - x_2 = bias_2 + y_2 - return swiglu(x, x_2) +def bias_swiglu(y, bias): + y = y + bias + return swiglu(y) + # gradient of tanh approximation of gelu # gradient of actual gelu is: # 0.5 * (1. 
+ torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x) @torch.jit.script -def swiglu_back(g, y, y_2): - return g * torch.sigmoid(y) * (1 + y * (1 - torch.sigmoid(y))) * y_2, g * F.silu(y) +def swiglu_back(g, y): + y_1, y_2 = torch.chunk(y, 2, -1) + return torch.cat( + (g * torch.sigmoid(y_1) * (1 + y_1 * (1 - torch.sigmoid(y_1))) * y_2, g * F.silu(y_1)), -1 + ) + @torch.jit.script -def bias_swiglu_back(g, y, bias, y_2, bias_2): - x_1 = bias + y - x_2 = bias_2 + y_2 - return swiglu_back(g, x_1, x_2) +def bias_swiglu_back(g, y, bias): + y = y + bias + return swiglu_back(g, y) class BiasSwiGLUFunction(torch.autograd.Function): @staticmethod # bias is an optional argument - def forward(ctx, input, bias, input_2, bias_2): - ctx.save_for_backward(input, bias, input_2, bias_2) - return bias_swiglu(input, bias, input_2, bias_2) + def forward(ctx, input, bias): + ctx.save_for_backward(input, bias) + return bias_swiglu(input, bias) @staticmethod def backward(ctx, grad_output): - input, bias, input_2, bias_2 = ctx.saved_tensors - tmp, tmp2 = bias_swiglu_back(grad_output, input, bias, input_2, bias_2) - return tmp, tmp, tmp2, tmp2 + input, bias = ctx.saved_tensors + tmp = bias_swiglu_back(grad_output, input, bias) + return tmp, tmp + class SwiGLUFunction(torch.autograd.Function): @staticmethod # bias is an optional argument - def forward(ctx, input, input_2): - ctx.save_for_backward(input, input_2) - return swiglu(input, input_2) + def forward(ctx, input): + ctx.save_for_backward(input) + return swiglu(input) @staticmethod def backward(ctx, grad_output): - input, input_2 = ctx.saved_tensors - tmp, tmp2 = swiglu_back(grad_output, input, input_2) - return tmp, tmp2 + input = ctx.saved_tensors + tmp = swiglu_back(grad_output, input[0]) + return tmp + bias_swiglu_impl = BiasSwiGLUFunction.apply swiglu_impl = SwiGLUFunction.apply diff --git a/megatron/core/transformer/attention.py b/megatron/core/transformer/attention.py index a2bbe6c507..abb47295a5 100644 --- a/megatron/core/transformer/attention.py +++ b/megatron/core/transformer/attention.py @@ -5,6 +5,7 @@ from typing import Union import torch +from apex.transformer.functional import fused_apply_rotary_pos_emb from megatron.core import parallel_state, tensor_parallel from megatron.core.models.common.embeddings.rotary_pos_embedding import apply_rotary_pos_emb @@ -18,7 +19,6 @@ from .enums import AttnMaskType from .transformer_config import TransformerConfig from .utils import make_sharded_tensors_for_checkpoint -from apex.transformer.functional import fused_apply_rotary_pos_emb @dataclass diff --git a/megatron/core/transformer/mlp.py b/megatron/core/transformer/mlp.py index ae6b18257c..8463aa7c76 100644 --- a/megatron/core/transformer/mlp.py +++ b/megatron/core/transformer/mlp.py @@ -10,8 +10,7 @@ from megatron.core.dist_checkpointing import ShardedTensor from megatron.core.dist_checkpointing.mapping import ShardedTensorFactory from megatron.core.fusions.fused_bias_gelu import bias_gelu_impl -from megatron.core.fusions.fused_bias_swiglu import bias_swiglu_impl -from megatron.core.fusions.fused_bias_swiglu import swiglu_impl +from megatron.core.fusions.fused_bias_swiglu import bias_swiglu_impl, swiglu_impl from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.spec_utils import ModuleSpec, build_module from megatron.core.transformer.transformer_config import TransformerConfig @@ -66,16 +65,6 @@ def __init__( tp_comm_buffer_name='fc1', ) - if self.config.gated_linear_unit: - - def glu(x): - x = 
torch.chunk(x, 2, dim=-1) - return self.config.activation_func(x[0]) * x[1] - - self.activation_func = glu - else: - self.activation_func = self.config.activation_func - self.linear_fc2 = build_module( submodules.linear_fc2, self.config.ffn_hidden_size, @@ -98,17 +87,28 @@ def forward(self, hidden_states): if self.activation_func == F.gelu: assert self.config.add_bias_linear is True intermediate_parallel = bias_gelu_impl(intermediate_parallel, bias_parallel) - else: - x = torch.chunk(intermediate_parallel, 2, dim=-1) + elif self.activation_func == F.silu: + shape = intermediate_parallel.shape + intermediate_parallel = intermediate_parallel.view(-1, shape[2]) if bias_parallel is not None: - bias = torch.chunk(bias_parallel, 2, dim=-1) - intermediate_parallel = bias_swiglu_impl(x[0], bias[0], x[1], bias[1]) + intermediate_parallel = bias_swiglu_impl(intermediate_parallel, bias_parallel) else: - intermediate_parallel = swiglu_impl(x[0], x[1]) + intermediate_parallel = swiglu_impl(intermediate_parallel) + intermediate_parallel = intermediate_parallel.view(shape[0], shape[1], -1) + else: + raise ValueError("Only support fusion of gelu and swiglu") else: if bias_parallel is not None: intermediate_parallel = intermediate_parallel + bias_parallel - intermediate_parallel = self.activation_func(intermediate_parallel) + if self.config.gated_linear_unit: + + def glu(x): + x = torch.chunk(x, 2, dim=-1) + return self.config.activation_func(x[0]) * x[1] + + intermediate_parallel = glu(intermediate_parallel) + else: + intermediate_parallel = self.activation_func(intermediate_parallel) # [s, b, h] output, output_bias = self.linear_fc2(intermediate_parallel) From a01b42ccac308973ad99b4bb7850a5f54feeed9d Mon Sep 17 00:00:00 2001 From: Hongbin Liu Date: Sun, 19 Nov 2023 17:16:19 -0800 Subject: [PATCH 013/296] add knob for rope fusion and fix bug in mlp Signed-off-by: Hongbin Liu --- megatron/core/transformer/attention.py | 13 ++++++++----- megatron/core/transformer/mlp.py | 2 ++ megatron/core/transformer/transformer_config.py | 1 + 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/megatron/core/transformer/attention.py b/megatron/core/transformer/attention.py index abb47295a5..d51ffe11c4 100644 --- a/megatron/core/transformer/attention.py +++ b/megatron/core/transformer/attention.py @@ -5,7 +5,13 @@ from typing import Union import torch -from apex.transformer.functional import fused_apply_rotary_pos_emb +try: + from apex.transformer.functional import fused_apply_rotary_pos_emb + + HAVE_APPLY_ROPE_FUSION = True +except: + HAVE_APPLY_ROPE_FUSION = False + from megatron.core import parallel_state, tensor_parallel from megatron.core.models.common.embeddings.rotary_pos_embedding import apply_rotary_pos_emb @@ -245,10 +251,7 @@ def forward( # ================================================ if rotary_pos_emb is not None: q_pos_emb, k_pos_emb = rotary_pos_emb - # use bias_activation_fusion to control the knob here - # just for debug - # the if-else block is not needed in normal PR - if self.config.bias_activation_fusion: + if self.config.apply_rope_fusion and HAVE_ROPE_FUSION: query = fused_apply_rotary_pos_emb(query, q_pos_emb) key = fused_apply_rotary_pos_emb(key, k_pos_emb) else: diff --git a/megatron/core/transformer/mlp.py b/megatron/core/transformer/mlp.py index 8463aa7c76..a8df733b50 100644 --- a/megatron/core/transformer/mlp.py +++ b/megatron/core/transformer/mlp.py @@ -65,6 +65,8 @@ def __init__( tp_comm_buffer_name='fc1', ) + self.activation_func = self.config.activation_func + self.linear_fc2 
= build_module( submodules.linear_fc2, self.config.ffn_hidden_size, diff --git a/megatron/core/transformer/transformer_config.py b/megatron/core/transformer/transformer_config.py index 450120b230..20bdb6d626 100644 --- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -170,6 +170,7 @@ class TransformerConfig(ModelParallelConfig): masked_softmax_fusion: bool = False persist_layer_norm: bool = False bias_dropout_fusion: bool = False # TODO: this should be bias_dropout_add_fusion? + apply_rope_fusion: bool = False # activation recomputation recompute_granularity: str = None From 4b1fc6672cfc0b8117019b4f1a88ece7f44b4724 Mon Sep 17 00:00:00 2001 From: Hongbin Liu Date: Sun, 19 Nov 2023 17:39:19 -0800 Subject: [PATCH 014/296] minor fix Signed-off-by: Hongbin Liu --- megatron/core/transformer/attention.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/megatron/core/transformer/attention.py b/megatron/core/transformer/attention.py index d51ffe11c4..f26503dcf0 100644 --- a/megatron/core/transformer/attention.py +++ b/megatron/core/transformer/attention.py @@ -251,7 +251,7 @@ def forward( # ================================================ if rotary_pos_emb is not None: q_pos_emb, k_pos_emb = rotary_pos_emb - if self.config.apply_rope_fusion and HAVE_ROPE_FUSION: + if self.config.apply_rope_fusion and HAVE_APPLY_ROPE_FUSION: query = fused_apply_rotary_pos_emb(query, q_pos_emb) key = fused_apply_rotary_pos_emb(key, k_pos_emb) else: From 3e5ef04d0a4b94a08170ba8161b77d572d34c8ff Mon Sep 17 00:00:00 2001 From: Hongbin Liu Date: Tue, 21 Nov 2023 04:19:30 -0800 Subject: [PATCH 015/296] avoid contiguous Signed-off-by: Hongbin Liu --- megatron/core/transformer/attention.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/megatron/core/transformer/attention.py b/megatron/core/transformer/attention.py index f26503dcf0..9c45ea3c15 100644 --- a/megatron/core/transformer/attention.py +++ b/megatron/core/transformer/attention.py @@ -252,8 +252,8 @@ def forward( if rotary_pos_emb is not None: q_pos_emb, k_pos_emb = rotary_pos_emb if self.config.apply_rope_fusion and HAVE_APPLY_ROPE_FUSION: - query = fused_apply_rotary_pos_emb(query, q_pos_emb) - key = fused_apply_rotary_pos_emb(key, k_pos_emb) + query = fused_apply_rotary_pos_emb(query, q_pos_emb, transpose_output_memory=True) + key = fused_apply_rotary_pos_emb(key, k_pos_emb, transpose_output_memory=True) else: query = apply_rotary_pos_emb(query, q_pos_emb) key = apply_rotary_pos_emb(key, k_pos_emb) From 993e617074658ce65b4206ddba082405ee996244 Mon Sep 17 00:00:00 2001 From: Hongbin Liu Date: Tue, 21 Nov 2023 04:22:54 -0800 Subject: [PATCH 016/296] format Signed-off-by: Hongbin Liu --- megatron/core/transformer/attention.py | 1 + 1 file changed, 1 insertion(+) diff --git a/megatron/core/transformer/attention.py b/megatron/core/transformer/attention.py index 9c45ea3c15..57d37b599e 100644 --- a/megatron/core/transformer/attention.py +++ b/megatron/core/transformer/attention.py @@ -5,6 +5,7 @@ from typing import Union import torch + try: from apex.transformer.functional import fused_apply_rotary_pos_emb From 0ef8f2a625be141c4336d9d6fc2b303cdcd7ca45 Mon Sep 17 00:00:00 2001 From: Hongbin Liu Date: Tue, 21 Nov 2023 22:08:25 -0800 Subject: [PATCH 017/296] fix bugs in latest TE Signed-off-by: Hongbin Liu --- megatron/core/transformer/attention.py | 19 ++++++++++++++++++- .../custom_layers/transformer_engine.py | 9 ++++++++- 2 files changed, 26 insertions(+), 2 deletions(-) 
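Taken together, the rope-fusion patches above settle on a guarded-import-plus-config-knob pattern: the fused Apex kernel is used only when both the `apply_rope_fusion` flag is set and the optional import succeeds, otherwise the unfused path is taken. A minimal sketch of that pattern follows; it is illustrative only, the `unfused_fn` parameter is a stand-in for Megatron's own `apply_rotary_pos_emb`, and only `fused_apply_rotary_pos_emb` and its `transpose_output_memory` flag come from the patches themselves:

    # Optional fused kernel; present only in sufficiently new Apex builds.
    try:
        from apex.transformer.functional import fused_apply_rotary_pos_emb
        HAVE_APPLY_ROPE_FUSION = True
    except ImportError:
        HAVE_APPLY_ROPE_FUSION = False

    def apply_rope(query, key, q_pos_emb, k_pos_emb, unfused_fn, apply_rope_fusion=True):
        # Fall back to the unfused implementation whenever the knob is off
        # or the fused kernel could not be imported.
        if apply_rope_fusion and HAVE_APPLY_ROPE_FUSION:
            query = fused_apply_rotary_pos_emb(query, q_pos_emb, transpose_output_memory=True)
            key = fused_apply_rotary_pos_emb(key, k_pos_emb, transpose_output_memory=True)
        else:
            query = unfused_fn(query, q_pos_emb)
            key = unfused_fn(key, k_pos_emb)
        return query, key

The diff below layers a second, independent gate on top of this: a Transformer Engine version check before switching `qkv_format` to 'bshd' for the fused path.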
diff --git a/megatron/core/transformer/attention.py b/megatron/core/transformer/attention.py index 57d37b599e..20f90da786 100644 --- a/megatron/core/transformer/attention.py +++ b/megatron/core/transformer/attention.py @@ -3,6 +3,8 @@ from abc import ABC, abstractmethod from dataclasses import dataclass from typing import Union +from importlib.metadata import version +from pkg_resources import packaging import torch @@ -78,12 +80,22 @@ def __init__( self.num_attention_heads_per_partition = divide(self.config.num_attention_heads, world_size) self.num_query_groups_per_partition = divide(self.config.num_query_groups, world_size) + self.qkv_format = 'sbhd' + te_version = packaging.version.Version(version("transformer-engine")) + # need Kirthi to confirm the version when bshd is supported + if ( + te_version >= packaging.version.Version("0.12.0") + and self.config.apply_rope_fusion + and HAVE_APPLY_ROPE_FUSION + ): + self.qkv_format = 'bshd' self.core_attention = build_module( submodules.core_attention, config=self.config, layer_number=self.layer_number, attn_mask_type=self.attn_mask_type, attention_type=self.attention_type, + qkv_format=self.qkv_format, ) self.checkpoint_core_attention = self.config.recompute_granularity == 'selective' @@ -246,7 +258,6 @@ def forward( key, value, rotary_pos_emb, attn_mask_type = self._adjust_key_value_for_inference( inference_params, key, value, rotary_pos_emb ) - # ================================================ # relative positional embedding (rotary embedding) # ================================================ @@ -255,6 +266,10 @@ def forward( if self.config.apply_rope_fusion and HAVE_APPLY_ROPE_FUSION: query = fused_apply_rotary_pos_emb(query, q_pos_emb, transpose_output_memory=True) key = fused_apply_rotary_pos_emb(key, k_pos_emb, transpose_output_memory=True) + if self.qkv_format == 'bshd': + query, key, value = [ + x.transpose(0, 1).contiguous() for x in (query, key, value) + ] else: query = apply_rotary_pos_emb(query, q_pos_emb) key = apply_rotary_pos_emb(key, k_pos_emb) @@ -282,6 +297,8 @@ def forward( output, bias = self.linear_proj(core_attn_out) + if self.qkv_format == 'bshd': + output = output.transpose(0, 1) return output, bias diff --git a/megatron/core/transformer/custom_layers/transformer_engine.py b/megatron/core/transformer/custom_layers/transformer_engine.py index d784184623..34e6aabe2a 100644 --- a/megatron/core/transformer/custom_layers/transformer_engine.py +++ b/megatron/core/transformer/custom_layers/transformer_engine.py @@ -41,7 +41,10 @@ class TENorm: # TODO should we ditch normalization config and just use spec to choose LayerNorm vs RMSNorm? 
def __new__( - cls, config: TransformerConfig, hidden_size: int, eps: float = 1e-5, + cls, + config: TransformerConfig, + hidden_size: int, + eps: float = 1e-5, ): if config.normalization == "LayerNorm": instance = te.pytorch.LayerNorm( @@ -353,6 +356,7 @@ def __init__( attn_mask_type: AttnMaskType, attention_type: str, attention_dropout: float = None, + qkv_format: str = 'sbhd', ): self.config = config self.te_forward_mask_type = False @@ -386,6 +390,9 @@ def __init__( if te_version > packaging.version.Version("0.12.0"): self.te_forward_mask_type = True + if te_version > packaging.version.Version("0.12.0"): + extra_kwargs["qkv_format"] = qkv_format + # Only Transformer-Engine version >= 1.0.0 supports context parallelism if te_version >= packaging.version.Version("1.0.0"): if getattr(TEDotProductAttention, "cp_stream") is None: From 908108d98b285f8290ca41b4be07dbd22176b08a Mon Sep 17 00:00:00 2001 From: Hongbin Liu Date: Wed, 22 Nov 2023 17:41:43 -0800 Subject: [PATCH 018/296] fix bug Signed-off-by: Hongbin Liu --- megatron/core/transformer/attention.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/megatron/core/transformer/attention.py b/megatron/core/transformer/attention.py index 20f90da786..9d6d89243e 100644 --- a/megatron/core/transformer/attention.py +++ b/megatron/core/transformer/attention.py @@ -291,14 +291,15 @@ def forward( query, key, value, attention_mask, attn_mask_type=attn_mask_type ) + if self.qkv_format == 'bshd': + core_attn_out = core_attn_out.transpose(0, 1) + # ================= # Output. [sq, b, h] # ================= output, bias = self.linear_proj(core_attn_out) - if self.qkv_format == 'bshd': - output = output.transpose(0, 1) return output, bias From 82e26aa85c27b60110388972540fc1611bd94492 Mon Sep 17 00:00:00 2001 From: Hongbin Liu Date: Mon, 27 Nov 2023 17:23:18 -0800 Subject: [PATCH 019/296] fix TE version --- megatron/core/transformer/attention.py | 2 +- megatron/core/transformer/custom_layers/transformer_engine.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/megatron/core/transformer/attention.py b/megatron/core/transformer/attention.py index 9d6d89243e..15ee521373 100644 --- a/megatron/core/transformer/attention.py +++ b/megatron/core/transformer/attention.py @@ -84,7 +84,7 @@ def __init__( te_version = packaging.version.Version(version("transformer-engine")) # need Kirthi to confirm the version when bshd is supported if ( - te_version >= packaging.version.Version("0.12.0") + te_version >= packaging.version.Version("0.13.0") and self.config.apply_rope_fusion and HAVE_APPLY_ROPE_FUSION ): diff --git a/megatron/core/transformer/custom_layers/transformer_engine.py b/megatron/core/transformer/custom_layers/transformer_engine.py index 34e6aabe2a..05180bf155 100644 --- a/megatron/core/transformer/custom_layers/transformer_engine.py +++ b/megatron/core/transformer/custom_layers/transformer_engine.py @@ -390,7 +390,7 @@ def __init__( if te_version > packaging.version.Version("0.12.0"): self.te_forward_mask_type = True - if te_version > packaging.version.Version("0.12.0"): + if te_version > packaging.version.Version("0.13.0"): extra_kwargs["qkv_format"] = qkv_format # Only Transformer-Engine version >= 1.0.0 supports context parallelism From 003ad9f544a85ef408119c8c387e02af0b23554f Mon Sep 17 00:00:00 2001 From: Xiaowei Ren Date: Mon, 27 Nov 2023 17:27:37 -0800 Subject: [PATCH 020/296] fix seq_length with both CP and PP Signed-off-by: Xiaowei Ren --- megatron/training.py | 4 ++-- 1 file changed, 2 insertions(+), 2 
deletions(-) diff --git a/megatron/training.py b/megatron/training.py index 8c5284c2a6..25c8e4d15b 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -429,7 +429,7 @@ def train_step(forward_step_func, data_iterator, data_iterator=data_iterator, model=model, num_microbatches=get_num_microbatches(), - seq_length=args.seq_length, + seq_length=(args.seq_length // args.context_parallel_size), micro_batch_size=args.micro_batch_size, decoder_seq_length=args.decoder_seq_length, forward_only=False) @@ -906,7 +906,7 @@ def evaluate(forward_step_func, data_iterator=data_iterator, model=model, num_microbatches=eval_num_microbatches, - seq_length=args.seq_length, + seq_length=(args.seq_length // args.context_parallel_size), micro_batch_size=args.micro_batch_size, decoder_seq_length=args.decoder_seq_length, forward_only=True) From 5eaa937e562ee64775a6084e27e920f557e5709e Mon Sep 17 00:00:00 2001 From: Xiaowei Ren Date: Mon, 4 Dec 2023 14:28:25 -0800 Subject: [PATCH 021/296] move seq-length fix to mcore Signed-off-by: Xiaowei Ren --- megatron/core/pipeline_parallel/schedules.py | 5 +++++ megatron/training.py | 4 ++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/megatron/core/pipeline_parallel/schedules.py b/megatron/core/pipeline_parallel/schedules.py index 992da78127..05a70ec700 100644 --- a/megatron/core/pipeline_parallel/schedules.py +++ b/megatron/core/pipeline_parallel/schedules.py @@ -458,6 +458,7 @@ def enable_grad_sync(): ) tensor_shape = [seq_length, micro_batch_size, config.hidden_size] + tensor_shape[0] = tensor_shape[0] // parallel_state.get_context_parallel_world_size() if config.sequence_parallel: tensor_shape[0] = tensor_shape[0] // parallel_state.get_tensor_model_parallel_world_size() @@ -958,6 +959,10 @@ def get_tensor_shapes( # Otherwise, send one tensor (pre-transpose). 
tensor_shapes = [] + seq_length = seq_length // parallel_state.get_context_parallel_world_size() + if model_type == ModelType.encoder_and_decoder: + decoder_seq_length = decoder_seq_length // parallel_state.get_context_parallel_world_size() + if config.sequence_parallel: seq_length = seq_length // parallel_state.get_tensor_model_parallel_world_size() if model_type == ModelType.encoder_and_decoder: diff --git a/megatron/training.py b/megatron/training.py index 4eff8f22e6..d18d3c3b91 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -451,7 +451,7 @@ def train_step(forward_step_func, data_iterator, data_iterator=data_iterator, model=model, num_microbatches=get_num_microbatches(), - seq_length=(args.seq_length // args.context_parallel_size), + seq_length=args.seq_length, micro_batch_size=args.micro_batch_size, decoder_seq_length=args.decoder_seq_length, forward_only=False) @@ -941,7 +941,7 @@ def evaluate(forward_step_func, data_iterator=data_iterator, model=model, num_microbatches=eval_num_microbatches, - seq_length=(args.seq_length // args.context_parallel_size), + seq_length=args.seq_length, micro_batch_size=args.micro_batch_size, decoder_seq_length=args.decoder_seq_length, forward_only=True) From 4a6d30cb66b365cd9b343c677b4d9c594a49c15b Mon Sep 17 00:00:00 2001 From: Deepak Narayanan Date: Fri, 8 Dec 2023 09:57:39 -0800 Subject: [PATCH 022/296] Add basic documentation for packages --- docs/source/api-guide/distributed.rst | 16 ++++++++++++---- docs/source/api-guide/pipeline_parallel.rst | 18 ++++++++++++++++++ docs/source/api-guide/tensor_parallel.rst | 6 ++++++ 3 files changed, 36 insertions(+), 4 deletions(-) diff --git a/docs/source/api-guide/distributed.rst b/docs/source/api-guide/distributed.rst index 37b315303b..737820331c 100644 --- a/docs/source/api-guide/distributed.rst +++ b/docs/source/api-guide/distributed.rst @@ -1,6 +1,14 @@ distributed package =================== +This package contains various utilities to finalize model weight gradients +on each rank before the optimizer step. This includes a distributed data +parallelism wrapper to all-reduce or reduce-scatter the gradients across +data-parallel replicas, and a `finalize\_model\_grads` method to +synchronize gradients across different parallelism modes (e.g., 'tied' +layers on different pipeline stages, or gradients for experts in a MoE on +different ranks due to expert parallelism). + Submodules ---------- @@ -21,10 +29,10 @@ reduce-scatter on each bucket asynchronously. distributed.finalize\_model\_grads ---------------------------------- -Finalize model grads for optimizer step across all used parallelism modes. -Synchronizes the all-reduce / reduce-scatter of model grads across DP replicas, -and all-reduces the layernorm grads for sequence parallelism, embedding grads -across first and last pipeline stages (if not tied), and expert grads for expert +Finalize model gradients for optimizer step across all used parallelism modes. +Synchronizes the all-reduce / reduce-scatter of model gradients across DP replicas, +all-reduces the layernorm gradients for sequence parallelism, embedding gradients +across first and last pipeline stages (if not tied), and expert gradients for expert parallelism. .. 
automodule:: core.distributed.finalize_model_grads diff --git a/docs/source/api-guide/pipeline_parallel.rst b/docs/source/api-guide/pipeline_parallel.rst index b7f3511f5b..5c67079a70 100644 --- a/docs/source/api-guide/pipeline_parallel.rst +++ b/docs/source/api-guide/pipeline_parallel.rst @@ -1,12 +1,22 @@ pipeline\_parallel package ========================== +This package contains implementations for two different pipeline parallelism +schedules (one without interleaving and one with interleaving, see `Efficient +Large-Scale Language Model Training on GPU Clusters Using Megatron-LM `_ +for details), and a default no-pipelining schedule. It also contains methods +for the point-to-point communication that is needed between pipeline stages. + Submodules ---------- pipeline\_parallel.p2p\_communication module -------------------------------------------- +Contains implementations for the various point-to-point communication needed +(e.g., `recv_forward` and `recv_backward`) in the different pipeline parallelism +schedules. + .. automodule:: core.pipeline_parallel.p2p_communication :members: :undoc-members: @@ -15,6 +25,14 @@ pipeline\_parallel.p2p\_communication module pipeline\_parallel.schedules module ----------------------------------- +Contains implementations for two pipeline parallelism schedules +(`forward_backward_pipelining_with_interleaving`for pipeline parallelism with +interleaving, `forward_backward_pipelining_without_interleaving` for pipeline +parallelism without interleaving) and a default no-pipelining schedule +(`forward_backward_no_pipelining`). `get_forward_backward_func` returns the right +scheduling function to use based on the configuration being trained +(e.g., if pipeline-parallel size is 1, use `forward_backward_no_pipelining`). + .. automodule:: core.pipeline_parallel.schedules :members: :undoc-members: diff --git a/docs/source/api-guide/tensor_parallel.rst b/docs/source/api-guide/tensor_parallel.rst index 82b29f7866..d8ae9dea22 100644 --- a/docs/source/api-guide/tensor_parallel.rst +++ b/docs/source/api-guide/tensor_parallel.rst @@ -1,6 +1,12 @@ tensor\_parallel package ======================== +This package contains an implementation for tensor parallelism in transformer +models (see `Megatron-LM: Training Multi-Billion Parameter Language Models +Using Model Parallelism `_ and `Reducing +Activation Recomputation in Large Transformer Models `_ +for details). 
+ Submodules ---------- From b63cc64b76545a72a1df3343f91a36702f3deb74 Mon Sep 17 00:00:00 2001 From: Selvaraj Anandaraj Date: Sat, 9 Dec 2023 01:10:23 -0800 Subject: [PATCH 023/296] Fixed verbosity and added guards for TE exports --- megatron/core/transformer/attention.py | 39 ++++------- .../custom_layers/transformer_engine.py | 9 ++- megatron/utils.py | 68 +++++++++---------- 3 files changed, 54 insertions(+), 62 deletions(-) diff --git a/megatron/core/transformer/attention.py b/megatron/core/transformer/attention.py index 444df31009..847c5d94c0 100644 --- a/megatron/core/transformer/attention.py +++ b/megatron/core/transformer/attention.py @@ -13,6 +13,7 @@ from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.spec_utils import ModuleSpec, build_module from megatron.core.transformer.transformer_config import TransformerConfig +from megatron.core.transformer.custom_layers.transformer_engine import SplitAlongDim from megatron.core.utils import divide from .enums import AttnMaskType @@ -310,42 +311,32 @@ def get_query_key_value_tensors(self, hidden_states, key_value_states=None): ) mixed_qkv = mixed_qkv.view(*new_tensor_shape) - try: + split_arg_list = [ + ( + self.num_attention_heads_per_partition + // self.num_query_groups_per_partition + * self.hidden_size_per_attention_head + ), + self.hidden_size_per_attention_head, + self.hidden_size_per_attention_head, + ] + + if SplitAlongDim is not None: - from megatron.core.transformer.custom_layers.transformer_engine import SplitAlongDim - # [sq, b, ng, (np/ng + 2) * hn] --> [sq, b, ng, np/ng * hn], [sq, b, ng, hn], [sq, b, ng, hn] (query, key, value) = SplitAlongDim( mixed_qkv, 3, - [ - ( - self.num_attention_heads_per_partition - // self.num_query_groups_per_partition - * self.hidden_size_per_attention_head - ), - self.hidden_size_per_attention_head, - self.hidden_size_per_attention_head, - ], + split_arg_list, ) - - except ImportError: + else: # [sq, b, ng, (np/ng + 2) * hn] --> [sq, b, ng, np/ng * hn], [sq, b, ng, hn], [sq, b, ng, hn] (query, key, value) = torch.split( mixed_qkv, - [ - ( - self.num_attention_heads_per_partition - // self.num_query_groups_per_partition - * self.hidden_size_per_attention_head - ), - self.hidden_size_per_attention_head, - self.hidden_size_per_attention_head, - ], + split_arg_list, dim=3, ) - # [sq, b, ng, np/ng * hn] -> [sq, b, np, hn] query = query.reshape(query.size(0), query.size(1), -1, self.hidden_size_per_attention_head) diff --git a/megatron/core/transformer/custom_layers/transformer_engine.py b/megatron/core/transformer/custom_layers/transformer_engine.py index b5f9ffb9d9..c2497513ab 100644 --- a/megatron/core/transformer/custom_layers/transformer_engine.py +++ b/megatron/core/transformer/custom_layers/transformer_engine.py @@ -4,7 +4,6 @@ import torch import transformer_engine as te -from transformer_engine.pytorch.attention import _SplitAlongDim from pkg_resources import packaging from megatron.core import ModelParallelConfig @@ -401,5 +400,11 @@ def __init__( **extra_kwargs, ) +try: -SplitAlongDim = _SplitAlongDim.apply + from transformer_engine.pytorch.attention import _SplitAlongDim + SplitAlongDim = _SplitAlongDim.apply + +except ImportError: + + SplitAlongDim = None diff --git a/megatron/utils.py b/megatron/utils.py index 2c585c674e..fbe6f83ac9 100644 --- a/megatron/utils.py +++ b/megatron/utils.py @@ -275,6 +275,9 @@ def get_batch_on_this_tp_rank(data_iterator): args = get_args() + def _broadcast(item): + torch.distributed.broadcast(item, 
mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) + if mpu.get_tensor_model_parallel_rank() == 0: if data_iterator is not None: @@ -291,59 +294,52 @@ def get_batch_on_this_tp_rank(data_iterator): } if args.pipeline_model_parallel_size == 1: - torch.distributed.broadcast(batch['tokens'], mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) - torch.distributed.broadcast(batch['labels'], mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) - torch.distributed.broadcast(batch['loss_mask'], mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) - torch.distributed.broadcast(batch['attention_mask'], mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) - torch.distributed.broadcast(batch['position_ids'], mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) + _broadcast(batch['tokens']) + _broadcast(batch['labels']) + _broadcast(batch['loss_mask']) + _broadcast(batch['attention_mask']) + _broadcast(batch['position_ids']) elif mpu.is_pipeline_first_stage(): - torch.distributed.broadcast(batch['tokens'], mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) - torch.distributed.broadcast(batch['attention_mask'], mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) - torch.distributed.broadcast(batch['position_ids'], mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) + _broadcast(batch['tokens']) + _broadcast(batch['attention_mask']) + _broadcast(batch['position_ids']) elif mpu.is_pipeline_last_stage(): - torch.distributed.broadcast(batch['labels'], mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) - torch.distributed.broadcast(batch['loss_mask'], mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) - torch.distributed.broadcast(batch['attention_mask'], mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) - + _broadcast(batch['labels']) + _broadcast(batch['loss_mask']) + _broadcast(batch['attention_mask']) else: - if args.pipeline_model_parallel_size == 1: - tokens=torch.empty((args.micro_batch_size,args.seq_length), dtype = torch.int64 , device = torch.cuda.current_device()) - labels=torch.empty((args.micro_batch_size,args.seq_length), dtype = torch.int64 , device = torch.cuda.current_device()) - loss_mask=torch.empty((args.micro_batch_size,args.seq_length), dtype = torch.float32 , device = torch.cuda.current_device()) - attention_mask=torch.empty((args.micro_batch_size,args.micro_batch_size,args.seq_length,args.seq_length), dtype = torch.bool , device = torch.cuda.current_device()) - position_ids=torch.empty((args.micro_batch_size,args.seq_length), dtype = torch.int64 , device = torch.cuda.current_device()) - - torch.distributed.broadcast(tokens, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) - torch.distributed.broadcast(labels, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) - torch.distributed.broadcast(loss_mask, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) - torch.distributed.broadcast(attention_mask, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) - torch.distributed.broadcast(position_ids, mpu.get_tensor_model_parallel_src_rank(), 
group=mpu.get_tensor_model_parallel_group()) + tokens=torch.empty((args.micro_batch_size,args.seq_length), dtype = torch.int64 , device = torch.cuda.current_device()) + labels=torch.empty((args.micro_batch_size,args.seq_length), dtype = torch.int64 , device = torch.cuda.current_device()) + loss_mask=torch.empty((args.micro_batch_size,args.seq_length), dtype = torch.float32 , device = torch.cuda.current_device()) + attention_mask=torch.empty((args.micro_batch_size,args.micro_batch_size,args.seq_length,args.seq_length), dtype = torch.bool , device = torch.cuda.current_device()) + position_ids=torch.empty((args.micro_batch_size,args.seq_length), dtype = torch.int64 , device = torch.cuda.current_device()) + if args.pipeline_model_parallel_size == 1: + _broadcast(tokens) + _broadcast(labels) + _broadcast(loss_mask) + _broadcast(attention_mask) + _broadcast(position_ids) + elif mpu.is_pipeline_first_stage(): - tokens=torch.empty((args.micro_batch_size,args.seq_length), dtype = torch.int64 , device = torch.cuda.current_device()) labels=None loss_mask=None - attention_mask=torch.empty((args.micro_batch_size,args.micro_batch_size,args.seq_length,args.seq_length), dtype = torch.bool , device = torch.cuda.current_device()) - position_ids=torch.empty((args.micro_batch_size,args.seq_length), dtype = torch.int64 , device = torch.cuda.current_device()) - torch.distributed.broadcast(tokens, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) - torch.distributed.broadcast(attention_mask, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) - torch.distributed.broadcast(position_ids, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) + _broadcast(tokens) + _broadcast(attention_mask) + _broadcast(position_ids) elif mpu.is_pipeline_last_stage(): tokens=None - labels=torch.empty((args.micro_batch_size,args.seq_length), dtype = torch.int64 , device = torch.cuda.current_device()) - loss_mask=torch.empty((args.micro_batch_size,args.seq_length), dtype = torch.float32 , device = torch.cuda.current_device()) - attention_mask=torch.empty((args.micro_batch_size,args.micro_batch_size,args.seq_length,args.seq_length), dtype = torch.bool , device = torch.cuda.current_device()) position_ids=None - torch.distributed.broadcast(labels, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) - torch.distributed.broadcast(loss_mask, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) - torch.distributed.broadcast(attention_mask, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()) + _broadcast(labels) + _broadcast(loss_mask) + _broadcast(attention_mask) batch = { 'tokens': tokens, From d8a1336a3cef4cc9eb43ac2df4c7614acdb796c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20B=C5=82a=C5=BC?= Date: Fri, 8 Dec 2023 17:16:04 +0100 Subject: [PATCH 024/296] Implement LayerNorms support for dist ckpt --- .../core/transformer/transformer_layer.py | 37 ++++++++++++------- 1 file changed, 24 insertions(+), 13 deletions(-) diff --git a/megatron/core/transformer/transformer_layer.py b/megatron/core/transformer/transformer_layer.py index b9951d4347..79b02c5daa 100644 --- a/megatron/core/transformer/transformer_layer.py +++ b/megatron/core/transformer/transformer_layer.py @@ -12,6 +12,7 @@ from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.spec_utils import ModuleSpec, build_module from 
megatron.core.transformer.transformer_config import TransformerConfig +from megatron.core.transformer.utils import make_sharded_tensors_for_checkpoint from megatron.core.utils import make_viewless_tensor @@ -228,18 +229,28 @@ def sharded_state_dict(self, prefix=''): (0, global_layer_offset, num_layers) ] # PP sharding offset for ShardedTensors - attn_state_dict = self.self_attention.sharded_state_dict( - prefix=f'{state_dict_prefix}self_attention.', - sharded_key_prefix=f'{prefix}self_attention.', - sharded_offsets=sharded_pp_offset, - ) - - mlp_state_dict = self.mlp.sharded_state_dict( - prefix=f'{state_dict_prefix}mlp.', - sharded_key_prefix=f'{prefix}mlp.', - sharded_offsets=sharded_pp_offset, - ) - - sharded_state_dict = {**mlp_state_dict, **attn_state_dict} + sharded_state_dict = {} + + # TODO: consider `self._modules.items()` instead of explicit enumeration + for name, module in [ + ('input_layernorm', self.input_layernorm), + ('self_attention', self.self_attention), + ('pre_cross_attn_layernorm', self.pre_cross_attn_layernorm), + ('cross_attention', self.cross_attention), + ('pre_mlp_layernorm', self.pre_mlp_layernorm), + ('mlp', self.mlp), + ]: + if hasattr(module, 'sharded_state_dict'): + module_sharded_sd = module.sharded_state_dict( + prefix=f'{state_dict_prefix}{name}.', + sharded_key_prefix=f'{prefix}{name}.', + sharded_offsets=sharded_pp_offset, + ) + else: + module_sd = module.state_dict(prefix='', keep_vars=True) + module_sharded_sd = make_sharded_tensors_for_checkpoint( + module_sd, f'{state_dict_prefix}{name}.', f'{prefix}{name}.', {}, sharded_pp_offset + ) + sharded_state_dict.update(module_sharded_sd) return sharded_state_dict From 796ac7d24c97bcc10048befe7fb52649ca0ff104 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20B=C5=82a=C5=BC?= Date: Mon, 11 Dec 2023 15:23:30 +0100 Subject: [PATCH 025/296] Implement local layers support for dist ckpt --- megatron/core/tensor_parallel/layers.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/megatron/core/tensor_parallel/layers.py b/megatron/core/tensor_parallel/layers.py index 38379cb34d..7681e12a41 100644 --- a/megatron/core/tensor_parallel/layers.py +++ b/megatron/core/tensor_parallel/layers.py @@ -32,6 +32,7 @@ ) from .random import get_cuda_rng_tracker, get_expert_parallel_rng_tracker_name from .utils import VocabUtility, divide, split_tensor_along_last_dim +from ..transformer.utils import make_sharded_tensors_for_checkpoint _grad_accum_fusion_available = True try: @@ -756,6 +757,13 @@ def forward(self, input_: torch.Tensor, weight: Optional[torch.Tensor] = None): output_bias = self.bias if self.skip_bias_add else None return output, output_bias + def sharded_state_dict(self, prefix='', sharded_key_prefix=None, sharded_offsets=()): + """ Sharding along axis 0, bias sharded """ + state_dict = self.state_dict(prefix='', keep_vars=True) + return make_sharded_tensors_for_checkpoint( + state_dict, prefix, sharded_key_prefix, {'weight': 0, 'bias': 0}, sharded_offsets + ) + class RowParallelLinear(torch.nn.Module): """Linear layer with row parallelism. 
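The two hunks in this diff mirror how the weights are partitioned across tensor-parallel ranks: a column-parallel weight is [output_size/tp, input_size], so both it and its bias are sharded along dim 0, while a row-parallel weight is [output_size, input_size/tp], so only dim 1 is sharded and the bias is replicated. That is all the {'weight': 0, 'bias': 0} and {'weight': 1} axis maps encode. A small shape check of that layout, with hypothetical sizes and plain PyTorch only:

    import torch

    tp = 4                                  # hypothetical tensor-parallel world size
    output_size, input_size = 1024, 256     # hypothetical layer dimensions

    # Column-parallel: each rank owns a slice of the output dimension.
    col_weight_local = torch.empty(output_size // tp, input_size)   # sharded along dim 0
    col_bias_local = torch.empty(output_size // tp)                 # sharded along dim 0

    # Row-parallel: each rank owns a slice of the input dimension.
    row_weight_local = torch.empty(output_size, input_size // tp)   # sharded along dim 1
    row_bias_local = torch.empty(output_size)                       # replicated, not sharded

    # Reassembling the full tensors concatenates along the sharded axis.
    full_col_weight = torch.cat([col_weight_local] * tp, dim=0)
    full_row_weight = torch.cat([row_weight_local] * tp, dim=1)
    assert full_col_weight.shape == full_row_weight.shape == (output_size, input_size)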
@@ -923,3 +931,10 @@ def forward(self, input_): output = output_ output_bias = self.bias return output, output_bias + + def sharded_state_dict(self, prefix='', sharded_key_prefix=None, sharded_offsets=()): + """ Sharding along axis 1, bias not sharded """ + state_dict = self.state_dict(prefix='', keep_vars=True) + return make_sharded_tensors_for_checkpoint( + state_dict, prefix, sharded_key_prefix, {'weight': 1}, sharded_offsets + ) From fdb038c8100afbd0d1bef1690324bda84669d863 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20B=C5=82a=C5=BC?= Date: Mon, 11 Dec 2023 15:44:06 +0100 Subject: [PATCH 026/296] Fix style --- megatron/core/tensor_parallel/layers.py | 2 +- megatron/core/transformer/transformer_layer.py | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/megatron/core/tensor_parallel/layers.py b/megatron/core/tensor_parallel/layers.py index 7681e12a41..e527d706b3 100644 --- a/megatron/core/tensor_parallel/layers.py +++ b/megatron/core/tensor_parallel/layers.py @@ -22,6 +22,7 @@ get_tensor_model_parallel_world_size, ) +from ..transformer.utils import make_sharded_tensors_for_checkpoint from .mappings import ( copy_to_tensor_model_parallel_region, gather_from_sequence_parallel_region, @@ -32,7 +33,6 @@ ) from .random import get_cuda_rng_tracker, get_expert_parallel_rng_tracker_name from .utils import VocabUtility, divide, split_tensor_along_last_dim -from ..transformer.utils import make_sharded_tensors_for_checkpoint _grad_accum_fusion_available = True try: diff --git a/megatron/core/transformer/transformer_layer.py b/megatron/core/transformer/transformer_layer.py index 79b02c5daa..c75e8bf9e0 100644 --- a/megatron/core/transformer/transformer_layer.py +++ b/megatron/core/transformer/transformer_layer.py @@ -249,7 +249,11 @@ def sharded_state_dict(self, prefix=''): else: module_sd = module.state_dict(prefix='', keep_vars=True) module_sharded_sd = make_sharded_tensors_for_checkpoint( - module_sd, f'{state_dict_prefix}{name}.', f'{prefix}{name}.', {}, sharded_pp_offset + module_sd, + f'{state_dict_prefix}{name}.', + f'{prefix}{name}.', + {}, + sharded_pp_offset, ) sharded_state_dict.update(module_sharded_sd) From 165e68cf1a9d75b9fdddb8ce470f658687aadb9d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20B=C5=82a=C5=BC?= Date: Mon, 11 Dec 2023 15:44:18 +0100 Subject: [PATCH 027/296] Add local layers test case --- .../dist_checkpointing/models/test_gpt_model.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/tests/unit_tests/dist_checkpointing/models/test_gpt_model.py b/tests/unit_tests/dist_checkpointing/models/test_gpt_model.py index 742171f950..6bcaae1297 100644 --- a/tests/unit_tests/dist_checkpointing/models/test_gpt_model.py +++ b/tests/unit_tests/dist_checkpointing/models/test_gpt_model.py @@ -14,10 +14,11 @@ from tests.unit_tests.test_utilities import Utils from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed from megatron.core.models.gpt.gpt_layer_specs import \ - get_gpt_layer_with_transformer_engine_spec, get_gpt_layer_local_spec + get_gpt_layer_with_transformer_engine_spec, get_gpt_layer_local_spec, \ + gpt_layer_with_transformer_engine_spec_moe, gpt_layer_local_spec_moe -def initialize_gpt_model(seed, use_te=True, **config_kwargs): +def initialize_gpt_model(seed, layer_spec_fn=get_gpt_layer_with_transformer_engine_spec, **config_kwargs): torch.manual_seed(seed) model_parallel_cuda_manual_seed(seed) @@ -26,8 +27,7 @@ def initialize_gpt_model(seed, use_te=True, **config_kwargs): 
transformer_config = TransformerConfig(**default_config_kwargs) pre_process = ps.is_pipeline_first_stage() post_process = ps.is_pipeline_last_stage() - layer_spec = get_gpt_layer_with_transformer_engine_spec() if use_te else get_gpt_layer_local_spec() - model = GPTModel(config=transformer_config, transformer_layer_spec=layer_spec, vocab_size=128, max_sequence_length=4, + model = GPTModel(config=transformer_config, transformer_layer_spec=layer_spec_fn(), vocab_size=128, max_sequence_length=4, pre_process=pre_process, post_process=post_process) with torch.no_grad(): @@ -44,9 +44,12 @@ def setup_method(self, method): def teardown_method(self, method): Utils.destroy_model_parallel() - @pytest.mark.parametrize('use_te', [True]) # non-TE not supported yet - def test_sharded_state_dict_save_load(self, use_te, tmp_path_dist_ckpt): - gpt_model = initialize_gpt_model(use_te) + @pytest.mark.parametrize('layer_spec_fn', [ + get_gpt_layer_with_transformer_engine_spec, + get_gpt_layer_local_spec, + ]) + def test_sharded_state_dict_save_load(self, layer_spec_fn, tmp_path_dist_ckpt): + gpt_model = initialize_gpt_model(1, layer_spec_fn) with TempNamedDir(tmp_path_dist_ckpt / 'test_gpt_model') as ckpt_dir: # Save sharded_state_dict = gpt_model.sharded_state_dict() From 07b5b2ba00dd97bd48f3f0d8eb8b9602a125a8a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20B=C5=82a=C5=BC?= Date: Mon, 11 Dec 2023 17:09:59 +0100 Subject: [PATCH 028/296] Avoid deadlocks in unit tests --- .../dist_checkpointing/models/test_gpt_model.py | 9 ++------- .../unit_tests/dist_checkpointing/test_serialization.py | 2 ++ 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/tests/unit_tests/dist_checkpointing/models/test_gpt_model.py b/tests/unit_tests/dist_checkpointing/models/test_gpt_model.py index 6bcaae1297..a910fec52a 100644 --- a/tests/unit_tests/dist_checkpointing/models/test_gpt_model.py +++ b/tests/unit_tests/dist_checkpointing/models/test_gpt_model.py @@ -37,18 +37,12 @@ def initialize_gpt_model(seed, layer_spec_fn=get_gpt_layer_with_transformer_engi class TestGPTModel: - - def setup_method(self, method): - Utils.initialize_model_parallel(2,4) - - def teardown_method(self, method): - Utils.destroy_model_parallel() - @pytest.mark.parametrize('layer_spec_fn', [ get_gpt_layer_with_transformer_engine_spec, get_gpt_layer_local_spec, ]) def test_sharded_state_dict_save_load(self, layer_spec_fn, tmp_path_dist_ckpt): + Utils.initialize_model_parallel(2,4) gpt_model = initialize_gpt_model(1, layer_spec_fn) with TempNamedDir(tmp_path_dist_ckpt / 'test_gpt_model') as ckpt_dir: # Save @@ -59,6 +53,7 @@ def test_sharded_state_dict_save_load(self, layer_spec_fn, tmp_path_dist_ckpt): sharded_state_dict = gpt_model.sharded_state_dict() state_dict = load(sharded_state_dict, ckpt_dir) gpt_model.load_state_dict(state_dict) + Utils.destroy_model_parallel() class TestGPTModelReconfiguration: diff --git a/tests/unit_tests/dist_checkpointing/test_serialization.py b/tests/unit_tests/dist_checkpointing/test_serialization.py index fef536fd89..25dd9e0a91 100644 --- a/tests/unit_tests/dist_checkpointing/test_serialization.py +++ b/tests/unit_tests/dist_checkpointing/test_serialization.py @@ -27,6 +27,7 @@ def test_single_process_save_load(self, tmp_path_dist_ckpt): with TempNamedDir(tmp_path_dist_ckpt / 'test_single_process_save_load') as ckpt_dir: save(sharded_state_dict, ckpt_dir) + torch.distributed.barrier() assert (ckpt_dir / 'keyA').is_dir() assert (ckpt_dir / 'keyB').is_dir() @@ -161,6 +162,7 @@ def test_load_tensors_metadata(self, 
tmp_path_dist_ckpt): with TempNamedDir(tmp_path_dist_ckpt / 'test_load_tensors_metadata') as ckpt_dir: save(state_dict, ckpt_dir) + torch.distributed.barrier() assert (ckpt_dir / 'keyA').is_dir() del state_dict From 5558796bb407fca1bf320a006766e4332f4d9c35 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20B=C5=82a=C5=BC?= Date: Mon, 11 Dec 2023 17:09:02 +0100 Subject: [PATCH 029/296] Generalize sharded_state_dict implementation --- megatron/core/transformer/attention.py | 16 ---------- megatron/core/transformer/module.py | 30 ++++++++++++++++--- .../core/transformer/transformer_layer.py | 10 +------ 3 files changed, 27 insertions(+), 29 deletions(-) diff --git a/megatron/core/transformer/attention.py b/megatron/core/transformer/attention.py index c725c7f3a2..64ce55d660 100644 --- a/megatron/core/transformer/attention.py +++ b/megatron/core/transformer/attention.py @@ -17,7 +17,6 @@ from .enums import AttnMaskType from .transformer_config import TransformerConfig -from .utils import make_sharded_tensors_for_checkpoint @dataclass @@ -344,21 +343,6 @@ def get_query_key_value_tensors(self, hidden_states, key_value_states=None): return query, key, value - def sharded_state_dict(self, prefix='', sharded_key_prefix=None, sharded_offsets=()): - sharded_key_prefix = prefix if sharded_key_prefix is None else sharded_key_prefix - sharded_state_dict = {} - for name, module in ( - ('linear_qkv', self.linear_qkv), - ('linear_proj', self.linear_proj), - ): - sub_sd = module.sharded_state_dict( - prefix=f'{prefix}{name}.', - sharded_key_prefix=f'{sharded_key_prefix}{name}.', - sharded_offsets=sharded_offsets, - ) - sharded_state_dict.update(sub_sd) - return sharded_state_dict - class CrossAttention(Attention): """Cross-attention layer class diff --git a/megatron/core/transformer/module.py b/megatron/core/transformer/module.py index d20074aa07..3356ae9420 100644 --- a/megatron/core/transformer/module.py +++ b/megatron/core/transformer/module.py @@ -7,6 +7,7 @@ from megatron.core import parallel_state from megatron.core.transformer.transformer_config import TransformerConfig +from megatron.core.transformer.utils import make_sharded_tensors_for_checkpoint _FLOAT_TYPES = (torch.FloatTensor, torch.cuda.FloatTensor) _HALF_TYPES = (torch.HalfTensor, torch.cuda.HalfTensor) @@ -46,7 +47,7 @@ def state_dict_for_save_checkpoint(self, prefix: str = '', keep_vars: bool = Fal return self.state_dict(prefix=prefix, keep_vars=keep_vars) - def sharded_state_dict(self, prefix: str = ''): + def sharded_state_dict(self, prefix='', sharded_key_prefix=None, sharded_offsets=()): """Override sharded state dict with Dist Checkpointing. Override sharded_state_dict when using distributed checkpointing. keep_vars must always be set to True so that optimizer states can be sharded. 
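The next hunk replaces the single-line implementation with a generic traversal over `self._modules`, so any MegatronModule gets a working `sharded_state_dict` even when some submodules (for example plain LayerNorms) do not define one. It is consumed the same way the unit tests earlier in this series use it; a hedged sketch of that flow, assuming `save` and `load` are importable from `megatron.core.dist_checkpointing` as in those tests, with model construction and parallel-state setup omitted:

    from megatron.core.dist_checkpointing import load, save

    def save_sharded(model, ckpt_dir):
        # Each rank describes only the shards it owns; the checkpoint is written cooperatively.
        sharded_state_dict = model.sharded_state_dict(prefix='')
        save(sharded_state_dict, ckpt_dir)

    def load_sharded(model, ckpt_dir):
        # A freshly built sharded state dict tells `load` which slices this rank needs,
        # so the parallel layout at load time may differ from the one used at save time.
        sharded_state_dict = model.sharded_state_dict(prefix='')
        state_dict = load(sharded_state_dict, ckpt_dir)
        model.load_state_dict(state_dict)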
@@ -57,7 +58,28 @@ def sharded_state_dict(self, prefix: str = ''): Returns: _type_: _description_ """ - return self.state_dict(prefix=prefix, keep_vars=True) + sharded_key_prefix = prefix if sharded_key_prefix is None else sharded_key_prefix + sharded_state_dict = {} + + for name, module in self._modules.items(): + if hasattr(module, 'sharded_state_dict'): + module_sharded_sd = module.sharded_state_dict( + prefix=f'{prefix}{name}.', + sharded_key_prefix=f'{sharded_key_prefix}{name}.', + sharded_offsets=sharded_offsets, + ) + else: + module_sd = module.state_dict(prefix='', keep_vars=True) + module_sharded_sd = make_sharded_tensors_for_checkpoint( + module_sd, + f'{prefix}{name}.', + f'{sharded_key_prefix}{name}.', + {}, + sharded_offsets, + ) + sharded_state_dict.update(module_sharded_sd) + + return sharded_state_dict def conversion_helper(val, conversion): @@ -146,12 +168,12 @@ def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False): """Retrieve state_dict from the module being wrapped.""" return self.module.state_dict_for_save_checkpoint(prefix=prefix, keep_vars=keep_vars) - def sharded_state_dict(self, prefix=''): + def sharded_state_dict(self, prefix='', *args, **kwargs): """Retrieve state_dict from the module being wrapped. When using distributed checkpointing, keep_vars must always be set to True. """ - return self.module.sharded_state_dict(prefix=prefix) + return self.module.sharded_state_dict(prefix, *args, **kwargs) def load_state_dict(self, state_dict, strict=True): self.module.load_state_dict(state_dict, strict=strict) diff --git a/megatron/core/transformer/transformer_layer.py b/megatron/core/transformer/transformer_layer.py index c75e8bf9e0..be6a3ec9da 100644 --- a/megatron/core/transformer/transformer_layer.py +++ b/megatron/core/transformer/transformer_layer.py @@ -231,15 +231,7 @@ def sharded_state_dict(self, prefix=''): sharded_state_dict = {} - # TODO: consider `self._modules.items()` instead of explicit enumeration - for name, module in [ - ('input_layernorm', self.input_layernorm), - ('self_attention', self.self_attention), - ('pre_cross_attn_layernorm', self.pre_cross_attn_layernorm), - ('cross_attention', self.cross_attention), - ('pre_mlp_layernorm', self.pre_mlp_layernorm), - ('mlp', self.mlp), - ]: + for name, module in self._modules.items(): if hasattr(module, 'sharded_state_dict'): module_sharded_sd = module.sharded_state_dict( prefix=f'{state_dict_prefix}{name}.', From f1ac9888ee4da6e00c7d88ef9e76c33f3083f2c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20B=C5=82a=C5=BC?= Date: Mon, 11 Dec 2023 17:39:45 +0100 Subject: [PATCH 030/296] Add doc --- megatron/core/transformer/module.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/megatron/core/transformer/module.py b/megatron/core/transformer/module.py index 3356ae9420..df42e48012 100644 --- a/megatron/core/transformer/module.py +++ b/megatron/core/transformer/module.py @@ -48,15 +48,23 @@ def state_dict_for_save_checkpoint(self, prefix: str = '', keep_vars: bool = Fal return self.state_dict(prefix=prefix, keep_vars=keep_vars) def sharded_state_dict(self, prefix='', sharded_key_prefix=None, sharded_offsets=()): - """Override sharded state dict with Dist Checkpointing. + """Sharded state dict with Distributed Checkpointing. - Override sharded_state_dict when using distributed checkpointing. keep_vars must always be set to True so that optimizer states can be sharded. 
+ General definition of sharded_state_dict tries to call `sharded_state_dict` + of submodules when possible, otherwise assumes tensors are replicated + across TP and DP. + When overriding, keep_vars argument of plain `state_dict` method must + always be set to True so that optimizer states can be sharded. Args: - prefix (str, optional): _description_. Defaults to ''. + prefix (str): prefix for the state dict keys + sharded_key_prefix (str, optional): prefix for the ShardedTensor keys. + If None, the same prefix as for state dict keys is assumed. + sharded_offsets (Iterable[Tuple[int, int, int]], optional): sharding already + applied (e.g. PP related) by sup-modules. Passed along to ShardedTensor Returns: - _type_: _description_ + dict: dictionary of state dict keys mapped to ShardedTensors """ sharded_key_prefix = prefix if sharded_key_prefix is None else sharded_key_prefix sharded_state_dict = {} From 042f6d032c525eae349e04113d109c0ed82fdf95 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20B=C5=82a=C5=BC?= Date: Wed, 13 Dec 2023 16:25:25 +0100 Subject: [PATCH 031/296] Extract _intermediate_sharded_state_dict --- megatron/core/transformer/module.py | 4 ++++ .../core/transformer/transformer_layer.py | 22 +------------------ 2 files changed, 5 insertions(+), 21 deletions(-) diff --git a/megatron/core/transformer/module.py b/megatron/core/transformer/module.py index df42e48012..86314d50a2 100644 --- a/megatron/core/transformer/module.py +++ b/megatron/core/transformer/module.py @@ -48,6 +48,10 @@ def state_dict_for_save_checkpoint(self, prefix: str = '', keep_vars: bool = Fal return self.state_dict(prefix=prefix, keep_vars=keep_vars) def sharded_state_dict(self, prefix='', sharded_key_prefix=None, sharded_offsets=()): + self._intermediate_sharded_state_dict(prefix, sharded_key_prefix, sharded_offsets) + + + def _intermediate_sharded_state_dict(self, prefix='', sharded_key_prefix=None, sharded_offsets=()): """Sharded state dict with Distributed Checkpointing. 
General definition of sharded_state_dict tries to call `sharded_state_dict` diff --git a/megatron/core/transformer/transformer_layer.py b/megatron/core/transformer/transformer_layer.py index be6a3ec9da..84ae4525a8 100644 --- a/megatron/core/transformer/transformer_layer.py +++ b/megatron/core/transformer/transformer_layer.py @@ -229,24 +229,4 @@ def sharded_state_dict(self, prefix=''): (0, global_layer_offset, num_layers) ] # PP sharding offset for ShardedTensors - sharded_state_dict = {} - - for name, module in self._modules.items(): - if hasattr(module, 'sharded_state_dict'): - module_sharded_sd = module.sharded_state_dict( - prefix=f'{state_dict_prefix}{name}.', - sharded_key_prefix=f'{prefix}{name}.', - sharded_offsets=sharded_pp_offset, - ) - else: - module_sd = module.state_dict(prefix='', keep_vars=True) - module_sharded_sd = make_sharded_tensors_for_checkpoint( - module_sd, - f'{state_dict_prefix}{name}.', - f'{prefix}{name}.', - {}, - sharded_pp_offset, - ) - sharded_state_dict.update(module_sharded_sd) - - return sharded_state_dict + return self._intermediate_sharded_state_dict(state_dict_prefix, prefix, sharded_pp_offset) From bf10841e45d05918e82a05cfc635e354ba6b846a Mon Sep 17 00:00:00 2001 From: Alexandros Koumparoulis Date: Thu, 14 Dec 2023 20:34:02 +0000 Subject: [PATCH 032/296] Sliding Window Attention: Add window size option to TransformerConfig --- megatron/core/transformer/transformer_config.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/megatron/core/transformer/transformer_config.py b/megatron/core/transformer/transformer_config.py index 47647e657a..f77d959217 100644 --- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -2,7 +2,7 @@ import types from dataclasses import dataclass -from typing import Callable +from typing import Callable, Tuple, Optional import torch import torch.nn.functional as F @@ -53,6 +53,7 @@ class TransformerConfig(ModelParallelConfig): fp8_wgrad (bool): When set to False, override FP8 config options and do the wgrad computation in higher precision. Defaults to True. clone_scatter_output_in_embedding (bool): When set to true, clone the output of scatter_to_sequence_parallel_region in embedding layer to facilitate garbage collection of input. normalization (str): Swtich b/w `LayerNorm` and `RMSNorm` as normalization layers. For now, these are primarily used by Transformer-Engine's layers like `LayerNormLinear`. Default value is `LayerNorm`. + window_size ((int,int) or None): If not None, then will use sliding window attention. The size of the window is specified by the numbers inside the tuple; -1 is special value meaning "infinite window size". """ # model architecture @@ -74,6 +75,7 @@ class TransformerConfig(ModelParallelConfig): gated_linear_unit: bool = False activation_func: Callable = F.gelu num_moe_experts: int = None + window_size: Optional[Tuple[int, int]] = None # initialization init_method: Callable = None From eabcebed480c8aa9afbbde0eabb8afe77849c905 Mon Sep 17 00:00:00 2001 From: Alexandros Koumparoulis Date: Thu, 14 Dec 2023 21:08:46 +0000 Subject: [PATCH 033/296] Add window_size argument to TEDotProductAttention. 
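This patch forwards the `window_size` knob introduced above to Transformer Engine's attention, which requires TE >= 1.2.0; the plain `DotProductAttention` path instead gains an assertion (next patch) that the knob stays unset. As a hedged illustration, and assuming TE's (left, right) reading of the tuple, enabling a causal sliding window from the config side might look like the following, reusing the minimal constructor arguments the unit tests above pass to `TransformerConfig`; the window length is arbitrary:

    from megatron.core.transformer.transformer_config import TransformerConfig

    # window_size is (left, right); -1 means an unbounded side. (1024, 0) keeps
    # attention causal while limiting each query to the 1024 most recent keys.
    config = TransformerConfig(
        num_layers=8,
        hidden_size=16,
        num_attention_heads=8,
        use_cpu_initialization=True,
        window_size=(1024, 0),
    )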
--- .../core/transformer/custom_layers/transformer_engine.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/megatron/core/transformer/custom_layers/transformer_engine.py b/megatron/core/transformer/custom_layers/transformer_engine.py index d784184623..fbc1c245b4 100644 --- a/megatron/core/transformer/custom_layers/transformer_engine.py +++ b/megatron/core/transformer/custom_layers/transformer_engine.py @@ -400,6 +400,13 @@ def __init__( self.config.context_parallel_size == 1 ), "Only Transformer-Engine version >= 1.0.0 supports context parallelism!" + if config.window_size is not None: + # Check version + assert ( + te_version >= packaging.version.Version("1.2.0") + ), f"Transformer-Engine version ({str(te_version)}) must be >= 1.2.0 to support sliding window attention." + extra_kwargs['window_size'] = config.window_size + super().__init__( num_attention_heads=self.config.num_attention_heads, kv_channels=self.config.kv_channels, From 651ec8bf11b6d29fa7adb15bed6f30490a72529d Mon Sep 17 00:00:00 2001 From: Alexandros Koumparoulis Date: Thu, 14 Dec 2023 21:12:09 +0000 Subject: [PATCH 034/296] check for window_size in dot_product_attention & fail (currently unsupported). --- megatron/core/transformer/dot_product_attention.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/megatron/core/transformer/dot_product_attention.py b/megatron/core/transformer/dot_product_attention.py index 7eab478bd0..859c734306 100644 --- a/megatron/core/transformer/dot_product_attention.py +++ b/megatron/core/transformer/dot_product_attention.py @@ -46,6 +46,10 @@ def __init__( self.config.context_parallel_size == 1 ), "Context parallelism is only supported by TEDotProductAttention!" + assert ( + self.config.window_size is None + ), "Sliding Window Attention is only supported by TEDotProductAttention!" + self.layer_number = max(1, layer_number) self.attn_mask_type = attn_mask_type self.attention_type = attention_type # unused for now From c59ac8b4d3c7714f07f0cd34b59e48a3331afa22 Mon Sep 17 00:00:00 2001 From: Selvaraj Anandaraj Date: Thu, 14 Dec 2023 13:38:35 -0800 Subject: [PATCH 035/296] CPU Offload initial commit Signed-off-by: Selvaraj Anandaraj --- megatron/core/__init__.py | 2 + megatron/core/cpu_offload.py | 415 ++++++++++++++++++ .../core/transformer/transformer_block.py | 27 +- .../core/transformer/transformer_config.py | 13 + 4 files changed, 449 insertions(+), 8 deletions(-) create mode 100644 megatron/core/cpu_offload.py diff --git a/megatron/core/__init__.py b/megatron/core/__init__.py index 2858dc692d..cef0b0fbf5 100644 --- a/megatron/core/__init__.py +++ b/megatron/core/__init__.py @@ -1,5 +1,6 @@ import megatron.core.tensor_parallel import megatron.core.utils +import megatron.core.cpu_offload from megatron.core import parallel_state from megatron.core.distributed import DistributedDataParallel from megatron.core.inference_params import InferenceParams @@ -12,6 +13,7 @@ "parallel_state", "tensor_parallel", "utils", + "cpu_offload", "DistributedDataParallel", "InferenceParams", "ModelParallelConfig", diff --git a/megatron/core/cpu_offload.py b/megatron/core/cpu_offload.py new file mode 100644 index 0000000000..8fcc3bc219 --- /dev/null +++ b/megatron/core/cpu_offload.py @@ -0,0 +1,415 @@ +import torch +from typing import Any +from contextlib import nullcontext + +class CpuOffloadSavedTensorHook: + """Contex-manager that executes a pair of pack/unpack hooks for saved tensors. 
+ + In this context, the ``on_save_for_backward`` method will be called every time + a tensor is saved for backward (this includes intermediary results saved using + :func:`~torch.autograd.function._ContextMethodMixin.save_for_backward` but + also those recorded by a PyTorch-defined operation). + + The ``on_get_saved_tensors`` method will be called when the backward function + of this op attempts to retrieve the saved tensor from context (this includes + :func: `torch.Tensor.backward()` or :func: `torch.autograd.grad()`. It takes the + as input the return value of the ``on_save_for_backward``, and is meant to return + an identical copy of the tensor being saved by ``on_save_for_backward`` in terms of + size, device and element values. + + Example: + + >>> import torch + >>> from typing import Any + >>> + >>> class DummyHook(CpuOffloadSavedTensorHook): + ... + ... def on_save_for_backward(self, tensor: torch.Tensor) -> Any: + ... logging.info("On save", tensor) + ... return (tensor,) + ... + ... def on_get_saved_tensor(self, saved_state: Any) -> torch.Tensor: + ... logging.info("On get", saved_state) + ... tensor, = saved_state + ... return tensor + ... + >>> a = torch.ones(5, requires_grad=True) + >>> b = torch.ones(5, requires_grad=True) * 2 + >>> with DummyHook(): + ... y = a * b + ... + On save tensor([1., 1., 1., 1., 1.], requires_grad=True) + On save tensor([2., 2., 2., 2., 2.], grad_fn=) + >>> y.sum().backward() + On get (tensor([1., 1., 1., 1., 1.], requires_grad=True),) + On get (tensor([2., 2., 2., 2., 2.], grad_fn=),) + + """ + + def __init__(self) -> None: + pass + + def __enter__(self): + torch._C._autograd._push_saved_tensors_default_hooks( + self.on_save_for_backward, + self.on_get_saved_tensor + ) + + def __exit__(self, *args: Any): + torch._C._autograd._pop_saved_tensors_default_hooks() + + + def on_save_for_backward(self, tensor: torch.Tensor) -> Any: + raise NotImplementedError("`on_save_for_backward: Callable[[torch.Tensor], Any]`" + "is not implemented in CpuOffloadHook class. Inherit " + "this class and implement your custom hooks") + + def on_get_saved_tensor(self, saved_state: Any) -> torch.Tensor: + raise NotImplementedError("`on_get_saved_tensors: Callable[[Any], torch.Tensor]`" + "is not implemented in CpuOffloadHook class. Inherit " + "this class and implement your custom hooks") + +class CpuOffloadHookWithOffloadHandler(CpuOffloadSavedTensorHook): + """Contex-manager that offloads/recovers tensors through an offload hander. + + The hook just offloads/recovers the tensor object to the handler through `tensor_push` and `tensor_pop` interface. + How the offload-handler manages the offloading, recovering or prefetching timing is transparent to this hook. 
+ """ + def __init__(self, offload_handler, handler_extra_kwargs={}, debug=False) -> None: + self.debug = debug + self.offload_handler = offload_handler + self.handler_extra_kwargs = handler_extra_kwargs + super().__init__() + + def on_save_for_backward(self, tensor: torch.Tensor) -> Any: + retrieve_identifier = self.offload_handler.tensor_push( + tensor, + **self.handler_extra_kwargs + ) + if self.debug: + logging.info(f"On save tensor shape {tensor.shape} parameter {type(tensor)}, offload_handler returns identifier {retrieve_identifier}") + return retrieve_identifier + + def on_get_saved_tensor(self, retrieve_identifier: Any) -> torch.Tensor: + tensor = self.offload_handler.tensor_pop( + retrieve_identifier, + **self.handler_extra_kwargs + ) + if self.debug: + logging.info(f"On get tensor, from identifier {retrieve_identifier} get tensor shape {tensor.shape}") + return tensor + +class OffloadHandler: + """A base class for CPU offload-handler defining two methods.""" + def __init__(self) -> None: + pass + + def tensor_push(self, tensor: torch.Tensor, **kwargs) -> Any: + raise NotImplementedError("`tensor_push is not implented in OffloadHandler class. " + "Inherit this class and implement your custom tensor_push.") + + def tensor_pop(self, state: Any, **kwargs): + raise NotImplementedError("`tensor_pop is not implented in OffloadHandler class. " + "Inherit this class and implement your custom tensor_pop.") + +class GroupCommitFunction(torch.autograd.Function): + """this is a dummy op with output identical to input. + However, it is necessary for marking a timepoint for offload handler to accomplish all synchronizations. + Implementing it as a function is necessary because we need to actions in both forward and backward. + """ + @staticmethod + def forward(ctx, tensor, cpu_offload_handler): + cpu_offload_handler.on_group_commit_forward() + ctx.cpu_offload_handler = cpu_offload_handler + # return the identical tensor + return tensor + + @staticmethod + def backward(ctx, grad_output): + cpu_offload_handler = ctx.cpu_offload_handler + cpu_offload_handler.on_group_commit_backward() + return grad_output, None + +group_prefetch_offload_commit = GroupCommitFunction.apply + +class SynchronizedGroupOffloadHandler(OffloadHandler): + """Offload Handler that offloads/reloads in a synchronized way. + The device-to-host and host-to-device copying happen in the same stream + as the computation kernels, thus the copying will block computation. + """ + def __init__(self, + num_offload_group, + tensor_need_offloading_checker=(lambda _: True), + debug=False + ) -> None: + super().__init__() + + self.num_offload_group = num_offload_group + self.tensor_need_offloading_checker = tensor_need_offloading_checker + self.debug = debug + + self.groupid_reset() + + def groupid_reset(self): + # Data structures to label saved tensors and book-keep their cpu copies. 
+ # Currently, on push, create a new cpu tensor and copies; on pop, copies the tensor back to gpu and deletes the cpu tensor + self.current_group, self.tensor_count_current_group = (0, 0) # will increment whenever `group_commit()` is invoked + self.tensor_tag_to_state = dict() + + def on_group_commit_forward(self): + if self.debug: + logging.info(f"on_group_commit_forward current_group: {self.current_group}") + + # finishing up with updating current group and tensor count + self.current_group += 1 # increment + self.tensor_count_current_group = 0 # reset + + def on_group_commit_backward(self): + self.current_group -= 1 + assert self.current_group >= 0 + + if self.debug: + logging.info(f"on_group_commit_backward current_group: {self.current_group}") + + @staticmethod + def offload(src_tensor, pin_memory=True): + cpu_backup = torch.empty(src_tensor.size(), + dtype=src_tensor.dtype, + layout=src_tensor.layout, + device="cpu", + pin_memory=pin_memory) + cpu_backup.copy_(src_tensor, non_blocking=pin_memory) + state = (src_tensor.device, cpu_backup) + return state + + @staticmethod + def reload(state, non_blocking=None): + dev, cpu_backup = state + if non_blocking is None: + non_blocking = cpu_backup.is_pinned() + return cpu_backup.to(dev, non_blocking=non_blocking) + + def tensor_push(self, tensor: torch.Tensor, **kwargs): + # obtain a unique tensor tag + tensor_tag = (self.current_group, self.tensor_count_current_group) + if self.debug: + logging.info("tensor_push", tensor_tag, tensor.shape, type(tensor), + "need_offloading ?", self.tensor_need_offloading_checker(tensor)) + self.tensor_count_current_group += 1 + assert not (tensor_tag in self.tensor_tag_to_state) + if self.current_group < self.num_offload_group and self.tensor_need_offloading_checker(tensor): + state = SynchronizedGroupOffloadHandler.offload(tensor) + self.tensor_tag_to_state[tensor_tag] = state + else: + self.tensor_tag_to_state[tensor_tag] = tensor # will be offloaded together after group commit + return tensor_tag + + def tensor_pop(self, tensor_tag, **kwargs): + assert tensor_tag in self.tensor_tag_to_state + if self.debug: + logging.info("tensor_pop", tensor_tag) + state = self.tensor_tag_to_state.pop(tensor_tag) + if isinstance(state, tuple): + tensor = SynchronizedGroupOffloadHandler.reload(state) + else: + tensor = state + return tensor + +class AsyncDoubleBufferGroupOffloadHandler(SynchronizedGroupOffloadHandler): + """Compared to synchronize, using more memory because of the buffer. But achieves better performance + due to the overlapping. D2h and h2d copying are completely hidden behind computation if computation time + of a layer is longer than host-device communication time. Bulk offloading with delay and bulk reloading + with prefetch are implemented. 
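(The `offload`/`reload` helpers above rely on pinned host memory: only copies into or out of pinned buffers can run asynchronously with respect to GPU work. A standalone sketch of that pattern, illustrative only and requiring a CUDA device:)

    import torch

    def offload_to_host(gpu_tensor, pin_memory=True):
        cpu_copy = torch.empty(gpu_tensor.size(), dtype=gpu_tensor.dtype,
                               device="cpu", pin_memory=pin_memory)
        # With a pinned destination this copy is asynchronous w.r.t. the GPU stream;
        # the caller must synchronize (event/stream) before trusting the host data.
        cpu_copy.copy_(gpu_tensor, non_blocking=pin_memory)
        return gpu_tensor.device, cpu_copy

    def reload_to_device(state):
        device, cpu_copy = state
        return cpu_copy.to(device, non_blocking=cpu_copy.is_pinned())
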
""" + def __init__(self, + num_offload_group, # must be <= actual number of groups (number of commits) + num_prefetch_group=1, + tensor_need_offloading_checker=(lambda t: True), + debug=False + ) -> None: + super().__init__(num_offload_group=num_offload_group, + tensor_need_offloading_checker=tensor_need_offloading_checker, + debug=debug) + self.num_prefetch_group = num_prefetch_group + + # prepare for tensor buffer + self.tensor_id_to_tensor_buf_double_bufs = [] + for _ in range(2): + self.tensor_id_to_tensor_buf_double_bufs.append(dict()) + + # allocate streams and events for synchronization + self.d2h_stream = torch.cuda.Stream() + self.h2d_stream = torch.cuda.Stream() + self.h2d_finish_events = [] + self.compute_stream_bwd_start_events = [] + for _ in range(self.num_offload_group): + self.h2d_finish_events.append(torch.cuda.Event()) + self.compute_stream_bwd_start_events.append(torch.cuda.Event()) + self.d2h_final_event = torch.cuda.Event() + + def get_tensor_buf_for_offloaded_tensor(self, tensor, tensor_tag): + group_id, tensor_id = tensor_tag + # obtain ping-pong buffer + id_buf_map = self.tensor_id_to_tensor_buf_double_bufs[(group_id % 2)] + + if not tensor_id in id_buf_map: + allocate_new_buf = True + else: + tensor_buf = id_buf_map[tensor_id] + if not (tensor_buf.size() == tensor.size() and tensor_buf.dtype == tensor.dtype): + allocate_new_buf = True + else: + allocate_new_buf = False # in this case, reuse the old buffer + + if allocate_new_buf: + # supposed to only execute once + if self.debug: + logging.info(f"Allocating tensor_buf for group {group_id} tensor {tensor_id} size {tensor.size()}") + id_buf_map[tensor_id] = torch.empty(tensor.size(), + dtype=tensor.dtype, + layout=tensor.layout, + device=tensor.device, + ) + return id_buf_map[tensor_id] + + def tensor_push(self, tensor: torch.Tensor, **kwargs) -> Any: + # obtain a unique tensor tag + tensor_tag = (self.current_group, self.tensor_count_current_group) + if self.debug: + logging.info("tensor_push", tensor_tag, tensor.shape, type(tensor), "need_offloading ?", self.tensor_need_offloading_checker(tensor)) + self.tensor_count_current_group += 1 + assert not (tensor_tag in self.tensor_tag_to_state) + + if self.current_group < self.num_offload_group and self.tensor_need_offloading_checker(tensor): + # first copy the tensor to tensorbuf, so that the original tensor will not be deleted + tensor_buf = self.get_tensor_buf_for_offloaded_tensor(tensor, tensor_tag) + tensor_buf.copy_(tensor) + # Here we just save it, and at commit, bulk_offload_group will handle it + self.tensor_tag_to_state[tensor_tag] = tensor_buf + else: + self.tensor_tag_to_state[tensor_tag] = tensor + return tensor_tag + + def tensor_pop(self, tensor_tag, **kwargs): + assert tensor_tag in self.tensor_tag_to_state + if self.debug: + logging.info("tensor_pop", tensor_tag) + tensor = self.tensor_tag_to_state.pop(tensor_tag) + # the tensor should have been copied back in on_group_commit_backward() which invokes bulk_reload_group + assert not isinstance(tensor, tuple) + return tensor + + def bulk_offload_group(self, group_to_offload): + with torch.cuda.stream(self.d2h_stream): + for tensor_tag, state in self.tensor_tag_to_state.items(): + group_id, _ = tensor_tag + if group_id == group_to_offload: + assert not isinstance(state, tuple) + tensor_on_device = state + + # if offload, return the reference to cpu copy + if self.tensor_need_offloading_checker(tensor_on_device): + state = SynchronizedGroupOffloadHandler.offload(tensor_on_device) + 
self.tensor_tag_to_state[tensor_tag] = state + + def synchronize_on_group_commit_forward(self, current_group): + # the host should wait for the copying of previous group + # to avoid overwriting buffer + previous_group = current_group - 1 + if (previous_group < self.num_offload_group): + torch.cuda.synchronize() + # TODO (guyueh): this part is originally designed to reduce the peak memory usage. + # however, uncommenting this part will cause illegal access, have not figured out why. + + if previous_group + 2 >= self.num_offload_group: + # this buffer is no longer required + self.tensor_id_to_tensor_buf_double_bufs[(previous_group % 2)] = dict() + + # the copying of this group should wait for the computation stream event + if current_group < self.num_offload_group: + # perform bulk offloading + self.bulk_offload_group(current_group) + if current_group == self.num_offload_group - 1: + self.d2h_stream.record_event(self.d2h_final_event) + + def on_group_commit_forward(self): + """This function will cause host device synchronization""" + # handle synchronization events + self.synchronize_on_group_commit_forward(self.current_group) + + # during forward, the next_group_to_fetch always points to the min of + # the last commited group, and the last offloaded group + self.next_group_to_fetch = min(self.current_group, self.num_offload_group -1) + + super().on_group_commit_forward() + + def bulk_reload_group(self, group_to_reload): + assert group_to_reload < self.num_offload_group + if group_to_reload == self.num_offload_group - 1: + self.h2d_stream.wait_event(self.d2h_final_event) + with torch.cuda.stream(self.h2d_stream): + # move back tensors + for tensor_label in self.tensor_tag_to_state.keys(): + group_id, _ = tensor_label + if group_id == group_to_reload: + state = self.tensor_tag_to_state[tensor_label] + if isinstance(state, tuple): + recovered_tensor = SynchronizedGroupOffloadHandler.reload(state) + self.tensor_tag_to_state[tensor_label] = recovered_tensor + else: + self.tensor_tag_to_state[tensor_label] = state + + def on_group_commit_backward(self): + # first decrement the current group. + # after last commit in forward, the group will +1; in backward it -1. 
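(The stream/event choreography above boils down to: wait for the producer, copy on a side stream, and make any consumer wait on a recorded event. A bare-bones version of that pattern, illustrative only and requiring a CUDA device:)

    import torch

    compute_stream = torch.cuda.current_stream()
    d2h_stream = torch.cuda.Stream()
    offload_done = torch.cuda.Event()

    activation = torch.randn(1024, 1024, device="cuda")
    host_buf = torch.empty(activation.size(), dtype=activation.dtype,
                           device="cpu", pin_memory=True)

    d2h_stream.wait_stream(compute_stream)        # wait for the producer kernel
    with torch.cuda.stream(d2h_stream):
        host_buf.copy_(activation, non_blocking=True)
        d2h_stream.record_event(offload_done)     # mark completion of the D2H copy

    # ... later (e.g. at reload time), whoever needs the data waits on the event:
    torch.cuda.current_stream().wait_event(offload_done)
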
Finally it should be decremented to 0 + self.current_group -= 1 + assert self.current_group >= 0 + + if self.debug: + logging.info(f"on_group_commit_backward current_group: {self.current_group}") + + # decide the range of group to prefetch + should_prefetch_until_group = self.current_group - self.num_prefetch_group + if should_prefetch_until_group < 0: + should_prefetch_until_group = 0 + + # do prefetch + if self.debug: + logging.info(f"num_prefetch_group = {self.num_prefetch_group} num_offload_group = {self.num_offload_group} fetch from {self.next_group_to_fetch} to {should_prefetch_until_group}") + for group_num_to_prefetch in range(self.next_group_to_fetch, should_prefetch_until_group - 1, -1): + # record the event in the compute stream, for h2d to wait + torch.cuda.current_stream().record_event(self.compute_stream_bwd_start_events[group_num_to_prefetch]) + + # start of h2d should wait for the compute and the d2h + self.h2d_stream.wait_event(self.compute_stream_bwd_start_events[group_num_to_prefetch]) + + #recover tensors (copy back from host) + self.bulk_reload_group(group_num_to_prefetch) + + # record an event for the backward of this layer to wait + self.h2d_stream.record_event(self.h2d_finish_events[group_num_to_prefetch]) + + self.next_group_to_fetch = min(self.num_offload_group - 1, should_prefetch_until_group - 1) # always is set to -1 at the end of the backward + + # wait for the current group + if self.current_group < self.num_offload_group: + torch.cuda.current_stream().wait_event(self.h2d_finish_events[self.current_group]) + +def get_cpu_offload_context(cpu_offloading, cpu_offloading_num_layers): + + def tensor_need_offloading_checker(tensor): + return (not isinstance(tensor, torch.nn.Parameter)) + + cpu_offload_handler = AsyncDoubleBufferGroupOffloadHandler( + num_offload_group=cpu_offloading_num_layers, + num_prefetch_group=1, + tensor_need_offloading_checker=tensor_need_offloading_checker + ) + + def group_prefetch_offload_commit_async(tensor): + return group_prefetch_offload_commit(tensor,cpu_offload_handler) + + if cpu_offloading: + return CpuOffloadHookWithOffloadHandler(offload_handler = cpu_offload_handler), group_prefetch_offload_commit_async + else: + return nullcontext(), group_prefetch_offload_commit_async + diff --git a/megatron/core/transformer/transformer_block.py b/megatron/core/transformer/transformer_block.py index 74bf29c859..b91fac5932 100644 --- a/megatron/core/transformer/transformer_block.py +++ b/megatron/core/transformer/transformer_block.py @@ -17,6 +17,7 @@ from megatron.core.transformer.transformer_config import TransformerConfig from megatron.core.transformer.transformer_layer import TransformerLayer from megatron.core.utils import make_sharded_tensor_for_checkpoint, make_viewless_tensor +from megatron.core.cpu_offload import get_cpu_offload_context def get_num_layers_to_build(config: TransformerConfig) -> int: @@ -105,6 +106,11 @@ def __init__( self._build_layers() self.num_layers_per_pipeline_rank = len(self.layers) + self.offload_context, self.group_prefetch_offload_commit_async = get_cpu_offload_context( + self.config.cpu_offloading, + self.config.cpu_offloading_num_layers + ) + def _build_layers(self): # Transformer layers. # @jcasper can we improve how we deal with layer_number? 
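(For reference, the context manager and commit function returned by `get_cpu_offload_context` are meant to wrap each layer, as the next hunk does in `TransformerBlock.forward`; a condensed sketch where `layers` and `inputs` are placeholders:)

    offload_ctx, commit = get_cpu_offload_context(
        cpu_offloading=True, cpu_offloading_num_layers=2
    )

    hidden = inputs
    for layer in layers:
        with offload_ctx:            # activations saved inside are routed to the handler
            hidden = layer(hidden)
        if torch.is_grad_enabled():
            hidden = commit(hidden)  # group boundary: bulk offload now, prefetch in backward
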
@@ -308,14 +314,19 @@ def forward( ) else: for layer in self.layers: - hidden_states, context = layer( - hidden_states=hidden_states, - attention_mask=attention_mask, - context=context, - context_mask=context_mask, - rotary_pos_emb=rotary_pos_emb, - inference_params=inference_params, - ) + + with self.offload_context: + hidden_states, context = layer( + hidden_states=hidden_states, + attention_mask=attention_mask, + context=context, + context_mask=context_mask, + rotary_pos_emb=rotary_pos_emb, + inference_params=inference_params, + ) + + if torch.is_grad_enabled() and self.config.cpu_offloading: + hidden_states = self.group_prefetch_offload_commit_async(hidden_states) # Final layer norm. if self.post_process and self.post_layer_norm: diff --git a/megatron/core/transformer/transformer_config.py b/megatron/core/transformer/transformer_config.py index 47647e657a..e55e8d7ab9 100644 --- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -106,6 +106,10 @@ class TransformerConfig(ModelParallelConfig): fp8_amax_compute_algo: str = "most_recent" fp8_wgrad: bool = True + # cpu offload + cpu_offloading: bool = True + cpu_offloading_num_layers: int = 1 + # miscellaneous clone_scatter_output_in_embedding: bool = True @@ -149,6 +153,15 @@ def __post_init__(self): if self.expert_model_parallel_size > 1 and self.num_moe_experts is None: raise ValueError(f'num_moe_experts must be non None to use expert-parallel.') + if self.cpu_offloading_num_layers < 0 or self.cpu_offloading_num_layers >= self.num_layers: + raise ValueError(f'CPU offloading can be done only for layers less than {self.num_layers}') + + if self.cpu_offloading and self.pipeline_model_parallel_size > 1: + raise ValueError(f'Currently there is no support for Pipeline parallelism with CPU offloading') + + if self.cpu_offloading and self.recompute_granularity is not None: + raise ValueError(f'CPU offloading does not work when activation recomputation is enabled') + if self.recompute_granularity is not None: if not self.recompute_granularity in ['full', 'selective']: raise ValueError( From 28e2a3da066a56a6e88053e1f7e49780dbebeb04 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20B=C5=82a=C5=BC?= Date: Fri, 15 Dec 2023 15:49:49 +0100 Subject: [PATCH 036/296] Set sharded prefix bottom-up --- megatron/core/dist_checkpointing/utils.py | 13 ++++++++++++- megatron/core/tensor_parallel/layers.py | 8 ++++---- .../custom_layers/transformer_engine.py | 12 ++++++------ megatron/core/transformer/mlp.py | 8 ++------ megatron/core/transformer/module.py | 9 +++------ megatron/core/transformer/transformer_block.py | 17 +++++++++++++++-- megatron/core/transformer/transformer_layer.py | 16 ---------------- megatron/core/transformer/utils.py | 17 ++++++----------- 8 files changed, 48 insertions(+), 52 deletions(-) diff --git a/megatron/core/dist_checkpointing/utils.py b/megatron/core/dist_checkpointing/utils.py index f7976f0074..fa7a0b6937 100644 --- a/megatron/core/dist_checkpointing/utils.py +++ b/megatron/core/dist_checkpointing/utils.py @@ -8,7 +8,7 @@ ShardedStateDict, ShardedTensor, ShardedTensorFactory, - StateDict, + StateDict, ShardedObject, ) @@ -42,3 +42,14 @@ def add_prefix(t): return t dict_list_map_inplace(add_prefix, sharded_state_dict) + + +def replace_prefix_for_sharding(sharded_state_dict: ShardedStateDict, old_prefix: str, new_prefix: str): + def replace_prefix(x): + if isinstance(x, (ShardedTensor, ShardedTensorFactory, ShardedObject)): + if not x.key.startswith(old_prefix): + raise 
ValueError(f'Expected {x.key} to begin with prefix {old_prefix}') + x.key = f'{new_prefix}{x.key.removeprefix(old_prefix)}' + return x + + dict_list_map_inplace(replace_prefix, sharded_state_dict) diff --git a/megatron/core/tensor_parallel/layers.py b/megatron/core/tensor_parallel/layers.py index e527d706b3..e9f54e9419 100644 --- a/megatron/core/tensor_parallel/layers.py +++ b/megatron/core/tensor_parallel/layers.py @@ -757,11 +757,11 @@ def forward(self, input_: torch.Tensor, weight: Optional[torch.Tensor] = None): output_bias = self.bias if self.skip_bias_add else None return output, output_bias - def sharded_state_dict(self, prefix='', sharded_key_prefix=None, sharded_offsets=()): + def sharded_state_dict(self, prefix='', sharded_offsets=()): """ Sharding along axis 0, bias sharded """ state_dict = self.state_dict(prefix='', keep_vars=True) return make_sharded_tensors_for_checkpoint( - state_dict, prefix, sharded_key_prefix, {'weight': 0, 'bias': 0}, sharded_offsets + state_dict, prefix, {'weight': 0, 'bias': 0}, sharded_offsets ) @@ -932,9 +932,9 @@ def forward(self, input_): output_bias = self.bias return output, output_bias - def sharded_state_dict(self, prefix='', sharded_key_prefix=None, sharded_offsets=()): + def sharded_state_dict(self, prefix='', sharded_offsets=()): """ Sharding along axis 1, bias not sharded """ state_dict = self.state_dict(prefix='', keep_vars=True) return make_sharded_tensors_for_checkpoint( - state_dict, prefix, sharded_key_prefix, {'weight': 1}, sharded_offsets + state_dict, prefix, {'weight': 1}, sharded_offsets ) diff --git a/megatron/core/transformer/custom_layers/transformer_engine.py b/megatron/core/transformer/custom_layers/transformer_engine.py index d784184623..a2dc135bbc 100644 --- a/megatron/core/transformer/custom_layers/transformer_engine.py +++ b/megatron/core/transformer/custom_layers/transformer_engine.py @@ -233,11 +233,11 @@ def forward(self, x): return out return out, None - def sharded_state_dict(self, prefix='', sharded_key_prefix=None, sharded_offsets=()): + def sharded_state_dict(self, prefix='', sharded_offsets=()): """ Sharding along axis 0, bias sharded """ state_dict = self.state_dict(prefix='', keep_vars=True) return make_sharded_tensors_for_checkpoint( - state_dict, prefix, sharded_key_prefix, {'weight': 0, 'bias': 0}, sharded_offsets + state_dict, prefix, {'weight': 0, 'bias': 0}, sharded_offsets ) @@ -279,11 +279,11 @@ def __init__( tp_comm_buffer_name=tp_comm_buffer_name, ) - def sharded_state_dict(self, prefix='', sharded_key_prefix=None, sharded_offsets=()): + def sharded_state_dict(self, prefix='', sharded_offsets=()): """ Sharding along axis 0, bias sharded """ state_dict = self.state_dict(prefix='', keep_vars=True) return make_sharded_tensors_for_checkpoint( - state_dict, prefix, sharded_key_prefix, {'weight': 0, 'bias': 0}, sharded_offsets + state_dict, prefix, {'weight': 0, 'bias': 0}, sharded_offsets ) @@ -326,11 +326,11 @@ def __init__( tp_comm_buffer_name=tp_comm_buffer_name, ) - def sharded_state_dict(self, prefix='', sharded_key_prefix=None, sharded_offsets=()): + def sharded_state_dict(self, prefix='', sharded_offsets=()): """ Sharding along axis 1, bias not sharded """ state_dict = self.state_dict(prefix='', keep_vars=True) return make_sharded_tensors_for_checkpoint( - state_dict, prefix, sharded_key_prefix, {'weight': 1}, sharded_offsets + state_dict, prefix, {'weight': 1}, sharded_offsets ) diff --git a/megatron/core/transformer/mlp.py b/megatron/core/transformer/mlp.py index 8f5575b724..5f36ddf6fc 100644 --- 
a/megatron/core/transformer/mlp.py +++ b/megatron/core/transformer/mlp.py @@ -106,18 +106,16 @@ def forward(self, hidden_states): return output, output_bias - def sharded_state_dict(self, prefix='', sharded_key_prefix=None, sharded_offsets=()): - sharded_key_prefix = prefix if sharded_key_prefix is None else sharded_key_prefix + def sharded_state_dict(self, prefix='', sharded_offsets=()): sharded_state_dict = {} for name, module in self._modules.items(): if name == 'linear_fc1' and self.config.gated_linear_unit: sub_sd = self._sharded_state_dict_for_glu( - name, module, prefix, sharded_key_prefix, sharded_offsets + name, module, prefix, sharded_offsets ) else: sub_sd = module.sharded_state_dict( prefix=f'{prefix}{name}.', - sharded_key_prefix=f'{sharded_key_prefix}{name}.', sharded_offsets=sharded_offsets, ) sharded_state_dict.update(sub_sd) @@ -128,13 +126,11 @@ def _sharded_state_dict_for_glu( module_name: str, module: torch.nn.Module, prefix: str, - sharded_key_prefix: str, sharded_offsets: Tuple[Tuple[int, int, int]], ): assert module_name == 'linear_fc1', module_name sharded_state_dict = module.sharded_state_dict( prefix=f'{prefix}{module_name}.', - sharded_key_prefix=f'{sharded_key_prefix}{module_name}.', sharded_offsets=sharded_offsets, ) weight_key = f'{prefix}{module_name}.weight' diff --git a/megatron/core/transformer/module.py b/megatron/core/transformer/module.py index 86314d50a2..731929dc7c 100644 --- a/megatron/core/transformer/module.py +++ b/megatron/core/transformer/module.py @@ -47,11 +47,11 @@ def state_dict_for_save_checkpoint(self, prefix: str = '', keep_vars: bool = Fal return self.state_dict(prefix=prefix, keep_vars=keep_vars) - def sharded_state_dict(self, prefix='', sharded_key_prefix=None, sharded_offsets=()): - self._intermediate_sharded_state_dict(prefix, sharded_key_prefix, sharded_offsets) + def sharded_state_dict(self, prefix='', sharded_offsets=()): + return self._intermediate_sharded_state_dict(prefix, sharded_offsets) - def _intermediate_sharded_state_dict(self, prefix='', sharded_key_prefix=None, sharded_offsets=()): + def _intermediate_sharded_state_dict(self, prefix='', sharded_offsets=()): """Sharded state dict with Distributed Checkpointing. 
General definition of sharded_state_dict tries to call `sharded_state_dict` @@ -70,14 +70,12 @@ def _intermediate_sharded_state_dict(self, prefix='', sharded_key_prefix=None, s Returns: dict: dictionary of state dict keys mapped to ShardedTensors """ - sharded_key_prefix = prefix if sharded_key_prefix is None else sharded_key_prefix sharded_state_dict = {} for name, module in self._modules.items(): if hasattr(module, 'sharded_state_dict'): module_sharded_sd = module.sharded_state_dict( prefix=f'{prefix}{name}.', - sharded_key_prefix=f'{sharded_key_prefix}{name}.', sharded_offsets=sharded_offsets, ) else: @@ -85,7 +83,6 @@ def _intermediate_sharded_state_dict(self, prefix='', sharded_key_prefix=None, s module_sharded_sd = make_sharded_tensors_for_checkpoint( module_sd, f'{prefix}{name}.', - f'{sharded_key_prefix}{name}.', {}, sharded_offsets, ) diff --git a/megatron/core/transformer/transformer_block.py b/megatron/core/transformer/transformer_block.py index 74bf29c859..cb33c5fec7 100644 --- a/megatron/core/transformer/transformer_block.py +++ b/megatron/core/transformer/transformer_block.py @@ -9,6 +9,7 @@ from torch import Tensor from megatron.core import InferenceParams, parallel_state, tensor_parallel +from megatron.core.dist_checkpointing.utils import replace_prefix_for_sharding from megatron.core.fusions.fused_layer_norm import FusedLayerNorm from megatron.core.transformer.custom_layers.transformer_engine import TENorm from megatron.core.transformer.enums import AttnMaskType @@ -323,13 +324,25 @@ def forward( return hidden_states - def sharded_state_dict(self, prefix: str = ''): + def sharded_state_dict(self, prefix: str = '', sharded_offsets: tuple = ()): sharded_state_dict = {} layer_prefix = f'{prefix}layers.' + num_layers = self.config.num_layers for layer in self.layers: - sharded_state_dict.update(layer.sharded_state_dict(prefix=layer_prefix)) + offset = layer._get_layer_offset() + + global_layer_offset = layer.layer_number - 1 # self.layer_number starts at 1 + state_dict_prefix = ( + f'{layer_prefix}{global_layer_offset - offset}.' 
# module list index in TransformerBlock + ) + sharded_pp_offset = [ + (0, global_layer_offset, num_layers) + ] # PP sharding offset for ShardedTensors + layer_sharded_state_dict = layer.sharded_state_dict(prefix=state_dict_prefix, sharded_offsets=sharded_pp_offset) + replace_prefix_for_sharding(layer_sharded_state_dict, state_dict_prefix, layer_prefix) + sharded_state_dict.update(layer_sharded_state_dict) if self.post_process and self.post_layer_norm: state_dict = self.state_dict(keep_vars=True) diff --git a/megatron/core/transformer/transformer_layer.py b/megatron/core/transformer/transformer_layer.py index 84ae4525a8..8814b8c32c 100644 --- a/megatron/core/transformer/transformer_layer.py +++ b/megatron/core/transformer/transformer_layer.py @@ -6,13 +6,11 @@ import torch from megatron.core import parallel_state -from megatron.core.dist_checkpointing.mapping import ShardedObject, ShardedTensor from megatron.core.transformer.enums import AttnMaskType from megatron.core.transformer.identity_op import IdentityFuncOp, IdentityOp from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.spec_utils import ModuleSpec, build_module from megatron.core.transformer.transformer_config import TransformerConfig -from megatron.core.transformer.utils import make_sharded_tensors_for_checkpoint from megatron.core.utils import make_viewless_tensor @@ -216,17 +214,3 @@ def forward( ) return output, context - - def sharded_state_dict(self, prefix=''): - offset = self._get_layer_offset() - num_layers = self.config.num_layers - - global_layer_offset = self.layer_number - 1 # self.layer_number starts at 1 - state_dict_prefix = ( - f'{prefix}{global_layer_offset - offset}.' # module list index in TransformerBlock - ) - sharded_pp_offset = [ - (0, global_layer_offset, num_layers) - ] # PP sharding offset for ShardedTensors - - return self._intermediate_sharded_state_dict(state_dict_prefix, prefix, sharded_pp_offset) diff --git a/megatron/core/transformer/utils.py b/megatron/core/transformer/utils.py index d7d002734f..15fe4da6c1 100644 --- a/megatron/core/transformer/utils.py +++ b/megatron/core/transformer/utils.py @@ -49,8 +49,7 @@ def erf_gelu(x): def make_sharded_tensors_for_checkpoint( state_dict: StateDict, - state_dict_prefix: str, - sharded_key_prefix: Optional[str] = None, + prefix: str, tensor_parallel_layers_axis_map: Optional[Dict[str, int]] = None, sharded_offsets: Iterable[Tuple[int, int, int]] = (), extra_state_suffix: str = '_extra_state', @@ -64,8 +63,7 @@ def make_sharded_tensors_for_checkpoint( Args: state_dict (StateDict): state_dict to convert - state_dict_prefix (str): prefix appended to keys in final state dict - sharded_key_prefix (str, optional): prefix appended to ShardedTensor keys + prefix (str): prefix appended to keys in final state dict tensor_parallel_layers_axis_map (Dict[str, int], optional): dict mapping layer names to the axis for TP sharding sharded_offsets (Iterable[Tuple[int, int, int]], optional): sharding already @@ -74,8 +72,6 @@ def make_sharded_tensors_for_checkpoint( suffix will be wrapped with ShardedObject instead of ShardedTensor. 
""" - if sharded_key_prefix is None: - sharded_key_prefix = state_dict_prefix if tensor_parallel_layers_axis_map is None: tensor_parallel_layers_axis_map = {} @@ -83,23 +79,22 @@ def make_sharded_tensors_for_checkpoint( sharded_state_dict = {} for layer_name in state_dict.keys(): tensor = state_dict[layer_name] - layer_key = f'{state_dict_prefix}{layer_name}' - sharded_key = f'{sharded_key_prefix}{layer_name}' + layer_key = f'{prefix}{layer_name}' if layer_name.endswith(extra_state_suffix): sharded_state_dict[layer_key] = make_sharded_object_for_checkpoint( - tensor, sharded_key, sharded_offsets + tensor, layer_key, sharded_offsets ) elif layer_name in tensor_parallel_layers_axis_map: tp_axis = tensor_parallel_layers_axis_map[layer_name] sharded_state_dict[layer_key] = make_tp_sharded_tensor_for_checkpoint( - tensor, sharded_key, tp_axis, prepend_offsets=sharded_offsets, + tensor, layer_key, tp_axis, prepend_offsets=sharded_offsets, ) else: sharded_state_dict[layer_key] = make_sharded_tensor_for_checkpoint( - tensor, sharded_key, prepend_offsets=sharded_offsets, + tensor, layer_key, prepend_offsets=sharded_offsets, ) return sharded_state_dict From fa36e3cd750c050f49ae1c97711c4121cec64ad3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20B=C5=82a=C5=BC?= Date: Fri, 15 Dec 2023 17:05:43 +0100 Subject: [PATCH 037/296] Provide default sharded_state_dict implementation for most of the modules --- megatron/core/models/T5/t5_model.py | 6 ++- .../embeddings/language_model_embedding.py | 38 +---------------- megatron/core/models/gpt/gpt_model.py | 6 ++- megatron/core/tensor_parallel/layers.py | 18 +++++++- megatron/core/transformer/module.py | 42 +++++-------------- .../core/transformer/transformer_block.py | 26 ++++-------- megatron/core/transformer/utils.py | 40 +++++++++++++++++- 7 files changed, 84 insertions(+), 92 deletions(-) diff --git a/megatron/core/models/T5/t5_model.py b/megatron/core/models/T5/t5_model.py index feaed27413..cc32368427 100644 --- a/megatron/core/models/T5/t5_model.py +++ b/megatron/core/models/T5/t5_model.py @@ -1,12 +1,13 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. import logging -from typing import List, Literal, Optional +from typing import List, Literal, Optional, Tuple import torch from torch import Tensor from megatron.core import InferenceParams, parallel_state, tensor_parallel +from megatron.core.dist_checkpointing.mapping import ShardedStateDict from megatron.core.models.common.embeddings.language_model_embedding import LanguageModelEmbedding from megatron.core.models.common.embeddings.rotary_pos_embedding import RotaryEmbedding from megatron.core.models.common.language_module.language_module import LanguageModule @@ -332,7 +333,8 @@ def shared_embedding_or_output_weight(self) -> Tensor: return self.lm_head.output_layer.weight return None - def sharded_state_dict(self, prefix: str = ''): + def sharded_state_dict(self, prefix: str = '', sharded_offsets: Tuple[Tuple[int, int, int]] = ()) -> ShardedStateDict: + assert not sharded_offsets, "We don't expect any sharded offsets at this level of model hierarchy" sharded_state_dict = {} if self.pre_process: diff --git a/megatron/core/models/common/embeddings/language_model_embedding.py b/megatron/core/models/common/embeddings/language_model_embedding.py index 40d679d7b1..93002fcd05 100644 --- a/megatron/core/models/common/embeddings/language_model_embedding.py +++ b/megatron/core/models/common/embeddings/language_model_embedding.py @@ -1,6 +1,6 @@ # Copyright (c) 2023, NVIDIA CORPORATION. 
All rights reserved. -from typing import Literal, Optional +from typing import Literal import torch from torch import Tensor @@ -8,11 +8,6 @@ from megatron.core import tensor_parallel from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.transformer_config import TransformerConfig -from megatron.core.utils import ( - make_sharded_tensor_for_checkpoint, - make_tp_sharded_tensor_for_checkpoint, -) - class LanguageModelEmbedding(MegatronModule): """Language model embeddings. @@ -130,34 +125,3 @@ def forward(self, input_ids: Tensor, position_ids: Tensor, tokentype_ids: int = embeddings = self.embedding_dropout(embeddings) return embeddings - - def sharded_state_dict(self, prefix=''): - - sharded_state_dict = {} - - word_embeddings_prefix = f'{prefix}word_embeddings.' - word_embeddings_state_dict = self.word_embeddings.state_dict( - prefix=word_embeddings_prefix, keep_vars=True - ) - - sharded_word_embeddings_key = f'{word_embeddings_prefix}weight' - sharded_word_embeddings_tensor = make_tp_sharded_tensor_for_checkpoint( - tensor=word_embeddings_state_dict[sharded_word_embeddings_key], - key=sharded_word_embeddings_key, - allow_shape_mismatch=True, - ) - sharded_state_dict[sharded_word_embeddings_key] = sharded_word_embeddings_tensor - - if self.add_position_embedding: - position_embeddings_prefix = f'{prefix}position_embeddings.' - position_embeddings_state_dict = self.position_embeddings.state_dict( - prefix=position_embeddings_prefix, keep_vars=True - ) - sharded_position_embeddings_key = f'{position_embeddings_prefix}weight' - sharded_position_embeddings_tensor = make_sharded_tensor_for_checkpoint( - tensor=position_embeddings_state_dict[sharded_position_embeddings_key], - key=sharded_position_embeddings_key, - ) - sharded_state_dict[sharded_position_embeddings_key] = sharded_position_embeddings_tensor - - return sharded_state_dict diff --git a/megatron/core/models/gpt/gpt_model.py b/megatron/core/models/gpt/gpt_model.py index 2cf26bacac..23ea2cb426 100644 --- a/megatron/core/models/gpt/gpt_model.py +++ b/megatron/core/models/gpt/gpt_model.py @@ -1,12 +1,13 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. 
import logging -from typing import Literal, Optional, Union +from typing import Literal, Optional, Union, Tuple import torch from torch import Tensor from megatron.core import InferenceParams, parallel_state, tensor_parallel +from megatron.core.dist_checkpointing.mapping import ShardedStateDict from megatron.core.models.common.embeddings.language_model_embedding import LanguageModelEmbedding from megatron.core.models.common.embeddings.rotary_pos_embedding import RotaryEmbedding from megatron.core.models.common.language_module.language_module import LanguageModule @@ -188,7 +189,8 @@ def forward( return loss - def sharded_state_dict(self, prefix: str = '') -> dict: + def sharded_state_dict(self, prefix: str = '', sharded_offsets: Tuple[Tuple[int, int, int]] = ()) -> ShardedStateDict: + assert not sharded_offsets, "We don't expect any sharded offsets at this level of model hierarchy" sharded_state_dict = {} if self.pre_process: diff --git a/megatron/core/tensor_parallel/layers.py b/megatron/core/tensor_parallel/layers.py index e9f54e9419..0b6b6656aa 100644 --- a/megatron/core/tensor_parallel/layers.py +++ b/megatron/core/tensor_parallel/layers.py @@ -6,7 +6,7 @@ import math import os import warnings -from typing import Callable, Optional +from typing import Callable, Optional, Tuple import torch import torch.nn.functional as F @@ -21,6 +21,7 @@ get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size, ) +from ..dist_checkpointing.mapping import ShardedStateDict from ..transformer.utils import make_sharded_tensors_for_checkpoint from .mappings import ( @@ -33,6 +34,7 @@ ) from .random import get_cuda_rng_tracker, get_expert_parallel_rng_tracker_name from .utils import VocabUtility, divide, split_tensor_along_last_dim +from ..utils import make_tp_sharded_tensor_for_checkpoint _grad_accum_fusion_available = True try: @@ -223,6 +225,20 @@ def forward(self, input_): output = reduce_from_tensor_model_parallel_region(output_parallel) return output + def sharded_state_dict(self, prefix: str = '', sharded_offsets: Tuple[Tuple[int, int, int]] = ()) -> ShardedStateDict: + """ Non-default implementation for embeddings due to `allow_shape_mismatch` param """ + state_dict = self.state_dict(prefix='', keep_vars=True) + + weight_prefix = f'{prefix}weight' + return { + weight_prefix: make_tp_sharded_tensor_for_checkpoint( + tensor=state_dict['weight'], + key=weight_prefix, + allow_shape_mismatch=True, + prepend_offsets=sharded_offsets + ) + } + class LinearWithFrozenWeight(torch.autograd.Function): """Linear operator that does not calculate gradient for weight. diff --git a/megatron/core/transformer/module.py b/megatron/core/transformer/module.py index 731929dc7c..bfbf4e99b6 100644 --- a/megatron/core/transformer/module.py +++ b/megatron/core/transformer/module.py @@ -1,13 +1,16 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. 
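(Context for `allow_shape_mismatch=True` in the embedding override above: the runtime word-embedding table is typically padded so the vocabulary divides evenly across tensor-parallel ranks, so its shape can legitimately differ between checkpoints saved with different parallel configurations. Roughly, with illustrative numbers not taken from this patch:)

    def padded_vocab_size(vocab_size, divisible_by, tp_size):
        multiple = divisible_by * tp_size
        return ((vocab_size + multiple - 1) // multiple) * multiple

    padded_vocab_size(50257, 128, 1)   # 50304
    padded_vocab_size(50257, 128, 8)   # 51200 -> shapes differ, hence allow_shape_mismatch
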
"""Megatron Module.""" +from typing import Tuple import torch from torch.autograd import Variable from torch.nn.parameter import Parameter from megatron.core import parallel_state +from megatron.core.dist_checkpointing.mapping import ShardedStateDict from megatron.core.transformer.transformer_config import TransformerConfig -from megatron.core.transformer.utils import make_sharded_tensors_for_checkpoint +from megatron.core.transformer.utils import make_sharded_tensors_for_checkpoint, \ + sharded_state_dict_default _FLOAT_TYPES = (torch.FloatTensor, torch.cuda.FloatTensor) _HALF_TYPES = (torch.HalfTensor, torch.cuda.HalfTensor) @@ -47,23 +50,15 @@ def state_dict_for_save_checkpoint(self, prefix: str = '', keep_vars: bool = Fal return self.state_dict(prefix=prefix, keep_vars=keep_vars) - def sharded_state_dict(self, prefix='', sharded_offsets=()): - return self._intermediate_sharded_state_dict(prefix, sharded_offsets) + def sharded_state_dict(self, prefix: str = '', sharded_offsets: Tuple[Tuple[int, int, int]] = ()) -> ShardedStateDict: + """Default implementation for sharded state dict for distributed checkpointing. - - def _intermediate_sharded_state_dict(self, prefix='', sharded_offsets=()): - """Sharded state dict with Distributed Checkpointing. - - General definition of sharded_state_dict tries to call `sharded_state_dict` - of submodules when possible, otherwise assumes tensors are replicated - across TP and DP. - When overriding, keep_vars argument of plain `state_dict` method must - always be set to True so that optimizer states can be sharded. + General definition of sharded_state_dict simply calls `sharded_state_dict_default` + (which call sharded_state_dict method if possible or a default implementation otherwise) + recursively on all submodules. Args: prefix (str): prefix for the state dict keys - sharded_key_prefix (str, optional): prefix for the ShardedTensor keys. - If None, the same prefix as for state dict keys is assumed. sharded_offsets (Iterable[Tuple[int, int, int]], optional): sharding already applied (e.g. PP related) by sup-modules. 
Passed along to ShardedTensor @@ -71,23 +66,8 @@ def _intermediate_sharded_state_dict(self, prefix='', sharded_offsets=()): dict: dictionary of state dict keys mapped to ShardedTensors """ sharded_state_dict = {} - - for name, module in self._modules.items(): - if hasattr(module, 'sharded_state_dict'): - module_sharded_sd = module.sharded_state_dict( - prefix=f'{prefix}{name}.', - sharded_offsets=sharded_offsets, - ) - else: - module_sd = module.state_dict(prefix='', keep_vars=True) - module_sharded_sd = make_sharded_tensors_for_checkpoint( - module_sd, - f'{prefix}{name}.', - {}, - sharded_offsets, - ) - sharded_state_dict.update(module_sharded_sd) - + for name, module in self.named_children(): + sharded_state_dict.update(sharded_state_dict_default(module, f'{prefix}{name}.', sharded_offsets)) return sharded_state_dict diff --git a/megatron/core/transformer/transformer_block.py b/megatron/core/transformer/transformer_block.py index cb33c5fec7..b7b19227d9 100644 --- a/megatron/core/transformer/transformer_block.py +++ b/megatron/core/transformer/transformer_block.py @@ -3,12 +3,13 @@ import re from contextlib import nullcontext from dataclasses import dataclass -from typing import List, Union +from typing import List, Union, Tuple import torch from torch import Tensor from megatron.core import InferenceParams, parallel_state, tensor_parallel +from megatron.core.dist_checkpointing.mapping import ShardedStateDict from megatron.core.dist_checkpointing.utils import replace_prefix_for_sharding from megatron.core.fusions.fused_layer_norm import FusedLayerNorm from megatron.core.transformer.custom_layers.transformer_engine import TENorm @@ -17,6 +18,7 @@ from megatron.core.transformer.spec_utils import ModuleSpec, build_module from megatron.core.transformer.transformer_config import TransformerConfig from megatron.core.transformer.transformer_layer import TransformerLayer +from megatron.core.transformer.utils import sharded_state_dict_default from megatron.core.utils import make_sharded_tensor_for_checkpoint, make_viewless_tensor @@ -324,8 +326,8 @@ def forward( return hidden_states - def sharded_state_dict(self, prefix: str = '', sharded_offsets: tuple = ()): - + def sharded_state_dict(self, prefix: str = '', sharded_offsets: Tuple[Tuple[int, int, int]] = ()) -> ShardedStateDict: + assert not sharded_offsets, "We don't expect any sharded offsets at this level of model hierarchy" sharded_state_dict = {} layer_prefix = f'{prefix}layers.' @@ -344,19 +346,9 @@ def sharded_state_dict(self, prefix: str = '', sharded_offsets: tuple = ()): replace_prefix_for_sharding(layer_sharded_state_dict, state_dict_prefix, layer_prefix) sharded_state_dict.update(layer_sharded_state_dict) - if self.post_process and self.post_layer_norm: - state_dict = self.state_dict(keep_vars=True) - - tensor = state_dict['final_layernorm.weight'] - layer_name = f'{prefix}final_layernorm.weight' - sharded_state_dict[layer_name] = make_sharded_tensor_for_checkpoint(tensor, layer_name) - - # RMSNorm doesn't have bias. 
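(A compact illustration of the bottom-up prefix scheme used above, where the `layer` object, prefix string, and numbers are placeholders: keys are first produced under the local module-list index, then rewritten to a canonical prefix while the layer's global position travels in the prepended ShardedTensor offset:)

    layer_prefix = 'decoder.layers.'
    global_layer_offset = 2            # layer.layer_number - 1
    offset = 2                         # layers hosted on earlier pipeline stages
    num_layers = 4

    state_dict_prefix = f'{layer_prefix}{global_layer_offset - offset}.'  # 'decoder.layers.0.'
    sharded_pp_offset = [(0, global_layer_offset, num_layers)]            # (dim, index, total)

    layer_sd = layer.sharded_state_dict(prefix=state_dict_prefix,
                                        sharded_offsets=sharded_pp_offset)
    replace_prefix_for_sharding(layer_sd, state_dict_prefix, layer_prefix)
    # Keys now start with 'decoder.layers.' on every pipeline rank; the layer's
    # global index is carried by the (0, 2, 4) offset attached to each ShardedTensor.
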
- if 'final_layernorm.bias' in state_dict.keys(): - tensor = state_dict['final_layernorm.bias'] - layer_name = f'{prefix}final_layernorm.bias' - sharded_state_dict[layer_name] = make_sharded_tensor_for_checkpoint( - tensor, layer_name - ) + # Add modules other than self.layers + for name, module in self.named_children(): + if not module is self.layers: + sharded_state_dict.update(sharded_state_dict_default(module, f'{prefix}{name}.', sharded_offsets)) return sharded_state_dict diff --git a/megatron/core/transformer/utils.py b/megatron/core/transformer/utils.py index 15fe4da6c1..3416bdf611 100644 --- a/megatron/core/transformer/utils.py +++ b/megatron/core/transformer/utils.py @@ -2,12 +2,13 @@ """Utilities for transformer layers.""" from operator import itemgetter -from typing import Any, Dict, Iterable, Optional, Tuple, Union +from typing import Any, Dict, Iterable, Optional, Tuple, Union, Iterator import torch from megatron.core import parallel_state -from megatron.core.dist_checkpointing.mapping import ShardedObject, StateDict +from megatron.core.dist_checkpointing.mapping import ShardedObject, StateDict, \ + ShardedStateDict from megatron.core.utils import ( make_sharded_tensor_for_checkpoint, make_tp_sharded_tensor_for_checkpoint, @@ -141,3 +142,38 @@ def _get_extra_state_offsets( extra_state_shape = (1,) extra_state_offset = (0,) return extra_state_shape, extra_state_offset + + +def sharded_state_dict_default(module: torch.nn.Module, prefix: str = '', sharded_offsets: Tuple[Tuple[int, int, int]] = ()) -> ShardedStateDict: + """Provides implementation for sharded_state_dict method for non-MegatronModules. + + Tries to call `module.sharded_state_dict` when possible, + otherwise uses regular state dict and assumes tensors are replicated across TP and DP. + + `keep_vars=True` is passed to module.state_dict so that optimizer states + can be sharded later on. + + Args: + module (torch.nn.Module): module which sharded state dict we want to obtain + prefix (str): prefix for the state dict keys + sharded_offsets (Iterable[Tuple[int, int, int]], optional): sharding already + applied (e.g. PP related) by sup-modules. 
Passed along to ShardedTensor + + Returns: + dict: dictionary of state dict keys mapped to ShardedTensors + """ + + if hasattr(module, 'sharded_state_dict'): + module_sharded_sd = module.sharded_state_dict( + prefix=prefix, + sharded_offsets=sharded_offsets, + ) + else: + module_sd = module.state_dict(prefix='', keep_vars=True) + module_sharded_sd = make_sharded_tensors_for_checkpoint( + module_sd, + prefix, + {}, + sharded_offsets, + ) + return module_sharded_sd From 4ea6c55fff8994f62c17b0cbea12446d7fe548c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20B=C5=82a=C5=BC?= Date: Fri, 15 Dec 2023 17:05:54 +0100 Subject: [PATCH 038/296] Improve GPT unit test --- .../models/test_gpt_model.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/tests/unit_tests/dist_checkpointing/models/test_gpt_model.py b/tests/unit_tests/dist_checkpointing/models/test_gpt_model.py index a910fec52a..efe5361630 100644 --- a/tests/unit_tests/dist_checkpointing/models/test_gpt_model.py +++ b/tests/unit_tests/dist_checkpointing/models/test_gpt_model.py @@ -71,6 +71,7 @@ def test_parallel_reconfiguration_e2e(self, tmp_path_dist_ckpt, src_tp_pp, dest_ Utils.initialize_model_parallel(*src_tp_pp) gpt_model_A = initialize_gpt_model(1) save(gpt_model_A.sharded_state_dict(), ckpt_dir_A) + regular_state_dict_A = gpt_model_A.state_dict() Utils.destroy_model_parallel() # Load checkpoint A with different TP/PP and save as checkpoint B @@ -79,14 +80,25 @@ def test_parallel_reconfiguration_e2e(self, tmp_path_dist_ckpt, src_tp_pp, dest_ state_dict = load(gpt_model_B.sharded_state_dict(), ckpt_dir_A) gpt_model_B.load_state_dict(state_dict) save(gpt_model_B.sharded_state_dict(), ckpt_dir_B) + regular_state_dict_B = gpt_model_A.state_dict() Utils.destroy_model_parallel() # Test both checkpoints are equal Utils.initialize_model_parallel(1, 1) - state_dict_A = load_plain_tensors(ckpt_dir_A) - state_dict_B = load_plain_tensors(ckpt_dir_B) - diffs = diff(state_dict_A, state_dict_B) + plain_state_dict_A = load_plain_tensors(ckpt_dir_A) + plain_state_dict_B = load_plain_tensors(ckpt_dir_B) + diffs = diff(plain_state_dict_A, plain_state_dict_B) + assert not any(map(bool, diffs)), diffs + + # Test both regular state dicts are equal, turning FP8 states to bytes first + regular_state_dict_A = {k: v.read() if k.endswith('_extra_state') else v + for k, v in regular_state_dict_A.items()} + regular_state_dict_B = {k: v.read() if k.endswith('_extra_state') else v + for k, v in regular_state_dict_B.items()} + diffs = diff(regular_state_dict_A, regular_state_dict_B) assert not any(map(bool, diffs)), diffs + Utils.destroy_model_parallel() + def test_state_dict_comparison(self, tmp_path_dist_ckpt): Utils.initialize_model_parallel(2, 4) From 3065e15b6725a9782bb4d288eda8daa9c48030f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20B=C5=82a=C5=BC?= Date: Fri, 15 Dec 2023 17:06:23 +0100 Subject: [PATCH 039/296] Fix format --- megatron/core/dist_checkpointing/utils.py | 7 ++++-- megatron/core/models/T5/t5_model.py | 8 +++++-- .../embeddings/language_model_embedding.py | 1 + megatron/core/models/gpt/gpt_model.py | 10 ++++++--- megatron/core/tensor_parallel/layers.py | 10 +++++---- megatron/core/transformer/mlp.py | 10 +++------ megatron/core/transformer/module.py | 14 ++++++++---- .../core/transformer/transformer_block.py | 22 ++++++++++++------- megatron/core/transformer/utils.py | 17 ++++++-------- 9 files changed, 59 insertions(+), 40 deletions(-) diff --git a/megatron/core/dist_checkpointing/utils.py 
b/megatron/core/dist_checkpointing/utils.py index fa7a0b6937..17aa8fcd5c 100644 --- a/megatron/core/dist_checkpointing/utils.py +++ b/megatron/core/dist_checkpointing/utils.py @@ -5,10 +5,11 @@ from .dict_utils import dict_list_map_inplace, extract_matching_values from .mapping import ( LocalNonpersitentObject, + ShardedObject, ShardedStateDict, ShardedTensor, ShardedTensorFactory, - StateDict, ShardedObject, + StateDict, ) @@ -44,7 +45,9 @@ def add_prefix(t): dict_list_map_inplace(add_prefix, sharded_state_dict) -def replace_prefix_for_sharding(sharded_state_dict: ShardedStateDict, old_prefix: str, new_prefix: str): +def replace_prefix_for_sharding( + sharded_state_dict: ShardedStateDict, old_prefix: str, new_prefix: str +): def replace_prefix(x): if isinstance(x, (ShardedTensor, ShardedTensorFactory, ShardedObject)): if not x.key.startswith(old_prefix): diff --git a/megatron/core/models/T5/t5_model.py b/megatron/core/models/T5/t5_model.py index cc32368427..7fb8d02d28 100644 --- a/megatron/core/models/T5/t5_model.py +++ b/megatron/core/models/T5/t5_model.py @@ -333,8 +333,12 @@ def shared_embedding_or_output_weight(self) -> Tensor: return self.lm_head.output_layer.weight return None - def sharded_state_dict(self, prefix: str = '', sharded_offsets: Tuple[Tuple[int, int, int]] = ()) -> ShardedStateDict: - assert not sharded_offsets, "We don't expect any sharded offsets at this level of model hierarchy" + def sharded_state_dict( + self, prefix: str = '', sharded_offsets: Tuple[Tuple[int, int, int]] = () + ) -> ShardedStateDict: + assert ( + not sharded_offsets + ), "We don't expect any sharded offsets at this level of model hierarchy" sharded_state_dict = {} if self.pre_process: diff --git a/megatron/core/models/common/embeddings/language_model_embedding.py b/megatron/core/models/common/embeddings/language_model_embedding.py index 93002fcd05..3e1e2114c0 100644 --- a/megatron/core/models/common/embeddings/language_model_embedding.py +++ b/megatron/core/models/common/embeddings/language_model_embedding.py @@ -9,6 +9,7 @@ from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.transformer_config import TransformerConfig + class LanguageModelEmbedding(MegatronModule): """Language model embeddings. diff --git a/megatron/core/models/gpt/gpt_model.py b/megatron/core/models/gpt/gpt_model.py index 23ea2cb426..858d03947d 100644 --- a/megatron/core/models/gpt/gpt_model.py +++ b/megatron/core/models/gpt/gpt_model.py @@ -1,7 +1,7 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. 
import logging -from typing import Literal, Optional, Union, Tuple +from typing import Literal, Optional, Tuple, Union import torch from torch import Tensor @@ -189,8 +189,12 @@ def forward( return loss - def sharded_state_dict(self, prefix: str = '', sharded_offsets: Tuple[Tuple[int, int, int]] = ()) -> ShardedStateDict: - assert not sharded_offsets, "We don't expect any sharded offsets at this level of model hierarchy" + def sharded_state_dict( + self, prefix: str = '', sharded_offsets: Tuple[Tuple[int, int, int]] = () + ) -> ShardedStateDict: + assert ( + not sharded_offsets + ), "We don't expect any sharded offsets at this level of model hierarchy" sharded_state_dict = {} if self.pre_process: diff --git a/megatron/core/tensor_parallel/layers.py b/megatron/core/tensor_parallel/layers.py index 0b6b6656aa..c61a837649 100644 --- a/megatron/core/tensor_parallel/layers.py +++ b/megatron/core/tensor_parallel/layers.py @@ -21,9 +21,10 @@ get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size, ) -from ..dist_checkpointing.mapping import ShardedStateDict +from ..dist_checkpointing.mapping import ShardedStateDict from ..transformer.utils import make_sharded_tensors_for_checkpoint +from ..utils import make_tp_sharded_tensor_for_checkpoint from .mappings import ( copy_to_tensor_model_parallel_region, gather_from_sequence_parallel_region, @@ -34,7 +35,6 @@ ) from .random import get_cuda_rng_tracker, get_expert_parallel_rng_tracker_name from .utils import VocabUtility, divide, split_tensor_along_last_dim -from ..utils import make_tp_sharded_tensor_for_checkpoint _grad_accum_fusion_available = True try: @@ -225,7 +225,9 @@ def forward(self, input_): output = reduce_from_tensor_model_parallel_region(output_parallel) return output - def sharded_state_dict(self, prefix: str = '', sharded_offsets: Tuple[Tuple[int, int, int]] = ()) -> ShardedStateDict: + def sharded_state_dict( + self, prefix: str = '', sharded_offsets: Tuple[Tuple[int, int, int]] = () + ) -> ShardedStateDict: """ Non-default implementation for embeddings due to `allow_shape_mismatch` param """ state_dict = self.state_dict(prefix='', keep_vars=True) @@ -235,7 +237,7 @@ def sharded_state_dict(self, prefix: str = '', sharded_offsets: Tuple[Tuple[int, tensor=state_dict['weight'], key=weight_prefix, allow_shape_mismatch=True, - prepend_offsets=sharded_offsets + prepend_offsets=sharded_offsets, ) } diff --git a/megatron/core/transformer/mlp.py b/megatron/core/transformer/mlp.py index 5f36ddf6fc..8bae1d93d4 100644 --- a/megatron/core/transformer/mlp.py +++ b/megatron/core/transformer/mlp.py @@ -110,13 +110,10 @@ def sharded_state_dict(self, prefix='', sharded_offsets=()): sharded_state_dict = {} for name, module in self._modules.items(): if name == 'linear_fc1' and self.config.gated_linear_unit: - sub_sd = self._sharded_state_dict_for_glu( - name, module, prefix, sharded_offsets - ) + sub_sd = self._sharded_state_dict_for_glu(name, module, prefix, sharded_offsets) else: sub_sd = module.sharded_state_dict( - prefix=f'{prefix}{name}.', - sharded_offsets=sharded_offsets, + prefix=f'{prefix}{name}.', sharded_offsets=sharded_offsets, ) sharded_state_dict.update(sub_sd) return sharded_state_dict @@ -130,8 +127,7 @@ def _sharded_state_dict_for_glu( ): assert module_name == 'linear_fc1', module_name sharded_state_dict = module.sharded_state_dict( - prefix=f'{prefix}{module_name}.', - sharded_offsets=sharded_offsets, + prefix=f'{prefix}{module_name}.', sharded_offsets=sharded_offsets, ) weight_key = f'{prefix}{module_name}.weight' 
prev_sh_ten = sharded_state_dict[weight_key] diff --git a/megatron/core/transformer/module.py b/megatron/core/transformer/module.py index bfbf4e99b6..6576b69c73 100644 --- a/megatron/core/transformer/module.py +++ b/megatron/core/transformer/module.py @@ -9,8 +9,10 @@ from megatron.core import parallel_state from megatron.core.dist_checkpointing.mapping import ShardedStateDict from megatron.core.transformer.transformer_config import TransformerConfig -from megatron.core.transformer.utils import make_sharded_tensors_for_checkpoint, \ - sharded_state_dict_default +from megatron.core.transformer.utils import ( + make_sharded_tensors_for_checkpoint, + sharded_state_dict_default, +) _FLOAT_TYPES = (torch.FloatTensor, torch.cuda.FloatTensor) _HALF_TYPES = (torch.HalfTensor, torch.cuda.HalfTensor) @@ -50,7 +52,9 @@ def state_dict_for_save_checkpoint(self, prefix: str = '', keep_vars: bool = Fal return self.state_dict(prefix=prefix, keep_vars=keep_vars) - def sharded_state_dict(self, prefix: str = '', sharded_offsets: Tuple[Tuple[int, int, int]] = ()) -> ShardedStateDict: + def sharded_state_dict( + self, prefix: str = '', sharded_offsets: Tuple[Tuple[int, int, int]] = () + ) -> ShardedStateDict: """Default implementation for sharded state dict for distributed checkpointing. General definition of sharded_state_dict simply calls `sharded_state_dict_default` @@ -67,7 +71,9 @@ def sharded_state_dict(self, prefix: str = '', sharded_offsets: Tuple[Tuple[int, """ sharded_state_dict = {} for name, module in self.named_children(): - sharded_state_dict.update(sharded_state_dict_default(module, f'{prefix}{name}.', sharded_offsets)) + sharded_state_dict.update( + sharded_state_dict_default(module, f'{prefix}{name}.', sharded_offsets) + ) return sharded_state_dict diff --git a/megatron/core/transformer/transformer_block.py b/megatron/core/transformer/transformer_block.py index b7b19227d9..7f9febc48b 100644 --- a/megatron/core/transformer/transformer_block.py +++ b/megatron/core/transformer/transformer_block.py @@ -3,7 +3,7 @@ import re from contextlib import nullcontext from dataclasses import dataclass -from typing import List, Union, Tuple +from typing import List, Tuple, Union import torch from torch import Tensor @@ -326,8 +326,12 @@ def forward( return hidden_states - def sharded_state_dict(self, prefix: str = '', sharded_offsets: Tuple[Tuple[int, int, int]] = ()) -> ShardedStateDict: - assert not sharded_offsets, "We don't expect any sharded offsets at this level of model hierarchy" + def sharded_state_dict( + self, prefix: str = '', sharded_offsets: Tuple[Tuple[int, int, int]] = () + ) -> ShardedStateDict: + assert ( + not sharded_offsets + ), "We don't expect any sharded offsets at this level of model hierarchy" sharded_state_dict = {} layer_prefix = f'{prefix}layers.' @@ -336,19 +340,21 @@ def sharded_state_dict(self, prefix: str = '', sharded_offsets: Tuple[Tuple[int, offset = layer._get_layer_offset() global_layer_offset = layer.layer_number - 1 # self.layer_number starts at 1 - state_dict_prefix = ( - f'{layer_prefix}{global_layer_offset - offset}.' # module list index in TransformerBlock - ) + state_dict_prefix = f'{layer_prefix}{global_layer_offset - offset}.' 
# module list index in TransformerBlock sharded_pp_offset = [ (0, global_layer_offset, num_layers) ] # PP sharding offset for ShardedTensors - layer_sharded_state_dict = layer.sharded_state_dict(prefix=state_dict_prefix, sharded_offsets=sharded_pp_offset) + layer_sharded_state_dict = layer.sharded_state_dict( + prefix=state_dict_prefix, sharded_offsets=sharded_pp_offset + ) replace_prefix_for_sharding(layer_sharded_state_dict, state_dict_prefix, layer_prefix) sharded_state_dict.update(layer_sharded_state_dict) # Add modules other than self.layers for name, module in self.named_children(): if not module is self.layers: - sharded_state_dict.update(sharded_state_dict_default(module, f'{prefix}{name}.', sharded_offsets)) + sharded_state_dict.update( + sharded_state_dict_default(module, f'{prefix}{name}.', sharded_offsets) + ) return sharded_state_dict diff --git a/megatron/core/transformer/utils.py b/megatron/core/transformer/utils.py index 3416bdf611..0a4750cd90 100644 --- a/megatron/core/transformer/utils.py +++ b/megatron/core/transformer/utils.py @@ -2,13 +2,12 @@ """Utilities for transformer layers.""" from operator import itemgetter -from typing import Any, Dict, Iterable, Optional, Tuple, Union, Iterator +from typing import Any, Dict, Iterable, Iterator, Optional, Tuple, Union import torch from megatron.core import parallel_state -from megatron.core.dist_checkpointing.mapping import ShardedObject, StateDict, \ - ShardedStateDict +from megatron.core.dist_checkpointing.mapping import ShardedObject, ShardedStateDict, StateDict from megatron.core.utils import ( make_sharded_tensor_for_checkpoint, make_tp_sharded_tensor_for_checkpoint, @@ -144,7 +143,9 @@ def _get_extra_state_offsets( return extra_state_shape, extra_state_offset -def sharded_state_dict_default(module: torch.nn.Module, prefix: str = '', sharded_offsets: Tuple[Tuple[int, int, int]] = ()) -> ShardedStateDict: +def sharded_state_dict_default( + module: torch.nn.Module, prefix: str = '', sharded_offsets: Tuple[Tuple[int, int, int]] = () +) -> ShardedStateDict: """Provides implementation for sharded_state_dict method for non-MegatronModules. 
Tries to call `module.sharded_state_dict` when possible, @@ -165,15 +166,11 @@ def sharded_state_dict_default(module: torch.nn.Module, prefix: str = '', sharde if hasattr(module, 'sharded_state_dict'): module_sharded_sd = module.sharded_state_dict( - prefix=prefix, - sharded_offsets=sharded_offsets, + prefix=prefix, sharded_offsets=sharded_offsets, ) else: module_sd = module.state_dict(prefix='', keep_vars=True) module_sharded_sd = make_sharded_tensors_for_checkpoint( - module_sd, - prefix, - {}, - sharded_offsets, + module_sd, prefix, {}, sharded_offsets, ) return module_sharded_sd From ccd5d71365da706e0027f6aa6456006383deaf92 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20B=C5=82a=C5=BC?= Date: Fri, 15 Dec 2023 17:12:09 +0100 Subject: [PATCH 040/296] Simplify interfaces format --- megatron/core/models/T5/t5_model.py | 8 ++------ megatron/core/models/gpt/gpt_model.py | 8 ++------ megatron/core/transformer/mlp.py | 4 ++-- megatron/core/transformer/module.py | 7 ++----- megatron/core/transformer/transformer_block.py | 8 ++------ megatron/core/transformer/utils.py | 2 +- 6 files changed, 11 insertions(+), 26 deletions(-) diff --git a/megatron/core/models/T5/t5_model.py b/megatron/core/models/T5/t5_model.py index 7fb8d02d28..5ad6b26dcc 100644 --- a/megatron/core/models/T5/t5_model.py +++ b/megatron/core/models/T5/t5_model.py @@ -333,12 +333,8 @@ def shared_embedding_or_output_weight(self) -> Tensor: return self.lm_head.output_layer.weight return None - def sharded_state_dict( - self, prefix: str = '', sharded_offsets: Tuple[Tuple[int, int, int]] = () - ) -> ShardedStateDict: - assert ( - not sharded_offsets - ), "We don't expect any sharded offsets at this level of model hierarchy" + def sharded_state_dict(self, prefix: str = '', sharded_offsets: tuple = ()) -> ShardedStateDict: + assert not sharded_offsets, "Unexpected sharded offsets" sharded_state_dict = {} if self.pre_process: diff --git a/megatron/core/models/gpt/gpt_model.py b/megatron/core/models/gpt/gpt_model.py index 858d03947d..b1b7560398 100644 --- a/megatron/core/models/gpt/gpt_model.py +++ b/megatron/core/models/gpt/gpt_model.py @@ -189,12 +189,8 @@ def forward( return loss - def sharded_state_dict( - self, prefix: str = '', sharded_offsets: Tuple[Tuple[int, int, int]] = () - ) -> ShardedStateDict: - assert ( - not sharded_offsets - ), "We don't expect any sharded offsets at this level of model hierarchy" + def sharded_state_dict(self, prefix: str = '', sharded_offsets: tuple = ()) -> ShardedStateDict: + assert not sharded_offsets, "Unexpected sharded offsets" sharded_state_dict = {} if self.pre_process: diff --git a/megatron/core/transformer/mlp.py b/megatron/core/transformer/mlp.py index 8bae1d93d4..00f3ead2dc 100644 --- a/megatron/core/transformer/mlp.py +++ b/megatron/core/transformer/mlp.py @@ -8,7 +8,7 @@ from megatron.core import parallel_state from megatron.core.dist_checkpointing import ShardedTensor -from megatron.core.dist_checkpointing.mapping import ShardedTensorFactory +from megatron.core.dist_checkpointing.mapping import ShardedStateDict, ShardedTensorFactory from megatron.core.fusions.fused_bias_gelu import bias_gelu_impl from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.spec_utils import ModuleSpec, build_module @@ -106,7 +106,7 @@ def forward(self, hidden_states): return output, output_bias - def sharded_state_dict(self, prefix='', sharded_offsets=()): + def sharded_state_dict(self, prefix: str = '', sharded_offsets: tuple = ()) -> ShardedStateDict: 
sharded_state_dict = {} for name, module in self._modules.items(): if name == 'linear_fc1' and self.config.gated_linear_unit: diff --git a/megatron/core/transformer/module.py b/megatron/core/transformer/module.py index 6576b69c73..eedfa9ce26 100644 --- a/megatron/core/transformer/module.py +++ b/megatron/core/transformer/module.py @@ -63,7 +63,7 @@ def sharded_state_dict( Args: prefix (str): prefix for the state dict keys - sharded_offsets (Iterable[Tuple[int, int, int]], optional): sharding already + sharded_offsets (Tuple[Tuple[int, int, int]], optional): sharding already applied (e.g. PP related) by sup-modules. Passed along to ShardedTensor Returns: @@ -164,10 +164,7 @@ def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False): return self.module.state_dict_for_save_checkpoint(prefix=prefix, keep_vars=keep_vars) def sharded_state_dict(self, prefix='', *args, **kwargs): - """Retrieve state_dict from the module being wrapped. - - When using distributed checkpointing, keep_vars must always be set to True. - """ + """Retrieve sharded_state_dict from the module being wrapped.""" return self.module.sharded_state_dict(prefix, *args, **kwargs) def load_state_dict(self, state_dict, strict=True): diff --git a/megatron/core/transformer/transformer_block.py b/megatron/core/transformer/transformer_block.py index 7f9febc48b..4758a6db59 100644 --- a/megatron/core/transformer/transformer_block.py +++ b/megatron/core/transformer/transformer_block.py @@ -326,12 +326,8 @@ def forward( return hidden_states - def sharded_state_dict( - self, prefix: str = '', sharded_offsets: Tuple[Tuple[int, int, int]] = () - ) -> ShardedStateDict: - assert ( - not sharded_offsets - ), "We don't expect any sharded offsets at this level of model hierarchy" + def sharded_state_dict(self, prefix: str = '', sharded_offsets: tuple = ()) -> ShardedStateDict: + assert not sharded_offsets, "Unexpected sharded offsets" sharded_state_dict = {} layer_prefix = f'{prefix}layers.' diff --git a/megatron/core/transformer/utils.py b/megatron/core/transformer/utils.py index 0a4750cd90..5e519a4214 100644 --- a/megatron/core/transformer/utils.py +++ b/megatron/core/transformer/utils.py @@ -157,7 +157,7 @@ def sharded_state_dict_default( Args: module (torch.nn.Module): module which sharded state dict we want to obtain prefix (str): prefix for the state dict keys - sharded_offsets (Iterable[Tuple[int, int, int]], optional): sharding already + sharded_offsets (Tuple[Tuple[int, int, int]], optional): sharding already applied (e.g. PP related) by sup-modules. Passed along to ShardedTensor Returns: From 7433f3fa9c2e251597838aaabd563adcbf72ce72 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20B=C5=82a=C5=BC?= Date: Fri, 15 Dec 2023 17:19:20 +0100 Subject: [PATCH 041/296] Adjust TransformerLayer tests --- .../transformer/test_transformer_layer.py | 29 +++++++++---------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/tests/unit_tests/transformer/test_transformer_layer.py b/tests/unit_tests/transformer/test_transformer_layer.py index 2836e54484..be51f2cc1f 100644 --- a/tests/unit_tests/transformer/test_transformer_layer.py +++ b/tests/unit_tests/transformer/test_transformer_layer.py @@ -76,13 +76,12 @@ def test_sharded_state_dict(self, tp_pp): # Test all global shapes. 
Prepend num layers in front of expected shapes tensor_global_shapes = {k: v.global_shape for k, v in sharded_tensors.items()} - expected_global_shapes = {k: (transformer_config.num_layers, *v) - for k, v in get_tensor_shapes_for_tp(transformer_config, 1).items()} + expected_global_shapes = get_tensor_shapes_for_tp(transformer_config, 1) assert tensor_global_shapes == expected_global_shapes # Test ShardedTensor keys for state_dict_key, sh_ten in sharded_tensors.items(): - assert state_dict_key == f'0.{sh_ten.key}' + assert state_dict_key == sh_ten.key Utils.destroy_model_parallel() Utils.initialize_model_parallel(1, 1) @@ -91,16 +90,16 @@ def test_sharded_state_dict(self, tp_pp): def get_tensor_shapes_for_tp(transformer_config, tp_size): hs = transformer_config.hidden_size return { - '0.mlp.linear_fc1.layer_norm_weight': (hs,), - '0.mlp.linear_fc1.layer_norm_bias': (hs,), - '0.mlp.linear_fc1.weight': (hs * 4 // tp_size, hs), - '0.mlp.linear_fc1.bias': (hs * 4 // tp_size,), - '0.mlp.linear_fc2.weight': (hs, hs * 4 // tp_size), - '0.mlp.linear_fc2.bias': (hs,), - '0.self_attention.linear_proj.weight': (hs, hs // tp_size), - '0.self_attention.linear_proj.bias': (hs,), - '0.self_attention.linear_qkv.layer_norm_weight': (hs,), - '0.self_attention.linear_qkv.layer_norm_bias': (hs,), - '0.self_attention.linear_qkv.weight': (hs * 3 // tp_size, hs), - '0.self_attention.linear_qkv.bias': (hs * 3 // tp_size,), + 'mlp.linear_fc1.layer_norm_weight': (hs,), + 'mlp.linear_fc1.layer_norm_bias': (hs,), + 'mlp.linear_fc1.weight': (hs * 4 // tp_size, hs), + 'mlp.linear_fc1.bias': (hs * 4 // tp_size,), + 'mlp.linear_fc2.weight': (hs, hs * 4 // tp_size), + 'mlp.linear_fc2.bias': (hs,), + 'self_attention.linear_proj.weight': (hs, hs // tp_size), + 'self_attention.linear_proj.bias': (hs,), + 'self_attention.linear_qkv.layer_norm_weight': (hs,), + 'self_attention.linear_qkv.layer_norm_bias': (hs,), + 'self_attention.linear_qkv.weight': (hs * 3 // tp_size, hs), + 'self_attention.linear_qkv.bias': (hs * 3 // tp_size,), } From 3a135f8f4b8af979c462100d2cb5fbf903d568a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20B=C5=82a=C5=BC?= Date: Fri, 15 Dec 2023 17:24:51 +0100 Subject: [PATCH 042/296] Adjust for Python < 3.9 --- megatron/core/dist_checkpointing/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/megatron/core/dist_checkpointing/utils.py b/megatron/core/dist_checkpointing/utils.py index 17aa8fcd5c..a5ee251e3b 100644 --- a/megatron/core/dist_checkpointing/utils.py +++ b/megatron/core/dist_checkpointing/utils.py @@ -52,7 +52,7 @@ def replace_prefix(x): if isinstance(x, (ShardedTensor, ShardedTensorFactory, ShardedObject)): if not x.key.startswith(old_prefix): raise ValueError(f'Expected {x.key} to begin with prefix {old_prefix}') - x.key = f'{new_prefix}{x.key.removeprefix(old_prefix)}' + x.key = f'{new_prefix}{x.key[len(old_prefix):]}' # str.removeprefix in Python >= 3.9 return x dict_list_map_inplace(replace_prefix, sharded_state_dict) From 32add31787dfd0a047eb4e5bb9c5ad0034a0675f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20B=C5=82a=C5=BC?= Date: Fri, 15 Dec 2023 18:10:36 +0100 Subject: [PATCH 043/296] Revert "Improve GPT unit test" This reverts commit 4ea6c55fff8994f62c17b0cbea12446d7fe548c4. 
--- .../models/test_gpt_model.py | 18 +++--------------- 1 file changed, 3 insertions(+), 15 deletions(-) diff --git a/tests/unit_tests/dist_checkpointing/models/test_gpt_model.py b/tests/unit_tests/dist_checkpointing/models/test_gpt_model.py index efe5361630..a910fec52a 100644 --- a/tests/unit_tests/dist_checkpointing/models/test_gpt_model.py +++ b/tests/unit_tests/dist_checkpointing/models/test_gpt_model.py @@ -71,7 +71,6 @@ def test_parallel_reconfiguration_e2e(self, tmp_path_dist_ckpt, src_tp_pp, dest_ Utils.initialize_model_parallel(*src_tp_pp) gpt_model_A = initialize_gpt_model(1) save(gpt_model_A.sharded_state_dict(), ckpt_dir_A) - regular_state_dict_A = gpt_model_A.state_dict() Utils.destroy_model_parallel() # Load checkpoint A with different TP/PP and save as checkpoint B @@ -80,25 +79,14 @@ def test_parallel_reconfiguration_e2e(self, tmp_path_dist_ckpt, src_tp_pp, dest_ state_dict = load(gpt_model_B.sharded_state_dict(), ckpt_dir_A) gpt_model_B.load_state_dict(state_dict) save(gpt_model_B.sharded_state_dict(), ckpt_dir_B) - regular_state_dict_B = gpt_model_A.state_dict() Utils.destroy_model_parallel() # Test both checkpoints are equal Utils.initialize_model_parallel(1, 1) - plain_state_dict_A = load_plain_tensors(ckpt_dir_A) - plain_state_dict_B = load_plain_tensors(ckpt_dir_B) - diffs = diff(plain_state_dict_A, plain_state_dict_B) - assert not any(map(bool, diffs)), diffs - - # Test both regular state dicts are equal, turning FP8 states to bytes first - regular_state_dict_A = {k: v.read() if k.endswith('_extra_state') else v - for k, v in regular_state_dict_A.items()} - regular_state_dict_B = {k: v.read() if k.endswith('_extra_state') else v - for k, v in regular_state_dict_B.items()} - diffs = diff(regular_state_dict_A, regular_state_dict_B) + state_dict_A = load_plain_tensors(ckpt_dir_A) + state_dict_B = load_plain_tensors(ckpt_dir_B) + diffs = diff(state_dict_A, state_dict_B) assert not any(map(bool, diffs)), diffs - Utils.destroy_model_parallel() - def test_state_dict_comparison(self, tmp_path_dist_ckpt): Utils.initialize_model_parallel(2, 4) From 204661cf16f8cc7f862bdd73f835e96c2ec4a3fc Mon Sep 17 00:00:00 2001 From: Selvaraj Anandaraj Date: Sun, 17 Dec 2023 20:52:45 -0800 Subject: [PATCH 044/296] Offload everything except the weights fix Signed-off-by: Selvaraj Anandaraj --- megatron/core/cpu_offload.py | 28 +------------------ .../custom_layers/transformer_engine.py | 2 ++ .../core/transformer/transformer_config.py | 4 +-- 3 files changed, 5 insertions(+), 29 deletions(-) diff --git a/megatron/core/cpu_offload.py b/megatron/core/cpu_offload.py index 8fcc3bc219..96999ddadf 100644 --- a/megatron/core/cpu_offload.py +++ b/megatron/core/cpu_offload.py @@ -86,8 +86,6 @@ def on_save_for_backward(self, tensor: torch.Tensor) -> Any: tensor, **self.handler_extra_kwargs ) - if self.debug: - logging.info(f"On save tensor shape {tensor.shape} parameter {type(tensor)}, offload_handler returns identifier {retrieve_identifier}") return retrieve_identifier def on_get_saved_tensor(self, retrieve_identifier: Any) -> torch.Tensor: @@ -95,8 +93,6 @@ def on_get_saved_tensor(self, retrieve_identifier: Any) -> torch.Tensor: retrieve_identifier, **self.handler_extra_kwargs ) - if self.debug: - logging.info(f"On get tensor, from identifier {retrieve_identifier} get tensor shape {tensor.shape}") return tensor class OffloadHandler: @@ -157,9 +153,6 @@ def groupid_reset(self): self.tensor_tag_to_state = dict() def on_group_commit_forward(self): - if self.debug: - 
logging.info(f"on_group_commit_forward current_group: {self.current_group}") - # finishing up with updating current group and tensor count self.current_group += 1 # increment self.tensor_count_current_group = 0 # reset @@ -168,9 +161,6 @@ def on_group_commit_backward(self): self.current_group -= 1 assert self.current_group >= 0 - if self.debug: - logging.info(f"on_group_commit_backward current_group: {self.current_group}") - @staticmethod def offload(src_tensor, pin_memory=True): cpu_backup = torch.empty(src_tensor.size(), @@ -192,9 +182,6 @@ def reload(state, non_blocking=None): def tensor_push(self, tensor: torch.Tensor, **kwargs): # obtain a unique tensor tag tensor_tag = (self.current_group, self.tensor_count_current_group) - if self.debug: - logging.info("tensor_push", tensor_tag, tensor.shape, type(tensor), - "need_offloading ?", self.tensor_need_offloading_checker(tensor)) self.tensor_count_current_group += 1 assert not (tensor_tag in self.tensor_tag_to_state) if self.current_group < self.num_offload_group and self.tensor_need_offloading_checker(tensor): @@ -206,8 +193,6 @@ def tensor_push(self, tensor: torch.Tensor, **kwargs): def tensor_pop(self, tensor_tag, **kwargs): assert tensor_tag in self.tensor_tag_to_state - if self.debug: - logging.info("tensor_pop", tensor_tag) state = self.tensor_tag_to_state.pop(tensor_tag) if isinstance(state, tuple): tensor = SynchronizedGroupOffloadHandler.reload(state) @@ -262,8 +247,6 @@ def get_tensor_buf_for_offloaded_tensor(self, tensor, tensor_tag): if allocate_new_buf: # supposed to only execute once - if self.debug: - logging.info(f"Allocating tensor_buf for group {group_id} tensor {tensor_id} size {tensor.size()}") id_buf_map[tensor_id] = torch.empty(tensor.size(), dtype=tensor.dtype, layout=tensor.layout, @@ -274,8 +257,6 @@ def get_tensor_buf_for_offloaded_tensor(self, tensor, tensor_tag): def tensor_push(self, tensor: torch.Tensor, **kwargs) -> Any: # obtain a unique tensor tag tensor_tag = (self.current_group, self.tensor_count_current_group) - if self.debug: - logging.info("tensor_push", tensor_tag, tensor.shape, type(tensor), "need_offloading ?", self.tensor_need_offloading_checker(tensor)) self.tensor_count_current_group += 1 assert not (tensor_tag in self.tensor_tag_to_state) @@ -291,8 +272,6 @@ def tensor_push(self, tensor: torch.Tensor, **kwargs) -> Any: def tensor_pop(self, tensor_tag, **kwargs): assert tensor_tag in self.tensor_tag_to_state - if self.debug: - logging.info("tensor_pop", tensor_tag) tensor = self.tensor_tag_to_state.pop(tensor_tag) # the tensor should have been copied back in on_group_commit_backward() which invokes bulk_reload_group assert not isinstance(tensor, tuple) @@ -364,17 +343,12 @@ def on_group_commit_backward(self): self.current_group -= 1 assert self.current_group >= 0 - if self.debug: - logging.info(f"on_group_commit_backward current_group: {self.current_group}") - # decide the range of group to prefetch should_prefetch_until_group = self.current_group - self.num_prefetch_group if should_prefetch_until_group < 0: should_prefetch_until_group = 0 # do prefetch - if self.debug: - logging.info(f"num_prefetch_group = {self.num_prefetch_group} num_offload_group = {self.num_offload_group} fetch from {self.next_group_to_fetch} to {should_prefetch_until_group}") for group_num_to_prefetch in range(self.next_group_to_fetch, should_prefetch_until_group - 1, -1): # record the event in the compute stream, for h2d to wait 
torch.cuda.current_stream().record_event(self.compute_stream_bwd_start_events[group_num_to_prefetch]) @@ -397,7 +371,7 @@ def on_group_commit_backward(self): def get_cpu_offload_context(cpu_offloading, cpu_offloading_num_layers): def tensor_need_offloading_checker(tensor): - return (not isinstance(tensor, torch.nn.Parameter)) + return not hasattr(tensor,"avoid_offloading") cpu_offload_handler = AsyncDoubleBufferGroupOffloadHandler( num_offload_group=cpu_offloading_num_layers, diff --git a/megatron/core/transformer/custom_layers/transformer_engine.py b/megatron/core/transformer/custom_layers/transformer_engine.py index 8154ba6012..e02bee5cbd 100644 --- a/megatron/core/transformer/custom_layers/transformer_engine.py +++ b/megatron/core/transformer/custom_layers/transformer_engine.py @@ -122,6 +122,7 @@ def __init__( out_features=output_size, sequence_parallel=self.config.sequence_parallel, fuse_wgrad_accumulation=self.config.gradient_accumulation_fusion, + cpu_offloading=self.config.cpu_offloading, tp_group=get_tensor_model_parallel_group(check_initialized=False), tp_size=self.config.tensor_model_parallel_size, get_rng_state_tracker=get_cuda_rng_tracker, @@ -211,6 +212,7 @@ def __init__( eps=self.config.layernorm_epsilon, sequence_parallel=self.config.sequence_parallel, fuse_wgrad_accumulation=self.config.gradient_accumulation_fusion, + cpu_offloading=self.config.cpu_offloading, tp_group=get_tensor_model_parallel_group(check_initialized=False), tp_size=self.config.tensor_model_parallel_size, get_rng_state_tracker=get_cuda_rng_tracker, diff --git a/megatron/core/transformer/transformer_config.py b/megatron/core/transformer/transformer_config.py index e55e8d7ab9..d89dcfa25b 100644 --- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -107,8 +107,8 @@ class TransformerConfig(ModelParallelConfig): fp8_wgrad: bool = True # cpu offload - cpu_offloading: bool = True - cpu_offloading_num_layers: int = 1 + cpu_offloading: bool = False + cpu_offloading_num_layers: int = 0 # miscellaneous clone_scatter_output_in_embedding: bool = True From 8f3fe522ecc00f7624fc67ab32dd873aeb4be095 Mon Sep 17 00:00:00 2001 From: Selvaraj Anandaraj Date: Sun, 17 Dec 2023 21:01:21 -0800 Subject: [PATCH 045/296] Added comments about offloading configuration variables Signed-off-by: Selvaraj Anandaraj --- megatron/core/transformer/transformer_config.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/megatron/core/transformer/transformer_config.py b/megatron/core/transformer/transformer_config.py index d89dcfa25b..df3398d29a 100644 --- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -51,6 +51,8 @@ class TransformerConfig(ModelParallelConfig): fp8_amax_history_len (int): The length of the amax history window used for scaling factor computation. fp8_amax_compute_algo (str): Algorithm used for choosing the `amax` value for the scaling factor computation. There are 2 predefined choices: `max` chooses the largest `amax` in the history window, while `most_recent` always chooses the most recently seen value. fp8_wgrad (bool): When set to False, override FP8 config options and do the wgrad computation in higher precision. Defaults to True. + cpu_offloading (bool): When set to True, all the activations are offloaded to the CPU asynchronously + cpu_offloading_num_layers (int): Tells the number of transformer layers for which activations has to be offloaded. 
clone_scatter_output_in_embedding (bool): When set to true, clone the output of scatter_to_sequence_parallel_region in embedding layer to facilitate garbage collection of input. normalization (str): Swtich b/w `LayerNorm` and `RMSNorm` as normalization layers. For now, these are primarily used by Transformer-Engine's layers like `LayerNormLinear`. Default value is `LayerNorm`. """ From a8f61bd5ad261dfcaf210c73de182424d0d59580 Mon Sep 17 00:00:00 2001 From: Selvaraj Anandaraj Date: Mon, 18 Dec 2023 00:47:44 -0800 Subject: [PATCH 046/296] Need a switch to enable atomic GEMM from NeMo level Signed-off-by: Selvaraj Anandaraj --- megatron/core/model_parallel_config.py | 7 ++++++- .../core/transformer/custom_layers/transformer_engine.py | 3 +++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/megatron/core/model_parallel_config.py b/megatron/core/model_parallel_config.py index 22d34da921..44c97fe8f8 100644 --- a/megatron/core/model_parallel_config.py +++ b/megatron/core/model_parallel_config.py @@ -70,9 +70,12 @@ class ModelParallelConfig: tp_comm_split_ag (bool, default=True): If true, allows All-Gather overlap with Fprop GEMM. Don't care if tp_comm_overlap is False. - + tp_comm_atomic_ag (bool, default=True): If true, allows All-Gather overlap with Fprop GEMM. Don't care if tp_comm_overlap + is False. tp_comm_split_rs (bool, default=True): If true, allows Reduce-Scatter overlap with Fprop GEMM. Don't care if tp_comm_overlap is False. + tp_comm_atomic_rs (bool, default=True): If true, allows Reduce-Scatter overlap with Fprop GEMM. Don't care if + tp_comm_overlap is False. tp_comm_bulk_dgrad (bool, default=True): If true, allows All-Gather overlap with Bprop activation gradient GEMM. Don't care if tp_comm_overlap is False. @@ -168,7 +171,9 @@ class ModelParallelConfig: # Debug Options tp_comm_split_ag: bool = True + tp_comm_atomic_ag: bool = True tp_comm_split_rs: bool = True + tp_comm_atomic_rs: bool = True tp_comm_bulk_wgrad: bool = True tp_comm_bulk_dgrad: bool = True diff --git a/megatron/core/transformer/custom_layers/transformer_engine.py b/megatron/core/transformer/custom_layers/transformer_engine.py index 8154ba6012..b688f80c65 100644 --- a/megatron/core/transformer/custom_layers/transformer_engine.py +++ b/megatron/core/transformer/custom_layers/transformer_engine.py @@ -110,7 +110,9 @@ def __init__( if te_version >= packaging.version.Version("0.8.0"): if self.config.tp_comm_overlap: extra_kwargs["ub_split_ag"] = self.config.tp_comm_split_ag + extra_kwargs["ub_atomic_gemm_ag"] = self.config.tp_comm_atomic_ag extra_kwargs["ub_split_rs"] = self.config.tp_comm_split_rs + extra_kwargs["ub_atomic_gemm_rs"] = self.config.tp_comm_atomic_rs if te_version > packaging.version.Version("1.0.0"): assert ( tp_comm_buffer_name is not None @@ -198,6 +200,7 @@ def __init__( if self.config.tp_comm_overlap: extra_kwargs["ub_bulk_wgrad"] = self.config.tp_comm_bulk_wgrad extra_kwargs["ub_bulk_dgrad"] = self.config.tp_comm_bulk_dgrad + extra_kwargs["ub_atomic_gemm_ag"] = self.config.tp_comm_atomic_ag extra_kwargs["ub_split_ag"] = self.config.tp_comm_split_ag if te_version > packaging.version.Version("1.0.0"): assert ( From 43d99ceafb1d31ec282301670bd42327c977ae1a Mon Sep 17 00:00:00 2001 From: Selvaraj Anandaraj Date: Mon, 18 Dec 2023 14:01:32 -0800 Subject: [PATCH 047/296] MR cleanup requirements Signed-off-by: Selvaraj Anandaraj --- .../blended_megatron_dataset_config.py | 15 ----- megatron/core/datasets/gpt_dataset.py | 67 ++++++++++++++----- 2 files changed, 50 insertions(+), 32 deletions(-) 
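The per-sample outputs documented in this patch reduce to a few lines of plain torch once batching and the EOD-reset branches are stripped away. The sketch below is illustrative only: it is not the code added by the patch, it omits the reset_position_ids / reset_attention_mask handling, and the eod_token value of 2 is a made-up example.

import torch

def sketch_ltor_masks_and_position_ids(tokens, eod_token, eod_mask_loss=True):
    # Single unbatched sequence, mirroring the refactored per-sample path.
    seq_length = tokens.numel()
    # Causal (lower-triangular) mask with a leading singleton dim; True means "masked out".
    attention_mask = (
        torch.tril(torch.ones((seq_length, seq_length), device=tokens.device)).unsqueeze(0) < 0.5
    )
    # Loss mask: optionally drop the loss on EOD tokens.
    loss_mask = torch.ones(seq_length, dtype=torch.float, device=tokens.device)
    if eod_mask_loss:
        loss_mask[tokens == eod_token] = 0.0
    # Position ids: a plain arange when document resets are disabled.
    position_ids = torch.arange(seq_length, dtype=torch.long, device=tokens.device)
    return attention_mask, loss_mask, position_ids

tokens = torch.tensor([5, 9, 2, 7, 3])   # toy sequence; 2 plays the role of EOD here
attention_mask, loss_mask, position_ids = sketch_ltor_masks_and_position_ids(tokens, eod_token=2)
print(attention_mask.shape)    # torch.Size([1, 5, 5])
print(loss_mask.tolist())      # [1.0, 1.0, 0.0, 1.0, 1.0]
print(position_ids.tolist())   # [0, 1, 2, 3, 4]

In the real GPTDataset these tensors are returned per sample under the keys listed in __getitem__ and are batched afterwards by the DataLoader.
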
diff --git a/megatron/core/datasets/blended_megatron_dataset_config.py b/megatron/core/datasets/blended_megatron_dataset_config.py index 5335c93db9..9f8344e791 100644 --- a/megatron/core/datasets/blended_megatron_dataset_config.py +++ b/megatron/core/datasets/blended_megatron_dataset_config.py @@ -89,21 +89,6 @@ def __post_init__(self): log_single_rank(logger, logging.INFO, f"Let split_matrix = {self.split_matrix}") -@dataclass -class GPTDatasetConfig(BlendedMegatronDatasetConfig): - """Configuration object for megatron-core blended and megatron GPT datasets - - Attributes: - return_document_ids (bool): Whether to return the document ids when querying the dataset. - """ - - return_document_ids: bool = False - reset_position_ids: bool = False - reset_attention_mask: bool = False - eod_mask_loss: bool = False - eod_id: int = 0 - - def parse_and_normalize_split(split: str) -> List[float]: """Parse the dataset split ratios from a string diff --git a/megatron/core/datasets/gpt_dataset.py b/megatron/core/datasets/gpt_dataset.py index a141e8c2ba..3b7357df71 100644 --- a/megatron/core/datasets/gpt_dataset.py +++ b/megatron/core/datasets/gpt_dataset.py @@ -4,7 +4,7 @@ import os import time from dataclasses import dataclass -from typing import Dict, Tuple +from typing import Dict, Tuple, Union import numpy import torch @@ -20,9 +20,25 @@ @dataclass class GPTDatasetConfig(BlendedMegatronDatasetConfig): """Configuration object for Megatron Core GPT datasets + + Attributes: + return_document_ids (bool): Whether to return the document ids when querying the dataset. + + reset_position_ids (bool): Option to reset the position IDs in the dataset at an interval + + reset_attention_mask (bool): Option to reset the attention mask from the dataset + + eod_mask_loss (bool): Option to enable the EOD mask loss + + eod_id (int): Has the identity of the end of document + """ - pass + return_document_ids: bool = False + reset_position_ids: bool = False + reset_attention_mask: bool = False + eod_mask_loss: bool = False + eod_id: int = 0 class GPTDataset(MegatronDataset): @@ -72,7 +88,7 @@ def __len__(self) -> int: """ return self.sample_index.shape[0] - 1 - def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]: + def __getitem__(self, idx: int) -> Dict[str, Union[torch.Tensor, numpy.ndarray]]: """Abstract method implementation Args: @@ -91,15 +107,12 @@ def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]: attention_mask, loss_mask, position_ids = _get_ltor_masks_and_position_ids( tokens, - getattr(self.config,"eod_id"), - getattr(self.config,"reset_position_ids"), - getattr(self.config,"reset_attention_mask"), - getattr(self.config,"eod_mask_loss")) + self.config.eod_id, + self.config.reset_position_ids, + self.config.reset_attention_mask, + self.config.eod_mask_loss) - if getattr(self.config, "return_document_ids"): - return {"tokens": tokens,"labels": labels,"attention_mask": attention_mask,"loss_mask": loss_mask,"position_ids": position_ids} - else: - return {"tokens": tokens,"labels": labels,"attention_mask": attention_mask,"loss_mask": loss_mask,"position_ids": position_ids} + return {"tokens": tokens,"labels": labels,"attention_mask": attention_mask,"loss_mask": loss_mask,"position_ids": position_ids} @staticmethod def is_multimodal() -> bool: @@ -474,12 +487,32 @@ def _build_shuffle_index( return numpy.concatenate((shuffle_idx_first, shuffle_idx_last)) -def _get_ltor_masks_and_position_ids(data, - eod_token, - reset_position_ids, - reset_attention_mask, - eod_mask_loss): - """Build masks and 
position id for left to right model.""" +def _get_ltor_masks_and_position_ids(data: torch.Tensor, + eod_token: int, + reset_position_ids: bool, + reset_attention_mask: bool, + eod_mask_loss: bool): + """Build masks and position id for left to right model. + + Args: + data (torch.Tensor): The data tenor that holds the tokens from the dataset + + eod_token (int): ID of the token to that is considered the EOD + + reset_position_ids (bool): Switch to reset the document position ID's + + reset_attention_mask (bool): Switch to reset the attention mask + + eod_mask_loss (bool): Switch to enable the EOD mask loss + + Returns: + attention_mask (torch.Tensor) : Attention mask needed to be used for Attention + + loss_mask (torch.Tensor) : The mask used for loss value during training + + position_ids (torch.Tensor) : The position ID's of the token + + """ # Extract batch size and sequence length. seq_length = data.numel() From 3ada5124d66f6c6d768489e55dbf358619a0de8a Mon Sep 17 00:00:00 2001 From: Xiaowei Ren Date: Mon, 18 Dec 2023 14:26:36 -0800 Subject: [PATCH 048/296] add a functional test of TP2CP2PP2 Signed-off-by: Xiaowei Ren --- .gitlab-ci.yml | 17 +++++++++++++++++ ...ps_core_enabled_context_parallelism_cp2.json | 1 + 2 files changed, 18 insertions(+) create mode 100644 tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_context_parallelism_cp2.json diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index c7401cd84e..5c7613a9aa 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -500,6 +500,23 @@ train.gpt3_core.345m_cp2_tp2_pp1_1node_50steps: PYTORCH_IMAGE: "/lustre/fsw/adlr/adlr-nlp/adlr_ci/megatron/pytorch_23.10_flash_attn_1.0.9_context_parallelism.sqsh" ADDITIONAL_PARAMS: "--context-parallel-size 2 --sequence-parallel --hidden-dropout 0.0 --attention-dropout 0.0" +train.gpt3_core.345m_cp2_tp2_pp2_1node_50steps: + <<: *selene-test-launcher + variables: + <<: [*VARS] + RUN_MODEL: gpt3 + USE_TE: 0 + TP_SIZE: 2 + PP_SIZE: 2 + NUM_NODES: 1 + MAX_STEPS: 50 + USE_CORE: 1 + TIME_LIMIT: "20:00" + TEST_LEVEL: MR_TESTS + METADATA: "context_parallelism_cp2" + PYTORCH_IMAGE: "/lustre/fsw/adlr/adlr-nlp/adlr_ci/megatron/pytorch_23.10_flash_attn_1.0.9_context_parallelism.sqsh" + ADDITIONAL_PARAMS: "--context-parallel-size 2 --sequence-parallel --hidden-dropout 0.0 --attention-dropout 0.0" + # Note: Core MoE models currently will run TE by default train.te_core_moe_gpt3.345m_tp2_pp2_2experts_1node_50steps: <<: *selene-test-launcher diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_context_parallelism_cp2.json b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_context_parallelism_cp2.json new file mode 100644 index 0000000000..04072985be --- /dev/null +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_context_parallelism_cp2.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.88757, 10.90849, 10.88103, 10.84524, 10.69287, 10.60192, 10.09546, 10.1824, 10.08766, 9.76749]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [584.0, 665.0, 694.0, 650.0, 684.0, 646.0, 569.0, 699.0, 804.0, 792.0]}, "iteration_timing_avg": 0.3032499999999999} From 94b9a07686d0875e69a0f9c764c0ac8470a525d1 Mon Sep 17 00:00:00 2001 From: Xiaowei Ren Date: Mon, 18 Dec 2023 15:27:08 -0800 Subject: [PATCH 049/296] fix golden state test results of TP2CP2PP2 Signed-off-by: Xiaowei Ren --- 
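The golden-value JSON files used by these functional tests share a simple schema: each metric maps to values sampled every step_interval steps between start_step and end_step, plus a scalar iteration_timing_avg. The actual CI comparison harness is not shown in this series; the snippet below is only a sketch of how such a file could be checked against logged metrics, and check_golden, logged and rel_tol are invented names for illustration.

import json

def check_golden(logged, golden_path, rel_tol=0.05):
    # logged: dict mapping metric name -> {step: value}, gathered from a training run.
    with open(golden_path) as f:
        golden = json.load(f)
    for name, spec in golden.items():
        if not isinstance(spec, dict):
            continue  # scalars such as "iteration_timing_avg" would need a separate check
        steps = range(spec["start_step"], spec["end_step"], spec["step_interval"])
        for step, expected in zip(steps, spec["values"]):
            actual = logged[name][step]
            assert abs(actual - expected) <= rel_tol * max(abs(expected), 1e-6), (name, step, actual, expected)
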
...pp2_1nodes_50steps_core_enabled_context_parallelism_cp2.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_context_parallelism_cp2.json b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_context_parallelism_cp2.json index 04072985be..8aaab492e2 100644 --- a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_context_parallelism_cp2.json +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_context_parallelism_cp2.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.88757, 10.90849, 10.88103, 10.84524, 10.69287, 10.60192, 10.09546, 10.1824, 10.08766, 9.76749]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [584.0, 665.0, 694.0, 650.0, 684.0, 646.0, 569.0, 699.0, 804.0, 792.0]}, "iteration_timing_avg": 0.3032499999999999} +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.93293, 10.93657, 10.88786, 10.86127, 10.71506, 10.61068, 10.06701, 10.17618, 10.07536, 9.74958]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [599.0, 655.0, 664.0, 679.0, 596.0, 643.0, 577.0, 776.0, 817.0, 805.0]}, "iteration_timing_avg": 0.3355429411764707} From f489587190c10a524c79e9e1a4b0be8f78985136 Mon Sep 17 00:00:00 2001 From: Kirthi Shankar Sivamani Date: Tue, 19 Dec 2023 09:38:20 +0000 Subject: [PATCH 050/296] Add arg for memory_efficient Signed-off-by: Kirthi Shankar Sivamani --- megatron/model/fused_layer_norm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/megatron/model/fused_layer_norm.py b/megatron/model/fused_layer_norm.py index c91a674e8c..bcb7bd7ecd 100644 --- a/megatron/model/fused_layer_norm.py +++ b/megatron/model/fused_layer_norm.py @@ -81,7 +81,7 @@ def forward(self, input): if self.no_persist_layer_norm: assert FusedLayerNormAffineFunction is not None, \ "FusedLayerNormAffineFunction is not available, please install apex from https://github.com/NVIDIA/apex" - return FusedLayerNormAffineFunction.apply(input, weight, self.bias, self.normalized_shape, self.eps) + return FusedLayerNormAffineFunction.apply(input, weight, self.bias, self.normalized_shape, self.eps, False) else: output = FastLayerNormFN.apply(input, weight, self.bias, self.eps) From b35d061efc6e88dc656c42ba328728679c2e3e02 Mon Sep 17 00:00:00 2001 From: Kirthi Shankar Sivamani Date: Tue, 19 Dec 2023 03:28:28 -0800 Subject: [PATCH 051/296] Fix TE usage for 1.* versions Signed-off-by: Kirthi Shankar Sivamani --- megatron/model/transformer.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/megatron/model/transformer.py b/megatron/model/transformer.py index 1b4011eebc..b74636a755 100644 --- a/megatron/model/transformer.py +++ b/megatron/model/transformer.py @@ -2,6 +2,7 @@ """Transformer.""" from contextlib import nullcontext +import os import math import numpy as np import torch @@ -1497,6 +1498,10 @@ def build_layer(layer_number): extra_transformer_engine_kwargs["activation"] = "swiglu" if args.swiglu else "gelu" if self.transformer_engine_v_0_11: extra_transformer_engine_kwargs["normalization"] = args.normalization + assert config.attention_softmax_in_fp32, "TransformerEngine only supports softmax compute in FP32." 
+ assert ( + bool(int(os.getenv("NVTE_APPLY_QK_LAYER_SCALING", "0"))) and args.fp16 == config.apply_query_key_layer_scaling + ), "Unsupported config for apply_query_key_layer_scaling in TransformerEngine." return transformer_engine.pytorch.TransformerLayer( config.hidden_size, config.ffn_hidden_size, @@ -1512,8 +1517,6 @@ def build_layer(layer_number): tp_group=mpu.get_tensor_model_parallel_group(), get_rng_state_tracker=tensor_parallel.get_cuda_rng_tracker, fuse_wgrad_accumulation=config.gradient_accumulation_fusion, - apply_query_key_layer_scaling=config.apply_query_key_layer_scaling, - attention_softmax_in_fp32=config.attention_softmax_in_fp32, seq_length=args.seq_length, micro_batch_size=args.micro_batch_size, sequence_parallel=config.sequence_parallel, From 26d1c04d10c11b256c871608714bbbfdc6e71ea6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20B=C5=82a=C5=BC?= Date: Tue, 19 Dec 2023 14:14:01 +0100 Subject: [PATCH 052/296] Revert "Revert "Improve GPT unit test"" This reverts commit 32add31787dfd0a047eb4e5bb9c5ad0034a0675f. --- .../models/test_gpt_model.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/tests/unit_tests/dist_checkpointing/models/test_gpt_model.py b/tests/unit_tests/dist_checkpointing/models/test_gpt_model.py index a910fec52a..efe5361630 100644 --- a/tests/unit_tests/dist_checkpointing/models/test_gpt_model.py +++ b/tests/unit_tests/dist_checkpointing/models/test_gpt_model.py @@ -71,6 +71,7 @@ def test_parallel_reconfiguration_e2e(self, tmp_path_dist_ckpt, src_tp_pp, dest_ Utils.initialize_model_parallel(*src_tp_pp) gpt_model_A = initialize_gpt_model(1) save(gpt_model_A.sharded_state_dict(), ckpt_dir_A) + regular_state_dict_A = gpt_model_A.state_dict() Utils.destroy_model_parallel() # Load checkpoint A with different TP/PP and save as checkpoint B @@ -79,14 +80,25 @@ def test_parallel_reconfiguration_e2e(self, tmp_path_dist_ckpt, src_tp_pp, dest_ state_dict = load(gpt_model_B.sharded_state_dict(), ckpt_dir_A) gpt_model_B.load_state_dict(state_dict) save(gpt_model_B.sharded_state_dict(), ckpt_dir_B) + regular_state_dict_B = gpt_model_A.state_dict() Utils.destroy_model_parallel() # Test both checkpoints are equal Utils.initialize_model_parallel(1, 1) - state_dict_A = load_plain_tensors(ckpt_dir_A) - state_dict_B = load_plain_tensors(ckpt_dir_B) - diffs = diff(state_dict_A, state_dict_B) + plain_state_dict_A = load_plain_tensors(ckpt_dir_A) + plain_state_dict_B = load_plain_tensors(ckpt_dir_B) + diffs = diff(plain_state_dict_A, plain_state_dict_B) + assert not any(map(bool, diffs)), diffs + + # Test both regular state dicts are equal, turning FP8 states to bytes first + regular_state_dict_A = {k: v.read() if k.endswith('_extra_state') else v + for k, v in regular_state_dict_A.items()} + regular_state_dict_B = {k: v.read() if k.endswith('_extra_state') else v + for k, v in regular_state_dict_B.items()} + diffs = diff(regular_state_dict_A, regular_state_dict_B) assert not any(map(bool, diffs)), diffs + Utils.destroy_model_parallel() + def test_state_dict_comparison(self, tmp_path_dist_ckpt): Utils.initialize_model_parallel(2, 4) From f9ea6636e337bcdd6bb8fee4bf8eba472afdf6e6 Mon Sep 17 00:00:00 2001 From: Kirthi Shankar Sivamani Date: Tue, 19 Dec 2023 05:32:07 -0800 Subject: [PATCH 053/296] Fix Signed-off-by: Kirthi Shankar Sivamani --- megatron/model/transformer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/megatron/model/transformer.py b/megatron/model/transformer.py index b74636a755..676e47dc78 100644 --- 
a/megatron/model/transformer.py +++ b/megatron/model/transformer.py @@ -1500,7 +1500,7 @@ def build_layer(layer_number): extra_transformer_engine_kwargs["normalization"] = args.normalization assert config.attention_softmax_in_fp32, "TransformerEngine only supports softmax compute in FP32." assert ( - bool(int(os.getenv("NVTE_APPLY_QK_LAYER_SCALING", "0"))) and args.fp16 == config.apply_query_key_layer_scaling + (bool(int(os.getenv("NVTE_APPLY_QK_LAYER_SCALING", "0"))) and args.fp16) == config.apply_query_key_layer_scaling ), "Unsupported config for apply_query_key_layer_scaling in TransformerEngine." return transformer_engine.pytorch.TransformerLayer( config.hidden_size, From efbfb5f05eaa44f7f493e0b11b0db2ee1e7dae0a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20B=C5=82a=C5=BC?= Date: Tue, 19 Dec 2023 15:29:05 +0100 Subject: [PATCH 054/296] Implement TE vs local compatibility --- megatron/core/dist_checkpointing/utils.py | 40 +++++++++++++++-- megatron/core/models/gpt/gpt_layer_specs.py | 4 ++ megatron/core/tensor_parallel/layers.py | 27 +++++++++++- .../core/transformer/transformer_layer.py | 18 +++++++- .../models/test_gpt_model.py | 44 ++++++++++--------- 5 files changed, 107 insertions(+), 26 deletions(-) diff --git a/megatron/core/dist_checkpointing/utils.py b/megatron/core/dist_checkpointing/utils.py index a5ee251e3b..651a83a2d8 100644 --- a/megatron/core/dist_checkpointing/utils.py +++ b/megatron/core/dist_checkpointing/utils.py @@ -1,6 +1,6 @@ # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. -from typing import Tuple +from typing import Tuple, Dict from .dict_utils import dict_list_map_inplace, extract_matching_values from .mapping import ( @@ -48,11 +48,45 @@ def add_prefix(t): def replace_prefix_for_sharding( sharded_state_dict: ShardedStateDict, old_prefix: str, new_prefix: str ): - def replace_prefix(x): + """ Replaces the given prefix in *all* sharded keys in a given state dict. + + Errors out if some key does not begin with a given prefix. + + Args: + sharded_state_dict (ShardedStateDict): sharded state dict to replace keys in + old_prefix (str): prefix to be replaced in each key + new_prefix (str): new prefix + + Returns: + None: state dict is modified in place + """ + def _replace_prefix(x): if isinstance(x, (ShardedTensor, ShardedTensorFactory, ShardedObject)): if not x.key.startswith(old_prefix): raise ValueError(f'Expected {x.key} to begin with prefix {old_prefix}') x.key = f'{new_prefix}{x.key[len(old_prefix):]}' # str.removeprefix in Python >= 3.9 return x - dict_list_map_inplace(replace_prefix, sharded_state_dict) + dict_list_map_inplace(_replace_prefix, sharded_state_dict) + + +def apply_prefix_mapping(sharded_state_dict: ShardedStateDict, prefix_map: Dict[str, str]): + """ Replaces prefixes *only in keys matching* with one of prefixes in the map. + + Args: + sharded_state_dict (ShardedStateDict): sharded state dict to replace keys in + prefix_map (Dict[str, str]): map of old->new prefixes. 
The first matching prefix for each key is used + + Returns: + None: state dict is modified in place + """ + def _replace_prefixes(x): + if not isinstance(x, (ShardedTensor, ShardedTensorFactory, ShardedObject)): + return x + for old_prefix, new_prefix in prefix_map.items(): + if x.key.startswith(old_prefix): + x.key = f'{new_prefix}{x.key[len(old_prefix):]}' # str.removeprefix in Python >= 3.9 + break + return x + + dict_list_map_inplace(_replace_prefixes, sharded_state_dict) diff --git a/megatron/core/models/gpt/gpt_layer_specs.py b/megatron/core/models/gpt/gpt_layer_specs.py index aace1590d8..1e536b668d 100755 --- a/megatron/core/models/gpt/gpt_layer_specs.py +++ b/megatron/core/models/gpt/gpt_layer_specs.py @@ -67,6 +67,10 @@ def get_gpt_layer_local_spec() -> ModuleSpec: ), ), mlp_bda=get_bias_dropout_add, + sharded_state_dict_keys_map={ + 'input_layernorm.': 'self_attention.linear_qkv.layer_norm_', + 'pre_mlp_layernorm.': 'mlp.linear_fc1.layer_norm_', + } ), ) diff --git a/megatron/core/tensor_parallel/layers.py b/megatron/core/tensor_parallel/layers.py index c61a837649..249ec2666d 100644 --- a/megatron/core/tensor_parallel/layers.py +++ b/megatron/core/tensor_parallel/layers.py @@ -3,10 +3,11 @@ # Parts of the code here are adapted from PyTorch # repo: https://github.com/pytorch/pytorch +import io import math import os import warnings -from typing import Callable, Optional, Tuple +from typing import Any, Callable, Optional, Tuple import torch import torch.nn.functional as F @@ -710,6 +711,9 @@ def __init__( self.sequence_parallel or self.expert_parallel ) + # Hook adding a default empty _extra_state for state dict + self._register_load_state_dict_pre_hook(lambda state_dict, prefix, *args, **kwargs: state_dict.setdefault(f'{prefix}_extra_state')) + def forward(self, input_: torch.Tensor, weight: Optional[torch.Tensor] = None): """Forward of ColumnParallelLinear @@ -782,6 +786,15 @@ def sharded_state_dict(self, prefix='', sharded_offsets=()): state_dict, prefix, {'weight': 0, 'bias': 0}, sharded_offsets ) + def set_extra_state(self, state: Any): + """ Extra state is ignored """ + + def get_extra_state(self) -> Any: + """ Keep compatibility with TE state dict. """ + state_serialized = io.BytesIO() + torch.save(None, state_serialized) + return state_serialized + class RowParallelLinear(torch.nn.Module): """Linear layer with row parallelism. @@ -904,6 +917,9 @@ def __init__( self.sequence_parallel or self.expert_parallel ) + # Hook adding a default empty _extra_state for state dict + self._register_load_state_dict_pre_hook(lambda state_dict, *args, **kwargs: print('%' * 100) or state_dict.setdefault('_extra_state')) + def forward(self, input_): """Forward of RowParallelLinear @@ -956,3 +972,12 @@ def sharded_state_dict(self, prefix='', sharded_offsets=()): return make_sharded_tensors_for_checkpoint( state_dict, prefix, {'weight': 1}, sharded_offsets ) + + def set_extra_state(self, state: Any): + """ Extra state is ignored """ + + def get_extra_state(self) -> Any: + """ Keep compatibility with TE state dict. """ + state_serialized = io.BytesIO() + torch.save(None, state_serialized) + return state_serialized diff --git a/megatron/core/transformer/transformer_layer.py b/megatron/core/transformer/transformer_layer.py index 8814b8c32c..4d6bae9c74 100644 --- a/megatron/core/transformer/transformer_layer.py +++ b/megatron/core/transformer/transformer_layer.py @@ -1,11 +1,13 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. 
-from dataclasses import dataclass -from typing import Union +from dataclasses import dataclass, field +from typing import Union, Dict import torch from megatron.core import parallel_state +from megatron.core.dist_checkpointing.mapping import ShardedStateDict +from megatron.core.dist_checkpointing.utils import apply_prefix_mapping from megatron.core.transformer.enums import AttnMaskType from megatron.core.transformer.identity_op import IdentityFuncOp, IdentityOp from megatron.core.transformer.module import MegatronModule @@ -28,6 +30,9 @@ class TransformerLayerSubmodules: mlp: Union[ModuleSpec, type] = IdentityOp mlp_bda: Union[ModuleSpec, type] = IdentityFuncOp + # Mapping for sharded tensor keys to be applied in `sharded_state_dict` method + sharded_state_dict_keys_map: Dict[str, str] = field(default_factory=dict) + class TransformerLayer(MegatronModule): """A single transformer layer. @@ -44,6 +49,7 @@ def __init__( hidden_dropout: float = None, ): super().__init__(config=config) + self.submodules_config = submodules self.layer_number = layer_number + self._get_layer_offset() self.hidden_dropout = config.hidden_dropout if hidden_dropout is None else hidden_dropout @@ -214,3 +220,11 @@ def forward( ) return output, context + + def sharded_state_dict(self, prefix: str = '', sharded_offsets: tuple = ()) -> ShardedStateDict: + sharded_state_dict = super().sharded_state_dict(prefix, sharded_offsets) + prefixed_map = {f'{prefix}{k}': f'{prefix}{v}' + for k, v in self.submodules_config.sharded_state_dict_keys_map.items()} + if prefixed_map: + apply_prefix_mapping(sharded_state_dict, prefixed_map) + return sharded_state_dict diff --git a/tests/unit_tests/dist_checkpointing/models/test_gpt_model.py b/tests/unit_tests/dist_checkpointing/models/test_gpt_model.py index efe5361630..e429454914 100644 --- a/tests/unit_tests/dist_checkpointing/models/test_gpt_model.py +++ b/tests/unit_tests/dist_checkpointing/models/test_gpt_model.py @@ -14,11 +14,11 @@ from tests.unit_tests.test_utilities import Utils from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed from megatron.core.models.gpt.gpt_layer_specs import \ - get_gpt_layer_with_transformer_engine_spec, get_gpt_layer_local_spec, \ + get_gpt_layer_with_transformer_engine_spec as gpt_te_spec, get_gpt_layer_local_spec as gpt_local_spec, \ gpt_layer_with_transformer_engine_spec_moe, gpt_layer_local_spec_moe -def initialize_gpt_model(seed, layer_spec_fn=get_gpt_layer_with_transformer_engine_spec, **config_kwargs): +def initialize_gpt_model(seed, layer_spec_fn=gpt_te_spec, **config_kwargs): torch.manual_seed(seed) model_parallel_cuda_manual_seed(seed) @@ -37,19 +37,19 @@ def initialize_gpt_model(seed, layer_spec_fn=get_gpt_layer_with_transformer_engi class TestGPTModel: - @pytest.mark.parametrize('layer_spec_fn', [ - get_gpt_layer_with_transformer_engine_spec, - get_gpt_layer_local_spec, - ]) - def test_sharded_state_dict_save_load(self, layer_spec_fn, tmp_path_dist_ckpt): + @pytest.mark.parametrize('src_layer_spec_fn', [gpt_te_spec, gpt_local_spec]) + @pytest.mark.parametrize('dst_layer_spec_fn', [gpt_te_spec, gpt_local_spec]) + def test_sharded_state_dict_save_load(self, tmp_path_dist_ckpt, + src_layer_spec_fn, dst_layer_spec_fn): Utils.initialize_model_parallel(2,4) - gpt_model = initialize_gpt_model(1, layer_spec_fn) + gpt_model = initialize_gpt_model(1, src_layer_spec_fn) with TempNamedDir(tmp_path_dist_ckpt / 'test_gpt_model') as ckpt_dir: # Save sharded_state_dict = gpt_model.sharded_state_dict() save(sharded_state_dict, 
ckpt_dir) # Load + gpt_model = initialize_gpt_model(2, dst_layer_spec_fn) sharded_state_dict = gpt_model.sharded_state_dict() state_dict = load(sharded_state_dict, ckpt_dir) gpt_model.load_state_dict(state_dict) @@ -57,26 +57,30 @@ def test_sharded_state_dict_save_load(self, layer_spec_fn, tmp_path_dist_ckpt): class TestGPTModelReconfiguration: - @pytest.mark.parametrize("src_tp_pp,dest_tp_pp", [ - ((2, 4), (4, 2)), - ((1, 8), (8, 1)), - ((2, 1), (1, 8)), - ((1, 1), (2, 2)), + @pytest.mark.parametrize("src_tp_pp,dest_tp_pp,src_layer_spec_fn,dst_layer_spec_fn", [ + ((2, 4), (4, 2), gpt_te_spec, gpt_te_spec), + ((1, 8), (8, 1), gpt_te_spec, gpt_te_spec), + ((2, 1), (1, 8), gpt_te_spec, gpt_te_spec), + ((1, 1), (2, 2), gpt_te_spec, gpt_te_spec), + ((2, 1), (1, 8), gpt_local_spec, gpt_local_spec), + ((1, 1), (2, 4), gpt_te_spec, gpt_local_spec), + ((1, 8), (2, 1), gpt_local_spec, gpt_te_spec), ]) - def test_parallel_reconfiguration_e2e(self, tmp_path_dist_ckpt, src_tp_pp, dest_tp_pp): + def test_parallel_reconfiguration_e2e(self, tmp_path_dist_ckpt, src_tp_pp, dest_tp_pp, + src_layer_spec_fn, dst_layer_spec_fn): """ Test model saving and loading with different TP/PP """ with TempNamedDir(tmp_path_dist_ckpt / 'test_gpt_model_reconfiguration_model_A') as ckpt_dir_A, \ TempNamedDir(tmp_path_dist_ckpt / 'test_gpt_model_reconfiguration_model_B') as ckpt_dir_B: # Save checkpoint A Utils.initialize_model_parallel(*src_tp_pp) - gpt_model_A = initialize_gpt_model(1) + gpt_model_A = initialize_gpt_model(1, src_layer_spec_fn) save(gpt_model_A.sharded_state_dict(), ckpt_dir_A) regular_state_dict_A = gpt_model_A.state_dict() Utils.destroy_model_parallel() # Load checkpoint A with different TP/PP and save as checkpoint B Utils.initialize_model_parallel(*dest_tp_pp) - gpt_model_B = initialize_gpt_model(2) + gpt_model_B = initialize_gpt_model(2, dst_layer_spec_fn) state_dict = load(gpt_model_B.sharded_state_dict(), ckpt_dir_A) gpt_model_B.load_state_dict(state_dict) save(gpt_model_B.sharded_state_dict(), ckpt_dir_B) @@ -91,10 +95,10 @@ def test_parallel_reconfiguration_e2e(self, tmp_path_dist_ckpt, src_tp_pp, dest_ assert not any(map(bool, diffs)), diffs # Test both regular state dicts are equal, turning FP8 states to bytes first - regular_state_dict_A = {k: v.read() if k.endswith('_extra_state') else v - for k, v in regular_state_dict_A.items()} - regular_state_dict_B = {k: v.read() if k.endswith('_extra_state') else v - for k, v in regular_state_dict_B.items()} + regular_state_dict_A = {k: v for k, v in regular_state_dict_A.items() + if not k.endswith('_extra_state')} + regular_state_dict_B = {k: v for k, v in regular_state_dict_B.items() + if not k.endswith('_extra_state')} diffs = diff(regular_state_dict_A, regular_state_dict_B) assert not any(map(bool, diffs)), diffs Utils.destroy_model_parallel() From 185319adec55e011572993832c973776773bde23 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20B=C5=82a=C5=BC?= Date: Tue, 19 Dec 2023 15:59:36 +0100 Subject: [PATCH 055/296] Fix formatting --- megatron/core/dist_checkpointing/utils.py | 8 ++++++-- megatron/core/models/gpt/gpt_layer_specs.py | 2 +- megatron/core/tensor_parallel/layers.py | 11 +++++++++-- megatron/core/transformer/transformer_layer.py | 8 +++++--- 4 files changed, 21 insertions(+), 8 deletions(-) diff --git a/megatron/core/dist_checkpointing/utils.py b/megatron/core/dist_checkpointing/utils.py index 651a83a2d8..a234a4ced6 100644 --- a/megatron/core/dist_checkpointing/utils.py +++ b/megatron/core/dist_checkpointing/utils.py @@ -1,6 +1,6 @@ 
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. -from typing import Tuple, Dict +from typing import Dict, Tuple from .dict_utils import dict_list_map_inplace, extract_matching_values from .mapping import ( @@ -60,6 +60,7 @@ def replace_prefix_for_sharding( Returns: None: state dict is modified in place """ + def _replace_prefix(x): if isinstance(x, (ShardedTensor, ShardedTensorFactory, ShardedObject)): if not x.key.startswith(old_prefix): @@ -80,12 +81,15 @@ def apply_prefix_mapping(sharded_state_dict: ShardedStateDict, prefix_map: Dict[ Returns: None: state dict is modified in place """ + def _replace_prefixes(x): if not isinstance(x, (ShardedTensor, ShardedTensorFactory, ShardedObject)): return x for old_prefix, new_prefix in prefix_map.items(): if x.key.startswith(old_prefix): - x.key = f'{new_prefix}{x.key[len(old_prefix):]}' # str.removeprefix in Python >= 3.9 + x.key = ( + f'{new_prefix}{x.key[len(old_prefix):]}' # str.removeprefix in Python >= 3.9 + ) break return x diff --git a/megatron/core/models/gpt/gpt_layer_specs.py b/megatron/core/models/gpt/gpt_layer_specs.py index 1e536b668d..2242c16256 100755 --- a/megatron/core/models/gpt/gpt_layer_specs.py +++ b/megatron/core/models/gpt/gpt_layer_specs.py @@ -70,7 +70,7 @@ def get_gpt_layer_local_spec() -> ModuleSpec: sharded_state_dict_keys_map={ 'input_layernorm.': 'self_attention.linear_qkv.layer_norm_', 'pre_mlp_layernorm.': 'mlp.linear_fc1.layer_norm_', - } + }, ), ) diff --git a/megatron/core/tensor_parallel/layers.py b/megatron/core/tensor_parallel/layers.py index 249ec2666d..69dbec6e4f 100644 --- a/megatron/core/tensor_parallel/layers.py +++ b/megatron/core/tensor_parallel/layers.py @@ -712,7 +712,11 @@ def __init__( ) # Hook adding a default empty _extra_state for state dict - self._register_load_state_dict_pre_hook(lambda state_dict, prefix, *args, **kwargs: state_dict.setdefault(f'{prefix}_extra_state')) + self._register_load_state_dict_pre_hook( + lambda state_dict, prefix, *args, **kwargs: state_dict.setdefault( + f'{prefix}_extra_state' + ) + ) def forward(self, input_: torch.Tensor, weight: Optional[torch.Tensor] = None): """Forward of ColumnParallelLinear @@ -918,7 +922,10 @@ def __init__( ) # Hook adding a default empty _extra_state for state dict - self._register_load_state_dict_pre_hook(lambda state_dict, *args, **kwargs: print('%' * 100) or state_dict.setdefault('_extra_state')) + self._register_load_state_dict_pre_hook( + lambda state_dict, *args, **kwargs: print('%' * 100) + or state_dict.setdefault('_extra_state') + ) def forward(self, input_): """Forward of RowParallelLinear diff --git a/megatron/core/transformer/transformer_layer.py b/megatron/core/transformer/transformer_layer.py index 4d6bae9c74..b37a983284 100644 --- a/megatron/core/transformer/transformer_layer.py +++ b/megatron/core/transformer/transformer_layer.py @@ -1,7 +1,7 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. 
from dataclasses import dataclass, field -from typing import Union, Dict +from typing import Dict, Union import torch @@ -223,8 +223,10 @@ def forward( def sharded_state_dict(self, prefix: str = '', sharded_offsets: tuple = ()) -> ShardedStateDict: sharded_state_dict = super().sharded_state_dict(prefix, sharded_offsets) - prefixed_map = {f'{prefix}{k}': f'{prefix}{v}' - for k, v in self.submodules_config.sharded_state_dict_keys_map.items()} + prefixed_map = { + f'{prefix}{k}': f'{prefix}{v}' + for k, v in self.submodules_config.sharded_state_dict_keys_map.items() + } if prefixed_map: apply_prefix_mapping(sharded_state_dict, prefixed_map) return sharded_state_dict From d0e3b238ac42d74cb6c634e8fa70d1b23cbc8ddd Mon Sep 17 00:00:00 2001 From: Jared Casper Date: Tue, 19 Dec 2023 14:56:29 -0800 Subject: [PATCH 056/296] fix TE test. --- .../test_scripts/gpt3/pretrain_gpt3_distributed_test.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh b/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh index 3cad97cc60..e3f9626707 100755 --- a/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh +++ b/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh @@ -42,6 +42,7 @@ if [[ $USE_TE -eq 1 ]]; then echo "Running with TransformerEngine ..." TRANSFORMER_IMPL=transformer_engine TRAINING_DTYPE=bf16 + ADDITIONAL_PARAMS+=" --attention-softmax-in-fp32" else echo "Running with local transformer implementation ..." fi From 6345860558c4b96c37bbda90c6d3d89d11e1cfa8 Mon Sep 17 00:00:00 2001 From: Alexandros Koumparoulis Date: Tue, 19 Dec 2023 15:29:11 -0800 Subject: [PATCH 057/296] Added test. --- .../transformer/test_spec_customization.py | 61 +++++++++++++++++++ 1 file changed, 61 insertions(+) diff --git a/tests/unit_tests/transformer/test_spec_customization.py b/tests/unit_tests/transformer/test_spec_customization.py index 03c0f1a7a6..425588b289 100755 --- a/tests/unit_tests/transformer/test_spec_customization.py +++ b/tests/unit_tests/transformer/test_spec_customization.py @@ -20,6 +20,7 @@ from megatron.core.transformer.spec_utils import ModuleSpec, build_module, import_module from megatron.core.transformer.transformer_config import TransformerConfig from megatron.core.transformer.transformer_layer import TransformerLayerSubmodules +from megatron.core.transformer.dot_product_attention import DotProductAttention from tests.unit_tests.test_utilities import Utils @@ -125,3 +126,63 @@ def test_build_module(self): # Check BiasDropoutAdd bda_op = build_module(self.bda_spec) assert id(bda_op) == id(get_bias_dropout_add) + + + + def test_sliding_window_attention(self): + config = TransformerConfig( + num_layers=2, + hidden_size=12, + num_attention_heads=4, + use_cpu_initialization=True, + window_size=[10,0] + ) + # Make sure DotProductAttention throws (swa unsupported). + threw = False + try: + attn = DotProductAttention( + config, + layer_number=1, + attn_mask_type=AttnMaskType.causal, + attention_type='self' + ) + except: + threw = True + finally: + assert threw, 'Expected DotProductAttention to throw exception for SWA' + + # Test TEDotProductAttention + attn = TEDotProductAttention( + config, + layer_number=1, + attn_mask_type=AttnMaskType.causal, + attention_type='self' + ) + # Make sure window-size is what we expect. 
+ assert attn.window_size == config.window_size + + # Single integer window-size unsupported, make sure it throws + threw = False + try: + config.window_size = 11 + attn = TEDotProductAttention( + config, + layer_number=1, + attn_mask_type=AttnMaskType.causal, + attention_type='self' + ) + except: + threw = True + finally: + assert threw, "Expected TEDotProductAttention to throw for integer window-size" + + # `None` makes this causal. + config.window_size = None + attn = TEDotProductAttention( + config, + layer_number=1, + attn_mask_type=AttnMaskType.causal, + attention_type='self' + ) + # Make sure it's causal. + assert attn.window_size == (-1, 0) \ No newline at end of file From 0db8c725b51a79ba4c760e719d8388da2f935c45 Mon Sep 17 00:00:00 2001 From: Shanmugam Ramasamy Date: Tue, 19 Dec 2023 15:31:22 -0800 Subject: [PATCH 058/296] Update to work even if there are more than one labels --- .gitlab-ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 5c7613a9aa..9442c94724 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -85,7 +85,7 @@ formatting: when: always - if: $CI_MERGE_REQUEST_APPROVED && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED when: always - - if: '$CI_MERGE_REQUEST_LABELS == "READY FOR REVIEW" && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED' + - if: '$CI_MERGE_REQUEST_LABELS == *"READY FOR REVIEW"* && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED' when: always allow_failure: false retry: 2 @@ -108,7 +108,7 @@ formatting: when: always - if: $CI_MERGE_REQUEST_APPROVED && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED when: always - - if: '$CI_MERGE_REQUEST_LABELS == "READY FOR REVIEW" && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED' + - if: '$CI_MERGE_REQUEST_LABELS == *"READY FOR REVIEW"* && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED' when: always allow_failure: false retry: 2 From b7f8a074ec91bbe9ede6acf7477c798830606f82 Mon Sep 17 00:00:00 2001 From: Shanmugam Ramasamy Date: Tue, 19 Dec 2023 15:34:28 -0800 Subject: [PATCH 059/296] Update .gitlab-ci.yml --- .gitlab-ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 9442c94724..5fe1588265 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -85,7 +85,7 @@ formatting: when: always - if: $CI_MERGE_REQUEST_APPROVED && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED when: always - - if: '$CI_MERGE_REQUEST_LABELS == *"READY FOR REVIEW"* && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED' + - if: '$CI_MERGE_REQUEST_LABELS =~ "READY FOR REVIEW" && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED' when: always allow_failure: false retry: 2 @@ -108,7 +108,7 @@ formatting: when: always - if: $CI_MERGE_REQUEST_APPROVED && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED when: always - - if: '$CI_MERGE_REQUEST_LABELS == *"READY FOR REVIEW"* && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED' + - if: '$CI_MERGE_REQUEST_LABELS =~ "READY FOR REVIEW" && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED' when: always allow_failure: false retry: 2 From ada74a74abe0b07e46992282f7249ea7e7fbd972 Mon Sep 17 00:00:00 2001 From: Selvaraj Anandaraj Date: Tue, 19 Dec 2023 15:35:54 -0800 Subject: [PATCH 060/296] Cleaned up based on MR suggestions Signed-off-by: Selvaraj Anandaraj --- megatron/core/datasets/gpt_dataset.py | 55 +++++++++++-------- megatron/core/datasets/megatron_dataset.py | 6 +- megatron/core/transformer/attention.py | 36 
+++++------- .../custom_layers/transformer_engine.py | 9 +-- 4 files changed, 55 insertions(+), 51 deletions(-) diff --git a/megatron/core/datasets/gpt_dataset.py b/megatron/core/datasets/gpt_dataset.py index 3b7357df71..52b7dfffa7 100644 --- a/megatron/core/datasets/gpt_dataset.py +++ b/megatron/core/datasets/gpt_dataset.py @@ -88,14 +88,14 @@ def __len__(self) -> int: """ return self.sample_index.shape[0] - 1 - def __getitem__(self, idx: int) -> Dict[str, Union[torch.Tensor, numpy.ndarray]]: + def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]: """Abstract method implementation Args: idx (int): The index into the dataset Returns: - Dict[str, numpy.ndarray]: The text ids wrapped in a dictionary + Dict[str, torch.Tensor]: The text ids wrapped in a dictionary """ text, _ = self._query_document_sample_shuffle_indices(idx) @@ -106,13 +106,20 @@ def __getitem__(self, idx: int) -> Dict[str, Union[torch.Tensor, numpy.ndarray]] tokens = tokens_[:-1].contiguous() attention_mask, loss_mask, position_ids = _get_ltor_masks_and_position_ids( - tokens, - self.config.eod_id, - self.config.reset_position_ids, - self.config.reset_attention_mask, - self.config.eod_mask_loss) + tokens, + self.config.eod_id, + self.config.reset_position_ids, + self.config.reset_attention_mask, + self.config.eod_mask_loss, + ) - return {"tokens": tokens,"labels": labels,"attention_mask": attention_mask,"loss_mask": loss_mask,"position_ids": position_ids} + return { + "tokens": tokens, + "labels": labels, + "attention_mask": attention_mask, + "loss_mask": loss_mask, + "position_ids": position_ids, + } @staticmethod def is_multimodal() -> bool: @@ -487,11 +494,14 @@ def _build_shuffle_index( return numpy.concatenate((shuffle_idx_first, shuffle_idx_last)) -def _get_ltor_masks_and_position_ids(data: torch.Tensor, - eod_token: int, - reset_position_ids: bool, - reset_attention_mask: bool, - eod_mask_loss: bool): + +def _get_ltor_masks_and_position_ids( + data: torch.Tensor, + eod_token: int, + reset_position_ids: bool, + reset_attention_mask: bool, + eod_mask_loss: bool, +): """Build masks and position id for left to right model. Args: @@ -506,18 +516,20 @@ def _get_ltor_masks_and_position_ids(data: torch.Tensor, eod_mask_loss (bool): Switch to enable the EOD mask loss Returns: - attention_mask (torch.Tensor) : Attention mask needed to be used for Attention + torch.Tensor : Attention mask needed to be used for Attention - loss_mask (torch.Tensor) : The mask used for loss value during training + torch.Tensor : The mask used for loss value during training - position_ids (torch.Tensor) : The position ID's of the token + torch.Tensor : The position ID's of the token """ # Extract batch size and sequence length. seq_length = data.numel() - attention_mask = torch.tril(torch.ones((seq_length, seq_length),device=data.device)).unsqueeze(0) + attention_mask = torch.tril(torch.ones((seq_length, seq_length), device=data.device)).unsqueeze( + 0 + ) # Loss mask. loss_mask = torch.ones(seq_length, dtype=torch.float, device=data.device) @@ -525,8 +537,7 @@ def _get_ltor_masks_and_position_ids(data: torch.Tensor, loss_mask[data == eod_token] = 0.0 # Position ids. - position_ids = torch.arange(seq_length, dtype=torch.long, - device=data.device) + position_ids = torch.arange(seq_length, dtype=torch.long, device=data.device) # We need to clone as the ids will be modifed based on batch index. 
if reset_position_ids: position_ids = position_ids.clone() @@ -545,13 +556,13 @@ def _get_ltor_masks_and_position_ids(data: torch.Tensor, i = eod_index[j] # Mask attention loss. if reset_attention_mask: - attention_mask[ 0, (i + 1):, :(i + 1)] = 0 + attention_mask[0, (i + 1) :, : (i + 1)] = 0 # Reset positions. if reset_position_ids: - position_ids[ (i + 1):] -= (i + 1 - prev_index) + position_ids[(i + 1) :] -= i + 1 - prev_index prev_index = i + 1 # Convert attention mask to binary: - attention_mask = (attention_mask < 0.5) + attention_mask = attention_mask < 0.5 return attention_mask, loss_mask, position_ids diff --git a/megatron/core/datasets/megatron_dataset.py b/megatron/core/datasets/megatron_dataset.py index 21170afa4e..e7fecb64fa 100644 --- a/megatron/core/datasets/megatron_dataset.py +++ b/megatron/core/datasets/megatron_dataset.py @@ -4,7 +4,7 @@ import json from abc import ABC, abstractmethod, abstractstaticmethod from collections import OrderedDict -from typing import Dict, List +from typing import Dict, List, Union import numpy import torch @@ -80,14 +80,14 @@ def __len__(self) -> int: pass @abstractmethod - def __getitem__(self, idx: int) -> Dict[str, numpy.ndarray]: + def __getitem__(self, idx: int) -> Dict[str, Union[torch.Tensor, numpy.ndarray]]: """Return from the dataset Args: idx (int): The index into the dataset Returns: - Dict[str, numpy.ndarray]: See abstract implementation + Dict[str, Union[torch.Tensor, numpy.ndarray]]: See abstract implementation """ pass diff --git a/megatron/core/transformer/attention.py b/megatron/core/transformer/attention.py index 1ea6aeee5f..35a4d263e3 100644 --- a/megatron/core/transformer/attention.py +++ b/megatron/core/transformer/attention.py @@ -8,12 +8,12 @@ from megatron.core import parallel_state, tensor_parallel from megatron.core.models.common.embeddings.rotary_pos_embedding import apply_rotary_pos_emb +from megatron.core.transformer.custom_layers.transformer_engine import SplitAlongDim from megatron.core.transformer.enums import AttnMaskType from megatron.core.transformer.identity_op import IdentityFuncOp, IdentityOp from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.spec_utils import ModuleSpec, build_module from megatron.core.transformer.transformer_config import TransformerConfig -from megatron.core.transformer.custom_layers.transformer_engine import SplitAlongDim from megatron.core.utils import divide from .enums import AttnMaskType @@ -327,32 +327,24 @@ def get_query_key_value_tensors(self, hidden_states, key_value_states=None): mixed_qkv = mixed_qkv.view(*new_tensor_shape) split_arg_list = [ - ( - self.num_attention_heads_per_partition - // self.num_query_groups_per_partition - * self.hidden_size_per_attention_head - ), - self.hidden_size_per_attention_head, - self.hidden_size_per_attention_head, - ] + ( + self.num_attention_heads_per_partition + // self.num_query_groups_per_partition + * self.hidden_size_per_attention_head + ), + self.hidden_size_per_attention_head, + self.hidden_size_per_attention_head, + ] if SplitAlongDim is not None: - # [sq, b, ng, (np/ng + 2) * hn] --> [sq, b, ng, np/ng * hn], [sq, b, ng, hn], [sq, b, ng, hn] - (query, key, value) = SplitAlongDim( - mixed_qkv, - 3, - split_arg_list, - ) + # [sq, b, ng, (np/ng + 2) * hn] --> [sq, b, ng, np/ng * hn], [sq, b, ng, hn], [sq, b, ng, hn] + (query, key, value) = SplitAlongDim(mixed_qkv, 3, split_arg_list,) else: - # [sq, b, ng, (np/ng + 2) * hn] --> [sq, b, ng, np/ng * hn], [sq, b, ng, hn], [sq, b, ng, hn] - (query, 
key, value) = torch.split( - mixed_qkv, - split_arg_list, - dim=3, - ) - + # [sq, b, ng, (np/ng + 2) * hn] --> [sq, b, ng, np/ng * hn], [sq, b, ng, hn], [sq, b, ng, hn] + (query, key, value) = torch.split(mixed_qkv, split_arg_list, dim=3,) + # [sq, b, ng, np/ng * hn] -> [sq, b, np, hn] query = query.reshape(query.size(0), query.size(1), -1, self.hidden_size_per_attention_head) diff --git a/megatron/core/transformer/custom_layers/transformer_engine.py b/megatron/core/transformer/custom_layers/transformer_engine.py index 8154ba6012..ea74936c6f 100644 --- a/megatron/core/transformer/custom_layers/transformer_engine.py +++ b/megatron/core/transformer/custom_layers/transformer_engine.py @@ -430,12 +430,13 @@ def forward( else: return super().forward(query, key, value, attention_mask) + try: - from transformer_engine.pytorch.attention import _SplitAlongDim - SplitAlongDim = _SplitAlongDim.apply + from transformer_engine.pytorch.attention import _SplitAlongDim -except ImportError: + SplitAlongDim = _SplitAlongDim.apply - SplitAlongDim = None +except ImportError: + SplitAlongDim = None From 1524ddcd58040c0890d677c855572cb0576205a0 Mon Sep 17 00:00:00 2001 From: Kirthi Shankar Sivamani Date: Tue, 19 Dec 2023 21:13:05 -0800 Subject: [PATCH 061/296] Fix previous apex versions Signed-off-by: Kirthi Shankar Sivamani --- megatron/model/fused_layer_norm.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/megatron/model/fused_layer_norm.py b/megatron/model/fused_layer_norm.py index bcb7bd7ecd..f076302e4e 100644 --- a/megatron/model/fused_layer_norm.py +++ b/megatron/model/fused_layer_norm.py @@ -19,9 +19,9 @@ HAVE_PERSIST_LAYER_NORM = False try: - from apex.normalization.fused_layer_norm import FusedLayerNormAffineFunction + from apex.normalization.fused_layer_norm import fused_layer_norm_affine except: - FusedLayerNormAffineFunction = None + fused_layer_norm_affine = None global fused_layer_norm_cuda fused_layer_norm_cuda = None @@ -79,9 +79,9 @@ def forward(self, input): weight = self.weight + 1 if self.apply_layernorm_1p else self.weight if self.no_persist_layer_norm: - assert FusedLayerNormAffineFunction is not None, \ - "FusedLayerNormAffineFunction is not available, please install apex from https://github.com/NVIDIA/apex" - return FusedLayerNormAffineFunction.apply(input, weight, self.bias, self.normalized_shape, self.eps, False) + assert fused_layer_norm_affine is not None, \ + "fused_layer_norm_affine is not available, please install apex from https://github.com/NVIDIA/apex" + return fused_layer_norm_affine(input, weight, self.bias, self.normalized_shape, eps=self.eps) else: output = FastLayerNormFN.apply(input, weight, self.bias, self.eps) From f5b2e481e3ecaf3915d389d4f2e00fee04e84810 Mon Sep 17 00:00:00 2001 From: Selvaraj Anandaraj Date: Tue, 19 Dec 2023 22:34:57 -0800 Subject: [PATCH 062/296] CI cleanup bug fix Signed-off-by: Selvaraj Anandaraj --- megatron/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/megatron/utils.py b/megatron/utils.py index d9cc0a4f57..3a38b2b610 100644 --- a/megatron/utils.py +++ b/megatron/utils.py @@ -315,7 +315,7 @@ def _broadcast(item): tokens=torch.empty((args.micro_batch_size,args.seq_length), dtype = torch.int64 , device = torch.cuda.current_device()) labels=torch.empty((args.micro_batch_size,args.seq_length), dtype = torch.int64 , device = torch.cuda.current_device()) loss_mask=torch.empty((args.micro_batch_size,args.seq_length), dtype = torch.float32 , device = torch.cuda.current_device()) - 
attention_mask=torch.empty((args.micro_batch_size,args.micro_batch_size,args.seq_length,args.seq_length), dtype = torch.bool , device = torch.cuda.current_device()) + attention_mask=torch.empty((args.micro_batch_size,1,args.seq_length,args.seq_length), dtype = torch.bool , device = torch.cuda.current_device()) position_ids=torch.empty((args.micro_batch_size,args.seq_length), dtype = torch.int64 , device = torch.cuda.current_device()) if args.pipeline_model_parallel_size == 1: From 9b7b81e62149cbdd5df381e966818e0cf7bd147d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20B=C5=82a=C5=BC?= Date: Wed, 20 Dec 2023 10:31:55 +0100 Subject: [PATCH 063/296] Adjust extra_state to older TE versions --- megatron/core/tensor_parallel/layers.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/megatron/core/tensor_parallel/layers.py b/megatron/core/tensor_parallel/layers.py index 69dbec6e4f..abd07ef563 100644 --- a/megatron/core/tensor_parallel/layers.py +++ b/megatron/core/tensor_parallel/layers.py @@ -793,11 +793,9 @@ def sharded_state_dict(self, prefix='', sharded_offsets=()): def set_extra_state(self, state: Any): """ Extra state is ignored """ - def get_extra_state(self) -> Any: + def get_extra_state(self) -> None: """ Keep compatibility with TE state dict. """ - state_serialized = io.BytesIO() - torch.save(None, state_serialized) - return state_serialized + return None class RowParallelLinear(torch.nn.Module): @@ -983,8 +981,6 @@ def sharded_state_dict(self, prefix='', sharded_offsets=()): def set_extra_state(self, state: Any): """ Extra state is ignored """ - def get_extra_state(self) -> Any: + def get_extra_state(self) -> None: """ Keep compatibility with TE state dict. """ - state_serialized = io.BytesIO() - torch.save(None, state_serialized) - return state_serialized + return None From 2edd7ddd23e8db8341e20d52aa7d5bbdb700e64b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20B=C5=82a=C5=BC?= Date: Wed, 20 Dec 2023 10:59:00 +0100 Subject: [PATCH 064/296] Fix spec test --- tests/unit_tests/transformer/test_spec_customization.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/unit_tests/transformer/test_spec_customization.py b/tests/unit_tests/transformer/test_spec_customization.py index 03c0f1a7a6..e0569d6905 100755 --- a/tests/unit_tests/transformer/test_spec_customization.py +++ b/tests/unit_tests/transformer/test_spec_customization.py @@ -73,6 +73,7 @@ def test_build_module(self): noop_transformer_layer = [ build_module(getattr(self.transformer_layer_spec, field.name)) for field in fields(self.transformer_layer_spec) + if field.name != 'sharded_state_dict_keys_map' ] x = random_input From e6223f205b23a9cdcacb36e90db904606d710f0c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20B=C5=82a=C5=BC?= Date: Wed, 20 Dec 2023 11:44:08 +0100 Subject: [PATCH 065/296] Remove print --- megatron/core/tensor_parallel/layers.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/megatron/core/tensor_parallel/layers.py b/megatron/core/tensor_parallel/layers.py index abd07ef563..7128a95c05 100644 --- a/megatron/core/tensor_parallel/layers.py +++ b/megatron/core/tensor_parallel/layers.py @@ -921,8 +921,7 @@ def __init__( # Hook adding a default empty _extra_state for state dict self._register_load_state_dict_pre_hook( - lambda state_dict, *args, **kwargs: print('%' * 100) - or state_dict.setdefault('_extra_state') + lambda state_dict, *args, **kwargs: state_dict.setdefault('_extra_state') ) def forward(self, input_): From 
782d32c7c8a2f61a76dbf753e44f362cc42ef41b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20B=C5=82a=C5=BC?= Date: Wed, 20 Dec 2023 11:51:11 +0100 Subject: [PATCH 066/296] Add mapping for MoE and T5 --- megatron/core/models/T5/t5_spec.py | 8 ++++++++ megatron/core/models/gpt/gpt_layer_specs.py | 1 + 2 files changed, 9 insertions(+) diff --git a/megatron/core/models/T5/t5_spec.py b/megatron/core/models/T5/t5_spec.py index 60f33dbd98..f32f1193f0 100644 --- a/megatron/core/models/T5/t5_spec.py +++ b/megatron/core/models/T5/t5_spec.py @@ -116,6 +116,10 @@ def encoder_model_with_local_spec() -> ModuleSpec: ), ), mlp_bda=get_bias_dropout_add, + sharded_state_dict_keys_map={ + 'input_layernorm.': 'self_attention.linear_qkv.layer_norm_', + 'pre_mlp_layernorm.': 'mlp.linear_fc1.layer_norm_', + }, ), ) @@ -156,6 +160,10 @@ def decoder_model_with_local_spec() -> ModuleSpec: ), ), mlp_bda=get_bias_dropout_add, + sharded_state_dict_keys_map={ + 'input_layernorm.': 'self_attention.linear_qkv.layer_norm_', + 'pre_mlp_layernorm.': 'mlp.linear_fc1.layer_norm_', + }, ), ) diff --git a/megatron/core/models/gpt/gpt_layer_specs.py b/megatron/core/models/gpt/gpt_layer_specs.py index 2242c16256..a5f41991dc 100755 --- a/megatron/core/models/gpt/gpt_layer_specs.py +++ b/megatron/core/models/gpt/gpt_layer_specs.py @@ -123,5 +123,6 @@ def get_gpt_layer_local_spec() -> ModuleSpec: ), ), mlp_bda=get_bias_dropout_add, + sharded_state_dict_keys_map={'input_layernorm.': 'self_attention.linear_qkv.layer_norm_',}, ), ) From 5977c42b09ceae5ab4f7268bee72ad238925eb7c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20B=C5=82a=C5=BC?= Date: Wed, 20 Dec 2023 17:53:57 +0100 Subject: [PATCH 067/296] Fix extra_state hook --- megatron/core/tensor_parallel/layers.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/megatron/core/tensor_parallel/layers.py b/megatron/core/tensor_parallel/layers.py index 7128a95c05..9c41bf2d63 100644 --- a/megatron/core/tensor_parallel/layers.py +++ b/megatron/core/tensor_parallel/layers.py @@ -921,7 +921,9 @@ def __init__( # Hook adding a default empty _extra_state for state dict self._register_load_state_dict_pre_hook( - lambda state_dict, *args, **kwargs: state_dict.setdefault('_extra_state') + lambda state_dict, prefix, *args, **kwargs: state_dict.setdefault( + f'{prefix}_extra_state' + ) ) def forward(self, input_): From 2a019a13c0f8c70656821f455218f795f2a08c93 Mon Sep 17 00:00:00 2001 From: Joel Lamy-Poirier Date: Wed, 20 Dec 2023 19:36:07 -0500 Subject: [PATCH 068/296] Update megatron version --- megatron/arguments.py | 32 ++++- megatron/checkpointing.py | 2 +- megatron/core/datasets/gpt_dataset.py | 173 ++++++++++++++++++++++- megatron/data/data_samplers.py | 5 +- megatron/global_vars.py | 12 +- megatron/initialize.py | 71 +++++++++- megatron/model/language_model.py | 35 ++++- megatron/model/transformer.py | 54 ++++++- megatron/optimizer/optimizer.py | 34 ++++- megatron/tensor_logging.py | 135 ++++++++++++++++++ megatron/text_generation/tokenization.py | 2 + megatron/tokenizer/gpt2_tokenization.py | 2 +- megatron/tokenizer/tokenizer.py | 65 ++++++++- megatron/training.py | 36 +++-- pretrain_gpt.py | 15 +- tools/preprocess_data.py | 41 +++++- 16 files changed, 679 insertions(+), 35 deletions(-) create mode 100644 megatron/tensor_logging.py diff --git a/megatron/arguments.py b/megatron/arguments.py index fff5bbeb5b..f43edac7dd 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -340,6 +340,9 @@ def validate_args(args, defaults={}): if 
args.sequence_parallel: args.async_tensor_model_parallel_allreduce = False + if not args.use_flash_attn: + assert args.window_size is None + if os.environ.get('CUDA_DEVICE_MAX_CONNECTIONS') != "1": if args.sequence_parallel: raise RuntimeError( @@ -611,6 +614,8 @@ def _add_network_size_args(parser): 'Deprecated: use --position-embedding-type') group.add_argument('--rotary-percent', type=float, default=1.0, help='Percent of rotary dimension to use, default 100%%') + group.add_argument('--rotary-theta', type=int, default=10000, + help='Theta/frequency value for rotary positional embeddings') group.add_argument('--rotary-seq-len-interpolation-factor', type=int, default=None, help='Sequence length interpolation factor for rotary embeddings.') group.add_argument('--no-position-embedding', @@ -650,6 +655,7 @@ def _add_network_size_args(parser): help='Number of Experts in Switch Transformer (None means no Switch)') group.add_argument('--untie-embeddings-and-output-weights', action='store_true', help='Untie embeddings and output weights.'), + group.add_argument('--window-size', type=int, default=None) return parser @@ -716,12 +722,21 @@ def _add_logging_args(parser): group.add_argument('--log-world-size-to-tensorboard', action='store_true', help='Enable world size logging to tensorboard.') - group.add_argument('--wandb-project', type=str, default='', + group.add_argument('--wandb-project', '--wandb-project-name', type=str, default='', help='The wandb project name. Ignore wandb by default.') group.add_argument('--wandb-exp-name', type=str, default='', help='The wandb experiment name.') group.add_argument('--wandb-save-dir', type=str, default='', help='Path to save the wandb results locally.') + group.add_argument('--wandb-group-name', type=str, default="default") + group.add_argument('--wandb-entity-name', type=str, default=None, + help="Name of wandb entity for reporting") + group.add_argument('--debug_layer_outputs', '--debug-layer-outputs', type=int, default=0) + group.add_argument('--debug_layer_gradients', '--debug-layer-gradients', type=int, default=0) + group.add_argument('--debug_all_param_gradients', '--debug-all-param-gradients', type=int, default=0) + group.add_argument('--debug_param_init', '--debug-param-init', type=int, default=0) + group.add_argument('--debug_param_update', '--debug-param-update', type=int, default=0) + group.add_argument('--debug_transformer', '--debug-transformer', type=int, default=0) return parser @@ -1211,6 +1226,8 @@ def _add_data_args(parser): help='Path to the vocab file.') group.add_argument('--merge-file', type=str, default=None, help='Path to the BPE merge file.') + group.add_argument('--tokenizer-file', type=str, default=None, + help='Path to the tokenizer.json file. Used for the TokenizerFromFile[...] tokenizers') group.add_argument('--vocab-extra-ids', type=int, default=0, help='Number of additional vocabulary tokens. 
' 'They are used for span masking in the T5 model') @@ -1238,6 +1255,9 @@ def _add_data_args(parser): choices=['BertWordPieceLowerCase', 'BertWordPieceCase', 'GPT2BPETokenizer', + 'GPT2BPETokenizerWithFIM', + 'TokenizerFromFile', + 'TokenizerFromFileWithFIM', 'SentencePieceTokenizer', 'GPTSentencePieceTokenizer', 'Llama2Tokenizer', @@ -1252,7 +1272,15 @@ def _add_data_args(parser): 'end-of-document token.') group.add_argument('--eod-mask-loss', action='store_true', help='Mask loss for the end of document tokens.') - + group.add_argument('--fim-rate', type=float, default=0., + help='Probability to convert a training sample into a "Fill-in-the-Middle" format. Must be between 0 and 1.') + group.add_argument('--fim-spm-rate', type=float, default=0.5, + help='Probability that the a FIM sample uses the SPM format over the PSM format. ' + 'At 1, exclusively train with SPM. At 0, exclusively train with PSM') + group.add_argument('--fim-split-sample', type=str, default=None, + help='String around which to split the sample for FIM. If None (default), FIM is applied on the sample-level') + group.add_argument('--fragment-fim-rate', type=float, default=0.5, + help='Rate of FIM on each fragment when fim_split_sample is not None.') return parser diff --git a/megatron/checkpointing.py b/megatron/checkpointing.py index a3303229a0..572d551d41 100644 --- a/megatron/checkpointing.py +++ b/megatron/checkpointing.py @@ -56,7 +56,7 @@ def _compare(arg_name, old_arg_name=None, default=None): _compare('hidden_size') _compare('num_attention_heads') _compare('add_position_embedding', default=True) - if args.vocab_file: + if args.vocab_file or args.tokenizer_file: _compare('max_position_embeddings') _compare('make_vocab_size_divisible_by') _compare('padded_vocab_size') diff --git a/megatron/core/datasets/gpt_dataset.py b/megatron/core/datasets/gpt_dataset.py index c52fe3abfc..e84965b93c 100644 --- a/megatron/core/datasets/gpt_dataset.py +++ b/megatron/core/datasets/gpt_dataset.py @@ -9,10 +9,12 @@ import numpy import torch +from megatron import get_args, get_tokenizer from megatron.core.datasets.blended_megatron_dataset_config import BlendedMegatronDatasetConfig from megatron.core.datasets.indexed_dataset import MMapIndexedDataset from megatron.core.datasets.megatron_dataset import MegatronDataset from megatron.core.datasets.utils import Split, log_single_rank +from megatron.tokenizer.tokenizer import FIM_MIDDLE, FIM_PAD, FIM_PREFIX, FIM_SUFFIX logger = logging.getLogger(__name__) @@ -50,6 +52,19 @@ def __init__( config: GPTDatasetConfig, ) -> None: super().__init__(indexed_dataset, indexed_indices, num_samples, index_split, config) + self.args = get_args() + self.tokenizer = get_tokenizer() + self.np_rng = numpy.random.RandomState(seed=self.config.random_seed) # rng state for FIM + + self.fim_rate = self.args.fim_rate + self.fim_spm_rate = self.args.fim_spm_rate + self.fragment_fim_rate = self.args.fragment_fim_rate + self.fim_split_sample = self.tokenizer.vocab[self.args.fim_split_sample] if self.args.fim_split_sample is not None else None + + try: + self.suffix_tok_id, self.prefix_tok_id, self.middle_tok_id, self.pad_tok_id = (self.tokenizer.special_tokens[tok] for tok in [FIM_SUFFIX, FIM_PREFIX, FIM_MIDDLE, FIM_PAD]) + except KeyError: + self.suffix_tok_id, self.prefix_tok_id, self.middle_tok_id, self.pad_tok_id = (self.tokenizer.vocab[tok] for tok in [FIM_SUFFIX, FIM_PREFIX, FIM_MIDDLE, FIM_PAD]) def _finalize(self) -> None: """Abstract method implementation @@ -150,8 +165,98 @@ def 
_query_document_sample_shuffle_indices( self.indexed_dataset.get(self.document_index[i], offset=offset, length=length) ) + sample=numpy.concatenate(sample_parts) + + # Code from: https://github.com/EleutherAI/gpt-neox/blob/FIM-clean/megatron/data/gpt2_dataset.py#L109 + # TODO(Hailey): can merge the code below this line with code above this line. + # TODO(Hailey), cont: above already iterates through loop, so just add the permuting in there? + sample = numpy.array(sample, dtype=numpy.int64) + sample_len = sample.shape[0] + # # print(sample, sample.shape) + # # do FIM here, if enabled + # TODO: Do we handle the following point from FIM paper? + # To transform data in the character space for context-level FIM, the tokenized documents have to be decoded back into strings before FIM augmentation. Depending on the vocabulary, some care has to be given to ensure decoding does not introduce any spurious characters into training. For example, utf-8 characters are encoded as multiple tokens with a BPE vocabulary; they can result in fragments from chunking and fail to decode. To prevent unforeseen errors midway through training, we encourage checking for these fragments at the beginning or end of a context and removing them. + eod = self.tokenizer.eod + segment_breaks = numpy.argwhere(sample == eod) # split sample by document + + if self.fim_rate == 0: + return sample.astype(numpy.int64) + + def fim_permute_sequence(sequence, rate): + return permute( + sequence, + self.np_rng, + rate, + self.fim_spm_rate, + self.tokenizer, + truncate_or_pad=False, + suffix_tok_id=self.suffix_tok_id, + prefix_tok_id=self.prefix_tok_id, + middle_tok_id=self.middle_tok_id, + pad_tok_id=self.pad_tok_id, + ) + + def fim_split_and_permute_sequence(sequence): + """ + If self.fim_split_sample is not None, split the sequence. + Then apply FIM on the fragments, or the whole sequence if self.fim_split_sample is None. + """ + if self.fim_split_sample is None: + return fim_permute_sequence(sequence, self.fim_rate) + # fim_split_sample is set: split the sample on this token and permute each fragment separately. + # Typically, if each sample is a repository, then we split again on the file level. + # Each fragment is a file, and we permute the files. + fragment_breaks = numpy.argwhere(sequence == self.fim_split_sample) + if fragment_breaks.shape == (0, 1): + # no split token in this sample + return fim_permute_sequence(sequence, self.fim_rate) + if not self.np_rng.binomial(1, self.fim_rate): + # don't do FIM preproc + return sequence + # Do FIM on each fragment + curr_start_position = 0 + new_samples = [] + for loc in numpy.nditer(fragment_breaks): + if loc - curr_start_position > 0: + permuted = fim_permute_sequence(sequence[curr_start_position:loc], self.fragment_fim_rate) + new_samples += [permuted, [self.fim_split_sample]] + curr_start_position = loc + 1 # Jump over the split token + # Permute the segment after the last split token + permuted = fim_permute_sequence(sequence[curr_start_position:], self.fragment_fim_rate) + new_samples.append(permuted) + return numpy.concatenate(new_samples) + + if segment_breaks.shape != (0, 1): # then there is an EOD token in this example + curr_start_position = 0 + new_samples = [] + for loc in numpy.nditer(segment_breaks): + # Only permute non-empty segments. 
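The EOD loop continues below; the split-on-sentinel pattern it relies on (numpy.argwhere over the token array, then walking the hit positions with numpy.nditer) can be exercised on its own as follows, with a toy sentinel id standing in for the real EOD token:

    import numpy

    EOD = 0                                            # toy sentinel id
    sample = numpy.array([5, 7, EOD, 9, 3, 4, EOD, 8], dtype=numpy.int64)

    segment_breaks = numpy.argwhere(sample == EOD)     # shape (num_hits, 1)
    segments, start = [], 0
    for loc in numpy.nditer(segment_breaks):
        if loc - start > 0:                            # skip empty segments
            segments.append(sample[start:loc])
        start = int(loc) + 1                           # jump over the sentinel
    segments.append(sample[start:])                    # tail after the last sentinel

    assert [s.tolist() for s in segments] == [[5, 7], [9, 3, 4], [8]]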
+ if loc - curr_start_position > 0: + # permute {prefix, suffix, middle} or {suffix, prefix, middle} + permuted = fim_split_and_permute_sequence(sample[curr_start_position:loc]) + new_samples += [permuted, [eod]] + + curr_start_position = loc + 1 # jump over the EOD token + # Permute the segment after the last EOD + permuted = fim_split_and_permute_sequence(sample[curr_start_position:]) + new_samples.append(permuted) + + sample = numpy.concatenate(new_samples) + else: + sample = fim_split_and_permute_sequence(sample) + + # Truncate or pad sequence to max-length + diff = sample.shape[0] - sample_len + if diff > 0: # too long + sample = sample[:sample_len] + elif diff < 0: # too short + sample = numpy.concatenate([sample, numpy.full((-1 * diff), self.pad_tok_id)]) + + assert sample.shape[0] == sample_len + # end FIM-specific code + return ( - numpy.array(numpy.concatenate(sample_parts), dtype=numpy.int64), + numpy.array(sample, dtype=numpy.int64), numpy.array(document_ids, dtype=numpy.int64), ) @@ -456,3 +561,69 @@ def _build_shuffle_index( numpy_random_state.shuffle(shuffle_idx_last) return numpy.concatenate((shuffle_idx_first, shuffle_idx_last)) + + + +# From https://github.com/EleutherAI/gpt-neox/blob/FIM-clean/megatron/data/gpt2_dataset.py#L339 +def permute(sample, np_rng, fim_rate, fim_spm_rate, tokenizer, truncate_or_pad=True, + suffix_tok_id=None, prefix_tok_id=None, middle_tok_id=None, pad_tok_id=None): + """ + Take in a sample (np array w/ size (0,chunklength)) and perform a FIM transformation on it. + Maintain the same sample length (if transform creates a few extra tokens, drop them). + """ + if np_rng.binomial(1, fim_rate): # sample bernoulli dist + + contents = tokenizer.detokenize(sample) + + try: + # A boundary can be =0 (prefix will be empty) + # a boundary can be =len(contents) (suffix will be empty) + # The two boundaries can be equal (middle will be empty) + boundaries = list(np_rng.randint(low=0, high=len(contents) + 1, size=2)) + boundaries.sort() + except ValueError as e: + print(len(contents), contents) + print(e) + raise e + + prefix = contents[:boundaries[0]] + middle = contents[boundaries[0]:boundaries[1]] + suffix = contents[boundaries[1]:] + + prefix = numpy.array([*tokenizer.tokenize(prefix)], dtype=numpy.int64) + middle = numpy.array([*tokenizer.tokenize(middle)], dtype=numpy.int64) + suffix = numpy.array([*tokenizer.tokenize(suffix)], dtype=numpy.int64) + + # here we truncate each given segment to fit the same length as it was before + # A consequence is that we never reach the end of a file? + # we should rather truncate at the context-level + if truncate_or_pad: + # need to make same length as the input. Take the 3 sentinel tokens into account + new_length = suffix.shape[0] + prefix.shape[0] + middle.shape[0] + 3 + diff = new_length - sample.shape[0] + if diff > 0: # too long + if suffix.shape[0] <= diff: # if there's no space to truncate the suffix: stop and report it. 
atm i should have stopped this from happening + return sample, np_rng + suffix = suffix[:suffix.shape[0] - diff] + elif diff < 0: # too short + suffix = numpy.concatenate([suffix, numpy.full((-1 * diff), pad_tok_id)]) + + if np_rng.binomial(1, fim_spm_rate): + # SPM (variant 2 from FIM paper) + new_sample = numpy.concatenate([ + [prefix_tok_id, suffix_tok_id], suffix, + [middle_tok_id], prefix, middle + ]) + else: + # PSM + new_sample = numpy.concatenate([ + [prefix_tok_id], prefix, + [suffix_tok_id], suffix, + [middle_tok_id], middle + ]) + + else: + # don't do FIM preproc + new_sample = sample + + return new_sample \ No newline at end of file diff --git a/megatron/data/data_samplers.py b/megatron/data/data_samplers.py index 8dec2c1922..5435485ea6 100644 --- a/megatron/data/data_samplers.py +++ b/megatron/data/data_samplers.py @@ -11,7 +11,7 @@ from megatron.core import mpu -def build_pretraining_data_loader(dataset, consumed_samples): +def build_pretraining_data_loader(dataset, consumed_samples, num_workers=None): """Buld dataloader given an input dataset.""" if dataset is None: @@ -39,10 +39,11 @@ def build_pretraining_data_loader(dataset, consumed_samples): raise Exception('{} dataloader type is not supported.'.format( args.dataloader_type)) + num_workers = args.num_workers if num_workers is None else num_workers # Torch dataloader. return torch.utils.data.DataLoader(dataset, batch_sampler=batch_sampler, - num_workers=args.num_workers, + num_workers=num_workers, pin_memory=True) class MegatronPretrainingSampler: diff --git a/megatron/global_vars.py b/megatron/global_vars.py index b1b4b043e8..63f5284e6c 100644 --- a/megatron/global_vars.py +++ b/megatron/global_vars.py @@ -166,8 +166,13 @@ def _set_wandb_writer(args): _ensure_var_is_not_initialized(_GLOBAL_WANDB_WRITER, 'wandb writer') if getattr(args, 'wandb_project', '') and args.rank == (args.world_size - 1): + # Wandb login from file + api_key_path = os.environ.get("WANDB_API_KEY_PATH") + if api_key_path: + os.environ["WANDB_API_KEY"]=open(api_key_path,"r").read().strip() if args.wandb_exp_name == '': - raise ValueError("Please specify the wandb experiment name!") + name=os.path.basename(args.save) + print(f"Setting wandb experiment name to \"{name}\"") import wandb if args.wandb_save_dir: @@ -179,7 +184,10 @@ def _set_wandb_writer(args): 'dir': save_dir, 'name': args.wandb_exp_name, 'project': args.wandb_project, - 'config': vars(args)} + 'entity': args.wandb_entity_name, + 'group': args.wandb_group_name, + 'config': vars(args), + } os.makedirs(wandb_kwargs['dir'], exist_ok=True) wandb.init(**wandb_kwargs) _GLOBAL_WANDB_WRITER = wandb diff --git a/megatron/initialize.py b/megatron/initialize.py index fb7866ab03..8d48de90d2 100644 --- a/megatron/initialize.py +++ b/megatron/initialize.py @@ -2,8 +2,12 @@ """Megatron initialization.""" +import logging +import logging.config +import math import random import os +import sys import time import numpy as np @@ -58,6 +62,7 @@ def finish_mpu_init(): args = get_args() # Pytorch distributed. _initialize_distributed() + _configure_logging() # Random seeds for reproducibility. 
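Returning to the FIM change in gpt_dataset.py earlier in this patch: to make the two output layouts of the permute() helper concrete, here is the reassembly for a toy document, with placeholder sentinel ids standing in for the FIM prefix/middle/suffix tokens:

    import numpy

    PRE, MID, SUF = 100, 101, 102                      # hypothetical sentinel ids
    prefix, middle, suffix = [11, 12], [13], [14, 15]  # toy token fragments

    # PSM: document order, with the middle moved behind its sentinel at the end.
    psm = numpy.concatenate([[PRE], prefix, [SUF], suffix, [MID], middle])
    # SPM (variant 2 in the FIM paper): the suffix is presented before the prefix.
    spm = numpy.concatenate([[PRE, SUF], suffix, [MID], prefix, middle])

    assert psm.tolist() == [PRE, 11, 12, SUF, 14, 15, MID, 13]
    assert spm.tolist() == [PRE, SUF, 14, 15, MID, 11, 12, 13]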
if args.rank == 0: @@ -95,6 +100,58 @@ def finish_mpu_init(): return None + +def _configure_logging(): + args=get_args() + rank = torch.distributed.get_rank() + if args.structured_logs: + world_size=torch.distributed.get_world_size() + rank_str = str(rank).zfill(math.ceil(math.log10(world_size))) + format = f"%(asctime)s {'' if world_size==1 else f'[Rank {rank_str}] '}%(message)s" + else: + format=None + + logging_config = { + "version": 1, + "disable_existing_loggers": False, + "formatters": { + "default": { + "format": format, + "use_colors": True, + } + }, + "handlers": { + "default": { + "level": "INFO", + "formatter": "default", + "class": "logging.StreamHandler", + "stream": "ext://sys.stdout", + } + }, + "loggers": {"default": {"level": "DEBUG", "handlers": ["default"]}}, + "root": {"handlers": ["default"], "level": "INFO"}, + } + if args.structured_logs_dir is not None: + log_dir=args.structured_logs_dir + os.makedirs(log_dir, exist_ok=True) + logging_config["handlers"]["file"] = { + "level": "INFO", + "formatter": "default", + "class": "logging.FileHandler", + "filename": os.path.join(log_dir, f"logs_rank_{rank}.txt"), + } + logging_config["root"]["handlers"].append("file") + logging_config["loggers"]["default"]["handlers"].append("file") + logging.config.dictConfig(logging_config) + + if args.structured_logs: + # Add these methods so that stdout can be redirected to logging. + logging.write = lambda msg: logging.info(msg) if msg != '\n' else None + logging.flush = lambda : None + + sys.stdout=logging + sys.stderr=logging + def _compile_dependencies(): args = get_args() @@ -115,6 +172,15 @@ def _compile_dependencies(): flush=True, ) + try: + # Skip the rest if the kernels are unnecessary or already available (ex. from apex) + if args.use_flash_attn or args.masked_softmax_fusion: + import scaled_upper_triang_masked_softmax_cuda + import scaled_masked_softmax_cuda + return + except ImportError: + pass + # ================== # Load fused kernels # ================== @@ -318,7 +384,10 @@ def set_jit_fusion_options(): torch._C._jit_override_can_fuse_on_cpu(True) torch._C._jit_override_can_fuse_on_gpu(True) - _warmup_jit_function() + # Prevent the function from messing up the random state. + tensor_parallel.get_cuda_rng_tracker().add("Warmup jit", 0) + with tensor_parallel.get_cuda_rng_tracker().fork("Warmup jit"): + _warmup_jit_function() def _warmup_jit_function(): diff --git a/megatron/model/language_model.py b/megatron/model/language_model.py index 69bfa2e801..c2ddb5bda5 100644 --- a/megatron/model/language_model.py +++ b/megatron/model/language_model.py @@ -378,7 +378,8 @@ def __init__(self, self.rotary_pos_emb = RotaryEmbedding( rotary_dim, args.rotary_percent, - seq_len_interpolation_factor=args.rotary_seq_len_interpolation_factor + seq_len_interpolation_factor=args.rotary_seq_len_interpolation_factor, + rotary_base=args.rotary_theta, ) # Encoder (usually set to True, False if part of an encoder-decoder @@ -425,6 +426,38 @@ def __init__(self, bias=False) # Setting bias to False always to keep it consistent with embedding tying that also does not have a bias. self._output_layer_key = 'output_layer' + for i, (key, value) in enumerate(self.named_parameters()): + # Store standardized parameter names for debug purposes. + args=get_args() + key=key.split(".") + if key[0]=="encoder": + # Remove "encoder" prefix. + key=key[1:] + if key[0]=="layers": + # Shift layer index. 
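Stepping back to initialize.py above: _configure_logging() routes every rank's stdout and stderr through a dictConfig with a rank-prefixed format and an optional per-rank log file. A trimmed-down sketch of the same configuration, with function and path names of my own choosing:

    import logging.config, math, os

    def configure_rank_logging(rank: int, world_size: int, log_dir: str | None = None):
        rank_str = str(rank).zfill(math.ceil(math.log10(world_size))) if world_size > 1 else ""
        fmt = f"%(asctime)s {f'[Rank {rank_str}] ' if world_size > 1 else ''}%(message)s"
        config = {
            "version": 1,
            "disable_existing_loggers": False,
            "formatters": {"default": {"format": fmt}},
            "handlers": {"default": {"level": "INFO", "formatter": "default",
                                     "class": "logging.StreamHandler",
                                     "stream": "ext://sys.stdout"}},
            "root": {"handlers": ["default"], "level": "INFO"},
        }
        if log_dir is not None:                        # add a per-rank file handler
            os.makedirs(log_dir, exist_ok=True)
            config["handlers"]["file"] = {"level": "INFO", "formatter": "default",
                                          "class": "logging.FileHandler",
                                          "filename": os.path.join(log_dir, f"logs_rank_{rank}.txt")}
            config["root"]["handlers"].append("file")
        logging.config.dictConfig(config)

    configure_rank_logging(rank=0, world_size=8)
    logging.getLogger().info("hello from rank 0")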
+ key[1]=str(int(key[1])+1) + if key[2]=="input_layernorm": + key[2]="layer_norm_1" + elif key[2]=="post_attention_layernorm": + key[2]="layer_norm_2" + elif key[2]=="self_attention": + key[2]="self_attn" + elif key[3]=="dense_h_to_4h": + key[3]="layer_1" + elif key[3]=="dense_4h_to_h": + key[3]="layer_2" + else: + assert key[0]=="final_layernorm" + key=["layers",str(args.encoder_num_layers+1)]+key + elif key[0]=="embedding": + key=["layers", "0", "_".join(key[1:])] + else: + # Not implemented but still ok + pass + + value.param_name = ".".join(key) + value.param_idx = i + def set_input_tensor(self, input_tensor): """ See megatron.model.transformer.set_input_tensor()""" diff --git a/megatron/model/transformer.py b/megatron/model/transformer.py index 1b4011eebc..518ec1a41a 100644 --- a/megatron/model/transformer.py +++ b/megatron/model/transformer.py @@ -24,6 +24,7 @@ get_data_parallel_rng_tracker_name ) from megatron.core.parallel_state import get_tensor_model_parallel_group, get_tensor_and_expert_parallel_group +from megatron.tensor_logging import log_tensor try: from einops import rearrange @@ -441,6 +442,7 @@ class FlashSelfAttention(torch.nn.Module): def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0, device=None, dtype=None): super().__init__() + self.window_size=get_args().window_size assert flash_attn_unpadded_func is not None, ('Please install FlashAttention first, ' 'e.g., with pip install flash-attn') assert rearrange is not None, 'Please install einops first, e.g., with pip install einops' @@ -480,10 +482,14 @@ def forward(self, q, k, v): device=q.device) dropout_p = 0 + # Older versions don't support the argument. + window_arg={} if self.window_size is None else {"window_size":(self.window_size - 1, 0)} + output = flash_attn_unpadded_func( q, k, v, cu_seqlens_q, cu_seqlens_k, seqlen_q, seqlen_k, dropout_p, - softmax_scale=self.softmax_scale, causal=is_causal + softmax_scale=self.softmax_scale, causal=is_causal, + **window_arg, ) output = rearrange(output, '(b s) ... -> b s ...', b=batch_size) @@ -507,6 +513,7 @@ def __init__(self, config, layer_number, self.attn_mask_type = attn_mask_type self.params_dtype = config.params_dtype self.sequence_parallel = config.sequence_parallel + self._debug_transformer=args.debug_transformer self.group_query_attention = args.group_query_attention self.num_query_groups = args.num_query_groups @@ -805,6 +812,12 @@ def forward(self, hidden_states, attention_mask, context_layer = self.core_attention_flash(q, k, v) context_layer = rearrange(context_layer, 'b s h d -> s b (h d)').contiguous() + if self._debug_transformer: + log_tensor(f"Layer {self.layer_number} Query", query_layer, level=self._debug_transformer) + log_tensor(f"Layer {self.layer_number} Key", key_layer, level=self._debug_transformer) + log_tensor(f"Layer {self.layer_number} Value", value_layer, level=self._debug_transformer) + log_tensor(f"Layer {self.layer_number} Attn context", context_layer, level=self._debug_transformer) + # ================= # Output. 
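A quick aside on the renaming loop added to language_model.py above: it attaches a standardized param_name to every parameter for the debug logs, treating the embedding as layer 0, shifting transformer layer indices up by one, and shortening a few submodule names. Some before/after pairs as I read the loop, assuming the nesting shown and a 24-layer encoder (the exact strings are illustrative, not taken from the patch):

    expected = {
        "embedding.word_embeddings.weight":              "layers.0.word_embeddings_weight",
        "encoder.layers.0.input_layernorm.weight":       "layers.1.layer_norm_1.weight",
        "encoder.layers.0.self_attention.dense.weight":  "layers.1.self_attn.dense.weight",
        "encoder.layers.0.mlp.dense_h_to_4h.weight":     "layers.1.mlp.layer_1.weight",
        "encoder.final_layernorm.weight":                "layers.25.final_layernorm.weight",
    }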
[sq, b, h] # ================= @@ -861,6 +874,7 @@ def __init__(self, config, super(ParallelTransformerLayer, self).__init__() self.layer_number = layer_number self.layer_type = layer_type + self._debug_transformer=args.debug_transformer self.apply_residual_connection_post_norm \ = config.apply_residual_connection_post_layernorm @@ -1164,6 +1178,13 @@ def forward(self, hidden_states, attention_mask, inference_params=inference_params, rotary_pos_emb=rotary_pos_emb) + if self._debug_transformer: + log_tensor( + f"Layer {self.layer_number} Attn output", + hidden_states + attention_bias, + level=self._debug_transformer + ) + # Residual connection. if self.apply_residual_connection_post_norm: residual = norm_output @@ -1197,6 +1218,9 @@ def forward(self, hidden_states, attention_mask, training=self.training) norm_input = residual + self.drop_path(out) + if self._debug_transformer: + log_tensor(f"Layer {self.layer_number} Attn residual", norm_input, level=self._debug_transformer) + # Layer norm post the self attention. norm_output = self.post_attention_norm(norm_input) @@ -1236,6 +1260,13 @@ def forward(self, hidden_states, attention_mask, # MLP. mlp_output, mlp_bias = self.mlp(norm_output) + if self._debug_transformer: + log_tensor( + f"Layer {self.layer_number} MLP output", + mlp_output + mlp_bias, + level=self._debug_transformer + ) + # Second residual connection. if self.apply_residual_connection_post_norm: residual = norm_output @@ -1689,6 +1720,15 @@ def forward(self, hidden_states, attention_mask, rotary_pos_emb=None): # hidden_states: [s, b, h] + args = get_args() + if args.debug_layer_outputs: + log_tensor(f"Global layer 0 fw: Embedding output", hidden_states.transpose(0, 1), level=args.debug_layer_outputs) + if args.debug_layer_gradients: + hidden_states.register_hook(lambda grad: log_tensor( + f"Global layer 1 bw: Embedding output", + grad.transpose(0, 1), level=args.debug_layer_gradients + )) + # Checks. if inference_params: assert self.recompute_granularity is None, \ @@ -1774,6 +1814,18 @@ def forward(self, hidden_states, attention_mask, attention_mask, **forward_kwargs) + if args.debug_layer_outputs: + log_tensor( + f"Global layer {index + 1} fw: Transformer layer {index+1} output", + hidden_states.transpose(0, 1), level=args.debug_layer_outputs + ) + if args.debug_layer_gradients: + fn=lambda idx:(lambda grad: log_tensor( + f"Global layer {idx + 2} bw: Transformer layer {idx+1} output", + grad.transpose(0, 1), level=args.debug_layer_gradients + )) + hidden_states.register_hook(fn(index)) + # First Retro decoder layer returns both hidden_states # and retriever_output. Make retriever_output available # to subsequence Retro layers. 
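Two window-size conventions meet in these changes: the legacy --window-size argument is a single integer that FlashSelfAttention converts to the (left, right) tuple (window_size - 1, 0), i.e. the current token plus window_size - 1 tokens to its left, while the unit test in PATCH 057 checks that TEDotProductAttention reports (-1, 0), unlimited left context, when no window is configured. A small helper capturing that conversion (the helper name is mine, not part of the patch):

    def to_left_right_window(window_size: int | None) -> tuple[int, int]:
        """Map 'attend to the last N tokens' onto the (left, right) tuple convention;
        None means plain causal attention with unlimited left context."""
        if window_size is None:
            return (-1, 0)
        return (window_size - 1, 0)

    assert to_left_right_window(None) == (-1, 0)
    assert to_left_right_window(4096) == (4095, 0)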
diff --git a/megatron/optimizer/optimizer.py b/megatron/optimizer/optimizer.py index 47d2001dbb..0c32b21ca9 100644 --- a/megatron/optimizer/optimizer.py +++ b/megatron/optimizer/optimizer.py @@ -9,10 +9,11 @@ import torch from megatron import get_timers -from megatron import print_rank_0 +from megatron import print_rank_0, get_args from megatron.core import mpu, tensor_parallel from megatron.model import Float16Module from megatron.model.module import param_is_not_shared +from megatron.tensor_logging import log_tensor, log_generator from .clip_grads import clip_grad_norm_fp32, count_zeros_fp32 @@ -68,6 +69,14 @@ def __init__(self, optimizer, clip_grad, self.check_for_nan_in_grad = check_for_nan_in_grad self.params_have_main_grad = params_have_main_grad + args=get_args() + if args.debug_param_init: + log_generator("PP init generator after reset") + with tensor_parallel.get_cuda_rng_tracker().fork(): + log_generator("TP init generator after reset") + for param in sorted(self.get_parameters(), key=lambda p: p.param_idx): + log_tensor(f"Global param: {param.param_name}", param, level=args.debug_param_init) + # 'models' are retained for access to the contiguous grad buffers. # (see distributed optimizer) self.models = models @@ -313,6 +322,14 @@ def step(self, args, timers): if found_inf_flag: return False, None, None + if args.debug_all_param_gradients: + params=[] + for param in self.get_parameters(): + if param.grad is not None: + params.append(param) + for param in sorted(params, key=lambda p: p.param_idx): + log_tensor(f"Global gradient: {param.param_name}", param.grad, level=args.debug_all_param_gradients) + # Clip the main gradients. timers('optimizer-clip-main-grad', log_level=1).start( barrier=args.barrier_with_L1_time) @@ -341,6 +358,10 @@ def step(self, args, timers): self._copy_main_params_to_model_params() timers('optimizer-copy-main-to-model-params').stop() + if args.debug_param_update: + for param in sorted(self.get_parameters(), key=lambda p: p.param_idx): + log_tensor(f"Global param: {param.param_name}", param, level=args.debug_param_init) + # Successful update. return True, grad_norm, num_zeros_in_grad @@ -415,6 +436,9 @@ def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, param) if hasattr(param, 'shared'): main_param.shared = param.shared + if hasattr(param, 'param_name'): + main_param.param_name=param.param_name + main_param.param_idx=param.param_idx # Replace the optimizer params with the new fp32 copy. param_group['params'][i] = main_param @@ -606,6 +630,14 @@ def step(self, args, timers): timers('optimizer-copy-to-main-grad').stop() + if args.debug_all_param_gradients: + params=[] + for param in self.get_parameters(): + if param.grad is not None: + params.append(param) + for param in sorted(params, key=lambda p: p.param_idx): + log_tensor(f"Global gradient: {param.param_name}", param.grad, level=args.debug_all_param_gradients) + # Clip gradients. timers('optimizer-clip-main-grad', log_level=1).start( barrier=args.barrier_with_L1_time) diff --git a/megatron/tensor_logging.py b/megatron/tensor_logging.py new file mode 100644 index 0000000000..b5d2d399a8 --- /dev/null +++ b/megatron/tensor_logging.py @@ -0,0 +1,135 @@ +import contextlib +import logging +import math +import sys +import time +import traceback +import typing + +import torch + + +logger = logging.getLogger(__name__) + + +# A global buffer for holding logged tensor stats. 
+_tensor_log_stats: list | None = None + + +@contextlib.contextmanager +def run_and_log_exception(): + try: + yield + except Exception: + logger.critical(traceback.format_exc()) + # TODO: This is needed because ngc crops the logs. + time.sleep(10) + sys.exit(1) + + +def reset_tensor_stats_logging(enabled=True): + global _tensor_log_stats + _tensor_log_stats = [] if enabled else None + + +def get_logged_tensor_stats(): + return _tensor_log_stats + + +def format_number(x, prec=4, exp_threshold=3): + digits = 0 if x == 0 else math.log10(abs(x)) + if math.isfinite(digits) and -exp_threshold < math.floor(digits) < prec + exp_threshold: + return f"{x:.{prec}f}" + else: + return f"{x:.{prec-1}e}" + + +def log_tensor( + name: str, + tensor: torch.Tensor, + *, + scale: float = 1.0, + level: int = 2, + storage: bool = False, + log_fn: typing.Callable[[str], typing.Any] | None = logger.info, +): + if level < 1: + return + save_stats = _tensor_log_stats is not None + shape = tuple(tensor.shape) + _, dtype = str(tensor.dtype).split("torch.") + txt = [ + (None, name, 50), + ("shape", shape, 18), + ("dtype", dtype, 9), + ("device", tensor.device, 7), + ] + stats = dict( + name=name, + shape=list(shape), + dtype=dtype, + device=str(tensor.device), + ) + if level >= 2 and tensor.device.type != "meta": + v_float = tensor.float() + + stats.update( + mu=v_float.mean().item(), + std=v_float.std().item(), + stride=tensor.stride(), + min=v_float.min().item(), + max=v_float.max().item(), + ) + txt.extend( + [ + ("mu", format_number(stats["mu"] * scale), 10), + ("std", format_number(stats["std"] * scale), 10), + ("stride", stats["stride"], 20), + ] + ) + if storage: + storage = tensor.untyped_storage() + storage_float = torch.tensor(storage, dtype=tensor.dtype, device=tensor.device).float() + stats.update( + storage=str(storage.data_ptr())[-8:], + storage_size=storage.size(), + storage_mu=storage_float.mean().item() * scale, + storage_std=storage_float.std().item() * scale, + ) + txt.extend( + [ + (f"storage", stats["storage"], 8), + (f"s size", f"{stats['storage_size']:,d}", 12), + (f"s mu", format_number(stats["storage_mu"]), 10), + (f"s std", format_number(stats["storage_std"]), 10), + ] + ) + if level >= 3: + target_samples = 2 ** (level - 3) + step = max(tensor.numel() // target_samples, 1) + while step > 1 and any(step % s == 0 and s > 1 for s in shape): + step -= 1 + samples = tensor.flatten()[: target_samples * step : step].cpu() + stats.update(samples=samples, step=step) + samples = [format_number(x) for x in samples.tolist()] + samples = ",".join(f"{sample:10s}" for sample in samples) + txt.append((f"{f'samples (step={step})':21s}", f" ({samples})", target_samples * 11 + 3)) + out, len_ = "", 0 + if save_stats: + _tensor_log_stats.append(stats) + for prefix, val, col_len in txt: + prefix = "" if prefix is None else f" {prefix}=" + len_ += col_len + len(prefix) + 1 + out = f"{f'{out}{prefix}{str(val)}':{len_}s}" + return log_fn(out) + + +def log_generator( + name, + generator: torch.Tensor | torch.Generator | None = None, + log_fn: typing.Callable[[str], typing.Any] | None = logger.info, +): + if generator is None: + generator = torch.cuda.default_generators[torch.cuda.current_device()] + tensor = generator.get_state() if isinstance(generator, torch.Generator) else generator + return log_fn(f"{name} {tensor.flatten()[-16:].tolist()}") diff --git a/megatron/text_generation/tokenization.py b/megatron/text_generation/tokenization.py index 441add74f9..2e1627c726 100644 --- 
a/megatron/text_generation/tokenization.py +++ b/megatron/text_generation/tokenization.py @@ -36,6 +36,8 @@ def detokenize_generations(tokens_gpu_tensor, word = tokenizer.decoder[token] elif args.tokenizer_type == 'NullTokenizer': word = str(token) + elif args.tokenizer_type in ['TokenizerFromFile', 'TokenizerFromFileWithFIM']: + word = tokenizer.detokenize([token]) else: word = tokenizer.tokenizer.decoder[token] word = bytearray( diff --git a/megatron/tokenizer/gpt2_tokenization.py b/megatron/tokenizer/gpt2_tokenization.py index 3f37e44908..ff89504351 100644 --- a/megatron/tokenizer/gpt2_tokenization.py +++ b/megatron/tokenizer/gpt2_tokenization.py @@ -281,7 +281,7 @@ def encode(self, text): return self.convert_tokens_to_ids(self.tokenize(text)) def decode(self, tokens): - text = ''.join([self.decoder[token] for token in tokens]) + text = ''.join(self.convert_ids_to_tokens(tokens)) text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors) return text diff --git a/megatron/tokenizer/tokenizer.py b/megatron/tokenizer/tokenizer.py index 98643343c5..6817e39f6d 100644 --- a/megatron/tokenizer/tokenizer.py +++ b/megatron/tokenizer/tokenizer.py @@ -5,9 +5,16 @@ from abc import ABC from abc import abstractmethod +from transformers import PreTrainedTokenizerFast from .bert_tokenization import FullTokenizer as FullBertTokenizer from .gpt2_tokenization import GPT2Tokenizer +FIM_PREFIX = "" +FIM_MIDDLE = "" +FIM_SUFFIX = "" +FIM_PAD = "" +EOD = "<|endoftext|>" + def build_tokenizer(args): """Initialize tokenizer.""" if args.rank == 0: @@ -29,6 +36,16 @@ def build_tokenizer(args): assert args.vocab_file is not None assert args.merge_file is not None tokenizer = _GPT2BPETokenizer(args.vocab_file, args.merge_file) + elif args.tokenizer_type == 'GPT2BPETokenizerWithFIM': + assert args.vocab_file is not None + assert args.merge_file is not None + tokenizer = _GPT2BPETokenizer(args.vocab_file, args.merge_file, special_tokens=[FIM_PREFIX, FIM_MIDDLE, FIM_SUFFIX, FIM_PAD]) + elif args.tokenizer_type == "TokenizerFromFile": + assert args.tokenizer_file is not None + tokenizer = _HFTokenizer(args.tokenizer_file, special_tokens=[EOD]) + elif args.tokenizer_type == "TokenizerFromFileWithFIM": + assert args.tokenizer_file is not None + tokenizer = _HFTokenizer(args.tokenizer_file, special_tokens=[EOD, FIM_PREFIX, FIM_MIDDLE, FIM_SUFFIX, FIM_PAD]) elif args.tokenizer_type == 'SentencePieceTokenizer': assert args.tokenizer_model is not None tokenizer = _SentencePieceTokenizer(args.tokenizer_model, vocab_extra_ids=args.vocab_extra_ids) @@ -47,6 +64,8 @@ def build_tokenizer(args): # Add vocab size (if not already set from a checkpoint). if getattr(args, "padded_vocab_size", None) is None: + # TODO: For most tokenizers, vocab_size does not take special_tokens into account. + # Might cause an issue if vocab_size + len(special_tokens) exceeds padded_vocab_size? 
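The TODO above can be checked with a little arithmetic: _vocab_size_with_padding rounds the tokenizer vocabulary up to a multiple of make-vocab-size-divisible-by times the tensor-parallel size, so the only head-room for special tokens not counted in vocab_size is the padding slack. A sketch of that rounding (the rule matches long-standing Megatron behaviour; the concrete numbers are illustrative):

    def padded_vocab_size(orig_vocab_size: int, divisible_by: int = 128, tp_size: int = 1) -> int:
        """Round the vocabulary up so every tensor-parallel shard has the same width."""
        multiple = divisible_by * tp_size
        return ((orig_vocab_size + multiple - 1) // multiple) * multiple

    base = 49152                                          # e.g. a 48k BPE vocabulary
    num_special = 5                                       # <|endoftext|> plus four FIM sentinels
    print(padded_vocab_size(base, 128, 4))                # 49152: already a multiple, zero slack
    print(padded_vocab_size(base + num_special, 128, 4))  # 49664: counting the sentinels adds a block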
args.padded_vocab_size = _vocab_size_with_padding(tokenizer.vocab_size, args) @@ -261,13 +280,15 @@ def additional_special_tokens(self, value): class _GPT2BPETokenizer(AbstractTokenizer): """Original GPT2 BPE tokenizer.""" - def __init__(self, vocab_file, merge_file): + def __init__(self, vocab_file, merge_file, special_tokens=None): name = 'GPT2 BPE' super().__init__(name) + special_tokens = special_tokens if special_tokens is not None else [] self.tokenizer = GPT2Tokenizer(vocab_file, merge_file, errors='replace', - special_tokens=[], max_len=None) + special_tokens=special_tokens, max_len=None) self.eod_id = self.tokenizer.encoder['<|endoftext|>'] + self.special_tokens = self.tokenizer.special_tokens @property def vocab_size(self): @@ -292,6 +313,46 @@ def eod(self): return self.eod_id +class _HFTokenizer(AbstractTokenizer): + """HF Tokenizer.""" + + def __init__(self, tokenizer_file, special_tokens=None): + name = 'HF Tokenizer' + super().__init__(name) + + special_tokens = special_tokens if special_tokens is not None else [] + self.tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file, errors='replace', max_len=None) + self.tokenizer.add_special_tokens({'additional_special_tokens': special_tokens}) + self.eod_id = self.tokenizer.vocab[EOD] + # Token->id mapping for additional special-tokens + self.special_tokens = { + tok: self.tokenizer.vocab[tok] for tok in special_tokens + } + self._inv_vocab = {v: k for k, v in self.tokenizer.vocab.items()} + + @property + def vocab_size(self): + return len(self.tokenizer) + + @property + def vocab(self): + return self.tokenizer.vocab + + @property + def inv_vocab(self): + return self._inv_vocab + + def tokenize(self, text): + return self.tokenizer.encode(text) + + def detokenize(self, token_ids): + return self.tokenizer.decode(token_ids) + + @property + def eod(self): + return self.eod_id + + class _SentencePieceTokenizer(AbstractTokenizer): """SentencePieceTokenizer-Megatron wrapper""" diff --git a/megatron/training.py b/megatron/training.py index d18d3c3b91..2a05f4f240 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -5,6 +5,7 @@ import gc from datetime import datetime import math +import os import logging import sys from .log_handler import CustomHandler @@ -42,13 +43,14 @@ from megatron.initialize import set_jit_fusion_options from megatron.optimizer_param_scheduler import OptimizerParamScheduler from megatron.utils import check_adlr_autoresume_termination +from megatron.utils import get_tflops from megatron.utils import unwrap_model from megatron.data.data_samplers import build_pretraining_data_loader from megatron.utils import calc_params_l2_norm from megatron.core.pipeline_parallel import get_forward_backward_func from megatron.utils import report_memory from megatron.model.vision.knn_monitor import compute_feature_bank - +from megatron.tensor_logging import get_logged_tensor_stats, reset_tensor_stats_logging def print_datetime(string): """Note that this call will sync across all ranks.""" @@ -135,6 +137,9 @@ def pretrain(train_valid_test_dataset_provider, args = get_args() timers = get_timers() + if args.structured_logs_dir is not None: + reset_tensor_stats_logging() + # Model, optimizer, and learning rate. 
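The _HFTokenizer wrapper above is a thin shim over transformers.PreTrainedTokenizerFast. A minimal usage sketch; the path is a placeholder and the sentinel strings are the usual bigcode-style names, which may differ from the literals the patch itself registers:

    from transformers import PreTrainedTokenizerFast

    SPECIALS = ["<|endoftext|>", "<fim_prefix>", "<fim_middle>", "<fim_suffix>", "<fim_pad>"]

    tok = PreTrainedTokenizerFast(tokenizer_file="/path/to/tokenizer.json")   # placeholder path
    tok.add_special_tokens({"additional_special_tokens": SPECIALS})

    eod_id = tok.vocab["<|endoftext|>"]                   # id used for document separation
    ids = tok.encode("def hello():\n    pass")
    assert "hello" in tok.decode(ids)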
timers('model-and-optimizer-setup', log_level=0).start(barrier=True) model, optimizer, opt_param_scheduler = setup_model_and_optimizer( @@ -170,6 +175,8 @@ def pretrain(train_valid_test_dataset_provider, timers.log(['model-and-optimizer-setup', 'train/valid/test-data-iterators-setup'], barrier=True) + save_tensor_logs("init") + if not args.skip_train: print_rank_0('training ...') @@ -208,6 +215,14 @@ def pretrain(train_valid_test_dataset_provider, verbose=True, write_to_tensorboard=not args.skip_train) +def save_tensor_logs(step:str|int): + args=get_args() + if args.structured_logs_dir is not None and (tensor_log_stats:=get_logged_tensor_stats()): + tensor_logs_dir = os.path.join(args.structured_logs_dir, f"runs/0/artifacts/{torch.distributed.get_rank()}") + os.makedirs(tensor_logs_dir, exist_ok=True) + torch.save(tensor_log_stats, os.path.join(tensor_logs_dir, f"tensor_logs_{step}.pt")) + reset_tensor_stats_logging() + def update_train_iters(args): # For iteration-based training, we don't need to do anything @@ -652,6 +667,7 @@ def training_log(loss_dict, total_loss_dict, learning_rate, iteration, elapsed_time_per_iteration = elapsed_time / total_iterations throughput = num_floating_point_operations(args, batch_size) / ( elapsed_time_per_iteration * 10**12 * args.world_size) + tokens_per_sec_per_gpu = (args.seq_length * batch_size) / args.world_size / elapsed_time_per_iteration if args.log_timers_to_tensorboard: if writer: writer.add_scalar('iteration-time', @@ -665,13 +681,15 @@ def training_log(loss_dict, total_loss_dict, learning_rate, iteration, args.consumed_train_samples) log_string += ' elapsed time per iteration (ms): {:.1f} |'.format( elapsed_time_per_iteration * 1000.0) - if args.log_throughput: - log_string += f' throughput per GPU (TFLOP/s/GPU): {throughput:.1f} |' - if args.log_timers_to_tensorboard: - if writer: - writer.add_scalar('throughput', throughput, iteration) - if wandb_writer: - wandb_writer.log({'throughput': throughput}, iteration) + log_string += f' throughput per GPU (TFLOP/s/GPU): {throughput:.1f} |' + if args.log_timers_to_tensorboard: + if writer: + writer.add_scalar('throughput', throughput, iteration) + if wandb_writer: + wandb_writer.log({'throughput': throughput}, iteration) + log_string += ' tokens-per-second-per-gpu: {:.2f} |'.format(tokens_per_sec_per_gpu) + if wandb_writer: + wandb_writer.log({'tokens_per_sec_per_gpu': tokens_per_sec_per_gpu}, iteration) log_string += ' learning rate: {:.3E} |'.format(learning_rate) log_string += ' global batch size: {:5d} |'.format(batch_size) for key in total_loss_dict: @@ -805,6 +823,8 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, report_memory_flag, skipped_iter, grad_norm, params_norm, num_zeros_in_grad) + save_tensor_logs(iteration) + # Autoresume if args.adlr_autoresume and \ (iteration % args.adlr_autoresume_interval == 0): diff --git a/pretrain_gpt.py b/pretrain_gpt.py index 47b3e91881..42b975e95d 100644 --- a/pretrain_gpt.py +++ b/pretrain_gpt.py @@ -29,6 +29,7 @@ get_gpt_layer_with_transformer_engine_spec, gpt_layer_with_transformer_engine_spec_moe ) +from megatron.tensor_logging import log_tensor, run_and_log_exception def model_provider(pre_process=True, post_process=True) -> Union[GPTModel, megatron.model.GPTModel]: """Builds the model. 
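Reviewer note: the tokens-per-second-per-GPU metric added to training_log above reduces to simple arithmetic; the numbers below are made-up examples, not from any run.

def tokens_per_sec_per_gpu(seq_length, global_batch_size, world_size, iter_time_s):
    # Same formula as the logging change above:
    # (seq_length * batch_size) / world_size / elapsed_time_per_iteration
    return seq_length * global_batch_size / world_size / iter_time_s

# e.g. 2048-token sequences, global batch 512, 64 GPUs, 5.0 s per iteration:
# 2048 * 512 / 64 / 5.0 == 3276.8 tokens/s/GPU
assert tokens_per_sec_per_gpu(2048, 512, 64, 5.0) == 3276.8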
@@ -156,6 +157,9 @@ def loss_func(loss_mask: Tensor, output_tensor: Tensor): f'Device: {torch.cuda.current_device()}, node: {os.uname()[1]}' ) + args = get_args() + log_tensor(f"Global layer {args.num_layers+1} fw: Loss", loss, level=args.debug_layer_outputs) + # Reduce loss for logging. averaged_loss = average_losses_across_data_parallel_group([loss]) @@ -226,8 +230,9 @@ def train_valid_test_datasets_provider(train_val_test_num_samples): # Temporary for transition to core datasets train_valid_test_datasets_provider.is_distributed = True - pretrain(train_valid_test_datasets_provider, - model_provider, - ModelType.encoder_or_decoder, - forward_step, - args_defaults={'tokenizer_type': 'GPT2BPETokenizer'}) + with run_and_log_exception(): + pretrain(train_valid_test_datasets_provider, + model_provider, + ModelType.encoder_or_decoder, + forward_step, + args_defaults={'tokenizer_type': 'GPT2BPETokenizer'}) diff --git a/tools/preprocess_data.py b/tools/preprocess_data.py index 2ff01ff70e..f1f7fbb98e 100644 --- a/tools/preprocess_data.py +++ b/tools/preprocess_data.py @@ -20,6 +20,7 @@ except ImportError: nltk_available = False +from datasets import load_dataset from megatron.tokenizer import build_tokenizer from megatron.core.datasets import indexed_dataset @@ -81,8 +82,7 @@ def split(self, json_line): output[key] = [tokens for partial in tokens_list for tokens in partial] return json.dumps(output), len(json_line) - def encode(self, json_line): - data = json.loads(json_line) + def _encode_data(self, data): ids = {} lens = {} for key in self.args.json_keys: @@ -103,7 +103,16 @@ def encode(self, json_line): sentence_lens[-1] += 1 ids[key] = doc_ids lens[key] = sentence_lens - return ids, lens, len(json_line) + return ids + + def encode(self, json_line): + data = json.loads(json_line) + ids = self._encode_data(data) + return ids, len(json_line) + + def encode_hf(self, sample): + ids = self._encode_data(sample) + return ids, 1 class Partition(object): @@ -143,14 +152,28 @@ def split_sentences(self, file_name): def process_json_file(self, file_name): input_file_name, output_prefix = file_name - print("Opening", input_file_name) - fin = open(input_file_name, 'r', encoding='utf-8') startup_start = time.time() encoder = Encoder(self.args) tokenizer = build_tokenizer(self.args) pool = multiprocessing.Pool(self.workers, initializer=encoder.initializer) - encoded_docs = pool.imap(encoder.encode, fin, 32) + + print("Opening", self.args.input) + + if self.args.input.endswith(".jsonl"): + print("Input is a jsonl file") + assert self.args.subset is None, f"subset argument set to: {self.args.subset}, but loading a jsonl file." + fin = open(self.args.input, 'r', encoding='utf-8') + encoded_docs = pool.imap(encoder.encode, fin, self.args.chunk_size) + #encoded_docs = map(encoder.encode, fin) + else: + # NOTE: this is not recommended for datasets larger than 40-50GB, as iterating through a dataset can be slow. + # Somehow, it seems faster to first dump the dataset to a jsonl file: ds.to_json() and then process the jsonl file. + # NOTE: this will be even slower if the dataset has large objects in other columns. + # In this case, it is recommended to dump as json only the required key: ds = ds.remove_columns(...) 
then to_json() + print("Input is not a jsonl file, will try to load from HF datasets") + ds = load_dataset(self.args.input, use_auth_token=True, streaming=True, split="train", data_dir=self.args.subset) + encoded_docs = pool.imap(encoder.encode_hf, ds, self.args.chunk_size) level = "document" if self.args.split_sentences: @@ -189,6 +212,8 @@ def get_args(): group = parser.add_argument_group(title='input data') group.add_argument('--input', type=str, required=True, help='Path to input JSON') + group.add_argument('--subset', type=str, default=None, + help='Subset argument when loading input data from a HuggingFace dataset') group.add_argument('--json-keys', nargs='+', default=['text'], help='space separate listed of keys to extract from json') group.add_argument('--split-sentences', action='store_true', @@ -201,7 +226,7 @@ def get_args(): choices=['BertWordPieceLowerCase','BertWordPieceCase', 'GPT2BPETokenizer', 'SentencePieceTokenizer', 'GPTSentencePieceTokenizer', 'Llama2Tokenizer', - 'NullTokenizer'], + 'NullTokenizer', 'TokenizerFromFile'], help='What type of tokenizer to use.') group.add_argument('--tokenizer-model', type=str, default=None, help='YTTM tokenizer model.') @@ -211,6 +236,8 @@ def get_args(): help='size of vocab for use with NullTokenizer') group.add_argument('--merge-file', type=str, default=None, help='Path to the BPE merge file (if necessary).') + group.add_argument('--tokenizer-file', type=str, default=None, + help='Path to the tokenizer file') group.add_argument('--append-eod', action='store_true', help='Append an token to the end of a document.') group.add_argument('--lang', type=str, default='english', From d2dce059308a9ab038647e72a54413db0269d9d0 Mon Sep 17 00:00:00 2001 From: Selvaraj Anandaraj Date: Wed, 20 Dec 2023 17:31:55 -0800 Subject: [PATCH 069/296] Moved offloading library to TE Signed-off-by: Selvaraj Anandaraj --- megatron/core/cpu_offload.py | 389 ----------------------------------- 1 file changed, 389 deletions(-) delete mode 100644 megatron/core/cpu_offload.py diff --git a/megatron/core/cpu_offload.py b/megatron/core/cpu_offload.py deleted file mode 100644 index 96999ddadf..0000000000 --- a/megatron/core/cpu_offload.py +++ /dev/null @@ -1,389 +0,0 @@ -import torch -from typing import Any -from contextlib import nullcontext - -class CpuOffloadSavedTensorHook: - """Contex-manager that executes a pair of pack/unpack hooks for saved tensors. - - In this context, the ``on_save_for_backward`` method will be called every time - a tensor is saved for backward (this includes intermediary results saved using - :func:`~torch.autograd.function._ContextMethodMixin.save_for_backward` but - also those recorded by a PyTorch-defined operation). - - The ``on_get_saved_tensors`` method will be called when the backward function - of this op attempts to retrieve the saved tensor from context (this includes - :func: `torch.Tensor.backward()` or :func: `torch.autograd.grad()`. It takes the - as input the return value of the ``on_save_for_backward``, and is meant to return - an identical copy of the tensor being saved by ``on_save_for_backward`` in terms of - size, device and element values. - - Example: - - >>> import torch - >>> from typing import Any - >>> - >>> class DummyHook(CpuOffloadSavedTensorHook): - ... - ... def on_save_for_backward(self, tensor: torch.Tensor) -> Any: - ... logging.info("On save", tensor) - ... return (tensor,) - ... - ... def on_get_saved_tensor(self, saved_state: Any) -> torch.Tensor: - ... logging.info("On get", saved_state) - ... 
tensor, = saved_state - ... return tensor - ... - >>> a = torch.ones(5, requires_grad=True) - >>> b = torch.ones(5, requires_grad=True) * 2 - >>> with DummyHook(): - ... y = a * b - ... - On save tensor([1., 1., 1., 1., 1.], requires_grad=True) - On save tensor([2., 2., 2., 2., 2.], grad_fn=) - >>> y.sum().backward() - On get (tensor([1., 1., 1., 1., 1.], requires_grad=True),) - On get (tensor([2., 2., 2., 2., 2.], grad_fn=),) - - """ - - def __init__(self) -> None: - pass - - def __enter__(self): - torch._C._autograd._push_saved_tensors_default_hooks( - self.on_save_for_backward, - self.on_get_saved_tensor - ) - - def __exit__(self, *args: Any): - torch._C._autograd._pop_saved_tensors_default_hooks() - - - def on_save_for_backward(self, tensor: torch.Tensor) -> Any: - raise NotImplementedError("`on_save_for_backward: Callable[[torch.Tensor], Any]`" - "is not implemented in CpuOffloadHook class. Inherit " - "this class and implement your custom hooks") - - def on_get_saved_tensor(self, saved_state: Any) -> torch.Tensor: - raise NotImplementedError("`on_get_saved_tensors: Callable[[Any], torch.Tensor]`" - "is not implemented in CpuOffloadHook class. Inherit " - "this class and implement your custom hooks") - -class CpuOffloadHookWithOffloadHandler(CpuOffloadSavedTensorHook): - """Contex-manager that offloads/recovers tensors through an offload hander. - - The hook just offloads/recovers the tensor object to the handler through `tensor_push` and `tensor_pop` interface. - How the offload-handler manages the offloading, recovering or prefetching timing is transparent to this hook. - """ - def __init__(self, offload_handler, handler_extra_kwargs={}, debug=False) -> None: - self.debug = debug - self.offload_handler = offload_handler - self.handler_extra_kwargs = handler_extra_kwargs - super().__init__() - - def on_save_for_backward(self, tensor: torch.Tensor) -> Any: - retrieve_identifier = self.offload_handler.tensor_push( - tensor, - **self.handler_extra_kwargs - ) - return retrieve_identifier - - def on_get_saved_tensor(self, retrieve_identifier: Any) -> torch.Tensor: - tensor = self.offload_handler.tensor_pop( - retrieve_identifier, - **self.handler_extra_kwargs - ) - return tensor - -class OffloadHandler: - """A base class for CPU offload-handler defining two methods.""" - def __init__(self) -> None: - pass - - def tensor_push(self, tensor: torch.Tensor, **kwargs) -> Any: - raise NotImplementedError("`tensor_push is not implented in OffloadHandler class. " - "Inherit this class and implement your custom tensor_push.") - - def tensor_pop(self, state: Any, **kwargs): - raise NotImplementedError("`tensor_pop is not implented in OffloadHandler class. " - "Inherit this class and implement your custom tensor_pop.") - -class GroupCommitFunction(torch.autograd.Function): - """this is a dummy op with output identical to input. - However, it is necessary for marking a timepoint for offload handler to accomplish all synchronizations. - Implementing it as a function is necessary because we need to actions in both forward and backward. 
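Reviewer note: the GroupCommitFunction described above (removed here along with the rest of the file) follows a common pattern: an identity op whose only job is to run side effects at a known point in both the forward and backward pass. A generic standalone version of that pattern, not the deleted implementation itself, might look like this:

import torch

class IdentityWithCallbacks(torch.autograd.Function):
    """Identity op that fires callbacks at a fixed point in fwd and bwd."""

    @staticmethod
    def forward(ctx, tensor, on_forward, on_backward):
        ctx.on_backward = on_backward
        on_forward()
        return tensor

    @staticmethod
    def backward(ctx, grad_output):
        ctx.on_backward()
        # One gradient per forward input; the two callbacks get None.
        return grad_output, None, None

x = torch.ones(3, requires_grad=True)
y = IdentityWithCallbacks.apply(x, lambda: print("fwd commit"), lambda: print("bwd commit"))
y.sum().backward()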
- """ - @staticmethod - def forward(ctx, tensor, cpu_offload_handler): - cpu_offload_handler.on_group_commit_forward() - ctx.cpu_offload_handler = cpu_offload_handler - # return the identical tensor - return tensor - - @staticmethod - def backward(ctx, grad_output): - cpu_offload_handler = ctx.cpu_offload_handler - cpu_offload_handler.on_group_commit_backward() - return grad_output, None - -group_prefetch_offload_commit = GroupCommitFunction.apply - -class SynchronizedGroupOffloadHandler(OffloadHandler): - """Offload Handler that offloads/reloads in a synchronized way. - The device-to-host and host-to-device copying happen in the same stream - as the computation kernels, thus the copying will block computation. - """ - def __init__(self, - num_offload_group, - tensor_need_offloading_checker=(lambda _: True), - debug=False - ) -> None: - super().__init__() - - self.num_offload_group = num_offload_group - self.tensor_need_offloading_checker = tensor_need_offloading_checker - self.debug = debug - - self.groupid_reset() - - def groupid_reset(self): - # Data structures to label saved tensors and book-keep their cpu copies. - # Currently, on push, create a new cpu tensor and copies; on pop, copies the tensor back to gpu and deletes the cpu tensor - self.current_group, self.tensor_count_current_group = (0, 0) # will increment whenever `group_commit()` is invoked - self.tensor_tag_to_state = dict() - - def on_group_commit_forward(self): - # finishing up with updating current group and tensor count - self.current_group += 1 # increment - self.tensor_count_current_group = 0 # reset - - def on_group_commit_backward(self): - self.current_group -= 1 - assert self.current_group >= 0 - - @staticmethod - def offload(src_tensor, pin_memory=True): - cpu_backup = torch.empty(src_tensor.size(), - dtype=src_tensor.dtype, - layout=src_tensor.layout, - device="cpu", - pin_memory=pin_memory) - cpu_backup.copy_(src_tensor, non_blocking=pin_memory) - state = (src_tensor.device, cpu_backup) - return state - - @staticmethod - def reload(state, non_blocking=None): - dev, cpu_backup = state - if non_blocking is None: - non_blocking = cpu_backup.is_pinned() - return cpu_backup.to(dev, non_blocking=non_blocking) - - def tensor_push(self, tensor: torch.Tensor, **kwargs): - # obtain a unique tensor tag - tensor_tag = (self.current_group, self.tensor_count_current_group) - self.tensor_count_current_group += 1 - assert not (tensor_tag in self.tensor_tag_to_state) - if self.current_group < self.num_offload_group and self.tensor_need_offloading_checker(tensor): - state = SynchronizedGroupOffloadHandler.offload(tensor) - self.tensor_tag_to_state[tensor_tag] = state - else: - self.tensor_tag_to_state[tensor_tag] = tensor # will be offloaded together after group commit - return tensor_tag - - def tensor_pop(self, tensor_tag, **kwargs): - assert tensor_tag in self.tensor_tag_to_state - state = self.tensor_tag_to_state.pop(tensor_tag) - if isinstance(state, tuple): - tensor = SynchronizedGroupOffloadHandler.reload(state) - else: - tensor = state - return tensor - -class AsyncDoubleBufferGroupOffloadHandler(SynchronizedGroupOffloadHandler): - """Compared to synchronize, using more memory because of the buffer. But achieves better performance - due to the overlapping. D2h and h2d copying are completely hidden behind computation if computation time - of a layer is longer than host-device communication time. Bulk offloading with delay and bulk reloading - with prefetch are implemented. 
""" - def __init__(self, - num_offload_group, # must be <= actual number of groups (number of commits) - num_prefetch_group=1, - tensor_need_offloading_checker=(lambda t: True), - debug=False - ) -> None: - super().__init__(num_offload_group=num_offload_group, - tensor_need_offloading_checker=tensor_need_offloading_checker, - debug=debug) - self.num_prefetch_group = num_prefetch_group - - # prepare for tensor buffer - self.tensor_id_to_tensor_buf_double_bufs = [] - for _ in range(2): - self.tensor_id_to_tensor_buf_double_bufs.append(dict()) - - # allocate streams and events for synchronization - self.d2h_stream = torch.cuda.Stream() - self.h2d_stream = torch.cuda.Stream() - self.h2d_finish_events = [] - self.compute_stream_bwd_start_events = [] - for _ in range(self.num_offload_group): - self.h2d_finish_events.append(torch.cuda.Event()) - self.compute_stream_bwd_start_events.append(torch.cuda.Event()) - self.d2h_final_event = torch.cuda.Event() - - def get_tensor_buf_for_offloaded_tensor(self, tensor, tensor_tag): - group_id, tensor_id = tensor_tag - # obtain ping-pong buffer - id_buf_map = self.tensor_id_to_tensor_buf_double_bufs[(group_id % 2)] - - if not tensor_id in id_buf_map: - allocate_new_buf = True - else: - tensor_buf = id_buf_map[tensor_id] - if not (tensor_buf.size() == tensor.size() and tensor_buf.dtype == tensor.dtype): - allocate_new_buf = True - else: - allocate_new_buf = False # in this case, reuse the old buffer - - if allocate_new_buf: - # supposed to only execute once - id_buf_map[tensor_id] = torch.empty(tensor.size(), - dtype=tensor.dtype, - layout=tensor.layout, - device=tensor.device, - ) - return id_buf_map[tensor_id] - - def tensor_push(self, tensor: torch.Tensor, **kwargs) -> Any: - # obtain a unique tensor tag - tensor_tag = (self.current_group, self.tensor_count_current_group) - self.tensor_count_current_group += 1 - assert not (tensor_tag in self.tensor_tag_to_state) - - if self.current_group < self.num_offload_group and self.tensor_need_offloading_checker(tensor): - # first copy the tensor to tensorbuf, so that the original tensor will not be deleted - tensor_buf = self.get_tensor_buf_for_offloaded_tensor(tensor, tensor_tag) - tensor_buf.copy_(tensor) - # Here we just save it, and at commit, bulk_offload_group will handle it - self.tensor_tag_to_state[tensor_tag] = tensor_buf - else: - self.tensor_tag_to_state[tensor_tag] = tensor - return tensor_tag - - def tensor_pop(self, tensor_tag, **kwargs): - assert tensor_tag in self.tensor_tag_to_state - tensor = self.tensor_tag_to_state.pop(tensor_tag) - # the tensor should have been copied back in on_group_commit_backward() which invokes bulk_reload_group - assert not isinstance(tensor, tuple) - return tensor - - def bulk_offload_group(self, group_to_offload): - with torch.cuda.stream(self.d2h_stream): - for tensor_tag, state in self.tensor_tag_to_state.items(): - group_id, _ = tensor_tag - if group_id == group_to_offload: - assert not isinstance(state, tuple) - tensor_on_device = state - - # if offload, return the reference to cpu copy - if self.tensor_need_offloading_checker(tensor_on_device): - state = SynchronizedGroupOffloadHandler.offload(tensor_on_device) - self.tensor_tag_to_state[tensor_tag] = state - - def synchronize_on_group_commit_forward(self, current_group): - # the host should wait for the copying of previous group - # to avoid overwriting buffer - previous_group = current_group - 1 - if (previous_group < self.num_offload_group): - torch.cuda.synchronize() - # TODO (guyueh): this part is originally 
designed to reduce the peak memory usage. - # however, uncommenting this part will cause illegal access, have not figured out why. - - if previous_group + 2 >= self.num_offload_group: - # this buffer is no longer required - self.tensor_id_to_tensor_buf_double_bufs[(previous_group % 2)] = dict() - - # the copying of this group should wait for the computation stream event - if current_group < self.num_offload_group: - # perform bulk offloading - self.bulk_offload_group(current_group) - if current_group == self.num_offload_group - 1: - self.d2h_stream.record_event(self.d2h_final_event) - - def on_group_commit_forward(self): - """This function will cause host device synchronization""" - # handle synchronization events - self.synchronize_on_group_commit_forward(self.current_group) - - # during forward, the next_group_to_fetch always points to the min of - # the last commited group, and the last offloaded group - self.next_group_to_fetch = min(self.current_group, self.num_offload_group -1) - - super().on_group_commit_forward() - - def bulk_reload_group(self, group_to_reload): - assert group_to_reload < self.num_offload_group - if group_to_reload == self.num_offload_group - 1: - self.h2d_stream.wait_event(self.d2h_final_event) - with torch.cuda.stream(self.h2d_stream): - # move back tensors - for tensor_label in self.tensor_tag_to_state.keys(): - group_id, _ = tensor_label - if group_id == group_to_reload: - state = self.tensor_tag_to_state[tensor_label] - if isinstance(state, tuple): - recovered_tensor = SynchronizedGroupOffloadHandler.reload(state) - self.tensor_tag_to_state[tensor_label] = recovered_tensor - else: - self.tensor_tag_to_state[tensor_label] = state - - def on_group_commit_backward(self): - # first decrement the current group. - # after last commit in forward, the group will +1; in backward it -1. 
Finally it should be decremented to 0 - self.current_group -= 1 - assert self.current_group >= 0 - - # decide the range of group to prefetch - should_prefetch_until_group = self.current_group - self.num_prefetch_group - if should_prefetch_until_group < 0: - should_prefetch_until_group = 0 - - # do prefetch - for group_num_to_prefetch in range(self.next_group_to_fetch, should_prefetch_until_group - 1, -1): - # record the event in the compute stream, for h2d to wait - torch.cuda.current_stream().record_event(self.compute_stream_bwd_start_events[group_num_to_prefetch]) - - # start of h2d should wait for the compute and the d2h - self.h2d_stream.wait_event(self.compute_stream_bwd_start_events[group_num_to_prefetch]) - - #recover tensors (copy back from host) - self.bulk_reload_group(group_num_to_prefetch) - - # record an event for the backward of this layer to wait - self.h2d_stream.record_event(self.h2d_finish_events[group_num_to_prefetch]) - - self.next_group_to_fetch = min(self.num_offload_group - 1, should_prefetch_until_group - 1) # always is set to -1 at the end of the backward - - # wait for the current group - if self.current_group < self.num_offload_group: - torch.cuda.current_stream().wait_event(self.h2d_finish_events[self.current_group]) - -def get_cpu_offload_context(cpu_offloading, cpu_offloading_num_layers): - - def tensor_need_offloading_checker(tensor): - return not hasattr(tensor,"avoid_offloading") - - cpu_offload_handler = AsyncDoubleBufferGroupOffloadHandler( - num_offload_group=cpu_offloading_num_layers, - num_prefetch_group=1, - tensor_need_offloading_checker=tensor_need_offloading_checker - ) - - def group_prefetch_offload_commit_async(tensor): - return group_prefetch_offload_commit(tensor,cpu_offload_handler) - - if cpu_offloading: - return CpuOffloadHookWithOffloadHandler(offload_handler = cpu_offload_handler), group_prefetch_offload_commit_async - else: - return nullcontext(), group_prefetch_offload_commit_async - From 416ee13c68b85dc164b96ff80a0263ba3fdcd7a6 Mon Sep 17 00:00:00 2001 From: Selvaraj Anandaraj Date: Wed, 20 Dec 2023 17:32:17 -0800 Subject: [PATCH 070/296] Moved offloading library to TE Signed-off-by: Selvaraj Anandaraj --- megatron/core/__init__.py | 1 - megatron/core/transformer/transformer_block.py | 15 +++++++++------ 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/megatron/core/__init__.py b/megatron/core/__init__.py index cef0b0fbf5..85ed72a997 100644 --- a/megatron/core/__init__.py +++ b/megatron/core/__init__.py @@ -1,6 +1,5 @@ import megatron.core.tensor_parallel import megatron.core.utils -import megatron.core.cpu_offload from megatron.core import parallel_state from megatron.core.distributed import DistributedDataParallel from megatron.core.inference_params import InferenceParams diff --git a/megatron/core/transformer/transformer_block.py b/megatron/core/transformer/transformer_block.py index b91fac5932..010caeb116 100644 --- a/megatron/core/transformer/transformer_block.py +++ b/megatron/core/transformer/transformer_block.py @@ -17,7 +17,7 @@ from megatron.core.transformer.transformer_config import TransformerConfig from megatron.core.transformer.transformer_layer import TransformerLayer from megatron.core.utils import make_sharded_tensor_for_checkpoint, make_viewless_tensor -from megatron.core.cpu_offload import get_cpu_offload_context +from megatron.core.transformer.custom_layers.transformer_engine import get_cpu_offload_context def get_num_layers_to_build(config: TransformerConfig) -> int: @@ -106,10 +106,13 @@ def __init__( 
self._build_layers() self.num_layers_per_pipeline_rank = len(self.layers) - self.offload_context, self.group_prefetch_offload_commit_async = get_cpu_offload_context( - self.config.cpu_offloading, - self.config.cpu_offloading_num_layers - ) + if get_cpu_offload_context is not None: + self.offload_context, self.group_prefetch_offload_commit_async = get_cpu_offload_context( + self.config.cpu_offloading, + self.config.cpu_offloading_num_layers + ) + else: + self.offload_context, self.group_prefetch_offload_commit_async = nullcontext(), None def _build_layers(self): # Transformer layers. @@ -325,7 +328,7 @@ def forward( inference_params=inference_params, ) - if torch.is_grad_enabled() and self.config.cpu_offloading: + if torch.is_grad_enabled() and self.config.cpu_offloading and self.group_prefetch_offload_commit_async is not None: hidden_states = self.group_prefetch_offload_commit_async(hidden_states) # Final layer norm. From 5cf55137d37081b84df29dbe18f366f9e68408f4 Mon Sep 17 00:00:00 2001 From: Selvaraj Anandaraj Date: Wed, 20 Dec 2023 17:33:38 -0800 Subject: [PATCH 071/296] Moved offloading library to TE Signed-off-by: Selvaraj Anandaraj --- megatron/core/transformer/custom_layers/transformer_engine.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/megatron/core/transformer/custom_layers/transformer_engine.py b/megatron/core/transformer/custom_layers/transformer_engine.py index e02bee5cbd..2bc7672067 100644 --- a/megatron/core/transformer/custom_layers/transformer_engine.py +++ b/megatron/core/transformer/custom_layers/transformer_engine.py @@ -435,9 +435,11 @@ def forward( try: from transformer_engine.pytorch.attention import _SplitAlongDim + from transformer_engine.pytorch.cpu_offload import get_cpu_offload_context SplitAlongDim = _SplitAlongDim.apply except ImportError: SplitAlongDim = None + get_cpu_offload_context = None From 40f251c3560f043ba805036964b168d5bc5733a5 Mon Sep 17 00:00:00 2001 From: Joel Lamy-Poirier Date: Thu, 21 Dec 2023 16:20:58 -0500 Subject: [PATCH 072/296] fixes --- megatron/arguments.py | 15 ++++++++++++++- .../datasets/blended_megatron_dataset_builder.py | 2 +- megatron/core/datasets/gpt_dataset.py | 5 ++++- megatron/model/fused_layer_norm.py | 10 +++++++++- megatron/model/language_model.py | 2 +- megatron/training.py | 5 ++--- 6 files changed, 31 insertions(+), 8 deletions(-) diff --git a/megatron/arguments.py b/megatron/arguments.py index f43edac7dd..8f7fbc79f2 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -11,12 +11,16 @@ import torch.nn.functional as F from megatron.global_vars import set_retro_args, get_retro_args -from tools.retro.utils import get_args_path as get_retro_args_path from megatron.core.models.retro import RetroConfig from megatron.core.transformer import TransformerConfig +def get_args_path(workdir): + '''Argument copy stored within retro workdir.''' + return os.path.join(workdir, "args.json") + + def parse_args(extra_args_provider=None, ignore_unknown_args=False): """Parse all arguments.""" parser = argparse.ArgumentParser(description='Megatron-LM Arguments', @@ -194,6 +198,9 @@ def validate_args(args, defaults={}): if args.dataloader_type is None: args.dataloader_type = 'single' + if args.valid_num_workers is None: + args.valid_num_workers = args.num_workers + # Consumed tokens. 
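Reviewer note: the transformer_block change above degrades gracefully when Transformer Engine does not ship the offload helper. Pulled out of the diff into a self-contained sketch (the TE module path and call signature are taken from the imports in patch 071 and may vary across TE versions):

from contextlib import nullcontext

try:
    from transformer_engine.pytorch.cpu_offload import get_cpu_offload_context
except ImportError:
    get_cpu_offload_context = None

def build_offload_context(cpu_offloading, cpu_offloading_num_layers):
    if get_cpu_offload_context is not None:
        return get_cpu_offload_context(cpu_offloading, cpu_offloading_num_layers)
    # No TE support: run with a no-op context and no commit function.
    return nullcontext(), None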
args.consumed_train_samples = 0 args.consumed_valid_samples = 0 @@ -731,6 +738,10 @@ def _add_logging_args(parser): group.add_argument('--wandb-group-name', type=str, default="default") group.add_argument('--wandb-entity-name', type=str, default=None, help="Name of wandb entity for reporting") + group.add_argument('--structured-logs', action="store_true", + help='Add timestamp and worker name to stdout and stderr.') + group.add_argument('--structured-logs-dir', type=str, default=None, + help='Directory to save the logs.') group.add_argument('--debug_layer_outputs', '--debug-layer-outputs', type=int, default=0) group.add_argument('--debug_layer_gradients', '--debug-layer-gradients', type=int, default=0) group.add_argument('--debug_all_param_gradients', '--debug-all-param-gradients', type=int, default=0) @@ -1250,6 +1261,8 @@ def _add_data_args(parser): help='Probability of producing a short sequence.') group.add_argument('--num-workers', type=int, default=2, help="Dataloader number of workers.") + group.add_argument('--valid-num-workers', type=int, default=None, + help="Dataloader number of workers for validation.") group.add_argument('--tokenizer-type', type=str, default=None, choices=['BertWordPieceLowerCase', diff --git a/megatron/core/datasets/blended_megatron_dataset_builder.py b/megatron/core/datasets/blended_megatron_dataset_builder.py index c5c509ea7c..85f7841b4d 100644 --- a/megatron/core/datasets/blended_megatron_dataset_builder.py +++ b/megatron/core/datasets/blended_megatron_dataset_builder.py @@ -209,7 +209,7 @@ def _build_megatron_dataset_splits( megatron_datasets = [] for i, _split in enumerate(Split): - if split[i] is None: + if split[i] is None or sizes[i] == 0: megatron_datasets.append(None) else: megatron_datasets.append( diff --git a/megatron/core/datasets/gpt_dataset.py b/megatron/core/datasets/gpt_dataset.py index e84965b93c..fd785c5bad 100644 --- a/megatron/core/datasets/gpt_dataset.py +++ b/megatron/core/datasets/gpt_dataset.py @@ -180,7 +180,10 @@ def _query_document_sample_shuffle_indices( segment_breaks = numpy.argwhere(sample == eod) # split sample by document if self.fim_rate == 0: - return sample.astype(numpy.int64) + return ( + numpy.array(sample, dtype=numpy.int64), + numpy.array(document_ids, dtype=numpy.int64), + ) def fim_permute_sequence(sequence, rate): return permute( diff --git a/megatron/model/fused_layer_norm.py b/megatron/model/fused_layer_norm.py index f076302e4e..2b90a78d30 100644 --- a/megatron/model/fused_layer_norm.py +++ b/megatron/model/fused_layer_norm.py @@ -5,6 +5,7 @@ with some changes. 
""" import numbers +import inspect import torch from torch.nn.parameter import Parameter from torch.nn import init @@ -15,6 +16,9 @@ try: from apex.contrib.layer_norm.layer_norm import FastLayerNormFN HAVE_PERSIST_LAYER_NORM = True + _fast_layer_norm_has_mem_efficient = ( + "memory_efficient" in inspect.signature(FastLayerNormFN.forward).parameters + ) except: HAVE_PERSIST_LAYER_NORM = False @@ -83,7 +87,11 @@ def forward(self, input): "fused_layer_norm_affine is not available, please install apex from https://github.com/NVIDIA/apex" return fused_layer_norm_affine(input, weight, self.bias, self.normalized_shape, eps=self.eps) else: - output = FastLayerNormFN.apply(input, weight, self.bias, self.eps) + if _fast_layer_norm_has_mem_efficient: + output = FastLayerNormFN.apply(input, weight, self.bias, self.eps, False) + else: + output = FastLayerNormFN.apply(input, weight, self.bias, self.eps) + # Apex's fast layer norm function outputs a 'view' tensor (i.e., has # a populated '_base' field). This will result in schedule.py's diff --git a/megatron/model/language_model.py b/megatron/model/language_model.py index c2ddb5bda5..2080ad8ae0 100644 --- a/megatron/model/language_model.py +++ b/megatron/model/language_model.py @@ -447,7 +447,7 @@ def __init__(self, elif key[3]=="dense_4h_to_h": key[3]="layer_2" else: - assert key[0]=="final_layernorm" + assert key[0]=="final_norm", key[0] key=["layers",str(args.encoder_num_layers+1)]+key elif key[0]=="embedding": key=["layers", "0", "_".join(key[1:])] diff --git a/megatron/training.py b/megatron/training.py index 2a05f4f240..702f0e876a 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -43,7 +43,6 @@ from megatron.initialize import set_jit_fusion_options from megatron.optimizer_param_scheduler import OptimizerParamScheduler from megatron.utils import check_adlr_autoresume_termination -from megatron.utils import get_tflops from megatron.utils import unwrap_model from megatron.data.data_samplers import build_pretraining_data_loader from megatron.utils import calc_params_l2_norm @@ -1131,10 +1130,10 @@ def build_train_valid_test_data_loaders( train_dataloader = build_pretraining_data_loader( train_ds, args.consumed_train_samples) if args.skip_train: - valid_dataloader = build_pretraining_data_loader(valid_ds, 0) + valid_dataloader = build_pretraining_data_loader(valid_ds, 0, num_workers=args.valid_num_workers) else: valid_dataloader = build_pretraining_data_loader( - valid_ds, args.consumed_valid_samples) + valid_ds, args.consumed_valid_samples, num_workers=args.valid_num_workers) test_dataloader = build_pretraining_data_loader(test_ds, 0) # Flags to know if we need to do training/validation/testing. From d4aaa71bb6749144d732d8f3c85c51896e5387e7 Mon Sep 17 00:00:00 2001 From: Deepak Narayanan Date: Mon, 11 Dec 2023 13:59:58 -0800 Subject: [PATCH 073/296] Truncate or pad in load_parameter_state() to support all DP sizes --- megatron/core/distributed/grad_buffer.py | 20 +++++++++-- megatron/optimizer/distrib_optimizer.py | 44 ++++++++++++++++++++---- 2 files changed, 56 insertions(+), 8 deletions(-) diff --git a/megatron/core/distributed/grad_buffer.py b/megatron/core/distributed/grad_buffer.py index 8bc88a8e71..e60d40dd80 100644 --- a/megatron/core/distributed/grad_buffer.py +++ b/megatron/core/distributed/grad_buffer.py @@ -33,6 +33,7 @@ class Bucket: params: List of parameters whose gradients are collated in this bucket. data: View in larger GradBuffer that this bucket is responsible for. 
offset: Offset of this bucket's view in the larger GradBuffer. + numel_unpadded: Number of unpadded elements in bucket. data_parallel_group: Data-parallel process group. data_parallel_world_size: World size using the data-parallel group group. overlap_grad_reduce: If true, overlap communication with backprop computation by @@ -47,6 +48,7 @@ def __init__( params: List[torch.nn.Parameter], data: torch.Tensor, offset: int, + numel_unpadded: int, data_parallel_group: torch.distributed.ProcessGroup, data_parallel_world_size: int, overlap_grad_reduce: bool, @@ -63,6 +65,7 @@ def __init__( # The distributed optimizer needs to keep track of this bucket's offset # within the full grad_buffer. self.offset = offset + self.numel_unpadded = numel_unpadded self.data_parallel_group = data_parallel_group self.data_parallel_world_size = data_parallel_world_size self.data_parallel_rank = torch.distributed.get_rank(group=data_parallel_group) @@ -213,6 +216,7 @@ def _pad_if_needed(data_index: int): bucket_data_start_index = data_start_index bucket_params = set() self.bucket_indices = [] + per_bucket_numel_unpadded = [] bucket_id = 0 for param in params[::-1]: # Iterate through parameters in reverse order to roughly follow backprop order, @@ -242,6 +246,7 @@ def _pad_if_needed(data_index: int): if (data_end_index - bucket_data_start_index) >= bucket_size and len( bucket_params ) > 1: + per_bucket_numel_unpadded.append(data_end_index - bucket_data_start_index) data_end_index = _pad_if_needed(data_end_index) self.bucket_indices.append((bucket_data_start_index, data_end_index)) bucket_data_start_index = data_end_index @@ -251,6 +256,7 @@ def _pad_if_needed(data_index: int): # Add remaining params to a new bucket. if len(bucket_params) > 0: + per_bucket_numel_unpadded.append(data_end_index - bucket_data_start_index) data_end_index = _pad_if_needed(data_end_index) self.bucket_indices.append((bucket_data_start_index, data_end_index)) @@ -275,7 +281,11 @@ def _pad_if_needed(data_index: int): if bucket_id != cur_bucket_id: bucket_data_end_index = _pad_if_needed(data_start_index) self._set_bucket( - bucket_params, bucket_data_start_index, bucket_data_end_index, cur_bucket_id + bucket_params=bucket_params, + start_index=bucket_data_start_index, + end_index=bucket_data_end_index, + numel_unpadded=per_bucket_numel_unpadded[cur_bucket_id], + bucket_id=cur_bucket_id, ) bucket_data_start_index = bucket_data_end_index bucket_params = set() @@ -288,7 +298,11 @@ def _pad_if_needed(data_index: int): if len(bucket_params) > 0: bucket_data_end_index = _pad_if_needed(data_end_index) self._set_bucket( - bucket_params, bucket_data_start_index, bucket_data_end_index, cur_bucket_id + bucket_params=bucket_params, + start_index=bucket_data_start_index, + end_index=bucket_data_end_index, + numel_unpadded=per_bucket_numel_unpadded[cur_bucket_id], + bucket_id=cur_bucket_id, ) if not overlap_grad_reduce: @@ -328,6 +342,7 @@ def _set_bucket( bucket_params: List[torch.nn.Parameter], start_index: int, end_index: int, + numel_unpadded: int, bucket_id: int, ): """ @@ -348,6 +363,7 @@ def _set_bucket( params=bucket_params, data=bucket_data, offset=start_index, + numel_unpadded=numel_unpadded, data_parallel_group=self.data_parallel_group, data_parallel_world_size=self.data_parallel_world_size, overlap_grad_reduce=self.overlap_grad_reduce, diff --git a/megatron/optimizer/distrib_optimizer.py b/megatron/optimizer/distrib_optimizer.py index bb133aa42b..62ac885a4d 100644 --- a/megatron/optimizer/distrib_optimizer.py +++ 
b/megatron/optimizer/distrib_optimizer.py @@ -388,10 +388,14 @@ def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, # Model grad buffer ranges. self.model_gbuf_ranges = [] self.per_bucket_numel = [] + self.per_bucket_numel_unpadded = [] for _, model_chunk in enumerate(self.models): self.per_bucket_numel.append( {dtype: [bucket.data.numel() for bucket in model_chunk.grad_buffers[dtype].buckets] for dtype in model_chunk.grad_buffers}) + self.per_bucket_numel_unpadded.append( + {dtype: [bucket.numel_unpadded for bucket in model_chunk.grad_buffers[dtype].buckets] + for dtype in model_chunk.grad_buffers}) self.model_gbuf_ranges.append(self.build_model_gbuf_range_map(model_chunk)) self.model_param_gbuf_map = \ self.build_model_param_gbuf_map(self.model_gbuf_ranges) @@ -654,7 +658,8 @@ def save_parameter_state(self, filename): data_parallel_global_ranks = list(mpu._DATA_PARALLEL_GLOBAL_RANKS_WITH_CP) # Collect param states. - state = {"per_bucket_numel": self.per_bucket_numel} + state = {"per_bucket_numel": self.per_bucket_numel, + "per_bucket_numel_unpadded": self.per_bucket_numel_unpadded} for model_idx, gbuf_range_maps in enumerate(self.model_gbuf_ranges): # Iterate grad buffers (by data type). @@ -753,11 +758,12 @@ def load_parameter_state(self, filename): # Load on DP rank 0. if data_parallel_rank == 0: loaded_state = torch.load(filename) - if "per_bucket_numel" in loaded_state: - per_bucket_numel_in_checkpoint = loaded_state["per_bucket_numel"] - assert self.per_bucket_numel == per_bucket_numel_in_checkpoint, \ - (f"Number of elements in each bucket need to be the same in current run " - f"({self.per_bucket_numel}) and checkpoint ({per_bucket_numel_in_checkpoint})") + if "per_bucket_numel_unpadded" in loaded_state: + per_bucket_numel_unpadded_in_checkpoint = loaded_state["per_bucket_numel_unpadded"] + assert self.per_bucket_numel_unpadded == per_bucket_numel_unpadded_in_checkpoint, \ + (f"Number of unpadded elements in each bucket need to be the same in current run " + f"({self.per_bucket_numel_unpadded}) and checkpoint " + f"({per_bucket_numel_unpadded_in_checkpoint})") # Scatter tensors to all DP ranks. for model_idx, gbuf_range_maps in enumerate(self.model_gbuf_ranges): @@ -767,6 +773,7 @@ def load_parameter_state(self, filename): # Compute local DP contiguous shard's size. model = self.models[model_idx] gbuf_world_numel = model.grad_buffers[dtype].buckets[bucket_idx].data.numel() + assert gbuf_world_numel == self.per_bucket_numel[model_idx][dtype][bucket_idx] assert gbuf_world_numel % data_parallel_world_size == 0 gbuf_local_numel = gbuf_world_numel // data_parallel_world_size @@ -788,7 +795,32 @@ def load_parameter_state(self, filename): (f"Trying to load state for bucket_id {bucket_idx} (out of " f"{len(gbuf_range_map_for_all_buckets)} buckets) from checkpoint; " f"checkpoint only has {len(world_tensor_for_all_buckets)} bucket(s)") + # This tensor might be bigger or smaller than expected (depending on + # relative sizes of per_bucket_numel_in_checkpoint and self.per_bucket_numel). 
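Reviewer note: the truncate-or-expand handling introduced by the comment above (and shown in full just below) fits a checkpointed bucket tensor to the current run's padded size. A standalone restatement, using zeros instead of torch.empty purely for clarity since the padding values are never read:

import torch

def fit_bucket_tensor(world_tensor, numel, numel_unpadded):
    # Only padding may differ between runs, so the real values always fit.
    assert numel_unpadded <= min(numel, world_tensor.numel())
    if world_tensor.numel() > numel:
        return world_tensor[:numel]                  # drop extra padding
    if world_tensor.numel() < numel:
        expanded = torch.zeros(numel, dtype=world_tensor.dtype,
                               device=world_tensor.device)
        expanded[: world_tensor.numel()].copy_(world_tensor)
        return expanded
    return world_tensor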
world_tensor = world_tensor_for_all_buckets[bucket_idx] + if "per_bucket_numel" in loaded_state: + numel_in_checkpoint = \ + loaded_state["per_bucket_numel"][model_idx][dtype][bucket_idx] + numel = self.per_bucket_numel[model_idx][dtype][bucket_idx] + numel_unpadded = self.per_bucket_numel_unpadded[model_idx][dtype][bucket_idx] + print(f"numel_in_checkpoint={numel_in_checkpoint}, numel={numel}, numel_unpadded={numel_unpadded}") + assert world_tensor.numel() == numel_in_checkpoint + assert numel_unpadded <= world_tensor.numel(), \ + ("True number of elements should be fewer than number of elements in " + "checkpoint tensor") + if world_tensor.numel() >= numel: + # Truncate extra values, which are padding anyway. + world_tensor = world_tensor[:numel] + else: + # In this case, numel > world_tensor.numel() (which is numel_in_checkpoint). + # Create new tensor with right number of values, then copy and use new tensor. + world_tensor_reshaped = torch.empty((numel,), + dtype=world_tensor.dtype, + device=world_tensor.device) + world_tensor_reshaped[:numel_in_checkpoint].copy_(world_tensor) + world_tensor = world_tensor_reshaped + else: + print("***WARNING*** Using older checkpoint so skipping padding checks") gbuf_start_idxs = \ list(range(0, gbuf_world_numel, gbuf_local_numel)) send_tensors = [world_tensor[i:(i+gbuf_local_numel)] From e1dbab764c47f21fefc83f53dee6832840d96d74 Mon Sep 17 00:00:00 2001 From: Deepak Narayanan Date: Thu, 14 Dec 2023 10:22:46 +0530 Subject: [PATCH 074/296] Improve logging around tensor truncation and expansion when loading distributed optimizer checkpoint --- megatron/optimizer/distrib_optimizer.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/megatron/optimizer/distrib_optimizer.py b/megatron/optimizer/distrib_optimizer.py index 62ac885a4d..dce3b81677 100644 --- a/megatron/optimizer/distrib_optimizer.py +++ b/megatron/optimizer/distrib_optimizer.py @@ -803,24 +803,27 @@ def load_parameter_state(self, filename): loaded_state["per_bucket_numel"][model_idx][dtype][bucket_idx] numel = self.per_bucket_numel[model_idx][dtype][bucket_idx] numel_unpadded = self.per_bucket_numel_unpadded[model_idx][dtype][bucket_idx] - print(f"numel_in_checkpoint={numel_in_checkpoint}, numel={numel}, numel_unpadded={numel_unpadded}") assert world_tensor.numel() == numel_in_checkpoint assert numel_unpadded <= world_tensor.numel(), \ ("True number of elements should be fewer than number of elements in " "checkpoint tensor") - if world_tensor.numel() >= numel: + if world_tensor.numel() > numel: # Truncate extra values, which are padding anyway. + print_rank_0(f"Truncating extra values from checkpoint (numel_in_checkpoint={numel_in_checkpoint}, " + f"numel={numel}, numel_unpadded={numel_unpadded})") world_tensor = world_tensor[:numel] - else: + elif world_tensor.numel() < numel: # In this case, numel > world_tensor.numel() (which is numel_in_checkpoint). # Create new tensor with right number of values, then copy and use new tensor. 
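Reviewer note: for context, once the world tensor has been fitted it is sliced into equal contiguous shards, one per data-parallel rank, before the scatter (the gbuf_start_idxs/send_tensors lines above); the earlier padding is what guarantees divisibility. Stated on its own:

def dp_shards(world_tensor, dp_world_size):
    # Split a padded flat buffer into equal contiguous per-rank shards.
    assert world_tensor.numel() % dp_world_size == 0
    local_numel = world_tensor.numel() // dp_world_size
    return [world_tensor[r * local_numel:(r + 1) * local_numel]
            for r in range(dp_world_size)]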
+ print_rank_0(f"Expanding tensor from checkpoint (numel_in_checkpoint={numel_in_checkpoint}, " + f"numel={numel}, numel_unpadded={numel_unpadded})") world_tensor_reshaped = torch.empty((numel,), dtype=world_tensor.dtype, device=world_tensor.device) world_tensor_reshaped[:numel_in_checkpoint].copy_(world_tensor) world_tensor = world_tensor_reshaped else: - print("***WARNING*** Using older checkpoint so skipping padding checks") + print_rank_0("***WARNING*** Using older checkpoint so skipping padding checks") gbuf_start_idxs = \ list(range(0, gbuf_world_numel, gbuf_local_numel)) send_tensors = [world_tensor[i:(i+gbuf_local_numel)] From 5e993318a7bfb9fa3ca00f229f449cf56504fb55 Mon Sep 17 00:00:00 2001 From: Geo Date: Tue, 26 Dec 2023 20:25:35 +0800 Subject: [PATCH 075/296] add assert for overlap_param_gather --- megatron/arguments.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/megatron/arguments.py b/megatron/arguments.py index fff5bbeb5b..0bb6acf9eb 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -170,6 +170,8 @@ def validate_args(args, defaults={}): if args.overlap_param_gather: assert args.use_distributed_optimizer, \ '--overlap-param-gather only supported with distributed optimizer' + assert args.overlap_grad_reduce, \ + '--overlap-grad-reduce should be turned on when using --overlap-param-gather' # Parameters dtype. args.params_dtype = torch.float From c6a3cc1c1a35cd70f7d61207a9fe7747ca2b9c08 Mon Sep 17 00:00:00 2001 From: Deepak Narayanan Date: Thu, 4 Jan 2024 06:56:03 -0800 Subject: [PATCH 076/296] Save checkpoint whenever batch size ramps up --- megatron/training.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/megatron/training.py b/megatron/training.py index d18d3c3b91..29ab904c90 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -773,6 +773,7 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, gc.disable() gc.collect() + num_microbatches = get_num_microbatches() while iteration < args.train_iters: if args.profile and \ iteration == args.profile_step_start and \ @@ -780,7 +781,19 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, torch.cuda.cudart().cudaProfilerStart() torch.autograd.profiler.emit_nvtx(record_shapes=True).__enter__() - update_num_microbatches(args.consumed_train_samples) + # Update number of microbatches first without consistency check to decide if a + # checkpoint should be saved. If the number of microbatches is different + # from the previous iteration, save a checkpoint. Then run consistency check + # to make sure training configuration is still valid. 
+ update_num_microbatches(args.consumed_train_samples, consistency_check=False) + if get_num_microbatches() != num_microbatches and iteration != 0: + assert get_num_microbatches() > num_microbatches, \ + "number of microbatches should be increasing due to batch size rampup" + save_checkpoint_and_time(iteration, model, optimizer, + opt_param_scheduler) + num_microbatches = get_num_microbatches() + update_num_microbatches(args.consumed_train_samples, consistency_check=True) + args.curr_iteration = iteration loss_dict, skipped_iter, grad_norm, num_zeros_in_grad = \ train_step(forward_step_func, From f6b0f4e41bf762676a2f01c944c733a8af06b7db Mon Sep 17 00:00:00 2001 From: Peter Date: Thu, 4 Jan 2024 11:08:14 -0800 Subject: [PATCH 077/296] added mainfest --- MANIFEST.in | 1 + 1 file changed, 1 insertion(+) create mode 100644 MANIFEST.in diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000000..b3356b76e1 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1 @@ +include megatron/core/requirements.txt From de29065b88c48d3dcb206d7de168be84648797c4 Mon Sep 17 00:00:00 2001 From: Xiaowei Ren Date: Thu, 4 Jan 2024 22:33:41 -0800 Subject: [PATCH 078/296] fix replica_id by considering CP Signed-off-by: Xiaowei Ren --- megatron/core/models/gpt/gpt_model.py | 2 +- megatron/core/transformer/utils.py | 2 +- megatron/core/utils.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/megatron/core/models/gpt/gpt_model.py b/megatron/core/models/gpt/gpt_model.py index 2cf26bacac..0f3348ad3b 100644 --- a/megatron/core/models/gpt/gpt_model.py +++ b/megatron/core/models/gpt/gpt_model.py @@ -214,7 +214,7 @@ def sharded_state_dict(self, prefix: str = '') -> dict: last_stage_word_emb_replica_id = ( 1, # copy of first stage embedding 0, - parallel_state.get_data_parallel_rank(), + parallel_state.get_data_parallel_rank(with_context_parallel=True), ) sharded_output_layer_tensor = make_tp_sharded_tensor_for_checkpoint( diff --git a/megatron/core/transformer/utils.py b/megatron/core/transformer/utils.py index d7d002734f..9f47bac9a5 100644 --- a/megatron/core/transformer/utils.py +++ b/megatron/core/transformer/utils.py @@ -126,7 +126,7 @@ def make_sharded_object_for_checkpoint( replica_id = ( 0, parallel_state.get_tensor_model_parallel_rank(), - parallel_state.get_data_parallel_rank(), + parallel_state.get_data_parallel_rank(with_context_parallel=True), ) return ShardedObject(key, obj, *_get_extra_state_offsets(sharded_offsets), replica_id, **kwargs) diff --git a/megatron/core/utils.py b/megatron/core/utils.py index d4e042b2d4..bcf9cab55a 100644 --- a/megatron/core/utils.py +++ b/megatron/core/utils.py @@ -194,7 +194,7 @@ def make_tp_sharded_tensor_for_checkpoint( prepend_axis_num = len(prepend_offsets) if replica_id is None: - replica_id = (0, 0, parallel_state.get_data_parallel_rank()) + replica_id = (0, 0, parallel_state.get_data_parallel_rank(with_context_parallel=True)) return ShardedTensor.from_rank_offsets( key, @@ -223,7 +223,7 @@ def make_sharded_tensor_for_checkpoint(tensor, key, prepend_offsets=(), replica_ replica_id = ( 0, parallel_state.get_tensor_model_parallel_rank(), - parallel_state.get_data_parallel_rank(), + parallel_state.get_data_parallel_rank(with_context_parallel=True), ) return ShardedTensor.from_rank_offsets( From 390bfca59818e55caa075336a6287f0a22871c43 Mon Sep 17 00:00:00 2001 From: Kirthi Shankar Sivamani Date: Fri, 5 Jan 2024 05:14:12 -0800 Subject: [PATCH 079/296] Fix checkpointing with TransformerEngine Signed-off-by: Kirthi Shankar Sivamani --- 
megatron/checkpointing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/megatron/checkpointing.py b/megatron/checkpointing.py index a3303229a0..3967103a0d 100644 --- a/megatron/checkpointing.py +++ b/megatron/checkpointing.py @@ -580,7 +580,7 @@ def load_checkpoint(model, optimizer, opt_param_scheduler, load_arg='load', stri print_rank_0('could not find arguments in the checkpoint ...') # Model. - strict = False if args.retro_add_retriever else strict + strict = False if args.retro_add_retriever or args.transformer_impl == 'transformer_engine' else strict if len(model) == 1: model[0].load_state_dict(state_dict['model'], strict=strict) else: From 8271a4877b9cb2fb068e4adf353f7234bf6d906d Mon Sep 17 00:00:00 2001 From: Xiaowei Ren Date: Mon, 8 Jan 2024 21:42:36 -0800 Subject: [PATCH 080/296] check if val is None before split in sequence dimension Signed-off-by: Xiaowei Ren --- megatron/utils.py | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/megatron/utils.py b/megatron/utils.py index 3a38b2b610..fe284a378a 100644 --- a/megatron/utils.py +++ b/megatron/utils.py @@ -235,17 +235,18 @@ def get_batch_on_this_cp_rank(batch): if cp_size > 1: cp_rank = mpu.get_context_parallel_rank() for key, val in batch.items(): - seq_dim = 1 if key != 'attention_mask' else 2 - val = val.view( - *val.shape[0:seq_dim], - 2 * cp_size, - val.shape[seq_dim] // (2 * cp_size), - *val.shape[(seq_dim + 1) :], - ) - index = torch.tensor([cp_rank, (2 * cp_size - cp_rank - 1)], device=val.device) - val = val.index_select(seq_dim, index) - val = val.view(*val.shape[0:seq_dim], -1, *val.shape[(seq_dim + 2) :]) - batch[key] = val + if val is not None: + seq_dim = 1 if key != 'attention_mask' else 2 + val = val.view( + *val.shape[0:seq_dim], + 2 * cp_size, + val.shape[seq_dim] // (2 * cp_size), + *val.shape[(seq_dim + 1) :], + ) + index = torch.tensor([cp_rank, (2 * cp_size - cp_rank - 1)], device=val.device) + val = val.index_select(seq_dim, index) + val = val.view(*val.shape[0:seq_dim], -1, *val.shape[(seq_dim + 2) :]) + batch[key] = val return batch From f76f96943eab6326d8cac1e52c9a942df3e2faa5 Mon Sep 17 00:00:00 2001 From: Selvaraj Anandaraj Date: Mon, 8 Jan 2024 22:38:14 -0800 Subject: [PATCH 081/296] Modified description for knobs Signed-off-by: Selvaraj Anandaraj --- megatron/core/model_parallel_config.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/megatron/core/model_parallel_config.py b/megatron/core/model_parallel_config.py index 44c97fe8f8..7e245ca0c3 100644 --- a/megatron/core/model_parallel_config.py +++ b/megatron/core/model_parallel_config.py @@ -68,14 +68,13 @@ class ModelParallelConfig: communication collectives like AllGather/ReduceScatter. Overlapping is done for the linear layers wherever possible during the forward and the backward pass. Defaults to False. - tp_comm_split_ag (bool, default=True): If true, allows All-Gather overlap with Fprop GEMM. Don't care if tp_comm_overlap - is False. - tp_comm_atomic_ag (bool, default=True): If true, allows All-Gather overlap with Fprop GEMM. Don't care if tp_comm_overlap - is False. - tp_comm_split_rs (bool, default=True): If true, allows Reduce-Scatter overlap with Fprop GEMM. Don't care if - tp_comm_overlap is False. - tp_comm_atomic_rs (bool, default=True): If true, allows Reduce-Scatter overlap with Fprop GEMM. Don't care if - tp_comm_overlap is False. 
+ tp_comm_split_ag (bool, default=True): If true, allows All-Gather overlap with Fprop GEMM by pipelining the GEMM and All-Gather splits. Don't care if tp_comm_overlap is False. + + tp_comm_atomic_ag (bool, default=True): If true, allows All-Gather overlap with Fprop GEMM by pipelining the GEMM and All-Gather both done atomically. Don't care if tp_comm_overlap is False. + + tp_comm_split_rs (bool, default=True): If true, allows Reduce-Scatter overlap with Fprop GEMM by pipelining the GEMM and Reduce-Scatter splits. Don't care if tp_comm_overlap is False. + + tp_comm_atomic_rs (bool, default=True): If true, allows Reduce-Scatter overlap with Fprop GEMM by pipelining the GEMM and Reduce-Scatter both done atomically. Don't care if tp_comm_overlap is False. tp_comm_bulk_dgrad (bool, default=True): If true, allows All-Gather overlap with Bprop activation gradient GEMM. Don't care if tp_comm_overlap is False. From 4f6cc92abaed7e7a55d4f512f7fdf073e85aef77 Mon Sep 17 00:00:00 2001 From: Selvaraj Anandaraj Date: Mon, 8 Jan 2024 22:57:44 -0800 Subject: [PATCH 082/296] Fixed formatting Signed-off-by: Selvaraj Anandaraj --- megatron/core/model_parallel_config.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/megatron/core/model_parallel_config.py b/megatron/core/model_parallel_config.py index 7e245ca0c3..4cd37f9156 100644 --- a/megatron/core/model_parallel_config.py +++ b/megatron/core/model_parallel_config.py @@ -65,16 +65,20 @@ class ModelParallelConfig: tensor-model-parallel all-reduce with weight gradient compuation of a column-linear layer. Defaults to False. tp_comm_overlap (bool, default=False): If true, allows overlapping of Linear layer execution with tensor parallel - communication collectives like AllGather/ReduceScatter. Overlapping is done for the linear layers wherever possible - during the forward and the backward pass. Defaults to False. + communication collectives like AllGather/ReduceScatter. Overlapping is done for the linear layers wherever + possible during the forward and the backward pass. Defaults to False. - tp_comm_split_ag (bool, default=True): If true, allows All-Gather overlap with Fprop GEMM by pipelining the GEMM and All-Gather splits. Don't care if tp_comm_overlap is False. + tp_comm_split_ag (bool, default=True): If true, allows All-Gather overlap with Fprop GEMM by pipelining the GEMM + and All-Gather splits. Don't care if tp_comm_overlap is False. - tp_comm_atomic_ag (bool, default=True): If true, allows All-Gather overlap with Fprop GEMM by pipelining the GEMM and All-Gather both done atomically. Don't care if tp_comm_overlap is False. + tp_comm_atomic_ag (bool, default=True): If true, allows All-Gather overlap with Fprop GEMM by pipelining the GEMM + and All-Gather both done atomically. Don't care if tp_comm_overlap is False. - tp_comm_split_rs (bool, default=True): If true, allows Reduce-Scatter overlap with Fprop GEMM by pipelining the GEMM and Reduce-Scatter splits. Don't care if tp_comm_overlap is False. + tp_comm_split_rs (bool, default=True): If true, allows Reduce-Scatter overlap with Fprop GEMM by pipelining the + GEMM and Reduce-Scatter splits. Don't care if tp_comm_overlap is False. - tp_comm_atomic_rs (bool, default=True): If true, allows Reduce-Scatter overlap with Fprop GEMM by pipelining the GEMM and Reduce-Scatter both done atomically. Don't care if tp_comm_overlap is False. 
+ tp_comm_atomic_rs (bool, default=True): If true, allows Reduce-Scatter overlap with Fprop GEMM by pipelining the + GEMM and Reduce-Scatter both done atomically. Don't care if tp_comm_overlap is False. tp_comm_bulk_dgrad (bool, default=True): If true, allows All-Gather overlap with Bprop activation gradient GEMM. Don't care if tp_comm_overlap is False. From 4c379eda27e710620638df5c5defdef1aa202d00 Mon Sep 17 00:00:00 2001 From: Selvaraj Anandaraj Date: Tue, 9 Jan 2024 13:57:34 -0800 Subject: [PATCH 083/296] Fixed docstring format Signed-off-by: Selvaraj Anandaraj --- megatron/core/model_parallel_config.py | 72 +++++++++++++------------- 1 file changed, 37 insertions(+), 35 deletions(-) diff --git a/megatron/core/model_parallel_config.py b/megatron/core/model_parallel_config.py index 4cd37f9156..3502201287 100644 --- a/megatron/core/model_parallel_config.py +++ b/megatron/core/model_parallel_config.py @@ -35,10 +35,10 @@ class ModelParallelConfig: Initialization -------------- - perform_initialization (bool, default=True): If true, weights are initialized. This option can be useful when you - know you are going to load values from a checkpoint. + perform_initialization (bool, optional): If true, weights are initialized. This option can be useful when you + know you are going to load values from a checkpoint. Defaults to True. - use_cpu_initialization: (bool, default=False): When set to False, we initialize the weights directly on the GPU. + use_cpu_initialization: (bool, optional): When set to False, we initialize the weights directly on the GPU. Transferring weights from CPU to GPU can take a significant amount of time for large models. Defaults to False. Training @@ -61,30 +61,30 @@ class ModelParallelConfig: ". Note that the extension requires CUDA>=11. Otherwise, you must turn off gradient accumulation fusion. Defaults to False. - async_tensor_model_parallel_allreduce (bool, default=True): If true, enables asynchronous execution of + async_tensor_model_parallel_allreduce (bool, optional): If true, enables asynchronous execution of tensor-model-parallel all-reduce with weight gradient compuation of a column-linear layer. Defaults to False. - tp_comm_overlap (bool, default=False): If true, allows overlapping of Linear layer execution with tensor parallel + tp_comm_overlap (bool, optional): If true, allows overlapping of Linear layer execution with tensor parallel communication collectives like AllGather/ReduceScatter. Overlapping is done for the linear layers wherever possible during the forward and the backward pass. Defaults to False. - tp_comm_split_ag (bool, default=True): If true, allows All-Gather overlap with Fprop GEMM by pipelining the GEMM - and All-Gather splits. Don't care if tp_comm_overlap is False. + tp_comm_split_ag (bool, optional): If true, allows All-Gather overlap with Fprop GEMM by pipelining the GEMM + and All-Gather splits. Don't care if tp_comm_overlap is False. Defaults to True. - tp_comm_atomic_ag (bool, default=True): If true, allows All-Gather overlap with Fprop GEMM by pipelining the GEMM - and All-Gather both done atomically. Don't care if tp_comm_overlap is False. + tp_comm_atomic_ag (bool, optional): If true, allows All-Gather overlap with Fprop GEMM by pipelining the GEMM + and All-Gather both done atomically. Don't care if tp_comm_overlap is False. Defaults to True. - tp_comm_split_rs (bool, default=True): If true, allows Reduce-Scatter overlap with Fprop GEMM by pipelining the - GEMM and Reduce-Scatter splits. Don't care if tp_comm_overlap is False. 
+ tp_comm_split_rs (bool, optional): If true, allows Reduce-Scatter overlap with Fprop GEMM by pipelining the + GEMM and Reduce-Scatter splits. Don't care if tp_comm_overlap is False. Defaults to True. - tp_comm_atomic_rs (bool, default=True): If true, allows Reduce-Scatter overlap with Fprop GEMM by pipelining the - GEMM and Reduce-Scatter both done atomically. Don't care if tp_comm_overlap is False. + tp_comm_atomic_rs (bool, optional): If true, allows Reduce-Scatter overlap with Fprop GEMM by pipelining the + GEMM and Reduce-Scatter both done atomically. Don't care if tp_comm_overlap is False. Defaults to True. - tp_comm_bulk_dgrad (bool, default=True): If true, allows All-Gather overlap with Bprop activation gradient GEMM. Don't - care if tp_comm_overlap is False. + tp_comm_bulk_dgrad (bool, optional): If true, allows All-Gather overlap with Bprop activation gradient GEMM. Don't + care if tp_comm_overlap is False. Defaults to True. - tp_comm_bulk_wgrad (bool, default=True): If true, allows Reduce-Scatter overlap with Bprop weight gradient GEMM. Don't - care if tp_comm_overlap is False. + tp_comm_bulk_wgrad (bool, optional): If true, allows Reduce-Scatter overlap with Bprop weight gradient GEMM. Don't + care if tp_comm_overlap is False. Defaults to True. Parallelism ----------- @@ -97,36 +97,38 @@ class ModelParallelConfig: pipeline_dtype (required): dtype used in p2p communication, usually params_dtype - grad_scale_func (optional, default=None): If using loss scaling, this function should take the loss and return the - scaled loss. If None, no function is called on the loss. + grad_scale_func (optional): If using loss scaling, this function should take the loss and return the + scaled loss. If None, no function is called on the loss. Defaults to None. enable_autocast (bool): If true runs the forward step function inside torch.autocast context. Default is False. autocast_dtype (torch.dtype): dtype to pass to torch.amp.autocast when enabled. Default is pipeline_dtype. - variable_seq_lengths (bool, default=False): Support for variable sequence lengths across microbatches. Setting this + variable_seq_lengths (bool, optional): Support for variable sequence lengths across microbatches. Setting this communicates the size of tensors during pipeline parallelism communication, because of this extra overhead it - should only be set if the sequence length varies by microbatch within a global batch. + should only be set if the sequence length varies by microbatch within a global batch. Defaults to False. - num_microbatches_with_partial_activation_checkpoints (int, default=None): If int, set the number of microbatches + num_microbatches_with_partial_activation_checkpoints (int, optional): If int, set the number of microbatches where not all of the layers will be checkpointed and recomputed. The rest of the microbatches within the window of maximum outstanding microbatches will recompute all layers (either full recompute or selective recompute). If - None, the checkpoint and recompute will be left up to the forward_step function. + None, the checkpoint and recompute will be left up to the forward_step function. Defaults to None. - overlap_p2p_comm (bool, optional, default=False): When True some of the peer to peer communication for pipeline - parallelism will overlap with computation. Must be False if batch_p2p_comm is true. + overlap_p2p_comm (bool, optional): When True some of the peer to peer communication for pipeline + parallelism will overlap with computation. 
Must be False if batch_p2p_comm is true. Defaults to False. - batch_p2p_comm (bool, default=True): Use batch_isend_irecv instead of individual isend/irecv calls. Must be False - if overlap_p2p_comm is True. + batch_p2p_comm (bool, optional): Use batch_isend_irecv instead of individual isend/irecv calls. Must be False + if overlap_p2p_comm is True. Defaults to True. - batch_p2p_sync (bool, default=True): When using batch_isend_irecv, do a cuda.device.synchronize afterward to work - around a bug in older version of PyTorch. + batch_p2p_sync (bool, optional): When using batch_isend_irecv, do a cuda.device.synchronize afterward to work + around a bug in older version of PyTorch. Defaults to True. - use_ring_exchange_p2p (bool, default=False): Use custom ring_exchange kernel instead of + use_ring_exchange_p2p (bool, optional): Use custom ring_exchange kernel instead of torch.distributed.batch_isend_irecv(). Requires custom built torch with torch.distributed.ring_exchange. + Defaults to False. - deallocate_pipeline_outputs (optional, default=False): If True, output data is deallocated after the tensor is sent + deallocate_pipeline_outputs (optional): If True, output data is deallocated after the tensor is sent to the next pipeline stage. Helps with saving memory, does nothing when pipeline parallel is not used. + Defaults to False. no_sync_func (optional): Function that creates a context that suppresses asynchronous data-parallel communication. If the model is an instance of core.distributed.DistributedDataParallel, the default is to use @@ -140,12 +142,12 @@ class ModelParallelConfig: optimizer parameter all-gathers). The function should take one argument: an iterable of parameters to be synchronized. - pipeline_model_parallel_split_rank (int, default=None): If int, rank where encoder and decoder should be split in - cases where the model has both an encoder and decoder (e.g., T5). Ignored if None. + pipeline_model_parallel_split_rank (int, optional): If int, rank where encoder and decoder should be split in + cases where the model has both an encoder and decoder (e.g., T5). Ignored if None. Defaults to None. - barrier_with_L1_time (bool, default=True): If true, use barrier with level 1 time measurements. It is up to the user + barrier_with_L1_time (bool, optional): If true, use barrier with level 1 time measurements. It is up to the user to make sure calling barrier with their timers will not result in hangs. This can happen if for example the user - adds a level 1 timer that is not called by all ranks. + adds a level 1 timer that is not called by all ranks. Defaults to True. 
""" From efb3c3a502c3efb6ca4e54f7b9cb5b927bf998dc Mon Sep 17 00:00:00 2001 From: Joel Lamy-Poirier Date: Tue, 9 Jan 2024 18:20:04 -0500 Subject: [PATCH 084/296] profiling --- megatron/arguments.py | 1 + megatron/training.py | 31 ++++++++++++++++++++++++++++++- 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/megatron/arguments.py b/megatron/arguments.py index 8f7fbc79f2..0421420e7d 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -860,6 +860,7 @@ def _add_training_args(parser): help='Global step to stop profiling.') group.add_argument('--profile-ranks', nargs='+', type=int, default=[0], help='Global ranks to profile.') + group.add_argument('--torch-profile-dir', type=str, default=None) group.add_argument('--tp-comm-overlap', action='store_true', help = 'Enables the ' ' overlap of Tensor parallel communication and GEMM kernels.') group.add_argument('--tp-comm-overlap-cfg', type=str, default=None, diff --git a/megatron/training.py b/megatron/training.py index 702f0e876a..4407a75feb 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -2,6 +2,7 @@ """Pretrain utilities.""" +import contextlib import gc from datetime import datetime import math @@ -790,7 +791,32 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, gc.disable() gc.collect() - while iteration < args.train_iters: + rank = torch.distributed.get_rank() + if args.torch_profile_dir is not None and rank in args.profile_ranks: + os.makedirs(args.torch_profile_dir, exist_ok=True) + def trace_fn(p: torch.profiler.profile): + path=os.path.join(args.torch_profile_dir, f"profile_rank_{rank}_step_{iteration}") + print(f"Saving trace to {path}") + p.export_chrome_trace(path) + + schedule = torch.profiler.schedule( + skip_first=0, + warmup=args.profile_step_start, + wait=0, + active=args.profile_step_end-args.profile_step_start, + repeat=1, + ) + profiler = torch.profiler.profile( + schedule=schedule, + activities=[torch.profiler.ProfilerActivity.CUDA], + on_trace_ready=trace_fn, + with_modules=True, + ) + else: + profiler = None + + with contextlib.nullcontext() if profiler is None else profiler: + while iteration < args.train_iters: if args.profile and \ iteration == args.profile_step_start and \ torch.distributed.get_rank() in args.profile_ranks: @@ -822,6 +848,9 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, report_memory_flag, skipped_iter, grad_norm, params_norm, num_zeros_in_grad) + if profiler is not None: + profiler.step() + save_tensor_logs(iteration) # Autoresume From 6b3b8844e5d954e51d4d0f725c8cafef6670c478 Mon Sep 17 00:00:00 2001 From: Hongbin Liu Date: Tue, 9 Jan 2024 21:00:18 -0800 Subject: [PATCH 085/296] minor fix and add parameter in argument.py Signed-off-by: Hongbin Liu --- megatron/arguments.py | 7 +++- megatron/core/fusions/fused_bias_swiglu.py | 8 +---- megatron/core/transformer/attention.py | 32 ++++++++----------- .../custom_layers/transformer_engine.py | 23 +++++++------ .../core/transformer/transformer_config.py | 8 +++-- 5 files changed, 40 insertions(+), 38 deletions(-) diff --git a/megatron/arguments.py b/megatron/arguments.py index 0bb6acf9eb..8b382376d2 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -449,7 +449,9 @@ def core_transformer_config_from_args(args): if args.swiglu: kw_args['activation_func'] = F.silu kw_args['gated_linear_unit'] = True - kw_args['bias_gelu_fusion'] = False + kw_args['bias_activation_fusion'] = args.bias_swiglu_fusion + else: + kw_args['bias_activation_fusion'] = args.bias_gelu_fusion 
if args.squared_relu: assert not args.swiglu def squared_relu(x): @@ -886,6 +888,9 @@ def _add_training_args(parser): group.add_argument('--no-bias-gelu-fusion', action='store_false', help='Disable bias and gelu fusion.', dest='bias_gelu_fusion') + group.add_argument('--no-bias-swiglu-fusion', action='store_false', + help='Disable bias and swiglu fusion.', + dest='bias_swiglu_fusion') group.add_argument('--no-bias-dropout-fusion', action='store_false', help='Disable bias and dropout fusion.', dest='bias_dropout_fusion') diff --git a/megatron/core/fusions/fused_bias_swiglu.py b/megatron/core/fusions/fused_bias_swiglu.py index bf23b6e4ae..d02fa04692 100644 --- a/megatron/core/fusions/fused_bias_swiglu.py +++ b/megatron/core/fusions/fused_bias_swiglu.py @@ -3,13 +3,7 @@ import torch import torch.nn.functional as F -###### BIAS GELU FUSION/ NO AUTOGRAD ################ -# 1/sqrt(2*pi)-> 0.3989423 -# 1/sqrt(2) -> 0.70710678 -# sqrt(2/pi) -> 0.79788456 -# this function is tanh approximation of gelu -# actual gelu is: -# x * 0.5 * (1.0 + torch.erf(x * 0.70710678)) +###### BIAS SWIGLU FUSION/ NO AUTOGRAD ################ @torch.jit.script diff --git a/megatron/core/transformer/attention.py b/megatron/core/transformer/attention.py index bc170604e0..d44335d37c 100644 --- a/megatron/core/transformer/attention.py +++ b/megatron/core/transformer/attention.py @@ -1,11 +1,15 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +import logging from abc import ABC, abstractmethod from dataclasses import dataclass -from typing import Union from importlib.metadata import version +from typing import Union + from pkg_resources import packaging +logger = logging.getLogger(__name__) + import torch try: @@ -81,22 +85,19 @@ def __init__( self.num_attention_heads_per_partition = divide(self.config.num_attention_heads, world_size) self.num_query_groups_per_partition = divide(self.config.num_query_groups, world_size) - self.qkv_format = 'sbhd' - te_version = packaging.version.Version(version("transformer-engine")) - # need Kirthi to confirm the version when bshd is supported - if ( - te_version >= packaging.version.Version("0.13.0") - and self.config.apply_rope_fusion - and HAVE_APPLY_ROPE_FUSION - ): - self.qkv_format = 'bshd' + if self.config.apply_rope_fusion and not HAVE_APPLY_ROPE_FUSION: + self.config.apply_rope_fusion = False + logger.warning( + "set apply_rope_fusion to false because its implementation" + " is not included in Apex. 
Try upgrading to the latest version" + ) + self.core_attention = build_module( submodules.core_attention, config=self.config, layer_number=self.layer_number, attn_mask_type=self.attn_mask_type, attention_type=self.attention_type, - qkv_format=self.qkv_format, ) self.checkpoint_core_attention = self.config.recompute_granularity == 'selective' @@ -264,13 +265,9 @@ def forward( # ================================================ if rotary_pos_emb is not None: q_pos_emb, k_pos_emb = rotary_pos_emb - if self.config.apply_rope_fusion and HAVE_APPLY_ROPE_FUSION: + if self.config.apply_rope_fusion: query = fused_apply_rotary_pos_emb(query, q_pos_emb, transpose_output_memory=True) key = fused_apply_rotary_pos_emb(key, k_pos_emb, transpose_output_memory=True) - if self.qkv_format == 'bshd': - query, key, value = [ - x.transpose(0, 1).contiguous() for x in (query, key, value) - ] else: query = apply_rotary_pos_emb(query, q_pos_emb) key = apply_rotary_pos_emb(key, k_pos_emb) @@ -292,9 +289,6 @@ def forward( query, key, value, attention_mask, attn_mask_type=attn_mask_type ) - if self.qkv_format == 'bshd': - core_attn_out = core_attn_out.transpose(0, 1) - # ================= # Output. [sq, b, h] # ================= diff --git a/megatron/core/transformer/custom_layers/transformer_engine.py b/megatron/core/transformer/custom_layers/transformer_engine.py index ee40197f43..0ca48a0a2c 100644 --- a/megatron/core/transformer/custom_layers/transformer_engine.py +++ b/megatron/core/transformer/custom_layers/transformer_engine.py @@ -41,10 +41,7 @@ class TENorm: # TODO should we ditch normalization config and just use spec to choose LayerNorm vs RMSNorm? def __new__( - cls, - config: TransformerConfig, - hidden_size: int, - eps: float = 1e-5, + cls, config: TransformerConfig, hidden_size: int, eps: float = 1e-5, ): if config.normalization == "LayerNorm": instance = te.pytorch.LayerNorm( @@ -356,10 +353,10 @@ def __init__( attn_mask_type: AttnMaskType, attention_type: str, attention_dropout: float = None, - qkv_format: str = 'sbhd', ): self.config = config self.te_forward_mask_type = False + self.qkv_format = 'sbhd' if self.config.apply_query_key_layer_scaling != bool( int(os.getenv('NVTE_APPLY_QK_LAYER_SCALING', '0')) @@ -390,8 +387,8 @@ def __init__( if te_version > packaging.version.Version("0.12.0"): self.te_forward_mask_type = True - if te_version > packaging.version.Version("0.13.0"): - extra_kwargs["qkv_format"] = qkv_format + if self.config.apply_rope_fusion and te_version > packaging.version.Version("0.13.0"): + extra_kwargs["qkv_format"] = self.qkv_format = 'bshd' # Only Transformer-Engine version >= 1.0.0 supports context parallelism if te_version >= packaging.version.Version("1.0.0"): @@ -430,12 +427,20 @@ def forward( attention_mask: Tensor, attn_mask_type: AttnMaskType, ): + if self.config.apply_rope_fusion and self.qkv_format == 'bshd': + query, key, value = [x.transpose(0, 1).contiguous() for x in (query, key, value)] + if self.te_forward_mask_type: - return super().forward( + core_attn_out = super().forward( query, key, value, attention_mask, attn_mask_type=attn_mask_type.name ) else: - return super().forward(query, key, value, attention_mask) + core_attn_out = super().forward(query, key, value, attention_mask) + + if self.config.apply_rope_fusion and self.qkv_format == 'bshd': + return core_attn_out.transpose(0, 1) + else: + return core_attn_out try: diff --git a/megatron/core/transformer/transformer_config.py b/megatron/core/transformer/transformer_config.py index a4273f6cf8..17f8d26340 100644 
--- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -192,8 +192,12 @@ def __post_init__(self): if self.apply_query_key_layer_scaling: self.attention_softmax_in_fp32 = True - if self.bias_activation_fusion and self.activation_func == F.gelu: - if not self.add_bias_linear: + if self.bias_activation_fusion: + if self.activation_func not in [F.gelu, F.silu]: + raise ValueError( + "When bias_activation_fusion is True, activation function should be either gelu or swiglu" + ) + if self.activation_func == F.gelu and not self.add_bias_linear: raise ValueError( "When bias_activation_fusion is True and activation function is gelu, add_bias_linear must also be True." ) From 46f12487cd797afab50cf1b0c97adf2142903d8d Mon Sep 17 00:00:00 2001 From: Selvaraj Anandaraj Date: Wed, 10 Jan 2024 15:35:48 -0800 Subject: [PATCH 086/296] Added switches for weight/activation offloading, changed code structure as needed for TE, fixed MR based issues Signed-off-by: Selvaraj Anandaraj --- megatron/core/__init__.py | 1 - megatron/core/tensor_parallel/layers.py | 11 +++++++++++ .../custom_layers/transformer_engine.py | 4 ++-- megatron/core/transformer/transformer_block.py | 14 ++++++++++---- megatron/core/transformer/transformer_config.py | 8 +++++++- 5 files changed, 30 insertions(+), 8 deletions(-) diff --git a/megatron/core/__init__.py b/megatron/core/__init__.py index 85ed72a997..2858dc692d 100644 --- a/megatron/core/__init__.py +++ b/megatron/core/__init__.py @@ -12,7 +12,6 @@ "parallel_state", "tensor_parallel", "utils", - "cpu_offload", "DistributedDataParallel", "InferenceParams", "ModelParallelConfig", diff --git a/megatron/core/tensor_parallel/layers.py b/megatron/core/tensor_parallel/layers.py index 38379cb34d..6291097c3f 100644 --- a/megatron/core/tensor_parallel/layers.py +++ b/megatron/core/tensor_parallel/layers.py @@ -721,6 +721,11 @@ def forward(self, input_: torch.Tensor, weight: Optional[torch.Tensor] = None): f"not {expected_shape} as expected" ) + if self.config.cpu_offloading_context is not None: + if self.config.cpu_offloading_context.inside_context == True: + assert self.config.cpu_offloading == False, \ + "CPU Offloading cannot be enabled while using non-TE modules" + bias = self.bias if not self.skip_bias_add else None if ( @@ -888,6 +893,12 @@ def forward(self, input_): - output - bias """ + + if self.config.cpu_offloading_context is not None: + if self.config.cpu_offloading_context.inside_context == True: + assert self.config.cpu_offloading == False, \ + "CPU Offloading cannot be enabled while using non-TE modules" + # Set up backprop all-reduce. 
if self.input_is_parallel: input_parallel = input_ diff --git a/megatron/core/transformer/custom_layers/transformer_engine.py b/megatron/core/transformer/custom_layers/transformer_engine.py index 0f0f88cee7..ab2e853e43 100644 --- a/megatron/core/transformer/custom_layers/transformer_engine.py +++ b/megatron/core/transformer/custom_layers/transformer_engine.py @@ -122,7 +122,7 @@ def __init__( out_features=output_size, sequence_parallel=self.config.sequence_parallel, fuse_wgrad_accumulation=self.config.gradient_accumulation_fusion, - cpu_offloading=self.config.cpu_offloading, + cpu_offloading_context=self.config.cpu_offloading_context, tp_group=get_tensor_model_parallel_group(check_initialized=False), tp_size=self.config.tensor_model_parallel_size, get_rng_state_tracker=get_cuda_rng_tracker, @@ -212,7 +212,7 @@ def __init__( eps=self.config.layernorm_epsilon, sequence_parallel=self.config.sequence_parallel, fuse_wgrad_accumulation=self.config.gradient_accumulation_fusion, - cpu_offloading=self.config.cpu_offloading, + cpu_offloading_context=self.config.cpu_offloading_context, tp_group=get_tensor_model_parallel_group(check_initialized=False), tp_size=self.config.tensor_model_parallel_size, get_rng_state_tracker=get_cuda_rng_tracker, diff --git a/megatron/core/transformer/transformer_block.py b/megatron/core/transformer/transformer_block.py index 010caeb116..4efcaaeaa0 100644 --- a/megatron/core/transformer/transformer_block.py +++ b/megatron/core/transformer/transformer_block.py @@ -103,16 +103,22 @@ def __init__( self.checkpoint_core_attention = self.config.recompute_granularity == 'selective' - self._build_layers() - self.num_layers_per_pipeline_rank = len(self.layers) - if get_cpu_offload_context is not None: self.offload_context, self.group_prefetch_offload_commit_async = get_cpu_offload_context( self.config.cpu_offloading, - self.config.cpu_offloading_num_layers + self.config.cpu_offloading_num_layers, + self.config.cpu_offloading_activations, + self.config.cpu_offloading_weights ) + self.config.cpu_offloading_context = self.offload_context if self.config.cpu_offloading else None else: + assert self.config.cpu_offloading == False, "CPU Offloading is enabled when TE is not present" + self.offload_context, self.group_prefetch_offload_commit_async = nullcontext(), None + self.config.cpu_offloading_context = None + + self._build_layers() + self.num_layers_per_pipeline_rank = len(self.layers) def _build_layers(self): # Transformer layers. diff --git a/megatron/core/transformer/transformer_config.py b/megatron/core/transformer/transformer_config.py index df3398d29a..988926aee7 100644 --- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -2,7 +2,7 @@ import types from dataclasses import dataclass -from typing import Callable +from typing import Callable, ContextManager import torch import torch.nn.functional as F @@ -53,6 +53,9 @@ class TransformerConfig(ModelParallelConfig): fp8_wgrad (bool): When set to False, override FP8 config options and do the wgrad computation in higher precision. Defaults to True. cpu_offloading (bool): When set to True, all the activations are offloaded to the CPU asynchronously cpu_offloading_num_layers (int): Tells the number of transformer layers for which activations has to be offloaded. + cpu_offloading_context (ContextManager): Holds the context manager from TE which is supposed to add PyT hooks for offload/reload of data from CPU. 
+ cpu_offloading_activations (bool): If True, offloads the activations to CPU + cpu_offloading_weights (bool): If True, offloads the weights to CPU clone_scatter_output_in_embedding (bool): When set to true, clone the output of scatter_to_sequence_parallel_region in embedding layer to facilitate garbage collection of input. normalization (str): Swtich b/w `LayerNorm` and `RMSNorm` as normalization layers. For now, these are primarily used by Transformer-Engine's layers like `LayerNormLinear`. Default value is `LayerNorm`. """ @@ -111,6 +114,9 @@ class TransformerConfig(ModelParallelConfig): # cpu offload cpu_offloading: bool = False cpu_offloading_num_layers: int = 0 + cpu_offloading_context: ContextManager = None + cpu_offloading_activations: bool = True + cpu_offloading_weights: bool = True # miscellaneous clone_scatter_output_in_embedding: bool = True From 9aa1afabb98c91e2ac13fd51cb192ca87ac35599 Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Mon, 6 Nov 2023 05:04:18 -0800 Subject: [PATCH 087/296] Add Grouped GEMM for MoE. --- megatron/arguments.py | 6 ++ .../core/transformer/grouped_gemm_util.py | 16 +++++ megatron/core/transformer/switch_mlp.py | 63 +++++++++++++++---- .../core/transformer/transformer_config.py | 2 + 4 files changed, 74 insertions(+), 13 deletions(-) create mode 100644 megatron/core/transformer/grouped_gemm_util.py diff --git a/megatron/arguments.py b/megatron/arguments.py index 0bb6acf9eb..fd0f67c5c5 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -650,6 +650,12 @@ def _add_network_size_args(parser): dest='bert_binary_head') group.add_argument('--num-experts', type=int, default=None, help='Number of Experts in Switch Transformer (None means no Switch)') + group.add_argument('--moe-grouped-gemm', action='store_true', + help='When there are multiple experts per rank, compress ' + 'multiple local (potentially small) gemms in a single kernel ' + 'launch to improve the utilization and performance by ' + 'leveraging the Grouped GEMM feature introduced since ' + 'CUTLASS 2.8 (https://github.com/tgale96/grouped_gemm).') group.add_argument('--untie-embeddings-and-output-weights', action='store_true', help='Untie embeddings and output weights.'), return parser diff --git a/megatron/core/transformer/grouped_gemm_util.py b/megatron/core/transformer/grouped_gemm_util.py new file mode 100644 index 0000000000..fc2750e2dc --- /dev/null +++ b/megatron/core/transformer/grouped_gemm_util.py @@ -0,0 +1,16 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + +try: + import grouped_gemm +except ImportError: + grouped_gemm = None + +def grouped_gemm_is_available(): + return grouped_gemm is not None + +def assert_grouped_gemm_is_available(): + assert grouped_gemm_is_available(), ( + "Grouped GEMM not available. Please run " + "`pip install git+https://github.com/tgale96/grouped_gemm@main`.") + +ops = grouped_gemm.ops if grouped_gemm_is_available() else None \ No newline at end of file diff --git a/megatron/core/transformer/switch_mlp.py b/megatron/core/transformer/switch_mlp.py index 092c6c6402..47c0523c84 100644 --- a/megatron/core/transformer/switch_mlp.py +++ b/megatron/core/transformer/switch_mlp.py @@ -1,5 +1,6 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. 
+import numpy as np import torch from megatron.core import parallel_state, tensor_parallel @@ -8,6 +9,7 @@ get_tensor_model_parallel_group, ) from megatron.core.tensor_parallel import get_cuda_rng_tracker, get_data_parallel_rng_tracker_name +from megatron.core.transformer import grouped_gemm_util as gg from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.transformer_config import TransformerConfig @@ -67,9 +69,18 @@ def __init__(self, config: TransformerConfig, submodules: MLPSubmodules): ] self.local_experts = torch.nn.ModuleList() + self.fc1_grouped_weight = [] + self.fc2_grouped_weight = [] for _ in range(self.num_local_experts): expert = MLP(self.config, submodules, is_expert=True) + self.fc1_grouped_weight.append(expert.linear_fc1.weight) + self.fc2_grouped_weight.append(expert.linear_fc2.weight) self.local_experts.append(expert) + # fc1_grouped_weight: [num_local_experts, ffn_hidden_size, hidden_size] + # fc2_grouped_weight: [num_local_experts, hidden_size, ffn_hidden_size] + self.fc1_grouped_weight = torch.stack(self.fc1_grouped_weight) + self.fc2_grouped_weight = torch.stack(self.fc2_grouped_weight) + self.activation_func = self.local_experts[0].activation_func def gather_indices(self, local_indices): """ Gather tensors and concatenate along the first dimension.""" @@ -118,20 +129,46 @@ def forward(self, hidden_states): global_hidden_states = hidden_states global_indices = max_ind - output_total = torch.zeros_like(global_hidden_states) - if self.add_bias: - output_bias_total = torch.zeros_like(global_hidden_states) - - for expert_num, expert in enumerate(self.local_experts): - local_expert_index = self.local_expert_indices[expert_num] - local_indices = (global_indices == local_expert_index).nonzero() - hidden = global_hidden_states[local_indices, :] - output, output_bias = expert(hidden) - - output_total[local_indices, :] = output + if self.config.moe_grouped_gemm: + with torch.no_grad(): + sorted, indices = torch.sort(global_indices, stable=True) + # Permutation of tokens + sorted_global_hidden_states = global_hidden_states[indices] + # Histogram the expert ids to identify the number of tokens routed to each expert + # Note that for np.histogram, all but the last (righthand-most) bin is half-open. 
+ tokens_per_expert, bin_edges = np.histogram( + sorted.cpu(), + bins=np.arange(self.config.num_moe_experts + 1)) + tokens_per_expert = torch.tensor(tokens_per_expert) + reverse_indices = indices.argsort() + fc1_output = gg.ops.gmm( + sorted_global_hidden_states, + self.fc1_grouped_weight, + tokens_per_expert, + trans_b=True) + intermediate_parallel = self.activation_func(fc1_output) + fc2_output = gg.ops.gmm( + intermediate_parallel, + self.fc2_grouped_weight, + tokens_per_expert, + trans_b=True) + # Un-permutation of tokens + output_total = fc2_output[reverse_indices] + else: + output_total = torch.zeros_like(global_hidden_states) if self.add_bias: - output_bias = output_bias.expand_as(output) - output_bias_total[local_indices, :] = output_bias + output_bias_total = torch.zeros_like(global_hidden_states) + + for expert_num, expert in enumerate(self.local_experts): + local_expert_index = self.local_expert_indices[expert_num] + local_indices = (global_indices == local_expert_index).nonzero() + hidden = global_hidden_states[local_indices, :] + output, output_bias = expert(hidden) + + output_total[local_indices, :] = output + if self.add_bias: + output_bias = output_bias.expand_as(output) + output_bias_total[local_indices, :] = output_bias if self.sequence_parallel or (self.expert_parallel_size > 1): output_total = tensor_parallel.reduce_scatter_to_sequence_parallel_region_from_moe( diff --git a/megatron/core/transformer/transformer_config.py b/megatron/core/transformer/transformer_config.py index 47647e657a..3bf2d70aa0 100644 --- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -111,6 +111,8 @@ class TransformerConfig(ModelParallelConfig): # experimental section (TODO: move to apt. section above once stable) normalization: bool = "LayerNorm" # alt value supported by TE: "RMSNorm" + # MoE related + moe_grouped_gemm: bool = False def __post_init__(self): """ Python dataclass method that is used to modify attributes after initialization. From d81a037afd9b7577bb8d7081ea9200571d8073d6 Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Wed, 8 Nov 2023 03:21:27 -0800 Subject: [PATCH 088/296] MoE grouped gemm: (1) create and init moe weights per rank in SwitchMLP; (2) scale bwd GroupedGEMM by 1/tp_ep_size for correctness. 
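For reference, a minimal standalone sketch of the gradient-scaling trick this change relies on: an autograd function that is the identity in the forward pass and multiplies the incoming gradient by 1/(tp_size * ep_size) in the backward pass, presumably so the expert weight grads stay consistent after the reduction over the combined tensor-and-expert-parallel group (the "for correctness" in the subject). The parallel sizes below are made-up values; the real code takes the factor from parallel_state.get_tensor_and_expert_parallel_world_size() and also wraps forward/backward in the AMP custom_fwd/custom_bwd decorators, which this sketch omits.

    import torch

    class ScaleGradient(torch.autograd.Function):
        # Identity in the forward pass; scales the incoming gradient in backward.
        @staticmethod
        def forward(ctx, x, scale):
            ctx.scale = scale
            return x

        @staticmethod
        def backward(ctx, grad):
            return grad * ctx.scale, None

    tp_size, ep_size = 2, 4            # hypothetical tensor-/expert-parallel sizes
    scale = 1.0 / (tp_size * ep_size)  # 1/8 for this made-up configuration

    w = torch.randn(4, 4, requires_grad=True)
    ScaleGradient.apply(w, scale).sum().backward()
    # The gradient of .sum() is all ones, so after scaling every entry equals `scale`.
    assert torch.allclose(w.grad, torch.full_like(w, scale))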
--- megatron/core/parallel_state.py | 9 ++ megatron/core/transformer/switch_mlp.py | 141 ++++++++++++++++++------ 2 files changed, 117 insertions(+), 33 deletions(-) diff --git a/megatron/core/parallel_state.py b/megatron/core/parallel_state.py index 5652b20846..40923a6576 100644 --- a/megatron/core/parallel_state.py +++ b/megatron/core/parallel_state.py @@ -897,6 +897,15 @@ def get_expert_model_parallel_world_size(): else: return 0 +def get_tensor_and_expert_parallel_world_size(): + """Return my rank for the expert parallel group""" + if torch.distributed.is_available() and torch.distributed.is_initialized(): + tensor_and_expert_parallel_world_size = torch.distributed.get_world_size( + group=get_tensor_and_expert_parallel_group() + ) + return tensor_and_expert_parallel_world_size + else: + return 0 def get_expert_model_parallel_rank(): """Return my rank for the expert parallel group""" diff --git a/megatron/core/transformer/switch_mlp.py b/megatron/core/transformer/switch_mlp.py index 47c0523c84..2f15b53b28 100644 --- a/megatron/core/transformer/switch_mlp.py +++ b/megatron/core/transformer/switch_mlp.py @@ -2,6 +2,7 @@ import numpy as np import torch +from torch.nn.parameter import Parameter from megatron.core import parallel_state, tensor_parallel from megatron.core.parallel_state import ( @@ -10,6 +11,9 @@ ) from megatron.core.tensor_parallel import get_cuda_rng_tracker, get_data_parallel_rng_tracker_name from megatron.core.transformer import grouped_gemm_util as gg +from megatron.core.tensor_parallel.layers import _initialize_affine_weight_gpu +from megatron.core.tensor_parallel.utils import divide +from megatron.core.transformer import grouped_gemm_util as gg from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.transformer_config import TransformerConfig @@ -32,6 +36,19 @@ def sinkhorn(cost, tol=0.0001): d1_old = d1 return d1 * cost * d0.unsqueeze(1) +class ScaleGradient(torch.autograd.Function): + + @staticmethod + @torch.cuda.amp.custom_fwd + def forward(ctx, x, scale): + ctx.scale = scale + return x + + @staticmethod + @torch.cuda.amp.custom_bwd + def backward(ctx, grad): + return grad * ctx.scale, None +scale_gradient = ScaleGradient.apply def get_router_linear_layer(config): router = torch.nn.Linear(config.hidden_size, config.num_moe_experts, bias=False) @@ -68,19 +85,68 @@ def __init__(self, config: TransformerConfig, submodules: MLPSubmodules): local_expert_indices_offset + i for i in range(self.num_local_experts) ] - self.local_experts = torch.nn.ModuleList() - self.fc1_grouped_weight = [] - self.fc2_grouped_weight = [] - for _ in range(self.num_local_experts): - expert = MLP(self.config, submodules, is_expert=True) - self.fc1_grouped_weight.append(expert.linear_fc1.weight) - self.fc2_grouped_weight.append(expert.linear_fc2.weight) - self.local_experts.append(expert) - # fc1_grouped_weight: [num_local_experts, ffn_hidden_size, hidden_size] - # fc2_grouped_weight: [num_local_experts, hidden_size, ffn_hidden_size] - self.fc1_grouped_weight = torch.stack(self.fc1_grouped_weight) - self.fc2_grouped_weight = torch.stack(self.fc2_grouped_weight) - self.activation_func = self.local_experts[0].activation_func + if not self.config.moe_grouped_gemm: + self.local_experts = torch.nn.ModuleList() + for _ in range(self.num_local_experts): + expert = MLP(self.config, submodules, is_expert=True) + self.local_experts.append(expert) + else: + self.expert_parallel = config.expert_model_parallel_size > 1 + self.gradient_scale = 1 / 
parallel_state.get_tensor_and_expert_parallel_world_size() + if self.config.gated_linear_unit: + def glu(x): + x = torch.chunk(x, 2, dim=-1) + return self.config.activation_func(x[0]) * x[1] + + self.activation_func = glu + else: + self.activation_func = self.config.activation_func + + assert not config.use_cpu_initialization + # How many feature each rank holds + tp_size = parallel_state.get_tensor_model_parallel_world_size() + ffn_hs_per_expert_per_partition = divide(self.config.ffn_hidden_size, tp_size) + output_size_per_partition = self.num_local_experts * ffn_hs_per_expert_per_partition + fc1_output_size_per_partition = output_size_per_partition + if config.gated_linear_unit: + fc1_output_size_per_partition *= 2 + + self.weight1 = Parameter( + torch.empty( + fc1_output_size_per_partition, + self.config.hidden_size, + device=torch.cuda.current_device(), + dtype=config.params_dtype, + ) + ) + self.weight2 = Parameter( + torch.empty( + output_size_per_partition, + self.config.hidden_size, + device=torch.cuda.current_device(), + dtype=config.params_dtype, + ) + ) + if config.perform_initialization: + _initialize_affine_weight_gpu( + self.weight1, + config.init_method, + partition_dim=0, + expert_parallel=self.expert_parallel, + ) + _initialize_affine_weight_gpu( + self.weight2, + config.output_layer_init_method, + partition_dim=0, + expert_parallel=self.expert_parallel, + ) + setattr(self.weight1, 'allreduce', not self.expert_parallel) + setattr(self.weight2, 'allreduce', not self.expert_parallel) + + def scale_grad(self, w): + if self.gradient_scale is None: + return w + return scale_gradient(w, self.gradient_scale) def gather_indices(self, local_indices): """ Gather tensors and concatenate along the first dimension.""" @@ -129,7 +195,23 @@ def forward(self, hidden_states): global_hidden_states = hidden_states global_indices = max_ind - if self.config.moe_grouped_gemm: + if not self.config.moe_grouped_gemm: + output_total = torch.zeros_like(global_hidden_states) + if self.add_bias: + output_bias_total = torch.zeros_like(global_hidden_states) + + + for expert_num, expert in enumerate(self.local_experts): + local_expert_index = self.local_expert_indices[expert_num] + local_indices = (global_indices == local_expert_index).nonzero() + hidden = global_hidden_states[local_indices, :] + output, output_bias = expert(hidden) + + output_total[local_indices, :] = output + if self.add_bias: + output_bias = output_bias.expand_as(output) + output_bias_total[local_indices, :] = output_bias + else: with torch.no_grad(): sorted, indices = torch.sort(global_indices, stable=True) # Permutation of tokens @@ -139,36 +221,29 @@ def forward(self, hidden_states): tokens_per_expert, bin_edges = np.histogram( sorted.cpu(), bins=np.arange(self.config.num_moe_experts + 1)) - tokens_per_expert = torch.tensor(tokens_per_expert) + tokens_per_expert = torch.tensor(tokens_per_expert).to(torch.long) reverse_indices = indices.argsort() + + w1, w2 = (self.scale_grad(self.weight1), self.scale_grad(self.weight2)) + # Reshape the weights for the grouped GEMMs. 
+ w1 = w1.view(self.num_local_experts, -1, self.config.hidden_size) + w2 = w2.view(self.num_local_experts, -1, self.config.hidden_size) + fc1_output = gg.ops.gmm( sorted_global_hidden_states, - self.fc1_grouped_weight, + w1, tokens_per_expert, trans_b=True) + intermediate_parallel = self.activation_func(fc1_output) + fc2_output = gg.ops.gmm( intermediate_parallel, - self.fc2_grouped_weight, + w2, tokens_per_expert, - trans_b=True) + trans_b=False) # Un-permutation of tokens output_total = fc2_output[reverse_indices] - else: - output_total = torch.zeros_like(global_hidden_states) - if self.add_bias: - output_bias_total = torch.zeros_like(global_hidden_states) - - for expert_num, expert in enumerate(self.local_experts): - local_expert_index = self.local_expert_indices[expert_num] - local_indices = (global_indices == local_expert_index).nonzero() - hidden = global_hidden_states[local_indices, :] - output, output_bias = expert(hidden) - - output_total[local_indices, :] = output - if self.add_bias: - output_bias = output_bias.expand_as(output) - output_bias_total[local_indices, :] = output_bias if self.sequence_parallel or (self.expert_parallel_size > 1): output_total = tensor_parallel.reduce_scatter_to_sequence_parallel_region_from_moe( From b1d80ff602c0a65a8f79a99a75de0cab02ff4392 Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Tue, 14 Nov 2023 12:43:30 +0000 Subject: [PATCH 089/296] MoE grouped GEMM: add UTs --- megatron/arguments.py | 5 +- .../core/transformer/grouped_gemm_util.py | 2 +- megatron/core/transformer/switch_mlp.py | 9 +- .../core/transformer/transformer_config.py | 2 + .../transformer/test_grouped_gemm.py | 124 ++++++++++++++++++ 5 files changed, 136 insertions(+), 6 deletions(-) create mode 100644 tests/unit_tests/transformer/test_grouped_gemm.py diff --git a/megatron/arguments.py b/megatron/arguments.py index fd0f67c5c5..6d4fcd6ca8 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -291,6 +291,9 @@ def validate_args(args, defaults={}): assert args.fp16 or args.bf16, \ 'residual connection in fp32 only supported when using fp16 or bf16.' + if args.moe_grouped_gemm: + assert args.bf16, 'Currently GroupedGEMM for MoE only supports bf16 dtype.' + if args.weight_decay_incr_style == 'constant': assert args.start_weight_decay is None assert args.end_weight_decay is None @@ -655,7 +658,7 @@ def _add_network_size_args(parser): 'multiple local (potentially small) gemms in a single kernel ' 'launch to improve the utilization and performance by ' 'leveraging the Grouped GEMM feature introduced since ' - 'CUTLASS 2.8 (https://github.com/tgale96/grouped_gemm).') + 'CUTLASS 2.8 (https://github.com/fanshiqing/grouped_gemm).') group.add_argument('--untie-embeddings-and-output-weights', action='store_true', help='Untie embeddings and output weights.'), return parser diff --git a/megatron/core/transformer/grouped_gemm_util.py b/megatron/core/transformer/grouped_gemm_util.py index fc2750e2dc..b4b09e170f 100644 --- a/megatron/core/transformer/grouped_gemm_util.py +++ b/megatron/core/transformer/grouped_gemm_util.py @@ -10,7 +10,7 @@ def grouped_gemm_is_available(): def assert_grouped_gemm_is_available(): assert grouped_gemm_is_available(), ( - "Grouped GEMM not available. Please run " + "Grouped GEMM is not available. 
Please run " "`pip install git+https://github.com/tgale96/grouped_gemm@main`.") ops = grouped_gemm.ops if grouped_gemm_is_available() else None \ No newline at end of file diff --git a/megatron/core/transformer/switch_mlp.py b/megatron/core/transformer/switch_mlp.py index 2f15b53b28..10944c5203 100644 --- a/megatron/core/transformer/switch_mlp.py +++ b/megatron/core/transformer/switch_mlp.py @@ -91,6 +91,7 @@ def __init__(self, config: TransformerConfig, submodules: MLPSubmodules): expert = MLP(self.config, submodules, is_expert=True) self.local_experts.append(expert) else: + gg.assert_grouped_gemm_is_available() self.expert_parallel = config.expert_model_parallel_size > 1 self.gradient_scale = 1 / parallel_state.get_tensor_and_expert_parallel_world_size() if self.config.gated_linear_unit: @@ -121,8 +122,8 @@ def glu(x): ) self.weight2 = Parameter( torch.empty( - output_size_per_partition, self.config.hidden_size, + output_size_per_partition, device=torch.cuda.current_device(), dtype=config.params_dtype, ) @@ -137,7 +138,7 @@ def glu(x): _initialize_affine_weight_gpu( self.weight2, config.output_layer_init_method, - partition_dim=0, + partition_dim=1, expert_parallel=self.expert_parallel, ) setattr(self.weight1, 'allreduce', not self.expert_parallel) @@ -227,7 +228,7 @@ def forward(self, hidden_states): w1, w2 = (self.scale_grad(self.weight1), self.scale_grad(self.weight2)) # Reshape the weights for the grouped GEMMs. w1 = w1.view(self.num_local_experts, -1, self.config.hidden_size) - w2 = w2.view(self.num_local_experts, -1, self.config.hidden_size) + w2 = w2.view(self.num_local_experts, self.config.hidden_size, -1) fc1_output = gg.ops.gmm( sorted_global_hidden_states, @@ -241,7 +242,7 @@ def forward(self, hidden_states): intermediate_parallel, w2, tokens_per_expert, - trans_b=False) + trans_b=True) # Un-permutation of tokens output_total = fc2_output[reverse_indices] diff --git a/megatron/core/transformer/transformer_config.py b/megatron/core/transformer/transformer_config.py index 3bf2d70aa0..fd1ae87f64 100644 --- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -53,6 +53,8 @@ class TransformerConfig(ModelParallelConfig): fp8_wgrad (bool): When set to False, override FP8 config options and do the wgrad computation in higher precision. Defaults to True. clone_scatter_output_in_embedding (bool): When set to true, clone the output of scatter_to_sequence_parallel_region in embedding layer to facilitate garbage collection of input. normalization (str): Swtich b/w `LayerNorm` and `RMSNorm` as normalization layers. For now, these are primarily used by Transformer-Engine's layers like `LayerNormLinear`. Default value is `LayerNorm`. + moe_grouped_gemm (bool): When there are multiple experts per rank, compress multiple local (potentially small) + gemms in a single kernel launch to improve the utilization and performance by leveraging the Grouped GEMM feature introduced since CUTLASS 2.8 (https://github.com/fanshiqing/grouped_gemm). """ # model architecture diff --git a/tests/unit_tests/transformer/test_grouped_gemm.py b/tests/unit_tests/transformer/test_grouped_gemm.py new file mode 100644 index 0000000000..9eea8a2b36 --- /dev/null +++ b/tests/unit_tests/transformer/test_grouped_gemm.py @@ -0,0 +1,124 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. 
+ +import pytest + +import torch + +from megatron.arguments import parse_args +from megatron.core.models.gpt.gpt_layer_specs import gpt_layer_with_transformer_engine_spec_moe +from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed +from megatron.core.transformer.switch_mlp import SwitchMLP +from megatron.core.transformer.transformer_config import TransformerConfig +from megatron.model import Float16Module +from tests.unit_tests.test_utilities import Utils + +class TestParallelSwitchMLP: + + def setup_method(self, method): + Utils.initialize_model_parallel(1,1) + num_layers=1 # 2 + self.hidden_size=2 # 12 + self.num_experts = 2 + + # Vanilla sequential GEMM + model_parallel_cuda_manual_seed(123) + tf_config_smm = TransformerConfig( + num_layers=num_layers, hidden_size=self.hidden_size, num_attention_heads=4, + num_moe_experts=self.num_experts, use_cpu_initialization=False, add_bias_linear=False, + bf16=True, params_dtype=torch.bfloat16, + moe_grouped_gemm=False) + self.switch_mlp_smm = SwitchMLP(tf_config_smm, + gpt_layer_with_transformer_engine_spec_moe.submodules.mlp.submodules) + + self.args = parse_args(extra_args_provider=None, ignore_unknown_args=False) + self.args.bf16=True + # Bias is not supported in grouped gemm currently, thus we disable the + # bias in the linear layer. + self.args.add_bias_linear=False + self.switch_mlp_smm = Float16Module(self.switch_mlp_smm, self.args).module + print("done intializing for sequential gemm") + + # Grouped GEMM + model_parallel_cuda_manual_seed(123) + tf_config_gmm = TransformerConfig( + num_layers=num_layers, hidden_size=self.hidden_size, num_attention_heads=4, + num_moe_experts=self.num_experts, use_cpu_initialization=False, add_bias_linear=False, + bf16=True, # Currently GroupedGEMM only supports bf16. + params_dtype=torch.bfloat16, + moe_grouped_gemm=True) + self.switch_mlp_gmm = SwitchMLP(tf_config_gmm, + gpt_layer_with_transformer_engine_spec_moe.submodules.mlp.submodules) + self.switch_mlp_gmm = Float16Module(self.switch_mlp_gmm, self.args).module + print("done intializing for grouped gemm") + + def teardown_method(self, method): + Utils.destroy_model_parallel() + + def test_constructor(self): + assert isinstance(self.switch_mlp_smm, SwitchMLP) + assert isinstance(self.switch_mlp_gmm, SwitchMLP) + + num_weights_smm = sum([p.numel() for p in self.switch_mlp_smm.parameters()]) + num_weights_gmm = sum([p.numel() for p in self.switch_mlp_gmm.parameters()]) + + # For the same hyper-parm model configs except the `moe_grouped_gemm`, + # GroupedGEMM and sequential GEMMs should hold the same number of parms. 
+ assert num_weights_smm == num_weights_gmm + + # TODO: The param init value is not exactly the same between gmm and smm + # assert torch.equal(self.switch_mlp_smm.router.weight, self.switch_mlp_gmm.router.weight) + # assert num_weights_smm == 2330, 'num_weights_sm=', num_weights_smm + + # weight1: [num_experts*4h, h] + # weight2: [num_experts, h, 4h] + assert self.switch_mlp_gmm.weight1.shape[0] == self.num_experts * 4 * self.hidden_size + assert self.switch_mlp_gmm.weight1.shape[1] == self.hidden_size + assert self.switch_mlp_gmm.weight1.shape == \ + self.switch_mlp_gmm.weight2.t().shape + + def test_weight_init_value_the_same(self): + gmm_w1 = self.switch_mlp_gmm.weight1.view(self.num_experts, -1, self.hidden_size) + gmm_w2 = self.switch_mlp_gmm.weight2.view(self.num_experts, self.hidden_size, -1) + gmm_expert0_fc1 = gmm_w1[0] + gmm_expert0_fc2 = gmm_w2[0] + gmm_expert1_fc1 = gmm_w1[1] + gmm_expert1_fc2 = gmm_w2[1] + + smm_expert0_fc1 = self.switch_mlp_smm.local_experts[0].linear_fc1.weight + smm_expert0_fc2 = self.switch_mlp_smm.local_experts[0].linear_fc2.weight + smm_expert1_fc1 = self.switch_mlp_smm.local_experts[1].linear_fc1.weight + smm_expert1_fc2 = self.switch_mlp_smm.local_experts[1].linear_fc2.weight + + assert torch.equal(gmm_expert0_fc1, smm_expert0_fc1) + assert torch.equal(gmm_expert0_fc2, smm_expert0_fc2) + # the param init value is not exactly the same between gmm and smm (refer to test_weight_init_value_the_same.) + # TODO: is it necessary to keep smm and gmm share exactly the same init params? + # assert torch.equal(gmm_expert1_fc1, smm_expert1_fc1) + # assert torch.equal(gmm_expert1_fc2, smm_expert1_fc2) + + @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") + def test_gpu_forward(self): + self.switch_mlp_smm.cuda() + self.switch_mlp_gmm.cuda() + # [sequence length, batch size, hidden size] + seq_len = 3 #32 + batch_size = 2 + hidden_states = torch.ones( + (seq_len, batch_size, self.switch_mlp_smm.config.hidden_size), + dtype=torch.bfloat16) + hidden_states = hidden_states.cuda() + output_smm, _ = self.switch_mlp_smm(hidden_states) + output_gmm, _ = self.switch_mlp_gmm(hidden_states) + + # The following assert fails due to two reasons: + # (i) the param init value is not exactly the same between gmm and smm (refer to test_weight_init_value_the_same.) + # (ii) the router weight init value is not fixed in this UT. + # assert torch.equal(output_smm, output_gmm),print(output_smm, output_gmm) + +if __name__ == "__main__": + SMLP_test = TestParallelSwitchMLP() + SMLP_test.setup_method(method=None) + SMLP_test.test_constructor() + SMLP_test.test_weight_init_value_the_same() + SMLP_test.test_gpu_forward() + SMLP_test.teardown_method(method=None) \ No newline at end of file From f5b820bb969f1890432eca5daadd6069ed1987c0 Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Tue, 14 Nov 2023 18:38:49 -0800 Subject: [PATCH 090/296] MoE grouped GEMM: set torch random seed for reproducability. 
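For context, the seeding pattern this change relies on is just "re-seed, then construct": each SwitchMLP variant is built immediately after _set_random_seed(seed_=123, ...), which is what makes the test repeatable (the test's own comments note the grouped and sequential variants still do not end up with exactly matching weights). Below is a toy, single-process version of the same pattern; it ignores the per-rank model-parallel RNG trackers that _set_random_seed additionally manages, and build_linear is a made-up helper for illustration only.

    import torch

    def build_linear(seed: int) -> torch.nn.Linear:
        # Re-seed before constructing so two separate builds end up with
        # bit-identical parameters.
        torch.manual_seed(seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(seed)
        return torch.nn.Linear(4, 4)

    a = build_linear(123)
    b = build_linear(123)
    assert torch.equal(a.weight, b.weight)
    assert torch.equal(a.bias, b.bias)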
--- .../transformer/test_grouped_gemm.py | 23 +++++++++++-------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/tests/unit_tests/transformer/test_grouped_gemm.py b/tests/unit_tests/transformer/test_grouped_gemm.py index 9eea8a2b36..091f7fa112 100644 --- a/tests/unit_tests/transformer/test_grouped_gemm.py +++ b/tests/unit_tests/transformer/test_grouped_gemm.py @@ -6,9 +6,9 @@ from megatron.arguments import parse_args from megatron.core.models.gpt.gpt_layer_specs import gpt_layer_with_transformer_engine_spec_moe -from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed from megatron.core.transformer.switch_mlp import SwitchMLP from megatron.core.transformer.transformer_config import TransformerConfig +from megatron.initialize import _set_random_seed from megatron.model import Float16Module from tests.unit_tests.test_utilities import Utils @@ -21,7 +21,8 @@ def setup_method(self, method): self.num_experts = 2 # Vanilla sequential GEMM - model_parallel_cuda_manual_seed(123) + # Set random seed for reproducability + _set_random_seed(seed_=123, data_parallel_random_init=False) tf_config_smm = TransformerConfig( num_layers=num_layers, hidden_size=self.hidden_size, num_attention_heads=4, num_moe_experts=self.num_experts, use_cpu_initialization=False, add_bias_linear=False, @@ -39,7 +40,7 @@ def setup_method(self, method): print("done intializing for sequential gemm") # Grouped GEMM - model_parallel_cuda_manual_seed(123) + _set_random_seed(seed_=123, data_parallel_random_init=False) tf_config_gmm = TransformerConfig( num_layers=num_layers, hidden_size=self.hidden_size, num_attention_heads=4, num_moe_experts=self.num_experts, use_cpu_initialization=False, add_bias_linear=False, @@ -64,13 +65,16 @@ def test_constructor(self): # For the same hyper-parm model configs except the `moe_grouped_gemm`, # GroupedGEMM and sequential GEMMs should hold the same number of parms. assert num_weights_smm == num_weights_gmm + # expected num weights: router linear weights+bias + MLP weights(no bias) of all experts + expected_num_weights = \ + self.hidden_size * self.num_experts + self.num_experts + \ + self.hidden_size * (4*self.hidden_size) * 2 * self.num_experts + assert num_weights_smm == expected_num_weights - # TODO: The param init value is not exactly the same between gmm and smm - # assert torch.equal(self.switch_mlp_smm.router.weight, self.switch_mlp_gmm.router.weight) - # assert num_weights_smm == 2330, 'num_weights_sm=', num_weights_smm + assert torch.equal(self.switch_mlp_smm.router.weight, self.switch_mlp_gmm.router.weight) # weight1: [num_experts*4h, h] - # weight2: [num_experts, h, 4h] + # weight2: [h, num_experts*4h] assert self.switch_mlp_gmm.weight1.shape[0] == self.num_experts * 4 * self.hidden_size assert self.switch_mlp_gmm.weight1.shape[1] == self.hidden_size assert self.switch_mlp_gmm.weight1.shape == \ @@ -110,9 +114,8 @@ def test_gpu_forward(self): output_smm, _ = self.switch_mlp_smm(hidden_states) output_gmm, _ = self.switch_mlp_gmm(hidden_states) - # The following assert fails due to two reasons: - # (i) the param init value is not exactly the same between gmm and smm (refer to test_weight_init_value_the_same.) - # (ii) the router weight init value is not fixed in this UT. + # The following assert fails due to the param init value is not exactly + # the same between gmm and smm (refer to test_weight_init_value_the_same.) 
# assert torch.equal(output_smm, output_gmm),print(output_smm, output_gmm) if __name__ == "__main__": From edb31e821c37d32f0f26c4a3d38ded54c845c7b1 Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Wed, 15 Nov 2023 20:57:23 -0800 Subject: [PATCH 091/296] GroupedMLP/SwitchMLP/BasicMoELayer refactoring. --- megatron/core/transformer/base_moe_layer.py | 139 +++++++++ megatron/core/transformer/grouped_mlp.py | 138 +++++++++ megatron/core/transformer/switch_mlp.py | 265 ++---------------- .../transformer/test_grouped_gemm.py | 11 +- 4 files changed, 304 insertions(+), 249 deletions(-) create mode 100644 megatron/core/transformer/base_moe_layer.py create mode 100644 megatron/core/transformer/grouped_mlp.py diff --git a/megatron/core/transformer/base_moe_layer.py b/megatron/core/transformer/base_moe_layer.py new file mode 100644 index 0000000000..b60893ddbc --- /dev/null +++ b/megatron/core/transformer/base_moe_layer.py @@ -0,0 +1,139 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + +import numpy as np +import torch + +from megatron.core import parallel_state, tensor_parallel +from megatron.core.parallel_state import ( + get_tensor_and_expert_parallel_group, +) +from megatron.core.tensor_parallel import get_cuda_rng_tracker, get_data_parallel_rng_tracker_name +from megatron.core.transformer.module import MegatronModule +from megatron.core.transformer.transformer_config import TransformerConfig + + +def sinkhorn(cost, tol=0.0001): + "Sinkhorn based MoE routing function" + cost = torch.exp(cost) + d0 = torch.ones(cost.size(0), device=cost.device, dtype=cost.dtype) + d1 = torch.ones(cost.size(1), device=cost.device, dtype=cost.dtype) + + eps = 0.00000001 + error = 1e9 + d1_old = d1 + while error > tol: + d0 = (1 / d0.size(0)) * 1 / (torch.sum(d1 * cost, 1) + eps) + d1 = (1 / d1.size(0)) * 1 / (torch.sum(d0.unsqueeze(1) * cost, 0) + eps) + error = torch.mean(torch.abs(d1_old - d1)) + d1_old = d1 + return d1 * cost * d0.unsqueeze(1) + + +def get_router_linear_layer(config): + router = torch.nn.Linear(config.hidden_size, config.num_moe_experts, bias=False) + with get_cuda_rng_tracker().fork(get_data_parallel_rng_tracker_name()): + config.init_method(router.weight) + setattr(router.weight, 'sequence_parallel', config.sequence_parallel) + return router + + +class BaseMoELayer(MegatronModule): + """ + Basic MoE layer. + """ + def __init__(self, config: TransformerConfig): + super().__init__(config=config) + + self.config: TransformerConfig = config + + self.router = get_router_linear_layer(self.config) + self.add_bias = config.add_bias_linear + self.sequence_parallel = config.sequence_parallel + self.route_algo = sinkhorn + self.router_activation = torch.sigmoid + self.expert_parallel_size = parallel_state.get_expert_model_parallel_world_size() + + assert self.config.num_moe_experts % self.expert_parallel_size == 0 + self.num_local_experts = self.config.num_moe_experts // self.expert_parallel_size + local_expert_indices_offset = ( + parallel_state.get_expert_model_parallel_rank() * self.num_local_experts + ) + self.local_expert_indices = [ + local_expert_indices_offset + i for i in range(self.num_local_experts) + ] + + def gather_indices(self, local_indices): + """ Gather tensors and concatenate along the first dimension.""" + group = get_tensor_and_expert_parallel_group() + world_size = torch.distributed.get_world_size(group=group) + # Bypass the function if we are using only 1 GPU. 
+ if world_size == 1: + return local_indices + + dim_size = list(local_indices.size()) + dim_size[0] = dim_size[0] * world_size + + # TODO pre allocate memory + output = torch.empty( + dim_size, dtype=local_indices.dtype, device=torch.cuda.current_device() + ) + torch.distributed._all_gather_base(output, local_indices.contiguous(), group=group) + return output + + def token_permutation(self, hidden_states): + self.hidden_shape = hidden_states.shape + route = self.router(hidden_states) + # print(self.router.weight) + route = route.view(-1, self.config.num_moe_experts) + + if self.training: + with torch.no_grad(): + norm_route = self.route_algo( + route.detach().to(dtype=torch.float32) + ) # explicit fp32 conversion for stability + _, max_ind = torch.max(norm_route, dim=1) + route = self.router_activation(route) + max_prob = route[torch.arange(route.size(0)), max_ind] + else: + route = self.router_activation(route) + max_prob, max_ind = torch.max(route, dim=1) + + self.max_prob = torch.unsqueeze(max_prob, 1) + hidden_states = hidden_states.view(-1, self.hidden_shape[-1]) + + if self.sequence_parallel or (self.expert_parallel_size > 1): + global_hidden_states = tensor_parallel.gather_from_sequence_parallel_region_to_moe( + hidden_states + ) + global_indices = self.gather_indices(max_ind) + else: + global_hidden_states = hidden_states + global_indices = max_ind + + return global_hidden_states, global_indices + + def token_unpermutation(self, output_total, output_bias_total=None): + if self.sequence_parallel or (self.expert_parallel_size > 1): + output_total = tensor_parallel.reduce_scatter_to_sequence_parallel_region_from_moe( + output_total + ) + if self.add_bias: + assert output_bias_total is not None + output_bias_total = tensor_parallel.reduce_scatter_to_sequence_parallel_region_from_moe( + output_bias_total + ) + # bias is duplicated across tensor parallelism ranks; + # reduce scatter reduces bias across tensor parallel_ranks + output_bias_total = ( + output_bias_total / parallel_state.get_tensor_model_parallel_world_size() + ) + + output_total = output_total * self.max_prob + output_total = output_total.view(self.hidden_shape) + if self.add_bias: + output_bias_total = output_bias_total * self.max_prob + output_bias_total = output_bias_total.view(self.hidden_shape) + else: + output_bias_total = None + + return output_total, output_bias_total \ No newline at end of file diff --git a/megatron/core/transformer/grouped_mlp.py b/megatron/core/transformer/grouped_mlp.py new file mode 100644 index 0000000000..e1e9b49642 --- /dev/null +++ b/megatron/core/transformer/grouped_mlp.py @@ -0,0 +1,138 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. 
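Before continuing with the new grouped_mlp.py, a self-contained sketch of the Sinkhorn-based top-1 routing that `BaseMoELayer.token_permutation` above performs during training: the logits are balanced with the Sinkhorn iteration, the balanced matrix picks the expert, and the gate value comes from the sigmoid of the raw logits. The toy logit tensor and sizes are invented for illustration.

import torch

def sinkhorn(cost, tol=1e-4):
    # Iteratively rescale rows/columns of exp(cost) so that expert load is balanced.
    cost = torch.exp(cost)
    d0 = torch.ones(cost.size(0), device=cost.device, dtype=cost.dtype)
    d1 = torch.ones(cost.size(1), device=cost.device, dtype=cost.dtype)
    eps, error, d1_old = 1e-8, 1e9, d1
    while error > tol:
        d0 = (1 / d0.size(0)) / (torch.sum(d1 * cost, 1) + eps)
        d1 = (1 / d1.size(0)) / (torch.sum(d0.unsqueeze(1) * cost, 0) + eps)
        error = torch.mean(torch.abs(d1_old - d1))
        d1_old = d1
    return d1 * cost * d0.unsqueeze(1)

toy_logits = torch.randn(6, 2)                                   # 6 tokens, 2 experts
_, max_ind = torch.max(sinkhorn(toy_logits.float()), dim=1)      # balanced expert choice
max_prob = torch.sigmoid(toy_logits)[torch.arange(6), max_ind]   # per-token gate value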
+ +import numpy as np +import torch +from torch.nn.parameter import Parameter + +from megatron.core import parallel_state + +from megatron.core.tensor_parallel.layers import _initialize_affine_weight_gpu +from megatron.core.tensor_parallel.utils import divide +from megatron.core.transformer import grouped_gemm_util as gg +from megatron.core.transformer.transformer_config import TransformerConfig + +from .base_moe_layer import BaseMoELayer +from .mlp import MLPSubmodules + +class ScaleGradient(torch.autograd.Function): + + @staticmethod + @torch.cuda.amp.custom_fwd + def forward(ctx, x, scale): + ctx.scale = scale + return x + + @staticmethod + @torch.cuda.amp.custom_bwd + def backward(ctx, grad): + return grad * ctx.scale, None +scale_gradient = ScaleGradient.apply + +class GroupedMLP(BaseMoELayer): + """ + Top-1 Mixture of Experts Layer with Grouped GEMM. Routes input to one of N MLP "experts" + Curently supports Sinkhorn based expert routing. + """ + + def __init__(self, config: TransformerConfig, submodules: MLPSubmodules): + super().__init__(config=config) + self.config: TransformerConfig = config + + gg.assert_grouped_gemm_is_available() + self.expert_parallel = config.expert_model_parallel_size > 1 + self.gradient_scale = 1 / parallel_state.get_tensor_and_expert_parallel_world_size() + if self.config.gated_linear_unit: + def glu(x): + x = torch.chunk(x, 2, dim=-1) + return self.config.activation_func(x[0]) * x[1] + + self.activation_func = glu + else: + self.activation_func = self.config.activation_func + + assert not config.use_cpu_initialization + assert config.add_bias_linear == False, \ + "bias in the expert layer is not supported in Grouped GEMM yet." + # How many feature each rank holds + tp_size = parallel_state.get_tensor_model_parallel_world_size() + ffn_hs_per_expert_per_partition = divide(self.config.ffn_hidden_size, tp_size) + output_size_per_partition = self.num_local_experts * ffn_hs_per_expert_per_partition + fc1_output_size_per_partition = output_size_per_partition + if config.gated_linear_unit: + fc1_output_size_per_partition *= 2 + + self.weight1 = Parameter( + torch.empty( + fc1_output_size_per_partition, + self.config.hidden_size, + device=torch.cuda.current_device(), + dtype=config.params_dtype, + ) + ) + self.weight2 = Parameter( + torch.empty( + self.config.hidden_size, + output_size_per_partition, + device=torch.cuda.current_device(), + dtype=config.params_dtype, + ) + ) + if config.perform_initialization: + _initialize_affine_weight_gpu( + self.weight1, + config.init_method, + partition_dim=0, + expert_parallel=self.expert_parallel, + ) + _initialize_affine_weight_gpu( + self.weight2, + config.output_layer_init_method, + partition_dim=1, + expert_parallel=self.expert_parallel, + ) + setattr(self.weight1, 'allreduce', not self.expert_parallel) + setattr(self.weight2, 'allreduce', not self.expert_parallel) + + def scale_grad(self, w): + if self.gradient_scale is None: + return w + return scale_gradient(w, self.gradient_scale) + + def forward(self, hidden_states): + global_hidden_states, global_indices = self.token_permutation(hidden_states) + + with torch.no_grad(): + sorted, indices = torch.sort(global_indices, stable=True) + # Permutation of tokens + sorted_global_hidden_states = global_hidden_states[indices] + # Histogram the expert ids to identify the number of tokens routed to each expert + # Note that for np.histogram, all but the last (righthand-most) bin is half-open. 
+ tokens_per_expert, bin_edges = np.histogram( + sorted.cpu(), + bins=np.arange(self.config.num_moe_experts + 1)) + tokens_per_expert = torch.tensor(tokens_per_expert).to(torch.long) + reverse_indices = indices.argsort() + + w1, w2 = (self.scale_grad(self.weight1), self.scale_grad(self.weight2)) + # Reshape the weights for the grouped GEMMs. + w1 = w1.view(self.num_local_experts, -1, self.config.hidden_size) + w2 = w2.view(self.num_local_experts, self.config.hidden_size, -1) + + fc1_output = gg.ops.gmm( + sorted_global_hidden_states, + w1, + tokens_per_expert, + trans_b=True) + + intermediate_parallel = self.activation_func(fc1_output) + + fc2_output = gg.ops.gmm( + intermediate_parallel, + w2, + tokens_per_expert, + trans_b=True) + # Un-permutation of tokens + output_total = fc2_output[reverse_indices] + + output_total, _ = self.token_unpermutation(output_total) + return output_total, None \ No newline at end of file diff --git a/megatron/core/transformer/switch_mlp.py b/megatron/core/transformer/switch_mlp.py index 10944c5203..f891ab5aed 100644 --- a/megatron/core/transformer/switch_mlp.py +++ b/megatron/core/transformer/switch_mlp.py @@ -2,63 +2,14 @@ import numpy as np import torch -from torch.nn.parameter import Parameter -from megatron.core import parallel_state, tensor_parallel -from megatron.core.parallel_state import ( - get_tensor_and_expert_parallel_group, - get_tensor_model_parallel_group, -) -from megatron.core.tensor_parallel import get_cuda_rng_tracker, get_data_parallel_rng_tracker_name -from megatron.core.transformer import grouped_gemm_util as gg -from megatron.core.tensor_parallel.layers import _initialize_affine_weight_gpu -from megatron.core.tensor_parallel.utils import divide -from megatron.core.transformer import grouped_gemm_util as gg -from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.transformer_config import TransformerConfig +from .base_moe_layer import BaseMoELayer from .mlp import MLP, MLPSubmodules -def sinkhorn(cost, tol=0.0001): - "Sinkhorn based MoE routing function" - cost = torch.exp(cost) - d0 = torch.ones(cost.size(0), device=cost.device, dtype=cost.dtype) - d1 = torch.ones(cost.size(1), device=cost.device, dtype=cost.dtype) - - eps = 0.00000001 - error = 1e9 - d1_old = d1 - while error > tol: - d0 = (1 / d0.size(0)) * 1 / (torch.sum(d1 * cost, 1) + eps) - d1 = (1 / d1.size(0)) * 1 / (torch.sum(d0.unsqueeze(1) * cost, 0) + eps) - error = torch.mean(torch.abs(d1_old - d1)) - d1_old = d1 - return d1 * cost * d0.unsqueeze(1) - -class ScaleGradient(torch.autograd.Function): - - @staticmethod - @torch.cuda.amp.custom_fwd - def forward(ctx, x, scale): - ctx.scale = scale - return x - - @staticmethod - @torch.cuda.amp.custom_bwd - def backward(ctx, grad): - return grad * ctx.scale, None -scale_gradient = ScaleGradient.apply - -def get_router_linear_layer(config): - router = torch.nn.Linear(config.hidden_size, config.num_moe_experts, bias=False) - with get_cuda_rng_tracker().fork(get_data_parallel_rng_tracker_name()): - config.init_method(router.weight) - setattr(router.weight, 'sequence_parallel', config.sequence_parallel) - return router - - -class SwitchMLP(MegatronModule): +class SwitchMLP(BaseMoELayer): """ Top-1 Mixture of Experts Layer. Routes input to one of N MLP "experts" Curently supports Sinkhorn based expert routing. 
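Stepping outside the diff for a moment: a rough, torch-only sketch of what the two `gg.ops.gmm` calls in `GroupedMLP.forward` above compute, written as an explicit per-expert loop (the grouped_gemm library fuses this into batched GEMMs on GPU). Weight shapes follow the views in the diff (w1: [E, ffn, h], w2: [E, h, ffn], both used with trans_b=True); the gelu activation and the toy sizes are assumptions for illustration only.

import torch
import torch.nn.functional as F

def grouped_mlp_reference(sorted_tokens, w1, w2, tokens_per_expert, act=F.gelu):
    # sorted_tokens: [num_tokens, h], already permuted so each expert's tokens are contiguous.
    outputs, start = [], 0
    for e, n in enumerate(tokens_per_expert.tolist()):
        chunk = sorted_tokens[start:start + n]
        hidden = act(chunk @ w1[e].t())        # fc1 for expert e
        outputs.append(hidden @ w2[e].t())     # fc2 for expert e
        start += n
    return torch.cat(outputs, dim=0)

# Toy shapes: 5 tokens, 2 experts, h=4, ffn=8.
tokens = torch.randn(5, 4)
w1, w2 = torch.randn(2, 8, 4), torch.randn(2, 4, 8)
out = grouped_mlp_reference(tokens, w1, w2, torch.tensor([3, 2]))
assert out.shape == (5, 4)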
@@ -67,205 +18,31 @@ class SwitchMLP(MegatronModule): def __init__(self, config: TransformerConfig, submodules: MLPSubmodules): super().__init__(config=config) - self.config: TransformerConfig = config - - self.router = get_router_linear_layer(self.config) - self.add_bias = config.add_bias_linear - self.sequence_parallel = config.sequence_parallel - self.route_algo = sinkhorn - self.router_activation = torch.sigmoid - self.expert_parallel_size = parallel_state.get_expert_model_parallel_world_size() - - assert self.config.num_moe_experts % self.expert_parallel_size == 0 - self.num_local_experts = self.config.num_moe_experts // self.expert_parallel_size - local_expert_indices_offset = ( - parallel_state.get_expert_model_parallel_rank() * self.num_local_experts - ) - self.local_expert_indices = [ - local_expert_indices_offset + i for i in range(self.num_local_experts) - ] - - if not self.config.moe_grouped_gemm: - self.local_experts = torch.nn.ModuleList() - for _ in range(self.num_local_experts): - expert = MLP(self.config, submodules, is_expert=True) - self.local_experts.append(expert) - else: - gg.assert_grouped_gemm_is_available() - self.expert_parallel = config.expert_model_parallel_size > 1 - self.gradient_scale = 1 / parallel_state.get_tensor_and_expert_parallel_world_size() - if self.config.gated_linear_unit: - def glu(x): - x = torch.chunk(x, 2, dim=-1) - return self.config.activation_func(x[0]) * x[1] - - self.activation_func = glu - else: - self.activation_func = self.config.activation_func - - assert not config.use_cpu_initialization - # How many feature each rank holds - tp_size = parallel_state.get_tensor_model_parallel_world_size() - ffn_hs_per_expert_per_partition = divide(self.config.ffn_hidden_size, tp_size) - output_size_per_partition = self.num_local_experts * ffn_hs_per_expert_per_partition - fc1_output_size_per_partition = output_size_per_partition - if config.gated_linear_unit: - fc1_output_size_per_partition *= 2 - - self.weight1 = Parameter( - torch.empty( - fc1_output_size_per_partition, - self.config.hidden_size, - device=torch.cuda.current_device(), - dtype=config.params_dtype, - ) - ) - self.weight2 = Parameter( - torch.empty( - self.config.hidden_size, - output_size_per_partition, - device=torch.cuda.current_device(), - dtype=config.params_dtype, - ) - ) - if config.perform_initialization: - _initialize_affine_weight_gpu( - self.weight1, - config.init_method, - partition_dim=0, - expert_parallel=self.expert_parallel, - ) - _initialize_affine_weight_gpu( - self.weight2, - config.output_layer_init_method, - partition_dim=1, - expert_parallel=self.expert_parallel, - ) - setattr(self.weight1, 'allreduce', not self.expert_parallel) - setattr(self.weight2, 'allreduce', not self.expert_parallel) - - def scale_grad(self, w): - if self.gradient_scale is None: - return w - return scale_gradient(w, self.gradient_scale) - - def gather_indices(self, local_indices): - """ Gather tensors and concatenate along the first dimension.""" - group = get_tensor_and_expert_parallel_group() - world_size = torch.distributed.get_world_size(group=group) - # Bypass the function if we are using only 1 GPU. 
- if world_size == 1: - return local_indices - - dim_size = list(local_indices.size()) - dim_size[0] = dim_size[0] * world_size - - # TODO pre allocate memory - output = torch.empty( - dim_size, dtype=local_indices.dtype, device=torch.cuda.current_device() - ) - torch.distributed._all_gather_base(output, local_indices.contiguous(), group=group) - return output + self.local_experts = torch.nn.ModuleList() + for _ in range(self.num_local_experts): + expert = MLP(self.config, submodules, is_expert=True) + self.local_experts.append(expert) def forward(self, hidden_states): - hidden_shape = hidden_states.shape - route = self.router(hidden_states) - route = route.view(-1, self.config.num_moe_experts) - - if self.training: - with torch.no_grad(): - norm_route = self.route_algo( - route.detach().to(dtype=torch.float32) - ) # explicit fp32 conversion for stability - _, max_ind = torch.max(norm_route, dim=1) - route = self.router_activation(route) - max_prob = route[torch.arange(route.size(0)), max_ind] - else: - route = self.router_activation(route) - max_prob, max_ind = torch.max(route, dim=1) - - max_prob = torch.unsqueeze(max_prob, 1) - hidden_states = hidden_states.view(-1, hidden_shape[-1]) - - if self.sequence_parallel or (self.expert_parallel_size > 1): - global_hidden_states = tensor_parallel.gather_from_sequence_parallel_region_to_moe( - hidden_states - ) - global_indices = self.gather_indices(max_ind) - else: - global_hidden_states = hidden_states - global_indices = max_ind - - if not self.config.moe_grouped_gemm: - output_total = torch.zeros_like(global_hidden_states) - if self.add_bias: - output_bias_total = torch.zeros_like(global_hidden_states) - - - for expert_num, expert in enumerate(self.local_experts): - local_expert_index = self.local_expert_indices[expert_num] - local_indices = (global_indices == local_expert_index).nonzero() - hidden = global_hidden_states[local_indices, :] - output, output_bias = expert(hidden) - - output_total[local_indices, :] = output - if self.add_bias: - output_bias = output_bias.expand_as(output) - output_bias_total[local_indices, :] = output_bias - else: - with torch.no_grad(): - sorted, indices = torch.sort(global_indices, stable=True) - # Permutation of tokens - sorted_global_hidden_states = global_hidden_states[indices] - # Histogram the expert ids to identify the number of tokens routed to each expert - # Note that for np.histogram, all but the last (righthand-most) bin is half-open. - tokens_per_expert, bin_edges = np.histogram( - sorted.cpu(), - bins=np.arange(self.config.num_moe_experts + 1)) - tokens_per_expert = torch.tensor(tokens_per_expert).to(torch.long) - reverse_indices = indices.argsort() + global_hidden_states, global_indices = self.token_permutation(hidden_states) - w1, w2 = (self.scale_grad(self.weight1), self.scale_grad(self.weight2)) - # Reshape the weights for the grouped GEMMs. 
- w1 = w1.view(self.num_local_experts, -1, self.config.hidden_size) - w2 = w2.view(self.num_local_experts, self.config.hidden_size, -1) - - fc1_output = gg.ops.gmm( - sorted_global_hidden_states, - w1, - tokens_per_expert, - trans_b=True) + output_total = torch.zeros_like(global_hidden_states) + output_bias_total = None + if self.add_bias: + output_bias_total = torch.zeros_like(global_hidden_states) - intermediate_parallel = self.activation_func(fc1_output) - fc2_output = gg.ops.gmm( - intermediate_parallel, - w2, - tokens_per_expert, - trans_b=True) - # Un-permutation of tokens - output_total = fc2_output[reverse_indices] + for expert_num, expert in enumerate(self.local_experts): + local_expert_index = self.local_expert_indices[expert_num] + local_indices = (global_indices == local_expert_index).nonzero() + hidden = global_hidden_states[local_indices, :] + output, output_bias = expert(hidden) - if self.sequence_parallel or (self.expert_parallel_size > 1): - output_total = tensor_parallel.reduce_scatter_to_sequence_parallel_region_from_moe( - output_total - ) + output_total[local_indices, :] = output if self.add_bias: - output_bias_total = tensor_parallel.reduce_scatter_to_sequence_parallel_region_from_moe( - output_bias_total - ) - # bias is duplicated across tensor parallelism ranks; - # reduce scatter reduces bias across tensor parallel_ranks - output_bias_total = ( - output_bias_total / parallel_state.get_tensor_model_parallel_world_size() - ) + output_bias = output_bias.expand_as(output) + output_bias_total[local_indices, :] = output_bias - output_total = output_total * max_prob - output_total = output_total.view(hidden_shape) - if self.add_bias: - output_bias_total = output_bias_total * max_prob - output_bias_total = output_bias_total.view(hidden_shape) - else: - output_bias_total = None + output_total, output_bias_total = self.token_unpermutation(output_total, output_bias_total) - return output_total, output_bias_total + return output_total, output_bias_total \ No newline at end of file diff --git a/tests/unit_tests/transformer/test_grouped_gemm.py b/tests/unit_tests/transformer/test_grouped_gemm.py index 091f7fa112..9a838c7e9d 100644 --- a/tests/unit_tests/transformer/test_grouped_gemm.py +++ b/tests/unit_tests/transformer/test_grouped_gemm.py @@ -6,13 +6,14 @@ from megatron.arguments import parse_args from megatron.core.models.gpt.gpt_layer_specs import gpt_layer_with_transformer_engine_spec_moe +from megatron.core.transformer.grouped_mlp import GroupedMLP from megatron.core.transformer.switch_mlp import SwitchMLP from megatron.core.transformer.transformer_config import TransformerConfig from megatron.initialize import _set_random_seed from megatron.model import Float16Module from tests.unit_tests.test_utilities import Utils -class TestParallelSwitchMLP: +class TestParallelGroupedMLP: def setup_method(self, method): Utils.initialize_model_parallel(1,1) @@ -47,7 +48,7 @@ def setup_method(self, method): bf16=True, # Currently GroupedGEMM only supports bf16. 
params_dtype=torch.bfloat16, moe_grouped_gemm=True) - self.switch_mlp_gmm = SwitchMLP(tf_config_gmm, + self.switch_mlp_gmm = GroupedMLP(tf_config_gmm, gpt_layer_with_transformer_engine_spec_moe.submodules.mlp.submodules) self.switch_mlp_gmm = Float16Module(self.switch_mlp_gmm, self.args).module print("done intializing for grouped gemm") @@ -57,7 +58,7 @@ def teardown_method(self, method): def test_constructor(self): assert isinstance(self.switch_mlp_smm, SwitchMLP) - assert isinstance(self.switch_mlp_gmm, SwitchMLP) + assert isinstance(self.switch_mlp_gmm, GroupedMLP) num_weights_smm = sum([p.numel() for p in self.switch_mlp_smm.parameters()]) num_weights_gmm = sum([p.numel() for p in self.switch_mlp_gmm.parameters()]) @@ -116,10 +117,10 @@ def test_gpu_forward(self): # The following assert fails due to the param init value is not exactly # the same between gmm and smm (refer to test_weight_init_value_the_same.) - # assert torch.equal(output_smm, output_gmm),print(output_smm, output_gmm) + # assert torch.equal(output_smm, output_gmm) if __name__ == "__main__": - SMLP_test = TestParallelSwitchMLP() + SMLP_test = TestParallelGroupedMLP() SMLP_test.setup_method(method=None) SMLP_test.test_constructor() SMLP_test.test_weight_init_value_the_same() From 85a03924d99d0865acb4d5856b62ad6476fb56ac Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Wed, 15 Nov 2023 21:17:33 -0800 Subject: [PATCH 092/296] add entrypoint for GroupedMLP and SwitchMLP. --- megatron/core/models/gpt/gpt_layer_specs.py | 48 ++++++++++++++++++- megatron/core/transformer/grouped_mlp.py | 4 +- pretrain_gpt.py | 5 +- .../transformer/test_grouped_gemm.py | 25 ++++------ 4 files changed, 62 insertions(+), 20 deletions(-) diff --git a/megatron/core/models/gpt/gpt_layer_specs.py b/megatron/core/models/gpt/gpt_layer_specs.py index aace1590d8..94be21c02e 100755 --- a/megatron/core/models/gpt/gpt_layer_specs.py +++ b/megatron/core/models/gpt/gpt_layer_specs.py @@ -11,6 +11,7 @@ ) from megatron.core.transformer.dot_product_attention import DotProductAttention from megatron.core.transformer.enums import AttnMaskType +from megatron.core.transformer.grouped_mlp import GroupedMLP from megatron.core.transformer.mlp import MLP, MLPSubmodules from megatron.core.transformer.spec_utils import ModuleSpec from megatron.core.transformer.switch_mlp import SwitchMLP @@ -96,7 +97,29 @@ def get_gpt_layer_local_spec() -> ModuleSpec: ), ) -# Use this spec for an implementation using only modules in megatron core for MoE models +# Use this spec to use lower level Transformer Engine modules and GroupedMLP based MoE +gpt_layer_with_transformer_engine_spec_moe_grouped_gemm = ModuleSpec( + module=TransformerLayer, + submodules=TransformerLayerSubmodules( + self_attention=ModuleSpec( + module=SelfAttention, + params={"attn_mask_type": AttnMaskType.causal}, + submodules=SelfAttentionSubmodules( + linear_qkv=TELayerNormColumnParallelLinear, + dot_product_attention=TEDotProductAttention, + linear_proj=TERowParallelLinear, + ), + ), + self_attn_bda=get_bias_dropout_add, + pre_mlp_layernorm=FusedLayerNorm, + mlp=ModuleSpec( + module=GroupedMLP, # MOE + ), + mlp_bda=get_bias_dropout_add, + ), +) + +# Use this spec for an implementation using only modules in megatron core for SwitchMLP based MoE models gpt_layer_local_spec_moe = ModuleSpec( module=TransformerLayer, submodules=TransformerLayerSubmodules( @@ -121,3 +144,26 @@ def get_gpt_layer_local_spec() -> ModuleSpec: mlp_bda=get_bias_dropout_add, ), ) + +# Use this spec for an implementation using only modules in 
megatron core for GroupedMLP based MoE models +gpt_layer_local_spec_moe_grouped_gemm = ModuleSpec( + module=TransformerLayer, + submodules=TransformerLayerSubmodules( + input_layernorm=FusedLayerNorm, + self_attention=ModuleSpec( + module=SelfAttention, + params={"attn_mask_type": AttnMaskType.causal}, + submodules=SelfAttentionSubmodules( + linear_qkv=ColumnParallelLinear, + dot_product_attention=DotProductAttention, + linear_proj=RowParallelLinear, + ), + ), + self_attn_bda=get_bias_dropout_add, + pre_mlp_layernorm=FusedLayerNorm, + mlp=ModuleSpec( + module=GroupedMLP, # MOE + ), + mlp_bda=get_bias_dropout_add, + ), +) \ No newline at end of file diff --git a/megatron/core/transformer/grouped_mlp.py b/megatron/core/transformer/grouped_mlp.py index e1e9b49642..5050584259 100644 --- a/megatron/core/transformer/grouped_mlp.py +++ b/megatron/core/transformer/grouped_mlp.py @@ -12,7 +12,7 @@ from megatron.core.transformer.transformer_config import TransformerConfig from .base_moe_layer import BaseMoELayer -from .mlp import MLPSubmodules + class ScaleGradient(torch.autograd.Function): @@ -34,7 +34,7 @@ class GroupedMLP(BaseMoELayer): Curently supports Sinkhorn based expert routing. """ - def __init__(self, config: TransformerConfig, submodules: MLPSubmodules): + def __init__(self, config: TransformerConfig): super().__init__(config=config) self.config: TransformerConfig = config diff --git a/pretrain_gpt.py b/pretrain_gpt.py index 1180922761..e6685dfffa 100644 --- a/pretrain_gpt.py +++ b/pretrain_gpt.py @@ -27,7 +27,8 @@ from megatron.arguments import core_transformer_config_from_args from megatron.core.models.gpt.gpt_layer_specs import ( get_gpt_layer_with_transformer_engine_spec, - gpt_layer_with_transformer_engine_spec_moe + gpt_layer_with_transformer_engine_spec_moe, + gpt_layer_with_transformer_engine_spec_moe_grouped_gemm, ) def model_provider(pre_process=True, post_process=True) -> Union[GPTModel, megatron.model.GPTModel]: @@ -54,6 +55,8 @@ def model_provider(pre_process=True, post_process=True) -> Union[GPTModel, megat else: if args.num_experts is None: transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec() + elif args.moe_grouped_gemm: + transformer_layer_spec = gpt_layer_with_transformer_engine_spec_moe_grouped_gemm else: transformer_layer_spec = gpt_layer_with_transformer_engine_spec_moe diff --git a/tests/unit_tests/transformer/test_grouped_gemm.py b/tests/unit_tests/transformer/test_grouped_gemm.py index 9a838c7e9d..61f5e26e8d 100644 --- a/tests/unit_tests/transformer/test_grouped_gemm.py +++ b/tests/unit_tests/transformer/test_grouped_gemm.py @@ -21,15 +21,15 @@ def setup_method(self, method): self.hidden_size=2 # 12 self.num_experts = 2 - # Vanilla sequential GEMM - # Set random seed for reproducability - _set_random_seed(seed_=123, data_parallel_random_init=False) - tf_config_smm = TransformerConfig( + tf_config = TransformerConfig( num_layers=num_layers, hidden_size=self.hidden_size, num_attention_heads=4, num_moe_experts=self.num_experts, use_cpu_initialization=False, add_bias_linear=False, - bf16=True, params_dtype=torch.bfloat16, - moe_grouped_gemm=False) - self.switch_mlp_smm = SwitchMLP(tf_config_smm, + bf16=True, params_dtype=torch.bfloat16) + + ## Vanilla sequential GEMM + # Set random seed for reproducability + _set_random_seed(seed_=123, data_parallel_random_init=False) + self.switch_mlp_smm = SwitchMLP(tf_config, gpt_layer_with_transformer_engine_spec_moe.submodules.mlp.submodules) self.args = parse_args(extra_args_provider=None, 
ignore_unknown_args=False) @@ -40,16 +40,9 @@ def setup_method(self, method): self.switch_mlp_smm = Float16Module(self.switch_mlp_smm, self.args).module print("done intializing for sequential gemm") - # Grouped GEMM + ## Grouped GEMM _set_random_seed(seed_=123, data_parallel_random_init=False) - tf_config_gmm = TransformerConfig( - num_layers=num_layers, hidden_size=self.hidden_size, num_attention_heads=4, - num_moe_experts=self.num_experts, use_cpu_initialization=False, add_bias_linear=False, - bf16=True, # Currently GroupedGEMM only supports bf16. - params_dtype=torch.bfloat16, - moe_grouped_gemm=True) - self.switch_mlp_gmm = GroupedMLP(tf_config_gmm, - gpt_layer_with_transformer_engine_spec_moe.submodules.mlp.submodules) + self.switch_mlp_gmm = GroupedMLP(tf_config) self.switch_mlp_gmm = Float16Module(self.switch_mlp_gmm, self.args).module print("done intializing for grouped gemm") From ee9346e8c1b4c8484095082ad4074a31a9d62197 Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Thu, 16 Nov 2023 09:37:29 +0000 Subject: [PATCH 093/296] Add cpu initilization of parms for GroupedMLP; Add related UTs. --- megatron/core/transformer/grouped_mlp.py | 114 ++++++++++++------ .../transformer/test_grouped_gemm.py | 79 ++++++++---- 2 files changed, 132 insertions(+), 61 deletions(-) diff --git a/megatron/core/transformer/grouped_mlp.py b/megatron/core/transformer/grouped_mlp.py index 5050584259..a6d90e613f 100644 --- a/megatron/core/transformer/grouped_mlp.py +++ b/megatron/core/transformer/grouped_mlp.py @@ -6,7 +6,10 @@ from megatron.core import parallel_state -from megatron.core.tensor_parallel.layers import _initialize_affine_weight_gpu +from megatron.core.tensor_parallel.layers import ( + _initialize_affine_weight_cpu, + _initialize_affine_weight_gpu, +) from megatron.core.tensor_parallel.utils import divide from megatron.core.transformer import grouped_gemm_util as gg from megatron.core.transformer.transformer_config import TransformerConfig @@ -39,6 +42,9 @@ def __init__(self, config: TransformerConfig): self.config: TransformerConfig = config gg.assert_grouped_gemm_is_available() + assert config.add_bias_linear == False, \ + "bias in the expert layer is not supported in Grouped GEMM yet." + self.expert_parallel = config.expert_model_parallel_size > 1 self.gradient_scale = 1 / parallel_state.get_tensor_and_expert_parallel_world_size() if self.config.gated_linear_unit: @@ -50,46 +56,84 @@ def glu(x): else: self.activation_func = self.config.activation_func - assert not config.use_cpu_initialization - assert config.add_bias_linear == False, \ - "bias in the expert layer is not supported in Grouped GEMM yet." - # How many feature each rank holds + + # How many feature each rank holds for fc1 and fc2, respectively. tp_size = parallel_state.get_tensor_model_parallel_world_size() - ffn_hs_per_expert_per_partition = divide(self.config.ffn_hidden_size, tp_size) - output_size_per_partition = self.num_local_experts * ffn_hs_per_expert_per_partition - fc1_output_size_per_partition = output_size_per_partition + fc1_output_size = self.config.ffn_hidden_size * self.num_local_experts if config.gated_linear_unit: - fc1_output_size_per_partition *= 2 - - self.weight1 = Parameter( - torch.empty( - fc1_output_size_per_partition, - self.config.hidden_size, - device=torch.cuda.current_device(), - dtype=config.params_dtype, + # Project to 4h. 
If using swiglu double the output width, + # see https://arxiv.org/pdf/2002.05202.pdf + fc1_output_size *= 2 + fc1_output_size_per_partition = divide(fc1_output_size, tp_size) + + fc2_input_size = self.config.ffn_hidden_size * self.num_local_experts + fc2_input_size_per_partition = divide(fc2_input_size, tp_size) + + # Initialize weight. + if config.use_cpu_initialization: + self.weight1 = Parameter( + torch.empty( + fc1_output_size_per_partition, + self.config.hidden_size, + dtype=config.params_dtype, + ) ) - ) - self.weight2 = Parameter( - torch.empty( - self.config.hidden_size, - output_size_per_partition, - device=torch.cuda.current_device(), - dtype=config.params_dtype, + self.weight2 = Parameter( + torch.empty( + self.config.hidden_size, + fc2_input_size_per_partition, + dtype=config.params_dtype, + ) ) - ) - if config.perform_initialization: - _initialize_affine_weight_gpu( - self.weight1, - config.init_method, - partition_dim=0, - expert_parallel=self.expert_parallel, + if config.perform_initialization: + _initialize_affine_weight_cpu( + self.weight1, + fc1_output_size, + self.config.hidden_size, + fc1_output_size_per_partition, + partition_dim=0, + init_method=config.init_method, + params_dtype=config.params_dtype, + ) + _initialize_affine_weight_cpu( + self.weight2, + self.config.hidden_size, + fc2_input_size, + fc2_input_size_per_partition, + partition_dim=1, + init_method=config.output_layer_init_method, + params_dtype=config.params_dtype, + ) + else: + self.weight1 = Parameter( + torch.empty( + fc1_output_size_per_partition, + self.config.hidden_size, + device=torch.cuda.current_device(), + dtype=config.params_dtype, + ) ) - _initialize_affine_weight_gpu( - self.weight2, - config.output_layer_init_method, - partition_dim=1, - expert_parallel=self.expert_parallel, + self.weight2 = Parameter( + torch.empty( + self.config.hidden_size, + fc2_input_size_per_partition, + device=torch.cuda.current_device(), + dtype=config.params_dtype, + ) ) + if config.perform_initialization: + _initialize_affine_weight_gpu( + self.weight1, + config.init_method, + partition_dim=0, + expert_parallel=self.expert_parallel, + ) + _initialize_affine_weight_gpu( + self.weight2, + config.output_layer_init_method, + partition_dim=1, + expert_parallel=self.expert_parallel, + ) setattr(self.weight1, 'allreduce', not self.expert_parallel) setattr(self.weight2, 'allreduce', not self.expert_parallel) diff --git a/tests/unit_tests/transformer/test_grouped_gemm.py b/tests/unit_tests/transformer/test_grouped_gemm.py index 61f5e26e8d..525feef105 100644 --- a/tests/unit_tests/transformer/test_grouped_gemm.py +++ b/tests/unit_tests/transformer/test_grouped_gemm.py @@ -3,6 +3,7 @@ import pytest import torch +import torch.nn.functional as F from megatron.arguments import parse_args from megatron.core.models.gpt.gpt_layer_specs import gpt_layer_with_transformer_engine_spec_moe @@ -15,17 +16,33 @@ class TestParallelGroupedMLP: - def setup_method(self, method): + def setup_method(self, method, use_cpu_initialization=False, swiglu=True): + print("============") + print("Test for use_cpu_initilization={} and swiglu={}.".format(use_cpu_initialization, swiglu)) + print("============") Utils.initialize_model_parallel(1,1) num_layers=1 # 2 self.hidden_size=2 # 12 self.num_experts = 2 + self.gated_linear_unit = True + self.use_cpu_initialization = use_cpu_initialization + self.gated_linear_unit = False + if swiglu: + self.gated_linear_unit = True tf_config = TransformerConfig( num_layers=num_layers, hidden_size=self.hidden_size, 
num_attention_heads=4, - num_moe_experts=self.num_experts, use_cpu_initialization=False, add_bias_linear=False, + num_moe_experts=self.num_experts, use_cpu_initialization=self.use_cpu_initialization, + add_bias_linear=False, gated_linear_unit=self.gated_linear_unit, + bias_gelu_fusion=False, bf16=True, params_dtype=torch.bfloat16) + self.fc1_ffn_hidden_size = tf_config.ffn_hidden_size + self.fc2_ffn_hidden_size = tf_config.ffn_hidden_size + # If using swiglu double the output width, see https://arxiv.org/pdf/2002.05202.pdf + if self.gated_linear_unit: + self.fc1_ffn_hidden_size *= 2 + ## Vanilla sequential GEMM # Set random seed for reproducability _set_random_seed(seed_=123, data_parallel_random_init=False) @@ -62,37 +79,42 @@ def test_constructor(self): # expected num weights: router linear weights+bias + MLP weights(no bias) of all experts expected_num_weights = \ self.hidden_size * self.num_experts + self.num_experts + \ - self.hidden_size * (4*self.hidden_size) * 2 * self.num_experts + self.hidden_size * (self.fc1_ffn_hidden_size + self.fc2_ffn_hidden_size) * self.num_experts assert num_weights_smm == expected_num_weights assert torch.equal(self.switch_mlp_smm.router.weight, self.switch_mlp_gmm.router.weight) # weight1: [num_experts*4h, h] # weight2: [h, num_experts*4h] - assert self.switch_mlp_gmm.weight1.shape[0] == self.num_experts * 4 * self.hidden_size + assert self.switch_mlp_gmm.weight1.shape[0] == self.num_experts * self.fc1_ffn_hidden_size assert self.switch_mlp_gmm.weight1.shape[1] == self.hidden_size - assert self.switch_mlp_gmm.weight1.shape == \ - self.switch_mlp_gmm.weight2.t().shape + if self.gated_linear_unit: + assert self.switch_mlp_gmm.weight2.shape[0] == self.hidden_size + assert self.switch_mlp_gmm.weight2.shape[1] == self.num_experts * self.fc2_ffn_hidden_size + else: + assert self.switch_mlp_gmm.weight1.shape == self.switch_mlp_gmm.weight2.t().shape def test_weight_init_value_the_same(self): gmm_w1 = self.switch_mlp_gmm.weight1.view(self.num_experts, -1, self.hidden_size) gmm_w2 = self.switch_mlp_gmm.weight2.view(self.num_experts, self.hidden_size, -1) - gmm_expert0_fc1 = gmm_w1[0] - gmm_expert0_fc2 = gmm_w2[0] - gmm_expert1_fc1 = gmm_w1[1] - gmm_expert1_fc2 = gmm_w2[1] - - smm_expert0_fc1 = self.switch_mlp_smm.local_experts[0].linear_fc1.weight - smm_expert0_fc2 = self.switch_mlp_smm.local_experts[0].linear_fc2.weight - smm_expert1_fc1 = self.switch_mlp_smm.local_experts[1].linear_fc1.weight - smm_expert1_fc2 = self.switch_mlp_smm.local_experts[1].linear_fc2.weight - - assert torch.equal(gmm_expert0_fc1, smm_expert0_fc1) - assert torch.equal(gmm_expert0_fc2, smm_expert0_fc2) + gmm_expert1_fc1 = gmm_w1[0] + gmm_expert1_fc2 = gmm_w2[0] + gmm_expert2_fc1 = gmm_w1[1] + gmm_expert2_fc2 = gmm_w2[1] + + smm_expert1_fc1 = self.switch_mlp_smm.local_experts[0].linear_fc1.weight + smm_expert1_fc2 = self.switch_mlp_smm.local_experts[0].linear_fc2.weight + smm_expert2_fc1 = self.switch_mlp_smm.local_experts[1].linear_fc1.weight + smm_expert2_fc2 = self.switch_mlp_smm.local_experts[1].linear_fc2.weight + + assert torch.equal(gmm_expert1_fc1, smm_expert1_fc1) + if not self.use_cpu_initialization: + assert torch.equal(gmm_expert1_fc2, smm_expert1_fc2) # the param init value is not exactly the same between gmm and smm (refer to test_weight_init_value_the_same.) # TODO: is it necessary to keep smm and gmm share exactly the same init params? 
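As background for the fc1 width doubling above (`fc1_output_size *= 2` in GroupedMLP and `fc1_ffn_hidden_size *= 2` in the test), a small sketch of the gated-linear-unit path: fc1 projects to twice the ffn width, and the result is split in half and gated. The silu activation and the toy sizes here are assumptions; the diff takes the activation from `config.activation_func`.

import torch
import torch.nn.functional as F

hidden, ffn_hidden = 4, 8
fc1_out = 2 * ffn_hidden                 # GLU/swiglu: fc1 projects to 2x the ffn width

x = torch.randn(3, hidden)
w1 = torch.randn(fc1_out, hidden)        # matches weight1's [*, hidden] layout per expert

def glu(y, activation=F.silu):
    a, b = torch.chunk(y, 2, dim=-1)
    return activation(a) * b

gated = glu(x @ w1.t())                  # -> [3, ffn_hidden], ready for fc2 back to hidden
assert gated.shape == (3, ffn_hidden)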
- # assert torch.equal(gmm_expert1_fc1, smm_expert1_fc1) - # assert torch.equal(gmm_expert1_fc2, smm_expert1_fc2) + # assert torch.equal(gmm_expert2_fc1, smm_expert2_fc1) + if self.use_cpu_initialization: + assert torch.equal(gmm_expert2_fc2, smm_expert2_fc2) @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_gpu_forward(self): @@ -113,9 +135,14 @@ def test_gpu_forward(self): # assert torch.equal(output_smm, output_gmm) if __name__ == "__main__": - SMLP_test = TestParallelGroupedMLP() - SMLP_test.setup_method(method=None) - SMLP_test.test_constructor() - SMLP_test.test_weight_init_value_the_same() - SMLP_test.test_gpu_forward() - SMLP_test.teardown_method(method=None) \ No newline at end of file + for use_cpu_unitilization in [True, False]: + for swiglu in [True, False]: + SMLP_test = TestParallelGroupedMLP() + SMLP_test.setup_method( + method=None, + use_cpu_initialization=use_cpu_unitilization, + swiglu=swiglu) + SMLP_test.test_constructor() + SMLP_test.test_weight_init_value_the_same() + SMLP_test.test_gpu_forward() + SMLP_test.teardown_method(method=None) \ No newline at end of file From 1c3c42806763a6352c66998acd957c5821c893ef Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Thu, 16 Nov 2023 23:00:03 -0800 Subject: [PATCH 094/296] minor fix for 'test_grouped_mlp' --- .../{test_grouped_gemm.py => test_grouped_mlp.py} | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) rename tests/unit_tests/transformer/{test_grouped_gemm.py => test_grouped_mlp.py} (96%) diff --git a/tests/unit_tests/transformer/test_grouped_gemm.py b/tests/unit_tests/transformer/test_grouped_mlp.py similarity index 96% rename from tests/unit_tests/transformer/test_grouped_gemm.py rename to tests/unit_tests/transformer/test_grouped_mlp.py index 525feef105..a83a6e0d9f 100644 --- a/tests/unit_tests/transformer/test_grouped_gemm.py +++ b/tests/unit_tests/transformer/test_grouped_mlp.py @@ -137,12 +137,12 @@ def test_gpu_forward(self): if __name__ == "__main__": for use_cpu_unitilization in [True, False]: for swiglu in [True, False]: - SMLP_test = TestParallelGroupedMLP() - SMLP_test.setup_method( + GMLP_test = TestParallelGroupedMLP() + GMLP_test.setup_method( method=None, use_cpu_initialization=use_cpu_unitilization, swiglu=swiglu) - SMLP_test.test_constructor() - SMLP_test.test_weight_init_value_the_same() - SMLP_test.test_gpu_forward() - SMLP_test.teardown_method(method=None) \ No newline at end of file + GMLP_test.test_constructor() + GMLP_test.test_weight_init_value_the_same() + GMLP_test.test_gpu_forward() + GMLP_test.teardown_method(method=None) \ No newline at end of file From ff4542a4a9f14f26ced07181280e6dd3d52b336c Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Fri, 17 Nov 2023 01:14:56 -0800 Subject: [PATCH 095/296] rebase and fix conflicts. 
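Looping back briefly to the `__main__` block of the test file above: when the file is collected by pytest instead of run directly, the same matrix of configurations could be expressed with parametrization. This is only a possible alternative sketch that would live in the same test module (it reuses the TestParallelGroupedMLP class defined there), not something the patch introduces.

import pytest

@pytest.mark.parametrize("use_cpu_initialization", [True, False])
@pytest.mark.parametrize("swiglu", [True, False])
def test_grouped_mlp_variants(use_cpu_initialization, swiglu):
    gmlp_test = TestParallelGroupedMLP()
    gmlp_test.setup_method(None, use_cpu_initialization=use_cpu_initialization, swiglu=swiglu)
    try:
        gmlp_test.test_constructor()
        gmlp_test.test_weight_init_value_the_same()
        gmlp_test.test_gpu_forward()
    finally:
        gmlp_test.teardown_method(None)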
--- megatron/core/models/gpt/gpt_layer_specs.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/megatron/core/models/gpt/gpt_layer_specs.py b/megatron/core/models/gpt/gpt_layer_specs.py index 94be21c02e..8965688385 100755 --- a/megatron/core/models/gpt/gpt_layer_specs.py +++ b/megatron/core/models/gpt/gpt_layer_specs.py @@ -106,7 +106,7 @@ def get_gpt_layer_local_spec() -> ModuleSpec: params={"attn_mask_type": AttnMaskType.causal}, submodules=SelfAttentionSubmodules( linear_qkv=TELayerNormColumnParallelLinear, - dot_product_attention=TEDotProductAttention, + core_attention=TEDotProductAttention, linear_proj=TERowParallelLinear, ), ), @@ -155,7 +155,7 @@ def get_gpt_layer_local_spec() -> ModuleSpec: params={"attn_mask_type": AttnMaskType.causal}, submodules=SelfAttentionSubmodules( linear_qkv=ColumnParallelLinear, - dot_product_attention=DotProductAttention, + core_attention=DotProductAttention, linear_proj=RowParallelLinear, ), ), From b95cba203ccfe7134eb0d9d29723543057b9db23 Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Fri, 17 Nov 2023 11:23:15 +0000 Subject: [PATCH 096/296] autoformat. --- megatron/core/models/gpt/gpt_layer_specs.py | 10 ++----- megatron/core/parallel_state.py | 2 ++ megatron/core/transformer/base_moe_layer.py | 8 ++--- .../core/transformer/grouped_gemm_util.py | 8 +++-- megatron/core/transformer/grouped_mlp.py | 30 ++++++++----------- megatron/core/transformer/switch_mlp.py | 3 +- 6 files changed, 28 insertions(+), 33 deletions(-) diff --git a/megatron/core/models/gpt/gpt_layer_specs.py b/megatron/core/models/gpt/gpt_layer_specs.py index 8965688385..d27aa62a68 100755 --- a/megatron/core/models/gpt/gpt_layer_specs.py +++ b/megatron/core/models/gpt/gpt_layer_specs.py @@ -112,9 +112,7 @@ def get_gpt_layer_local_spec() -> ModuleSpec: ), self_attn_bda=get_bias_dropout_add, pre_mlp_layernorm=FusedLayerNorm, - mlp=ModuleSpec( - module=GroupedMLP, # MOE - ), + mlp=ModuleSpec(module=GroupedMLP), # MOE mlp_bda=get_bias_dropout_add, ), ) @@ -161,9 +159,7 @@ def get_gpt_layer_local_spec() -> ModuleSpec: ), self_attn_bda=get_bias_dropout_add, pre_mlp_layernorm=FusedLayerNorm, - mlp=ModuleSpec( - module=GroupedMLP, # MOE - ), + mlp=ModuleSpec(module=GroupedMLP), # MOE mlp_bda=get_bias_dropout_add, ), -) \ No newline at end of file +) diff --git a/megatron/core/parallel_state.py b/megatron/core/parallel_state.py index 40923a6576..f509a68b88 100644 --- a/megatron/core/parallel_state.py +++ b/megatron/core/parallel_state.py @@ -897,6 +897,7 @@ def get_expert_model_parallel_world_size(): else: return 0 + def get_tensor_and_expert_parallel_world_size(): """Return my rank for the expert parallel group""" if torch.distributed.is_available() and torch.distributed.is_initialized(): @@ -907,6 +908,7 @@ def get_tensor_and_expert_parallel_world_size(): else: return 0 + def get_expert_model_parallel_rank(): """Return my rank for the expert parallel group""" if torch.distributed.is_available() and torch.distributed.is_initialized(): diff --git a/megatron/core/transformer/base_moe_layer.py b/megatron/core/transformer/base_moe_layer.py index b60893ddbc..3c44410782 100644 --- a/megatron/core/transformer/base_moe_layer.py +++ b/megatron/core/transformer/base_moe_layer.py @@ -4,9 +4,8 @@ import torch from megatron.core import parallel_state, tensor_parallel -from megatron.core.parallel_state import ( - get_tensor_and_expert_parallel_group, -) + +from megatron.core.parallel_state import get_tensor_and_expert_parallel_group from megatron.core.tensor_parallel import 
get_cuda_rng_tracker, get_data_parallel_rng_tracker_name from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.transformer_config import TransformerConfig @@ -41,6 +40,7 @@ class BaseMoELayer(MegatronModule): """ Basic MoE layer. """ + def __init__(self, config: TransformerConfig): super().__init__(config=config) @@ -136,4 +136,4 @@ def token_unpermutation(self, output_total, output_bias_total=None): else: output_bias_total = None - return output_total, output_bias_total \ No newline at end of file + return output_total, output_bias_total diff --git a/megatron/core/transformer/grouped_gemm_util.py b/megatron/core/transformer/grouped_gemm_util.py index b4b09e170f..43bdf79759 100644 --- a/megatron/core/transformer/grouped_gemm_util.py +++ b/megatron/core/transformer/grouped_gemm_util.py @@ -5,12 +5,16 @@ except ImportError: grouped_gemm = None + def grouped_gemm_is_available(): return grouped_gemm is not None + def assert_grouped_gemm_is_available(): assert grouped_gemm_is_available(), ( "Grouped GEMM is not available. Please run " - "`pip install git+https://github.com/tgale96/grouped_gemm@main`.") + "`pip install git+https://github.com/tgale96/grouped_gemm@main`." + ) + -ops = grouped_gemm.ops if grouped_gemm_is_available() else None \ No newline at end of file +ops = grouped_gemm.ops if grouped_gemm_is_available() else None diff --git a/megatron/core/transformer/grouped_mlp.py b/megatron/core/transformer/grouped_mlp.py index a6d90e613f..7ec522f789 100644 --- a/megatron/core/transformer/grouped_mlp.py +++ b/megatron/core/transformer/grouped_mlp.py @@ -5,7 +5,6 @@ from torch.nn.parameter import Parameter from megatron.core import parallel_state - from megatron.core.tensor_parallel.layers import ( _initialize_affine_weight_cpu, _initialize_affine_weight_gpu, @@ -17,8 +16,8 @@ from .base_moe_layer import BaseMoELayer -class ScaleGradient(torch.autograd.Function): +class ScaleGradient(torch.autograd.Function): @staticmethod @torch.cuda.amp.custom_fwd def forward(ctx, x, scale): @@ -29,6 +28,8 @@ def forward(ctx, x, scale): @torch.cuda.amp.custom_bwd def backward(ctx, grad): return grad * ctx.scale, None + + scale_gradient = ScaleGradient.apply class GroupedMLP(BaseMoELayer): @@ -42,12 +43,14 @@ def __init__(self, config: TransformerConfig): self.config: TransformerConfig = config gg.assert_grouped_gemm_is_available() - assert config.add_bias_linear == False, \ - "bias in the expert layer is not supported in Grouped GEMM yet." + assert ( + config.add_bias_linear == False + ), "bias in the expert layer is not supported in Grouped GEMM yet." self.expert_parallel = config.expert_model_parallel_size > 1 self.gradient_scale = 1 / parallel_state.get_tensor_and_expert_parallel_world_size() if self.config.gated_linear_unit: + def glu(x): x = torch.chunk(x, 2, dim=-1) return self.config.activation_func(x[0]) * x[1] @@ -56,7 +59,6 @@ def glu(x): else: self.activation_func = self.config.activation_func - # How many feature each rank holds for fc1 and fc2, respectively. tp_size = parallel_state.get_tensor_model_parallel_world_size() fc1_output_size = self.config.ffn_hidden_size * self.num_local_experts @@ -152,8 +154,8 @@ def forward(self, hidden_states): # Histogram the expert ids to identify the number of tokens routed to each expert # Note that for np.histogram, all but the last (righthand-most) bin is half-open. 
tokens_per_expert, bin_edges = np.histogram( - sorted.cpu(), - bins=np.arange(self.config.num_moe_experts + 1)) + sorted.cpu(), bins=np.arange(self.config.num_moe_experts + 1) + ) tokens_per_expert = torch.tensor(tokens_per_expert).to(torch.long) reverse_indices = indices.argsort() @@ -162,21 +164,13 @@ def forward(self, hidden_states): w1 = w1.view(self.num_local_experts, -1, self.config.hidden_size) w2 = w2.view(self.num_local_experts, self.config.hidden_size, -1) - fc1_output = gg.ops.gmm( - sorted_global_hidden_states, - w1, - tokens_per_expert, - trans_b=True) + fc1_output = gg.ops.gmm(sorted_global_hidden_states, w1, tokens_per_expert, trans_b=True) intermediate_parallel = self.activation_func(fc1_output) - fc2_output = gg.ops.gmm( - intermediate_parallel, - w2, - tokens_per_expert, - trans_b=True) + fc2_output = gg.ops.gmm(intermediate_parallel, w2, tokens_per_expert, trans_b=True) # Un-permutation of tokens output_total = fc2_output[reverse_indices] output_total, _ = self.token_unpermutation(output_total) - return output_total, None \ No newline at end of file + return output_total, None diff --git a/megatron/core/transformer/switch_mlp.py b/megatron/core/transformer/switch_mlp.py index f891ab5aed..07529ed8be 100644 --- a/megatron/core/transformer/switch_mlp.py +++ b/megatron/core/transformer/switch_mlp.py @@ -31,7 +31,6 @@ def forward(self, hidden_states): if self.add_bias: output_bias_total = torch.zeros_like(global_hidden_states) - for expert_num, expert in enumerate(self.local_experts): local_expert_index = self.local_expert_indices[expert_num] local_indices = (global_indices == local_expert_index).nonzero() @@ -45,4 +44,4 @@ def forward(self, hidden_states): output_total, output_bias_total = self.token_unpermutation(output_total, output_bias_total) - return output_total, output_bias_total \ No newline at end of file + return output_total, output_bias_total From 9b5401dbe79eaaca1921aeb6c8339e7c3a6e9b39 Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Sun, 19 Nov 2023 22:27:03 -0800 Subject: [PATCH 097/296] rebase and fix conflicts. --- megatron/core/transformer/base_moe_layer.py | 2 -- megatron/core/transformer/grouped_mlp.py | 2 +- tests/unit_tests/transformer/test_grouped_mlp.py | 2 +- 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/megatron/core/transformer/base_moe_layer.py b/megatron/core/transformer/base_moe_layer.py index 3c44410782..349727b9cb 100644 --- a/megatron/core/transformer/base_moe_layer.py +++ b/megatron/core/transformer/base_moe_layer.py @@ -1,10 +1,8 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. -import numpy as np import torch from megatron.core import parallel_state, tensor_parallel - from megatron.core.parallel_state import get_tensor_and_expert_parallel_group from megatron.core.tensor_parallel import get_cuda_rng_tracker, get_data_parallel_rng_tracker_name from megatron.core.transformer.module import MegatronModule diff --git a/megatron/core/transformer/grouped_mlp.py b/megatron/core/transformer/grouped_mlp.py index 7ec522f789..8516813b3e 100644 --- a/megatron/core/transformer/grouped_mlp.py +++ b/megatron/core/transformer/grouped_mlp.py @@ -16,7 +16,6 @@ from .base_moe_layer import BaseMoELayer - class ScaleGradient(torch.autograd.Function): @staticmethod @torch.cuda.amp.custom_fwd @@ -32,6 +31,7 @@ def backward(ctx, grad): scale_gradient = ScaleGradient.apply + class GroupedMLP(BaseMoELayer): """ Top-1 Mixture of Experts Layer with Grouped GEMM. 
Routes input to one of N MLP "experts" diff --git a/tests/unit_tests/transformer/test_grouped_mlp.py b/tests/unit_tests/transformer/test_grouped_mlp.py index a83a6e0d9f..85d3ba1bce 100644 --- a/tests/unit_tests/transformer/test_grouped_mlp.py +++ b/tests/unit_tests/transformer/test_grouped_mlp.py @@ -78,7 +78,7 @@ def test_constructor(self): assert num_weights_smm == num_weights_gmm # expected num weights: router linear weights+bias + MLP weights(no bias) of all experts expected_num_weights = \ - self.hidden_size * self.num_experts + self.num_experts + \ + self.hidden_size * self.num_experts + \ self.hidden_size * (self.fc1_ffn_hidden_size + self.fc2_ffn_hidden_size) * self.num_experts assert num_weights_smm == expected_num_weights From c2e29fbe94aa68a6ff20de910e1b2f3d145d2e98 Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Wed, 22 Nov 2023 23:14:38 -0800 Subject: [PATCH 098/296] Fix UT. --- tests/unit_tests/transformer/test_grouped_mlp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit_tests/transformer/test_grouped_mlp.py b/tests/unit_tests/transformer/test_grouped_mlp.py index 85d3ba1bce..f19070ea60 100644 --- a/tests/unit_tests/transformer/test_grouped_mlp.py +++ b/tests/unit_tests/transformer/test_grouped_mlp.py @@ -49,7 +49,7 @@ def setup_method(self, method, use_cpu_initialization=False, swiglu=True): self.switch_mlp_smm = SwitchMLP(tf_config, gpt_layer_with_transformer_engine_spec_moe.submodules.mlp.submodules) - self.args = parse_args(extra_args_provider=None, ignore_unknown_args=False) + self.args = parse_args(ignore_unknown_args=True) self.args.bf16=True # Bias is not supported in grouped gemm currently, thus we disable the # bias in the linear layer. From c5fb719e87d1463f1a8397b0c78901c59e8f5482 Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Sun, 26 Nov 2023 18:12:14 -0800 Subject: [PATCH 099/296] fix UTs. --- .gitlab-ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 5fe1588265..e8a15be4e6 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -32,6 +32,7 @@ unit_tests: - pip install nltk - pip install wrapt - pip install zarr "tensorstore==0.1.45" # for distributed checkpointing tests + - pip install git+https://github.com/tgale96/grouped_gemm@main # for grouped gemm tests - torchrun --nproc_per_node=8 -m pytest --cov-report=term --cov-report=html --cov=megatron/core tests/unit_tests coverage: '/(?i)total.*? (100(?:\.0+)?\%|[1-9]?\d(?:\.\d+)?\%)$/' artifacts: From ef5bee983022d51154755e4ecf457196540a6df0 Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Wed, 29 Nov 2023 23:46:10 -0800 Subject: [PATCH 100/296] SwitchMLP: token permutation optimizations. --- megatron/core/transformer/grouped_mlp.py | 25 ++++++++++++------------ 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/megatron/core/transformer/grouped_mlp.py b/megatron/core/transformer/grouped_mlp.py index 8516813b3e..889621ef2c 100644 --- a/megatron/core/transformer/grouped_mlp.py +++ b/megatron/core/transformer/grouped_mlp.py @@ -1,6 +1,5 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. 
-import numpy as np import torch from torch.nn.parameter import Parameter @@ -148,16 +147,15 @@ def forward(self, hidden_states): global_hidden_states, global_indices = self.token_permutation(hidden_states) with torch.no_grad(): - sorted, indices = torch.sort(global_indices, stable=True) - # Permutation of tokens - sorted_global_hidden_states = global_hidden_states[indices] - # Histogram the expert ids to identify the number of tokens routed to each expert - # Note that for np.histogram, all but the last (righthand-most) bin is half-open. - tokens_per_expert, bin_edges = np.histogram( - sorted.cpu(), bins=np.arange(self.config.num_moe_experts + 1) - ) - tokens_per_expert = torch.tensor(tokens_per_expert).to(torch.long) - reverse_indices = indices.argsort() + sorted_indices = torch.argsort(global_indices) + # Permutation of tokens to each expert group. + sorted_global_hidden_states = global_hidden_states[sorted_indices] + # GroupedGEMM requires tokens_per_expert is on cpu. + tokens_per_expert = torch.histc( + global_indices, + bins=self.config.num_moe_experts, + min=0, + max=self.config.num_moe_experts-1).cpu() w1, w2 = (self.scale_grad(self.weight1), self.scale_grad(self.weight2)) # Reshape the weights for the grouped GEMMs. @@ -170,7 +168,8 @@ def forward(self, hidden_states): fc2_output = gg.ops.gmm(intermediate_parallel, w2, tokens_per_expert, trans_b=True) # Un-permutation of tokens - output_total = fc2_output[reverse_indices] + original_order_ghs = torch.empty_like(fc2_output) + original_order_ghs[sorted_indices] = fc2_output + output_total, _ = self.token_unpermutation(original_order_ghs) - output_total, _ = self.token_unpermutation(output_total) return output_total, None From 378fdd213c1220e850bb0df10555829bbf693257 Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Thu, 30 Nov 2023 23:01:24 -0800 Subject: [PATCH 101/296] fix format. --- megatron/core/transformer/grouped_mlp.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/megatron/core/transformer/grouped_mlp.py b/megatron/core/transformer/grouped_mlp.py index 889621ef2c..35296d636d 100644 --- a/megatron/core/transformer/grouped_mlp.py +++ b/megatron/core/transformer/grouped_mlp.py @@ -155,7 +155,8 @@ def forward(self, hidden_states): global_indices, bins=self.config.num_moe_experts, min=0, - max=self.config.num_moe_experts-1).cpu() + max=self.config.num_moe_experts - 1, + ).cpu() w1, w2 = (self.scale_grad(self.weight1), self.scale_grad(self.weight2)) # Reshape the weights for the grouped GEMMs. From 57f91c83bd4108167f9b7677449e2af29df9c2a2 Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Sun, 3 Dec 2023 23:54:51 -0800 Subject: [PATCH 102/296] gpt_layer_specs simplifications for MoE. 
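A tiny self-contained illustration of the permutation round-trip behind the torch.argsort/torch.histc rewrite in patch 100 above: tokens are sorted by expert id so each expert's tokens are contiguous for the grouped GEMM, then scattered back to their original order through the same index tensor. The values are invented.

import torch

expert_ids = torch.tensor([1, 0, 1, 0, 0])             # per-token expert assignment
tokens = torch.arange(5.0).unsqueeze(1)                # stand-in hidden states, [5, 1]

sorted_indices = torch.argsort(expert_ids)
permuted = tokens[sorted_indices]                      # expert-0 tokens first, then expert-1
tokens_per_expert = torch.histc(expert_ids.float(), bins=2, min=0, max=1).long()  # tensor([3, 2])

restored = torch.empty_like(permuted)
restored[sorted_indices] = permuted                    # undo the permutation
assert torch.equal(restored, tokens)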
--- megatron/core/models/gpt/gpt_layer_specs.py | 133 +++++------------- pretrain_gpt.py | 14 +- .../transformer/test_grouped_mlp.py | 6 +- .../unit_tests/transformer/test_switch_mlp.py | 10 +- 4 files changed, 46 insertions(+), 117 deletions(-) diff --git a/megatron/core/models/gpt/gpt_layer_specs.py b/megatron/core/models/gpt/gpt_layer_specs.py index d27aa62a68..a8b979aac3 100755 --- a/megatron/core/models/gpt/gpt_layer_specs.py +++ b/megatron/core/models/gpt/gpt_layer_specs.py @@ -12,6 +12,7 @@ from megatron.core.transformer.dot_product_attention import DotProductAttention from megatron.core.transformer.enums import AttnMaskType from megatron.core.transformer.grouped_mlp import GroupedMLP +from megatron.core.transformer.identity_op import IdentityOp from megatron.core.transformer.mlp import MLP, MLPSubmodules from megatron.core.transformer.spec_utils import ModuleSpec from megatron.core.transformer.switch_mlp import SwitchMLP @@ -19,7 +20,12 @@ # Use this spec to use lower level Transformer Engine modules (required for fp8 training) -def get_gpt_layer_with_transformer_engine_spec() -> ModuleSpec: +def get_gpt_layer_with_transformer_engine_spec( + num_experts: int = None, moe_grouped_gemm: bool = False +) -> ModuleSpec: + mlp = _get_mlp_module_spec( + use_te=True, num_experts=num_experts, moe_grouped_gemm=moe_grouped_gemm + ) return ModuleSpec( module=TransformerLayer, submodules=TransformerLayerSubmodules( @@ -33,19 +39,18 @@ def get_gpt_layer_with_transformer_engine_spec() -> ModuleSpec: ), ), self_attn_bda=get_bias_dropout_add, - mlp=ModuleSpec( - module=MLP, - submodules=MLPSubmodules( - linear_fc1=TELayerNormColumnParallelLinear, linear_fc2=TERowParallelLinear, - ), - ), + pre_mlp_layernorm=FusedLayerNorm if num_experts else IdentityOp, + mlp=mlp, mlp_bda=get_bias_dropout_add, ), ) # Use this spec for an implementation using only modules in megatron core -def get_gpt_layer_local_spec() -> ModuleSpec: +def get_gpt_layer_local_spec(num_experts: int = None, moe_grouped_gemm: bool = False) -> ModuleSpec: + mlp = _get_mlp_module_spec( + use_te=False, num_experts=num_experts, moe_grouped_gemm=moe_grouped_gemm + ) return ModuleSpec( module=TransformerLayer, submodules=TransformerLayerSubmodules( @@ -61,105 +66,33 @@ def get_gpt_layer_local_spec() -> ModuleSpec: ), self_attn_bda=get_bias_dropout_add, pre_mlp_layernorm=FusedLayerNorm, - mlp=ModuleSpec( - module=MLP, - submodules=MLPSubmodules( - linear_fc1=ColumnParallelLinear, linear_fc2=RowParallelLinear, - ), - ), + mlp=mlp, mlp_bda=get_bias_dropout_add, ), ) -# Use this spec to use lower level Transformer Engine modules and SwitchMLP based MoE -gpt_layer_with_transformer_engine_spec_moe = ModuleSpec( - module=TransformerLayer, - submodules=TransformerLayerSubmodules( - self_attention=ModuleSpec( - module=SelfAttention, - params={"attn_mask_type": AttnMaskType.causal}, - submodules=SelfAttentionSubmodules( - linear_qkv=TELayerNormColumnParallelLinear, - core_attention=TEDotProductAttention, - linear_proj=TERowParallelLinear, - ), - ), - self_attn_bda=get_bias_dropout_add, - pre_mlp_layernorm=FusedLayerNorm, - mlp=ModuleSpec( - module=SwitchMLP, # MOE +# Helper function to get module spec for MLP/MoE +def _get_mlp_module_spec( + use_te: bool = True, num_experts: int = None, moe_grouped_gemm: bool = False +) -> ModuleSpec: + if num_experts is None: + # Dense MLP w/ or w/o TE modules. 
+ return ModuleSpec( + module=MLP, submodules=MLPSubmodules( - linear_fc1=ColumnParallelLinear, linear_fc2=RowParallelLinear, - ), - ), - mlp_bda=get_bias_dropout_add, - ), -) - -# Use this spec to use lower level Transformer Engine modules and GroupedMLP based MoE -gpt_layer_with_transformer_engine_spec_moe_grouped_gemm = ModuleSpec( - module=TransformerLayer, - submodules=TransformerLayerSubmodules( - self_attention=ModuleSpec( - module=SelfAttention, - params={"attn_mask_type": AttnMaskType.causal}, - submodules=SelfAttentionSubmodules( - linear_qkv=TELayerNormColumnParallelLinear, - core_attention=TEDotProductAttention, - linear_proj=TERowParallelLinear, - ), - ), - self_attn_bda=get_bias_dropout_add, - pre_mlp_layernorm=FusedLayerNorm, - mlp=ModuleSpec(module=GroupedMLP), # MOE - mlp_bda=get_bias_dropout_add, - ), -) - -# Use this spec for an implementation using only modules in megatron core for SwitchMLP based MoE models -gpt_layer_local_spec_moe = ModuleSpec( - module=TransformerLayer, - submodules=TransformerLayerSubmodules( - input_layernorm=FusedLayerNorm, - self_attention=ModuleSpec( - module=SelfAttention, - params={"attn_mask_type": AttnMaskType.causal}, - submodules=SelfAttentionSubmodules( - linear_qkv=ColumnParallelLinear, - core_attention=DotProductAttention, - linear_proj=RowParallelLinear, + linear_fc1=TELayerNormColumnParallelLinear if use_te else ColumnParallelLinear, + linear_fc2=TERowParallelLinear if use_te else RowParallelLinear, ), - ), - self_attn_bda=get_bias_dropout_add, - pre_mlp_layernorm=FusedLayerNorm, - mlp=ModuleSpec( - module=SwitchMLP, # MOE + ) + elif moe_grouped_gemm: + # GroupedMLP based MoE with modules in megatron core. + return GroupedMLP + else: + # SwitchMLP based MoE with modules in megatron core. + return ModuleSpec( + module=SwitchMLP, submodules=MLPSubmodules( linear_fc1=ColumnParallelLinear, linear_fc2=RowParallelLinear, ), - ), - mlp_bda=get_bias_dropout_add, - ), -) - -# Use this spec for an implementation using only modules in megatron core for GroupedMLP based MoE models -gpt_layer_local_spec_moe_grouped_gemm = ModuleSpec( - module=TransformerLayer, - submodules=TransformerLayerSubmodules( - input_layernorm=FusedLayerNorm, - self_attention=ModuleSpec( - module=SelfAttention, - params={"attn_mask_type": AttnMaskType.causal}, - submodules=SelfAttentionSubmodules( - linear_qkv=ColumnParallelLinear, - core_attention=DotProductAttention, - linear_proj=RowParallelLinear, - ), - ), - self_attn_bda=get_bias_dropout_add, - pre_mlp_layernorm=FusedLayerNorm, - mlp=ModuleSpec(module=GroupedMLP), # MOE - mlp_bda=get_bias_dropout_add, - ), -) + ) diff --git a/pretrain_gpt.py b/pretrain_gpt.py index e6685dfffa..acf5ea8377 100644 --- a/pretrain_gpt.py +++ b/pretrain_gpt.py @@ -25,11 +25,8 @@ average_losses_across_data_parallel_group ) from megatron.arguments import core_transformer_config_from_args -from megatron.core.models.gpt.gpt_layer_specs import ( - get_gpt_layer_with_transformer_engine_spec, - gpt_layer_with_transformer_engine_spec_moe, - gpt_layer_with_transformer_engine_spec_moe_grouped_gemm, -) +from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec + def model_provider(pre_process=True, post_process=True) -> Union[GPTModel, megatron.model.GPTModel]: """Builds the model. 
@@ -53,12 +50,7 @@ def model_provider(pre_process=True, post_process=True) -> Union[GPTModel, megat if args.spec is not None: transformer_layer_spec = import_module(args.spec) else: - if args.num_experts is None: - transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec() - elif args.moe_grouped_gemm: - transformer_layer_spec = gpt_layer_with_transformer_engine_spec_moe_grouped_gemm - else: - transformer_layer_spec = gpt_layer_with_transformer_engine_spec_moe + transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec(args.num_experts, args.moe_grouped_gemm) model = GPTModel( config=config, diff --git a/tests/unit_tests/transformer/test_grouped_mlp.py b/tests/unit_tests/transformer/test_grouped_mlp.py index f19070ea60..72da23d8d4 100644 --- a/tests/unit_tests/transformer/test_grouped_mlp.py +++ b/tests/unit_tests/transformer/test_grouped_mlp.py @@ -6,7 +6,7 @@ import torch.nn.functional as F from megatron.arguments import parse_args -from megatron.core.models.gpt.gpt_layer_specs import gpt_layer_with_transformer_engine_spec_moe +from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec from megatron.core.transformer.grouped_mlp import GroupedMLP from megatron.core.transformer.switch_mlp import SwitchMLP from megatron.core.transformer.transformer_config import TransformerConfig @@ -46,8 +46,10 @@ def setup_method(self, method, use_cpu_initialization=False, swiglu=True): ## Vanilla sequential GEMM # Set random seed for reproducability _set_random_seed(seed_=123, data_parallel_random_init=False) + transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec( + self.num_experts, moe_grouped_gemm=False) self.switch_mlp_smm = SwitchMLP(tf_config, - gpt_layer_with_transformer_engine_spec_moe.submodules.mlp.submodules) + transformer_layer_spec.submodules.mlp.submodules) self.args = parse_args(ignore_unknown_args=True) self.args.bf16=True diff --git a/tests/unit_tests/transformer/test_switch_mlp.py b/tests/unit_tests/transformer/test_switch_mlp.py index b5f31ca237..384557f9d3 100644 --- a/tests/unit_tests/transformer/test_switch_mlp.py +++ b/tests/unit_tests/transformer/test_switch_mlp.py @@ -8,7 +8,7 @@ from tests.unit_tests.test_utilities import Utils from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed from megatron.core.transformer.transformer_config import TransformerConfig -from megatron.core.models.gpt.gpt_layer_specs import gpt_layer_with_transformer_engine_spec_moe +from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec class TestParallelSwitchMLP: @@ -16,9 +16,11 @@ def setup_method(self, method): Utils.initialize_model_parallel(1,1) model_parallel_cuda_manual_seed(123) print("done intializing") - transformer_config = TransformerConfig(num_layers=2, hidden_size=12, num_attention_heads=4, num_moe_experts= 2, use_cpu_initialization=True) - self.switch_mlp = SwitchMLP(transformer_config, - gpt_layer_with_transformer_engine_spec_moe.submodules.mlp.submodules) + num_moe_experts = 2 + transformer_config = TransformerConfig(num_layers=2, hidden_size=12, num_attention_heads=4, num_moe_experts=num_moe_experts, use_cpu_initialization=True) + transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec( + num_experts=num_moe_experts, moe_grouped_gemm=False) + self.switch_mlp = SwitchMLP(transformer_config, transformer_layer_spec.submodules.mlp.submodules) def teardown_method(self, method): Utils.destroy_model_parallel() From 
a464a92047c942218bb56cc8e67eb6444c45b00f Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Mon, 4 Dec 2023 03:19:34 -0800 Subject: [PATCH 103/296] move all moe stuffs into core/transformer/moe folder. --- megatron/core/models/gpt/gpt_layer_specs.py | 4 ++-- megatron/core/transformer/{ => moe}/base_moe_layer.py | 0 megatron/core/transformer/{ => moe}/grouped_gemm_util.py | 0 megatron/core/transformer/{ => moe}/grouped_mlp.py | 2 +- megatron/core/transformer/{ => moe}/switch_mlp.py | 2 +- tests/unit_tests/transformer/test_grouped_mlp.py | 4 ++-- tests/unit_tests/transformer/test_switch_mlp.py | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) rename megatron/core/transformer/{ => moe}/base_moe_layer.py (100%) rename megatron/core/transformer/{ => moe}/grouped_gemm_util.py (100%) rename megatron/core/transformer/{ => moe}/grouped_mlp.py (99%) rename megatron/core/transformer/{ => moe}/switch_mlp.py (96%) diff --git a/megatron/core/models/gpt/gpt_layer_specs.py b/megatron/core/models/gpt/gpt_layer_specs.py index a8b979aac3..25ef28914a 100755 --- a/megatron/core/models/gpt/gpt_layer_specs.py +++ b/megatron/core/models/gpt/gpt_layer_specs.py @@ -11,11 +11,11 @@ ) from megatron.core.transformer.dot_product_attention import DotProductAttention from megatron.core.transformer.enums import AttnMaskType -from megatron.core.transformer.grouped_mlp import GroupedMLP from megatron.core.transformer.identity_op import IdentityOp from megatron.core.transformer.mlp import MLP, MLPSubmodules +from megatron.core.transformer.moe.grouped_mlp import GroupedMLP +from megatron.core.transformer.moe.switch_mlp import SwitchMLP from megatron.core.transformer.spec_utils import ModuleSpec -from megatron.core.transformer.switch_mlp import SwitchMLP from megatron.core.transformer.transformer_layer import TransformerLayer, TransformerLayerSubmodules diff --git a/megatron/core/transformer/base_moe_layer.py b/megatron/core/transformer/moe/base_moe_layer.py similarity index 100% rename from megatron/core/transformer/base_moe_layer.py rename to megatron/core/transformer/moe/base_moe_layer.py diff --git a/megatron/core/transformer/grouped_gemm_util.py b/megatron/core/transformer/moe/grouped_gemm_util.py similarity index 100% rename from megatron/core/transformer/grouped_gemm_util.py rename to megatron/core/transformer/moe/grouped_gemm_util.py diff --git a/megatron/core/transformer/grouped_mlp.py b/megatron/core/transformer/moe/grouped_mlp.py similarity index 99% rename from megatron/core/transformer/grouped_mlp.py rename to megatron/core/transformer/moe/grouped_mlp.py index 35296d636d..67ac30cb24 100644 --- a/megatron/core/transformer/grouped_mlp.py +++ b/megatron/core/transformer/moe/grouped_mlp.py @@ -9,7 +9,7 @@ _initialize_affine_weight_gpu, ) from megatron.core.tensor_parallel.utils import divide -from megatron.core.transformer import grouped_gemm_util as gg +from megatron.core.transformer.moe import grouped_gemm_util as gg from megatron.core.transformer.transformer_config import TransformerConfig from .base_moe_layer import BaseMoELayer diff --git a/megatron/core/transformer/switch_mlp.py b/megatron/core/transformer/moe/switch_mlp.py similarity index 96% rename from megatron/core/transformer/switch_mlp.py rename to megatron/core/transformer/moe/switch_mlp.py index 07529ed8be..357a020d2c 100644 --- a/megatron/core/transformer/switch_mlp.py +++ b/megatron/core/transformer/moe/switch_mlp.py @@ -3,10 +3,10 @@ import numpy as np import torch +from megatron.core.transformer.mlp import MLP, MLPSubmodules from 
megatron.core.transformer.transformer_config import TransformerConfig from .base_moe_layer import BaseMoELayer -from .mlp import MLP, MLPSubmodules class SwitchMLP(BaseMoELayer): diff --git a/tests/unit_tests/transformer/test_grouped_mlp.py b/tests/unit_tests/transformer/test_grouped_mlp.py index 72da23d8d4..3541fbf456 100644 --- a/tests/unit_tests/transformer/test_grouped_mlp.py +++ b/tests/unit_tests/transformer/test_grouped_mlp.py @@ -7,8 +7,8 @@ from megatron.arguments import parse_args from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec -from megatron.core.transformer.grouped_mlp import GroupedMLP -from megatron.core.transformer.switch_mlp import SwitchMLP +from megatron.core.transformer.moe.grouped_mlp import GroupedMLP +from megatron.core.transformer.moe.switch_mlp import SwitchMLP from megatron.core.transformer.transformer_config import TransformerConfig from megatron.initialize import _set_random_seed from megatron.model import Float16Module diff --git a/tests/unit_tests/transformer/test_switch_mlp.py b/tests/unit_tests/transformer/test_switch_mlp.py index 384557f9d3..b7ee023349 100644 --- a/tests/unit_tests/transformer/test_switch_mlp.py +++ b/tests/unit_tests/transformer/test_switch_mlp.py @@ -4,7 +4,7 @@ import torch -from megatron.core.transformer.switch_mlp import SwitchMLP +from megatron.core.transformer.moe.switch_mlp import SwitchMLP from tests.unit_tests.test_utilities import Utils from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed from megatron.core.transformer.transformer_config import TransformerConfig From 131421468097188a83607ee1bbf4480139f8adbc Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Mon, 4 Dec 2023 18:32:21 -0800 Subject: [PATCH 104/296] Enable CUTLASS GroupedGEMM for FWD experts computation. --- .../core/transformer/moe/grouped_gemm_util.py | 2 +- megatron/core/transformer/moe/grouped_mlp.py | 32 +++++++++++-------- .../transformer/test_grouped_mlp.py | 16 +++++----- 3 files changed, 27 insertions(+), 23 deletions(-) diff --git a/megatron/core/transformer/moe/grouped_gemm_util.py b/megatron/core/transformer/moe/grouped_gemm_util.py index 43bdf79759..07c576c24b 100644 --- a/megatron/core/transformer/moe/grouped_gemm_util.py +++ b/megatron/core/transformer/moe/grouped_gemm_util.py @@ -13,7 +13,7 @@ def grouped_gemm_is_available(): def assert_grouped_gemm_is_available(): assert grouped_gemm_is_available(), ( "Grouped GEMM is not available. Please run " - "`pip install git+https://github.com/tgale96/grouped_gemm@main`." + "`pip install git+https://github.com/fanshiqing/grouped_gemm@main`." ) diff --git a/megatron/core/transformer/moe/grouped_mlp.py b/megatron/core/transformer/moe/grouped_mlp.py index 67ac30cb24..f8f2879112 100644 --- a/megatron/core/transformer/moe/grouped_mlp.py +++ b/megatron/core/transformer/moe/grouped_mlp.py @@ -70,54 +70,58 @@ def glu(x): fc2_input_size = self.config.ffn_hidden_size * self.num_local_experts fc2_input_size_per_partition = divide(fc2_input_size, tp_size) + # Note: The current kernel implementations of grouped_gemm + # does not support transposition with CUTLASS grouped GEMM + # (https://github.com/fanshiqing/grouped_gemm/blob/main/csrc/grouped_gemm.cu#L355-L358) + # and as a result we avoid allocate the transpose of weights. # Initialize weight. 
if config.use_cpu_initialization: self.weight1 = Parameter( torch.empty( - fc1_output_size_per_partition, self.config.hidden_size, + fc1_output_size_per_partition, dtype=config.params_dtype, ) ) self.weight2 = Parameter( torch.empty( - self.config.hidden_size, fc2_input_size_per_partition, + self.config.hidden_size, dtype=config.params_dtype, ) ) if config.perform_initialization: _initialize_affine_weight_cpu( self.weight1, - fc1_output_size, self.config.hidden_size, + fc1_output_size, fc1_output_size_per_partition, - partition_dim=0, + partition_dim=1, init_method=config.init_method, params_dtype=config.params_dtype, ) _initialize_affine_weight_cpu( self.weight2, - self.config.hidden_size, fc2_input_size, + self.config.hidden_size, fc2_input_size_per_partition, - partition_dim=1, + partition_dim=0, init_method=config.output_layer_init_method, params_dtype=config.params_dtype, ) else: self.weight1 = Parameter( torch.empty( - fc1_output_size_per_partition, self.config.hidden_size, + fc1_output_size_per_partition, device=torch.cuda.current_device(), dtype=config.params_dtype, ) ) self.weight2 = Parameter( torch.empty( - self.config.hidden_size, fc2_input_size_per_partition, + self.config.hidden_size, device=torch.cuda.current_device(), dtype=config.params_dtype, ) @@ -126,13 +130,13 @@ def glu(x): _initialize_affine_weight_gpu( self.weight1, config.init_method, - partition_dim=0, + partition_dim=1, expert_parallel=self.expert_parallel, ) _initialize_affine_weight_gpu( self.weight2, config.output_layer_init_method, - partition_dim=1, + partition_dim=0, expert_parallel=self.expert_parallel, ) setattr(self.weight1, 'allreduce', not self.expert_parallel) @@ -160,14 +164,14 @@ def forward(self, hidden_states): w1, w2 = (self.scale_grad(self.weight1), self.scale_grad(self.weight2)) # Reshape the weights for the grouped GEMMs. 
- w1 = w1.view(self.num_local_experts, -1, self.config.hidden_size) - w2 = w2.view(self.num_local_experts, self.config.hidden_size, -1) + w1 = w1.view(self.num_local_experts, self.config.hidden_size, -1) + w2 = w2.view(self.num_local_experts, -1, self.config.hidden_size) - fc1_output = gg.ops.gmm(sorted_global_hidden_states, w1, tokens_per_expert, trans_b=True) + fc1_output = gg.ops.gmm(sorted_global_hidden_states, w1, tokens_per_expert, trans_b=False) intermediate_parallel = self.activation_func(fc1_output) - fc2_output = gg.ops.gmm(intermediate_parallel, w2, tokens_per_expert, trans_b=True) + fc2_output = gg.ops.gmm(intermediate_parallel, w2, tokens_per_expert, trans_b=False) # Un-permutation of tokens original_order_ghs = torch.empty_like(fc2_output) original_order_ghs[sorted_indices] = fc2_output diff --git a/tests/unit_tests/transformer/test_grouped_mlp.py b/tests/unit_tests/transformer/test_grouped_mlp.py index 3541fbf456..b3c08eca89 100644 --- a/tests/unit_tests/transformer/test_grouped_mlp.py +++ b/tests/unit_tests/transformer/test_grouped_mlp.py @@ -21,8 +21,8 @@ def setup_method(self, method, use_cpu_initialization=False, swiglu=True): print("Test for use_cpu_initilization={} and swiglu={}.".format(use_cpu_initialization, swiglu)) print("============") Utils.initialize_model_parallel(1,1) - num_layers=1 # 2 - self.hidden_size=2 # 12 + num_layers = 1 # 2 + self.hidden_size = 2 # 12 self.num_experts = 2 self.gated_linear_unit = True self.use_cpu_initialization = use_cpu_initialization @@ -86,13 +86,13 @@ def test_constructor(self): assert torch.equal(self.switch_mlp_smm.router.weight, self.switch_mlp_gmm.router.weight) - # weight1: [num_experts*4h, h] - # weight2: [h, num_experts*4h] - assert self.switch_mlp_gmm.weight1.shape[0] == self.num_experts * self.fc1_ffn_hidden_size - assert self.switch_mlp_gmm.weight1.shape[1] == self.hidden_size + # weight1: [h, num_experts*4h] + # weight2: [num_experts*4h, h] + assert self.switch_mlp_gmm.weight1.shape[0] == self.hidden_size + assert self.switch_mlp_gmm.weight1.shape[1] == self.num_experts * self.fc1_ffn_hidden_size if self.gated_linear_unit: - assert self.switch_mlp_gmm.weight2.shape[0] == self.hidden_size - assert self.switch_mlp_gmm.weight2.shape[1] == self.num_experts * self.fc2_ffn_hidden_size + assert self.switch_mlp_gmm.weight2.shape[0] == self.num_experts * self.fc2_ffn_hidden_size + assert self.switch_mlp_gmm.weight2.shape[1] == self.hidden_size else: assert self.switch_mlp_gmm.weight1.shape == self.switch_mlp_gmm.weight2.t().shape From f156a209cf454bef0b3c76bfdfb6c1feb2788281 Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Wed, 6 Dec 2023 08:15:04 +0000 Subject: [PATCH 105/296] fix comments. --- megatron/core/transformer/moe/base_moe_layer.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/megatron/core/transformer/moe/base_moe_layer.py b/megatron/core/transformer/moe/base_moe_layer.py index 349727b9cb..e15c3700ff 100644 --- a/megatron/core/transformer/moe/base_moe_layer.py +++ b/megatron/core/transformer/moe/base_moe_layer.py @@ -1,5 +1,7 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +from abc import ABC, abstractmethod + import torch from megatron.core import parallel_state, tensor_parallel @@ -34,7 +36,7 @@ def get_router_linear_layer(config): return router -class BaseMoELayer(MegatronModule): +class BaseMoELayer(ABC, MegatronModule): """ Basic MoE layer. 
""" @@ -135,3 +137,7 @@ def token_unpermutation(self, output_total, output_bias_total=None): output_bias_total = None return output_total, output_bias_total + + @abstractmethod + def forward(self, hidden_states): + pass From 7d86537c99a771c0a44bccac651b1d6c7ccf01a4 Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Fri, 8 Dec 2023 01:42:59 -0800 Subject: [PATCH 106/296] Support EP for GroupedGEMM; Token-(un)permutation API cleaned; More documentation. --- megatron/core/tensor_parallel/layers.py | 13 +-- .../core/transformer/moe/base_moe_layer.py | 91 +++++++++++++++++-- megatron/core/transformer/moe/grouped_mlp.py | 24 ++--- megatron/core/transformer/moe/switch_mlp.py | 26 ++++-- .../transformer/{ => moe}/test_grouped_mlp.py | 2 +- .../transformer/{ => moe}/test_switch_mlp.py | 0 6 files changed, 111 insertions(+), 45 deletions(-) rename tests/unit_tests/transformer/{ => moe}/test_grouped_mlp.py (99%) rename tests/unit_tests/transformer/{ => moe}/test_switch_mlp.py (100%) diff --git a/megatron/core/tensor_parallel/layers.py b/megatron/core/tensor_parallel/layers.py index 38379cb34d..1c66927bfc 100644 --- a/megatron/core/tensor_parallel/layers.py +++ b/megatron/core/tensor_parallel/layers.py @@ -370,12 +370,13 @@ def backward(ctx, grad_output): # https://github.com/pytorch/pytorch/blob/c47cf9bc7f9e02f649ab4ed53fe4d35732c92ab6/torch/_refs/__init__.py#L2761 grad_output = grad_output.contiguous() # Convert the tensor shapes to 2D for execution compatibility - grad_output = grad_output.view( - grad_output.shape[0] * grad_output.shape[1], grad_output.shape[2] - ) - total_input = total_input.view( - total_input.shape[0] * total_input.shape[1], total_input.shape[2] - ) + if grad_output.dim() == 3: + grad_output = grad_output.view( + grad_output.shape[0] * grad_output.shape[1], grad_output.shape[2] + ) + total_input = total_input.view( + total_input.shape[0] * total_input.shape[1], total_input.shape[2] + ) if ctx.async_grad_allreduce: # Asynchronous all-reduce diff --git a/megatron/core/transformer/moe/base_moe_layer.py b/megatron/core/transformer/moe/base_moe_layer.py index e15c3700ff..33ac819a62 100644 --- a/megatron/core/transformer/moe/base_moe_layer.py +++ b/megatron/core/transformer/moe/base_moe_layer.py @@ -81,9 +81,23 @@ def gather_indices(self, local_indices): return output def token_permutation(self, hidden_states): + """Dispatch tokens to local experts. It's composed of two stages: + (1) Permute the tokens across the expert parallel devices. After this stage, + each device receives all of the tokens assigned to its local set of experts + in its local HBM. + (2) Permute the tokens locally so that they are grouped by their expert + assignment. After the stage (1), the tokens are grouped by which device + they came from. We re-order them locally for subsequent efficient computation. + + Args: + hidden_states: input tokens of shape [SeqLen/TP, MBS, HiddenSize] + + Returns: + permuted_local_hidden_states: Permutation of tokens to local experts group. + tokens_per_expert: the number of tokens each local expert to process. 
+ """ self.hidden_shape = hidden_states.shape route = self.router(hidden_states) - # print(self.router.weight) route = route.view(-1, self.config.num_moe_experts) if self.training: @@ -99,28 +113,78 @@ def token_permutation(self, hidden_states): max_prob, max_ind = torch.max(route, dim=1) self.max_prob = torch.unsqueeze(max_prob, 1) + # [S/TP, B, H] -> [S*B/TP, H] hidden_states = hidden_states.view(-1, self.hidden_shape[-1]) + # Permute the tokens across the expert parallel devices. if self.sequence_parallel or (self.expert_parallel_size > 1): + # [S*B/TP, H] -> [S*B, H] global_hidden_states = tensor_parallel.gather_from_sequence_parallel_region_to_moe( hidden_states ) global_indices = self.gather_indices(max_ind) + self.ghs_shape = global_hidden_states.shape + # Create a mask where each element is True if it's between the local_expert_indices + self.mask = (global_indices >= self.local_expert_indices[0]) & ( + global_indices <= self.local_expert_indices[-1] + ) + self.local_indices = global_indices[self.mask] + local_hidden_states = global_hidden_states[self.mask, :] else: - global_hidden_states = hidden_states - global_indices = max_ind - - return global_hidden_states, global_indices - - def token_unpermutation(self, output_total, output_bias_total=None): + self.ghs_shape = hidden_states.shape + self.local_indices = max_ind + local_hidden_states = hidden_states + + # Permute the tokens locally so that they are grouped by their expert assignment + with torch.no_grad(): + self.permuted_indices = torch.argsort(self.local_indices) + # Permutation of tokens to each expert group. + permuted_local_hidden_states = local_hidden_states[self.permuted_indices] + tokens_per_expert = torch.histc( + self.local_indices, + bins=self.num_local_experts, + min=self.local_expert_indices[0], + max=self.local_expert_indices[-1], + ) + tokens_per_expert = tokens_per_expert.cpu().to(torch.long) + + return permuted_local_hidden_states, tokens_per_expert + + def token_unpermutation(self, hidden_states, bias=None): + """Reverse process of 'token_permutation' which permutes the ouput of local + experts into the original order to produce the final output. + + Args: + hidden_states: 2D tensor of shape [sum_tokens_of_all_local_experts, HiddenSize], + ouput of local experts. + bias: bias if self.add_bias is enabled. + + Returns: + output_total: un-permuted updated hidden states output from all local experts + with shape of [SeqLen/TP, MBS, HiddenSize] + """ + # Unpermute the tokens locally. + original_order_lhs = torch.zeros_like(hidden_states) + original_order_lhs[self.permuted_indices] = hidden_states + output_total = original_order_lhs + output_bias_total = bias + + # Unpermute the tokens across expert parallel devices. 
if self.sequence_parallel or (self.expert_parallel_size > 1): + original_order_ghs = torch.zeros( + self.ghs_shape, dtype=hidden_states.dtype, device=torch.cuda.current_device() + ) + global_local_map = torch.squeeze(self.mask.nonzero().contiguous()) + original_order_ghs[global_local_map] = original_order_lhs output_total = tensor_parallel.reduce_scatter_to_sequence_parallel_region_from_moe( - output_total + original_order_ghs ) if self.add_bias: - assert output_bias_total is not None + assert bias is not None + original_order_bias = torch.zeros_like(original_order_ghs) + original_order_bias[global_local_map] = bias output_bias_total = tensor_parallel.reduce_scatter_to_sequence_parallel_region_from_moe( - output_bias_total + original_order_bias ) # bias is duplicated across tensor parallelism ranks; # reduce scatter reduces bias across tensor parallel_ranks @@ -131,6 +195,7 @@ def token_unpermutation(self, output_total, output_bias_total=None): output_total = output_total * self.max_prob output_total = output_total.view(self.hidden_shape) if self.add_bias: + assert output_bias_total is not None output_bias_total = output_bias_total * self.max_prob output_bias_total = output_bias_total.view(self.hidden_shape) else: @@ -140,4 +205,10 @@ def token_unpermutation(self, output_total, output_bias_total=None): @abstractmethod def forward(self, hidden_states): + """Forward computation of MoE layer. + + Args: + hidden_states: input activation of shape [SeqLen, MBS, HiddenSize] + + """ pass diff --git a/megatron/core/transformer/moe/grouped_mlp.py b/megatron/core/transformer/moe/grouped_mlp.py index f8f2879112..507a687b03 100644 --- a/megatron/core/transformer/moe/grouped_mlp.py +++ b/megatron/core/transformer/moe/grouped_mlp.py @@ -148,33 +148,21 @@ def scale_grad(self, w): return scale_gradient(w, self.gradient_scale) def forward(self, hidden_states): - global_hidden_states, global_indices = self.token_permutation(hidden_states) - - with torch.no_grad(): - sorted_indices = torch.argsort(global_indices) - # Permutation of tokens to each expert group. - sorted_global_hidden_states = global_hidden_states[sorted_indices] - # GroupedGEMM requires tokens_per_expert is on cpu. - tokens_per_expert = torch.histc( - global_indices, - bins=self.config.num_moe_experts, - min=0, - max=self.config.num_moe_experts - 1, - ).cpu() + # Permutation of tokens + permuted_local_hidden_states, tokens_per_expert = self.token_permutation(hidden_states) w1, w2 = (self.scale_grad(self.weight1), self.scale_grad(self.weight2)) # Reshape the weights for the grouped GEMMs. w1 = w1.view(self.num_local_experts, self.config.hidden_size, -1) w2 = w2.view(self.num_local_experts, -1, self.config.hidden_size) - fc1_output = gg.ops.gmm(sorted_global_hidden_states, w1, tokens_per_expert, trans_b=False) + fc1_output = gg.ops.gmm(permuted_local_hidden_states, w1, tokens_per_expert, trans_b=False) intermediate_parallel = self.activation_func(fc1_output) fc2_output = gg.ops.gmm(intermediate_parallel, w2, tokens_per_expert, trans_b=False) - # Un-permutation of tokens - original_order_ghs = torch.empty_like(fc2_output) - original_order_ghs[sorted_indices] = fc2_output - output_total, _ = self.token_unpermutation(original_order_ghs) + + # Un-permutation of tokens. 
+ output_total, _ = self.token_unpermutation(fc2_output) return output_total, None diff --git a/megatron/core/transformer/moe/switch_mlp.py b/megatron/core/transformer/moe/switch_mlp.py index 357a020d2c..5e89939a03 100644 --- a/megatron/core/transformer/moe/switch_mlp.py +++ b/megatron/core/transformer/moe/switch_mlp.py @@ -24,24 +24,30 @@ def __init__(self, config: TransformerConfig, submodules: MLPSubmodules): self.local_experts.append(expert) def forward(self, hidden_states): - global_hidden_states, global_indices = self.token_permutation(hidden_states) + # global_hidden_states, global_indices = self.token_permutation(hidden_states) + permuted_local_hidden_states, tokens_per_expert = self.token_permutation(hidden_states) - output_total = torch.zeros_like(global_hidden_states) - output_bias_total = None + output_local = torch.zeros_like(permuted_local_hidden_states) + output_bias_local = None if self.add_bias: - output_bias_total = torch.zeros_like(global_hidden_states) + output_bias_local = torch.zeros_like(permuted_local_hidden_states) + cumsum_num_tokens = torch.cumsum(tokens_per_expert, dim=0) + # Insert zero at the begining for offset index's convenience + zero_tensor = torch.zeros(1, dtype=torch.long) + cumsum_num_tokens = torch.cat((zero_tensor, cumsum_num_tokens)) for expert_num, expert in enumerate(self.local_experts): - local_expert_index = self.local_expert_indices[expert_num] - local_indices = (global_indices == local_expert_index).nonzero() - hidden = global_hidden_states[local_indices, :] + start = cumsum_num_tokens[expert_num] + end = cumsum_num_tokens[expert_num + 1] + hidden = permuted_local_hidden_states[start:end] output, output_bias = expert(hidden) - output_total[local_indices, :] = output + output_local[start:end] = output if self.add_bias: output_bias = output_bias.expand_as(output) - output_bias_total[local_indices, :] = output_bias + output_bias_local[start:end, :] = output_bias - output_total, output_bias_total = self.token_unpermutation(output_total, output_bias_total) + # Un-permutation of tokens. + output_total, output_bias_total = self.token_unpermutation(output_local, output_bias_local) return output_total, output_bias_total diff --git a/tests/unit_tests/transformer/test_grouped_mlp.py b/tests/unit_tests/transformer/moe/test_grouped_mlp.py similarity index 99% rename from tests/unit_tests/transformer/test_grouped_mlp.py rename to tests/unit_tests/transformer/moe/test_grouped_mlp.py index b3c08eca89..558c7eb12a 100644 --- a/tests/unit_tests/transformer/test_grouped_mlp.py +++ b/tests/unit_tests/transformer/moe/test_grouped_mlp.py @@ -125,7 +125,7 @@ def test_gpu_forward(self): # [sequence length, batch size, hidden size] seq_len = 3 #32 batch_size = 2 - hidden_states = torch.ones( + hidden_states = torch.rand( (seq_len, batch_size, self.switch_mlp_smm.config.hidden_size), dtype=torch.bfloat16) hidden_states = hidden_states.cuda() diff --git a/tests/unit_tests/transformer/test_switch_mlp.py b/tests/unit_tests/transformer/moe/test_switch_mlp.py similarity index 100% rename from tests/unit_tests/transformer/test_switch_mlp.py rename to tests/unit_tests/transformer/moe/test_switch_mlp.py From bc7599615106b04b2d424537eb4342b6eb1e2e9c Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Sat, 9 Dec 2023 21:20:42 -0800 Subject: [PATCH 107/296] add unpermutation of bias for SwitchMLP. 
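The bias produced by the local experts now takes the same scatter-based unpermutation path as the hidden states before the cross-device reduce-scatter. A self-contained sketch of the local step in plain PyTorch; shapes and names here are hypothetical:

    import torch

    tokens, hidden = 6, 4
    permuted_indices = torch.randperm(tokens)          # order that grouped tokens by expert
    expert_out = torch.randn(tokens, hidden)           # expert output, in permuted order
    expert_bias = torch.randn(tokens, hidden)          # matching bias, in permuted order

    unpermuted_hidden = torch.zeros_like(expert_out)
    unpermuted_hidden[permuted_indices] = expert_out   # restore original token order
    unpermuted_bias = torch.zeros_like(expert_bias)
    unpermuted_bias[permuted_indices] = expert_bias    # bias follows the same mapping

    # Row i of both tensors now corresponds to original token i.
    assert torch.equal(unpermuted_hidden[permuted_indices], expert_out)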
--- .../core/transformer/moe/base_moe_layer.py | 30 +++++++++++-------- 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/megatron/core/transformer/moe/base_moe_layer.py b/megatron/core/transformer/moe/base_moe_layer.py index 33ac819a62..19e515e593 100644 --- a/megatron/core/transformer/moe/base_moe_layer.py +++ b/megatron/core/transformer/moe/base_moe_layer.py @@ -163,28 +163,34 @@ def token_unpermutation(self, hidden_states, bias=None): output_total: un-permuted updated hidden states output from all local experts with shape of [SeqLen/TP, MBS, HiddenSize] """ - # Unpermute the tokens locally. - original_order_lhs = torch.zeros_like(hidden_states) - original_order_lhs[self.permuted_indices] = hidden_states - output_total = original_order_lhs - output_bias_total = bias + # Unpermute the tokens and bias locally respectively. + unpermuted_local_hidden = torch.zeros_like(hidden_states) + unpermuted_local_hidden[self.permuted_indices] = hidden_states + unpermuted_local_bias = None + if self.add_bias: + assert bias is not None + unpermuted_local_bias = torch.zeros_like(hidden_states) + unpermuted_local_bias[self.permuted_indices] = bias + + output_total = unpermuted_local_hidden + output_bias_total = unpermuted_local_bias # Unpermute the tokens across expert parallel devices. if self.sequence_parallel or (self.expert_parallel_size > 1): - original_order_ghs = torch.zeros( + unpermuted_global_hidden = torch.zeros( self.ghs_shape, dtype=hidden_states.dtype, device=torch.cuda.current_device() ) global_local_map = torch.squeeze(self.mask.nonzero().contiguous()) - original_order_ghs[global_local_map] = original_order_lhs + unpermuted_global_hidden[global_local_map] = unpermuted_local_hidden output_total = tensor_parallel.reduce_scatter_to_sequence_parallel_region_from_moe( - original_order_ghs + unpermuted_global_hidden ) if self.add_bias: - assert bias is not None - original_order_bias = torch.zeros_like(original_order_ghs) - original_order_bias[global_local_map] = bias + # Unpermute the bias across expert parallel devices. + unpermuted_global_bias = torch.zeros_like(unpermuted_global_hidden) + unpermuted_global_bias[global_local_map] = unpermuted_local_bias output_bias_total = tensor_parallel.reduce_scatter_to_sequence_parallel_region_from_moe( - original_order_bias + unpermuted_global_bias ) # bias is duplicated across tensor parallelism ranks; # reduce scatter reduces bias across tensor parallel_ranks From c3e192db60c52ab47c744a3411469ded150411b3 Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Mon, 11 Dec 2023 01:15:56 -0800 Subject: [PATCH 108/296] fix ci test. 
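The permutation bookkeeping reshuffled by this fix boils down to an argsort over the routed expert ids plus a histogram that yields the per-expert token counts. A standalone sketch of those two steps, with hypothetical sizes; the real code keeps the indices on GPU and only moves the counts to CPU int64 for GroupedGEMM:

    import torch

    num_local_experts = 4
    # Expert id assigned to each token by the router, values in [0, num_local_experts).
    local_indices = torch.tensor([2, 0, 3, 2, 1, 2, 0])

    with torch.no_grad():
        sort_order = torch.argsort(local_indices)   # order that groups tokens by expert
        tokens_per_expert = torch.histc(
            local_indices.float(),                  # histc wants a floating-point input on CPU
            bins=num_local_experts,
            min=0,
            max=num_local_experts - 1,
        ).to(torch.long)

    print(sort_order)          # e.g. tensor([1, 6, 4, 0, 3, 5, 2])
    print(tokens_per_expert)   # tensor([2, 1, 3, 1])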
--- megatron/core/transformer/moe/base_moe_layer.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/megatron/core/transformer/moe/base_moe_layer.py b/megatron/core/transformer/moe/base_moe_layer.py index 19e515e593..35725e9bea 100644 --- a/megatron/core/transformer/moe/base_moe_layer.py +++ b/megatron/core/transformer/moe/base_moe_layer.py @@ -128,25 +128,24 @@ def token_permutation(self, hidden_states): self.mask = (global_indices >= self.local_expert_indices[0]) & ( global_indices <= self.local_expert_indices[-1] ) - self.local_indices = global_indices[self.mask] + local_indices = global_indices[self.mask] local_hidden_states = global_hidden_states[self.mask, :] else: self.ghs_shape = hidden_states.shape - self.local_indices = max_ind + local_indices = max_ind local_hidden_states = hidden_states - # Permute the tokens locally so that they are grouped by their expert assignment with torch.no_grad(): - self.permuted_indices = torch.argsort(self.local_indices) - # Permutation of tokens to each expert group. - permuted_local_hidden_states = local_hidden_states[self.permuted_indices] + self.permuted_indices = torch.argsort(local_indices) tokens_per_expert = torch.histc( - self.local_indices, + local_indices, bins=self.num_local_experts, min=self.local_expert_indices[0], max=self.local_expert_indices[-1], ) tokens_per_expert = tokens_per_expert.cpu().to(torch.long) + # Permute the tokens locally so that they are grouped by their expert assignment + permuted_local_hidden_states = local_hidden_states[self.permuted_indices] return permuted_local_hidden_states, tokens_per_expert From bfaef541323eab3d7e90ab8fe8454dc437a52cfa Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Mon, 11 Dec 2023 06:05:18 -0800 Subject: [PATCH 109/296] code clean. --- .../core/transformer/moe/base_moe_layer.py | 48 ++++++++++++------- megatron/core/transformer/moe/grouped_mlp.py | 9 +++- megatron/core/transformer/moe/switch_mlp.py | 12 +++-- 3 files changed, 48 insertions(+), 21 deletions(-) diff --git a/megatron/core/transformer/moe/base_moe_layer.py b/megatron/core/transformer/moe/base_moe_layer.py index 35725e9bea..bc9f381562 100644 --- a/megatron/core/transformer/moe/base_moe_layer.py +++ b/megatron/core/transformer/moe/base_moe_layer.py @@ -95,6 +95,11 @@ def token_permutation(self, hidden_states): Returns: permuted_local_hidden_states: Permutation of tokens to local experts group. tokens_per_expert: the number of tokens each local expert to process. + indices: The indices of `local_indices` (which holds the un-sorted expert + indices of tokens that local expert can process) that give its sorted order along dim 0. + global_local_map (optional): A mask of mapping between global and local tokens where each + element is True if it's between the local_expert_indices. Only useful + when cross device token permutation is enabled and **AllGahter** is performed. 
""" self.hidden_shape = hidden_states.shape route = self.router(hidden_states) @@ -123,20 +128,21 @@ def token_permutation(self, hidden_states): hidden_states ) global_indices = self.gather_indices(max_ind) - self.ghs_shape = global_hidden_states.shape - # Create a mask where each element is True if it's between the local_expert_indices - self.mask = (global_indices >= self.local_expert_indices[0]) & ( + # Create a mask of mapping between global and local tokens where each + # element is True if it's between the local_expert_indices + global_local_map = (global_indices >= self.local_expert_indices[0]) & ( global_indices <= self.local_expert_indices[-1] ) - local_indices = global_indices[self.mask] - local_hidden_states = global_hidden_states[self.mask, :] + local_indices = global_indices[global_local_map] + local_hidden_states = global_hidden_states[global_local_map] else: - self.ghs_shape = hidden_states.shape local_indices = max_ind local_hidden_states = hidden_states + global_local_map = None with torch.no_grad(): - self.permuted_indices = torch.argsort(local_indices) + # The indices of local_indices that give its sorted order along dim 0. + indices = torch.argsort(local_indices) tokens_per_expert = torch.histc( local_indices, bins=self.num_local_experts, @@ -145,41 +151,51 @@ def token_permutation(self, hidden_states): ) tokens_per_expert = tokens_per_expert.cpu().to(torch.long) # Permute the tokens locally so that they are grouped by their expert assignment - permuted_local_hidden_states = local_hidden_states[self.permuted_indices] + permuted_local_hidden_states = local_hidden_states[indices] - return permuted_local_hidden_states, tokens_per_expert + return permuted_local_hidden_states, tokens_per_expert, indices, global_local_map - def token_unpermutation(self, hidden_states, bias=None): - """Reverse process of 'token_permutation' which permutes the ouput of local - experts into the original order to produce the final output. + def token_unpermutation(self, hidden_states, indices, global_local_map=None, bias=None): + """Reverse process of `token_permutation()` which permutes the ouput of local + experts locallay and across expert parallel rank into the original order to + produce the final output. Args: hidden_states: 2D tensor of shape [sum_tokens_of_all_local_experts, HiddenSize], ouput of local experts. + indices: The indices of `local_indices` (which holds the un-sorted expert + indices of tokens that local expert can process) that give its sorted order along dim 0. + global_local_map (optional): A mask of mapping between global and local tokens where each + element is True if it's between the local_expert_indices. Only useful + when cross device token permutation is enabled and **AllGahter** is performed. bias: bias if self.add_bias is enabled. Returns: output_total: un-permuted updated hidden states output from all local experts with shape of [SeqLen/TP, MBS, HiddenSize] + output_bias_total: un-permuted bias output from all local experts if + self.add_bias is enabled. """ # Unpermute the tokens and bias locally respectively. 
unpermuted_local_hidden = torch.zeros_like(hidden_states) - unpermuted_local_hidden[self.permuted_indices] = hidden_states + unpermuted_local_hidden[indices] = hidden_states unpermuted_local_bias = None if self.add_bias: assert bias is not None unpermuted_local_bias = torch.zeros_like(hidden_states) - unpermuted_local_bias[self.permuted_indices] = bias + unpermuted_local_bias[indices] = bias output_total = unpermuted_local_hidden output_bias_total = unpermuted_local_bias # Unpermute the tokens across expert parallel devices. if self.sequence_parallel or (self.expert_parallel_size > 1): + assert global_local_map is not None, "global_local_map is necessary for `AllGather`." + # Shape of global_hidden_size: [SeqLen*MBS, HiddenSize] + global_hidden_shape = [global_local_map.shape[0], hidden_states.shape[-1]] unpermuted_global_hidden = torch.zeros( - self.ghs_shape, dtype=hidden_states.dtype, device=torch.cuda.current_device() + global_hidden_shape, dtype=hidden_states.dtype, device=torch.cuda.current_device() ) - global_local_map = torch.squeeze(self.mask.nonzero().contiguous()) unpermuted_global_hidden[global_local_map] = unpermuted_local_hidden output_total = tensor_parallel.reduce_scatter_to_sequence_parallel_region_from_moe( unpermuted_global_hidden diff --git a/megatron/core/transformer/moe/grouped_mlp.py b/megatron/core/transformer/moe/grouped_mlp.py index 507a687b03..19f45240b1 100644 --- a/megatron/core/transformer/moe/grouped_mlp.py +++ b/megatron/core/transformer/moe/grouped_mlp.py @@ -149,7 +149,12 @@ def scale_grad(self, w): def forward(self, hidden_states): # Permutation of tokens - permuted_local_hidden_states, tokens_per_expert = self.token_permutation(hidden_states) + ( + permuted_local_hidden_states, + tokens_per_expert, + indices, + global_local_map, + ) = self.token_permutation(hidden_states) w1, w2 = (self.scale_grad(self.weight1), self.scale_grad(self.weight2)) # Reshape the weights for the grouped GEMMs. @@ -163,6 +168,6 @@ def forward(self, hidden_states): fc2_output = gg.ops.gmm(intermediate_parallel, w2, tokens_per_expert, trans_b=False) # Un-permutation of tokens. - output_total, _ = self.token_unpermutation(fc2_output) + output_total, _ = self.token_unpermutation(fc2_output, indices, global_local_map) return output_total, None diff --git a/megatron/core/transformer/moe/switch_mlp.py b/megatron/core/transformer/moe/switch_mlp.py index 5e89939a03..46cced972e 100644 --- a/megatron/core/transformer/moe/switch_mlp.py +++ b/megatron/core/transformer/moe/switch_mlp.py @@ -24,8 +24,12 @@ def __init__(self, config: TransformerConfig, submodules: MLPSubmodules): self.local_experts.append(expert) def forward(self, hidden_states): - # global_hidden_states, global_indices = self.token_permutation(hidden_states) - permuted_local_hidden_states, tokens_per_expert = self.token_permutation(hidden_states) + ( + permuted_local_hidden_states, + tokens_per_expert, + indices, + global_local_map, + ) = self.token_permutation(hidden_states) output_local = torch.zeros_like(permuted_local_hidden_states) output_bias_local = None @@ -48,6 +52,8 @@ def forward(self, hidden_states): output_bias_local[start:end, :] = output_bias # Un-permutation of tokens. 
- output_total, output_bias_total = self.token_unpermutation(output_local, output_bias_local) + output_total, output_bias_total = self.token_unpermutation( + output_local, indices, global_local_map, output_bias_local + ) return output_total, output_bias_total From a0059df302da9bac898d297b0806218d6dd55d13 Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Mon, 11 Dec 2023 23:09:42 -0800 Subject: [PATCH 110/296] replace regular indexing with index_select and scatter for better performance. --- .../core/transformer/moe/base_moe_layer.py | 46 +++++++++++++------ 1 file changed, 31 insertions(+), 15 deletions(-) diff --git a/megatron/core/transformer/moe/base_moe_layer.py b/megatron/core/transformer/moe/base_moe_layer.py index bc9f381562..957f5b2886 100644 --- a/megatron/core/transformer/moe/base_moe_layer.py +++ b/megatron/core/transformer/moe/base_moe_layer.py @@ -121,7 +121,7 @@ def token_permutation(self, hidden_states): # [S/TP, B, H] -> [S*B/TP, H] hidden_states = hidden_states.view(-1, self.hidden_shape[-1]) - # Permute the tokens across the expert parallel devices. + # Stage1: permute the tokens across the expert parallel devices. if self.sequence_parallel or (self.expert_parallel_size > 1): # [S*B/TP, H] -> [S*B, H] global_hidden_states = tensor_parallel.gather_from_sequence_parallel_region_to_moe( @@ -133,8 +133,9 @@ def token_permutation(self, hidden_states): global_local_map = (global_indices >= self.local_expert_indices[0]) & ( global_indices <= self.local_expert_indices[-1] ) - local_indices = global_indices[global_local_map] - local_hidden_states = global_hidden_states[global_local_map] + global_local_map = torch.squeeze(global_local_map.nonzero()) + local_indices = torch.index_select(global_indices, 0, global_local_map) + local_hidden_states = torch.index_select(global_hidden_states, 0, global_local_map) else: local_indices = max_ind local_hidden_states = hidden_states @@ -150,8 +151,9 @@ def token_permutation(self, hidden_states): max=self.local_expert_indices[-1], ) tokens_per_expert = tokens_per_expert.cpu().to(torch.long) - # Permute the tokens locally so that they are grouped by their expert assignment - permuted_local_hidden_states = local_hidden_states[indices] + + # Stage2: permute the tokens locally so that they are grouped by their expert assignment + permuted_local_hidden_states = torch.index_select(local_hidden_states, 0, indices) return permuted_local_hidden_states, tokens_per_expert, indices, global_local_map @@ -163,9 +165,9 @@ def token_unpermutation(self, hidden_states, indices, global_local_map=None, bia Args: hidden_states: 2D tensor of shape [sum_tokens_of_all_local_experts, HiddenSize], ouput of local experts. - indices: The indices of `local_indices` (which holds the un-sorted expert + indices: 1D tensor of the indices of `local_indices` (which holds the un-sorted expert indices of tokens that local expert can process) that give its sorted order along dim 0. - global_local_map (optional): A mask of mapping between global and local tokens where each + global_local_map (optional): 1D tensor, a mask of mapping between global and local tokens where each element is True if it's between the local_expert_indices. Only useful when cross device token permutation is enabled and **AllGahter** is performed. bias: bias if self.add_bias is enabled. @@ -176,34 +178,48 @@ def token_unpermutation(self, hidden_states, indices, global_local_map=None, bia output_bias_total: un-permuted bias output from all local experts if self.add_bias is enabled. 
""" - # Unpermute the tokens and bias locally respectively. + # Stage1: unpermute the tokens and bias locally respectively. unpermuted_local_hidden = torch.zeros_like(hidden_states) - unpermuted_local_hidden[indices] = hidden_states + # Reshape global_local_map to be compatible with Tensor.scatter + indices = torch.unsqueeze(indices, 1).expand(-1, hidden_states.shape[-1]) + assert indices.shape == hidden_states.shape + unpermuted_local_hidden = unpermuted_local_hidden.scatter(0, indices, hidden_states) + unpermuted_local_bias = None if self.add_bias: assert bias is not None unpermuted_local_bias = torch.zeros_like(hidden_states) - unpermuted_local_bias[indices] = bias + assert indices.shape == bias.shape + unpermuted_local_bias = unpermuted_local_bias.scatter(0, indices, bias) output_total = unpermuted_local_hidden output_bias_total = unpermuted_local_bias - # Unpermute the tokens across expert parallel devices. + # Stage2: unpermute the tokens across expert parallel devices. if self.sequence_parallel or (self.expert_parallel_size > 1): assert global_local_map is not None, "global_local_map is necessary for `AllGather`." - # Shape of global_hidden_size: [SeqLen*MBS, HiddenSize] - global_hidden_shape = [global_local_map.shape[0], hidden_states.shape[-1]] + ep_group_size = parallel_state.get_tensor_and_expert_parallel_world_size() + # hidden_shape: [SeqLen/TP, MBS, HiddenSize], glboal_num_tokens = SeqLen/TP*MBS*(TP*EP) + global_num_tokens = self.hidden_shape[0] * self.hidden_shape[1] * ep_group_size + global_hidden_shape = [global_num_tokens, hidden_states.shape[-1]] unpermuted_global_hidden = torch.zeros( global_hidden_shape, dtype=hidden_states.dtype, device=torch.cuda.current_device() ) - unpermuted_global_hidden[global_local_map] = unpermuted_local_hidden + # Reshape global_local_map to be compatible with Tensor.scatter + global_local_map = global_local_map.unsqueeze(1).expand(-1, hidden_states.shape[-1]) + assert global_local_map.shape == unpermuted_local_hidden.shape + unpermuted_global_hidden = unpermuted_global_hidden.scatter( + 0, global_local_map, unpermuted_local_hidden + ) output_total = tensor_parallel.reduce_scatter_to_sequence_parallel_region_from_moe( unpermuted_global_hidden ) if self.add_bias: # Unpermute the bias across expert parallel devices. unpermuted_global_bias = torch.zeros_like(unpermuted_global_hidden) - unpermuted_global_bias[global_local_map] = unpermuted_local_bias + unpermuted_global_bias = unpermuted_global_bias.scatter( + 0, global_local_map, unpermuted_local_bias + ) output_bias_total = tensor_parallel.reduce_scatter_to_sequence_parallel_region_from_moe( unpermuted_global_bias ) From 0341c135940fd19222b5c007f4ab287df51cf388 Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Mon, 11 Dec 2023 23:38:44 -0800 Subject: [PATCH 111/296] update grouped_gemm src to fix ci test. --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index e8a15be4e6..2a0d41bcfa 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -32,7 +32,7 @@ unit_tests: - pip install nltk - pip install wrapt - pip install zarr "tensorstore==0.1.45" # for distributed checkpointing tests - - pip install git+https://github.com/tgale96/grouped_gemm@main # for grouped gemm tests + - pip install git+https://github.com/fanshiqing/grouped_gemm@main # for grouped gemm tests - torchrun --nproc_per_node=8 -m pytest --cov-report=term --cov-report=html --cov=megatron/core tests/unit_tests coverage: '/(?i)total.*? 
(100(?:\.0+)?\%|[1-9]?\d(?:\.\d+)?\%)$/' artifacts: From bdbcfeb3752901ff9d241159a94a5005c94077e0 Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Tue, 12 Dec 2023 00:15:40 -0800 Subject: [PATCH 112/296] add device capability check for groupedGEMM and related UTs. --- megatron/arguments.py | 2 ++ tests/unit_tests/transformer/moe/test_grouped_mlp.py | 8 ++++++++ 2 files changed, 10 insertions(+) diff --git a/megatron/arguments.py b/megatron/arguments.py index 6d4fcd6ca8..90d8651f17 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -293,6 +293,8 @@ def validate_args(args, defaults={}): if args.moe_grouped_gemm: assert args.bf16, 'Currently GroupedGEMM for MoE only supports bf16 dtype.' + dc = torch.cuda.get_device_capability() + assert dc[0] >= 8, "Unsupported compute capability for GroupedGEMM kernels." if args.weight_decay_incr_style == 'constant': assert args.start_weight_decay is None diff --git a/tests/unit_tests/transformer/moe/test_grouped_mlp.py b/tests/unit_tests/transformer/moe/test_grouped_mlp.py index 558c7eb12a..d74ea9c35f 100644 --- a/tests/unit_tests/transformer/moe/test_grouped_mlp.py +++ b/tests/unit_tests/transformer/moe/test_grouped_mlp.py @@ -14,6 +14,11 @@ from megatron.model import Float16Module from tests.unit_tests.test_utilities import Utils +DEVICE_CAPABILITY = None +if torch.cuda.is_available(): + DEVICE_CAPABILITY = torch.cuda.get_device_capability() + + class TestParallelGroupedMLP: def setup_method(self, method, use_cpu_initialization=False, swiglu=True): @@ -119,6 +124,9 @@ def test_weight_init_value_the_same(self): assert torch.equal(gmm_expert2_fc2, smm_expert2_fc2) @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") + @pytest.mark.skipif( + not DEVICE_CAPABILITY or DEVICE_CAPABILITY[0] < 8, reason='GroupedGEMM kernels are not supported on this device.' + ) def test_gpu_forward(self): self.switch_mlp_smm.cuda() self.switch_mlp_gmm.cuda() From 52711130ceaff54a0a47a1d3bc8bea6fa13129bc Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Thu, 14 Dec 2023 12:25:34 +0000 Subject: [PATCH 113/296] Support Top-K routing, permutation and unpermutation under ETP and SP. 
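Routing moves from a single torch.max to torch.topk, with the gate probabilities gathered at the selected expert positions. A minimal sketch of the k-way selection on plain tensors, with hypothetical sizes; k=1 reproduces the previous argmax behaviour:

    import torch

    num_tokens, num_experts, k = 5, 4, 2
    logits = torch.randn(num_tokens, num_experts)    # router output per token

    probs = torch.softmax(logits, dim=1)             # router activation
    _, top_ind = torch.topk(logits, k=k, dim=1)      # expert ids chosen per token, [tokens, k]
    top_prob = torch.gather(probs, 1, top_ind)       # gate values at those experts, [tokens, k]

    # Each token's output is later the gate-weighted sum of its k expert outputs,
    # which the layer realises with scatter_add during unpermutation.
    assert top_ind.shape == (num_tokens, k) and top_prob.shape == (num_tokens, k)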
--- .../core/transformer/moe/base_moe_layer.py | 42 ++++++++++++------- 1 file changed, 27 insertions(+), 15 deletions(-) diff --git a/megatron/core/transformer/moe/base_moe_layer.py b/megatron/core/transformer/moe/base_moe_layer.py index 957f5b2886..f71248e2fb 100644 --- a/megatron/core/transformer/moe/base_moe_layer.py +++ b/megatron/core/transformer/moe/base_moe_layer.py @@ -61,6 +61,7 @@ def __init__(self, config: TransformerConfig): self.local_expert_indices = [ local_expert_indices_offset + i for i in range(self.num_local_experts) ] + self.k = 1 # TODO: self.config.top_k def gather_indices(self, local_indices): """ Gather tensors and concatenate along the first dimension.""" @@ -110,14 +111,13 @@ def token_permutation(self, hidden_states): norm_route = self.route_algo( route.detach().to(dtype=torch.float32) ) # explicit fp32 conversion for stability - _, max_ind = torch.max(norm_route, dim=1) + _, max_ind = torch.topk(norm_route, k=self.k, dim=1) route = self.router_activation(route) - max_prob = route[torch.arange(route.size(0)), max_ind] + # max_ind = max_ind.view(-1) + max_prob = torch.gather(route, 1, max_ind) else: route = self.router_activation(route) - max_prob, max_ind = torch.max(route, dim=1) - - self.max_prob = torch.unsqueeze(max_prob, 1) + max_prob, max_ind = torch.topk(route, k=self.k, dim=1) # [S/TP, B, H] -> [S*B/TP, H] hidden_states = hidden_states.view(-1, self.hidden_shape[-1]) @@ -133,17 +133,24 @@ def token_permutation(self, hidden_states): global_local_map = (global_indices >= self.local_expert_indices[0]) & ( global_indices <= self.local_expert_indices[-1] ) - global_local_map = torch.squeeze(global_local_map.nonzero()) - local_indices = torch.index_select(global_indices, 0, global_local_map) + local_indices = global_indices[global_local_map] + if self.k > 1: # k > 1 + global_probs = self.gather_indices(max_prob) + local_probs = global_probs[global_local_map] + else: + local_probs = max_prob + global_local_map = torch.squeeze(global_local_map.nonzero()[:, 0]) local_hidden_states = torch.index_select(global_hidden_states, 0, global_local_map) else: local_indices = max_ind + local_probs = max_prob local_hidden_states = hidden_states global_local_map = None + self.max_prob = local_probs with torch.no_grad(): # The indices of local_indices that give its sorted order along dim 0. - indices = torch.argsort(local_indices) + indices = torch.argsort(local_indices, dim=0) tokens_per_expert = torch.histc( local_indices, bins=self.num_local_experts, @@ -153,7 +160,7 @@ def token_permutation(self, hidden_states): tokens_per_expert = tokens_per_expert.cpu().to(torch.long) # Stage2: permute the tokens locally so that they are grouped by their expert assignment - permuted_local_hidden_states = torch.index_select(local_hidden_states, 0, indices) + permuted_local_hidden_states = torch.index_select(local_hidden_states, 0, indices.view(-1)) return permuted_local_hidden_states, tokens_per_expert, indices, global_local_map @@ -181,9 +188,12 @@ def token_unpermutation(self, hidden_states, indices, global_local_map=None, bia # Stage1: unpermute the tokens and bias locally respectively. 
unpermuted_local_hidden = torch.zeros_like(hidden_states) # Reshape global_local_map to be compatible with Tensor.scatter - indices = torch.unsqueeze(indices, 1).expand(-1, hidden_states.shape[-1]) + indices = indices.view(-1, 1).expand(-1, hidden_states.shape[1]) assert indices.shape == hidden_states.shape unpermuted_local_hidden = unpermuted_local_hidden.scatter(0, indices, hidden_states) + # Scale the expert output prior to reduction and subsequent to local unpermutation if k > 1. + if self.k > 1: + unpermuted_local_hidden = unpermuted_local_hidden * self.max_prob.view(-1, 1) unpermuted_local_bias = None if self.add_bias: @@ -191,6 +201,8 @@ def token_unpermutation(self, hidden_states, indices, global_local_map=None, bia unpermuted_local_bias = torch.zeros_like(hidden_states) assert indices.shape == bias.shape unpermuted_local_bias = unpermuted_local_bias.scatter(0, indices, bias) + if self.k > 1: + unpermuted_local_bias = unpermuted_local_bias * self.max_prob.view(-1, 1) output_total = unpermuted_local_hidden output_bias_total = unpermuted_local_bias @@ -208,7 +220,7 @@ def token_unpermutation(self, hidden_states, indices, global_local_map=None, bia # Reshape global_local_map to be compatible with Tensor.scatter global_local_map = global_local_map.unsqueeze(1).expand(-1, hidden_states.shape[-1]) assert global_local_map.shape == unpermuted_local_hidden.shape - unpermuted_global_hidden = unpermuted_global_hidden.scatter( + unpermuted_global_hidden = unpermuted_global_hidden.scatter_add( 0, global_local_map, unpermuted_local_hidden ) output_total = tensor_parallel.reduce_scatter_to_sequence_parallel_region_from_moe( @@ -217,7 +229,7 @@ def token_unpermutation(self, hidden_states, indices, global_local_map=None, bia if self.add_bias: # Unpermute the bias across expert parallel devices. unpermuted_global_bias = torch.zeros_like(unpermuted_global_hidden) - unpermuted_global_bias = unpermuted_global_bias.scatter( + unpermuted_global_bias = unpermuted_global_bias.scatter_add( 0, global_local_map, unpermuted_local_bias ) output_bias_total = tensor_parallel.reduce_scatter_to_sequence_parallel_region_from_moe( @@ -228,12 +240,12 @@ def token_unpermutation(self, hidden_states, indices, global_local_map=None, bia output_bias_total = ( output_bias_total / parallel_state.get_tensor_model_parallel_world_size() ) - - output_total = output_total * self.max_prob + if self.k == 1: + output_total = output_total * self.max_prob.view(-1, 1) output_total = output_total.view(self.hidden_shape) if self.add_bias: assert output_bias_total is not None - output_bias_total = output_bias_total * self.max_prob + output_bias_total = output_bias_total * self.max_prob.view(-1, 1) output_bias_total = output_bias_total.view(self.hidden_shape) else: output_bias_total = None From 22e66c3a06d60eda34a4ea2bd627f2f232a0b684 Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Thu, 14 Dec 2023 13:15:03 +0000 Subject: [PATCH 114/296] replace index_select with gather for better perf. --- .../core/transformer/moe/base_moe_layer.py | 47 ++++++++++--------- 1 file changed, 26 insertions(+), 21 deletions(-) diff --git a/megatron/core/transformer/moe/base_moe_layer.py b/megatron/core/transformer/moe/base_moe_layer.py index f71248e2fb..cf596fd3dc 100644 --- a/megatron/core/transformer/moe/base_moe_layer.py +++ b/megatron/core/transformer/moe/base_moe_layer.py @@ -98,7 +98,7 @@ def token_permutation(self, hidden_states): tokens_per_expert: the number of tokens each local expert to process. 
indices: The indices of `local_indices` (which holds the un-sorted expert indices of tokens that local expert can process) that give its sorted order along dim 0. - global_local_map (optional): A mask of mapping between global and local tokens where each + global_local_map (optional): 2D tensor. A mask of mapping between global and local tokens where each element is True if it's between the local_expert_indices. Only useful when cross device token permutation is enabled and **AllGahter** is performed. """ @@ -127,20 +127,23 @@ def token_permutation(self, hidden_states): global_hidden_states = tensor_parallel.gather_from_sequence_parallel_region_to_moe( hidden_states ) - global_indices = self.gather_indices(max_ind) - # Create a mask of mapping between global and local tokens where each - # element is True if it's between the local_expert_indices - global_local_map = (global_indices >= self.local_expert_indices[0]) & ( - global_indices <= self.local_expert_indices[-1] - ) - local_indices = global_indices[global_local_map] - if self.k > 1: # k > 1 - global_probs = self.gather_indices(max_prob) - local_probs = global_probs[global_local_map] - else: - local_probs = max_prob - global_local_map = torch.squeeze(global_local_map.nonzero()[:, 0]) - local_hidden_states = torch.index_select(global_hidden_states, 0, global_local_map) + with torch.no_grad(): + global_indices = self.gather_indices(max_ind) + # Create a mask of mapping between global and local tokens where each + # element is True if it's between the local_expert_indices + global_local_map = (global_indices >= self.local_expert_indices[0]) & ( + global_indices <= self.local_expert_indices[-1] + ) + local_indices = global_indices[global_local_map] + if self.k > 1: # k > 1 + global_probs = self.gather_indices(max_prob) + local_probs = global_probs[global_local_map] + else: + local_probs = max_prob + # Reshape global_local_map to be compatible with Tensor.gather + global_local_map = global_local_map.nonzero()[:, 0] + global_local_map = global_local_map.view(-1, 1).expand(-1, hidden_states.shape[-1]) + local_hidden_states = torch.gather(global_hidden_states, 0, global_local_map) else: local_indices = max_ind local_probs = max_prob @@ -161,7 +164,10 @@ def token_permutation(self, hidden_states): # Stage2: permute the tokens locally so that they are grouped by their expert assignment permuted_local_hidden_states = torch.index_select(local_hidden_states, 0, indices.view(-1)) + # Reshape indices to be compatible with Tensor.gather + indices = indices.view(-1, 1).expand(-1, hidden_states.shape[-1]) + permuted_local_hidden_states = torch.gather(local_hidden_states, 0, indices) return permuted_local_hidden_states, tokens_per_expert, indices, global_local_map def token_unpermutation(self, hidden_states, indices, global_local_map=None, bias=None): @@ -172,9 +178,9 @@ def token_unpermutation(self, hidden_states, indices, global_local_map=None, bia Args: hidden_states: 2D tensor of shape [sum_tokens_of_all_local_experts, HiddenSize], ouput of local experts. - indices: 1D tensor of the indices of `local_indices` (which holds the un-sorted expert + indices: 2D tensor of the indices of `local_indices` (which holds the un-sorted expert indices of tokens that local expert can process) that give its sorted order along dim 0. 
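# Illustrative sketch of the index_select -> gather swap in this patch: once the row
# indices are expanded to the hidden dimension, torch.gather along dim 0 selects the
# same rows, which is the equivalence the change relies on.
import torch

x = torch.arange(12.0).view(4, 3)
rows = torch.tensor([2, 0])
a = torch.index_select(x, 0, rows)
b = torch.gather(x, 0, rows.view(-1, 1).expand(-1, x.shape[-1]))
assert torch.equal(a, b)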
- global_local_map (optional): 1D tensor, a mask of mapping between global and local tokens where each + global_local_map (optional): 2D tensor, a mask of mapping between global and local tokens where each element is True if it's between the local_expert_indices. Only useful when cross device token permutation is enabled and **AllGahter** is performed. bias: bias if self.add_bias is enabled. @@ -187,10 +193,9 @@ def token_unpermutation(self, hidden_states, indices, global_local_map=None, bia """ # Stage1: unpermute the tokens and bias locally respectively. unpermuted_local_hidden = torch.zeros_like(hidden_states) - # Reshape global_local_map to be compatible with Tensor.scatter - indices = indices.view(-1, 1).expand(-1, hidden_states.shape[1]) assert indices.shape == hidden_states.shape unpermuted_local_hidden = unpermuted_local_hidden.scatter(0, indices, hidden_states) + # Scale the expert output prior to reduction and subsequent to local unpermutation if k > 1. if self.k > 1: unpermuted_local_hidden = unpermuted_local_hidden * self.max_prob.view(-1, 1) @@ -218,7 +223,6 @@ def token_unpermutation(self, hidden_states, indices, global_local_map=None, bia global_hidden_shape, dtype=hidden_states.dtype, device=torch.cuda.current_device() ) # Reshape global_local_map to be compatible with Tensor.scatter - global_local_map = global_local_map.unsqueeze(1).expand(-1, hidden_states.shape[-1]) assert global_local_map.shape == unpermuted_local_hidden.shape unpermuted_global_hidden = unpermuted_global_hidden.scatter_add( 0, global_local_map, unpermuted_local_hidden @@ -245,7 +249,8 @@ def token_unpermutation(self, hidden_states, indices, global_local_map=None, bia output_total = output_total.view(self.hidden_shape) if self.add_bias: assert output_bias_total is not None - output_bias_total = output_bias_total * self.max_prob.view(-1, 1) + if self.k == 1: + output_bias_total = output_bias_total * self.max_prob.view(-1, 1) output_bias_total = output_bias_total.view(self.hidden_shape) else: output_bias_total = None From df779ae9d64decbc9b0d1c1c00de2955c75dfc75 Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Fri, 15 Dec 2023 11:31:02 +0000 Subject: [PATCH 115/296] add MoE w/ groupedGEMM CI golden values. 
--- .gitlab-ci.yml | 19 ++++++++++++++++++- .../run_selene_test_launcher_script.sh | 4 ++-- ...bled_te_8experts2parallel_groupedGEMM.json | 1 + .../gpt3/pretrain_gpt3_distributed_test.sh | 7 +++++++ .../gpt3/sbatch_gpt3_distributed_test.sh | 2 +- 5 files changed, 29 insertions(+), 4 deletions(-) create mode 100644 tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_groupedGEMM.json diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 2a0d41bcfa..c0553de5a3 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -16,6 +16,7 @@ variables: &VARS TEST_REGEX_ON_THIS_COMMIT: NONE #https://github.com/google/re2/wiki/Syntax (Can define regex as in this spec) e.g /.*gpt3.*/ DISPLAY_OUTPUT: "True" # Set to true for new tests to copy the logs for creating golden truth file TIME_LIMIT: "10:00" # Default time limit for all jobs + MOE_GROUPED_GEMM: 0 # Set to 1 to enable grouped gemm for MoE include: @@ -98,7 +99,7 @@ formatting: script: &selene-test-launcher-script - echo "Running selene test" - pwd - - run_cmd="bash tests/functional_tests/shell_test_utils/run_selene_test_launcher_script.sh RUN_MODEL=$RUN_MODEL TP_SIZE=$TP_SIZE PP_SIZE=$PP_SIZE VP_SIZE=$VP_SIZE NUM_NODES=$NUM_NODES SELENE_ADLR_CI_PATH=$SELENE_ADLR_CI_PATH CI_PIPELINE_ID=$CI_PIPELINE_ID RUN_NAME=$RUN_NAME MAX_STEPS=$MAX_STEPS PYTORCH_IMAGE=$PYTORCH_IMAGE DATA_DIR=$DATA_DIR USE_CORE=$USE_CORE USE_TE=$USE_TE TIME_LIMIT=$TIME_LIMIT" + - run_cmd="bash tests/functional_tests/shell_test_utils/run_selene_test_launcher_script.sh RUN_MODEL=$RUN_MODEL TP_SIZE=$TP_SIZE PP_SIZE=$PP_SIZE VP_SIZE=$VP_SIZE NUM_NODES=$NUM_NODES SELENE_ADLR_CI_PATH=$SELENE_ADLR_CI_PATH CI_PIPELINE_ID=$CI_PIPELINE_ID RUN_NAME=$RUN_NAME MAX_STEPS=$MAX_STEPS PYTORCH_IMAGE=$PYTORCH_IMAGE DATA_DIR=$DATA_DIR USE_CORE=$USE_CORE USE_TE=$USE_TE MOE_GROUPED_GEMM=$MOE_GROUPED_GEMM TIME_LIMIT=$TIME_LIMIT" - echo "$run_cmd" - ${run_cmd} - echo "Completed the job" @@ -564,6 +565,22 @@ train.te_core_moe_gpt3.345m_tp2_pp1_8experts2parallel_1node_50steps: METADATA: "te_8experts2parallel" ADDITIONAL_PARAMS: "--sequence-parallel --num-experts 8 --expert-model-parallel-size 2" +train.te_core_moe_gpt3.345m_tp2_pp1_8experts2parallel_groupedGEMM_1node_50steps: + <<: *selene-test-launcher + variables: + <<: [*VARS] + RUN_MODEL: gpt3 + USE_TE: 0 + TP_SIZE: 2 + PP_SIZE: 1 + NUM_NODES: 1 + MAX_STEPS: 50 + USE_CORE: 1 + MOE_GROUPED_GEMM: 1 + TEST_LEVEL: MR_TESTS + METADATA: "te_8experts2parallel_groupedGEMM" + ADDITIONAL_PARAMS: "--moe-grouped-gemm --disable-bias-linear --sequence-parallel --num-experts 8 --expert-model-parallel-size 2" + train.moe_gpt3.345m_tp2_pp2_4experts_1node_50steps: <<: *selene-test-launcher variables: diff --git a/tests/functional_tests/shell_test_utils/run_selene_test_launcher_script.sh b/tests/functional_tests/shell_test_utils/run_selene_test_launcher_script.sh index e7c8c3c88f..d454932abb 100755 --- a/tests/functional_tests/shell_test_utils/run_selene_test_launcher_script.sh +++ b/tests/functional_tests/shell_test_utils/run_selene_test_launcher_script.sh @@ -44,11 +44,11 @@ export GOTO_NUM_THREADS=2 export OPENBLAS_NUM_THREADS=2 # step 5 : CREATING A COPY OF THE SBATCH SCRIPT THAT WILL BE RUN FOR DEBUGGING -envsubst '$BASE_DIR $PYTORCH_IMAGE $BUILD_DIR $DATA_DIR $MBS $GBS $ADDITIONAL_PARAMS $USE_TE $TP_SIZE $PP_SIZE $VP_SIZE $NUM_NODES $MAX_STEPS $USE_CORE' <$BUILD_DIR/tests/functional_tests/test_scripts/$RUN_MODEL/sbatch_${RUN_MODEL}_distributed_test.sh > 
$SELENE_ADLR_CI_PATH/$CI_PIPELINE_ID/$RUN_NAME/debug/sbatch_${RUN_MODEL}_distributed_test.sh +envsubst '$BASE_DIR $PYTORCH_IMAGE $BUILD_DIR $DATA_DIR $MBS $GBS $MOE_GROUPED_GEMM $ADDITIONAL_PARAMS $USE_TE $TP_SIZE $PP_SIZE $VP_SIZE $NUM_NODES $MAX_STEPS $USE_CORE' <$BUILD_DIR/tests/functional_tests/test_scripts/$RUN_MODEL/sbatch_${RUN_MODEL}_distributed_test.sh > $SELENE_ADLR_CI_PATH/$CI_PIPELINE_ID/$RUN_NAME/debug/sbatch_${RUN_MODEL}_distributed_test.sh # step 6 : SUBMITTING THE JOB -sbatch_submission=`sbatch -t $TIME_LIMIT $BUILD_DIR/tests/functional_tests/test_scripts/$RUN_MODEL/sbatch_${RUN_MODEL}_distributed_test.sh --export=BASE_DIR,BUILD_DIR,DATA_DIR,USE_TE,TP_SIZE,PP_SIZE,VP_SIZE,NUM_NODES,MAX_STEPS,MBS,GBS,PYTORCH_IMAGE,ADDITIONAL_PARAMS` +sbatch_submission=`sbatch -t $TIME_LIMIT $BUILD_DIR/tests/functional_tests/test_scripts/$RUN_MODEL/sbatch_${RUN_MODEL}_distributed_test.sh --export=BASE_DIR,BUILD_DIR,DATA_DIR,USE_TE,TP_SIZE,PP_SIZE,VP_SIZE,NUM_NODES,MAX_STEPS,MBS,GBS,MOE_GROUPED_GEMM,PYTORCH_IMAGE,ADDITIONAL_PARAMS` export SLURM_JOBID=$(echo $sbatch_submission| grep 'Submitted batch job' | awk '{ print $4 }'); # step 7 : WAITING FOR JOB TO COMPLETE AND PRINTING JOB INFO diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_groupedGEMM.json b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_groupedGEMM.json new file mode 100644 index 0000000000..ac4ae4fc1a --- /dev/null +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_groupedGEMM.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.80356, 10.85313, 10.86254, 10.79554, 10.72133, 10.63614, 10.2101, 10.31993, 10.22025, 9.91788]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16292.0, 20024.0, 19792.0, 19062.0, 17408.0, 18180.0, 15649.0, 17942.0, 18731.0, 19356.0]}, "iteration_timing_avg": 0.18242147058823527} diff --git a/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh b/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh index e3f9626707..234bc75858 100755 --- a/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh +++ b/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh @@ -15,6 +15,7 @@ echo "---------------------------------" set -x if [[ -z $MBS ]]; then MBS=4; fi if [[ -z $GBS ]]; then GBS=32; fi +if [[ -z $MOE_GROUPED_GEMM ]]; then MOE_GROUPED_GEMM=0; fi if [[ -z $VOCAB_FILE ]]; then VOCAB_FILE="/workspace/data/gpt3_data/vocab.json" ; fi if [[ -z $MERGE_FILE ]]; then MERGE_FILE="/workspace/data/gpt3_data/merges.txt" ; fi @@ -38,6 +39,12 @@ if [[ $USE_CORE -eq 1 ]]; then USE_MCORE=1 fi +if [[ $MOE_GROUPED_GEMM -eq 1 ]]; then + echo "Running MoE with Grouped GEMM" + command="$command pip install git+https://github.com/fanshiqing/grouped_gemm@main;" + TRAINING_DTYPE=bf16 # Currently GroupedGEMM for MoE only supports bf16 dtype +fi + if [[ $USE_TE -eq 1 ]]; then echo "Running with TransformerEngine ..." 
TRANSFORMER_IMPL=transformer_engine diff --git a/tests/functional_tests/test_scripts/gpt3/sbatch_gpt3_distributed_test.sh b/tests/functional_tests/test_scripts/gpt3/sbatch_gpt3_distributed_test.sh index ba2a1b4b62..0319880575 100755 --- a/tests/functional_tests/test_scripts/gpt3/sbatch_gpt3_distributed_test.sh +++ b/tests/functional_tests/test_scripts/gpt3/sbatch_gpt3_distributed_test.sh @@ -16,4 +16,4 @@ echo 'Running tests using $PYTORCH_IMAGE image' srun --output $BASE_DIR/debug/slurm-%j.out --error $BASE_DIR/debug/slurm-%j.out --container-image $PYTORCH_IMAGE --container-mounts $BASE_DIR/tensorboard_logs:/workspace/tensorboard_logs,$BASE_DIR/debug:/workspace/debug,$BASE_DIR/checkpoints:/workspace/checkpoints,$BUILD_DIR:/workspace/megatron-lm,$DATA_DIR:/workspace/data --no-container-mount-home bash -c " ls cd /workspace/megatron-lm - ./tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh DATA_PATH=$DATA_PATH CHECKPOINT_PATH=$CHECKPOINT_PATH TENSORBOARD_DIR=$TENSORBOARD_DIR SCRIPTS_DIR=$SCRIPTS_DIR USE_TE=$USE_TE TP_SIZE=$TP_SIZE PP_SIZE=$PP_SIZE VP_SIZE=$VP_SIZE NUM_NODES=$NUM_NODES MAX_STEPS=$MAX_STEPS USE_CORE=$USE_CORE MBS=$MBS GBS=$GBS ADDITIONAL_PARAMS=\"$ADDITIONAL_PARAMS\"" + ./tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh DATA_PATH=$DATA_PATH CHECKPOINT_PATH=$CHECKPOINT_PATH TENSORBOARD_DIR=$TENSORBOARD_DIR SCRIPTS_DIR=$SCRIPTS_DIR USE_TE=$USE_TE TP_SIZE=$TP_SIZE PP_SIZE=$PP_SIZE VP_SIZE=$VP_SIZE NUM_NODES=$NUM_NODES MAX_STEPS=$MAX_STEPS USE_CORE=$USE_CORE MBS=$MBS GBS=$GBS MOE_GROUPED_GEMM=$MOE_GROUPED_GEMM ADDITIONAL_PARAMS=\"$ADDITIONAL_PARAMS\"" From 44c1752886dd904a1f32fb62ac8ba84f367ddc5d Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Sun, 17 Dec 2023 20:11:10 -0800 Subject: [PATCH 116/296] code clean. --- megatron/core/transformer/moe/base_moe_layer.py | 3 +-- megatron/core/transformer/moe/grouped_mlp.py | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/megatron/core/transformer/moe/base_moe_layer.py b/megatron/core/transformer/moe/base_moe_layer.py index cf596fd3dc..0b502e3f4e 100644 --- a/megatron/core/transformer/moe/base_moe_layer.py +++ b/megatron/core/transformer/moe/base_moe_layer.py @@ -163,11 +163,10 @@ def token_permutation(self, hidden_states): tokens_per_expert = tokens_per_expert.cpu().to(torch.long) # Stage2: permute the tokens locally so that they are grouped by their expert assignment - permuted_local_hidden_states = torch.index_select(local_hidden_states, 0, indices.view(-1)) # Reshape indices to be compatible with Tensor.gather indices = indices.view(-1, 1).expand(-1, hidden_states.shape[-1]) - permuted_local_hidden_states = torch.gather(local_hidden_states, 0, indices) + return permuted_local_hidden_states, tokens_per_expert, indices, global_local_map def token_unpermutation(self, hidden_states, indices, global_local_map=None, bias=None): diff --git a/megatron/core/transformer/moe/grouped_mlp.py b/megatron/core/transformer/moe/grouped_mlp.py index 19f45240b1..b82e79233e 100644 --- a/megatron/core/transformer/moe/grouped_mlp.py +++ b/megatron/core/transformer/moe/grouped_mlp.py @@ -44,7 +44,7 @@ def __init__(self, config: TransformerConfig): gg.assert_grouped_gemm_is_available() assert ( config.add_bias_linear == False - ), "bias in the expert layer is not supported in Grouped GEMM yet." + ), "bias in the expert layer is not supported in Grouped GEMM yet, please set '--disable-bias-linear' instead." 
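# Illustrative sketch of the computation GroupedMLP hands to grouped GEMM: each local
# expert multiplies only its own contiguous slice of the permuted tokens. A plain
# PyTorch loop with toy sizes; the real path calls gg.ops.gmm on bf16 CUDA tensors.
import torch

num_local_experts, hidden, ffn = 2, 4, 8
tokens_per_expert = [3, 5]
tokens = torch.randn(sum(tokens_per_expert), hidden)   # permuted, grouped by expert
w1 = torch.randn(num_local_experts, hidden, ffn)
fc1 = torch.cat(
    [chunk @ w1[e] for e, chunk in enumerate(torch.split(tokens, tokens_per_expert))]
)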
self.expert_parallel = config.expert_model_parallel_size > 1 self.gradient_scale = 1 / parallel_state.get_tensor_and_expert_parallel_world_size() From 254c87400f2207f0ee5e907a9552de8c5cbb864f Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Mon, 18 Dec 2023 17:06:36 +0000 Subject: [PATCH 117/296] Fix the wrong local_indices when k>1. --- megatron/core/transformer/moe/base_moe_layer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/megatron/core/transformer/moe/base_moe_layer.py b/megatron/core/transformer/moe/base_moe_layer.py index 0b502e3f4e..976cb1e61b 100644 --- a/megatron/core/transformer/moe/base_moe_layer.py +++ b/megatron/core/transformer/moe/base_moe_layer.py @@ -134,10 +134,10 @@ def token_permutation(self, hidden_states): global_local_map = (global_indices >= self.local_expert_indices[0]) & ( global_indices <= self.local_expert_indices[-1] ) - local_indices = global_indices[global_local_map] + local_indices = global_indices.masked_select(global_local_map) if self.k > 1: # k > 1 global_probs = self.gather_indices(max_prob) - local_probs = global_probs[global_local_map] + local_probs = global_probs.masked_select(global_local_map) else: local_probs = max_prob # Reshape global_local_map to be compatible with Tensor.gather From 3c03122b95babd70741afe401a56379709742f2c Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Tue, 2 Jan 2024 03:56:26 -0800 Subject: [PATCH 118/296] replace FusedLN with TENorm for MoE so that alt value 'RMSNorm' by TE can be used. --- megatron/core/models/gpt/gpt_layer_specs.py | 3 ++- ...2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel.json | 2 +- ..._50steps_core_enabled_te_8experts2parallel_groupedGEMM.json | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/megatron/core/models/gpt/gpt_layer_specs.py b/megatron/core/models/gpt/gpt_layer_specs.py index 25ef28914a..a2c50a8e4e 100755 --- a/megatron/core/models/gpt/gpt_layer_specs.py +++ b/megatron/core/models/gpt/gpt_layer_specs.py @@ -7,6 +7,7 @@ from megatron.core.transformer.custom_layers.transformer_engine import ( TEDotProductAttention, TELayerNormColumnParallelLinear, + TENorm, TERowParallelLinear, ) from megatron.core.transformer.dot_product_attention import DotProductAttention @@ -39,7 +40,7 @@ def get_gpt_layer_with_transformer_engine_spec( ), ), self_attn_bda=get_bias_dropout_add, - pre_mlp_layernorm=FusedLayerNorm if num_experts else IdentityOp, + pre_mlp_layernorm=TENorm if num_experts else IdentityOp, mlp=mlp, mlp_bda=get_bias_dropout_add, ), diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel.json b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel.json index 4f0233160c..879ec6978b 100644 --- a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel.json +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.80055, 10.86883, 10.86422, 10.80142, 10.71115, 10.63973, 10.2006, 10.30993, 10.21958, 9.92011]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16139.0, 19489.0, 19350.0, 18806.0, 16997.0, 18210.0, 15507.0, 18409.0, 19032.0, 19709.0]}, "iteration_timing_avg": 0.2878829411764705} +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79995, 10.8686, 10.86517, 10.801, 10.71238, 
10.63884, 10.20088, 10.31027, 10.22057, 9.92076]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16119.0, 19347.0, 19548.0, 18978.0, 17241.0, 18198.0, 15695.0, 18267.0, 18834.0, 19678.0]}, "iteration_timing_avg": 0.2742326470588235} diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_groupedGEMM.json b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_groupedGEMM.json index ac4ae4fc1a..3ac2e4ec51 100644 --- a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_groupedGEMM.json +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_groupedGEMM.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.80356, 10.85313, 10.86254, 10.79554, 10.72133, 10.63614, 10.2101, 10.31993, 10.22025, 9.91788]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16292.0, 20024.0, 19792.0, 19062.0, 17408.0, 18180.0, 15649.0, 17942.0, 18731.0, 19356.0]}, "iteration_timing_avg": 0.18242147058823527} +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.80299, 10.8542, 10.86297, 10.79511, 10.72125, 10.63589, 10.20959, 10.31974, 10.22064, 9.91805]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16202.0, 19498.0, 19676.0, 18969.0, 17528.0, 18153.0, 15821.0, 18030.0, 18555.0, 19223.0]}, "iteration_timing_avg": 0.17766941176470588} From 6b7b95920ee240b3f304761f186ec715edd25f78 Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Sun, 7 Jan 2024 18:45:02 -0800 Subject: [PATCH 119/296] more comments. --- megatron/core/parallel_state.py | 6 ++++-- megatron/core/transformer/moe/grouped_mlp.py | 12 ++++++++++++ 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/megatron/core/parallel_state.py b/megatron/core/parallel_state.py index f509a68b88..c65d8a5f7f 100644 --- a/megatron/core/parallel_state.py +++ b/megatron/core/parallel_state.py @@ -888,7 +888,7 @@ def get_context_parallel_rank(): def get_expert_model_parallel_world_size(): - """Return my rank for the expert parallel group""" + """Return world size for the expert model parallel group""" if torch.distributed.is_available() and torch.distributed.is_initialized(): tensor_and_expert_parallel_world_size = torch.distributed.get_world_size( group=get_tensor_and_expert_parallel_group() @@ -899,7 +899,9 @@ def get_expert_model_parallel_world_size(): def get_tensor_and_expert_parallel_world_size(): - """Return my rank for the expert parallel group""" + """Return world size for the expert model parallel group times model parallel group. + Currently, each expert will also be distributed across TP group by default. 
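# Illustrative sketch of the size relation documented above: the combined group spans
# TP * EP ranks, so the expert-model-parallel size is recovered by dividing out the
# tensor-model-parallel size (standalone arithmetic, no process groups needed).
tensor_model_parallel_size = 2
expert_model_parallel_size = 4
tensor_and_expert_parallel_world_size = tensor_model_parallel_size * expert_model_parallel_size
assert tensor_and_expert_parallel_world_size // tensor_model_parallel_size == expert_model_parallel_size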
+ """ if torch.distributed.is_available() and torch.distributed.is_initialized(): tensor_and_expert_parallel_world_size = torch.distributed.get_world_size( group=get_tensor_and_expert_parallel_group() diff --git a/megatron/core/transformer/moe/grouped_mlp.py b/megatron/core/transformer/moe/grouped_mlp.py index b82e79233e..411f3561ee 100644 --- a/megatron/core/transformer/moe/grouped_mlp.py +++ b/megatron/core/transformer/moe/grouped_mlp.py @@ -16,6 +16,18 @@ class ScaleGradient(torch.autograd.Function): + """ When running MoE layer with T tokens per device and E experts on N devices + with pure data parallelism (no expert model parallelism), each device + calculates the average gradient for its local T tokens and then averages over + the N devices, so the gradient is effectively scaled by 1 / (T * N) for + each expert weights. + + If you're instead running with N-way expert model parallelism, there is + no final gradient all reduce for the expert weights so the gradient + is scaled by 1 / tokens. Thus We scale by 1 / expert_parallel_world_size + = 1 / N to correct this so that the two settings match. + """ + @staticmethod @torch.cuda.amp.custom_fwd def forward(ctx, x, scale): From 65f3659bd6e1235966837d82e5fda057e675b3a3 Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Sun, 7 Jan 2024 21:13:27 -0800 Subject: [PATCH 120/296] fix comments. --- megatron/core/transformer/moe/grouped_mlp.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/megatron/core/transformer/moe/grouped_mlp.py b/megatron/core/transformer/moe/grouped_mlp.py index 411f3561ee..19d67e1d01 100644 --- a/megatron/core/transformer/moe/grouped_mlp.py +++ b/megatron/core/transformer/moe/grouped_mlp.py @@ -26,6 +26,10 @@ class ScaleGradient(torch.autograd.Function): no final gradient all reduce for the expert weights so the gradient is scaled by 1 / tokens. Thus We scale by 1 / expert_parallel_world_size = 1 / N to correct this so that the two settings match. + + Note: this is necessary to keep the grouped_gemm implementation (https://github.com/tgale96/grouped_gemm) + works as expected compared to our SwitchMLP baseline. + TODO: We will remove this module in our own developed grouped-gemm kernels. """ @staticmethod From c13f08a11b7773289bb1cb8b5eda51d1cb5234fc Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Mon, 8 Jan 2024 23:08:26 -0800 Subject: [PATCH 121/296] remove duplicated gradient scaling operations for MoE weight. Already processed in DDP. --- megatron/core/transformer/moe/grouped_mlp.py | 43 +------------------ ...bled_te_8experts2parallel_groupedGEMM.json | 2 +- 2 files changed, 3 insertions(+), 42 deletions(-) diff --git a/megatron/core/transformer/moe/grouped_mlp.py b/megatron/core/transformer/moe/grouped_mlp.py index 19d67e1d01..802cfcde14 100644 --- a/megatron/core/transformer/moe/grouped_mlp.py +++ b/megatron/core/transformer/moe/grouped_mlp.py @@ -15,38 +15,6 @@ from .base_moe_layer import BaseMoELayer -class ScaleGradient(torch.autograd.Function): - """ When running MoE layer with T tokens per device and E experts on N devices - with pure data parallelism (no expert model parallelism), each device - calculates the average gradient for its local T tokens and then averages over - the N devices, so the gradient is effectively scaled by 1 / (T * N) for - each expert weights. - - If you're instead running with N-way expert model parallelism, there is - no final gradient all reduce for the expert weights so the gradient - is scaled by 1 / tokens. 
Thus We scale by 1 / expert_parallel_world_size - = 1 / N to correct this so that the two settings match. - - Note: this is necessary to keep the grouped_gemm implementation (https://github.com/tgale96/grouped_gemm) - works as expected compared to our SwitchMLP baseline. - TODO: We will remove this module in our own developed grouped-gemm kernels. - """ - - @staticmethod - @torch.cuda.amp.custom_fwd - def forward(ctx, x, scale): - ctx.scale = scale - return x - - @staticmethod - @torch.cuda.amp.custom_bwd - def backward(ctx, grad): - return grad * ctx.scale, None - - -scale_gradient = ScaleGradient.apply - - class GroupedMLP(BaseMoELayer): """ Top-1 Mixture of Experts Layer with Grouped GEMM. Routes input to one of N MLP "experts" @@ -63,7 +31,6 @@ def __init__(self, config: TransformerConfig): ), "bias in the expert layer is not supported in Grouped GEMM yet, please set '--disable-bias-linear' instead." self.expert_parallel = config.expert_model_parallel_size > 1 - self.gradient_scale = 1 / parallel_state.get_tensor_and_expert_parallel_world_size() if self.config.gated_linear_unit: def glu(x): @@ -158,11 +125,6 @@ def glu(x): setattr(self.weight1, 'allreduce', not self.expert_parallel) setattr(self.weight2, 'allreduce', not self.expert_parallel) - def scale_grad(self, w): - if self.gradient_scale is None: - return w - return scale_gradient(w, self.gradient_scale) - def forward(self, hidden_states): # Permutation of tokens ( @@ -172,10 +134,9 @@ def forward(self, hidden_states): global_local_map, ) = self.token_permutation(hidden_states) - w1, w2 = (self.scale_grad(self.weight1), self.scale_grad(self.weight2)) # Reshape the weights for the grouped GEMMs. - w1 = w1.view(self.num_local_experts, self.config.hidden_size, -1) - w2 = w2.view(self.num_local_experts, -1, self.config.hidden_size) + w1 = self.weight1.view(self.num_local_experts, self.config.hidden_size, -1) + w2 = self.weight2.view(self.num_local_experts, -1, self.config.hidden_size) fc1_output = gg.ops.gmm(permuted_local_hidden_states, w1, tokens_per_expert, trans_b=False) diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_groupedGEMM.json b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_groupedGEMM.json index 3ac2e4ec51..65722ad370 100644 --- a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_groupedGEMM.json +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_groupedGEMM.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.80299, 10.8542, 10.86297, 10.79511, 10.72125, 10.63589, 10.20959, 10.31974, 10.22064, 9.91805]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16202.0, 19498.0, 19676.0, 18969.0, 17528.0, 18153.0, 15821.0, 18030.0, 18555.0, 19223.0]}, "iteration_timing_avg": 0.17766941176470588} +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.80299, 10.85374, 10.86293, 10.7946, 10.72149, 10.6366, 10.20914, 10.31959, 10.21976, 9.9151]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16202.0, 19844.0, 19572.0, 18806.0, 17390.0, 17902.0, 15816.0, 17990.0, 18341.0, 19322.0]}, "iteration_timing_avg": 0.1749138235294118} \ No newline at end of file From 3a46f12e15a50866f6942384ee796e5018e81342 Mon Sep 17 00:00:00 2001 From: Selvaraj Anandaraj Date: Wed, 10 Jan 2024 
19:07:37 -0800 Subject: [PATCH 122/296] Fixed typo Signed-off-by: Selvaraj Anandaraj --- megatron/core/model_parallel_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/megatron/core/model_parallel_config.py b/megatron/core/model_parallel_config.py index 3502201287..f9590615dc 100644 --- a/megatron/core/model_parallel_config.py +++ b/megatron/core/model_parallel_config.py @@ -62,7 +62,7 @@ class ModelParallelConfig: Defaults to False. async_tensor_model_parallel_allreduce (bool, optional): If true, enables asynchronous execution of - tensor-model-parallel all-reduce with weight gradient compuation of a column-linear layer. Defaults to False. + tensor-model-parallel all-reduce with weight gradient compuation of a column-linear layer. Defaults to True. tp_comm_overlap (bool, optional): If true, allows overlapping of Linear layer execution with tensor parallel communication collectives like AllGather/ReduceScatter. Overlapping is done for the linear layers wherever From f17d5d3e1040a6fcaa5ec988b5e14e20da7565ec Mon Sep 17 00:00:00 2001 From: Alexandros Koumparoulis Date: Thu, 11 Jan 2024 09:35:23 -0800 Subject: [PATCH 123/296] formatting. --- megatron/core/transformer/custom_layers/transformer_engine.py | 4 ++-- megatron/core/transformer/transformer_config.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/megatron/core/transformer/custom_layers/transformer_engine.py b/megatron/core/transformer/custom_layers/transformer_engine.py index fbc1c245b4..240672791d 100644 --- a/megatron/core/transformer/custom_layers/transformer_engine.py +++ b/megatron/core/transformer/custom_layers/transformer_engine.py @@ -402,8 +402,8 @@ def __init__( if config.window_size is not None: # Check version - assert ( - te_version >= packaging.version.Version("1.2.0") + assert te_version >= packaging.version.Version( + "1.2.0" ), f"Transformer-Engine version ({str(te_version)}) must be >= 1.2.0 to support sliding window attention." 
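# Illustrative sketch of the version gate used above (and in the unit test later in
# this series); assumes transformer-engine is installed so its metadata is readable.
from importlib.metadata import version
from pkg_resources import packaging

te_version = packaging.version.Version(version("transformer-engine"))
swa_supported = te_version >= packaging.version.Version("1.2.0")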
extra_kwargs['window_size'] = config.window_size diff --git a/megatron/core/transformer/transformer_config.py b/megatron/core/transformer/transformer_config.py index f77d959217..78de06a2b0 100644 --- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -2,7 +2,7 @@ import types from dataclasses import dataclass -from typing import Callable, Tuple, Optional +from typing import Callable, Optional, Tuple import torch import torch.nn.functional as F From 416dcc6a66e6a08cb091d3d44fc23e991085292d Mon Sep 17 00:00:00 2001 From: Selvaraj Anandaraj Date: Thu, 11 Jan 2024 17:08:37 -0800 Subject: [PATCH 124/296] Removed passing context to TE Signed-off-by: Selvaraj Anandaraj --- megatron/core/transformer/custom_layers/transformer_engine.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/megatron/core/transformer/custom_layers/transformer_engine.py b/megatron/core/transformer/custom_layers/transformer_engine.py index ab2e853e43..c72ae5060e 100644 --- a/megatron/core/transformer/custom_layers/transformer_engine.py +++ b/megatron/core/transformer/custom_layers/transformer_engine.py @@ -122,7 +122,6 @@ def __init__( out_features=output_size, sequence_parallel=self.config.sequence_parallel, fuse_wgrad_accumulation=self.config.gradient_accumulation_fusion, - cpu_offloading_context=self.config.cpu_offloading_context, tp_group=get_tensor_model_parallel_group(check_initialized=False), tp_size=self.config.tensor_model_parallel_size, get_rng_state_tracker=get_cuda_rng_tracker, @@ -212,7 +211,6 @@ def __init__( eps=self.config.layernorm_epsilon, sequence_parallel=self.config.sequence_parallel, fuse_wgrad_accumulation=self.config.gradient_accumulation_fusion, - cpu_offloading_context=self.config.cpu_offloading_context, tp_group=get_tensor_model_parallel_group(check_initialized=False), tp_size=self.config.tensor_model_parallel_size, get_rng_state_tracker=get_cuda_rng_tracker, From d6edb390255a8f46a5f68e5fc562eea34d07d983 Mon Sep 17 00:00:00 2001 From: Alexandros Koumparoulis Date: Fri, 12 Jan 2024 13:23:12 -0800 Subject: [PATCH 125/296] SWA-test: only run when TE version is high enough; temporary fix to be removed once CI updated. --- .../transformer/test_spec_customization.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/tests/unit_tests/transformer/test_spec_customization.py b/tests/unit_tests/transformer/test_spec_customization.py index 425588b289..2ffa835404 100755 --- a/tests/unit_tests/transformer/test_spec_customization.py +++ b/tests/unit_tests/transformer/test_spec_customization.py @@ -1,10 +1,13 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. 
+import sys from dataclasses import dataclass, fields +from importlib.metadata import version import pytest import torch import transformer_engine as te +from pkg_resources import packaging from megatron.core.fusions.fused_bias_dropout import get_bias_dropout_add from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed @@ -15,12 +18,12 @@ TENorm, TERowParallelLinear, ) +from megatron.core.transformer.dot_product_attention import DotProductAttention from megatron.core.transformer.enums import AttnMaskType from megatron.core.transformer.identity_op import IdentityFuncOp, IdentityOp from megatron.core.transformer.spec_utils import ModuleSpec, build_module, import_module from megatron.core.transformer.transformer_config import TransformerConfig from megatron.core.transformer.transformer_layer import TransformerLayerSubmodules -from megatron.core.transformer.dot_product_attention import DotProductAttention from tests.unit_tests.test_utilities import Utils @@ -130,6 +133,13 @@ def test_build_module(self): def test_sliding_window_attention(self): + te_version = packaging.version.Version(version("transformer-engine")) + if te_version < packaging.version.Version( + "1.2.0" + ): + print("SWA not tested because TE version is not >= 1.2.0", file=sys.stderr) + return + config = TransformerConfig( num_layers=2, hidden_size=12, @@ -185,4 +195,4 @@ def test_sliding_window_attention(self): attention_type='self' ) # Make sure it's causal. - assert attn.window_size == (-1, 0) \ No newline at end of file + assert attn.window_size == (-1, 0) From ac4c63b88c4cfd1042e50a5db28bda2a28635fda Mon Sep 17 00:00:00 2001 From: Shriya Balaji Palsamudram Date: Fri, 12 Jan 2024 13:33:07 -0800 Subject: [PATCH 126/296] Add missing __init__.py to moe folder --- megatron/core/transformer/moe/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 megatron/core/transformer/moe/__init__.py diff --git a/megatron/core/transformer/moe/__init__.py b/megatron/core/transformer/moe/__init__.py new file mode 100644 index 0000000000..e69de29bb2 From 2e96537890884f48f640b8ac42aafb95e9a141f1 Mon Sep 17 00:00:00 2001 From: John Kamalu Date: Fri, 12 Jan 2024 13:52:26 -0800 Subject: [PATCH 127/296] Preprocessing unittests fix --- tests/unit_tests/data/test_preprocess_data.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/tests/unit_tests/data/test_preprocess_data.py b/tests/unit_tests/data/test_preprocess_data.py index 63dba573fc..06e2be1f4e 100644 --- a/tests/unit_tests/data/test_preprocess_data.py +++ b/tests/unit_tests/data/test_preprocess_data.py @@ -22,6 +22,12 @@ "https://huggingface.co/bert-base-uncased/raw/main/vocab.txt" ) +__LOCAL_BERT_VOCAB = "/home/gitlab-runner/data/bert_data/vocab.txt" + +__LOCAL_GPT2_MERGE = "/home/gitlab-runner/data/gpt3_data/gpt2-merges.txt" + +__LOCAL_GPT2_VOCAB = "/home/gitlab-runner/data/gpt3_data/gpt2-vocab.json" + def dummy_jsonl(odir): # numbers @@ -92,7 +98,7 @@ def tokens_to_string(toks): return getattr(encoder.tokenizer, option)(toks) except: continue - raise RuntimeError(f"{type(encoder.tokenizer)} tokenizer cannot `decode` or `detokenize`.") + raise RuntimeError(f"{type(encoder.tokenizer)} tokenizer cannot decode or detokenize") merged_index = 0 merged_dataset = MMapIndexedDataset(os.path.join(path_to_data, "merge")) @@ -161,6 +167,8 @@ def tokens_to_string(toks): def gpt2_vocab(odir): + if os.path.exists(__LOCAL_GPT2_VOCAB): + return __LOCAL_GPT2_VOCAB path = os.path.join(odir, "vocab.json") with open(path, "wb") 
as writer: writer.write(requests.get(PRETRAINED_VOCAB_ARCHIVE_MAP['gpt2']).content) @@ -168,6 +176,8 @@ def gpt2_vocab(odir): def gpt2_merge(odir): + if os.path.exists(__LOCAL_GPT2_MERGE): + return __LOCAL_GPT2_MERGE path = os.path.join(odir, "merge.txt") with open(path, "wb") as writer: writer.write(requests.get(PRETRAINED_MERGES_ARCHIVE_MAP['gpt2']).content) @@ -196,6 +206,8 @@ def test_preprocess_data_gpt(): def bert_vocab(odir): + if os.path.exists(__LOCAL_BERT_VOCAB): + return __LOCAL_BERT_VOCAB path = os.path.join(odir, "vocab.txt") with open(path, "wb") as writer: writer.write(requests.get(__HUGGINGFACE_BERT_BASE_UNCASED_VOCAB).content) From 1d2af028dabe6150c71216e59ca135fcbb4971e8 Mon Sep 17 00:00:00 2001 From: Hongbin Liu Date: Mon, 15 Jan 2024 01:07:59 -0800 Subject: [PATCH 128/296] minor fix Signed-off-by: Hongbin Liu --- megatron/core/fusions/fused_bias_swiglu.py | 14 +++++++++++--- megatron/core/transformer/mlp.py | 12 ++---------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/megatron/core/fusions/fused_bias_swiglu.py b/megatron/core/fusions/fused_bias_swiglu.py index d02fa04692..6710407e89 100644 --- a/megatron/core/fusions/fused_bias_swiglu.py +++ b/megatron/core/fusions/fused_bias_swiglu.py @@ -62,6 +62,14 @@ def backward(ctx, grad_output): tmp = swiglu_back(grad_output, input[0]) return tmp - -bias_swiglu_impl = BiasSwiGLUFunction.apply -swiglu_impl = SwiGLUFunction.apply +def bias_swiglu_impl(input, bias): + shape = input.shape + input = input.view(-1, shape[2]) + if bias is not None: + output = BiasSwiGLUFunction.apply(input, bias) + else: + output = SwiGLUFunction.apply(input) + return output.view(shape[0], shape[1], -1) + +#bias_swiglu_impl = BiasSwiGLUFunction.apply +#swiglu_impl = SwiGLUFunction.apply diff --git a/megatron/core/transformer/mlp.py b/megatron/core/transformer/mlp.py index a8df733b50..2a32831b77 100644 --- a/megatron/core/transformer/mlp.py +++ b/megatron/core/transformer/mlp.py @@ -10,7 +10,7 @@ from megatron.core.dist_checkpointing import ShardedTensor from megatron.core.dist_checkpointing.mapping import ShardedTensorFactory from megatron.core.fusions.fused_bias_gelu import bias_gelu_impl -from megatron.core.fusions.fused_bias_swiglu import bias_swiglu_impl, swiglu_impl +from megatron.core.fusions.fused_bias_swiglu import bias_swiglu_impl from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.spec_utils import ModuleSpec, build_module from megatron.core.transformer.transformer_config import TransformerConfig @@ -90,24 +90,16 @@ def forward(self, hidden_states): assert self.config.add_bias_linear is True intermediate_parallel = bias_gelu_impl(intermediate_parallel, bias_parallel) elif self.activation_func == F.silu: - shape = intermediate_parallel.shape - intermediate_parallel = intermediate_parallel.view(-1, shape[2]) - if bias_parallel is not None: - intermediate_parallel = bias_swiglu_impl(intermediate_parallel, bias_parallel) - else: - intermediate_parallel = swiglu_impl(intermediate_parallel) - intermediate_parallel = intermediate_parallel.view(shape[0], shape[1], -1) + intermediate_parallel = bias_swiglu_impl(intermediate_parallel, bias_parallel) else: raise ValueError("Only support fusion of gelu and swiglu") else: if bias_parallel is not None: intermediate_parallel = intermediate_parallel + bias_parallel if self.config.gated_linear_unit: - def glu(x): x = torch.chunk(x, 2, dim=-1) return self.config.activation_func(x[0]) * x[1] - intermediate_parallel = glu(intermediate_parallel) else: 
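# Illustrative sketch of the wrapper added to fused_bias_swiglu above: flatten the
# [seq, batch, 2*ffn] activation, apply swiglu (silu(x1) * x2), then restore the
# leading dimensions. Unfused reference shown for clarity, not the fused kernel.
import torch
import torch.nn.functional as F

def swiglu_ref(y):
    y1, y2 = torch.chunk(y, 2, dim=-1)
    return F.silu(y1) * y2

x = torch.randn(3, 5, 8)                 # [seq, batch, 2*ffn]
s = x.shape
out = swiglu_ref(x.view(-1, s[2])).view(s[0], s[1], -1)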
intermediate_parallel = self.activation_func(intermediate_parallel) From 9924a3a8f0190871825840b5e415539cfbb7206b Mon Sep 17 00:00:00 2001 From: Zhengjiang Shao Date: Mon, 15 Jan 2024 06:11:00 -0800 Subject: [PATCH 129/296] Integrate one-logger api for E2E app metrics tracking --- megatron/__init__.py | 1 + megatron/arguments.py | 2 + megatron/config/default.yaml | 11 ++++ .../blended_megatron_dataset_builder.py | 1 + megatron/global_vars.py | 18 +++++++ megatron/timers.py | 9 +++- megatron/training.py | 53 +++++++++++++++++++ 7 files changed, 94 insertions(+), 1 deletion(-) create mode 100644 megatron/config/default.yaml diff --git a/megatron/__init__.py b/megatron/__init__.py index c35de282a2..e9faa069ed 100644 --- a/megatron/__init__.py +++ b/megatron/__init__.py @@ -10,6 +10,7 @@ from .global_vars import get_tokenizer from .global_vars import get_tensorboard_writer from .global_vars import get_wandb_writer +from .global_vars import get_one_logger from .global_vars import get_adlr_autoresume from .global_vars import get_timers from .initialize import initialize_megatron diff --git a/megatron/arguments.py b/megatron/arguments.py index fff5bbeb5b..fcd745a323 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -722,6 +722,8 @@ def _add_logging_args(parser): help='The wandb experiment name.') group.add_argument('--wandb-save-dir', type=str, default='', help='Path to save the wandb results locally.') + group.add_argument('--enable-onelogger', action='store_false', + help='If set, use one_logger to track e2e metrics') return parser diff --git a/megatron/config/default.yaml b/megatron/config/default.yaml new file mode 100644 index 0000000000..73b74afd3a --- /dev/null +++ b/megatron/config/default.yaml @@ -0,0 +1,11 @@ +enable_one_logger: True + +wandb: + host: https://api.wandb.ai + api_key: ${oc.env:WANDB_API_KEY} + entity: zshao + project: MNIST + name: one-logger-megatron-test + tags: + - e2e_metrics_enabled + - e2e_metrics_testing \ No newline at end of file diff --git a/megatron/core/datasets/blended_megatron_dataset_builder.py b/megatron/core/datasets/blended_megatron_dataset_builder.py index c5c509ea7c..39f6d23630 100644 --- a/megatron/core/datasets/blended_megatron_dataset_builder.py +++ b/megatron/core/datasets/blended_megatron_dataset_builder.py @@ -38,6 +38,7 @@ def __init__( self.cls = cls self.sizes = sizes self.config = config + self.config.path_to_cache = '/lustre/fsw/portfolios/hwinf/users/zshao/onelogger-test/Megatron-LM/data_cache' def build(self) -> List[Optional[Union[BlendedDataset, MegatronDataset]]]: """Build all dataset splits according to the provided blend(s) diff --git a/megatron/global_vars.py b/megatron/global_vars.py index b1b4b043e8..664092c10b 100644 --- a/megatron/global_vars.py +++ b/megatron/global_vars.py @@ -17,6 +17,7 @@ _GLOBAL_TOKENIZER = None _GLOBAL_TENSORBOARD_WRITER = None _GLOBAL_WANDB_WRITER = None +_GLOBAL_ONE_LOGGER = None _GLOBAL_ADLR_AUTORESUME = None _GLOBAL_TIMERS = None _GLOBAL_SIGNAL_HANDLER = None @@ -63,6 +64,12 @@ def get_wandb_writer(): return _GLOBAL_WANDB_WRITER +def get_one_logger(): + """Return one logger. It can be None so no need + to check if it is initialized.""" + return _GLOBAL_ONE_LOGGER + + def get_adlr_autoresume(): """ADLR autoresume object. 
It can be None so no need to check if it is initialized.""" @@ -100,6 +107,7 @@ def set_global_variables(args, build_tokenizer=True): _ = _build_tokenizer(args) _set_tensorboard_writer(args) _set_wandb_writer(args) + _set_one_logger(args) _set_adlr_autoresume(args) _set_timers(args) @@ -185,6 +193,16 @@ def _set_wandb_writer(args): _GLOBAL_WANDB_WRITER = wandb +def _set_one_logger(args): + global _GLOBAL_ONE_LOGGER + _ensure_var_is_not_initialized(_GLOBAL_ONE_LOGGER, 'one logger') + + if args.enable_onelogger and args.rank == (args.world_size - 1): + from one_logger.core import OneLogger + one_logger = OneLogger() + _GLOBAL_ONE_LOGGER = one_logger + + def _set_adlr_autoresume(args): """Initialize ADLR autoresume.""" global _GLOBAL_ADLR_AUTORESUME diff --git a/megatron/timers.py b/megatron/timers.py index a9478fa014..e64d41e044 100644 --- a/megatron/timers.py +++ b/megatron/timers.py @@ -66,6 +66,7 @@ class Timer(TimerBase): def __init__(self, name): super().__init__(name) self._elapsed = 0.0 + self._active_time = 0.0 self._started = False # Note that None will default to the global process group self._barrier_group = None @@ -92,12 +93,15 @@ def stop(self, barrier=False): if barrier: torch.distributed.barrier(group=self._barrier_group) torch.cuda.synchronize() - self._elapsed += (time.time() - self._start_time) + elapsed = time.time() - self._start_time + self._elapsed += elapsed + self._active_time += elapsed self._started = False def reset(self): """Reset timer.""" + # Don't reset _active_time self._elapsed = 0.0 self._started = False @@ -118,6 +122,9 @@ def elapsed(self, reset=True, barrier=False): self.start(barrier=barrier) return _elapsed + def active_time(self): + return self._active_time + class Timers: diff --git a/megatron/training.py b/megatron/training.py index d18d3c3b91..6487326e83 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -21,6 +21,7 @@ from megatron import get_timers from megatron import get_tensorboard_writer from megatron import get_wandb_writer +from megatron import get_one_logger from megatron import get_current_global_batch_size from megatron import get_num_microbatches from megatron import is_last_rank @@ -135,10 +136,17 @@ def pretrain(train_valid_test_dataset_provider, args = get_args() timers = get_timers() + one_logger = get_one_logger() + if one_logger: + one_logger.log_metrics({ + 'train_iterations_warmup': args.lr_warmup_iters, + }) + # Model, optimizer, and learning rate. timers('model-and-optimizer-setup', log_level=0).start(barrier=True) model, optimizer, opt_param_scheduler = setup_model_and_optimizer( model_provider, model_type) + timers('model-and-optimizer-setup').stop() print_datetime('after model, optimizer, and learning rate ' 'scheduler are built') @@ -208,6 +216,7 @@ def pretrain(train_valid_test_dataset_provider, verbose=True, write_to_tensorboard=not args.skip_train) + def update_train_iters(args): # For iteration-based training, we don't need to do anything @@ -650,6 +659,7 @@ def training_log(loss_dict, total_loss_dict, learning_rate, iteration, if iteration % args.log_interval == 0: elapsed_time = timers('interval-time').elapsed(barrier=True) elapsed_time_per_iteration = elapsed_time / total_iterations + throughput = num_floating_point_operations(args, batch_size) / ( elapsed_time_per_iteration * 10**12 * args.world_size) if args.log_timers_to_tensorboard: @@ -738,6 +748,17 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, # Iterations. 
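# Illustrative sketch of the timer bookkeeping added in timers.py above: _elapsed is
# reset every logging interval, while _active_time keeps accumulating so the E2E
# metrics can report total active training time.
import time

class MiniTimer:
    def __init__(self):
        self._elapsed = 0.0
        self._active_time = 0.0
        self._start_time = None

    def start(self):
        self._start_time = time.time()

    def stop(self):
        elapsed = time.time() - self._start_time
        self._elapsed += elapsed
        self._active_time += elapsed

    def reset(self):
        self._elapsed = 0.0              # _active_time intentionally survives resets

    def active_time(self):
        return self._active_time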
iteration = args.iteration + one_logger = get_one_logger() + if one_logger: + iteration_start = iteration + train_samples_start = args.consumed_train_samples + train_samples_target = args.train_samples + one_logger.log_metrics({ + 'train_iterations_start': iteration, + 'train_samples_start': args.consumed_train_samples, + 'train_samples_target': train_samples_target, + 'train_iterations_target': args.train_iters, + }) # Setup some training config params config.grad_scale_func = optimizer.scale_loss @@ -773,6 +794,29 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, gc.disable() gc.collect() + eval_duration = 0.0 + eval_iterations = 0 + def track_e2e_metrics(): + # Nested function to track a bunch of E2E APP metrics + if one_logger: + train_duration = timers('interval-time').active_time() # overall_elapsed + train_samples = args.consumed_train_samples - train_samples_start + train_iterations = iteration - iteration_start + train_iterations_time_msecs_avg = train_duration*1000.0 / train_iterations + if eval_iterations: + validation_iterations_time_msecs_avg = eval_duration*1000.0 / eval_iterations + else: + validation_iterations_time_msecs_avg = None + + one_logger.log_metrics({ + 'train_iterations_end': iteration, + 'train_samples_end': args.consumed_train_samples, + 'train_iterations': train_iterations, + 'train_samples': train_samples, + 'train_iterations_time_msecs_avg': train_iterations_time_msecs_avg, + 'validation_iterations_time_msecs_avg': validation_iterations_time_msecs_avg + }) + while iteration < args.train_iters: if args.profile and \ iteration == args.profile_step_start and \ @@ -805,6 +849,9 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, report_memory_flag, skipped_iter, grad_norm, params_norm, num_zeros_in_grad) + if iteration % args.log_interval == 0: + track_e2e_metrics() + # Autoresume if args.adlr_autoresume and \ (iteration % args.adlr_autoresume_interval == 0): @@ -819,10 +866,14 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, # Collect all objects. gc.collect() prefix = 'iteration {}'.format(iteration) + timers('eval-time', log_level=0).start(barrier=True) evaluate_and_print_results(prefix, forward_step_func, valid_data_iterator, model, iteration, process_non_loss_data_func, config, False) + eval_duration += timers('eval-time').elapsed() + eval_iterations += args.eval_iters + timers('eval-time').stop() if args.manual_gc and args.manual_gc_eval: # Collect only the objects created and used in evaluation. gc.collect(generation=0) @@ -883,6 +934,8 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, if args.manual_gc_interval != 0 and iteration % args.manual_gc_interval == 0: gc.collect() + track_e2e_metrics() + # Flush TensorBoard and WandB writers. 
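# Illustrative sketch of the averaging done by track_e2e_metrics above (toy numbers;
# the real values come from the interval and eval timers and the iteration counters).
train_duration = 120.0                   # seconds of active training time
train_iterations = 50
eval_duration, eval_iterations = 12.0, 5
train_iterations_time_msecs_avg = train_duration * 1000.0 / train_iterations
validation_iterations_time_msecs_avg = (
    eval_duration * 1000.0 / eval_iterations if eval_iterations else None
)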
writer = get_tensorboard_writer() if writer: From a95227a5eb264369dfcf1d7deed6805047cc8f57 Mon Sep 17 00:00:00 2001 From: zshao Date: Mon, 15 Jan 2024 22:27:15 +0800 Subject: [PATCH 130/296] Set --enable-onelogger action to 'store_true' --- megatron/arguments.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/megatron/arguments.py b/megatron/arguments.py index fcd745a323..c3a6522eb8 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -722,7 +722,7 @@ def _add_logging_args(parser): help='The wandb experiment name.') group.add_argument('--wandb-save-dir', type=str, default='', help='Path to save the wandb results locally.') - group.add_argument('--enable-onelogger', action='store_false', + group.add_argument('--enable-onelogger', action='store_true', help='If set, use one_logger to track e2e metrics') return parser From c7d0fb169f1d0f5fb379317a7d8b6236ea772be1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20B=C5=82a=C5=BC?= Date: Fri, 12 Jan 2024 16:30:04 +0100 Subject: [PATCH 131/296] Include module parameters in default sharded_state_dict --- megatron/core/transformer/module.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/megatron/core/transformer/module.py b/megatron/core/transformer/module.py index eedfa9ce26..159ce2fda1 100644 --- a/megatron/core/transformer/module.py +++ b/megatron/core/transformer/module.py @@ -70,6 +70,12 @@ def sharded_state_dict( dict: dictionary of state dict keys mapped to ShardedTensors """ sharded_state_dict = {} + # Save parameters + self._save_to_state_dict(sharded_state_dict, '', keep_vars=True) + sharded_state_dict = make_sharded_tensors_for_checkpoint( + sharded_state_dict, prefix, sharded_offsets=sharded_offsets + ) + # Recurse into submodules for name, module in self.named_children(): sharded_state_dict.update( sharded_state_dict_default(module, f'{prefix}{name}.', sharded_offsets) From 7bcb2e195ccb37b3bffef5cebe61b2d58338a18e Mon Sep 17 00:00:00 2001 From: Zhengjiang Shao Date: Mon, 15 Jan 2024 06:11:00 -0800 Subject: [PATCH 132/296] Integrate one-logger api for E2E app metrics tracking --- megatron/__init__.py | 1 + megatron/arguments.py | 2 + megatron/config/default.yaml | 11 ++++ .../blended_megatron_dataset_builder.py | 1 + megatron/global_vars.py | 18 +++++++ megatron/timers.py | 9 +++- megatron/training.py | 53 +++++++++++++++++++ 7 files changed, 94 insertions(+), 1 deletion(-) create mode 100644 megatron/config/default.yaml diff --git a/megatron/__init__.py b/megatron/__init__.py index c35de282a2..e9faa069ed 100644 --- a/megatron/__init__.py +++ b/megatron/__init__.py @@ -10,6 +10,7 @@ from .global_vars import get_tokenizer from .global_vars import get_tensorboard_writer from .global_vars import get_wandb_writer +from .global_vars import get_one_logger from .global_vars import get_adlr_autoresume from .global_vars import get_timers from .initialize import initialize_megatron diff --git a/megatron/arguments.py b/megatron/arguments.py index 90d8651f17..5730f47e1d 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -735,6 +735,8 @@ def _add_logging_args(parser): help='The wandb experiment name.') group.add_argument('--wandb-save-dir', type=str, default='', help='Path to save the wandb results locally.') + group.add_argument('--enable-onelogger', action='store_false', + help='If set, use one_logger to track e2e metrics') return parser diff --git a/megatron/config/default.yaml b/megatron/config/default.yaml new file mode 100644 index 0000000000..73b74afd3a --- /dev/null +++ 
b/megatron/config/default.yaml @@ -0,0 +1,11 @@ +enable_one_logger: True + +wandb: + host: https://api.wandb.ai + api_key: ${oc.env:WANDB_API_KEY} + entity: zshao + project: MNIST + name: one-logger-megatron-test + tags: + - e2e_metrics_enabled + - e2e_metrics_testing \ No newline at end of file diff --git a/megatron/core/datasets/blended_megatron_dataset_builder.py b/megatron/core/datasets/blended_megatron_dataset_builder.py index c5c509ea7c..39f6d23630 100644 --- a/megatron/core/datasets/blended_megatron_dataset_builder.py +++ b/megatron/core/datasets/blended_megatron_dataset_builder.py @@ -38,6 +38,7 @@ def __init__( self.cls = cls self.sizes = sizes self.config = config + self.config.path_to_cache = '/lustre/fsw/portfolios/hwinf/users/zshao/onelogger-test/Megatron-LM/data_cache' def build(self) -> List[Optional[Union[BlendedDataset, MegatronDataset]]]: """Build all dataset splits according to the provided blend(s) diff --git a/megatron/global_vars.py b/megatron/global_vars.py index b1b4b043e8..664092c10b 100644 --- a/megatron/global_vars.py +++ b/megatron/global_vars.py @@ -17,6 +17,7 @@ _GLOBAL_TOKENIZER = None _GLOBAL_TENSORBOARD_WRITER = None _GLOBAL_WANDB_WRITER = None +_GLOBAL_ONE_LOGGER = None _GLOBAL_ADLR_AUTORESUME = None _GLOBAL_TIMERS = None _GLOBAL_SIGNAL_HANDLER = None @@ -63,6 +64,12 @@ def get_wandb_writer(): return _GLOBAL_WANDB_WRITER +def get_one_logger(): + """Return one logger. It can be None so no need + to check if it is initialized.""" + return _GLOBAL_ONE_LOGGER + + def get_adlr_autoresume(): """ADLR autoresume object. It can be None so no need to check if it is initialized.""" @@ -100,6 +107,7 @@ def set_global_variables(args, build_tokenizer=True): _ = _build_tokenizer(args) _set_tensorboard_writer(args) _set_wandb_writer(args) + _set_one_logger(args) _set_adlr_autoresume(args) _set_timers(args) @@ -185,6 +193,16 @@ def _set_wandb_writer(args): _GLOBAL_WANDB_WRITER = wandb +def _set_one_logger(args): + global _GLOBAL_ONE_LOGGER + _ensure_var_is_not_initialized(_GLOBAL_ONE_LOGGER, 'one logger') + + if args.enable_onelogger and args.rank == (args.world_size - 1): + from one_logger.core import OneLogger + one_logger = OneLogger() + _GLOBAL_ONE_LOGGER = one_logger + + def _set_adlr_autoresume(args): """Initialize ADLR autoresume.""" global _GLOBAL_ADLR_AUTORESUME diff --git a/megatron/timers.py b/megatron/timers.py index a9478fa014..e64d41e044 100644 --- a/megatron/timers.py +++ b/megatron/timers.py @@ -66,6 +66,7 @@ class Timer(TimerBase): def __init__(self, name): super().__init__(name) self._elapsed = 0.0 + self._active_time = 0.0 self._started = False # Note that None will default to the global process group self._barrier_group = None @@ -92,12 +93,15 @@ def stop(self, barrier=False): if barrier: torch.distributed.barrier(group=self._barrier_group) torch.cuda.synchronize() - self._elapsed += (time.time() - self._start_time) + elapsed = time.time() - self._start_time + self._elapsed += elapsed + self._active_time += elapsed self._started = False def reset(self): """Reset timer.""" + # Don't reset _active_time self._elapsed = 0.0 self._started = False @@ -118,6 +122,9 @@ def elapsed(self, reset=True, barrier=False): self.start(barrier=barrier) return _elapsed + def active_time(self): + return self._active_time + class Timers: diff --git a/megatron/training.py b/megatron/training.py index 29ab904c90..d5d6fa8edd 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -21,6 +21,7 @@ from megatron import get_timers from megatron import get_tensorboard_writer 
from megatron import get_wandb_writer +from megatron import get_one_logger from megatron import get_current_global_batch_size from megatron import get_num_microbatches from megatron import is_last_rank @@ -135,10 +136,17 @@ def pretrain(train_valid_test_dataset_provider, args = get_args() timers = get_timers() + one_logger = get_one_logger() + if one_logger: + one_logger.log_metrics({ + 'train_iterations_warmup': args.lr_warmup_iters, + }) + # Model, optimizer, and learning rate. timers('model-and-optimizer-setup', log_level=0).start(barrier=True) model, optimizer, opt_param_scheduler = setup_model_and_optimizer( model_provider, model_type) + timers('model-and-optimizer-setup').stop() print_datetime('after model, optimizer, and learning rate ' 'scheduler are built') @@ -208,6 +216,7 @@ def pretrain(train_valid_test_dataset_provider, verbose=True, write_to_tensorboard=not args.skip_train) + def update_train_iters(args): # For iteration-based training, we don't need to do anything @@ -650,6 +659,7 @@ def training_log(loss_dict, total_loss_dict, learning_rate, iteration, if iteration % args.log_interval == 0: elapsed_time = timers('interval-time').elapsed(barrier=True) elapsed_time_per_iteration = elapsed_time / total_iterations + throughput = num_floating_point_operations(args, batch_size) / ( elapsed_time_per_iteration * 10**12 * args.world_size) if args.log_timers_to_tensorboard: @@ -738,6 +748,17 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, # Iterations. iteration = args.iteration + one_logger = get_one_logger() + if one_logger: + iteration_start = iteration + train_samples_start = args.consumed_train_samples + train_samples_target = args.train_samples + one_logger.log_metrics({ + 'train_iterations_start': iteration, + 'train_samples_start': args.consumed_train_samples, + 'train_samples_target': train_samples_target, + 'train_iterations_target': args.train_iters, + }) # Setup some training config params config.grad_scale_func = optimizer.scale_loss @@ -774,6 +795,29 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, gc.collect() num_microbatches = get_num_microbatches() + eval_duration = 0.0 + eval_iterations = 0 + def track_e2e_metrics(): + # Nested function to track a bunch of E2E APP metrics + if one_logger: + train_duration = timers('interval-time').active_time() # overall_elapsed + train_samples = args.consumed_train_samples - train_samples_start + train_iterations = iteration - iteration_start + train_iterations_time_msecs_avg = train_duration*1000.0 / train_iterations + if eval_iterations: + validation_iterations_time_msecs_avg = eval_duration*1000.0 / eval_iterations + else: + validation_iterations_time_msecs_avg = None + + one_logger.log_metrics({ + 'train_iterations_end': iteration, + 'train_samples_end': args.consumed_train_samples, + 'train_iterations': train_iterations, + 'train_samples': train_samples, + 'train_iterations_time_msecs_avg': train_iterations_time_msecs_avg, + 'validation_iterations_time_msecs_avg': validation_iterations_time_msecs_avg + }) + while iteration < args.train_iters: if args.profile and \ iteration == args.profile_step_start and \ @@ -818,6 +862,9 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, report_memory_flag, skipped_iter, grad_norm, params_norm, num_zeros_in_grad) + if iteration % args.log_interval == 0: + track_e2e_metrics() + # Autoresume if args.adlr_autoresume and \ (iteration % args.adlr_autoresume_interval == 0): @@ -832,10 +879,14 @@ def train(forward_step_func, model, 
optimizer, opt_param_scheduler, # Collect all objects. gc.collect() prefix = 'iteration {}'.format(iteration) + timers('eval-time', log_level=0).start(barrier=True) evaluate_and_print_results(prefix, forward_step_func, valid_data_iterator, model, iteration, process_non_loss_data_func, config, False) + eval_duration += timers('eval-time').elapsed() + eval_iterations += args.eval_iters + timers('eval-time').stop() if args.manual_gc and args.manual_gc_eval: # Collect only the objects created and used in evaluation. gc.collect(generation=0) @@ -896,6 +947,8 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, if args.manual_gc_interval != 0 and iteration % args.manual_gc_interval == 0: gc.collect() + track_e2e_metrics() + # Flush TensorBoard and WandB writers. writer = get_tensorboard_writer() if writer: From 97d9a508d2b8c529f8fad7cd00bd93e1e297d440 Mon Sep 17 00:00:00 2001 From: zshao Date: Mon, 15 Jan 2024 22:27:15 +0800 Subject: [PATCH 133/296] Set --enable-onelogger action to 'store_true' --- megatron/arguments.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/megatron/arguments.py b/megatron/arguments.py index 5730f47e1d..26fed39c49 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -735,7 +735,7 @@ def _add_logging_args(parser): help='The wandb experiment name.') group.add_argument('--wandb-save-dir', type=str, default='', help='Path to save the wandb results locally.') - group.add_argument('--enable-onelogger', action='store_false', + group.add_argument('--enable-onelogger', action='store_true', help='If set, use one_logger to track e2e metrics') return parser From 46ca3db13fc21348a055456fd300cda015ce2c1e Mon Sep 17 00:00:00 2001 From: Jianbin Chang Date: Wed, 17 Jan 2024 09:16:52 -0800 Subject: [PATCH 134/296] Refactor DistributedOptimizer for MoE model support --- megatron/arguments.py | 2 - megatron/optimizer/__init__.py | 180 +++++++--- megatron/optimizer/distrib_optimizer.py | 308 ++++++++++-------- megatron/optimizer/optimizer.py | 129 ++++++-- ...eps_core_enabled_te_8experts2parallel.json | 2 +- ...bled_te_8experts2parallel_groupedGEMM.json | 2 +- ...odes_50steps_core_enabled_te_2experts.json | 2 +- ...eps_core_enabled_te_4experts2parallel.json | 2 +- 8 files changed, 416 insertions(+), 211 deletions(-) diff --git a/megatron/arguments.py b/megatron/arguments.py index 90d8651f17..8ff864cf05 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -402,8 +402,6 @@ def validate_args(args, defaults={}): assert args.num_experts is not None, "num_experts must be non None to use expert model parallelism" assert args.num_experts % args.expert_model_parallel_size == 0, \ "Number of experts should be a multiple of expert model parallel_size." - assert not args.use_distributed_optimizer, \ - "Expert parallelism is not suppored with distributed optimizer." assert not args.fp16, \ "Expert parallelism is not supported with fp16 training." 
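The refactor in this patch series splits parameters into dense and expert-parallel groups, so the distributed optimizer keeps handling the dense weights while expert weights get their own optimizer. As a rough, hedged illustration of that split (the helper name and the model_chunk object are made up for this sketch; the `allreduce` attribute check is the convention the patch itself uses in get_param_groups below):

def split_dense_and_expert_params(model_chunk):
    # Illustrative only: partition params the way the refactored optimizer setup does.
    dense_params, expert_params = [], []
    for param in model_chunk.parameters():
        if not param.requires_grad:
            continue
        # Expert-parallel params carry allreduce=False in Megatron; they are
        # reduced over a smaller process group, so they are kept out of the
        # data-parallel-sharded distributed optimizer.
        if getattr(param, 'allreduce', True):
            dense_params.append(param)
        else:
            expert_params.append(param)
    return dense_params, expert_params

Each partition then gets its own optimizer instance, and the two are driven together through the ChainedOptimizer introduced further down in this patch.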
if args.tensor_model_parallel_size > 1: diff --git a/megatron/optimizer/__init__.py b/megatron/optimizer/__init__.py index 33744a2f3a..f7cbca0466 100644 --- a/megatron/optimizer/__init__.py +++ b/megatron/optimizer/__init__.py @@ -7,26 +7,53 @@ from .distrib_optimizer import DistributedOptimizer from .grad_scaler import ConstantGradScaler, DynamicGradScaler -from .optimizer import Float16OptimizerWithFloat16Params, FP32Optimizer +from .optimizer import ( + Float16OptimizerWithFloat16Params, + FP32Optimizer, + ChainedOptimizer, +) -def get_param_groups(modules, + +def get_param_groups(model_chunks, no_weight_decay_cond, scale_lr_cond, lr_mult): - """creates param groups based on weight decay condition (regularized vs non regularized) - and learning rate scale condition (args.lr vs lr_mult * args.lr) - scale_lr_cond is used during finetuning where head of the network requires a scaled - version of the base learning rate. + """Create parameter groups for optimizer. + + Creates parameter groups based on weight decay condition (regularized vs + non regularized), learning rate scale condition (args.lr vs lr_mult * args.lr), + and whether it is expert parameters. scale_lr_cond is used during finetuning + where head of the network requires a scaled version of the base learning rate. + + Args: + model_chunks (List[MegatronModule]): model chunks to create parameter + groups for. + no_weight_decay_cond (func): function to determine whether a parameter + should not perform weight decay. + scale_lr_cond (func): function to determine whether a parameter + should have a scaled learning rate. + lr_mult (float): learning rate multiplier for parameters that + satisfy scale_lr_cond. """ - wd_no_scale_lr = [] - wd_scale_lr = [] - no_wd_no_scale_lr = [] - no_wd_scale_lr = [] - for module in modules: - for name, param in module.named_parameters(): + # map (wd_mult, lr_mult, is_expert_parallel) to params + params_map = { + (1.0, 1.0, False): [], + (1.0, 1.0, True): [], + (1.0, lr_mult, False): [], + (1.0, lr_mult, True): [], + (0.0, 1.0, False): [], + (0.0, 1.0, True): [], + (0.0, lr_mult, False): [], + (0.0, lr_mult, True): [], + } + + for model_chunk in model_chunks: + for name, param in model_chunk.named_parameters(): if not param.requires_grad: continue + is_expert_parallel = not getattr(param, 'allreduce', True) + if no_weight_decay_cond is not None: no_wd = no_weight_decay_cond(name, param) else: @@ -39,37 +66,38 @@ def get_param_groups(modules, scale_lr = False if not no_wd and not scale_lr: - wd_no_scale_lr.append(param) + wd_mult, lr_mult = 1.0, 1.0 elif not no_wd and scale_lr: - wd_scale_lr.append(param) + wd_mult, lr_mult = 1.0, lr_mult elif no_wd and not scale_lr: - no_wd_no_scale_lr.append(param) + wd_mult, lr_mult = 0.0, 1.0 else: - no_wd_scale_lr.append(param) + wd_mult, lr_mult = 0.0, lr_mult + + params_map[(wd_mult, lr_mult, is_expert_parallel)].append(param) param_groups = [] - if len(wd_no_scale_lr): - param_groups.append({'params': wd_no_scale_lr, 'wd_mult': 1.0, 'lr_mult': 1.0}) - if len(wd_scale_lr): - param_groups.append({'params': wd_scale_lr, 'wd_mult': 1.0, 'lr_mult': lr_mult}) - if len(no_wd_no_scale_lr): - param_groups.append({'params': no_wd_no_scale_lr, 'wd_mult': 0.0, 'lr_mult': 1.0}) - if len(no_wd_scale_lr): - param_groups.append({'params': no_wd_scale_lr, 'wd_mult': 0.0, 'lr_mult': lr_mult}) + for (wd_mult, lr_mult, is_expert_parallel), params in params_map.items(): + if len(params) == 0: + continue + param_groups.append( + {'params': params, 'wd_mult': wd_mult, 'lr_mult': 
lr_mult, 'is_expert_parallel': is_expert_parallel} + ) return param_groups -def get_megatron_optimizer(model, - no_weight_decay_cond=None, - scale_lr_cond=None, - lr_mult=1.0): - args = get_args() - # Base optimizer. - param_groups = get_param_groups(model, - no_weight_decay_cond, - scale_lr_cond, - lr_mult) +def get_megatron_optimizer_based_on_param_groups(param_groups, grad_buffers=None): + """Get megatron optimizer based on parameter groups. + + For distributed optimizer, we need the parameter gradients to be stored in a + contiguous grad_buffer. + + Args: + param_groups (list): list of parameter groups. + grad_buffers (list, optional): list of gradient buffers. Defaults to None. + """ + args = get_args() if args.optimizer == 'adam': optimizer = Adam(param_groups, @@ -89,11 +117,18 @@ def get_megatron_optimizer(model, # Determine whether the params have main-grad field. params_have_main_grad = True + # If it is expert parameters, we do not use the distributed optimizer. + # TODO: enable support for distributed optimizer with expert parameters + # (need to support DistOpt across process group with size dp_size / ep_size). + use_distributed_optimizer = args.use_distributed_optimizer and not any( + [pg['is_expert_parallel'] for pg in param_groups] + ) + # Mixed precision optimizer. # - Note: both the Float16Optimizer and the DistributedOptimizer inherit # from the MixedPrecisionOptimizer, which manages any optimizer where # the model params and main params are distinct. - if args.fp16 or args.bf16 or args.use_distributed_optimizer: + if args.fp16 or args.bf16 or use_distributed_optimizer: # Grad scaler: # if loss-scale is provided, instantiate the constant scaler. @@ -118,24 +153,67 @@ def get_megatron_optimizer(model, growth_interval=args.loss_scale_window, hysteresis=args.hysteresis) - # Megatron optimizer. - opt_ty = DistributedOptimizer \ - if args.use_distributed_optimizer else \ - Float16OptimizerWithFloat16Params - return opt_ty(optimizer, - args.clip_grad, - args.log_num_zeros_in_grad, - args.check_for_nan_in_loss_and_grad, - params_have_main_grad, - args.fp16, - args.bf16, - args.params_dtype, - grad_scaler, - model) + optimizer_args = [ + optimizer, + args.clip_grad, + args.log_num_zeros_in_grad, + args.check_for_nan_in_loss_and_grad, + params_have_main_grad, + args.fp16, + args.bf16, + args.params_dtype, + grad_scaler, + ] + if use_distributed_optimizer: + optimizer = DistributedOptimizer(*optimizer_args, grad_buffers) + else: + optimizer = Float16OptimizerWithFloat16Params(*optimizer_args) + + return optimizer # FP32. return FP32Optimizer(optimizer, args.clip_grad, args.log_num_zeros_in_grad, args.check_for_nan_in_loss_and_grad, - params_have_main_grad, - model) + params_have_main_grad) + + +def get_megatron_optimizer(model_chunks, + no_weight_decay_cond=None, + scale_lr_cond=None, + lr_mult=1.0): + """Retrieve the Megatron optimizer for model chunks. + + We use separate optimizers for expert parameters and non-expert parameters. + + Args: + model_chunks (List[MegatronModule]): model chunks to get optimizer for. + no_weight_decay_cond (func, optional): function to determine whether a parameter + should not perform weight decay. Defaults to None. + scale_lr_cond (func, optional): function to determine whether a parameter + should have a scaled learning rate. Defaults to None. + lr_mult (float, optional): learning rate multiplier for parameters that + satisfy scale_lr_cond. Defaults to 1.0. + """ + # Collect param groups. 
+ param_groups = get_param_groups(model_chunks, no_weight_decay_cond, scale_lr_cond, lr_mult) + + # Collect grad buffers for distributed optimizer. + per_model_grad_buffers = {} + for model_idx, model_chunk in enumerate(model_chunks): + if hasattr(model_chunk, 'grad_buffers'): + per_model_grad_buffers[model_idx] = list(model_chunk.grad_buffers.values()) + + # Split param groups into dense and moe. + dense_param_groups = list(filter(lambda g: not g['is_expert_parallel'], param_groups)) + moe_param_groups = list(filter(lambda g: g['is_expert_parallel'], param_groups)) + + # Create optimizers. + optimizers = [get_megatron_optimizer_based_on_param_groups(dense_param_groups, per_model_grad_buffers)] + if len(moe_param_groups): + optimizers.append(get_megatron_optimizer_based_on_param_groups(moe_param_groups)) + + if len(optimizers) == 1: + return optimizers[0] + + return ChainedOptimizer(optimizers) diff --git a/megatron/optimizer/distrib_optimizer.py b/megatron/optimizer/distrib_optimizer.py index dce3b81677..0c763237ae 100644 --- a/megatron/optimizer/distrib_optimizer.py +++ b/megatron/optimizer/distrib_optimizer.py @@ -6,6 +6,7 @@ from apex.optimizers import FusedAdam as Adam import math import torch +import itertools from megatron import get_args from megatron import get_timers @@ -59,12 +60,16 @@ class DistributedOptimizer(MixedPrecisionOptimizer): use any loss scale. Note that for `bf16 = True`, we can have a constnat gradient scaler. Also for `bf16 = False`, we always require a grad scaler. - models: list of models (i.e., the virtual pipelining models). This - is used by the distributed optimizer for mapping parameters. + grad_buffers: the implementation of the distributed optimizer is + centered on using the contiguous grad buffer for communicating + grads & params between the model state and the optimizer state. + You can find a more detailed description in this document + https://github.com/NVIDIA/Megatron-LM/blob/main/docs/source/distrib_optimizer.md + . """ @classmethod - def build_model_gbuf_param_range_map(cls, model, dtype, gbuf_world_range, bucket_offset): + def build_model_gbuf_param_range_map(cls, grad_buffer, gbuf_world_range, bucket_offset): """ Build mapping from param reference to grad buffer shard ranges. @@ -92,7 +97,7 @@ def build_model_gbuf_param_range_map(cls, model, dtype, gbuf_world_range, bucket """ # Param range map. - param_world_index_map = model.grad_buffer_param_index_map[dtype] + param_world_index_map = grad_buffer.param_index_map param_range_map = {} for param, param_world_indexes in param_world_index_map.items(): @@ -125,7 +130,7 @@ def build_model_gbuf_param_range_map(cls, model, dtype, gbuf_world_range, bucket @classmethod - def build_model_gbuf_range(cls, model, dtype, bucket_index): + def build_model_gbuf_range(cls, grad_buffer, bucket_index): """ Build mapping between params and their grad buffers. @@ -139,7 +144,7 @@ def build_model_gbuf_range(cls, model, dtype, bucket_index): data_parallel_rank = mpu.get_data_parallel_rank(with_context_parallel=True) data_parallel_world_size = mpu.get_data_parallel_world_size(with_context_parallel=True) - bucket = model.grad_buffers[dtype].buckets[bucket_index] + bucket = grad_buffer.buckets[bucket_index] bucket_buffer = bucket.data gbuf_size = bucket_buffer.numel() assert gbuf_size % data_parallel_world_size == 0, \ @@ -161,8 +166,7 @@ def build_model_gbuf_range(cls, model, dtype, bucket_index): gbuf_world_range = gbuf_world_all_ranges[data_parallel_rank] # Get each param's ranges. 
- param_range_map = cls.build_model_gbuf_param_range_map(model, - dtype, + param_range_map = cls.build_model_gbuf_param_range_map(grad_buffer, gbuf_world_range, bucket.offset) @@ -175,40 +179,45 @@ def build_model_gbuf_range(cls, model, dtype, bucket_index): @classmethod - def build_model_gbuf_range_map(cls, model): + def build_gbuf_range_map(cls, grad_buffer): """ - Create param-to-grad-buffer mappings, for grad buffer data types - within a specific virtual model. + Build mapping between params and their grad buffers. These mappings are + partitioned according to data type. + + Iterate through all buckets of grad buffer to construct param ranges + that this rank "owns" (the dp_rank'th shard of each bucket, where each + shard is 1/dp_world_size of the bucket). + + Args: + grad_buffer (GradBuffer): grad buffer to build mapping for. """ - # Iterate through all buckets to construct param ranges that this rank "owns" - # (the dp_rank'th shard of each bucket, where each shard is 1/dp_world_size - # of the bucket). return { - dtype : [cls.build_model_gbuf_range(model, dtype, bucket_index) - for bucket_index in range(len(model.grad_buffers[dtype].buckets))] - for dtype in model.grad_buffers + grad_buffer.dtype: [ + cls.build_model_gbuf_range(grad_buffer, bucket_index) + for bucket_index in range(len(grad_buffer.buckets)) + ] } @classmethod - def build_model_param_gbuf_map(cls, model_gbuf_ranges): + def build_model_param_gbuf_map(cls, gbuf_ranges): """ - Create a reverse of the model_gbuf_ranges, for referencing in + Create a reverse of the gbuf_ranges, for referencing in opposite direction. """ param_gbuf_map = {} - for model_index, model_gbuf_range_map in enumerate(model_gbuf_ranges): - for dtype, gbuf_range_map_for_all_buckets in model_gbuf_range_map.items(): + for gbuf_index, gbuf_range_map in enumerate(gbuf_ranges): + for dtype, gbuf_range_map_for_all_buckets in gbuf_range_map.items(): for bucket_index, gbuf_range_map in enumerate(gbuf_range_map_for_all_buckets): for param, _ in gbuf_range_map["param_map"].items(): assert param not in param_gbuf_map, \ "Param should not be in param_gbuf_map; each param only belongs to a single bucket" - param_gbuf_map[param] = (model_index, dtype, bucket_index) + param_gbuf_map[param] = (gbuf_index, dtype, bucket_index) return param_gbuf_map @classmethod - def build_optimizer_group_ranges(cls, param_groups, model_gbuf_ranges): + def build_optimizer_group_ranges(cls, param_groups, gbuf_ranges): """ Create optimizer groups. @@ -240,8 +249,8 @@ def build_optimizer_group_ranges(cls, param_groups, model_gbuf_ranges): # saving and loading checkpoints. 
local_param_group_map = {} group_ranges = [ {"params": []} for _ in param_groups ] - for model_gbuf_range_map in model_gbuf_ranges: - for dtype, gbuf_range_map_for_all_buckets in model_gbuf_range_map.items(): + for gbuf_range_map in gbuf_ranges: + for dtype, gbuf_range_map_for_all_buckets in gbuf_range_map.items(): for gbuf_range_map in gbuf_range_map_for_all_buckets: for param in gbuf_range_map["param_map"]: group_index = world_param_group_map[param] @@ -260,7 +269,7 @@ def build_optimizer_group_ranges(cls, param_groups, model_gbuf_ranges): @classmethod def build_model_and_main_param_groups(cls, - model_gbuf_ranges, + gbuf_ranges, param_gbuf_map, opt_group_ranges): """ @@ -306,8 +315,8 @@ def build_model_and_main_param_groups(cls, assert model_param.requires_grad - model_index, dtype, bucket_index = param_gbuf_map[model_param] - gbuf_range = model_gbuf_ranges[model_index][dtype][bucket_index] + gbuf_index, dtype, bucket_index = param_gbuf_map[model_param] + gbuf_range = gbuf_ranges[gbuf_index][dtype][bucket_index] param_range = gbuf_range["param_map"][model_param]["param"] # fp16, bf16 params. @@ -366,7 +375,7 @@ def build_model_and_main_param_groups(cls, def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, check_for_nan_in_grad, params_have_main_grad, fp16, - bf16, params_dtype, grad_scaler, models): + bf16, params_dtype, grad_scaler, per_model_grad_buffers): """ See top of class definition for argument descriptions. @@ -380,30 +389,37 @@ def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, super().__init__( optimizer, clip_grad, log_num_zeros_in_grad, check_for_nan_in_grad, params_have_main_grad, - fp16, bf16, params_dtype, grad_scaler, models) + fp16, bf16, params_dtype, grad_scaler) assert isinstance(optimizer, Adam), \ "Only Adam currently supported, due to checkpointing requirements." # Model grad buffer ranges. - self.model_gbuf_ranges = [] + assert per_model_grad_buffers, "grad_buffers must be provided" + self.grad_buffers = list(itertools.chain(*per_model_grad_buffers.values())) + self.per_model_grad_buffers = per_model_grad_buffers + self.gbuf_idx_to_model_idx_map = {} + gbuf_idx = 0 + for model_idx, grad_buffers in self.per_model_grad_buffers.items(): + for _ in grad_buffers: + self.gbuf_idx_to_model_idx_map[gbuf_idx] = model_idx + gbuf_idx += 1 + self.gbuf_ranges = [] self.per_bucket_numel = [] self.per_bucket_numel_unpadded = [] - for _, model_chunk in enumerate(self.models): + for grad_buffer in self.grad_buffers: self.per_bucket_numel.append( - {dtype: [bucket.data.numel() for bucket in model_chunk.grad_buffers[dtype].buckets] - for dtype in model_chunk.grad_buffers}) + {grad_buffer.dtype: [bucket.data.numel() for bucket in grad_buffer.buckets]}) self.per_bucket_numel_unpadded.append( - {dtype: [bucket.numel_unpadded for bucket in model_chunk.grad_buffers[dtype].buckets] - for dtype in model_chunk.grad_buffers}) - self.model_gbuf_ranges.append(self.build_model_gbuf_range_map(model_chunk)) + {grad_buffer.dtype: [bucket.numel_unpadded for bucket in grad_buffer.buckets]}) + self.gbuf_ranges.append(self.build_gbuf_range_map(grad_buffer)) self.model_param_gbuf_map = \ - self.build_model_param_gbuf_map(self.model_gbuf_ranges) + self.build_model_param_gbuf_map(self.gbuf_ranges) # Optimizer ranges. self.model_param_group_index_map, self.opt_group_ranges = \ self.build_optimizer_group_ranges(self.optimizer.param_groups, - self.model_gbuf_ranges) + self.gbuf_ranges) # Allocate main param shards. 
( @@ -412,7 +428,7 @@ def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, self.shard_float16_groups, self.shard_fp32_groups, self.shard_fp32_from_float16_groups, - ) = self.build_model_and_main_param_groups(self.model_gbuf_ranges, + ) = self.build_model_and_main_param_groups(self.gbuf_ranges, self.model_param_gbuf_map, self.opt_group_ranges) @@ -421,64 +437,66 @@ def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, # storage & have their own dtype. This is safe because the param # dtype size is always <= grad dtype size. self.param_buffers = [] - for model_index, model in enumerate(self.models): - current_param_buffers = {} - for dtype, grad_buffer in model.grad_buffers.items(): - size_ratio = torch.finfo(dtype).bits // torch.finfo(params_dtype).bits - current_param_buffers[dtype] = [] - for bucket in grad_buffer.buckets: - - # Handle older/newer method for getting untyped storage. + for gbuf_index, grad_buffer in enumerate(self.grad_buffers): + size_ratio = torch.finfo(grad_buffer.dtype).bits // torch.finfo(params_dtype).bits + current_param_buffers = [] + for bucket in grad_buffer.buckets: + + # Handle older/newer method for getting untyped storage. + try: + storage = bucket.data.untyped_storage() + except: try: - storage = bucket.data.untyped_storage() + storage = bucket.data.storage()._untyped() except: - try: - storage = bucket.data.storage()._untyped() - except: - storage = bucket.data.storage().untyped() - - # Typed param buffer. - param_buffer = torch.tensor( - storage, - dtype = params_dtype, - device = bucket.data.device) - - # .storage() ignores views / slices, so param_buffer now points to the start - # of the grad_buffer instead of to the start of each bucket. As a result, - # add bucket.offset to make sure param_buffers point to the right region of - # memory. - # Since we want the start of each bucket's param_buffer to coincide with the - # start of the same bucket's grad_buffer (this ensures that zeroing the grad - # buffer does not zero out params in the param_buffer before they are copied - # into the model_params), multiply the offset by the size ratio of grads and - # params. - offset = bucket.offset * size_ratio - param_buffer = param_buffer[offset:offset+bucket.data.numel()] - assert param_buffer.data_ptr() == bucket.data.data_ptr(), \ - "param_buffer and grad_buffer for same bucket should start at the same byte address" - assert param_buffer.numel() == bucket.data.numel(), \ - "param_buffer and grad_buffer for same bucket should have the same number of elements" - current_param_buffers[dtype].append(param_buffer) + storage = bucket.data.storage().untyped() + + # Typed param buffer. + param_buffer = torch.tensor( + storage, + dtype = params_dtype, + device = bucket.data.device) + + # .storage() ignores views / slices, so param_buffer now points to the start + # of the grad_buffer instead of to the start of each bucket. As a result, + # add bucket.offset to make sure param_buffers point to the right region of + # memory. + # Since we want the start of each bucket's param_buffer to coincide with the + # start of the same bucket's grad_buffer (this ensures that zeroing the grad + # buffer does not zero out params in the param_buffer before they are copied + # into the model_params), multiply the offset by the size ratio of grads and + # params. 
+ offset = bucket.offset * size_ratio + param_buffer = param_buffer[offset:offset+bucket.data.numel()] + assert param_buffer.data_ptr() == bucket.data.data_ptr(), \ + "param_buffer and grad_buffer for same bucket should start at the same byte address" + assert param_buffer.numel() == bucket.data.numel(), \ + "param_buffer and grad_buffer for same bucket should have the same number of elements" + current_param_buffers.append(param_buffer) self.param_buffers.append(current_param_buffers) # Now construct data structures to manage all-gather handles. self.all_gather_handles = [] self.all_gather_handle_index_to_bucket_index_map = [] self.model_index_to_all_gather_handle_index_map = {} + self.all_gather_handle_indices = [] self.param_to_all_gather_handle_index_map = {} self.param_buffer_copied = [] self.pbuf_view_items = self.get_model_param_buffer_dp_views() - for (model_index, dtype, bucket_index, _, _) in self.pbuf_view_items: - self.all_gather_handle_index_to_bucket_index_map.append((model_index, dtype, bucket_index)) + for (gbuf_index, dtype, bucket_index, _, _) in self.pbuf_view_items: + self.all_gather_handle_index_to_bucket_index_map.append( + (gbuf_index, dtype, bucket_index) + ) all_gather_handle_index = len(self.all_gather_handle_index_to_bucket_index_map) - 1 - # Store all all_gather_handle_indices relevant to a particular model chunk. - if model_index not in self.model_index_to_all_gather_handle_index_map: - self.model_index_to_all_gather_handle_index_map[model_index] = [] - self.model_index_to_all_gather_handle_index_map[model_index].append(all_gather_handle_index) + # Store all all_gather_handle_indices. + model_idx = self.gbuf_idx_to_model_idx_map[gbuf_index] + if model_idx not in self.model_index_to_all_gather_handle_index_map: + self.model_index_to_all_gather_handle_index_map[model_idx] = [] + self.model_index_to_all_gather_handle_index_map[model_idx].append(all_gather_handle_index) - for param in self.models[model_index].grad_buffers[dtype].buckets[bucket_index].params_list: + for param in self.grad_buffers[gbuf_index].buckets[bucket_index].params_list: self.param_to_all_gather_handle_index_map[param] = all_gather_handle_index self.param_buffer_copied.append(False) self.num_all_gather_handles = len(self.all_gather_handle_index_to_bucket_index_map) @@ -505,8 +523,8 @@ def get_model_param_range_map(self, param): Given a model param, get the index sub-range of the param that this data-parallel rank owns. """ - model_index, dtype, bucket_index = self.model_param_gbuf_map[param] - gbuf_range_map = self.model_gbuf_ranges[model_index][dtype][bucket_index] + gbuf_index, dtype, bucket_index = self.model_param_gbuf_map[param] + gbuf_range_map = self.gbuf_ranges[gbuf_index][dtype][bucket_index] param_range_map = gbuf_range_map["param_map"][param] return param_range_map @@ -590,7 +608,7 @@ def load_state_dict(self, state_dict): # Allocate 'dummy' data for optimizer state (i.e., torch.empty() below) # - Real data is overwritten during load_parameter_state(). state_dict_state = [] - for gbuf_range_maps in self.model_gbuf_ranges: + for gbuf_range_maps in self.gbuf_ranges: for gbuf_range_map_for_all_buckets in gbuf_range_maps.values(): for gbuf_range_map in gbuf_range_map_for_all_buckets: for model_param, param_range_map in \ @@ -639,8 +657,8 @@ def load_state_dict(self, state_dict): 'Skipping loading grad scaler ...') - def save_parameter_state(self, filename): - """Save parameter state (i.e., parameter & optimizer tensors). 
+ def get_parameter_state(self): + """Get parameter state (i.e., parameter & optimizer tensors). This method performs three steps: - For each DP rank, copy param & optimizer shards to contiguous CPU @@ -648,7 +666,6 @@ def save_parameter_state(self, filename): exp_avg_sq). - Gather contiguous buffers on DP rank 0 and concatenate to world buffers. - - Save world buffers to disk (i.e., distrib_opt.pt). """ # Data parallelism variables. @@ -660,7 +677,7 @@ def save_parameter_state(self, filename): # Collect param states. state = {"per_bucket_numel": self.per_bucket_numel, "per_bucket_numel_unpadded": self.per_bucket_numel_unpadded} - for model_idx, gbuf_range_maps in enumerate(self.model_gbuf_ranges): + for gbuf_idx, gbuf_range_maps in enumerate(self.gbuf_ranges): # Iterate grad buffers (by data type). dtype_state = {} @@ -670,8 +687,7 @@ def save_parameter_state(self, filename): for bucket_idx, gbuf_range_map in enumerate(gbuf_range_map_for_all_buckets): # Compute local DP contiguous shard's size. - model = self.models[model_idx] - gbuf_world_numel = model.grad_buffers[dtype].buckets[bucket_idx].data.numel() + gbuf_world_numel = self.grad_buffers[gbuf_idx].buckets[bucket_idx].data.numel() assert gbuf_world_numel % data_parallel_world_size == 0 gbuf_local_numel = gbuf_world_numel // data_parallel_world_size local_shards = {key: torch.empty((gbuf_local_numel,), @@ -730,18 +746,28 @@ def save_parameter_state(self, filename): # Collect world state. dtype_state[dtype] = world_tensors - state[model_idx] = dtype_state + state[gbuf_idx] = dtype_state - # Save param state. + return state + + + def save_parameter_state(self, filename): + """Save the distributed parameter state on DP rank 0. + + Args: + filename (str): path to save parameter state to. + """ + + data_parallel_rank = mpu.get_data_parallel_rank(with_context_parallel=True) + state_dict = self.get_parameter_state() if data_parallel_rank == 0: - torch.save(state, filename) + torch.save(state_dict, filename) - def load_parameter_state(self, filename): + def load_parameter_state_from_state_dict(self, state_dict): """Load parameter state (i.e., parameter & optimizer tensors). - This method performs the reverse of save_parameter_state(): - - Load world buffers from disk (i.e., distrib_opt.pt). + This method performs the reverse of get_parameter_state(): - Scatter contiguous buffers from DP rank 0 to each DP rank (each DP rank receives its relevant subset of the world buffers). - For each DP rank, copy param & optimizer shards from contiguous CPU @@ -755,25 +781,14 @@ def load_parameter_state(self, filename): data_parallel_group_gloo = mpu.get_data_parallel_group_gloo(with_context_parallel=True) data_parallel_global_ranks = list(mpu._DATA_PARALLEL_GLOBAL_RANKS_WITH_CP) - # Load on DP rank 0. - if data_parallel_rank == 0: - loaded_state = torch.load(filename) - if "per_bucket_numel_unpadded" in loaded_state: - per_bucket_numel_unpadded_in_checkpoint = loaded_state["per_bucket_numel_unpadded"] - assert self.per_bucket_numel_unpadded == per_bucket_numel_unpadded_in_checkpoint, \ - (f"Number of unpadded elements in each bucket need to be the same in current run " - f"({self.per_bucket_numel_unpadded}) and checkpoint " - f"({per_bucket_numel_unpadded_in_checkpoint})") - # Scatter tensors to all DP ranks. 
- for model_idx, gbuf_range_maps in enumerate(self.model_gbuf_ranges): + for gbuf_idx, gbuf_range_maps in enumerate(self.gbuf_ranges): for dtype, gbuf_range_map_for_all_buckets in gbuf_range_maps.items(): for bucket_idx, gbuf_range_map in enumerate(gbuf_range_map_for_all_buckets): # Compute local DP contiguous shard's size. - model = self.models[model_idx] - gbuf_world_numel = model.grad_buffers[dtype].buckets[bucket_idx].data.numel() - assert gbuf_world_numel == self.per_bucket_numel[model_idx][dtype][bucket_idx] + gbuf_world_numel = self.grad_buffers[gbuf_idx].buckets[bucket_idx].data.numel() + assert gbuf_world_numel == self.per_bucket_numel[gbuf_idx][dtype][bucket_idx] assert gbuf_world_numel % data_parallel_world_size == 0 gbuf_local_numel = gbuf_world_numel // data_parallel_world_size @@ -788,7 +803,7 @@ def load_parameter_state(self, filename): # Scatter tensor list. if data_parallel_rank == 0: - world_tensor_for_all_buckets = loaded_state[model_idx][dtype][key] + world_tensor_for_all_buckets = state_dict[gbuf_idx][dtype][key] if not isinstance(world_tensor_for_all_buckets, list): world_tensor_for_all_buckets = [world_tensor_for_all_buckets] assert bucket_idx < len(world_tensor_for_all_buckets), \ @@ -798,11 +813,11 @@ def load_parameter_state(self, filename): # This tensor might be bigger or smaller than expected (depending on # relative sizes of per_bucket_numel_in_checkpoint and self.per_bucket_numel). world_tensor = world_tensor_for_all_buckets[bucket_idx] - if "per_bucket_numel" in loaded_state: + if "per_bucket_numel" in state_dict: numel_in_checkpoint = \ - loaded_state["per_bucket_numel"][model_idx][dtype][bucket_idx] - numel = self.per_bucket_numel[model_idx][dtype][bucket_idx] - numel_unpadded = self.per_bucket_numel_unpadded[model_idx][dtype][bucket_idx] + state_dict["per_bucket_numel"][gbuf_idx][dtype][bucket_idx] + numel = self.per_bucket_numel[gbuf_idx][dtype][bucket_idx] + numel_unpadded = self.per_bucket_numel_unpadded[gbuf_idx][dtype][bucket_idx] assert world_tensor.numel() == numel_in_checkpoint assert numel_unpadded <= world_tensor.numel(), \ ("True number of elements should be fewer than number of elements in " @@ -863,6 +878,27 @@ def load_parameter_state(self, filename): local_shards[key][gbuf_local_start:gbuf_local_end]) + def load_parameter_state(self, filename): + """Load the distributed parameter state from disk. + + Args: + filename (str): path to load parameter state from. + """ + + data_parallel_rank = mpu.get_data_parallel_rank(with_context_parallel=True) + state_dict = None + if data_parallel_rank == 0: + state_dict = torch.load(filename) + if "per_bucket_numel_unpadded" in state_dict: + per_bucket_numel_unpadded_in_checkpoint = state_dict["per_bucket_numel_unpadded"] + assert self.per_bucket_numel_unpadded == per_bucket_numel_unpadded_in_checkpoint, \ + (f"Number of unpadded elements in each bucket need to be the same in current run " + f"({self.per_bucket_numel_unpadded}) and checkpoint " + f"({per_bucket_numel_unpadded_in_checkpoint})") + + self.load_parameter_state_from_state_dict(state_dict) + + def zero_grad(self, set_to_none=True): """ Zero grads. @@ -916,12 +952,12 @@ def get_model_param_buffer_dp_views(self): # In all cases, we want all_gather and all_gather_handle.wait() to be called in the same order, # and all_gather_handle.wait() needs to be called just before the corresponding forward pass. 
view_items = [] - for model_index, buffers in enumerate(self.param_buffers): + for gbuf_index, buffers in enumerate(self.param_buffers): view_items_per_model_chunk = [] - for dtype, buf_for_all_buckets in buffers.items(): - for bucket_index, buf in enumerate(buf_for_all_buckets): - buf_views = shard_buffer(buf) - view_items_per_model_chunk.insert(0, (model_index, dtype, bucket_index, buf, buf_views)) + dtype = self.grad_buffers[gbuf_index].dtype + for bucket_index, buf in enumerate(buffers): + buf_views = shard_buffer(buf) + view_items_per_model_chunk.insert(0, (gbuf_index, dtype, bucket_index, buf, buf_views)) view_items.extend(view_items_per_model_chunk) return view_items @@ -944,7 +980,7 @@ def _dispatch_gather_model_params(self, all_gather_handle_index): # across all data-parallel ranks, due to padding (done in grad_buffer.py), # and extended to the param_bufs. Thus, all sub-views will have consistent # start / end indexes across data-parallel ranks. - (model_index, dtype, bucket_index, pbuf, pbuf_views) = self.pbuf_view_items[all_gather_handle_index] + (gbuf_index, dtype, bucket_index, pbuf, pbuf_views) = self.pbuf_view_items[all_gather_handle_index] assert all_gather_handle_index == len(self.all_gather_handles) all_gather_handle = torch.distributed._all_gather_base( pbuf, @@ -954,7 +990,7 @@ def _dispatch_gather_model_params(self, all_gather_handle_index): ) self.all_gather_handles.append(all_gather_handle) assert self.all_gather_handle_index_to_bucket_index_map[all_gather_handle_index] == \ - (model_index, dtype, bucket_index) + (gbuf_index, dtype, bucket_index) self.param_buffer_copied.append(False) if not self.overlap_param_gather: @@ -984,16 +1020,17 @@ def hook(module, *unused): return hook - def finish_param_sync(self, model_index, *unused): """ Finishes all necessary param syncs for the model_index'th model chunk. """ + if model_index not in self.model_index_to_all_gather_handle_index_map: + return + all_gather_handle_indices = self.model_index_to_all_gather_handle_index_map[model_index] for all_gather_handle_index in all_gather_handle_indices: self._finish_param_sync_helper(all_gather_handle_index) - def _finish_param_sync_helper(self, all_gather_handle_index): """ Waits on all_gather_handle if necessary, then copies params from param_buffer @@ -1030,16 +1067,17 @@ def _copy_params_from_param_buffer(self, all_gather_handle_index): """ Copy params from param_buffer to model_params. """ - (model_index, dtype, bucket_index) = self.all_gather_handle_index_to_bucket_index_map[ + (gbuf_index, dtype, bucket_index) = self.all_gather_handle_index_to_bucket_index_map[ all_gather_handle_index] - model = self.models[model_index] + grad_buffer = self.grad_buffers[gbuf_index] + if self.update_successful: # Copy from param buffer to each param. - param_map = model.grad_buffer_param_index_map[dtype] + param_map = grad_buffer.param_index_map for param, (buf_start, buf_end, bucket_index_in_param_map) in param_map.items(): if bucket_index == bucket_index_in_param_map: - bucket_offset = model.grad_buffers[dtype].buckets[bucket_index].offset - param_buf = self.param_buffers[model_index][dtype][bucket_index] + bucket_offset = grad_buffer.buckets[bucket_index].offset + param_buf = self.param_buffers[gbuf_index][bucket_index] # buf_start and buf_end store position of this parameter in the full grad_buffer, # so need to adjust these indices (by subtracting out bucket_offset) since we # have independent param_bufs for each bucket. 
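The bucket bookkeeping above reduces to a simple invariant: every bucket's flat buffer is padded to a multiple of the data-parallel world size, and rank r owns the r'th contiguous slice of it. A minimal, self-contained sketch of that sharding (shard_flat_buffer is an illustrative stand-in for Megatron's shard_buffer helper; the sizes are arbitrary):

import torch

def shard_flat_buffer(buf, data_parallel_world_size):
    # Rank r "owns" the r'th 1/world_size slice; the views share storage with buf.
    assert buf.numel() % data_parallel_world_size == 0
    shard_size = buf.numel() // data_parallel_world_size
    return [buf[r * shard_size:(r + 1) * shard_size]
            for r in range(data_parallel_world_size)]

bucket_grads = torch.zeros(1024)            # stand-in for one bucket's grad data
views = shard_flat_buffer(bucket_grads, 8)  # one view per data-parallel rank

Rank r reduce-scatters gradients into views[r], steps the optimizer on that shard only, and the updated parameters are all-gathered back through the param buffer that aliases the same storage, which is why the code above is careful about bucket offsets and keeping start/end indexes consistent across ranks.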
@@ -1049,8 +1087,8 @@ def _copy_params_from_param_buffer(self, all_gather_handle_index): # Zero out the grad buffer in preparation for next set of fwd / bwd passes after copy # completes (since param_buffer and grad_buffer are shared for each bucket). - param_buf = self.param_buffers[model_index][dtype][bucket_index] - grad_buf = model.grad_buffers[dtype].buckets[bucket_index].data + param_buf = self.param_buffers[gbuf_index][bucket_index] + grad_buf = grad_buffer.buckets[bucket_index].data assert param_buf.data_ptr() == grad_buf.data_ptr() grad_buf.zero_() @@ -1134,8 +1172,8 @@ def copy_group_params(shard_main_groups, model_groups): assert world_range.size == shard_main_param.nelement() - model_id, dtype, bucket_id = self.model_param_gbuf_map[model_param] - model_param_buffer = self.param_buffers[model_id][dtype][bucket_id] + gbuf_index, dtype, bucket_id = self.model_param_gbuf_map[model_param] + model_param_buffer = self.param_buffers[gbuf_index][bucket_id] shard_model_param = model_param_buffer.view(-1) \ [world_range.start:world_range.end] diff --git a/megatron/optimizer/optimizer.py b/megatron/optimizer/optimizer.py index 47d2001dbb..892b1105d5 100644 --- a/megatron/optimizer/optimizer.py +++ b/megatron/optimizer/optimizer.py @@ -7,6 +7,7 @@ from apex.multi_tensor_apply import multi_tensor_applier import amp_C import torch +import math from megatron import get_timers from megatron import print_rank_0 @@ -56,8 +57,7 @@ class MegatronOptimizer(ABC): def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, check_for_nan_in_grad, - params_have_main_grad, - models): + params_have_main_grad): """Input optimizer is the base optimizer for example Adam.""" self.optimizer = optimizer @@ -68,10 +68,6 @@ def __init__(self, optimizer, clip_grad, self.check_for_nan_in_grad = check_for_nan_in_grad self.params_have_main_grad = params_have_main_grad - # 'models' are retained for access to the contiguous grad buffers. - # (see distributed optimizer) - self.models = models - def get_parameters(self): params = [] @@ -211,18 +207,15 @@ class MixedPrecisionOptimizer(MegatronOptimizer): use any loss scale. Note that for `bf16 = True`, we can have a constnat gradient scaler. Also for `bf16 = False`, we always require a grad scaler. - models: list of models (i.e., the virtual pipelining models). This - is used by the distributed optimizer for mapping parameters. """ def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, check_for_nan_in_grad, params_have_main_grad, - fp16, bf16, params_dtype, grad_scaler, models): + fp16, bf16, params_dtype, grad_scaler): super().__init__( optimizer, clip_grad, log_num_zeros_in_grad, - check_for_nan_in_grad, params_have_main_grad, - models) + check_for_nan_in_grad, params_have_main_grad) self.fp16 = fp16 self.bf16 = bf16 @@ -370,18 +363,16 @@ class Float16OptimizerWithFloat16Params(MixedPrecisionOptimizer): use any loss scale. Note that for `bf16 = True`, we can have a constnat gradient scaler. Also for `bf16 = False`, we always require a grad scaler. - models: list of models (i.e., the virtual pipelining models). This - is used by the distributed optimizer for mapping parameters. 
""" def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, check_for_nan_in_grad, params_have_main_grad, fp16, bf16, - params_dtype, grad_scaler, models): + params_dtype, grad_scaler): super().__init__( optimizer, clip_grad, log_num_zeros_in_grad, check_for_nan_in_grad, params_have_main_grad, - fp16, bf16, params_dtype, grad_scaler, models) + fp16, bf16, params_dtype, grad_scaler) # ====================== # main parameter stuff @@ -569,13 +560,11 @@ class FP32Optimizer(MegatronOptimizer): def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, check_for_nan_in_grad, - params_have_main_grad, - models): + params_have_main_grad): super(FP32Optimizer, self).__init__( optimizer, clip_grad, log_num_zeros_in_grad, - check_for_nan_in_grad, params_have_main_grad, - models) + check_for_nan_in_grad, params_have_main_grad) self._scale = torch.tensor([1.0], dtype=torch.float, device='cuda') @@ -642,3 +631,105 @@ def state_dict(self): def load_state_dict(self, state_dict): self.optimizer.load_state_dict(state_dict) + + +class ChainedOptimizer(MegatronOptimizer): + """ChainedOptimizer is designed for chain of multiple optimizers. + + These optimizers are responsible for different parts of multiple models for + a training task and will be executed one by one when the model is updated. + + Args: + chained_optimizers: a list of optimizers. + """ + + # Remove these attributes which inherits from MegatronOptimizer. + state = None + param_groups = None + + def __init__(self, chained_optimizers): + self.chained_optimizers = chained_optimizers + self.param_groups = [] + for optimizer in self.chained_optimizers: + self.param_groups += optimizer.param_groups + + def zero_grad(self, set_to_none=True): + for optimizer in self.chained_optimizers: + optimizer.zero_grad(set_to_none) + + def get_loss_scale(self): + return self.chained_optimizers[0].get_loss_scale() + + def reload_model_params(self): + for optimizer in self.chained_optimizers: + optimizer.reload_model_params() + + def state_dict(self): + return [optimizer.state_dict() for optimizer in self.chained_optimizers] + + def load_state_dict(self, state_dict): + for optimizer, state in zip(self.chained_optimizers, state_dict): + optimizer.load_state_dict(state) + + def step(self, args, timers): + """ChainedOptimizer will step all optimizers one by one. + + Args: + args (argparse.Namespace): command-line arguments. + timers (Timers): timers used for profiling. + """ + + update_successful, grad_norm, num_zeros_in_grad = True, 0, 0 + grad_norms = [] + for optimizer in self.chained_optimizers: + _update_successful, _grad_norm, _num_zeros_in_grad = optimizer.step(args, timers) + update_successful &= _update_successful + grad_norms += [_grad_norm if _grad_norm else 0.] + num_zeros_in_grad += _num_zeros_in_grad if _num_zeros_in_grad else 0 + grad_norm = math.sqrt(sum([x**2 for x in grad_norms])) + + return update_successful, grad_norm, num_zeros_in_grad + + def save_parameter_state(self, filename): + """Save the distributed parameter states of all optimizers to a file. + + Args: + filename (str): path to save parameter state to. 
+ """ + data_parallel_rank = mpu.get_data_parallel_rank(with_context_parallel=True) + + states = [] + for optimizer in self.chained_optimizers: + if hasattr(optimizer, 'get_parameter_state'): + states.append(optimizer.get_parameter_state()) + else: + states.append(None) + + if data_parallel_rank == 0: + torch.save(states, filename) + + def load_parameter_state(self, filename): + """Load the distributed parameter states of all optimizers from a file. + + Args: + filename (str): path to load parameter state from. + """ + data_parallel_rank = mpu.get_data_parallel_rank(with_context_parallel=True) + num_of_optimizers = len(self.chained_optimizers) + if data_parallel_rank == 0: + states = torch.load(filename) + else: + states = [None] * num_of_optimizers + + assert len(states) == num_of_optimizers, "Number of optimizers in "\ + "checkpoint does not match number of optimizers in model." + + for optimizer, state in zip(self.chained_optimizers, states): + if hasattr(optimizer, 'load_parameter_state_from_state_dict'): + optimizer.load_parameter_state_from_state_dict(state) + + def finish_param_sync(self, model_index): + """Finish parameter synchronization for all optimizers. + """ + for optimizer in self.chained_optimizers: + optimizer.finish_param_sync(model_index) diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel.json b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel.json index 879ec6978b..a03930027e 100644 --- a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel.json +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79995, 10.8686, 10.86517, 10.801, 10.71238, 10.63884, 10.20088, 10.31027, 10.22057, 9.92076]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16119.0, 19347.0, 19548.0, 18978.0, 17241.0, 18198.0, 15695.0, 18267.0, 18834.0, 19678.0]}, "iteration_timing_avg": 0.2742326470588235} +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79995, 10.86816, 10.86502, 10.80149, 10.71138, 10.63815, 10.19945, 10.30719, 10.2155, 9.90987]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16119.0, 19407.0, 19395.0, 18709.0, 17372.0, 18070.0, 15753.0, 18008.0, 18946.0, 19784.0]}, "iteration_timing_avg": 0.2843088235294118} diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_groupedGEMM.json b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_groupedGEMM.json index 65722ad370..e632407437 100644 --- a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_groupedGEMM.json +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_groupedGEMM.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.80299, 10.85374, 10.86293, 10.7946, 10.72149, 10.6366, 10.20914, 10.31959, 10.21976, 9.9151]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16202.0, 19844.0, 19572.0, 18806.0, 17390.0, 17902.0, 15816.0, 17990.0, 18341.0, 19322.0]}, "iteration_timing_avg": 0.1749138235294118} \ No newline at end of file +{"lm loss": 
{"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.80299, 10.85298, 10.86262, 10.79516, 10.72134, 10.63641, 10.20727, 10.31594, 10.21293, 9.90292]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16202.0, 19817.0, 19787.0, 18858.0, 17645.0, 17931.0, 15872.0, 18124.0, 18472.0, 19200.0]}, "iteration_timing_avg": 0.1745276470588235} diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_te_2experts.json b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_te_2experts.json index f007a01b52..876e61c788 100644 --- a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_te_2experts.json +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_te_2experts.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79116, 10.83954, 10.81173, 10.75983, 10.65557, 10.56982, 10.08268, 10.21338, 10.10761, 9.8191]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [2917.0, 3465.0, 3576.0, 3347.0, 3187.0, 3215.0, 2817.0, 3455.0, 3838.0, 3755.0]}, "iteration_timing_avg": 0.23038411764705882} +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.7912, 10.83963, 10.81166, 10.76004, 10.65544, 10.56972, 10.08242, 10.21343, 10.10767, 9.8192]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [3019.0, 3460.0, 3563.0, 3285.0, 3236.0, 3287.0, 2839.0, 3374.0, 3794.0, 3731.0]}, "iteration_timing_avg": 0.23343970588235297} diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_te_4experts2parallel.json b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_te_4experts2parallel.json index fbf3695098..70e1102250 100644 --- a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_te_4experts2parallel.json +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_te_4experts2parallel.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.82661, 10.87444, 10.85653, 10.80493, 10.70751, 10.63374, 10.15545, 10.27641, 10.18349, 9.87672]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [6999.0, 8493.0, 8974.0, 8653.0, 7725.0, 8045.0, 7067.0, 8642.0, 8950.0, 9562.0]}, "iteration_timing_avg": 0.24783852941176465} +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.82669, 10.87408, 10.85677, 10.80443, 10.7074, 10.63353, 10.15437, 10.27397, 10.17955, 9.86891]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [7132.0, 8526.0, 8992.0, 8638.0, 7665.0, 8074.0, 7151.0, 8425.0, 8985.0, 9522.0]}, "iteration_timing_avg": 0.27723117647058826} From 6083743d1958b49ca170828dfaed5e0f277ce93b Mon Sep 17 00:00:00 2001 From: Deepak Narayanan Date: Wed, 17 Jan 2024 09:42:59 -0800 Subject: [PATCH 135/296] Run black on megatron/optimizer --- megatron/optimizer/__init__.py | 59 +-- megatron/optimizer/clip_grads.py | 38 +- megatron/optimizer/distrib_optimizer.py | 491 ++++++++++++------------ megatron/optimizer/grad_scaler.py | 17 +- megatron/optimizer/optimizer.py | 267 +++++++------ megatron/optimizer/utils.py | 6 +- 6 files changed, 439 insertions(+), 439 deletions(-) diff --git a/megatron/optimizer/__init__.py b/megatron/optimizer/__init__.py index f7cbca0466..395485bf00 100644 --- 
a/megatron/optimizer/__init__.py +++ b/megatron/optimizer/__init__.py @@ -14,10 +14,7 @@ ) -def get_param_groups(model_chunks, - no_weight_decay_cond, - scale_lr_cond, - lr_mult): +def get_param_groups(model_chunks, no_weight_decay_cond, scale_lr_cond, lr_mult): """Create parameter groups for optimizer. Creates parameter groups based on weight decay condition (regularized vs @@ -81,7 +78,12 @@ def get_param_groups(model_chunks, if len(params) == 0: continue param_groups.append( - {'params': params, 'wd_mult': wd_mult, 'lr_mult': lr_mult, 'is_expert_parallel': is_expert_parallel} + { + 'params': params, + 'wd_mult': wd_mult, + 'lr_mult': lr_mult, + 'is_expert_parallel': is_expert_parallel, + } ) return param_groups @@ -100,19 +102,19 @@ def get_megatron_optimizer_based_on_param_groups(param_groups, grad_buffers=None args = get_args() if args.optimizer == 'adam': - optimizer = Adam(param_groups, - lr=args.lr, - weight_decay=args.weight_decay, - betas=(args.adam_beta1, args.adam_beta2), - eps=args.adam_eps) + optimizer = Adam( + param_groups, + lr=args.lr, + weight_decay=args.weight_decay, + betas=(args.adam_beta1, args.adam_beta2), + eps=args.adam_eps, + ) elif args.optimizer == 'sgd': - optimizer = SGD(param_groups, - lr=args.lr, - weight_decay=args.weight_decay, - momentum=args.sgd_momentum) + optimizer = SGD( + param_groups, lr=args.lr, weight_decay=args.weight_decay, momentum=args.sgd_momentum + ) else: - raise Exception('{} optimizer is not supported.'.format( - args.optimizer)) + raise Exception('{} optimizer is not supported.'.format(args.optimizer)) # Determine whether the params have main-grad field. params_have_main_grad = True @@ -151,7 +153,8 @@ def get_megatron_optimizer_based_on_param_groups(param_groups, grad_buffers=None growth_factor=2.0, backoff_factor=0.5, growth_interval=args.loss_scale_window, - hysteresis=args.hysteresis) + hysteresis=args.hysteresis, + ) optimizer_args = [ optimizer, @@ -172,16 +175,18 @@ def get_megatron_optimizer_based_on_param_groups(param_groups, grad_buffers=None return optimizer # FP32. - return FP32Optimizer(optimizer, args.clip_grad, - args.log_num_zeros_in_grad, - args.check_for_nan_in_loss_and_grad, - params_have_main_grad) + return FP32Optimizer( + optimizer, + args.clip_grad, + args.log_num_zeros_in_grad, + args.check_for_nan_in_loss_and_grad, + params_have_main_grad, + ) -def get_megatron_optimizer(model_chunks, - no_weight_decay_cond=None, - scale_lr_cond=None, - lr_mult=1.0): +def get_megatron_optimizer( + model_chunks, no_weight_decay_cond=None, scale_lr_cond=None, lr_mult=1.0 +): """Retrieve the Megatron optimizer for model chunks. We use separate optimizers for expert parameters and non-expert parameters. @@ -209,7 +214,9 @@ def get_megatron_optimizer(model_chunks, moe_param_groups = list(filter(lambda g: g['is_expert_parallel'], param_groups)) # Create optimizers. 
- optimizers = [get_megatron_optimizer_based_on_param_groups(dense_param_groups, per_model_grad_buffers)] + optimizers = [ + get_megatron_optimizer_based_on_param_groups(dense_param_groups, per_model_grad_buffers) + ] if len(moe_param_groups): optimizers.append(get_megatron_optimizer_based_on_param_groups(moe_param_groups)) diff --git a/megatron/optimizer/clip_grads.py b/megatron/optimizer/clip_grads.py index a6a3d294e5..904502e3dc 100644 --- a/megatron/optimizer/clip_grads.py +++ b/megatron/optimizer/clip_grads.py @@ -14,9 +14,14 @@ from megatron.core.tensor_parallel import param_is_not_tensor_parallel_duplicate -def clip_grad_norm_fp32(parameters, grads_for_norm, - max_norm, check_for_nan_in_grad, - norm_type=2, model_parallel_group=None): +def clip_grad_norm_fp32( + parameters, + grads_for_norm, + max_norm, + check_for_nan_in_grad, + norm_type=2, + model_parallel_group=None, +): """Clips gradient norm of an iterable of parameters whose gradients are in fp32. @@ -62,9 +67,9 @@ def clip_grad_norm_fp32(parameters, grads_for_norm, total_norm = max(grad.abs().max() for grad in grads_for_norm) total_norm_cuda = torch.tensor([float(total_norm)], dtype=torch.float, device='cuda') # Take max across all model-parallel GPUs. - torch.distributed.all_reduce(total_norm_cuda, - op=torch.distributed.ReduceOp.MAX, - group=model_parallel_group) + torch.distributed.all_reduce( + total_norm_cuda, op=torch.distributed.ReduceOp.MAX, group=model_parallel_group + ) total_norm = total_norm_cuda[0].item() else: @@ -78,7 +83,7 @@ def clip_grad_norm_fp32(parameters, grads_for_norm, amp_C.multi_tensor_l2norm, dummy_overflow_buf, [grads_for_norm], - False # no per-parameter norm + False, # no per-parameter norm ) else: grad_norm = torch.tensor([0], dtype=torch.float, device='cuda') @@ -102,19 +107,18 @@ def clip_grad_norm_fp32(parameters, grads_for_norm, ) # Sum across all model-parallel GPUs. - torch.distributed.all_reduce(total_norm, - op=torch.distributed.ReduceOp.SUM, - group=model_parallel_group) + torch.distributed.all_reduce( + total_norm, op=torch.distributed.ReduceOp.SUM, group=model_parallel_group + ) total_norm = total_norm.item() ** (1.0 / norm_type) # Scale. clip_coeff = max_norm / (total_norm + 1.0e-6) if clip_coeff < 1.0: dummy_overflow_buf = torch.tensor([0], dtype=torch.int, device='cuda') - multi_tensor_applier(amp_C.multi_tensor_scale, - dummy_overflow_buf, - [grads, grads], - clip_coeff) + multi_tensor_applier( + amp_C.multi_tensor_scale, dummy_overflow_buf, [grads, grads], clip_coeff + ) return total_norm @@ -139,9 +143,9 @@ def count_zeros_fp32(parameters, model_parallel_group): total_num_zeros = num_zeros + total_num_zeros # Sum across all model-parallel GPUs. - torch.distributed.all_reduce(total_num_zeros, - op=torch.distributed.ReduceOp.SUM, - group=model_parallel_group) + torch.distributed.all_reduce( + total_num_zeros, op=torch.distributed.ReduceOp.SUM, group=model_parallel_group + ) total_num_zeros = total_num_zeros.item() diff --git a/megatron/optimizer/distrib_optimizer.py b/megatron/optimizer/distrib_optimizer.py index 0c763237ae..52f41fb9d6 100644 --- a/megatron/optimizer/distrib_optimizer.py +++ b/megatron/optimizer/distrib_optimizer.py @@ -17,20 +17,23 @@ from .utils import shard_buffer - class Range: """ A range represents a start and end points for indexing a shard from a full tensor. 
""" + def __init__(self, start, end): self.start = start self.end = end self.size = end - start - def normalize(self, start = 0): + + def normalize(self, start=0): return Range(start, start + self.size) + def __str__(self): return "%d,%d [%d]" % (self.start, self.end, self.size) + def __len__(self): return self.end - self.start @@ -103,32 +106,29 @@ def build_model_gbuf_param_range_map(cls, grad_buffer, gbuf_world_range, bucket_ # Param range. param_world_start, param_world_end, _ = param_world_indexes - param_local_start = max( - 0, - param_world_start - gbuf_world_range.start) - param_local_end = min( - gbuf_world_range.size, - param_world_end - gbuf_world_range.start) + param_local_start = max(0, param_world_start - gbuf_world_range.start) + param_local_end = min(gbuf_world_range.size, param_world_end - gbuf_world_range.start) # Add param, if within local gbuf range. if param_local_end > param_local_start: param_local_range = Range(param_local_start, param_local_end) param_world_range = param_local_range.normalize( - param_local_start + gbuf_world_range.start) - param_world_range_in_bucket = Range(param_world_range.start-bucket_offset, - param_world_range.end-bucket_offset) - sub_param_start = max(0, gbuf_world_range.start-param_world_start) + param_local_start + gbuf_world_range.start + ) + param_world_range_in_bucket = Range( + param_world_range.start - bucket_offset, param_world_range.end - bucket_offset + ) + sub_param_start = max(0, gbuf_world_range.start - param_world_start) sub_param_range = param_local_range.normalize(sub_param_start) param_range_map[param] = { - "gbuf_world" : param_world_range, + "gbuf_world": param_world_range, "gbuf_world_in_bucket": param_world_range_in_bucket, - "gbuf_local" : param_local_range, - "param" : sub_param_range, + "gbuf_local": param_local_range, + "param": sub_param_range, } return param_range_map - @classmethod def build_model_gbuf_range(cls, grad_buffer, bucket_index): """ @@ -147,8 +147,9 @@ def build_model_gbuf_range(cls, grad_buffer, bucket_index): bucket = grad_buffer.buckets[bucket_index] bucket_buffer = bucket.data gbuf_size = bucket_buffer.numel() - assert gbuf_size % data_parallel_world_size == 0, \ - f"Each bucket's buffer size should be divisible by {data_parallel_world_size}" + assert ( + gbuf_size % data_parallel_world_size == 0 + ), f"Each bucket's buffer size should be divisible by {data_parallel_world_size}" max_gbuf_range_size = gbuf_size // data_parallel_world_size # All world ranges (i.e., across all data parallel ranks). @@ -156,28 +157,28 @@ def build_model_gbuf_range(cls, grad_buffer, bucket_index): for r in range(data_parallel_world_size): # Compute start of chunk in this bucket. gbuf_world_start = r * max_gbuf_range_size - gbuf_world_end = min(gbuf_size, gbuf_world_start+max_gbuf_range_size) + gbuf_world_end = min(gbuf_size, gbuf_world_start + max_gbuf_range_size) # Add bucket's offset in grad buffer. - gbuf_world_range = Range(gbuf_world_start + bucket.offset, - gbuf_world_end + bucket.offset) + gbuf_world_range = Range( + gbuf_world_start + bucket.offset, gbuf_world_end + bucket.offset + ) gbuf_world_all_ranges.append(gbuf_world_range) # Local DP's ranges. gbuf_world_range = gbuf_world_all_ranges[data_parallel_rank] # Get each param's ranges. - param_range_map = cls.build_model_gbuf_param_range_map(grad_buffer, - gbuf_world_range, - bucket.offset) + param_range_map = cls.build_model_gbuf_param_range_map( + grad_buffer, gbuf_world_range, bucket.offset + ) # Group into dict. 
data = { - "param_map" : param_range_map, + "param_map": param_range_map, } return data - @classmethod def build_gbuf_range_map(cls, grad_buffer): """ @@ -198,7 +199,6 @@ def build_gbuf_range_map(cls, grad_buffer): ] } - @classmethod def build_model_param_gbuf_map(cls, gbuf_ranges): """ @@ -210,12 +210,12 @@ def build_model_param_gbuf_map(cls, gbuf_ranges): for dtype, gbuf_range_map_for_all_buckets in gbuf_range_map.items(): for bucket_index, gbuf_range_map in enumerate(gbuf_range_map_for_all_buckets): for param, _ in gbuf_range_map["param_map"].items(): - assert param not in param_gbuf_map, \ - "Param should not be in param_gbuf_map; each param only belongs to a single bucket" + assert ( + param not in param_gbuf_map + ), "Param should not be in param_gbuf_map; each param only belongs to a single bucket" param_gbuf_map[param] = (gbuf_index, dtype, bucket_index) return param_gbuf_map - @classmethod def build_optimizer_group_ranges(cls, param_groups, gbuf_ranges): """ @@ -248,7 +248,7 @@ def build_optimizer_group_ranges(cls, param_groups, gbuf_ranges): # the group. The group index and order are particularly important for # saving and loading checkpoints. local_param_group_map = {} - group_ranges = [ {"params": []} for _ in param_groups ] + group_ranges = [{"params": []} for _ in param_groups] for gbuf_range_map in gbuf_ranges: for dtype, gbuf_range_map_for_all_buckets in gbuf_range_map.items(): for gbuf_range_map in gbuf_range_map_for_all_buckets: @@ -256,8 +256,7 @@ def build_optimizer_group_ranges(cls, param_groups, gbuf_ranges): group_index = world_param_group_map[param] group_range = group_ranges[group_index] group_range["params"].append(param) - local_param_group_map[param] = \ - (group_index, len(group_range["params"]) - 1) + local_param_group_map[param] = (group_index, len(group_range["params"]) - 1) # Squeeze zero-size group ranges. for group_index, group_range in enumerate(group_ranges): @@ -266,12 +265,8 @@ def build_optimizer_group_ranges(cls, param_groups, gbuf_ranges): return local_param_group_map, group_ranges - @classmethod - def build_model_and_main_param_groups(cls, - gbuf_ranges, - param_gbuf_map, - opt_group_ranges): + def build_model_and_main_param_groups(cls, gbuf_ranges, param_gbuf_map, opt_group_ranges): """ Create main parameter groups needed for the optimizer step. @@ -308,8 +303,7 @@ def build_model_and_main_param_groups(cls, model_fp32_groups.append(model_fp32_params_this_group) shard_float16_groups.append(shard_float16_params_this_group) shard_fp32_groups.append(shard_fp32_params_this_group) - shard_fp32_from_float16_groups.append( - shard_fp32_from_float16_params_this_group) + shard_fp32_from_float16_groups.append(shard_fp32_from_float16_params_this_group) for model_param in group_range["params"]: @@ -320,17 +314,19 @@ def build_model_and_main_param_groups(cls, param_range = gbuf_range["param_map"][model_param]["param"] # fp16, bf16 params. - if model_param.type() in ['torch.cuda.HalfTensor', - 'torch.cuda.BFloat16Tensor']: + if model_param.type() in ['torch.cuda.HalfTensor', 'torch.cuda.BFloat16Tensor']: # Clone model -> main. 
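# [Editor's note: illustrative sketch, not part of the patch.] The clone below
# follows the usual mixed-precision pattern: the optimizer keeps an fp32 "main"
# copy of each half-precision shard and steps on that copy. In isolation, with
# a hypothetical tensor standing in for the real model parameter:

import torch

model_param = torch.zeros(8, dtype=torch.bfloat16)
shard_model_param = model_param.detach().view(-1)[2:6]  # this rank's shard view
shard_main_param = shard_model_param.clone().float()    # fp32 master copy for the optimizer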
- shard_model_param = model_param.detach().view(-1) \ - [param_range.start:param_range.end] + shard_model_param = model_param.detach().view(-1)[ + param_range.start : param_range.end + ] shard_main_param = shard_model_param.clone().float() tensor_parallel.copy_tensor_model_parallel_attributes( - shard_model_param, model_param) + shard_model_param, model_param + ) tensor_parallel.copy_tensor_model_parallel_attributes( - shard_main_param, model_param) + shard_main_param, model_param + ) if hasattr(model_param, 'shared'): shard_model_param.shared = model_param.shared shard_main_param.shared = model_param.shared @@ -342,21 +338,23 @@ def build_model_and_main_param_groups(cls, # fp32 params. elif model_param.type() == 'torch.cuda.FloatTensor': - shard_model_param = model_param.view(-1) \ - [param_range.start:param_range.end] + shard_model_param = model_param.view(-1)[param_range.start : param_range.end] model_fp32_params_this_group.append(model_param) shard_fp32_params_this_group.append(shard_model_param) tensor_parallel.copy_tensor_model_parallel_attributes( - shard_model_param, model_param) + shard_model_param, model_param + ) if hasattr(model_param, 'shared'): shard_model_param.shared = model_param.shared else: - raise TypeError('Wrapped parameters must be one of ' - 'torch.cuda.FloatTensor, ' - 'torch.cuda.HalfTensor, or ' - 'torch.cuda.BFloat16Tensor. ' - 'Received {}'.format(model_param.type())) + raise TypeError( + 'Wrapped parameters must be one of ' + 'torch.cuda.FloatTensor, ' + 'torch.cuda.HalfTensor, or ' + 'torch.cuda.BFloat16Tensor. ' + 'Received {}'.format(model_param.type()) + ) # Update optimizer's params. group_range["orig_group"]["params"] = [ @@ -372,10 +370,19 @@ def build_model_and_main_param_groups(cls, shard_fp32_from_float16_groups, ) - - def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, - check_for_nan_in_grad, params_have_main_grad, fp16, - bf16, params_dtype, grad_scaler, per_model_grad_buffers): + def __init__( + self, + optimizer, + clip_grad, + log_num_zeros_in_grad, + check_for_nan_in_grad, + params_have_main_grad, + fp16, + bf16, + params_dtype, + grad_scaler, + per_model_grad_buffers, + ): """ See top of class definition for argument descriptions. @@ -387,12 +394,20 @@ def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, """ super().__init__( - optimizer, clip_grad, log_num_zeros_in_grad, - check_for_nan_in_grad, params_have_main_grad, - fp16, bf16, params_dtype, grad_scaler) + optimizer, + clip_grad, + log_num_zeros_in_grad, + check_for_nan_in_grad, + params_have_main_grad, + fp16, + bf16, + params_dtype, + grad_scaler, + ) - assert isinstance(optimizer, Adam), \ - "Only Adam currently supported, due to checkpointing requirements." + assert isinstance( + optimizer, Adam + ), "Only Adam currently supported, due to checkpointing requirements." # Model grad buffer ranges. 
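# [Editor's note: worked example, not part of the patch; the numbers are made
# up.] To make the per-rank range computation in build_model_gbuf_range()
# above concrete: for a bucket of 1024 elements at offset 0 with
# data_parallel_world_size = 4, max_gbuf_range_size is 1024 // 4 = 256, so the
# per-rank world ranges are
#   rank 0 -> [0, 256), rank 1 -> [256, 512),
#   rank 2 -> [512, 768), rank 3 -> [768, 1024),
# and each rank then keeps only the parameters that overlap its own range via
# build_model_gbuf_param_range_map().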
assert per_model_grad_buffers, "grad_buffers must be provided" @@ -409,17 +424,18 @@ def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, self.per_bucket_numel_unpadded = [] for grad_buffer in self.grad_buffers: self.per_bucket_numel.append( - {grad_buffer.dtype: [bucket.data.numel() for bucket in grad_buffer.buckets]}) + {grad_buffer.dtype: [bucket.data.numel() for bucket in grad_buffer.buckets]} + ) self.per_bucket_numel_unpadded.append( - {grad_buffer.dtype: [bucket.numel_unpadded for bucket in grad_buffer.buckets]}) + {grad_buffer.dtype: [bucket.numel_unpadded for bucket in grad_buffer.buckets]} + ) self.gbuf_ranges.append(self.build_gbuf_range_map(grad_buffer)) - self.model_param_gbuf_map = \ - self.build_model_param_gbuf_map(self.gbuf_ranges) + self.model_param_gbuf_map = self.build_model_param_gbuf_map(self.gbuf_ranges) # Optimizer ranges. - self.model_param_group_index_map, self.opt_group_ranges = \ - self.build_optimizer_group_ranges(self.optimizer.param_groups, - self.gbuf_ranges) + self.model_param_group_index_map, self.opt_group_ranges = self.build_optimizer_group_ranges( + self.optimizer.param_groups, self.gbuf_ranges + ) # Allocate main param shards. ( @@ -428,9 +444,9 @@ def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, self.shard_float16_groups, self.shard_fp32_groups, self.shard_fp32_from_float16_groups, - ) = self.build_model_and_main_param_groups(self.gbuf_ranges, - self.model_param_gbuf_map, - self.opt_group_ranges) + ) = self.build_model_and_main_param_groups( + self.gbuf_ranges, self.model_param_gbuf_map, self.opt_group_ranges + ) # Initialize param buffers. # - These are views on the DDP model's grad buffers, that share @@ -452,10 +468,7 @@ def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, storage = bucket.data.storage().untyped() # Typed param buffer. - param_buffer = torch.tensor( - storage, - dtype = params_dtype, - device = bucket.data.device) + param_buffer = torch.tensor(storage, dtype=params_dtype, device=bucket.data.device) # .storage() ignores views / slices, so param_buffer now points to the start # of the grad_buffer instead of to the start of each bucket. As a result, @@ -467,11 +480,13 @@ def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, # into the model_params), multiply the offset by the size ratio of grads and # params. 
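# [Editor's note: illustrative numbers, not part of the patch.] For example,
# fp32 grads (4 bytes per element) sharing untyped storage with fp16/bf16
# params (2 bytes per element) give a size ratio of 2, so a bucket that begins
# at grad-element offset 1024 begins at param-element offset 2048 in the shared
# storage; the offset adjustment below accounts for that difference.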
offset = bucket.offset * size_ratio - param_buffer = param_buffer[offset:offset+bucket.data.numel()] - assert param_buffer.data_ptr() == bucket.data.data_ptr(), \ - "param_buffer and grad_buffer for same bucket should start at the same byte address" - assert param_buffer.numel() == bucket.data.numel(), \ - "param_buffer and grad_buffer for same bucket should have the same number of elements" + param_buffer = param_buffer[offset : offset + bucket.data.numel()] + assert ( + param_buffer.data_ptr() == bucket.data.data_ptr() + ), "param_buffer and grad_buffer for same bucket should start at the same byte address" + assert ( + param_buffer.numel() == bucket.data.numel() + ), "param_buffer and grad_buffer for same bucket should have the same number of elements" current_param_buffers.append(param_buffer) self.param_buffers.append(current_param_buffers) @@ -494,7 +509,9 @@ def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, model_idx = self.gbuf_idx_to_model_idx_map[gbuf_index] if model_idx not in self.model_index_to_all_gather_handle_index_map: self.model_index_to_all_gather_handle_index_map[model_idx] = [] - self.model_index_to_all_gather_handle_index_map[model_idx].append(all_gather_handle_index) + self.model_index_to_all_gather_handle_index_map[model_idx].append( + all_gather_handle_index + ) for param in self.grad_buffers[gbuf_index].buckets[bucket_index].params_list: self.param_to_all_gather_handle_index_map[param] = all_gather_handle_index @@ -504,7 +521,8 @@ def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, self.overlap_param_gather = get_args().overlap_param_gather if self.overlap_param_gather: self.remove_pre_hook_handle = torch.nn.modules.module.register_module_forward_pre_hook( - self._make_forward_pre_hook()) + self._make_forward_pre_hook() + ) else: self.remove_pre_hook_handle = None @@ -513,11 +531,9 @@ def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, # Update optimizer groups. # - Also, leverage state_dict() and load_state_dict() to # recast preexisting per-param state tensors. - self.optimizer.param_groups = \ - [ g["orig_group"] for g in self.opt_group_ranges ] + self.optimizer.param_groups = [g["orig_group"] for g in self.opt_group_ranges] self.optimizer.load_state_dict(self.optimizer.state_dict()) - def get_model_param_range_map(self, param): """ Given a model param, get the index sub-range of the param that this @@ -528,7 +544,6 @@ def get_model_param_range_map(self, param): param_range_map = gbuf_range_map["param_map"][param] return param_range_map - def get_model_parallel_group(self): """ With the distributed optimizer, the model parallel group is the @@ -536,7 +551,6 @@ def get_model_parallel_group(self): """ return None - def state_dict(self): """ The state dict contains all non-DP-rank-dependent (i.e., non-parameter- @@ -550,9 +564,7 @@ def state_dict(self): # Optimizer state (do not store parameter state here). state_dict['optimizer'] = { - k : v - for k, v in self.optimizer.state_dict().items() - if k != "state" + k: v for k, v in self.optimizer.state_dict().items() if k != "state" } for param_group in state_dict["optimizer"]["param_groups"]: del param_group["params"] @@ -563,7 +575,6 @@ def state_dict(self): return state_dict - def load_state_dict(self, state_dict): """Load the state dict. @@ -600,10 +611,10 @@ def load_state_dict(self, state_dict): # the ordering of parameters within its flattened parameter state # list. 
inner_state_dict = self.optimizer.state_dict() - state_dict_param_groups = [{ - **group, - "params" : list(inner_state_dict["param_groups"][idx]["params"]), - } for idx, group in enumerate(state_dict["optimizer"]["param_groups"])] + state_dict_param_groups = [ + {**group, "params": list(inner_state_dict["param_groups"][idx]["params"]),} + for idx, group in enumerate(state_dict["optimizer"]["param_groups"]) + ] # Allocate 'dummy' data for optimizer state (i.e., torch.empty() below) # - Real data is overwritten during load_parameter_state(). @@ -611,51 +622,49 @@ def load_state_dict(self, state_dict): for gbuf_range_maps in self.gbuf_ranges: for gbuf_range_map_for_all_buckets in gbuf_range_maps.values(): for gbuf_range_map in gbuf_range_map_for_all_buckets: - for model_param, param_range_map in \ - gbuf_range_map["param_map"].items(): + for model_param, param_range_map in gbuf_range_map["param_map"].items(): # Get parameter ordering information (see method docstring # for details). - group_index, group_order = \ - self.model_param_group_index_map[model_param] - state_order = inner_state_dict["param_groups"] \ - [group_index]["params"][group_order] + group_index, group_order = self.model_param_group_index_map[model_param] + state_order = inner_state_dict["param_groups"][group_index]["params"][ + group_order + ] # Allocate dummy tensors. numel = len(param_range_map["gbuf_world"]) - init_shard = lambda : torch.empty( - (numel,), - dtype=torch.float32, - device=torch.cuda.current_device()) + init_shard = lambda: torch.empty( + (numel,), dtype=torch.float32, device=torch.cuda.current_device() + ) - state_dict_state.append((state_order, { - "exp_avg" : init_shard(), - "exp_avg_sq" : init_shard(), - })) + state_dict_state.append( + (state_order, {"exp_avg": init_shard(), "exp_avg_sq": init_shard(),}) + ) # Sort by state order (see method docstring for details). - state_dict_state.sort(key = lambda s : s[0]) - state_dict_state = {s[0]:s[1] for s in state_dict_state} + state_dict_state.sort(key=lambda s: s[0]) + state_dict_state = {s[0]: s[1] for s in state_dict_state} # Optimizer. - self.optimizer.load_state_dict({ - "state" : state_dict_state, - "param_groups" : state_dict_param_groups, - }) + self.optimizer.load_state_dict( + {"state": state_dict_state, "param_groups": state_dict_param_groups,} + ) # Grad scaler. if 'grad_scaler' not in state_dict: if self.fp16: - print_rank_0('***WARNING*** found an old checkpoint, will not ' - 'load grad scaler ...') + print_rank_0( + '***WARNING*** found an old checkpoint, will not ' 'load grad scaler ...' + ) else: if self.grad_scaler: self.grad_scaler.load_state_dict(state_dict['grad_scaler']) else: - print_rank_0('***WARNING*** fould the grad scaler in the ' - 'checkpoint but it is None in the class. ' - 'Skipping loading grad scaler ...') - + print_rank_0( + '***WARNING*** fould the grad scaler in the ' + 'checkpoint but it is None in the class. ' + 'Skipping loading grad scaler ...' + ) def get_parameter_state(self): """Get parameter state (i.e., parameter & optimizer tensors). @@ -675,8 +684,10 @@ def get_parameter_state(self): data_parallel_global_ranks = list(mpu._DATA_PARALLEL_GLOBAL_RANKS_WITH_CP) # Collect param states. 
- state = {"per_bucket_numel": self.per_bucket_numel, - "per_bucket_numel_unpadded": self.per_bucket_numel_unpadded} + state = { + "per_bucket_numel": self.per_bucket_numel, + "per_bucket_numel_unpadded": self.per_bucket_numel_unpadded, + } for gbuf_idx, gbuf_range_maps in enumerate(self.gbuf_ranges): # Iterate grad buffers (by data type). @@ -690,24 +701,21 @@ def get_parameter_state(self): gbuf_world_numel = self.grad_buffers[gbuf_idx].buckets[bucket_idx].data.numel() assert gbuf_world_numel % data_parallel_world_size == 0 gbuf_local_numel = gbuf_world_numel // data_parallel_world_size - local_shards = {key: torch.empty((gbuf_local_numel,), - dtype=torch.float32, - device="cpu") - for key in ("param", "exp_avg", "exp_avg_sq")} + local_shards = { + key: torch.empty((gbuf_local_numel,), dtype=torch.float32, device="cpu") + for key in ("param", "exp_avg", "exp_avg_sq") + } # Build contiguous DP rank shards (for param + optim states). - for model_param, param_range_map in \ - gbuf_range_map["param_map"].items(): + for model_param, param_range_map in gbuf_range_map["param_map"].items(): # Main param & optimizer states. - group_index, group_order = \ - self.model_param_group_index_map[model_param] - main_param = self.optimizer.param_groups \ - [group_index]["params"][group_order] + group_index, group_order = self.model_param_group_index_map[model_param] + main_param = self.optimizer.param_groups[group_index]["params"][group_order] optim_state = self.optimizer.state[main_param] tensors = { - "param" : main_param, + "param": main_param, **optim_state, } @@ -715,18 +723,19 @@ def get_parameter_state(self): gbuf_local_start = param_range_map["gbuf_local"].start gbuf_local_end = param_range_map["gbuf_local"].end for key in local_shards: - local_shards[key][gbuf_local_start:gbuf_local_end] \ - .data.copy_(tensors[key].detach().cpu()) + local_shards[key][gbuf_local_start:gbuf_local_end].data.copy_( + tensors[key].detach().cpu() + ) # Gather contiguous shards on DP rank 0. for key, send_tensor in local_shards.items(): # Gather tensor list. if data_parallel_rank == 0: - recv_tensors = [torch.empty((gbuf_local_numel,), - dtype=torch.float32, - device="cpu") - for _ in range(data_parallel_world_size)] + recv_tensors = [ + torch.empty((gbuf_local_numel,), dtype=torch.float32, device="cpu") + for _ in range(data_parallel_world_size) + ] else: recv_tensors = None @@ -750,7 +759,6 @@ def get_parameter_state(self): return state - def save_parameter_state(self, filename): """Save the distributed parameter state on DP rank 0. @@ -763,7 +771,6 @@ def save_parameter_state(self, filename): if data_parallel_rank == 0: torch.save(state_dict, filename) - def load_parameter_state_from_state_dict(self, state_dict): """Load parameter state (i.e., parameter & optimizer tensors). @@ -793,10 +800,10 @@ def load_parameter_state_from_state_dict(self, state_dict): gbuf_local_numel = gbuf_world_numel // data_parallel_world_size # Contiguous local shards (received from DP rank 0). - local_shards = {key: torch.empty((gbuf_local_numel,), - dtype=torch.float32, - device="cpu") - for key in ("param", "exp_avg", "exp_avg_sq")} + local_shards = { + key: torch.empty((gbuf_local_numel,), dtype=torch.float32, device="cpu") + for key in ("param", "exp_avg", "exp_avg_sq") + } # Scatter local shards from DP rank 0. 
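# [Editor's note: illustrative sketch, not part of the patch.] The loop below
# scatters each rank's contiguous shard from DP rank 0; the usual
# torch.distributed.scatter pattern looks like this minimal standalone sketch
# (hypothetical sizes, assuming the default process group is already
# initialized):

import torch
import torch.distributed as dist

shard_numel = 4  # stand-in for gbuf_local_numel
recv_tensor = torch.empty(shard_numel, dtype=torch.float32, device="cpu")
if dist.get_rank() == 0:
    full = torch.arange(shard_numel * dist.get_world_size(), dtype=torch.float32)
    send_tensors = list(full.split(shard_numel))  # one chunk per data-parallel rank
else:
    send_tensors = None  # only the source rank provides the scatter list
dist.scatter(recv_tensor, scatter_list=send_tensors, src=0)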
for key, recv_tensor in local_shards.items(): @@ -806,43 +813,56 @@ def load_parameter_state_from_state_dict(self, state_dict): world_tensor_for_all_buckets = state_dict[gbuf_idx][dtype][key] if not isinstance(world_tensor_for_all_buckets, list): world_tensor_for_all_buckets = [world_tensor_for_all_buckets] - assert bucket_idx < len(world_tensor_for_all_buckets), \ - (f"Trying to load state for bucket_id {bucket_idx} (out of " - f"{len(gbuf_range_map_for_all_buckets)} buckets) from checkpoint; " - f"checkpoint only has {len(world_tensor_for_all_buckets)} bucket(s)") + assert bucket_idx < len(world_tensor_for_all_buckets), ( + f"Trying to load state for bucket_id {bucket_idx} (out of " + f"{len(gbuf_range_map_for_all_buckets)} buckets) from checkpoint; " + f"checkpoint only has {len(world_tensor_for_all_buckets)} bucket(s)" + ) # This tensor might be bigger or smaller than expected (depending on # relative sizes of per_bucket_numel_in_checkpoint and self.per_bucket_numel). world_tensor = world_tensor_for_all_buckets[bucket_idx] if "per_bucket_numel" in state_dict: - numel_in_checkpoint = \ - state_dict["per_bucket_numel"][gbuf_idx][dtype][bucket_idx] + numel_in_checkpoint = state_dict["per_bucket_numel"][gbuf_idx][ + dtype + ][bucket_idx] numel = self.per_bucket_numel[gbuf_idx][dtype][bucket_idx] - numel_unpadded = self.per_bucket_numel_unpadded[gbuf_idx][dtype][bucket_idx] + numel_unpadded = self.per_bucket_numel_unpadded[gbuf_idx][dtype][ + bucket_idx + ] assert world_tensor.numel() == numel_in_checkpoint - assert numel_unpadded <= world_tensor.numel(), \ - ("True number of elements should be fewer than number of elements in " - "checkpoint tensor") + assert numel_unpadded <= world_tensor.numel(), ( + "True number of elements should be fewer than number of elements in " + "checkpoint tensor" + ) if world_tensor.numel() > numel: # Truncate extra values, which are padding anyway. - print_rank_0(f"Truncating extra values from checkpoint (numel_in_checkpoint={numel_in_checkpoint}, " - f"numel={numel}, numel_unpadded={numel_unpadded})") + print_rank_0( + f"Truncating extra values from checkpoint (numel_in_checkpoint={numel_in_checkpoint}, " + f"numel={numel}, numel_unpadded={numel_unpadded})" + ) world_tensor = world_tensor[:numel] elif world_tensor.numel() < numel: # In this case, numel > world_tensor.numel() (which is numel_in_checkpoint). # Create new tensor with right number of values, then copy and use new tensor. 
- print_rank_0(f"Expanding tensor from checkpoint (numel_in_checkpoint={numel_in_checkpoint}, " - f"numel={numel}, numel_unpadded={numel_unpadded})") - world_tensor_reshaped = torch.empty((numel,), - dtype=world_tensor.dtype, - device=world_tensor.device) + print_rank_0( + f"Expanding tensor from checkpoint (numel_in_checkpoint={numel_in_checkpoint}, " + f"numel={numel}, numel_unpadded={numel_unpadded})" + ) + world_tensor_reshaped = torch.empty( + (numel,), + dtype=world_tensor.dtype, + device=world_tensor.device, + ) world_tensor_reshaped[:numel_in_checkpoint].copy_(world_tensor) world_tensor = world_tensor_reshaped else: - print_rank_0("***WARNING*** Using older checkpoint so skipping padding checks") - gbuf_start_idxs = \ - list(range(0, gbuf_world_numel, gbuf_local_numel)) - send_tensors = [world_tensor[i:(i+gbuf_local_numel)] - for i in gbuf_start_idxs] + print_rank_0( + "***WARNING*** Using older checkpoint so skipping padding checks" + ) + gbuf_start_idxs = list(range(0, gbuf_world_numel, gbuf_local_numel)) + send_tensors = [ + world_tensor[i : (i + gbuf_local_numel)] for i in gbuf_start_idxs + ] else: send_tensors = None @@ -855,18 +875,15 @@ def load_parameter_state_from_state_dict(self, state_dict): ) # Copy local contiguous shards to param/optim shards. - for model_param, param_range_map in \ - gbuf_range_map["param_map"].items(): + for model_param, param_range_map in gbuf_range_map["param_map"].items(): # Main param & optimizer states. - group_index, group_order = \ - self.model_param_group_index_map[model_param] - main_param = self.optimizer.param_groups \ - [group_index]["params"][group_order] + group_index, group_order = self.model_param_group_index_map[model_param] + main_param = self.optimizer.param_groups[group_index]["params"][group_order] optim_state = self.optimizer.state[main_param] tensors = { - "param" : main_param, + "param": main_param, **optim_state, } @@ -875,8 +892,8 @@ def load_parameter_state_from_state_dict(self, state_dict): gbuf_local_end = param_range_map["gbuf_local"].end for key in local_shards: tensors[key].data.copy_( - local_shards[key][gbuf_local_start:gbuf_local_end]) - + local_shards[key][gbuf_local_start:gbuf_local_end] + ) def load_parameter_state(self, filename): """Load the distributed parameter state from disk. @@ -891,14 +908,14 @@ def load_parameter_state(self, filename): state_dict = torch.load(filename) if "per_bucket_numel_unpadded" in state_dict: per_bucket_numel_unpadded_in_checkpoint = state_dict["per_bucket_numel_unpadded"] - assert self.per_bucket_numel_unpadded == per_bucket_numel_unpadded_in_checkpoint, \ - (f"Number of unpadded elements in each bucket need to be the same in current run " - f"({self.per_bucket_numel_unpadded}) and checkpoint " - f"({per_bucket_numel_unpadded_in_checkpoint})") + assert self.per_bucket_numel_unpadded == per_bucket_numel_unpadded_in_checkpoint, ( + f"Number of unpadded elements in each bucket need to be the same in current run " + f"({self.per_bucket_numel_unpadded}) and checkpoint " + f"({per_bucket_numel_unpadded_in_checkpoint})" + ) self.load_parameter_state_from_state_dict(state_dict) - def zero_grad(self, set_to_none=True): """ Zero grads. @@ -910,11 +927,12 @@ def zero_grad(self, set_to_none=True): used by this field can be safely deallocated at this point. """ for groups in ( - self.model_float16_groups, - self.model_fp32_groups, - self.shard_float16_groups, # grad empty/unused here? 
- self.shard_fp32_groups, # throws grad-access warning - self.shard_fp32_from_float16_groups): + self.model_float16_groups, + self.model_fp32_groups, + self.shard_float16_groups, # grad empty/unused here? + self.shard_fp32_groups, # throws grad-access warning + self.shard_fp32_from_float16_groups, + ): for group in groups: _zero_grad_group_helper(group, set_to_none) @@ -927,7 +945,6 @@ def zero_grad(self, set_to_none=True): if self.overlap_param_gather: self._dispatch_gather_model_params(all_gather_handle_index=0) - def get_model_param_buffer_dp_views(self): """ Get shard views of each of the param buffers. @@ -957,12 +974,13 @@ def get_model_param_buffer_dp_views(self): dtype = self.grad_buffers[gbuf_index].dtype for bucket_index, buf in enumerate(buffers): buf_views = shard_buffer(buf) - view_items_per_model_chunk.insert(0, (gbuf_index, dtype, bucket_index, buf, buf_views)) + view_items_per_model_chunk.insert( + 0, (gbuf_index, dtype, bucket_index, buf, buf_views) + ) view_items.extend(view_items_per_model_chunk) return view_items - def _dispatch_gather_model_params(self, all_gather_handle_index): """ All-gather updated model params. @@ -980,24 +998,27 @@ def _dispatch_gather_model_params(self, all_gather_handle_index): # across all data-parallel ranks, due to padding (done in grad_buffer.py), # and extended to the param_bufs. Thus, all sub-views will have consistent # start / end indexes across data-parallel ranks. - (gbuf_index, dtype, bucket_index, pbuf, pbuf_views) = self.pbuf_view_items[all_gather_handle_index] + (gbuf_index, dtype, bucket_index, pbuf, pbuf_views) = self.pbuf_view_items[ + all_gather_handle_index + ] assert all_gather_handle_index == len(self.all_gather_handles) all_gather_handle = torch.distributed._all_gather_base( pbuf, pbuf_views[data_parallel_rank], - group = data_parallel_group, - async_op = self.overlap_param_gather + group=data_parallel_group, + async_op=self.overlap_param_gather, ) self.all_gather_handles.append(all_gather_handle) - assert self.all_gather_handle_index_to_bucket_index_map[all_gather_handle_index] == \ - (gbuf_index, dtype, bucket_index) + assert self.all_gather_handle_index_to_bucket_index_map[all_gather_handle_index] == ( + gbuf_index, + dtype, + bucket_index, + ) self.param_buffer_copied.append(False) if not self.overlap_param_gather: self._copy_params_from_param_buffer(all_gather_handle_index) - - def _make_forward_pre_hook(self): """ Create a forward pre-hook to wait on all-gather handles when necessary (i.e., @@ -1006,7 +1027,9 @@ def _make_forward_pre_hook(self): """ def hook(module, *unused): - assert self.overlap_param_gather, "Should use pre-hook only when overlap_param_gather is True" + assert ( + self.overlap_param_gather + ), "Should use pre-hook only when overlap_param_gather is True" # Make sure all parameters in this module have been all-gathered as necessary. for param in module.parameters(recurse=False): @@ -1062,13 +1085,13 @@ def _finish_param_sync_helper(self, all_gather_handle_index): self._copy_params_from_param_buffer(all_gather_handle_index) self.param_buffer_copied[all_gather_handle_index] = True - def _copy_params_from_param_buffer(self, all_gather_handle_index): """ Copy params from param_buffer to model_params. 
""" (gbuf_index, dtype, bucket_index) = self.all_gather_handle_index_to_bucket_index_map[ - all_gather_handle_index] + all_gather_handle_index + ] grad_buffer = self.grad_buffers[gbuf_index] if self.update_successful: @@ -1081,7 +1104,7 @@ def _copy_params_from_param_buffer(self, all_gather_handle_index): # buf_start and buf_end store position of this parameter in the full grad_buffer, # so need to adjust these indices (by subtracting out bucket_offset) since we # have independent param_bufs for each bucket. - param_buf_shard = param_buf[buf_start-bucket_offset:buf_end-bucket_offset] + param_buf_shard = param_buf[buf_start - bucket_offset : buf_end - bucket_offset] assert param.data.nelement() == param_buf_shard.nelement() param.view(-1).detach().copy_(param_buf_shard) @@ -1092,33 +1115,29 @@ def _copy_params_from_param_buffer(self, all_gather_handle_index): assert param_buf.data_ptr() == grad_buf.data_ptr() grad_buf.zero_() - def _collect_main_grad_data_for_unscaling(self): """ Note: this should be equivalent to the float-16 optimizer's method, but writtent differently, so the two should be combined. """ return [ - param.grad.data - for group in self.optimizer.param_groups - for param in group["params"] + param.grad.data for group in self.optimizer.param_groups for param in group["params"] ] - def _get_model_and_main_params_data_float16(self): """ Get aligned list of model and main params. """ model_data = [] main_data = [] - for model_group, main_group in zip(self.shard_float16_groups, - self.shard_fp32_from_float16_groups): + for model_group, main_group in zip( + self.shard_float16_groups, self.shard_fp32_from_float16_groups + ): for model_param, main_param in zip(model_group, main_group): model_data.append(model_param.data) main_data.append(main_param.data) return model_data, main_data - def _copy_model_grads_to_main_grads(self): """ Copy model grads to main grads. @@ -1130,26 +1149,20 @@ def _copy_model_grads_to_main_grads(self): # Utility method for copying group grads. def copy_group_grads(model_groups, shard_main_groups): - for model_group, shard_main_group in zip(model_groups, - shard_main_groups): - for model_param, shard_main_param in zip(model_group, - shard_main_group): + for model_group, shard_main_group in zip(model_groups, shard_main_groups): + for model_param, shard_main_param in zip(model_group, shard_main_group): param_range_map = self.get_model_param_range_map(model_param) param_range = param_range_map["param"] assert param_range.size == shard_main_param.nelement() model_grad = model_param.main_grad - shard_model_grad = model_grad.view(-1) \ - [param_range.start:param_range.end] + shard_model_grad = model_grad.view(-1)[param_range.start : param_range.end] shard_main_param.grad = shard_model_grad.float() # Copy model groups to shard groups. - copy_group_grads(self.model_float16_groups, - self.shard_fp32_from_float16_groups) - copy_group_grads(self.model_fp32_groups, - self.shard_fp32_groups) - + copy_group_grads(self.model_float16_groups, self.shard_fp32_from_float16_groups) + copy_group_grads(self.model_fp32_groups, self.shard_fp32_groups) def _copy_main_params_to_model_params(self): """ @@ -1162,10 +1175,8 @@ def _copy_main_params_to_model_params(self): # Utility method for copying group params. 
def copy_group_params(shard_main_groups, model_groups): - for shard_main_group, model_group in zip(shard_main_groups, - model_groups): - for shard_main_param, model_param in zip(shard_main_group, - model_group): + for shard_main_group, model_group in zip(shard_main_groups, model_groups): + for shard_main_param, model_param in zip(shard_main_group, model_group): param_range_map = self.get_model_param_range_map(model_param) world_range = param_range_map["gbuf_world_in_bucket"] @@ -1175,17 +1186,15 @@ def copy_group_params(shard_main_groups, model_groups): gbuf_index, dtype, bucket_id = self.model_param_gbuf_map[model_param] model_param_buffer = self.param_buffers[gbuf_index][bucket_id] - shard_model_param = model_param_buffer.view(-1) \ - [world_range.start:world_range.end] + shard_model_param = model_param_buffer.view(-1)[ + world_range.start : world_range.end + ] shard_model_param.data.copy_(shard_main_param) # Copy shard groups to model groups. - copy_group_params(self.shard_fp32_from_float16_groups, - self.model_float16_groups) - copy_group_params(self.shard_fp32_groups, - self.model_fp32_groups) - + copy_group_params(self.shard_fp32_from_float16_groups, self.model_float16_groups) + copy_group_params(self.shard_fp32_groups, self.model_fp32_groups) def _copy_model_params_to_main_params(self): """ @@ -1198,25 +1207,19 @@ def _copy_model_params_to_main_params(self): # Utility method for copying group params. def copy_group_params(model_groups, shard_main_groups): - for model_group, shard_main_group in zip(model_groups, - shard_main_groups): - for model_param, shard_main_param in zip(model_group, - shard_main_group): + for model_group, shard_main_group in zip(model_groups, shard_main_groups): + for model_param, shard_main_param in zip(model_group, shard_main_group): param_range_map = self.get_model_param_range_map(model_param) param_range = param_range_map["param"] assert param_range.size == shard_main_param.nelement() - shard_model_param = model_param.view(-1) \ - [param_range.start:param_range.end] + shard_model_param = model_param.view(-1)[param_range.start : param_range.end] shard_main_param.data.copy_(shard_model_param) # Copy model groups to shard groups. 
- copy_group_params(self.model_float16_groups, - self.shard_fp32_from_float16_groups) - copy_group_params(self.model_fp32_groups, - self.shard_fp32_groups) - + copy_group_params(self.model_float16_groups, self.shard_fp32_from_float16_groups) + copy_group_params(self.model_fp32_groups, self.shard_fp32_groups) @torch.no_grad() def step(self, args, timers): diff --git a/megatron/optimizer/grad_scaler.py b/megatron/optimizer/grad_scaler.py index f77da3fc69..4d5d302ea1 100644 --- a/megatron/optimizer/grad_scaler.py +++ b/megatron/optimizer/grad_scaler.py @@ -9,7 +9,6 @@ class MegatronGradScaler(ABC): - def __init__(self, initial_scale): """Initialize scale value with the input initial scale.""" assert initial_scale > 0.0 @@ -36,9 +35,7 @@ def load_state_dict(self, state_dict): pass - class ConstantGradScaler(MegatronGradScaler): - def update(self, found_inf): pass @@ -49,12 +46,10 @@ def load_state_dict(self, state_dict): pass - class DynamicGradScaler(MegatronGradScaler): - - def __init__(self, initial_scale, min_scale, - growth_factor, backoff_factor, - growth_interval, hysteresis): + def __init__( + self, initial_scale, min_scale, growth_factor, backoff_factor, growth_interval, hysteresis + ): """"Grad scaler with dynamic scale that gets adjusted during training.""" super(DynamicGradScaler, self).__init__(initial_scale) @@ -82,7 +77,6 @@ def __init__(self, initial_scale, min_scale, self._growth_tracker = 0 self._hysteresis_tracker = self.hysteresis - def update(self, found_inf): # If we have an inf/nan, growth tracker is set to 0 @@ -92,8 +86,7 @@ def update(self, found_inf): self._hysteresis_tracker -= 1 # Now if we are out of hysteresis count, scale down the loss. if self._hysteresis_tracker <= 0: - self._scale = torch.max(self._scale * self.backoff_factor, - self.min_scale) + self._scale = torch.max(self._scale * self.backoff_factor, self.min_scale) else: # If there is no nan/inf, increment the growth tracker. self._growth_tracker += 1 @@ -105,7 +98,6 @@ def update(self, found_inf): # and scale up the loss scale. self._scale = self._scale * self.growth_factor - def state_dict(self): state_dict = {} state_dict['scale'] = self._scale @@ -113,7 +105,6 @@ def state_dict(self): state_dict['hysteresis_tracker'] = self._hysteresis_tracker return state_dict - def load_state_dict(self, state_dict): self._scale = state_dict['scale'].cuda(torch.cuda.current_device()) self._growth_tracker = state_dict['growth_tracker'] diff --git a/megatron/optimizer/optimizer.py b/megatron/optimizer/optimizer.py index 892b1105d5..6afb888f52 100644 --- a/megatron/optimizer/optimizer.py +++ b/megatron/optimizer/optimizer.py @@ -41,23 +41,21 @@ def _multi_tensor_copy_this_to_that(this, that, overflow_buf=None): if overflow_buf: overflow_buf.fill_(0) # Scaling with factor `1.0` is equivalent to copy. 
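# [Editor's note: worked example for the DynamicGradScaler.update() hunk above;
# the values are illustrative, not taken from this patch.] With
# growth_factor=2.0, backoff_factor=0.5, growth_interval=1000, and
# hysteresis=2: an iteration with an inf/nan gradient zeroes the growth tracker
# and spends one unit of hysteresis, and once that budget is exhausted the
# scale is multiplied by 0.5 (never dropping below min_scale); 1000 consecutive
# clean iterations multiply the scale by 2.0 and reset the growth tracker.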
- multi_tensor_applier(amp_C.multi_tensor_scale, - overflow_buf, - [this, that], - 1.0) + multi_tensor_applier(amp_C.multi_tensor_scale, overflow_buf, [this, that], 1.0) else: for this_, that_ in zip(this, that): that_.copy_(this_) - class MegatronOptimizer(ABC): - - - def __init__(self, optimizer, clip_grad, - log_num_zeros_in_grad, - check_for_nan_in_grad, - params_have_main_grad): + def __init__( + self, + optimizer, + clip_grad, + log_num_zeros_in_grad, + check_for_nan_in_grad, + params_have_main_grad, + ): """Input optimizer is the base optimizer for example Adam.""" self.optimizer = optimizer @@ -68,7 +66,6 @@ def __init__(self, optimizer, clip_grad, self.check_for_nan_in_grad = check_for_nan_in_grad self.params_have_main_grad = params_have_main_grad - def get_parameters(self): params = [] for param_group in self.optimizer.param_groups: @@ -76,7 +73,6 @@ def get_parameters(self): params.append(param) return params - def get_main_grads_for_grad_norm(self): # Filter parameters based on: @@ -95,43 +91,38 @@ def get_main_grads_for_grad_norm(self): return grads_for_norm - def get_model_parallel_group(self): """Default returned here, but the distributed optimizer overrides this.""" return mpu.get_model_parallel_group() - def clip_grad_norm(self, clip_grad, check_for_nan_in_grad): params = self.get_parameters() grads_for_norm = self.get_main_grads_for_grad_norm() return clip_grad_norm_fp32( - params, grads_for_norm, clip_grad, + params, + grads_for_norm, + clip_grad, check_for_nan_in_grad, - model_parallel_group=self.get_model_parallel_group()) - + model_parallel_group=self.get_model_parallel_group(), + ) def count_zeros(self): params = self.get_parameters() - return count_zeros_fp32(params, - model_parallel_group=self.get_model_parallel_group()) - + return count_zeros_fp32(params, model_parallel_group=self.get_model_parallel_group()) @abstractmethod def zero_grad(self, set_to_none=True): pass - @abstractmethod def get_loss_scale(self): """The output should be a cuda tensor of size 1.""" pass - def scale_loss(self, loss): """Simple scaling.""" return self.get_loss_scale() * loss - @abstractmethod def reload_model_params(self): """Refreshes any internal state from the current model parameters. @@ -141,17 +132,14 @@ def reload_model_params(self): with main parameters, the main parameters need to also be updated.""" pass - @abstractmethod def state_dict(self): pass - @abstractmethod def load_state_dict(self, state_dict): pass - # Promote state so it can be retrieved or set via # "optimizer_instance.state" def _get_state(self): @@ -162,7 +150,6 @@ def _set_state(self, value): state = property(_get_state, _set_state) - # Promote param_groups so it can be retrieved or set via # "optimizer_instance.param_groups" # (for example, to adjust the learning rate) @@ -174,13 +161,11 @@ def _set_param_groups(self, value): param_groups = property(_get_param_groups, _set_param_groups) - @abstractmethod def step(self, args, timers): pass - class MixedPrecisionOptimizer(MegatronOptimizer): """Base class for both the float-16 and the distributed optimizer. @@ -209,13 +194,26 @@ class MixedPrecisionOptimizer(MegatronOptimizer): always require a grad scaler. 
""" - def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, - check_for_nan_in_grad, params_have_main_grad, - fp16, bf16, params_dtype, grad_scaler): + def __init__( + self, + optimizer, + clip_grad, + log_num_zeros_in_grad, + check_for_nan_in_grad, + params_have_main_grad, + fp16, + bf16, + params_dtype, + grad_scaler, + ): super().__init__( - optimizer, clip_grad, log_num_zeros_in_grad, - check_for_nan_in_grad, params_have_main_grad) + optimizer, + clip_grad, + log_num_zeros_in_grad, + check_for_nan_in_grad, + params_have_main_grad, + ) self.fp16 = fp16 self.bf16 = bf16 @@ -245,17 +243,14 @@ def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, if self.grad_scaler is None: self._scale_one = torch.tensor([1.0], dtype=torch.float, device='cuda') - def get_loss_scale(self): if self.grad_scaler is None: return self._scale_one return self.grad_scaler.scale - def reload_model_params(self): self._copy_model_params_to_main_params() - def _unscale_main_grads_and_check_for_nan(self): # Collect main grads. @@ -266,25 +261,24 @@ def _unscale_main_grads_and_check_for_nan(self): # Unscale and set found inf/nan torch._amp_foreach_non_finite_check_and_unscale_( - main_grads, self.found_inf, self.grad_scaler.inv_scale) + main_grads, self.found_inf, self.grad_scaler.inv_scale + ) # Update across all model parallel instances. - torch.distributed.all_reduce(self.found_inf, - op=torch.distributed.ReduceOp.MAX, - group=self.get_model_parallel_group()) + torch.distributed.all_reduce( + self.found_inf, op=torch.distributed.ReduceOp.MAX, group=self.get_model_parallel_group() + ) # Check for nan. - found_inf_flag = (self.found_inf.item() > 0) + found_inf_flag = self.found_inf.item() > 0 return found_inf_flag - @torch.no_grad() def step(self, args, timers): # Copy gradients from model params to main params. - timers('optimizer-copy-to-main-grad', log_level=1).start( - barrier=args.barrier_with_L1_time) + timers('optimizer-copy-to-main-grad', log_level=1).start(barrier=args.barrier_with_L1_time) self._copy_model_grads_to_main_grads() timers('optimizer-copy-to-main-grad').stop() @@ -294,7 +288,8 @@ def step(self, args, timers): # Unscale and check for inf/nan. timers('optimizer-unscale-and-check-inf', log_level=1).start( - barrier=args.barrier_with_L1_time) + barrier=args.barrier_with_L1_time + ) found_inf_flag = self._unscale_main_grads_and_check_for_nan() timers('optimizer-unscale-and-check-inf').stop() @@ -307,30 +302,26 @@ def step(self, args, timers): return False, None, None # Clip the main gradients. - timers('optimizer-clip-main-grad', log_level=1).start( - barrier=args.barrier_with_L1_time) + timers('optimizer-clip-main-grad', log_level=1).start(barrier=args.barrier_with_L1_time) grad_norm = None if self.clip_grad > 0.0: - grad_norm = self.clip_grad_norm(self.clip_grad, - self.check_for_nan_in_grad) + grad_norm = self.clip_grad_norm(self.clip_grad, self.check_for_nan_in_grad) timers('optimizer-clip-main-grad').stop() # Count the zeros in the grads. - timers('optimizer-count-zeros', log_level=1).start( - barrier=args.barrier_with_L1_time) - num_zeros_in_grad = self.count_zeros() if \ - self.log_num_zeros_in_grad else None + timers('optimizer-count-zeros', log_level=1).start(barrier=args.barrier_with_L1_time) + num_zeros_in_grad = self.count_zeros() if self.log_num_zeros_in_grad else None timers('optimizer-count-zeros').stop() # Step the optimizer. 
- timers('optimizer-inner-step', log_level=1).start( - barrier=args.barrier_with_L1_time) + timers('optimizer-inner-step', log_level=1).start(barrier=args.barrier_with_L1_time) self.optimizer.step() timers('optimizer-inner-step').stop() # Update params from main params. timers('optimizer-copy-main-to-model-params', log_level=1).start( - barrier=args.barrier_with_L1_time) + barrier=args.barrier_with_L1_time + ) self._copy_main_params_to_model_params() timers('optimizer-copy-main-to-model-params').stop() @@ -365,14 +356,30 @@ class Float16OptimizerWithFloat16Params(MixedPrecisionOptimizer): always require a grad scaler. """ - def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, - check_for_nan_in_grad, params_have_main_grad, fp16, bf16, - params_dtype, grad_scaler): + def __init__( + self, + optimizer, + clip_grad, + log_num_zeros_in_grad, + check_for_nan_in_grad, + params_have_main_grad, + fp16, + bf16, + params_dtype, + grad_scaler, + ): super().__init__( - optimizer, clip_grad, log_num_zeros_in_grad, - check_for_nan_in_grad, params_have_main_grad, - fp16, bf16, params_dtype, grad_scaler) + optimizer, + clip_grad, + log_num_zeros_in_grad, + check_for_nan_in_grad, + params_have_main_grad, + fp16, + bf16, + params_dtype, + grad_scaler, + ) # ====================== # main parameter stuff @@ -396,14 +403,12 @@ def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, if param.requires_grad: # float16 params: - if param.type() in ['torch.cuda.HalfTensor', - 'torch.cuda.BFloat16Tensor']: + if param.type() in ['torch.cuda.HalfTensor', 'torch.cuda.BFloat16Tensor']: float16_params_this_group.append(param) # Create a copy main_param = param.detach().clone().float() # Copy tensor model parallel attributes. - tensor_parallel.copy_tensor_model_parallel_attributes(main_param, - param) + tensor_parallel.copy_tensor_model_parallel_attributes(main_param, param) if hasattr(param, 'shared'): main_param.shared = param.shared # Replace the optimizer params with the new fp32 copy. @@ -412,26 +417,25 @@ def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, fp32_from_float16_params_this_group.append(main_param) # Reset existing state dict key to the new main param. if param in self.optimizer.state: - self.optimizer.state[main_param] \ - = self.optimizer.state.pop(param) + self.optimizer.state[main_param] = self.optimizer.state.pop(param) # fp32 params. elif param.type() == 'torch.cuda.FloatTensor': fp32_params_this_group.append(param) param_group['params'][i] = param else: - raise TypeError('Wrapped parameters must be one of ' - 'torch.cuda.FloatTensor, ' - 'torch.cuda.HalfTensor, or ' - 'torch.cuda.BFloat16Tensor. ' - 'Received {}'.format(param.type())) + raise TypeError( + 'Wrapped parameters must be one of ' + 'torch.cuda.FloatTensor, ' + 'torch.cuda.HalfTensor, or ' + 'torch.cuda.BFloat16Tensor. ' + 'Received {}'.format(param.type()) + ) self.float16_groups.append(float16_params_this_group) - self.fp32_from_float16_groups.append( - fp32_from_float16_params_this_group) + self.fp32_from_float16_groups.append(fp32_from_float16_params_this_group) self.fp32_from_fp32_groups.append(fp32_params_this_group) - def zero_grad(self, set_to_none=True): """We only need to zero the model related parameters, i.e., float16_groups & fp32_from_fp32_groups. 
We additionally zero @@ -445,7 +449,6 @@ def zero_grad(self, set_to_none=True): for group in self.fp32_from_fp32_groups: _zero_grad_group_helper(group, set_to_none) - def _collect_main_grad_data_for_unscaling(self): main_grads = [] @@ -461,25 +464,21 @@ def _collect_main_grad_data_for_unscaling(self): for main_param in main_group: if main_param.grad is not None: main_grads.append(main_param.grad.data) - - return main_grads + return main_grads def _get_model_and_main_params_data_float16(self): model_data = [] main_data = [] - for model_group, main_group in zip(self.float16_groups, - self.fp32_from_float16_groups): + for model_group, main_group in zip(self.float16_groups, self.fp32_from_float16_groups): for model_param, main_param in zip(model_group, main_group): model_data.append(model_param.data) main_data.append(main_param.data) return model_data, main_data - def _copy_model_grads_to_main_grads(self): # This only needs to be done for the float16 group. - for model_group, main_group in zip(self.float16_groups, - self.fp32_from_float16_groups): + for model_group, main_group in zip(self.float16_groups, self.fp32_from_float16_groups): for model_param, main_param in zip(model_group, main_group): if self.params_have_main_grad and hasattr(model_param, 'main_grad'): main_param.grad = model_param.main_grad.float() @@ -498,20 +497,19 @@ def _copy_model_grads_to_main_grads(self): for model_param in model_group: model_param.grad = model_param.main_grad - def _copy_main_params_to_model_params(self): # Only needed for the float16 params. model_data, main_data = self._get_model_and_main_params_data_float16() - _multi_tensor_copy_this_to_that(this=main_data, that=model_data, - overflow_buf=self._dummy_overflow_buf) - + _multi_tensor_copy_this_to_that( + this=main_data, that=model_data, overflow_buf=self._dummy_overflow_buf + ) def _copy_model_params_to_main_params(self): # Only needed for the float16 params. model_data, main_data = self._get_model_and_main_params_data_float16() - _multi_tensor_copy_this_to_that(this=model_data, that=main_data, - overflow_buf=self._dummy_overflow_buf) - + _multi_tensor_copy_this_to_that( + this=model_data, that=main_data, overflow_buf=self._dummy_overflow_buf + ) def state_dict(self): state_dict = {} @@ -521,73 +519,77 @@ def state_dict(self): state_dict['fp32_from_fp16_params'] = self.fp32_from_float16_groups return state_dict - def load_state_dict(self, state_dict): # Optimizer. optimizer_key = 'optimizer' if optimizer_key not in state_dict: optimizer_key = 'optimizer_state_dict' - print_rank_0('***WARNING*** loading optimizer from ' - 'an old checkpoint ...') + print_rank_0('***WARNING*** loading optimizer from ' 'an old checkpoint ...') self.optimizer.load_state_dict(state_dict[optimizer_key]) # Grad scaler. if 'grad_scaler' not in state_dict: if self.fp16: - print_rank_0('***WARNING*** found an old checkpoint, will not ' - 'load grad scaler ...') + print_rank_0( + '***WARNING*** found an old checkpoint, will not ' 'load grad scaler ...' + ) else: if self.grad_scaler: self.grad_scaler.load_state_dict(state_dict['grad_scaler']) else: - print_rank_0('***WARNING*** fould the grad scaler in the ' - 'checkpoint but it is None in the class. ' - 'Skipping loading grad scaler ...') + print_rank_0( + '***WARNING*** fould the grad scaler in the ' + 'checkpoint but it is None in the class. ' + 'Skipping loading grad scaler ...' + ) # Copy data for the main params. 
fp32_from_float16_params_key = 'fp32_from_fp16_params' if fp32_from_float16_params_key not in state_dict: fp32_from_float16_params_key = 'fp32_from_fp16' for current_group, saved_group in zip( - self.fp32_from_float16_groups, - state_dict[fp32_from_float16_params_key]): + self.fp32_from_float16_groups, state_dict[fp32_from_float16_params_key] + ): for current_param, saved_param in zip(current_group, saved_group): current_param.data.copy_(saved_param.data) class FP32Optimizer(MegatronOptimizer): - - def __init__(self, optimizer, clip_grad, - log_num_zeros_in_grad, - check_for_nan_in_grad, - params_have_main_grad): + def __init__( + self, + optimizer, + clip_grad, + log_num_zeros_in_grad, + check_for_nan_in_grad, + params_have_main_grad, + ): super(FP32Optimizer, self).__init__( - optimizer, clip_grad, log_num_zeros_in_grad, - check_for_nan_in_grad, params_have_main_grad) + optimizer, + clip_grad, + log_num_zeros_in_grad, + check_for_nan_in_grad, + params_have_main_grad, + ) self._scale = torch.tensor([1.0], dtype=torch.float, device='cuda') - def zero_grad(self, set_to_none=True): """Copied from torch.optim.optimizer""" for group in self.optimizer.param_groups: _zero_grad_group_helper(group['params'], set_to_none) - def get_loss_scale(self): """FP32 optimizer does not do any scaling.""" return self._scale - @torch.no_grad() def step(self, args, timers): """Clip gradients (if needed) and step the base optimizer. Always return successful since there is no overflow.""" # Copy main_grads to grads. - timers('optimizer-copy-to-main-grad', log_level=1).start( - barrier=args.barrier_with_L1_time) + timers('optimizer-copy-to-main-grad', log_level=1).start(barrier=args.barrier_with_L1_time) if self.params_have_main_grad: for param_group in self.optimizer.param_groups: for param in param_group['params']: @@ -596,39 +598,31 @@ def step(self, args, timers): timers('optimizer-copy-to-main-grad').stop() # Clip gradients. - timers('optimizer-clip-main-grad', log_level=1).start( - barrier=args.barrier_with_L1_time) + timers('optimizer-clip-main-grad', log_level=1).start(barrier=args.barrier_with_L1_time) grad_norm = None if self.clip_grad > 0.0: - grad_norm = self.clip_grad_norm(self.clip_grad, - self.check_for_nan_in_grad) + grad_norm = self.clip_grad_norm(self.clip_grad, self.check_for_nan_in_grad) timers('optimizer-clip-main-grad').stop() # count the zeros in the grads - timers('optimizer-count-zeros', log_level=1).start( - barrier=args.barrier_with_L1_time) - num_zeros_in_grad = self.count_zeros() if \ - self.log_num_zeros_in_grad else None + timers('optimizer-count-zeros', log_level=1).start(barrier=args.barrier_with_L1_time) + num_zeros_in_grad = self.count_zeros() if self.log_num_zeros_in_grad else None timers('optimizer-count-zeros').stop() # Update parameters. - timers('optimizer-inner-step', log_level=1).start( - barrier=args.barrier_with_L1_time) + timers('optimizer-inner-step', log_level=1).start(barrier=args.barrier_with_L1_time) self.optimizer.step() timers('optimizer-inner-step').stop() # No overflow for FP32 optimizer. 
return True, grad_norm, num_zeros_in_grad - def reload_model_params(self): pass - def state_dict(self): return self.optimizer.state_dict() - def load_state_dict(self, state_dict): self.optimizer.load_state_dict(state_dict) @@ -652,25 +646,25 @@ def __init__(self, chained_optimizers): self.param_groups = [] for optimizer in self.chained_optimizers: self.param_groups += optimizer.param_groups - + def zero_grad(self, set_to_none=True): for optimizer in self.chained_optimizers: optimizer.zero_grad(set_to_none) def get_loss_scale(self): return self.chained_optimizers[0].get_loss_scale() - + def reload_model_params(self): for optimizer in self.chained_optimizers: optimizer.reload_model_params() def state_dict(self): return [optimizer.state_dict() for optimizer in self.chained_optimizers] - + def load_state_dict(self, state_dict): for optimizer, state in zip(self.chained_optimizers, state_dict): optimizer.load_state_dict(state) - + def step(self, args, timers): """ChainedOptimizer will step all optimizers one by one. @@ -684,9 +678,9 @@ def step(self, args, timers): for optimizer in self.chained_optimizers: _update_successful, _grad_norm, _num_zeros_in_grad = optimizer.step(args, timers) update_successful &= _update_successful - grad_norms += [_grad_norm if _grad_norm else 0.] + grad_norms += [_grad_norm if _grad_norm else 0.0] num_zeros_in_grad += _num_zeros_in_grad if _num_zeros_in_grad else 0 - grad_norm = math.sqrt(sum([x**2 for x in grad_norms])) + grad_norm = math.sqrt(sum([x ** 2 for x in grad_norms])) return update_successful, grad_norm, num_zeros_in_grad @@ -721,8 +715,9 @@ def load_parameter_state(self, filename): else: states = [None] * num_of_optimizers - assert len(states) == num_of_optimizers, "Number of optimizers in "\ - "checkpoint does not match number of optimizers in model." + assert len(states) == num_of_optimizers, ( + "Number of optimizers in " "checkpoint does not match number of optimizers in model." 
+ ) for optimizer, state in zip(self.chained_optimizers, states): if hasattr(optimizer, 'load_parameter_state_from_state_dict'): diff --git a/megatron/optimizer/utils.py b/megatron/optimizer/utils.py index f4b7cbd634..6376f45de8 100644 --- a/megatron/optimizer/utils.py +++ b/megatron/optimizer/utils.py @@ -13,7 +13,7 @@ def shard_buffer(buffer): data_parallel_world_size = mpu.get_data_parallel_world_size(with_context_parallel=True) assert buffer.numel() % data_parallel_world_size == 0 shard_size = buffer.numel() // data_parallel_world_size - sharded_buffer = [buffer[(r*shard_size):((r+1)*shard_size)] - for r in range(data_parallel_world_size)] + sharded_buffer = [ + buffer[(r * shard_size) : ((r + 1) * shard_size)] for r in range(data_parallel_world_size) + ] return sharded_buffer - From 17545b327035666caf29416b4eedf361e237186b Mon Sep 17 00:00:00 2001 From: Zhengjiang Date: Thu, 18 Jan 2024 17:13:57 +0800 Subject: [PATCH 136/296] Remove hardcoded data cache path --- megatron/core/datasets/blended_megatron_dataset_builder.py | 1 - 1 file changed, 1 deletion(-) diff --git a/megatron/core/datasets/blended_megatron_dataset_builder.py b/megatron/core/datasets/blended_megatron_dataset_builder.py index 39f6d23630..c5c509ea7c 100644 --- a/megatron/core/datasets/blended_megatron_dataset_builder.py +++ b/megatron/core/datasets/blended_megatron_dataset_builder.py @@ -38,7 +38,6 @@ def __init__( self.cls = cls self.sizes = sizes self.config = config - self.config.path_to_cache = '/lustre/fsw/portfolios/hwinf/users/zshao/onelogger-test/Megatron-LM/data_cache' def build(self) -> List[Optional[Union[BlendedDataset, MegatronDataset]]]: """Build all dataset splits according to the provided blend(s) From 6c0e7a9e26f158e6b18940afc80372a2fa6eac90 Mon Sep 17 00:00:00 2001 From: Zhengjiang Date: Thu, 18 Jan 2024 22:49:42 +0800 Subject: [PATCH 137/296] Change --enable-onelogger to --enable-one-logger for consistent naming --- megatron/arguments.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/megatron/arguments.py b/megatron/arguments.py index 26fed39c49..9ca35611ee 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -735,7 +735,7 @@ def _add_logging_args(parser): help='The wandb experiment name.') group.add_argument('--wandb-save-dir', type=str, default='', help='Path to save the wandb results locally.') - group.add_argument('--enable-onelogger', action='store_true', + group.add_argument('--enable-one-logger', action='store_true', help='If set, use one_logger to track e2e metrics') return parser From bf9c0a10d3fb5bf652554e866166f62455133903 Mon Sep 17 00:00:00 2001 From: Zhengjiang Date: Thu, 18 Jan 2024 23:08:20 +0800 Subject: [PATCH 138/296] Add ImportError catch for one_logger --- megatron/global_vars.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/megatron/global_vars.py b/megatron/global_vars.py index 664092c10b..50d8e75b94 100644 --- a/megatron/global_vars.py +++ b/megatron/global_vars.py @@ -198,9 +198,15 @@ def _set_one_logger(args): _ensure_var_is_not_initialized(_GLOBAL_ONE_LOGGER, 'one logger') if args.enable_onelogger and args.rank == (args.world_size - 1): - from one_logger.core import OneLogger - one_logger = OneLogger() - _GLOBAL_ONE_LOGGER = one_logger + try: + from one_logger.core import OneLogger + one_logger = OneLogger() + _GLOBAL_ONE_LOGGER = one_logger + except BaseException: + print('WARNING: one_logger package is required to enable e2e metrics ' + 'tracking. 
Try pip install ' + '--index-url=https://sc-hw-artf.nvidia.com/api/pypi/hwinf-ml-pypi/simple' + ' one_logger to install it') def _set_adlr_autoresume(args): From 85c403437f34366b8d220db65793824b6790adaa Mon Sep 17 00:00:00 2001 From: Zhengjiang Date: Thu, 18 Jan 2024 23:15:18 +0800 Subject: [PATCH 139/296] Add message on how to install one_logger --- megatron/arguments.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/megatron/arguments.py b/megatron/arguments.py index 9ca35611ee..0f7f47365e 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -736,7 +736,11 @@ def _add_logging_args(parser): group.add_argument('--wandb-save-dir', type=str, default='', help='Path to save the wandb results locally.') group.add_argument('--enable-one-logger', action='store_true', - help='If set, use one_logger to track e2e metrics') + help='If set, use one_logger to track E2E metrics' + 'For installation, please try command: `pip install ' + '--index-url=https://sc-hw-artf.nvidia.com/api/pypi/hwinf-ml-pypi/simple' + ' one_logger` or go to https://gitlab-master.nvidia.com/hwinf-dcm/onelogger ' + 'for more details') return parser From 54de98ddc97ec05cff81e61983708695dda6fd23 Mon Sep 17 00:00:00 2001 From: Zhengjiang Date: Thu, 18 Jan 2024 23:17:04 +0800 Subject: [PATCH 140/296] Better code formatting --- megatron/training.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/megatron/training.py b/megatron/training.py index d5d6fa8edd..a34c0efcab 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -803,9 +803,9 @@ def track_e2e_metrics(): train_duration = timers('interval-time').active_time() # overall_elapsed train_samples = args.consumed_train_samples - train_samples_start train_iterations = iteration - iteration_start - train_iterations_time_msecs_avg = train_duration*1000.0 / train_iterations + train_iterations_time_msecs_avg = (train_duration * 1000.0) / train_iterations if eval_iterations: - validation_iterations_time_msecs_avg = eval_duration*1000.0 / eval_iterations + validation_iterations_time_msecs_avg = (eval_duration * 1000.0) / eval_iterations else: validation_iterations_time_msecs_avg = None From 3c44fb9f611db452e1a0c71356272e51be650b61 Mon Sep 17 00:00:00 2001 From: jiemingz Date: Wed, 10 Jan 2024 13:51:30 -0800 Subject: [PATCH 141/296] add is_first_microbatch for TE Signed-off-by: jiemingz --- megatron/core/models/gpt/gpt_model.py | 5 ++++ megatron/core/pipeline_parallel/schedules.py | 26 +++++++++++++++++++ .../custom_layers/transformer_engine.py | 10 ++++--- megatron/core/transformer/module.py | 4 +++ 4 files changed, 41 insertions(+), 4 deletions(-) diff --git a/megatron/core/models/gpt/gpt_model.py b/megatron/core/models/gpt/gpt_model.py index 0f3348ad3b..e4f7c122ff 100644 --- a/megatron/core/models/gpt/gpt_model.py +++ b/megatron/core/models/gpt/gpt_model.py @@ -239,3 +239,8 @@ def sharded_state_dict(self, prefix: str = '') -> dict: sharded_state_dict[output_layer_key] = sharded_output_layer_tensor return sharded_state_dict + + def set_is_first_microbatch(self): + for m in self.modules(): + if hasattr(m, "is_first_microbatch"): + m.is_first_microbatch = True diff --git a/megatron/core/pipeline_parallel/schedules.py b/megatron/core/pipeline_parallel/schedules.py index 05a70ec700..2d8fb850d0 100644 --- a/megatron/core/pipeline_parallel/schedules.py +++ b/megatron/core/pipeline_parallel/schedules.py @@ -156,6 +156,7 @@ def forward_step( config, collect_non_loss_data=False, checkpoint_activations_microbatch=None, + 
is_first_microbatch=False, ): """Forward step for passed-in model. @@ -166,6 +167,9 @@ def forward_step( if config.timers is not None: config.timers('forward-compute', log_level=2).start() + if is_first_microbatch and hasattr(model, 'set_is_first_microbatch'): + model.set_is_first_microbatch() + unwrap_output_tensor = False if not isinstance(input_tensor, list): input_tensor = [input_tensor] @@ -280,6 +284,13 @@ def backward_step(input_tensor, output_tensor, output_tensor_grad, model_type, c return input_tensor_grad +def check_first_val_step(first_val_step, forward_only, cond): + if (first_val_step is not None) and forward_only: + return first_val_step and cond + else: + return cond + + def forward_backward_no_pipelining( *, forward_step_func, @@ -291,6 +302,7 @@ def forward_backward_no_pipelining( decoder_seq_length: int = None, # unused forward_only: bool = False, collect_non_loss_data: bool = False, + first_val_step: bool = None, ): """Run forward and backward passes with no pipeline parallelism (no inter-stage communication). @@ -333,6 +345,7 @@ def forward_backward_no_pipelining( forward_data_store, config, collect_non_loss_data, + is_first_microbatch=check_first_val_step(first_val_step, forward_only, i == 0), ) if not forward_only: backward_step(input_tensor, output_tensor, output_tensor_grad, model_type, config) @@ -348,6 +361,9 @@ def forward_backward_no_pipelining( forward_data_store, config, collect_non_loss_data, + is_first_microbatch=check_first_val_step( + first_val_step, forward_only, num_microbatches == 1 + ), ) if not forward_only: @@ -375,6 +391,7 @@ def forward_backward_pipelining_with_interleaving( decoder_seq_length: int = None, forward_only: bool = False, collect_non_loss_data: bool = False, + first_val_step: bool = None, ): """Run interleaved 1F1B schedule (model split into model chunks), with communication between pipeline stages as needed. @@ -560,6 +577,7 @@ def forward_step_helper(microbatch_id, checkpoint_activations_microbatch): if len(input_tensors[model_chunk_id]) == len(output_tensors[model_chunk_id]): input_tensors[model_chunk_id].append(None) input_tensor = input_tensors[model_chunk_id][-1] + output_tensor = forward_step( forward_step_func, data_iterator[model_chunk_id], @@ -570,6 +588,9 @@ def forward_step_helper(microbatch_id, checkpoint_activations_microbatch): config, collect_non_loss_data, checkpoint_activations_microbatch, + check_first_val_step( + first_val_step, forward_only, is_first_microbatch_for_model_chunk(microbatch_id), + ), ) output_tensors[model_chunk_id].append(output_tensor) @@ -1060,6 +1081,7 @@ def forward_backward_pipelining_without_interleaving( decoder_seq_length: int = None, forward_only: bool = False, collect_non_loss_data: bool = False, + first_val_step: bool = None, ): """Run non-interleaved 1F1B schedule, with communication between pipeline stages. 
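For context, the pieces introduced in the hunks above fit together roughly as follows: check_first_val_step decides whether a given microbatch should count as "first", forward_step then calls model.set_is_first_microbatch(), and the TE linear wrappers consume the flag on their next forward and clear it. Below is a minimal, self-contained sketch of that flow; ToyTELinear and ToyModel are hypothetical stand-ins, not Megatron or Transformer Engine APIs.

import torch


def check_first_val_step(first_val_step, forward_only, cond):
    # During validation (forward_only=True) the caller may pin the flag via
    # first_val_step; otherwise the per-microbatch condition is used as-is.
    if (first_val_step is not None) and forward_only:
        return first_val_step and cond
    else:
        return cond


class ToyTELinear(torch.nn.Linear):
    """Hypothetical stand-in for a Transformer Engine linear wrapper."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.is_first_microbatch = True  # e.g. refresh the fp8 weight cache when True

    def forward(self, x):
        out = super().forward(x)
        self.is_first_microbatch = False  # consumed; stays False until reset
        return out


class ToyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = ToyTELinear(4, 4)

    def set_is_first_microbatch(self):
        # Same module walk as MegatronModule.set_is_first_microbatch in the diff.
        for m in self.modules():
            if hasattr(m, "is_first_microbatch"):
                m.is_first_microbatch = True


model = ToyModel()
for i in range(3):  # three microbatches of one step
    if check_first_val_step(None, False, i == 0):
        model.set_is_first_microbatch()
    model.fc(torch.randn(2, 4))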
@@ -1179,6 +1201,7 @@ def enable_grad_sync(): config, collect_non_loss_data, checkpoint_activations_microbatch, + check_first_val_step(first_val_step, forward_only, i == 0), ) send_forward(output_tensor, send_tensor_shapes, config) @@ -1215,6 +1238,9 @@ def enable_grad_sync(): config, collect_non_loss_data, checkpoint_activations_microbatch, + check_first_val_step( + first_val_step, forward_only, (i == 0) and (num_warmup_microbatches == 0) + ), ) if forward_only: diff --git a/megatron/core/transformer/custom_layers/transformer_engine.py b/megatron/core/transformer/custom_layers/transformer_engine.py index 69ff08652d..d31709afa6 100644 --- a/megatron/core/transformer/custom_layers/transformer_engine.py +++ b/megatron/core/transformer/custom_layers/transformer_engine.py @@ -98,7 +98,7 @@ def __init__( # ourselves. This way our forward always returns two values # and we don't have to deal with the zero length Tensor. self.te_return_bias = skip_bias_add and bias - + self.is_first_microbatch = True if skip_weight_param_allocation: raise ValueError( 'Transformer Engine linear layers do not support skip_weight_param_allocation' @@ -133,7 +133,8 @@ def __init__( ) def forward(self, x): - out = super().forward(x) + out = super().forward(x, self.is_first_microbatch) + self.is_first_microbatch = False # TE only returns a tuple when return_bias is True, otherwise # it returns a single Tensor, we always want to return two @@ -182,7 +183,7 @@ def __init__( # ourselves. This way our forward always returns two values # and we don't have to deal with the zero length Tensor. self.te_return_bias = skip_bias_add and bias - + self.is_first_microbatch = True extra_kwargs = _get_extra_te_kwargs(config) # Only Transformer-Engine version >= 0.11.0 supports `RMSNorm` @@ -224,7 +225,8 @@ def __init__( ) def forward(self, x): - out = super().forward(x) + out = super().forward(x, self.is_first_microbatch) + self.is_first_microbatch = False # TE only returns a tuple when return_bias is True, otherwise # it returns a single Tensor, we always want to return two diff --git a/megatron/core/transformer/module.py b/megatron/core/transformer/module.py index d20074aa07..b3d8f73fdb 100644 --- a/megatron/core/transformer/module.py +++ b/megatron/core/transformer/module.py @@ -155,3 +155,7 @@ def sharded_state_dict(self, prefix=''): def load_state_dict(self, state_dict, strict=True): self.module.load_state_dict(state_dict, strict=strict) + + def set_is_first_microbatch(self): + if hasattr(self.module, 'set_is_first_microbatch'): + self.module.set_is_first_microbatch() From 27879a7dea4a82101ff13820a39218ff068396cd Mon Sep 17 00:00:00 2001 From: jiemingz Date: Wed, 10 Jan 2024 15:30:30 -0800 Subject: [PATCH 142/296] add arg name Signed-off-by: jiemingz --- megatron/core/transformer/custom_layers/transformer_engine.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/megatron/core/transformer/custom_layers/transformer_engine.py b/megatron/core/transformer/custom_layers/transformer_engine.py index d31709afa6..31294c7ff4 100644 --- a/megatron/core/transformer/custom_layers/transformer_engine.py +++ b/megatron/core/transformer/custom_layers/transformer_engine.py @@ -133,7 +133,7 @@ def __init__( ) def forward(self, x): - out = super().forward(x, self.is_first_microbatch) + out = super().forward(x, is_first_microbatch=self.is_first_microbatch) self.is_first_microbatch = False # TE only returns a tuple when return_bias is True, otherwise @@ -225,7 +225,7 @@ def __init__( ) def forward(self, x): - out = 
super().forward(x, self.is_first_microbatch) + out = super().forward(x, is_first_microbatch=self.is_first_microbatch) self.is_first_microbatch = False # TE only returns a tuple when return_bias is True, otherwise From 7dc2ee8f628be0e5fb1d6556a0012892d08fd24e Mon Sep 17 00:00:00 2001 From: jiemingz Date: Fri, 12 Jan 2024 15:31:39 -0800 Subject: [PATCH 143/296] add docstring and move set_is_first_microbatch Signed-off-by: jiemingz --- megatron/core/models/gpt/gpt_model.py | 5 ----- megatron/core/pipeline_parallel/schedules.py | 4 ++++ megatron/core/transformer/module.py | 12 ++++++++---- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/megatron/core/models/gpt/gpt_model.py b/megatron/core/models/gpt/gpt_model.py index e4f7c122ff..0f3348ad3b 100644 --- a/megatron/core/models/gpt/gpt_model.py +++ b/megatron/core/models/gpt/gpt_model.py @@ -239,8 +239,3 @@ def sharded_state_dict(self, prefix: str = '') -> dict: sharded_state_dict[output_layer_key] = sharded_output_layer_tensor return sharded_state_dict - - def set_is_first_microbatch(self): - for m in self.modules(): - if hasattr(m, "is_first_microbatch"): - m.is_first_microbatch = True diff --git a/megatron/core/pipeline_parallel/schedules.py b/megatron/core/pipeline_parallel/schedules.py index 2d8fb850d0..1a45a6036f 100644 --- a/megatron/core/pipeline_parallel/schedules.py +++ b/megatron/core/pipeline_parallel/schedules.py @@ -88,6 +88,9 @@ def forward_step(data_iterator, model): collect_non_loss_data (optional, bool, default=False): TODO + first_val_step (bool, optional): Is the first step of the validation phase. Used by + Transformer Engine modules to only update their fp8 weights only on the first validation step. + """ pipeline_model_parallel_size = parallel_state.get_pipeline_model_parallel_world_size() if pipeline_model_parallel_size > 1: @@ -158,6 +161,7 @@ def forward_step( checkpoint_activations_microbatch=None, is_first_microbatch=False, ): + """Forward step for passed-in model. If first stage, input tensor is obtained from data_iterator, otherwise diff --git a/megatron/core/transformer/module.py b/megatron/core/transformer/module.py index b3d8f73fdb..b123af504e 100644 --- a/megatron/core/transformer/module.py +++ b/megatron/core/transformer/module.py @@ -59,6 +59,14 @@ def sharded_state_dict(self, prefix: str = ''): """ return self.state_dict(prefix=prefix, keep_vars=True) + def set_is_first_microbatch(self): + """Sets the is_first_microbatch flag if it exists. When this flag is set, TE modules will update their fp8 parameter cache. 
+ + """ + for m in self.modules(): + if hasattr(m, "is_first_microbatch"): + m.is_first_microbatch = True + def conversion_helper(val, conversion): if not isinstance(val, (tuple, list)): @@ -155,7 +163,3 @@ def sharded_state_dict(self, prefix=''): def load_state_dict(self, state_dict, strict=True): self.module.load_state_dict(state_dict, strict=strict) - - def set_is_first_microbatch(self): - if hasattr(self.module, 'set_is_first_microbatch'): - self.module.set_is_first_microbatch() From 3e19c761321934ce32a67151f6984fe65c58dbbb Mon Sep 17 00:00:00 2001 From: Selvaraj Anandaraj Date: Thu, 18 Jan 2024 14:23:41 -0800 Subject: [PATCH 144/296] Fixed formatting Signed-off-by: Selvaraj Anandaraj --- megatron/core/tensor_parallel/layers.py | 14 ++++--- .../custom_layers/transformer_engine.py | 11 ++--- .../core/transformer/transformer_block.py | 41 ++++++++++++------- .../core/transformer/transformer_config.py | 14 +++++-- 4 files changed, 51 insertions(+), 29 deletions(-) diff --git a/megatron/core/tensor_parallel/layers.py b/megatron/core/tensor_parallel/layers.py index 2bd50241eb..64e066f55c 100644 --- a/megatron/core/tensor_parallel/layers.py +++ b/megatron/core/tensor_parallel/layers.py @@ -724,9 +724,10 @@ def forward(self, input_: torch.Tensor, weight: Optional[torch.Tensor] = None): if self.config.cpu_offloading_context is not None: if self.config.cpu_offloading_context.inside_context == True: - assert self.config.cpu_offloading == False, \ - "CPU Offloading cannot be enabled while using non-TE modules" - + assert ( + self.config.cpu_offloading == False + ), "CPU Offloading cannot be enabled while using non-TE modules" + bias = self.bias if not self.skip_bias_add else None if ( @@ -894,11 +895,12 @@ def forward(self, input_): - output - bias """ - + if self.config.cpu_offloading_context is not None: if self.config.cpu_offloading_context.inside_context == True: - assert self.config.cpu_offloading == False, \ - "CPU Offloading cannot be enabled while using non-TE modules" + assert ( + self.config.cpu_offloading == False + ), "CPU Offloading cannot be enabled while using non-TE modules" # Set up backprop all-reduce. 
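The guard added to the non-TE ColumnParallelLinear/RowParallelLinear forwards above only relies on a context object that exposes an inside_context flag. A small illustrative sketch of that pattern follows; FakeOffloadContext and guarded_forward are hypothetical helpers, not Megatron APIs.

from types import SimpleNamespace


class FakeOffloadContext:
    """Hypothetical stand-in for TE's CPU-offload context manager."""

    def __init__(self):
        self.inside_context = False

    def __enter__(self):
        self.inside_context = True
        return self

    def __exit__(self, *exc):
        self.inside_context = False


def guarded_forward(config, compute):
    # Mirrors the check in the diff: inside the offload context, only TE
    # modules may run with cpu_offloading enabled.
    if config.cpu_offloading_context is not None:
        if config.cpu_offloading_context.inside_context:
            assert config.cpu_offloading is False, \
                "CPU Offloading cannot be enabled while using non-TE modules"
    return compute()


cfg = SimpleNamespace(cpu_offloading=False, cpu_offloading_context=FakeOffloadContext())
with cfg.cpu_offloading_context:
    guarded_forward(cfg, lambda: "ok")  # passes: offloading is disabled on this non-TE path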
if self.input_is_parallel: diff --git a/megatron/core/transformer/custom_layers/transformer_engine.py b/megatron/core/transformer/custom_layers/transformer_engine.py index a144d9d93f..1ee3a7e242 100644 --- a/megatron/core/transformer/custom_layers/transformer_engine.py +++ b/megatron/core/transformer/custom_layers/transformer_engine.py @@ -440,11 +440,12 @@ def forward( try: - from transformer_engine.pytorch.attention import _SplitAlongDim - from transformer_engine.pytorch.cpu_offload import get_cpu_offload_context - SplitAlongDim = _SplitAlongDim.apply + from transformer_engine.pytorch.attention import _SplitAlongDim + from transformer_engine.pytorch.cpu_offload import get_cpu_offload_context + + SplitAlongDim = _SplitAlongDim.apply except ImportError: - SplitAlongDim = None - get_cpu_offload_context = None + SplitAlongDim = None + get_cpu_offload_context = None diff --git a/megatron/core/transformer/transformer_block.py b/megatron/core/transformer/transformer_block.py index 4efcaaeaa0..218b6764d8 100644 --- a/megatron/core/transformer/transformer_block.py +++ b/megatron/core/transformer/transformer_block.py @@ -10,14 +10,16 @@ from megatron.core import InferenceParams, parallel_state, tensor_parallel from megatron.core.fusions.fused_layer_norm import FusedLayerNorm -from megatron.core.transformer.custom_layers.transformer_engine import TENorm +from megatron.core.transformer.custom_layers.transformer_engine import ( + TENorm, + get_cpu_offload_context, +) from megatron.core.transformer.enums import AttnMaskType from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.spec_utils import ModuleSpec, build_module from megatron.core.transformer.transformer_config import TransformerConfig from megatron.core.transformer.transformer_layer import TransformerLayer from megatron.core.utils import make_sharded_tensor_for_checkpoint, make_viewless_tensor -from megatron.core.transformer.custom_layers.transformer_engine import get_cpu_offload_context def get_num_layers_to_build(config: TransformerConfig) -> int: @@ -104,16 +106,23 @@ def __init__( self.checkpoint_core_attention = self.config.recompute_granularity == 'selective' if get_cpu_offload_context is not None: - self.offload_context, self.group_prefetch_offload_commit_async = get_cpu_offload_context( - self.config.cpu_offloading, - self.config.cpu_offloading_num_layers, - self.config.cpu_offloading_activations, - self.config.cpu_offloading_weights - ) - self.config.cpu_offloading_context = self.offload_context if self.config.cpu_offloading else None + ( + self.offload_context, + self.group_prefetch_offload_commit_async, + ) = get_cpu_offload_context( + self.config.cpu_offloading, + self.config.cpu_offloading_num_layers, + self.config.cpu_offloading_activations, + self.config.cpu_offloading_weights, + ) + self.config.cpu_offloading_context = ( + self.offload_context if self.config.cpu_offloading else None + ) else: - assert self.config.cpu_offloading == False, "CPU Offloading is enabled when TE is not present" - + assert ( + self.config.cpu_offloading == False + ), "CPU Offloading is enabled when TE is not present" + self.offload_context, self.group_prefetch_offload_commit_async = nullcontext(), None self.config.cpu_offloading_context = None @@ -333,9 +342,13 @@ def forward( rotary_pos_emb=rotary_pos_emb, inference_params=inference_params, ) - - if torch.is_grad_enabled() and self.config.cpu_offloading and self.group_prefetch_offload_commit_async is not None: - hidden_states = 
self.group_prefetch_offload_commit_async(hidden_states) + + if ( + torch.is_grad_enabled() + and self.config.cpu_offloading + and self.group_prefetch_offload_commit_async is not None + ): + hidden_states = self.group_prefetch_offload_commit_async(hidden_states) # Final layer norm. if self.post_process and self.post_layer_norm: diff --git a/megatron/core/transformer/transformer_config.py b/megatron/core/transformer/transformer_config.py index 7c84d1ad0c..18601431d0 100644 --- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -2,7 +2,7 @@ import types from dataclasses import dataclass -from typing import Callable, Optional, Tuple, ContextManager +from typing import Callable, ContextManager, Optional, Tuple import torch import torch.nn.functional as F @@ -168,13 +168,19 @@ def __post_init__(self): raise ValueError(f'num_moe_experts must be non None to use expert-parallel.') if self.cpu_offloading_num_layers < 0 or self.cpu_offloading_num_layers >= self.num_layers: - raise ValueError(f'CPU offloading can be done only for layers less than {self.num_layers}') + raise ValueError( + f'CPU offloading can be done only for layers less than {self.num_layers}' + ) if self.cpu_offloading and self.pipeline_model_parallel_size > 1: - raise ValueError(f'Currently there is no support for Pipeline parallelism with CPU offloading') + raise ValueError( + f'Currently there is no support for Pipeline parallelism with CPU offloading' + ) if self.cpu_offloading and self.recompute_granularity is not None: - raise ValueError(f'CPU offloading does not work when activation recomputation is enabled') + raise ValueError( + f'CPU offloading does not work when activation recomputation is enabled' + ) if self.recompute_granularity is not None: if not self.recompute_granularity in ['full', 'selective']: From cf1a1c6647f14b2ea66c0c0e4a9df1b04da3f995 Mon Sep 17 00:00:00 2001 From: Hongbin Liu Date: Thu, 18 Jan 2024 18:55:49 -0800 Subject: [PATCH 145/296] fix a bug in branch and format --- megatron/core/fusions/fused_bias_swiglu.py | 8 +++++--- megatron/core/transformer/mlp.py | 4 +++- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/megatron/core/fusions/fused_bias_swiglu.py b/megatron/core/fusions/fused_bias_swiglu.py index 6710407e89..5fb30605bb 100644 --- a/megatron/core/fusions/fused_bias_swiglu.py +++ b/megatron/core/fusions/fused_bias_swiglu.py @@ -1,4 +1,4 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. 
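A rough, runnable sketch of the offload wiring above: get_cpu_offload_context is expected to return a (context manager, commit function) pair, the block falls back to nullcontext() when TE is unavailable, and the commit hook is applied to the hidden states while gradients are enabled. fake_get_cpu_offload_context and TinyBlock below are no-op stand-ins for illustration only.

from contextlib import nullcontext

import torch


def fake_get_cpu_offload_context(enabled, num_layers, offload_activations, offload_weights):
    """Hypothetical stand-in for transformer_engine.pytorch.cpu_offload.get_cpu_offload_context."""
    return nullcontext(), (lambda t: t)  # (offload_context, group_prefetch_offload_commit_async)


class TinyBlock(torch.nn.Module):
    def __init__(self, cpu_offloading=False):
        super().__init__()
        self.cpu_offloading = cpu_offloading
        self.layers = torch.nn.ModuleList(torch.nn.Linear(8, 8) for _ in range(3))
        if cpu_offloading:
            self.offload_context, self.commit_async = fake_get_cpu_offload_context(True, 1, True, True)
        else:
            self.offload_context, self.commit_async = nullcontext(), None

    def forward(self, hidden_states):
        for layer in self.layers:
            # Run each layer under the offload context so its activations can be staged to CPU.
            with self.offload_context:
                hidden_states = layer(hidden_states)
            if torch.is_grad_enabled() and self.cpu_offloading and self.commit_async is not None:
                hidden_states = self.commit_async(hidden_states)
        return hidden_states


out = TinyBlock(cpu_offloading=True)(torch.randn(2, 8))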
import torch import torch.nn.functional as F @@ -62,6 +62,7 @@ def backward(ctx, grad_output): tmp = swiglu_back(grad_output, input[0]) return tmp + def bias_swiglu_impl(input, bias): shape = input.shape input = input.view(-1, shape[2]) @@ -71,5 +72,6 @@ def bias_swiglu_impl(input, bias): output = SwiGLUFunction.apply(input) return output.view(shape[0], shape[1], -1) -#bias_swiglu_impl = BiasSwiGLUFunction.apply -#swiglu_impl = SwiGLUFunction.apply + +# bias_swiglu_impl = BiasSwiGLUFunction.apply +# swiglu_impl = SwiGLUFunction.apply diff --git a/megatron/core/transformer/mlp.py b/megatron/core/transformer/mlp.py index 2a32831b77..899f352354 100644 --- a/megatron/core/transformer/mlp.py +++ b/megatron/core/transformer/mlp.py @@ -89,7 +89,7 @@ def forward(self, hidden_states): if self.activation_func == F.gelu: assert self.config.add_bias_linear is True intermediate_parallel = bias_gelu_impl(intermediate_parallel, bias_parallel) - elif self.activation_func == F.silu: + elif self.activation_func == F.silu and self.config.gated_linear_unit: intermediate_parallel = bias_swiglu_impl(intermediate_parallel, bias_parallel) else: raise ValueError("Only support fusion of gelu and swiglu") @@ -97,9 +97,11 @@ def forward(self, hidden_states): if bias_parallel is not None: intermediate_parallel = intermediate_parallel + bias_parallel if self.config.gated_linear_unit: + def glu(x): x = torch.chunk(x, 2, dim=-1) return self.config.activation_func(x[0]) * x[1] + intermediate_parallel = glu(intermediate_parallel) else: intermediate_parallel = self.activation_func(intermediate_parallel) From 568da5a1bd1c91df80e1737eafcd41b24e7c0bc1 Mon Sep 17 00:00:00 2001 From: Hongbin Liu Date: Thu, 18 Jan 2024 19:28:05 -0800 Subject: [PATCH 146/296] fix tests --- megatron/arguments.py | 5 ++--- tests/unit_tests/transformer/moe/test_grouped_mlp.py | 4 ++-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/megatron/arguments.py b/megatron/arguments.py index 91b7828833..20ccff58ac 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -899,9 +899,8 @@ def _add_training_args(parser): group.add_argument('--no-bias-gelu-fusion', action='store_false', help='Disable bias and gelu fusion.', dest='bias_gelu_fusion') - group.add_argument('--no-bias-swiglu-fusion', action='store_false', - help='Disable bias and swiglu fusion.', - dest='bias_swiglu_fusion') + group.add_argument('--bias-swiglu-fusion', action='store_true', + help='enable bias and swiglu fusion.') group.add_argument('--no-bias-dropout-fusion', action='store_false', help='Disable bias and dropout fusion.', dest='bias_dropout_fusion') diff --git a/tests/unit_tests/transformer/moe/test_grouped_mlp.py b/tests/unit_tests/transformer/moe/test_grouped_mlp.py index d74ea9c35f..84fb5bbfde 100644 --- a/tests/unit_tests/transformer/moe/test_grouped_mlp.py +++ b/tests/unit_tests/transformer/moe/test_grouped_mlp.py @@ -39,7 +39,7 @@ def setup_method(self, method, use_cpu_initialization=False, swiglu=True): num_layers=num_layers, hidden_size=self.hidden_size, num_attention_heads=4, num_moe_experts=self.num_experts, use_cpu_initialization=self.use_cpu_initialization, add_bias_linear=False, gated_linear_unit=self.gated_linear_unit, - bias_gelu_fusion=False, + bias_activation_fusion=False, bf16=True, params_dtype=torch.bfloat16) self.fc1_ffn_hidden_size = tf_config.ffn_hidden_size @@ -155,4 +155,4 @@ def test_gpu_forward(self): GMLP_test.test_constructor() GMLP_test.test_weight_init_value_the_same() GMLP_test.test_gpu_forward() - 
GMLP_test.teardown_method(method=None) \ No newline at end of file + GMLP_test.teardown_method(method=None) From de9428a70103d38638d21712b73a8da6c520a7c6 Mon Sep 17 00:00:00 2001 From: Hongbin Liu Date: Thu, 18 Jan 2024 21:05:04 -0800 Subject: [PATCH 147/296] enable swiglu and rope fusion by default and disable them in tests --- megatron/arguments.py | 10 ++++++++-- ...pretrain_gpt3_distributed_resume_checkpoint_test.sh | 2 ++ .../gpt3/pretrain_gpt3_distributed_test.sh | 2 ++ 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/megatron/arguments.py b/megatron/arguments.py index 20ccff58ac..28855a5b5d 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -899,11 +899,17 @@ def _add_training_args(parser): group.add_argument('--no-bias-gelu-fusion', action='store_false', help='Disable bias and gelu fusion.', dest='bias_gelu_fusion') - group.add_argument('--bias-swiglu-fusion', action='store_true', - help='enable bias and swiglu fusion.') + group.add_argument('--no-bias-swiglu-fusion', action='store_false', + help='Disable bias and swiglu fusion, the fusion is ' + 'available only when using megatron-core.', + dest='bias_swiglu_fusion') group.add_argument('--no-bias-dropout-fusion', action='store_false', help='Disable bias and dropout fusion.', dest='bias_dropout_fusion') + group.add_argument('--no-rope-fusion', action='store_false', + help='Disable rope fusion, the fusion is available ' + 'only when using megatron-core.', + dest='apply_rope_fusion') group.add_argument('--use-flash-attn', action='store_true', help='use FlashAttention implementation of attention. ' 'https://arxiv.org/abs/2205.14135') diff --git a/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_resume_checkpoint_test.sh b/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_resume_checkpoint_test.sh index 83caf3f669..c38cdf5b01 100755 --- a/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_resume_checkpoint_test.sh +++ b/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_resume_checkpoint_test.sh @@ -64,6 +64,8 @@ torchrun $DISTRIBUTED_ARGS \ --tensor-model-parallel-size $TP_SIZE \ --pipeline-model-parallel-size $PP_SIZE \ --no-gradient-accumulation-fusion \ + --no-bias-swiglu-fusion \ + --no-rope-fusion \ --fp16 echo 50 > $CHECKPOINT_PATH/latest_checkpointed_iteration.txt diff --git a/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh b/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh index 234bc75858..c5961c8f17 100755 --- a/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh +++ b/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh @@ -94,6 +94,8 @@ torch_run_cmd="torchrun $DISTRIBUTED_ARGS \ --transformer-impl $TRANSFORMER_IMPL \ --tensor-model-parallel-size $TP_SIZE \ --pipeline-model-parallel-size $PP_SIZE \ + --no-bias-swiglu-fusion \ + --no-rope-fusion \ ${VP_SIZE:+--num-layers-per-virtual-pipeline-stage "$VP_SIZE"} \ ${ADDITIONAL_PARAMS:+$ADDITIONAL_PARAMS} \ ${USE_MCORE:+--use-mcore-models} \ From 79269fa86049b53109d549f6a634ea55a584e8e5 Mon Sep 17 00:00:00 2001 From: Selvaraj Anandaraj Date: Fri, 19 Jan 2024 09:28:02 -0800 Subject: [PATCH 148/296] Docstring removed for context config Signed-off-by: Selvaraj Anandaraj --- megatron/core/tensor_parallel/layers.py | 8 ++++---- megatron/core/transformer/transformer_block.py | 4 ++-- megatron/core/transformer/transformer_config.py | 3 +-- 3 files changed, 7 insertions(+), 8 deletions(-) diff 
--git a/megatron/core/tensor_parallel/layers.py b/megatron/core/tensor_parallel/layers.py index 64e066f55c..08fbb1298d 100644 --- a/megatron/core/tensor_parallel/layers.py +++ b/megatron/core/tensor_parallel/layers.py @@ -722,8 +722,8 @@ def forward(self, input_: torch.Tensor, weight: Optional[torch.Tensor] = None): f"not {expected_shape} as expected" ) - if self.config.cpu_offloading_context is not None: - if self.config.cpu_offloading_context.inside_context == True: + if self.config._cpu_offloading_context is not None: + if self.config._cpu_offloading_context.inside_context == True: assert ( self.config.cpu_offloading == False ), "CPU Offloading cannot be enabled while using non-TE modules" @@ -896,8 +896,8 @@ def forward(self, input_): - bias """ - if self.config.cpu_offloading_context is not None: - if self.config.cpu_offloading_context.inside_context == True: + if self.config._cpu_offloading_context is not None: + if self.config._cpu_offloading_context.inside_context == True: assert ( self.config.cpu_offloading == False ), "CPU Offloading cannot be enabled while using non-TE modules" diff --git a/megatron/core/transformer/transformer_block.py b/megatron/core/transformer/transformer_block.py index 218b6764d8..f23169f393 100644 --- a/megatron/core/transformer/transformer_block.py +++ b/megatron/core/transformer/transformer_block.py @@ -115,7 +115,7 @@ def __init__( self.config.cpu_offloading_activations, self.config.cpu_offloading_weights, ) - self.config.cpu_offloading_context = ( + self.config._cpu_offloading_context = ( self.offload_context if self.config.cpu_offloading else None ) else: @@ -124,7 +124,7 @@ def __init__( ), "CPU Offloading is enabled when TE is not present" self.offload_context, self.group_prefetch_offload_commit_async = nullcontext(), None - self.config.cpu_offloading_context = None + self.config._cpu_offloading_context = None self._build_layers() self.num_layers_per_pipeline_rank = len(self.layers) diff --git a/megatron/core/transformer/transformer_config.py b/megatron/core/transformer/transformer_config.py index 18601431d0..2c8541444b 100644 --- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -53,7 +53,6 @@ class TransformerConfig(ModelParallelConfig): fp8_wgrad (bool): When set to False, override FP8 config options and do the wgrad computation in higher precision. Defaults to True. cpu_offloading (bool): When set to True, all the activations are offloaded to the CPU asynchronously cpu_offloading_num_layers (int): Tells the number of transformer layers for which activations has to be offloaded. - cpu_offloading_context (ContextManager): Holds the context manager from TE which is supposed to add PyT hooks for offload/reload of data from CPU. cpu_offloading_activations (bool): If True, offloads the activations to CPU cpu_offloading_weights (bool): If True, offloads the weights to CPU clone_scatter_output_in_embedding (bool): When set to true, clone the output of scatter_to_sequence_parallel_region in embedding layer to facilitate garbage collection of input. @@ -118,7 +117,7 @@ class TransformerConfig(ModelParallelConfig): # cpu offload cpu_offloading: bool = False cpu_offloading_num_layers: int = 0 - cpu_offloading_context: ContextManager = None + _cpu_offloading_context: ContextManager = None # Used for internal use only, not to be set by the user. TODO: Need to move to the 'right' place when possible. 
cpu_offloading_activations: bool = True cpu_offloading_weights: bool = True From 4b05862a749f6886bb6f2d7fa15b12bd2be7b519 Mon Sep 17 00:00:00 2001 From: Selvaraj Anandaraj Date: Fri, 19 Jan 2024 09:43:19 -0800 Subject: [PATCH 149/296] Decoupled cpu offloading and SplitAlongDim imports Signed-off-by: Selvaraj Anandaraj --- .../core/transformer/custom_layers/transformer_engine.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/megatron/core/transformer/custom_layers/transformer_engine.py b/megatron/core/transformer/custom_layers/transformer_engine.py index 1ee3a7e242..f0cd074cd7 100644 --- a/megatron/core/transformer/custom_layers/transformer_engine.py +++ b/megatron/core/transformer/custom_layers/transformer_engine.py @@ -441,11 +441,17 @@ def forward( try: from transformer_engine.pytorch.attention import _SplitAlongDim - from transformer_engine.pytorch.cpu_offload import get_cpu_offload_context SplitAlongDim = _SplitAlongDim.apply except ImportError: SplitAlongDim = None + +try: + + from transformer_engine.pytorch.cpu_offload import get_cpu_offload_context + +except ImportError: + get_cpu_offload_context = None From 473225f9a51c422735fb75a52bf902ee0ca1fedf Mon Sep 17 00:00:00 2001 From: Jaemin Choi Date: Fri, 19 Jan 2024 14:02:43 -0800 Subject: [PATCH 150/296] Add jit_fuser to switch between torch.jit.script and torch.compile --- megatron/core/fusions/fused_bias_dropout.py | 6 ++++-- megatron/core/fusions/fused_bias_gelu.py | 6 ++++-- megatron/core/fusions/fused_bias_swiglu.py | 10 ++++++---- megatron/core/jit.py | 11 +++++++++++ megatron/core/transformer/utils.py | 5 +++-- megatron/model/fused_bias_gelu.py | 5 +++-- megatron/model/transformer.py | 5 +++-- megatron/model/utils.py | 5 +++-- 8 files changed, 37 insertions(+), 16 deletions(-) create mode 100644 megatron/core/jit.py diff --git a/megatron/core/fusions/fused_bias_dropout.py b/megatron/core/fusions/fused_bias_dropout.py index 14c1fe0d71..08af02b099 100644 --- a/megatron/core/fusions/fused_bias_dropout.py +++ b/megatron/core/fusions/fused_bias_dropout.py @@ -3,6 +3,8 @@ import torch +from megatron.core.jit import jit_fuser + def _bias_dropout_add_func(x_with_bias, residual, prob, training): # type: (Tuple[Tensor, Optional[Tensor]], Tensor, float, bool) -> Tensor @@ -43,14 +45,14 @@ def _bias_dropout_add(x_with_bias, residual, prob): return _bias_dropout_add -@torch.jit.script +@jit_fuser def bias_dropout_add_fused_train( x_with_bias: Tuple[torch.Tensor, Optional[torch.Tensor]], residual: torch.Tensor, prob: float, ) -> torch.Tensor: return _bias_dropout_add_func(x_with_bias, residual, prob, True) -@torch.jit.script +@jit_fuser def bias_dropout_add_fused_inference( x_with_bias: Tuple[torch.Tensor, Optional[torch.Tensor]], residual: torch.Tensor, prob: float, ) -> torch.Tensor: diff --git a/megatron/core/fusions/fused_bias_gelu.py b/megatron/core/fusions/fused_bias_gelu.py index 9c791c1807..2b5467467c 100644 --- a/megatron/core/fusions/fused_bias_gelu.py +++ b/megatron/core/fusions/fused_bias_gelu.py @@ -2,6 +2,8 @@ import torch +from megatron.core.jit import jit_fuser + ###### BIAS GELU FUSION/ NO AUTOGRAD ################ # 1/sqrt(2*pi)-> 0.3989423 # 1/sqrt(2) -> 0.70710678 @@ -11,7 +13,7 @@ # x * 0.5 * (1.0 + torch.erf(x * 0.70710678)) -@torch.jit.script +@jit_fuser def bias_gelu(bias, y): x = bias + y return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))) @@ -20,7 +22,7 @@ def bias_gelu(bias, y): # gradient of tanh approximation of gelu # gradient of actual gelu is: # 0.5 * (1. 
+ torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x) -@torch.jit.script +@jit_fuser def bias_gelu_back(g, bias, y): x = bias + y tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)) diff --git a/megatron/core/fusions/fused_bias_swiglu.py b/megatron/core/fusions/fused_bias_swiglu.py index 5fb30605bb..de4cb753e5 100644 --- a/megatron/core/fusions/fused_bias_swiglu.py +++ b/megatron/core/fusions/fused_bias_swiglu.py @@ -3,16 +3,18 @@ import torch import torch.nn.functional as F +from megatron.core.jit import jit_fuser + ###### BIAS SWIGLU FUSION/ NO AUTOGRAD ################ -@torch.jit.script +@jit_fuser def swiglu(y): y_1, y_2 = torch.chunk(y, 2, -1) return F.silu(y_1) * y_2 -@torch.jit.script +@jit_fuser def bias_swiglu(y, bias): y = y + bias return swiglu(y) @@ -21,7 +23,7 @@ def bias_swiglu(y, bias): # gradient of tanh approximation of gelu # gradient of actual gelu is: # 0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x) -@torch.jit.script +@jit_fuser def swiglu_back(g, y): y_1, y_2 = torch.chunk(y, 2, -1) return torch.cat( @@ -29,7 +31,7 @@ def swiglu_back(g, y): ) -@torch.jit.script +@jit_fuser def bias_swiglu_back(g, y, bias): y = y + bias return swiglu_back(g, y) diff --git a/megatron/core/jit.py b/megatron/core/jit.py new file mode 100644 index 0000000000..8bb18d393c --- /dev/null +++ b/megatron/core/jit.py @@ -0,0 +1,11 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + +import torch + +TORCH_MAJOR = int(torch.__version__.split(".")[0]) +TORCH_MINOR = int(torch.__version__.split(".")[1]) + +jit_fuser = torch.jit.script +# nvFuser is deprecated in PyTorch JIT starting from 2.2 +if (TORCH_MAJOR > 2) or (TORCH_MAJOR == 2 and TORCH_MINOR >= 2): + jit_fuser = torch.compile diff --git a/megatron/core/transformer/utils.py b/megatron/core/transformer/utils.py index cc82b5bd3b..c5bf81b4bf 100644 --- a/megatron/core/transformer/utils.py +++ b/megatron/core/transformer/utils.py @@ -8,6 +8,7 @@ from megatron.core import parallel_state from megatron.core.dist_checkpointing.mapping import ShardedObject, ShardedStateDict, StateDict +from megatron.core.jit import jit_fuser from megatron.core.utils import ( make_sharded_tensor_for_checkpoint, make_tp_sharded_tensor_for_checkpoint, @@ -29,7 +30,7 @@ def attention_mask_func(attention_scores, attention_mask): return attention_scores -@torch.jit.script +@jit_fuser def gelu_impl(x): """OpenAI's gelu implementation.""" return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x * (1.0 + 0.044715 * x * x))) @@ -40,7 +41,7 @@ def openai_gelu(x): # This is actually Python equivalent of torch.nn.functional.gelu(), also with type hints for ONNX exporter -@torch.jit.script +@jit_fuser def erf_gelu(x): return ( x * 0.5 * (torch.erf(x / 1.41421).to(dtype=x.dtype) + torch.ones_like(x).to(dtype=x.dtype)) diff --git a/megatron/model/fused_bias_gelu.py b/megatron/model/fused_bias_gelu.py index 29222db024..e00e63148b 100644 --- a/megatron/model/fused_bias_gelu.py +++ b/megatron/model/fused_bias_gelu.py @@ -1,6 +1,7 @@ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
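The new megatron/core/jit.py selects the fuser once at import time: torch.jit.script below PyTorch 2.2 and torch.compile from 2.2 onward (where nvFuser is deprecated in TorchScript). The same idea in standalone form, usable as a decorator, is sketched below; bias_gelu_like is an illustrative function, not the library's.

import torch

major, minor = (int(v) for v in torch.__version__.split(".")[:2])
jit_fuser = torch.compile if (major, minor) >= (2, 2) else torch.jit.script


@jit_fuser
def bias_gelu_like(bias: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    # Tanh approximation of GELU applied to (bias + y), as in the fused kernels above.
    x = bias + y
    return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))


# y = bias_gelu_like(torch.zeros(4), torch.randn(4))  # first call triggers scripting/compilation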
import torch +from megatron.core.jit import jit_fuser ###### BIAS GELU FUSION/ NO AUTOGRAD ################ @@ -11,7 +12,7 @@ # actual gelu is: # x * 0.5 * (1.0 + torch.erf(x * 0.70710678)) -@torch.jit.script +@jit_fuser def bias_gelu(bias, y): x = bias + y return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))) @@ -19,7 +20,7 @@ def bias_gelu(bias, y): # gradient of tanh approximation of gelu # gradient of actual gelu is: # 0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x) -@torch.jit.script +@jit_fuser def bias_gelu_back(g, bias, y): x = bias + y tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)) diff --git a/megatron/model/transformer.py b/megatron/model/transformer.py index 676e47dc78..8a47171d38 100644 --- a/megatron/model/transformer.py +++ b/megatron/model/transformer.py @@ -25,6 +25,7 @@ get_data_parallel_rng_tracker_name ) from megatron.core.parallel_state import get_tensor_model_parallel_group, get_tensor_and_expert_parallel_group +from megatron.core.jit import jit_fuser try: from einops import rearrange @@ -830,7 +831,7 @@ def _bias_dropout_add(x, bias, residual, prob): return _bias_dropout_add -@torch.jit.script +@jit_fuser def bias_dropout_add_fused_train(x: torch.Tensor, bias: Optional[torch.Tensor], residual: torch.Tensor, @@ -838,7 +839,7 @@ def bias_dropout_add_fused_train(x: torch.Tensor, return bias_dropout_add(x, bias, residual, prob, True) -@torch.jit.script +@jit_fuser def bias_dropout_add_fused_inference(x: torch.Tensor, bias: Optional[torch.Tensor], residual: torch.Tensor, diff --git a/megatron/model/utils.py b/megatron/model/utils.py index 15fbe9ad9e..ace7f346c4 100644 --- a/megatron/model/utils.py +++ b/megatron/model/utils.py @@ -8,6 +8,7 @@ from megatron import get_args from megatron.model import LayerNorm, RMSNorm +from megatron.core.jit import jit_fuser def init_method_normal(sigma): """Init method based on N(0, sigma).""" @@ -42,7 +43,7 @@ def get_linear_layer(rows, columns, init_method): return layer -@torch.jit.script +@jit_fuser def gelu_impl(x): """OpenAI's gelu implementation.""" return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x * @@ -53,7 +54,7 @@ def openai_gelu(x): #This is actually Python equivalent of torch.nn.functional.gelu(), also with type hints for ONNX exporter -@torch.jit.script +@jit_fuser def erf_gelu(x): return x * 0.5 * (torch.erf(x / 1.41421).to(dtype=x.dtype)+torch.ones_like(x).to(dtype=x.dtype)) From 716204ee49c8175a4148a84b93ff07f0ea7e1df1 Mon Sep 17 00:00:00 2001 From: Joel Lamy-Poirier Date: Fri, 19 Jan 2024 18:48:24 -0500 Subject: [PATCH 151/296] misc --- megatron/model/language_model.py | 10 +++++----- megatron/model/transformer.py | 27 ++++++++++++++++++++------- megatron/training.py | 4 ++-- 3 files changed, 27 insertions(+), 14 deletions(-) diff --git a/megatron/model/language_model.py b/megatron/model/language_model.py index 2080ad8ae0..c1819e212c 100644 --- a/megatron/model/language_model.py +++ b/megatron/model/language_model.py @@ -436,10 +436,10 @@ def __init__(self, if key[0]=="layers": # Shift layer index. 
key[1]=str(int(key[1])+1) - if key[2]=="input_layernorm": - key[2]="layer_norm_1" - elif key[2]=="post_attention_layernorm": - key[2]="layer_norm_2" + if key[2]=="input_norm": + key[2]="norm_1" + elif key[2]=="post_attention_norm": + key[2]="norm_2" elif key[2]=="self_attention": key[2]="self_attn" elif key[3]=="dense_h_to_4h": @@ -448,7 +448,7 @@ def __init__(self, key[3]="layer_2" else: assert key[0]=="final_norm", key[0] - key=["layers",str(args.encoder_num_layers+1)]+key + key=["layers",str(args.encoder_num_layers+1), "final_norm"]+key[1:] elif key[0]=="embedding": key=["layers", "0", "_".join(key[1:])] else: diff --git a/megatron/model/transformer.py b/megatron/model/transformer.py index 42367e28da..88d0e1aed6 100644 --- a/megatron/model/transformer.py +++ b/megatron/model/transformer.py @@ -814,10 +814,10 @@ def forward(self, hidden_states, attention_mask, context_layer = rearrange(context_layer, 'b s h d -> s b (h d)').contiguous() if self._debug_transformer: - log_tensor(f"Layer {self.layer_number} Query", query_layer, level=self._debug_transformer) - log_tensor(f"Layer {self.layer_number} Key", key_layer, level=self._debug_transformer) - log_tensor(f"Layer {self.layer_number} Value", value_layer, level=self._debug_transformer) - log_tensor(f"Layer {self.layer_number} Attn context", context_layer, level=self._debug_transformer) + log_tensor(f"Layer {self.layer_number} Query", query_layer.transpose(0,1), level=self._debug_transformer) + log_tensor(f"Layer {self.layer_number} Key", key_layer.transpose(0,1), level=self._debug_transformer) + log_tensor(f"Layer {self.layer_number} Value", value_layer.transpose(0,1), level=self._debug_transformer) + log_tensor(f"Layer {self.layer_number} Attn context", context_layer.transpose(0,1), level=self._debug_transformer) # ================= # Output. [sq, b, h] @@ -1171,6 +1171,13 @@ def forward(self, hidden_states, attention_mask, # Layer norm at the beginning of the transformer layer. norm_output = self.input_norm(hidden_states) + if self._debug_transformer: + log_tensor( + f"Layer {self.layer_number} norm 1", + norm_output.transpose(0,1), + level=self._debug_transformer + ) + # Self attention. attention_output, attention_bias = \ self.self_attention( @@ -1182,7 +1189,7 @@ def forward(self, hidden_states, attention_mask, if self._debug_transformer: log_tensor( f"Layer {self.layer_number} Attn output", - hidden_states + attention_bias, + (hidden_states + attention_bias).transpose(0,1), level=self._debug_transformer ) @@ -1220,11 +1227,17 @@ def forward(self, hidden_states, attention_mask, norm_input = residual + self.drop_path(out) if self._debug_transformer: - log_tensor(f"Layer {self.layer_number} Attn residual", norm_input, level=self._debug_transformer) + log_tensor(f"Layer {self.layer_number} Attn residual", norm_input.transpose(0,1), level=self._debug_transformer) # Layer norm post the self attention. norm_output = self.post_attention_norm(norm_input) + if self._debug_transformer: + log_tensor( + f"Layer {self.layer_number} norm 2", + norm_output.transpose(0,1), + level=self._debug_transformer + ) # Cross attention. 
if self.layer_type == LayerType.encoder: pass @@ -1264,7 +1277,7 @@ def forward(self, hidden_states, attention_mask, if self._debug_transformer: log_tensor( f"Layer {self.layer_number} MLP output", - mlp_output + mlp_bias, + (mlp_output + mlp_bias).transpose(0,1), level=self._debug_transformer ) diff --git a/megatron/training.py b/megatron/training.py index 4407a75feb..8bf37445df 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -215,7 +215,7 @@ def pretrain(train_valid_test_dataset_provider, verbose=True, write_to_tensorboard=not args.skip_train) -def save_tensor_logs(step:str|int): +def save_tensor_logs(step:str): args=get_args() if args.structured_logs_dir is not None and (tensor_log_stats:=get_logged_tensor_stats()): tensor_logs_dir = os.path.join(args.structured_logs_dir, f"runs/0/artifacts/{torch.distributed.get_rank()}") @@ -851,7 +851,7 @@ def trace_fn(p: torch.profiler.profile): if profiler is not None: profiler.step() - save_tensor_logs(iteration) + save_tensor_logs(f"train_{iteration}") # Autoresume if args.adlr_autoresume and \ From c79503850b23081c77e2bf3680f4bb4327324804 Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Thu, 14 Dec 2023 12:50:26 +0000 Subject: [PATCH 152/296] Router and communication refactoring. --- megatron/arguments.py | 31 +- megatron/core/models/gpt/gpt_layer_specs.py | 7 +- megatron/core/pipeline_parallel/schedules.py | 6 + .../core/transformer/moe/base_moe_layer.py | 357 ++++++++++++++---- megatron/core/transformer/moe/grouped_mlp.py | 21 +- megatron/core/transformer/moe/moe_layer.py | 90 +++++ megatron/core/transformer/moe/switch_mlp.py | 26 +- .../core/transformer/transformer_config.py | 2 + 8 files changed, 421 insertions(+), 119 deletions(-) create mode 100644 megatron/core/transformer/moe/moe_layer.py diff --git a/megatron/arguments.py b/megatron/arguments.py index 64de0c77e8..4c10623f43 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -36,6 +36,7 @@ def parse_args(extra_args_provider=None, ignore_unknown_args=False): parser = _add_autoresume_args(parser) parser = _add_biencoder_args(parser) parser = _add_vision_args(parser) + parser = _add_moe_args(parser) parser = _add_logging_args(parser) parser = _add_inference_args(parser) parser = _add_transformer_engine_args(parser) @@ -653,14 +654,6 @@ def _add_network_size_args(parser): group.add_argument('--bert-no-binary-head', action='store_false', help='Disable BERT binary head.', dest='bert_binary_head') - group.add_argument('--num-experts', type=int, default=None, - help='Number of Experts in Switch Transformer (None means no Switch)') - group.add_argument('--moe-grouped-gemm', action='store_true', - help='When there are multiple experts per rank, compress ' - 'multiple local (potentially small) gemms in a single kernel ' - 'launch to improve the utilization and performance by ' - 'leveraging the Grouped GEMM feature introduced since ' - 'CUTLASS 2.8 (https://github.com/fanshiqing/grouped_gemm).') group.add_argument('--untie-embeddings-and-output-weights', action='store_true', help='Untie embeddings and output weights.'), return parser @@ -1414,6 +1407,28 @@ def _add_vision_args(parser): return parser +def _add_moe_args(parser): + group = parser.add_argument_group(title="moe") + + # general moe arguements + group.add_argument('--num-experts', type=int, default=None, + help='Number of Experts in MoE (None means no MoE)') + group.add_argument('--moe-grouped-gemm', action='store_true', + help='When there are multiple experts per rank, compress ' + 'multiple local 
(potentially small) gemms in a single kernel ' + 'launch to improve the utilization and performance by ' + 'leveraging the Grouped GEMM feature introduced since ' + 'CUTLASS 2.8 (https://github.com/fanshiqing/grouped_gemm).') + group.add_argument('--moe-loss-coeff', type=float, default=0.01, + help='Scaling coefficient for adding MoE loss to model loss') + group.add_argument('--moe-router-type', type=str, default='top1', + help='Options for router type, support top1 and ec') + # zero token drop moe arguments + + # token drop moe arugments + + return parser + def _add_experimental_args(parser): group = parser.add_argument_group(title='experimental') diff --git a/megatron/core/models/gpt/gpt_layer_specs.py b/megatron/core/models/gpt/gpt_layer_specs.py index 93d6d68248..07f10fbf5a 100755 --- a/megatron/core/models/gpt/gpt_layer_specs.py +++ b/megatron/core/models/gpt/gpt_layer_specs.py @@ -14,8 +14,7 @@ from megatron.core.transformer.enums import AttnMaskType from megatron.core.transformer.identity_op import IdentityOp from megatron.core.transformer.mlp import MLP, MLPSubmodules -from megatron.core.transformer.moe.grouped_mlp import GroupedMLP -from megatron.core.transformer.moe.switch_mlp import SwitchMLP +from megatron.core.transformer.moe.moe_layer import GroupedGemmMoELayer, SwitchMLPLayer from megatron.core.transformer.spec_utils import ModuleSpec from megatron.core.transformer.transformer_layer import TransformerLayer, TransformerLayerSubmodules @@ -92,11 +91,11 @@ def _get_mlp_module_spec( ) elif moe_grouped_gemm: # GroupedMLP based MoE with modules in megatron core. - return GroupedMLP + return GroupedGemmMoELayer else: # SwitchMLP based MoE with modules in megatron core. return ModuleSpec( - module=SwitchMLP, + module=SwitchMLPLayer, submodules=MLPSubmodules( linear_fc1=ColumnParallelLinear, linear_fc2=RowParallelLinear, ), diff --git a/megatron/core/pipeline_parallel/schedules.py b/megatron/core/pipeline_parallel/schedules.py index 1a45a6036f..23b89883ed 100644 --- a/megatron/core/pipeline_parallel/schedules.py +++ b/megatron/core/pipeline_parallel/schedules.py @@ -9,6 +9,7 @@ from megatron.core import parallel_state from megatron.core.enums import ModelType from megatron.core.pipeline_parallel import p2p_communication +from megatron.core.transformer.moe.base_moe_layer import MoEAuxLossAutoScaler from megatron.core.utils import get_attr_wrapped_model, get_model_config, get_model_type # Types @@ -207,6 +208,11 @@ def forward_step( if config.timers is not None: config.timers('forward-compute').stop() + # set loss scale for the auxiliary loss of MoE layer + if config.num_moe_experts is not None: + loss_scale = config.grad_scale_func(1.0) if config.grad_scale_func is not None else 1.0 + MoEAuxLossAutoScaler.set_loss_scale(loss_scale / num_microbatches) + # If T5 model (or other model with encoder and decoder) # and in decoder stack, then send encoder_hidden_state # downstream as well. diff --git a/megatron/core/transformer/moe/base_moe_layer.py b/megatron/core/transformer/moe/base_moe_layer.py index 976cb1e61b..f5179d0c31 100644 --- a/megatron/core/transformer/moe/base_moe_layer.py +++ b/megatron/core/transformer/moe/base_moe_layer.py @@ -1,67 +1,204 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. 
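The schedules.py change above sets the MoE auxiliary-loss gradient scale to loss_scale / num_microbatches. The MoEAuxLossAutoScaler implementation itself is not shown in these hunks; one plausible shape for such a scaler is an autograd.Function that passes activations through unchanged and injects a scaled gradient for the attached auxiliary loss. AuxLossAutoScaler below is an illustrative sketch under that assumption, not the actual class.

import torch


class AuxLossAutoScaler(torch.autograd.Function):
    main_loss_backward_scale = torch.tensor(1.0)

    @staticmethod
    def forward(ctx, output, aux_loss):
        # Activations are returned untouched; the aux loss is only kept for backward.
        ctx.save_for_backward(aux_loss)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        (aux_loss,) = ctx.saved_tensors
        scale = AuxLossAutoScaler.main_loss_backward_scale.to(aux_loss.device)
        # Pass the activation gradient through and give the aux loss a gradient of `scale`.
        return grad_output, torch.ones_like(aux_loss) * scale

    @staticmethod
    def set_loss_scale(scale):
        AuxLossAutoScaler.main_loss_backward_scale = torch.as_tensor(scale)


# Once per step, mirroring the schedules.py change above:
#   AuxLossAutoScaler.set_loss_scale(loss_scale / num_microbatches)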
from abc import ABC, abstractmethod +from contextlib import nullcontext import torch from megatron.core import parallel_state, tensor_parallel from megatron.core.parallel_state import get_tensor_and_expert_parallel_group from megatron.core.tensor_parallel import get_cuda_rng_tracker, get_data_parallel_rng_tracker_name +from megatron.core.tensor_parallel.random import ( + get_cuda_rng_tracker, + get_data_parallel_rng_tracker_name, +) from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.transformer_config import TransformerConfig -def sinkhorn(cost, tol=0.0001): - "Sinkhorn based MoE routing function" - cost = torch.exp(cost) - d0 = torch.ones(cost.size(0), device=cost.device, dtype=cost.dtype) - d1 = torch.ones(cost.size(1), device=cost.device, dtype=cost.dtype) +class Router(ABC, MegatronModule): + """Base Router class""" - eps = 0.00000001 - error = 1e9 - d1_old = d1 - while error > tol: - d0 = (1 / d0.size(0)) * 1 / (torch.sum(d1 * cost, 1) + eps) - d1 = (1 / d1.size(0)) * 1 / (torch.sum(d0.unsqueeze(1) * cost, 0) + eps) - error = torch.mean(torch.abs(d1_old - d1)) - d1_old = d1 - return d1 * cost * d0.unsqueeze(1) + def __init__(self, config: TransformerConfig) -> None: + """ + Initialize the Router module. + + Args: + config (TransformerConfig): Configuration object for the Transformer model. + """ + super().__init__(config) + self.config = config + self.num_experts = self.config.num_moe_experts + + # Token dispatcher for exchange tokens between experts. + self.token_dispatcher = None + + # Initialize the gate weights. + self.gate = torch.nn.Linear( + self.config.hidden_size, self.config.num_moe_experts, bias=False + ) + with get_cuda_rng_tracker().fork(get_data_parallel_rng_tracker_name()): + config.init_method(self.gate.weight) + setattr(self.gate.weight, 'sequence_parallel', config.sequence_parallel) + + self.fp32_router = False + self.input_jitter = None + + def gating(self, input: torch.Tensor): + """ + Forward pass of the router gate. + + Args: + input (torch.Tensor): Input tensor. + + Returns: + torch.Tensor: Logits tensor. + """ + logits = self.gate(input) + return logits + + def routing(self, logits: torch.Tensor): + """ + Get the routing results. + + Args: + logits (torch.Tensor): Logits tensor. + + Returns: + Tuple[torch.Tensor, torch.Tensor]: Tuple of tensors representing max probs and the indices. + """ + raise NotImplementedError + def dispatch( + self, tokens: torch.Tensor, indices: torch.Tensor, + ): + raise NotImplementedError -def get_router_linear_layer(config): - router = torch.nn.Linear(config.hidden_size, config.num_moe_experts, bias=False) - with get_cuda_rng_tracker().fork(get_data_parallel_rng_tracker_name()): - config.init_method(router.weight) - setattr(router.weight, 'sequence_parallel', config.sequence_parallel) - return router + def restore( + self, expert_output: torch.Tensor, gating: torch.Tensor, indicies: torch.Tensor, + ): + raise NotImplementedError + def apply_input_jitter(self, input, eps=1e-2): + """ + Add noise to the input tensor. + Refer to https://arxiv.org/abs/2101.03961. + + Args: + input (Tensor): Input tensor. + eps (float, optional): Defaults to 1e-2. + + Returns: + Tensor: Jittered input. 
+ """ + if self.input_jitter is None: + self.input_jitter = torch.distributions.uniform.Uniform( + torch.tensor(1.0 - eps, device=input.device), + torch.tensor(1.0 + eps, device=input.device), + ).rsample + return input * self.input_jitter(input.shape) + + def forward(self, input: torch.Tensor): + """ + Forward pass of the router. -class BaseMoELayer(ABC, MegatronModule): + Args: + input (torch.Tensor): Input tensor. + + Returns: + Tuple[torch.Tensor, torch.Tensor]: gating and indices. + """ + self.hidden = input.shape[-1] + + if self.fp32_router: + if self.gate.weight.dtype != torch.float32: + self.gate.weight.data = self.gate.weight.data.float() + assert hasattr(self.gate.weight, 'sequence_parallel') + input = input.float() + + route = self.gating(input) + route = route.view(-1, self.config.num_moe_experts) + + gating, indices = self.routing(route) + + return gating, indices + + def switch_transformer_load_balancing_loss(self, gates, mask): + """ + Calculate the auxiliary loss for better load balacing. + Please refer to the Switch Transformer paper (https://arxiv.org/abs/2101.03961) for details. + + Args: + route (torch.Tensor): The gates tensor. + mask (torch.Tensor): The mask tensor. + + Returns: + torch.Tensor: The auxiliary loss. + """ + gates_mean = gates.mean(dim=0) + selection_mean = mask.float().mean(dim=0) + aux_loss = torch.sum(gates_mean * selection_mean) * self.num_experts + aux_loss *= self.config.moe_loss_coeff + return aux_loss + + +class MoETokenDispatcher: """ - Basic MoE layer. + MoE Token Dispatcher """ - def __init__(self, config: TransformerConfig): - super().__init__(config=config) + def __init__(self, config: TransformerConfig) -> None: + """ + Initialize the MoE Token Dispatcher. + """ + self.config = config - self.config: TransformerConfig = config + def dispatch( + self, tokens: torch.Tensor, indices: torch.Tensor, + ): + """ + Dispatch tokens to experts. - self.router = get_router_linear_layer(self.config) - self.add_bias = config.add_bias_linear - self.sequence_parallel = config.sequence_parallel - self.route_algo = sinkhorn - self.router_activation = torch.sigmoid - self.expert_parallel_size = parallel_state.get_expert_model_parallel_world_size() + Args: + tokens (torch.Tensor): Input tokens. + indices (torch.Tensor): indices tensor. - assert self.config.num_moe_experts % self.expert_parallel_size == 0 - self.num_local_experts = self.config.num_moe_experts // self.expert_parallel_size - local_expert_indices_offset = ( - parallel_state.get_expert_model_parallel_rank() * self.num_local_experts - ) - self.local_expert_indices = [ - local_expert_indices_offset + i for i in range(self.num_local_experts) - ] - self.k = 1 # TODO: self.config.top_k + Returns: + torch.Tensor: Tokens tensor. + """ + raise NotImplementedError + + def restore( + self, expert_output: torch.Tensor, gating: torch.Tensor, indices: torch.Tensor, + ): + """ + Restores the expert output to its original ordering. + + Args: + expert_output (torch.Tensor): The output tensor from the expert models. + gating (torch.Tensor): The gating tensor used to route the inputs to the experts. + indices (torch.Tensor): The indices used to reorder the expert output. + + Returns: + None + """ + raise NotImplementedError + + +class MoEZeroDropTokenDispatcher(MoETokenDispatcher): + """ + ZeroDrop Token Dispatcher + """ + + def __init__(self, num_local_experts, local_expert_indices, config: TransformerConfig) -> None: + """ + Initialize the zero token dropping router. 
+ """ + super().__init__(config=config) + self.num_local_experts = num_local_experts + self.local_expert_indices = local_expert_indices + self.k = 1 + self.add_bias = config.add_bias_linear def gather_indices(self, local_indices): """ Gather tensors and concatenate along the first dimension.""" @@ -81,7 +218,7 @@ def gather_indices(self, local_indices): torch.distributed._all_gather_base(output, local_indices.contiguous(), group=group) return output - def token_permutation(self, hidden_states): + def dispatch(self, hidden_states, max_prob, max_ind): """Dispatch tokens to local experts. It's composed of two stages: (1) Permute the tokens across the expert parallel devices. After this stage, each device receives all of the tokens assigned to its local set of experts @@ -103,26 +240,11 @@ def token_permutation(self, hidden_states): when cross device token permutation is enabled and **AllGahter** is performed. """ self.hidden_shape = hidden_states.shape - route = self.router(hidden_states) - route = route.view(-1, self.config.num_moe_experts) - - if self.training: - with torch.no_grad(): - norm_route = self.route_algo( - route.detach().to(dtype=torch.float32) - ) # explicit fp32 conversion for stability - _, max_ind = torch.topk(norm_route, k=self.k, dim=1) - route = self.router_activation(route) - # max_ind = max_ind.view(-1) - max_prob = torch.gather(route, 1, max_ind) - else: - route = self.router_activation(route) - max_prob, max_ind = torch.topk(route, k=self.k, dim=1) # [S/TP, B, H] -> [S*B/TP, H] hidden_states = hidden_states.view(-1, self.hidden_shape[-1]) - # Stage1: permute the tokens across the expert parallel devices. - if self.sequence_parallel or (self.expert_parallel_size > 1): + # Permute the tokens across the expert parallel devices. + if self.config.sequence_parallel or (self.config.expert_model_parallel_size > 1): # [S*B/TP, H] -> [S*B, H] global_hidden_states = tensor_parallel.gather_from_sequence_parallel_region_to_moe( hidden_states @@ -149,7 +271,6 @@ def token_permutation(self, hidden_states): local_probs = max_prob local_hidden_states = hidden_states global_local_map = None - self.max_prob = local_probs with torch.no_grad(): # The indices of local_indices that give its sorted order along dim 0. @@ -166,11 +287,11 @@ def token_permutation(self, hidden_states): # Reshape indices to be compatible with Tensor.gather indices = indices.view(-1, 1).expand(-1, hidden_states.shape[-1]) permuted_local_hidden_states = torch.gather(local_hidden_states, 0, indices) + return permuted_local_hidden_states, tokens_per_expert, local_probs, indices, global_local_map - return permuted_local_hidden_states, tokens_per_expert, indices, global_local_map - - def token_unpermutation(self, hidden_states, indices, global_local_map=None, bias=None): - """Reverse process of `token_permutation()` which permutes the ouput of local + def restore(self, hidden_states, gating, indices, global_local_map=None, bias=None): + """ + Reverse process of `dispatch()` which permutes the ouput of local experts locallay and across expert parallel rank into the original order to produce the final output. @@ -182,22 +303,20 @@ def token_unpermutation(self, hidden_states, indices, global_local_map=None, bia global_local_map (optional): 2D tensor, a mask of mapping between global and local tokens where each element is True if it's between the local_expert_indices. Only useful when cross device token permutation is enabled and **AllGahter** is performed. - bias: bias if self.add_bias is enabled. 
Returns: output_total: un-permuted updated hidden states output from all local experts with shape of [SeqLen/TP, MBS, HiddenSize] - output_bias_total: un-permuted bias output from all local experts if - self.add_bias is enabled. """ # Stage1: unpermute the tokens and bias locally respectively. + gating = gating.to(dtype=hidden_states.dtype) unpermuted_local_hidden = torch.zeros_like(hidden_states) assert indices.shape == hidden_states.shape unpermuted_local_hidden = unpermuted_local_hidden.scatter(0, indices, hidden_states) # Scale the expert output prior to reduction and subsequent to local unpermutation if k > 1. if self.k > 1: - unpermuted_local_hidden = unpermuted_local_hidden * self.max_prob.view(-1, 1) + unpermuted_local_hidden = unpermuted_local_hidden * gating unpermuted_local_bias = None if self.add_bias: @@ -206,13 +325,13 @@ def token_unpermutation(self, hidden_states, indices, global_local_map=None, bia assert indices.shape == bias.shape unpermuted_local_bias = unpermuted_local_bias.scatter(0, indices, bias) if self.k > 1: - unpermuted_local_bias = unpermuted_local_bias * self.max_prob.view(-1, 1) + unpermuted_local_bias = unpermuted_local_bias * gating output_total = unpermuted_local_hidden - output_bias_total = unpermuted_local_bias + output_bias_total = None - # Stage2: unpermute the tokens across expert parallel devices. - if self.sequence_parallel or (self.expert_parallel_size > 1): + # Unpermute the tokens across expert parallel devices. + if self.config.sequence_parallel or (self.config.expert_model_parallel_size > 1): assert global_local_map is not None, "global_local_map is necessary for `AllGather`." ep_group_size = parallel_state.get_tensor_and_expert_parallel_world_size() # hidden_shape: [SeqLen/TP, MBS, HiddenSize], glboal_num_tokens = SeqLen/TP*MBS*(TP*EP) @@ -244,24 +363,106 @@ def token_unpermutation(self, hidden_states, indices, global_local_map=None, bia output_bias_total / parallel_state.get_tensor_model_parallel_world_size() ) if self.k == 1: - output_total = output_total * self.max_prob.view(-1, 1) + output_total = output_total * gating output_total = output_total.view(self.hidden_shape) if self.add_bias: assert output_bias_total is not None if self.k == 1: - output_bias_total = output_bias_total * self.max_prob.view(-1, 1) + output_bias_total = output_bias_total * gating output_bias_total = output_bias_total.view(self.hidden_shape) else: output_bias_total = None return output_total, output_bias_total - @abstractmethod - def forward(self, hidden_states): - """Forward computation of MoE layer. + +class ZeroDropSinkhornRouter(Router): + """ + ZeroDrop Sinkhorn Router + """ + + def __init__(self, num_local_experts, local_expert_indices, config: TransformerConfig) -> None: + """ + Initialize the zero token dropping router. 
+ """ + super().__init__(config=config) + self.route_algo = self.sinkhorn + self.router_activation = torch.sigmoid + self.moe_aux_loss = self.switch_transformer_load_balancing_loss + self.token_dispatcher = MoEZeroDropTokenDispatcher( + num_local_experts, local_expert_indices, config + ) + + def sinkhorn(self, cost, tol=0.0001): + "Sinkhorn based MoE routing function" + cost = torch.exp(cost) + d0 = torch.ones(cost.size(0), device=cost.device, dtype=cost.dtype) + d1 = torch.ones(cost.size(1), device=cost.device, dtype=cost.dtype) + + eps = 0.00000001 + error = 1e9 + d1_old = d1 + while error > tol: + d0 = (1 / d0.size(0)) * 1 / (torch.sum(d1 * cost, 1) + eps) + d1 = (1 / d1.size(0)) * 1 / (torch.sum(d0.unsqueeze(1) * cost, 0) + eps) + error = torch.mean(torch.abs(d1_old - d1)) + d1_old = d1 + return d1 * cost * d0.unsqueeze(1) + + def moe_loss(self, gatings, indicies): + mask = torch.nn.functional.one_hot(indicies, num_classes=self.num_experts).sum(dim=1) + aux_loss = self.moe_aux_loss(gatings, mask) + gatings = MoEAuxLossAutoScaler.apply(gatings, aux_loss) + return gatings + + def routing(self, route: torch.Tensor): + """ + Get the routing results. Args: - hidden_states: input activation of shape [SeqLen, MBS, HiddenSize] + logits (torch.Tensor): Logits tensor. + Returns: + Tuple[torch.Tensor, torch.Tensor]: Tuple of tensors representing max probs and the indices. """ - pass + route = route.view(-1, self.config.num_moe_experts) + k = 1 # TODO: self.config.top_k + + if self.training: + with torch.no_grad(): + norm_route = self.route_algo( + route.detach().to(dtype=torch.float32) + ) # explicit fp32 conversion for stability + _, indices = torch.topk(norm_route, k=k, dim=1) + route = self.router_activation(route) + gatings = torch.gather(route, 1, indices) + else: + route = self.router_activation(route) + gatings, indices = torch.topk(route, k=k, dim=1) + + # gatings = self.moe_loss(gatings, indices) + + return gatings, indices + + +class MoEAuxLossAutoScaler(torch.autograd.Function): + main_loss_backward_scale = 1 + + @staticmethod + def forward(ctx, output, aux_loss): + # Preserve the aux_loss by storing it in the context to avoid garbage collection. + ctx.save_for_backward(aux_loss) + return output + + @staticmethod + def backward(ctx, grad_output): + # Scale the auxiliary loss. + (aux_loss,) = ctx.saved_tensors + aux_loss_backward_scale = MoEAuxLossAutoScaler.main_loss_backward_scale + scaled_aux_loss_grad = torch.ones_like(aux_loss) * aux_loss_backward_scale + return grad_output, scaled_aux_loss_grad + + @staticmethod + def set_loss_scale(scale): + # Scale the aux loss in the same way as the main loss. + MoEAuxLossAutoScaler.main_loss_backward_scale = scale diff --git a/megatron/core/transformer/moe/grouped_mlp.py b/megatron/core/transformer/moe/grouped_mlp.py index 802cfcde14..22aa915aee 100644 --- a/megatron/core/transformer/moe/grouped_mlp.py +++ b/megatron/core/transformer/moe/grouped_mlp.py @@ -9,21 +9,21 @@ _initialize_affine_weight_gpu, ) from megatron.core.tensor_parallel.utils import divide +from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.moe import grouped_gemm_util as gg from megatron.core.transformer.transformer_config import TransformerConfig -from .base_moe_layer import BaseMoELayer - -class GroupedMLP(BaseMoELayer): +class GroupedMLP(MegatronModule): """ Top-1 Mixture of Experts Layer with Grouped GEMM. Routes input to one of N MLP "experts" Curently supports Sinkhorn based expert routing. 
""" - def __init__(self, config: TransformerConfig): + def __init__(self, num_local_experts: int, config: TransformerConfig): super().__init__(config=config) self.config: TransformerConfig = config + self.num_local_experts = num_local_experts gg.assert_grouped_gemm_is_available() assert ( @@ -125,14 +125,9 @@ def glu(x): setattr(self.weight1, 'allreduce', not self.expert_parallel) setattr(self.weight2, 'allreduce', not self.expert_parallel) - def forward(self, hidden_states): + def forward(self, permuted_local_hidden_states, tokens_per_expert): # Permutation of tokens - ( - permuted_local_hidden_states, - tokens_per_expert, - indices, - global_local_map, - ) = self.token_permutation(hidden_states) + # permuted_local_hidden_states, tokens_per_expert = self.token_permutation(hidden_states) # Reshape the weights for the grouped GEMMs. w1 = self.weight1.view(self.num_local_experts, self.config.hidden_size, -1) @@ -145,6 +140,6 @@ def forward(self, hidden_states): fc2_output = gg.ops.gmm(intermediate_parallel, w2, tokens_per_expert, trans_b=False) # Un-permutation of tokens. - output_total, _ = self.token_unpermutation(fc2_output, indices, global_local_map) + # output_total, _ = self.token_unpermutation(fc2_output) - return output_total, None + return fc2_output, None diff --git a/megatron/core/transformer/moe/moe_layer.py b/megatron/core/transformer/moe/moe_layer.py new file mode 100644 index 0000000000..4d86ef4ece --- /dev/null +++ b/megatron/core/transformer/moe/moe_layer.py @@ -0,0 +1,90 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + +from abc import ABC, abstractmethod + +import torch + +from megatron.core import parallel_state +from megatron.core.transformer.mlp import MLPSubmodules +from megatron.core.transformer.module import MegatronModule +from megatron.core.transformer.moe.base_moe_layer import ZeroDropSinkhornRouter +from megatron.core.transformer.moe.grouped_mlp import GroupedMLP +from megatron.core.transformer.moe.switch_mlp import SwitchMLP +from megatron.core.transformer.transformer_config import TransformerConfig + + +class BaseMoELayer(MegatronModule, ABC): + def __init__(self, config: TransformerConfig): + super(BaseMoELayer, self).__init__(config) + self.config = config + self.expert_parallel_size = parallel_state.get_expert_model_parallel_world_size() + + assert self.config.num_moe_experts % self.expert_parallel_size == 0 + self.num_local_experts = self.config.num_moe_experts // self.expert_parallel_size + local_expert_indices_offset = ( + parallel_state.get_expert_model_parallel_rank() * self.num_local_experts + ) + self.local_expert_indices = [ + local_expert_indices_offset + i for i in range(self.num_local_experts) + ] + + self.router = self.initialize_router() + self.experts = self.initialize_experts() + + def initialize_experts(self): + pass + + def initialize_router(self): + pass + + def forward(self, hidden_states): + # process MoE + gatings, indices = self.router(hidden_states) + ( + dispatched_input, + tokens_per_expert, + probs, + indices, + global_local_map, + ) = self.router.token_dispatcher.dispatch(hidden_states, gatings, indices) + expert_output, mlp_bias = self.experts(dispatched_input, tokens_per_expert) + output, mlp_bias = self.router.token_dispatcher.restore( + expert_output, probs, indices, global_local_map, mlp_bias + ) + + if mlp_bias is None: + mlp_bias = torch.tensor(0.0, device=hidden_states.device, dtype=hidden_states.dtype) + + # output = output.reshape(hidden_states.shape) + return output, mlp_bias + + +class 
GroupedGemmMoELayer(BaseMoELayer): + def __init__(self, config: TransformerConfig): + super(GroupedGemmMoELayer, self).__init__(config=config) + + def initialize_experts(self): + experts = GroupedMLP(self.num_local_experts, self.config) + return experts + + def initialize_router(self): + router = ZeroDropSinkhornRouter( + self.num_local_experts, self.local_expert_indices, self.config + ) + return router + + +class SwitchMLPLayer(BaseMoELayer): + def __init__(self, config: TransformerConfig, submodules: MLPSubmodules): + self.submodules = submodules + super(SwitchMLPLayer, self).__init__(config=config) + + def initialize_experts(self): + experts = SwitchMLP(self.num_local_experts, self.config, self.submodules) + return experts + + def initialize_router(self): + router = ZeroDropSinkhornRouter( + self.num_local_experts, self.local_expert_indices, self.config + ) + return router diff --git a/megatron/core/transformer/moe/switch_mlp.py b/megatron/core/transformer/moe/switch_mlp.py index 46cced972e..0a75f9f7b9 100644 --- a/megatron/core/transformer/moe/switch_mlp.py +++ b/megatron/core/transformer/moe/switch_mlp.py @@ -4,32 +4,28 @@ import torch from megatron.core.transformer.mlp import MLP, MLPSubmodules +from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.transformer_config import TransformerConfig -from .base_moe_layer import BaseMoELayer - -class SwitchMLP(BaseMoELayer): +class SwitchMLP(MegatronModule): """ Top-1 Mixture of Experts Layer. Routes input to one of N MLP "experts" Curently supports Sinkhorn based expert routing. """ - def __init__(self, config: TransformerConfig, submodules: MLPSubmodules): + def __init__(self, num_local_experts, config: TransformerConfig, submodules: MLPSubmodules): super().__init__(config=config) - + self.add_bias = config.add_bias_linear + self.num_local_experts = num_local_experts self.local_experts = torch.nn.ModuleList() for _ in range(self.num_local_experts): expert = MLP(self.config, submodules, is_expert=True) self.local_experts.append(expert) - def forward(self, hidden_states): - ( - permuted_local_hidden_states, - tokens_per_expert, - indices, - global_local_map, - ) = self.token_permutation(hidden_states) + def forward(self, permuted_local_hidden_states, tokens_per_expert): + # global_hidden_states, global_indices = self.token_permutation(hidden_states) + # permuted_local_hidden_states, tokens_per_expert = self.token_permutation(hidden_states) output_local = torch.zeros_like(permuted_local_hidden_states) output_bias_local = None @@ -52,8 +48,6 @@ def forward(self, hidden_states): output_bias_local[start:end, :] = output_bias # Un-permutation of tokens. - output_total, output_bias_total = self.token_unpermutation( - output_local, indices, global_local_map, output_bias_local - ) + # output_total, output_bias_total = self.token_unpermutation(output_local, output_bias_local) - return output_total, output_bias_total + return output_local, output_bias_local diff --git a/megatron/core/transformer/transformer_config.py b/megatron/core/transformer/transformer_config.py index 74a472da01..d3321206fe 100644 --- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -127,8 +127,10 @@ class TransformerConfig(ModelParallelConfig): # experimental section (TODO: move to apt. 
section above once stable) normalization: bool = "LayerNorm" # alt value supported by TE: "RMSNorm" + # MoE related moe_grouped_gemm: bool = False + moe_loss_coeff: float = 0.01 def __post_init__(self): """ Python dataclass method that is used to modify attributes after initialization. From 2016969f8418fefaf510b259e6adbc43e4327ce4 Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Fri, 15 Dec 2023 10:32:33 +0000 Subject: [PATCH 153/296] Add Z-loss and aux loss. Code cleanup. --- megatron/arguments.py | 4 +- .../core/transformer/moe/base_moe_layer.py | 109 +++++++++--------- megatron/core/transformer/moe/moe_layer.py | 46 +++++--- megatron/core/transformer/moe/moe_utils.py | 36 ++++++ .../core/transformer/transformer_config.py | 2 +- 5 files changed, 125 insertions(+), 72 deletions(-) create mode 100644 megatron/core/transformer/moe/moe_utils.py diff --git a/megatron/arguments.py b/megatron/arguments.py index 4c10623f43..170962aa87 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -1419,7 +1419,9 @@ def _add_moe_args(parser): 'launch to improve the utilization and performance by ' 'leveraging the Grouped GEMM feature introduced since ' 'CUTLASS 2.8 (https://github.com/fanshiqing/grouped_gemm).') - group.add_argument('--moe-loss-coeff', type=float, default=0.01, + group.add_argument('--moe-aux-loss-coeff', type=float, default=1e-2, + help='Scaling coefficient for adding MoE loss to model loss') + group.add_argument('--moe-z-loss-coeff', type=float, default=1e-3, help='Scaling coefficient for adding MoE loss to model loss') group.add_argument('--moe-router-type', type=str, default='top1', help='Options for router type, support top1 and ec') diff --git a/megatron/core/transformer/moe/base_moe_layer.py b/megatron/core/transformer/moe/base_moe_layer.py index f5179d0c31..9fcb33a860 100644 --- a/megatron/core/transformer/moe/base_moe_layer.py +++ b/megatron/core/transformer/moe/base_moe_layer.py @@ -13,6 +13,7 @@ get_data_parallel_rng_tracker_name, ) from megatron.core.transformer.module import MegatronModule +from megatron.core.transformer.moe.moe_utils import switch_load_balancing_loss_func, z_loss_func from megatron.core.transformer.transformer_config import TransformerConfig @@ -29,21 +30,20 @@ def __init__(self, config: TransformerConfig) -> None: super().__init__(config) self.config = config self.num_experts = self.config.num_moe_experts - # Token dispatcher for exchange tokens between experts. self.token_dispatcher = None - # Initialize the gate weights. self.gate = torch.nn.Linear( self.config.hidden_size, self.config.num_moe_experts, bias=False ) + # Initialize the aux losses. + self.moe_aux_loss_func = None + + # Initialize the gate weights. with get_cuda_rng_tracker().fork(get_data_parallel_rng_tracker_name()): config.init_method(self.gate.weight) setattr(self.gate.weight, 'sequence_parallel', config.sequence_parallel) - self.fp32_router = False - self.input_jitter = None - def gating(self, input: torch.Tensor): """ Forward pass of the router gate. @@ -75,7 +75,7 @@ def dispatch( raise NotImplementedError def restore( - self, expert_output: torch.Tensor, gating: torch.Tensor, indicies: torch.Tensor, + self, expert_output: torch.Tensor, scores: torch.Tensor, indicies: torch.Tensor, ): raise NotImplementedError @@ -106,39 +106,53 @@ def forward(self, input: torch.Tensor): input (torch.Tensor): Input tensor. Returns: - Tuple[torch.Tensor, torch.Tensor]: gating and indices. + Tuple[torch.Tensor, torch.Tensor]: scores and indices. 
""" self.hidden = input.shape[-1] - if self.fp32_router: - if self.gate.weight.dtype != torch.float32: - self.gate.weight.data = self.gate.weight.data.float() - assert hasattr(self.gate.weight, 'sequence_parallel') - input = input.float() + logits = self.gating(input) + logits = logits.view(-1, self.config.num_moe_experts) - route = self.gating(input) - route = route.view(-1, self.config.num_moe_experts) + scores, indices = self.routing(logits) - gating, indices = self.routing(route) + return scores, indices - return gating, indices + def apply_aux_loss(self, loss_func, scores, indicies): + mask = torch.nn.functional.one_hot(indicies, num_classes=self.num_experts).sum(dim=1) + aux_loss = loss_func(scores, mask) + scores = MoEAuxLossAutoScaler.apply(scores, aux_loss) + return scores + + def apply_z_loss(self, logits): + """Encourages the router's logits to remain small to enhance stability. + Please refer to the ST-MoE paper (https://arxiv.org/pdf/2202.08906.pdf) for details. + + Args: + logits (torch.Tensor): The logits of the router. + + Returns: + torch.Tensor: The logits after applying the z-loss. + """ + + z_loss = z_loss_func(logits) + logits = MoEAuxLossAutoScaler.apply(logits, z_loss) + return logits def switch_transformer_load_balancing_loss(self, gates, mask): - """ - Calculate the auxiliary loss for better load balacing. + """Calculate the auxiliary loss for better load balacing. Please refer to the Switch Transformer paper (https://arxiv.org/abs/2101.03961) for details. Args: - route (torch.Tensor): The gates tensor. - mask (torch.Tensor): The mask tensor. + gates (torch.Tensor): The gates tensor representing the routing probabilities for each expert. + mask (torch.Tensor): The 2D mask tensor indicating which experts are selected. Returns: - torch.Tensor: The auxiliary loss. + torch.Tensor: The auxiliary loss for load balancing. """ gates_mean = gates.mean(dim=0) selection_mean = mask.float().mean(dim=0) aux_loss = torch.sum(gates_mean * selection_mean) * self.num_experts - aux_loss *= self.config.moe_loss_coeff + aux_loss *= self.config.aux_loss_coeff return aux_loss @@ -169,14 +183,14 @@ def dispatch( raise NotImplementedError def restore( - self, expert_output: torch.Tensor, gating: torch.Tensor, indices: torch.Tensor, + self, expert_output: torch.Tensor, scores: torch.Tensor, indices: torch.Tensor, ): """ Restores the expert output to its original ordering. Args: expert_output (torch.Tensor): The output tensor from the expert models. - gating (torch.Tensor): The gating tensor used to route the inputs to the experts. + scores (torch.Tensor): Each token's score with each expert. indices (torch.Tensor): The indices used to reorder the expert output. Returns: @@ -187,7 +201,7 @@ def restore( class MoEZeroDropTokenDispatcher(MoETokenDispatcher): """ - ZeroDrop Token Dispatcher + Token dispatcher without token dropping. 
""" def __init__(self, num_local_experts, local_expert_indices, config: TransformerConfig) -> None: @@ -289,7 +303,7 @@ def dispatch(self, hidden_states, max_prob, max_ind): permuted_local_hidden_states = torch.gather(local_hidden_states, 0, indices) return permuted_local_hidden_states, tokens_per_expert, local_probs, indices, global_local_map - def restore(self, hidden_states, gating, indices, global_local_map=None, bias=None): + def restore(self, hidden_states, scores, indices, global_local_map=None, bias=None): """ Reverse process of `dispatch()` which permutes the ouput of local experts locallay and across expert parallel rank into the original order to @@ -309,14 +323,14 @@ def restore(self, hidden_states, gating, indices, global_local_map=None, bias=No with shape of [SeqLen/TP, MBS, HiddenSize] """ # Stage1: unpermute the tokens and bias locally respectively. - gating = gating.to(dtype=hidden_states.dtype) + scores = scores.to(dtype=hidden_states.dtype) unpermuted_local_hidden = torch.zeros_like(hidden_states) assert indices.shape == hidden_states.shape unpermuted_local_hidden = unpermuted_local_hidden.scatter(0, indices, hidden_states) # Scale the expert output prior to reduction and subsequent to local unpermutation if k > 1. if self.k > 1: - unpermuted_local_hidden = unpermuted_local_hidden * gating + unpermuted_local_hidden = unpermuted_local_hidden * scores unpermuted_local_bias = None if self.add_bias: @@ -325,7 +339,7 @@ def restore(self, hidden_states, gating, indices, global_local_map=None, bias=No assert indices.shape == bias.shape unpermuted_local_bias = unpermuted_local_bias.scatter(0, indices, bias) if self.k > 1: - unpermuted_local_bias = unpermuted_local_bias * gating + unpermuted_local_bias = unpermuted_local_bias * scores output_total = unpermuted_local_hidden output_bias_total = None @@ -363,12 +377,12 @@ def restore(self, hidden_states, gating, indices, global_local_map=None, bias=No output_bias_total / parallel_state.get_tensor_model_parallel_world_size() ) if self.k == 1: - output_total = output_total * gating + output_total = output_total * scores output_total = output_total.view(self.hidden_shape) if self.add_bias: assert output_bias_total is not None if self.k == 1: - output_bias_total = output_bias_total * gating + output_bias_total = output_bias_total * scores output_bias_total = output_bias_total.view(self.hidden_shape) else: output_bias_total = None @@ -378,7 +392,7 @@ def restore(self, hidden_states, gating, indices, global_local_map=None, bias=No class ZeroDropSinkhornRouter(Router): """ - ZeroDrop Sinkhorn Router + Sinkhorn Router without token dropping. 
""" def __init__(self, num_local_experts, local_expert_indices, config: TransformerConfig) -> None: @@ -388,10 +402,10 @@ def __init__(self, num_local_experts, local_expert_indices, config: TransformerC super().__init__(config=config) self.route_algo = self.sinkhorn self.router_activation = torch.sigmoid - self.moe_aux_loss = self.switch_transformer_load_balancing_loss self.token_dispatcher = MoEZeroDropTokenDispatcher( num_local_experts, local_expert_indices, config ) + self.k = 1 def sinkhorn(self, cost, tol=0.0001): "Sinkhorn based MoE routing function" @@ -409,13 +423,7 @@ def sinkhorn(self, cost, tol=0.0001): d1_old = d1 return d1 * cost * d0.unsqueeze(1) - def moe_loss(self, gatings, indicies): - mask = torch.nn.functional.one_hot(indicies, num_classes=self.num_experts).sum(dim=1) - aux_loss = self.moe_aux_loss(gatings, mask) - gatings = MoEAuxLossAutoScaler.apply(gatings, aux_loss) - return gatings - - def routing(self, route: torch.Tensor): + def routing(self, logits: torch.Tensor): """ Get the routing results. @@ -425,24 +433,21 @@ def routing(self, route: torch.Tensor): Returns: Tuple[torch.Tensor, torch.Tensor]: Tuple of tensors representing max probs and the indices. """ - route = route.view(-1, self.config.num_moe_experts) - k = 1 # TODO: self.config.top_k + logits = logits.view(-1, self.config.num_moe_experts) if self.training: with torch.no_grad(): - norm_route = self.route_algo( - route.detach().to(dtype=torch.float32) + norm_logits = self.route_algo( + logits.to(dtype=torch.float32) ) # explicit fp32 conversion for stability - _, indices = torch.topk(norm_route, k=k, dim=1) - route = self.router_activation(route) - gatings = torch.gather(route, 1, indices) + _, indices = torch.topk(norm_logits, k=self.k, dim=1) + logits = self.router_activation(logits) + scores = torch.gather(logits, 1, indices) else: - route = self.router_activation(route) - gatings, indices = torch.topk(route, k=k, dim=1) - - # gatings = self.moe_loss(gatings, indices) + logits = self.router_activation(logits) + scores, indices = torch.topk(logits, k=self.k, dim=1) - return gatings, indices + return scores, indices class MoEAuxLossAutoScaler(torch.autograd.Function): diff --git a/megatron/core/transformer/moe/moe_layer.py b/megatron/core/transformer/moe/moe_layer.py index 4d86ef4ece..336a2c928a 100644 --- a/megatron/core/transformer/moe/moe_layer.py +++ b/megatron/core/transformer/moe/moe_layer.py @@ -18,8 +18,27 @@ def __init__(self, config: TransformerConfig): super(BaseMoELayer, self).__init__(config) self.config = config self.expert_parallel_size = parallel_state.get_expert_model_parallel_world_size() - assert self.config.num_moe_experts % self.expert_parallel_size == 0 + self.router = None + self.experts = None + + @abstractmethod + def initialize_experts(self): + pass + + @abstractmethod + def initialize_router(self): + pass + + @abstractmethod + def forward(self, hidden_states): + pass + + +class BaseSwitchMLPLayer(BaseMoELayer): + def __init__(self, config: TransformerConfig, submodules: MLPSubmodules): + self.submodules = submodules + super(BaseSwitchMLPLayer, self).__init__(config=config) self.num_local_experts = self.config.num_moe_experts // self.expert_parallel_size local_expert_indices_offset = ( parallel_state.get_expert_model_parallel_rank() * self.num_local_experts @@ -27,41 +46,33 @@ def __init__(self, config: TransformerConfig): self.local_expert_indices = [ local_expert_indices_offset + i for i in range(self.num_local_experts) ] - self.router = self.initialize_router() self.experts = 
self.initialize_experts() - def initialize_experts(self): - pass - - def initialize_router(self): - pass - def forward(self, hidden_states): # process MoE - gatings, indices = self.router(hidden_states) + scores, indices = self.router(hidden_states) ( dispatched_input, tokens_per_expert, - probs, + scores, indices, global_local_map, - ) = self.router.token_dispatcher.dispatch(hidden_states, gatings, indices) + ) = self.router.token_dispatcher.dispatch(hidden_states, scores, indices) expert_output, mlp_bias = self.experts(dispatched_input, tokens_per_expert) output, mlp_bias = self.router.token_dispatcher.restore( - expert_output, probs, indices, global_local_map, mlp_bias + expert_output, scores, indices, global_local_map, mlp_bias ) if mlp_bias is None: mlp_bias = torch.tensor(0.0, device=hidden_states.device, dtype=hidden_states.dtype) - # output = output.reshape(hidden_states.shape) return output, mlp_bias -class GroupedGemmMoELayer(BaseMoELayer): +class GroupedGemmMoELayer(BaseSwitchMLPLayer): def __init__(self, config: TransformerConfig): - super(GroupedGemmMoELayer, self).__init__(config=config) + super(GroupedGemmMoELayer, self).__init__(config=config,) def initialize_experts(self): experts = GroupedMLP(self.num_local_experts, self.config) @@ -74,10 +85,9 @@ def initialize_router(self): return router -class SwitchMLPLayer(BaseMoELayer): +class SwitchMLPLayer(BaseSwitchMLPLayer): def __init__(self, config: TransformerConfig, submodules: MLPSubmodules): - self.submodules = submodules - super(SwitchMLPLayer, self).__init__(config=config) + super(SwitchMLPLayer, self).__init__(config=config, submodules=submodules) def initialize_experts(self): experts = SwitchMLP(self.num_local_experts, self.config, self.submodules) diff --git a/megatron/core/transformer/moe/moe_utils.py b/megatron/core/transformer/moe/moe_utils.py new file mode 100644 index 0000000000..04a53d021c --- /dev/null +++ b/megatron/core/transformer/moe/moe_utils.py @@ -0,0 +1,36 @@ +import torch + + +def switch_load_balancing_loss_func(config, gates, mask): + """Calculate the auxiliary loss for better load balacing. + Please refer to the Switch Transformer paper (https://arxiv.org/abs/2101.03961) for details. + + Args: + gates (torch.Tensor): The gates tensor representing the routing probabilities for each expert. + mask (torch.Tensor): The 2D mask tensor indicating which experts are selected. + + Returns: + torch.Tensor: The auxiliary loss for load balancing. + """ + num_experts = mask.size(1) + assert num_experts == config.num_moe_experts + gates_mean = gates.mean(dim=0) + selection_mean = mask.float().mean(dim=0) + aux_loss = torch.sum(gates_mean * selection_mean) * num_experts + aux_loss *= config.aux_loss_coeff + return aux_loss + + +def z_loss_func(logits): + """Encourages the router's logits to remain small to enhance stability. + Please refer to the ST-MoE paper (https://arxiv.org/pdf/2202.08906.pdf) for details. + + Args: + logits (torch.Tensor): The logits of the router. + + Returns: + torch.Tensor: The logits after applying the z-loss. 
+ """ + + z_loss = torch.mean(torch.square(torch.logsumexp(logits, dim=-1))) + return z_loss diff --git a/megatron/core/transformer/transformer_config.py b/megatron/core/transformer/transformer_config.py index d3321206fe..8ada5553be 100644 --- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -130,7 +130,7 @@ class TransformerConfig(ModelParallelConfig): # MoE related moe_grouped_gemm: bool = False - moe_loss_coeff: float = 0.01 + moe_aux_loss_coeff: float = 0.01 def __post_init__(self): """ Python dataclass method that is used to modify attributes after initialization. From 9b5cd88a29161a4dd022f47c9c7ddefbc6352434 Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Mon, 18 Dec 2023 01:45:31 +0000 Subject: [PATCH 154/296] Code clean. --- megatron/arguments.py | 6 ++-- megatron/core/models/gpt/gpt_layer_specs.py | 9 ++---- megatron/core/transformer/moe/moe_layer.py | 32 +++++-------------- .../core/transformer/transformer_config.py | 5 ++- 4 files changed, 19 insertions(+), 33 deletions(-) diff --git a/megatron/arguments.py b/megatron/arguments.py index 170962aa87..57bb24780a 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -1417,14 +1417,16 @@ def _add_moe_args(parser): help='When there are multiple experts per rank, compress ' 'multiple local (potentially small) gemms in a single kernel ' 'launch to improve the utilization and performance by ' - 'leveraging the Grouped GEMM feature introduced since ' + 'leveraging the Grouped GEMM feature introduced since ' 'CUTLASS 2.8 (https://github.com/fanshiqing/grouped_gemm).') group.add_argument('--moe-aux-loss-coeff', type=float, default=1e-2, help='Scaling coefficient for adding MoE loss to model loss') group.add_argument('--moe-z-loss-coeff', type=float, default=1e-3, help='Scaling coefficient for adding MoE loss to model loss') - group.add_argument('--moe-router-type', type=str, default='top1', + group.add_argument('--moe-router-type', type=str, default='sinkhorn', help='Options for router type, support top1 and ec') + group.add_argument('--moe-token-dropping',action='store_true', + help='Drop or pad selected tokens for each expert as GShard, Swtich-Transformer and DeepSpeed-MoE.') # zero token drop moe arguments # token drop moe arugments diff --git a/megatron/core/models/gpt/gpt_layer_specs.py b/megatron/core/models/gpt/gpt_layer_specs.py index 07f10fbf5a..cffe40c425 100755 --- a/megatron/core/models/gpt/gpt_layer_specs.py +++ b/megatron/core/models/gpt/gpt_layer_specs.py @@ -89,14 +89,11 @@ def _get_mlp_module_spec( linear_fc2=TERowParallelLinear if use_te else RowParallelLinear, ), ) - elif moe_grouped_gemm: - # GroupedMLP based MoE with modules in megatron core. - return GroupedGemmMoELayer else: # SwitchMLP based MoE with modules in megatron core. 
return ModuleSpec( module=SwitchMLPLayer, - submodules=MLPSubmodules( - linear_fc1=ColumnParallelLinear, linear_fc2=RowParallelLinear, - ), + submodules=MLPSubmodules(linear_fc1=ColumnParallelLinear, linear_fc2=RowParallelLinear,) + if not moe_grouped_gemm + else None, ) diff --git a/megatron/core/transformer/moe/moe_layer.py b/megatron/core/transformer/moe/moe_layer.py index 336a2c928a..6266f81a61 100644 --- a/megatron/core/transformer/moe/moe_layer.py +++ b/megatron/core/transformer/moe/moe_layer.py @@ -35,10 +35,10 @@ def forward(self, hidden_states): pass -class BaseSwitchMLPLayer(BaseMoELayer): - def __init__(self, config: TransformerConfig, submodules: MLPSubmodules): +class SwitchMLPLayer(BaseMoELayer): + def __init__(self, config: TransformerConfig, submodules: MLPSubmodules = None): self.submodules = submodules - super(BaseSwitchMLPLayer, self).__init__(config=config) + super(SwitchMLPLayer, self).__init__(config=config) self.num_local_experts = self.config.num_moe_experts // self.expert_parallel_size local_expert_indices_offset = ( parallel_state.get_expert_model_parallel_rank() * self.num_local_experts @@ -69,28 +69,12 @@ def forward(self, hidden_states): return output, mlp_bias - -class GroupedGemmMoELayer(BaseSwitchMLPLayer): - def __init__(self, config: TransformerConfig): - super(GroupedGemmMoELayer, self).__init__(config=config,) - - def initialize_experts(self): - experts = GroupedMLP(self.num_local_experts, self.config) - return experts - - def initialize_router(self): - router = ZeroDropSinkhornRouter( - self.num_local_experts, self.local_expert_indices, self.config - ) - return router - - -class SwitchMLPLayer(BaseSwitchMLPLayer): - def __init__(self, config: TransformerConfig, submodules: MLPSubmodules): - super(SwitchMLPLayer, self).__init__(config=config, submodules=submodules) - def initialize_experts(self): - experts = SwitchMLP(self.num_local_experts, self.config, self.submodules) + if self.config.moe_grouped_gemm: + experts = GroupedMLP(self.num_local_experts, self.config) + else: + assert isinstance(self.submodules, MLPSubmodules) + experts = SwitchMLP(self.num_local_experts, self.config, self.submodules) return experts def initialize_router(self): diff --git a/megatron/core/transformer/transformer_config.py b/megatron/core/transformer/transformer_config.py index 8ada5553be..3cb2cf2ebe 100644 --- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -130,7 +130,10 @@ class TransformerConfig(ModelParallelConfig): # MoE related moe_grouped_gemm: bool = False - moe_aux_loss_coeff: float = 0.01 + moe_aux_loss_coeff: float = 0 # 1e-2 would be a good start value for load balance loss. + moe_z_loss_coeff: float = 0 # 1e-3 would be a good start value for z-loss + moe_token_dropping: bool = False # TODO: Support token dropping. + moe_router_type: str = "sinkhorn" def __post_init__(self): """ Python dataclass method that is used to modify attributes after initialization. From dc436f25080bb24422b793df27a493e415d14911 Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Mon, 18 Dec 2023 16:33:54 +0000 Subject: [PATCH 155/296] Add top-k router and documentation. 
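For illustration, a minimal standalone sketch of the top-k routing path that this and the following patches build up (softmax over the expert logits, top-k selection, score renormalization, and an optional load-balancing loss), written against plain torch so it runs outside Megatron. The shapes, the value of k, and the coefficient are placeholders, and the load-balancing term is written in the standard Switch-Transformer form over the full softmax probabilities rather than copied verbatim from the patch:

import torch

def topk_route(logits: torch.Tensor, k: int, aux_loss_coeff: float = 0.0):
    """Return (scores, indices, aux_loss) for top-k expert routing."""
    probs = torch.softmax(logits.float(), dim=-1)        # [num_tokens, num_experts]
    scores, indices = torch.topk(probs, k=k, dim=-1)     # pick k experts per token
    scores = scores / scores.sum(dim=-1, keepdim=True)   # renormalize the k gates

    aux_loss = None
    if aux_loss_coeff > 0:
        num_experts = probs.size(-1)
        # Switch-Transformer load balancing: mean routing probability per expert
        # times mean selection frequency per expert, summed over experts.
        mask = torch.nn.functional.one_hot(indices, num_classes=num_experts).sum(dim=1)
        aux_loss = (probs.mean(dim=0) * mask.float().mean(dim=0)).sum() * num_experts
        aux_loss = aux_loss * aux_loss_coeff
    return scores, indices, aux_loss

# Example: 8 tokens routed over 4 experts with top-2 selection.
scores, indices, aux = topk_route(torch.randn(8, 4), k=2, aux_loss_coeff=1e-2)

The z-loss added alongside it is torch.mean(torch.square(torch.logsumexp(logits, dim=-1))) scaled by --moe-z-loss-coeff, which keeps the router logits small for numerical stability.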
--- megatron/arguments.py | 67 +++++++++++---- megatron/core/models/gpt/gpt_layer_specs.py | 2 +- .../core/transformer/moe/base_moe_layer.py | 86 +++++++++++++------ megatron/core/transformer/moe/moe_layer.py | 24 +++++- .../core/transformer/transformer_config.py | 4 + 5 files changed, 135 insertions(+), 48 deletions(-) diff --git a/megatron/arguments.py b/megatron/arguments.py index 57bb24780a..e13b33bde3 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -397,6 +397,19 @@ def validate_args(args, defaults={}): # MoE Spec check if args.num_experts is not None: assert args.spec is None, "Model Spec must be None when using MoEs" + if args.moe_router_type.lower().startswith("top"): + try: + k = int(args.moe_router_type[3:]) + assert k > 0, "Invalid topk router name: {}, please ensure k > 0.".format( + args.moe_router_type + ) + except: + raise RuntimeError( + "Invalid `topk` router name: `{}`. Please use the format `topk`, where `k` must be an integer.".format( + args.moe_router_type + ) + ) + # Expert parallelism check if args.expert_model_parallel_size > 1: @@ -1409,27 +1422,43 @@ def _add_vision_args(parser): def _add_moe_args(parser): group = parser.add_argument_group(title="moe") - # general moe arguements - group.add_argument('--num-experts', type=int, default=None, - help='Number of Experts in MoE (None means no MoE)') - group.add_argument('--moe-grouped-gemm', action='store_true', - help='When there are multiple experts per rank, compress ' - 'multiple local (potentially small) gemms in a single kernel ' - 'launch to improve the utilization and performance by ' - 'leveraging the Grouped GEMM feature introduced since ' - 'CUTLASS 2.8 (https://github.com/fanshiqing/grouped_gemm).') - group.add_argument('--moe-aux-loss-coeff', type=float, default=1e-2, - help='Scaling coefficient for adding MoE loss to model loss') - group.add_argument('--moe-z-loss-coeff', type=float, default=1e-3, - help='Scaling coefficient for adding MoE loss to model loss') - group.add_argument('--moe-router-type', type=str, default='sinkhorn', - help='Options for router type, support top1 and ec') - group.add_argument('--moe-token-dropping',action='store_true', - help='Drop or pad selected tokens for each expert as GShard, Swtich-Transformer and DeepSpeed-MoE.') + group.add_argument( + '--num-experts', type=int, default=None, help='Number of Experts in MoE (None means no MoE)' + ) + group.add_argument( + '--moe-grouped-gemm', + action='store_true', + help='When there are multiple experts per rank, compress ' + 'multiple local (potentially small) gemms in a single kernel ' + 'launch to improve the utilization and performance by ' + 'leveraging the Grouped GEMM feature introduced since ' + 'CUTLASS 2.8 (https://github.com/fanshiqing/grouped_gemm).', + ) + group.add_argument( + '--moe-aux-loss-coeff', + type=float, + default=0.0, + help='Scaling coefficient for the aux loss: a starting value of 1e-2 is recommended.', + ) + group.add_argument( + '--moe-z-loss-coeff', + type=float, + default=0.0, + help='Scaling coefficient for the z-loss: a starting value of 1e-3 is recommended.', + ) + group.add_argument( + '--moe-router-type', + type=str, + default='sinkhorn', + help='Options for router type. Currently supports sinkhorn and topk router.', + ) + group.add_argument( + '--moe-token-dropping', + action='store_true', + help='Currently unsupported. 
This feature involves selectively dropping and padding tokens for each expert to achieve a specified capacity, similar to to GShard, Switch-Transformer, and DeepSpeed-MoE.', + ) # zero token drop moe arguments - - # token drop moe arugments return parser diff --git a/megatron/core/models/gpt/gpt_layer_specs.py b/megatron/core/models/gpt/gpt_layer_specs.py index cffe40c425..ce8710d760 100755 --- a/megatron/core/models/gpt/gpt_layer_specs.py +++ b/megatron/core/models/gpt/gpt_layer_specs.py @@ -14,7 +14,7 @@ from megatron.core.transformer.enums import AttnMaskType from megatron.core.transformer.identity_op import IdentityOp from megatron.core.transformer.mlp import MLP, MLPSubmodules -from megatron.core.transformer.moe.moe_layer import GroupedGemmMoELayer, SwitchMLPLayer +from megatron.core.transformer.moe.moe_layer import SwitchMLPLayer from megatron.core.transformer.spec_utils import ModuleSpec from megatron.core.transformer.transformer_layer import TransformerLayer, TransformerLayerSubmodules diff --git a/megatron/core/transformer/moe/base_moe_layer.py b/megatron/core/transformer/moe/base_moe_layer.py index 9fcb33a860..2875c470f1 100644 --- a/megatron/core/transformer/moe/base_moe_layer.py +++ b/megatron/core/transformer/moe/base_moe_layer.py @@ -138,23 +138,6 @@ def apply_z_loss(self, logits): logits = MoEAuxLossAutoScaler.apply(logits, z_loss) return logits - def switch_transformer_load_balancing_loss(self, gates, mask): - """Calculate the auxiliary loss for better load balacing. - Please refer to the Switch Transformer paper (https://arxiv.org/abs/2101.03961) for details. - - Args: - gates (torch.Tensor): The gates tensor representing the routing probabilities for each expert. - mask (torch.Tensor): The 2D mask tensor indicating which experts are selected. - - Returns: - torch.Tensor: The auxiliary loss for load balancing. - """ - gates_mean = gates.mean(dim=0) - selection_mean = mask.float().mean(dim=0) - aux_loss = torch.sum(gates_mean * selection_mean) * self.num_experts - aux_loss *= self.config.aux_loss_coeff - return aux_loss - class MoETokenDispatcher: """ @@ -204,14 +187,16 @@ class MoEZeroDropTokenDispatcher(MoETokenDispatcher): Token dispatcher without token dropping. """ - def __init__(self, num_local_experts, local_expert_indices, config: TransformerConfig) -> None: + def __init__( + self, num_local_experts, local_expert_indices, k, config: TransformerConfig + ) -> None: """ Initialize the zero token dropping router. """ super().__init__(config=config) self.num_local_experts = num_local_experts self.local_expert_indices = local_expert_indices - self.k = 1 + self.k = k self.add_bias = config.add_bias_linear def gather_indices(self, local_indices): @@ -301,7 +286,13 @@ def dispatch(self, hidden_states, max_prob, max_ind): # Reshape indices to be compatible with Tensor.gather indices = indices.view(-1, 1).expand(-1, hidden_states.shape[-1]) permuted_local_hidden_states = torch.gather(local_hidden_states, 0, indices) - return permuted_local_hidden_states, tokens_per_expert, local_probs, indices, global_local_map + return ( + permuted_local_hidden_states, + tokens_per_expert, + local_probs, + indices, + global_local_map, + ) def restore(self, hidden_states, scores, indices, global_local_map=None, bias=None): """ @@ -330,7 +321,7 @@ def restore(self, hidden_states, scores, indices, global_local_map=None, bias=No # Scale the expert output prior to reduction and subsequent to local unpermutation if k > 1. 
if self.k > 1: - unpermuted_local_hidden = unpermuted_local_hidden * scores + unpermuted_local_hidden = unpermuted_local_hidden * scores.view(-1, 1) unpermuted_local_bias = None if self.add_bias: @@ -339,7 +330,7 @@ def restore(self, hidden_states, scores, indices, global_local_map=None, bias=No assert indices.shape == bias.shape unpermuted_local_bias = unpermuted_local_bias.scatter(0, indices, bias) if self.k > 1: - unpermuted_local_bias = unpermuted_local_bias * scores + unpermuted_local_bias = unpermuted_local_bias * scores.view(-1, 1) output_total = unpermuted_local_hidden output_bias_total = None @@ -400,12 +391,14 @@ def __init__(self, num_local_experts, local_expert_indices, config: TransformerC Initialize the zero token dropping router. """ super().__init__(config=config) + assert config.moe_token_dropping == False + assert config.moe_router_type == "sinkhorn" self.route_algo = self.sinkhorn self.router_activation = torch.sigmoid + self.k = 1 self.token_dispatcher = MoEZeroDropTokenDispatcher( - num_local_experts, local_expert_indices, config + num_local_experts, local_expert_indices, self.k, config ) - self.k = 1 def sinkhorn(self, cost, tol=0.0001): "Sinkhorn based MoE routing function" @@ -450,6 +443,51 @@ def routing(self, logits: torch.Tensor): return scores, indices +class ZeroDropTopKRouter(Router): + """ + Sinkhorn Router without token dropping. + """ + + def __init__(self, num_local_experts, local_expert_indices, config: TransformerConfig) -> None: + """ + Initialize the zero token dropping router. + """ + super().__init__(config=config) + assert config.moe_token_dropping == False + assert config.moe_router_type.startswith("top") + # extract k from config.moe_router_type + self.k = int(config.moe_router_type[3:]) + self.token_dispatcher = MoEZeroDropTokenDispatcher( + num_local_experts, local_expert_indices, self.k, config + ) + self.moe_aux_loss_func = switch_load_balancing_loss_func + + def routing(self, logits: torch.Tensor): + """ + Get the routing results. + + Args: + logits (torch.Tensor): Logits tensor. + + Returns: + Tuple[torch.Tensor, torch.Tensor]: Tuple of tensors representing max probs and the indices. 
+ """ + logits = logits.view(-1, self.config.num_moe_experts) + logits = logits.to(dtype=torch.float32) + + if self.config.moe_z_loss_coeff > 0: + # Apply Z-Loss + logits = self.apply_z_loss(logits) + + scores, indices = torch.topk(logits, k=self.k, dim=1) + + if self.config.moe_aux_loss_coeff > 0: + # Apply load balancing loss + scores = self.apply_aux_loss(self.moe_aux_loss_func, scores, indices) + + return scores, indices + + class MoEAuxLossAutoScaler(torch.autograd.Function): main_loss_backward_scale = 1 diff --git a/megatron/core/transformer/moe/moe_layer.py b/megatron/core/transformer/moe/moe_layer.py index 6266f81a61..c01f83faf3 100644 --- a/megatron/core/transformer/moe/moe_layer.py +++ b/megatron/core/transformer/moe/moe_layer.py @@ -7,7 +7,7 @@ from megatron.core import parallel_state from megatron.core.transformer.mlp import MLPSubmodules from megatron.core.transformer.module import MegatronModule -from megatron.core.transformer.moe.base_moe_layer import ZeroDropSinkhornRouter +from megatron.core.transformer.moe.base_moe_layer import ZeroDropSinkhornRouter, ZeroDropTopKRouter from megatron.core.transformer.moe.grouped_mlp import GroupedMLP from megatron.core.transformer.moe.switch_mlp import SwitchMLP from megatron.core.transformer.transformer_config import TransformerConfig @@ -36,6 +36,14 @@ def forward(self, hidden_states): class SwitchMLPLayer(BaseMoELayer): + """ + Top-K Mixture of Experts Layer Without Token Dropping. + Currently supports Sinkhorn-based expert routing (Top-1 only) and a generalized Top-k routing with Z loss and auxiliary loss. + + Args: + BaseMoELayer (MegatronModule): Base class for MoE layers + """ + def __init__(self, config: TransformerConfig, submodules: MLPSubmodules = None): self.submodules = submodules super(SwitchMLPLayer, self).__init__(config=config) @@ -48,6 +56,7 @@ def __init__(self, config: TransformerConfig, submodules: MLPSubmodules = None): ] self.router = self.initialize_router() self.experts = self.initialize_experts() + assert config.moe_token_dropping is False def forward(self, hidden_states): # process MoE @@ -78,7 +87,14 @@ def initialize_experts(self): return experts def initialize_router(self): - router = ZeroDropSinkhornRouter( - self.num_local_experts, self.local_expert_indices, self.config - ) + if self.config.moe_router_type.lower().startswith("top"): + router = ZeroDropTopKRouter( + self.num_local_experts, self.local_expert_indices, self.config + ) + elif self.config.moe_router_type.lower() == "sinkhorn": + router = ZeroDropSinkhornRouter( + self.num_local_experts, self.local_expert_indices, self.config + ) + else: + raise NotImplementedError(f"Routing method {self.config.moe_router_type} not supported") return router diff --git a/megatron/core/transformer/transformer_config.py b/megatron/core/transformer/transformer_config.py index 3cb2cf2ebe..7859d3c2c8 100644 --- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -60,6 +60,10 @@ class TransformerConfig(ModelParallelConfig): window_size ((int,int) or None): If not None, then will use sliding window attention. The size of the window is specified by the numbers inside the tuple; -1 is special value meaning "infinite window size". 
moe_grouped_gemm (bool): When there are multiple experts per rank, compress multiple local (potentially small) gemms in a single kernel launch to improve the utilization and performance by leveraging the Grouped GEMM feature introduced since CUTLASS 2.8 (https://github.com/fanshiqing/grouped_gemm). + moe_aux_loss_coeff (float): Scaling coefficient for the aux loss: a starting value of 1e-2 is recommended. + moe_z_loss_coeff (float): Scaling coefficient for the z-loss: a starting value of 1e-3 is recommended. + moe_router_type (str): Options for router type. Currently supports sinkhorn and topk router. + moe_token_dropping (bool): Currently unsupported. This feature involves selectively dropping and padding tokens for each expert to achieve a specified capacity, similar to to GShard, Switch-Transformer, and DeepSpeed-MoE., """ # model architecture From a98c5ba19c44ae0df3d06f4bd1920e33288e4e91 Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Tue, 26 Dec 2023 07:46:16 +0000 Subject: [PATCH 156/296] Add UT. Fix top-k >1 when EP is off. --- .../core/transformer/moe/base_moe_layer.py | 39 +++++++++--- .../transformer/moe/test_routers.py | 58 ++++++++++++++++++ .../transformer/moe/test_token_dispatcher.py | 59 +++++++++++++++++++ 3 files changed, 149 insertions(+), 7 deletions(-) create mode 100644 tests/unit_tests/transformer/moe/test_routers.py create mode 100644 tests/unit_tests/transformer/moe/test_token_dispatcher.py diff --git a/megatron/core/transformer/moe/base_moe_layer.py b/megatron/core/transformer/moe/base_moe_layer.py index 2875c470f1..84956eeef2 100644 --- a/megatron/core/transformer/moe/base_moe_layer.py +++ b/megatron/core/transformer/moe/base_moe_layer.py @@ -266,10 +266,18 @@ def dispatch(self, hidden_states, max_prob, max_ind): global_local_map = global_local_map.view(-1, 1).expand(-1, hidden_states.shape[-1]) local_hidden_states = torch.gather(global_hidden_states, 0, global_local_map) else: - local_indices = max_ind - local_probs = max_prob - local_hidden_states = hidden_states - global_local_map = None + if self.k > 1: + global_local_map = torch.ones_like(max_ind).bool() + local_indices = max_ind.masked_select(global_local_map) + local_probs = max_prob.masked_select(global_local_map) + global_local_map = global_local_map.nonzero()[:, 0] + global_local_map = global_local_map.view(-1, 1).expand(-1, hidden_states.shape[-1]) + local_hidden_states = torch.gather(hidden_states, 0, global_local_map) + else: + local_indices = max_ind + local_probs = max_prob + local_hidden_states = hidden_states + global_local_map = None with torch.no_grad(): # The indices of local_indices that give its sorted order along dim 0. 
@@ -367,6 +375,22 @@ def restore(self, hidden_states, scores, indices, global_local_map=None, bias=No output_bias_total = ( output_bias_total / parallel_state.get_tensor_model_parallel_world_size() ) + else: + if self.k > 1: + global_num_tokens = self.hidden_shape[0] * self.hidden_shape[1] + global_hidden_shape = [global_num_tokens, hidden_states.shape[-1]] + unpermuted_global_hidden = torch.zeros( + global_hidden_shape, dtype=hidden_states.dtype, device=torch.cuda.current_device() + ) + output_total = unpermuted_global_hidden.scatter_add( + 0, global_local_map, unpermuted_local_hidden + ) + if self.add_bias: + unpermuted_global_bias = torch.zeros_like(unpermuted_global_hidden) + output_bias_total = unpermuted_global_bias.scatter_add( + 0, global_local_map, unpermuted_local_bias + ) + if self.k == 1: output_total = output_total * scores output_total = output_total.view(self.hidden_shape) @@ -474,15 +498,16 @@ def routing(self, logits: torch.Tensor): """ logits = logits.view(-1, self.config.num_moe_experts) logits = logits.to(dtype=torch.float32) - + logits = torch.softmax(logits, dim=-1) + + # Apply Z-Loss if self.config.moe_z_loss_coeff > 0: - # Apply Z-Loss logits = self.apply_z_loss(logits) scores, indices = torch.topk(logits, k=self.k, dim=1) + # Apply load balancing loss if self.config.moe_aux_loss_coeff > 0: - # Apply load balancing loss scores = self.apply_aux_loss(self.moe_aux_loss_func, scores, indices) return scores, indices diff --git a/tests/unit_tests/transformer/moe/test_routers.py b/tests/unit_tests/transformer/moe/test_routers.py new file mode 100644 index 0000000000..17a970ecfb --- /dev/null +++ b/tests/unit_tests/transformer/moe/test_routers.py @@ -0,0 +1,58 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + +import pytest + +import torch + +from megatron.core.transformer.moe.base_moe_layer import Router, ZeroDropTopKRouter +from megatron.initialize import _set_random_seed +from tests.unit_tests.test_utilities import Utils +from megatron.core.transformer.transformer_config import TransformerConfig + + +class TestZeroDropTop2Router: + def setup_method(self, method): + Utils.initialize_model_parallel(1, 1) + _set_random_seed(seed_=123, data_parallel_random_init=False) + print("done intializing") + num_moe_experts = 4 + transformer_config = TransformerConfig( + num_layers=2, + hidden_size=12, + num_attention_heads=4, + num_moe_experts=num_moe_experts, + use_cpu_initialization=True, + moe_router_type="top2", + ) + self.router = ZeroDropTopKRouter( + num_local_experts=num_moe_experts, + local_expert_indices=range(num_moe_experts), + config=transformer_config, + ) + + def teardown_method(self, method): + Utils.destroy_model_parallel() + + def test_constructor(self): + assert isinstance(self.router, Router) + + num_weights = sum([p.numel() for p in self.router.parameters()]) + assert num_weights == 12 * 4, num_weights + + @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") + def test_gpu_forward(self): + self.router = self.router.cuda() + # [num tokens, hidden size] + hidden_states = torch.randn((32, self.router.config.hidden_size)) + hidden_states = hidden_states.cuda() + scores, indices = self.router(hidden_states) + print(scores.shape, indices.shape) + assert scores.shape == (32, 2) + assert indices.shape == (32, 2) + print( + (indices == 0).sum(), (indices == 1).sum(), (indices == 2).sum(), (indices == 3).sum() + ) + assert (indices == 0).sum() == 15, (indices == 0).sum() + assert (indices == 1).sum() == 18, (indices == 1).sum() 
+ assert (indices == 2).sum() == 18, (indices == 2).sum() + assert (indices == 3).sum() == 13, (indices == 3).sum() diff --git a/tests/unit_tests/transformer/moe/test_token_dispatcher.py b/tests/unit_tests/transformer/moe/test_token_dispatcher.py new file mode 100644 index 0000000000..8725561fe7 --- /dev/null +++ b/tests/unit_tests/transformer/moe/test_token_dispatcher.py @@ -0,0 +1,59 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + +import pytest + +import torch + +from megatron.core.transformer.moe.base_moe_layer import Router, ZeroDropTopKRouter +from megatron.initialize import _set_random_seed +from tests.unit_tests.test_utilities import Utils +from megatron.core.transformer.transformer_config import TransformerConfig + + +class TestZeroDropDispatcher: + def setup_method(self, method): + Utils.initialize_model_parallel(1, 1) + _set_random_seed(seed_=123, data_parallel_random_init=False) + print("done intializing") + num_moe_experts = 4 + transformer_config = TransformerConfig( + num_layers=2, + hidden_size=12, + num_attention_heads=4, + num_moe_experts=num_moe_experts, + use_cpu_initialization=True, + moe_router_type="top2", + ) + self.router = ZeroDropTopKRouter( + num_local_experts=num_moe_experts, + local_expert_indices=range(num_moe_experts), + config=transformer_config, + ) + self.token_dispatcher = self.router.token_dispatcher + + def teardown_method(self, method): + Utils.destroy_model_parallel() + + @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") + def test_gpu_forward(self): + self.router = self.router.cuda() + # [bs, seql, hidden size] + hidden_states = torch.randn((32, 8, self.router.config.hidden_size)) + hidden_states = hidden_states.cuda() + scores, indices = self.router(hidden_states) + assert scores.shape == (256, 2), "Scores shape is not correct" + assert indices.shape == (256, 2), "Indices shape is not correct" + print( + (indices == 0).sum(), (indices == 1).sum(), (indices == 2).sum(), (indices == 3).sum() + ) + ( + permuted_local_hidden_states, + tokens_per_expert, + local_probs, + revert_indices, + global_local_map, + ) = self.token_dispatcher.dispatch(hidden_states, scores, indices) + probs = torch.ones_like(local_probs) / 2 + restored_hidden_states, restored_bias = self.token_dispatcher.restore(permuted_local_hidden_states, probs, revert_indices, global_local_map, bias=torch.zeros_like(permuted_local_hidden_states)) + + assert torch.allclose(restored_hidden_states, hidden_states), "Restored hidden states do not match original hidden states" From 0f80408b04ca62f3f77059436fbc83dd375fa46f Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Tue, 26 Dec 2023 08:43:44 +0000 Subject: [PATCH 157/296] Noramlize the token scores. --- megatron/core/transformer/moe/base_moe_layer.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/megatron/core/transformer/moe/base_moe_layer.py b/megatron/core/transformer/moe/base_moe_layer.py index 84956eeef2..aec8bab123 100644 --- a/megatron/core/transformer/moe/base_moe_layer.py +++ b/megatron/core/transformer/moe/base_moe_layer.py @@ -505,6 +505,8 @@ def routing(self, logits: torch.Tensor): logits = self.apply_z_loss(logits) scores, indices = torch.topk(logits, k=self.k, dim=1) + + scores /= scores.sum(dim=-1, keepdim=True) # Apply load balancing loss if self.config.moe_aux_loss_coeff > 0: From de37485c4e4ee9b29a2d6f4e7412180a582a48cb Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Tue, 26 Dec 2023 09:55:08 +0000 Subject: [PATCH 158/296] Code clean. 
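Note on the score normalization added in the patch just above: torch.topk keeps only k of the softmax probabilities, so the selected gate weights no longer sum to one per token; dividing by their row sum restores a proper convex combination before the expert outputs are mixed. A minimal standalone sketch of that step (plain PyTorch with illustrative shapes, not the Megatron routing code):

    import torch

    # 4 tokens routed over 8 experts with top-2 selection (illustrative sizes).
    logits = torch.randn(4, 8)
    probs = torch.softmax(logits.float(), dim=-1)

    scores, indices = torch.topk(probs, k=2, dim=1)

    # Renormalize so the two gate weights of each token sum to 1,
    # mirroring `scores /= scores.sum(dim=-1, keepdim=True)` above.
    scores = scores / scores.sum(dim=-1, keepdim=True)

    assert torch.allclose(scores.sum(dim=-1), torch.ones(4))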
--- megatron/core/transformer/moe/base_moe_layer.py | 10 ++++++---- .../transformer/moe/test_token_dispatcher.py | 14 +++++++++++--- 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/megatron/core/transformer/moe/base_moe_layer.py b/megatron/core/transformer/moe/base_moe_layer.py index aec8bab123..5e18c0e106 100644 --- a/megatron/core/transformer/moe/base_moe_layer.py +++ b/megatron/core/transformer/moe/base_moe_layer.py @@ -380,7 +380,9 @@ def restore(self, hidden_states, scores, indices, global_local_map=None, bias=No global_num_tokens = self.hidden_shape[0] * self.hidden_shape[1] global_hidden_shape = [global_num_tokens, hidden_states.shape[-1]] unpermuted_global_hidden = torch.zeros( - global_hidden_shape, dtype=hidden_states.dtype, device=torch.cuda.current_device() + global_hidden_shape, + dtype=hidden_states.dtype, + device=torch.cuda.current_device(), ) output_total = unpermuted_global_hidden.scatter_add( 0, global_local_map, unpermuted_local_hidden @@ -390,7 +392,7 @@ def restore(self, hidden_states, scores, indices, global_local_map=None, bias=No output_bias_total = unpermuted_global_bias.scatter_add( 0, global_local_map, unpermuted_local_bias ) - + if self.k == 1: output_total = output_total * scores output_total = output_total.view(self.hidden_shape) @@ -499,13 +501,13 @@ def routing(self, logits: torch.Tensor): logits = logits.view(-1, self.config.num_moe_experts) logits = logits.to(dtype=torch.float32) logits = torch.softmax(logits, dim=-1) - + # Apply Z-Loss if self.config.moe_z_loss_coeff > 0: logits = self.apply_z_loss(logits) scores, indices = torch.topk(logits, k=self.k, dim=1) - + scores /= scores.sum(dim=-1, keepdim=True) # Apply load balancing loss diff --git a/tests/unit_tests/transformer/moe/test_token_dispatcher.py b/tests/unit_tests/transformer/moe/test_token_dispatcher.py index 8725561fe7..2624386ae8 100644 --- a/tests/unit_tests/transformer/moe/test_token_dispatcher.py +++ b/tests/unit_tests/transformer/moe/test_token_dispatcher.py @@ -54,6 +54,14 @@ def test_gpu_forward(self): global_local_map, ) = self.token_dispatcher.dispatch(hidden_states, scores, indices) probs = torch.ones_like(local_probs) / 2 - restored_hidden_states, restored_bias = self.token_dispatcher.restore(permuted_local_hidden_states, probs, revert_indices, global_local_map, bias=torch.zeros_like(permuted_local_hidden_states)) - - assert torch.allclose(restored_hidden_states, hidden_states), "Restored hidden states do not match original hidden states" + restored_hidden_states, restored_bias = self.token_dispatcher.restore( + permuted_local_hidden_states, + probs, + revert_indices, + global_local_map, + bias=torch.zeros_like(permuted_local_hidden_states), + ) + + assert torch.allclose( + restored_hidden_states, hidden_states + ), "Restored hidden states do not match original hidden states" From 8efc8de8d0fc3c617d955c5d1a59b5f321b7511f Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Tue, 26 Dec 2023 11:46:32 +0000 Subject: [PATCH 159/296] Fix moe aux loss. 
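For reference while reading the aux-loss fix below: the Switch-Transformer load-balancing loss is the dot product of the mean router probability per expert and the mean selection frequency per expert, scaled by the number of experts and moe_aux_loss_coeff, which is what switch_load_balancing_loss_func in moe_utils.py computes after this patch. A small self-contained sketch of that computation (shapes and names are illustrative, not the Megatron module):

    import torch

    def load_balancing_loss(probs, indices, num_experts, coeff):
        # probs: [num_tokens, num_experts] router softmax output.
        # indices: [num_tokens, k] experts chosen for each token.
        mask = torch.nn.functional.one_hot(indices, num_classes=num_experts).sum(dim=1)
        gates_mean = probs.mean(dim=0)             # mean routing probability per expert
        selection_mean = mask.float().mean(dim=0)  # fraction of tokens sent to each expert
        return torch.sum(gates_mean * selection_mean) * num_experts * coeff

    probs = torch.softmax(torch.randn(16, 4), dim=-1)
    _, indices = torch.topk(probs, k=2, dim=1)
    print(load_balancing_loss(probs, indices, num_experts=4, coeff=1e-2))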
--- .../core/transformer/moe/base_moe_layer.py | 18 +++++++++--------- megatron/core/transformer/moe/moe_utils.py | 7 +++---- 2 files changed, 12 insertions(+), 13 deletions(-) diff --git a/megatron/core/transformer/moe/base_moe_layer.py b/megatron/core/transformer/moe/base_moe_layer.py index 5e18c0e106..c5d9ca6a82 100644 --- a/megatron/core/transformer/moe/base_moe_layer.py +++ b/megatron/core/transformer/moe/base_moe_layer.py @@ -117,11 +117,11 @@ def forward(self, input: torch.Tensor): return scores, indices - def apply_aux_loss(self, loss_func, scores, indicies): - mask = torch.nn.functional.one_hot(indicies, num_classes=self.num_experts).sum(dim=1) - aux_loss = loss_func(scores, mask) - scores = MoEAuxLossAutoScaler.apply(scores, aux_loss) - return scores + def apply_aux_loss(self, loss_func, probs, indices): + mask = torch.nn.functional.one_hot(indices, num_classes=self.num_experts).sum(dim=1) + aux_loss = loss_func(probs, mask, self.config.moe_aux_loss_coeff) + indices = MoEAuxLossAutoScaler.apply(indices, aux_loss) + return indices def apply_z_loss(self, logits): """Encourages the router's logits to remain small to enhance stability. @@ -500,19 +500,19 @@ def routing(self, logits: torch.Tensor): """ logits = logits.view(-1, self.config.num_moe_experts) logits = logits.to(dtype=torch.float32) - logits = torch.softmax(logits, dim=-1) + probs = torch.softmax(logits, dim=-1) # Apply Z-Loss if self.config.moe_z_loss_coeff > 0: - logits = self.apply_z_loss(logits) + probs = self.apply_z_loss(probs) - scores, indices = torch.topk(logits, k=self.k, dim=1) + scores, indices = torch.topk(probs, k=self.k, dim=1) scores /= scores.sum(dim=-1, keepdim=True) # Apply load balancing loss if self.config.moe_aux_loss_coeff > 0: - scores = self.apply_aux_loss(self.moe_aux_loss_func, scores, indices) + indices = self.apply_aux_loss(self.moe_aux_loss_func, probs, indices) return scores, indices diff --git a/megatron/core/transformer/moe/moe_utils.py b/megatron/core/transformer/moe/moe_utils.py index 04a53d021c..938324933d 100644 --- a/megatron/core/transformer/moe/moe_utils.py +++ b/megatron/core/transformer/moe/moe_utils.py @@ -1,7 +1,7 @@ import torch -def switch_load_balancing_loss_func(config, gates, mask): +def switch_load_balancing_loss_func(gates, mask, moe_aux_loss_coeff): """Calculate the auxiliary loss for better load balacing. Please refer to the Switch Transformer paper (https://arxiv.org/abs/2101.03961) for details. @@ -12,12 +12,11 @@ def switch_load_balancing_loss_func(config, gates, mask): Returns: torch.Tensor: The auxiliary loss for load balancing. """ - num_experts = mask.size(1) - assert num_experts == config.num_moe_experts + num_experts = mask.size(-1) gates_mean = gates.mean(dim=0) selection_mean = mask.float().mean(dim=0) aux_loss = torch.sum(gates_mean * selection_mean) * num_experts - aux_loss *= config.aux_loss_coeff + aux_loss *= moe_aux_loss_coeff return aux_loss From 15e75b08902805e5d08cddb7d2ed957a092a5d43 Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Thu, 28 Dec 2023 12:38:09 +0000 Subject: [PATCH 160/296] Fix UTs; Fix MoE Loss. 
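The substantive fix in this patch is where the auxiliary loss is attached: it has to be hooked onto an activation that stays in the forward path (the router scores), not onto the integer expert indices, otherwise its gradient never reaches the gate weights, which is what the updated router test checks. A simplified sketch of the pattern, a custom autograd function that forwards the activation unchanged and injects the aux-loss gradient on backward; the real MoEAuxLossAutoScaler additionally applies a configurable loss scale:

    import torch

    class AttachAuxLoss(torch.autograd.Function):
        @staticmethod
        def forward(ctx, activation, aux_loss):
            ctx.save_for_backward(aux_loss)
            return activation                      # forward value is untouched

        @staticmethod
        def backward(ctx, grad_output):
            (aux_loss,) = ctx.saved_tensors
            return grad_output, torch.ones_like(aux_loss)

    gate = torch.nn.Linear(8, 4, bias=False)
    scores = torch.softmax(gate(torch.randn(16, 8)), dim=-1)
    aux_loss = (scores.mean(dim=0) ** 2).sum()     # stand-in for the real balancing loss
    scores = AttachAuxLoss.apply(scores, aux_loss)
    scores.sum().mul_(0).backward()                # the main loss contributes nothing
    print(gate.weight.grad.abs().sum())            # non-zero only because of the aux loss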
--- .../core/transformer/moe/base_moe_layer.py | 33 +++++++--- megatron/core/transformer/moe/moe_layer.py | 6 +- .../transformer/moe/test_grouped_mlp.py | 16 ++--- .../transformer/moe/test_routers.py | 63 ++++++++++++------- .../transformer/moe/test_switch_mlp.py | 8 +-- .../transformer/moe/test_token_dispatcher.py | 6 +- 6 files changed, 82 insertions(+), 50 deletions(-) diff --git a/megatron/core/transformer/moe/base_moe_layer.py b/megatron/core/transformer/moe/base_moe_layer.py index c5d9ca6a82..6e6d4adf1b 100644 --- a/megatron/core/transformer/moe/base_moe_layer.py +++ b/megatron/core/transformer/moe/base_moe_layer.py @@ -117,11 +117,23 @@ def forward(self, input: torch.Tensor): return scores, indices - def apply_aux_loss(self, loss_func, probs, indices): + def apply_aux_loss(self, loss_func, probs, indices, activation): + """ + Applies auxiliary loss to the MoE layer. + + Args: + loss_func (callable): The loss function to be used. + probs (torch.Tensor): The probabilities output by the MoE layer. + indices (torch.Tensor): The indices of the selected experts. + activation (torch.Tensor): The activation tensor to attach the gradient function to. + + Returns: + torch.Tensor: The activation tensor with the attached gradient function. + """ mask = torch.nn.functional.one_hot(indices, num_classes=self.num_experts).sum(dim=1) aux_loss = loss_func(probs, mask, self.config.moe_aux_loss_coeff) - indices = MoEAuxLossAutoScaler.apply(indices, aux_loss) - return indices + activation = MoEAuxLossAutoScaler.apply(activation, aux_loss) + return activation def apply_z_loss(self, logits): """Encourages the router's logits to remain small to enhance stability. @@ -182,7 +194,7 @@ def restore( raise NotImplementedError -class MoEZeroDropTokenDispatcher(MoETokenDispatcher): +class MoEDroplessTokenDispatcher(MoETokenDispatcher): """ Token dispatcher without token dropping. """ @@ -341,7 +353,7 @@ def restore(self, hidden_states, scores, indices, global_local_map=None, bias=No unpermuted_local_bias = unpermuted_local_bias * scores.view(-1, 1) output_total = unpermuted_local_hidden - output_bias_total = None + output_bias_total = unpermuted_local_bias # Unpermute the tokens across expert parallel devices. if self.config.sequence_parallel or (self.config.expert_model_parallel_size > 1): @@ -407,7 +419,7 @@ def restore(self, hidden_states, scores, indices, global_local_map=None, bias=No return output_total, output_bias_total -class ZeroDropSinkhornRouter(Router): +class DroplessSinkhornRouter(Router): """ Sinkhorn Router without token dropping. """ @@ -422,7 +434,7 @@ def __init__(self, num_local_experts, local_expert_indices, config: TransformerC self.route_algo = self.sinkhorn self.router_activation = torch.sigmoid self.k = 1 - self.token_dispatcher = MoEZeroDropTokenDispatcher( + self.token_dispatcher = MoEDroplessTokenDispatcher( num_local_experts, local_expert_indices, self.k, config ) @@ -469,7 +481,7 @@ def routing(self, logits: torch.Tensor): return scores, indices -class ZeroDropTopKRouter(Router): +class DroplessTopKRouter(Router): """ Sinkhorn Router without token dropping. 
""" @@ -483,7 +495,7 @@ def __init__(self, num_local_experts, local_expert_indices, config: TransformerC assert config.moe_router_type.startswith("top") # extract k from config.moe_router_type self.k = int(config.moe_router_type[3:]) - self.token_dispatcher = MoEZeroDropTokenDispatcher( + self.token_dispatcher = MoEDroplessTokenDispatcher( num_local_experts, local_expert_indices, self.k, config ) self.moe_aux_loss_func = switch_load_balancing_loss_func @@ -512,7 +524,7 @@ def routing(self, logits: torch.Tensor): # Apply load balancing loss if self.config.moe_aux_loss_coeff > 0: - indices = self.apply_aux_loss(self.moe_aux_loss_func, probs, indices) + scores = self.apply_aux_loss(self.moe_aux_loss_func, probs, indices, activation=scores) return scores, indices @@ -532,6 +544,7 @@ def backward(ctx, grad_output): (aux_loss,) = ctx.saved_tensors aux_loss_backward_scale = MoEAuxLossAutoScaler.main_loss_backward_scale scaled_aux_loss_grad = torch.ones_like(aux_loss) * aux_loss_backward_scale + print("233333, trigger backward!") return grad_output, scaled_aux_loss_grad @staticmethod diff --git a/megatron/core/transformer/moe/moe_layer.py b/megatron/core/transformer/moe/moe_layer.py index c01f83faf3..69d5e24710 100644 --- a/megatron/core/transformer/moe/moe_layer.py +++ b/megatron/core/transformer/moe/moe_layer.py @@ -7,7 +7,7 @@ from megatron.core import parallel_state from megatron.core.transformer.mlp import MLPSubmodules from megatron.core.transformer.module import MegatronModule -from megatron.core.transformer.moe.base_moe_layer import ZeroDropSinkhornRouter, ZeroDropTopKRouter +from megatron.core.transformer.moe.base_moe_layer import DroplessSinkhornRouter, DroplessTopKRouter from megatron.core.transformer.moe.grouped_mlp import GroupedMLP from megatron.core.transformer.moe.switch_mlp import SwitchMLP from megatron.core.transformer.transformer_config import TransformerConfig @@ -88,11 +88,11 @@ def initialize_experts(self): def initialize_router(self): if self.config.moe_router_type.lower().startswith("top"): - router = ZeroDropTopKRouter( + router = DroplessTopKRouter( self.num_local_experts, self.local_expert_indices, self.config ) elif self.config.moe_router_type.lower() == "sinkhorn": - router = ZeroDropSinkhornRouter( + router = DroplessSinkhornRouter( self.num_local_experts, self.local_expert_indices, self.config ) else: diff --git a/tests/unit_tests/transformer/moe/test_grouped_mlp.py b/tests/unit_tests/transformer/moe/test_grouped_mlp.py index 84fb5bbfde..193086a8e0 100644 --- a/tests/unit_tests/transformer/moe/test_grouped_mlp.py +++ b/tests/unit_tests/transformer/moe/test_grouped_mlp.py @@ -7,8 +7,7 @@ from megatron.arguments import parse_args from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec -from megatron.core.transformer.moe.grouped_mlp import GroupedMLP -from megatron.core.transformer.moe.switch_mlp import SwitchMLP +from megatron.core.transformer.moe.moe_layer import SwitchMLPLayer from megatron.core.transformer.transformer_config import TransformerConfig from megatron.initialize import _set_random_seed from megatron.model import Float16Module @@ -39,8 +38,8 @@ def setup_method(self, method, use_cpu_initialization=False, swiglu=True): num_layers=num_layers, hidden_size=self.hidden_size, num_attention_heads=4, num_moe_experts=self.num_experts, use_cpu_initialization=self.use_cpu_initialization, add_bias_linear=False, gated_linear_unit=self.gated_linear_unit, - bias_activation_fusion=False, - bf16=True, params_dtype=torch.bfloat16) + 
bias_gelu_fusion=False, + bf16=True, params_dtype=torch.bfloat16, moe_router_type="sinkhorn") self.fc1_ffn_hidden_size = tf_config.ffn_hidden_size self.fc2_ffn_hidden_size = tf_config.ffn_hidden_size @@ -53,7 +52,7 @@ def setup_method(self, method, use_cpu_initialization=False, swiglu=True): _set_random_seed(seed_=123, data_parallel_random_init=False) transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec( self.num_experts, moe_grouped_gemm=False) - self.switch_mlp_smm = SwitchMLP(tf_config, + self.switch_mlp_smm = SwitchMLPLayer(tf_config, transformer_layer_spec.submodules.mlp.submodules) self.args = parse_args(ignore_unknown_args=True) @@ -66,7 +65,8 @@ def setup_method(self, method, use_cpu_initialization=False, swiglu=True): ## Grouped GEMM _set_random_seed(seed_=123, data_parallel_random_init=False) - self.switch_mlp_gmm = GroupedMLP(tf_config) + tf_config.moe_grouped_gemm = True + self.switch_mlp_gmm = SwitchMLPLayer(tf_config) self.switch_mlp_gmm = Float16Module(self.switch_mlp_gmm, self.args).module print("done intializing for grouped gemm") @@ -74,8 +74,8 @@ def teardown_method(self, method): Utils.destroy_model_parallel() def test_constructor(self): - assert isinstance(self.switch_mlp_smm, SwitchMLP) - assert isinstance(self.switch_mlp_gmm, GroupedMLP) + assert isinstance(self.switch_mlp_smm, SwitchMLPLayer) + assert isinstance(self.switch_mlp_gmm, SwitchMLPLayer) num_weights_smm = sum([p.numel() for p in self.switch_mlp_smm.parameters()]) num_weights_gmm = sum([p.numel() for p in self.switch_mlp_gmm.parameters()]) diff --git a/tests/unit_tests/transformer/moe/test_routers.py b/tests/unit_tests/transformer/moe/test_routers.py index 17a970ecfb..5966951d2c 100644 --- a/tests/unit_tests/transformer/moe/test_routers.py +++ b/tests/unit_tests/transformer/moe/test_routers.py @@ -4,31 +4,36 @@ import torch -from megatron.core.transformer.moe.base_moe_layer import Router, ZeroDropTopKRouter +from megatron.core.transformer.moe.base_moe_layer import Router from megatron.initialize import _set_random_seed from tests.unit_tests.test_utilities import Utils from megatron.core.transformer.transformer_config import TransformerConfig +from megatron.core.transformer.moe.moe_layer import SwitchMLPLayer +from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec -class TestZeroDropTop2Router: +class TestDroplessTop2Router: def setup_method(self, method): Utils.initialize_model_parallel(1, 1) _set_random_seed(seed_=123, data_parallel_random_init=False) print("done intializing") num_moe_experts = 4 - transformer_config = TransformerConfig( + self.transformer_config = TransformerConfig( num_layers=2, hidden_size=12, num_attention_heads=4, num_moe_experts=num_moe_experts, use_cpu_initialization=True, moe_router_type="top2", + moe_aux_loss_coeff=0, ) - self.router = ZeroDropTopKRouter( - num_local_experts=num_moe_experts, - local_expert_indices=range(num_moe_experts), - config=transformer_config, + transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec( + num_experts=num_moe_experts, moe_grouped_gemm=False ) + self.switch_mlp = SwitchMLPLayer( + self.transformer_config, transformer_layer_spec.submodules.mlp.submodules + ) + self.router = self.switch_mlp.router def teardown_method(self, method): Utils.destroy_model_parallel() @@ -40,19 +45,33 @@ def test_constructor(self): assert num_weights == 12 * 4, num_weights @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") - def test_gpu_forward(self): - self.router = 
self.router.cuda() - # [num tokens, hidden size] - hidden_states = torch.randn((32, self.router.config.hidden_size)) + def test_router_forward(self): + with torch.no_grad(): + self.router = self.router.cuda() + # [num tokens, hidden size] + hidden_states = torch.randn((32, 2, self.router.config.hidden_size)) + hidden_states = hidden_states.cuda() + scores, indices = self.router(hidden_states) + print(scores.shape, indices.shape) + assert scores.shape == (64, 2) + assert indices.shape == (64, 2) + print( + (indices == 0).sum(), (indices == 1).sum(), (indices == 2).sum(), (indices == 3).sum() + ) + + @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") + def test_aux_loss(self): + self.switch_mlp = self.switch_mlp.cuda() + + # Without aux loss + hidden_states = torch.randn((32, 2, self.router.config.hidden_size)) hidden_states = hidden_states.cuda() - scores, indices = self.router(hidden_states) - print(scores.shape, indices.shape) - assert scores.shape == (32, 2) - assert indices.shape == (32, 2) - print( - (indices == 0).sum(), (indices == 1).sum(), (indices == 2).sum(), (indices == 3).sum() - ) - assert (indices == 0).sum() == 15, (indices == 0).sum() - assert (indices == 1).sum() == 18, (indices == 1).sum() - assert (indices == 2).sum() == 18, (indices == 2).sum() - assert (indices == 3).sum() == 13, (indices == 3).sum() + out = self.switch_mlp(hidden_states)[0] + out.sum().mul_(0).backward() + assert self.switch_mlp.router.gate.weight.grad.abs().sum() == 0 + + # With aux loss + self.transformer_config.moe_aux_loss_coeff = 1 + out = self.switch_mlp(hidden_states)[0] + out.sum().mul_(0).backward() + assert self.switch_mlp.router.gate.weight.grad.abs().sum() > 0 \ No newline at end of file diff --git a/tests/unit_tests/transformer/moe/test_switch_mlp.py b/tests/unit_tests/transformer/moe/test_switch_mlp.py index b7ee023349..73d17e4102 100644 --- a/tests/unit_tests/transformer/moe/test_switch_mlp.py +++ b/tests/unit_tests/transformer/moe/test_switch_mlp.py @@ -4,7 +4,7 @@ import torch -from megatron.core.transformer.moe.switch_mlp import SwitchMLP +from megatron.core.transformer.moe.moe_layer import SwitchMLPLayer from tests.unit_tests.test_utilities import Utils from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed from megatron.core.transformer.transformer_config import TransformerConfig @@ -17,16 +17,16 @@ def setup_method(self, method): model_parallel_cuda_manual_seed(123) print("done intializing") num_moe_experts = 2 - transformer_config = TransformerConfig(num_layers=2, hidden_size=12, num_attention_heads=4, num_moe_experts=num_moe_experts, use_cpu_initialization=True) + transformer_config = TransformerConfig(num_layers=2, hidden_size=12, num_attention_heads=4, num_moe_experts=num_moe_experts, use_cpu_initialization=True, moe_router_type="sinkhorn") transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec( num_experts=num_moe_experts, moe_grouped_gemm=False) - self.switch_mlp = SwitchMLP(transformer_config, transformer_layer_spec.submodules.mlp.submodules) + self.switch_mlp = SwitchMLPLayer(transformer_config, transformer_layer_spec.submodules.mlp.submodules) def teardown_method(self, method): Utils.destroy_model_parallel() def test_constructor(self): - assert isinstance(self.switch_mlp, SwitchMLP) + assert isinstance(self.switch_mlp, SwitchMLPLayer) num_weights = sum([p.numel() for p in self.switch_mlp.parameters()]) assert num_weights == 2448 diff --git a/tests/unit_tests/transformer/moe/test_token_dispatcher.py 
b/tests/unit_tests/transformer/moe/test_token_dispatcher.py index 2624386ae8..32bb4ddc0d 100644 --- a/tests/unit_tests/transformer/moe/test_token_dispatcher.py +++ b/tests/unit_tests/transformer/moe/test_token_dispatcher.py @@ -4,13 +4,13 @@ import torch -from megatron.core.transformer.moe.base_moe_layer import Router, ZeroDropTopKRouter +from megatron.core.transformer.moe.base_moe_layer import Router, DroplessTopKRouter from megatron.initialize import _set_random_seed from tests.unit_tests.test_utilities import Utils from megatron.core.transformer.transformer_config import TransformerConfig -class TestZeroDropDispatcher: +class TestDroplessDispatcher: def setup_method(self, method): Utils.initialize_model_parallel(1, 1) _set_random_seed(seed_=123, data_parallel_random_init=False) @@ -24,7 +24,7 @@ def setup_method(self, method): use_cpu_initialization=True, moe_router_type="top2", ) - self.router = ZeroDropTopKRouter( + self.router = DroplessTopKRouter( num_local_experts=num_moe_experts, local_expert_indices=range(num_moe_experts), config=transformer_config, From dd0411b5f238e2bdb3e090558b87bbf83cf2b4ac Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Thu, 28 Dec 2023 12:46:13 +0000 Subject: [PATCH 161/296] Add Z loss UT. --- megatron/core/transformer/moe/base_moe_layer.py | 1 - tests/unit_tests/transformer/moe/test_routers.py | 8 ++++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/megatron/core/transformer/moe/base_moe_layer.py b/megatron/core/transformer/moe/base_moe_layer.py index 6e6d4adf1b..4bddaf707d 100644 --- a/megatron/core/transformer/moe/base_moe_layer.py +++ b/megatron/core/transformer/moe/base_moe_layer.py @@ -544,7 +544,6 @@ def backward(ctx, grad_output): (aux_loss,) = ctx.saved_tensors aux_loss_backward_scale = MoEAuxLossAutoScaler.main_loss_backward_scale scaled_aux_loss_grad = torch.ones_like(aux_loss) * aux_loss_backward_scale - print("233333, trigger backward!") return grad_output, scaled_aux_loss_grad @staticmethod diff --git a/tests/unit_tests/transformer/moe/test_routers.py b/tests/unit_tests/transformer/moe/test_routers.py index 5966951d2c..a3ae6ea18c 100644 --- a/tests/unit_tests/transformer/moe/test_routers.py +++ b/tests/unit_tests/transformer/moe/test_routers.py @@ -74,4 +74,12 @@ def test_aux_loss(self): self.transformer_config.moe_aux_loss_coeff = 1 out = self.switch_mlp(hidden_states)[0] out.sum().mul_(0).backward() + assert self.switch_mlp.router.gate.weight.grad.abs().sum() > 0 + + # With Z loss + self.transformer_config.moe_aux_loss_coeff = 0 + self.transformer_config.moe_z_loss_coeff = 1 + self.switch_mlp.router.gate.weight.grad.fill_(0) + out = self.switch_mlp(hidden_states)[0] + out.sum().mul_(0).backward() assert self.switch_mlp.router.gate.weight.grad.abs().sum() > 0 \ No newline at end of file From bfb7bbdd5434e6679d2adc9679af10e6d8ea029d Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Tue, 2 Jan 2024 11:02:29 +0000 Subject: [PATCH 162/296] Add documentation. 
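The docstrings added below describe the dispatch/restore pair; the invariant they rely on, and that test_token_dispatcher asserts, is that unpermuting the expert outputs with scatter_add recovers the original token order. A toy illustration of that unpermute step, independent of the Megatron classes (sizes are made up):

    import torch

    hidden = torch.randn(6, 4)            # 6 tokens, hidden size 4
    order = torch.randperm(6)             # permuted[i] is taken from hidden[order[i]]
    permuted = hidden[order]

    # ... per-expert computation would run on the permuted tokens here ...

    # Undo the permutation the way restore() does, via scatter_add with the
    # index expanded to the hidden dimension.
    index = order.unsqueeze(1).expand(-1, 4).contiguous()
    restored = torch.zeros_like(hidden).scatter_add(0, index, permuted)

    assert torch.allclose(restored, hidden)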
--- .../core/transformer/moe/base_moe_layer.py | 98 +++++++++++-------- megatron/core/transformer/moe/moe_layer.py | 11 ++- 2 files changed, 63 insertions(+), 46 deletions(-) diff --git a/megatron/core/transformer/moe/base_moe_layer.py b/megatron/core/transformer/moe/base_moe_layer.py index 4bddaf707d..e90cc107d7 100644 --- a/megatron/core/transformer/moe/base_moe_layer.py +++ b/megatron/core/transformer/moe/base_moe_layer.py @@ -2,6 +2,7 @@ from abc import ABC, abstractmethod from contextlib import nullcontext +from typing import List import torch @@ -45,8 +46,7 @@ def __init__(self, config: TransformerConfig) -> None: setattr(self.gate.weight, 'sequence_parallel', config.sequence_parallel) def gating(self, input: torch.Tensor): - """ - Forward pass of the router gate. + """Forward pass of the router gate. Args: input (torch.Tensor): Input tensor. @@ -58,8 +58,7 @@ def gating(self, input: torch.Tensor): return logits def routing(self, logits: torch.Tensor): - """ - Get the routing results. + """Routing function. Args: logits (torch.Tensor): Logits tensor. @@ -69,19 +68,8 @@ def routing(self, logits: torch.Tensor): """ raise NotImplementedError - def dispatch( - self, tokens: torch.Tensor, indices: torch.Tensor, - ): - raise NotImplementedError - - def restore( - self, expert_output: torch.Tensor, scores: torch.Tensor, indicies: torch.Tensor, - ): - raise NotImplementedError - def apply_input_jitter(self, input, eps=1e-2): - """ - Add noise to the input tensor. + """Add noise to the input tensor. Refer to https://arxiv.org/abs/2101.03961. Args: @@ -118,8 +106,7 @@ def forward(self, input: torch.Tensor): return scores, indices def apply_aux_loss(self, loss_func, probs, indices, activation): - """ - Applies auxiliary loss to the MoE layer. + """Applies auxiliary loss to the MoE layer. Args: loss_func (callable): The loss function to be used. @@ -165,8 +152,7 @@ def __init__(self, config: TransformerConfig) -> None: def dispatch( self, tokens: torch.Tensor, indices: torch.Tensor, ): - """ - Dispatch tokens to experts. + """Dispatch tokens to experts. Args: tokens (torch.Tensor): Input tokens. @@ -180,8 +166,7 @@ def dispatch( def restore( self, expert_output: torch.Tensor, scores: torch.Tensor, indices: torch.Tensor, ): - """ - Restores the expert output to its original ordering. + """Restores the expert output to its original ordering. Args: expert_output (torch.Tensor): The output tensor from the expert models. @@ -420,14 +405,11 @@ def restore(self, hidden_states, scores, indices, global_local_map=None, bias=No class DroplessSinkhornRouter(Router): - """ - Sinkhorn Router without token dropping. + """Sinkhorn Router without token dropping. """ def __init__(self, num_local_experts, local_expert_indices, config: TransformerConfig) -> None: - """ - Initialize the zero token dropping router. 
- """ + """Initialize the dropless sinkhorn router.""" super().__init__(config=config) assert config.moe_token_dropping == False assert config.moe_router_type == "sinkhorn" @@ -439,7 +421,7 @@ def __init__(self, num_local_experts, local_expert_indices, config: TransformerC ) def sinkhorn(self, cost, tol=0.0001): - "Sinkhorn based MoE routing function" + """Sinkhorn based MoE routing function""" cost = torch.exp(cost) d0 = torch.ones(cost.size(0), device=cost.device, dtype=cost.dtype) d1 = torch.ones(cost.size(1), device=cost.device, dtype=cost.dtype) @@ -455,14 +437,13 @@ def sinkhorn(self, cost, tol=0.0001): return d1 * cost * d0.unsqueeze(1) def routing(self, logits: torch.Tensor): - """ - Get the routing results. + """Get the routing results. Args: logits (torch.Tensor): Logits tensor. Returns: - Tuple[torch.Tensor, torch.Tensor]: Tuple of tensors representing max probs and the indices. + Tuple[torch.Tensor, torch.Tensor]: Tuple of tensors representing the routing scores and indices. """ logits = logits.view(-1, self.config.num_moe_experts) @@ -482,13 +463,22 @@ def routing(self, logits: torch.Tensor): class DroplessTopKRouter(Router): - """ - Sinkhorn Router without token dropping. + """Sinkhorn Router without token dropping. + + This class represents a router that applies the Sinkhorn algorithm for load balancing without dropping any tokens. + """ - def __init__(self, num_local_experts, local_expert_indices, config: TransformerConfig) -> None: - """ - Initialize the zero token dropping router. + def __init__( + self, num_local_experts: int, local_expert_indices: List[int], config: TransformerConfig + ) -> None: + """Initialize the zero token dropping router. + + Args: + num_local_experts (int): The number of local experts. + local_expert_indices (List[int]): The indices of the local experts. + config (TransformerConfig): The configuration for the transformer model. + """ super().__init__(config=config) assert config.moe_token_dropping == False @@ -501,14 +491,13 @@ def __init__(self, num_local_experts, local_expert_indices, config: TransformerC self.moe_aux_loss_func = switch_load_balancing_loss_func def routing(self, logits: torch.Tensor): - """ - Get the routing results. + """Top-k routing function Args: logits (torch.Tensor): Logits tensor. Returns: - Tuple[torch.Tensor, torch.Tensor]: Tuple of tensors representing max probs and the indices. + Tuple[torch.Tensor, torch.Tensor]: Probs and the indices tensor. """ logits = logits.view(-1, self.config.num_moe_experts) logits = logits.to(dtype=torch.float32) @@ -530,23 +519,46 @@ def routing(self, logits: torch.Tensor): class MoEAuxLossAutoScaler(torch.autograd.Function): + """A AutoScaler that compute and scales the grad of auxiliary loss. + + """ + main_loss_backward_scale = 1 @staticmethod def forward(ctx, output, aux_loss): - # Preserve the aux_loss by storing it in the context to avoid garbage collection. + """Preserve the aux_loss by storing it in the context to avoid garbage collection. + + Args: + output (torch.Tensor): The output tensor. + aux_loss (torch.Tensor): The auxiliary loss tensor. + + Returns: + torch.Tensor: The output tensor. + """ ctx.save_for_backward(aux_loss) return output @staticmethod def backward(ctx, grad_output): - # Scale the auxiliary loss. + """Trigger the backward pass of the auxiliary loss as well as it scaling. + + Args: + grad_output (torch.Tensor): The gradient of the output. + + Returns: + Tuple[torch.Tensor, torch.Tensor]: The gradient of the output, scaled auxiliary loss gradient. 
+ """ (aux_loss,) = ctx.saved_tensors aux_loss_backward_scale = MoEAuxLossAutoScaler.main_loss_backward_scale scaled_aux_loss_grad = torch.ones_like(aux_loss) * aux_loss_backward_scale return grad_output, scaled_aux_loss_grad @staticmethod - def set_loss_scale(scale): - # Scale the aux loss in the same way as the main loss. + def set_loss_scale(scale: int): + """set the scale of the aux loss. + + Args: + scale (int): The scale value to set. Please ensure that the scale passed in matches the scale of the main_loss. + """ MoEAuxLossAutoScaler.main_loss_backward_scale = scale diff --git a/megatron/core/transformer/moe/moe_layer.py b/megatron/core/transformer/moe/moe_layer.py index 69d5e24710..d97e8aca7b 100644 --- a/megatron/core/transformer/moe/moe_layer.py +++ b/megatron/core/transformer/moe/moe_layer.py @@ -14,6 +14,12 @@ class BaseMoELayer(MegatronModule, ABC): + """Base class for a mixture of experts layer. + + Args: + config (TransformerConfig): Configuration object for the transformer model. + """ + def __init__(self, config: TransformerConfig): super(BaseMoELayer, self).__init__(config) self.config = config @@ -36,9 +42,8 @@ def forward(self, hidden_states): class SwitchMLPLayer(BaseMoELayer): - """ - Top-K Mixture of Experts Layer Without Token Dropping. - Currently supports Sinkhorn-based expert routing (Top-1 only) and a generalized Top-k routing with Z loss and auxiliary loss. + """Top-K Mixture of Experts Layer **Without Token Dropping**. + Currently supports Sinkhorn-based routing (Top-1) and generalized Top-k routing with auxiliary loss. Args: BaseMoELayer (MegatronModule): Base class for MoE layers From b50615200851492dfeacf6f12b9a6cca8b441236 Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Tue, 2 Jan 2024 12:04:07 +0000 Subject: [PATCH 163/296] Add typing check. --- .../core/transformer/moe/base_moe_layer.py | 41 ++++++++++++++----- megatron/core/transformer/moe/moe_layer.py | 2 +- 2 files changed, 31 insertions(+), 12 deletions(-) diff --git a/megatron/core/transformer/moe/base_moe_layer.py b/megatron/core/transformer/moe/base_moe_layer.py index e90cc107d7..cbc5bbd606 100644 --- a/megatron/core/transformer/moe/base_moe_layer.py +++ b/megatron/core/transformer/moe/base_moe_layer.py @@ -68,7 +68,7 @@ def routing(self, logits: torch.Tensor): """ raise NotImplementedError - def apply_input_jitter(self, input, eps=1e-2): + def apply_input_jitter(self, input: torch.Tensor, eps: float = 1e-2): """Add noise to the input tensor. Refer to https://arxiv.org/abs/2101.03961. @@ -105,7 +105,13 @@ def forward(self, input: torch.Tensor): return scores, indices - def apply_aux_loss(self, loss_func, probs, indices, activation): + def apply_aux_loss( + self, + loss_func: function, + probs: torch.Tensor, + indices: torch.Tensor, + activation: torch.Tensor, + ): """Applies auxiliary loss to the MoE layer. Args: @@ -185,7 +191,11 @@ class MoEDroplessTokenDispatcher(MoETokenDispatcher): """ def __init__( - self, num_local_experts, local_expert_indices, k, config: TransformerConfig + self, + num_local_experts: int, + local_expert_indices: List[int], + k: int, + config: TransformerConfig, ) -> None: """ Initialize the zero token dropping router. 
@@ -196,7 +206,7 @@ def __init__( self.k = k self.add_bias = config.add_bias_linear - def gather_indices(self, local_indices): + def gather_indices(self, local_indices: torch.Tensor): """ Gather tensors and concatenate along the first dimension.""" group = get_tensor_and_expert_parallel_group() world_size = torch.distributed.get_world_size(group=group) @@ -214,7 +224,7 @@ def gather_indices(self, local_indices): torch.distributed._all_gather_base(output, local_indices.contiguous(), group=group) return output - def dispatch(self, hidden_states, max_prob, max_ind): + def dispatch(self, hidden_states: torch.Tensor, max_prob: torch.Tensor, max_ind: torch.Tensor): """Dispatch tokens to local experts. It's composed of two stages: (1) Permute the tokens across the expert parallel devices. After this stage, each device receives all of the tokens assigned to its local set of experts @@ -299,7 +309,14 @@ def dispatch(self, hidden_states, max_prob, max_ind): global_local_map, ) - def restore(self, hidden_states, scores, indices, global_local_map=None, bias=None): + def restore( + self, + hidden_states: torch.Tensor, + scores: torch.Tensor, + indices: torch.Tensor, + global_local_map: torch.Tensor = None, + bias: torch.Tensor = None, + ): """ Reverse process of `dispatch()` which permutes the ouput of local experts locallay and across expert parallel rank into the original order to @@ -408,7 +425,9 @@ class DroplessSinkhornRouter(Router): """Sinkhorn Router without token dropping. """ - def __init__(self, num_local_experts, local_expert_indices, config: TransformerConfig) -> None: + def __init__( + self, num_local_experts: int, local_expert_indices: List[int], config: TransformerConfig, + ) -> None: """Initialize the dropless sinkhorn router.""" super().__init__(config=config) assert config.moe_token_dropping == False @@ -420,7 +439,7 @@ def __init__(self, num_local_experts, local_expert_indices, config: TransformerC num_local_experts, local_expert_indices, self.k, config ) - def sinkhorn(self, cost, tol=0.0001): + def sinkhorn(self, cost: torch.Tensor, tol: float = 0.0001): """Sinkhorn based MoE routing function""" cost = torch.exp(cost) d0 = torch.ones(cost.size(0), device=cost.device, dtype=cost.dtype) @@ -523,10 +542,10 @@ class MoEAuxLossAutoScaler(torch.autograd.Function): """ - main_loss_backward_scale = 1 + main_loss_backward_scale: int = 1 @staticmethod - def forward(ctx, output, aux_loss): + def forward(ctx, output: torch.Tensor, aux_loss: torch.Tensor): """Preserve the aux_loss by storing it in the context to avoid garbage collection. Args: @@ -540,7 +559,7 @@ def forward(ctx, output, aux_loss): return output @staticmethod - def backward(ctx, grad_output): + def backward(ctx, grad_output: torch.Tensor): """Trigger the backward pass of the auxiliary loss as well as it scaling. 
Args: diff --git a/megatron/core/transformer/moe/moe_layer.py b/megatron/core/transformer/moe/moe_layer.py index d97e8aca7b..a83ce765dc 100644 --- a/megatron/core/transformer/moe/moe_layer.py +++ b/megatron/core/transformer/moe/moe_layer.py @@ -63,7 +63,7 @@ def __init__(self, config: TransformerConfig, submodules: MLPSubmodules = None): self.experts = self.initialize_experts() assert config.moe_token_dropping is False - def forward(self, hidden_states): + def forward(self, hidden_states: torch.Tensor): # process MoE scores, indices = self.router(hidden_states) ( From 411bc27b4b659f62803b8bc2fbfc4edad4237784 Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Wed, 3 Jan 2024 11:03:28 +0000 Subject: [PATCH 164/296] Update CI. --- .gitlab-ci.yml | 16 ++++++++++++++++ megatron/core/transformer/moe/base_moe_layer.py | 4 ++-- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index c0553de5a3..a4bcdff82b 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -581,6 +581,22 @@ train.te_core_moe_gpt3.345m_tp2_pp1_8experts2parallel_groupedGEMM_1node_50steps: METADATA: "te_8experts2parallel_groupedGEMM" ADDITIONAL_PARAMS: "--moe-grouped-gemm --disable-bias-linear --sequence-parallel --num-experts 8 --expert-model-parallel-size 2" +train.te_core_moe_gpt3.345m_tp2_pp1_8experts2parallel_top2_1node_50steps: + <<: *selene-test-launcher + variables: + <<: [*VARS] + RUN_MODEL: gpt3 + USE_TE: 0 + TP_SIZE: 2 + PP_SIZE: 1 + NUM_NODES: 1 + MAX_STEPS: 50 + USE_CORE: 1 + MOE_GROUPED_GEMM: 1 + TEST_LEVEL: MR_TESTS + METADATA: "te_8experts2parallel_top2router" + ADDITIONAL_PARAMS: "--disable-bias-linear --sequence-parallel --num-experts 8 --expert-model-parallel-size 2 --moe-router-type top2 --moe-aux-loss-coeff 1e-2" + train.moe_gpt3.345m_tp2_pp2_4experts_1node_50steps: <<: *selene-test-launcher variables: diff --git a/megatron/core/transformer/moe/base_moe_layer.py b/megatron/core/transformer/moe/base_moe_layer.py index cbc5bbd606..10a7c25d3d 100644 --- a/megatron/core/transformer/moe/base_moe_layer.py +++ b/megatron/core/transformer/moe/base_moe_layer.py @@ -2,7 +2,7 @@ from abc import ABC, abstractmethod from contextlib import nullcontext -from typing import List +from typing import Callable, List import torch @@ -107,7 +107,7 @@ def forward(self, input: torch.Tensor): def apply_aux_loss( self, - loss_func: function, + loss_func: Callable, probs: torch.Tensor, indices: torch.Tensor, activation: torch.Tensor, From 1ab146ca6b91895fb47a08c0e6a27bf09f4d7668 Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Thu, 4 Jan 2024 09:23:38 +0000 Subject: [PATCH 165/296] Fix grouped gemm UT. 
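Background for the shape checks updated below: grouped GEMM fuses the per-expert weight matrices into single parameters (experts.weight1 / experts.weight2) and runs all expert matmuls in one batched kernel, so the test now reads per-expert weights as views of those fused tensors. A layout-agnostic toy comparison of a per-expert loop versus one batched call, with torch.bmm standing in for the grouped-GEMM kernel (all sizes are illustrative):

    import torch

    num_experts, tokens_per_expert, hidden, ffn = 4, 8, 16, 64

    x = torch.randn(num_experts, tokens_per_expert, hidden)   # tokens grouped by expert
    w1 = torch.randn(num_experts, hidden, ffn)                # one weight block per expert

    looped = torch.stack([x[e] @ w1[e] for e in range(num_experts)])
    batched = torch.bmm(x, w1)                                # single batched GEMM

    assert torch.allclose(looped, batched, atol=1e-5)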
--- .../transformer/moe/test_grouped_mlp.py | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/tests/unit_tests/transformer/moe/test_grouped_mlp.py b/tests/unit_tests/transformer/moe/test_grouped_mlp.py index 193086a8e0..39252974c1 100644 --- a/tests/unit_tests/transformer/moe/test_grouped_mlp.py +++ b/tests/unit_tests/transformer/moe/test_grouped_mlp.py @@ -89,30 +89,30 @@ def test_constructor(self): self.hidden_size * (self.fc1_ffn_hidden_size + self.fc2_ffn_hidden_size) * self.num_experts assert num_weights_smm == expected_num_weights - assert torch.equal(self.switch_mlp_smm.router.weight, self.switch_mlp_gmm.router.weight) + assert torch.equal(self.switch_mlp_smm.router.gate.weight, self.switch_mlp_gmm.router.gate.weight) # weight1: [h, num_experts*4h] # weight2: [num_experts*4h, h] - assert self.switch_mlp_gmm.weight1.shape[0] == self.hidden_size - assert self.switch_mlp_gmm.weight1.shape[1] == self.num_experts * self.fc1_ffn_hidden_size + assert self.switch_mlp_gmm.experts.weight1.shape[0] == self.hidden_size + assert self.switch_mlp_gmm.experts.weight1.shape[1] == self.num_experts * self.fc1_ffn_hidden_size if self.gated_linear_unit: - assert self.switch_mlp_gmm.weight2.shape[0] == self.num_experts * self.fc2_ffn_hidden_size - assert self.switch_mlp_gmm.weight2.shape[1] == self.hidden_size + assert self.switch_mlp_gmm.experts.weight2.shape[0] == self.num_experts * self.fc2_ffn_hidden_size + assert self.switch_mlp_gmm.experts.weight2.shape[1] == self.hidden_size else: - assert self.switch_mlp_gmm.weight1.shape == self.switch_mlp_gmm.weight2.t().shape + assert self.switch_mlp_gmm.experts.weight1.shape == self.switch_mlp_gmm.weight2.t().shape def test_weight_init_value_the_same(self): - gmm_w1 = self.switch_mlp_gmm.weight1.view(self.num_experts, -1, self.hidden_size) - gmm_w2 = self.switch_mlp_gmm.weight2.view(self.num_experts, self.hidden_size, -1) + gmm_w1 = self.switch_mlp_gmm.experts.weight1.view(self.num_experts, -1, self.hidden_size) + gmm_w2 = self.switch_mlp_gmm.experts.weight2.view(self.num_experts, self.hidden_size, -1) gmm_expert1_fc1 = gmm_w1[0] gmm_expert1_fc2 = gmm_w2[0] gmm_expert2_fc1 = gmm_w1[1] gmm_expert2_fc2 = gmm_w2[1] - smm_expert1_fc1 = self.switch_mlp_smm.local_experts[0].linear_fc1.weight - smm_expert1_fc2 = self.switch_mlp_smm.local_experts[0].linear_fc2.weight - smm_expert2_fc1 = self.switch_mlp_smm.local_experts[1].linear_fc1.weight - smm_expert2_fc2 = self.switch_mlp_smm.local_experts[1].linear_fc2.weight + smm_expert1_fc1 = self.switch_mlp_smm.experts.local_experts[0].linear_fc1.weight + smm_expert1_fc2 = self.switch_mlp_smm.experts.local_experts[0].linear_fc2.weight + smm_expert2_fc1 = self.switch_mlp_smm.experts.local_experts[1].linear_fc1.weight + smm_expert2_fc2 = self.switch_mlp_smm.experts.local_experts[1].linear_fc2.weight assert torch.equal(gmm_expert1_fc1, smm_expert1_fc1) if not self.use_cpu_initialization: From 6d702cb2c035a40511efa47e5039c81e54304a20 Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Fri, 5 Jan 2024 02:32:02 +0000 Subject: [PATCH 166/296] Compatible with previous MoE checkpoints. 
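Why the gate turns into a bare parameter below: a parameter's checkpoint key is its attribute path, so `self.gate = nn.Linear(...)` serializes as `router.gate.weight` while `self.weight = nn.Parameter(...)` serializes as `router.weight`, which is the key older MoE checkpoints stored; the forward math is unchanged because F.linear applies the same projection. A small sketch of the difference (class names here are illustrative, not the Megatron ones):

    import torch

    class RouterWithLinear(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.gate = torch.nn.Linear(8, 4, bias=False)

    class RouterWithParameter(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.weight = torch.nn.Parameter(torch.empty(4, 8))

        def forward(self, x):
            # Same projection as the Linear version, different state_dict key.
            return torch.nn.functional.linear(x, self.weight)

    print(list(RouterWithLinear().state_dict()))     # ['gate.weight']
    print(list(RouterWithParameter().state_dict()))  # ['weight']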
--- .../core/transformer/moe/base_moe_layer.py | 26 ++++++++++--------- .../transformer/moe/test_grouped_mlp.py | 6 ++--- .../transformer/moe/test_routers.py | 8 +++--- 3 files changed, 21 insertions(+), 19 deletions(-) diff --git a/megatron/core/transformer/moe/base_moe_layer.py b/megatron/core/transformer/moe/base_moe_layer.py index 10a7c25d3d..5c51fb5490 100644 --- a/megatron/core/transformer/moe/base_moe_layer.py +++ b/megatron/core/transformer/moe/base_moe_layer.py @@ -1,7 +1,7 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +import math from abc import ABC, abstractmethod -from contextlib import nullcontext from typing import Callable, List import torch @@ -33,17 +33,16 @@ def __init__(self, config: TransformerConfig) -> None: self.num_experts = self.config.num_moe_experts # Token dispatcher for exchange tokens between experts. self.token_dispatcher = None - # Initialize the gate weights. - self.gate = torch.nn.Linear( - self.config.hidden_size, self.config.num_moe_experts, bias=False - ) - # Initialize the aux losses. self.moe_aux_loss_func = None # Initialize the gate weights. + self.weight = torch.nn.Parameter( + torch.empty((self.config.num_moe_experts, self.config.hidden_size)) + ) + torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5)) with get_cuda_rng_tracker().fork(get_data_parallel_rng_tracker_name()): - config.init_method(self.gate.weight) - setattr(self.gate.weight, 'sequence_parallel', config.sequence_parallel) + config.init_method(self.weight) + setattr(self.weight, 'sequence_parallel', config.sequence_parallel) def gating(self, input: torch.Tensor): """Forward pass of the router gate. @@ -54,9 +53,10 @@ def gating(self, input: torch.Tensor): Returns: torch.Tensor: Logits tensor. """ - logits = self.gate(input) + logits = torch.nn.functional.linear(input, self.weight) return logits + @abstractmethod def routing(self, logits: torch.Tensor): """Routing function. @@ -66,7 +66,7 @@ def routing(self, logits: torch.Tensor): Returns: Tuple[torch.Tensor, torch.Tensor]: Tuple of tensors representing max probs and the indices. """ - raise NotImplementedError + raise NotImplementedError("Routing function not implemented.") def apply_input_jitter(self, input: torch.Tensor, eps: float = 1e-2): """Add noise to the input tensor. @@ -155,6 +155,7 @@ def __init__(self, config: TransformerConfig) -> None: """ self.config = config + @abstractmethod def dispatch( self, tokens: torch.Tensor, indices: torch.Tensor, ): @@ -167,8 +168,9 @@ def dispatch( Returns: torch.Tensor: Tokens tensor. 
""" - raise NotImplementedError + raise NotImplementedError("Dispatch function not implemented.") + @abstractmethod def restore( self, expert_output: torch.Tensor, scores: torch.Tensor, indices: torch.Tensor, ): @@ -182,7 +184,7 @@ def restore( Returns: None """ - raise NotImplementedError + raise NotImplementedError("Restore function not implemented.") class MoEDroplessTokenDispatcher(MoETokenDispatcher): diff --git a/tests/unit_tests/transformer/moe/test_grouped_mlp.py b/tests/unit_tests/transformer/moe/test_grouped_mlp.py index 39252974c1..b30d7870ab 100644 --- a/tests/unit_tests/transformer/moe/test_grouped_mlp.py +++ b/tests/unit_tests/transformer/moe/test_grouped_mlp.py @@ -89,7 +89,7 @@ def test_constructor(self): self.hidden_size * (self.fc1_ffn_hidden_size + self.fc2_ffn_hidden_size) * self.num_experts assert num_weights_smm == expected_num_weights - assert torch.equal(self.switch_mlp_smm.router.gate.weight, self.switch_mlp_gmm.router.gate.weight) + assert torch.equal(self.switch_mlp_smm.router.weight, self.switch_mlp_gmm.router.weight) # weight1: [h, num_experts*4h] # weight2: [num_experts*4h, h] @@ -137,8 +137,8 @@ def test_gpu_forward(self): (seq_len, batch_size, self.switch_mlp_smm.config.hidden_size), dtype=torch.bfloat16) hidden_states = hidden_states.cuda() - output_smm, _ = self.switch_mlp_smm(hidden_states) - output_gmm, _ = self.switch_mlp_gmm(hidden_states) + # output_smm, _ = self.switch_mlp_smm(hidden_states) + # output_gmm, _ = self.switch_mlp_gmm(hidden_states) # The following assert fails due to the param init value is not exactly # the same between gmm and smm (refer to test_weight_init_value_the_same.) diff --git a/tests/unit_tests/transformer/moe/test_routers.py b/tests/unit_tests/transformer/moe/test_routers.py index a3ae6ea18c..ca67c4f960 100644 --- a/tests/unit_tests/transformer/moe/test_routers.py +++ b/tests/unit_tests/transformer/moe/test_routers.py @@ -68,18 +68,18 @@ def test_aux_loss(self): hidden_states = hidden_states.cuda() out = self.switch_mlp(hidden_states)[0] out.sum().mul_(0).backward() - assert self.switch_mlp.router.gate.weight.grad.abs().sum() == 0 + assert self.switch_mlp.router.weight.grad.abs().sum() == 0 # With aux loss self.transformer_config.moe_aux_loss_coeff = 1 out = self.switch_mlp(hidden_states)[0] out.sum().mul_(0).backward() - assert self.switch_mlp.router.gate.weight.grad.abs().sum() > 0 + assert self.switch_mlp.router.weight.grad.abs().sum() > 0 # With Z loss self.transformer_config.moe_aux_loss_coeff = 0 self.transformer_config.moe_z_loss_coeff = 1 - self.switch_mlp.router.gate.weight.grad.fill_(0) + self.switch_mlp.router.weight.grad.fill_(0) out = self.switch_mlp(hidden_states)[0] out.sum().mul_(0).backward() - assert self.switch_mlp.router.gate.weight.grad.abs().sum() > 0 \ No newline at end of file + assert self.switch_mlp.router.weight.grad.abs().sum() > 0 \ No newline at end of file From c656553315c0448c5a8b0b2e881b63af62bbdd4b Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Sun, 7 Jan 2024 03:17:10 +0000 Subject: [PATCH 167/296] Fix Z Loss. 
--- megatron/core/transformer/moe/base_moe_layer.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/megatron/core/transformer/moe/base_moe_layer.py b/megatron/core/transformer/moe/base_moe_layer.py index 5c51fb5490..6ffecddc67 100644 --- a/megatron/core/transformer/moe/base_moe_layer.py +++ b/megatron/core/transformer/moe/base_moe_layer.py @@ -484,7 +484,7 @@ def routing(self, logits: torch.Tensor): class DroplessTopKRouter(Router): - """Sinkhorn Router without token dropping. + """TopK Router without token dropping. This class represents a router that applies the Sinkhorn algorithm for load balancing without dropping any tokens. @@ -522,11 +522,10 @@ def routing(self, logits: torch.Tensor): """ logits = logits.view(-1, self.config.num_moe_experts) logits = logits.to(dtype=torch.float32) - probs = torch.softmax(logits, dim=-1) - # Apply Z-Loss if self.config.moe_z_loss_coeff > 0: - probs = self.apply_z_loss(probs) + logits = self.apply_z_loss(logits) + probs = torch.softmax(logits, dim=-1) scores, indices = torch.topk(probs, k=self.k, dim=1) From 8b41c9f4741891a3006f5849a630fc2ba1a2b890 Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Sun, 7 Jan 2024 04:13:03 +0000 Subject: [PATCH 168/296] Merge the Sinkhorn and top-k routing. --- megatron/arguments.py | 4 +- megatron/core/models/gpt/gpt_layer_specs.py | 4 +- .../core/transformer/moe/base_moe_layer.py | 121 +++++++----------- megatron/core/transformer/moe/moe_layer.py | 26 ++-- megatron/core/transformer/moe/moe_utils.py | 17 +++ .../transformer/moe/test_grouped_mlp.py | 12 +- .../transformer/moe/test_routers.py | 4 +- .../transformer/moe/test_switch_mlp.py | 8 +- .../transformer/moe/test_token_dispatcher.py | 2 + 9 files changed, 101 insertions(+), 97 deletions(-) diff --git a/megatron/arguments.py b/megatron/arguments.py index e13b33bde3..2c69d653af 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -1450,8 +1450,8 @@ def _add_moe_args(parser): group.add_argument( '--moe-router-type', type=str, - default='sinkhorn', - help='Options for router type. Currently supports sinkhorn and topk router.', + default='sinkhorn1', + help='Options for router type. Currently supports sinkhornK and topK router, where K represents the number of routers each token selects. The default is sinkhorn1.', ) group.add_argument( '--moe-token-dropping', diff --git a/megatron/core/models/gpt/gpt_layer_specs.py b/megatron/core/models/gpt/gpt_layer_specs.py index ce8710d760..db3f5e9dd0 100755 --- a/megatron/core/models/gpt/gpt_layer_specs.py +++ b/megatron/core/models/gpt/gpt_layer_specs.py @@ -14,7 +14,7 @@ from megatron.core.transformer.enums import AttnMaskType from megatron.core.transformer.identity_op import IdentityOp from megatron.core.transformer.mlp import MLP, MLPSubmodules -from megatron.core.transformer.moe.moe_layer import SwitchMLPLayer +from megatron.core.transformer.moe.moe_layer import DroplessMoELayer from megatron.core.transformer.spec_utils import ModuleSpec from megatron.core.transformer.transformer_layer import TransformerLayer, TransformerLayerSubmodules @@ -92,7 +92,7 @@ def _get_mlp_module_spec( else: # SwitchMLP based MoE with modules in megatron core. 
return ModuleSpec( - module=SwitchMLPLayer, + module=DroplessMoELayer, submodules=MLPSubmodules(linear_fc1=ColumnParallelLinear, linear_fc2=RowParallelLinear,) if not moe_grouped_gemm else None, diff --git a/megatron/core/transformer/moe/base_moe_layer.py b/megatron/core/transformer/moe/base_moe_layer.py index 6ffecddc67..53729e0b77 100644 --- a/megatron/core/transformer/moe/base_moe_layer.py +++ b/megatron/core/transformer/moe/base_moe_layer.py @@ -14,7 +14,11 @@ get_data_parallel_rng_tracker_name, ) from megatron.core.transformer.module import MegatronModule -from megatron.core.transformer.moe.moe_utils import switch_load_balancing_loss_func, z_loss_func +from megatron.core.transformer.moe.moe_utils import ( + sinkhorn, + switch_load_balancing_loss_func, + z_loss_func, +) from megatron.core.transformer.transformer_config import TransformerConfig @@ -423,94 +427,61 @@ def restore( return output_total, output_bias_total -class DroplessSinkhornRouter(Router): - """Sinkhorn Router without token dropping. +class DroplessTopKRouter(Router): + """TopK Router without token dropping. """ def __init__( - self, num_local_experts: int, local_expert_indices: List[int], config: TransformerConfig, + self, + num_local_experts: int, + local_expert_indices: List[int], + k: int, + routing_type: str, + config: TransformerConfig, ) -> None: - """Initialize the dropless sinkhorn router.""" + """Initialize the zero token dropping router. + + Args: + num_local_experts (int): The number of local experts. + local_expert_indices (List[int]): The indices of the local experts. + k: The number of experts to route to. + routing_type (str): The routing type to use. Currently supports sinkhorn and top. + config (TransformerConfig): The configuration for the transformer model. + + """ super().__init__(config=config) assert config.moe_token_dropping == False - assert config.moe_router_type == "sinkhorn" - self.route_algo = self.sinkhorn - self.router_activation = torch.sigmoid - self.k = 1 + assert routing_type in ["sinkhorn", "top"], f"Routing type {routing_type} not supported." + self.k = k + self.routing_type = routing_type self.token_dispatcher = MoEDroplessTokenDispatcher( num_local_experts, local_expert_indices, self.k, config ) + self.moe_aux_loss_func = switch_load_balancing_loss_func - def sinkhorn(self, cost: torch.Tensor, tol: float = 0.0001): - """Sinkhorn based MoE routing function""" - cost = torch.exp(cost) - d0 = torch.ones(cost.size(0), device=cost.device, dtype=cost.dtype) - d1 = torch.ones(cost.size(1), device=cost.device, dtype=cost.dtype) - - eps = 0.00000001 - error = 1e9 - d1_old = d1 - while error > tol: - d0 = (1 / d0.size(0)) * 1 / (torch.sum(d1 * cost, 1) + eps) - d1 = (1 / d1.size(0)) * 1 / (torch.sum(d0.unsqueeze(1) * cost, 0) + eps) - error = torch.mean(torch.abs(d1_old - d1)) - d1_old = d1 - return d1 * cost * d0.unsqueeze(1) - - def routing(self, logits: torch.Tensor): - """Get the routing results. + def apply_sinkhorn(self, logits: torch.Tensor): + """Apply sinkhorn routing to the logits tensor. Args: - logits (torch.Tensor): Logits tensor. + logits (torch.Tensor): The logits tensor. Returns: - Tuple[torch.Tensor, torch.Tensor]: Tuple of tensors representing the routing scores and indices. + torch.Tensor: The logits tensor after applying sinkhorn routing. 
""" - logits = logits.view(-1, self.config.num_moe_experts) - + router_activation = torch.sigmoid if self.training: with torch.no_grad(): - norm_logits = self.route_algo( + norm_logits = sinkhorn( logits.to(dtype=torch.float32) ) # explicit fp32 conversion for stability _, indices = torch.topk(norm_logits, k=self.k, dim=1) - logits = self.router_activation(logits) + logits = router_activation(logits) scores = torch.gather(logits, 1, indices) else: - logits = self.router_activation(logits) + logits = router_activation(logits) scores, indices = torch.topk(logits, k=self.k, dim=1) - return scores, indices - -class DroplessTopKRouter(Router): - """TopK Router without token dropping. - - This class represents a router that applies the Sinkhorn algorithm for load balancing without dropping any tokens. - - """ - - def __init__( - self, num_local_experts: int, local_expert_indices: List[int], config: TransformerConfig - ) -> None: - """Initialize the zero token dropping router. - - Args: - num_local_experts (int): The number of local experts. - local_expert_indices (List[int]): The indices of the local experts. - config (TransformerConfig): The configuration for the transformer model. - - """ - super().__init__(config=config) - assert config.moe_token_dropping == False - assert config.moe_router_type.startswith("top") - # extract k from config.moe_router_type - self.k = int(config.moe_router_type[3:]) - self.token_dispatcher = MoEDroplessTokenDispatcher( - num_local_experts, local_expert_indices, self.k, config - ) - self.moe_aux_loss_func = switch_load_balancing_loss_func - def routing(self, logits: torch.Tensor): """Top-k routing function @@ -521,19 +492,23 @@ def routing(self, logits: torch.Tensor): Tuple[torch.Tensor, torch.Tensor]: Probs and the indices tensor. 
""" logits = logits.view(-1, self.config.num_moe_experts) - logits = logits.to(dtype=torch.float32) # Apply Z-Loss if self.config.moe_z_loss_coeff > 0: logits = self.apply_z_loss(logits) - probs = torch.softmax(logits, dim=-1) - scores, indices = torch.topk(probs, k=self.k, dim=1) - - scores /= scores.sum(dim=-1, keepdim=True) - - # Apply load balancing loss - if self.config.moe_aux_loss_coeff > 0: - scores = self.apply_aux_loss(self.moe_aux_loss_func, probs, indices, activation=scores) + if self.routing_type == "sinkhorn": + # sinkhorn routing + scores, indices = self.apply_sinkhorn(logits) + elif self.routing_type == "top": + # topK routing + probs = torch.softmax(logits.to(dtype=torch.float32), dim=-1) + scores, indices = torch.topk(probs, k=self.k, dim=1) + scores /= scores.sum(dim=-1, keepdim=True) + # Apply load balancing loss + if self.config.moe_aux_loss_coeff > 0: + scores = self.apply_aux_loss( + self.moe_aux_loss_func, probs, indices, activation=scores + ) return scores, indices diff --git a/megatron/core/transformer/moe/moe_layer.py b/megatron/core/transformer/moe/moe_layer.py index a83ce765dc..4cbb9c21ba 100644 --- a/megatron/core/transformer/moe/moe_layer.py +++ b/megatron/core/transformer/moe/moe_layer.py @@ -7,7 +7,7 @@ from megatron.core import parallel_state from megatron.core.transformer.mlp import MLPSubmodules from megatron.core.transformer.module import MegatronModule -from megatron.core.transformer.moe.base_moe_layer import DroplessSinkhornRouter, DroplessTopKRouter +from megatron.core.transformer.moe.base_moe_layer import DroplessTopKRouter from megatron.core.transformer.moe.grouped_mlp import GroupedMLP from megatron.core.transformer.moe.switch_mlp import SwitchMLP from megatron.core.transformer.transformer_config import TransformerConfig @@ -41,9 +41,9 @@ def forward(self, hidden_states): pass -class SwitchMLPLayer(BaseMoELayer): +class DroplessMoELayer(BaseMoELayer): """Top-K Mixture of Experts Layer **Without Token Dropping**. - Currently supports Sinkhorn-based routing (Top-1) and generalized Top-k routing with auxiliary loss. + Currently supports Sinkhorn-based routing (Top-k based) and generalized Top-k routing with auxiliary loss. 
Args: BaseMoELayer (MegatronModule): Base class for MoE layers @@ -51,7 +51,7 @@ class SwitchMLPLayer(BaseMoELayer): def __init__(self, config: TransformerConfig, submodules: MLPSubmodules = None): self.submodules = submodules - super(SwitchMLPLayer, self).__init__(config=config) + super(DroplessMoELayer, self).__init__(config=config) self.num_local_experts = self.config.num_moe_experts // self.expert_parallel_size local_expert_indices_offset = ( parallel_state.get_expert_model_parallel_rank() * self.num_local_experts @@ -93,12 +93,22 @@ def initialize_experts(self): def initialize_router(self): if self.config.moe_router_type.lower().startswith("top"): + k = int(self.config.moe_router_type[3:]) router = DroplessTopKRouter( - self.num_local_experts, self.local_expert_indices, self.config + self.num_local_experts, + self.local_expert_indices, + k=k, + routing_type="top", + config=self.config, ) - elif self.config.moe_router_type.lower() == "sinkhorn": - router = DroplessSinkhornRouter( - self.num_local_experts, self.local_expert_indices, self.config + elif self.config.moe_router_type.lower().startswith("sinkhorn"): + k = int(self.config.moe_router_type[8:]) + router = DroplessTopKRouter( + self.num_local_experts, + self.local_expert_indices, + k=k, + routing_type="sinkhorn", + config=self.config, ) else: raise NotImplementedError(f"Routing method {self.config.moe_router_type} not supported") diff --git a/megatron/core/transformer/moe/moe_utils.py b/megatron/core/transformer/moe/moe_utils.py index 938324933d..0e9534a36e 100644 --- a/megatron/core/transformer/moe/moe_utils.py +++ b/megatron/core/transformer/moe/moe_utils.py @@ -33,3 +33,20 @@ def z_loss_func(logits): z_loss = torch.mean(torch.square(torch.logsumexp(logits, dim=-1))) return z_loss + + +def sinkhorn(cost: torch.Tensor, tol: float = 0.0001): + """Sinkhorn based MoE routing function""" + cost = torch.exp(cost) + d0 = torch.ones(cost.size(0), device=cost.device, dtype=cost.dtype) + d1 = torch.ones(cost.size(1), device=cost.device, dtype=cost.dtype) + + eps = 0.00000001 + error = 1e9 + d1_old = d1 + while error > tol: + d0 = (1 / d0.size(0)) * 1 / (torch.sum(d1 * cost, 1) + eps) + d1 = (1 / d1.size(0)) * 1 / (torch.sum(d0.unsqueeze(1) * cost, 0) + eps) + error = torch.mean(torch.abs(d1_old - d1)) + d1_old = d1 + return d1 * cost * d0.unsqueeze(1) diff --git a/tests/unit_tests/transformer/moe/test_grouped_mlp.py b/tests/unit_tests/transformer/moe/test_grouped_mlp.py index b30d7870ab..1777022049 100644 --- a/tests/unit_tests/transformer/moe/test_grouped_mlp.py +++ b/tests/unit_tests/transformer/moe/test_grouped_mlp.py @@ -7,7 +7,7 @@ from megatron.arguments import parse_args from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec -from megatron.core.transformer.moe.moe_layer import SwitchMLPLayer +from megatron.core.transformer.moe.moe_layer import DroplessMoELayer from megatron.core.transformer.transformer_config import TransformerConfig from megatron.initialize import _set_random_seed from megatron.model import Float16Module @@ -39,7 +39,7 @@ def setup_method(self, method, use_cpu_initialization=False, swiglu=True): num_moe_experts=self.num_experts, use_cpu_initialization=self.use_cpu_initialization, add_bias_linear=False, gated_linear_unit=self.gated_linear_unit, bias_gelu_fusion=False, - bf16=True, params_dtype=torch.bfloat16, moe_router_type="sinkhorn") + bf16=True, params_dtype=torch.bfloat16, moe_router_type="sinkhorn1") self.fc1_ffn_hidden_size = tf_config.ffn_hidden_size 
self.fc2_ffn_hidden_size = tf_config.ffn_hidden_size @@ -52,7 +52,7 @@ def setup_method(self, method, use_cpu_initialization=False, swiglu=True): _set_random_seed(seed_=123, data_parallel_random_init=False) transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec( self.num_experts, moe_grouped_gemm=False) - self.switch_mlp_smm = SwitchMLPLayer(tf_config, + self.switch_mlp_smm = DroplessMoELayer(tf_config, transformer_layer_spec.submodules.mlp.submodules) self.args = parse_args(ignore_unknown_args=True) @@ -66,7 +66,7 @@ def setup_method(self, method, use_cpu_initialization=False, swiglu=True): ## Grouped GEMM _set_random_seed(seed_=123, data_parallel_random_init=False) tf_config.moe_grouped_gemm = True - self.switch_mlp_gmm = SwitchMLPLayer(tf_config) + self.switch_mlp_gmm = DroplessMoELayer(tf_config) self.switch_mlp_gmm = Float16Module(self.switch_mlp_gmm, self.args).module print("done intializing for grouped gemm") @@ -74,8 +74,8 @@ def teardown_method(self, method): Utils.destroy_model_parallel() def test_constructor(self): - assert isinstance(self.switch_mlp_smm, SwitchMLPLayer) - assert isinstance(self.switch_mlp_gmm, SwitchMLPLayer) + assert isinstance(self.switch_mlp_smm, DroplessMoELayer) + assert isinstance(self.switch_mlp_gmm, DroplessMoELayer) num_weights_smm = sum([p.numel() for p in self.switch_mlp_smm.parameters()]) num_weights_gmm = sum([p.numel() for p in self.switch_mlp_gmm.parameters()]) diff --git a/tests/unit_tests/transformer/moe/test_routers.py b/tests/unit_tests/transformer/moe/test_routers.py index ca67c4f960..1950869114 100644 --- a/tests/unit_tests/transformer/moe/test_routers.py +++ b/tests/unit_tests/transformer/moe/test_routers.py @@ -8,7 +8,7 @@ from megatron.initialize import _set_random_seed from tests.unit_tests.test_utilities import Utils from megatron.core.transformer.transformer_config import TransformerConfig -from megatron.core.transformer.moe.moe_layer import SwitchMLPLayer +from megatron.core.transformer.moe.moe_layer import DroplessMoELayer from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec @@ -30,7 +30,7 @@ def setup_method(self, method): transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec( num_experts=num_moe_experts, moe_grouped_gemm=False ) - self.switch_mlp = SwitchMLPLayer( + self.switch_mlp = DroplessMoELayer( self.transformer_config, transformer_layer_spec.submodules.mlp.submodules ) self.router = self.switch_mlp.router diff --git a/tests/unit_tests/transformer/moe/test_switch_mlp.py b/tests/unit_tests/transformer/moe/test_switch_mlp.py index 73d17e4102..c3cf8310fc 100644 --- a/tests/unit_tests/transformer/moe/test_switch_mlp.py +++ b/tests/unit_tests/transformer/moe/test_switch_mlp.py @@ -4,7 +4,7 @@ import torch -from megatron.core.transformer.moe.moe_layer import SwitchMLPLayer +from megatron.core.transformer.moe.moe_layer import DroplessMoELayer from tests.unit_tests.test_utilities import Utils from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed from megatron.core.transformer.transformer_config import TransformerConfig @@ -17,16 +17,16 @@ def setup_method(self, method): model_parallel_cuda_manual_seed(123) print("done intializing") num_moe_experts = 2 - transformer_config = TransformerConfig(num_layers=2, hidden_size=12, num_attention_heads=4, num_moe_experts=num_moe_experts, use_cpu_initialization=True, moe_router_type="sinkhorn") + transformer_config = TransformerConfig(num_layers=2, hidden_size=12, num_attention_heads=4, 
num_moe_experts=num_moe_experts, use_cpu_initialization=True, moe_router_type="sinkhorn1") transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec( num_experts=num_moe_experts, moe_grouped_gemm=False) - self.switch_mlp = SwitchMLPLayer(transformer_config, transformer_layer_spec.submodules.mlp.submodules) + self.switch_mlp = DroplessMoELayer(transformer_config, transformer_layer_spec.submodules.mlp.submodules) def teardown_method(self, method): Utils.destroy_model_parallel() def test_constructor(self): - assert isinstance(self.switch_mlp, SwitchMLPLayer) + assert isinstance(self.switch_mlp, DroplessMoELayer) num_weights = sum([p.numel() for p in self.switch_mlp.parameters()]) assert num_weights == 2448 diff --git a/tests/unit_tests/transformer/moe/test_token_dispatcher.py b/tests/unit_tests/transformer/moe/test_token_dispatcher.py index 32bb4ddc0d..f2def24ab7 100644 --- a/tests/unit_tests/transformer/moe/test_token_dispatcher.py +++ b/tests/unit_tests/transformer/moe/test_token_dispatcher.py @@ -27,6 +27,8 @@ def setup_method(self, method): self.router = DroplessTopKRouter( num_local_experts=num_moe_experts, local_expert_indices=range(num_moe_experts), + k=2, + routing_type="top", config=transformer_config, ) self.token_dispatcher = self.router.token_dispatcher From 196b91158cb09e9e26f1f4c4ee70e4b20cafb448 Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Sun, 7 Jan 2024 04:32:26 +0000 Subject: [PATCH 169/296] Update CI golden values. --- ...des_50steps_core_enabled_te_8experts2parallel_top2router.json | 1 + 1 file changed, 1 insertion(+) create mode 100644 tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_top2router.json diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_top2router.json b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_top2router.json new file mode 100644 index 0000000000..cee07ba480 --- /dev/null +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_top2router.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.81378, 10.86284, 10.87027, 10.80051, 10.6775, 10.59, 10.08956, 10.20252, 10.10007, 9.76971]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [62685.0, 65693.0, 65929.0, 65172.0, 63628.0, 64659.0, 63472.0, 66120.0, 66690.0, 68136.0]}, "iteration_timing_avg": 0.24636794117647057} From 3ff8c7f77d00703eacb66fde059808ca776d3cb6 Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Wed, 10 Jan 2024 08:06:03 +0000 Subject: [PATCH 170/296] Swap topk and softmax. --- megatron/core/transformer/moe/base_moe_layer.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/megatron/core/transformer/moe/base_moe_layer.py b/megatron/core/transformer/moe/base_moe_layer.py index 53729e0b77..f3b95d5fb0 100644 --- a/megatron/core/transformer/moe/base_moe_layer.py +++ b/megatron/core/transformer/moe/base_moe_layer.py @@ -497,15 +497,15 @@ def routing(self, logits: torch.Tensor): logits = self.apply_z_loss(logits) if self.routing_type == "sinkhorn": - # sinkhorn routing + # Sinkhorn routing. 
scores, indices = self.apply_sinkhorn(logits) elif self.routing_type == "top": - # topK routing - probs = torch.softmax(logits.to(dtype=torch.float32), dim=-1) - scores, indices = torch.topk(probs, k=self.k, dim=1) - scores /= scores.sum(dim=-1, keepdim=True) + # TopK routing. + top_logits, indices = torch.topk(logits, k=self.k, dim=1) + scores = torch.softmax(top_logits, dim=-1, dtype=torch.float32).type_as(logits) # Apply load balancing loss if self.config.moe_aux_loss_coeff > 0: + probs = torch.softmax(logits, dim=-1, dtype=torch.float32) scores = self.apply_aux_loss( self.moe_aux_loss_func, probs, indices, activation=scores ) From 1ce57127e01ac9847f51071d24ca1e74f9c98eeb Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Thu, 11 Jan 2024 03:22:02 +0000 Subject: [PATCH 171/296] Update CI after rebasing. --- megatron/core/transformer/moe/base_moe_layer.py | 5 +++-- ...50steps_core_enabled_te_8experts2parallel_top2router.json | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/megatron/core/transformer/moe/base_moe_layer.py b/megatron/core/transformer/moe/base_moe_layer.py index f3b95d5fb0..3876876c88 100644 --- a/megatron/core/transformer/moe/base_moe_layer.py +++ b/megatron/core/transformer/moe/base_moe_layer.py @@ -468,6 +468,7 @@ def apply_sinkhorn(self, logits: torch.Tensor): Returns: torch.Tensor: The logits tensor after applying sinkhorn routing. """ + assert self.config.moe_aux_loss_coeff == 0, "Sinkhorn routing does not support aux loss." router_activation = torch.sigmoid if self.training: with torch.no_grad(): @@ -514,7 +515,7 @@ def routing(self, logits: torch.Tensor): class MoEAuxLossAutoScaler(torch.autograd.Function): - """A AutoScaler that compute and scales the grad of auxiliary loss. + """An AutoScaler that compute and scales the grad for auxiliary loss. """ @@ -536,7 +537,7 @@ def forward(ctx, output: torch.Tensor, aux_loss: torch.Tensor): @staticmethod def backward(ctx, grad_output: torch.Tensor): - """Trigger the backward pass of the auxiliary loss as well as it scaling. + """Compute and scale the gradient for auxiliary loss.. Args: grad_output (torch.Tensor): The gradient of the output. 
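The "Swap topk and softmax" change above is score-preserving: taking the top-k logits first and softmaxing over just those k values yields the same selected experts and the same renormalized scores as softmaxing over all experts and then renormalizing the top-k probabilities (both reduce to exp(l_i) divided by the sum of exp over the selected logits), while the full softmax now only has to be materialized when the auxiliary loss is enabled. A minimal standalone sketch, not the Megatron code itself, with hypothetical tensor sizes:

    import torch

    def scores_softmax_then_topk(logits, k):
        # Previous ordering: softmax over every expert, pick top-k, renormalize.
        probs = torch.softmax(logits.float(), dim=-1)
        scores, indices = torch.topk(probs, k=k, dim=1)
        scores = scores / scores.sum(dim=-1, keepdim=True)
        return scores, indices

    def scores_topk_then_softmax(logits, k):
        # New ordering: pick the top-k logits, softmax over just those k values.
        top_logits, indices = torch.topk(logits, k=k, dim=1)
        scores = torch.softmax(top_logits, dim=-1, dtype=torch.float32)
        return scores, indices

    logits = torch.randn(16, 8)  # hypothetical [num_tokens, num_experts] router logits
    s_old, i_old = scores_softmax_then_topk(logits, k=2)
    s_new, i_new = scores_topk_then_softmax(logits, k=2)
    assert torch.equal(i_old, i_new)                # same experts selected
    assert torch.allclose(s_old, s_new, atol=1e-6)  # same normalized scores, up to rounding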
diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_top2router.json b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_top2router.json index cee07ba480..0d167f429d 100644 --- a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_top2router.json +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_top2router.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.81378, 10.86284, 10.87027, 10.80051, 10.6775, 10.59, 10.08956, 10.20252, 10.10007, 9.76971]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [62685.0, 65693.0, 65929.0, 65172.0, 63628.0, 64659.0, 63472.0, 66120.0, 66690.0, 68136.0]}, "iteration_timing_avg": 0.24636794117647057} +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.81353, 10.86326, 10.87031, 10.80095, 10.67763, 10.59016, 10.0901, 10.20222, 10.10031, 9.7697]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [62436.0, 65833.0, 65919.0, 65307.0, 63835.0, 64879.0, 63444.0, 66271.0, 66563.0, 68081.0]}, "iteration_timing_avg": 0.26249352941176474} From 09accc84bfa25fa34da81493357ef06482e2c980 Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Mon, 15 Jan 2024 03:23:05 +0000 Subject: [PATCH 172/296] Fix loss scale documentation and remove unused code --- megatron/core/pipeline_parallel/schedules.py | 5 ++++- .../core/transformer/moe/base_moe_layer.py | 22 ++----------------- megatron/core/transformer/moe/moe_layer.py | 14 ++++++------ .../transformer/moe/test_grouped_mlp.py | 4 ++-- 4 files changed, 15 insertions(+), 30 deletions(-) diff --git a/megatron/core/pipeline_parallel/schedules.py b/megatron/core/pipeline_parallel/schedules.py index 23b89883ed..2d63cee9d6 100644 --- a/megatron/core/pipeline_parallel/schedules.py +++ b/megatron/core/pipeline_parallel/schedules.py @@ -208,9 +208,12 @@ def forward_step( if config.timers is not None: config.timers('forward-compute').stop() - # set loss scale for the auxiliary loss of MoE layer + # Set the loss scale for the auxiliary loss of the MoE layer. + # Since we use a trick to do backward on the auxiliary loss, we need to set the scale explicitly. if config.num_moe_experts is not None: + # Calculate the loss scale based on the grad_scale_func if available, else default to 1.0. loss_scale = config.grad_scale_func(1.0) if config.grad_scale_func is not None else 1.0 + # Set the loss scale MoEAuxLossAutoScaler.set_loss_scale(loss_scale / num_microbatches) # If T5 model (or other model with encoder and decoder) diff --git a/megatron/core/transformer/moe/base_moe_layer.py b/megatron/core/transformer/moe/base_moe_layer.py index 3876876c88..74140dbcb2 100644 --- a/megatron/core/transformer/moe/base_moe_layer.py +++ b/megatron/core/transformer/moe/base_moe_layer.py @@ -72,24 +72,6 @@ def routing(self, logits: torch.Tensor): """ raise NotImplementedError("Routing function not implemented.") - def apply_input_jitter(self, input: torch.Tensor, eps: float = 1e-2): - """Add noise to the input tensor. - Refer to https://arxiv.org/abs/2101.03961. - - Args: - input (Tensor): Input tensor. - eps (float, optional): Defaults to 1e-2. - - Returns: - Tensor: Jittered input. 
- """ - if self.input_jitter is None: - self.input_jitter = torch.distributions.uniform.Uniform( - torch.tensor(1.0 - eps, device=input.device), - torch.tensor(1.0 + eps, device=input.device), - ).rsample - return input * self.input_jitter(input.shape) - def forward(self, input: torch.Tensor): """ Forward pass of the router. @@ -185,8 +167,8 @@ def restore( scores (torch.Tensor): Each token's score with each expert. indices (torch.Tensor): The indices used to reorder the expert output. - Returns: - None + Returns: + (torch.Tensor, torch.Tensor): Unpermuted activation and optional bias. """ raise NotImplementedError("Restore function not implemented.") diff --git a/megatron/core/transformer/moe/moe_layer.py b/megatron/core/transformer/moe/moe_layer.py index 4cbb9c21ba..0999023484 100644 --- a/megatron/core/transformer/moe/moe_layer.py +++ b/megatron/core/transformer/moe/moe_layer.py @@ -25,6 +25,13 @@ def __init__(self, config: TransformerConfig): self.config = config self.expert_parallel_size = parallel_state.get_expert_model_parallel_world_size() assert self.config.num_moe_experts % self.expert_parallel_size == 0 + self.num_local_experts = self.config.num_moe_experts // self.expert_parallel_size + local_expert_indices_offset = ( + parallel_state.get_expert_model_parallel_rank() * self.num_local_experts + ) + self.local_expert_indices = [ + local_expert_indices_offset + i for i in range(self.num_local_experts) + ] self.router = None self.experts = None @@ -52,13 +59,6 @@ class DroplessMoELayer(BaseMoELayer): def __init__(self, config: TransformerConfig, submodules: MLPSubmodules = None): self.submodules = submodules super(DroplessMoELayer, self).__init__(config=config) - self.num_local_experts = self.config.num_moe_experts // self.expert_parallel_size - local_expert_indices_offset = ( - parallel_state.get_expert_model_parallel_rank() * self.num_local_experts - ) - self.local_expert_indices = [ - local_expert_indices_offset + i for i in range(self.num_local_experts) - ] self.router = self.initialize_router() self.experts = self.initialize_experts() assert config.moe_token_dropping is False diff --git a/tests/unit_tests/transformer/moe/test_grouped_mlp.py b/tests/unit_tests/transformer/moe/test_grouped_mlp.py index 1777022049..33bfc70009 100644 --- a/tests/unit_tests/transformer/moe/test_grouped_mlp.py +++ b/tests/unit_tests/transformer/moe/test_grouped_mlp.py @@ -137,8 +137,8 @@ def test_gpu_forward(self): (seq_len, batch_size, self.switch_mlp_smm.config.hidden_size), dtype=torch.bfloat16) hidden_states = hidden_states.cuda() - # output_smm, _ = self.switch_mlp_smm(hidden_states) - # output_gmm, _ = self.switch_mlp_gmm(hidden_states) + output_smm, _ = self.switch_mlp_smm(hidden_states) + output_gmm, _ = self.switch_mlp_gmm(hidden_states) # The following assert fails due to the param init value is not exactly # the same between gmm and smm (refer to test_weight_init_value_the_same.) 
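The schedules.py comment in this commit refers to "a trick to do backward on the auxiliary loss": the router activation is passed through a custom autograd function that stores the aux loss in its context and, during backward, returns a gradient of ones for it scaled by the configured value (the main loss scale divided by num_microbatches), so the aux-loss gradient reaches the router weights even though the loss value itself is never added to the model output. A toy sketch of that mechanism; the class name, shapes, and scale value are illustrative stand-ins, not the Megatron API:

    import torch

    class _AuxLossScaler(torch.autograd.Function):
        """Toy stand-in for MoEAuxLossAutoScaler (name and scale are illustrative)."""

        loss_scale = 1.0

        @staticmethod
        def forward(ctx, activation, aux_loss):
            # Pass the activation through unchanged, keep the aux loss alive for backward.
            ctx.save_for_backward(aux_loss)
            return activation

        @staticmethod
        def backward(ctx, grad_output):
            (aux_loss,) = ctx.saved_tensors
            # Gradient of the aux loss w.r.t. itself is 1, scaled to match the main loss.
            return grad_output, torch.ones_like(aux_loss) * _AuxLossScaler.loss_scale

    logits = torch.randn(4, 8, requires_grad=True)
    aux_loss = logits.square().mean()                # stand-in auxiliary loss
    activation = _AuxLossScaler.apply(logits, aux_loss)

    _AuxLossScaler.loss_scale = 1.0 / 8              # e.g. loss scale / num_microbatches
    activation.sum().backward()

    expected = 1 + _AuxLossScaler.loss_scale * 2 * logits.detach() / logits.numel()
    assert torch.allclose(logits.grad, expected)     # aux-loss gradient reached the input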
From 5d0dbd3571d0b5d54f529db74909dcdd42601d45 Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Mon, 15 Jan 2024 06:14:51 +0000 Subject: [PATCH 173/296] Rename base_moe_layer.py to router.py --- megatron/core/pipeline_parallel/schedules.py | 2 +- megatron/core/transformer/moe/moe_layer.py | 2 +- megatron/core/transformer/moe/{base_moe_layer.py => router.py} | 0 tests/unit_tests/transformer/moe/test_routers.py | 2 +- tests/unit_tests/transformer/moe/test_token_dispatcher.py | 2 +- 5 files changed, 4 insertions(+), 4 deletions(-) rename megatron/core/transformer/moe/{base_moe_layer.py => router.py} (100%) diff --git a/megatron/core/pipeline_parallel/schedules.py b/megatron/core/pipeline_parallel/schedules.py index 2d63cee9d6..81126c6a5d 100644 --- a/megatron/core/pipeline_parallel/schedules.py +++ b/megatron/core/pipeline_parallel/schedules.py @@ -9,7 +9,7 @@ from megatron.core import parallel_state from megatron.core.enums import ModelType from megatron.core.pipeline_parallel import p2p_communication -from megatron.core.transformer.moe.base_moe_layer import MoEAuxLossAutoScaler +from megatron.core.transformer.moe.router import MoEAuxLossAutoScaler from megatron.core.utils import get_attr_wrapped_model, get_model_config, get_model_type # Types diff --git a/megatron/core/transformer/moe/moe_layer.py b/megatron/core/transformer/moe/moe_layer.py index 0999023484..22401c3715 100644 --- a/megatron/core/transformer/moe/moe_layer.py +++ b/megatron/core/transformer/moe/moe_layer.py @@ -7,8 +7,8 @@ from megatron.core import parallel_state from megatron.core.transformer.mlp import MLPSubmodules from megatron.core.transformer.module import MegatronModule -from megatron.core.transformer.moe.base_moe_layer import DroplessTopKRouter from megatron.core.transformer.moe.grouped_mlp import GroupedMLP +from megatron.core.transformer.moe.router import DroplessTopKRouter from megatron.core.transformer.moe.switch_mlp import SwitchMLP from megatron.core.transformer.transformer_config import TransformerConfig diff --git a/megatron/core/transformer/moe/base_moe_layer.py b/megatron/core/transformer/moe/router.py similarity index 100% rename from megatron/core/transformer/moe/base_moe_layer.py rename to megatron/core/transformer/moe/router.py diff --git a/tests/unit_tests/transformer/moe/test_routers.py b/tests/unit_tests/transformer/moe/test_routers.py index 1950869114..9328e0f24e 100644 --- a/tests/unit_tests/transformer/moe/test_routers.py +++ b/tests/unit_tests/transformer/moe/test_routers.py @@ -4,7 +4,7 @@ import torch -from megatron.core.transformer.moe.base_moe_layer import Router +from megatron.core.transformer.moe.router import Router from megatron.initialize import _set_random_seed from tests.unit_tests.test_utilities import Utils from megatron.core.transformer.transformer_config import TransformerConfig diff --git a/tests/unit_tests/transformer/moe/test_token_dispatcher.py b/tests/unit_tests/transformer/moe/test_token_dispatcher.py index f2def24ab7..c9ef001055 100644 --- a/tests/unit_tests/transformer/moe/test_token_dispatcher.py +++ b/tests/unit_tests/transformer/moe/test_token_dispatcher.py @@ -4,7 +4,7 @@ import torch -from megatron.core.transformer.moe.base_moe_layer import Router, DroplessTopKRouter +from megatron.core.transformer.moe.router import Router, DroplessTopKRouter from megatron.initialize import _set_random_seed from tests.unit_tests.test_utilities import Utils from megatron.core.transformer.transformer_config import TransformerConfig From a003610eac2e06f6414f2870b7f679de409fc138 Mon Sep 17 
00:00:00 2001 From: Zijie Yan Date: Wed, 17 Jan 2024 03:03:01 +0000 Subject: [PATCH 174/296] Fix review comments. --- megatron/core/transformer/moe/grouped_mlp.py | 6 ----- megatron/core/transformer/moe/moe_layer.py | 25 +++++++++----------- megatron/core/transformer/moe/switch_mlp.py | 6 ----- 3 files changed, 11 insertions(+), 26 deletions(-) diff --git a/megatron/core/transformer/moe/grouped_mlp.py b/megatron/core/transformer/moe/grouped_mlp.py index 22aa915aee..57428dcf11 100644 --- a/megatron/core/transformer/moe/grouped_mlp.py +++ b/megatron/core/transformer/moe/grouped_mlp.py @@ -126,9 +126,6 @@ def glu(x): setattr(self.weight2, 'allreduce', not self.expert_parallel) def forward(self, permuted_local_hidden_states, tokens_per_expert): - # Permutation of tokens - # permuted_local_hidden_states, tokens_per_expert = self.token_permutation(hidden_states) - # Reshape the weights for the grouped GEMMs. w1 = self.weight1.view(self.num_local_experts, self.config.hidden_size, -1) w2 = self.weight2.view(self.num_local_experts, -1, self.config.hidden_size) @@ -139,7 +136,4 @@ def forward(self, permuted_local_hidden_states, tokens_per_expert): fc2_output = gg.ops.gmm(intermediate_parallel, w2, tokens_per_expert, trans_b=False) - # Un-permutation of tokens. - # output_total, _ = self.token_unpermutation(fc2_output) - return fc2_output, None diff --git a/megatron/core/transformer/moe/moe_layer.py b/megatron/core/transformer/moe/moe_layer.py index 22401c3715..599ee187c8 100644 --- a/megatron/core/transformer/moe/moe_layer.py +++ b/megatron/core/transformer/moe/moe_layer.py @@ -92,24 +92,21 @@ def initialize_experts(self): return experts def initialize_router(self): + routing_type = None if self.config.moe_router_type.lower().startswith("top"): k = int(self.config.moe_router_type[3:]) - router = DroplessTopKRouter( - self.num_local_experts, - self.local_expert_indices, - k=k, - routing_type="top", - config=self.config, - ) + routing_type = "top" elif self.config.moe_router_type.lower().startswith("sinkhorn"): k = int(self.config.moe_router_type[8:]) - router = DroplessTopKRouter( - self.num_local_experts, - self.local_expert_indices, - k=k, - routing_type="sinkhorn", - config=self.config, - ) + routing_type = "sinkhorn" else: raise NotImplementedError(f"Routing method {self.config.moe_router_type} not supported") + + router = DroplessTopKRouter( + self.num_local_experts, + self.local_expert_indices, + k=k, + routing_type=routing_type, + config=self.config, + ) return router diff --git a/megatron/core/transformer/moe/switch_mlp.py b/megatron/core/transformer/moe/switch_mlp.py index 0a75f9f7b9..434c33e3cb 100644 --- a/megatron/core/transformer/moe/switch_mlp.py +++ b/megatron/core/transformer/moe/switch_mlp.py @@ -24,9 +24,6 @@ def __init__(self, num_local_experts, config: TransformerConfig, submodules: MLP self.local_experts.append(expert) def forward(self, permuted_local_hidden_states, tokens_per_expert): - # global_hidden_states, global_indices = self.token_permutation(hidden_states) - # permuted_local_hidden_states, tokens_per_expert = self.token_permutation(hidden_states) - output_local = torch.zeros_like(permuted_local_hidden_states) output_bias_local = None if self.add_bias: @@ -47,7 +44,4 @@ def forward(self, permuted_local_hidden_states, tokens_per_expert): output_bias = output_bias.expand_as(output) output_bias_local[start:end, :] = output_bias - # Un-permutation of tokens. 
- # output_total, output_bias_total = self.token_unpermutation(output_local, output_bias_local) - return output_local, output_bias_local From e2d3e4fdadba50e297c911ae2d7850a35597b087 Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Fri, 19 Jan 2024 15:10:07 +0000 Subject: [PATCH 175/296] Renaming. --- megatron/arguments.py | 36 +++++------ megatron/core/transformer/moe/grouped_mlp.py | 3 +- megatron/core/transformer/moe/moe_layer.py | 16 +---- megatron/core/transformer/moe/router.py | 62 ++++++++++--------- megatron/core/transformer/moe/switch_mlp.py | 3 +- .../core/transformer/transformer_config.py | 10 +-- .../transformer/moe/test_token_dispatcher.py | 4 +- 7 files changed, 61 insertions(+), 73 deletions(-) diff --git a/megatron/arguments.py b/megatron/arguments.py index 2c69d653af..4fd71890b5 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -397,19 +397,6 @@ def validate_args(args, defaults={}): # MoE Spec check if args.num_experts is not None: assert args.spec is None, "Model Spec must be None when using MoEs" - if args.moe_router_type.lower().startswith("top"): - try: - k = int(args.moe_router_type[3:]) - assert k > 0, "Invalid topk router name: {}, please ensure k > 0.".format( - args.moe_router_type - ) - except: - raise RuntimeError( - "Invalid `topk` router name: `{}`. Please use the format `topk`, where `k` must be an integer.".format( - args.moe_router_type - ) - ) - # Expert parallelism check if args.expert_model_parallel_size > 1: @@ -1426,6 +1413,19 @@ def _add_moe_args(parser): group.add_argument( '--num-experts', type=int, default=None, help='Number of Experts in MoE (None means no MoE)' ) + group.add_argument( + '--moe-router-load-balancing-type', + type=str, + choices=['aux_loss', 'sinkhorn', None], + default='aux_loss', + help='Determines the load balancing strategy for the router. "aux_loss" corresponds to the load balancing loss used in GShard and SwitchTransformer, "sinkhorn" corresponds to the balancing algorithm used in S-BASE, and "None" implies no load balancing. The default is "aux_loss".', + ) + group.add_argument( + '--moe-router-topk', + type=int, + default=2, + help='Number of experts to route to for each token. The default is 2.', + ) group.add_argument( '--moe-grouped-gemm', action='store_true', @@ -1444,19 +1444,13 @@ def _add_moe_args(parser): group.add_argument( '--moe-z-loss-coeff', type=float, - default=0.0, + default=None, help='Scaling coefficient for the z-loss: a starting value of 1e-3 is recommended.', ) - group.add_argument( - '--moe-router-type', - type=str, - default='sinkhorn1', - help='Options for router type. Currently supports sinkhornK and topK router, where K represents the number of routers each token selects. The default is sinkhorn1.', - ) group.add_argument( '--moe-token-dropping', action='store_true', - help='Currently unsupported. This feature involves selectively dropping and padding tokens for each expert to achieve a specified capacity, similar to to GShard, Switch-Transformer, and DeepSpeed-MoE.', + help='This feature involves selectively dropping and padding tokens for each expert to achieve a specified capacity, similar to GShard, Switch-Transformer, and DeepSpeed-MoE. 
Note: Currently unsupported.', ) # zero token drop moe arguments diff --git a/megatron/core/transformer/moe/grouped_mlp.py b/megatron/core/transformer/moe/grouped_mlp.py index 57428dcf11..f4f0482218 100644 --- a/megatron/core/transformer/moe/grouped_mlp.py +++ b/megatron/core/transformer/moe/grouped_mlp.py @@ -16,8 +16,7 @@ class GroupedMLP(MegatronModule): """ - Top-1 Mixture of Experts Layer with Grouped GEMM. Routes input to one of N MLP "experts" - Curently supports Sinkhorn based expert routing. + Experts Layer with Grouped GEMM. Routes input to one of N MLP "experts" """ def __init__(self, num_local_experts: int, config: TransformerConfig): diff --git a/megatron/core/transformer/moe/moe_layer.py b/megatron/core/transformer/moe/moe_layer.py index 599ee187c8..c5e81d0dc5 100644 --- a/megatron/core/transformer/moe/moe_layer.py +++ b/megatron/core/transformer/moe/moe_layer.py @@ -8,7 +8,7 @@ from megatron.core.transformer.mlp import MLPSubmodules from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.moe.grouped_mlp import GroupedMLP -from megatron.core.transformer.moe.router import DroplessTopKRouter +from megatron.core.transformer.moe.router import TopKRouter from megatron.core.transformer.moe.switch_mlp import SwitchMLP from megatron.core.transformer.transformer_config import TransformerConfig @@ -92,21 +92,9 @@ def initialize_experts(self): return experts def initialize_router(self): - routing_type = None - if self.config.moe_router_type.lower().startswith("top"): - k = int(self.config.moe_router_type[3:]) - routing_type = "top" - elif self.config.moe_router_type.lower().startswith("sinkhorn"): - k = int(self.config.moe_router_type[8:]) - routing_type = "sinkhorn" - else: - raise NotImplementedError(f"Routing method {self.config.moe_router_type} not supported") - - router = DroplessTopKRouter( + router = TopKRouter( self.num_local_experts, self.local_expert_indices, - k=k, - routing_type=routing_type, config=self.config, ) return router diff --git a/megatron/core/transformer/moe/router.py b/megatron/core/transformer/moe/router.py index 74140dbcb2..d9d5dda4c7 100644 --- a/megatron/core/transformer/moe/router.py +++ b/megatron/core/transformer/moe/router.py @@ -124,9 +124,9 @@ def apply_z_loss(self, logits): Returns: torch.Tensor: The logits after applying the z-loss. """ - - z_loss = z_loss_func(logits) - logits = MoEAuxLossAutoScaler.apply(logits, z_loss) + if self.config.moe_z_loss_coeff is not None: + z_loss = z_loss_func(logits) + logits = MoEAuxLossAutoScaler.apply(logits, z_loss) return logits @@ -409,7 +409,7 @@ def restore( return output_total, output_bias_total -class DroplessTopKRouter(Router): +class TopKRouter(Router): """TopK Router without token dropping. """ @@ -417,8 +417,6 @@ def __init__( self, num_local_experts: int, local_expert_indices: List[int], - k: int, - routing_type: str, config: TransformerConfig, ) -> None: """Initialize the zero token dropping router. @@ -426,22 +424,18 @@ def __init__( Args: num_local_experts (int): The number of local experts. local_expert_indices (List[int]): The indices of the local experts. - k: The number of experts to route to. - routing_type (str): The routing type to use. Currently supports sinkhorn and top. config (TransformerConfig): The configuration for the transformer model. - """ super().__init__(config=config) assert config.moe_token_dropping == False - assert routing_type in ["sinkhorn", "top"], f"Routing type {routing_type} not supported." 
- self.k = k - self.routing_type = routing_type + self.topk = self.config.moe_router_topk + self.routing_type = self.config.moe_router_load_balancing_type self.token_dispatcher = MoEDroplessTokenDispatcher( - num_local_experts, local_expert_indices, self.k, config + num_local_experts, local_expert_indices, self.topk, config ) self.moe_aux_loss_func = switch_load_balancing_loss_func - def apply_sinkhorn(self, logits: torch.Tensor): + def sinkhorn_load_balancing(self, logits: torch.Tensor): """Apply sinkhorn routing to the logits tensor. Args: @@ -457,12 +451,30 @@ def apply_sinkhorn(self, logits: torch.Tensor): norm_logits = sinkhorn( logits.to(dtype=torch.float32) ) # explicit fp32 conversion for stability - _, indices = torch.topk(norm_logits, k=self.k, dim=1) + _, indices = torch.topk(norm_logits, k=self.topk, dim=1) logits = router_activation(logits) scores = torch.gather(logits, 1, indices) else: logits = router_activation(logits) - scores, indices = torch.topk(logits, k=self.k, dim=1) + scores, indices = torch.topk(logits, k=self.topk, dim=1) + return scores, indices + + def aux_loss_load_balancing(self, logits: torch.Tensor): + """Apply loss-based load balancing to the logits tensor. + + Args: + logits (torch.Tensor): The logits tensor. + + Returns: + Tuple[torch.Tensor, torch.Tensor]: The scores and the indices tensor after applying load balancing. + """ + top_logits, indices = torch.topk(logits, k=self.topk, dim=1) + scores = torch.softmax(top_logits, dim=-1, dtype=torch.float32).type_as(logits) + # Apply load balancing loss + probs = torch.softmax(logits, dim=-1, dtype=torch.float32) + scores = self.apply_aux_loss( + self.moe_aux_loss_func, probs, indices, activation=scores + ) return scores, indices def routing(self, logits: torch.Tensor): @@ -476,22 +488,16 @@ def routing(self, logits: torch.Tensor): """ logits = logits.view(-1, self.config.num_moe_experts) # Apply Z-Loss - if self.config.moe_z_loss_coeff > 0: - logits = self.apply_z_loss(logits) + logits = self.apply_z_loss(logits) if self.routing_type == "sinkhorn": - # Sinkhorn routing. - scores, indices = self.apply_sinkhorn(logits) - elif self.routing_type == "top": - # TopK routing. + scores, indices = self.sinkhorn_load_balancing(logits) + elif self.routing_type == "aux_loss": + scores, indices = self.aux_loss_load_balancing(logits) + elif self.routing_type is None: + # A naive top-k routing without load balancing top_logits, indices = torch.topk(logits, k=self.k, dim=1) scores = torch.softmax(top_logits, dim=-1, dtype=torch.float32).type_as(logits) - # Apply load balancing loss - if self.config.moe_aux_loss_coeff > 0: - probs = torch.softmax(logits, dim=-1, dtype=torch.float32) - scores = self.apply_aux_loss( - self.moe_aux_loss_func, probs, indices, activation=scores - ) return scores, indices diff --git a/megatron/core/transformer/moe/switch_mlp.py b/megatron/core/transformer/moe/switch_mlp.py index 434c33e3cb..5e390370fd 100644 --- a/megatron/core/transformer/moe/switch_mlp.py +++ b/megatron/core/transformer/moe/switch_mlp.py @@ -10,8 +10,7 @@ class SwitchMLP(MegatronModule): """ - Top-1 Mixture of Experts Layer. Routes input to one of N MLP "experts" - Curently supports Sinkhorn based expert routing. + Mixture of Experts Layer. 
Routes input to one of N MLP "experts" """ def __init__(self, num_local_experts, config: TransformerConfig, submodules: MLPSubmodules): diff --git a/megatron/core/transformer/transformer_config.py b/megatron/core/transformer/transformer_config.py index 7859d3c2c8..9bbf2eb0ab 100644 --- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -58,12 +58,13 @@ class TransformerConfig(ModelParallelConfig): clone_scatter_output_in_embedding (bool): When set to true, clone the output of scatter_to_sequence_parallel_region in embedding layer to facilitate garbage collection of input. normalization (str): Swtich b/w `LayerNorm` and `RMSNorm` as normalization layers. For now, these are primarily used by Transformer-Engine's layers like `LayerNormLinear`. Default value is `LayerNorm`. window_size ((int,int) or None): If not None, then will use sliding window attention. The size of the window is specified by the numbers inside the tuple; -1 is special value meaning "infinite window size". + moe_router_load_balancing_type (str): Determines the load balancing strategy for the router. "aux_loss" corresponds to the load balancing loss used in GShard and SwitchTransformer, "sinkhorn" corresponds to the balancing algorithm used in S-BASE, and "None" implies no load balancing. The default is "aux_loss". + moe_router_topk (int): Number of experts to route to for each token. The default is 2. moe_grouped_gemm (bool): When there are multiple experts per rank, compress multiple local (potentially small) gemms in a single kernel launch to improve the utilization and performance by leveraging the Grouped GEMM feature introduced since CUTLASS 2.8 (https://github.com/fanshiqing/grouped_gemm). moe_aux_loss_coeff (float): Scaling coefficient for the aux loss: a starting value of 1e-2 is recommended. moe_z_loss_coeff (float): Scaling coefficient for the z-loss: a starting value of 1e-3 is recommended. - moe_router_type (str): Options for router type. Currently supports sinkhorn and topk router. - moe_token_dropping (bool): Currently unsupported. This feature involves selectively dropping and padding tokens for each expert to achieve a specified capacity, similar to to GShard, Switch-Transformer, and DeepSpeed-MoE., + moe_token_dropping (bool): This feature involves selectively dropping and padding tokens for each expert to achieve a specified capacity, similar to GShard, Switch-Transformer, and DeepSpeed-MoE. Note: Currently unsupported. """ # model architecture @@ -133,11 +134,12 @@ class TransformerConfig(ModelParallelConfig): normalization: bool = "LayerNorm" # alt value supported by TE: "RMSNorm" # MoE related + moe_router_load_balancing_type: str = "aux_loss" + moe_router_topk: int = 2 moe_grouped_gemm: bool = False moe_aux_loss_coeff: float = 0 # 1e-2 would be a good start value for load balance loss. - moe_z_loss_coeff: float = 0 # 1e-3 would be a good start value for z-loss + moe_z_loss_coeff: float = None # 1e-3 would be a good start value for z-loss moe_token_dropping: bool = False # TODO: Support token dropping. - moe_router_type: str = "sinkhorn" def __post_init__(self): """ Python dataclass method that is used to modify attributes after initialization. 
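With this renaming, the single moe_router_type string (for example "sinkhorn1" or "top2") is split into moe_router_load_balancing_type and moe_router_topk, available both on TransformerConfig and through the new command-line flags in arguments.py. A hedged usage sketch with illustrative values, assuming the kind of setup the unit tests use (model-parallel state already initialized):

    # Illustrative values only; the new fields mirror the flags added in arguments.py.
    from megatron.core.transformer.transformer_config import TransformerConfig

    config = TransformerConfig(
        num_layers=2,
        hidden_size=12,
        num_attention_heads=4,
        num_moe_experts=8,
        use_cpu_initialization=True,
        moe_router_load_balancing_type="aux_loss",  # "aux_loss", "sinkhorn", or None
        moe_router_topk=2,                          # replaces the old moe_router_type="top2"
        moe_aux_loss_coeff=1e-2,
        moe_z_loss_coeff=None,                      # set e.g. 1e-3 to enable the z-loss
        moe_grouped_gemm=False,
        moe_token_dropping=False,
    )

The equivalent command line would pass --num-experts 8 --moe-router-load-balancing-type aux_loss --moe-router-topk 2 --moe-aux-loss-coeff 1e-2, which is how the .gitlab-ci.yml jobs updated in the following "Renaming." commit distinguish sinkhorn top-1 from aux-loss top-2 runs.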
diff --git a/tests/unit_tests/transformer/moe/test_token_dispatcher.py b/tests/unit_tests/transformer/moe/test_token_dispatcher.py index c9ef001055..2b12faeffc 100644 --- a/tests/unit_tests/transformer/moe/test_token_dispatcher.py +++ b/tests/unit_tests/transformer/moe/test_token_dispatcher.py @@ -4,7 +4,7 @@ import torch -from megatron.core.transformer.moe.router import Router, DroplessTopKRouter +from megatron.core.transformer.moe.router import Router, TopKRouter from megatron.initialize import _set_random_seed from tests.unit_tests.test_utilities import Utils from megatron.core.transformer.transformer_config import TransformerConfig @@ -24,7 +24,7 @@ def setup_method(self, method): use_cpu_initialization=True, moe_router_type="top2", ) - self.router = DroplessTopKRouter( + self.router = TopKRouter( num_local_experts=num_moe_experts, local_expert_indices=range(num_moe_experts), k=2, From b616497a00494a820cba5bca672ea5418fef3940 Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Fri, 19 Jan 2024 15:24:11 +0000 Subject: [PATCH 176/296] Renaming. --- .gitlab-ci.yml | 12 ++++++------ megatron/core/transformer/moe/moe_layer.py | 6 +----- megatron/core/transformer/moe/router.py | 14 ++++---------- .../unit_tests/transformer/moe/test_grouped_mlp.py | 2 +- tests/unit_tests/transformer/moe/test_routers.py | 3 ++- .../unit_tests/transformer/moe/test_switch_mlp.py | 2 +- .../transformer/moe/test_token_dispatcher.py | 5 ++--- 7 files changed, 17 insertions(+), 27 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index a4bcdff82b..cc5d00c8b7 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -533,7 +533,7 @@ train.te_core_moe_gpt3.345m_tp2_pp2_2experts_1node_50steps: USE_CORE: 1 TEST_LEVEL: NIGHTLY_TESTS METADATA: "te_2experts" - ADDITIONAL_PARAMS: "--num-experts 2" + ADDITIONAL_PARAMS: "--num-experts 2 --moe-router-load-balancing-type sinkhorn --moe-router-topk 1" train.te_core_moe_gpt3.345m_tp2_pp2_4experts2parallel_1node_50steps: <<: *selene-test-launcher @@ -548,7 +548,7 @@ train.te_core_moe_gpt3.345m_tp2_pp2_4experts2parallel_1node_50steps: USE_CORE: 1 TEST_LEVEL: NIGHTLY_TESTS METADATA: "te_4experts2parallel" - ADDITIONAL_PARAMS: "--sequence-parallel --num-experts 4 --expert-model-parallel-size 2" + ADDITIONAL_PARAMS: "--sequence-parallel --num-experts 4 --expert-model-parallel-size 2 --moe-router-load-balancing-type sinkhorn --moe-router-topk 1" train.te_core_moe_gpt3.345m_tp2_pp1_8experts2parallel_1node_50steps: <<: *selene-test-launcher @@ -563,7 +563,7 @@ train.te_core_moe_gpt3.345m_tp2_pp1_8experts2parallel_1node_50steps: USE_CORE: 1 TEST_LEVEL: MR_TESTS METADATA: "te_8experts2parallel" - ADDITIONAL_PARAMS: "--sequence-parallel --num-experts 8 --expert-model-parallel-size 2" + ADDITIONAL_PARAMS: "--sequence-parallel --num-experts 8 --expert-model-parallel-size 2 --moe-router-load-balancing-type sinkhorn --moe-router-topk 1" train.te_core_moe_gpt3.345m_tp2_pp1_8experts2parallel_groupedGEMM_1node_50steps: <<: *selene-test-launcher @@ -579,7 +579,7 @@ train.te_core_moe_gpt3.345m_tp2_pp1_8experts2parallel_groupedGEMM_1node_50steps: MOE_GROUPED_GEMM: 1 TEST_LEVEL: MR_TESTS METADATA: "te_8experts2parallel_groupedGEMM" - ADDITIONAL_PARAMS: "--moe-grouped-gemm --disable-bias-linear --sequence-parallel --num-experts 8 --expert-model-parallel-size 2" + ADDITIONAL_PARAMS: "--moe-grouped-gemm --disable-bias-linear --sequence-parallel --num-experts 8 --expert-model-parallel-size 2 --moe-router-load-balancing-type sinkhorn --moe-router-topk 1" 
train.te_core_moe_gpt3.345m_tp2_pp1_8experts2parallel_top2_1node_50steps: <<: *selene-test-launcher @@ -595,7 +595,7 @@ train.te_core_moe_gpt3.345m_tp2_pp1_8experts2parallel_top2_1node_50steps: MOE_GROUPED_GEMM: 1 TEST_LEVEL: MR_TESTS METADATA: "te_8experts2parallel_top2router" - ADDITIONAL_PARAMS: "--disable-bias-linear --sequence-parallel --num-experts 8 --expert-model-parallel-size 2 --moe-router-type top2 --moe-aux-loss-coeff 1e-2" + ADDITIONAL_PARAMS: "--disable-bias-linear --sequence-parallel --num-experts 8 --expert-model-parallel-size 2 --moe-router-load-balancing-type "aux_loss" --moe-router-topk 2 --moe-aux-loss-coeff 1e-2" train.moe_gpt3.345m_tp2_pp2_4experts_1node_50steps: <<: *selene-test-launcher @@ -610,7 +610,7 @@ train.moe_gpt3.345m_tp2_pp2_4experts_1node_50steps: USE_CORE: 0 TEST_LEVEL: NIGHTLY_TESTS METADATA: "4experts" - ADDITIONAL_PARAMS: "--num-experts 4" + ADDITIONAL_PARAMS: "--num-experts 4 --moe-router-load-balancing-type sinkhorn --moe-router-topk 1" train.bert.345m_tp4_pp1_1node_50steps: <<: *selene-test-launcher diff --git a/megatron/core/transformer/moe/moe_layer.py b/megatron/core/transformer/moe/moe_layer.py index c5e81d0dc5..6ed28f2bbd 100644 --- a/megatron/core/transformer/moe/moe_layer.py +++ b/megatron/core/transformer/moe/moe_layer.py @@ -92,9 +92,5 @@ def initialize_experts(self): return experts def initialize_router(self): - router = TopKRouter( - self.num_local_experts, - self.local_expert_indices, - config=self.config, - ) + router = TopKRouter(self.num_local_experts, self.local_expert_indices, config=self.config,) return router diff --git a/megatron/core/transformer/moe/router.py b/megatron/core/transformer/moe/router.py index d9d5dda4c7..0d934cf846 100644 --- a/megatron/core/transformer/moe/router.py +++ b/megatron/core/transformer/moe/router.py @@ -410,14 +410,10 @@ def restore( class TopKRouter(Router): - """TopK Router without token dropping. - """ + """Route each token to the top-k experts.""" def __init__( - self, - num_local_experts: int, - local_expert_indices: List[int], - config: TransformerConfig, + self, num_local_experts: int, local_expert_indices: List[int], config: TransformerConfig, ) -> None: """Initialize the zero token dropping router. @@ -458,7 +454,7 @@ def sinkhorn_load_balancing(self, logits: torch.Tensor): logits = router_activation(logits) scores, indices = torch.topk(logits, k=self.topk, dim=1) return scores, indices - + def aux_loss_load_balancing(self, logits: torch.Tensor): """Apply loss-based load balancing to the logits tensor. 
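The sinkhorn_load_balancing path above leans on the sinkhorn helper this series placed in moe_utils.py: the iteration alternately rescales the token rows and expert columns of exp(logits) until the column scaling converges, so each expert column of the normalized matrix carries roughly 1 / num_experts of the total mass and the subsequent top-k pick is balanced without an auxiliary loss. A short sketch of the training-time branch, with hypothetical shapes:

    import torch
    from megatron.core.transformer.moe.moe_utils import sinkhorn

    logits = torch.randn(16, 4)              # hypothetical [num_tokens, num_experts] logits
    norm_logits = sinkhorn(logits.float())   # fp32 for stability, as in sinkhorn_load_balancing
    _, indices = torch.topk(norm_logits, k=1, dim=1)
    scores = torch.gather(torch.sigmoid(logits), 1, indices)

    # Column sums of the normalized matrix are close to 1 / num_experts, so the
    # top-k picks spread tokens across experts without an auxiliary loss term.
    print(norm_logits.sum(dim=0))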
@@ -472,9 +468,7 @@ def aux_loss_load_balancing(self, logits: torch.Tensor): scores = torch.softmax(top_logits, dim=-1, dtype=torch.float32).type_as(logits) # Apply load balancing loss probs = torch.softmax(logits, dim=-1, dtype=torch.float32) - scores = self.apply_aux_loss( - self.moe_aux_loss_func, probs, indices, activation=scores - ) + scores = self.apply_aux_loss(self.moe_aux_loss_func, probs, indices, activation=scores) return scores, indices def routing(self, logits: torch.Tensor): diff --git a/tests/unit_tests/transformer/moe/test_grouped_mlp.py b/tests/unit_tests/transformer/moe/test_grouped_mlp.py index 33bfc70009..ad5d0e817c 100644 --- a/tests/unit_tests/transformer/moe/test_grouped_mlp.py +++ b/tests/unit_tests/transformer/moe/test_grouped_mlp.py @@ -39,7 +39,7 @@ def setup_method(self, method, use_cpu_initialization=False, swiglu=True): num_moe_experts=self.num_experts, use_cpu_initialization=self.use_cpu_initialization, add_bias_linear=False, gated_linear_unit=self.gated_linear_unit, bias_gelu_fusion=False, - bf16=True, params_dtype=torch.bfloat16, moe_router_type="sinkhorn1") + bf16=True, params_dtype=torch.bfloat16, moe_router_load_balancing_type="sinkhorn", moe_router_topk=1) self.fc1_ffn_hidden_size = tf_config.ffn_hidden_size self.fc2_ffn_hidden_size = tf_config.ffn_hidden_size diff --git a/tests/unit_tests/transformer/moe/test_routers.py b/tests/unit_tests/transformer/moe/test_routers.py index 9328e0f24e..3e48f14095 100644 --- a/tests/unit_tests/transformer/moe/test_routers.py +++ b/tests/unit_tests/transformer/moe/test_routers.py @@ -24,7 +24,8 @@ def setup_method(self, method): num_attention_heads=4, num_moe_experts=num_moe_experts, use_cpu_initialization=True, - moe_router_type="top2", + moe_router_load_balancing_type="aux_loss", + moe_router_topk=2, moe_aux_loss_coeff=0, ) transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec( diff --git a/tests/unit_tests/transformer/moe/test_switch_mlp.py b/tests/unit_tests/transformer/moe/test_switch_mlp.py index c3cf8310fc..bc645596ed 100644 --- a/tests/unit_tests/transformer/moe/test_switch_mlp.py +++ b/tests/unit_tests/transformer/moe/test_switch_mlp.py @@ -17,7 +17,7 @@ def setup_method(self, method): model_parallel_cuda_manual_seed(123) print("done intializing") num_moe_experts = 2 - transformer_config = TransformerConfig(num_layers=2, hidden_size=12, num_attention_heads=4, num_moe_experts=num_moe_experts, use_cpu_initialization=True, moe_router_type="sinkhorn1") + transformer_config = TransformerConfig(num_layers=2, hidden_size=12, num_attention_heads=4, num_moe_experts=num_moe_experts, use_cpu_initialization=True, moe_router_load_balancing_type="sinkhorn", moe_router_topk=1) transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec( num_experts=num_moe_experts, moe_grouped_gemm=False) self.switch_mlp = DroplessMoELayer(transformer_config, transformer_layer_spec.submodules.mlp.submodules) diff --git a/tests/unit_tests/transformer/moe/test_token_dispatcher.py b/tests/unit_tests/transformer/moe/test_token_dispatcher.py index 2b12faeffc..cc56e0673b 100644 --- a/tests/unit_tests/transformer/moe/test_token_dispatcher.py +++ b/tests/unit_tests/transformer/moe/test_token_dispatcher.py @@ -22,13 +22,12 @@ def setup_method(self, method): num_attention_heads=4, num_moe_experts=num_moe_experts, use_cpu_initialization=True, - moe_router_type="top2", + moe_router_load_balancing_type="aux_loss", + moe_router_topk=2, ) self.router = TopKRouter( num_local_experts=num_moe_experts, 
local_expert_indices=range(num_moe_experts), - k=2, - routing_type="top", config=transformer_config, ) self.token_dispatcher = self.router.token_dispatcher From 20383240c5245e7afc9495323610f46a27160e6f Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Sat, 20 Jan 2024 02:52:23 +0000 Subject: [PATCH 177/296] Move dispatcher and experts. --- megatron/core/models/gpt/gpt_layer_specs.py | 4 +- .../moe/{grouped_mlp.py => experts.py} | 40 ++ megatron/core/transformer/moe/moe_layer.py | 49 +-- megatron/core/transformer/moe/moe_utils.py | 46 ++ megatron/core/transformer/moe/router.py | 407 ++---------------- megatron/core/transformer/moe/switch_mlp.py | 46 -- .../core/transformer/moe/token_dispatcher.py | 283 ++++++++++++ .../transformer/moe/test_grouped_mlp.py | 12 +- .../transformer/moe/test_routers.py | 4 +- .../transformer/moe/test_switch_mlp.py | 6 +- .../transformer/moe/test_token_dispatcher.py | 5 +- 11 files changed, 444 insertions(+), 458 deletions(-) rename megatron/core/transformer/moe/{grouped_mlp.py => experts.py} (76%) delete mode 100644 megatron/core/transformer/moe/switch_mlp.py create mode 100644 megatron/core/transformer/moe/token_dispatcher.py diff --git a/megatron/core/models/gpt/gpt_layer_specs.py b/megatron/core/models/gpt/gpt_layer_specs.py index db3f5e9dd0..2e35e1f250 100755 --- a/megatron/core/models/gpt/gpt_layer_specs.py +++ b/megatron/core/models/gpt/gpt_layer_specs.py @@ -14,7 +14,7 @@ from megatron.core.transformer.enums import AttnMaskType from megatron.core.transformer.identity_op import IdentityOp from megatron.core.transformer.mlp import MLP, MLPSubmodules -from megatron.core.transformer.moe.moe_layer import DroplessMoELayer +from megatron.core.transformer.moe.moe_layer import MoELayer from megatron.core.transformer.spec_utils import ModuleSpec from megatron.core.transformer.transformer_layer import TransformerLayer, TransformerLayerSubmodules @@ -92,7 +92,7 @@ def _get_mlp_module_spec( else: # SwitchMLP based MoE with modules in megatron core. return ModuleSpec( - module=DroplessMoELayer, + module=MoELayer, submodules=MLPSubmodules(linear_fc1=ColumnParallelLinear, linear_fc2=RowParallelLinear,) if not moe_grouped_gemm else None, diff --git a/megatron/core/transformer/moe/grouped_mlp.py b/megatron/core/transformer/moe/experts.py similarity index 76% rename from megatron/core/transformer/moe/grouped_mlp.py rename to megatron/core/transformer/moe/experts.py index f4f0482218..ce2dfaa5c9 100644 --- a/megatron/core/transformer/moe/grouped_mlp.py +++ b/megatron/core/transformer/moe/experts.py @@ -1,5 +1,6 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +import numpy as np import torch from torch.nn.parameter import Parameter @@ -9,6 +10,7 @@ _initialize_affine_weight_gpu, ) from megatron.core.tensor_parallel.utils import divide +from megatron.core.transformer.mlp import MLP, MLPSubmodules from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.moe import grouped_gemm_util as gg from megatron.core.transformer.transformer_config import TransformerConfig @@ -136,3 +138,41 @@ def forward(self, permuted_local_hidden_states, tokens_per_expert): fc2_output = gg.ops.gmm(intermediate_parallel, w2, tokens_per_expert, trans_b=False) return fc2_output, None + + +class SwitchMLP(MegatronModule): + """ + Mixture of Experts Layer. 
Routes input to one of N MLP "experts" + """ + + def __init__(self, num_local_experts, config: TransformerConfig, submodules: MLPSubmodules): + super().__init__(config=config) + self.add_bias = config.add_bias_linear + self.num_local_experts = num_local_experts + self.local_experts = torch.nn.ModuleList() + for _ in range(self.num_local_experts): + expert = MLP(self.config, submodules, is_expert=True) + self.local_experts.append(expert) + + def forward(self, permuted_local_hidden_states, tokens_per_expert): + output_local = torch.zeros_like(permuted_local_hidden_states) + output_bias_local = None + if self.add_bias: + output_bias_local = torch.zeros_like(permuted_local_hidden_states) + + cumsum_num_tokens = torch.cumsum(tokens_per_expert, dim=0) + # Insert zero at the begining for offset index's convenience + zero_tensor = torch.zeros(1, dtype=torch.long) + cumsum_num_tokens = torch.cat((zero_tensor, cumsum_num_tokens)) + for expert_num, expert in enumerate(self.local_experts): + start = cumsum_num_tokens[expert_num] + end = cumsum_num_tokens[expert_num + 1] + hidden = permuted_local_hidden_states[start:end] + output, output_bias = expert(hidden) + + output_local[start:end] = output + if self.add_bias: + output_bias = output_bias.expand_as(output) + output_bias_local[start:end, :] = output_bias + + return output_local, output_bias_local diff --git a/megatron/core/transformer/moe/moe_layer.py b/megatron/core/transformer/moe/moe_layer.py index 6ed28f2bbd..c62ec32bc3 100644 --- a/megatron/core/transformer/moe/moe_layer.py +++ b/megatron/core/transformer/moe/moe_layer.py @@ -7,9 +7,9 @@ from megatron.core import parallel_state from megatron.core.transformer.mlp import MLPSubmodules from megatron.core.transformer.module import MegatronModule -from megatron.core.transformer.moe.grouped_mlp import GroupedMLP +from megatron.core.transformer.moe.experts import GroupedMLP, SwitchMLP from megatron.core.transformer.moe.router import TopKRouter -from megatron.core.transformer.moe.switch_mlp import SwitchMLP +from megatron.core.transformer.moe.token_dispatcher import MoEDroplessTokenDispatcher from megatron.core.transformer.transformer_config import TransformerConfig @@ -34,23 +34,15 @@ def __init__(self, config: TransformerConfig): ] self.router = None self.experts = None - - @abstractmethod - def initialize_experts(self): - pass - - @abstractmethod - def initialize_router(self): - pass + self.token_dispatcher = None @abstractmethod def forward(self, hidden_states): pass -class DroplessMoELayer(BaseMoELayer): - """Top-K Mixture of Experts Layer **Without Token Dropping**. - Currently supports Sinkhorn-based routing (Top-k based) and generalized Top-k routing with auxiliary loss. +class MoELayer(BaseMoELayer): + """Mixture of experts Layer **currently only supports no token dropping**. 
Args: BaseMoELayer (MegatronModule): Base class for MoE layers @@ -58,9 +50,18 @@ class DroplessMoELayer(BaseMoELayer): def __init__(self, config: TransformerConfig, submodules: MLPSubmodules = None): self.submodules = submodules - super(DroplessMoELayer, self).__init__(config=config) - self.router = self.initialize_router() - self.experts = self.initialize_experts() + super(MoELayer, self).__init__(config=config) + self.router = TopKRouter( + self.num_local_experts, self.local_expert_indices, config=self.config + ) + if self.config.moe_grouped_gemm: + self.experts = GroupedMLP(self.num_local_experts, self.config) + else: + assert isinstance(self.submodules, MLPSubmodules) + self.experts = SwitchMLP(self.num_local_experts, self.config, self.submodules) + self.token_dispatcher = MoEDroplessTokenDispatcher( + self.num_local_experts, self.local_expert_indices, config=self.config + ) assert config.moe_token_dropping is False def forward(self, hidden_states: torch.Tensor): @@ -72,9 +73,9 @@ def forward(self, hidden_states: torch.Tensor): scores, indices, global_local_map, - ) = self.router.token_dispatcher.dispatch(hidden_states, scores, indices) + ) = self.token_dispatcher.dispatch(hidden_states, scores, indices) expert_output, mlp_bias = self.experts(dispatched_input, tokens_per_expert) - output, mlp_bias = self.router.token_dispatcher.restore( + output, mlp_bias = self.token_dispatcher.restore( expert_output, scores, indices, global_local_map, mlp_bias ) @@ -82,15 +83,3 @@ def forward(self, hidden_states: torch.Tensor): mlp_bias = torch.tensor(0.0, device=hidden_states.device, dtype=hidden_states.dtype) return output, mlp_bias - - def initialize_experts(self): - if self.config.moe_grouped_gemm: - experts = GroupedMLP(self.num_local_experts, self.config) - else: - assert isinstance(self.submodules, MLPSubmodules) - experts = SwitchMLP(self.num_local_experts, self.config, self.submodules) - return experts - - def initialize_router(self): - router = TopKRouter(self.num_local_experts, self.local_expert_indices, config=self.config,) - return router diff --git a/megatron/core/transformer/moe/moe_utils.py b/megatron/core/transformer/moe/moe_utils.py index 0e9534a36e..301a2cf669 100644 --- a/megatron/core/transformer/moe/moe_utils.py +++ b/megatron/core/transformer/moe/moe_utils.py @@ -50,3 +50,49 @@ def sinkhorn(cost: torch.Tensor, tol: float = 0.0001): error = torch.mean(torch.abs(d1_old - d1)) d1_old = d1 return d1 * cost * d0.unsqueeze(1) + + +class MoEAuxLossAutoScaler(torch.autograd.Function): + """An AutoScaler that compute and scales the grad for auxiliary loss. + + """ + + main_loss_backward_scale: int = 1 + + @staticmethod + def forward(ctx, output: torch.Tensor, aux_loss: torch.Tensor): + """Preserve the aux_loss by storing it in the context to avoid garbage collection. + + Args: + output (torch.Tensor): The output tensor. + aux_loss (torch.Tensor): The auxiliary loss tensor. + + Returns: + torch.Tensor: The output tensor. + """ + ctx.save_for_backward(aux_loss) + return output + + @staticmethod + def backward(ctx, grad_output: torch.Tensor): + """Compute and scale the gradient for auxiliary loss.. + + Args: + grad_output (torch.Tensor): The gradient of the output. + + Returns: + Tuple[torch.Tensor, torch.Tensor]: The gradient of the output, scaled auxiliary loss gradient. 
+ """ + (aux_loss,) = ctx.saved_tensors + aux_loss_backward_scale = MoEAuxLossAutoScaler.main_loss_backward_scale + scaled_aux_loss_grad = torch.ones_like(aux_loss) * aux_loss_backward_scale + return grad_output, scaled_aux_loss_grad + + @staticmethod + def set_loss_scale(scale: int): + """set the scale of the aux loss. + + Args: + scale (int): The scale value to set. Please ensure that the scale passed in matches the scale of the main_loss. + """ + MoEAuxLossAutoScaler.main_loss_backward_scale = scale diff --git a/megatron/core/transformer/moe/router.py b/megatron/core/transformer/moe/router.py index 0d934cf846..8b2cb3a4ad 100644 --- a/megatron/core/transformer/moe/router.py +++ b/megatron/core/transformer/moe/router.py @@ -15,6 +15,7 @@ ) from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.moe.moe_utils import ( + MoEAuxLossAutoScaler, sinkhorn, switch_load_balancing_loss_func, z_loss_func, @@ -35,8 +36,6 @@ def __init__(self, config: TransformerConfig) -> None: super().__init__(config) self.config = config self.num_experts = self.config.num_moe_experts - # Token dispatcher for exchange tokens between experts. - self.token_dispatcher = None self.moe_aux_loss_func = None # Initialize the gate weights. @@ -91,323 +90,6 @@ def forward(self, input: torch.Tensor): return scores, indices - def apply_aux_loss( - self, - loss_func: Callable, - probs: torch.Tensor, - indices: torch.Tensor, - activation: torch.Tensor, - ): - """Applies auxiliary loss to the MoE layer. - - Args: - loss_func (callable): The loss function to be used. - probs (torch.Tensor): The probabilities output by the MoE layer. - indices (torch.Tensor): The indices of the selected experts. - activation (torch.Tensor): The activation tensor to attach the gradient function to. - - Returns: - torch.Tensor: The activation tensor with the attached gradient function. - """ - mask = torch.nn.functional.one_hot(indices, num_classes=self.num_experts).sum(dim=1) - aux_loss = loss_func(probs, mask, self.config.moe_aux_loss_coeff) - activation = MoEAuxLossAutoScaler.apply(activation, aux_loss) - return activation - - def apply_z_loss(self, logits): - """Encourages the router's logits to remain small to enhance stability. - Please refer to the ST-MoE paper (https://arxiv.org/pdf/2202.08906.pdf) for details. - - Args: - logits (torch.Tensor): The logits of the router. - - Returns: - torch.Tensor: The logits after applying the z-loss. - """ - if self.config.moe_z_loss_coeff is not None: - z_loss = z_loss_func(logits) - logits = MoEAuxLossAutoScaler.apply(logits, z_loss) - return logits - - -class MoETokenDispatcher: - """ - MoE Token Dispatcher - """ - - def __init__(self, config: TransformerConfig) -> None: - """ - Initialize the MoE Token Dispatcher. - """ - self.config = config - - @abstractmethod - def dispatch( - self, tokens: torch.Tensor, indices: torch.Tensor, - ): - """Dispatch tokens to experts. - - Args: - tokens (torch.Tensor): Input tokens. - indices (torch.Tensor): indices tensor. - - Returns: - torch.Tensor: Tokens tensor. - """ - raise NotImplementedError("Dispatch function not implemented.") - - @abstractmethod - def restore( - self, expert_output: torch.Tensor, scores: torch.Tensor, indices: torch.Tensor, - ): - """Restores the expert output to its original ordering. - - Args: - expert_output (torch.Tensor): The output tensor from the expert models. - scores (torch.Tensor): Each token's score with each expert. - indices (torch.Tensor): The indices used to reorder the expert output. 
- - Returns: - (torch.Tensor, torch.Tensor): Unpermuted activation and optional bias. - """ - raise NotImplementedError("Restore function not implemented.") - - -class MoEDroplessTokenDispatcher(MoETokenDispatcher): - """ - Token dispatcher without token dropping. - """ - - def __init__( - self, - num_local_experts: int, - local_expert_indices: List[int], - k: int, - config: TransformerConfig, - ) -> None: - """ - Initialize the zero token dropping router. - """ - super().__init__(config=config) - self.num_local_experts = num_local_experts - self.local_expert_indices = local_expert_indices - self.k = k - self.add_bias = config.add_bias_linear - - def gather_indices(self, local_indices: torch.Tensor): - """ Gather tensors and concatenate along the first dimension.""" - group = get_tensor_and_expert_parallel_group() - world_size = torch.distributed.get_world_size(group=group) - # Bypass the function if we are using only 1 GPU. - if world_size == 1: - return local_indices - - dim_size = list(local_indices.size()) - dim_size[0] = dim_size[0] * world_size - - # TODO pre allocate memory - output = torch.empty( - dim_size, dtype=local_indices.dtype, device=torch.cuda.current_device() - ) - torch.distributed._all_gather_base(output, local_indices.contiguous(), group=group) - return output - - def dispatch(self, hidden_states: torch.Tensor, max_prob: torch.Tensor, max_ind: torch.Tensor): - """Dispatch tokens to local experts. It's composed of two stages: - (1) Permute the tokens across the expert parallel devices. After this stage, - each device receives all of the tokens assigned to its local set of experts - in its local HBM. - (2) Permute the tokens locally so that they are grouped by their expert - assignment. After the stage (1), the tokens are grouped by which device - they came from. We re-order them locally for subsequent efficient computation. - - Args: - hidden_states: input tokens of shape [SeqLen/TP, MBS, HiddenSize] - - Returns: - permuted_local_hidden_states: Permutation of tokens to local experts group. - tokens_per_expert: the number of tokens each local expert to process. - indices: The indices of `local_indices` (which holds the un-sorted expert - indices of tokens that local expert can process) that give its sorted order along dim 0. - global_local_map (optional): 2D tensor. A mask of mapping between global and local tokens where each - element is True if it's between the local_expert_indices. Only useful - when cross device token permutation is enabled and **AllGahter** is performed. - """ - self.hidden_shape = hidden_states.shape - # [S/TP, B, H] -> [S*B/TP, H] - hidden_states = hidden_states.view(-1, self.hidden_shape[-1]) - - # Permute the tokens across the expert parallel devices. 
- if self.config.sequence_parallel or (self.config.expert_model_parallel_size > 1): - # [S*B/TP, H] -> [S*B, H] - global_hidden_states = tensor_parallel.gather_from_sequence_parallel_region_to_moe( - hidden_states - ) - with torch.no_grad(): - global_indices = self.gather_indices(max_ind) - # Create a mask of mapping between global and local tokens where each - # element is True if it's between the local_expert_indices - global_local_map = (global_indices >= self.local_expert_indices[0]) & ( - global_indices <= self.local_expert_indices[-1] - ) - local_indices = global_indices.masked_select(global_local_map) - if self.k > 1: # k > 1 - global_probs = self.gather_indices(max_prob) - local_probs = global_probs.masked_select(global_local_map) - else: - local_probs = max_prob - # Reshape global_local_map to be compatible with Tensor.gather - global_local_map = global_local_map.nonzero()[:, 0] - global_local_map = global_local_map.view(-1, 1).expand(-1, hidden_states.shape[-1]) - local_hidden_states = torch.gather(global_hidden_states, 0, global_local_map) - else: - if self.k > 1: - global_local_map = torch.ones_like(max_ind).bool() - local_indices = max_ind.masked_select(global_local_map) - local_probs = max_prob.masked_select(global_local_map) - global_local_map = global_local_map.nonzero()[:, 0] - global_local_map = global_local_map.view(-1, 1).expand(-1, hidden_states.shape[-1]) - local_hidden_states = torch.gather(hidden_states, 0, global_local_map) - else: - local_indices = max_ind - local_probs = max_prob - local_hidden_states = hidden_states - global_local_map = None - - with torch.no_grad(): - # The indices of local_indices that give its sorted order along dim 0. - indices = torch.argsort(local_indices, dim=0) - tokens_per_expert = torch.histc( - local_indices, - bins=self.num_local_experts, - min=self.local_expert_indices[0], - max=self.local_expert_indices[-1], - ) - tokens_per_expert = tokens_per_expert.cpu().to(torch.long) - - # Stage2: permute the tokens locally so that they are grouped by their expert assignment - # Reshape indices to be compatible with Tensor.gather - indices = indices.view(-1, 1).expand(-1, hidden_states.shape[-1]) - permuted_local_hidden_states = torch.gather(local_hidden_states, 0, indices) - return ( - permuted_local_hidden_states, - tokens_per_expert, - local_probs, - indices, - global_local_map, - ) - - def restore( - self, - hidden_states: torch.Tensor, - scores: torch.Tensor, - indices: torch.Tensor, - global_local_map: torch.Tensor = None, - bias: torch.Tensor = None, - ): - """ - Reverse process of `dispatch()` which permutes the ouput of local - experts locallay and across expert parallel rank into the original order to - produce the final output. - - Args: - hidden_states: 2D tensor of shape [sum_tokens_of_all_local_experts, HiddenSize], - ouput of local experts. - indices: 2D tensor of the indices of `local_indices` (which holds the un-sorted expert - indices of tokens that local expert can process) that give its sorted order along dim 0. - global_local_map (optional): 2D tensor, a mask of mapping between global and local tokens where each - element is True if it's between the local_expert_indices. Only useful - when cross device token permutation is enabled and **AllGahter** is performed. - - Returns: - output_total: un-permuted updated hidden states output from all local experts - with shape of [SeqLen/TP, MBS, HiddenSize] - """ - # Stage1: unpermute the tokens and bias locally respectively. 
- scores = scores.to(dtype=hidden_states.dtype) - unpermuted_local_hidden = torch.zeros_like(hidden_states) - assert indices.shape == hidden_states.shape - unpermuted_local_hidden = unpermuted_local_hidden.scatter(0, indices, hidden_states) - - # Scale the expert output prior to reduction and subsequent to local unpermutation if k > 1. - if self.k > 1: - unpermuted_local_hidden = unpermuted_local_hidden * scores.view(-1, 1) - - unpermuted_local_bias = None - if self.add_bias: - assert bias is not None - unpermuted_local_bias = torch.zeros_like(hidden_states) - assert indices.shape == bias.shape - unpermuted_local_bias = unpermuted_local_bias.scatter(0, indices, bias) - if self.k > 1: - unpermuted_local_bias = unpermuted_local_bias * scores.view(-1, 1) - - output_total = unpermuted_local_hidden - output_bias_total = unpermuted_local_bias - - # Unpermute the tokens across expert parallel devices. - if self.config.sequence_parallel or (self.config.expert_model_parallel_size > 1): - assert global_local_map is not None, "global_local_map is necessary for `AllGather`." - ep_group_size = parallel_state.get_tensor_and_expert_parallel_world_size() - # hidden_shape: [SeqLen/TP, MBS, HiddenSize], glboal_num_tokens = SeqLen/TP*MBS*(TP*EP) - global_num_tokens = self.hidden_shape[0] * self.hidden_shape[1] * ep_group_size - global_hidden_shape = [global_num_tokens, hidden_states.shape[-1]] - unpermuted_global_hidden = torch.zeros( - global_hidden_shape, dtype=hidden_states.dtype, device=torch.cuda.current_device() - ) - # Reshape global_local_map to be compatible with Tensor.scatter - assert global_local_map.shape == unpermuted_local_hidden.shape - unpermuted_global_hidden = unpermuted_global_hidden.scatter_add( - 0, global_local_map, unpermuted_local_hidden - ) - output_total = tensor_parallel.reduce_scatter_to_sequence_parallel_region_from_moe( - unpermuted_global_hidden - ) - if self.add_bias: - # Unpermute the bias across expert parallel devices. 
- unpermuted_global_bias = torch.zeros_like(unpermuted_global_hidden) - unpermuted_global_bias = unpermuted_global_bias.scatter_add( - 0, global_local_map, unpermuted_local_bias - ) - output_bias_total = tensor_parallel.reduce_scatter_to_sequence_parallel_region_from_moe( - unpermuted_global_bias - ) - # bias is duplicated across tensor parallelism ranks; - # reduce scatter reduces bias across tensor parallel_ranks - output_bias_total = ( - output_bias_total / parallel_state.get_tensor_model_parallel_world_size() - ) - else: - if self.k > 1: - global_num_tokens = self.hidden_shape[0] * self.hidden_shape[1] - global_hidden_shape = [global_num_tokens, hidden_states.shape[-1]] - unpermuted_global_hidden = torch.zeros( - global_hidden_shape, - dtype=hidden_states.dtype, - device=torch.cuda.current_device(), - ) - output_total = unpermuted_global_hidden.scatter_add( - 0, global_local_map, unpermuted_local_hidden - ) - if self.add_bias: - unpermuted_global_bias = torch.zeros_like(unpermuted_global_hidden) - output_bias_total = unpermuted_global_bias.scatter_add( - 0, global_local_map, unpermuted_local_bias - ) - - if self.k == 1: - output_total = output_total * scores - output_total = output_total.view(self.hidden_shape) - if self.add_bias: - assert output_bias_total is not None - if self.k == 1: - output_bias_total = output_bias_total * scores - output_bias_total = output_bias_total.view(self.hidden_shape) - else: - output_bias_total = None - - return output_total, output_bias_total - class TopKRouter(Router): """Route each token to the top-k experts.""" @@ -426,9 +108,6 @@ def __init__( assert config.moe_token_dropping == False self.topk = self.config.moe_router_topk self.routing_type = self.config.moe_router_load_balancing_type - self.token_dispatcher = MoEDroplessTokenDispatcher( - num_local_experts, local_expert_indices, self.topk, config - ) self.moe_aux_loss_func = switch_load_balancing_loss_func def sinkhorn_load_balancing(self, logits: torch.Tensor): @@ -471,6 +150,44 @@ def aux_loss_load_balancing(self, logits: torch.Tensor): scores = self.apply_aux_loss(self.moe_aux_loss_func, probs, indices, activation=scores) return scores, indices + def apply_aux_loss( + self, + loss_func: Callable, + probs: torch.Tensor, + indices: torch.Tensor, + activation: torch.Tensor, + ): + """Applies auxiliary loss to the MoE layer. + + Args: + loss_func (callable): The loss function to be used. + probs (torch.Tensor): The probabilities output by the MoE layer. + indices (torch.Tensor): The indices of the selected experts. + activation (torch.Tensor): The activation tensor to attach the gradient function to. + + Returns: + torch.Tensor: The activation tensor with the attached gradient function. + """ + mask = torch.nn.functional.one_hot(indices, num_classes=self.num_experts).sum(dim=1) + aux_loss = loss_func(probs, mask, self.config.moe_aux_loss_coeff) + activation = MoEAuxLossAutoScaler.apply(activation, aux_loss) + return activation + + def apply_z_loss(self, logits): + """Encourages the router's logits to remain small to enhance stability. + Please refer to the ST-MoE paper (https://arxiv.org/pdf/2202.08906.pdf) for details. + + Args: + logits (torch.Tensor): The logits of the router. + + Returns: + torch.Tensor: The logits after applying the z-loss. 
+ """ + if self.config.moe_z_loss_coeff is not None: + z_loss = z_loss_func(logits) + logits = MoEAuxLossAutoScaler.apply(logits, z_loss) + return logits + def routing(self, logits: torch.Tensor): """Top-k routing function @@ -494,49 +211,3 @@ def routing(self, logits: torch.Tensor): scores = torch.softmax(top_logits, dim=-1, dtype=torch.float32).type_as(logits) return scores, indices - - -class MoEAuxLossAutoScaler(torch.autograd.Function): - """An AutoScaler that compute and scales the grad for auxiliary loss. - - """ - - main_loss_backward_scale: int = 1 - - @staticmethod - def forward(ctx, output: torch.Tensor, aux_loss: torch.Tensor): - """Preserve the aux_loss by storing it in the context to avoid garbage collection. - - Args: - output (torch.Tensor): The output tensor. - aux_loss (torch.Tensor): The auxiliary loss tensor. - - Returns: - torch.Tensor: The output tensor. - """ - ctx.save_for_backward(aux_loss) - return output - - @staticmethod - def backward(ctx, grad_output: torch.Tensor): - """Compute and scale the gradient for auxiliary loss.. - - Args: - grad_output (torch.Tensor): The gradient of the output. - - Returns: - Tuple[torch.Tensor, torch.Tensor]: The gradient of the output, scaled auxiliary loss gradient. - """ - (aux_loss,) = ctx.saved_tensors - aux_loss_backward_scale = MoEAuxLossAutoScaler.main_loss_backward_scale - scaled_aux_loss_grad = torch.ones_like(aux_loss) * aux_loss_backward_scale - return grad_output, scaled_aux_loss_grad - - @staticmethod - def set_loss_scale(scale: int): - """set the scale of the aux loss. - - Args: - scale (int): The scale value to set. Please ensure that the scale passed in matches the scale of the main_loss. - """ - MoEAuxLossAutoScaler.main_loss_backward_scale = scale diff --git a/megatron/core/transformer/moe/switch_mlp.py b/megatron/core/transformer/moe/switch_mlp.py deleted file mode 100644 index 5e390370fd..0000000000 --- a/megatron/core/transformer/moe/switch_mlp.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. - -import numpy as np -import torch - -from megatron.core.transformer.mlp import MLP, MLPSubmodules -from megatron.core.transformer.module import MegatronModule -from megatron.core.transformer.transformer_config import TransformerConfig - - -class SwitchMLP(MegatronModule): - """ - Mixture of Experts Layer. 
Routes input to one of N MLP "experts" - """ - - def __init__(self, num_local_experts, config: TransformerConfig, submodules: MLPSubmodules): - super().__init__(config=config) - self.add_bias = config.add_bias_linear - self.num_local_experts = num_local_experts - self.local_experts = torch.nn.ModuleList() - for _ in range(self.num_local_experts): - expert = MLP(self.config, submodules, is_expert=True) - self.local_experts.append(expert) - - def forward(self, permuted_local_hidden_states, tokens_per_expert): - output_local = torch.zeros_like(permuted_local_hidden_states) - output_bias_local = None - if self.add_bias: - output_bias_local = torch.zeros_like(permuted_local_hidden_states) - - cumsum_num_tokens = torch.cumsum(tokens_per_expert, dim=0) - # Insert zero at the begining for offset index's convenience - zero_tensor = torch.zeros(1, dtype=torch.long) - cumsum_num_tokens = torch.cat((zero_tensor, cumsum_num_tokens)) - for expert_num, expert in enumerate(self.local_experts): - start = cumsum_num_tokens[expert_num] - end = cumsum_num_tokens[expert_num + 1] - hidden = permuted_local_hidden_states[start:end] - output, output_bias = expert(hidden) - - output_local[start:end] = output - if self.add_bias: - output_bias = output_bias.expand_as(output) - output_bias_local[start:end, :] = output_bias - - return output_local, output_bias_local diff --git a/megatron/core/transformer/moe/token_dispatcher.py b/megatron/core/transformer/moe/token_dispatcher.py new file mode 100644 index 0000000000..d7bce69503 --- /dev/null +++ b/megatron/core/transformer/moe/token_dispatcher.py @@ -0,0 +1,283 @@ +from abc import abstractmethod +from typing import List + +import torch + +from megatron.core import parallel_state, tensor_parallel +from megatron.core.parallel_state import get_tensor_and_expert_parallel_group +from megatron.core.transformer.transformer_config import TransformerConfig + + +class MoETokenDispatcher: + """ + MoE Token Dispatcher + """ + + def __init__(self, config: TransformerConfig) -> None: + """ + Initialize the MoE Token Dispatcher. + """ + self.config = config + + @abstractmethod + def dispatch( + self, tokens: torch.Tensor, indices: torch.Tensor, + ): + """Dispatch tokens to experts. + + Args: + tokens (torch.Tensor): Input tokens. + indices (torch.Tensor): indices tensor. + + Returns: + torch.Tensor: Tokens tensor. + """ + raise NotImplementedError("Dispatch function not implemented.") + + @abstractmethod + def restore( + self, expert_output: torch.Tensor, scores: torch.Tensor, indices: torch.Tensor, + ): + """Restores the expert output to its original ordering. + + Args: + expert_output (torch.Tensor): The output tensor from the expert models. + scores (torch.Tensor): Each token's score with each expert. + indices (torch.Tensor): The indices used to reorder the expert output. + + Returns: + (torch.Tensor, torch.Tensor): Unpermuted activation and optional bias. + """ + raise NotImplementedError("Restore function not implemented.") + + +class MoEDroplessTokenDispatcher(MoETokenDispatcher): + """ + Token dispatcher without token dropping. + """ + + def __init__( + self, num_local_experts: int, local_expert_indices: List[int], config: TransformerConfig, + ) -> None: + """ + Initialize the zero token dropping router. 
+ """ + super().__init__(config=config) + self.num_local_experts = num_local_experts + self.local_expert_indices = local_expert_indices + self.router_topk = config.moe_router_topk + self.add_bias = config.add_bias_linear + + def gather_indices(self, local_indices: torch.Tensor): + """ Gather tensors and concatenate along the first dimension.""" + group = get_tensor_and_expert_parallel_group() + world_size = torch.distributed.get_world_size(group=group) + # Bypass the function if we are using only 1 GPU. + if world_size == 1: + return local_indices + + dim_size = list(local_indices.size()) + dim_size[0] = dim_size[0] * world_size + + # TODO pre allocate memory + output = torch.empty( + dim_size, dtype=local_indices.dtype, device=torch.cuda.current_device() + ) + torch.distributed._all_gather_base(output, local_indices.contiguous(), group=group) + return output + + def dispatch(self, hidden_states: torch.Tensor, max_prob: torch.Tensor, max_ind: torch.Tensor): + """Dispatch tokens to local experts. It's composed of two stages: + (1) Permute the tokens across the expert parallel devices. After this stage, + each device receives all of the tokens assigned to its local set of experts + in its local HBM. + (2) Permute the tokens locally so that they are grouped by their expert + assignment. After the stage (1), the tokens are grouped by which device + they came from. We re-order them locally for subsequent efficient computation. + + Args: + hidden_states: input tokens of shape [SeqLen/TP, MBS, HiddenSize] + + Returns: + permuted_local_hidden_states: Permutation of tokens to local experts group. + tokens_per_expert: the number of tokens each local expert to process. + indices: The indices of `local_indices` (which holds the un-sorted expert + indices of tokens that local expert can process) that give its sorted order along dim 0. + global_local_map (optional): 2D tensor. A mask of mapping between global and local tokens where each + element is True if it's between the local_expert_indices. Only useful + when cross device token permutation is enabled and **AllGahter** is performed. + """ + self.hidden_shape = hidden_states.shape + # [S/TP, B, H] -> [S*B/TP, H] + hidden_states = hidden_states.view(-1, self.hidden_shape[-1]) + + # Permute the tokens across the expert parallel devices. 
+ if self.config.sequence_parallel or (self.config.expert_model_parallel_size > 1): + # [S*B/TP, H] -> [S*B, H] + global_hidden_states = tensor_parallel.gather_from_sequence_parallel_region_to_moe( + hidden_states + ) + with torch.no_grad(): + global_indices = self.gather_indices(max_ind) + # Create a mask of mapping between global and local tokens where each + # element is True if it's between the local_expert_indices + global_local_map = (global_indices >= self.local_expert_indices[0]) & ( + global_indices <= self.local_expert_indices[-1] + ) + local_indices = global_indices.masked_select(global_local_map) + if self.router_topk > 1: # k > 1 + global_probs = self.gather_indices(max_prob) + local_probs = global_probs.masked_select(global_local_map) + else: + local_probs = max_prob + # Reshape global_local_map to be compatible with Tensor.gather + global_local_map = global_local_map.nonzero()[:, 0] + global_local_map = global_local_map.view(-1, 1).expand(-1, hidden_states.shape[-1]) + local_hidden_states = torch.gather(global_hidden_states, 0, global_local_map) + else: + if self.router_topk > 1: + global_local_map = torch.ones_like(max_ind).bool() + local_indices = max_ind.masked_select(global_local_map) + local_probs = max_prob.masked_select(global_local_map) + global_local_map = global_local_map.nonzero()[:, 0] + global_local_map = global_local_map.view(-1, 1).expand(-1, hidden_states.shape[-1]) + local_hidden_states = torch.gather(hidden_states, 0, global_local_map) + else: + local_indices = max_ind + local_probs = max_prob + local_hidden_states = hidden_states + global_local_map = None + + with torch.no_grad(): + # The indices of local_indices that give its sorted order along dim 0. + indices = torch.argsort(local_indices, dim=0) + tokens_per_expert = torch.histc( + local_indices, + bins=self.num_local_experts, + min=self.local_expert_indices[0], + max=self.local_expert_indices[-1], + ) + tokens_per_expert = tokens_per_expert.cpu().to(torch.long) + + # Stage2: permute the tokens locally so that they are grouped by their expert assignment + # Reshape indices to be compatible with Tensor.gather + indices = indices.view(-1, 1).expand(-1, hidden_states.shape[-1]) + permuted_local_hidden_states = torch.gather(local_hidden_states, 0, indices) + return ( + permuted_local_hidden_states, + tokens_per_expert, + local_probs, + indices, + global_local_map, + ) + + def restore( + self, + hidden_states: torch.Tensor, + scores: torch.Tensor, + indices: torch.Tensor, + global_local_map: torch.Tensor = None, + bias: torch.Tensor = None, + ): + """ + Reverse process of `dispatch()` which permutes the ouput of local + experts locallay and across expert parallel rank into the original order to + produce the final output. + + Args: + hidden_states: 2D tensor of shape [sum_tokens_of_all_local_experts, HiddenSize], + ouput of local experts. + indices: 2D tensor of the indices of `local_indices` (which holds the un-sorted expert + indices of tokens that local expert can process) that give its sorted order along dim 0. + global_local_map (optional): 2D tensor, a mask of mapping between global and local tokens where each + element is True if it's between the local_expert_indices. Only useful + when cross device token permutation is enabled and **AllGahter** is performed. + + Returns: + output_total: un-permuted updated hidden states output from all local experts + with shape of [SeqLen/TP, MBS, HiddenSize] + """ + # Stage1: unpermute the tokens and bias locally respectively. 
+ scores = scores.to(dtype=hidden_states.dtype) + unpermuted_local_hidden = torch.zeros_like(hidden_states) + assert indices.shape == hidden_states.shape + unpermuted_local_hidden = unpermuted_local_hidden.scatter(0, indices, hidden_states) + + # Scale the expert output prior to reduction and subsequent to local unpermutation if k > 1. + if self.router_topk > 1: + unpermuted_local_hidden = unpermuted_local_hidden * scores.view(-1, 1) + + unpermuted_local_bias = None + if self.add_bias: + assert bias is not None + unpermuted_local_bias = torch.zeros_like(hidden_states) + assert indices.shape == bias.shape + unpermuted_local_bias = unpermuted_local_bias.scatter(0, indices, bias) + if self.router_topk > 1: + unpermuted_local_bias = unpermuted_local_bias * scores.view(-1, 1) + + output_total = unpermuted_local_hidden + output_bias_total = unpermuted_local_bias + + # Unpermute the tokens across expert parallel devices. + if self.config.sequence_parallel or (self.config.expert_model_parallel_size > 1): + assert global_local_map is not None, "global_local_map is necessary for `AllGather`." + ep_group_size = parallel_state.get_tensor_and_expert_parallel_world_size() + # hidden_shape: [SeqLen/TP, MBS, HiddenSize], glboal_num_tokens = SeqLen/TP*MBS*(TP*EP) + global_num_tokens = self.hidden_shape[0] * self.hidden_shape[1] * ep_group_size + global_hidden_shape = [global_num_tokens, hidden_states.shape[-1]] + unpermuted_global_hidden = torch.zeros( + global_hidden_shape, dtype=hidden_states.dtype, device=torch.cuda.current_device() + ) + # Reshape global_local_map to be compatible with Tensor.scatter + assert global_local_map.shape == unpermuted_local_hidden.shape + unpermuted_global_hidden = unpermuted_global_hidden.scatter_add( + 0, global_local_map, unpermuted_local_hidden + ) + output_total = tensor_parallel.reduce_scatter_to_sequence_parallel_region_from_moe( + unpermuted_global_hidden + ) + if self.add_bias: + # Unpermute the bias across expert parallel devices. 
+ unpermuted_global_bias = torch.zeros_like(unpermuted_global_hidden) + unpermuted_global_bias = unpermuted_global_bias.scatter_add( + 0, global_local_map, unpermuted_local_bias + ) + output_bias_total = tensor_parallel.reduce_scatter_to_sequence_parallel_region_from_moe( + unpermuted_global_bias + ) + # bias is duplicated across tensor parallelism ranks; + # reduce scatter reduces bias across tensor parallel_ranks + output_bias_total = ( + output_bias_total / parallel_state.get_tensor_model_parallel_world_size() + ) + else: + if self.router_topk > 1: + global_num_tokens = self.hidden_shape[0] * self.hidden_shape[1] + global_hidden_shape = [global_num_tokens, hidden_states.shape[-1]] + unpermuted_global_hidden = torch.zeros( + global_hidden_shape, + dtype=hidden_states.dtype, + device=torch.cuda.current_device(), + ) + output_total = unpermuted_global_hidden.scatter_add( + 0, global_local_map, unpermuted_local_hidden + ) + if self.add_bias: + unpermuted_global_bias = torch.zeros_like(unpermuted_global_hidden) + output_bias_total = unpermuted_global_bias.scatter_add( + 0, global_local_map, unpermuted_local_bias + ) + + if self.router_topk == 1: + output_total = output_total * scores + output_total = output_total.view(self.hidden_shape) + if self.add_bias: + assert output_bias_total is not None + if self.router_topk == 1: + output_bias_total = output_bias_total * scores + output_bias_total = output_bias_total.view(self.hidden_shape) + else: + output_bias_total = None + + return output_total, output_bias_total diff --git a/tests/unit_tests/transformer/moe/test_grouped_mlp.py b/tests/unit_tests/transformer/moe/test_grouped_mlp.py index ad5d0e817c..468a594c3e 100644 --- a/tests/unit_tests/transformer/moe/test_grouped_mlp.py +++ b/tests/unit_tests/transformer/moe/test_grouped_mlp.py @@ -7,7 +7,7 @@ from megatron.arguments import parse_args from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec -from megatron.core.transformer.moe.moe_layer import DroplessMoELayer +from megatron.core.transformer.moe.moe_layer import MoELayer from megatron.core.transformer.transformer_config import TransformerConfig from megatron.initialize import _set_random_seed from megatron.model import Float16Module @@ -38,7 +38,7 @@ def setup_method(self, method, use_cpu_initialization=False, swiglu=True): num_layers=num_layers, hidden_size=self.hidden_size, num_attention_heads=4, num_moe_experts=self.num_experts, use_cpu_initialization=self.use_cpu_initialization, add_bias_linear=False, gated_linear_unit=self.gated_linear_unit, - bias_gelu_fusion=False, + bias_activation_fusion=False, bf16=True, params_dtype=torch.bfloat16, moe_router_load_balancing_type="sinkhorn", moe_router_topk=1) self.fc1_ffn_hidden_size = tf_config.ffn_hidden_size @@ -52,7 +52,7 @@ def setup_method(self, method, use_cpu_initialization=False, swiglu=True): _set_random_seed(seed_=123, data_parallel_random_init=False) transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec( self.num_experts, moe_grouped_gemm=False) - self.switch_mlp_smm = DroplessMoELayer(tf_config, + self.switch_mlp_smm = MoELayer(tf_config, transformer_layer_spec.submodules.mlp.submodules) self.args = parse_args(ignore_unknown_args=True) @@ -66,7 +66,7 @@ def setup_method(self, method, use_cpu_initialization=False, swiglu=True): ## Grouped GEMM _set_random_seed(seed_=123, data_parallel_random_init=False) tf_config.moe_grouped_gemm = True - self.switch_mlp_gmm = DroplessMoELayer(tf_config) + self.switch_mlp_gmm = MoELayer(tf_config) 
self.switch_mlp_gmm = Float16Module(self.switch_mlp_gmm, self.args).module print("done intializing for grouped gemm") @@ -74,8 +74,8 @@ def teardown_method(self, method): Utils.destroy_model_parallel() def test_constructor(self): - assert isinstance(self.switch_mlp_smm, DroplessMoELayer) - assert isinstance(self.switch_mlp_gmm, DroplessMoELayer) + assert isinstance(self.switch_mlp_smm, MoELayer) + assert isinstance(self.switch_mlp_gmm, MoELayer) num_weights_smm = sum([p.numel() for p in self.switch_mlp_smm.parameters()]) num_weights_gmm = sum([p.numel() for p in self.switch_mlp_gmm.parameters()]) diff --git a/tests/unit_tests/transformer/moe/test_routers.py b/tests/unit_tests/transformer/moe/test_routers.py index 3e48f14095..2b857f6d65 100644 --- a/tests/unit_tests/transformer/moe/test_routers.py +++ b/tests/unit_tests/transformer/moe/test_routers.py @@ -8,7 +8,7 @@ from megatron.initialize import _set_random_seed from tests.unit_tests.test_utilities import Utils from megatron.core.transformer.transformer_config import TransformerConfig -from megatron.core.transformer.moe.moe_layer import DroplessMoELayer +from megatron.core.transformer.moe.moe_layer import MoELayer from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec @@ -31,7 +31,7 @@ def setup_method(self, method): transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec( num_experts=num_moe_experts, moe_grouped_gemm=False ) - self.switch_mlp = DroplessMoELayer( + self.switch_mlp = MoELayer( self.transformer_config, transformer_layer_spec.submodules.mlp.submodules ) self.router = self.switch_mlp.router diff --git a/tests/unit_tests/transformer/moe/test_switch_mlp.py b/tests/unit_tests/transformer/moe/test_switch_mlp.py index bc645596ed..65c02252e0 100644 --- a/tests/unit_tests/transformer/moe/test_switch_mlp.py +++ b/tests/unit_tests/transformer/moe/test_switch_mlp.py @@ -4,7 +4,7 @@ import torch -from megatron.core.transformer.moe.moe_layer import DroplessMoELayer +from megatron.core.transformer.moe.moe_layer import MoELayer from tests.unit_tests.test_utilities import Utils from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed from megatron.core.transformer.transformer_config import TransformerConfig @@ -20,13 +20,13 @@ def setup_method(self, method): transformer_config = TransformerConfig(num_layers=2, hidden_size=12, num_attention_heads=4, num_moe_experts=num_moe_experts, use_cpu_initialization=True, moe_router_load_balancing_type="sinkhorn", moe_router_topk=1) transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec( num_experts=num_moe_experts, moe_grouped_gemm=False) - self.switch_mlp = DroplessMoELayer(transformer_config, transformer_layer_spec.submodules.mlp.submodules) + self.switch_mlp = MoELayer(transformer_config, transformer_layer_spec.submodules.mlp.submodules) def teardown_method(self, method): Utils.destroy_model_parallel() def test_constructor(self): - assert isinstance(self.switch_mlp, DroplessMoELayer) + assert isinstance(self.switch_mlp, MoELayer) num_weights = sum([p.numel() for p in self.switch_mlp.parameters()]) assert num_weights == 2448 diff --git a/tests/unit_tests/transformer/moe/test_token_dispatcher.py b/tests/unit_tests/transformer/moe/test_token_dispatcher.py index cc56e0673b..1d557a42b2 100644 --- a/tests/unit_tests/transformer/moe/test_token_dispatcher.py +++ b/tests/unit_tests/transformer/moe/test_token_dispatcher.py @@ -5,6 +5,7 @@ import torch from megatron.core.transformer.moe.router import Router, TopKRouter 
+from megatron.core.transformer.moe.token_dispatcher import MoEDroplessTokenDispatcher from megatron.initialize import _set_random_seed from tests.unit_tests.test_utilities import Utils from megatron.core.transformer.transformer_config import TransformerConfig @@ -30,7 +31,9 @@ def setup_method(self, method): local_expert_indices=range(num_moe_experts), config=transformer_config, ) - self.token_dispatcher = self.router.token_dispatcher + self.token_dispatcher = MoEDroplessTokenDispatcher( + num_moe_experts, range(num_moe_experts), config=transformer_config + ) def teardown_method(self, method): Utils.destroy_model_parallel() From eb47d69d02c84acd676db74704e5bc5051063530 Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Sat, 20 Jan 2024 02:54:41 +0000 Subject: [PATCH 178/296] Update CI golden value. --- ...s_50steps_core_enabled_te_8experts2parallel_groupedGEMM.json | 2 +- ...es_50steps_core_enabled_te_8experts2parallel_top2router.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_groupedGEMM.json b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_groupedGEMM.json index e632407437..7117cde778 100644 --- a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_groupedGEMM.json +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_groupedGEMM.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.80299, 10.85298, 10.86262, 10.79516, 10.72134, 10.63641, 10.20727, 10.31594, 10.21293, 9.90292]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16202.0, 19817.0, 19787.0, 18858.0, 17645.0, 17931.0, 15872.0, 18124.0, 18472.0, 19200.0]}, "iteration_timing_avg": 0.1745276470588235} +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.8003, 10.85686, 10.86025, 10.80027, 10.71796, 10.63616, 10.20806, 10.31289, 10.2103, 9.90374]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16370.0, 19919.0, 19446.0, 18830.0, 17430.0, 18019.0, 15536.0, 18028.0, 18299.0, 19161.0]}, "iteration_timing_avg": 0.18801823529411768} diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_top2router.json b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_top2router.json index 0d167f429d..609ee21961 100644 --- a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_top2router.json +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_top2router.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.81353, 10.86326, 10.87031, 10.80095, 10.67763, 10.59016, 10.0901, 10.20222, 10.10031, 9.7697]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [62436.0, 65833.0, 65919.0, 65307.0, 63835.0, 64879.0, 63444.0, 66271.0, 66563.0, 68081.0]}, "iteration_timing_avg": 0.26249352941176474} +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.81353, 10.86306, 10.86978, 10.8003, 10.67659, 10.58919, 10.08786, 10.19866, 10.0957, 9.76239]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [62436.0, 
65688.0, 65763.0, 65321.0, 63782.0, 64892.0, 63489.0, 66207.0, 66785.0, 68431.0]}, "iteration_timing_avg": 0.25937588235294123} \ No newline at end of file From 3da7d1d5fcc26bf20740264c9463864c58afa276 Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Sat, 20 Jan 2024 03:02:39 +0000 Subject: [PATCH 179/296] Rename to token_permutation and SequentialMLP. --- megatron/core/transformer/moe/experts.py | 2 +- megatron/core/transformer/moe/moe_layer.py | 8 ++++---- megatron/core/transformer/moe/token_dispatcher.py | 10 ++++++---- .../transformer/moe/test_token_dispatcher.py | 4 ++-- 4 files changed, 13 insertions(+), 11 deletions(-) diff --git a/megatron/core/transformer/moe/experts.py b/megatron/core/transformer/moe/experts.py index ce2dfaa5c9..7ac1e7c5fd 100644 --- a/megatron/core/transformer/moe/experts.py +++ b/megatron/core/transformer/moe/experts.py @@ -140,7 +140,7 @@ def forward(self, permuted_local_hidden_states, tokens_per_expert): return fc2_output, None -class SwitchMLP(MegatronModule): +class SequentialMLP(MegatronModule): """ Mixture of Experts Layer. Routes input to one of N MLP "experts" """ diff --git a/megatron/core/transformer/moe/moe_layer.py b/megatron/core/transformer/moe/moe_layer.py index c62ec32bc3..c84b98df7f 100644 --- a/megatron/core/transformer/moe/moe_layer.py +++ b/megatron/core/transformer/moe/moe_layer.py @@ -7,7 +7,7 @@ from megatron.core import parallel_state from megatron.core.transformer.mlp import MLPSubmodules from megatron.core.transformer.module import MegatronModule -from megatron.core.transformer.moe.experts import GroupedMLP, SwitchMLP +from megatron.core.transformer.moe.experts import GroupedMLP, SequentialMLP from megatron.core.transformer.moe.router import TopKRouter from megatron.core.transformer.moe.token_dispatcher import MoEDroplessTokenDispatcher from megatron.core.transformer.transformer_config import TransformerConfig @@ -58,7 +58,7 @@ def __init__(self, config: TransformerConfig, submodules: MLPSubmodules = None): self.experts = GroupedMLP(self.num_local_experts, self.config) else: assert isinstance(self.submodules, MLPSubmodules) - self.experts = SwitchMLP(self.num_local_experts, self.config, self.submodules) + self.experts = SequentialMLP(self.num_local_experts, self.config, self.submodules) self.token_dispatcher = MoEDroplessTokenDispatcher( self.num_local_experts, self.local_expert_indices, config=self.config ) @@ -73,9 +73,9 @@ def forward(self, hidden_states: torch.Tensor): scores, indices, global_local_map, - ) = self.token_dispatcher.dispatch(hidden_states, scores, indices) + ) = self.token_dispatcher.token_permutation(hidden_states, scores, indices) expert_output, mlp_bias = self.experts(dispatched_input, tokens_per_expert) - output, mlp_bias = self.token_dispatcher.restore( + output, mlp_bias = self.token_dispatcher.token_unpermutation( expert_output, scores, indices, global_local_map, mlp_bias ) diff --git a/megatron/core/transformer/moe/token_dispatcher.py b/megatron/core/transformer/moe/token_dispatcher.py index d7bce69503..c802adaeb9 100644 --- a/megatron/core/transformer/moe/token_dispatcher.py +++ b/megatron/core/transformer/moe/token_dispatcher.py @@ -20,7 +20,7 @@ def __init__(self, config: TransformerConfig) -> None: self.config = config @abstractmethod - def dispatch( + def token_permutation( self, tokens: torch.Tensor, indices: torch.Tensor, ): """Dispatch tokens to experts. 
@@ -35,7 +35,7 @@ def dispatch( raise NotImplementedError("Dispatch function not implemented.") @abstractmethod - def restore( + def token_unpermutation( self, expert_output: torch.Tensor, scores: torch.Tensor, indices: torch.Tensor, ): """Restores the expert output to its original ordering. @@ -86,7 +86,9 @@ def gather_indices(self, local_indices: torch.Tensor): torch.distributed._all_gather_base(output, local_indices.contiguous(), group=group) return output - def dispatch(self, hidden_states: torch.Tensor, max_prob: torch.Tensor, max_ind: torch.Tensor): + def token_permutation( + self, hidden_states: torch.Tensor, max_prob: torch.Tensor, max_ind: torch.Tensor + ): """Dispatch tokens to local experts. It's composed of two stages: (1) Permute the tokens across the expert parallel devices. After this stage, each device receives all of the tokens assigned to its local set of experts @@ -171,7 +173,7 @@ def dispatch(self, hidden_states: torch.Tensor, max_prob: torch.Tensor, max_ind: global_local_map, ) - def restore( + def token_unpermutation( self, hidden_states: torch.Tensor, scores: torch.Tensor, diff --git a/tests/unit_tests/transformer/moe/test_token_dispatcher.py b/tests/unit_tests/transformer/moe/test_token_dispatcher.py index 1d557a42b2..40b49d0d75 100644 --- a/tests/unit_tests/transformer/moe/test_token_dispatcher.py +++ b/tests/unit_tests/transformer/moe/test_token_dispatcher.py @@ -56,9 +56,9 @@ def test_gpu_forward(self): local_probs, revert_indices, global_local_map, - ) = self.token_dispatcher.dispatch(hidden_states, scores, indices) + ) = self.token_dispatcher.token_permutation(hidden_states, scores, indices) probs = torch.ones_like(local_probs) / 2 - restored_hidden_states, restored_bias = self.token_dispatcher.restore( + restored_hidden_states, restored_bias = self.token_dispatcher.token_unpermutation( permuted_local_hidden_states, probs, revert_indices, From 2afee765fde96fe4b870bf7c64a76c60b800e04d Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Sun, 21 Jan 2024 04:50:27 +0000 Subject: [PATCH 180/296] Code clean. 
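
With the renames from the previous patch, `MoELayer.forward` is a straight composition of router → `token_permutation` → experts → `token_unpermutation`. The plain-PyTorch sketch below mirrors that dropless flow for the single-device, top-1 case; the tensor sizes and the `* (e + 1)` stand-in for the expert MLPs are illustrative assumptions, not code from this series:

```python
import torch

num_experts, hidden = 4, 8
tokens = torch.randn(10, hidden)                    # flattened [num_tokens, hidden] activations
expert_ids = torch.randint(0, num_experts, (10,))   # top-1 expert index per token (router output)

# token_permutation: sort tokens by expert id and count tokens per expert.
order = torch.argsort(expert_ids)
tokens_per_expert = torch.histc(
    expert_ids.float(), bins=num_experts, min=0, max=num_experts - 1
).long()
permuted = tokens[order]

# SequentialMLP-style loop: each expert sees a contiguous slice of the permuted tokens.
offsets = torch.cat((torch.zeros(1, dtype=torch.long), tokens_per_expert.cumsum(0)))
expert_out = torch.empty_like(permuted)
for e in range(num_experts):
    start, end = offsets[e], offsets[e + 1]
    expert_out[start:end] = permuted[start:end] * (e + 1)  # stand-in for expert e's MLP

# token_unpermutation: scatter expert outputs back into the original token order.
restored = torch.empty_like(expert_out)
restored[order] = expert_out
assert torch.allclose(restored, tokens * (expert_ids + 1).unsqueeze(1))
```

In the real dispatcher, `gather_indices` and `global_local_map` additionally handle the cross-device permutation over the tensor-and-expert-parallel group before this local sort, and `token_unpermutation` applies the router scores and reduce-scatters the result back.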
--- megatron/arguments.py | 65 +++++-------------- megatron/core/transformer/moe/experts.py | 10 +-- megatron/core/transformer/moe/moe_layer.py | 5 -- megatron/core/transformer/moe/router.py | 2 +- .../transformer/moe/test_routers.py | 2 +- 5 files changed, 26 insertions(+), 58 deletions(-) diff --git a/megatron/arguments.py b/megatron/arguments.py index 4fd71890b5..8d7836f7ca 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -1157,8 +1157,6 @@ def _add_distributed_args(parser): 'affects the encoder embedding.)') group.add_argument('--use-distributed-optimizer', action='store_true', help='Use distributed optimizer.') - group.add_argument('--expert-model-parallel-size', type=int, default=1, - help='Degree of expert model parallelism.') group.add_argument('--context-parallel-size', type=int, default=1, help='Degree of context parallelism.') group.add_argument('--nccl-communicator-config-path', type=str, default=None, @@ -1375,7 +1373,6 @@ def _add_vision_args(parser): group.add_argument('--swin-backbone-type', type=str, default='tiny', choices=['tiny', 'base', 'h3'], help='pretraining objectives') - # inpainting arguments group.add_argument('--mask-type', type=str, default='random', choices=['random', 'row'], @@ -1409,50 +1406,24 @@ def _add_vision_args(parser): def _add_moe_args(parser): group = parser.add_argument_group(title="moe") - # general moe arguements - group.add_argument( - '--num-experts', type=int, default=None, help='Number of Experts in MoE (None means no MoE)' - ) - group.add_argument( - '--moe-router-load-balancing-type', - type=str, - choices=['aux_loss', 'sinkhorn', None], - default='aux_loss', - help='Determines the load balancing strategy for the router. "aux_loss" corresponds to the load balancing loss used in GShard and SwitchTransformer, "sinkhorn" corresponds to the balancing algorithm used in S-BASE, and "None" implies no load balancing. The default is "aux_loss".', - ) - group.add_argument( - '--moe-router-topk', - type=int, - default=2, - help='Number of experts to route to for each token. The default is 2.', - ) - group.add_argument( - '--moe-grouped-gemm', - action='store_true', - help='When there are multiple experts per rank, compress ' - 'multiple local (potentially small) gemms in a single kernel ' - 'launch to improve the utilization and performance by ' - 'leveraging the Grouped GEMM feature introduced since ' - 'CUTLASS 2.8 (https://github.com/fanshiqing/grouped_gemm).', - ) - group.add_argument( - '--moe-aux-loss-coeff', - type=float, - default=0.0, - help='Scaling coefficient for the aux loss: a starting value of 1e-2 is recommended.', - ) - group.add_argument( - '--moe-z-loss-coeff', - type=float, - default=None, - help='Scaling coefficient for the z-loss: a starting value of 1e-3 is recommended.', - ) - group.add_argument( - '--moe-token-dropping', - action='store_true', - help='This feature involves selectively dropping and padding tokens for each expert to achieve a specified capacity, similar to GShard, Switch-Transformer, and DeepSpeed-MoE. Note: Currently unsupported.', - ) - # zero token drop moe arguments + group.add_argument('--expert-model-parallel-size', type=int, default=1, + help='Degree of expert model parallelism.') + group.add_argument('--num-experts', type=int, default=None, + help='Number of Experts in MoE (None means no MoE)') + group.add_argument('--moe-router-load-balancing-type', type=str, + choices=['aux_loss', 'sinkhorn', None], + default='aux_loss', + help='Determines the load balancing strategy for the router. 
"aux_loss" corresponds to the load balancing loss used in GShard and SwitchTransformer, "sinkhorn" corresponds to the balancing algorithm used in S-BASE, and "None" implies no load balancing. The default is "aux_loss".') + group.add_argument('--moe-router-topk', type=int, default=2, + help='Number of experts to route to for each token. The default is 2.') + group.add_argument('--moe-grouped-gemm', action='store_true', + help='When there are multiple experts per rank, compress multiple local (potentially small) gemms in a single kernel launch to improve the utilization and performance by leveraging the Grouped GEMM feature introduced since CUTLASS 2.8 (https://github.com/fanshiqing/grouped_gemm).') + group.add_argument('--moe-aux-loss-coeff', type=float, default=0.0, + help='Scaling coefficient for the aux loss: a starting value of 1e-2 is recommended.') + group.add_argument('--moe-z-loss-coeff', type=float, default=None, + help='Scaling coefficient for the z-loss: a starting value of 1e-3 is recommended.') + group.add_argument('--moe-token-dropping', action='store_true', + help='This feature involves selectively dropping and padding tokens for each expert to achieve a specified capacity, similar to GShard, Switch-Transformer, and DeepSpeed-MoE. Note: Currently unsupported.') return parser diff --git a/megatron/core/transformer/moe/experts.py b/megatron/core/transformer/moe/experts.py index 7ac1e7c5fd..cc8afcd322 100644 --- a/megatron/core/transformer/moe/experts.py +++ b/megatron/core/transformer/moe/experts.py @@ -17,8 +17,9 @@ class GroupedMLP(MegatronModule): - """ - Experts Layer with Grouped GEMM. Routes input to one of N MLP "experts" + """An efficient implementation of the Experts layer using CUTLASS GroupedGEMM. + + This class is designed to execute multiple experts in parallel, thereby maximizing computational efficiency. """ def __init__(self, num_local_experts: int, config: TransformerConfig): @@ -141,8 +142,9 @@ def forward(self, permuted_local_hidden_states, tokens_per_expert): class SequentialMLP(MegatronModule): - """ - Mixture of Experts Layer. Routes input to one of N MLP "experts" + """An implementation of the Experts layer using a sequence of MLP layers. + + This class executes each expert sequentially. 
""" def __init__(self, num_local_experts, config: TransformerConfig, submodules: MLPSubmodules): diff --git a/megatron/core/transformer/moe/moe_layer.py b/megatron/core/transformer/moe/moe_layer.py index c84b98df7f..fe89d64766 100644 --- a/megatron/core/transformer/moe/moe_layer.py +++ b/megatron/core/transformer/moe/moe_layer.py @@ -62,7 +62,6 @@ def __init__(self, config: TransformerConfig, submodules: MLPSubmodules = None): self.token_dispatcher = MoEDroplessTokenDispatcher( self.num_local_experts, self.local_expert_indices, config=self.config ) - assert config.moe_token_dropping is False def forward(self, hidden_states: torch.Tensor): # process MoE @@ -78,8 +77,4 @@ def forward(self, hidden_states: torch.Tensor): output, mlp_bias = self.token_dispatcher.token_unpermutation( expert_output, scores, indices, global_local_map, mlp_bias ) - - if mlp_bias is None: - mlp_bias = torch.tensor(0.0, device=hidden_states.device, dtype=hidden_states.dtype) - return output, mlp_bias diff --git a/megatron/core/transformer/moe/router.py b/megatron/core/transformer/moe/router.py index 8b2cb3a4ad..c9ec950d19 100644 --- a/megatron/core/transformer/moe/router.py +++ b/megatron/core/transformer/moe/router.py @@ -105,7 +105,7 @@ def __init__( config (TransformerConfig): The configuration for the transformer model. """ super().__init__(config=config) - assert config.moe_token_dropping == False + assert config.moe_token_dropping is False self.topk = self.config.moe_router_topk self.routing_type = self.config.moe_router_load_balancing_type self.moe_aux_loss_func = switch_load_balancing_loss_func diff --git a/tests/unit_tests/transformer/moe/test_routers.py b/tests/unit_tests/transformer/moe/test_routers.py index 2b857f6d65..fb6668ddf1 100644 --- a/tests/unit_tests/transformer/moe/test_routers.py +++ b/tests/unit_tests/transformer/moe/test_routers.py @@ -12,7 +12,7 @@ from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec -class TestDroplessTop2Router: +class TestTop2Router: def setup_method(self, method): Utils.initialize_model_parallel(1, 1) _set_random_seed(seed_=123, data_parallel_random_init=False) From aed469faaab91ff2d9e7fd3b73776b60065f1416 Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Mon, 22 Jan 2024 11:24:12 +0000 Subject: [PATCH 181/296] Fix CI, Code clean and add readme. 
--- .gitlab-ci.yml | 2 +- megatron/arguments.py | 6 +- megatron/core/transformer/moe/README.md | 184 ++++++++++++++++++ megatron/core/transformer/moe/moe_utils.py | 4 +- megatron/core/transformer/moe/router.py | 3 +- ...bled_te_8experts2parallel_groupedGEMM.json | 2 +- ...abled_te_8experts2parallel_top2router.json | 2 +- 7 files changed, 193 insertions(+), 10 deletions(-) create mode 100644 megatron/core/transformer/moe/README.md diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index cc5d00c8b7..b9b7eda180 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -595,7 +595,7 @@ train.te_core_moe_gpt3.345m_tp2_pp1_8experts2parallel_top2_1node_50steps: MOE_GROUPED_GEMM: 1 TEST_LEVEL: MR_TESTS METADATA: "te_8experts2parallel_top2router" - ADDITIONAL_PARAMS: "--disable-bias-linear --sequence-parallel --num-experts 8 --expert-model-parallel-size 2 --moe-router-load-balancing-type "aux_loss" --moe-router-topk 2 --moe-aux-loss-coeff 1e-2" + ADDITIONAL_PARAMS: "--disable-bias-linear --sequence-parallel --num-experts 8 --expert-model-parallel-size 2 --moe-router-load-balancing-type aux_loss --moe-router-topk 2 --moe-aux-loss-coeff 1e-2" train.moe_gpt3.345m_tp2_pp2_4experts_1node_50steps: <<: *selene-test-launcher diff --git a/megatron/arguments.py b/megatron/arguments.py index 8d7836f7ca..8d3c2cec12 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -397,6 +397,9 @@ def validate_args(args, defaults={}): # MoE Spec check if args.num_experts is not None: assert args.spec is None, "Model Spec must be None when using MoEs" + if args.tensor_model_parallel_size > 1: + assert args.sequence_parallel, \ + "When using MoE and tensor parallelism, sequence parallelism must be used." # Expert parallelism check if args.expert_model_parallel_size > 1: @@ -405,9 +408,6 @@ def validate_args(args, defaults={}): "Number of experts should be a multiple of expert model parallel_size." assert not args.fp16, \ "Expert parallelism is not supported with fp16 training." - if args.tensor_model_parallel_size > 1: - assert args.sequence_parallel, \ - "When using expert parallelism and tensor parallelism, sequence parallelism must be used." # Print arguments. _print_args("arguments", args) diff --git a/megatron/core/transformer/moe/README.md b/megatron/core/transformer/moe/README.md new file mode 100644 index 0000000000..fad581695b --- /dev/null +++ b/megatron/core/transformer/moe/README.md @@ -0,0 +1,184 @@ +# Megatron Core MoE Key Features + +### Parallelism + +- **Expert Parallel** + - A specific method of parallelism for MoE models, where experts are partitioned onto different workers and each worker processes a different batch of training samples, each worker process one or more experts for each MoE layer. +- **3D Parallel**: Data Parallel , Tensor Parallel, Pipeline Parallel, Sequence Parallel + - Note: When using MoE and tensor parallelism, sequence parallelism must be used. +- **Richer parallel mappings**: EP can be combined with DP/TP/PP/SP for handling larger MoE variants. +- **Distributed optimizer.** + +### Router and Load Balancing + +- Router type: + - Top-K router + - Expert Choice router (coming soon) +- Load Balancing algorithms: + - Sinkhorn (S-BASE) + - Z-Loss + - Aux loss / Load balancing loss + +### Performance Optimizations + +- GroupedGEMM when num local experts > 1 + - Supported dtype: fp32/bf16/fp16 +- Token permutation / unpermutation fusion +- Fused Sinkhorn Kernel + +### Token Dispatch Mechanism + +- Dropless / No token drop. +- Token drop. 
(coming soon) + +### Ease of use +- Checkpoint converter (coming soon) + +## Upcoming features + +- Context Parallel with MoE +- FP8 training support +- Enable ’--tp-comm-overlap‘ for MoE + +# User Guide + +### MoE Related Arguments + +| Item | Description | +| --- | --- | +| num-experts | Number of Experts in MoE (None means no MoE) | +| expert-model-parallel-size | Degree of expert model parallelism. | +| moe-grouped-gemm | When there are multiple experts per rank, compress multiple local gemms into a single kernel launch to improve the utilization and performance by leveraging the Grouped GEMM feature introduced since CUTLASS 2.8 | +| moe-router-load-balancing-type | Determines the load balancing strategy for the router. "aux_loss" corresponds to the load balancing loss used in GShard and SwitchTransformer, "sinkhorn" corresponds to the balancing algorithm used in S-BASE, and "None" implies no load balancing. The default is "aux_loss". | +| moe-router-topk | Number of experts to route to for each token. The default is 2. | +| moe-aux-loss-coeff | Scaling coefficient for the aux loss: a starting value of 1e-2 is recommended. | +| moe-z-loss-coeff | Scaling coefficient for the z-loss: a starting value of 1e-3 is recommended. | +| moe-token-dropping | This feature involves selectively dropping and padding tokens for each expert to achieve a specified capacity, similar to GShard, Switch-Transformer, and DeepSpeed-MoE. Note: Currently unsupported. | + +### Example + +To train a top-2 MoE model with an auxiliary loss, include the following arguments: + +```python +--num-experts 8 +--expert-model-parallel-size 8 +--moe-router-load-balancing-type aux_loss # options: aux_loss, sinkhorn, None. Default is sinkhorn1. +--moe-router-topk 2 +--moe-aux-loss-coeff 1e-2 +``` +## A detailed MoE script: +
+Click here. + +```python +#!/bin/bash + +# Runs Mixtral 8x7B model on 16 A100 GPUs + +export CUDA_DEVICE_MAX_CONNECTIONS=1 + +GPUS_PER_NODE=8 +# Change for multinode config +MASTER_ADDR=${MASTER_ADDR:-"localhost"} +MASTER_PORT=${MASTER_PORT:-"6000"} +NNODES=${NNODES:-"1"} +NODE_RANK=${RANK:-"0"} +WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES)) + +CHECKPOINT_PATH=$1 +TOKENIZER_MODEL=$2 +DATA_PATH=$3 + +DISTRIBUTED_ARGS=( + --nproc_per_node $GPUS_PER_NODE + --nnodes $NNODES + --node_rank $NODE_RANK + --master_addr $MASTER_ADDR + --master_port $MASTER_PORT +) + +MODEL_ARGS=( + --use-mcore-models + --disable-bias-linear + --seq-length 2048 + --max-position-embeddings 32768 + --num-layers 32 + --hidden-size 4096 + --ffn-hidden-size 14336 + --num-attention-heads 32 + --init-method-std 0.01 + --attention-dropout 0.0 + --hidden-dropout 0.0 + --normalization RMSNorm + --position-embedding-type rope + --swiglu + --untie-embeddings-and-output-weights + --group-query-attention + --num-query-groups 8 + --no-masked-softmax-fusion + --no-position-embedding +) + +MOE_ARGS=( + --num-experts 8 + --moe-router-load-balancing-type aux_loss # options: aux_loss, sinkhorn, None. Default is aux_loss. + --moe-router-topk 2 + --moe-aux-loss-coeff 1e-2 +) + +DATA_ARGS=( + --tokenizer-type Llama2Tokenizer + --tokenizer-model ${TOKENIZER_MODEL} + --data-path $DATA_PATH + --split 99990,8,2 +) + +TRAINING_ARGS=( + --micro-batch-size 1 + --global-batch-size 128 + --lr 1e-4 + --train-iters 500000 + --lr-decay-iters 320000 + --lr-decay-style cosine + --min-lr 1.0e-5 + --weight-decay 0.1 + --lr-warmup-iters 500 + --clip-grad 1.0 + --bf16 +) + +MODEL_PARALLEL_ARGS=( + --tensor-model-parallel-size 4 + --pipeline-model-parallel-size 1 + --expert-model-parallel-size 4 + --sequence-parallel +) + +LOGGING_ARGS=( + --log-interval 1 \ + --save-interval 10000 \ + --eval-interval 1000 \ + --eval-iters 10 \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --tensorboard-dir "${CHECKPOINT_PATH}/tensorboard" \ + --no-load-optim \ + --no-load-rng +) + +if [ -n "${WANDB_API_KEY}" ]; then + LOGGING_ARGS+=( + --wandb-project ${WANDB_PROJECT:-"Mixtral-Finetuning"} + --wandb-exp-name ${WANDB_NAME:-"Mixtral_8x7B"} + ) +fi + +torchrun ${DISTRIBUTED_ARGS[@]} pretrain_gpt.py \ + ${MODEL_ARGS[@]} \ + ${MOE_ARGS[@]} \ + ${DATA_ARGS[@]} \ + ${TRAINING_ARGS[@]} \ + ${MODEL_PARALLEL_ARGS[@]} \ + ${LOGGING_ARGS[@]} +``` +
\ No newline at end of file diff --git a/megatron/core/transformer/moe/moe_utils.py b/megatron/core/transformer/moe/moe_utils.py index 301a2cf669..52712d5155 100644 --- a/megatron/core/transformer/moe/moe_utils.py +++ b/megatron/core/transformer/moe/moe_utils.py @@ -20,7 +20,7 @@ def switch_load_balancing_loss_func(gates, mask, moe_aux_loss_coeff): return aux_loss -def z_loss_func(logits): +def z_loss_func(logits, z_loss_coeff): """Encourages the router's logits to remain small to enhance stability. Please refer to the ST-MoE paper (https://arxiv.org/pdf/2202.08906.pdf) for details. @@ -31,7 +31,7 @@ def z_loss_func(logits): torch.Tensor: The logits after applying the z-loss. """ - z_loss = torch.mean(torch.square(torch.logsumexp(logits, dim=-1))) + z_loss = torch.mean(torch.square(torch.logsumexp(logits, dim=-1))) * z_loss_coeff return z_loss diff --git a/megatron/core/transformer/moe/router.py b/megatron/core/transformer/moe/router.py index c9ec950d19..e6b8c6b74e 100644 --- a/megatron/core/transformer/moe/router.py +++ b/megatron/core/transformer/moe/router.py @@ -42,7 +42,6 @@ def __init__(self, config: TransformerConfig) -> None: self.weight = torch.nn.Parameter( torch.empty((self.config.num_moe_experts, self.config.hidden_size)) ) - torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5)) with get_cuda_rng_tracker().fork(get_data_parallel_rng_tracker_name()): config.init_method(self.weight) setattr(self.weight, 'sequence_parallel', config.sequence_parallel) @@ -184,7 +183,7 @@ def apply_z_loss(self, logits): torch.Tensor: The logits after applying the z-loss. """ if self.config.moe_z_loss_coeff is not None: - z_loss = z_loss_func(logits) + z_loss = z_loss_func(logits, self.config.moe_z_loss_coeff) logits = MoEAuxLossAutoScaler.apply(logits, z_loss) return logits diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_groupedGEMM.json b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_groupedGEMM.json index 7117cde778..2e759bef60 100644 --- a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_groupedGEMM.json +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_groupedGEMM.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.8003, 10.85686, 10.86025, 10.80027, 10.71796, 10.63616, 10.20806, 10.31289, 10.2103, 9.90374]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16370.0, 19919.0, 19446.0, 18830.0, 17430.0, 18019.0, 15536.0, 18028.0, 18299.0, 19161.0]}, "iteration_timing_avg": 0.18801823529411768} +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.80299, 10.85298, 10.86262, 10.79516, 10.72134, 10.63641, 10.20727, 10.31594, 10.21293, 9.90292]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16202.0, 19817.0, 19787.0, 18858.0, 17645.0, 17931.0, 15872.0, 18124.0, 18472.0, 19200.0]}, "iteration_timing_avg": 0.176695} \ No newline at end of file diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_top2router.json b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_top2router.json index 609ee21961..c5f9203a92 100644 --- 
a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_top2router.json +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_top2router.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.81353, 10.86306, 10.86978, 10.8003, 10.67659, 10.58919, 10.08786, 10.19866, 10.0957, 9.76239]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [62436.0, 65688.0, 65763.0, 65321.0, 63782.0, 64892.0, 63489.0, 66207.0, 66785.0, 68431.0]}, "iteration_timing_avg": 0.25937588235294123} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.80968, 10.86879, 10.86821, 10.8024, 10.67623, 10.58875, 10.0839, 10.19807, 10.09912, 9.76346]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [62498.0, 65685.0, 65926.0, 65244.0, 64040.0, 64832.0, 63529.0, 66406.0, 66810.0, 68223.0]}, "iteration_timing_avg": 0.2556055882352941} \ No newline at end of file From f1b6c966164fcfb73f53e2f58ef412ecd2f40150 Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Mon, 22 Jan 2024 11:33:52 +0000 Subject: [PATCH 182/296] Add input jitter. --- megatron/arguments.py | 2 ++ megatron/core/transformer/moe/router.py | 24 +++++++++++++++++++ .../core/transformer/transformer_config.py | 2 ++ 3 files changed, 28 insertions(+) diff --git a/megatron/arguments.py b/megatron/arguments.py index 8d3c2cec12..154ef55608 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -1422,6 +1422,8 @@ def _add_moe_args(parser): help='Scaling coefficient for the aux loss: a starting value of 1e-2 is recommended.') group.add_argument('--moe-z-loss-coeff', type=float, default=None, help='Scaling coefficient for the z-loss: a starting value of 1e-3 is recommended.') + group.add_argument('--moe-input-jitter-eps', type=float, default=None, + help='Add noise to the input tensor by applying jitter with a specified epsilon value.') group.add_argument('--moe-token-dropping', action='store_true', help='This feature involves selectively dropping and padding tokens for each expert to achieve a specified capacity, similar to GShard, Switch-Transformer, and DeepSpeed-MoE. Note: Currently unsupported.') diff --git a/megatron/core/transformer/moe/router.py b/megatron/core/transformer/moe/router.py index e6b8c6b74e..39291faacf 100644 --- a/megatron/core/transformer/moe/router.py +++ b/megatron/core/transformer/moe/router.py @@ -186,6 +186,27 @@ def apply_z_loss(self, logits): z_loss = z_loss_func(logits, self.config.moe_z_loss_coeff) logits = MoEAuxLossAutoScaler.apply(logits, z_loss) return logits + + def apply_input_jitter(self, input: torch.Tensor): + """Add noise to the input tensor. + Refer to https://arxiv.org/abs/2101.03961. + + Args: + input (Tensor): Input tensor. + + Returns: + Tensor: Jittered input. + """ + if self.config.moe_input_jitter_eps is not None: + eps = self.config.moe_input_jitter_eps + if self.input_jitter is None: + self.input_jitter = torch.distributions.uniform.Uniform( + torch.tensor(1.0 - eps, device=input.device), + torch.tensor(1.0 + eps, device=input.device), + ).rsample + return input * self.input_jitter(input.shape) + else: + return input def routing(self, logits: torch.Tensor): """Top-k routing function @@ -197,8 +218,11 @@ def routing(self, logits: torch.Tensor): Tuple[torch.Tensor, torch.Tensor]: Probs and the indices tensor. 
""" logits = logits.view(-1, self.config.num_moe_experts) + # Apply Z-Loss logits = self.apply_z_loss(logits) + # Apply input jitter + logits = self.apply_input_jitter(logits) if self.routing_type == "sinkhorn": scores, indices = self.sinkhorn_load_balancing(logits) diff --git a/megatron/core/transformer/transformer_config.py b/megatron/core/transformer/transformer_config.py index 9bbf2eb0ab..af34ac87be 100644 --- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -64,6 +64,7 @@ class TransformerConfig(ModelParallelConfig): gemms in a single kernel launch to improve the utilization and performance by leveraging the Grouped GEMM feature introduced since CUTLASS 2.8 (https://github.com/fanshiqing/grouped_gemm). moe_aux_loss_coeff (float): Scaling coefficient for the aux loss: a starting value of 1e-2 is recommended. moe_z_loss_coeff (float): Scaling coefficient for the z-loss: a starting value of 1e-3 is recommended. + moe_input_jitter_eps (float): Add noise to the input tensor by applying jitter with a specified epsilon value. moe_token_dropping (bool): This feature involves selectively dropping and padding tokens for each expert to achieve a specified capacity, similar to GShard, Switch-Transformer, and DeepSpeed-MoE. Note: Currently unsupported. """ @@ -139,6 +140,7 @@ class TransformerConfig(ModelParallelConfig): moe_grouped_gemm: bool = False moe_aux_loss_coeff: float = 0 # 1e-2 would be a good start value for load balance loss. moe_z_loss_coeff: float = None # 1e-3 would be a good start value for z-loss + moe_input_jitter_eps: float = None moe_token_dropping: bool = False # TODO: Support token dropping. def __post_init__(self): From f24abd1b57e3a6428d56278950c18e49a899c397 Mon Sep 17 00:00:00 2001 From: Selvaraj Anandaraj Date: Mon, 22 Jan 2024 12:18:50 -0800 Subject: [PATCH 183/296] Moved offloading configs to Model parallel config from TF config Signed-off-by: Selvaraj Anandaraj --- megatron/core/model_parallel_config.py | 15 +++++++++++++++ megatron/core/transformer/transformer_config.py | 11 ----------- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/megatron/core/model_parallel_config.py b/megatron/core/model_parallel_config.py index f9590615dc..2b07cdcd23 100644 --- a/megatron/core/model_parallel_config.py +++ b/megatron/core/model_parallel_config.py @@ -149,6 +149,14 @@ class ModelParallelConfig: to make sure calling barrier with their timers will not result in hangs. This can happen if for example the user adds a level 1 timer that is not called by all ranks. Defaults to True. + CPU Offloading + -------------- + + cpu_offloading (bool): When set to True, all the activations are offloaded to the CPU asynchronously. Defaults to True. + cpu_offloading_num_layers (int): Tells the number of transformer layers for which activations has to be offloaded. Defaults to 0. + cpu_offloading_activations (bool): If True, offloads the activations to CPU. Defaults to True. + cpu_offloading_weights (bool): If True, offloads the weights to CPU. Defaults to True. + """ # Model parallelism @@ -202,6 +210,13 @@ class ModelParallelConfig: param_sync_func: Callable = None pipeline_model_parallel_split_rank: Optional[int] = None + #CPU Offloading + cpu_offloading: bool = False + cpu_offloading_num_layers: int = 0 + _cpu_offloading_context: ContextManager = None # Used for internal use only, not to be set by the user. TODO: Need to move to the 'right' place when possible. 
+ cpu_offloading_activations: bool = True + cpu_offloading_weights: bool = True + # Timing barrier_with_L1_time: bool = True diff --git a/megatron/core/transformer/transformer_config.py b/megatron/core/transformer/transformer_config.py index 74a472da01..162e5c7d8c 100644 --- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -51,10 +51,6 @@ class TransformerConfig(ModelParallelConfig): fp8_amax_history_len (int): The length of the amax history window used for scaling factor computation. fp8_amax_compute_algo (str): Algorithm used for choosing the `amax` value for the scaling factor computation. There are 2 predefined choices: `max` chooses the largest `amax` in the history window, while `most_recent` always chooses the most recently seen value. fp8_wgrad (bool): When set to False, override FP8 config options and do the wgrad computation in higher precision. Defaults to True. - cpu_offloading (bool): When set to True, all the activations are offloaded to the CPU asynchronously - cpu_offloading_num_layers (int): Tells the number of transformer layers for which activations has to be offloaded. - cpu_offloading_activations (bool): If True, offloads the activations to CPU - cpu_offloading_weights (bool): If True, offloads the weights to CPU clone_scatter_output_in_embedding (bool): When set to true, clone the output of scatter_to_sequence_parallel_region in embedding layer to facilitate garbage collection of input. normalization (str): Swtich b/w `LayerNorm` and `RMSNorm` as normalization layers. For now, these are primarily used by Transformer-Engine's layers like `LayerNormLinear`. Default value is `LayerNorm`. window_size ((int,int) or None): If not None, then will use sliding window attention. The size of the window is specified by the numbers inside the tuple; -1 is special value meaning "infinite window size". @@ -115,13 +111,6 @@ class TransformerConfig(ModelParallelConfig): fp8_amax_compute_algo: str = "most_recent" fp8_wgrad: bool = True - # cpu offload - cpu_offloading: bool = False - cpu_offloading_num_layers: int = 0 - _cpu_offloading_context: ContextManager = None # Used for internal use only, not to be set by the user. TODO: Need to move to the 'right' place when possible. - cpu_offloading_activations: bool = True - cpu_offloading_weights: bool = True - # miscellaneous clone_scatter_output_in_embedding: bool = True From 288134e315c7cf1c8f6ecde4a98d269a2798235c Mon Sep 17 00:00:00 2001 From: Selvaraj Anandaraj Date: Mon, 22 Jan 2024 12:29:10 -0800 Subject: [PATCH 184/296] Fixed formatting and imports Signed-off-by: Selvaraj Anandaraj --- megatron/core/model_parallel_config.py | 4 ++-- megatron/core/transformer/transformer_config.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/megatron/core/model_parallel_config.py b/megatron/core/model_parallel_config.py index 2b07cdcd23..15995f9ecb 100644 --- a/megatron/core/model_parallel_config.py +++ b/megatron/core/model_parallel_config.py @@ -1,7 +1,7 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. 
from dataclasses import dataclass -from typing import Callable, Optional +from typing import Callable, ContextManager, Optional import torch @@ -210,7 +210,7 @@ class ModelParallelConfig: param_sync_func: Callable = None pipeline_model_parallel_split_rank: Optional[int] = None - #CPU Offloading + # CPU Offloading cpu_offloading: bool = False cpu_offloading_num_layers: int = 0 _cpu_offloading_context: ContextManager = None # Used for internal use only, not to be set by the user. TODO: Need to move to the 'right' place when possible. diff --git a/megatron/core/transformer/transformer_config.py b/megatron/core/transformer/transformer_config.py index 162e5c7d8c..38c42b6f0d 100644 --- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -2,7 +2,7 @@ import types from dataclasses import dataclass -from typing import Callable, ContextManager, Optional, Tuple +from typing import Callable, Optional, Tuple import torch import torch.nn.functional as F From 18723850886285b61aece11d2d1c689dd8499b08 Mon Sep 17 00:00:00 2001 From: Boxin Wang Date: Mon, 22 Jan 2024 13:14:59 -0800 Subject: [PATCH 185/296] Update retro doc --- tools/retro/README.md | 148 ++++++++++++++++++++++++++++++------------ 1 file changed, 108 insertions(+), 40 deletions(-) diff --git a/tools/retro/README.md b/tools/retro/README.md index c36cb39ce8..6e3e77c1c2 100644 --- a/tools/retro/README.md +++ b/tools/retro/README.md @@ -1,34 +1,60 @@ # Retro and InstructRetro -Retro [(Borgeaud et al., 2022)](https://arxiv.org/abs/2112.04426) is an autoregressive decoder-only language model (LM) pretrained with retrieval-augmentation. -Retro features practical scalibility to support large-scale pretraining from scratch by retrieving from trillions of token. -Pretraining with retrieval provides a more efficient storage mechanism of factual knowledge, when compared to storing factual knowledge implicitly within the network's parameters, thus largely reducing model parameters while achieving lower perplexity than standard GPT. +Retro [(Borgeaud et al., 2022)](https://arxiv.org/abs/2112.04426) is an autoregressive decoder-only language model (LM) +pretrained with retrieval-augmentation. +Retro features practical scalibility to support large-scale pretraining from scratch by retrieving from trillions of +token. +Pretraining with retrieval provides a more efficient storage mechanism of factual knowledge, when compared to storing +factual knowledge implicitly within the network's parameters, thus largely reducing model parameters while achieving +lower perplexity than standard GPT. Retro also provides the flexibility to update the knowledge stored in LMs [(Wang et al., 2023a)](https://arxiv.org/abs/2304.06762) by updating the retrieval database without training LMs again. -InstructRetro [(Wang et al., 2023b)](https://arxiv.org/abs/2310.07713) further scales up the size of Retro to 48B, featuring the largest LLM pretrained with retrieval (as of December 2023). +InstructRetro [(Wang et al., 2023b)](https://arxiv.org/abs/2310.07713) further scales up the size of Retro to 48B, +featuring the largest LLM pretrained with retrieval (as of December 2023). The obtained foundation model, Retro 48B, largely outperforms the GPT counterpart in terms of perplexity. -With instruction tuning on Retro, InstructRetro demonstrates significant improvement over the instruction tuned GPT on downstream tasks in the zero-shot setting. 
Specifically, the average improvement of InstructRetro is 7% over its GPT counterpart across 8 short-form QA tasks, and 10% over GPT across 4 challenging long-form QA tasks. We also find that one can ablate the encoder from InstructRetro architecture and directly use the InstructRetro decoder backbone as GPT, while achieving comparable results. +With instruction tuning on Retro, InstructRetro demonstrates significant improvement over the instruction tuned GPT on +downstream tasks in the zero-shot setting. Specifically, the average improvement of InstructRetro is 7% over its GPT +counterpart across 8 short-form QA tasks, and 10% over GPT across 4 challenging long-form QA tasks. We also find that +one can ablate the encoder from InstructRetro architecture and directly use the InstructRetro decoder backbone as GPT, +while achieving comparable results. This README provides an end-to-end tutorial to reproduce Retro and InstructRetro. # Contents - * [End-to-end Reproduction Guide](#end-to-end-reproduction-guide) - * [Step 0: Prepare the environment](#step-0-prepare-the-environment) + +* [Checkpoints](#checkpoints) +* [End-to-end Reproduction Guide](#end-to-end-reproduction-guide) + * [Step 0: Prepare the environment](#step-0-prepare-the-environment) * [Docker image](#docker-image) * [Install dependencies](#install-dependencies) - * [Step 1: Build retrieval database](#step-1-build-retrieval-database) - * [Step 2: Pretraining](#step-2-pretraining) - * [Step 3: Perplexity evaluation](#step-3-perplexity-evaluation) - * [Step 4: Instruction tuning](#step-4-instruction-tuning) - * [Step 5: Downstream task evaluation](#step-5-downstream-task-evaluation) - * [Citations](#citations) + * [Step 1: Build retrieval database](#step-1-build-retrieval-database) + * [Step 2: Pretraining](#step-2-pretraining) + * [Step 3: Perplexity evaluation](#step-3-perplexity-evaluation) + * [Step 4: Instruction tuning](#step-4-instruction-tuning) + * [Step 5: Downstream task evaluation](#step-5-downstream-task-evaluation) +* [Citations](#citations) + +# Checkpoints + +We provide the pretrained checkpoints of Retro and InstructRetro in the following table. 
The checkpoints are available +to download through the following links: + +| Model | Size | Instruction Tuning | Download Link 1 | Download Link 2 | Download Link 3 | +|-------------------------|------|--------------------|--------------------------------------------------------------------|--------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------| +| `retro-8b-base-4k` | 8b | | [Huggingface](https://huggingface.co/nvidia/retro-8b-base-4k) | [NGC](https://catalog.ngc.nvidia.com/orgs/nvidia/models/retro-8b-base-4k) | [Google Drive](https://drive.google.com/drive/folders/1uSQ5DAsuvx_8XcbtnVfs_MGvEOcx0uK_?usp=sharing) | +| `retro-8b-instruct-4k` | 8b | ✅ | [Huggingface](https://huggingface.co/nvidia/retro-8b-instruct-4k) | [NGC](https://catalog.ngc.nvidia.com/orgs/nvidia/models/retro-8b-instruct-4k) | [Google Drive](https://drive.google.com/drive/folders/1v5dKaSN0cm2lwyAWpFaJtlTrLhtMZXsI?usp=sharing) | +| `retro-48b-base-4k` | 48b | | [Huggingface](https://huggingface.co/nvidia/retro-48b-base-4k) | [NGC](https://catalog.ngc.nvidia.com/orgs/nvidia/models/retro-48b-base-4k) | [Google Drive](https://drive.google.com/drive/folders/1rtNpf0CiLElSHQcr3aLI3zgfI3teGTP5?usp=sharing) | +| `retro-48b-instruct-4k` | 48b | ✅ | [Huggingface](https://huggingface.co/nvidia/retro-48b-instruct-4k) | [NGC](https://catalog.ngc.nvidia.com/orgs/nvidia/models/retro-48b-instruct-4k) | [Google Drive](https://drive.google.com/drive/folders/1qdb0AQjSsAPGlWaIu3wgHPjf_nwLeY5h?usp=sharing) | # End-to-end Reproduction Guide -In this README, we provide an end-to-end reproduction guide for InstructRetro, covering from large-scale retrieval construction, pretraining, perplexity evaluation, instruction tuning, to downstream task evaluation. +In this README, we provide an end-to-end reproduction guide for InstructRetro, covering from large-scale retrieval +construction, pretraining, perplexity evaluation, instruction tuning, to downstream task evaluation. +If you are interested in evaluation only, we also [open-sourced our checkpoints](#checkpoints) and you can directly go +to [Step 5](#step-5-downstream-task-evaluation) to evaluate the checkpoints on downstream tasks. ## Step 0: Prepare the environment @@ -36,9 +62,8 @@ We recommend using docker environment to run the code. ### Docker image - -We provide a docker build file in [tools/retro/examples/Dockerfile](examples/Dockerfile) for the reproduction. The docker image is based on `nvcr.io/nvidia/pytorch:23.09-py3`. - +We provide a docker build file in [tools/retro/examples/Dockerfile](examples/Dockerfile) for the reproduction. The +docker image is based on `nvcr.io/nvidia/pytorch:23.09-py3`. 
### Install dependencies @@ -48,7 +73,8 @@ Clone the Megatron repo: git clone --branch InstructRetro https://github.com/NVIDIA/Megatron-LM.git ``` -If docker is not available, we recommend starting from a clean conda environment with the following runtime dependencies: +If docker is not available, we recommend starting from a clean conda environment with the following runtime +dependencies: - Python 3.10 - NVIDIA CUDA® 12.2.1 @@ -58,6 +84,7 @@ If docker is not available, we recommend starting from a clean conda environment - PyTorch 2.1.0a0+32f93b1 Then install Retro-specific dependencies, including: + ```bash pip install -U faiss-gpu pip install -U transformers @@ -67,36 +94,52 @@ pip install -U nltk pip install -U einops ``` - ## Step 1: Build retrieval database -In this step, we build a large-scale retrieval database for InstructRetro through [Faiss](https://github.com/facebookresearch/faiss) to retrieve from trillions of tokens, and preprocess (and save) the retrieval neighbors for the pretraining step. +In this step, we build a large-scale retrieval database for InstructRetro +through [Faiss](https://github.com/facebookresearch/faiss) to retrieve from trillions of tokens, and preprocess (and +save) the retrieval neighbors for the pretraining step. Please refer to [tools/retro/build_db.md](build_db.md) for more details. ## Step 2: Pretraining -*Please strictly follow Step 1 to build the retrieval database before pretraining to make sure the preprocessed retrieval neighbors match the pretraining corpus.* +*Please strictly follow Step 1 to build the retrieval database before pretraining to make sure the preprocessed +retrieval neighbors match the pretraining corpus.* In the pretraining step, we support both pretraining from scratch and continued pretraining from a pretrained GPT model. -We provide a template pretraining script to pretrain 843M Retro from scratch. Prepare your own arguments and update our templates in [tools/retro/examples/pretrain_model.sh](examples/pretrain_model.sh). Please note that the data path should be exactly matching the one used in Step 1 to make sure the preprocessed retrieval neighbors match the pretraining corpus. +We provide a template pretraining script to pretrain 843M Retro from scratch. Prepare your own arguments and update our +templates in [tools/retro/examples/pretrain_model.sh](examples/pretrain_model.sh). Please note that the data path should +be exactly matching the one used in Step 1 to make sure the preprocessed retrieval neighbors match the pretraining +corpus. [//]: # (Take the example of the Wikipedia corpus) ```bash bash tools/retro/examples/pretrain_model.sh ``` -After pretraining, the model checkpoints will be saved in the `--save` directory if you specified the arg in `pretrain_model.sh`. -To continue pretraining with retrieval from a pretrained GPT model, please specify `--load` in `pretrain_model.sh` to load the pretrained GPT model checkpoint (the architecture of GPT, including hidden size, number of layers, and activation methods, should be exactly the same as the one used for Retro). You should also specify `--no-load-optim --finetune` to make sure the optimizer state is not loaded from the pretrained GPT model and the continued pretraining with retrieval is from a clean start. After the first job / the first run, you will continue pretraining with retrieval from your last checkpoint. 
In the follow-up jobs, you should launch the pretraining without the flags `--no-load-optim --finetune` to make sure the optimizer state is correctly loaded from your last job. +After pretraining, the model checkpoints will be saved in the `--save` directory if you specified the arg +in `pretrain_model.sh`. +To continue pretraining with retrieval from a pretrained GPT model, please specify `--load` in `pretrain_model.sh` to +load the pretrained GPT model checkpoint (the architecture of GPT, including hidden size, number of layers, and +activation methods, should be exactly the same as the one used for Retro). You should also +specify `--no-load-optim --finetune` to make sure the optimizer state is not loaded from the pretrained GPT model and +the continued pretraining with retrieval is from a clean start. After the first job / the first run, you will continue +pretraining with retrieval from your last checkpoint. In the follow-up jobs, you should launch the pretraining without +the flags `--no-load-optim --finetune` to make sure the optimizer state is correctly loaded from your last job. ## Step 3: Perplexity evaluation -During pretraining, we will automatically evaluate the model perplexity on the specified validation corpus every `--eval-interval` steps. The validation corpus should be exactly the same as the one used in Step 1 to make sure the preprocessed retrieval neighbors match the pretraining corpus. +During pretraining, we will automatically evaluate the model perplexity on the specified validation corpus +every `--eval-interval` steps. The validation corpus should be exactly the same as the one used in Step 1 to make sure +the preprocessed retrieval neighbors match the pretraining corpus. -To evaluate the perplexity of a pretrained model, please add `--skip-train` in `pretrain_model.sh` to skip the pretraining step and only evaluate the perplexity of the model specified in `--load` on the validation corpus. Run the above command again to evaluate the perplexity of a pretrained model: +To evaluate the perplexity of a pretrained model, please add `--skip-train` in `pretrain_model.sh` to skip the +pretraining step and only evaluate the perplexity of the model specified in `--load` on the validation corpus. Run the +above command again to evaluate the perplexity of a pretrained model: ```bash bash tools/retro/examples/pretrain_model.sh @@ -104,11 +147,15 @@ bash tools/retro/examples/pretrain_model.sh ## Step 4: Instruction tuning -In this step, we fine-tune the pretrained model on the downstream task with instructions. We provide a template instruction tuning script to fine-tune 843M Retro. +In this step, we fine-tune the pretrained model on the downstream task with instructions. We provide a template +instruction tuning script to fine-tune 843M Retro. -We also provide an open-source blend of instruction tuning datasets. The dataset is available to download through [here](https://drive.google.com/file/d/1nzKwwYf8lYb9gN3P4YO8pFNU_B2nMYe1/view?usp=sharing). The blendable dataset consists of the following open-source instruction tuning datasets: +We also provide an open-source blend of instruction tuning datasets. The dataset is available to download +through [here](https://drive.google.com/file/d/1nzKwwYf8lYb9gN3P4YO8pFNU_B2nMYe1/view?usp=sharing). 
The blendable +dataset consists of the following open-source instruction tuning datasets: ### Instruction Tuning Dataset Breakdown + | Dataset | Samples | Epochs | Sampling Prob | |------------------------------------------------------------|--------:|-------:|--------------:| | [soda](https://arxiv.org/abs/2212.10465) | 2560 | 0.005 | 0.020 | @@ -123,35 +170,55 @@ We also provide an open-source blend of instruction tuning datasets. The dataset Refer to the paper links above for more details about each instruction tuning dataset. -*We note that the provided instruction tuning dataset is all from open-source instruction tuning datasets. It is slightly different from what we use in [InstructRetro](https://arxiv.org/abs/2310.07713), which contains private and proprietary datasets. Thus a 1-2% accuracy difference in downstream tasks may be expected.* +*We note that the provided instruction tuning dataset is all from open-source instruction tuning datasets. It is +slightly different from what we use in [InstructRetro](https://arxiv.org/abs/2310.07713), which contains private and +proprietary datasets. Thus a 1-2% accuracy difference in downstream tasks may be expected.* ### Instruction tuning script -Download the [blended instruction tuning dataset](https://drive.google.com/file/d/1nzKwwYf8lYb9gN3P4YO8pFNU_B2nMYe1/view?usp=sharing) in your data home directory `$DATA_HOME` and update our templates in [tools/retro/sft/sft_retro_lm.sh](sft/sft_retro_lm.sh). + +Download +the [blended instruction tuning dataset](https://drive.google.com/file/d/1nzKwwYf8lYb9gN3P4YO8pFNU_B2nMYe1/view?usp=sharing) +in your data home directory `$DATA_HOME` and update our templates +in [tools/retro/sft/sft_retro_lm.sh](sft/sft_retro_lm.sh). An example command to run instruction tuning on 843M Retro is as follows: + ```bash [blend-dataset-name] [model-size] [batch-size] [lr] [checkpoints] bash tools/retro/sft/sft_retro_lm.sh open_inst 843m 128 5e-6 ``` -The `blend_dataset_name` argument will blend all the datasets within the `$DATA_HOME` following the weights and configurations specified in the `${blend_dataset_name}.sh` ([open_inst.sh](sft/open_inst.sh) in the example above). -The checkpoints will be saved in the `--save` directory. For example, it will be saved to -`/checkpoints/applications/retro-sft_pp1_same_format_ctx1_843m_128_5e-6`. +The `blend_dataset_name` argument will blend all the datasets within the `$DATA_HOME` following the weights and +configurations specified in the `${blend_dataset_name}.sh` ([open_inst.sh](sft/open_inst.sh) in the example above). +The checkpoints will be saved in the `--save` directory. For example, it will be saved to +`/checkpoints/applications/retro-sft_pp1_same_format_ctx1_843m_128_5e-6`. ## Step 5: Downstream task evaluation -In this step, we demonstrate how to run InstructRetro for zero-shot evaluation on downstream question answering (QA) tasks. +In this step, we demonstrate how to run InstructRetro for zero-shot evaluation on downstream question answering (QA) +tasks. We provide the pre-processed open-source evaluation datasets with a unified format for different tasks. The +evaluation datasets used in our paper are available to download +through [here](https://drive.google.com/drive/folders/1xw-N0LJR_lIWnH6BKzHIb49quVCS_V72?usp=sharing). Please stick to +the same retro workdir used in Step 0-4 to make sure the preprocessed retrieval neighbors match the pretraining corpus. 
+If you directly come to Step 5, an example retro workdir with `args.json` for 800M Retro is +provided [here](https://drive.google.com/file/d/121GqAdMvf8bJEBZRt-SD4uhW-SRWgI3s/view?usp=sharing). Note that the args +in the json can be overwritten through the command line. -We present an example command to run retro generation given the InstructRetro checkpoints and the Natural Question (NQ) task. The example command is for the 843m InstructRetro obtained in Step 4. Please specify the directory for the NQ dataset and update the command accordingly for other checkpoints. +We present an example command to run retro generation given the InstructRetro checkpoints and the Natural Question (NQ) +task. The example command is for the 843m InstructRetro obtained in Step 4. Please specify the directory for the NQ +dataset and update the command accordingly for other checkpoints. ```bash bash tools/retro/text_generation/retro_generate.sh nq 843m greedy test 0 20000 1000 5 pp1 /checkpoints/applications/retro-sft_pp1_same_format_ctx1_843m_128_5e-6 2 ``` -The generated responses will be saved in the corresponding checkpoint directory. For example, for the 843m InstructRetro, it will be saved to +The generated responses will be saved in the corresponding checkpoint directory. For example, for the 843m +InstructRetro, it will be saved to `/checkpoints/applications/retro-sft_pp1_same_format_ctx1_843m_128_5e-6/retro-generate-nq_5_2_843m_test_greedy_0_20000_1000.txt`. -To evaluate the F1 / Exact Match (EM) scores of the generated responses, we provide an example script to run the evaluation on the NQ dataset. Please specify the directory for the NQ dataset and update the command accordingly for other checkpoints and downstream tasks. +To evaluate the F1 / Exact Match (EM) scores of the generated responses, we provide an example script to run the +evaluation on the NQ dataset. Please specify the directory for the NQ dataset and update the command accordingly for +other checkpoints and downstream tasks. ```bash python3 tools/retro/text_generation/evaluate.py @@ -163,11 +230,12 @@ See more details from our papers: [Shall we Pretrain Autoregressive Language Models with Retrieval? A Comprehensive Study.](https://arxiv.org/abs/2304.06762) -_Boxin Wang, Wei Ping, Peng Xu, Lawrence McAfee, Zihan Liu, Mohammad Shoeybi, Yi Dong, Oleksii Kuchaiev, Bo Li, Chaowei Xiao, Anima Anandkumar, Bryan Catanzaro._ (EMNLP 2023) +_Boxin Wang, Wei Ping, Peng Xu, Lawrence McAfee, Zihan Liu, Mohammad Shoeybi, Yi Dong, Oleksii Kuchaiev, Bo Li, Chaowei +Xiao, Anima Anandkumar, Bryan Catanzaro._ (EMNLP 2023) -[InstructRetro: Instruction Tuning post Retrieval-Augmented Pretraining.](https://arxiv.org/abs/2310.07713) +[InstructRetro: Instruction Tuning post Retrieval-Augmented Pretraining.](https://arxiv.org/abs/2310.07713) -_Boxin Wang, Wei Ping, Lawrence McAfee, Peng Xu, Bo Li, Mohammad Shoeybi, Bryan Catanzaro._ +_Boxin Wang, Wei Ping, Lawrence McAfee, Peng Xu, Bo Li, Mohammad Shoeybi, Bryan Catanzaro._ Please cite the papers as follows if you use the data or code from this repo: From 8fb44df701dfca3455d99c6c6f0109459d53c07d Mon Sep 17 00:00:00 2001 From: Deepak Narayanan Date: Thu, 30 Nov 2023 17:59:18 -0800 Subject: [PATCH 186/296] Log progress (iterations, floating-point operations, tokens) to progress.txt file - Also log job ID and number of GPUs in progress file. - Log job throughput and cumulative throughput separately. 
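The hunks below derive these throughputs from the floating-point-operation counter that is now saved with each checkpoint. As a quick illustration of the job-throughput arithmetic in `compute_throughputs_and_append_to_progress_log`, here is a minimal sketch with made-up numbers (all values below are placeholders, not taken from a real run):

```python
# Made-up values; the patch below obtains them from the loaded checkpoint,
# the wall clock, and the distributed world size.
flops_so_far = 3.000e21        # floating-point operations completed by the latest step
flops_at_job_start = 2.965e21  # counter restored from the checkpoint loaded at job start
elapsed_seconds = 3600.0       # wall-clock seconds since this job started training
world_size = 64                # number of GPUs in the job

job_throughput = (flops_so_far - flops_at_job_start) / (elapsed_seconds * 10**12 * world_size)
print(f"Job throughput: {job_throughput:.1f} TFLOP/s/GPU")  # about 151.9 with these numbers
```

The cumulative figure is computed the same way, except that the start time and starting counter come from the earliest "Starting job" entry with the same world size recorded in progress.txt.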
--- megatron/checkpointing.py | 11 +-- megatron/training.py | 146 +++++++++++++++++++++++++++++++++----- 2 files changed, 135 insertions(+), 22 deletions(-) diff --git a/megatron/checkpointing.py b/megatron/checkpointing.py index 3967103a0d..f181794b46 100644 --- a/megatron/checkpointing.py +++ b/megatron/checkpointing.py @@ -238,7 +238,8 @@ def get_rng_state(): return rng_state_list -def save_checkpoint(iteration, model, optimizer, opt_param_scheduler): +def save_checkpoint(iteration, model, optimizer, opt_param_scheduler, + num_floating_point_operations_so_far): """Save a model checkpoint.""" args = get_args() @@ -270,6 +271,7 @@ def save_checkpoint(iteration, model, optimizer, opt_param_scheduler): state_dict['args'] = args state_dict['checkpoint_version'] = 3.0 state_dict['iteration'] = iteration + state_dict['num_floating_point_operations_so_far'] = num_floating_point_operations_so_far if len(model) == 1: state_dict['model'] = model[0].state_dict_for_save_checkpoint() else: @@ -544,8 +546,8 @@ def load_checkpoint(model, optimizer, opt_param_scheduler, load_arg='load', stri torch.distributed.barrier() sys.exit() - # Iteration defaults to 0. - return 0 + # Iteration and num_floating_point_operations_so_far default to 0. + return 0, 0 # Set checkpoint version. set_checkpoint_version(state_dict.get('checkpoint_version', 0)) @@ -564,6 +566,7 @@ def load_checkpoint(model, optimizer, opt_param_scheduler, load_arg='load', stri 'iteration from checkpoint {}, exiting'.format( checkpoint_name)) sys.exit() + num_floating_point_operations_so_far = state_dict.get('num_floating_point_operations_so_far', 0) # Check arguments. assert args.consumed_train_samples == 0 @@ -669,7 +672,7 @@ def load_checkpoint(model, optimizer, opt_param_scheduler, load_arg='load', stri print_rank_0(f' successfully loaded checkpoint from {args.load} ' f'at iteration {iteration}') - return iteration + return iteration, num_floating_point_operations_so_far def load_biencoder_checkpoint(model, only_query_model=False, diff --git a/megatron/training.py b/megatron/training.py index 29ab904c90..ac29a63d6d 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -6,6 +6,7 @@ from datetime import datetime import math import logging +import os import sys from .log_handler import CustomHandler # Make default logging level INFO, but filter out all log messages not from MCore. @@ -76,6 +77,65 @@ def num_floating_point_operations(args, batch_size): ) +def append_to_progress_log(string): + args = get_args() + if args.save is None: + return + progress_log_filename = os.path.join(args.save, "progress.txt") + torch.distributed.barrier() + if torch.distributed.get_rank() == 0: + with open(progress_log_filename, 'a') as f: + job_id = os.getenv('SLURM_JOB_ID', '') + num_gpus = args.world_size + f.write(f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\tJob ID: {job_id}\t" + f"# GPUs: {num_gpus}\t{string}\n") + + +def get_start_time_from_progress_log(): + """ + Gets start time of earliest job with same world size. Also returns the number + of floating-point operations completed in last saved checkpoint. + """ + args = get_args() + assert args.save is not None + progress_log_filename = os.path.join(args.save, "progress.txt") + + # start_time is time when job with same world size started. + # start_num_floating_point_operations is the number of floating-point operations + # completed when this job started. + # latest_num_floating_point_operations is the number of floating-point operations + # completed in most recent saved checkpoint. 
+ start_time = None + start_num_floating_point_operations = None + latest_num_floating_point_operations = 0 + + def _get_field(string, type): + return type(string.split(': ')[1]) + + with open(progress_log_filename, 'r') as f: + for line in f: + line = line.strip() + line_tokens = line.split('\t') + world_size_in_line = _get_field(line_tokens[2], int) + if line_tokens[3] == "Saved checkpoint": + latest_num_floating_point_operations = \ + _get_field(line_tokens[7], float) + if world_size_in_line != args.world_size: + # Re-start search if we see a different world size. + start_time = None + start_num_floating_point_operations = None + continue + if line_tokens[3] == "Starting job": + if start_time is None: + start_time = line_tokens[0] + start_num_floating_point_operations = \ + latest_num_floating_point_operations + assert start_time is not None and start_num_floating_point_operations is not None, \ + "Should have seen at least one 'Starting job' entry with same world_size" + return datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S'), \ + start_num_floating_point_operations + + def pretrain(train_valid_test_dataset_provider, model_provider, model_type, @@ -115,6 +175,7 @@ def pretrain(train_valid_test_dataset_provider, # Initalize and get arguments, timers, and Tensorboard writer. initialize_megatron(extra_args_provider=extra_args_provider, args_defaults=args_defaults) + append_to_progress_log("Starting job") # Set pytorch JIT layer fusion options and warmup JIT functions. set_jit_fusion_options() @@ -179,15 +240,17 @@ def pretrain(train_valid_test_dataset_provider, iteration = 0 if args.do_train and args.train_iters > 0: - iteration = train(forward_step_func, - model, optimizer, opt_param_scheduler, - train_data_iterator, valid_data_iterator, - process_non_loss_data_func, config) + iteration, num_floating_point_operations_so_far = train( + forward_step_func, + model, optimizer, opt_param_scheduler, + train_data_iterator, valid_data_iterator, + process_non_loss_data_func, config) print_datetime('after training is done') if args.save and iteration != 0: - save_checkpoint(iteration, model, optimizer, opt_param_scheduler) + save_checkpoint(iteration, model, optimizer, opt_param_scheduler, + num_floating_point_operations_so_far) else: print_rank_0('skipping training (--skip-train is on) ...') @@ -412,11 +475,13 @@ def setup_model_and_optimizer(model_provider_func, if args.load is not None: timers = get_timers() timers('load-checkpoint', log_level=0).start(barrier=True) - args.iteration = load_checkpoint(model, optimizer, opt_param_scheduler) + args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( + model, optimizer, opt_param_scheduler) timers('load-checkpoint').stop(barrier=True) timers.log(['load-checkpoint']) else: args.iteration = 0 + args.num_floating_point_operations_so_far = 0 # get model without FP16 and/or DDP wrappers if args.iteration == 0 and len(unwrapped_model) == 1 \ @@ -709,15 +774,53 @@ def training_log(loss_dict, total_loss_dict, learning_rate, iteration, return report_memory_flag -def save_checkpoint_and_time(iteration, model, optimizer, opt_param_scheduler): +def compute_throughputs_and_append_to_progress_log(iteration, + num_floating_point_operations_so_far): + args = get_args() + if args.save is None: + return + + # Compute job throughput. + # args.num_floating_point_operations_so_far keeps track of floating-point operations + # completed at the start of job. 
+ global _TRAIN_START_TIME + job_throughput = \ + (num_floating_point_operations_so_far - + args.num_floating_point_operations_so_far) / ( + (time.time() - _TRAIN_START_TIME) * 10**12 * args.world_size) + + # Compute cumulative throughput since jobs of this world size were launched. + # `get_start_time_from_progress_log` returns start time and number of floating-point + # operations of first job of this world size. + start_time, start_num_floating_point_operations = get_start_time_from_progress_log() + elapsed_time = (datetime.now() - start_time).total_seconds() + cumulative_throughput = \ + (num_floating_point_operations_so_far - + start_num_floating_point_operations) / ( + elapsed_time * 10**12 * args.world_size) + + tokens_so_far = args.consumed_train_samples * args.seq_length + + append_to_progress_log(f"Saved checkpoint\tIteration: {iteration}\t" + f"Job throughput: {job_throughput:.1f} TFLOP/s/GPU\t" + f"Cumulative throughput: {cumulative_throughput:.1f} TFLOP/s/GPU\t" + f"Floating-point operations: {num_floating_point_operations_so_far:.2e}\t" + f"Tokens (in billions): {tokens_so_far / 10**9:.2f}") + + +def save_checkpoint_and_time(iteration, model, optimizer, opt_param_scheduler, + num_floating_point_operations_so_far): timers = get_timers() - # Extra barrier is added to make sure - # all ranks report the max time. + # Extra barrier is added to make sure all ranks report the max time. timers('save-checkpoint', log_level=0).start(barrier=True) - save_checkpoint(iteration, model, optimizer, opt_param_scheduler) + save_checkpoint(iteration, model, optimizer, opt_param_scheduler, + num_floating_point_operations_so_far) timers('save-checkpoint').stop(barrier=True) timers.log(['save-checkpoint']) + compute_throughputs_and_append_to_progress_log(iteration, + num_floating_point_operations_so_far) + def train(forward_step_func, model, optimizer, opt_param_scheduler, train_data_iterator, valid_data_iterator, @@ -738,6 +841,7 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, # Iterations. iteration = args.iteration + num_floating_point_operations_so_far = args.num_floating_point_operations_so_far # Setup some training config params config.grad_scale_func = optimizer.scale_loss @@ -803,9 +907,11 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, opt_param_scheduler, config) iteration += 1 - args.consumed_train_samples += mpu.get_data_parallel_world_size() * \ - args.micro_batch_size * \ - get_num_microbatches() + batch_size = mpu.get_data_parallel_world_size() * \ + args.micro_batch_size * \ + get_num_microbatches() + args.consumed_train_samples += batch_size + num_floating_point_operations_so_far += num_floating_point_operations(args, batch_size) # Logging. 
loss_scale = optimizer.get_loss_scale().item() @@ -847,7 +953,8 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, signal_handler = get_signal_handler() if any(signal_handler.signals_received()): save_checkpoint_and_time(iteration, model, optimizer, - opt_param_scheduler) + opt_param_scheduler, + num_floating_point_operations_so_far) print_datetime('exiting program after receiving SIGTERM.') exit = True break @@ -856,7 +963,8 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, iteration % args.save_interval == 0: timers('interval-time').stop() save_checkpoint_and_time(iteration, model, optimizer, - opt_param_scheduler) + opt_param_scheduler, + num_floating_point_operations_so_far) saved_checkpoint = True timers('interval-time', log_level=0).start(barrier=True) @@ -872,7 +980,8 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, if done: if not saved_checkpoint: save_checkpoint_and_time(iteration, model, optimizer, - opt_param_scheduler) + opt_param_scheduler, + num_floating_point_operations_so_far) print_datetime('exiting program after {} minutes'.format(train_time)) exit = True break @@ -881,7 +990,8 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, if args.exit_interval and iteration % args.exit_interval == 0: if args.save and not saved_checkpoint: save_checkpoint_and_time(iteration, model, optimizer, - opt_param_scheduler) + opt_param_scheduler, + num_floating_point_operations_so_far) torch.distributed.barrier() print_datetime('exiting program at iteration {}'.format(iteration)) exit = True @@ -908,7 +1018,7 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, if exit: sys.exit() - return iteration + return iteration, num_floating_point_operations_so_far def evaluate(forward_step_func, From 781d86a27089a2b357cdd78ec4c47e1221a33635 Mon Sep 17 00:00:00 2001 From: Deepak Narayanan Date: Mon, 22 Jan 2024 12:57:56 -0800 Subject: [PATCH 187/296] Hide progress logging behind a command-line argument --- megatron/arguments.py | 4 ++++ megatron/training.py | 17 +++++++++++------ 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/megatron/arguments.py b/megatron/arguments.py index 64de0c77e8..ee4aa6759e 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -675,6 +675,10 @@ def _add_logging_args(parser): help='If set, calculate and log the number of zeros in gradient.') group.add_argument('--log-throughput', action='store_true', help='If set, calculate and log throughput per GPU.') + group.add_argument('--log-progress', action='store_true', + help='If set, log progress (in terms of number of processed tokens and ' + 'number of floating-point operations) to progress.txt file in checkpoint ' + 'directory.') group.add_argument('--timing-log-level', type=int, default=0, choices=range(0,3), help='Granularity level to measure and report timing. ' diff --git a/megatron/training.py b/megatron/training.py index ac29a63d6d..9f48979f01 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -175,7 +175,13 @@ def pretrain(train_valid_test_dataset_provider, # Initalize and get arguments, timers, and Tensorboard writer. initialize_megatron(extra_args_provider=extra_args_provider, args_defaults=args_defaults) - append_to_progress_log("Starting job") + + args = get_args() + timers = get_timers() + + if args.log_progress: + append_to_progress_log("Starting job") + # Set pytorch JIT layer fusion options and warmup JIT functions. 
set_jit_fusion_options() @@ -193,9 +199,6 @@ def pretrain(train_valid_test_dataset_provider, time.time() - _TRAIN_START_TIME)) print_datetime('after megatron is initialized') - args = get_args() - timers = get_timers() - # Model, optimizer, and learning rate. timers('model-and-optimizer-setup', log_level=0).start(barrier=True) model, optimizer, opt_param_scheduler = setup_model_and_optimizer( @@ -810,6 +813,7 @@ def compute_throughputs_and_append_to_progress_log(iteration, def save_checkpoint_and_time(iteration, model, optimizer, opt_param_scheduler, num_floating_point_operations_so_far): + args = get_args() timers = get_timers() # Extra barrier is added to make sure all ranks report the max time. timers('save-checkpoint', log_level=0).start(barrier=True) @@ -818,8 +822,9 @@ def save_checkpoint_and_time(iteration, model, optimizer, opt_param_scheduler, timers('save-checkpoint').stop(barrier=True) timers.log(['save-checkpoint']) - compute_throughputs_and_append_to_progress_log(iteration, - num_floating_point_operations_so_far) + if args.log_progress: + compute_throughputs_and_append_to_progress_log(iteration, + num_floating_point_operations_so_far) def train(forward_step_func, model, optimizer, opt_param_scheduler, From b03eae3dd0b2e96ac4430b571f5266f6d3031f5e Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Tue, 23 Jan 2024 06:03:17 +0000 Subject: [PATCH 188/296] Updated CI value after removing kaiming_init. --- .gitlab-ci.yml | 2 +- megatron/core/transformer/moe/router.py | 4 ++-- megatron/core/transformer/transformer_config.py | 2 +- ..._pp1_1nodes_50steps_core_enabled_te_8experts2parallel.json | 2 +- ...50steps_core_enabled_te_8experts2parallel_groupedGEMM.json | 2 +- ..._50steps_core_enabled_te_8experts2parallel_top2router.json | 2 +- ..._pp2_1nodes_50steps_core_enabled_te_4experts2parallel.json | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index b9b7eda180..950cf34173 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -533,7 +533,7 @@ train.te_core_moe_gpt3.345m_tp2_pp2_2experts_1node_50steps: USE_CORE: 1 TEST_LEVEL: NIGHTLY_TESTS METADATA: "te_2experts" - ADDITIONAL_PARAMS: "--num-experts 2 --moe-router-load-balancing-type sinkhorn --moe-router-topk 1" + ADDITIONAL_PARAMS: "--num-experts 2 --sequence-parallel --moe-router-load-balancing-type sinkhorn --moe-router-topk 1" train.te_core_moe_gpt3.345m_tp2_pp2_4experts2parallel_1node_50steps: <<: *selene-test-launcher diff --git a/megatron/core/transformer/moe/router.py b/megatron/core/transformer/moe/router.py index 39291faacf..b7e72965d1 100644 --- a/megatron/core/transformer/moe/router.py +++ b/megatron/core/transformer/moe/router.py @@ -186,7 +186,7 @@ def apply_z_loss(self, logits): z_loss = z_loss_func(logits, self.config.moe_z_loss_coeff) logits = MoEAuxLossAutoScaler.apply(logits, z_loss) return logits - + def apply_input_jitter(self, input: torch.Tensor): """Add noise to the input tensor. Refer to https://arxiv.org/abs/2101.03961. @@ -218,7 +218,7 @@ def routing(self, logits: torch.Tensor): Tuple[torch.Tensor, torch.Tensor]: Probs and the indices tensor. 
""" logits = logits.view(-1, self.config.num_moe_experts) - + # Apply Z-Loss logits = self.apply_z_loss(logits) # Apply input jitter diff --git a/megatron/core/transformer/transformer_config.py b/megatron/core/transformer/transformer_config.py index af34ac87be..5ee299262f 100644 --- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -140,7 +140,7 @@ class TransformerConfig(ModelParallelConfig): moe_grouped_gemm: bool = False moe_aux_loss_coeff: float = 0 # 1e-2 would be a good start value for load balance loss. moe_z_loss_coeff: float = None # 1e-3 would be a good start value for z-loss - moe_input_jitter_eps: float = None + moe_input_jitter_eps: float = None moe_token_dropping: bool = False # TODO: Support token dropping. def __post_init__(self): diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel.json b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel.json index a03930027e..103f0ef6cd 100644 --- a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel.json +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79995, 10.86816, 10.86502, 10.80149, 10.71138, 10.63815, 10.19945, 10.30719, 10.2155, 9.90987]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16119.0, 19407.0, 19395.0, 18709.0, 17372.0, 18070.0, 15753.0, 18008.0, 18946.0, 19784.0]}, "iteration_timing_avg": 0.2843088235294118} +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79896, 10.8601, 10.87152, 10.79856, 10.71624, 10.6355, 10.19683, 10.30917, 10.21632, 9.90782]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16152.0, 19202.0, 19645.0, 18594.0, 17375.0, 17768.0, 15576.0, 17888.0, 18387.0, 18810.0]}, "iteration_timing_avg": 0.2777326470588235} \ No newline at end of file diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_groupedGEMM.json b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_groupedGEMM.json index 2e759bef60..93557798a7 100644 --- a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_groupedGEMM.json +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_groupedGEMM.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.80299, 10.85298, 10.86262, 10.79516, 10.72134, 10.63641, 10.20727, 10.31594, 10.21293, 9.90292]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16202.0, 19817.0, 19787.0, 18858.0, 17645.0, 17931.0, 15872.0, 18124.0, 18472.0, 19200.0]}, "iteration_timing_avg": 0.176695} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.80961, 10.86075, 10.86755, 10.80331, 10.71906, 10.64746, 10.21053, 10.32037, 10.22013, 9.92389]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16604.0, 19509.0, 19801.0, 18644.0, 17084.0, 17721.0, 14980.0, 17754.0, 18357.0, 18375.0]}, "iteration_timing_avg": 0.18734941176470588} \ No newline at end of file diff --git 
a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_top2router.json b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_top2router.json index c5f9203a92..defdb50cec 100644 --- a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_top2router.json +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp1_1nodes_50steps_core_enabled_te_8experts2parallel_top2router.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.80968, 10.86879, 10.86821, 10.8024, 10.67623, 10.58875, 10.0839, 10.19807, 10.09912, 9.76346]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [62498.0, 65685.0, 65926.0, 65244.0, 64040.0, 64832.0, 63529.0, 66406.0, 66810.0, 68223.0]}, "iteration_timing_avg": 0.2556055882352941} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.80682, 10.86725, 10.87968, 10.79328, 10.66888, 10.57819, 10.06276, 10.18504, 10.1014, 9.76741]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [62567.0, 65584.0, 65506.0, 65118.0, 64028.0, 64819.0, 63611.0, 65997.0, 66843.0, 67788.0]}, "iteration_timing_avg": 0.26514323529411765} \ No newline at end of file diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_te_4experts2parallel.json b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_te_4experts2parallel.json index 70e1102250..97033d78eb 100644 --- a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_te_4experts2parallel.json +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_te_4experts2parallel.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.82669, 10.87408, 10.85677, 10.80443, 10.7074, 10.63353, 10.15437, 10.27397, 10.17955, 9.86891]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [7132.0, 8526.0, 8992.0, 8638.0, 7665.0, 8074.0, 7151.0, 8425.0, 8985.0, 9522.0]}, "iteration_timing_avg": 0.27723117647058826} +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79674, 10.84347, 10.81547, 10.76604, 10.65416, 10.56322, 10.08548, 10.21617, 10.1139, 9.8322]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [2912.0, 3584.0, 3414.0, 3357.0, 3298.0, 3173.0, 2816.0, 3211.0, 3817.0, 3728.0]}, "iteration_timing_avg": 0.27967117647058826} \ No newline at end of file From d2e5f788736ba0f284509d4243dea5d5edd16fee Mon Sep 17 00:00:00 2001 From: Zhengjiang Date: Tue, 23 Jan 2024 23:38:53 +0800 Subject: [PATCH 189/296] Add one_logger commandline arguments --- megatron/arguments.py | 9 +++++++++ megatron/global_vars.py | 7 ++++++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/megatron/arguments.py b/megatron/arguments.py index 0f7f47365e..4a3cf02fd0 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -741,6 +741,15 @@ def _add_logging_args(parser): '--index-url=https://sc-hw-artf.nvidia.com/api/pypi/hwinf-ml-pypi/simple' ' one_logger` or go to https://gitlab-master.nvidia.com/hwinf-dcm/onelogger ' 'for more details') + group.add_argument('--one-logger-project', type=str, default='e2e-tracking', + help='The one-logger project name. 
Will ignore if ' + '--enable-one-logger is not set') + group.add_argument('--one-logger-entity', type=str, default='hwinf_dcm', + help='The one-logger username or team name. Will ignore if ' + '--enable-one-logger is not set') + group.add_argument('--one-logger-run-name', type=str, default='', + help='The one-logger run name displayed. Will ignore if ' + '--enable-one-logger is not set') return parser diff --git a/megatron/global_vars.py b/megatron/global_vars.py index 50d8e75b94..5709ecf99f 100644 --- a/megatron/global_vars.py +++ b/megatron/global_vars.py @@ -200,7 +200,12 @@ def _set_one_logger(args): if args.enable_onelogger and args.rank == (args.world_size - 1): try: from one_logger.core import OneLogger - one_logger = OneLogger() + config = { + 'project': args.one_logger_project, + 'entity': args.one_logger_entity, + 'name': args.one_logger_name + } + one_logger = OneLogger(config=config) _GLOBAL_ONE_LOGGER = one_logger except BaseException: print('WARNING: one_logger package is required to enable e2e metrics ' From 62a5a3eb15bfe3822db31b9362a80aadfebb2efb Mon Sep 17 00:00:00 2001 From: Zhengjiang Date: Tue, 23 Jan 2024 23:43:40 +0800 Subject: [PATCH 190/296] Remove one_logger config file --- megatron/config/default.yaml | 11 ----------- 1 file changed, 11 deletions(-) delete mode 100644 megatron/config/default.yaml diff --git a/megatron/config/default.yaml b/megatron/config/default.yaml deleted file mode 100644 index 73b74afd3a..0000000000 --- a/megatron/config/default.yaml +++ /dev/null @@ -1,11 +0,0 @@ -enable_one_logger: True - -wandb: - host: https://api.wandb.ai - api_key: ${oc.env:WANDB_API_KEY} - entity: zshao - project: MNIST - name: one-logger-megatron-test - tags: - - e2e_metrics_enabled - - e2e_metrics_testing \ No newline at end of file From 49727deb2210d8651493b8fce45b93593ff4d7de Mon Sep 17 00:00:00 2001 From: Zhengjiang Date: Tue, 23 Jan 2024 23:47:05 +0800 Subject: [PATCH 191/296] Hardcode train_iterations_warmup to 5 --- megatron/training.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/megatron/training.py b/megatron/training.py index a34c0efcab..93fd4cf3f9 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -139,7 +139,7 @@ def pretrain(train_valid_test_dataset_provider, one_logger = get_one_logger() if one_logger: one_logger.log_metrics({ - 'train_iterations_warmup': args.lr_warmup_iters, + 'train_iterations_warmup': 5 }) # Model, optimizer, and learning rate. From 0cb693a21f2c7db9a0bd4ed6a2069d9ffcf7f470 Mon Sep 17 00:00:00 2001 From: Zhengjiang Date: Wed, 24 Jan 2024 00:07:52 +0800 Subject: [PATCH 192/296] Add clarification for internal one_logger --- megatron/arguments.py | 1 + 1 file changed, 1 insertion(+) diff --git a/megatron/arguments.py b/megatron/arguments.py index 4a3cf02fd0..cfda8c1786 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -737,6 +737,7 @@ def _add_logging_args(parser): help='Path to save the wandb results locally.') group.add_argument('--enable-one-logger', action='store_true', help='If set, use one_logger to track E2E metrics' + 'Note that one_logger is an internal tool and not available externally. 
' 'For installation, please try command: `pip install ' '--index-url=https://sc-hw-artf.nvidia.com/api/pypi/hwinf-ml-pypi/simple' ' one_logger` or go to https://gitlab-master.nvidia.com/hwinf-dcm/onelogger ' From ae1cd89ccbb09deecd84ba8fcd53c35ae3255748 Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Tue, 23 Jan 2024 17:59:36 +0000 Subject: [PATCH 193/296] Fix SwiGLU for input dimension 2 after rebased main. --- megatron/core/fusions/fused_bias_swiglu.py | 8 +++++--- megatron/core/transformer/transformer_config.py | 2 +- .../unit_tests/transformer/moe/test_switch_mlp.py | 15 +++++++++++++-- 3 files changed, 19 insertions(+), 6 deletions(-) diff --git a/megatron/core/fusions/fused_bias_swiglu.py b/megatron/core/fusions/fused_bias_swiglu.py index de4cb753e5..710a5e1ff7 100644 --- a/megatron/core/fusions/fused_bias_swiglu.py +++ b/megatron/core/fusions/fused_bias_swiglu.py @@ -66,13 +66,15 @@ def backward(ctx, grad_output): def bias_swiglu_impl(input, bias): - shape = input.shape - input = input.view(-1, shape[2]) + ori_shape = input.shape + assert len(ori_shape) in [2, 3] + input = input.view(-1, ori_shape[-1]) if bias is not None: output = BiasSwiGLUFunction.apply(input, bias) else: output = SwiGLUFunction.apply(input) - return output.view(shape[0], shape[1], -1) + + return output if len(ori_shape) == 2 else output.view(ori_shape[0], ori_shape[1], -1) # bias_swiglu_impl = BiasSwiGLUFunction.apply diff --git a/megatron/core/transformer/transformer_config.py b/megatron/core/transformer/transformer_config.py index 5ee299262f..9feda54149 100644 --- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -31,7 +31,7 @@ class TransformerConfig(ModelParallelConfig): add_bias_linear (bool): Include a bias term in all linear layers (QKV projections, after core attention, and two in MLP layer). Default is True. gated_linear_unit (bool): Use a gated linear unit for the first linear layer in the MLP. Defaults to False. activation_func (Callable): Activation function to use for the non-linearity in the MLP. Defaults to F.gelu. - num_moe_experts (int): Number of experts to use for Mixture of Experts. When set, it replaces MLP with Switch MLP. Defaults to None (no MoE). + num_moe_experts (int): Number of experts to use for MoE layer. When set, it replaces MLP with MoE layer. Defaults to None (no MoE). init_method (Callable): Method to initialize weights. Note that bias is always set to zero. Should be a function that takes a single Tensor and initializes it. Defaults to megatron.core.utils.init_method_normal(init_method_std) which is torch nn init normal with mean=0.0 and std=init_method_Std. output_layer_init_method (Callable): Method to initialize weights of the output layer of both attention and MLP blocks. Defaults to megatron.core.utils.scaled_init_method_normal(init_method_std) which is torch nn init normal with mean=0.0 and std=init_method_std / math.sqrt(2.0 * num_layers). init_method_std (float): Standard deviation of the zero mean normal for the default initialization method, not used if init_method and output_layer_init_method are provided. Defaults to 0.02. 
diff --git a/tests/unit_tests/transformer/moe/test_switch_mlp.py b/tests/unit_tests/transformer/moe/test_switch_mlp.py index 65c02252e0..65f5ad319d 100644 --- a/tests/unit_tests/transformer/moe/test_switch_mlp.py +++ b/tests/unit_tests/transformer/moe/test_switch_mlp.py @@ -17,7 +17,18 @@ def setup_method(self, method): model_parallel_cuda_manual_seed(123) print("done intializing") num_moe_experts = 2 - transformer_config = TransformerConfig(num_layers=2, hidden_size=12, num_attention_heads=4, num_moe_experts=num_moe_experts, use_cpu_initialization=True, moe_router_load_balancing_type="sinkhorn", moe_router_topk=1) + transformer_config = TransformerConfig( + num_layers=2, + hidden_size=12, + num_attention_heads=4, + num_moe_experts=num_moe_experts, + use_cpu_initialization=True, + activation_func=torch.nn.functional.silu, + gated_linear_unit=True, + bias_activation_fusion=True, + moe_router_load_balancing_type="sinkhorn", + moe_router_topk=1 + ) transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec( num_experts=num_moe_experts, moe_grouped_gemm=False) self.switch_mlp = MoELayer(transformer_config, transformer_layer_spec.submodules.mlp.submodules) @@ -29,7 +40,7 @@ def test_constructor(self): assert isinstance(self.switch_mlp, MoELayer) num_weights = sum([p.numel() for p in self.switch_mlp.parameters()]) - assert num_weights == 2448 + assert num_weights == 3696 @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") From ebb1484327af4eb73ee923bf736e44db72aa1831 Mon Sep 17 00:00:00 2001 From: Boxin Wang Date: Tue, 23 Jan 2024 11:22:06 -0800 Subject: [PATCH 194/296] Update retro doc following the suggestion of Wei and Lawrence --- README.md | 2 +- tools/retro/README.md | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 81b23c9ed3..bc8f93bb90 100644 --- a/README.md +++ b/README.md @@ -241,7 +241,7 @@ With full global batch size of 1536 on 1024 A100 GPUs, each iteration takes arou Retro [(Borgeaud et al., 2022)](https://arxiv.org/abs/2112.04426) is an autoregressive decoder-only language model (LM) pretrained with retrieval-augmentation. -Retro features practical scalibility to support large-scale pretraining from scratch by retrieving from trillions of token. +Retro features practical scalability to support large-scale pretraining from scratch by retrieving from trillions of tokens. Pretraining with retrieval provides a more efficient storage mechanism of factual knowledge, when compared to storing factual knowledge implicitly within the network's parameters, thus largely reducing model parameters while achieving lower perplexity than standard GPT. Retro also provides the flexibility to update the knowledge stored in LMs [(Wang et al., 2023a)](https://arxiv.org/abs/2304.06762) diff --git a/tools/retro/README.md b/tools/retro/README.md index 6e3e77c1c2..f7a38c8a04 100644 --- a/tools/retro/README.md +++ b/tools/retro/README.md @@ -2,8 +2,8 @@ Retro [(Borgeaud et al., 2022)](https://arxiv.org/abs/2112.04426) is an autoregressive decoder-only language model (LM) pretrained with retrieval-augmentation. -Retro features practical scalibility to support large-scale pretraining from scratch by retrieving from trillions of -token. +Retro features practical scalability to support large-scale pretraining from scratch by retrieving from trillions of +tokens. 
Pretraining with retrieval provides a more efficient storage mechanism of factual knowledge, when compared to storing factual knowledge implicitly within the network's parameters, thus largely reducing model parameters while achieving lower perplexity than standard GPT. @@ -16,9 +16,9 @@ featuring the largest LLM pretrained with retrieval (as of December 2023). The obtained foundation model, Retro 48B, largely outperforms the GPT counterpart in terms of perplexity. With instruction tuning on Retro, InstructRetro demonstrates significant improvement over the instruction tuned GPT on downstream tasks in the zero-shot setting. Specifically, the average improvement of InstructRetro is 7% over its GPT -counterpart across 8 short-form QA tasks, and 10% over GPT across 4 challenging long-form QA tasks. We also find that -one can ablate the encoder from InstructRetro architecture and directly use the InstructRetro decoder backbone as GPT, -while achieving comparable results. +counterpart across 8 short-form QA tasks, 10% over GPT across 4 challenging long-form QA tasks, and 16% over GPT across +3 summarization tasks. We also find that one can ablate the encoder from InstructRetro architecture and directly use the +InstructRetro decoder backbone as GPT, while achieving comparable results. This README provides an end-to-end tutorial to reproduce Retro and InstructRetro. @@ -63,7 +63,7 @@ We recommend using docker environment to run the code. ### Docker image We provide a docker build file in [tools/retro/examples/Dockerfile](examples/Dockerfile) for the reproduction. The -docker image is based on `nvcr.io/nvidia/pytorch:23.09-py3`. +docker image is based on the [NGC docker](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pytorch/tags) `nvcr.io/nvidia/pytorch:23.09-py3`. 
### Install dependencies From 7298d15fa4943b8f4c567aefb32747fc6090166a Mon Sep 17 00:00:00 2001 From: Deepak Narayanan Date: Fri, 19 Jan 2024 17:08:08 -0800 Subject: [PATCH 195/296] Add distributed optimizer tests with --overlap-param-gather (and corresponding gold values) --- .gitlab-ci.yml | 46 +++++++++++++++++++ ...izer_overlap_grad_reduce_param_gather.json | 1 + ...izer_overlap_grad_reduce_param_gather.json | 1 + ...izer_overlap_grad_reduce_param_gather.json | 1 + 4 files changed, 49 insertions(+) create mode 100644 tests/functional_tests/test_results/gpt3/gpt3_tp1_pp1_1nodes_50steps_dist_optimizer_overlap_grad_reduce_param_gather.json create mode 100644 tests/functional_tests/test_results/gpt3/gpt3_tp1_pp4_interleaved_1nodes_50steps_dist_optimizer_overlap_grad_reduce_param_gather.json create mode 100644 tests/functional_tests/test_results/gpt3/gpt3_tp4_pp1_1nodes_50steps_dist_optimizer_overlap_grad_reduce_param_gather.json diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index c0553de5a3..05c1de1f61 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -393,6 +393,21 @@ train.gpt3.345m_tp1_pp1_1node_50steps_dist_optimizer_overlap_grad_reduce: METADATA: dist_optimizer_overlap_grad_reduce ADDITIONAL_PARAMS: "--use-distributed-optimizer --overlap-grad-reduce" +train.gpt3.345m_tp1_pp1_1node_50steps_dist_optimizer_overlap_grad_reduce_param_gather: + <<: *selene-test-launcher + variables: + <<: [*VARS] + RUN_MODEL: gpt3 + USE_TE: 0 + TP_SIZE: 1 + PP_SIZE: 1 + NUM_NODES: 1 + MAX_STEPS: 50 + USE_CORE: 0 + TEST_LEVEL: NIGHTLY_TESTS + METADATA: dist_optimizer_overlap_grad_reduce_param_gather + ADDITIONAL_PARAMS: "--use-distributed-optimizer --overlap-grad-reduce --overlap-param-gather" + train.gpt3.345m_tp4_pp1_1node_50steps_overlap_grad_reduce: <<: *selene-test-launcher variables: @@ -423,6 +438,21 @@ train.gpt3.345m_tp4_pp1_1node_50steps_dist_optimizer_overlap_grad_reduce: METADATA: dist_optimizer_overlap_grad_reduce ADDITIONAL_PARAMS: "--use-distributed-optimizer --overlap-grad-reduce" +train.gpt3.345m_tp4_pp1_1node_50steps_dist_optimizer_overlap_grad_reduce_param_gather: + <<: *selene-test-launcher + variables: + <<: [*VARS] + RUN_MODEL: gpt3 + USE_TE: 0 + TP_SIZE: 4 + PP_SIZE: 1 + NUM_NODES: 1 + MAX_STEPS: 50 + USE_CORE: 0 + TEST_LEVEL: MR_TESTS + METADATA: dist_optimizer_overlap_grad_reduce_param_gather + ADDITIONAL_PARAMS: "--use-distributed-optimizer --overlap-grad-reduce --overlap-param-gather" + train.gpt3.345m_tp1_pp4_1node_50steps_overlap_grad_reduce: <<: *selene-test-launcher variables: @@ -470,6 +500,22 @@ train.gpt3.345m_tp1_pp4_interleaved_1node_50steps_dist_optimizer_overlap_grad_re METADATA: dist_optimizer_overlap_grad_reduce ADDITIONAL_PARAMS: "--use-distributed-optimizer --overlap-grad-reduce" +train.gpt3.345m_tp1_pp4_interleaved_1node_50steps_dist_optimizer_overlap_grad_reduce_param_gather: + <<: *selene-test-launcher + variables: + <<: [*VARS] + RUN_MODEL: gpt3 + USE_TE: 0 + TP_SIZE: 1 + PP_SIZE: 4 + VP_SIZE: 1 + NUM_NODES: 1 + MAX_STEPS: 50 + USE_CORE: 0 + TEST_LEVEL: MR_TESTS + METADATA: dist_optimizer_overlap_grad_reduce_param_gather + ADDITIONAL_PARAMS: "--use-distributed-optimizer --overlap-grad-reduce --overlap-param-gather" + train.gpt3.345m_tp2_pp2_1node_50steps_overlap_grad_reduce: <<: *selene-test-launcher variables: diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp1_pp1_1nodes_50steps_dist_optimizer_overlap_grad_reduce_param_gather.json 
b/tests/functional_tests/test_results/gpt3/gpt3_tp1_pp1_1nodes_50steps_dist_optimizer_overlap_grad_reduce_param_gather.json new file mode 100644 index 0000000000..4ceb167669 --- /dev/null +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp1_pp1_1nodes_50steps_dist_optimizer_overlap_grad_reduce_param_gather.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.87174, 10.89545, 10.88847, 10.88533, 10.893, 10.84895, 10.70048, 10.64124, 10.53839, 10.3107]}, "num-zeros": {"start_step": 0, "end_step": 32, "step_interval": 5, "values": [1238.0, 1318.0, 1648.0, 1423.0, 1535.0, 1350.0, 1271.0]}, "iteration_timing_avg": 0.06580882352941175} \ No newline at end of file diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp1_pp4_interleaved_1nodes_50steps_dist_optimizer_overlap_grad_reduce_param_gather.json b/tests/functional_tests/test_results/gpt3/gpt3_tp1_pp4_interleaved_1nodes_50steps_dist_optimizer_overlap_grad_reduce_param_gather.json new file mode 100644 index 0000000000..3ad3d83d39 --- /dev/null +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp1_pp4_interleaved_1nodes_50steps_dist_optimizer_overlap_grad_reduce_param_gather.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.7951, 10.84939, 10.87411, 10.83459, 10.82865, 10.78676, 10.56492, 10.57063, 10.48544, 10.19547]}, "num-zeros": {"start_step": 0, "end_step": 34, "step_interval": 5, "values": [2586.0, 2828.0, 2105.0, 2725.0, 2711.0, 2428.0, 2946.0]}, "iteration_timing_avg": 0.12188999999999997} \ No newline at end of file diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp4_pp1_1nodes_50steps_dist_optimizer_overlap_grad_reduce_param_gather.json b/tests/functional_tests/test_results/gpt3/gpt3_tp4_pp1_1nodes_50steps_dist_optimizer_overlap_grad_reduce_param_gather.json new file mode 100644 index 0000000000..b12f79670b --- /dev/null +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp4_pp1_1nodes_50steps_dist_optimizer_overlap_grad_reduce_param_gather.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85921, 10.8797, 10.87381, 10.88658, 10.88912, 10.84826, 10.68571, 10.62947, 10.54289, 10.26918]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2288.0, 2326.0, 2454.0, 2011.0, 2111.0, 2436.0, 2446.0]}, "iteration_timing_avg": 0.20696529411764708} \ No newline at end of file From 33111c9c9aeb932c4a9b6404b3dbf03ab99d689c Mon Sep 17 00:00:00 2001 From: Deepak Narayanan Date: Sat, 20 Jan 2024 10:32:26 -0800 Subject: [PATCH 196/296] Fix bug causing issues with fp16 and --overlap-param-gather by disabling overlapped param gather for validation --- megatron/optimizer/distrib_optimizer.py | 70 +++++++++++++++---------- megatron/training.py | 8 +++ 2 files changed, 51 insertions(+), 27 deletions(-) diff --git a/megatron/optimizer/distrib_optimizer.py b/megatron/optimizer/distrib_optimizer.py index 52f41fb9d6..16e0742229 100644 --- a/megatron/optimizer/distrib_optimizer.py +++ b/megatron/optimizer/distrib_optimizer.py @@ -504,6 +504,7 @@ def __init__( (gbuf_index, dtype, bucket_index) ) all_gather_handle_index = len(self.all_gather_handle_index_to_bucket_index_map) - 1 + self.all_gather_handles.append(None) # Store all all_gather_handle_indices. 
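# A minimal, self-contained sketch of the overlap pattern these handles support
# (hypothetical helper names, not the Megatron-LM API): each parameter bucket is
# all-gathered asynchronously, and a global forward pre-hook waits on the matching
# handle only when a module is about to consume its parameters.
import torch

def start_async_param_gather(param_buffer, local_shard, group):
    # async_op=True returns a work handle; other work proceeds until wait() is called.
    return torch.distributed.all_gather_into_tensor(
        param_buffer, local_shard, group=group, async_op=True
    )

def make_wait_pre_hook(handles_by_module):
    def pre_hook(module, inputs):
        handle = handles_by_module.get(module)
        if handle is not None:
            handle.wait()
            handles_by_module[module] = None
    return pre_hook

# Registered once, the hook fires before every module's forward pass:
# torch.nn.modules.module.register_module_forward_pre_hook(make_wait_pre_hook({}))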
model_idx = self.gbuf_idx_to_model_idx_map[gbuf_index] @@ -519,12 +520,9 @@ def __init__( self.num_all_gather_handles = len(self.all_gather_handle_index_to_bucket_index_map) self.overlap_param_gather = get_args().overlap_param_gather + self.remove_pre_hook_handle = None if self.overlap_param_gather: - self.remove_pre_hook_handle = torch.nn.modules.module.register_module_forward_pre_hook( - self._make_forward_pre_hook() - ) - else: - self.remove_pre_hook_handle = None + self.enable_pre_hook() self.update_successful = False @@ -534,6 +532,20 @@ def __init__( self.optimizer.param_groups = [g["orig_group"] for g in self.opt_group_ranges] self.optimizer.load_state_dict(self.optimizer.state_dict()) + def disable_pre_hook(self): + assert self.remove_pre_hook_handle is not None + self.remove_pre_hook_handle.remove() + self.remove_pre_hook_handle = None + + # Make sure all-gathers are completed as needed. + self._reset_metadata_and_sync_gather_all_model_params(force_sync=True) + + def enable_pre_hook(self): + assert self.remove_pre_hook_handle is None + self.remove_pre_hook_handle = torch.nn.modules.module.register_module_forward_pre_hook( + self._make_forward_pre_hook() + ) + def get_model_param_range_map(self, param): """ Given a model param, get the index sub-range of the param that this @@ -981,7 +993,7 @@ def get_model_param_buffer_dp_views(self): return view_items - def _dispatch_gather_model_params(self, all_gather_handle_index): + def _dispatch_gather_model_params(self, all_gather_handle_index, force_sync=False): """ All-gather updated model params. @@ -989,6 +1001,7 @@ def _dispatch_gather_model_params(self, all_gather_handle_index): tensors are dynamically allocated. After the all-gather, the params can be copied from the param buffer to the param. """ + async_op = self.overlap_param_gather and not force_sync if self.update_successful: data_parallel_rank = mpu.get_data_parallel_rank(with_context_parallel=True) data_parallel_group = mpu.get_data_parallel_group(with_context_parallel=True) @@ -1001,22 +1014,18 @@ def _dispatch_gather_model_params(self, all_gather_handle_index): (gbuf_index, dtype, bucket_index, pbuf, pbuf_views) = self.pbuf_view_items[ all_gather_handle_index ] - assert all_gather_handle_index == len(self.all_gather_handles) + assert all_gather_handle_index < len(self.all_gather_handles) all_gather_handle = torch.distributed._all_gather_base( - pbuf, - pbuf_views[data_parallel_rank], - group=data_parallel_group, - async_op=self.overlap_param_gather, + pbuf, pbuf_views[data_parallel_rank], group=data_parallel_group, async_op=async_op, ) - self.all_gather_handles.append(all_gather_handle) + self.all_gather_handles[all_gather_handle_index] = all_gather_handle assert self.all_gather_handle_index_to_bucket_index_map[all_gather_handle_index] == ( gbuf_index, dtype, bucket_index, ) - self.param_buffer_copied.append(False) - if not self.overlap_param_gather: + if not async_op: self._copy_params_from_param_buffer(all_gather_handle_index) def _make_forward_pre_hook(self): @@ -1062,9 +1071,7 @@ def _finish_param_sync_helper(self, all_gather_handle_index): # First check if there is an outstanding all-gather handle for this param. # If so, wait on the handle to ensure the communication is finished. 
- if all_gather_handle_index >= len(self.all_gather_handles): - return - + assert all_gather_handle_index < len(self.all_gather_handles) all_gather_handle = self.all_gather_handles[all_gather_handle_index] if all_gather_handle is not None: all_gather_handle.wait() @@ -1221,20 +1228,29 @@ def copy_group_params(model_groups, shard_main_groups): copy_group_params(self.model_float16_groups, self.shard_fp32_from_float16_groups) copy_group_params(self.model_fp32_groups, self.shard_fp32_groups) + def _reset_metadata_and_sync_gather_all_model_params(self, force_sync): + # Reset metadata needed to track results of all-gathers. + self.all_gather_handles = [None for _ in range(len(self.all_gather_handles))] + self.param_buffer_copied = [False for _ in range(len(self.param_buffer_copied))] + + # Launch synchronous all-gather if --overlap-param-gather is turned on or if force_sync + # is explicitly set to True (e.g., if we are going to turn off all-gather overlapping for + # validation / test iterations). + if not self.overlap_param_gather or force_sync: + for all_gather_handle_index in range(self.num_all_gather_handles): + self._dispatch_gather_model_params(all_gather_handle_index, force_sync=force_sync) + @torch.no_grad() def step(self, args, timers): self.update_successful, grad_norm, num_zeros_in_grad = super().step(args, timers) - # Reset metadata needed to track results of all-gathers. - self.all_gather_handles = [] - self.param_buffer_copied = [] - # If not overlapping all-gather for parameters, launch synchronous all-gather - # communication calls here. - if not self.overlap_param_gather: - timers('params-all-gather', log_level=1).start(barrier=args.barrier_with_L1_time) - for all_gather_handle_index in range(self.num_all_gather_handles): - self._dispatch_gather_model_params(all_gather_handle_index) - timers('params-all-gather').stop() + # communication calls here. If overlapping all-gather for parameters, the following + # call to _gather_all_model_params is a no-op: the first all-gather is launched + # asynchronously in the next optimizer.zero_grad() call and subsequent all-gathers + # are launched in the forward pre-hook. + timers('params-all-gather', log_level=1).start(barrier=args.barrier_with_L1_time) + self._reset_metadata_and_sync_gather_all_model_params(force_sync=False) + timers('params-all-gather').stop() return self.update_successful, grad_norm, num_zeros_in_grad diff --git a/megatron/training.py b/megatron/training.py index 29ab904c90..e906b86e58 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -828,6 +828,8 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, if args.eval_interval and iteration % args.eval_interval == 0 and \ args.do_valid: timers('interval-time').stop() + if args.use_distributed_optimizer and args.overlap_param_gather: + optimizer.disable_pre_hook() if args.manual_gc and args.manual_gc_eval: # Collect all objects. gc.collect() @@ -839,6 +841,8 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, if args.manual_gc and args.manual_gc_eval: # Collect only the objects created and used in evaluation. gc.collect(generation=0) + if args.use_distributed_optimizer and args.overlap_param_gather: + optimizer.enable_pre_hook() timers('interval-time', log_level=0).start(barrier=True) # Checkpointing @@ -904,6 +908,10 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, if wandb_writer: wandb_writer.finish() + # Close out pre-hooks if using distributed optimizer and overlapped param gather. 
+ if args.use_distributed_optimizer and args.overlap_param_gather: + optimizer.disable_pre_hook() + # If any exit conditions (signal handler, duration, iterations) have been reached, exit. if exit: sys.exit() From f634ccaa7ec82ce753a9f85623b84ed46b68e17f Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Wed, 24 Jan 2024 01:44:37 +0000 Subject: [PATCH 197/296] Add softmax for sinkhorn when k > 1. --- megatron/core/transformer/moe/router.py | 13 ++++++++++--- megatron/core/transformer/moe/token_dispatcher.py | 6 +++++- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/megatron/core/transformer/moe/router.py b/megatron/core/transformer/moe/router.py index b7e72965d1..0cf0ae6568 100644 --- a/megatron/core/transformer/moe/router.py +++ b/megatron/core/transformer/moe/router.py @@ -118,18 +118,25 @@ def sinkhorn_load_balancing(self, logits: torch.Tensor): Returns: torch.Tensor: The logits tensor after applying sinkhorn routing. """ + + def _sinkhorn_activation(logits): + if self.topk == 1: + logits = torch.sigmoid(logits) + else: # k > 1 + logits = torch.softmax(logits, dim=-1, dtype=torch.float32).type_as(logits) + return logits + assert self.config.moe_aux_loss_coeff == 0, "Sinkhorn routing does not support aux loss." - router_activation = torch.sigmoid if self.training: with torch.no_grad(): norm_logits = sinkhorn( logits.to(dtype=torch.float32) ) # explicit fp32 conversion for stability _, indices = torch.topk(norm_logits, k=self.topk, dim=1) - logits = router_activation(logits) + logits = _sinkhorn_activation(logits) scores = torch.gather(logits, 1, indices) else: - logits = router_activation(logits) + logits = _sinkhorn_activation(logits) scores, indices = torch.topk(logits, k=self.topk, dim=1) return scores, indices diff --git a/megatron/core/transformer/moe/token_dispatcher.py b/megatron/core/transformer/moe/token_dispatcher.py index c802adaeb9..15ef70fb03 100644 --- a/megatron/core/transformer/moe/token_dispatcher.py +++ b/megatron/core/transformer/moe/token_dispatcher.py @@ -99,6 +99,8 @@ def token_permutation( Args: hidden_states: input tokens of shape [SeqLen/TP, MBS, HiddenSize] + max_prob: probs of token assignment to local experts. + max_ind: token assignment to local experts. Returns: permuted_local_hidden_states: Permutation of tokens to local experts group. @@ -189,11 +191,13 @@ def token_unpermutation( Args: hidden_states: 2D tensor of shape [sum_tokens_of_all_local_experts, HiddenSize], ouput of local experts. + scores: 2D tensor of the probs of token assignment to local experts. indices: 2D tensor of the indices of `local_indices` (which holds the un-sorted expert indices of tokens that local expert can process) that give its sorted order along dim 0. global_local_map (optional): 2D tensor, a mask of mapping between global and local tokens where each element is True if it's between the local_expert_indices. Only useful - when cross device token permutation is enabled and **AllGahter** is performed. + when cross device token permutation is enabled and **AllGather** is performed. + bias (optional): The bias tensor. 
Returns: output_total: un-permuted updated hidden states output from all local experts From 9e773fafda2a33a7feb1257335132f72ab30b248 Mon Sep 17 00:00:00 2001 From: Zhengjiang Date: Wed, 24 Jan 2024 10:30:33 +0800 Subject: [PATCH 198/296] Change default value of --one-logger-run-name to None --- megatron/arguments.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/megatron/arguments.py b/megatron/arguments.py index cfda8c1786..2608fc5f53 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -748,7 +748,7 @@ def _add_logging_args(parser): group.add_argument('--one-logger-entity', type=str, default='hwinf_dcm', help='The one-logger username or team name. Will ignore if ' '--enable-one-logger is not set') - group.add_argument('--one-logger-run-name', type=str, default='', + group.add_argument('--one-logger-run-name', type=str, default=None, help='The one-logger run name displayed. Will ignore if ' '--enable-one-logger is not set') return parser From 95b214687b7b4e072a363d6c8524e193bc14fc30 Mon Sep 17 00:00:00 2001 From: Chen Cui Date: Tue, 23 Jan 2024 19:59:22 -0800 Subject: [PATCH 199/296] Packed Sequence --- .../common/embeddings/rotary_pos_embedding.py | 66 ++++++++++- megatron/core/models/gpt/gpt_model.py | 3 + megatron/core/packed_seq_params.py | 13 +++ megatron/core/transformer/attention.py | 91 ++++++++++----- .../custom_layers/transformer_engine.py | 24 +++- .../core/transformer/dot_product_attention.py | 6 + .../core/transformer/transformer_block.py | 23 +++- .../core/transformer/transformer_layer.py | 2 + .../transformer/test_attention_packed_seq.py | 106 ++++++++++++++++++ 9 files changed, 295 insertions(+), 39 deletions(-) create mode 100644 megatron/core/packed_seq_params.py create mode 100644 tests/unit_tests/transformer/test_attention_packed_seq.py diff --git a/megatron/core/models/common/embeddings/rotary_pos_embedding.py b/megatron/core/models/common/embeddings/rotary_pos_embedding.py index ee2260e3ae..35063738b4 100644 --- a/megatron/core/models/common/embeddings/rotary_pos_embedding.py +++ b/megatron/core/models/common/embeddings/rotary_pos_embedding.py @@ -2,17 +2,32 @@ from __future__ import annotations -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from megatron.core.transformer.transformer_config import TransformerConfig from megatron.core.transformer.transformer_block import TransformerBlock +import logging + import torch from torch import Tensor, nn from megatron.core import parallel_state +logger = logging.getLogger(__name__) + +try: + from apex.transformer.functional import ( + fused_apply_rotary_pos_emb, + fused_apply_rotary_pos_emb_thd, + ) + + HAVE_APPLY_ROPE_FUSION = True +except: + HAVE_APPLY_ROPE_FUSION = False + + __all__ = ['RotaryEmbedding', 'apply_rotary_pos_emb'] @@ -141,7 +156,7 @@ def _rotate_half(x: Tensor) -> Tensor: return torch.cat((-x2, x1), dim=-1) -def apply_rotary_pos_emb(t: Tensor, freqs: Tensor) -> Tensor: +def apply_rotary_pos_emb_bshd(t: Tensor, freqs: Tensor) -> Tensor: """Apply rotary positional embedding to input tensor T. check https://kexue.fm/archives/8265 for detailed formulas @@ -165,3 +180,50 @@ def apply_rotary_pos_emb(t: Tensor, freqs: Tensor) -> Tensor: t = (t * cos_) + (_rotate_half(t) * sin_) return torch.cat((t, t_pass), dim=-1) + + +def apply_rotary_pos_emb_thd(t: Tensor, cu_seqlens: Tensor, freqs: Tensor) -> Tensor: + """A baseline implementation of applying RoPE for `thd` format. 
+ + Args: + t (Tensor): Input tensor T is of shape [t, h, d] + cu_seqlens(Tensor): Cumulative sum of sequence lengths in a batch for `t`, + with shape [b + 1] and dtype torch.int32. + freqs (Tensor): Rotary Positional embedding tensor freq is of shape [max_s, 1, 1, d] + + Returns: + Tensor: Shape [t, h, d]. The input tensor after applying RoPE. + """ + + seqlens = (cu_seqlens[1:] - cu_seqlens[:-1]).tolist() + return torch.cat( + [ + apply_rotary_pos_emb_bshd(x.unsqueeze(1), freqs[: x.size(0)]) + for x in torch.split(t, seqlens) + ] + ).squeeze(1) + + +def apply_rotary_pos_emb( + t: Tensor, freqs: Tensor, fused: bool = False, cu_seqlens: Optional[Tensor] = None +): + """ + Reroute to the appropriate apply_rotary_pos_emb function depending on + fused/unfused kernels, or bshd (conventional) / thd (packed seq) format + """ + if fused and not HAVE_APPLY_ROPE_FUSION: + fused = False + logger.warning( + "set apply_rope_fusion to false because its implementation" + " is not included in Apex. Try upgrading to the latest version" + ) + if fused: + if cu_seqlens is None: + return fused_apply_rotary_pos_emb(t, freqs, transpose_output_memory=True) + else: + return fused_apply_rotary_pos_emb_thd(t, cu_seqlens, freqs) + else: + if cu_seqlens is None: + return apply_rotary_pos_emb_bshd(t, freqs) + else: + return apply_rotary_pos_emb_thd(t, cu_seqlens, freqs) diff --git a/megatron/core/models/gpt/gpt_model.py b/megatron/core/models/gpt/gpt_model.py index 39ef8c9cea..a6384d70c6 100644 --- a/megatron/core/models/gpt/gpt_model.py +++ b/megatron/core/models/gpt/gpt_model.py @@ -11,6 +11,7 @@ from megatron.core.models.common.embeddings.language_model_embedding import LanguageModelEmbedding from megatron.core.models.common.embeddings.rotary_pos_embedding import RotaryEmbedding from megatron.core.models.common.language_module.language_module import LanguageModule +from megatron.core.packed_seq_params import PackedSeqParams from megatron.core.transformer.enums import AttnMaskType, ModelType from megatron.core.transformer.spec_utils import ModuleSpec from megatron.core.transformer.transformer_block import TransformerBlock @@ -134,6 +135,7 @@ def forward( decoder_input: Tensor = None, labels: Tensor = None, inference_params: InferenceParams = None, + packed_seq_params: PackedSeqParams = None, extra_block_kwargs: dict = None, ) -> Tensor: """Forward function of the GPT Model This function passes the input tensors @@ -169,6 +171,7 @@ def forward( attention_mask=attention_mask, inference_params=inference_params, rotary_pos_emb=rotary_pos_emb, + packed_seq_params=packed_seq_params, **(extra_block_kwargs or {}), ) diff --git a/megatron/core/packed_seq_params.py b/megatron/core/packed_seq_params.py new file mode 100644 index 0000000000..478c17265f --- /dev/null +++ b/megatron/core/packed_seq_params.py @@ -0,0 +1,13 @@ +from dataclasses import dataclass + +from torch import Tensor + + +@dataclass +class PackedSeqParams: + # parameters to TEDotProductAttention and fused rope kernels for the `thd` (packed) sequence format, + qkv_format: str = None + cu_seqlens_q: Tensor = None + cu_seqlens_kv: Tensor = None + max_seqlen_q: Tensor = None + max_seqlen_kv: Tensor = None diff --git a/megatron/core/transformer/attention.py b/megatron/core/transformer/attention.py index 2d49dc3dad..7a7bb888ca 100644 --- a/megatron/core/transformer/attention.py +++ b/megatron/core/transformer/attention.py @@ -1,24 +1,11 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. 
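# A quick worked example of the packed (`thd`) layout assumed by PackedSeqParams and
# apply_rotary_pos_emb_thd above; tensor sizes here are illustrative only. cu_seqlens
# holds cumulative sequence lengths, so rotary positions restart at 0 for every packed
# sub-sequence rather than running across the whole pack.
import torch

cu_seqlens = torch.tensor([0, 6, 19, 22, 32], dtype=torch.int32)  # 4 sequences packed into t=32 tokens
seqlens = (cu_seqlens[1:] - cu_seqlens[:-1]).tolist()             # [6, 13, 3, 10]

t = torch.randn(32, 8, 64)         # [t, h, d]: 32 total tokens, 8 heads, head dim 64
freqs = torch.randn(32, 1, 1, 64)  # rotary table indexed by within-sequence position

chunks = torch.split(t, seqlens)   # one chunk per sequence; RoPE uses freqs[:len(chunk)] for each
assert [c.size(0) for c in chunks] == seqlens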
- -import logging from abc import ABC, abstractmethod from dataclasses import dataclass from importlib.metadata import version from typing import Union -from pkg_resources import packaging - -logger = logging.getLogger(__name__) - import torch - -try: - from apex.transformer.functional import fused_apply_rotary_pos_emb - - HAVE_APPLY_ROPE_FUSION = True -except: - HAVE_APPLY_ROPE_FUSION = False - +from pkg_resources import packaging from megatron.core import parallel_state, tensor_parallel from megatron.core.models.common.embeddings.rotary_pos_embedding import apply_rotary_pos_emb @@ -84,13 +71,6 @@ def __init__( self.num_attention_heads_per_partition = divide(self.config.num_attention_heads, world_size) self.num_query_groups_per_partition = divide(self.config.num_query_groups, world_size) - if self.config.apply_rope_fusion and not HAVE_APPLY_ROPE_FUSION: - self.config.apply_rope_fusion = False - logger.warning( - "set apply_rope_fusion to false because its implementation" - " is not included in Apex. Try upgrading to the latest version" - ) - self.core_attention = build_module( submodules.core_attention, config=self.config, @@ -116,7 +96,14 @@ def __init__( ) def _checkpointed_attention_forward( - self, query, key, value, attention_mask, rotary_pos_emb=None, attn_mask_type=None + self, + query, + key, + value, + attention_mask, + rotary_pos_emb=None, + attn_mask_type=None, + packed_seq_params=None, ): """Forward method with selective activation checkpointing.""" @@ -128,7 +115,12 @@ def custom_forward(*inputs): attn_mask_type = inputs[5] attn_mask_type = AttnMaskType(attn_mask_type.item()) output_ = self.core_attention( - query, key, value, attention_mask, attn_mask_type=attn_mask_type + query, + key, + value, + attention_mask, + attn_mask_type=attn_mask_type, + packed_seq_params=packed_seq_params, ) return output_ @@ -136,7 +128,14 @@ def custom_forward(*inputs): attn_mask_type = self.attn_mask_type attn_mask_type = torch.tensor([attn_mask_type.value], dtype=torch.int) hidden_states = tensor_parallel.checkpoint( - custom_forward, False, query, key, value, attention_mask, rotary_pos_emb, attn_mask_type + custom_forward, + False, + query, + key, + value, + attention_mask, + rotary_pos_emb, + attn_mask_type, ) return hidden_states @@ -239,6 +238,7 @@ def forward( key_value_states=None, inference_params=None, rotary_pos_emb=None, + packed_seq_params=None, ): # hidden_states: [sq, b, h] @@ -259,17 +259,29 @@ def forward( key, value, rotary_pos_emb, attn_mask_type = self._adjust_key_value_for_inference( inference_params, key, value, rotary_pos_emb ) + + if packed_seq_params is not None: + query = query.squeeze(1) + key = key.squeeze(1) + value = value.squeeze(1) + # ================================================ # relative positional embedding (rotary embedding) # ================================================ if rotary_pos_emb is not None: q_pos_emb, k_pos_emb = rotary_pos_emb - if self.config.apply_rope_fusion: - query = fused_apply_rotary_pos_emb(query, q_pos_emb, transpose_output_memory=True) - key = fused_apply_rotary_pos_emb(key, k_pos_emb, transpose_output_memory=True) + + if packed_seq_params is not None: + cu_seqlens_q = packed_seq_params.cu_seqlens_q + cu_seqlens_kv = packed_seq_params.cu_seqlens_kv else: - query = apply_rotary_pos_emb(query, q_pos_emb) - key = apply_rotary_pos_emb(key, k_pos_emb) + cu_seqlens_q = cu_seqlens_kv = None + query = apply_rotary_pos_emb( + query, q_pos_emb, fused=self.config.apply_rope_fusion, cu_seqlens=cu_seqlens_q + ) + key = apply_rotary_pos_emb( + 
key, k_pos_emb, fused=self.config.apply_rope_fusion, cu_seqlens=cu_seqlens_kv + ) # TODO, can apply positional embedding to value_layer so it has # absolute positional embedding. # otherwise, only relative positional embedding takes effect @@ -281,13 +293,30 @@ def forward( if self.checkpoint_core_attention: core_attn_out = self._checkpointed_attention_forward( - query, key, value, attention_mask, attn_mask_type=attn_mask_type + query, + key, + value, + attention_mask, + attn_mask_type=attn_mask_type, + packed_seq_params=packed_seq_params, ) else: core_attn_out = self.core_attention( - query, key, value, attention_mask, attn_mask_type=attn_mask_type + query, + key, + value, + attention_mask, + attn_mask_type=attn_mask_type, + packed_seq_params=packed_seq_params, ) + if packed_seq_params is not None: + # reshape to same output shape as unpacked case + # (t, np, hn) -> (t, b=1, h=np*hn) + # t is the pack size = sum (sq_i) + # note that batch is a dummy dimension in the packed case + core_attn_out = core_attn_out.reshape(core_attn_out.size(0), 1, -1) + # ================= # Output. [sq, b, h] # ================= diff --git a/megatron/core/transformer/custom_layers/transformer_engine.py b/megatron/core/transformer/custom_layers/transformer_engine.py index e52a9789f6..df886872f9 100644 --- a/megatron/core/transformer/custom_layers/transformer_engine.py +++ b/megatron/core/transformer/custom_layers/transformer_engine.py @@ -1,3 +1,4 @@ +import dataclasses import os from importlib.metadata import version from typing import Callable @@ -8,6 +9,7 @@ from torch import Tensor from megatron.core import ModelParallelConfig +from megatron.core.packed_seq_params import PackedSeqParams from megatron.core.parallel_state import ( get_context_parallel_global_ranks, get_context_parallel_group, @@ -361,7 +363,7 @@ def __init__( ): self.config = config self.te_forward_mask_type = False - self.qkv_format = 'sbhd' + self.qkv_format: str = 'sbhd' if self.config.apply_query_key_layer_scaling != bool( int(os.getenv('NVTE_APPLY_QK_LAYER_SCALING', '0')) @@ -438,16 +440,32 @@ def forward( value: Tensor, attention_mask: Tensor, attn_mask_type: AttnMaskType, + packed_seq_params: PackedSeqParams = None, ): + packed_seq_kwargs = ( + dataclasses.asdict(packed_seq_params) if packed_seq_params is not None else {} + ) + te_version = packaging.version.Version(version("transformer-engine")) + if te_version < packaging.version.Version("1.3.0"): + # TE 1.3.0 introduces precomputing max_seqlen to remove unnecessary kernels and D2H copies (#555) + # These two arguments did not exist prior to 1.3.0 + packed_seq_kwargs.pop("max_seqlen_q", None) + packed_seq_kwargs.pop("max_seqlen_kv", None) + if self.config.apply_rope_fusion and self.qkv_format == 'bshd': query, key, value = [x.transpose(0, 1).contiguous() for x in (query, key, value)] if self.te_forward_mask_type: core_attn_out = super().forward( - query, key, value, attention_mask, attn_mask_type=attn_mask_type.name + query, + key, + value, + attention_mask, + attn_mask_type=attn_mask_type.name, + **packed_seq_kwargs, ) else: - core_attn_out = super().forward(query, key, value, attention_mask) + core_attn_out = super().forward(query, key, value, attention_mask, **packed_seq_kwargs,) if self.config.apply_rope_fusion and self.qkv_format == 'bshd': return core_attn_out.transpose(0, 1) diff --git a/megatron/core/transformer/dot_product_attention.py b/megatron/core/transformer/dot_product_attention.py index 859c734306..967d0ce8d8 100644 --- 
a/megatron/core/transformer/dot_product_attention.py +++ b/megatron/core/transformer/dot_product_attention.py @@ -8,6 +8,7 @@ from megatron.core import parallel_state, tensor_parallel from megatron.core.fusions.fused_softmax import FusedScaleMaskSoftmax +from megatron.core.packed_seq_params import PackedSeqParams from megatron.core.transformer.enums import AttnMaskType from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.transformer_config import TransformerConfig @@ -93,7 +94,12 @@ def forward( value: Tensor, attention_mask: Tensor, attn_mask_type: AttnMaskType = None, + packed_seq_params: PackedSeqParams = None, ): + assert packed_seq_params is None, ( + "Packed sequence is not supported by DotProductAttention." + "Please use TEDotProductAttention instead." + ) # =================================== # Raw attention scores. [b, n/p, s, s] diff --git a/megatron/core/transformer/transformer_block.py b/megatron/core/transformer/transformer_block.py index 7d8c654b77..269dd57dbb 100644 --- a/megatron/core/transformer/transformer_block.py +++ b/megatron/core/transformer/transformer_block.py @@ -12,6 +12,7 @@ from megatron.core.dist_checkpointing.mapping import ShardedStateDict from megatron.core.dist_checkpointing.utils import replace_prefix_for_sharding from megatron.core.fusions.fused_layer_norm import FusedLayerNorm +from megatron.core.packed_seq_params import PackedSeqParams from megatron.core.transformer.custom_layers.transformer_engine import ( TENorm, get_cpu_offload_context, @@ -183,12 +184,18 @@ def _checkpointed_forward( context: Tensor, context_mask: Tensor, rotary_pos_emb: Tensor, + packed_seq_params: PackedSeqParams, ): """Forward method with activation checkpointing.""" def custom(start: int, end: int): def custom_forward( - hidden_states, attention_mask, context, context_mask, rotary_pos_emb, + hidden_states, + attention_mask, + context, + context_mask, + rotary_pos_emb, + packed_seq_params, ): for index in range(start, end): layer = self._get_layer(index) @@ -199,6 +206,7 @@ def custom_forward( context_mask=context_mask, rotary_pos_emb=rotary_pos_emb, inference_params=None, + packed_seq_params=packed_seq_params, ) return hidden_states, context @@ -218,6 +226,7 @@ def custom_forward( context, context_mask, rotary_pos_emb, + packed_seq_params, ) l += self.config.recompute_num_layers @@ -236,10 +245,16 @@ def custom_forward( context, context_mask, rotary_pos_emb, + packed_seq_params, ) else: hidden_states, context = custom(l, l + 1)( - hidden_states, attention_mask, context, context_mask, rotary_pos_emb, + hidden_states, + attention_mask, + context, + context_mask, + rotary_pos_emb, + packed_seq_params, ) else: raise ValueError("Invalid activation recompute method.") @@ -264,6 +279,7 @@ def forward( context_mask: Tensor = None, rotary_pos_emb: Tensor = None, inference_params: InferenceParams = None, + packed_seq_params: PackedSeqParams = None, ): # hidden_states (float): [s, b, h] # attention_mask (bool): [1, 1, s, s] @@ -332,10 +348,10 @@ def forward( context=context, context_mask=context_mask, rotary_pos_emb=rotary_pos_emb, + packed_seq_params=packed_seq_params, ) else: for layer in self.layers: - with self.offload_context: hidden_states, context = layer( hidden_states=hidden_states, @@ -344,6 +360,7 @@ def forward( context_mask=context_mask, rotary_pos_emb=rotary_pos_emb, inference_params=inference_params, + packed_seq_params=packed_seq_params, ) if ( diff --git a/megatron/core/transformer/transformer_layer.py 
b/megatron/core/transformer/transformer_layer.py index b37a983284..612c333a1c 100644 --- a/megatron/core/transformer/transformer_layer.py +++ b/megatron/core/transformer/transformer_layer.py @@ -145,6 +145,7 @@ def forward( context_mask=None, rotary_pos_emb=None, inference_params=None, + packed_seq_params=None, ): # hidden_states: [s, b, h] @@ -160,6 +161,7 @@ def forward( attention_mask=attention_mask, inference_params=inference_params, rotary_pos_emb=rotary_pos_emb, + packed_seq_params=packed_seq_params, ) # TODO: could we move `bias_dropout_add_exec_handler` itself diff --git a/tests/unit_tests/transformer/test_attention_packed_seq.py b/tests/unit_tests/transformer/test_attention_packed_seq.py new file mode 100644 index 0000000000..75e77c0de1 --- /dev/null +++ b/tests/unit_tests/transformer/test_attention_packed_seq.py @@ -0,0 +1,106 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + +import pytest + +import torch + +from megatron.core.packed_seq_params import PackedSeqParams +from megatron.core.transformer.attention import SelfAttention +from megatron.core.transformer.enums import AttnMaskType +from tests.unit_tests.test_utilities import Utils +from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed +from megatron.core.transformer.transformer_config import TransformerConfig +from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec + +# Note: this test requires TE >= 0.13 as well as Flash Attention to run +# FIXME this unit test doesn't work in the current test container. to be fixed soon +""" +def make_test_packed_seq_params(sequence_length): + cu_seqlens = torch.IntTensor([0, 6, 19, 22, sequence_length]).cuda() + seqlens = cu_seqlens[1:] - cu_seqlens[:-1] + max_seqlen, _ = seqlens.max(dim=0, keepdim=True) + packed_seq_params = PackedSeqParams( + cu_seqlens_q=cu_seqlens, + cu_seqlens_kv=cu_seqlens, + max_seqlen_q=max_seqlen, + max_seqlen_kv=max_seqlen, + qkv_format='thd', + ) + return packed_seq_params + + +class TestParallelAttentionWithPackedSequence: + + def setup_method(self, method): + Utils.initialize_model_parallel(1,1) + model_parallel_cuda_manual_seed(123) + # use BF16 and a large enough hidden size to enable FlashAttention for thd format. 
+ self.transformer_config = TransformerConfig(num_layers=2, hidden_size=64, num_attention_heads=4, use_cpu_initialization=True, + bf16=True, params_dtype=torch.bfloat16, + pipeline_dtype=torch.bfloat16, autocast_dtype=torch.bfloat16) + self.parallel_attention = SelfAttention(self.transformer_config, + get_gpt_layer_with_transformer_engine_spec().submodules.self_attention.submodules, + layer_number=1, + attn_mask_type=AttnMaskType.causal) + + def teardown_method(self, method): + Utils.destroy_model_parallel() + + def test_cpu_forward(self): + # we can't currently do this because the global memory buffer is on GPU + pass + + def test_gpu_forward(self): + + config = self.parallel_attention.config + sequence_length = 32 + micro_batch_size = 1 + + self.parallel_attention.cuda() + + # [sequence length, batch size, hidden size] + hidden_states = torch.ones((sequence_length, micro_batch_size, self.parallel_attention.config.hidden_size)) + hidden_states = hidden_states.cuda().to(torch.bfloat16) + + attention_mask = None + + packed_seq_params = make_test_packed_seq_params(sequence_length) + output, bias = self.parallel_attention(hidden_states, attention_mask, packed_seq_params=packed_seq_params) + + assert config.recompute_granularity is None + assert output.shape[0] == sequence_length + assert output.shape[1] == micro_batch_size + assert output.shape[2] == config.hidden_size + assert bias.shape[0] == config.hidden_size + + def test_checkpointed_gpu_forward(self): + transformer_config = self.transformer_config + transformer_config.recompute_granularity='selective' + checkpointed_parallel_attention = SelfAttention(transformer_config, + get_gpt_layer_with_transformer_engine_spec().submodules.self_attention.submodules, + layer_number=1, + attn_mask_type=AttnMaskType.causal) + config = checkpointed_parallel_attention.config + + sequence_length = 32 + micro_batch_size = 1 + + checkpointed_parallel_attention.cuda() + + # [sequence length, batch size, hidden size] + hidden_states = torch.ones( + (sequence_length, micro_batch_size, checkpointed_parallel_attention.config.hidden_size) + ) + hidden_states = hidden_states.cuda().to(torch.bfloat16) + + attention_mask = None + + packed_seq_params = make_test_packed_seq_params(sequence_length) + output, bias = checkpointed_parallel_attention(hidden_states, attention_mask, packed_seq_params=packed_seq_params) + + assert config.recompute_granularity == 'selective' + assert output.shape[0] == sequence_length + assert output.shape[1] == micro_batch_size + assert output.shape[2] == config.hidden_size + assert bias.shape[0] == config.hidden_size +""" \ No newline at end of file From 83c0423549c780c8854cee841107ea3e1d4c9ad0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20B=C5=82a=C5=BC?= Date: Fri, 5 Jan 2024 10:08:19 +0100 Subject: [PATCH 200/296] Add replica_id field to factories --- megatron/core/dist_checkpointing/mapping.py | 3 ++- megatron/core/transformer/mlp.py | 15 +++++++++++---- .../unit_tests/dist_checkpointing/test_mapping.py | 6 +++--- .../dist_checkpointing/test_serialization.py | 10 +++++----- 4 files changed, 21 insertions(+), 13 deletions(-) diff --git a/megatron/core/dist_checkpointing/mapping.py b/megatron/core/dist_checkpointing/mapping.py index a8307b7c24..ad1b59dac6 100644 --- a/megatron/core/dist_checkpointing/mapping.py +++ b/megatron/core/dist_checkpointing/mapping.py @@ -245,9 +245,10 @@ class ShardedTensorFactory: data: torch.Tensor build_fn: Callable[[str, torch.Tensor], ShardedStateDict] merge_fn: Callable[[StateDict], torch.Tensor] 
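# A compact sketch of a factory that uses the new replica_id field (hypothetical key and
# split scheme, not a Megatron-LM module): the build function expands one logical tensor
# into shards that inherit the caller's replica_id, and the merge function inverts the
# split on load.
import torch
from megatron.core.dist_checkpointing import ShardedTensor
from megatron.core.dist_checkpointing.mapping import ShardedTensorFactory

def interleave_build_fn(key, tensor, replica_id):
    # Assumes a 1-D tensor for simplicity of the illustration.
    return {
        'even': ShardedTensor.from_rank_offsets(key + '.even', tensor[0::2], replica_id=replica_id),
        'odd': ShardedTensor.from_rank_offsets(key + '.odd', tensor[1::2], replica_id=replica_id),
    }

def interleave_merge_fn(sub_state_dict):
    # Inverse of the build: interleave the two loaded halves back into one tensor.
    merged = torch.empty(sub_state_dict['even'].numel() + sub_state_dict['odd'].numel(),
                         dtype=sub_state_dict['even'].dtype)
    merged[0::2] = sub_state_dict['even']
    merged[1::2] = sub_state_dict['odd']
    return merged

factory = ShardedTensorFactory(
    'layer.weight', torch.arange(10.), interleave_build_fn, interleave_merge_fn, replica_id=0
)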
+ replica_id: ReplicaId = 0 def build(self): - return self.build_fn(self.key, self.data) + return self.build_fn(self.key, self.data, self.replica_id) def apply_factories(sharded_state_dict: ShardedStateDict): diff --git a/megatron/core/transformer/mlp.py b/megatron/core/transformer/mlp.py index 5e32743268..de593ce03d 100644 --- a/megatron/core/transformer/mlp.py +++ b/megatron/core/transformer/mlp.py @@ -8,7 +8,11 @@ from megatron.core import parallel_state from megatron.core.dist_checkpointing import ShardedTensor -from megatron.core.dist_checkpointing.mapping import ShardedStateDict, ShardedTensorFactory +from megatron.core.dist_checkpointing.mapping import ( + ReplicaId, + ShardedStateDict, + ShardedTensorFactory, +) from megatron.core.fusions.fused_bias_gelu import bias_gelu_impl from megatron.core.fusions.fused_bias_swiglu import bias_swiglu_impl from megatron.core.transformer.module import MegatronModule @@ -144,10 +148,9 @@ def _sharded_state_dict_for_glu( tp_size = parallel_state.get_tensor_model_parallel_world_size() tp_shard_axis = 0 - replica_id = prev_sh_ten.replica_id prepend_axis_num = len(sharded_offsets) - def sh_ten_build_fn(key: str, t: torch.Tensor): + def sh_ten_build_fn(key: str, t: torch.Tensor, replica_id: ReplicaId): offset_w = (tp_shard_axis + prepend_axis_num, tp_rank, tp_size * 2) offset_v = (tp_shard_axis + prepend_axis_num, tp_size + tp_rank, tp_size * 2) with torch.no_grad(): @@ -176,6 +179,10 @@ def sh_ten_merge_fn(sub_state_dict): return torch.cat(sub_state_dict) sharded_state_dict[weight_key] = ShardedTensorFactory( - prev_sh_ten.key, prev_sh_ten.data, sh_ten_build_fn, sh_ten_merge_fn + prev_sh_ten.key, + prev_sh_ten.data, + sh_ten_build_fn, + sh_ten_merge_fn, + prev_sh_ten.replica_id, ) return sharded_state_dict diff --git a/tests/unit_tests/dist_checkpointing/test_mapping.py b/tests/unit_tests/dist_checkpointing/test_mapping.py index 5e55669828..fcd742ee65 100644 --- a/tests/unit_tests/dist_checkpointing/test_mapping.py +++ b/tests/unit_tests/dist_checkpointing/test_mapping.py @@ -38,10 +38,10 @@ def test_from_rank_offsets_constructor(self, dtype=torch.float, device='cuda'): class TestShardedTensorFactory: def test_build_and_merge(self): - def build_fn(key, tensor): + def build_fn(key, tensor, replica_id): return { - 'level2_a': ShardedTensor.from_rank_offsets(key + 'part1', tensor + 1), - 'level2_b': ShardedTensor.from_rank_offsets(key + 'part2', tensor + 2) + 'level2_a': ShardedTensor.from_rank_offsets(key + 'part1', tensor + 1, replica_id=replica_id), + 'level2_b': ShardedTensor.from_rank_offsets(key + 'part2', tensor + 2, replica_id=replica_id) } # state_dict will be modified in-place diff --git a/tests/unit_tests/dist_checkpointing/test_serialization.py b/tests/unit_tests/dist_checkpointing/test_serialization.py index 25dd9e0a91..233215d56a 100644 --- a/tests/unit_tests/dist_checkpointing/test_serialization.py +++ b/tests/unit_tests/dist_checkpointing/test_serialization.py @@ -192,11 +192,11 @@ def test_load_tensors_metadata(self, tmp_path_dist_ckpt): def test_can_mix_sharded_tensors_and_factories(self, tmp_path_dist_ckpt): Utils.initialize_model_parallel(1, 1) - def _build_fn(key, tensor): + def _build_fn(key, tensor, replica_id): return [ - ShardedTensor.from_rank_offsets(key + 'part1', tensor, replica_id=Utils.rank), - ShardedTensor.from_rank_offsets(key + 'part2', tensor, replica_id=Utils.rank), - ShardedTensor.from_rank_offsets(key + 'part3', tensor, replica_id=Utils.rank), + ShardedTensor.from_rank_offsets(key + 'part1', tensor, 
replica_id=replica_id), + ShardedTensor.from_rank_offsets(key + 'part2', tensor, replica_id=replica_id), + ShardedTensor.from_rank_offsets(key + 'part3', tensor, replica_id=replica_id), ] # state dict can be modified by dist_checkpointing.save, so two copies @@ -205,7 +205,7 @@ def get_sharded_state_dict(base=0): ShardedTensor.from_rank_offsets('A', torch.arange(2) + base, replica_id=Utils.rank), ShardedTensor.from_rank_offsets('B', torch.arange(3) + base, replica_id=Utils.rank), ShardedTensor.from_rank_offsets('C', torch.arange(4) + base, replica_id=Utils.rank), - ShardedTensorFactory('D', torch.arange(5) + base, _build_fn, sum), + ShardedTensorFactory('D', torch.arange(5) + base, _build_fn, sum, replica_id=Utils.rank), ]} with TempNamedDir(tmp_path_dist_ckpt / 'test_can_mix_sharded_tensors_and_factories') as ckpt_dir: From 00358e5edb38dd75ef8d64baac9032bb569f7c78 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20B=C5=82a=C5=BC?= Date: Thu, 4 Jan 2024 19:25:29 +0100 Subject: [PATCH 201/296] Implement sharded_state_dict for SwitchMLP --- megatron/core/transformer/moe/experts.py | 41 ++++++++++ megatron/core/transformer/moe/switch_mlp.py | 0 .../models/test_switch_mlp.py | 79 +++++++++++++++++++ tests/unit_tests/test_utilities.py | 4 +- 4 files changed, 122 insertions(+), 2 deletions(-) create mode 100644 megatron/core/transformer/moe/switch_mlp.py create mode 100644 tests/unit_tests/dist_checkpointing/models/test_switch_mlp.py diff --git a/megatron/core/transformer/moe/experts.py b/megatron/core/transformer/moe/experts.py index cc8afcd322..6a6f03491b 100644 --- a/megatron/core/transformer/moe/experts.py +++ b/megatron/core/transformer/moe/experts.py @@ -5,6 +5,7 @@ from torch.nn.parameter import Parameter from megatron.core import parallel_state +from megatron.core.dist_checkpointing.utils import replace_prefix_for_sharding from megatron.core.tensor_parallel.layers import ( _initialize_affine_weight_cpu, _initialize_affine_weight_gpu, @@ -178,3 +179,43 @@ def forward(self, permuted_local_hidden_states, tokens_per_expert): output_bias_local[start:end, :] = output_bias return output_local, output_bias_local + + def sharded_state_dict(self, prefix='', sharded_offsets=()): + """ Maps local expert to global experts. """ + sharded_state_dict = {} + num_global_experts = ( + parallel_state.get_expert_model_parallel_world_size() * self.num_local_experts + ) + local_expert_indices_offset = ( + parallel_state.get_expert_model_parallel_rank() * self.num_local_experts + ) + + expert_sharded_prefix = f'{prefix}experts.' + for expert_local_idx, expert in enumerate(self.local_experts): + expert_global_idx = local_expert_indices_offset + expert_local_idx + expert_state_dict_prefix = f'{prefix}local_experts.{expert_local_idx}.' 
+ expert_sharded_offsets = ( + *sharded_offsets, + (len(sharded_offsets), expert_global_idx, num_global_experts), + ) + + expert_state_dict = expert.sharded_state_dict( + expert_state_dict_prefix, expert_sharded_offsets + ) + # Remove expert layers indexing from sharded keys + replace_prefix_for_sharding( + expert_state_dict, expert_state_dict_prefix, expert_sharded_prefix + ) + # Adjust replica ids - replication along DP modulo EP + for k, sh_ten in expert_state_dict.items(): + replica_id = sh_ten.replica_id + assert ( + len(replica_id) == 3 + ), f'Expected replica_id for {k} to be in (PP, TP, DP) format, got: {replica_id}' + sh_ten.replica_id = ( + *replica_id[:2], + parallel_state.get_data_modulo_expert_parallel_rank(), + ) + + sharded_state_dict.update(expert_state_dict) + return sharded_state_dict diff --git a/megatron/core/transformer/moe/switch_mlp.py b/megatron/core/transformer/moe/switch_mlp.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/unit_tests/dist_checkpointing/models/test_switch_mlp.py b/tests/unit_tests/dist_checkpointing/models/test_switch_mlp.py new file mode 100644 index 0000000000..f7a6fd8e72 --- /dev/null +++ b/tests/unit_tests/dist_checkpointing/models/test_switch_mlp.py @@ -0,0 +1,79 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + +import pytest +import torch + +from megatron.core import parallel_state +from megatron.core.dist_checkpointing import save, load, load_plain_tensors +from megatron.core.dist_checkpointing.dict_utils import diff +from megatron.core.models.gpt.gpt_layer_specs import \ + get_gpt_layer_with_transformer_engine_spec +from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed +from megatron.core.transformer.moe.experts import SequentialMLP +from megatron.core.transformer.transformer_config import TransformerConfig +from tests.unit_tests.dist_checkpointing import TempNamedDir +from tests.unit_tests.test_utilities import Utils + + +def initialize_switch_mlp(seed, **config_kwargs): + torch.manual_seed(seed) + model_parallel_cuda_manual_seed(seed) + + pp_size = parallel_state.get_pipeline_model_parallel_world_size() + num_moe_experts = 8 + num_local_experts = num_moe_experts // parallel_state.get_expert_model_parallel_world_size() + default_config_kwargs = dict(num_layers=pp_size, hidden_size=12, num_attention_heads=4, num_moe_experts=num_moe_experts, use_cpu_initialization=True) + default_config_kwargs.update(**config_kwargs) + transformer_config = TransformerConfig(**default_config_kwargs) + transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec(num_experts=num_moe_experts, moe_grouped_gemm=False) + model = SequentialMLP(num_local_experts, + transformer_config, + transformer_layer_spec.submodules.mlp.submodules) + return model + + +def get_pp_offsets(): + pp_rank = parallel_state.get_pipeline_model_parallel_rank() + pp_size = parallel_state.get_pipeline_model_parallel_world_size() + return ((0, pp_rank, pp_size),) + + +class TestSwitchMLPReconfiguration: + @pytest.mark.parametrize("src_tp_pp_exp,dest_tp_pp_exp,", [ + # changing PP is impossible because the number of layers must be the same + ((2, 4, 1), (2, 4, 1)), + ((1, 1, 1), (1, 1, 1)), + ((1, 1, 1), (1, 1, 4)), + ((1, 1, 8), (1, 1, 2)), + ((2, 2, 2), (4, 2, 1)), + ((1, 1, 4), (8, 1, 1)), + ((1, 8, 1), (1, 8, 1)), + ((1, 1, 4), (2, 1, 1)), + ]) + def test_parallel_reconfiguration_e2e(self, tmp_path_dist_ckpt, src_tp_pp_exp, dest_tp_pp_exp): + """ Test model saving and loading with different TP/PP/expert 
parallelism """ + src_tp, src_pp, src_exp = src_tp_pp_exp + dest_tp, dest_pp, dest_exp = dest_tp_pp_exp + with TempNamedDir(tmp_path_dist_ckpt / 'test_switch_mlp_reconfiguration_model_A') as ckpt_dir_A, \ + TempNamedDir(tmp_path_dist_ckpt / 'test_switch_mlp_reconfiguration_model_B') as ckpt_dir_B: + # Save checkpoint A + Utils.initialize_model_parallel(src_tp, src_pp, expert_model_parallel_size=src_exp) + model_A = initialize_switch_mlp(1) + sharded_state_dict = model_A.sharded_state_dict(sharded_offsets=get_pp_offsets()) + save(sharded_state_dict, ckpt_dir_A) + Utils.destroy_model_parallel() + + # Load checkpoint A with different TP/PP/expert and save as checkpoint B + Utils.initialize_model_parallel(dest_tp, dest_pp, expert_model_parallel_size=dest_exp) + model_B = initialize_switch_mlp(2) + state_dict = load(model_B.sharded_state_dict(sharded_offsets=get_pp_offsets()), ckpt_dir_A) + model_B.load_state_dict(state_dict) + save(model_B.sharded_state_dict(sharded_offsets=get_pp_offsets()), ckpt_dir_B) + Utils.destroy_model_parallel() + + # Test both checkpoints are equal + Utils.initialize_model_parallel(1, 1) + state_dict_A = load_plain_tensors(ckpt_dir_A) + state_dict_B = load_plain_tensors(ckpt_dir_B) + diffs = diff(state_dict_A, state_dict_B) + assert not any(map(bool, diffs)), diffs \ No newline at end of file diff --git a/tests/unit_tests/test_utilities.py b/tests/unit_tests/test_utilities.py index b35c77b58d..f5abd3987f 100644 --- a/tests/unit_tests/test_utilities.py +++ b/tests/unit_tests/test_utilities.py @@ -23,8 +23,8 @@ def destroy_model_parallel(): torch.distributed.barrier() @staticmethod - def initialize_model_parallel(tensor_model_parallel_size = 1, pipeline_model_parallel_size = 1, virtual_pipeline_model_parallel_size = None, pipeline_model_parallel_split_rank = None): + def initialize_model_parallel(tensor_model_parallel_size = 1, pipeline_model_parallel_size = 1, virtual_pipeline_model_parallel_size = None, pipeline_model_parallel_split_rank = None, **kwargs): ps.destroy_model_parallel() if not torch.distributed.is_initialized(): Utils.initialize_distributed() - ps.initialize_model_parallel(tensor_model_parallel_size, pipeline_model_parallel_size, virtual_pipeline_model_parallel_size, pipeline_model_parallel_split_rank) \ No newline at end of file + ps.initialize_model_parallel(tensor_model_parallel_size, pipeline_model_parallel_size, virtual_pipeline_model_parallel_size, pipeline_model_parallel_split_rank, **kwargs) \ No newline at end of file From 431ce99320ea7efa457813092040f85aaf260bbd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20B=C5=82a=C5=BC?= Date: Fri, 5 Jan 2024 10:21:21 +0100 Subject: [PATCH 202/296] Handle MoE with GeLU --- megatron/core/transformer/mlp.py | 4 +-- .../models/test_switch_mlp.py | 33 +++++++++++-------- 2 files changed, 21 insertions(+), 16 deletions(-) diff --git a/megatron/core/transformer/mlp.py b/megatron/core/transformer/mlp.py index de593ce03d..a7df9caa45 100644 --- a/megatron/core/transformer/mlp.py +++ b/megatron/core/transformer/mlp.py @@ -162,7 +162,7 @@ def sh_ten_build_fn(key: str, t: torch.Tensor, replica_id: ReplicaId): *sharded_offsets, offset_w, replica_id=replica_id, - prepend_axis_num=1, + prepend_axis_num=prepend_axis_num, ), ShardedTensor.from_rank_offsets( key, @@ -170,7 +170,7 @@ def sh_ten_build_fn(key: str, t: torch.Tensor, replica_id: ReplicaId): *sharded_offsets, offset_v, replica_id=replica_id, - prepend_axis_num=1, + prepend_axis_num=prepend_axis_num, ), ] diff --git 
a/tests/unit_tests/dist_checkpointing/models/test_switch_mlp.py b/tests/unit_tests/dist_checkpointing/models/test_switch_mlp.py index f7a6fd8e72..bf13162066 100644 --- a/tests/unit_tests/dist_checkpointing/models/test_switch_mlp.py +++ b/tests/unit_tests/dist_checkpointing/models/test_switch_mlp.py @@ -15,14 +15,15 @@ from tests.unit_tests.test_utilities import Utils -def initialize_switch_mlp(seed, **config_kwargs): +def initialize_switch_mlp(seed, glu=True, **config_kwargs): torch.manual_seed(seed) model_parallel_cuda_manual_seed(seed) pp_size = parallel_state.get_pipeline_model_parallel_world_size() num_moe_experts = 8 num_local_experts = num_moe_experts // parallel_state.get_expert_model_parallel_world_size() - default_config_kwargs = dict(num_layers=pp_size, hidden_size=12, num_attention_heads=4, num_moe_experts=num_moe_experts, use_cpu_initialization=True) + default_config_kwargs = dict(num_layers=pp_size, hidden_size=12, num_attention_heads=4, num_moe_experts=num_moe_experts, use_cpu_initialization=True, + gated_linear_unit=glu) default_config_kwargs.update(**config_kwargs) transformer_config = TransformerConfig(**default_config_kwargs) transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec(num_experts=num_moe_experts, moe_grouped_gemm=False) @@ -39,18 +40,22 @@ def get_pp_offsets(): class TestSwitchMLPReconfiguration: - @pytest.mark.parametrize("src_tp_pp_exp,dest_tp_pp_exp,", [ + @pytest.mark.parametrize("src_tp_pp_exp,dest_tp_pp_exp,use_glu", [ # changing PP is impossible because the number of layers must be the same - ((2, 4, 1), (2, 4, 1)), - ((1, 1, 1), (1, 1, 1)), - ((1, 1, 1), (1, 1, 4)), - ((1, 1, 8), (1, 1, 2)), - ((2, 2, 2), (4, 2, 1)), - ((1, 1, 4), (8, 1, 1)), - ((1, 8, 1), (1, 8, 1)), - ((1, 1, 4), (2, 1, 1)), + ((2, 4, 1), (2, 4, 1), False), + ((1, 1, 1), (1, 1, 1), False), + ((1, 1, 1), (1, 1, 4), False), + ((1, 1, 8), (1, 1, 2), False), + ((2, 2, 2), (4, 2, 1), False), + ((1, 1, 4), (8, 1, 1), False), + ((1, 8, 1), (1, 8, 1), False), + ((1, 1, 4), (2, 1, 1), False), + ((1, 1, 1), (1, 1, 1), True), + ((1, 1, 1), (1, 1, 4), True), + ((1, 1, 1), (2, 1, 1), True), + ((1, 1, 4), (8, 1, 1), True), ]) - def test_parallel_reconfiguration_e2e(self, tmp_path_dist_ckpt, src_tp_pp_exp, dest_tp_pp_exp): + def test_parallel_reconfiguration_e2e(self, tmp_path_dist_ckpt, src_tp_pp_exp, dest_tp_pp_exp, use_glu): """ Test model saving and loading with different TP/PP/expert parallelism """ src_tp, src_pp, src_exp = src_tp_pp_exp dest_tp, dest_pp, dest_exp = dest_tp_pp_exp @@ -58,14 +63,14 @@ def test_parallel_reconfiguration_e2e(self, tmp_path_dist_ckpt, src_tp_pp_exp, d TempNamedDir(tmp_path_dist_ckpt / 'test_switch_mlp_reconfiguration_model_B') as ckpt_dir_B: # Save checkpoint A Utils.initialize_model_parallel(src_tp, src_pp, expert_model_parallel_size=src_exp) - model_A = initialize_switch_mlp(1) + model_A = initialize_switch_mlp(1, use_glu) sharded_state_dict = model_A.sharded_state_dict(sharded_offsets=get_pp_offsets()) save(sharded_state_dict, ckpt_dir_A) Utils.destroy_model_parallel() # Load checkpoint A with different TP/PP/expert and save as checkpoint B Utils.initialize_model_parallel(dest_tp, dest_pp, expert_model_parallel_size=dest_exp) - model_B = initialize_switch_mlp(2) + model_B = initialize_switch_mlp(2, use_glu) state_dict = load(model_B.sharded_state_dict(sharded_offsets=get_pp_offsets()), ckpt_dir_A) model_B.load_state_dict(state_dict) save(model_B.sharded_state_dict(sharded_offsets=get_pp_offsets()), ckpt_dir_B) From 
e2fd6cad32278fb2a16083fb297d4b87fc085543 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20B=C5=82a=C5=BC?= Date: Thu, 18 Jan 2024 15:22:23 +0100 Subject: [PATCH 203/296] Add __init__ to resolve test name clash --- tests/unit_tests/dist_checkpointing/models/__init__.py | 0 tests/unit_tests/transformer/moe/__init__.py | 0 2 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 tests/unit_tests/dist_checkpointing/models/__init__.py create mode 100644 tests/unit_tests/transformer/moe/__init__.py diff --git a/tests/unit_tests/dist_checkpointing/models/__init__.py b/tests/unit_tests/dist_checkpointing/models/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/unit_tests/transformer/moe/__init__.py b/tests/unit_tests/transformer/moe/__init__.py new file mode 100644 index 0000000000..e69de29bb2 From 472d54ed23a51f055aa0f99fef8d1783101eb78e Mon Sep 17 00:00:00 2001 From: Jared Casper Date: Wed, 24 Jan 2024 01:11:16 -0800 Subject: [PATCH 204/296] Only print warning about fused rotary position embedding once. --- .../models/common/embeddings/rotary_pos_embedding.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/megatron/core/models/common/embeddings/rotary_pos_embedding.py b/megatron/core/models/common/embeddings/rotary_pos_embedding.py index 35063738b4..5a48ace83e 100644 --- a/megatron/core/models/common/embeddings/rotary_pos_embedding.py +++ b/megatron/core/models/common/embeddings/rotary_pos_embedding.py @@ -213,10 +213,12 @@ def apply_rotary_pos_emb( """ if fused and not HAVE_APPLY_ROPE_FUSION: fused = False - logger.warning( - "set apply_rope_fusion to false because its implementation" - " is not included in Apex. Try upgrading to the latest version" - ) + if not getattr(apply_rotary_pos_emb, "printed_fused_warning", False): + logger.warning( + "Setting apply_rope_fusion to false because its implementation" + " is not included in Apex. 
Try upgrading to the latest version" + ) + apply_rotary_pos_emb.printed_fused_warning = True if fused: if cu_seqlens is None: return fused_apply_rotary_pos_emb(t, freqs, transpose_output_memory=True) From 98fbb428435bcaa10f73443a8fcfb634e00aec94 Mon Sep 17 00:00:00 2001 From: Joel Lamy-Poirier Date: Wed, 24 Jan 2024 16:49:04 -0500 Subject: [PATCH 205/296] Fix --- megatron/core/datasets/gpt_dataset.py | 7 +++---- megatron/data/data_samplers.py | 5 ----- megatron/model/transformer.py | 3 --- megatron/optimizer/optimizer.py | 8 -------- megatron/training.py | 12 +----------- pretrain_gpt.py | 9 +-------- 6 files changed, 5 insertions(+), 39 deletions(-) diff --git a/megatron/core/datasets/gpt_dataset.py b/megatron/core/datasets/gpt_dataset.py index 5b38eae4eb..b12fdedf8e 100644 --- a/megatron/core/datasets/gpt_dataset.py +++ b/megatron/core/datasets/gpt_dataset.py @@ -603,8 +603,6 @@ def _build_shuffle_index( return numpy.concatenate((shuffle_idx_first, shuffle_idx_last)) -<<<<<<< HEAD - # From https://github.com/EleutherAI/gpt-neox/blob/FIM-clean/megatron/data/gpt2_dataset.py#L339 def permute(sample, np_rng, fim_rate, fim_spm_rate, tokenizer, truncate_or_pad=True, suffix_tok_id=None, prefix_tok_id=None, middle_tok_id=None, pad_tok_id=None): @@ -668,7 +666,8 @@ def permute(sample, np_rng, fim_rate, fim_spm_rate, tokenizer, truncate_or_pad=T new_sample = sample return new_sample -======= + + def _get_ltor_masks_and_position_ids( data: torch.Tensor, eod_token: int, @@ -740,4 +739,4 @@ def _get_ltor_masks_and_position_ids( attention_mask = attention_mask < 0.5 return attention_mask, loss_mask, position_ids ->>>>>>> main + diff --git a/megatron/data/data_samplers.py b/megatron/data/data_samplers.py index 4134d65347..e9622fe3d0 100644 --- a/megatron/data/data_samplers.py +++ b/megatron/data/data_samplers.py @@ -43,15 +43,10 @@ def build_pretraining_data_loader(dataset, consumed_samples, num_workers=None): # Torch dataloader. return torch.utils.data.DataLoader(dataset, batch_sampler=batch_sampler, -<<<<<<< HEAD num_workers=num_workers, - pin_memory=True) -======= - num_workers=args.num_workers, pin_memory=True, persistent_workers=True if args.num_workers > 0 else False, ) ->>>>>>> main class MegatronPretrainingSampler: diff --git a/megatron/model/transformer.py b/megatron/model/transformer.py index 82f9b7b194..2f3fd839d4 100644 --- a/megatron/model/transformer.py +++ b/megatron/model/transformer.py @@ -25,11 +25,8 @@ get_data_parallel_rng_tracker_name ) from megatron.core.parallel_state import get_tensor_model_parallel_group, get_tensor_and_expert_parallel_group -<<<<<<< HEAD from megatron.tensor_logging import log_tensor -======= from megatron.core.jit import jit_fuser ->>>>>>> main try: from einops import rearrange diff --git a/megatron/optimizer/optimizer.py b/megatron/optimizer/optimizer.py index 9941b604bc..9fac1161ea 100644 --- a/megatron/optimizer/optimizer.py +++ b/megatron/optimizer/optimizer.py @@ -67,7 +67,6 @@ def __init__( self.check_for_nan_in_grad = check_for_nan_in_grad self.params_have_main_grad = params_have_main_grad -<<<<<<< HEAD args=get_args() if args.debug_param_init: log_generator("PP init generator after reset") @@ -76,13 +75,6 @@ def __init__( for param in sorted(self.get_parameters(), key=lambda p: p.param_idx): log_tensor(f"Global param: {param.param_name}", param, level=args.debug_param_init) - # 'models' are retained for access to the contiguous grad buffers. 
- # (see distributed optimizer) - self.models = models - - -======= ->>>>>>> main def get_parameters(self): params = [] for param_group in self.optimizer.param_groups: diff --git a/megatron/training.py b/megatron/training.py index d5fd0ee6dc..4693cd4f56 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -201,15 +201,9 @@ def pretrain(train_valid_test_dataset_provider, time.time() - _TRAIN_START_TIME)) print_datetime('after megatron is initialized') -<<<<<<< HEAD - args = get_args() - timers = get_timers() - if args.structured_logs_dir is not None: reset_tensor_stats_logging() -======= ->>>>>>> main # Model, optimizer, and learning rate. timers('model-and-optimizer-setup', log_level=0).start(barrier=True) model, optimizer, opt_param_scheduler = setup_model_and_optimizer( @@ -906,7 +900,6 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, gc.disable() gc.collect() -<<<<<<< HEAD rank = torch.distributed.get_rank() if args.torch_profile_dir is not None and rank in args.profile_ranks: os.makedirs(args.torch_profile_dir, exist_ok=True) @@ -931,12 +924,9 @@ def trace_fn(p: torch.profiler.profile): else: profiler = None + num_microbatches = get_num_microbatches() with contextlib.nullcontext() if profiler is None else profiler: while iteration < args.train_iters: -======= - num_microbatches = get_num_microbatches() - while iteration < args.train_iters: ->>>>>>> main if args.profile and \ iteration == args.profile_step_start and \ torch.distributed.get_rank() in args.profile_ranks: diff --git a/pretrain_gpt.py b/pretrain_gpt.py index 91b9c27622..e698e9144b 100644 --- a/pretrain_gpt.py +++ b/pretrain_gpt.py @@ -24,17 +24,10 @@ get_batch_on_this_tp_rank, average_losses_across_data_parallel_group ) -from megatron.arguments import core_transformer_config_from_args -<<<<<<< HEAD -from megatron.core.models.gpt.gpt_layer_specs import ( - get_gpt_layer_with_transformer_engine_spec, - gpt_layer_with_transformer_engine_spec_moe -) from megatron.tensor_logging import log_tensor, run_and_log_exception -======= +from megatron.arguments import core_transformer_config_from_args from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec ->>>>>>> main def model_provider(pre_process=True, post_process=True) -> Union[GPTModel, megatron.model.GPTModel]: """Builds the model. 
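For reference, a minimal self-contained sketch of what the per-sample _get_ltor_masks_and_position_ids kept by the merge resolution above computes when position and attention resets are disabled; the helper name and the toy EOD id below are invented for illustration and are not Megatron code.

import torch

def build_ltor_masks_and_position_ids(tokens, eod_token, eod_mask_loss=True):
    # Causal (lower-triangular) attention mask for one 1-D token sequence.
    seq_length = tokens.numel()
    attention_mask = torch.tril(
        torch.ones((seq_length, seq_length), device=tokens.device)
    ).unsqueeze(0)

    # Loss mask: optionally exclude EOD positions from the loss.
    loss_mask = torch.ones(seq_length, dtype=torch.float, device=tokens.device)
    if eod_mask_loss:
        loss_mask[tokens == eod_token] = 0.0

    # Position ids are simply 0..seq_length-1 when resets are disabled.
    position_ids = torch.arange(seq_length, dtype=torch.long, device=tokens.device)

    # Boolean convention: True marks positions that attention must not see.
    attention_mask = attention_mask < 0.5
    return attention_mask, loss_mask, position_ids

tokens = torch.tensor([5, 9, 2, 0, 7, 3])  # 0 plays the role of the EOD token here
mask, loss_mask, position_ids = build_ltor_masks_and_position_ids(tokens, eod_token=0)
print(mask.shape, loss_mask.tolist(), position_ids.tolist())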
From c4678ffd88b47cef1ad33fbff240174f91391fa9 Mon Sep 17 00:00:00 2001 From: Zhengjiang Date: Thu, 25 Jan 2024 08:40:09 +0800 Subject: [PATCH 206/296] Update s_app_tag with {job_name}_{batch_size}_{gpu_req} --- megatron/__init__.py | 1 + megatron/global_vars.py | 10 ++++++++-- megatron/training.py | 18 +++++++++++++++++- 3 files changed, 26 insertions(+), 3 deletions(-) diff --git a/megatron/__init__.py b/megatron/__init__.py index e9faa069ed..4b4eb35cbe 100644 --- a/megatron/__init__.py +++ b/megatron/__init__.py @@ -11,6 +11,7 @@ from .global_vars import get_tensorboard_writer from .global_vars import get_wandb_writer from .global_vars import get_one_logger +from .global_vars import get_app_tag from .global_vars import get_adlr_autoresume from .global_vars import get_timers from .initialize import initialize_megatron diff --git a/megatron/global_vars.py b/megatron/global_vars.py index 5709ecf99f..24cfaf1171 100644 --- a/megatron/global_vars.py +++ b/megatron/global_vars.py @@ -18,6 +18,7 @@ _GLOBAL_TENSORBOARD_WRITER = None _GLOBAL_WANDB_WRITER = None _GLOBAL_ONE_LOGGER = None +_GLOBAL_APP_TAG = [] _GLOBAL_ADLR_AUTORESUME = None _GLOBAL_TIMERS = None _GLOBAL_SIGNAL_HANDLER = None @@ -69,6 +70,11 @@ def get_one_logger(): to check if it is initialized.""" return _GLOBAL_ONE_LOGGER +def get_app_tag(): + """Return app tag. It can be None so no need + to check if it is initialized.""" + return _GLOBAL_APP_TAG + def get_adlr_autoresume(): """ADLR autoresume object. It can be None so no need @@ -197,13 +203,13 @@ def _set_one_logger(args): global _GLOBAL_ONE_LOGGER _ensure_var_is_not_initialized(_GLOBAL_ONE_LOGGER, 'one logger') - if args.enable_onelogger and args.rank == (args.world_size - 1): + if args.enable_one_logger and args.rank == (args.world_size - 1): try: from one_logger.core import OneLogger config = { 'project': args.one_logger_project, 'entity': args.one_logger_entity, - 'name': args.one_logger_name + 'name': args.one_logger_run_name } one_logger = OneLogger(config=config) _GLOBAL_ONE_LOGGER = one_logger diff --git a/megatron/training.py b/megatron/training.py index 93fd4cf3f9..247ed3cdda 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -4,9 +4,10 @@ import gc from datetime import datetime +import hashlib import math import logging -import sys +import sys, os from .log_handler import CustomHandler # Make default logging level INFO, but filter out all log messages not from MCore. logging.basicConfig(handlers=[CustomHandler()], level=logging.INFO) @@ -22,6 +23,7 @@ from megatron import get_tensorboard_writer from megatron import get_wandb_writer from megatron import get_one_logger +from megatron import get_app_tag from megatron import get_current_global_batch_size from megatron import get_num_microbatches from megatron import is_last_rank @@ -516,6 +518,8 @@ def training_log(loss_dict, total_loss_dict, learning_rate, iteration, timers = get_timers() writer = get_tensorboard_writer() wandb_writer = get_wandb_writer() + one_logger = get_one_logger() + app_tag = get_app_tag() # Advanced, skipped, and Nan iterations. 
advanced_iters_key = 'advanced iterations' @@ -577,6 +581,18 @@ def training_log(loss_dict, total_loss_dict, learning_rate, iteration, batch_size = args.micro_batch_size * args.data_parallel_size * \ get_num_microbatches() + # Track app tag & app tag ID + if one_logger: + job_name = os.environ.get('SLURM_JOB_NAME', None) + current_app_tag = f'{job_name}_{batch_size}_{args.world_size}' + if current_app_tag not in app_tag: + app_tag.append(current_app_tag) + + # Get app_tag ID + app_tag_id = [hashlib.md5(i.encode('utf-8')).hexdigest() for i in app_tag] + + one_logger.log_metrics({'app_tag': app_tag, 'app_tag_id': app_tag_id}) + total_iterations = total_loss_dict[advanced_iters_key] + \ total_loss_dict[skipped_iters_key] From de859b385f6a34c310edd68b857f2a0d39273ca8 Mon Sep 17 00:00:00 2001 From: Zhengjiang Date: Thu, 25 Jan 2024 11:30:46 +0800 Subject: [PATCH 207/296] Log metrics in consistent order --- megatron/training.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/megatron/training.py b/megatron/training.py index 247ed3cdda..fe55f31e72 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -770,8 +770,8 @@ def train(forward_step_func, model, optimizer, opt_param_scheduler, train_samples_start = args.consumed_train_samples train_samples_target = args.train_samples one_logger.log_metrics({ - 'train_iterations_start': iteration, 'train_samples_start': args.consumed_train_samples, + 'train_iterations_start': iteration, 'train_samples_target': train_samples_target, 'train_iterations_target': args.train_iters, }) From 7027a1d725215457f716ad20efe865028e99e69a Mon Sep 17 00:00:00 2001 From: Zhengjiang Date: Thu, 25 Jan 2024 11:52:28 +0800 Subject: [PATCH 208/296] Add app_tag_count tracking --- megatron/training.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/megatron/training.py b/megatron/training.py index fe55f31e72..1229acdd74 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -591,7 +591,11 @@ def training_log(loss_dict, total_loss_dict, learning_rate, iteration, # Get app_tag ID app_tag_id = [hashlib.md5(i.encode('utf-8')).hexdigest() for i in app_tag] - one_logger.log_metrics({'app_tag': app_tag, 'app_tag_id': app_tag_id}) + one_logger.log_metrics({ + 'app_tag': app_tag, + 'app_tag_id': app_tag_id, + 'app_tag_count': len(app_tag) + }) total_iterations = total_loss_dict[advanced_iters_key] + \ total_loss_dict[skipped_iters_key] From 83442032b344c173bc86dda5a802fb3387b38809 Mon Sep 17 00:00:00 2001 From: Zhengjiang Shao Date: Thu, 25 Jan 2024 00:03:35 -0800 Subject: [PATCH 209/296] Resolve merging conflict --- megatron/global_vars.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/megatron/global_vars.py b/megatron/global_vars.py index 6866bb5925..98d45c3915 100644 --- a/megatron/global_vars.py +++ b/megatron/global_vars.py @@ -70,15 +70,11 @@ def get_one_logger(): to check if it is initialized.""" return _GLOBAL_ONE_LOGGER -<<<<<<< HEAD -======= def get_app_tag(): """Return app tag. It can be None so no need to check if it is initialized.""" return _GLOBAL_APP_TAG ->>>>>>> 7027a1d725215457f716ad20efe865028e99e69a - def get_adlr_autoresume(): """ADLR autoresume object. 
It can be None so no need to check if it is initialized.""" @@ -206,12 +202,6 @@ def _set_one_logger(args): global _GLOBAL_ONE_LOGGER _ensure_var_is_not_initialized(_GLOBAL_ONE_LOGGER, 'one logger') -<<<<<<< HEAD - if args.enable_onelogger and args.rank == (args.world_size - 1): - from one_logger.core import OneLogger - one_logger = OneLogger() - _GLOBAL_ONE_LOGGER = one_logger -======= if args.enable_one_logger and args.rank == (args.world_size - 1): try: from one_logger.core import OneLogger @@ -227,8 +217,6 @@ def _set_one_logger(args): 'tracking. Try pip install ' '--index-url=https://sc-hw-artf.nvidia.com/api/pypi/hwinf-ml-pypi/simple' ' one_logger to install it') ->>>>>>> 7027a1d725215457f716ad20efe865028e99e69a - def _set_adlr_autoresume(args): """Initialize ADLR autoresume.""" From 7af41ab9bfdd4504599abdfb2e58a0ea909e4e37 Mon Sep 17 00:00:00 2001 From: zshao Date: Thu, 25 Jan 2024 17:51:07 +0800 Subject: [PATCH 210/296] Use app tag logging wrapper api --- megatron/training.py | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/megatron/training.py b/megatron/training.py index 1229acdd74..6a231454f7 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -585,17 +585,7 @@ def training_log(loss_dict, total_loss_dict, learning_rate, iteration, if one_logger: job_name = os.environ.get('SLURM_JOB_NAME', None) current_app_tag = f'{job_name}_{batch_size}_{args.world_size}' - if current_app_tag not in app_tag: - app_tag.append(current_app_tag) - - # Get app_tag ID - app_tag_id = [hashlib.md5(i.encode('utf-8')).hexdigest() for i in app_tag] - - one_logger.log_metrics({ - 'app_tag': app_tag, - 'app_tag_id': app_tag_id, - 'app_tag_count': len(app_tag) - }) + one_logger.log_app_tag(current_app_tag) total_iterations = total_loss_dict[advanced_iters_key] + \ total_loss_dict[skipped_iters_key] From e713cd72e9e901914b3b46fdc37f4424f330a0cd Mon Sep 17 00:00:00 2001 From: zshao Date: Thu, 25 Jan 2024 17:58:02 +0800 Subject: [PATCH 211/296] Remove app_tag global var --- megatron/__init__.py | 1 - megatron/global_vars.py | 6 ------ megatron/training.py | 2 -- 3 files changed, 9 deletions(-) diff --git a/megatron/__init__.py b/megatron/__init__.py index 4b4eb35cbe..e9faa069ed 100644 --- a/megatron/__init__.py +++ b/megatron/__init__.py @@ -11,7 +11,6 @@ from .global_vars import get_tensorboard_writer from .global_vars import get_wandb_writer from .global_vars import get_one_logger -from .global_vars import get_app_tag from .global_vars import get_adlr_autoresume from .global_vars import get_timers from .initialize import initialize_megatron diff --git a/megatron/global_vars.py b/megatron/global_vars.py index 98d45c3915..e1fd67faa6 100644 --- a/megatron/global_vars.py +++ b/megatron/global_vars.py @@ -18,7 +18,6 @@ _GLOBAL_TENSORBOARD_WRITER = None _GLOBAL_WANDB_WRITER = None _GLOBAL_ONE_LOGGER = None -_GLOBAL_APP_TAG = [] _GLOBAL_ADLR_AUTORESUME = None _GLOBAL_TIMERS = None _GLOBAL_SIGNAL_HANDLER = None @@ -70,11 +69,6 @@ def get_one_logger(): to check if it is initialized.""" return _GLOBAL_ONE_LOGGER -def get_app_tag(): - """Return app tag. It can be None so no need - to check if it is initialized.""" - return _GLOBAL_APP_TAG - def get_adlr_autoresume(): """ADLR autoresume object. 
It can be None so no need to check if it is initialized.""" diff --git a/megatron/training.py b/megatron/training.py index 6a231454f7..d24f2b1042 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -23,7 +23,6 @@ from megatron import get_tensorboard_writer from megatron import get_wandb_writer from megatron import get_one_logger -from megatron import get_app_tag from megatron import get_current_global_batch_size from megatron import get_num_microbatches from megatron import is_last_rank @@ -519,7 +518,6 @@ def training_log(loss_dict, total_loss_dict, learning_rate, iteration, writer = get_tensorboard_writer() wandb_writer = get_wandb_writer() one_logger = get_one_logger() - app_tag = get_app_tag() # Advanced, skipped, and Nan iterations. advanced_iters_key = 'advanced iterations' From fdafcc507d201f140544eb2e6326e1cf72421be2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20B=C5=82a=C5=BC?= Date: Thu, 25 Jan 2024 18:26:42 +0100 Subject: [PATCH 212/296] Add doc --- megatron/core/dist_checkpointing/mapping.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/megatron/core/dist_checkpointing/mapping.py b/megatron/core/dist_checkpointing/mapping.py index ad1b59dac6..cb4c4d7a47 100644 --- a/megatron/core/dist_checkpointing/mapping.py +++ b/megatron/core/dist_checkpointing/mapping.py @@ -239,11 +239,18 @@ class ShardedTensorFactory: Builder creates a sub-state-dict out of a tensor before saving, and merger merges the corresponding state dict after loading. + + Args: + key (str): unique identifier of the factory + data (torch.Tensor): original model parameter that will be further transformed by this factory + build_fn (callable): function that transforms the original tensor to a sharded state dict + merge_fn (callable): function that transforms loaded subtree back into a single tensor (inverse of `build_fn`) + replica_id (ReplicaId): indicates factory replication wrt. factories in different processes """ key: str data: torch.Tensor - build_fn: Callable[[str, torch.Tensor], ShardedStateDict] + build_fn: Callable[[str, torch.Tensor, ReplicaId], ShardedStateDict] merge_fn: Callable[[StateDict], torch.Tensor] replica_id: ReplicaId = 0 From c40c047f178745af0a5bbe30bcfa1b74bff8431c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20B=C5=82a=C5=BC?= Date: Thu, 25 Jan 2024 18:47:57 +0100 Subject: [PATCH 213/296] Add no support info --- megatron/core/transformer/moe/experts.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/megatron/core/transformer/moe/experts.py b/megatron/core/transformer/moe/experts.py index 6a6f03491b..06232bc514 100644 --- a/megatron/core/transformer/moe/experts.py +++ b/megatron/core/transformer/moe/experts.py @@ -1,10 +1,12 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. 
+from typing import Tuple import numpy as np import torch from torch.nn.parameter import Parameter from megatron.core import parallel_state +from megatron.core.dist_checkpointing.mapping import ShardedStateDict from megatron.core.dist_checkpointing.utils import replace_prefix_for_sharding from megatron.core.tensor_parallel.layers import ( _initialize_affine_weight_cpu, @@ -141,6 +143,11 @@ def forward(self, permuted_local_hidden_states, tokens_per_expert): return fc2_output, None + def sharded_state_dict(self, prefix='', sharded_offsets=()): + raise NotImplementedError( + 'Currently distributed checkpointing is not supported for GroupedMLP' + ) + class SequentialMLP(MegatronModule): """An implementation of the Experts layer using a sequence of MLP layers. From e25970fe9dce9f740928ba9473600e597109fa5a Mon Sep 17 00:00:00 2001 From: Shanmugam Ramasamy Date: Thu, 25 Jan 2024 13:25:04 -0800 Subject: [PATCH 214/296] Adding bert local spec test --- .gitlab-ci.yml | 14 ++++++++++++++ pretrain_bert.py | 12 ++++++++---- .../bert/pretrain_bert_distributed_test.sh | 1 + 3 files changed, 23 insertions(+), 4 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 05c1de1f61..1cae674c9e 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -725,6 +725,20 @@ train.bert_core.345m_tp2_pp2_1node_50steps: TIME_LIMIT: "20:00" TEST_LEVEL: MR_TESTS +train.bert_core.345m_tp2_pp2_1node_50steps_local_spec: + <<: *selene-test-launcher + variables: + <<: [*VARS] + RUN_MODEL: bert + TP_SIZE: 2 + PP_SIZE: 2 + NUM_NODES: 1 + USE_CORE: 1 + MAX_STEPS: 50 + TIME_LIMIT: "20:00" + TEST_LEVEL: MR_TESTS + ADDITIONAL_PARAMS: "--spec local" + train.bert_core.345m_tp1_pp2_1node_50steps: <<: *selene-test-launcher variables: diff --git a/pretrain_bert.py b/pretrain_bert.py index 47db48c2be..28ab44db11 100644 --- a/pretrain_bert.py +++ b/pretrain_bert.py @@ -19,7 +19,7 @@ from megatron.utils import average_losses_across_data_parallel_group from megatron.arguments import core_transformer_config_from_args from megatron.core.transformer.spec_utils import import_module -from megatron.core.models.bert.bert_layer_specs import bert_layer_with_transformer_engine_spec +from megatron.core.models.bert.bert_layer_specs import bert_layer_with_transformer_engine_spec, bert_layer_local_spec def model_provider(pre_process=True, post_process=True): """Build the model.""" @@ -32,10 +32,14 @@ def model_provider(pre_process=True, post_process=True): if args.use_mcore_models: - if args.spec is not None: + + if args.spec is None: + transformer_layer_spec = bert_layer_with_transformer_engine_spec #default spec + elif args.spec == 'local': + transformer_layer_spec = bert_layer_local_spec + else : transformer_layer_spec = import_module(args.spec) - else: - transformer_layer_spec = bert_layer_with_transformer_engine_spec + model = BertModel( config=config, diff --git a/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_test.sh b/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_test.sh index 11f427276c..58541ab688 100755 --- a/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_test.sh +++ b/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_test.sh @@ -70,6 +70,7 @@ torch_run_cmd="torchrun $DISTRIBUTED_ARGS \ --eval-iters 10 \ --tensor-model-parallel-size $TP_SIZE \ --pipeline-model-parallel-size $PP_SIZE \ + ${MODEL_SPEC:+--spec "$MODEL_SPEC"} \ ${VP_SIZE:+--num-layers-per-virtual-pipeline-stage "$VP_SIZE"} \ ${USE_MCORE:+--use-mcore-models} \ ${ADDITIONAL_PARAMS:+$ADDITIONAL_PARAMS} \ 
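For reference, a rough sketch of the build/merge contract documented on ShardedTensorFactory in the "Add doc" patch above: build_fn turns one tensor into a small sub-state-dict before saving, merge_fn inverts that transformation after loading, and replica_id is threaded through to every shard the factory produces. Plain dicts stand in for real ShardedTensor objects; all names below are made up for illustration.

import torch

def build_fn(key, tensor, replica_id):
    # Split one parameter into two named shards; each entry records the same
    # replica_id as the original so replication info survives the transform.
    half = tensor.shape[0] // 2
    return {
        'w': {'key': f'{key}.w', 'data': tensor[:half], 'replica_id': replica_id},
        'v': {'key': f'{key}.v', 'data': tensor[half:], 'replica_id': replica_id},
    }

def merge_fn(sub_state_dict):
    # Inverse of build_fn: reassemble the loaded pieces into a single tensor.
    return torch.cat([sub_state_dict['w'], sub_state_dict['v']])

original = torch.arange(8.0)
built = build_fn('decoder.mlp.weight', original, replica_id=0)
restored = merge_fn({name: entry['data'] for name, entry in built.items()})
assert torch.equal(restored, original)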
From 2b0decc841476237200bf4311013b7bf0de55304 Mon Sep 17 00:00:00 2001 From: Shanmugam Ramasamy Date: Thu, 25 Jan 2024 13:27:23 -0800 Subject: [PATCH 215/296] Adding bert local spec test --- .../test_scripts/bert/pretrain_bert_distributed_test.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_test.sh b/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_test.sh index 58541ab688..11f427276c 100755 --- a/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_test.sh +++ b/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_test.sh @@ -70,7 +70,6 @@ torch_run_cmd="torchrun $DISTRIBUTED_ARGS \ --eval-iters 10 \ --tensor-model-parallel-size $TP_SIZE \ --pipeline-model-parallel-size $PP_SIZE \ - ${MODEL_SPEC:+--spec "$MODEL_SPEC"} \ ${VP_SIZE:+--num-layers-per-virtual-pipeline-stage "$VP_SIZE"} \ ${USE_MCORE:+--use-mcore-models} \ ${ADDITIONAL_PARAMS:+$ADDITIONAL_PARAMS} \ From e6ef9ea57117660387ca83293ce91a2937e008ff Mon Sep 17 00:00:00 2001 From: Shanmugam Ramasamy Date: Thu, 25 Jan 2024 15:41:00 -0800 Subject: [PATCH 216/296] Adding bert local spec test --- megatron/arguments.py | 5 +++-- megatron/core/models/bert/bert_model.py | 12 +++++++++++- pretrain_bert.py | 2 +- 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/megatron/arguments.py b/megatron/arguments.py index ee4aa6759e..ecf120c977 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -1421,10 +1421,11 @@ def _add_vision_args(parser): def _add_experimental_args(parser): group = parser.add_argument_group(title='experimental') - group.add_argument('--spec', type=str, default=None, nargs=2, + group.add_argument('--spec', type=str, default=None, nargs='*', help='Specify the pair ' 'that returns a spec to customize a model, transformer ' - 'block, or transformer layer, depending on the use case. ' + 'block, or transformer layer, depending on the use case.' + 'To use local spec specify local as the argument.' 'For more details, see the model class, ' '`transformer_block.py`, or `transformer_layer.py`') diff --git a/megatron/core/models/bert/bert_model.py b/megatron/core/models/bert/bert_model.py index a556ac8ea5..a08d0aca79 100644 --- a/megatron/core/models/bert/bert_model.py +++ b/megatron/core/models/bert/bert_model.py @@ -2,8 +2,10 @@ from typing import Literal, Optional import torch +import os from torch import Tensor +from megatron.core import parallel_state from megatron.core.models.bert.bert_lm_head import BertLMHead from megatron.core.models.bert.pooler import Pooler from megatron.core.models.common.embeddings.language_model_embedding import LanguageModelEmbedding @@ -58,6 +60,9 @@ def __init__( if return_embeddings: assert self.post_process and self.add_binary_head + assert os.getenv('NVTE_FLASH_ATTN') == '0', "Bert currently does not support flash attention. Please set env variable NVTE_FLASH_ATTN=0" + assert os.getenv('NVTE_FUSED_ATTN') == '0', "Bert currently does not support fused attention. 
Please set env variable NVTE_FUSED_ATTN=0" + self.config: TransformerConfig = config self.transformer_layer_spec: ModuleSpec = transformer_layer_spec self.vocab_size = vocab_size @@ -193,7 +198,12 @@ def forward( """ extended_attention_mask = self.bert_extended_attention_mask(attention_mask) - position_ids = self.bert_position_ids(input_ids) + if parallel_state.is_pipeline_first_stage(): + input_ids = input_ids + position_ids = self.bert_position_ids(input_ids) + else: + position_ids = None + input_ids = None # Encoder embedding. if self.pre_process: diff --git a/pretrain_bert.py b/pretrain_bert.py index 28ab44db11..2defee3fa5 100644 --- a/pretrain_bert.py +++ b/pretrain_bert.py @@ -35,7 +35,7 @@ def model_provider(pre_process=True, post_process=True): if args.spec is None: transformer_layer_spec = bert_layer_with_transformer_engine_spec #default spec - elif args.spec == 'local': + elif args.spec[0] == 'local': transformer_layer_spec = bert_layer_local_spec else : transformer_layer_spec = import_module(args.spec) From c2d44ff58471d2ee35eb9d3bc666fee5850e1cf7 Mon Sep 17 00:00:00 2001 From: Shanmugam Ramasamy Date: Thu, 25 Jan 2024 16:22:35 -0800 Subject: [PATCH 217/296] Adding bert local spec test --- megatron/core/models/bert/bert_model.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/megatron/core/models/bert/bert_model.py b/megatron/core/models/bert/bert_model.py index a08d0aca79..497745b45a 100644 --- a/megatron/core/models/bert/bert_model.py +++ b/megatron/core/models/bert/bert_model.py @@ -60,9 +60,6 @@ def __init__( if return_embeddings: assert self.post_process and self.add_binary_head - assert os.getenv('NVTE_FLASH_ATTN') == '0', "Bert currently does not support flash attention. Please set env variable NVTE_FLASH_ATTN=0" - assert os.getenv('NVTE_FUSED_ATTN') == '0', "Bert currently does not support fused attention. 
Please set env variable NVTE_FUSED_ATTN=0" - self.config: TransformerConfig = config self.transformer_layer_spec: ModuleSpec = transformer_layer_spec self.vocab_size = vocab_size From fc316fff117127e7b0f87d783c0442161f2d6e72 Mon Sep 17 00:00:00 2001 From: Shanmugam Ramasamy Date: Thu, 25 Jan 2024 16:23:18 -0800 Subject: [PATCH 218/296] Adding bert local spec test --- pretrain_bert.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pretrain_bert.py b/pretrain_bert.py index 2defee3fa5..5c91fefd91 100644 --- a/pretrain_bert.py +++ b/pretrain_bert.py @@ -36,6 +36,7 @@ def model_provider(pre_process=True, post_process=True): if args.spec is None: transformer_layer_spec = bert_layer_with_transformer_engine_spec #default spec elif args.spec[0] == 'local': + print_rank_0('Using Local spec for transformer layers') transformer_layer_spec = bert_layer_local_spec else : transformer_layer_spec = import_module(args.spec) From 85788005740d99ba53b70d1d7382d993ff872b2e Mon Sep 17 00:00:00 2001 From: Chen Cui Date: Thu, 25 Jan 2024 16:30:58 -0800 Subject: [PATCH 219/296] update `apply_rope_fusion` in config after checking availability Signed-off-by: Chen Cui --- .../models/common/embeddings/rotary_pos_embedding.py | 9 +++++---- megatron/core/transformer/attention.py | 4 ++-- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/megatron/core/models/common/embeddings/rotary_pos_embedding.py b/megatron/core/models/common/embeddings/rotary_pos_embedding.py index 5a48ace83e..e713e05097 100644 --- a/megatron/core/models/common/embeddings/rotary_pos_embedding.py +++ b/megatron/core/models/common/embeddings/rotary_pos_embedding.py @@ -205,21 +205,22 @@ def apply_rotary_pos_emb_thd(t: Tensor, cu_seqlens: Tensor, freqs: Tensor) -> Te def apply_rotary_pos_emb( - t: Tensor, freqs: Tensor, fused: bool = False, cu_seqlens: Optional[Tensor] = None + t: Tensor, freqs: Tensor, config: TransformerConfig, cu_seqlens: Optional[Tensor] = None ): """ Reroute to the appropriate apply_rotary_pos_emb function depending on fused/unfused kernels, or bshd (conventional) / thd (packed seq) format """ - if fused and not HAVE_APPLY_ROPE_FUSION: - fused = False + if config.apply_rope_fusion and not HAVE_APPLY_ROPE_FUSION: + # setting apply_rope_fusion in config to False so that subsequent queries to this config also return Flase + config.apply_rope_fusion = False if not getattr(apply_rotary_pos_emb, "printed_fused_warning", False): logger.warning( "Setting apply_rope_fusion to false because its implementation" " is not included in Apex. Try upgrading to the latest version" ) apply_rotary_pos_emb.printed_fused_warning = True - if fused: + if config.apply_rope_fusion: if cu_seqlens is None: return fused_apply_rotary_pos_emb(t, freqs, transpose_output_memory=True) else: diff --git a/megatron/core/transformer/attention.py b/megatron/core/transformer/attention.py index 7a7bb888ca..bd5859baac 100644 --- a/megatron/core/transformer/attention.py +++ b/megatron/core/transformer/attention.py @@ -277,10 +277,10 @@ def forward( else: cu_seqlens_q = cu_seqlens_kv = None query = apply_rotary_pos_emb( - query, q_pos_emb, fused=self.config.apply_rope_fusion, cu_seqlens=cu_seqlens_q + query, q_pos_emb, config=self.config, cu_seqlens=cu_seqlens_q ) key = apply_rotary_pos_emb( - key, k_pos_emb, fused=self.config.apply_rope_fusion, cu_seqlens=cu_seqlens_kv + key, k_pos_emb, config=self.config, cu_seqlens=cu_seqlens_kv ) # TODO, can apply positional embedding to value_layer so it has # absolute positional embedding. 
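For reference, the RoPE changes above rely on two small Python patterns: persisting the fallback decision on the config object so later calls skip the availability check, and stashing a flag on the function object so the warning is logged only once. A minimal sketch under those assumptions; the names below are invented and are not Megatron or Apex APIs.

import logging

logger = logging.getLogger(__name__)

HAVE_FUSED_KERNEL = False  # assume the fused implementation is unavailable

def apply_op(x, config):
    # Fall back to the unfused path, record that decision on the config so the
    # check is skipped next time, and warn only on the first occurrence.
    if config['use_fusion'] and not HAVE_FUSED_KERNEL:
        config['use_fusion'] = False
        if not getattr(apply_op, 'printed_warning', False):
            logger.warning('fused kernel unavailable; falling back to the unfused path')
            apply_op.printed_warning = True
    return x * 2 if config['use_fusion'] else x + x

config = {'use_fusion': True}
apply_op(1, config)  # warns once and flips config['use_fusion'] to False
apply_op(1, config)  # silent: the config already records the fallback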
From 6e599dcea8d0592ae6dfc813e52525d50c6226bb Mon Sep 17 00:00:00 2001 From: Shanmugam Ramasamy Date: Thu, 25 Jan 2024 17:09:12 -0800 Subject: [PATCH 220/296] Adding bert local spec test --- .gitlab-ci.yml | 1 + megatron/core/models/bert/bert_model.py | 2 ++ .../bert_tp2_pp2_1nodes_50steps_core_enabled_local_spec.json | 1 + 3 files changed, 4 insertions(+) create mode 100644 tests/functional_tests/test_results/bert/bert_tp2_pp2_1nodes_50steps_core_enabled_local_spec.json diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 1cae674c9e..fb98e17fb1 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -737,6 +737,7 @@ train.bert_core.345m_tp2_pp2_1node_50steps_local_spec: MAX_STEPS: 50 TIME_LIMIT: "20:00" TEST_LEVEL: MR_TESTS + METADATA: local_spec ADDITIONAL_PARAMS: "--spec local" train.bert_core.345m_tp1_pp2_1node_50steps: diff --git a/megatron/core/models/bert/bert_model.py b/megatron/core/models/bert/bert_model.py index 497745b45a..8df3e39693 100644 --- a/megatron/core/models/bert/bert_model.py +++ b/megatron/core/models/bert/bert_model.py @@ -60,6 +60,8 @@ def __init__( if return_embeddings: assert self.post_process and self.add_binary_head + assert os.getenv('NVTE_ALLOW_NONDETERMINISTIC_ALGO') == '0' or os.getenv('NVTE_FLASH_ATTN') == '0', "Bert currently does not support flash attention. Please set env variable NVTE_FLASH_ATTN=0 or set NVTE_ALLOW_NONDETERMINISTIC_ALGO=0" + self.config: TransformerConfig = config self.transformer_layer_spec: ModuleSpec = transformer_layer_spec self.vocab_size = vocab_size diff --git a/tests/functional_tests/test_results/bert/bert_tp2_pp2_1nodes_50steps_core_enabled_local_spec.json b/tests/functional_tests/test_results/bert/bert_tp2_pp2_1nodes_50steps_core_enabled_local_spec.json new file mode 100644 index 0000000000..60d32e4938 --- /dev/null +++ b/tests/functional_tests/test_results/bert/bert_tp2_pp2_1nodes_50steps_core_enabled_local_spec.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.49849, 10.48909, 10.48383, 10.45052, 10.4396, 10.34793, 10.13229, 10.03818, 9.86253, 9.67165]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [2210.0, 2505.0, 2330.0, 2235.0, 2290.0, 2400.0, 2866.0, 3249.0, 3522.0, 2958.0]}, "iteration_timing_avg": 0.6923926470588235} From 1e95136ded28fdd5df0ceb880486755ca055564c Mon Sep 17 00:00:00 2001 From: Chen Cui Date: Thu, 25 Jan 2024 17:55:39 -0800 Subject: [PATCH 221/296] add unit tests Signed-off-by: Chen Cui --- .../unit_tests/transformer/test_attention.py | 24 ++++++++++++++++++ .../transformer/test_attention_packed_seq.py | 25 +++++++++++++++++++ 2 files changed, 49 insertions(+) diff --git a/tests/unit_tests/transformer/test_attention.py b/tests/unit_tests/transformer/test_attention.py index 7fac9d3eda..4a5680ea05 100644 --- a/tests/unit_tests/transformer/test_attention.py +++ b/tests/unit_tests/transformer/test_attention.py @@ -57,6 +57,30 @@ def test_gpu_forward(self): assert output.shape[2] == config.hidden_size assert bias.shape[0] == config.hidden_size + def test_fused_rope_gpu_forward(self): + self.parallel_attention.config.apply_rope_fusion = True + config = self.parallel_attention.config + sequence_length = 32 + micro_batch_size = 2 + + self.parallel_attention.cuda() + + # [sequence length, batch size, hidden size] + hidden_states = torch.ones((sequence_length, micro_batch_size, self.parallel_attention.config.hidden_size)) + hidden_states = hidden_states.cuda() + + attention_mask = torch.ones((1, 1, sequence_length, sequence_length), 
dtype=bool).cuda() + rotary_pos_emb = torch.ones(sequence_length, 1, 1, self.parallel_attention.config.kv_channels).cuda() + output, bias = self.parallel_attention(hidden_states, attention_mask, rotary_pos_emb=rotary_pos_emb) + + assert config.recompute_granularity is None + assert output.shape[0] == sequence_length + assert output.shape[1] == micro_batch_size + assert output.shape[2] == config.hidden_size + assert bias.shape[0] == config.hidden_size + self.parallel_attention.config.apply_rope_fusion = False + + def test_checkpointed_gpu_forward(self): transformer_config = self.transformer_config transformer_config.recompute_granularity='selective' diff --git a/tests/unit_tests/transformer/test_attention_packed_seq.py b/tests/unit_tests/transformer/test_attention_packed_seq.py index 75e77c0de1..c8be7dba3d 100644 --- a/tests/unit_tests/transformer/test_attention_packed_seq.py +++ b/tests/unit_tests/transformer/test_attention_packed_seq.py @@ -73,6 +73,31 @@ def test_gpu_forward(self): assert output.shape[2] == config.hidden_size assert bias.shape[0] == config.hidden_size + def test_fused_rope_gpu_forward(self): + self.parallel_attention.config.apply_rope_fusion = True + config = self.parallel_attention.config + sequence_length = 32 + micro_batch_size = 1 + + self.parallel_attention.cuda() + + # [sequence length, batch size, hidden size] + hidden_states = torch.ones((sequence_length, micro_batch_size, self.parallel_attention.config.hidden_size)) + hidden_states = hidden_states.cuda().to(torch.bfloat16) + + attention_mask = None + rotary_pos_emb = torch.ones(sequence_length, 1, 1, self.parallel_attention.config.kv_channels).cuda() + + packed_seq_params = make_test_packed_seq_params(sequence_length) + output, bias = self.parallel_attention(hidden_states, attention_mask, packed_seq_params=packed_seq_params) + + assert config.recompute_granularity is None + assert output.shape[0] == sequence_length + assert output.shape[1] == micro_batch_size + assert output.shape[2] == config.hidden_size + assert bias.shape[0] == config.hidden_size + self.parallel_attention.config.apply_rope_fusion = False + def test_checkpointed_gpu_forward(self): transformer_config = self.transformer_config transformer_config.recompute_granularity='selective' From 5c10cb417e8e7f4463d01b8f45e1e6038feec8ee Mon Sep 17 00:00:00 2001 From: Jared Casper Date: Wed, 24 Jan 2024 01:10:02 -0800 Subject: [PATCH 222/296] Use new memory_efficient argument to fused layernorm functions when available in apex. See https://github.com/NVIDIA/apex/pull/1715 --- megatron/core/fusions/fused_layer_norm.py | 39 ++++++++++++++----- .../core/transformer/transformer_config.py | 2 + 2 files changed, 32 insertions(+), 9 deletions(-) diff --git a/megatron/core/fusions/fused_layer_norm.py b/megatron/core/fusions/fused_layer_norm.py index c12ec173d0..82b4b75b0d 100644 --- a/megatron/core/fusions/fused_layer_norm.py +++ b/megatron/core/fusions/fused_layer_norm.py @@ -1,6 +1,7 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. 
import importlib +import inspect import numbers import torch @@ -63,10 +64,12 @@ def __init__( ): super().__init__() - self.zero_centered_gamma = config.layernorm_zero_centered_gamma + self.config = config + + self.zero_centered_gamma = self.config.layernorm_zero_centered_gamma assert ( - config.normalization == "LayerNorm" - ), f'({config.normalization}) is not supported in FusedLayerNorm' + self.config.normalization == "LayerNorm" + ), f'({self.config.normalization}) is not supported in FusedLayerNorm' # List of hiddens sizes supported in the persistent layer norm kernel # If the hidden size is not supported, fall back to the non-persistent @@ -97,7 +100,7 @@ def __init__( 49152, 65536, ] - persist_layer_norm = config.persist_layer_norm + persist_layer_norm = self.config.persist_layer_norm if hidden_size not in persist_ln_hidden_sizes or not HAVE_PERSIST_LAYER_NORM: persist_layer_norm = False @@ -113,7 +116,7 @@ def __init__( self.bias = Parameter(torch.Tensor(*hidden_size)) self.reset_parameters() self.persist_layer_norm = persist_layer_norm - self.sequence_parallel = config.sequence_parallel + self.sequence_parallel = self.config.sequence_parallel # set sequence parallelism flag on weight and bias parameters setattr(self.weight, 'sequence_parallel', self.sequence_parallel) @@ -133,7 +136,12 @@ def forward(self, input: Tensor) -> Tensor: weight = self.weight + 1 if self.zero_centered_gamma else self.weight if self.persist_layer_norm: - output = FastLayerNormFN.apply(input, weight, self.bias, self.eps) + if 'memory_efficient' in inspect.getfullargspec(FastLayerNormFN.forward).args: + output = FastLayerNormFN.apply( + input, weight, self.bias, self.eps, self.config.memory_efficient_layer_norm + ) + else: + output = FastLayerNormFN.apply(input, weight, self.bias, self.eps) # Apex's fast layer norm function outputs a 'view' tensor (i.e., has # a populated '_base' field). This will result in schedule.py's @@ -144,8 +152,21 @@ def forward(self, input: Tensor) -> Tensor: ) else: - output = FusedLayerNormAffineFunction.apply( - input, weight, self.bias, self.hidden_size, self.eps - ) + if ( + 'memory_efficient' + in inspect.getfullargspec(FusedLayerNormAffineFunction.forward).args + ): + return FusedLayerNormAffineFunction.apply( + input, + weight, + self.bias, + self.hidden_size, + self.eps, + self.config.memory_efficient_layer_norm, + ) + else: + return FusedLayerNormAffineFunction.apply( + input, weight, self.bias, self.hidden_size, self.eps + ) return output diff --git a/megatron/core/transformer/transformer_config.py b/megatron/core/transformer/transformer_config.py index 74a472da01..4c4f40cfb9 100644 --- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -40,6 +40,7 @@ class TransformerConfig(ModelParallelConfig): bias_gelu_fustion (bool): If true, fuses bias and gelu. Defaults to False. masked_softmax_fusion (bool): If true, uses softmax fusion. persist_layer_norm (bool): If true, uses the persistent fused layer norm kernel. This kernel only supports a fixed set of hidden sizes. Defaults to False. + memory_efficient_layer_norm(bool): If True, and using local layers (not from TransformerEngine), tells Apex to use the memory efficient fused LayerNorm kernel. Ignored if not using LayerNorm. Defaults to False. bias_dropout_fusion (bool): If true, uses bias dropout fusion. recompute_granularity (str): megatron-core supports 'selective' activation checkpointing where only the memory intensive part of attention is checkpointed. 
These memory intensive activations are also less compute intensive which makes activation checkpointing more efficient for LLMs (20B+). See Reducing Activation Recomputation in Large Transformer Models: https://arxiv.org/abs/2205.05198 for more details. 'full' will checkpoint the entire transformer layer. Must be 'selective' or 'full'. 'selective' always uses all layers. Defaults to None. recompute_method (str): uniform will uniformly divide the total number of transformer layers in a transformer block and recompute the input activation of each divided chunk at the specified granularity. block will recompute the input activations for only a set number of transformer layers per pipeline stage. The rest of the layers in the pipeline stage will not have any activations recomputed. Must be 'uniform' or 'block'. Defaults to None. @@ -98,6 +99,7 @@ class TransformerConfig(ModelParallelConfig): bias_activation_fusion: bool = False masked_softmax_fusion: bool = False persist_layer_norm: bool = False + memory_efficient_layer_norm: bool = False bias_dropout_fusion: bool = False # TODO: this should be bias_dropout_add_fusion? apply_rope_fusion: bool = False From 4a08560669c0fd7d9a0761cc3fb56fb6d46cc9b6 Mon Sep 17 00:00:00 2001 From: Helen Ngo Date: Thu, 25 Jan 2024 22:38:05 -0800 Subject: [PATCH 223/296] Add `num_floating_point_operations_so_far` arg to save_checkpoint call in checkpoint/util.py --- tools/checkpoint/saver_megatron.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/checkpoint/saver_megatron.py b/tools/checkpoint/saver_megatron.py index a1812682bb..b075e648dc 100644 --- a/tools/checkpoint/saver_megatron.py +++ b/tools/checkpoint/saver_megatron.py @@ -402,5 +402,6 @@ def get_models(count, dtype, pre_process, post_process): for tp_rank in range(args.target_tensor_parallel_size): mpu.set_tensor_model_parallel_rank(tp_rank) - save_checkpoint(md.iteration, [models[tp_rank]], None, None) + save_checkpoint(md.iteration, [models[tp_rank]], None, None, + num_floating_point_operations_so_far=0) print("Done!") From 88ddc36ec715ee6820bd29fbae3290845622d3a9 Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Fri, 26 Jan 2024 00:03:29 -0800 Subject: [PATCH 224/296] Fixing the nightly ci for #1018. --- .gitlab-ci.yml | 2 +- megatron/core/pipeline_parallel/schedules.py | 8 ++++++-- megatron/core/transformer/moe/moe_utils.py | 6 +++--- megatron/core/transformer/moe/router.py | 1 + .../gpt3/gpt3_tp2_pp2_1nodes_50steps_4experts.json | 2 +- ...3_tp2_pp2_1nodes_50steps_core_enabled_te_2experts.json | 2 +- ..._1nodes_50steps_core_enabled_te_4experts2parallel.json | 2 +- 7 files changed, 14 insertions(+), 9 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 2632caa524..da87a67684 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -656,7 +656,7 @@ train.moe_gpt3.345m_tp2_pp2_4experts_1node_50steps: USE_CORE: 0 TEST_LEVEL: NIGHTLY_TESTS METADATA: "4experts" - ADDITIONAL_PARAMS: "--num-experts 4 --moe-router-load-balancing-type sinkhorn --moe-router-topk 1" + ADDITIONAL_PARAMS: "--sequence-parallel --num-experts 4 --moe-router-load-balancing-type sinkhorn --moe-router-topk 1" train.bert.345m_tp4_pp1_1node_50steps: <<: *selene-test-launcher diff --git a/megatron/core/pipeline_parallel/schedules.py b/megatron/core/pipeline_parallel/schedules.py index 81126c6a5d..b45aa8c87a 100644 --- a/megatron/core/pipeline_parallel/schedules.py +++ b/megatron/core/pipeline_parallel/schedules.py @@ -211,8 +211,12 @@ def forward_step( # Set the loss scale for the auxiliary loss of the MoE layer. 
# Since we use a trick to do backward on the auxiliary loss, we need to set the scale explicitly. if config.num_moe_experts is not None: - # Calculate the loss scale based on the grad_scale_func if available, else default to 1.0. - loss_scale = config.grad_scale_func(1.0) if config.grad_scale_func is not None else 1.0 + # Calculate the loss scale based on the grad_scale_func if available, else default to 1. + loss_scale = ( + config.grad_scale_func(torch.tensor(1.0)) + if config.grad_scale_func is not None + else torch.tensor(1.0) + ) # Set the loss scale MoEAuxLossAutoScaler.set_loss_scale(loss_scale / num_microbatches) diff --git a/megatron/core/transformer/moe/moe_utils.py b/megatron/core/transformer/moe/moe_utils.py index 52712d5155..36c3279f52 100644 --- a/megatron/core/transformer/moe/moe_utils.py +++ b/megatron/core/transformer/moe/moe_utils.py @@ -57,7 +57,7 @@ class MoEAuxLossAutoScaler(torch.autograd.Function): """ - main_loss_backward_scale: int = 1 + main_loss_backward_scale: torch.Tensor = torch.tensor(1.0) @staticmethod def forward(ctx, output: torch.Tensor, aux_loss: torch.Tensor): @@ -89,10 +89,10 @@ def backward(ctx, grad_output: torch.Tensor): return grad_output, scaled_aux_loss_grad @staticmethod - def set_loss_scale(scale: int): + def set_loss_scale(scale: torch.Tensor): """set the scale of the aux loss. Args: - scale (int): The scale value to set. Please ensure that the scale passed in matches the scale of the main_loss. + scale (torch.Tensor): The scale value to set. Please ensure that the scale passed in matches the scale of the main_loss. """ MoEAuxLossAutoScaler.main_loss_backward_scale = scale diff --git a/megatron/core/transformer/moe/router.py b/megatron/core/transformer/moe/router.py index 0cf0ae6568..c4470fab6c 100644 --- a/megatron/core/transformer/moe/router.py +++ b/megatron/core/transformer/moe/router.py @@ -108,6 +108,7 @@ def __init__( self.topk = self.config.moe_router_topk self.routing_type = self.config.moe_router_load_balancing_type self.moe_aux_loss_func = switch_load_balancing_loss_func + self.input_jitter = None def sinkhorn_load_balancing(self, logits: torch.Tensor): """Apply sinkhorn routing to the logits tensor. 
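A minimal sketch of how the tensor-valued auxiliary-loss scale above is consumed, assuming the MoEAuxLossAutoScaler shown in the moe_utils.py hunk is importable and using a placeholder microbatch count:

    import torch

    from megatron.core.transformer.moe.moe_utils import MoEAuxLossAutoScaler

    num_microbatches = 8  # placeholder; supplied by the pipeline schedule in real training

    # Keeping the scale as a torch.Tensor (rather than a Python int) lets the division here
    # and the multiplication inside backward() stay on a consistent device and dtype,
    # matching how the main loss scale is handled.
    loss_scale = torch.tensor(1.0)
    MoEAuxLossAutoScaler.set_loss_scale(loss_scale / num_microbatches)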
diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_4experts.json b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_4experts.json index 022dee643b..4bdd9b671d 100644 --- a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_4experts.json +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_4experts.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79931, 10.855, 10.86219, 10.8371, 10.83378, 10.8008, 10.60169, 10.6114, 10.53828, 10.26949]}, "num-zeros": {"start_step": 0, "end_step": 34, "step_interval": 5, "values": [8398.0, 8514.0, 7788.0, 8985.0, 9107.0, 8981.0, 9279.0]}, "iteration_timing_avg": 0.37232617647058813} +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.80342, 10.85864, 10.86188, 10.83807, 10.83268, 10.80489, 10.60813, 10.61632, 10.53669, 10.27118]}, "num-zeros": {"start_step": 0, "end_step": 34, "step_interval": 5, "values": [8302.0, 7865.0, 7784.0, 8919.0, 9202.0, 9007.0, 9274.0]}, "iteration_timing_avg": 0.3891070588235294} \ No newline at end of file diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_te_2experts.json b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_te_2experts.json index 876e61c788..8617eca761 100644 --- a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_te_2experts.json +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_te_2experts.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.7912, 10.83963, 10.81166, 10.76004, 10.65544, 10.56972, 10.08242, 10.21343, 10.10767, 9.8192]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [3019.0, 3460.0, 3563.0, 3285.0, 3236.0, 3287.0, 2839.0, 3374.0, 3794.0, 3731.0]}, "iteration_timing_avg": 0.23343970588235297} +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79674, 10.84347, 10.81547, 10.76604, 10.65416, 10.56322, 10.08548, 10.21617, 10.1139, 9.8322]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [2912.0, 3584.0, 3414.0, 3357.0, 3298.0, 3173.0, 2816.0, 3211.0, 3817.0, 3728.0]}, "iteration_timing_avg": 0.2862067647058823} \ No newline at end of file diff --git a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_te_4experts2parallel.json b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_te_4experts2parallel.json index 97033d78eb..98fc4c9355 100644 --- a/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_te_4experts2parallel.json +++ b/tests/functional_tests/test_results/gpt3/gpt3_tp2_pp2_1nodes_50steps_core_enabled_te_4experts2parallel.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79674, 10.84347, 10.81547, 10.76604, 10.65416, 10.56322, 10.08548, 10.21617, 10.1139, 9.8322]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [2912.0, 3584.0, 3414.0, 3357.0, 3298.0, 3173.0, 2816.0, 3211.0, 3817.0, 3728.0]}, "iteration_timing_avg": 0.27967117647058826} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.82194, 10.86461, 10.85816, 10.80566, 10.71345, 10.63249, 10.15902, 10.27938, 10.18516, 9.88286]}, "num-zeros": {"start_step": 0, "end_step": 
50, "step_interval": 5, "values": [7126.0, 8754.0, 8834.0, 8614.0, 7854.0, 8202.0, 7007.0, 8641.0, 9234.0, 9655.0]}, "iteration_timing_avg": 0.30157323529411767} \ No newline at end of file From 5cce2b57a67d7c39986e21826ac82cc163a86711 Mon Sep 17 00:00:00 2001 From: zshao Date: Fri, 26 Jan 2024 18:17:02 +0800 Subject: [PATCH 225/296] Move e2e metrics tracking before training_log call --- megatron/training.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/megatron/training.py b/megatron/training.py index 7c91c968fe..27423c139e 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -979,15 +979,16 @@ def track_e2e_metrics(): params_norm = None if args.log_params_norm: params_norm = calc_params_l2_norm(model) + + if iteration % args.log_interval == 0: + track_e2e_metrics() + report_memory_flag = training_log(loss_dict, total_loss_dict, optimizer.param_groups[0]['lr'], iteration, loss_scale, report_memory_flag, skipped_iter, grad_norm, params_norm, num_zeros_in_grad) - if iteration % args.log_interval == 0: - track_e2e_metrics() - # Autoresume if args.adlr_autoresume and \ (iteration % args.adlr_autoresume_interval == 0): From 1fc103f361770d43597640d9f40b722e5f7fa40b Mon Sep 17 00:00:00 2001 From: Chen Cui Date: Fri, 26 Jan 2024 08:47:31 -0800 Subject: [PATCH 226/296] formatting Signed-off-by: Chen Cui --- megatron/core/transformer/attention.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/megatron/core/transformer/attention.py b/megatron/core/transformer/attention.py index bd5859baac..d677003c50 100644 --- a/megatron/core/transformer/attention.py +++ b/megatron/core/transformer/attention.py @@ -279,9 +279,7 @@ def forward( query = apply_rotary_pos_emb( query, q_pos_emb, config=self.config, cu_seqlens=cu_seqlens_q ) - key = apply_rotary_pos_emb( - key, k_pos_emb, config=self.config, cu_seqlens=cu_seqlens_kv - ) + key = apply_rotary_pos_emb(key, k_pos_emb, config=self.config, cu_seqlens=cu_seqlens_kv) # TODO, can apply positional embedding to value_layer so it has # absolute positional embedding. # otherwise, only relative positional embedding takes effect From 16e6e9b8522722df500dd07328093680e1f69091 Mon Sep 17 00:00:00 2001 From: Chen Cui Date: Fri, 26 Jan 2024 08:49:01 -0800 Subject: [PATCH 227/296] typo Signed-off-by: Chen Cui --- megatron/core/models/common/embeddings/rotary_pos_embedding.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/megatron/core/models/common/embeddings/rotary_pos_embedding.py b/megatron/core/models/common/embeddings/rotary_pos_embedding.py index e713e05097..2ab5164d57 100644 --- a/megatron/core/models/common/embeddings/rotary_pos_embedding.py +++ b/megatron/core/models/common/embeddings/rotary_pos_embedding.py @@ -212,7 +212,7 @@ def apply_rotary_pos_emb( fused/unfused kernels, or bshd (conventional) / thd (packed seq) format """ if config.apply_rope_fusion and not HAVE_APPLY_ROPE_FUSION: - # setting apply_rope_fusion in config to False so that subsequent queries to this config also return Flase + # setting apply_rope_fusion in config to False so that subsequent queries to this config also return False config.apply_rope_fusion = False if not getattr(apply_rotary_pos_emb, "printed_fused_warning", False): logger.warning( From 3df96f11739e7c7eb886b714313d33cebb3ab6fe Mon Sep 17 00:00:00 2001 From: Alexandros Koumparoulis Date: Fri, 26 Jan 2024 10:41:55 -0800 Subject: [PATCH 228/296] Add _CPU_EXPERT_MODEL_PARALLEL_WORLD_SIZE flag in parallel-state to allow... 
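A short usage sketch for the override hooks added in the diff below, assuming no initialized torch.distributed backend; the world size and rank values are placeholders:

    import megatron.core.parallel_state as parallel_state

    # Force the expert-parallel topology instead of deriving it from process groups.
    parallel_state.set_expert_model_parallel_world_size(4)
    parallel_state.set_expert_model_parallel_rank(1)

    assert parallel_state.get_expert_model_parallel_world_size() == 4
    assert parallel_state.get_expert_model_parallel_rank() == 1

    # destroy_model_parallel() resets both overrides to None along with the rest of the
    # parallel state globals.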
--- megatron/core/parallel_state.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/megatron/core/parallel_state.py b/megatron/core/parallel_state.py index c65d8a5f7f..ef62e76969 100644 --- a/megatron/core/parallel_state.py +++ b/megatron/core/parallel_state.py @@ -37,8 +37,10 @@ # These values enable us to change the mpu sizes on the fly. _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE = None _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = None +_MPU_EXPERT_MODEL_PARALLEL_WORLD_SIZE = None _MPU_TENSOR_MODEL_PARALLEL_RANK = None _MPU_PIPELINE_MODEL_PARALLEL_RANK = None +_MPU_EXPERT_MODEL_PARALLEL_RANK = None # A list of ranks that have a copy of the embedding. _EMBEDDING_GLOBAL_RANKS = None @@ -622,6 +624,11 @@ def get_data_modulo_expert_parallel_group(): return _DATA_MODULO_EXPERT_PARALLEL_GROUP +def set_expert_model_parallel_world_size(world_size): + global _MPU_EXPERT_MODEL_PARALLEL_WORLD_SIZE + _MPU_EXPERT_MODEL_PARALLEL_WORLD_SIZE = world_size + + def set_tensor_model_parallel_world_size(world_size): """Set the tensor model parallel size""" global _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE @@ -656,6 +663,12 @@ def get_pipeline_model_parallel_world_size(): return torch.distributed.get_world_size(group=get_pipeline_model_parallel_group()) +def set_expert_model_parallel_rank(rank): + """Set expert model parallel rank.""" + global _MPU_EXPERT_MODEL_PARALLEL_RANK + _MPU_EXPERT_MODEL_PARALLEL_RANK = rank + + def set_tensor_model_parallel_rank(rank): """Set tensor model parallel rank.""" global _MPU_TENSOR_MODEL_PARALLEL_RANK @@ -674,6 +687,14 @@ def set_pipeline_model_parallel_split_rank(rank): _PIPELINE_MODEL_PARALLEL_SPLIT_RANK = rank +def get_expert_model_parallel_rank(): + """Return my rank for the tensor model parallel group.""" + global _MPU_EXPERT_MODEL_PARALLEL_RANK + if _MPU_EXPERT_MODEL_PARALLEL_RANK is not None: + return _MPU_EXPERT_MODEL_PARALLEL_RANK + return torch.distributed.get_rank(group=get_tensor_and_expert_parallel_group()) + + def get_tensor_model_parallel_rank(): """Return my rank for the tensor model parallel group.""" global _MPU_TENSOR_MODEL_PARALLEL_RANK @@ -889,6 +910,8 @@ def get_context_parallel_rank(): def get_expert_model_parallel_world_size(): """Return world size for the expert model parallel group""" + if _MPU_EXPERT_MODEL_PARALLEL_WORLD_SIZE: + return _MPU_EXPERT_MODEL_PARALLEL_WORLD_SIZE if torch.distributed.is_available() and torch.distributed.is_initialized(): tensor_and_expert_parallel_world_size = torch.distributed.get_world_size( group=get_tensor_and_expert_parallel_group() @@ -913,6 +936,8 @@ def get_tensor_and_expert_parallel_world_size(): def get_expert_model_parallel_rank(): """Return my rank for the expert parallel group""" + if _MPU_EXPERT_MODEL_PARALLEL_RANK: + return _MPU_EXPERT_MODEL_PARALLEL_RANK if torch.distributed.is_available() and torch.distributed.is_initialized(): tensor_and_expert_parallel_rank = torch.distributed.get_rank( group=get_tensor_and_expert_parallel_group() @@ -991,3 +1016,7 @@ def destroy_model_parallel(): _MPU_PIPELINE_MODEL_PARALLEL_RANK = None global _GLOBAL_MEMORY_BUFFER _GLOBAL_MEMORY_BUFFER = None + global _MPU_EXPERT_MODEL_PARALLEL_WORLD_SIZE + _MPU_EXPERT_MODEL_PARALLEL_WORLD_SIZE = None + global _MPU_EXPERT_MODEL_PARALLEL_RANK + _MPU_EXPERT_MODEL_PARALLEL_RANK = None From 567fab7bdfa9fef326793c0f4a991d3ceef411f9 Mon Sep 17 00:00:00 2001 From: shanmugamr Date: Fri, 26 Jan 2024 11:08:21 -0800 Subject: [PATCH 229/296] Fix formatting --- megatron/core/models/bert/bert_model.py | 7 +++++-- 1 file changed, 
5 insertions(+), 2 deletions(-) diff --git a/megatron/core/models/bert/bert_model.py b/megatron/core/models/bert/bert_model.py index 8df3e39693..14eabf1737 100644 --- a/megatron/core/models/bert/bert_model.py +++ b/megatron/core/models/bert/bert_model.py @@ -1,8 +1,8 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +import os from typing import Literal, Optional import torch -import os from torch import Tensor from megatron.core import parallel_state @@ -60,7 +60,10 @@ def __init__( if return_embeddings: assert self.post_process and self.add_binary_head - assert os.getenv('NVTE_ALLOW_NONDETERMINISTIC_ALGO') == '0' or os.getenv('NVTE_FLASH_ATTN') == '0', "Bert currently does not support flash attention. Please set env variable NVTE_FLASH_ATTN=0 or set NVTE_ALLOW_NONDETERMINISTIC_ALGO=0" + assert ( + os.getenv('NVTE_ALLOW_NONDETERMINISTIC_ALGO') == '0' + or os.getenv('NVTE_FLASH_ATTN') == '0' + ), "Bert currently does not support flash attention. Please set env variable NVTE_FLASH_ATTN=0 or set NVTE_ALLOW_NONDETERMINISTIC_ALGO=0" self.config: TransformerConfig = config self.transformer_layer_spec: ModuleSpec = transformer_layer_spec From 8d8241a9cfdf5ad6c511c5303a2623185ee18c3c Mon Sep 17 00:00:00 2001 From: John Kamalu Date: Fri, 26 Jan 2024 15:15:52 -0800 Subject: [PATCH 230/296] Support for raw and mock datasets --- megatron/arguments.py | 3 + megatron/core/datasets/blended_dataset.py | 4 +- .../blended_megatron_dataset_builder.py | 98 +++--- .../blended_megatron_dataset_config.py | 44 ++- megatron/core/datasets/gpt_dataset.py | 168 ++++++---- megatron/core/datasets/megatron_dataset.py | 150 ++++++--- megatron/core/datasets/megatron_tokenizer.py | 141 ++++++++ megatron/tokenizer/tokenizer.py | 82 +---- pretrain_gpt.py | 19 +- tests/unit_tests/data/test_builder.py | 165 ++++++++++ .../data/test_builder_mock_gpt_dataset.py | 54 ++++ .../unit_tests/data/test_preprocess_mmdata.py | 47 ++- tests/unit_tests/test_utilities.py | 18 +- tools/retro/query/multi_split_gpt_dataset.py | 5 +- tools/retro/sft/dataset_conv.py | 302 +++++++++--------- tools/retro/sft/sft_gpt_dataset.py | 90 ------ tools/retro/sft/sft_retro.py | 75 ++++- 17 files changed, 965 insertions(+), 500 deletions(-) create mode 100644 megatron/core/datasets/megatron_tokenizer.py create mode 100644 tests/unit_tests/data/test_builder.py create mode 100644 tests/unit_tests/data/test_builder_mock_gpt_dataset.py delete mode 100644 tools/retro/sft/sft_gpt_dataset.py diff --git a/megatron/arguments.py b/megatron/arguments.py index 90d8651f17..695b96ca1c 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -1217,6 +1217,9 @@ def _add_data_args(parser): 'dataset2-path ...') group.add_argument('--data-cache-path', default=None, help='Path to a directory to hold cached index files.') + group.add_argument('--mock-data', action='store_true', + help='Skip data loading and validation and opt for artificial ' + 'generation of mock data when an implementation is available.') group.add_argument('--vocab-size', type=int, default=None, help='Size of vocab before EOD or padding.') diff --git a/megatron/core/datasets/blended_dataset.py b/megatron/core/datasets/blended_dataset.py index 421d193c3b..7c424f1ce8 100644 --- a/megatron/core/datasets/blended_dataset.py +++ b/megatron/core/datasets/blended_dataset.py @@ -68,7 +68,9 @@ def __init__( unique_identifiers["weights"] = self.weights unique_identifiers["size"] = self.size - self.unique_description = json.dumps(unique_identifiers, indent=4) + self.unique_description = 
json.dumps( + unique_identifiers, indent=4, default=lambda obj: obj.unique_identifiers + ) self.unique_description_hash = hashlib.md5( self.unique_description.encode("utf-8") ).hexdigest() diff --git a/megatron/core/datasets/blended_megatron_dataset_builder.py b/megatron/core/datasets/blended_megatron_dataset_builder.py index c5c509ea7c..383d9b4a05 100644 --- a/megatron/core/datasets/blended_megatron_dataset_builder.py +++ b/megatron/core/datasets/blended_megatron_dataset_builder.py @@ -2,21 +2,24 @@ import logging import math -from typing import Any, Callable, List, Optional, Tuple, Type, Union +from typing import Any, Callable, Iterable, List, Optional, Tuple, Type, Union import numpy import torch from megatron.core.datasets.blended_dataset import BlendedDataset from megatron.core.datasets.blended_megatron_dataset_config import BlendedMegatronDatasetConfig -from megatron.core.datasets.indexed_dataset import MMapIndexedDataset -from megatron.core.datasets.megatron_dataset import MegatronDataset +from megatron.core.datasets.megatron_dataset import LowLevelDataset, MegatronDataset, MockDataset from megatron.core.datasets.utils import Split, normalize logger = logging.getLogger(__name__) +MidLevelDataset = Union[MegatronDataset, MockDataset] + +TopLevelDataset = Union[BlendedDataset, MidLevelDataset] + DistributedDataset = Union[ - BlendedDataset, MegatronDataset, MMapIndexedDataset, torch.utils.data.Dataset + TopLevelDataset, MidLevelDataset, LowLevelDataset, torch.utils.data.Dataset ] @@ -33,13 +36,15 @@ class BlendedMegatronDatasetBuilder(object): """ def __init__( - self, cls: Type[MegatronDataset], sizes: List[int], config: BlendedMegatronDatasetConfig, + self, cls: Type[MidLevelDataset], sizes: List[int], config: BlendedMegatronDatasetConfig, ): self.cls = cls self.sizes = sizes self.config = config - def build(self) -> List[Optional[Union[BlendedDataset, MegatronDataset]]]: + assert not self.config.mock or issubclass(self.cls, MockDataset) + + def build(self) -> List[Optional[TopLevelDataset]]: """Build all dataset splits according to the provided blend(s) This method is distributed-aware and must be called on all ranks. @@ -50,24 +55,28 @@ def build(self) -> List[Optional[Union[BlendedDataset, MegatronDataset]]]: splits from separate distributions. Returns: - List[Optional[Union[BlendedDataset, MegatronDataset]]]: A list of either - MegatronDataset or BlendedDataset (or None) per split + List[Optional[TopLevelDataset]]: A list containing a dataset instance (or None) per + split """ return self._build_blended_dataset_splits() - def _build_blended_dataset_splits( - self, - ) -> List[Optional[Union[BlendedDataset, MegatronDataset]]]: + def _build_blended_dataset_splits(self,) -> List[Optional[TopLevelDataset]]: """Build all dataset splits according to the provided blend(s) See the BlendedMegatronDatasetBuilder.build alias for more information. 
Returns: - List[Optional[Union[BlendedDataset, MegatronDataset]]]: A list of either - MegatronDataset or BlendedDataset (or None) per split + List[Optional[TopLevelDataset]]: A list containing a dataset instance (or None) per + split """ - if self.config.blend: + # Return fake "mock" datasets + if self.config.mock: + + return self._build_megatron_dataset_splits(None, None, self.sizes) + + # All splits come from the same distribution + elif self.config.blend: blend = self.config.blend split = self.config.split_matrix @@ -117,6 +126,7 @@ def _build_blended_dataset_splits( return blended_datasets + # Each split comes from a separate distribution else: blended_datasets = [] for i in range(len(Split)): @@ -170,30 +180,33 @@ def _build_blended_dataset_splits( return blended_datasets def _build_megatron_dataset_splits( - self, path_prefix: str, split: List[float], sizes: List[int], - ) -> List[Optional[MegatronDataset]]: - """Build each MegatronDataset split from a single MMapIndexedDataset + self, dataset_path: Optional[str], split: List[float], sizes: List[int], + ) -> List[Optional[MidLevelDataset]]: + """Build each MidLevelDataset split from a single LowLevelDataset Args: - path_prefix (str): The MMapIndexedDataset .bin and .idx file prefix + dataset_path (Optional[str]): The path on disk which defines the underlying + LowLevelDataset, e.g. the .bin and .idx file prefix when self.cls is of type + IndexedMegatronDataset or None when self.cls is of type MockDataset split (List[Tuple[float, float]]): The dataset split matrix sizes (List[int]): The number of total samples to draw from each split Returns: - List[Optional[MegatronDataset]]: The MegatronDatset (or None) per split + List[Optional[MidLevelDataset]]: The MidLevelDataset (or None) per split """ - indexed_dataset = self.build_generic_dataset( - MMapIndexedDataset, self.config.is_built_on_rank, path_prefix, self.cls.is_multimodal(), - ) - - if indexed_dataset is not None: - if self.cls.is_split_by_sequence(): - num_elements = indexed_dataset.sequence_lengths.shape[0] - else: - num_elements = indexed_dataset.document_indices.shape[0] - 1 + # Build the low level dataset + if issubclass(self.cls, MockDataset): + low_level_dataset = None + elif issubclass(self.cls, MegatronDataset): + low_level_dataset = self.cls.build_low_level_dataset(dataset_path, self.config) + else: + raise NotImplementedError + # Build the split indices for the low level dataset + if low_level_dataset is not None: + num_elements = self.cls.numel_low_level_dataset(low_level_dataset) split_indices = [] for i, _ in enumerate(Split): if split[i] is not None: @@ -207,16 +220,18 @@ def _build_megatron_dataset_splits( else: split_indices = [None for _ in Split] - megatron_datasets = [] + # Build the mid level dataset + mid_level_datasets = [] for i, _split in enumerate(Split): - if split[i] is None: - megatron_datasets.append(None) + if not self.config.mock and split[i] is None: + mid_level_datasets.append(None) else: - megatron_datasets.append( + mid_level_datasets.append( self.build_generic_dataset( self.cls, self.config.is_built_on_rank, - indexed_dataset, + low_level_dataset, + dataset_path, split_indices[i], sizes[i], _split, @@ -224,19 +239,21 @@ def _build_megatron_dataset_splits( ) ) - return megatron_datasets + return mid_level_datasets @staticmethod def build_generic_dataset( - cls: Type[DistributedDataset], is_built_on_rank: Callable, *args: Any - ) -> Optional[DistributedDataset]: + cls: Union[Type[DistributedDataset], Callable], is_built_on_rank: Callable, *args: 
Any + ) -> Optional[Union[DistributedDataset, Iterable]]: """Build the DistributedDataset - Return None if and only if the underlying MegatronDataset class is not built on the current - rank and torch.distributed is initialized. + Return None if and only if the underlying dataset class is not built on the current rank + and torch.distributed is initialized. Args: - cls (Type[DistributedDataset]): The DistributedDataset class to be built + cls (Union[Type[DistributedDataset], Callable]): The DistributedDataset class to be + built. In special cases, e.g. when we are building the low level dataset for a + RawMegatronDataset instance, we can accept a Callable which returns an Iterable. args (Tuple[Any]): The positional arguments used to build the provided DistributedDataset class @@ -245,7 +262,8 @@ def build_generic_dataset( Exception: When the dataset constructor raises an OSError Returns: - Optional[DistributedDataset]: The DistributedDataset instantion or None + Optional[Union[DistributedDataset, Iterable]]: The DistributedDataset instantion, the + Iterable instantiation, or None """ if torch.distributed.is_initialized(): rank = torch.distributed.get_rank() diff --git a/megatron/core/datasets/blended_megatron_dataset_config.py b/megatron/core/datasets/blended_megatron_dataset_config.py index 9f8344e791..a6370eb19f 100644 --- a/megatron/core/datasets/blended_megatron_dataset_config.py +++ b/megatron/core/datasets/blended_megatron_dataset_config.py @@ -8,6 +8,7 @@ import torch +from megatron.core.datasets.megatron_tokenizer import MegatronTokenizer from megatron.core.datasets.utils import Split, log_single_rank, normalize from megatron.core.parallel_state import get_virtual_pipeline_model_parallel_rank @@ -46,6 +47,12 @@ class BlendedMegatronDatasetConfig: passed in to the constructor. path_to_cache (str): Where all re-useable dataset indices are to be cached. + + mock (bool): Whether to bypass real data loading and validation in favor of mock data + generation. + + tokenizer (Optional[MegatronTokenizer]): The MegatronTokenizer instance or None. Required + for datasets which do online tokenization. 
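    A minimal construction sketch for this base config; the blend path is a placeholder,
    and subclasses such as GPTDatasetConfig layer additional required fields (e.g. a
    tokenizer) on top:

        from megatron.core.datasets.blended_megatron_dataset_config import (
            BlendedMegatronDatasetConfig,
        )

        config = BlendedMegatronDatasetConfig(
            is_built_on_rank=lambda: True,
            random_seed=1234,
            sequence_length=2048,
            blend=["path/to/dataset-prefix"],
            split="969,30,1",
        )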
""" is_built_on_rank: Callable @@ -62,7 +69,11 @@ class BlendedMegatronDatasetConfig: split_matrix: Optional[List[Tuple[float, float]]] = field(init=False, default=None) - path_to_cache: str = None + path_to_cache: Optional[str] = None + + mock: bool = False + + tokenizer: Optional[MegatronTokenizer] = None def __post_init__(self): if torch.distributed.is_initialized(): @@ -73,20 +84,23 @@ def __post_init__(self): self.is_built_on_rank() ), "is_built_on_rank must return True when global rank = 0 and vp rank = 0" - if self.blend_per_split is not None and any(self.blend_per_split): - assert self.blend is None, "blend and blend_per_split are incompatible" - assert len(self.blend_per_split) == len( - Split - ), f"blend_per_split must contain {len(Split)} blends" - if self.split is not None: - self.split = None - log_single_rank(logger, logging.WARNING, f"Let split = {self.split}") - else: - assert self.blend is not None, "one of either blend or blend_per_split must be provided" - assert self.split is not None, "both blend and split must be provided" - split_vector = parse_and_normalize_split(self.split) - self.split_matrix = convert_split_vector_to_split_matrix(split_vector) - log_single_rank(logger, logging.INFO, f"Let split_matrix = {self.split_matrix}") + log_single_rank(logger, logging.INFO, f"mock = {self.mock}") + + if not self.mock: + if self.blend_per_split is not None and any(self.blend_per_split): + assert self.blend is None, "blend and blend_per_split are incompatible" + assert self.split is None, "split and blend_per_split are incompatible" + assert len(self.blend_per_split) == len( + Split + ), f"blend_per_split must contain {len(Split)} blends" + else: + assert ( + self.blend is not None + ), "one of either blend or blend_per_split must be provided" + assert self.split is not None, "both blend and split must be provided" + split_vector = parse_and_normalize_split(self.split) + self.split_matrix = convert_split_vector_to_split_matrix(split_vector) + log_single_rank(logger, logging.INFO, f"Let split_matrix = {self.split_matrix}") def parse_and_normalize_split(split: str) -> List[float]: diff --git a/megatron/core/datasets/gpt_dataset.py b/megatron/core/datasets/gpt_dataset.py index 52b7dfffa7..b0d9a80fc8 100644 --- a/megatron/core/datasets/gpt_dataset.py +++ b/megatron/core/datasets/gpt_dataset.py @@ -4,14 +4,14 @@ import os import time from dataclasses import dataclass -from typing import Dict, Tuple, Union +from typing import Dict, Tuple import numpy import torch from megatron.core.datasets.blended_megatron_dataset_config import BlendedMegatronDatasetConfig from megatron.core.datasets.indexed_dataset import MMapIndexedDataset -from megatron.core.datasets.megatron_dataset import MegatronDataset +from megatron.core.datasets.megatron_dataset import MegatronDataset, MockDataset from megatron.core.datasets.utils import Split, log_single_rank logger = logging.getLogger(__name__) @@ -21,24 +21,76 @@ class GPTDatasetConfig(BlendedMegatronDatasetConfig): """Configuration object for Megatron Core GPT datasets - Attributes: - return_document_ids (bool): Whether to return the document ids when querying the dataset. 
- + Attributes: reset_position_ids (bool): Option to reset the position IDs in the dataset at an interval reset_attention_mask (bool): Option to reset the attention mask from the dataset eod_mask_loss (bool): Option to enable the EOD mask loss + """ + + reset_position_ids: bool = None + + reset_attention_mask: bool = None + + eod_mask_loss: bool = None + + def __post_init__(self): + super().__post_init__() - eod_id (int): Has the identity of the end of document - + assert self.tokenizer is not None + + assert self.reset_position_ids is not None + assert self.reset_attention_mask is not None + assert self.eod_mask_loss is not None + + +class MockGPTDataset(MockDataset): + """The mock GPT dataset """ - return_document_ids: bool = False - reset_position_ids: bool = False - reset_attention_mask: bool = False - eod_mask_loss: bool = False - eod_id: int = 0 + def __getitem__(self, idx: int) -> Dict[str, numpy.ndarray]: + """Return a sequence_length + 1 token sequence consisting of the following: + - (1) S, the RNG length-sentinel in the range [0, sequence_length) + - (S) tokens + - (1) end of document token + - (sequence_length - S - 1) padding tokens + + Args: + idx (int): The integer seed for mock data generation + + Returns: + Dict[str, numpy.ndarray]: The mock data + """ + tok = 1 + pad = 2 + eod = 0 + + rng = numpy.random.default_rng(seed=[self.split.value, idx]) + length = rng.integers(low=0, high=self.config.sequence_length) + sample_toks = numpy.zeros(length) + tok + sample_pads = numpy.zeros(self.config.sequence_length - length - 1) + pad + sample = numpy.int64(numpy.concatenate([[length], sample_toks, [eod], sample_pads])) + + text = torch.from_numpy(sample).long() + labels = text[1:].contiguous() + tokens = text[:-1].contiguous() + + attention_mask, loss_mask, position_ids = _get_ltor_masks_and_position_ids( + tokens, + eod, + self.config.reset_position_ids, + self.config.reset_attention_mask, + self.config.eod_mask_loss, + ) + + return { + "tokens": tokens, + "labels": labels, + "attention_mask": attention_mask, + "loss_mask": loss_mask, + "position_ids": position_ids, + } class GPTDataset(MegatronDataset): @@ -48,6 +100,8 @@ class GPTDataset(MegatronDataset): indexed_dataset (MMapIndexedDataset): The MMapIndexedDataset around which to build the MegatronDataset + dataset_path (str): The real path on disk to the dataset, for bookkeeping + indexed_indices (numpy.ndarray): The set of the documents indices to expose num_samples (int): The number of samples to draw from the indexed dataset @@ -60,26 +114,56 @@ class GPTDataset(MegatronDataset): def __init__( self, indexed_dataset: MMapIndexedDataset, + dataset_path: str, indexed_indices: numpy.ndarray, num_samples: int, index_split: Split, config: GPTDatasetConfig, ) -> None: - super().__init__(indexed_dataset, indexed_indices, num_samples, index_split, config) + super().__init__( + indexed_dataset, dataset_path, indexed_indices, num_samples, index_split, config + ) def _finalize(self) -> None: """Abstract method implementation Load or build/cache the document, sample, and shuffle indices """ - assert isinstance(self.config, GPTDatasetConfig) - ( self.document_index, self.sample_index, self.shuffle_index, ) = self._build_document_sample_shuffle_indices() + @staticmethod + def numel_low_level_dataset(low_level_dataset: MMapIndexedDataset) -> int: + """Abstract method implementation + + For GPT, the underlying MMapIndexedDataset should be split by sequence, as opposed to, say, + BERT, which should be split by document + + Args: + 
low_level_dataset (MMapIndexedDataset): The underlying MMapIndexedDataset + + Returns: + int: The number of unique elements in the underlying MMapIndexedDataset + """ + return low_level_dataset.sequence_lengths.shape[0] + + @staticmethod + def build_low_level_dataset(dataset_path: str, config: GPTDatasetConfig) -> MMapIndexedDataset: + """Abstract method implementation + + Args: + dataset_path (str): The real path prefix to the MMapIndexedDataset .bin and .idx files + + config (BlendedMegatronDatasetConfig): The dataset config + + Returns: + MMapIndexedDataset: The underlying MMapIndexedDataset + """ + return MMapIndexedDataset(dataset_path, False) + def __len__(self) -> int: """Abstract method implementation @@ -99,15 +183,13 @@ def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]: """ text, _ = self._query_document_sample_shuffle_indices(idx) - text = torch.from_numpy(text) - - tokens_ = text.long() - labels = tokens_[1:].contiguous() - tokens = tokens_[:-1].contiguous() + text = torch.from_numpy(text).long() + labels = text[1:].contiguous() + tokens = text[:-1].contiguous() attention_mask, loss_mask, position_ids = _get_ltor_masks_and_position_ids( tokens, - self.config.eod_id, + self.config.tokenizer.eod, self.config.reset_position_ids, self.config.reset_attention_mask, self.config.eod_mask_loss, @@ -121,24 +203,6 @@ def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]: "position_ids": position_ids, } - @staticmethod - def is_multimodal() -> bool: - """Abstract method implementation - - Returns: - bool: False - """ - return False - - @staticmethod - def is_split_by_sequence() -> bool: - """Abstract method implementation - - Returns: - bool: True - """ - return True - def _query_document_sample_shuffle_indices( self, idx: int ) -> Tuple[numpy.ndarray, numpy.ndarray]: @@ -167,7 +231,7 @@ def _query_document_sample_shuffle_indices( # Add the entire sample sample_parts.append( - self.indexed_dataset.get( + self.dataset.get( self.document_index[doc_index_beg], offset=doc_index_beg_offset, length=doc_index_end_offset - doc_index_beg_offset + 1, @@ -184,7 +248,7 @@ def _query_document_sample_shuffle_indices( offset = 0 if i > doc_index_beg else doc_index_beg_offset length = None if i < doc_index_end else doc_index_end_offset + 1 sample_parts.append( - self.indexed_dataset.get(self.document_index[i], offset=offset, length=length) + self.dataset.get(self.document_index[i], offset=offset, length=length) ) return ( @@ -218,7 +282,7 @@ def _build_document_sample_shuffle_indices( path_to_cache = self.config.path_to_cache if path_to_cache is None: path_to_cache = os.path.join( - self.indexed_dataset.path_prefix, "cache", f"{type(self).__name__}_indices" + self.dataset.path_prefix, "cache", f"{type(self).__name__}_indices" ) get_path_to = lambda suffix: os.path.join( @@ -304,7 +368,7 @@ def _build_document_sample_shuffle_indices( ) t_beg = time.time() document_index = _build_document_index( - self.indexed_indices, num_epochs, numpy_random_state, separate_final_epoch + self.indices, num_epochs, numpy_random_state, separate_final_epoch ) numpy.save(path_to_document_index, document_index, allow_pickle=True) t_end = time.time() @@ -320,9 +384,9 @@ def _build_document_sample_shuffle_indices( from megatron.core.datasets import helpers assert document_index.dtype == numpy.int32 - assert self.indexed_dataset.sequence_lengths.dtype == numpy.int32 + assert self.dataset.sequence_lengths.dtype == numpy.int32 sample_index = helpers.build_sample_idx( - self.indexed_dataset.sequence_lengths, + 
self.dataset.sequence_lengths, document_index, sequence_length, num_epochs, @@ -405,7 +469,7 @@ def _get_num_tokens_per_epoch(self) -> int: Returns: int: The number of tokens in a single epoch """ - return int(numpy.sum(self.indexed_dataset.sequence_lengths[self.indexed_indices])) + return int(numpy.sum(self.dataset.sequence_lengths[self.indices])) def _get_num_epochs(self, num_tokens_per_epoch: int) -> int: """Calculate the number of epochs @@ -521,10 +585,7 @@ def _get_ltor_masks_and_position_ids( torch.Tensor : The mask used for loss value during training torch.Tensor : The position ID's of the token - """ - - # Extract batch size and sequence length. seq_length = data.numel() attention_mask = torch.tril(torch.ones((seq_length, seq_length), device=data.device)).unsqueeze( @@ -543,14 +604,13 @@ def _get_ltor_masks_and_position_ids( position_ids = position_ids.clone() if reset_position_ids or reset_attention_mask: - - # Find indecies where EOD token is. - eod_index = position_ids[data[b] == eod_token] - # Detach indecies from positions if going to modify positions. + # Find indices where EOD token is. + eod_index = position_ids[data == eod_token] + # Detach indices from positions if going to modify positions. if reset_position_ids: eod_index = eod_index.clone() - # Loop through EOD indecies: + # Loop through EOD indices: prev_index = 0 for j in range(eod_index.numel()): i = eod_index[j] diff --git a/megatron/core/datasets/megatron_dataset.py b/megatron/core/datasets/megatron_dataset.py index e7fecb64fa..c95a7d2ea5 100644 --- a/megatron/core/datasets/megatron_dataset.py +++ b/megatron/core/datasets/megatron_dataset.py @@ -2,9 +2,9 @@ import hashlib import json -from abc import ABC, abstractmethod, abstractstaticmethod +from abc import ABC, abstractmethod from collections import OrderedDict -from typing import Dict, List, Union +from typing import Any, Dict, Iterable, List, Union import numpy import torch @@ -13,63 +13,115 @@ from megatron.core.datasets.indexed_dataset import MMapIndexedDataset from megatron.core.datasets.utils import Split +LowLevelDataset = Union[MMapIndexedDataset, Iterable] + class MegatronDataset(ABC, torch.utils.data.Dataset): - """The wrapper class from which dataset classes should inherit e.g. GPTDataset + """The highest level wrapper class from which all dataset classes should inherit Args: - indexed_dataset (MMapIndexedDataset): The MMapIndexedDataset around which to build the - MegatronDataset + dataset (LowLevelDataset): The dataset around which to build the MegatronDataset + + dataset_path (str): The real path on disk to the dataset, for bookkeeping. TODO: subsume + this argument by enforcing auto-bookkeeping in the dataset class type. 
- indexed_indices (numpy.ndarray): The set of the documents indices to expose + indices (numpy.ndarray): The set of the documents indices to expose num_samples (int): The number of samples to draw from the indexed dataset - index_split (Split): The indexed_indices Split + index_split (Split): The indices Split config (BlendedMegatronDatasetConfig): The container for all config sourced parameters """ def __init__( self, - indexed_dataset: MMapIndexedDataset, - indexed_indices: numpy.ndarray, + dataset: LowLevelDataset, + dataset_path: str, + indices: numpy.ndarray, num_samples: int, index_split: Split, config: BlendedMegatronDatasetConfig, ) -> None: - assert indexed_indices.size > 0 - assert num_samples > 0 - assert self.is_multimodal() == indexed_dataset.multimodal - assert self.is_split_by_sequence() != self.is_split_by_document() - - self.indexed_dataset = indexed_dataset - self.indexed_indices = indexed_indices + self.dataset = dataset + self.dataset_path = dataset_path + self.indices = indices self.num_samples = num_samples self.index_split = index_split self.config = config self.unique_identifiers = OrderedDict() self.unique_identifiers["class"] = type(self).__name__ - self.unique_identifiers["path_prefix"] = self.indexed_dataset.path_prefix + self.unique_identifiers["dataset_path"] = self.dataset_path self.unique_identifiers["num_samples"] = self.num_samples self.unique_identifiers["index_split"] = self.index_split.name for attr in self._key_config_attributes(): self.unique_identifiers[attr] = getattr(self.config, attr) - self.unique_description = json.dumps(self.unique_identifiers, indent=4) + self.unique_description = json.dumps( + self.unique_identifiers, indent=4, default=lambda obj: obj.unique_identifiers + ) self.unique_description_hash = hashlib.md5( self.unique_description.encode("utf-8") ).hexdigest() self._finalize() - @abstractmethod def _finalize(self) -> None: """Build the dataset and assert any subclass-specific conditions """ pass + @staticmethod + def numel_low_level_dataset(low_level_dataset: LowLevelDataset) -> int: + """Return the number of elements in the underlying low level dataset for the purpose of + segregating the train/valid/test split indices + + It may be that the low level dataset can be split any number of ways, depending on the mid + level dataset it supports, which is why we define the "number of elements" function + separately from the __len__ function here in the mid level dataset class + + Args: + low_level_dataset (LowLevelDataset): The underlying low level dataset + + Returns: + int: The number of elements in the underlying low level dataset + """ + raise NotImplementedError + + @staticmethod + def build_low_level_dataset( + dataset_path: str, config: BlendedMegatronDatasetConfig + ) -> LowLevelDataset: + """Build the low level dataset via a function to be called from within + BlendedMegatronDatasetBuilder.build_generic_dataset + + It may be that the low level dataset spans any subset of train/valid/test splits, which is + why we define a static "build" function separately from the constructor in the mid level + dataset class + + Args: + dataset_path (str): The real path on disk to the dataset + + config (BlendedMegatronDatasetConfig): The dataset config + + Returns: + LowLevelDataset: The low level dataset + """ + raise NotImplementedError + + @staticmethod + def _key_config_attributes() -> List[str]: + """Return all config attributes which contribute to uniquely identifying the dataset. 
+ + These attributes will be used to build a uniquely identifying string and MD5 hash which + will be used to cache/load dataset resources from run to run. + + Returns: + List[str]: The key config attributes + """ + return ["random_seed", "sequence_length", "split", "split_matrix", "tokenizer"] + @abstractmethod def __len__(self) -> int: """Return the length of the dataset @@ -91,45 +143,45 @@ def __getitem__(self, idx: int) -> Dict[str, Union[torch.Tensor, numpy.ndarray]] """ pass - @abstractstaticmethod - def is_multimodal() -> bool: - """Return True if the inheritor class and its internal MMapIndexedDataset are multimodal - Returns: - bool: See abstract implementation - """ - pass +class MockDataset(MegatronDataset): + """The highest level wrapper class from which all dataset classes should inherit - @abstractstaticmethod - def is_split_by_sequence() -> bool: - """Return whether the dataset is split by sequence + The MockDataset is a special, one-off class that should not serve as a precedent for developers + seeking to extend the MegatronDataset. This class is incompatible with BlendedDataset - For example, the GPT train/valid/test split is document agnostic + This class cannibalizes the constructor of the parent class. As such, we do not need to + enumerate the constructor parameters. They may be populated, but most are superfluous and can + be None. Only the split and the config are required. - Returns: - bool: See abstract implementation - """ - pass + Args: + args (Tuple[Any]): The positional arguments used to build an arbitrary MegatronDataset + """ - @classmethod - def is_split_by_document(cls) -> bool: - """Return whether the dataset is split by document + def __init__(self, *args: Any) -> None: + self.split = None + self.config = None - For example, the BERT train/valid/test split is document aware + # Extract a select few parameters + for arg in args: + # Extract the split for RNG parameterization + if issubclass(type(arg), Split): + assert self.split is None + self.split = arg + # Extract the config for sequence_length and mock attribute values + if issubclass(type(arg), BlendedMegatronDatasetConfig): + assert self.config is None + self.config = arg - Returns: - bool: The negation of cls.is_split_by_sequence - """ - return not cls.is_split_by_sequence() + assert self.split is not None + assert self.config is not None - @staticmethod - def _key_config_attributes() -> List[str]: - """Return all config attributes which contribute to uniquely identifying the dataset. + assert self.config.mock - These attributes will be used to build a uniquely identifying string and MD5 hash which - will be used to cache/load the dataset from run to run. 
+ def __len__(self) -> int: + """Return an arbitrary length Returns: - List[str]: The key config attributes + int: The torch.int16 max representable value """ - return ["random_seed", "sequence_length", "split", "split_matrix"] + return torch.iinfo(torch.int16).max diff --git a/megatron/core/datasets/megatron_tokenizer.py b/megatron/core/datasets/megatron_tokenizer.py new file mode 100644 index 0000000000..fbea419969 --- /dev/null +++ b/megatron/core/datasets/megatron_tokenizer.py @@ -0,0 +1,141 @@ +import json +from abc import ABC, abstractmethod +from collections import OrderedDict +from typing import Any + +import numpy + + +class MegatronTokenizer(ABC): + """Abstract class for tokenizer + + Absent a config or class-specific tracking of which objects are uniquely identifying, we must + include all key word arguments as unique identifiers + + Args: + tokenizer_paths (Tuple[str]): All tokenizer source paths or prefixes + + kwargs (Dict[str, Any]): All tokenizer options + """ + + def __init__(self, *tokenizer_paths: str, **tokenizer_options: Any): + + self.unique_identifiers = OrderedDict() + self.unique_identifiers["class"] = type(self).__name__ + self.unique_identifiers["tokenizer_path"] = list(tokenizer_paths) + for option in tokenizer_options: + self.unique_identifiers[option] = str(tokenizer_options[option]) + + self.unique_description = json.dumps(self.unique_identifiers, indent=4) + + super().__init__() + + @abstractmethod + def tokenize(self, text: str) -> numpy.ndarray: + """Convert text to embedding ids + + Args: + text (str): The text to convert + + Returns: + numpy.ndarray: The converted embedding ids + """ + pass + + def detokenize(self, ids: numpy.ndarray) -> str: + """Convert embedding ids to text + + Args: + ids (numpy.ndarray): The ids to convert + + Returns: + str: The converted text + + Raises: + NotImplementedError: Non-abstract, optional method + """ + raise NotImplementedError("{} has no method 'detokenize'".format(type(self).__name__)) + + @property + @abstractmethod + def vocab(self): + """Dictionary from vocab text token to id token + """ + pass + + @property + @abstractmethod + def inv_vocab(self): + """Dictionary from vocab id token to text token + """ + pass + + @property + @abstractmethod + def vocab_size(self): + """The vocabulary size + """ + pass + + @property + def cls(self): + """The CLS token id + + Raises: + NotImplementedError: Non-abstract, optional attribute + """ + raise NotImplementedError("{} has no attribute 'cls'".format(type(self).__name__)) + + @property + def sep(self): + """The SEP token id + + Raises: + NotImplementedError: Non-abstract, optional attribute + """ + raise NotImplementedError("{} has no attribute 'sep'".format(type(self).__name__)) + + @property + def pad(self): + """The PAD token id + + Raises: + NotImplementedError: Non-abstract, optional attribute + """ + raise NotImplementedError("{} has no attribute 'pad'".format(type(self).__name__)) + + @property + def eod(self): + """The EOD token id + + Raises: + NotImplementedError: Non-abstract, optional attribute + """ + raise NotImplementedError("{} has no attribute 'eod'".format(type(self).__name__)) + + @property + def bos(self): + """The BOS token id + + Raises: + NotImplementedError: Non-abstract, optional attribute + """ + raise NotImplementedError("{} has no attribute 'bos'".format(type(self).__name__)) + + @property + def eos(self): + """The EOS token id + + Raises: + NotImplementedError: Non-abstract, optional attribute + """ + raise NotImplementedError("{} has no attribute 
'eos'".format(type(self).__name__)) + + @property + def mask(self): + """The MASK token id + + Raises: + NotImplementedError: Non-abstract, optional attribute + """ + raise NotImplementedError("{} has no attribute 'mask'".format(type(self).__name__)) diff --git a/megatron/tokenizer/tokenizer.py b/megatron/tokenizer/tokenizer.py index 98643343c5..c618b99809 100644 --- a/megatron/tokenizer/tokenizer.py +++ b/megatron/tokenizer/tokenizer.py @@ -5,9 +5,12 @@ from abc import ABC from abc import abstractmethod +from megatron.core.datasets.megatron_tokenizer import MegatronTokenizer + from .bert_tokenization import FullTokenizer as FullBertTokenizer from .gpt2_tokenization import GPT2Tokenizer + def build_tokenizer(args): """Initialize tokenizer.""" if args.rank == 0: @@ -69,73 +72,11 @@ def _vocab_size_with_padding(orig_vocab_size, args): return after -class AbstractTokenizer(ABC): - """Abstract class for tokenizer.""" - - def __init__(self, name): - self.name = name - super().__init__() - - @property - @abstractmethod - def vocab_size(self): - pass - - @property - @abstractmethod - def vocab(self): - """Dictionary from vocab text token to id token.""" - pass - - @property - @abstractmethod - def inv_vocab(self): - """Dictionary from vocab id token to text token.""" - pass - - @abstractmethod - def tokenize(self, text): - pass - - def detokenize(self, token_ids): - raise NotImplementedError('detokenizer is not implemented for {} ' - 'tokenizer'.format(self.name)) - - @property - def cls(self): - raise NotImplementedError('CLS is not provided for {} ' - 'tokenizer'.format(self.name)) - - @property - def sep(self): - raise NotImplementedError('SEP is not provided for {} ' - 'tokenizer'.format(self.name)) - - @property - def pad(self): - raise NotImplementedError('PAD is not provided for {} ' - 'tokenizer'.format(self.name)) - - @property - def eod(self): - raise NotImplementedError('EOD is not provided for {} ' - 'tokenizer'.format(self.name)) - - @property - def mask(self): - raise NotImplementedError('MASK is not provided for {} ' - 'tokenizer'.format(self.name)) - - -class _BertWordPieceTokenizer(AbstractTokenizer): +class _BertWordPieceTokenizer(MegatronTokenizer): """Original BERT wordpiece tokenizer.""" def __init__(self, vocab_file, lower_case=True, vocab_extra_ids=0): - if lower_case: - name = 'BERT Lower Case' - else: - name = 'BERT Upper Case' - super().__init__(name) + super().__init__(vocab_file, lower_case=lower_case, vocab_extra_ids=vocab_extra_ids) self.tokenizer = FullBertTokenizer(vocab_file, do_lower_case=lower_case) self.cls_id = self.tokenizer.vocab['[CLS]'] self.sep_id = self.tokenizer.vocab['[SEP]'] @@ -258,12 +199,11 @@ def additional_special_tokens(self, value): self._additional_special_tokens = value -class _GPT2BPETokenizer(AbstractTokenizer): +class _GPT2BPETokenizer(MegatronTokenizer): """Original GPT2 BPE tokenizer.""" def __init__(self, vocab_file, merge_file): - name = 'GPT2 BPE' - super().__init__(name) + super().__init__(vocab_file, merge_file) self.tokenizer = GPT2Tokenizer(vocab_file, merge_file, errors='replace', special_tokens=[], max_len=None) @@ -292,12 +232,11 @@ def eod(self): return self.eod_id -class _SentencePieceTokenizer(AbstractTokenizer): +class _SentencePieceTokenizer(MegatronTokenizer): """SentencePieceTokenizer-Megatron wrapper""" def __init__(self, model_file, vocab_extra_ids=0): - name = 'SentencePieceTokenizer' - super().__init__(name) + super().__init__(model_file, vocab_extra_ids=vocab_extra_ids) import sentencepiece self.tokenizer = 
sentencepiece.SentencePieceProcessor(model_file=model_file) @@ -466,6 +405,7 @@ def mask(self): def additional_special_tokens_ids(self): return [self.vocab[k] for k in self._t5_tokens] + class _GPTSentencePieceTokenizer(_SentencePieceTokenizer): """SentencePieceTokenizer-Megatron wrapper""" @@ -505,6 +445,7 @@ def eod(self): def additional_special_tokens_ids(self): return None + class _Llama2Tokenizer(_SentencePieceTokenizer): """SentencePieceTokenizer-Megatron wrapper""" @@ -554,6 +495,7 @@ def eod(self): def additional_special_tokens_ids(self): return None + class _NullTokenizer: def __init__(self, vocab_size): vocab_size = int(vocab_size) diff --git a/pretrain_gpt.py b/pretrain_gpt.py index acf5ea8377..499243f2c7 100644 --- a/pretrain_gpt.py +++ b/pretrain_gpt.py @@ -14,7 +14,7 @@ from megatron.core.enums import ModelType from megatron.core.datasets.blended_megatron_dataset_builder import BlendedMegatronDatasetBuilder from megatron.core.datasets.gpt_dataset import GPTDatasetConfig -from megatron.core.datasets.gpt_dataset import GPTDataset +from megatron.core.datasets.gpt_dataset import MockGPTDataset, GPTDataset import megatron.model from megatron.core.models.gpt import GPTModel from megatron.training import pretrain @@ -153,6 +153,8 @@ def is_dataset_built_on_rank(): def core_gpt_dataset_config_from_args(args): + tokenizer = get_tokenizer() + return GPTDatasetConfig( is_built_on_rank=is_dataset_built_on_rank, random_seed=args.seed, @@ -161,11 +163,11 @@ def core_gpt_dataset_config_from_args(args): blend_per_split=[args.train_data_path, args.valid_data_path, args.test_data_path], split=args.split, path_to_cache=args.data_cache_path, - return_document_ids=args.retro_return_doc_ids, + mock=args.mock_data, + tokenizer=tokenizer, reset_position_ids=args.reset_position_ids, reset_attention_mask=args.reset_attention_mask, eod_mask_loss=args.eod_mask_loss, - eod_id=get_tokenizer().eod ) @@ -177,12 +179,19 @@ def train_valid_test_datasets_provider(train_val_test_num_samples): """ args = get_args() + config = core_gpt_dataset_config_from_args(args) + + if config.mock: + dataset_type = MockGPTDataset + else: + dataset_type = GPTDataset + print_rank_0("> building train, validation, and test datasets for GPT ...") train_ds, valid_ds, test_ds = BlendedMegatronDatasetBuilder( - GPTDataset, + dataset_type, train_val_test_num_samples, - core_gpt_dataset_config_from_args(args) + config ).build() print_rank_0("> finished creating GPT datasets ...") diff --git a/tests/unit_tests/data/test_builder.py b/tests/unit_tests/data/test_builder.py new file mode 100644 index 0000000000..1052c2fdb2 --- /dev/null +++ b/tests/unit_tests/data/test_builder.py @@ -0,0 +1,165 @@ +## +# Compile megatron.core.datasets.helpers dependencies before BlendedDataset import +## + +import torch + +from megatron.core.datasets.utils import compile_helpers +from tests.unit_tests.test_utilities import Utils + +if torch.distributed.is_available(): + Utils.initialize_distributed() + if torch.distributed.get_rank() == 0: + compile_helpers() + torch.distributed.barrier() +else: + compile_helpers() + +## +# Done +## + +import os +import tempfile +from collections import defaultdict +from typing import Dict + +import numpy +import torch + +from megatron.core.datasets.blended_dataset import BlendedDataset +from megatron.core.datasets.blended_megatron_dataset_builder import BlendedMegatronDatasetBuilder +from megatron.core.datasets.blended_megatron_dataset_config import BlendedMegatronDatasetConfig +from megatron.core.datasets.megatron_dataset 
import LowLevelDataset, MegatronDataset +from megatron.core.datasets.utils import Split + + +_NUM_DATASETS = 10 + +_SEQUENCE_LENGTH = 10 + +_SIZES_PER_SPLIT = { + Split.train: 900, + Split.valid: 90, + Split.test: 10, +} + + +def do_setup(odir): + paths = defaultdict(list) + + for i in range(_NUM_DATASETS): + path_to_data = os.path.join(odir, str(i)) + os.mkdir(path_to_data) + + for split in _SIZES_PER_SPLIT: + data = numpy.zeros((_SIZES_PER_SPLIT[split], _SEQUENCE_LENGTH)) + path = os.path.join(path_to_data, f"{split.name}.npy") + numpy.save(path, data) + paths[split].append(path) + + return paths + + +def test_builder(): + + # Define the class here to avoid pytest warnings + + class TestDataset(MegatronDataset): + def _finalize(self) -> None: + self.sample_index = numpy.random.choice(self.indices, size=self.num_samples) + + @staticmethod + def numel_low_level_dataset(low_level_dataset: LowLevelDataset) -> int: + return len(low_level_dataset) + + @staticmethod + def build_low_level_dataset( + dataset_path: str, config: BlendedMegatronDatasetConfig + ) -> LowLevelDataset: + return numpy.load(dataset_path) + + def __len__(self) -> int: + return len(self.sample_index) + + def __getitem__(self, idx: int) -> Dict[str, numpy.ndarray]: + return {"text": self.dataset[self.sample_index[idx]]} + + with tempfile.TemporaryDirectory() as temp_dir: + + paths = do_setup(temp_dir) + + blends = { + split: [ + weight_or_path + for pair in zip(list(range(len(paths[split]))), paths[split]) + for weight_or_path in pair + ] + for split in Split + } + + # one dataset, one split AND multiple datasets, one split + config = BlendedMegatronDatasetConfig( + is_built_on_rank=lambda: True, + random_seed=1234, + sequence_length=_SEQUENCE_LENGTH, + blend_per_split=[[paths[Split.train][0]], blends[Split.valid], None,], + ) + datasets = BlendedMegatronDatasetBuilder(TestDataset, [100, 100, 100], config).build() + assert len(datasets[0]) == 100 and isinstance(datasets[0], TestDataset) + assert len(datasets[1]) >= 100 and isinstance(datasets[1], BlendedDataset) + assert datasets[2] is None + + # blend_per_split, all splits + config = BlendedMegatronDatasetConfig( + is_built_on_rank=lambda: True, + random_seed=1234, + sequence_length=_SEQUENCE_LENGTH, + blend_per_split=[blends[Split.train], blends[Split.valid], blends[Split.test],], + ) + datasets = BlendedMegatronDatasetBuilder(TestDataset, [100, 100, 100], config).build() + assert len(datasets[0]) >= 100 + assert len(datasets[1]) >= 100 + assert len(datasets[2]) >= 100 + + # blend_per_split, one split + config = BlendedMegatronDatasetConfig( + is_built_on_rank=lambda: True, + random_seed=1234, + sequence_length=_SEQUENCE_LENGTH, + blend_per_split=[blends[Split.train], None, None,], + ) + datasets = BlendedMegatronDatasetBuilder(TestDataset, [100, 100, 100], config).build() + assert len(datasets[0]) >= 100 + assert datasets[1] is None + assert datasets[2] is None + + # blend, 90,9,1 split + config = BlendedMegatronDatasetConfig( + is_built_on_rank=lambda: True, + random_seed=1234, + sequence_length=_SEQUENCE_LENGTH, + blend=blends[Split.train], + split="90,9,1", + ) + datasets = BlendedMegatronDatasetBuilder(TestDataset, [100, 100, 100], config).build() + assert len(datasets[0]) >= 100 + assert len(datasets[1]) >= 100 + assert len(datasets[2]) >= 100 + + # blend, 100,0,0 split + config = BlendedMegatronDatasetConfig( + is_built_on_rank=lambda: True, + random_seed=1234, + sequence_length=_SEQUENCE_LENGTH, + blend=blends[Split.train], + split="100,0,0", + ) + datasets = 
BlendedMegatronDatasetBuilder(TestDataset, [100, 100, 100], config).build() + assert len(datasets[0]) >= 100 + assert datasets[1] is None + assert datasets[2] is None + + +if __name__ == "__main__": + test_builder() diff --git a/tests/unit_tests/data/test_builder_mock_gpt_dataset.py b/tests/unit_tests/data/test_builder_mock_gpt_dataset.py new file mode 100644 index 0000000000..4c91569d22 --- /dev/null +++ b/tests/unit_tests/data/test_builder_mock_gpt_dataset.py @@ -0,0 +1,54 @@ +import random +import sys +from types import SimpleNamespace + +import numpy + +from megatron.core.datasets.blended_megatron_dataset_builder import BlendedMegatronDatasetBuilder +from megatron.core.datasets.gpt_dataset import GPTDatasetConfig, MockGPTDataset + + +def sample_N(dataset, N, randomize): + if randomize: + indices = [random.randint(0, sys.maxsize) for _ in range(N)] + else: + indices = list(range(N)) + samples = [dataset[index]["tokens"].numpy() for index in indices] + return samples + + +def test_builder_mock_data(): + config = GPTDatasetConfig( + is_built_on_rank=lambda: True, + random_seed=1234, + sequence_length=1024, + mock=True, + reset_position_ids=True, + reset_attention_mask=True, + eod_mask_loss=True, + tokenizer=SimpleNamespace(), + ) + + datasets = BlendedMegatronDatasetBuilder(MockGPTDataset, [None, None, None], config).build() + + N = 10 + + # Check iso-index split variance + subsets = [sample_N(dataset, N, randomize=False) for dataset in datasets] + assert not numpy.allclose(subsets[0], subsets[1]) + assert not numpy.allclose(subsets[0], subsets[2]) + assert not numpy.allclose(subsets[1], subsets[2]) + + # Check iso-split / iso-index identity + subset_1A = sample_N(datasets[0], N, randomize=False) + subset_1B = sample_N(datasets[0], N, randomize=False) + assert numpy.allclose(subset_1A, subset_1B) + + # Check iso-split index variance + subset_1A = sample_N(datasets[0], N, randomize=True) + subset_1B = sample_N(datasets[0], N, randomize=True) + assert not numpy.allclose(subset_1A, subset_1B) + + +if __name__ == "__main__": + test_builder_mock_data() diff --git a/tests/unit_tests/data/test_preprocess_mmdata.py b/tests/unit_tests/data/test_preprocess_mmdata.py index 34cd441827..08975a3889 100644 --- a/tests/unit_tests/data/test_preprocess_mmdata.py +++ b/tests/unit_tests/data/test_preprocess_mmdata.py @@ -9,7 +9,7 @@ import numpy from megatron.core.datasets.indexed_dataset import MMapIndexedDataset -from tests.unit_tests.data.test_preprocess_data import dummy_jsonl, gpt2_vocab, gpt2_merge +from tests.unit_tests.data.test_preprocess_data import dummy_jsonl, gpt2_merge, gpt2_vocab from tools.merge_datasets import main as merge_main from tools.preprocess_mmdata import Encoder from tools.preprocess_mmdata import get_args as build_args @@ -22,9 +22,11 @@ def dummy_img(odir_txt, odir_img): length = sum(1 for _ in reader_txt) os.makedirs(os.path.join(odir_img, os.path.splitext(name)[0]), exist_ok=False) for i in range(length): - with open(os.path.join(odir_img, os.path.splitext(name)[0], f"{str(i).zfill(4)}.img"), "wb") as writer_img: + with open( + os.path.join(odir_img, os.path.splitext(name)[0], f"{str(i).zfill(4)}.img"), "wb" + ) as writer_img: # 32 * 32 - 1 to induce preprocessing 0-index padding - writer_img.write(bytes([random.randint(0 , 255) for _ in range(32 * 32 - 1)])) + writer_img.write(bytes([random.randint(0, 255) for _ in range(32 * 32 - 1)])) def build_datasets(idir_txt, idir_img, odir, extra_args=[]): @@ -42,7 +44,14 @@ def build_datasets(idir_txt, idir_img, odir, extra_args=[]): 
def merge_datasets(idir): - sys.argv = [sys.argv[0], "--input", idir, "--output-prefix", os.path.join(idir, "merge"), "--multimodal"] + sys.argv = [ + sys.argv[0], + "--input", + idir, + "--output-prefix", + os.path.join(idir, "merge"), + "--multimodal", + ] merge_main() @@ -72,7 +81,15 @@ def do_test_preprocess_mmdata(temp_dir, extra_args=[]): # merge the datasets merge_datasets(path_to_data) - sys.argv = [sys.argv[0], "--input", None, "--input-image", None, "--output-prefix", None,] + extra_args + sys.argv = [ + sys.argv[0], + "--input", + None, + "--input-image", + None, + "--output-prefix", + None, + ] + extra_args encoder = Encoder(build_args()) encoder.initializer() @@ -119,7 +136,13 @@ def tokens_to_string(toks): merged_doc_index_index += len(dataset.document_indices) - 1 with open(realpath_raw_txt, "rt") as reader: - for json_line, image_path in zip(reader, [os.path.join(realpath_raw_img, basename) for basename in os.listdir(realpath_raw_img)]): + for json_line, image_path in zip( + reader, + [ + os.path.join(realpath_raw_img, basename) + for basename in os.listdir(realpath_raw_img) + ], + ): toks, image, length = encoder.encode((json_line, image_path)) raw_text = tokens_to_string(toks) @@ -133,14 +156,14 @@ def tokens_to_string(toks): processed_image = dataset[dataset_index + 1][0] assert dataset[dataset_index + 1][1] == 1 # reverse to account for preprocessing 0-index padding - processed_image = processed_image[::-1][0:raw_image.size] + processed_image = processed_image[::-1][0 : raw_image.size] assert ( raw_text == processed_text ), f"ERROR: {basename.split('_')[:-2]}: raw and processed documents (text) do not match" - assert ( - numpy.allclose(raw_image, processed_image) + assert numpy.allclose( + raw_image, processed_image ), f"ERROR: {basename.split('_')[:-2]}: raw and processed documents (image) do not match" dataset_index += 2 @@ -152,14 +175,14 @@ def tokens_to_string(toks): merged_image = merged_dataset[merged_index + 1][0] assert merged_dataset[merged_index + 1][1] == 1 # reverse to account for preprocessing 0-index padding - merged_image = merged_image[::-1][0:raw_image.size] + merged_image = merged_image[::-1][0 : raw_image.size] assert ( raw_text == merged_text ), f"ERROR: {basename.split('_')[:-2]}: raw and merged documents (text) do not match" - assert ( - numpy.allclose(raw_image, merged_image) + assert numpy.allclose( + raw_image, merged_image ), f"ERROR: {basename.split('_')[:-2]}: raw and merged documents (image) do not match" merged_index += 2 diff --git a/tests/unit_tests/test_utilities.py b/tests/unit_tests/test_utilities.py index b35c77b58d..49e16b0c18 100644 --- a/tests/unit_tests/test_utilities.py +++ b/tests/unit_tests/test_utilities.py @@ -9,13 +9,14 @@ class Utils: @staticmethod def initialize_distributed(): - print(f'Initializing torch.distributed with rank: {Utils.rank}, world_size: {Utils.world_size}') - torch.cuda.set_device(Utils.rank % torch.cuda.device_count()) - init_method = 'tcp://' - master_ip = os.getenv('MASTER_ADDR', 'localhost') - master_port = os.getenv('MASTER_PORT', '6000') - init_method += master_ip + ':' + master_port - torch.distributed.init_process_group(backend='nccl', world_size=Utils.world_size, rank=Utils.rank, init_method=init_method) + if not torch.distributed.is_initialized(): + print(f'Initializing torch.distributed with rank: {Utils.rank}, world_size: {Utils.world_size}') + torch.cuda.set_device(Utils.rank % torch.cuda.device_count()) + init_method = 'tcp://' + master_ip = os.getenv('MASTER_ADDR', 'localhost') + 
master_port = os.getenv('MASTER_PORT', '6000') + init_method += master_ip + ':' + master_port + torch.distributed.init_process_group(backend='nccl', world_size=Utils.world_size, rank=Utils.rank, init_method=init_method) @staticmethod def destroy_model_parallel(): @@ -25,6 +26,5 @@ def destroy_model_parallel(): @staticmethod def initialize_model_parallel(tensor_model_parallel_size = 1, pipeline_model_parallel_size = 1, virtual_pipeline_model_parallel_size = None, pipeline_model_parallel_split_rank = None): ps.destroy_model_parallel() - if not torch.distributed.is_initialized(): - Utils.initialize_distributed() + Utils.initialize_distributed() ps.initialize_model_parallel(tensor_model_parallel_size, pipeline_model_parallel_size, virtual_pipeline_model_parallel_size, pipeline_model_parallel_split_rank) \ No newline at end of file diff --git a/tools/retro/query/multi_split_gpt_dataset.py b/tools/retro/query/multi_split_gpt_dataset.py index 502f06d075..7b6d744877 100644 --- a/tools/retro/query/multi_split_gpt_dataset.py +++ b/tools/retro/query/multi_split_gpt_dataset.py @@ -59,6 +59,8 @@ class MultiSplitGPTDataset(GPTDataset): indexed_dataset (MMapIndexedDataset): The MMapIndexedDataset around which to build the MegatronDataset + dataset_path (str): The real path on disk to the dataset, for bookkeeping + indexed_indices (numpy.ndarray): The set of the documents indices to expose num_samples (int): The number of samples to draw from the indexed dataset @@ -71,12 +73,13 @@ class MultiSplitGPTDataset(GPTDataset): def __init__( self, indexed_dataset: MMapIndexedDataset, + dataset_path: str, indexed_indices: numpy.ndarray, num_samples: int, index_split: Split, config: MultiSplitGPTDatasetConfig, ) -> None: - super().__init__(indexed_dataset, indexed_indices, num_samples, index_split, config) + super().__init__(indexed_dataset, dataset_path, indexed_indices, num_samples, index_split, config) def __getitem__(self, idx: int) -> Dict[str, numpy.ndarray]: """Abstract method implementation diff --git a/tools/retro/sft/dataset_conv.py b/tools/retro/sft/dataset_conv.py index cd41748e87..d7bde54f78 100644 --- a/tools/retro/sft/dataset_conv.py +++ b/tools/retro/sft/dataset_conv.py @@ -1,74 +1,167 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +import re import json +import os +from typing import Any, Iterable, Dict + +from numpy import ndarray +from megatron.core.datasets.blended_megatron_dataset_config import BlendedMegatronDatasetConfig +from megatron.core.datasets.utils import Split import torch -import numpy as np +import numpy import glob from collections import OrderedDict -from megatron import get_tokenizer, get_args, get_retro_args +from megatron.core.datasets.blended_megatron_dataset_config import BlendedMegatronDatasetConfig +from megatron.core.datasets.megatron_dataset import LowLevelDataset, MegatronDataset +from megatron.core.datasets.utils import Split +from dataclasses import dataclass + +_DATASET_NAME_PATTERNS = { + Split.train: r"(?P[^\0]+)\/(?P=name)\_QA\_train.json", + Split.valid: r"(?P[^\0]+)\/(?P=name)\_QA\_dev.json", +} -class FtDataset(torch.utils.data.Dataset): + +@dataclass +class JsonQADatasetConfig(BlendedMegatronDatasetConfig): + """Configuration object for the QA finetuning pipeline """ - This class represents a dataset for fine-tuning GPT models using the Megatron framework. 
+ ft_neighbours: int = 1 + + bert_retriever_neighbours: bool = False + + longform_answer: bool = False + + inference_only: bool = False + + retrieved_neighbours: bool = False - Args: - name (str): Name of the dataset equals to data_prefix + fix_newsqa: bool = True - indexed_dataset (IndexedDataset): The dataset object containing the data samples. + def __post_init__(self) -> None: + super().__post_init__() + assert self.blend_per_split is not None - max_seq_length (int): Maximum sequence length for each sample in the dataset. - fewshot_list (list): A list of few-shot learning examples, if applicable. +@dataclass +class RetroJsonQADatasetConfig(JsonQADatasetConfig): + """Configuration object for the Retro QA finetuning pipeline """ - def __init__(self, name, indexed_dataset, max_seq_length, - fewshot_list=None): + retro_num_neighbors: int = None + + retro_gpt_retrieved_length: int = None + + def __post_init__(self) -> None: + super().__post_init__() + assert self.retro_num_neighbors is not None + assert self.retro_gpt_retrieved_length is not None + + +class JsonQADataset(MegatronDataset): + + def __init__(self, dataset: Any, dataset_path: str, indices: ndarray, num_samples: int, index_split: Split, config: BlendedMegatronDatasetConfig) -> None: + super().__init__(dataset, dataset_path, indices, num_samples, index_split, config) + matches = re.findall(_DATASET_NAME_PATTERNS[index_split], dataset_path) + assert len(matches) == 1 + assert len(matches[0]) > 0 + self.dataset_name = matches[0] - # Params to store. - self.dataset_name = name # dataset_name equals to data_prefix in pretrain - self.max_seq_length = max_seq_length - self.desc = name + @staticmethod + def numel_low_level_dataset(low_level_dataset: LowLevelDataset) -> int: + return len(low_level_dataset) - # For compatibility with Megatron Core BlendedDataset - self.unique_identifiers = OrderedDict() - self.unique_identifiers["class"] = type(self).__name__ - self.unique_identifiers["name"] = name + @staticmethod + def build_low_level_dataset(dataset_path: str, config: JsonQADatasetConfig) -> Iterable: + assert os.path.isfile(dataset_path), f"{dataset_path} does not exist on disk" + return preprocess(dataset_path, config) - # Dataset. - self.indexed_dataset = indexed_dataset + def __len__(self) -> int: + return len(self.dataset) - # Vocab stuff. 
- tokenizer = get_tokenizer() - self.eos_id = tokenizer.eod - self.pad_id = tokenizer.eod - self.fewshot_list = fewshot_list + def __getitem__(self, idx: int) -> Dict[str, ndarray]: + sample = self.dataset[idx % len(self.dataset)] - self.args = get_args() + # unpack tokens + query, answer, neighbours = sample - def __len__(self): - return len(list(self.indexed_dataset)) + # tokenization + output_tokens = self.config.tokenizer.tokenize(answer) - def __getitem__(self, idx): + input_tokens = reformat_prompt( + query, + neighbours, + self.dataset_name, + self.config.ft_neighbours, + len(output_tokens), + self.config.tokenizer, + self.config.sequence_length + ) - idx = idx % len(self.indexed_dataset) - sample = self.indexed_dataset[idx] + # padding + tokens, answer_mask = pad_and_convert_to_numpy( + input_tokens, output_tokens, self.config.tokenizer.pad, self.config.sequence_length, self.config.tokenizer.eos + ) - if self.args.retro_add_retriever: - return build_retro_training_sample(sample, - self.max_seq_length, # needed for padding - self.pad_id, self.eos_id, - self.dataset_name, - self.args.ft_neighbours, - self.args.shuffle_topn) - else: - return build_normal_training_sample(sample, - self.max_seq_length, # needed for padding - self.pad_id, self.eos_id, - self.dataset_name, - self.args.ft_neighbours, - self.args.shuffle_topn, - self.fewshot_list) + train_sample = { + 'text': tokens, + 'answer_mask': answer_mask, + } + + return train_sample + + +class RetroJsonQADataset(JsonQADataset): + + def __getitem__(self, idx: int) -> Dict[str, ndarray]: + + sample = self.dataset[idx % len(self.dataset)] + + # unpack tokens + query, answer, neighbours = sample + + # tokenization + output_tokens = self.config.tokenizer.tokenize(answer) + + input_tokens = reformat_prompt_retro( + query, + neighbours, + self.dataset_name, + self.config.ft_neighbours, + len(output_tokens), + self.config.tokenizer, + self.config.sequence_length + ) + + # padding + tokens, answer_mask = pad_and_convert_to_numpy( + input_tokens, + output_tokens, + self.config.tokenizer.pad, + self.config.sequence_length, + self.config.tokenizer.eos + ) + + # get retro neighbors + # context chunk and answer chunk + n_chunks_per_sample = 2 + num_neighbors = self.config.retro_num_neighbors + # disable retro encoder + neighbor_tokens = numpy.zeros( + [n_chunks_per_sample, num_neighbors, self.config.retro_gpt_retrieved_length], + dtype=numpy.int64 + ) + + train_sample = { + 'text': tokens, + 'answer_mask': answer_mask, + 'neighbor_tokens': neighbor_tokens, + 'context_len': len(input_tokens) + } + + return train_sample def format_multichoice(multichoice_options): @@ -85,17 +178,16 @@ def format_answer(answer): return " {}".format(answer) -def preprocess(data_file, inference_only=False, retrieved_neighbours=False, fix_newsqa=True): - args = get_args() - assert args.ft_neighbours > 0 - if args.longform_answer: +def preprocess(dataset_path: str, config: JsonQADatasetConfig): + assert config.ft_neighbours > 0 + if config.longform_answer: nq_examples = [] - with open(data_file, "r") as f: + with open(dataset_path, "r") as f: for fn in f: nq_examples.append(json.loads(fn)) else: nq_examples = [] - for my_data_file in sorted(glob.glob(data_file)): + for my_data_file in sorted(glob.glob(dataset_path)): with open(my_data_file, "r", encoding='utf-8') as f: nq_examples.extend(json.load(f)) @@ -104,11 +196,11 @@ def preprocess(data_file, inference_only=False, retrieved_neighbours=False, fix_ question = instance["question"] if 'qa_type' in instance and 
instance['qa_type'] == "multi_choice_qa": question = format_multichoice_question(question, instance["multichoice_options"]) - if args.bert_retriever_neighbours: + if config.bert_retriever_neighbours: contexts = instance["bert_pretrain_corpus_neighbours"] neighbours = ["source: " + ctx for ctx in contexts] else: - if retrieved_neighbours: + if config.retrieved_neighbours: contexts = instance["ctxs"] neighbours = ["title: " + ctx["title"] + ", source: " + ctx["text"] for ctx in contexts] else: @@ -118,15 +210,15 @@ def preprocess(data_file, inference_only=False, retrieved_neighbours=False, fix_ "title: " + instance["sub-paragraphs"][0] + ", source: " + instance["sub-paragraphs"][1]] else: neighbours = ["title: , source: " + instance["sub-paragraphs"]] - elif fix_newsqa and "sub_paragraph" in instance: + elif config.fix_newsqa and "sub_paragraph" in instance: neighbours = ["title: , source: " + instance["sub_paragraph"]] else: neighbours = ["title: , source: "] - if inference_only: + if config.inference_only: data.append((question, None, neighbours)) else: - if args.longform_answer: + if config.longform_answer: if "longform_answer" in instance: answers = [instance["longform_answer"]] else: @@ -160,28 +252,11 @@ def preprocess(data_file, inference_only=False, retrieved_neighbours=False, fix_ return data -def get_processed_dataset(name, data_folder): - training_file = data_folder + "/{}/{}_QA_train*.json".format(name, name) - validation_file = data_folder + "/{}/{}_QA_dev.json".format(name, name) - - dataset = {} - dataset["train"] = preprocess(training_file) - dataset["valid"] = preprocess(validation_file) - dataset["test"] = preprocess(validation_file) - - print(name, "train", len(dataset["train"])) - print(name, "valid", len(dataset["valid"])) - print(name, "test", len(dataset["test"])) - - return dataset - - -def count_stat(dataset, tokenizer): - args = get_args() +def count_stat(dataset, tokenizer, k): nb_lens = [] for i, d in enumerate(dataset): query, answer, neighbours = d - nb_lens.extend([len(tokenizer.tokenize(neighbour)) for neighbour in neighbours[:args.k]]) + nb_lens.extend([len(tokenizer.tokenize(neighbour)) for neighbour in neighbours[:k]]) print("len of nb", len(nb_lens)) print("max of len nb", max(nb_lens)) @@ -342,75 +417,6 @@ def reformat_prompt_short(query, neighbours, dataset_name, ft_neighbours, \ return input_tokens -def build_normal_training_sample(sample, - max_seq_length, - pad_id, - eos_id, - dataset_name, - ft_neighbours=1, - shuffle_topn=False, - fewshot_list=None): - # unpack tokens - query, answer, neighbours = sample - - # tokenization - tokenizer = get_tokenizer() - output_tokens = tokenizer.tokenize(answer) - - input_tokens = reformat_prompt(query, neighbours, dataset_name, ft_neighbours, len(output_tokens), tokenizer, - max_seq_length) - - # Padding - tokens, answer_mask \ - = pad_and_convert_to_numpy(input_tokens, output_tokens, - pad_id, max_seq_length, eos_id) - - train_sample = { - 'text': tokens, - 'answer_mask': answer_mask, - } - return train_sample - - -def build_retro_training_sample(sample, - max_seq_length, - pad_id, - eos_id, - dataset_name, - ft_neighbours=1, - shuffle_topn=False): - # unpack tokens - query, answer, neighbours = sample - - # tokenization - tokenizer = get_tokenizer() - output_tokens = tokenizer.tokenize(answer) - - input_tokens = reformat_prompt_retro(query, neighbours, dataset_name, ft_neighbours, len(output_tokens), tokenizer, - max_seq_length) - - # Padding - tokens, answer_mask \ - = pad_and_convert_to_numpy(input_tokens, 
output_tokens, - pad_id, max_seq_length, eos_id) - - # get retro neighbors - args = get_args() - retro_args = get_retro_args() - n_chunks_per_sample = 2 # context chunk and answer chunk - num_neighbors = args.retro_num_neighbors - neighbor_tokens = np.zeros([n_chunks_per_sample, num_neighbors, retro_args.retro_gpt_retrieved_length], - dtype=np.int64) # disable retro encoder - - train_sample = { - 'text': tokens, - 'answer_mask': answer_mask, - 'neighbor_tokens': neighbor_tokens, - 'context_len': len(input_tokens) - } - return train_sample - - def pad_and_convert_to_numpy(input_ids, output_ids, pad_id, max_seq_length, eos_id): @@ -431,10 +437,10 @@ def pad_and_convert_to_numpy(input_ids, output_ids, # Tokens. filler = [pad_id] * padding_length - tokens = np.array(tokens + [eos_id] + filler, dtype=np.int64) + tokens = numpy.array(tokens + [eos_id] + filler, dtype=numpy.int64) # answer mask answer_mask = answer_mask + [1] + [0] * padding_length - answer_mask = np.array(answer_mask, dtype=np.int64) + answer_mask = numpy.array(answer_mask, dtype=numpy.int64) return tokens, answer_mask diff --git a/tools/retro/sft/sft_gpt_dataset.py b/tools/retro/sft/sft_gpt_dataset.py deleted file mode 100644 index 72c9ded849..0000000000 --- a/tools/retro/sft/sft_gpt_dataset.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. - -"""GPT style dataset.""" -from types import SimpleNamespace - -from megatron import print_rank_0, get_args -from megatron.core import mpu -from megatron.core.datasets.blended_megatron_dataset_builder import BlendedMegatronDatasetBuilder -from megatron.core.datasets.blended_dataset import BlendedDataset -from megatron.data.dataset_utils import get_datasets_weights_and_num_samples -from tools.retro.sft.dataset_conv import FtDataset as SFTDataset -from tools.retro.sft.dataset_conv import get_processed_dataset - - -def build_train_valid_test_datasets(data_prefix, seq_length): - """Build train, valid, and test datasets.""" - - assert data_prefix - - args = get_args() - - if len(data_prefix) == 1: - processed_datasets = get_processed_dataset(data_prefix[0], args.data_folder) - - train_ds = SFTDataset(data_prefix[0], processed_datasets["train"], seq_length) - valid_ds = SFTDataset(data_prefix[0], processed_datasets["valid"], seq_length) - test_ds = SFTDataset(data_prefix[0], processed_datasets["test"], seq_length) - - return train_ds, valid_ds, test_ds - - prefixes, weights, _ = get_datasets_weights_and_num_samples(data_prefix, train_valid_test_num_samples=0) - train_datasets, valid_datasets, test_datasets = [], [], [] - train_size, valid_size, test_size = 0, 0, 0 - - for i in range(len(prefixes)): - processed_datasets = get_processed_dataset(prefixes[i], args.data_folder) - - train_ds = SFTDataset(prefixes[i], processed_datasets["train"], seq_length) - valid_ds = SFTDataset(prefixes[i], processed_datasets["valid"], seq_length) - test_ds = SFTDataset(prefixes[i], processed_datasets["test"], seq_length) - - if train_ds: - train_datasets.append(train_ds) - train_size += len(train_ds) - if valid_ds: - valid_datasets.append(valid_ds) - valid_size += len(valid_ds) - if test_ds: - test_datasets.append(test_ds) - test_size += len(test_ds) - - # Blend - MEGATRON_CORE_DUMMY_CONFIG = SimpleNamespace( - is_built_on_rank=lambda: mpu.get_tensor_model_parallel_rank() == 0, - path_to_cache=getattr(get_args(), "data_cache_path") - ) - - blending_train_dataset = None - if train_datasets: - blending_train_dataset = 
BlendedMegatronDatasetBuilder.build_generic_dataset( - BlendedDataset, - MEGATRON_CORE_DUMMY_CONFIG.is_built_on_rank, - train_datasets, - weights, - train_size, - MEGATRON_CORE_DUMMY_CONFIG, - ) - blending_valid_dataset = None - if valid_datasets: - blending_valid_dataset = BlendedMegatronDatasetBuilder.build_generic_dataset( - BlendedDataset, - MEGATRON_CORE_DUMMY_CONFIG.is_built_on_rank, - valid_datasets, - weights, - valid_size, - MEGATRON_CORE_DUMMY_CONFIG, - ) - blending_test_dataset = None - if test_datasets: - blending_test_dataset = BlendedMegatronDatasetBuilder.build_generic_dataset( - BlendedDataset, - MEGATRON_CORE_DUMMY_CONFIG.is_built_on_rank, - test_datasets, - weights, - test_size, - MEGATRON_CORE_DUMMY_CONFIG, - ) - - return (blending_train_dataset, blending_valid_dataset, - blending_test_dataset) diff --git a/tools/retro/sft/sft_retro.py b/tools/retro/sft/sft_retro.py index c8d6fb227e..fd95c05586 100644 --- a/tools/retro/sft/sft_retro.py +++ b/tools/retro/sft/sft_retro.py @@ -3,7 +3,7 @@ """Pretrain GPT""" import torch -from functools import partial +from functools import partial, reduce import sys, os sys.path.append(os.path.abspath(os.path.join( @@ -14,11 +14,12 @@ from megatron import get_tokenizer from megatron.core import tensor_parallel from megatron.core.enums import ModelType +from megatron.core.datasets.blended_megatron_dataset_builder import BlendedMegatronDatasetBuilder from megatron.training import pretrain from megatron.utils import get_ltor_masks_and_position_ids from megatron.utils import average_losses_across_data_parallel_group -from pretrain_gpt import model_provider -from tools.retro.sft.sft_gpt_dataset import build_train_valid_test_datasets +from pretrain_gpt import model_provider, is_dataset_built_on_rank +from tools.retro.sft.dataset_conv import JsonQADataset, JsonQADatasetConfig, RetroJsonQADataset, RetroJsonQADatasetConfig def get_tasks_args(parser): @@ -187,12 +188,74 @@ def forward_step(data_iterator, model): def train_valid_test_datasets_provider(train_val_test_num_samples): """Build train, valid, and test datasets.""" args = get_args() + retro_args = get_retro_args() + + tokenizer = get_tokenizer() + + def fix_and_split_blend_pair(pair): + weight, name = pair + return [ + [weight, os.path.join(args.data_folder, name, f"{name}_QA_train.json")], + [weight, os.path.join(args.data_folder, name, f"{name}_QA_dev.json")], + None, + ] + + blend = [args.data_path[i:i+2] for i in range(0, len(args.data_path), 2)] + + if len(blend) == 1: + blend_per_split = [ + os.path.join(args.data_folder, blend[0], f"{blend[0]}_QA_train.json"), + os.path.join(args.data_folder, blend[0], f"{blend[0]}_QA_dev.json"), + None, + ] + else: + blend_per_split = [ + list( + reduce( + lambda x, y: x + y, + list(zip(*map(fix_and_split_blend_pair, blend)))[0] + ) + ), + None, + None, + ] + + extra_kwargs = {} + + if args.retro_add_retriever: + dataset_cls = RetroJsonQADataset + config_cls = RetroJsonQADatasetConfig + extra_kwargs["retro_num_neighbors"] = args.retro_num_neighbors + extra_kwargs["retro_gpt_retrieved_length"] = retro_args.retro_gpt_retrieved_length + else: + dataset_cls = JsonQADataset + config_cls = JsonQADatasetConfig + + config = config_cls( + is_built_on_rank=is_dataset_built_on_rank, + random_seed=args.seed, + sequence_length=args.seq_length, + blend_per_split=blend_per_split, + split=args.split, + path_to_cache=args.data_cache_path, + mock=args.mock_data, + tokenizer=tokenizer, + ft_neighbours=args.ft_neighbours, + 
bert_retriever_neighbours=args.bert_retriever_neighbours, + longform_answer=args.longform_answer, + inference_only=False, + retrieved_neighbours=False, + fix_newsqa=True, + **extra_kwargs + ) print_rank_0('> building train, validation, and test datasets ' 'for GPT ...') - train_ds, valid_ds, test_ds = build_train_valid_test_datasets( - data_prefix=args.data_path, - seq_length=args.seq_length) + train_ds, valid_ds, test_ds = BlendedMegatronDatasetBuilder( + dataset_cls, + train_val_test_num_samples, + config + ).build() print_rank_0("> finished creating GPT datasets ...") return train_ds, valid_ds, test_ds From eaaf92f986aa0880cfe7da7531e6f6ad010ac420 Mon Sep 17 00:00:00 2001 From: Shanmugam Ramasamy Date: Mon, 29 Jan 2024 12:32:12 -0800 Subject: [PATCH 231/296] Adding bert local spec test --- tests/unit_tests/models/test_bert_model.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/unit_tests/models/test_bert_model.py b/tests/unit_tests/models/test_bert_model.py index 00c1becc91..e1d01557dd 100644 --- a/tests/unit_tests/models/test_bert_model.py +++ b/tests/unit_tests/models/test_bert_model.py @@ -3,6 +3,7 @@ import pytest import torch +import os from megatron.core.transformer.transformer_config import TransformerConfig from megatron.core.models.bert.bert_model import BertModel @@ -13,6 +14,7 @@ class TestBertModel: def setup_method(self, method): + os.environ['NVTE_ALLOW_NONDETERMINISTIC_ALGO'] = '0' #Bert does not support flash attention Utils.initialize_model_parallel(1,1) model_parallel_cuda_manual_seed(123) transformer_config = TransformerConfig(num_layers=2, hidden_size=12, num_attention_heads=4, use_cpu_initialization=True, perform_initialization=True) From a4b5a9e49c48b39f0cf6f4ea56a3aaf2848530e9 Mon Sep 17 00:00:00 2001 From: Chen Cui Date: Mon, 29 Jan 2024 20:28:28 -0800 Subject: [PATCH 232/296] Fix `qkv_format` in TEDotProductAttention --- .../transformer/custom_layers/transformer_engine.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/megatron/core/transformer/custom_layers/transformer_engine.py b/megatron/core/transformer/custom_layers/transformer_engine.py index df886872f9..f4b0c78ddb 100644 --- a/megatron/core/transformer/custom_layers/transformer_engine.py +++ b/megatron/core/transformer/custom_layers/transformer_engine.py @@ -394,9 +394,6 @@ def __init__( if te_version > packaging.version.Version("0.12.0"): self.te_forward_mask_type = True - if self.config.apply_rope_fusion and te_version > packaging.version.Version("0.13.0"): - extra_kwargs["qkv_format"] = self.qkv_format = 'bshd' - # Only Transformer-Engine version >= 1.0.0 supports context parallelism if te_version >= packaging.version.Version("1.0.0"): if getattr(TEDotProductAttention, "cp_stream") is None: @@ -446,13 +443,19 @@ def forward( dataclasses.asdict(packed_seq_params) if packed_seq_params is not None else {} ) te_version = packaging.version.Version(version("transformer-engine")) + # overwrite self.qkv_format depending on self.config.apply_rope_fusion, which can be set after init + if self.config.apply_rope_fusion and te_version > packaging.version.Version("0.13.0"): + self.qkv_format = 'bshd' + + qkv_format = packed_seq_kwargs.get('qkv_format', self.qkv_format) + if te_version < packaging.version.Version("1.3.0"): # TE 1.3.0 introduces precomputing max_seqlen to remove unnecessary kernels and D2H copies (#555) # These two arguments did not exist prior to 1.3.0 packed_seq_kwargs.pop("max_seqlen_q", None) packed_seq_kwargs.pop("max_seqlen_kv", None) - if 
self.config.apply_rope_fusion and self.qkv_format == 'bshd': + if self.config.apply_rope_fusion and qkv_format == 'bshd': query, key, value = [x.transpose(0, 1).contiguous() for x in (query, key, value)] if self.te_forward_mask_type: @@ -467,7 +470,7 @@ def forward( else: core_attn_out = super().forward(query, key, value, attention_mask, **packed_seq_kwargs,) - if self.config.apply_rope_fusion and self.qkv_format == 'bshd': + if self.config.apply_rope_fusion and qkv_format == 'bshd': return core_attn_out.transpose(0, 1) else: return core_attn_out From 25a99468cdfa0b42be463c8fef155da18ed6e5a3 Mon Sep 17 00:00:00 2001 From: John Kamalu Date: Mon, 29 Jan 2024 20:36:52 -0800 Subject: [PATCH 233/296] Add support for masked WordPiece datasets BERT and T5 --- megatron/core/datasets/bert_dataset.py | 207 +++++++++ megatron/core/datasets/blended_dataset.py | 2 +- .../blended_megatron_dataset_config.py | 8 +- megatron/core/datasets/gpt_dataset.py | 16 +- megatron/core/datasets/masked_dataset.py | 430 ++++++++++++++++++ megatron/core/datasets/megatron_dataset.py | 2 +- megatron/core/datasets/t5_dataset.py | 239 ++++++++++ megatron/core/datasets/utils.py | 8 +- megatron/data/bert_dataset.py | 183 -------- megatron/data/dataset_utils.py | 23 +- megatron/data/t5_dataset.py | 258 ----------- megatron/tokenizer/tokenizer.py | 28 +- pretrain_bert.py | 48 +- pretrain_gpt.py | 9 +- pretrain_t5.py | 61 ++- 15 files changed, 1000 insertions(+), 522 deletions(-) create mode 100644 megatron/core/datasets/bert_dataset.py create mode 100644 megatron/core/datasets/masked_dataset.py create mode 100644 megatron/core/datasets/t5_dataset.py delete mode 100644 megatron/data/bert_dataset.py delete mode 100644 megatron/data/t5_dataset.py diff --git a/megatron/core/datasets/bert_dataset.py b/megatron/core/datasets/bert_dataset.py new file mode 100644 index 0000000000..1168ca239a --- /dev/null +++ b/megatron/core/datasets/bert_dataset.py @@ -0,0 +1,207 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. 
+ +from dataclasses import dataclass +from typing import Dict, List, Optional, Union + +import numpy + +from megatron.core.datasets.indexed_dataset import MMapIndexedDataset +from megatron.core.datasets.masked_dataset import ( + MaskedWordPieceDataset, + MaskedWordPieceDatasetConfig, +) +from megatron.core.datasets.utils import Split + + +@dataclass +class BERTMaskedWordPieceDatasetConfig(MaskedWordPieceDatasetConfig): + """Configuration object for Megatron Core BERT WordPiece datasets + + Attributes: + classification_head (bool): Option to perform the next sequence prediction during + sampling + """ + + classification_head: bool = None + + def __post_init__(self) -> None: + """Do asserts and set fields post init + """ + super().__post_init__() + + assert self.classification_head is not None + + +class BERTMaskedWordPieceDataset(MaskedWordPieceDataset): + """The BERT dataset that assumes WordPiece tokenization + + Args: + indexed_dataset (MMapIndexedDataset): The MMapIndexedDataset around which to build the + MegatronDataset + + dataset_path (str): The real path on disk to the dataset, for bookkeeping + + indexed_indices (numpy.ndarray): The set of the documents indices to expose + + num_samples (int): The number of samples to draw from the indexed dataset + + index_split (Split): The indexed_indices Split + + config (BERTMaskedWordPieceDatasetConfig): The config + """ + + def __init__( + self, + indexed_dataset: MMapIndexedDataset, + dataset_path: str, + indexed_indices: numpy.ndarray, + num_samples: int, + index_split: Split, + config: BERTMaskedWordPieceDatasetConfig, + ) -> None: + super().__init__( + indexed_dataset, dataset_path, indexed_indices, num_samples, index_split, config + ) + + def _finalize(self) -> None: + """Abstract method implementation + """ + self.token_lookup = list(self.config.tokenizer.inv_vocab.keys()) + # Account for the single and two token ids + self.sample_index = self._build_sample_index( + self.config.sequence_length - 3, 2 if self.config.classification_head else 1 + ) + + @staticmethod + def _key_config_attributes() -> List[str]: + """Inherited method implementation + + Returns: + List[str]: The key config attributes + """ + return super( + BERTMaskedWordPieceDataset, BERTMaskedWordPieceDataset + )._key_config_attributes() + ["classification_head",] + + def __getitem__(self, idx: int) -> Dict[str, Union[int, numpy.ndarray]]: + """Abstract method implementation + + Args: + idx (int): The index into the dataset + + Returns: + Dict[str, Union[int, numpy.ndarray]]: The + """ + idx_beg, idx_end, target_sequence_length = self.sample_index[idx] + sample = [self.dataset[i] for i in range(idx_beg, idx_end)] + numpy_random_state = numpy.random.RandomState( + seed=(self.config.random_seed + idx) % 2 ** 32 + ) + + assert target_sequence_length <= self.config.sequence_length + + # Split the sample into contiguous subsegments A and B + pivot = len(sample) + is_next_random = False + if self.config.classification_head: + assert len(sample) > 1, "the sample must contain at least two sentences" + pivot = 1 + if len(sample) >= 3: + pivot = numpy_random_state.randint(low=1, high=len(sample)) + is_next_random = numpy_random_state.random() < 0.5 + split_A = [] + for sample_a in sample[:pivot]: + split_A.extend(sample_a) + split_B = [] + for sample_b in sample[pivot:]: + split_B.extend(sample_b) + if is_next_random: + split_A, split_B = split_B, split_A + + # Trim the subsegments from either end to a desired joint length + length_A = len(split_A) + length_B = len(split_B) + if 
length_A + length_B <= target_sequence_length: + truncated = False + else: + while length_A + length_B > target_sequence_length: + split = split_A if length_A > length_B else split_B + if numpy_random_state.random() < 0.5: + del split[0] + else: + del split[-1] + length_A = len(split_A) + length_B = len(split_B) + truncated = True + + # Merge the subsegments and create the token assignment labels + tokens = [ + self.config.tokenizer.cls, + *split_A, + self.config.tokenizer.sep, + ] + assignments = [0 for _ in range(1 + len(split_A) + 1)] + if split_B: + tokens += [*split_B, self.config.tokenizer.sep] + assignments += [1 for _ in range(len(split_B) + 1)] + + # Masking + tokens, masked_positions, masked_labels, _, _ = self._create_masked_lm_predictions( + tokens, target_sequence_length, numpy_random_state + ) + + # Pad the sequences and convert to NumPy + length_toks = len(tokens) + length_pads = self.config.sequence_length - length_toks + assert length_pads >= 0 + + tokens = numpy.array(tokens, dtype=numpy.int64) + tokens = numpy.pad(tokens, (0, length_pads), constant_values=self.config.tokenizer.pad) + + assignments = numpy.array(assignments, dtype=numpy.int64) + assignments = numpy.pad( + assignments, (0, length_pads), constant_values=self.config.tokenizer.pad + ) + + # Get the padding mask + mask_pads = numpy.ones(length_toks, dtype=numpy.int64) + mask_pads = numpy.pad( + mask_pads, (0, length_pads), constant_values=self.config.tokenizer.pad + ) + + # Mask the labels + labels = numpy.zeros(self.config.sequence_length, dtype=numpy.int64) - 1 + labels[masked_positions] = masked_labels + + # Get the loss mask + mask_loss = numpy.zeros(self.config.sequence_length, dtype=numpy.int64) + mask_loss[masked_positions] = 1 + + return { + "text": tokens, + "types": assignments, + "labels": labels, + "is_random": int(is_next_random), + "padding_mask": mask_pads, + "loss_mask": mask_loss, + "truncated": int(truncated), + } + + def _get_token_mask(self, numpy_random_state: numpy.random.RandomState) -> Optional[int]: + """Abstract method implementation + + 80% of the time, replace the token id with mask token id. 10% of the time, replace token id + with a random token id from the vocabulary. 10% of the time, do nothing. 
+ + Args: + numpy_random_state (RandomState): The NumPy random state + + Returns: + Optional[int]: The replacement token id or None + """ + if numpy_random_state.random() < 0.8: + return self.config.tokenizer.mask + else: + if numpy_random_state.random() >= 0.5: + return self.token_lookup[numpy_random_state.randint(0, len(self.token_lookup))] + return None diff --git a/megatron/core/datasets/blended_dataset.py b/megatron/core/datasets/blended_dataset.py index 7c424f1ce8..a21fe02202 100644 --- a/megatron/core/datasets/blended_dataset.py +++ b/megatron/core/datasets/blended_dataset.py @@ -30,7 +30,7 @@ class BlendedDataset(torch.utils.data.Dataset): size (int): The number of samples to draw from the blend - config (BlendedMegatronDatasetConfig): The config object which informs dataset creation + config (BlendedMegatronDatasetConfig): The config Raises: RuntimeError: When the dataset has fewer or more samples than 'size' post-initialization diff --git a/megatron/core/datasets/blended_megatron_dataset_config.py b/megatron/core/datasets/blended_megatron_dataset_config.py index a6370eb19f..60ecdf190b 100644 --- a/megatron/core/datasets/blended_megatron_dataset_config.py +++ b/megatron/core/datasets/blended_megatron_dataset_config.py @@ -17,8 +17,8 @@ @dataclass class BlendedMegatronDatasetConfig: - """Configuration object for megatron-core blended and megatron datasets - + """Configuration object for Megatron Core datasets + Attributes: is_built_on_rank (Callable): A callable which returns True if the dataset should be built on the current rank. It should be Megatron Core parallelism aware i.e. global rank, group @@ -75,7 +75,9 @@ class BlendedMegatronDatasetConfig: tokenizer: Optional[MegatronTokenizer] = None - def __post_init__(self): + def __post_init__(self) -> None: + """Do asserts and set fields post init + """ if torch.distributed.is_initialized(): gb_rank = torch.distributed.get_rank() vp_rank = get_virtual_pipeline_model_parallel_rank() diff --git a/megatron/core/datasets/gpt_dataset.py b/megatron/core/datasets/gpt_dataset.py index b0d9a80fc8..a8737a5e1f 100644 --- a/megatron/core/datasets/gpt_dataset.py +++ b/megatron/core/datasets/gpt_dataset.py @@ -1,4 +1,4 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. 
import logging import os @@ -21,12 +21,12 @@ class GPTDatasetConfig(BlendedMegatronDatasetConfig): """Configuration object for Megatron Core GPT datasets - Attributes: - reset_position_ids (bool): Option to reset the position IDs in the dataset at an interval + Attributes: + reset_position_ids (bool): Option to reset the position IDs in the dataset at an interval - reset_attention_mask (bool): Option to reset the attention mask from the dataset + reset_attention_mask (bool): Option to reset the attention mask from the dataset - eod_mask_loss (bool): Option to enable the EOD mask loss + eod_mask_loss (bool): Option to enable the EOD mask loss """ reset_position_ids: bool = None @@ -35,7 +35,9 @@ class GPTDatasetConfig(BlendedMegatronDatasetConfig): eod_mask_loss: bool = None - def __post_init__(self): + def __post_init__(self) -> None: + """Do asserts and set fields post init + """ super().__post_init__() assert self.tokenizer is not None @@ -108,7 +110,7 @@ class GPTDataset(MegatronDataset): index_split (Split): The indexed_indices Split - config (GPTDatasetConfig): The GPT-specific container for all config sourced parameters + config (GPTDatasetConfig): The config """ def __init__( diff --git a/megatron/core/datasets/masked_dataset.py b/megatron/core/datasets/masked_dataset.py new file mode 100644 index 0000000000..03c922b9d5 --- /dev/null +++ b/megatron/core/datasets/masked_dataset.py @@ -0,0 +1,430 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + +import logging +import os +import time +from abc import abstractmethod +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import numpy +import torch + +from megatron.core.datasets.blended_megatron_dataset_config import BlendedMegatronDatasetConfig +from megatron.core.datasets.indexed_dataset import MMapIndexedDataset +from megatron.core.datasets.megatron_dataset import MegatronDataset +from megatron.core.datasets.utils import Split, log_single_rank + +logger = logging.getLogger(__name__) + + +@dataclass +class MaskedWordPieceDatasetConfig(BlendedMegatronDatasetConfig): + """Configuration object for Megatron Core Masked WordPiece datasets + + Attributes: + masking_probability (float): The probability we mask a candidate N-gram + + short_sequence_probability (float): The probability we return a sequence shorter than the + target sequence length + + masking_max_ngram (int): The maximum length N-gram to consider masking or permuting + + masking_do_full_word (bool): Whether we mask the the whole word or its component parts + + masking_do_permutation (bool): Whether we shuffle a subset of candidate N-grams in addition + to masking + + masking_use_longer_ngrams (bool): Wehther to favor longer N-grams over shorter N-grams + + masking_use_geometric_distribution (bool): Whether to draw the size of the N-gram from a + geometric distribution according to SpanBERT https://arxiv.org/abs/1907.10529 (Section 3.1) + """ + + masking_probability: float = None + + short_sequence_probability: float = None + + masking_max_ngram: int = None + + masking_do_full_word: bool = None + + masking_do_permutation: bool = None + + masking_use_longer_ngrams: bool = None + + masking_use_geometric_distribution: bool = None + + def __post_init__(self) -> None: + """Do asserts and set fields post init + """ + super().__post_init__() + + assert self.tokenizer is not None + + assert self.masking_probability is not None + assert self.short_sequence_probability is not None + assert self.masking_max_ngram is not None + assert 
self.masking_do_full_word is not None + assert self.masking_do_permutation is not None + assert self.masking_use_longer_ngrams is not None + assert self.masking_use_geometric_distribution is not None + + assert self.masking_probability > 0 and self.masking_probability < 1.0 + assert self.short_sequence_probability >= 0 and self.short_sequence_probability <= 1.0 + assert self.masking_max_ngram > 0 + assert not (self.masking_use_geometric_distribution and self.masking_do_permutation) + + if self.masking_use_geometric_distribution and self.masking_use_longer_ngrams: + log_single_rank( + logger, + logging.WARNING, + "The use of a geometric distribution overrides the default distribution", + ) + + +class MaskedWordPieceDataset(MegatronDataset): + """The semi-abstract base class for masked WordPiece datasets + + This implementation makes the rigid assumption that all inheritor datasets are built upon the + MMapIndexedDataset class. This assumption may be pushed down to the inheritors in future if + necessary. + + NB: WordPiece tokenization prepends a double hash "##" to all tokens/pieces in a word, save the + first token/piece. + + Args: + indexed_dataset (MMapIndexedDataset): The MMapIndexedDataset around which to build the + MegatronDataset + + dataset_path (str): The real path on disk to the dataset, for bookkeeping + + indexed_indices (numpy.ndarray): The set of the documents indices to expose + + num_samples (int): The number of samples to draw from the indexed dataset + + index_split (Split): The indexed_indices Split + + config (MaskedWordPieceDatasetConfig): The config + """ + + def __init__( + self, + indexed_dataset: MMapIndexedDataset, + dataset_path: str, + indexed_indices: numpy.ndarray, + num_samples: int, + index_split: Split, + config: MaskedWordPieceDatasetConfig, + ) -> None: + super().__init__( + indexed_dataset, dataset_path, indexed_indices, num_samples, index_split, config + ) + + @staticmethod + def numel_low_level_dataset(low_level_dataset: MMapIndexedDataset) -> int: + return low_level_dataset.document_indices.shape[0] - 1 + + @staticmethod + def build_low_level_dataset( + dataset_path: str, config: MaskedWordPieceDatasetConfig + ) -> MMapIndexedDataset: + return MMapIndexedDataset(dataset_path) + + @staticmethod + def _key_config_attributes() -> List[str]: + """Inherited method implementation + + Returns: + List[str]: The key config attributes + """ + return super(MaskedWordPieceDataset, MaskedWordPieceDataset)._key_config_attributes() + [ + "masking_probability", + "short_sequence_probability", + "masking_max_ngram", + "masking_do_full_word", + "masking_do_permutation", + "masking_use_longer_ngrams", + "masking_use_geometric_distribution", + ] + + def __len__(self) -> int: + return self.sample_index.shape[0] + + def _build_sample_index( + self, sequence_length: int, min_sentences_per_sample: int + ) -> numpy.ndarray: + path_to_cache = self.config.path_to_cache + if path_to_cache is None: + path_to_cache = os.path.join( + self.dataset.path_prefix, "cache", f"{type(self).__name__}_indices" + ) + + get_path_to = lambda suffix: os.path.join( + path_to_cache, f"{self.unique_description_hash}-{type(self).__name__}-{suffix}" + ) + path_to_description = get_path_to("description.txt") + path_to_sample_index = get_path_to("sample_index.npy") + cache_hit = all(map(os.path.isfile, [path_to_description, path_to_sample_index,],)) + + num_epochs = numpy.iinfo(numpy.int32).max - 1 + + if not cache_hit and torch.distributed.get_rank() == 0: + log_single_rank( + logger, + logging.INFO, 
+ f"Build and save the {type(self).__name__} {self.index_split.name} indices", + ) + + os.makedirs(path_to_cache, exist_ok=True) + + # Write the description + with open(path_to_description, "wt") as writer: + writer.write(self.unique_description) + + # Build the sample index + log_single_rank( + logger, + logging.INFO, + f"\tBuild and save the sample index to {os.path.basename(path_to_sample_index)}", + ) + t_beg = time.time() + from megatron.core.datasets import helpers + + # Add +1 for access to document upper bound + indices = numpy.append(self.indices, self.indices[-1] + 1) + + sample_index = helpers.build_mapping( + self.dataset.document_indices[indices], + self.dataset.sequence_lengths, + num_epochs, + self.num_samples, + sequence_length, + self.config.short_sequence_probability, + self.config.random_seed, + False, + min_sentences_per_sample, + ) + numpy.save(path_to_sample_index, sample_index, allow_pickle=True) + t_end = time.time() + log_single_rank(logger, logging.DEBUG, f"\t> time elapsed: {t_end - t_beg:4f} seconds") + + log_single_rank( + logger, logging.INFO, f"> total number of samples: {sample_index.shape[0]}" + ) + log_single_rank(logger, logging.INFO, f"> total number of epochs: {num_epochs}") + + return sample_index + + log_single_rank( + logger, logging.INFO, f"Load the {type(self).__name__} {self.index_split.name} indices" + ) + + log_single_rank( + logger, + logging.INFO, + f"\tLoad the sample index from {os.path.basename(path_to_sample_index)}", + ) + t_beg = time.time() + sample_index = numpy.load(path_to_sample_index, allow_pickle=True, mmap_mode="r") + t_end = time.time() + log_single_rank(logger, logging.DEBUG, f"\t> time elapsed: {t_end - t_beg:4f} seconds") + + return sample_index + + def _create_masked_lm_predictions( + self, + token_ids: List[int], + target_sequence_length: int, + numpy_random_state: numpy.random.RandomState, + ) -> Tuple[List[int], List[int], List[int], List[int], List[Tuple[List[int], List[int]]]]: + """Creates the predictions for the masked LM objective + + Args: + token_ids (List[int]): The token ids + target_sequence_length (int): The target sequence length + numpy_random_state (numpy.random.RandomState): The NumPy random state + + Returns: + Tuple[List[int], List[int], List[int], List[int], List[Tuple[List[int], List[int]]]]: + 1. masked_token_ids -> The masked sequence + 2. masked_positions -> The indices for the masked token ids + 3. masked_labels -> The original token ids for the masked token ids + 4. boundaries -> The sentence and word boundaries for the sequence + 4. masked_spans -> The masked positions and labels with N-gram info intact + """ + # Build the token sentence and word boundaries and the masking candidates + # e.g. 
[cls, id, ##id, ##id, id, ##id, sep, id, ##id, sep] + # -> boundaries: [1, 1, 0, 0, 1, 0, 1, 1, 0, 1] + # -> candidates with whole word masking: [[1, 2, 3], [4, 5], [7, 8]] + # -> candidates sans whole word masking: [[1], [2], [3], [4], [5], [7], [8]] + boundaries = [] + candidates = [] + for i, token_id in enumerate(token_ids): + if token_id == self.config.tokenizer.cls or token_id == self.config.tokenizer.sep: + boundaries.append(1) + else: + if not self.config.tokenizer.inv_vocab[token_id].startswith("##"): + boundaries.append(1) + candidates.append([i]) + else: + boundaries.append(0) + if self.config.masking_do_full_word and len(candidates) > 0: + candidates[-1].append(i) + else: + candidates.append([i]) + + n_maskings = min( + self.config.masking_probability * target_sequence_length, + max(1, int(round(len(token_ids) * self.config.masking_probability))), + ) + + ngram_nvals = numpy.arange(self.config.masking_max_ngram, dtype=numpy.int64) + 1 + + # By default, the N-gram probabilites are inversely proportional to N + # e.g. N = 3 + # -> P = array([0.54545455, 0.27272727, 0.18181818]) + nprobs = 1.0 / ngram_nvals + nprobs = nprobs / nprobs.sum(keepdims=True) + if self.config.masking_use_longer_ngrams: + nprobs = nprobs[::-1] + + # Create a nested list of depth 3 + # layer 1: the candidate dimension + # layer 2: the N-gram dimension + # layer 3: the token dimension + candidate_ngrams = [ + [candidates[idx : idx + n] for n in ngram_nvals] for idx in range(len(candidates)) + ] + numpy_random_state.shuffle(candidate_ngrams) + + masked_token_ids = list(token_ids) + masked_positions_and_labels = [] + masked_spans = [] + masked_indices = set() + for candidate_idx in range(len(candidate_ngrams)): + n_ngrams = len(candidate_ngrams[candidate_idx]) + + # Stop when we hit our desired number of maskings + if len(masked_positions_and_labels) >= n_maskings: + break + + # Do nothing for candidates with no ngrams + if not candidate_ngrams[candidate_idx]: + continue + + # Choose the initial value of N + if self.config.masking_use_geometric_distribution: + # Sample N from a geometric distribution with p = 0.2 and clip + # i.e. 
SpanBERT + # -> https://arxiv.org/abs/1907.10529 (Section 3.1) + p = 0.2 + n = min(numpy_random_state.geometric(p), self.config.masking_max_ngram) + else: + p = nprobs[:n_ngrams] / nprobs[:n_ngrams].sum(keepdims=True) + n = numpy_random_state.choice(ngram_nvals[:n_ngrams], p=p) + + while True: + ngram_indices = sum(candidate_ngrams[candidate_idx][n - 1], []) + n = n - 1 + # Success: masking this N-gram puts us below the desired number of maskings + if n_maskings >= len(masked_positions_and_labels) + len(ngram_indices): + skip_candidate = False + break + # Failure: no N-grams remain for this candidate + if n == 0: + skip_candidate = True + break + + # Do nothing for candidates whose 1-gram is too long + if skip_candidate: + continue + + # Do nothing for candidate indices which have already been masked + if any(map(lambda idx: idx in masked_indices, ngram_indices)): + continue + + # Mask the tokens and record their original positions and values + for index in ngram_indices: + masked_indices.add(index) + mask = self._get_token_mask(numpy_random_state) + if mask is None: + masked_token_ids[index] = token_ids[index] + else: + masked_token_ids[index] = mask + masked_positions_and_labels.append((index, token_ids[index])) + + masked_spans.append((ngram_indices, [token_ids[index] for index in ngram_indices])) + + assert len(masked_positions_and_labels) <= n_maskings + + numpy_random_state.shuffle(candidate_ngrams) + + if self.config.masking_do_permutation: + + n_swappings = n_maskings + + permuted_indices = set() + for candidate_idx in range(len(candidate_ngrams)): + n_ngrams = len(candidate_ngrams[candidate_idx]) + + if len(permuted_indices) >= n_swappings: + break + + # Do nothing for candidates with no ngrams + if not candidate_ngrams[candidate_idx]: + continue + + p = nprobs[:n_ngrams] / nprobs[:n_ngrams].sum(keepdims=True) + n = numpy.random.choice(ngram_nvals[:n_ngrams], p=p) + + while True: + ngram_indices = sum(candidate_ngrams[candidate_idx][n - 1], []) + n = n - 1 + # Success: swapping this N-gram puts us below the desired number of swappings + if n_swappings >= len(permuted_indices) + len(ngram_indices): + skip_candidate = False + break + # Failure: no N-grams remain for this candidate + if n == 0: + skip_candidate = True + break + + # Do nothing for candidates whose 1-gram is too long + if skip_candidate: + continue + + # Do nothing for candidate indices which have already been masked or permuted + if any( + map(lambda idx: idx in masked_indices or idx in permuted_indices, ngram_indices) + ): + continue + + for index in ngram_indices: + permuted_indices.add(index) + + assert len(permuted_indices) <= n_swappings + + permuted_indices = sorted(permuted_indices) + permuted_indices_copy = list(permuted_indices) + numpy_random_state.shuffle(permuted_indices_copy) + masked_token_ids_copy = list(masked_token_ids) + + for idx, idx_copy in zip(permuted_indices, permuted_indices_copy): + masked_token_ids[idx] = masked_token_ids_copy[idx_copy] + masked_positions_and_labels.append((idx, masked_token_ids_copy[idx])) + + masked_positions_and_labels = sorted(masked_positions_and_labels, key=lambda x: x[0]) + masked_positions = [] + masked_labels = [] + for position, label in masked_positions_and_labels: + masked_positions.append(position) + masked_labels.append(label) + + masked_spans = sorted(masked_spans, key=lambda x: x[0][0]) + + return masked_token_ids, masked_positions, masked_labels, boundaries, masked_spans + + @abstractmethod + def _get_token_mask(self, numpy_random_state: 
numpy.random.RandomState) -> Optional[int]: + pass diff --git a/megatron/core/datasets/megatron_dataset.py b/megatron/core/datasets/megatron_dataset.py index c95a7d2ea5..4c8b962c89 100644 --- a/megatron/core/datasets/megatron_dataset.py +++ b/megatron/core/datasets/megatron_dataset.py @@ -31,7 +31,7 @@ class MegatronDataset(ABC, torch.utils.data.Dataset): index_split (Split): The indices Split - config (BlendedMegatronDatasetConfig): The container for all config sourced parameters + config (BlendedMegatronDatasetConfig): The config """ def __init__( diff --git a/megatron/core/datasets/t5_dataset.py b/megatron/core/datasets/t5_dataset.py new file mode 100644 index 0000000000..9baa16368c --- /dev/null +++ b/megatron/core/datasets/t5_dataset.py @@ -0,0 +1,239 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + +from collections import deque +from dataclasses import dataclass, field +from typing import Dict, List, Optional, Union + +import numpy + +from megatron.core.datasets.indexed_dataset import MMapIndexedDataset +from megatron.core.datasets.masked_dataset import ( + MaskedWordPieceDataset, + MaskedWordPieceDatasetConfig, +) +from megatron.core.datasets.utils import Split + + +@dataclass +class T5MaskedWordPieceDatasetConfig(MaskedWordPieceDatasetConfig): + """Configuration object for Megatron Core T5 WordPiece datasets + + NB: As a temporary holdover from Megatron-LM. The T5 tokenizer has an attribute which defines + a number of special sentinel tokens used during sampling. The assert in __post_init__ serves to + preserve compatibility with Megatron-LM until the T5 tokenizer is in Megatron Core. + + Attributes: + sequence_length_encoder (Optional[int]): A sequence_length alias and the sequence length + for the encoder + + sequence_length_decoder (int): The sequence length for the decoder + """ + + sequence_length_encoder: Optional[int] = field(init=False, default=None) + + sequence_length_decoder: int = None + + def __post_init__(self) -> None: + """Do asserts and set fields post init + """ + super().__post_init__() + + self.sequence_length_encoder = self.sequence_length + + assert self.sequence_length_encoder is not None + assert self.sequence_length_decoder is not None + + assert len(self.tokenizer.additional_special_tokens_ids) > 0 + + +class T5MaskedWordPieceDataset(MaskedWordPieceDataset): + """The T5 dataset that assumes WordPiece tokenization + + Args: + indexed_dataset (MMapIndexedDataset): The MMapIndexedDataset around which to build the + MegatronDataset + + dataset_path (str): The real path on disk to the dataset, for bookkeeping + + indexed_indices (numpy.ndarray): The set of the documents indices to expose + + num_samples (int): The number of samples to draw from the indexed dataset + + index_split (Split): The indexed_indices Split + + config (T5MaskedWordPieceDatasetConfig): The config + """ + + def __init__( + self, + indexed_dataset: MMapIndexedDataset, + dataset_path: str, + indexed_indices: numpy.ndarray, + num_samples: int, + index_split: Split, + config: T5MaskedWordPieceDatasetConfig, + ) -> None: + super().__init__( + indexed_dataset, dataset_path, indexed_indices, num_samples, index_split, config + ) + + def _finalize(self) -> None: + """Abstract method implementation + """ + self.token_lookup = list(self.config.tokenizer.inv_vocab.keys()) + # Account for the single and single token ids + self.sample_index = self._build_sample_index(self.config.sequence_length - 2, 1) + + @staticmethod + def _key_config_attributes() -> List[str]: + """Inherited 
method implementation + + Returns: + List[str]: The key config attributes + """ + return super( + T5MaskedWordPieceDataset, T5MaskedWordPieceDataset + )._key_config_attributes() + ["sequence_length_decoder",] + + def __getitem__(self, idx: int) -> Dict[str, Union[int, numpy.ndarray]]: + """Abstract method implementation + + Args: + idx (int): The index into the dataset + + Returns: + Dict[str, Union[int, numpy.ndarray]]: The + """ + idx_beg, idx_end, target_sequence_length = self.sample_index[idx] + sample = [self.dataset[i] for i in range(idx_beg, idx_end)] + + numpy_random_state = numpy.random.RandomState( + seed=(self.config.random_seed + idx) % 2 ** 32 + ) + + assert target_sequence_length <= self.config.sequence_length + + # Flatten the sample into a list of tokens + tokens = [token for sentence in sample for token in sentence] + + # Truncate the list of tokens to a desired length + truncated = len(tokens) > target_sequence_length + tokens = tokens[:target_sequence_length] + + # Masking + (tokens, _, _, _, masked_spans,) = self._create_masked_lm_predictions( + tokens, target_sequence_length, numpy_random_state + ) + + # Prepare the encoder input and decoder input and output + sentinels = deque(self.config.tokenizer.additional_special_tokens_ids) + encoder_input = [] + decoder_input = [self.config.tokenizer.bos] + decoder_output = [] + idx_beg = 0 + for indices, labels in masked_spans: + sentinel = sentinels.popleft() + + # set the end index + idx_end = indices[0] + + encoder_input.extend(tokens[idx_beg:idx_end]) + encoder_input.append(sentinel) + + decoder_input.append(sentinel) + decoder_input.extend(labels) + + decoder_output.append(sentinel) + decoder_output.extend(labels) + + # set the start index + idx_beg = indices[-1] + 1 + + encoder_input.extend(tokens[idx_beg:]) + decoder_output.append(self.config.tokenizer.eos) + + # Pad the sequences and convert to NumPy + length_toks_encoder = len(encoder_input) + length_toks_decoder = len(decoder_input) + length_pads_encoder = self.config.sequence_length_encoder - length_toks_encoder + length_pads_decoder = self.config.sequence_length_decoder - length_toks_decoder + assert length_pads_encoder >= 0 + assert length_pads_decoder >= 0 + + encoder_input = numpy.array(encoder_input, dtype=numpy.int64) + encoder_input = numpy.pad( + encoder_input, (0, length_pads_encoder), constant_values=self.config.tokenizer.pad + ) + + decoder_input = numpy.array(decoder_input, dtype=numpy.int64) + decoder_input = numpy.pad( + decoder_input, (0, length_pads_decoder), constant_values=self.config.tokenizer.pad + ) + + # Create attention and history masks + mask_encoder = self._make_attention_mask(encoder_input, encoder_input) + mask_encoder_decoder = self._make_attention_mask(decoder_input, encoder_input) + mask_decoder = self._make_attention_mask(decoder_input, decoder_input) + mask_decoder = mask_decoder * self._make_history_mask(decoder_input) + + # Mask the labels + decoder_output = numpy.array(decoder_output, dtype=numpy.int64) + decoder_output = numpy.pad(decoder_output, (0, length_pads_decoder), constant_values=-1) + + # Get the loss mask + loss_mask = numpy.zeros(self.config.sequence_length_decoder, dtype=numpy.int64) + loss_mask[:length_toks_decoder] = 1 + + return { + "text_enc": encoder_input, + "text_dec": decoder_input, + "labels": decoder_output, + "loss_mask": loss_mask, + "truncated": int(truncated), + "enc_mask": mask_encoder, + "dec_mask": mask_decoder, + "enc_dec_mask": mask_encoder_decoder, + } + + @staticmethod + def _make_attention_mask( + 
source_block: numpy.ndarray, target_block: numpy.ndarray + ) -> numpy.ndarray: + """Return a 2-D attention mask + + Args: + source_block (numpy.ndarray): A 1-D array + target_block (numpy.ndarray): A 1-D array + + Returns: + numpy.ndarray: The 2-D attention mask + """ + mask = (target_block[None, :] >= 1) * (source_block[:, None] >= 1) + return mask.astype(numpy.int64) + + @staticmethod + def _make_history_mask(block: numpy.ndarray) -> numpy.ndarray: + """Return a 2-D history (lower-left-triangular) mask + + Args: + block (numpy.ndarray): A 1-D array + + Returns: + numpy.ndarray: The 2-D history (lower-left-triangular) mask + """ + arange = numpy.arange(block.shape[0]) + mask = arange[None,] <= arange[:, None] + return mask.astype(numpy.int64) + + def _get_token_mask(self, numpy_random_state: numpy.random.RandomState) -> int: + """Abstract method implementation + + 100% of the time, replace the token id with mask token id. + + Args: + numpy_random_state (RandomState): The NumPy random state + + Returns: + int: The mask token id + """ + return self.config.tokenizer.mask diff --git a/megatron/core/datasets/utils.py b/megatron/core/datasets/utils.py index 8a3279b5f4..def0fb7611 100644 --- a/megatron/core/datasets/utils.py +++ b/megatron/core/datasets/utils.py @@ -2,7 +2,7 @@ import logging from enum import Enum -from typing import List +from typing import Any, List import numpy import torch @@ -30,13 +30,17 @@ def compile_helpers(): sys.exit(1) -def log_single_rank(logger: logging.Logger, *args, rank=0, **kwargs): +def log_single_rank(logger: logging.Logger, *args: Any, rank: int = 0, **kwargs: Any): """If torch distributed is initialized, log only on rank Args: logger (logging.Logger): The logger to write the logs + args (Tuple[Any]): All logging.Logger.log positional arguments + rank (int, optional): The rank to write on. Defaults to 0. + + kwargs (Dict[str, Any]): All logging.Logger.log keyword arguments """ if torch.distributed.is_initialized(): if torch.distributed.get_rank() == rank: diff --git a/megatron/data/bert_dataset.py b/megatron/data/bert_dataset.py deleted file mode 100644 index 036e6bccc9..0000000000 --- a/megatron/data/bert_dataset.py +++ /dev/null @@ -1,183 +0,0 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. - -"""BERT Style dataset.""" - -import numpy as np -import torch - -from megatron import ( - get_args, - get_tokenizer, - mpu, - print_rank_0 -) -from megatron.data.dataset_utils import ( - get_samples_mapping, - get_a_and_b_segments, - truncate_segments, - create_tokens_and_tokentypes, - create_masked_lm_predictions -) - -class BertDataset(torch.utils.data.Dataset): - - def __init__(self, name, indexed_dataset, data_prefix, - num_epochs, max_num_samples, masked_lm_prob, - max_seq_length, short_seq_prob, seed, binary_head): - - # Params to store. - self.name = name - self.seed = seed - self.masked_lm_prob = masked_lm_prob - self.max_seq_length = max_seq_length - self.binary_head = binary_head - - # Dataset. - self.indexed_dataset = indexed_dataset - - # Build the samples mapping. - self.samples_mapping = get_samples_mapping(self.indexed_dataset, - data_prefix, - num_epochs, - max_num_samples, - self.max_seq_length - 3, # account for added tokens - short_seq_prob, - self.seed, - self.name, - self.binary_head) - - # Vocab stuff. 
- tokenizer = get_tokenizer() - self.vocab_id_list = list(tokenizer.inv_vocab.keys()) - self.vocab_id_to_token_dict = tokenizer.inv_vocab - self.cls_id = tokenizer.cls - self.sep_id = tokenizer.sep - self.mask_id = tokenizer.mask - self.pad_id = tokenizer.pad - - def __len__(self): - return self.samples_mapping.shape[0] - - def __getitem__(self, idx): - start_idx, end_idx, seq_length = self.samples_mapping[idx] - sample = [self.indexed_dataset[i] for i in range(start_idx, end_idx)] - # Note that this rng state should be numpy and not python since - # python randint is inclusive whereas the numpy one is exclusive. - # We % 2**32 since numpy requres the seed to be between 0 and 2**32 - 1 - np_rng = np.random.RandomState(seed=((self.seed + idx) % 2**32)) - return build_training_sample(sample, seq_length, - self.max_seq_length, # needed for padding - self.vocab_id_list, - self.vocab_id_to_token_dict, - self.cls_id, self.sep_id, - self.mask_id, self.pad_id, - self.masked_lm_prob, np_rng, - self.binary_head) - - - - -def build_training_sample(sample, - target_seq_length, max_seq_length, - vocab_id_list, vocab_id_to_token_dict, - cls_id, sep_id, mask_id, pad_id, - masked_lm_prob, np_rng, binary_head): - """Biuld training sample. - - Arguments: - sample: A list of sentences in which each sentence is a list token ids. - target_seq_length: Desired sequence length. - max_seq_length: Maximum length of the sequence. All values are padded to - this length. - vocab_id_list: List of vocabulary ids. Used to pick a random id. - vocab_id_to_token_dict: A dictionary from vocab ids to text tokens. - cls_id: Start of example id. - sep_id: Separator id. - mask_id: Mask token id. - pad_id: Padding token id. - masked_lm_prob: Probability to mask tokens. - np_rng: Random number genenrator. Note that this rng state should be - numpy and not python since python randint is inclusive for - the opper bound whereas the numpy one is exclusive. - """ - - if binary_head: - # We assume that we have at least two sentences in the sample - assert len(sample) > 1 - assert target_seq_length <= max_seq_length - - # Divide sample into two segments (A and B). - if binary_head: - tokens_a, tokens_b, is_next_random = get_a_and_b_segments(sample, - np_rng) - else: - tokens_a = [] - for j in range(len(sample)): - tokens_a.extend(sample[j]) - tokens_b = [] - is_next_random = False - - # Truncate to `target_sequence_length`. - max_num_tokens = target_seq_length - truncated = truncate_segments(tokens_a, tokens_b, len(tokens_a), - len(tokens_b), max_num_tokens, np_rng) - - # Build tokens and toketypes. - tokens, tokentypes = create_tokens_and_tokentypes(tokens_a, tokens_b, - cls_id, sep_id) - - # Masking. - max_predictions_per_seq = masked_lm_prob * max_num_tokens - (tokens, masked_positions, masked_labels, _, _) = create_masked_lm_predictions( - tokens, vocab_id_list, vocab_id_to_token_dict, masked_lm_prob, - cls_id, sep_id, mask_id, max_predictions_per_seq, np_rng) - - # Padding. 
- tokens_np, tokentypes_np, labels_np, padding_mask_np, loss_mask_np \ - = pad_and_convert_to_numpy(tokens, tokentypes, masked_positions, - masked_labels, pad_id, max_seq_length) - - train_sample = { - 'text': tokens_np, - 'types': tokentypes_np, - 'labels': labels_np, - 'is_random': int(is_next_random), - 'loss_mask': loss_mask_np, - 'padding_mask': padding_mask_np, - 'truncated': int(truncated)} - return train_sample - - -def pad_and_convert_to_numpy(tokens, tokentypes, masked_positions, - masked_labels, pad_id, max_seq_length): - """Pad sequences and convert them to numpy.""" - - # Some checks. - num_tokens = len(tokens) - padding_length = max_seq_length - num_tokens - assert padding_length >= 0, \ - f"num_tokens ({num_tokens}) is greater than " \ - "max_seq_length ({max_seq_length})." - assert len(tokentypes) == num_tokens - assert len(masked_positions) == len(masked_labels) - - # Tokens and token types. - filler = [pad_id] * padding_length - tokens_np = np.array(tokens + filler, dtype=np.int64) - tokentypes_np = np.array(tokentypes + filler, dtype=np.int64) - - # Padding mask. - padding_mask_np = np.array([1] * num_tokens + [0] * padding_length, - dtype=np.int64) - - # Lables and loss mask. - labels = [-1] * max_seq_length - loss_mask = [0] * max_seq_length - for i in range(len(masked_positions)): - assert masked_positions[i] < num_tokens - labels[masked_positions[i]] = masked_labels[i] - loss_mask[masked_positions[i]] = 1 - labels_np = np.array(labels, dtype=np.int64) - loss_mask_np = np.array(loss_mask, dtype=np.int64) - - return tokens_np, tokentypes_np, labels_np, padding_mask_np, loss_mask_np diff --git a/megatron/data/dataset_utils.py b/megatron/data/dataset_utils.py index e8e5855db4..a7f45f5b32 100644 --- a/megatron/data/dataset_utils.py +++ b/megatron/data/dataset_utils.py @@ -535,11 +535,12 @@ def build_dataset(name, data_prefix, max_num_samples, max_seq_length_dec, dataset_type='standard_bert', indexed_dataset=None): - from megatron.data.bert_dataset import BertDataset from megatron.data.ict_dataset import ICTDataset - from megatron.data.t5_dataset import T5Dataset from megatron.data.multimodal_dataset import MultiModalDataset + if dataset_type == DSET_TYPE_BERT or dataset_type == DSET_TYPE_T5: + raise ValueError("The Megatron-LM BERT and T5 datasets are deprecated.") + if dataset_type not in DSET_TYPES: raise ValueError("Invalid dataset_type: ", dataset_type) @@ -571,24 +572,6 @@ def build_dataset(name, data_prefix, max_num_samples, binary_head=binary_head, **kwargs ) - elif dataset_type == DSET_TYPE_T5: - args = get_args() - dataset = T5Dataset( - indexed_dataset=indexed_dataset, - masked_lm_prob=args.mask_prob, - max_seq_length_dec=max_seq_length_dec, - short_seq_prob=args.short_seq_prob, - **kwargs - ) - elif dataset_type == DSET_TYPE_BERT: - args = get_args() - dataset = BertDataset( - indexed_dataset=indexed_dataset, - masked_lm_prob=args.mask_prob, - short_seq_prob=args.short_seq_prob, - binary_head=binary_head, - **kwargs - ) elif dataset_type == DSET_TYPE_MULTIMODAL: args = get_args() dataset = MultiModalDataset( diff --git a/megatron/data/t5_dataset.py b/megatron/data/t5_dataset.py deleted file mode 100644 index 075b089f8e..0000000000 --- a/megatron/data/t5_dataset.py +++ /dev/null @@ -1,258 +0,0 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
- -"""T5 Style dataset.""" - -import collections - -import numpy as np -import torch - -from megatron import get_tokenizer -from megatron.data.dataset_utils import ( - create_masked_lm_predictions, - get_samples_mapping -) - -class T5Dataset(torch.utils.data.Dataset): - - def __init__(self, name, indexed_dataset, data_prefix, - num_epochs, max_num_samples, masked_lm_prob, - max_seq_length, max_seq_length_dec, - short_seq_prob, seed): - - # Params to store. - self.name = name - self.desc = name - self.seed = seed - self.masked_lm_prob = masked_lm_prob - self.max_seq_length = max_seq_length - self.max_seq_length_dec = max_seq_length_dec - - # Dataset. - self.indexed_dataset = indexed_dataset - - # Build the samples mapping. - self.samples_mapping = get_samples_mapping(self.indexed_dataset, - data_prefix, - num_epochs, - max_num_samples, - self.max_seq_length - 2, # account for added tokens - short_seq_prob, - self.seed, - self.name, - False) - - # Vocab stuff. - tokenizer = get_tokenizer() - self.vocab_id_list = list(tokenizer.inv_vocab.keys()) - self.vocab_id_to_token_dict = tokenizer.inv_vocab - self.cls_id = tokenizer.cls - self.sep_id = tokenizer.sep - self.mask_id = tokenizer.mask - self.pad_id = tokenizer.pad - self.bos_id = tokenizer.bos_token_id - self.eos_id = tokenizer.eos_token_id - self.sentinel_tokens = tokenizer.additional_special_tokens_ids - assert len(self.sentinel_tokens) > 0, "Provide the argument --vocab-extra-ids 100 to the script" - - def __len__(self): - return self.samples_mapping.shape[0] - - def __getitem__(self, idx): - - start_index, end_index, seq_length = self.samples_mapping[idx] - sample = [] - for index in range(start_index, end_index): - sample.append(self.indexed_dataset[index]) - # Note that this rng state should be numpy and not python since - # python randint is inclusive whereas the numpy one is exclusive. - np_rng = np.random.RandomState(seed=(self.seed + idx)) - return build_training_sample(sample, seq_length, - self.max_seq_length, # needed for padding - self.max_seq_length_dec, - self.vocab_id_list, - self.vocab_id_to_token_dict, - self.cls_id, self.sep_id, - self.mask_id, self.pad_id, - self.masked_lm_prob, np_rng, - self.bos_id, self.eos_id, - self.sentinel_tokens) - - -def build_training_sample(sample, target_seq_length, - max_seq_length, max_seq_length_dec, - vocab_id_list, vocab_id_to_token_dict, - cls_id, sep_id, mask_id, pad_id, - masked_lm_prob, np_rng, bos_id=None, - eos_id=None, sentinel_tokens=None): - """Build training sample. - - Arguments: - sample: A list of sentences in which each sentence is a list token ids. - target_seq_length: Desired sequence length. - max_seq_length: Maximum length of the sequence. All values are padded to - this length. - vocab_id_list: List of vocabulary ids. Used to pick a random id. - vocab_id_to_token_dict: A dictionary from vocab ids to text tokens. - cls_id: Start of example id. - sep_id: Separator id. - mask_id: Mask token id. - pad_id: Padding token id. - masked_lm_prob: Probability to mask tokens. - np_rng: Random number genenrator. Note that this rng state should be - numpy and not python since python randint is inclusive for - the opper bound whereas the numpy one is exclusive. 
- bos_id: start of decoder example id - eos_id: end of generation id - sentinel_tokens: unique value to be substituted for every replaced span - """ - - assert target_seq_length <= max_seq_length - - # flatten sentences into one list - tokens = [token for sentence in sample for token in sentence] - - # Truncate to `target_sequence_length`. - max_num_tokens = target_seq_length - truncated = len(tokens) > max_num_tokens - tokens = tokens[:max_num_tokens] - - # Masking. - max_predictions_per_seq = masked_lm_prob * max_num_tokens - (tokens, masked_positions, masked_labels, _, masked_spans) = create_masked_lm_predictions( - tokens, vocab_id_list, vocab_id_to_token_dict, masked_lm_prob, - cls_id, sep_id, mask_id, max_predictions_per_seq, np_rng, - max_ngrams=10, geometric_dist=True, masking_style="t5") - - # Padding. - tokens_enc, tokens_dec_in, labels, enc_mask, \ - dec_mask, enc_dec_mask, loss_mask \ - = pad_and_convert_to_numpy(tokens, masked_positions, - masked_labels, pad_id, max_seq_length, - max_seq_length_dec, masked_spans, - bos_id, eos_id, sentinel_tokens) - - train_sample = { - 'text_enc': tokens_enc, - 'text_dec': tokens_dec_in, - 'labels': labels, - 'loss_mask': loss_mask, - 'truncated': int(truncated), - 'enc_mask': enc_mask, - 'dec_mask': dec_mask, - 'enc_dec_mask': enc_dec_mask, - } - return train_sample - - -def pad_and_convert_to_numpy(tokens, masked_positions, - masked_labels, pad_id, - max_seq_length, max_seq_length_dec, - masked_spans=None, bos_id=None, - eos_id=None, sentinel_tokens=None): - """Pad sequences and convert them to numpy.""" - - sentinel_tokens = collections.deque(sentinel_tokens) - t5_input = [] - (t5_decoder_in, t5_decoder_out) = ([bos_id], []) - (start_index, end_index) = (0, None) - for span in masked_spans: - flag = sentinel_tokens.popleft() - - # Append the same tokens in decoder input and output - t5_decoder_in.append(flag) - t5_decoder_in.extend(span.label) - t5_decoder_out.append(flag) - t5_decoder_out.extend(span.label) - - end_index = span.index[0] - t5_input.extend(tokens[start_index: end_index]) - t5_input.append(flag) - - # the next start index is the token after the last span token - start_index = span.index[-1] + 1 - - # Add token to the t5_decoder_out - t5_decoder_out.append(eos_id) - - # Add the remaining tokens to the t5 input - t5_input.extend(tokens[start_index:]) - - # assert (len(t5_input) - len(masked_spans)) + \ - # (len(t5_decoder_in) - (len(masked_spans) + 1)) == len(tokens) - - # Some checks. - - # Encoder-side padding mask. - num_tokens = len(t5_input) - padding_length = max_seq_length - num_tokens - assert padding_length >= 0 - assert len(masked_positions) == len(masked_labels) - - # Tokens.. - filler = [pad_id] * padding_length - tokens_enc = np.array(t5_input + filler, dtype=np.int64) - - # Decoder-side padding mask. - num_tokens_dec = len(t5_decoder_in) - padding_length_dec = max_seq_length_dec - num_tokens_dec - assert padding_length_dec >= 0 - filler_dec = [pad_id] * padding_length_dec - tokens_dec_in = np.array(t5_decoder_in + filler_dec, dtype=np.int64) - - # Create attention masks - enc_mask = make_attention_mask(tokens_enc, tokens_enc) - enc_dec_mask = make_attention_mask(tokens_dec_in, tokens_enc) - dec_mask = make_attention_mask(tokens_dec_in, tokens_dec_in) - dec_mask = dec_mask * make_history_mask(tokens_dec_in) - - # Labels mask. 
- labels = t5_decoder_out + ([-1] * padding_length_dec) - labels = np.array(labels, dtype=np.int64) - - # Loss mask - loss_mask = ([1] * num_tokens_dec) + ([0] * padding_length_dec) - loss_mask = np.array(loss_mask, dtype=np.int64) - - return tokens_enc, tokens_dec_in, labels, enc_mask, \ - dec_mask, enc_dec_mask, loss_mask - - -def make_attention_mask(source_block, target_block): - """ - Returns a 2-dimensional (2-D) attention mask - :param source_block: 1-D array - :param target_block: 1-D array - """ - mask = (target_block[None, :] >= 1) * (source_block[:, None] >= 1) - mask = mask.astype(np.int64) - # (source_length, target_length) - return mask - - -def make_attention_mask_3d(source_block, target_block): - """ - Returns a 3-dimensional (3-D) attention mask - :param source_block: 1-D array - :param target_block: 1-D array - """ - mask = (target_block[:, None, :] >= 1) * (source_block[:, :, None] >= 1) - # (batch, source_length, target_length) - # mask = mask.astype(np.int64) - return mask - - -def make_history_mask(block): - length = block.shape[0] - arange = np.arange(length) - history_mask = (arange[None, ] <= arange[:, None]) - history_mask = history_mask.astype(np.int64) - return history_mask - - -def make_history_mask_3d(block): - batch, length = block.shape - arange = torch.arange(length, device=block.device) - history_mask = (arange[None, ] <= arange[:, None])[None, ] - history_mask = history_mask.expand(batch, length, length) - return history_mask diff --git a/megatron/tokenizer/tokenizer.py b/megatron/tokenizer/tokenizer.py index c618b99809..1d60489d7b 100644 --- a/megatron/tokenizer/tokenizer.py +++ b/megatron/tokenizer/tokenizer.py @@ -164,6 +164,16 @@ def pad(self): def mask(self): return self.mask_id + @property + def bos(self): + """ Id of the beginning of sentence token in the vocabulary.""" + return self._bos_token_id + + @property + def eos(self): + """ Id of the end of sentence token in the vocabulary.""" + return self._eos_token_id + @property def bos_token(self): """ Beginning of sentence token id """ @@ -179,16 +189,6 @@ def additional_special_tokens(self): """ All the additional special tokens you may want to use (list of strings).""" return self._additional_special_tokens - @property - def bos_token_id(self): - """ Id of the beginning of sentence token in the vocabulary.""" - return self._bos_token_id - - @property - def eos_token_id(self): - """ Id of the end of sentence token in the vocabulary.""" - return self._eos_token_id - @property def additional_special_tokens_ids(self): """ Ids of all the additional special tokens in the vocabulary (list of integers).""" @@ -377,10 +377,6 @@ def sep(self): def pad(self): return self._pad_id - @property - def bos_token_id(self): - return self._bos_id - @property def bos(self): return self._bos_id @@ -389,10 +385,6 @@ def bos(self): def eod(self): return self._eod_id - @property - def eos_token_id(self): - return self._eos_id - @property def eos(self): return self._eos_id diff --git a/pretrain_bert.py b/pretrain_bert.py index 47db48c2be..08fc90802d 100644 --- a/pretrain_bert.py +++ b/pretrain_bert.py @@ -8,11 +8,11 @@ import torch.nn.functional as F from megatron import get_args +from megatron import get_tokenizer from megatron import print_rank_0 from megatron import get_timers from megatron.core import tensor_parallel from megatron.core.enums import ModelType -from megatron.data.dataset_utils import build_train_valid_test_datasets import megatron.model from megatron.core.models.bert.bert_model import BertModel from 
megatron.training import pretrain @@ -20,6 +20,9 @@ from megatron.arguments import core_transformer_config_from_args from megatron.core.transformer.spec_utils import import_module from megatron.core.models.bert.bert_layer_specs import bert_layer_with_transformer_engine_spec +from megatron.core.datasets.blended_megatron_dataset_builder import BlendedMegatronDatasetBuilder +from megatron.core.datasets.bert_dataset import BERTMaskedWordPieceDataset, BERTMaskedWordPieceDatasetConfig +from megatron.core import mpu, tensor_parallel def model_provider(pre_process=True, post_process=True): """Build the model.""" @@ -137,15 +140,41 @@ def train_valid_test_datasets_provider(train_val_test_num_samples): """Build train, valid, and test datasets.""" args = get_args() + tokenizer = get_tokenizer() + + config = BERTMaskedWordPieceDatasetConfig( + is_built_on_rank=lambda: mpu.get_tensor_model_parallel_rank() == 0, + random_seed=args.seed, + sequence_length=args.seq_length, + blend=args.data_path, + blend_per_split=[ + args.train_data_path, + args.valid_data_path, + args.test_data_path, + ], + split=args.split, + path_to_cache=args.data_cache_path, + mock=False, + tokenizer=tokenizer, + masking_probability=args.mask_prob, + short_sequence_probability=args.short_seq_prob, + masking_max_ngram=3, + masking_do_full_word=True, + masking_do_permutation=False, + masking_use_longer_ngrams=False, + masking_use_geometric_distribution=False, + classification_head=args.bert_binary_head, + ) + print_rank_0('> building train, validation, and test datasets ' 'for BERT ...') - train_ds, valid_ds, test_ds = build_train_valid_test_datasets( - data_prefix=args.data_path, - splits_string=args.split, - train_valid_test_num_samples=train_val_test_num_samples, - max_seq_length=args.seq_length, - seed=args.seed, - binary_head=args.bert_binary_head) + + train_ds, valid_ds, test_ds = BlendedMegatronDatasetBuilder( + BERTMaskedWordPieceDataset, + train_val_test_num_samples, + config, + ).build() + print_rank_0("> finished creating BERT datasets ...") return train_ds, valid_ds, test_ds @@ -153,6 +182,9 @@ def train_valid_test_datasets_provider(train_val_test_num_samples): if __name__ == "__main__": + # Temporary for transition to core datasets + train_valid_test_datasets_provider.is_distributed = True + pretrain(train_valid_test_datasets_provider, model_provider, ModelType.encoder_or_decoder, forward_step, args_defaults={'tokenizer_type': 'BertWordPieceLowerCase'}) diff --git a/pretrain_gpt.py b/pretrain_gpt.py index 499243f2c7..3c978518c0 100644 --- a/pretrain_gpt.py +++ b/pretrain_gpt.py @@ -3,14 +3,13 @@ import os import torch -from torch import Tensor from functools import partial from typing import Union from megatron import get_args from megatron import print_rank_0 from megatron import get_timers from megatron import get_tokenizer -from megatron.core import mpu, tensor_parallel +from megatron.core import mpu from megatron.core.enums import ModelType from megatron.core.datasets.blended_megatron_dataset_builder import BlendedMegatronDatasetBuilder from megatron.core.datasets.gpt_dataset import GPTDatasetConfig @@ -94,12 +93,12 @@ def get_batch(data_iterator): return batch.values() -def loss_func(loss_mask: Tensor, output_tensor: Tensor): +def loss_func(loss_mask: torch.Tensor, output_tensor: torch.Tensor): """Loss function. 
Args: - loss_mask (Tensor): Used to mask out some portions of the loss - output_tensor (Tensor): The tensor with the losses + loss_mask (torch.Tensor): Used to mask out some portions of the loss + output_tensor (torch.Tensor): The tensor with the losses """ args = get_args() diff --git a/pretrain_t5.py b/pretrain_t5.py index 8ad2ca86d8..f6b93cabd5 100644 --- a/pretrain_t5.py +++ b/pretrain_t5.py @@ -5,25 +5,26 @@ from functools import partial import torch -from torch import Tensor from megatron import ( get_args, get_timers, + get_tokenizer, print_rank_0 ) -from megatron.core import tensor_parallel +from megatron.core import mpu, tensor_parallel from megatron.core.enums import ModelType -from megatron.data.dataset_utils import build_train_valid_test_datasets from megatron.core.models.T5 import T5Model from megatron.training import pretrain from megatron.utils import average_losses_across_data_parallel_group from megatron.arguments import core_transformer_config_from_args -from megatron.core.transformer.spec_utils import import_module +from megatron.core.datasets.blended_megatron_dataset_builder import BlendedMegatronDatasetBuilder +from megatron.core.datasets.t5_dataset import T5MaskedWordPieceDataset, T5MaskedWordPieceDatasetConfig from megatron.core.models.T5.t5_spec import (get_t5_encoder_with_transformer_engine_block_spec, get_t5_decoder_with_transformer_engine_block_spec, get_t5_encoder_with_local_block_spec, get_t5_decoder_with_local_block_spec) +from megatron.model import T5Model as NonCoreT5Model """ Pipeline parallelism for T5 @@ -99,7 +100,7 @@ def model_provider(pre_process=True, post_process=True, add_encoder=True, add_de rotary_percent=args.rotary_percent ) else: - model = megatron.model.T5Model(config=config, + model = NonCoreT5Model(config=config, num_tokentypes=0, parallel_output=True, pre_process=pre_process, @@ -137,12 +138,12 @@ def get_batch(data_iterator): enc_mask, dec_mask, enc_dec_mask -def loss_func(loss_mask: Tensor, output_tensor: Tensor): +def loss_func(loss_mask: torch.Tensor, output_tensor: torch.Tensor): """Loss function. 
Args: - loss_mask (Tensor): Used to mask out some portions of the loss - output_tensor (Tensor): The tensor with the losses + loss_mask (torch.Tensor): Used to mask out some portions of the loss + output_tensor (torch.Tensor): The tensor with the losses """ lm_loss_ = output_tensor.float() lm_loss = torch.sum( @@ -190,16 +191,41 @@ def train_valid_test_datasets_provider(train_val_test_num_samples: int): """ args = get_args() + tokenizer = get_tokenizer() + + config = T5MaskedWordPieceDatasetConfig( + is_built_on_rank=lambda: mpu.get_tensor_model_parallel_rank() == 0, + random_seed=args.seed, + sequence_length=args.encoder_seq_length, + sequence_length_decoder=args.decoder_seq_length, + blend=args.data_path, + blend_per_split=[ + args.train_data_path, + args.valid_data_path, + args.test_data_path, + ], + split=args.split, + path_to_cache=args.data_cache_path, + mock=False, + tokenizer=tokenizer, + masking_probability=args.mask_prob, + short_sequence_probability=args.short_seq_prob, + masking_max_ngram=10, + masking_do_full_word=True, + masking_do_permutation=False, + masking_use_longer_ngrams=False, + masking_use_geometric_distribution=True, + ) + print_rank_0('> building train, validation, and test datasets ' 'for T5 ...') - train_ds, valid_ds, test_ds = build_train_valid_test_datasets( - data_prefix=args.data_path, - splits_string=args.split, - train_valid_test_num_samples=train_val_test_num_samples, - max_seq_length=args.encoder_seq_length, - max_seq_length_dec=args.decoder_seq_length, - seed=args.seed, - dataset_type='t5') + + train_ds, valid_ds, test_ds = BlendedMegatronDatasetBuilder( + T5MaskedWordPieceDataset, + train_val_test_num_samples, + config, + ).build() + print_rank_0("> finished creating T5 datasets ...") return train_ds, valid_ds, test_ds @@ -207,5 +233,8 @@ def train_valid_test_datasets_provider(train_val_test_num_samples: int): if __name__ == "__main__": + # Temporary for transition to core datasets + train_valid_test_datasets_provider.is_distributed = True + pretrain(train_valid_test_datasets_provider, model_provider, ModelType.encoder_and_decoder, forward_step, args_defaults={'tokenizer_type': 'BertWordPieceLowerCase'}) \ No newline at end of file From e2ff3e62a291b37045d8b8c03a1393de0e5c160d Mon Sep 17 00:00:00 2001 From: zshao Date: Tue, 30 Jan 2024 14:36:32 +0800 Subject: [PATCH 234/296] Remove config file and hardcoded cache path --- megatron/config/default.yaml | 11 ----------- .../core/datasets/blended_megatron_dataset_builder.py | 1 - 2 files changed, 12 deletions(-) delete mode 100644 megatron/config/default.yaml diff --git a/megatron/config/default.yaml b/megatron/config/default.yaml deleted file mode 100644 index 73b74afd3a..0000000000 --- a/megatron/config/default.yaml +++ /dev/null @@ -1,11 +0,0 @@ -enable_one_logger: True - -wandb: - host: https://api.wandb.ai - api_key: ${oc.env:WANDB_API_KEY} - entity: zshao - project: MNIST - name: one-logger-megatron-test - tags: - - e2e_metrics_enabled - - e2e_metrics_testing \ No newline at end of file diff --git a/megatron/core/datasets/blended_megatron_dataset_builder.py b/megatron/core/datasets/blended_megatron_dataset_builder.py index 39f6d23630..c5c509ea7c 100644 --- a/megatron/core/datasets/blended_megatron_dataset_builder.py +++ b/megatron/core/datasets/blended_megatron_dataset_builder.py @@ -38,7 +38,6 @@ def __init__( self.cls = cls self.sizes = sizes self.config = config - self.config.path_to_cache = '/lustre/fsw/portfolios/hwinf/users/zshao/onelogger-test/Megatron-LM/data_cache' def build(self) -> 
List[Optional[Union[BlendedDataset, MegatronDataset]]]: """Build all dataset splits according to the provided blend(s) From eef48ef31cc037f05196c3b1d6e474348f4054c5 Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Tue, 30 Jan 2024 10:45:14 -0800 Subject: [PATCH 235/296] Fix the case when none token is allocated for local expert(s) with EP>1. --- megatron/core/transformer/moe/experts.py | 19 +++++++++++----- .../transformer/moe/test_grouped_mlp.py | 22 ++++++++++++++++++- 2 files changed, 34 insertions(+), 7 deletions(-) diff --git a/megatron/core/transformer/moe/experts.py b/megatron/core/transformer/moe/experts.py index cc8afcd322..2597ec673c 100644 --- a/megatron/core/transformer/moe/experts.py +++ b/megatron/core/transformer/moe/experts.py @@ -128,15 +128,22 @@ def glu(x): setattr(self.weight2, 'allreduce', not self.expert_parallel) def forward(self, permuted_local_hidden_states, tokens_per_expert): - # Reshape the weights for the grouped GEMMs. - w1 = self.weight1.view(self.num_local_experts, self.config.hidden_size, -1) - w2 = self.weight2.view(self.num_local_experts, -1, self.config.hidden_size) + if permuted_local_hidden_states.nelement() != 0: + # Reshape the weights for the grouped GEMMs. + w1 = self.weight1.view(self.num_local_experts, self.config.hidden_size, -1) + w2 = self.weight2.view(self.num_local_experts, -1, self.config.hidden_size) - fc1_output = gg.ops.gmm(permuted_local_hidden_states, w1, tokens_per_expert, trans_b=False) + fc1_output = gg.ops.gmm( + permuted_local_hidden_states, w1, tokens_per_expert, trans_b=False + ) - intermediate_parallel = self.activation_func(fc1_output) + intermediate_parallel = self.activation_func(fc1_output) - fc2_output = gg.ops.gmm(intermediate_parallel, w2, tokens_per_expert, trans_b=False) + fc2_output = gg.ops.gmm(intermediate_parallel, w2, tokens_per_expert, trans_b=False) + else: + # None token is allocated for local experts. + assert torch.count_nonzero(tokens_per_expert) == 0 + fc2_output = permuted_local_hidden_states return fc2_output, None diff --git a/tests/unit_tests/transformer/moe/test_grouped_mlp.py b/tests/unit_tests/transformer/moe/test_grouped_mlp.py index 468a594c3e..e10f4413fa 100644 --- a/tests/unit_tests/transformer/moe/test_grouped_mlp.py +++ b/tests/unit_tests/transformer/moe/test_grouped_mlp.py @@ -7,6 +7,7 @@ from megatron.arguments import parse_args from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec +from megatron.core.transformer.moe import grouped_gemm_util as gg from megatron.core.transformer.moe.moe_layer import MoELayer from megatron.core.transformer.transformer_config import TransformerConfig from megatron.initialize import _set_random_seed @@ -99,7 +100,7 @@ def test_constructor(self): assert self.switch_mlp_gmm.experts.weight2.shape[0] == self.num_experts * self.fc2_ffn_hidden_size assert self.switch_mlp_gmm.experts.weight2.shape[1] == self.hidden_size else: - assert self.switch_mlp_gmm.experts.weight1.shape == self.switch_mlp_gmm.weight2.t().shape + assert self.switch_mlp_gmm.experts.weight1.shape == self.switch_mlp_gmm.experts.weight2.t().shape def test_weight_init_value_the_same(self): gmm_w1 = self.switch_mlp_gmm.experts.weight1.view(self.num_experts, -1, self.hidden_size) @@ -144,6 +145,24 @@ def test_gpu_forward(self): # the same between gmm and smm (refer to test_weight_init_value_the_same.) 
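        # A looser sanity check that does hold for both code paths, since the output
        # shapes do not depend on how the expert weights were initialized:
        #   assert output_smm.shape == output_gmm.shape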
# assert torch.equal(output_smm, output_gmm) + @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") + @pytest.mark.skipif( + not DEVICE_CAPABILITY or DEVICE_CAPABILITY[0] < 8, reason='GroupedGEMM kernels are not supported on this device.' + ) + def test_gpu_forward_with_no_tokens_allocated(self): + """Test the case when no token is allocated for groupedGEMM kernels.""" + w1 = self.switch_mlp_gmm.experts.weight1.view(self.num_experts, -1, self.hidden_size) + num_allocated_tokens = 0 + tokens_per_expert = torch.zeros(self.num_experts) + hidden_states = torch.rand((num_allocated_tokens, self.hidden_size), dtype=torch.bfloat16) + hidden_states = hidden_states.cuda() + try: + gg.ops.gmm(hidden_states, w1, tokens_per_expert, trans_b=False) + except Exception as e: + print("Expected error message from groupedGEMM:", e) + assert str(e) == "Input batch_sizes should not be all zeros!" + + if __name__ == "__main__": for use_cpu_unitilization in [True, False]: for swiglu in [True, False]: @@ -155,4 +174,5 @@ def test_gpu_forward(self): GMLP_test.test_constructor() GMLP_test.test_weight_init_value_the_same() GMLP_test.test_gpu_forward() + GMLP_test.test_gpu_forward_with_no_tokens_allocated() GMLP_test.teardown_method(method=None) From 0bfeeaee6aa8f95f57003cf5f4f0b0540c6da86b Mon Sep 17 00:00:00 2001 From: Max Tian Date: Tue, 30 Jan 2024 14:30:59 -0500 Subject: [PATCH 236/296] rename output layer --- megatron/model/language_model.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/megatron/model/language_model.py b/megatron/model/language_model.py index c1819e212c..7ea8b6de63 100644 --- a/megatron/model/language_model.py +++ b/megatron/model/language_model.py @@ -451,6 +451,8 @@ def __init__(self, key=["layers",str(args.encoder_num_layers+1), "final_norm"]+key[1:] elif key[0]=="embedding": key=["layers", "0", "_".join(key[1:])] + elif key[0] == "output_layer": + key = ["layers", str(args.encoder_num_layers+1), "output_weights"] else: # Not implemented but still ok pass From a45805a3ee0645b85b48d14b0a8077fa5b1216b2 Mon Sep 17 00:00:00 2001 From: Jan Lasek Date: Tue, 30 Jan 2024 11:59:44 -0800 Subject: [PATCH 237/296] Generate causal mask for local layer spec --- megatron/core/fusions/fused_softmax.py | 18 +++++++- megatron/core/transformer/utils.py | 7 +++ .../unit_tests/fusions/test_torch_softmax.py | 44 +++++++++++++++++++ 3 files changed, 68 insertions(+), 1 deletion(-) create mode 100644 tests/unit_tests/fusions/test_torch_softmax.py diff --git a/megatron/core/fusions/fused_softmax.py b/megatron/core/fusions/fused_softmax.py index 56eb2e8011..c9c0baef09 100644 --- a/megatron/core/fusions/fused_softmax.py +++ b/megatron/core/fusions/fused_softmax.py @@ -1,10 +1,12 @@ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +from typing import Optional import torch import torch.nn as nn from megatron.core.transformer.enums import AttnMaskType +from megatron.core.transformer.utils import get_default_causal_mask class ScaledUpperTriangMaskedSoftmax(torch.autograd.Function): @@ -131,7 +133,12 @@ def __init__( assert self.scale is None or softmax_in_fp32, "softmax should be in fp32 when scaled" - def forward(self, input, mask): + def forward(self, input: torch.Tensor, mask: Optional[torch.Tensor]): + """Forward pass of softmax with masked input. + + In case attn_mask_type is causal the mask is generated and None can be passed. + A user-defined mask is only needed when attn_mask_type is not causal. 
+ """ # [b, np, sq, sk] assert input.dim() == 4 @@ -186,6 +193,15 @@ def forward_torch_softmax(self, input, mask): if self.scale is not None: input = input * self.scale + + # Generate causal mask if not given + sq, sk = input.size(2), input.size(3) + if self.attn_mask_type == AttnMaskType.causal and mask is None and sq > 1: + # If sq == 1 then either KV cache is used or one-element context is passed + # so keeping mask=None in this case; subsequent code should handle it + assert sq == sk, "causal mask is only for self attention" + mask = get_default_causal_mask(sq) + mask_output = self.mask_func(input, mask) if mask is not None else input probs = torch.nn.Softmax(dim=-1)(mask_output) diff --git a/megatron/core/transformer/utils.py b/megatron/core/transformer/utils.py index c5bf81b4bf..d128255aa8 100644 --- a/megatron/core/transformer/utils.py +++ b/megatron/core/transformer/utils.py @@ -1,6 +1,7 @@ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. """Utilities for transformer layers.""" +from functools import lru_cache from operator import itemgetter from typing import Any, Dict, Iterable, Iterator, Optional, Tuple, Union @@ -25,6 +26,12 @@ def get_linear_layer(rows, columns, init_method, perform_initialization=True): return layer +@lru_cache(maxsize=32) +def get_default_causal_mask(sq: int) -> torch.Tensor: + """Return the causal upper triangular mask for softmax input.""" + return torch.triu(torch.ones(sq, sq, device="cuda"), diagonal=1).bool() + + def attention_mask_func(attention_scores, attention_mask): attention_scores.masked_fill_(attention_mask, -10000.0) return attention_scores diff --git a/tests/unit_tests/fusions/test_torch_softmax.py b/tests/unit_tests/fusions/test_torch_softmax.py new file mode 100644 index 0000000000..e09c08936c --- /dev/null +++ b/tests/unit_tests/fusions/test_torch_softmax.py @@ -0,0 +1,44 @@ +import pytest +import torch + +from megatron.core.fusions.fused_softmax import FusedScaleMaskSoftmax +from megatron.core.transformer.enums import AttnMaskType +from megatron.core.transformer.utils import attention_mask_func + + +class TestTorchSoftmax: + def setup_method(self, method): + # The important settings tested are forward_torch_softmax path + # with locally generated casual mask for attention_mask_func: + self.softmax = FusedScaleMaskSoftmax( + input_in_fp16=False, + input_in_bf16=False, + attn_mask_type=AttnMaskType.causal, + scaled_masked_softmax_fusion=False, + mask_func=attention_mask_func, + softmax_in_fp32=True, + scale=None, + ) + + def test_output_shape(self): + x = torch.randn(8, 2, 4, 4, device="cuda") + y = self.softmax(x, None) + assert x.shape == y.shape + + def test_causal_mask_input_shape_assert(self): + x = torch.randn(1, 1, 4, 16, device="cuda") + with pytest.raises(AssertionError): + self.softmax(x, None) + + def test_causal_mask_equal_scores(self): + # For equal input values (e.g. zero) correctly masked softmax should + # produce equal scores among non-masked elements. 
For example, in case + # sq == sk == 2 the expected output is (ignoring b and np dimensions): + # [[1.0, 0.0], + # [0.5, 0.5]] + b, np, sq, sk = 8, 2, 32, 32 + x = torch.zeros([b, np, sq, sk]).cuda() + y = self.softmax(x, None) + y_expected = torch.tril(torch.ones(b, np, sq, sk, device="cuda")) + y_expected /= torch.arange(1, sq + 1, device="cuda").reshape((-1, 1)) + assert torch.allclose(y, y_expected, rtol=1e-08, atol=1e-08) From 918d415624fb8d25ae76bc41cabc9526d159a57d Mon Sep 17 00:00:00 2001 From: Eric Harper Date: Tue, 30 Jan 2024 12:41:00 -0800 Subject: [PATCH 238/296] Update minor version --- megatron/core/package_info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/megatron/core/package_info.py b/megatron/core/package_info.py index 55c49b1785..07de3fba41 100644 --- a/megatron/core/package_info.py +++ b/megatron/core/package_info.py @@ -2,7 +2,7 @@ MAJOR = 0 -MINOR = 4 +MINOR = 5 PATCH = 0 PRE_RELEASE = 'rc0' From eeb1b21af71e8a91ac362c14835ca1c9b76e5ee4 Mon Sep 17 00:00:00 2001 From: Jimmy Zhang Date: Tue, 30 Jan 2024 15:02:33 -0800 Subject: [PATCH 239/296] use TE checkpointing when FP8 Signed-off-by: Jimmy Zhang --- .../core/transformer/transformer_block.py | 48 ++++++++++++------- 1 file changed, 30 insertions(+), 18 deletions(-) diff --git a/megatron/core/transformer/transformer_block.py b/megatron/core/transformer/transformer_block.py index 269dd57dbb..a60351cb25 100644 --- a/megatron/core/transformer/transformer_block.py +++ b/megatron/core/transformer/transformer_block.py @@ -212,14 +212,25 @@ def custom_forward( return custom_forward - if self.config.recompute_method == 'uniform': - # Uniformly divide the total number of Transformer layers and checkpoint - # the input activation of each divided chunk. - # A method to further reduce memory usage reducing checkpoints. - l = 0 - while l < self.num_layers_per_pipeline_rank: - hidden_states, context = tensor_parallel.checkpoint( - custom(l, l + self.config.recompute_num_layers), + def checkpoint_handler(forward_func): + if self.config.fp8: + from transformer_engine.pytorch.distributed import checkpoint as te_checkpoint + + return te_checkpoint( + forward_func, + self.config.distribute_saved_activations, + tensor_parallel.random.get_cuda_rng_tracker, + parallel_state.get_tensor_model_parallel_group(), + hidden_states, + attention_mask, + context, + context_mask, + rotary_pos_emb, + packed_seq_params, + ) + else: + return tensor_parallel.checkpoint( + forward_func, self.config.distribute_saved_activations, hidden_states, attention_mask, @@ -229,6 +240,16 @@ def custom_forward( packed_seq_params, ) + if self.config.recompute_method == 'uniform': + # Uniformly divide the total number of Transformer layers and checkpoint + # the input activation of each divided chunk. + # A method to further reduce memory usage reducing checkpoints. + l = 0 + while l < self.num_layers_per_pipeline_rank: + hidden_states, context = checkpoint_handler( + custom(l, l + self.config.recompute_num_layers) + ) + l += self.config.recompute_num_layers elif self.config.recompute_method == 'block': @@ -237,16 +258,7 @@ def custom_forward( # A method fully use the device memory removing redundant re-computation. 
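            # Only the first `recompute_num_layers` layers on this pipeline rank are
            # checkpointed; the remaining layers run forward without recomputation.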
for l in range(self.num_layers_per_pipeline_rank): if l < self.config.recompute_num_layers: - hidden_states, context = tensor_parallel.checkpoint( - custom(l, l + 1), - self.config.distribute_saved_activations, - hidden_states, - attention_mask, - context, - context_mask, - rotary_pos_emb, - packed_seq_params, - ) + hidden_states, context = checkpoint_handler(custom(l, l + 1)) else: hidden_states, context = custom(l, l + 1)( hidden_states, From f8b277adbea9c09d8ea078fac74b9d20bf27d765 Mon Sep 17 00:00:00 2001 From: zshao Date: Wed, 31 Jan 2024 14:42:51 +0800 Subject: [PATCH 240/296] Remove unused hashlib --- megatron/training.py | 1 - 1 file changed, 1 deletion(-) diff --git a/megatron/training.py b/megatron/training.py index 27423c139e..f2f0819e49 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -4,7 +4,6 @@ import gc from datetime import datetime -import hashlib import math import logging import os From 0fcbff052bd98b015da19b9fc0cc7536b7d0a28b Mon Sep 17 00:00:00 2001 From: Alexandros Koumparoulis Date: Tue, 30 Jan 2024 03:10:57 -0800 Subject: [PATCH 241/296] Move grad-scale to loss.device Signed-off-by: Alexandros Koumparoulis --- megatron/core/pipeline_parallel/schedules.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/megatron/core/pipeline_parallel/schedules.py b/megatron/core/pipeline_parallel/schedules.py index b45aa8c87a..79939f3797 100644 --- a/megatron/core/pipeline_parallel/schedules.py +++ b/megatron/core/pipeline_parallel/schedules.py @@ -213,7 +213,7 @@ def forward_step( if config.num_moe_experts is not None: # Calculate the loss scale based on the grad_scale_func if available, else default to 1. loss_scale = ( - config.grad_scale_func(torch.tensor(1.0)) + config.grad_scale_func(torch.tensor(1.0, device=loss.device)) if config.grad_scale_func is not None else torch.tensor(1.0) ) From c3d057f5865cf7c8fb2e05ae9df55d2fa3e8528f Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Thu, 1 Feb 2024 02:12:41 +0000 Subject: [PATCH 242/296] code clean for moe. --- .../core/distributed/finalize_model_grads.py | 2 +- megatron/core/models/gpt/gpt_layer_specs.py | 2 +- megatron/core/transformer/moe/README.md | 11 ++-- megatron/core/transformer/moe/switch_mlp.py | 0 .../core/transformer/transformer_layer.py | 2 +- .../models/test_switch_mlp.py | 12 ++--- .../transformer/moe/test_grouped_mlp.py | 54 ++++++++++--------- .../transformer/moe/test_routers.py | 20 +++---- ...t_switch_mlp.py => test_sequential_mlp.py} | 20 +++---- 9 files changed, 65 insertions(+), 58 deletions(-) delete mode 100644 megatron/core/transformer/moe/switch_mlp.py rename tests/unit_tests/transformer/moe/{test_switch_mlp.py => test_sequential_mlp.py} (74%) diff --git a/megatron/core/distributed/finalize_model_grads.py b/megatron/core/distributed/finalize_model_grads.py index 916e4f3ecb..632ef49e3a 100644 --- a/megatron/core/distributed/finalize_model_grads.py +++ b/megatron/core/distributed/finalize_model_grads.py @@ -94,7 +94,7 @@ def _allreduce_expert_grads(model: List[torch.nn.Module], config: TransformerCon All-reduce expert grads (for expert parallelism). 
""" - # All-reduce switchmlp parameters across data modulo expert parallel nodes + # All-reduce MoE parameters across data modulo expert parallel nodes if ( config.expert_model_parallel_size > 1 and config.expert_model_parallel_size < parallel_state.get_data_parallel_world_size() diff --git a/megatron/core/models/gpt/gpt_layer_specs.py b/megatron/core/models/gpt/gpt_layer_specs.py index 2e35e1f250..c76a842c77 100755 --- a/megatron/core/models/gpt/gpt_layer_specs.py +++ b/megatron/core/models/gpt/gpt_layer_specs.py @@ -90,7 +90,7 @@ def _get_mlp_module_spec( ), ) else: - # SwitchMLP based MoE with modules in megatron core. + # Mixture of experts with modules in megatron core. return ModuleSpec( module=MoELayer, submodules=MLPSubmodules(linear_fc1=ColumnParallelLinear, linear_fc2=RowParallelLinear,) diff --git a/megatron/core/transformer/moe/README.md b/megatron/core/transformer/moe/README.md index fad581695b..5b28c9c318 100644 --- a/megatron/core/transformer/moe/README.md +++ b/megatron/core/transformer/moe/README.md @@ -22,9 +22,7 @@ ### Performance Optimizations - GroupedGEMM when num local experts > 1 - - Supported dtype: fp32/bf16/fp16 -- Token permutation / unpermutation fusion -- Fused Sinkhorn Kernel + - Supported dtype: bf16 ### Token Dispatch Mechanism @@ -36,6 +34,13 @@ ## Upcoming features +- Enhanced GroupedGEMM kernels + - Less host-device syncs. + - More supported dtype: fp32/bf16/fp16 + - Kernel heuristics tuned for A100/A10/L40S + - BWD cutlass GroupedGEMM kernels supported +- Token permutation / unpermutation fusion +- Fused Sinkhorn Kernel - Context Parallel with MoE - FP8 training support - Enable ’--tp-comm-overlap‘ for MoE diff --git a/megatron/core/transformer/moe/switch_mlp.py b/megatron/core/transformer/moe/switch_mlp.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/megatron/core/transformer/transformer_layer.py b/megatron/core/transformer/transformer_layer.py index 612c333a1c..140f651469 100644 --- a/megatron/core/transformer/transformer_layer.py +++ b/megatron/core/transformer/transformer_layer.py @@ -97,7 +97,7 @@ def __init__( ## [Module 8: MLP block] # TODO how to set the gpt_layer_spec.py when we have moe_frequency > 1, - # where MLP and SwitchMLP both appear alternately? + # where MLP and MoE layer both appear alternately? 
self.mlp = build_module(submodules.mlp, config=self.config) ## [Module 9: BiasDropoutFusion] diff --git a/tests/unit_tests/dist_checkpointing/models/test_switch_mlp.py b/tests/unit_tests/dist_checkpointing/models/test_switch_mlp.py index bf13162066..663c2bc418 100644 --- a/tests/unit_tests/dist_checkpointing/models/test_switch_mlp.py +++ b/tests/unit_tests/dist_checkpointing/models/test_switch_mlp.py @@ -15,7 +15,7 @@ from tests.unit_tests.test_utilities import Utils -def initialize_switch_mlp(seed, glu=True, **config_kwargs): +def initialize_sequential_mlp(seed, glu=True, **config_kwargs): torch.manual_seed(seed) model_parallel_cuda_manual_seed(seed) @@ -39,7 +39,7 @@ def get_pp_offsets(): return ((0, pp_rank, pp_size),) -class TestSwitchMLPReconfiguration: +class TestSequentialMLPReconfiguration: @pytest.mark.parametrize("src_tp_pp_exp,dest_tp_pp_exp,use_glu", [ # changing PP is impossible because the number of layers must be the same ((2, 4, 1), (2, 4, 1), False), @@ -59,18 +59,18 @@ def test_parallel_reconfiguration_e2e(self, tmp_path_dist_ckpt, src_tp_pp_exp, d """ Test model saving and loading with different TP/PP/expert parallelism """ src_tp, src_pp, src_exp = src_tp_pp_exp dest_tp, dest_pp, dest_exp = dest_tp_pp_exp - with TempNamedDir(tmp_path_dist_ckpt / 'test_switch_mlp_reconfiguration_model_A') as ckpt_dir_A, \ - TempNamedDir(tmp_path_dist_ckpt / 'test_switch_mlp_reconfiguration_model_B') as ckpt_dir_B: + with TempNamedDir(tmp_path_dist_ckpt / 'test_sequential_mlp_reconfiguration_model_A') as ckpt_dir_A, \ + TempNamedDir(tmp_path_dist_ckpt / 'test_sequential_mlp_reconfiguration_model_B') as ckpt_dir_B: # Save checkpoint A Utils.initialize_model_parallel(src_tp, src_pp, expert_model_parallel_size=src_exp) - model_A = initialize_switch_mlp(1, use_glu) + model_A = initialize_sequential_mlp(1, use_glu) sharded_state_dict = model_A.sharded_state_dict(sharded_offsets=get_pp_offsets()) save(sharded_state_dict, ckpt_dir_A) Utils.destroy_model_parallel() # Load checkpoint A with different TP/PP/expert and save as checkpoint B Utils.initialize_model_parallel(dest_tp, dest_pp, expert_model_parallel_size=dest_exp) - model_B = initialize_switch_mlp(2, use_glu) + model_B = initialize_sequential_mlp(2, use_glu) state_dict = load(model_B.sharded_state_dict(sharded_offsets=get_pp_offsets()), ckpt_dir_A) model_B.load_state_dict(state_dict) save(model_B.sharded_state_dict(sharded_offsets=get_pp_offsets()), ckpt_dir_B) diff --git a/tests/unit_tests/transformer/moe/test_grouped_mlp.py b/tests/unit_tests/transformer/moe/test_grouped_mlp.py index e10f4413fa..8aa552654a 100644 --- a/tests/unit_tests/transformer/moe/test_grouped_mlp.py +++ b/tests/unit_tests/transformer/moe/test_grouped_mlp.py @@ -53,7 +53,7 @@ def setup_method(self, method, use_cpu_initialization=False, swiglu=True): _set_random_seed(seed_=123, data_parallel_random_init=False) transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec( self.num_experts, moe_grouped_gemm=False) - self.switch_mlp_smm = MoELayer(tf_config, + self.sequential_mlp = MoELayer(tf_config, transformer_layer_spec.submodules.mlp.submodules) self.args = parse_args(ignore_unknown_args=True) @@ -61,25 +61,25 @@ def setup_method(self, method, use_cpu_initialization=False, swiglu=True): # Bias is not supported in grouped gemm currently, thus we disable the # bias in the linear layer. 
self.args.add_bias_linear=False - self.switch_mlp_smm = Float16Module(self.switch_mlp_smm, self.args).module + self.sequential_mlp = Float16Module(self.sequential_mlp, self.args).module print("done intializing for sequential gemm") ## Grouped GEMM _set_random_seed(seed_=123, data_parallel_random_init=False) tf_config.moe_grouped_gemm = True - self.switch_mlp_gmm = MoELayer(tf_config) - self.switch_mlp_gmm = Float16Module(self.switch_mlp_gmm, self.args).module + self.grouped_mlp = MoELayer(tf_config) + self.grouped_mlp = Float16Module(self.grouped_mlp, self.args).module print("done intializing for grouped gemm") def teardown_method(self, method): Utils.destroy_model_parallel() def test_constructor(self): - assert isinstance(self.switch_mlp_smm, MoELayer) - assert isinstance(self.switch_mlp_gmm, MoELayer) + assert isinstance(self.sequential_mlp, MoELayer) + assert isinstance(self.grouped_mlp, MoELayer) - num_weights_smm = sum([p.numel() for p in self.switch_mlp_smm.parameters()]) - num_weights_gmm = sum([p.numel() for p in self.switch_mlp_gmm.parameters()]) + num_weights_smm = sum([p.numel() for p in self.sequential_mlp.parameters()]) + num_weights_gmm = sum([p.numel() for p in self.grouped_mlp.parameters()]) # For the same hyper-parm model configs except the `moe_grouped_gemm`, # GroupedGEMM and sequential GEMMs should hold the same number of parms. @@ -90,30 +90,30 @@ def test_constructor(self): self.hidden_size * (self.fc1_ffn_hidden_size + self.fc2_ffn_hidden_size) * self.num_experts assert num_weights_smm == expected_num_weights - assert torch.equal(self.switch_mlp_smm.router.weight, self.switch_mlp_gmm.router.weight) + assert torch.equal(self.sequential_mlp.router.weight, self.grouped_mlp.router.weight) # weight1: [h, num_experts*4h] # weight2: [num_experts*4h, h] - assert self.switch_mlp_gmm.experts.weight1.shape[0] == self.hidden_size - assert self.switch_mlp_gmm.experts.weight1.shape[1] == self.num_experts * self.fc1_ffn_hidden_size + assert self.grouped_mlp.experts.weight1.shape[0] == self.hidden_size + assert self.grouped_mlp.experts.weight1.shape[1] == self.num_experts * self.fc1_ffn_hidden_size if self.gated_linear_unit: - assert self.switch_mlp_gmm.experts.weight2.shape[0] == self.num_experts * self.fc2_ffn_hidden_size - assert self.switch_mlp_gmm.experts.weight2.shape[1] == self.hidden_size + assert self.grouped_mlp.experts.weight2.shape[0] == self.num_experts * self.fc2_ffn_hidden_size + assert self.grouped_mlp.experts.weight2.shape[1] == self.hidden_size else: - assert self.switch_mlp_gmm.experts.weight1.shape == self.switch_mlp_gmm.experts.weight2.t().shape + assert self.grouped_mlp.experts.weight1.shape == self.grouped_mlp.experts.weight2.t().shape def test_weight_init_value_the_same(self): - gmm_w1 = self.switch_mlp_gmm.experts.weight1.view(self.num_experts, -1, self.hidden_size) - gmm_w2 = self.switch_mlp_gmm.experts.weight2.view(self.num_experts, self.hidden_size, -1) + gmm_w1 = self.grouped_mlp.experts.weight1.view(self.num_experts, -1, self.hidden_size) + gmm_w2 = self.grouped_mlp.experts.weight2.view(self.num_experts, self.hidden_size, -1) gmm_expert1_fc1 = gmm_w1[0] gmm_expert1_fc2 = gmm_w2[0] gmm_expert2_fc1 = gmm_w1[1] gmm_expert2_fc2 = gmm_w2[1] - smm_expert1_fc1 = self.switch_mlp_smm.experts.local_experts[0].linear_fc1.weight - smm_expert1_fc2 = self.switch_mlp_smm.experts.local_experts[0].linear_fc2.weight - smm_expert2_fc1 = self.switch_mlp_smm.experts.local_experts[1].linear_fc1.weight - smm_expert2_fc2 = 
self.switch_mlp_smm.experts.local_experts[1].linear_fc2.weight + smm_expert1_fc1 = self.sequential_mlp.experts.local_experts[0].linear_fc1.weight + smm_expert1_fc2 = self.sequential_mlp.experts.local_experts[0].linear_fc2.weight + smm_expert2_fc1 = self.sequential_mlp.experts.local_experts[1].linear_fc1.weight + smm_expert2_fc2 = self.sequential_mlp.experts.local_experts[1].linear_fc2.weight assert torch.equal(gmm_expert1_fc1, smm_expert1_fc1) if not self.use_cpu_initialization: @@ -129,17 +129,17 @@ def test_weight_init_value_the_same(self): not DEVICE_CAPABILITY or DEVICE_CAPABILITY[0] < 8, reason='GroupedGEMM kernels are not supported on this device.' ) def test_gpu_forward(self): - self.switch_mlp_smm.cuda() - self.switch_mlp_gmm.cuda() + self.sequential_mlp.cuda() + self.grouped_mlp.cuda() # [sequence length, batch size, hidden size] seq_len = 3 #32 batch_size = 2 hidden_states = torch.rand( - (seq_len, batch_size, self.switch_mlp_smm.config.hidden_size), + (seq_len, batch_size, self.sequential_mlp.config.hidden_size), dtype=torch.bfloat16) hidden_states = hidden_states.cuda() - output_smm, _ = self.switch_mlp_smm(hidden_states) - output_gmm, _ = self.switch_mlp_gmm(hidden_states) + output_smm, _ = self.sequential_mlp(hidden_states) + output_gmm, _ = self.grouped_mlp(hidden_states) # The following assert fails due to the param init value is not exactly # the same between gmm and smm (refer to test_weight_init_value_the_same.) @@ -151,7 +151,7 @@ def test_gpu_forward(self): ) def test_gpu_forward_with_no_tokens_allocated(self): """Test the case when no token is allocated for groupedGEMM kernels.""" - w1 = self.switch_mlp_gmm.experts.weight1.view(self.num_experts, -1, self.hidden_size) + w1 = self.grouped_mlp.experts.weight1.view(self.num_experts, -1, self.hidden_size) num_allocated_tokens = 0 tokens_per_expert = torch.zeros(self.num_experts) hidden_states = torch.rand((num_allocated_tokens, self.hidden_size), dtype=torch.bfloat16) @@ -175,4 +175,6 @@ def test_gpu_forward_with_no_tokens_allocated(self): GMLP_test.test_weight_init_value_the_same() GMLP_test.test_gpu_forward() GMLP_test.test_gpu_forward_with_no_tokens_allocated() + import pdb + pdb.set_trace() GMLP_test.teardown_method(method=None) diff --git a/tests/unit_tests/transformer/moe/test_routers.py b/tests/unit_tests/transformer/moe/test_routers.py index fb6668ddf1..f1db99f371 100644 --- a/tests/unit_tests/transformer/moe/test_routers.py +++ b/tests/unit_tests/transformer/moe/test_routers.py @@ -31,10 +31,10 @@ def setup_method(self, method): transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec( num_experts=num_moe_experts, moe_grouped_gemm=False ) - self.switch_mlp = MoELayer( + self.sequential_mlp = MoELayer( self.transformer_config, transformer_layer_spec.submodules.mlp.submodules ) - self.router = self.switch_mlp.router + self.router = self.sequential_mlp.router def teardown_method(self, method): Utils.destroy_model_parallel() @@ -62,25 +62,25 @@ def test_router_forward(self): @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_aux_loss(self): - self.switch_mlp = self.switch_mlp.cuda() + self.sequential_mlp = self.sequential_mlp.cuda() # Without aux loss hidden_states = torch.randn((32, 2, self.router.config.hidden_size)) hidden_states = hidden_states.cuda() - out = self.switch_mlp(hidden_states)[0] + out = self.sequential_mlp(hidden_states)[0] out.sum().mul_(0).backward() - assert self.switch_mlp.router.weight.grad.abs().sum() == 0 + assert 
self.sequential_mlp.router.weight.grad.abs().sum() == 0 # With aux loss self.transformer_config.moe_aux_loss_coeff = 1 - out = self.switch_mlp(hidden_states)[0] + out = self.sequential_mlp(hidden_states)[0] out.sum().mul_(0).backward() - assert self.switch_mlp.router.weight.grad.abs().sum() > 0 + assert self.sequential_mlp.router.weight.grad.abs().sum() > 0 # With Z loss self.transformer_config.moe_aux_loss_coeff = 0 self.transformer_config.moe_z_loss_coeff = 1 - self.switch_mlp.router.weight.grad.fill_(0) - out = self.switch_mlp(hidden_states)[0] + self.sequential_mlp.router.weight.grad.fill_(0) + out = self.sequential_mlp(hidden_states)[0] out.sum().mul_(0).backward() - assert self.switch_mlp.router.weight.grad.abs().sum() > 0 \ No newline at end of file + assert self.sequential_mlp.router.weight.grad.abs().sum() > 0 \ No newline at end of file diff --git a/tests/unit_tests/transformer/moe/test_switch_mlp.py b/tests/unit_tests/transformer/moe/test_sequential_mlp.py similarity index 74% rename from tests/unit_tests/transformer/moe/test_switch_mlp.py rename to tests/unit_tests/transformer/moe/test_sequential_mlp.py index 65f5ad319d..3865ea6972 100644 --- a/tests/unit_tests/transformer/moe/test_switch_mlp.py +++ b/tests/unit_tests/transformer/moe/test_sequential_mlp.py @@ -10,7 +10,7 @@ from megatron.core.transformer.transformer_config import TransformerConfig from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec -class TestParallelSwitchMLP: +class TestParallelSequentialMLP: def setup_method(self, method): Utils.initialize_model_parallel(1,1) @@ -31,30 +31,30 @@ def setup_method(self, method): ) transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec( num_experts=num_moe_experts, moe_grouped_gemm=False) - self.switch_mlp = MoELayer(transformer_config, transformer_layer_spec.submodules.mlp.submodules) + self.sequentail_mlp = MoELayer(transformer_config, transformer_layer_spec.submodules.mlp.submodules) def teardown_method(self, method): Utils.destroy_model_parallel() def test_constructor(self): - assert isinstance(self.switch_mlp, MoELayer) + assert isinstance(self.sequentail_mlp, MoELayer) - num_weights = sum([p.numel() for p in self.switch_mlp.parameters()]) + num_weights = sum([p.numel() for p in self.sequentail_mlp.parameters()]) assert num_weights == 3696 @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_gpu_forward(self): - switch_mlp = self.switch_mlp - switch_mlp.cuda() + sequentail_mlp = self.sequentail_mlp + sequentail_mlp.cuda() # [sequence length, batch size, hidden size] - hidden_states = torch.ones((32, 2, switch_mlp.config.hidden_size)) + hidden_states = torch.ones((32, 2, sequentail_mlp.config.hidden_size)) hidden_states = hidden_states.cuda() - output, output_bias = switch_mlp(hidden_states) + output, output_bias = sequentail_mlp(hidden_states) assert output.shape[0] == 32 assert output.shape[1] == 2 - assert output.shape[2] == switch_mlp.config.hidden_size - assert output_bias.shape[2] == switch_mlp.config.hidden_size + assert output.shape[2] == sequentail_mlp.config.hidden_size + assert output_bias.shape[2] == sequentail_mlp.config.hidden_size assert output.dtype == torch.float32 assert output.device.type == 'cuda' assert output_bias.device.type == 'cuda' From a1ba50f878ba6c6d3c0c679c4ec9e5e5bbd1bfa1 Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Wed, 31 Jan 2024 19:01:33 -0800 Subject: [PATCH 243/296] update readme. 
--- megatron/core/transformer/moe/README.md | 13 ++++++++----- .../transformer/moe/test_sequential_mlp.py | 18 +++++++++--------- 2 files changed, 17 insertions(+), 14 deletions(-) diff --git a/megatron/core/transformer/moe/README.md b/megatron/core/transformer/moe/README.md index 5b28c9c318..907573a705 100644 --- a/megatron/core/transformer/moe/README.md +++ b/megatron/core/transformer/moe/README.md @@ -5,18 +5,17 @@ - **Expert Parallel** - A specific method of parallelism for MoE models, where experts are partitioned onto different workers and each worker processes a different batch of training samples, each worker process one or more experts for each MoE layer. - **3D Parallel**: Data Parallel , Tensor Parallel, Pipeline Parallel, Sequence Parallel - - Note: When using MoE and tensor parallelism, sequence parallelism must be used. + - Note: When using MoE with expert parallelism and tensor parallelism, sequence parallelism must be used. - **Richer parallel mappings**: EP can be combined with DP/TP/PP/SP for handling larger MoE variants. - **Distributed optimizer.** ### Router and Load Balancing - Router type: - - Top-K router + - Top-K MLP router - Expert Choice router (coming soon) - Load Balancing algorithms: - Sinkhorn (S-BASE) - - Z-Loss - Aux loss / Load balancing loss ### Performance Optimizations @@ -34,8 +33,8 @@ ## Upcoming features -- Enhanced GroupedGEMM kernels - - Less host-device syncs. +- Enhanced cutlass GroupedGEMM kernels + - Reduced host-device syncs. - More supported dtype: fp32/bf16/fp16 - Kernel heuristics tuned for A100/A10/L40S - BWD cutlass GroupedGEMM kernels supported @@ -44,6 +43,7 @@ - Context Parallel with MoE - FP8 training support - Enable ’--tp-comm-overlap‘ for MoE +- Distributed optimizer for MoE params. # User Guide @@ -58,6 +58,7 @@ | moe-router-topk | Number of experts to route to for each token. The default is 2. | | moe-aux-loss-coeff | Scaling coefficient for the aux loss: a starting value of 1e-2 is recommended. | | moe-z-loss-coeff | Scaling coefficient for the z-loss: a starting value of 1e-3 is recommended. | +| moe-input-jitter-eps | Add noise to the input tensor by applying jitter with a specified epsilon value. | | moe-token-dropping | This feature involves selectively dropping and padding tokens for each expert to achieve a specified capacity, similar to GShard, Switch-Transformer, and DeepSpeed-MoE. Note: Currently unsupported. | ### Example @@ -67,9 +68,11 @@ To train a top-2 MoE model with an auxiliary loss, include the following argumen ```python --num-experts 8 --expert-model-parallel-size 8 +--moe-grouped-gemm --moe-router-load-balancing-type aux_loss # options: aux_loss, sinkhorn, None. Default is sinkhorn1. --moe-router-topk 2 --moe-aux-loss-coeff 1e-2 +--use-distributed-optimizer ``` ## A detailed MoE script:
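As a quick reference for the `--moe-aux-loss-coeff` option documented above: the following is a minimal, self-contained sketch of the Switch-Transformer-style load-balancing loss that this coefficient scales. The function name and tensor shapes here are illustrative only; the in-tree implementation is `switch_load_balancing_loss_func` in megatron/core/transformer/moe/moe_utils.py.

    import torch

    def switch_load_balancing_loss(gates: torch.Tensor, mask: torch.Tensor, coeff: float) -> torch.Tensor:
        # gates: [num_tokens, num_experts] softmax probabilities produced by the router
        # mask:  [num_tokens, num_experts] k-hot mask of the experts selected for each token
        num_experts = mask.size(-1)
        top_k = mask[0].count_nonzero()                     # number of experts each token routes to
        gates_mean = gates.mean(dim=0)                      # average router probability per expert
        selection_mean = mask.float().mean(dim=0) / top_k   # fraction of routed tokens per expert, normalized by k
        return torch.sum(gates_mean * selection_mean) * num_experts * coeff

With perfectly uniform routing the sum evaluates to 1/num_experts, so the loss reduces to exactly `coeff`, which makes the recommended starting value of 1e-2 easy to interpret as a target scale for the imbalance penalty.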
diff --git a/tests/unit_tests/transformer/moe/test_sequential_mlp.py b/tests/unit_tests/transformer/moe/test_sequential_mlp.py index 3865ea6972..0ebb85333e 100644 --- a/tests/unit_tests/transformer/moe/test_sequential_mlp.py +++ b/tests/unit_tests/transformer/moe/test_sequential_mlp.py @@ -31,30 +31,30 @@ def setup_method(self, method): ) transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec( num_experts=num_moe_experts, moe_grouped_gemm=False) - self.sequentail_mlp = MoELayer(transformer_config, transformer_layer_spec.submodules.mlp.submodules) + self.sequential_mlp = MoELayer(transformer_config, transformer_layer_spec.submodules.mlp.submodules) def teardown_method(self, method): Utils.destroy_model_parallel() def test_constructor(self): - assert isinstance(self.sequentail_mlp, MoELayer) + assert isinstance(self.sequential_mlp, MoELayer) - num_weights = sum([p.numel() for p in self.sequentail_mlp.parameters()]) + num_weights = sum([p.numel() for p in self.sequential_mlp.parameters()]) assert num_weights == 3696 @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_gpu_forward(self): - sequentail_mlp = self.sequentail_mlp - sequentail_mlp.cuda() + sequential_mlp = self.sequential_mlp + sequential_mlp.cuda() # [sequence length, batch size, hidden size] - hidden_states = torch.ones((32, 2, sequentail_mlp.config.hidden_size)) + hidden_states = torch.ones((32, 2, sequential_mlp.config.hidden_size)) hidden_states = hidden_states.cuda() - output, output_bias = sequentail_mlp(hidden_states) + output, output_bias = sequential_mlp(hidden_states) assert output.shape[0] == 32 assert output.shape[1] == 2 - assert output.shape[2] == sequentail_mlp.config.hidden_size - assert output_bias.shape[2] == sequentail_mlp.config.hidden_size + assert output.shape[2] == sequential_mlp.config.hidden_size + assert output_bias.shape[2] == sequential_mlp.config.hidden_size assert output.dtype == torch.float32 assert output.device.type == 'cuda' assert output_bias.device.type == 'cuda' From 2ee86c51c2e3db315f45958d51ae7ba1ca340a9a Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Wed, 31 Jan 2024 22:53:56 -0800 Subject: [PATCH 244/296] divide the selection_mean by top_k for normalization. --- megatron/core/transformer/moe/moe_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/megatron/core/transformer/moe/moe_utils.py b/megatron/core/transformer/moe/moe_utils.py index 36c3279f52..aae0f55544 100644 --- a/megatron/core/transformer/moe/moe_utils.py +++ b/megatron/core/transformer/moe/moe_utils.py @@ -14,7 +14,8 @@ def switch_load_balancing_loss_func(gates, mask, moe_aux_loss_coeff): """ num_experts = mask.size(-1) gates_mean = gates.mean(dim=0) - selection_mean = mask.float().mean(dim=0) + top_k = mask[0].count_nonzero() + selection_mean = mask.float().mean(dim=0) / top_k aux_loss = torch.sum(gates_mean * selection_mean) * num_experts aux_loss *= moe_aux_loss_coeff return aux_loss From 2e1f8699b3cdcd358a7fb29a19dc0fdb158257d3 Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Wed, 31 Jan 2024 23:09:46 -0800 Subject: [PATCH 245/296] add license. 
--- megatron/core/transformer/moe/experts.py | 1 + megatron/core/transformer/moe/moe_utils.py | 2 ++ megatron/core/transformer/moe/token_dispatcher.py | 2 ++ tests/unit_tests/transformer/moe/test_grouped_mlp.py | 2 -- 4 files changed, 5 insertions(+), 2 deletions(-) diff --git a/megatron/core/transformer/moe/experts.py b/megatron/core/transformer/moe/experts.py index 9d1539d5d3..b7c4118d49 100644 --- a/megatron/core/transformer/moe/experts.py +++ b/megatron/core/transformer/moe/experts.py @@ -1,4 +1,5 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + from typing import Tuple import numpy as np diff --git a/megatron/core/transformer/moe/moe_utils.py b/megatron/core/transformer/moe/moe_utils.py index aae0f55544..3e42151642 100644 --- a/megatron/core/transformer/moe/moe_utils.py +++ b/megatron/core/transformer/moe/moe_utils.py @@ -1,3 +1,5 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + import torch diff --git a/megatron/core/transformer/moe/token_dispatcher.py b/megatron/core/transformer/moe/token_dispatcher.py index 15ef70fb03..1b7857b6b2 100644 --- a/megatron/core/transformer/moe/token_dispatcher.py +++ b/megatron/core/transformer/moe/token_dispatcher.py @@ -1,3 +1,5 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + from abc import abstractmethod from typing import List diff --git a/tests/unit_tests/transformer/moe/test_grouped_mlp.py b/tests/unit_tests/transformer/moe/test_grouped_mlp.py index 8aa552654a..e443272db8 100644 --- a/tests/unit_tests/transformer/moe/test_grouped_mlp.py +++ b/tests/unit_tests/transformer/moe/test_grouped_mlp.py @@ -175,6 +175,4 @@ def test_gpu_forward_with_no_tokens_allocated(self): GMLP_test.test_weight_init_value_the_same() GMLP_test.test_gpu_forward() GMLP_test.test_gpu_forward_with_no_tokens_allocated() - import pdb - pdb.set_trace() GMLP_test.teardown_method(method=None) From e5102e705bd4bf4a9869edce6a3aaec71f385111 Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Thu, 1 Feb 2024 02:34:16 -0800 Subject: [PATCH 246/296] update readme. --- megatron/core/transformer/moe/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/megatron/core/transformer/moe/README.md b/megatron/core/transformer/moe/README.md index 907573a705..56cae2f586 100644 --- a/megatron/core/transformer/moe/README.md +++ b/megatron/core/transformer/moe/README.md @@ -69,7 +69,7 @@ To train a top-2 MoE model with an auxiliary loss, include the following argumen --num-experts 8 --expert-model-parallel-size 8 --moe-grouped-gemm ---moe-router-load-balancing-type aux_loss # options: aux_loss, sinkhorn, None. Default is sinkhorn1. +--moe-router-load-balancing-type aux_loss # options: aux_loss, sinkhorn, None. Default is aux_loss. 
--moe-router-topk 2 --moe-aux-loss-coeff 1e-2 --use-distributed-optimizer From 6aad2116dfeeeeff9da0dd732a76fb7057200c9f Mon Sep 17 00:00:00 2001 From: Maanu Grover Date: Thu, 1 Feb 2024 12:14:18 -0800 Subject: [PATCH 247/296] JET Migration Updates --- .gitlab-ci.yml | 9 +- jet-tests.yml | 91 +++++++------ .../functional_tests/jet_recipes/MR-bert.yaml | 108 ++++++++++++++++ .../functional_tests/jet_recipes/MR-gpt.yaml | 122 ++++++++++++++++++ tests/functional_tests/jet_recipes/MR-t5.yaml | 50 +++++++ .../jet_recipes/build-pyt.yaml | 21 +++ .../jet_recipes/monthly-t5.yaml | 108 ++++++++++++++++ .../jet_recipes/nightly-bert.yaml | 51 ++++++++ .../jet_recipes/nightly-gpt.yaml | 61 +++++++++ .../python_test_utils/jet_test_pipeline.py | 84 +++++++----- ...eps-50_tp-1_pp-2_mcore-false_te-false.json | 1 + ...0_tp-1_pp-4_mcore-false_te-false_vp-2.json | 1 + ...eps-50_tp-2_pp-2_mcore-false_te-false.json | 1 + ...teps-50_tp-2_pp-2_mcore-true_te-false.json | 1 + ...ethod-uniform-recompute-num-layers-1-.json | 1 - ...des-1_gpus-8_bs-32_steps-50_tp-1_pp-2.json | 1 - ...2_args--position-embedding-type-rope-.json | 1 - ...des-1_gpus-8_bs-32_steps-50_tp-1_pp-4.json | 1 - ...0_tp-1_pp-4_args--disable-bias-linear.json | 1 - ...-50_tp-1_pp-4_args--sequence-parallel.json | 1 - ...bs-32_steps-50_tp-1_pp-4_args--swiglu.json | 1 - ...--untie-embeddings-and-output-weights.json | 1 - ...des-1_gpus-8_bs-32_steps-50_tp-2_pp-2.json | 1 - ...des-1_gpus-8_bs-32_steps-50_tp-4_pp-1.json | 1 - ...ps-100_tp-1_pp-2_mcore-false_te-false.json | 1 + ...ute-num-layers-1-_mcore-true_te-false.json | 1 + ...ibuted-optimizer_mcore-false_te-false.json | 1 + ...edding-type-rope-_mcore-true_te-false.json | 1 + ...sable-bias-linear_mcore-true_te-false.json | 1 + ...sequence-parallel_mcore-true_te-false.json | 1 + ...pp-4_args--swiglu_mcore-true_te-false.json | 1 + ...nd-output-weights_mcore-true_te-false.json | 1 + ...grad-reduce_mcore-false_te-false_vp-1.json | 1 + ...0_tp-1_pp-4_mcore-false_te-false_vp-1.json | 1 + ...50_tp-1_pp-4_mcore-true_te-false_vp-1.json | 1 + ...-parallel-size-2-_mcore-true_te-false.json | 1 + ...eps-50_tp-2_pp-2_mcore-false_te-false.json | 1 + ...teps-50_tp-2_pp-2_mcore-false_te-true.json | 1 + ...teps-50_tp-2_pp-2_mcore-true_te-false.json | 1 + ...rlap-grad-reduce_mcore-false_te-false.json | 1 + ...rlap-grad-reduce_mcore-false_te-false.json | 1 + ...lap-grad-reduce-_mcore-false_te-false.json | 1 + ...eps-50_tp-1_pp-2_mcore-false_te-false.json | 1 + ...teps-50_tp-1_pp-2_mcore-true_te-false.json | 1 + ...rlap-grad-reduce_mcore-false_te-false.json | 1 + ...grad-reduce_mcore-false_te-false_vp-1.json | 1 + ...eps-50_tp-1_pp-4_mcore-false_te-false.json | 1 + ...teps-50_tp-1_pp-4_mcore-true_te-false.json | 1 + ...s--num-experts-2-_mcore-true_te-false.json | 1 + ...--num-experts-4-_mcore-false_te-false.json | 1 + ...rlap-grad-reduce_mcore-false_te-false.json | 1 + ...-parallel-size-2-_mcore-true_te-false.json | 1 + ...rlap-grad-reduce_mcore-false_te-false.json | 1 + ...eps-50_tp-4_pp-1_mcore-false_te-false.json | 1 + ...teps-50_tp-4_pp-1_mcore-true_te-false.json | 1 + ...100_tp-1_pp-1_mcore-true_te-true_vp-1.json | 1 + ...bert_distributed_resume_checkpoint_test.sh | 10 +- .../bert/pretrain_bert_distributed_test.sh | 4 +- ...gpt3_distributed_resume_checkpoint_test.sh | 13 +- ...n_t5_distributed_resume_checkpoint_test.sh | 9 +- .../t5/pretrain_t5_distributed_test.sh | 4 +- 61 files changed, 690 insertions(+), 101 deletions(-) create mode 100644 tests/functional_tests/jet_recipes/MR-bert.yaml create mode 100644 
tests/functional_tests/jet_recipes/MR-gpt.yaml create mode 100644 tests/functional_tests/jet_recipes/MR-t5.yaml create mode 100644 tests/functional_tests/jet_recipes/build-pyt.yaml create mode 100644 tests/functional_tests/jet_recipes/monthly-t5.yaml create mode 100644 tests/functional_tests/jet_recipes/nightly-bert.yaml create mode 100644 tests/functional_tests/jet_recipes/nightly-gpt.yaml create mode 100644 tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-2_mcore-false_te-false.json create mode 100644 tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-4_mcore-false_te-false_vp-2.json create mode 100644 tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-false_te-false.json create mode 100644 tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-true_te-false.json delete mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--recompute-granularity-full-recompute-method-uniform-recompute-num-layers-1-.json delete mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2.json delete mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args--position-embedding-type-rope-.json delete mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4.json delete mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--disable-bias-linear.json delete mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--sequence-parallel.json delete mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--swiglu.json delete mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--untie-embeddings-and-output-weights.json delete mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2.json delete mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request-resume_16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--recompute-granularity-full-recompute-method-uniform-recompute-num-layers-1-_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--use-distributed-optimizer_mcore-false_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args--position-embedding-type-rope-_mcore-true_te-false.json create mode 100644 
tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--disable-bias-linear_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--sequence-parallel_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--swiglu_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--untie-embeddings-and-output-weights_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--use-distributed-optimizer-overlap-grad-reduce_mcore-false_te-false_vp-1.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false_vp-1.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false_vp-1.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args--sequence-parallel-num-experts-8-expert-model-parallel-size-2-_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-true.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args--use-distributed-optimizer-overlap-grad-reduce_mcore-false_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--overlap-grad-reduce_mcore-false_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--use-distributed-optimizer-overlap-grad-reduce-_mcore-false_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_mcore-false_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--overlap-grad-reduce_mcore-false_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--overlap-grad-reduce_mcore-false_te-false_vp-1.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false.json create mode 100644 
tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--num-experts-2-_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--num-experts-4-_mcore-false_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--overlap-grad-reduce_mcore-false_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--sequence-parallel-num-experts-4-expert-model-parallel-size-2-_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args--overlap-grad-reduce_mcore-false_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_mcore-false_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/t5_220m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-1_mcore-true_te-true_vp-1.json diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index d0ad2c1eb7..4983188e29 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -14,6 +14,7 @@ variables: &VARS TESTS_TO_RUN_AFTER_MERGING: "MR_TESTS NIGHTLY_TESTS" # Can specify levels TESTS_TO_RUN_ON_THIS_COMMIT: unit_tests TEST_REGEX_ON_THIS_COMMIT: NONE #https://github.com/google/re2/wiki/Syntax (Can define regex as in this spec) e.g /.*gpt3.*/ + JET_CUSTOM_FILTER: "" DISPLAY_OUTPUT: "True" # Set to true for new tests to copy the logs for creating golden truth file TIME_LIMIT: "10:00" # Default time limit for all jobs MOE_GROUPED_GEMM: 0 # Set to 1 to enable grouped gemm for MoE @@ -85,9 +86,9 @@ formatting: when: always - if: '$CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGING' when: always - - if: $CI_MERGE_REQUEST_APPROVED && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED + - if: $CI_PIPELINE_SOURCE == "merge_request_event" && $CI_MERGE_REQUEST_APPROVED && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED when: always - - if: '$CI_MERGE_REQUEST_LABELS =~ "READY FOR REVIEW" && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED' + - if: $CI_PIPELINE_SOURCE == "merge_request_event" && $CI_MERGE_REQUEST_LABELS =~ "READY FOR REVIEW" && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED when: always allow_failure: false retry: 2 @@ -108,9 +109,9 @@ formatting: when: always - if: '$CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGING' when: always - - if: $CI_MERGE_REQUEST_APPROVED && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED + - if: $CI_PIPELINE_SOURCE == "merge_request_event" && $CI_MERGE_REQUEST_APPROVED && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED when: always - - if: '$CI_MERGE_REQUEST_LABELS =~ "READY FOR REVIEW" && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED' + - if: $CI_PIPELINE_SOURCE == "merge_request_event" && $CI_MERGE_REQUEST_LABELS =~ "READY FOR REVIEW" && $TEST_LEVEL =~ 
$TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED when: always allow_failure: false retry: 2 diff --git a/jet-tests.yml b/jet-tests.yml index 02d441354a..ae77f14b4a 100644 --- a/jet-tests.yml +++ b/jet-tests.yml @@ -1,58 +1,65 @@ .jet_common: stage: jet rules: - - if: '"JET" =~ $TESTS_TO_RUN_ON_THIS_COMMIT' - - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH && "JET" =~ $TESTS_TO_RUN_AFTER_MERGING - - if: $CI_MERGE_REQUEST_APPROVED && "JET" =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED - - if: '$CI_MERGE_REQUEST_LABELS == "READY FOR REVIEW" && "JET" =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED' + - if: $CI_PIPELINE_SOURCE == 'merge_request_event' && ( $CI_MERGE_REQUEST_APPROVED || $CI_MERGE_REQUEST_LABELS =~ "READY FOR REVIEW" ) + - if: $JET_CUSTOM_FILTER != "" && $CI_PIPELINE_SOURCE != 'merge_request_event' && $CI_PIPELINE_SOURCE != 'schedule' + - when: never -jet-generate: - extends: .jet_common +include: + - project: dl/jet/gitlab-templates + ref: main + file: downstreams.yml + +jet-setup: + extends: [ .jet_common ] + tags: + - os/linux + script: + - set -x + - | + if [[ $CI_PIPELINE_SOURCE == "merge_request_event" ]] && [[ $CI_MERGE_REQUEST_APPROVED || $CI_MERGE_REQUEST_LABELS =~ "READY FOR REVIEW" ]]; then + JET_FILTER="type == 'build' or 'merge-request' in spec.scope" + elif [[ -n $JET_CUSTOM_FILTER && $CI_PIPELINE_SOURCE != 'merge_request_event' && $CI_PIPELINE_SOURCE != 'schedule' ]]; then + JET_FILTER=$JET_CUSTOM_FILTER + else + JET_FILTER="False" + fi + echo "_JET_FILTER=$JET_FILTER" | tee -a config.env + artifacts: + reports: + dotenv: config.env + +jet-configure: + extends: [.jet_common, .jet-configure] tags: - - docker_local_runner - variables: - JET_WORKLOADS_REF_MAIN: megatron-core - JET_WORKLOADS_REF_EPHEMERAL: ephemeral/${CI_PROJECT_PATH_SLUG}/${CI_PIPELINE_ID} + - os/linux script: - wget https://github.com/mikefarah/yq/releases/download/v4.35.2/yq_linux_amd64.tar.gz -O - | tar xz && mv yq_linux_amd64 /usr/local/bin/yq - - git clone https://gitlab-ci-token:${JET_WORKLOADS_TOKEN}@gitlab-master.nvidia.com/dl/jet/workloads-registry jet-workloads-registry - - - cd jet-workloads-registry - - git config user.name "Megatron-LM CI" - - git config user.email "megatron-lm@ci.nvidia.com" - - - git checkout -f "$JET_WORKLOADS_REF_MAIN" - - git checkout -b "$JET_WORKLOADS_REF_EPHEMERAL" - + - cd tests/functional_tests/jet_recipes - | if [[ $CI_PIPELINE_SOURCE == "merge_request_event" ]]; then - yq e ".spec.source.ref = \"merge-requests/${CI_MERGE_REQUEST_IID}/head\"" -i recipes/build-pyt.yaml + yq e ".spec.source.ref = \"merge-requests/${CI_MERGE_REQUEST_IID}/head\"" -i build-pyt.yaml else - yq e ".spec.source.ref = \"${CI_COMMIT_REF_NAME}\"" -i recipes/build-pyt.yaml + yq e ".spec.source.ref = \"${CI_COMMIT_REF_NAME}\"" -i build-pyt.yaml fi - - - git add recipes/build-pyt.yaml - - git commit -m "Dynamic configuration - ${CI_PIPELINE_ID}" - - git push origin "$JET_WORKLOADS_REF_EPHEMERAL" + artifacts: + paths: + - tests/functional_tests/jet_recipes jet-trigger: - extends: .jet_common - needs: [ jet-generate ] - when: on_success - inherit: - variables: - - CI_PROJECT_PATH_SLUG - - CI_PIPELINE_ID - - TESTS_TO_RUN_ON_THIS_COMMIT - - TESTS_TO_RUN_AFTER_MERGING - - TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED - variables: - JET_WORKLOADS_REF: ephemeral/${CI_PROJECT_PATH_SLUG}/${CI_PIPELINE_ID} - JET_WORKLOADS_FILTER: "True" + stage: jet + extends: [.jet_common, .jet-trigger] + needs: [ jet-configure, jet-setup ] trigger: project: dl/jet/ci - branch: megatron-core + branch: mcore/eos strategy: depend + inherit: + 
variables: + - JET_CUSTOM_FILTER + variables: + JET_WORKLOADS_FILTER: "$_JET_FILTER" + jet-functional-results: extends: .jet_common @@ -60,12 +67,11 @@ jet-functional-results: - docker_local_runner image: gitlab-master.nvidia.com:5005/dl/jet/api:latest needs: [ jet-trigger ] - when: on_success before_script: - jet secrets jwt-login jwt/nvidia/gitlab-master adlr-megatron-lm-ci $CI_JOB_JWT script: - python -m pip install -U --no-cache-dir prettytable - - python tests/functional_tests/python_test_utils/jet_test_pipeline.py "ephemeral/${CI_PROJECT_PATH_SLUG}/${CI_PIPELINE_ID}" --test exit + - python tests/functional_tests/python_test_utils/jet_test_pipeline.py ${CI_PIPELINE_ID} --test exit jet-compare-metrics: extends: .jet_common @@ -73,9 +79,8 @@ jet-compare-metrics: - docker_local_runner image: gitlab-master.nvidia.com:5005/dl/jet/api:latest needs: [ jet-functional-results ] - when: on_success before_script: - jet secrets jwt-login jwt/nvidia/gitlab-master adlr-megatron-lm-ci $CI_JOB_JWT script: - python -m pip install -U --no-cache-dir pytest tensorboard - - python tests/functional_tests/python_test_utils/jet_test_pipeline.py "ephemeral/${CI_PROJECT_PATH_SLUG}/${CI_PIPELINE_ID}" --test metrics + - python tests/functional_tests/python_test_utils/jet_test_pipeline.py ${CI_PIPELINE_ID} --test metrics diff --git a/tests/functional_tests/jet_recipes/MR-bert.yaml b/tests/functional_tests/jet_recipes/MR-bert.yaml new file mode 100644 index 0000000000..4c9a6cbfaf --- /dev/null +++ b/tests/functional_tests/jet_recipes/MR-bert.yaml @@ -0,0 +1,108 @@ +type: recipe +format_version: 1 +maintainers: [maanug] +loggers: [stdout] +spec: + model: bert + variant: 345m + build: mcore-pyt + scope: merge-request + nodes: 1 + gpus: 8 + platforms: [dgx_h100] + steps: 50 + use_te: False + use_mcore: True + vp_size: null + extra_args: null + micro_batch_size: 4 # MBS + batch_size: 128 # GBS, JET schema requires 'batch_size' + precision: bf16 + time_limit: 1200 + artifacts: {/workspace/data/bert_data: text/the_pile/bert_shard00} + script: |- + ls + cd /workspace/megatron-lm + + ./tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_test.sh \ + DATA_PATH=/workspace/data/bert_data/my-bert_00_text_sentence \ + CHECKPOINT_PATH=/workspace/checkpoints \ + TENSORBOARD_DIR={assets_dir} \ + DATA_CACHE=/workspace/data/index-cache \ + USE_TE={"1" if use_te else "0"} \ + TP_SIZE={tp_size} \ + PP_SIZE={pp_size} \ + NUM_NODES={nodes} \ + MAX_STEPS={steps} \ + USE_CORE={"1" if use_mcore else "0"} \ + VP_SIZE={vp_size if vp_size is not None else '""'} \ + MBS={micro_batch_size} \ + GBS={batch_size} \ + ADDITIONAL_PARAMS={extra_args if extra_args is not None else '""'} && \ + python3 ./tests/functional_tests/python_test_utils/get_test_results_from_tensorboard_logs.py {assets_dir} "" | \ + tee {assets_dir}/results.json +products: + # MCore + - {tp_size: [2], pp_size: [2]} + # Non-MCore + - {use_mcore: [False], tp_size: [2], pp_size: [2]} + - {use_mcore: [False], tp_size: [1], pp_size: [4], vp_size: [2]} +key_segments: + vp_size: vp + use_mcore: mcore + use_te: te + extra_args: args + + +--- +### Resume from ckpt ### +type: recipe +format_version: 1 +maintainers: [maanug] +loggers: [stdout] +spec: + model: bert + variant: 345m + build: mcore-pyt + scope: merge-request-resume + nodes: 1 + gpus: 8 + platforms: [dgx_h100] + steps: 50 + use_te: False + use_mcore: True + vp_size: null + extra_args: null + micro_batch_size: 4 # MBS + batch_size: 128 # GBS, JET schema requires 'batch_size' + precision: bf16 + time_limit: 1200 + 
artifacts: {/workspace/data/bert_data: text/the_pile/bert_shard00} + script: |- + ls + cd /workspace/megatron-lm + + ./tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_resume_checkpoint_test.sh \ + DATA_PATH=/workspace/data/bert_data/my-bert_00_text_sentence \ + CHECKPOINT_PATH=/workspace/checkpoints \ + TENSORBOARD_DIR={assets_dir} \ + DATA_CACHE=/workspace/data/index-cache \ + USE_TE={"1" if use_te else "0"} \ + TP_SIZE={tp_size} \ + PP_SIZE={pp_size} \ + NUM_NODES={nodes} \ + MAX_STEPS={steps} \ + USE_CORE={"1" if use_mcore else "0"} \ + VP_SIZE={vp_size if vp_size is not None else '""'} \ + MBS={micro_batch_size} \ + GBS={batch_size} \ + ADDITIONAL_PARAMS={extra_args if extra_args is not None else '""'} && \ + python3 ./tests/functional_tests/python_test_utils/get_test_results_from_tensorboard_logs.py {assets_dir} "" | \ + tee {assets_dir}/results.json +products: + - {use_mcore: [False], tp_size: [1], pp_size: [2]} +key_segments: + vp_size: vp + use_mcore: mcore + use_te: te + extra_args: args diff --git a/tests/functional_tests/jet_recipes/MR-gpt.yaml b/tests/functional_tests/jet_recipes/MR-gpt.yaml new file mode 100644 index 0000000000..e0d5b982f8 --- /dev/null +++ b/tests/functional_tests/jet_recipes/MR-gpt.yaml @@ -0,0 +1,122 @@ +type: recipe +format_version: 1 +maintainers: [maanug] +loggers: [stdout] +spec: + model: gpt3 + variant: 345m + build: mcore-pyt + scope: merge-request + nodes: 1 + gpus: 8 + platforms: [dgx_h100] + steps: 50 + use_te: False + use_mcore: True + vp_size: null + extra_args: null + micro_batch_size: 4 # MBS + batch_size: 32 # GBS, JET schema requires 'batch_size' + precision: bf16 + time_limit: 1200 + artifacts: {/workspace/data/gpt3_data: text/the_pile/shard00} + script: |- + ls + cd /workspace/megatron-lm + + ./tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh \ + DATA_PATH=/workspace/data/gpt3_data/my-gpt3_00_text_document \ + CHECKPOINT_PATH=/workspace/checkpoints \ + TENSORBOARD_DIR={assets_dir} \ + VOCAB_FILE=/workspace/data/gpt3_data/bpe/vocab.json \ + MERGE_FILE=/workspace/data/gpt3_data/bpe/merges.txt \ + DATA_CACHE=/workspace/data/index-cache \ + USE_TE={"1" if use_te else "0"} \ + TP_SIZE={tp_size} \ + PP_SIZE={pp_size} \ + NUM_NODES={nodes} \ + MAX_STEPS={steps} \ + USE_CORE={"1" if use_mcore else "0"} \ + VP_SIZE={vp_size if vp_size is not None else '""'} \ + MBS={micro_batch_size} \ + GBS={batch_size} \ + ADDITIONAL_PARAMS={extra_args if extra_args is not None else '""'} && \ + python3 ./tests/functional_tests/python_test_utils/get_test_results_from_tensorboard_logs.py {assets_dir} "" | \ + tee {assets_dir}/results.json +products: + # MCore + - {tp_size: [2], pp_size: [2]} + - {tp_size: [1], pp_size: [4], vp_size: [1]} + - {tp_size: [1], pp_size: [2], extra_args: ['"--position-embedding-type rope"']} + - tp_size: [1] + pp_size: [4] + extra_args: ["--swiglu", "--disable-bias-linear", "--untie-embeddings-and-output-weights", "--sequence-parallel"] + - {tp_size: [1], pp_size: [1], extra_args: ['"--recompute-granularity full --recompute-method uniform --recompute-num-layers 1"']} + # - {tp_size: [2], pp_size: [1], extra_args: ['"--context-parallel-size 2 --sequence-parallel --hidden-dropout 0.0 --attention-dropout 0.0"']} # TODO: need updated container with TE > 1.0.0 + - {tp_size: [2], pp_size: [1], extra_args: ['"--sequence-parallel --num-experts 8 --expert-model-parallel-size 2"']} + # Non-MCore + - {use_mcore: [False], use_te: [False, True], tp_size: [2], pp_size: [2]} + - {use_mcore: [False], tp_size: 
[1], pp_size: [4], vp_size: [1]} + - {use_mcore: [False], tp_size: [1], pp_size: [1], extra_args: ["--use-distributed-optimizer"]} + - {use_mcore: [False], tp_size: [4], pp_size: [1], extra_args: ["--use-distributed-optimizer --overlap-grad-reduce"]} + - {use_mcore: [False], tp_size: [1], pp_size: [4], vp_size: [1], extra_args: ["--use-distributed-optimizer --overlap-grad-reduce"]} +key_segments: + vp_size: vp + use_mcore: mcore + use_te: te + extra_args: args + + +--- +### Resume from ckpt ### +type: recipe +format_version: 1 +maintainers: [maanug] +loggers: [stdout] +spec: + model: gpt3 + variant: 345m + build: mcore-pyt + scope: merge-request-resume + nodes: 1 + gpus: 8 + platforms: [dgx_h100] + steps: 100 + use_te: False + use_mcore: True + vp_size: null + extra_args: null + micro_batch_size: 4 # MBS + batch_size: 32 # GBS, JET schema requires 'batch_size' + precision: 16 + time_limit: 1200 + artifacts: {/workspace/data/gpt3_data: text/the_pile/shard00} + script: |- + ls + cd /workspace/megatron-lm + + ./tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_resume_checkpoint_test.sh \ + DATA_PATH=/workspace/data/gpt3_data/my-gpt3_00_text_document \ + CHECKPOINT_PATH=/workspace/checkpoints \ + TENSORBOARD_DIR={assets_dir} \ + VOCAB_FILE=/workspace/data/gpt3_data/bpe/vocab.json \ + MERGE_FILE=/workspace/data/gpt3_data/bpe/merges.txt \ + DATA_CACHE=/workspace/data/index-cache \ + USE_TE={"1" if use_te else "0"} \ + TP_SIZE={tp_size} \ + PP_SIZE={pp_size} \ + NUM_NODES={nodes} \ + USE_CORE={"1" if use_mcore else "0"} \ + VP_SIZE={vp_size if vp_size is not None else '""'} \ + MBS={micro_batch_size} \ + GBS={batch_size} \ + ADDITIONAL_PARAMS={extra_args if extra_args is not None else '""'} && \ + python3 ./tests/functional_tests/python_test_utils/get_test_results_from_tensorboard_logs.py {assets_dir} "" | \ + tee {assets_dir}/results.json +products: + - {use_mcore: [False], tp_size: [1], pp_size: [2]} +key_segments: + vp_size: vp + use_mcore: mcore + use_te: te + extra_args: args diff --git a/tests/functional_tests/jet_recipes/MR-t5.yaml b/tests/functional_tests/jet_recipes/MR-t5.yaml new file mode 100644 index 0000000000..a7895effa3 --- /dev/null +++ b/tests/functional_tests/jet_recipes/MR-t5.yaml @@ -0,0 +1,50 @@ +type: recipe +format_version: 1 +maintainers: [maanug] +loggers: [stdout] +spec: + model: t5 + variant: 220m + build: mcore-pyt + scope: merge-request + nodes: 1 + gpus: 8 + platforms: [dgx_h100] + steps: 100 + use_te: False + use_mcore: True + vp_size: null + extra_args: null + micro_batch_size: 4 # MBS + batch_size: 32 # GBS, JET schema requires 'batch_size' + precision: bf16 + time_limit: 1800 + artifacts: {/workspace/data/t5_data: text/the_pile/t5_shard00} + script: |- + ls + cd /workspace/megatron-lm + + ./tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_test.sh \ + DATA_PATH="/workspace/data/t5_data/my-t5_00_text_document" \ + CHECKPOINT_PATH=/workspace/checkpoints \ + TENSORBOARD_DIR={assets_dir} \ + DATA_CACHE=/workspace/data/index-cache \ + USE_TE={"1" if use_te else "0"} \ + TP_SIZE={tp_size} \ + PP_SIZE={pp_size} \ + NUM_NODES={nodes} \ + MAX_STEPS={steps} \ + USE_CORE={"1" if use_mcore else "0"} \ + VP_SIZE={vp_size if vp_size is not None else '""'} \ + MBS={micro_batch_size} \ + GBS={batch_size} \ + ADDITIONAL_PARAMS={extra_args if extra_args is not None else '""'} && \ + python3 ./tests/functional_tests/python_test_utils/get_test_results_from_tensorboard_logs.py {assets_dir} "" | \ + tee {assets_dir}/results.json +products: + - {use_te: 
[True], tp_size: [1], pp_size: [1], vp_size: [1]} +key_segments: + vp_size: vp + use_mcore: mcore + use_te: te + extra_args: args diff --git a/tests/functional_tests/jet_recipes/build-pyt.yaml b/tests/functional_tests/jet_recipes/build-pyt.yaml new file mode 100644 index 0000000000..5bc86217bc --- /dev/null +++ b/tests/functional_tests/jet_recipes/build-pyt.yaml @@ -0,0 +1,21 @@ +type: build +format_version: 1 +maintainers: [maanug] +spec: + name: pyt + platforms: [linux/amd64] + source: + image: nvcr.io/nvidia/pytorch:23.04-py3 + +--- +type: build +format_version: 1 +maintainers: [maanug] +spec: + name: mcore-pyt + platforms: [linux/amd64] + parent: pyt + source: + repo: https://gitlab-master.nvidia.com/ADLR/megatron-lm.git + ref: main + dockerfile: Dockerfile.ci diff --git a/tests/functional_tests/jet_recipes/monthly-t5.yaml b/tests/functional_tests/jet_recipes/monthly-t5.yaml new file mode 100644 index 0000000000..65269b7006 --- /dev/null +++ b/tests/functional_tests/jet_recipes/monthly-t5.yaml @@ -0,0 +1,108 @@ +type: recipe +format_version: 1 +maintainers: [maanug] +loggers: [stdout] +spec: + model: t5 + variant: 220m + build: mcore-pyt + scope: monthly + nodes: 1 + gpus: 8 + platforms: [dgx_h100] + steps: 100 + use_te: False + use_mcore: True + vp_size: 1 + extra_args: null + micro_batch_size: 4 # MBS + batch_size: 32 # GBS, JET schema requires 'batch_size' + precision: bf16 + time_limit: 1800 + artifacts: {/workspace/data/t5_data: text/the_pile/t5_shard00} + script: |- + ls + cd /workspace/megatron-lm + + ./tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_test.sh \ + DATA_PATH="/workspace/data/t5_data/my-t5_00_text_document" \ + CHECKPOINT_PATH=/workspace/checkpoints \ + TENSORBOARD_DIR={assets_dir} \ + DATA_CACHE=/workspace/data/index-cache \ + USE_TE={"1" if use_te else "0"} \ + TP_SIZE={tp_size} \ + PP_SIZE={pp_size} \ + NUM_NODES={nodes} \ + MAX_STEPS={steps} \ + USE_CORE={"1" if use_mcore else "0"} \ + VP_SIZE={vp_size if vp_size is not None else '""'} \ + MBS={micro_batch_size} \ + GBS={batch_size} \ + ADDITIONAL_PARAMS={extra_args if extra_args is not None else '""'} && \ + python3 ./tests/functional_tests/python_test_utils/get_test_results_from_tensorboard_logs.py {assets_dir} "" | \ + tee {assets_dir}/results.json +products: + - { tp_size: [1,2], pp_size: [1] } + - use_te: [True] + tp_size: [2] + pp_size: [1] + extra_args: [null, "--sequence-parallel"] +key_segments: + # vp_size: vp + use_mcore: mcore + use_te: te + extra_args: args + + +--- +### Resume from ckpt ### +type: recipe +format_version: 1 +maintainers: [maanug] +loggers: [stdout] +spec: + model: t5 + variant: 220m + build: mcore-pyt + scope: monthly-resume + nodes: 1 + gpus: 8 + platforms: [dgx_h100] + steps: 100 + use_te: False + use_mcore: True + vp_size: 1 + extra_args: null + micro_batch_size: 4 # MBS + batch_size: 32 # GBS, JET schema requires 'batch_size' + precision: bf16 + time_limit: 1800 + artifacts: {/workspace/data/t5_data: text/the_pile/t5_shard00} + script: |- + ls + cd /workspace/megatron-lm + + ./tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_resume_checkpoint_test.sh \ + DATA_PATH="/workspace/data/t5_data/my-t5_00_text_document" \ + CHECKPOINT_PATH=/workspace/checkpoints \ + TENSORBOARD_DIR={assets_dir} \ + VOCAB_PATH="/workspace/data/t5_data/bert-large-cased-vocab.txt" \ + DATA_CACHE=/workspace/data/index-cache \ + USE_TE={"1" if use_te else "0"} \ + TP_SIZE={tp_size} \ + PP_SIZE={pp_size} \ + NUM_NODES={nodes} \ + USE_CORE={"1" if use_mcore else "0"} \ + 
VP_SIZE={vp_size if vp_size is not None else '""'} \ + MBS={micro_batch_size} \ + GBS={batch_size} \ + ADDITIONAL_PARAMS={extra_args if extra_args is not None else '""'} && \ + python3 ./tests/functional_tests/python_test_utils/get_test_results_from_tensorboard_logs.py {assets_dir} "" | \ + tee {assets_dir}/results.json +products: + - {use_te: [False, True], tp_size: [1], pp_size: [1]} +key_segments: + # vp_size: vp + use_mcore: mcore + use_te: te + extra_args: args diff --git a/tests/functional_tests/jet_recipes/nightly-bert.yaml b/tests/functional_tests/jet_recipes/nightly-bert.yaml new file mode 100644 index 0000000000..2569833aaf --- /dev/null +++ b/tests/functional_tests/jet_recipes/nightly-bert.yaml @@ -0,0 +1,51 @@ +type: recipe +format_version: 1 +maintainers: [maanug] +loggers: [stdout] +spec: + model: bert + variant: 345m + build: mcore-pyt + scope: nightly + nodes: 1 + gpus: 8 + platforms: [dgx_h100] + steps: 50 + use_te: False + use_mcore: True + vp_size: null + extra_args: null + micro_batch_size: 4 # MBS + batch_size: 128 # GBS, JET schema requires 'batch_size' + precision: bf16 + time_limit: 1200 + artifacts: {/workspace/data/bert_data: text/the_pile/bert_shard00} + script: |- + ls + cd /workspace/megatron-lm + + ./tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_test.sh \ + DATA_PATH=/workspace/data/bert_data/my-bert_00_text_sentence \ + CHECKPOINT_PATH=/workspace/checkpoints \ + TENSORBOARD_DIR={assets_dir} \ + DATA_CACHE=/workspace/data/index-cache \ + USE_TE={"1" if use_te else "0"} \ + TP_SIZE={tp_size} \ + PP_SIZE={pp_size} \ + NUM_NODES={nodes} \ + MAX_STEPS={steps} \ + USE_CORE={"1" if use_mcore else "0"} \ + VP_SIZE={vp_size if vp_size is not None else '""'} \ + MBS={micro_batch_size} \ + GBS={batch_size} \ + ADDITIONAL_PARAMS={extra_args if extra_args is not None else '""'} && \ + python3 ./tests/functional_tests/python_test_utils/get_test_results_from_tensorboard_logs.py {assets_dir} "" | \ + tee {assets_dir}/results.json +products: + - {use_mcore: [True, False], tp_size: [4], pp_size: [1]} + - {use_mcore: [True, False], tp_size: [1], pp_size: [2,4]} +key_segments: + # vp_size: vp + use_mcore: mcore + use_te: te + extra_args: args diff --git a/tests/functional_tests/jet_recipes/nightly-gpt.yaml b/tests/functional_tests/jet_recipes/nightly-gpt.yaml new file mode 100644 index 0000000000..5cc8c6444f --- /dev/null +++ b/tests/functional_tests/jet_recipes/nightly-gpt.yaml @@ -0,0 +1,61 @@ +type: recipe +format_version: 1 +maintainers: [maanug] +loggers: [stdout] +spec: + model: gpt3 + variant: 345m + build: mcore-pyt + scope: nightly + nodes: 1 + gpus: 8 + platforms: [dgx_h100] + steps: 50 + use_te: False + use_mcore: True + vp_size: null + extra_args: null + micro_batch_size: 4 # MBS + batch_size: 32 # GBS, JET schema requires 'batch_size' + precision: bf16 + time_limit: 1200 + artifacts: {/workspace/data/gpt3_data: text/the_pile/shard00} + script: |- + ls + cd /workspace/megatron-lm + + ./tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh \ + DATA_PATH=/workspace/data/gpt3_data/my-gpt3_00_text_document \ + CHECKPOINT_PATH=/workspace/checkpoints \ + TENSORBOARD_DIR={assets_dir} \ + VOCAB_FILE=/workspace/data/gpt3_data/bpe/vocab.json \ + MERGE_FILE=/workspace/data/gpt3_data/bpe/merges.txt \ + DATA_CACHE=/workspace/data/index-cache \ + USE_TE={"1" if use_te else "0"} \ + TP_SIZE={tp_size} \ + PP_SIZE={pp_size} \ + NUM_NODES={nodes} \ + MAX_STEPS={steps} \ + USE_CORE={"1" if use_mcore else "0"} \ + VP_SIZE={vp_size if vp_size is 
not None else '""'} \ + MBS={micro_batch_size} \ + GBS={batch_size} \ + ADDITIONAL_PARAMS={extra_args if extra_args is not None else '""'} && \ + python3 ./tests/functional_tests/python_test_utils/get_test_results_from_tensorboard_logs.py {assets_dir} "" | \ + tee {assets_dir}/results.json +products: + - {use_mcore: [True, False], tp_size: [4], pp_size: [1]} + - {use_mcore: [True, False], tp_size: [1], pp_size: [2,4]} + - tp_size: [2] + pp_size: [2] + extra_args: ['"--num-experts 2"', '"--sequence-parallel --num-experts 4 --expert-model-parallel-size 2"'] +# Non-MCore + - {use_mcore: [False], tp_size: [1,4], pp_size: [1], extra_args: ["--overlap-grad-reduce"]} + - {use_mcore: [False], tp_size: [1], pp_size: [1], extra_args: ['"--use-distributed-optimizer --overlap-grad-reduce"']} + - {use_mcore: [False], tp_size: [1], pp_size: [4], vp_size: [null, 1], extra_args: ["--overlap-grad-reduce"]} + - {use_mcore: [False], tp_size: [2], pp_size: [2], extra_args: ["--overlap-grad-reduce", '"--num-experts 4"']} +key_segments: + vp_size: vp + use_mcore: mcore + use_te: te + extra_args: args diff --git a/tests/functional_tests/python_test_utils/jet_test_pipeline.py b/tests/functional_tests/python_test_utils/jet_test_pipeline.py index 6bf2a483e3..6ab4ac5666 100644 --- a/tests/functional_tests/python_test_utils/jet_test_pipeline.py +++ b/tests/functional_tests/python_test_utils/jet_test_pipeline.py @@ -11,14 +11,14 @@ def select_asset(assets, prefix): return asset['s_url'] -def query_results(ephemeral_branch): +def query_results(triggering_pipeline_id): service = JETInstance().log_service() query = ( JETLogsQuery() - .filter(Field('obj_workloads_registry.s_commit_ref') == ephemeral_branch) + .filter(Field('obj_ci.obj_upstream.l_pipeline_id') == triggering_pipeline_id) .filter(Field('obj_workload.s_type') == 'recipe') - .select('l_exit_code', 'nested_assets', 'obj_workload.s_key', 'obj_workload.obj_spec') - .orderby('-ts_created') # decreasing (most recent in case of timestamp) + .select('l_exit_code', 'nested_assets', 'obj_workload.s_key', 'obj_workload.obj_spec', 'ts_created') + .orderby('ts_created') # increasing (least recent in case of timestamp) ) return service.query(query, flatten=False) @@ -26,22 +26,24 @@ def query_results(ephemeral_branch): def check_exitcodes(results): from prettytable import PrettyTable - exit_codes = [] - log_urls = [] - names = [] + exit_codes = {} + log_urls = {} + names = {} for result in results: - exit_codes.append(result['l_exit_code']) - log_urls.append(select_asset(result['nested_assets'], 'output_script.log')) - name = result['obj_workload']['s_key'].strip('recipe/') + key = result['obj_workload']['s_key'] + + exit_codes[key] = result['l_exit_code'] + log_urls[key] = select_asset(result['nested_assets'], 'output_script-0.log') + name = result['obj_workload']['s_key'].lstrip('recipe/') remove_substr = result['obj_workload']['obj_spec']['s_build'] + \ '_' + result['obj_workload']['obj_spec']['s_scope'] - names.append(''.join(name.split(remove_substr))) + names[key] = ''.join(name.split(remove_substr)) table = PrettyTable() - table.add_column("Job Key", names) - table.add_column("Exit Code", exit_codes) - table.add_column("Log URL", log_urls) - exit_codes_good = [ec == 0 for ec in exit_codes] + table.add_column("Job Key", list(names.values())) + table.add_column("Exit Code", list(exit_codes.values())) + table.add_column("Log URL", list(log_urls.values())) + exit_codes_good = [ec == 0 for ec in exit_codes.values()] if not all(exit_codes_good): raise Exception("Some jobs 
failed to complete successfully\n" + table.get_string()) else: @@ -49,22 +51,23 @@ def check_exitcodes(results): print("All jobs completed successfully!") -def check_baselines(results): +def _download_log(url, save_dir): import requests - import pytest - from tempfile import TemporaryDirectory + if not os.path.exists(save_dir): + os.mkdir(save_dir) + filepath = os.path.join(save_dir, url.split('/')[-1]) + + r = requests.get(url) + if r.ok: + with open(filepath, mode='wb') as f: + f.write(r.content) + else: + print(f"WARNING: Unable to download file at {url}. Received status {r.status_code}") - def download_log(url, save_dir): - if not os.path.exists(save_dir): - os.mkdir(save_dir) - filepath = os.path.join(save_dir, url.split('/')[-1]) - r = requests.get(url) - if r.ok: - with open(filepath, mode='wb') as f: - f.write(r.content) - else: - print(f"WARNING: Unable to download file at {url}. Received status {r.status_code}") +def check_baselines(results): + import pytest + from tempfile import TemporaryDirectory with TemporaryDirectory() as tmpdir: # Download TB event logs @@ -72,7 +75,7 @@ def download_log(url, save_dir): event_log_url = select_asset(result['nested_assets'], 'events.out.tfevents') target_dir = result['obj_workload']['s_key'].lstrip('recipe/') target_dir = os.path.join(tmpdir, target_dir) - download_log(event_log_url, target_dir) + _download_log(event_log_url, target_dir) # Run pytest on logs os.environ["EXPECTED_METRICS_DIR"] = "tests/functional_tests/test_results/jet" @@ -81,15 +84,32 @@ def download_log(url, save_dir): ['tests/functional_tests/python_test_utils/multitest_ci_pipeline.py::TestBulkCIPipeline'])) +def fetch_metrics_files(results, save_dir): + for result in results: + metrics_url = select_asset(result['nested_assets'], 'results.json') + if metrics_url is not None: + cfg = result['obj_workload']['s_key'].lstrip('recipe/') + target_dir = os.path.join(save_dir, cfg) + _download_log(metrics_url, target_dir) + + with open(os.path.join(target_dir, 'results.json'), 'r') as full_results_file: + with open(os.path.join(target_dir, cfg+'.json'), 'w') as golden_file: + golden_file.write(full_results_file.readlines()[-1].strip()) + + if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( - 'eph_branch', help="JET Workloads registry ephemeral branch created by 'jet-generate' job in this pipeline") - parser.add_argument('--test', required=True, choices=[ + 'pipeline_id', help="Pipeline ID for pipeline in MLM repo that triggers the JET CI") + parser.add_argument('--test', required=False, choices=[ 'exit', 'metrics'], help="Check exit status of jobs with 'exit' or perf and loss with 'metrics'") + parser.add_argument('--download_metrics_dir', help="Directory in which to save the results.json files from jobs. Will not save files if not set. 
Set this if you want to update golden values.") args = parser.parse_args() - results = query_results(args.eph_branch) + results = query_results(args.pipeline_id) + + if args.download_metrics_dir: + fetch_metrics_files(results, args.download_metrics_dir) if args.test == 'exit': check_exitcodes(results) diff --git a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-2_mcore-false_te-false.json new file mode 100644 index 0000000000..f38be476c4 --- /dev/null +++ b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-2_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [10.51553, 10.51031, 10.52063, 10.52246, 10.51819, 10.50918, 10.43691, 10.29866, 10.16894, 9.98642, 9.91462, 9.78574, 9.67453, 9.55759, 9.50386, 9.35031, 9.34045, 9.27913, 9.27768, 9.20723]}, "num-zeros": {"start_step": 0, "end_step": 84, "step_interval": 5, "values": [21436.0, 21632.0, 23818.0, 19149.0, 23732.0, 18947.0, 19899.0, 26923.0, 24942.0, 25962.0, 15012.0, 34688.0, 26498.0, 21937.0, 37472.0, 28599.0, 23063.0]}, "iteration_timing_avg": 0.25193253731343285} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-4_mcore-false_te-false_vp-2.json b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-4_mcore-false_te-false_vp-2.json new file mode 100644 index 0000000000..941af1117d --- /dev/null +++ b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-4_mcore-false_te-false_vp-2.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.42108, 10.43552, 10.43934, 10.43349, 10.42826, 10.42499, 10.37549, 10.2337, 10.1091, 9.93972]}, "num-zeros": {"start_step": 0, "end_step": 34, "step_interval": 5, "values": [19496.0, 22201.0, 23780.0, 21779.0, 22701.0, 20018.0, 22409.0]}, "iteration_timing_avg": 0.6054652941176473} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-false_te-false.json new file mode 100644 index 0000000000..681919dd63 --- /dev/null +++ b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.46209, 10.46586, 10.47036, 10.48285, 10.46953, 10.4551, 10.4144, 10.27757, 10.15408, 9.98652]}, "num-zeros": {"start_step": 0, "end_step": 34, "step_interval": 5, "values": [19468.0, 20366.0, 23078.0, 23209.0, 20501.0, 21956.0, 23051.0]}, "iteration_timing_avg": 0.48852117647058824} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-true_te-false.json 
b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-true_te-false.json new file mode 100644 index 0000000000..5022434376 --- /dev/null +++ b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.4791, 10.47202, 10.4682, 10.45128, 10.42934, 10.35805, 10.16903, 10.0907, 9.91791, 9.7432]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [2250.0, 1699.0, 2376.0, 2808.0, 2117.0, 2783.0, 2170.0, 2896.0, 1835.0, 2867.0]}, "iteration_timing_avg": 0.63432} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--recompute-granularity-full-recompute-method-uniform-recompute-num-layers-1-.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--recompute-granularity-full-recompute-method-uniform-recompute-num-layers-1-.json deleted file mode 100644 index 33dc6ccf25..0000000000 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--recompute-granularity-full-recompute-method-uniform-recompute-num-layers-1-.json +++ /dev/null @@ -1 +0,0 @@ - {"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.83721, 10.87648, 10.85329, 10.79637, 10.67873, 10.60491, 10.12635, 10.22253, 10.13979, 9.82348]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1589.0, 1913.0, 1924.0, 1876.0, 2005.0, 1749.0, 1631.0, 1981.0, 2346.0, 2380.0]}, "iteration_timing_avg": 0.07807617647058823} diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2.json deleted file mode 100644 index dbab21195c..0000000000 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2.json +++ /dev/null @@ -1 +0,0 @@ -{"lm loss": {"start_step": 0, "end_step": 36, "step_interval": 5, "values": [10.83273, 10.86849, 10.89112, 10.80713, 10.68491, 10.61253, 10.09319, 10.21393]}, "num-zeros": {"start_step": 0, "end_step": 36, "step_interval": 5, "values": [1551.0, 1809.0, 1799.0, 1862.0, 1872.0, 1643.0, 1596.0, 1880.0]}, "iteration_timing_avg": 0.09391500000000001} diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args--position-embedding-type-rope-.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args--position-embedding-type-rope-.json deleted file mode 100644 index 0e1b686347..0000000000 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args--position-embedding-type-rope-.json +++ /dev/null @@ -1 +0,0 @@ - {"lm loss": {"start_step": 0, "end_step": 49, "step_interval": 5, "values": [10.84608, 10.87634, 10.90424, 10.81754, 10.67579, 10.60283, 10.06667, 10.19261, 10.11413, 9.7617]}, "num-zeros": {"start_step": 0, "end_step": 49, "step_interval": 5, "values": [1709.0, 2192.0, 2059.0, 1960.0, 2164.0, 
1846.0, 1614.0, 2074.0, 2176.0, 2249.0]}, "iteration_timing_avg": 0.10411636363636363} diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4.json deleted file mode 100644 index 41ec145eb9..0000000000 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4.json +++ /dev/null @@ -1 +0,0 @@ -{"lm loss": {"start_step": 0, "end_step": 29, "step_interval": 5, "values": [10.79373, 10.86651, 10.89091, 10.78164, 10.66101, 10.58089]}, "num-zeros": {"start_step": 0, "end_step": 29, "step_interval": 5, "values": [1670.0, 1864.0, 1826.0, 1965.0, 1861.0, 1605.0]}, "iteration_timing_avg": 0.12559400000000004} diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--disable-bias-linear.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--disable-bias-linear.json deleted file mode 100644 index 47f6b7f2d7..0000000000 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--disable-bias-linear.json +++ /dev/null @@ -1 +0,0 @@ -{"lm loss": {"start_step": 0, "end_step": 38, "step_interval": 5, "values": [10.79374, 10.86745, 10.89179, 10.78304, 10.66262, 10.58362, 10.08688, 10.19342]}, "num-zeros": {"start_step": 0, "end_step": 38, "step_interval": 5, "values": [1567.0, 1904.0, 1912.0, 1931.0, 1799.0, 1722.0, 1591.0, 1950.0]}, "iteration_timing_avg": 0.12253038461538461} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--sequence-parallel.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--sequence-parallel.json deleted file mode 100644 index 6f18af2e36..0000000000 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--sequence-parallel.json +++ /dev/null @@ -1 +0,0 @@ -{"lm loss": {"start_step": 0, "end_step": 42, "step_interval": 5, "values": [10.79373, 10.86651, 10.89091, 10.78164, 10.66101, 10.58089, 10.08413, 10.19034, 10.13461]}, "num-zeros": {"start_step": 0, "end_step": 42, "step_interval": 5, "values": [1670.0, 1864.0, 1826.0, 1965.0, 1861.0, 1605.0, 1609.0, 1931.0, 2343.0]}, "iteration_timing_avg": 0.12682214285714286} diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--swiglu.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--swiglu.json deleted file mode 100644 index 610578a37a..0000000000 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--swiglu.json +++ /dev/null @@ -1 +0,0 @@ -{"lm loss": {"start_step": 0, "end_step": 29, "step_interval": 5, "values": [10.73353, 10.81676, 10.83941, 10.7586, 10.70146, 10.62786]}, "num-zeros": {"start_step": 0, "end_step": 28, "step_interval": 5, "values": [2536.0, 2988.0, 2925.0, 2895.0, 2617.0, 2603.0]}, "iteration_timing_avg": 0.1284436842105263} diff --git 
a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--untie-embeddings-and-output-weights.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--untie-embeddings-and-output-weights.json deleted file mode 100644 index c707a0a903..0000000000 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--untie-embeddings-and-output-weights.json +++ /dev/null @@ -1 +0,0 @@ -{"lm loss": {"start_step": 0, "end_step": 28, "step_interval": 5, "values": [10.8968, 10.90735, 10.91688, 10.84693, 10.70699, 10.63243]}, "num-zeros": {"start_step": 0, "end_step": 28, "step_interval": 5, "values": [22727844.0, 23021590.0, 22500488.0, 22830910.0, 22739472.0, 22546526.0]}, "iteration_timing_avg": 0.12624631578947368} diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2.json deleted file mode 100644 index 3b63e1c3d0..0000000000 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2.json +++ /dev/null @@ -1 +0,0 @@ -{"lm loss": {"start_step": 0, "end_step": 40, "step_interval": 5, "values": [10.92392, 10.93645, 10.89657, 10.86919, 10.74782, 10.658, 10.15864, 10.24906]}, "num-zeros": {"start_step": 0, "end_step": 40, "step_interval": 5, "values": [1735.0, 1861.0, 2111.0, 1844.0, 1762.0, 1858.0, 1554.0, 2031.0]}, "iteration_timing_avg": 0.14889185185185186} diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1.json deleted file mode 100644 index 74da2480d5..0000000000 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_func-train_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1.json +++ /dev/null @@ -1 +0,0 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.86174, 10.88685, 10.87663, 10.83061, 10.71359, 10.60783, 10.13039, 10.23076, 10.15871, 9.83396]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1747.0, 2204.0, 2061.0, 2108.0, 2163.0, 1914.0, 1682.0, 2267.0, 2474.0, 2569.0]}, "iteration_timing_avg": 0.20121235294117648} diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request-resume_16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request-resume_16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json new file mode 100644 index 0000000000..330e0b9c3b --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request-resume_16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [10.79022, 10.84034, 10.85603, 10.8232, 10.83355, 10.78173, 10.59641, 10.58331, 10.49545, 10.22799, 10.1995, 9.94815, 9.94997, 9.91997, 9.79865, 9.25224, 9.61409, 9.19153, 9.46281, 9.62472]}, "num-zeros": {"start_step": 0, "end_step": 83, "step_interval": 5, "values": [2085.0, 2613.0, 2387.0, 2215.0, 2074.0, 2039.0, 2766.0, 2722.0, 2763.0, 
2395.0, 2859.0, 3089.0, 3405.0, 2982.0, 3134.0, 2896.0, 3986.0]}, "iteration_timing_avg": 0.057955522388059705} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--recompute-granularity-full-recompute-method-uniform-recompute-num-layers-1-_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--recompute-granularity-full-recompute-method-uniform-recompute-num-layers-1-_mcore-true_te-false.json new file mode 100644 index 0000000000..c7c5e0bab9 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--recompute-granularity-full-recompute-method-uniform-recompute-num-layers-1-_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.89293, 10.8995, 10.87875, 10.855, 10.73496, 10.63535, 10.1566, 10.24211, 10.15574, 9.82117]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1653.0, 1779.0, 1911.0, 1928.0, 1880.0, 1881.0, 1618.0, 1983.0, 2375.0, 2352.0]}, "iteration_timing_avg": 0.05425676470588235} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--use-distributed-optimizer_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--use-distributed-optimizer_mcore-false_te-false.json new file mode 100644 index 0000000000..6db1c6fba9 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--use-distributed-optimizer_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.83801, 10.8696, 10.87494, 10.85972, 10.85916, 10.81678, 10.65633, 10.6236, 10.52854, 10.29768]}, "num-zeros": {"start_step": 0, "end_step": 32, "step_interval": 5, "values": [1227.0, 1343.0, 1547.0, 1357.0, 1571.0, 1230.0, 1219.0]}, "iteration_timing_avg": 0.038630588235294125} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args--position-embedding-type-rope-_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args--position-embedding-type-rope-_mcore-true_te-false.json new file mode 100644 index 0000000000..a4f609529b --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args--position-embedding-type-rope-_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85699, 10.89518, 10.87243, 10.82432, 10.68786, 10.58313, 10.08482, 10.18068, 10.10597, 9.75607]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1858.0, 1946.0, 2096.0, 1900.0, 2011.0, 1803.0, 1737.0, 2092.0, 2335.0, 2201.0]}, "iteration_timing_avg": 0.06518264705882353} \ No newline at end of file diff --git 
a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--disable-bias-linear_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--disable-bias-linear_mcore-true_te-false.json new file mode 100644 index 0000000000..ac62b7581a --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--disable-bias-linear_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85535, 10.89042, 10.88142, 10.82973, 10.70858, 10.61199, 10.1184, 10.22418, 10.13702, 9.80781]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1629.0, 1692.0, 1882.0, 1929.0, 1936.0, 1669.0, 1603.0, 1903.0, 2128.0, 2278.0]}, "iteration_timing_avg": 0.07373852941176468} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--sequence-parallel_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--sequence-parallel_mcore-true_te-false.json new file mode 100644 index 0000000000..cfde369603 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--sequence-parallel_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85534, 10.88947, 10.8806, 10.8283, 10.70687, 10.60921, 10.11533, 10.22106, 10.13408, 9.80477]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1669.0, 1816.0, 1897.0, 1831.0, 1824.0, 1649.0, 1484.0, 1877.0, 2140.0, 2202.0]}, "iteration_timing_avg": 0.07589941176470587} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--swiglu_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--swiglu_mcore-true_te-false.json new file mode 100644 index 0000000000..42d4cd72ba --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--swiglu_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.78152, 10.8477, 10.85991, 10.80229, 10.72398, 10.64556, 10.25979, 10.36953, 10.30726, 9.969]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [2441.0, 2962.0, 2986.0, 2963.0, 2701.0, 2657.0, 2300.0, 2619.0, 2655.0, 2484.0]}, "iteration_timing_avg": 0.07880588235294116} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--untie-embeddings-and-output-weights_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--untie-embeddings-and-output-weights_mcore-true_te-false.json new file mode 100644 index 0000000000..2800068b0b --- /dev/null +++ 
b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--untie-embeddings-and-output-weights_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.91778, 10.93688, 10.92414, 10.85264, 10.74695, 10.66448, 10.16759, 10.27157, 10.17695, 9.86116]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [22728092.0, 23020904.0, 22500632.0, 22830582.0, 22739828.0, 22547742.0, 22955712.0, 22588520.0, 22658932.0, 22885368.0]}, "iteration_timing_avg": 0.07554499999999999} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--use-distributed-optimizer-overlap-grad-reduce_mcore-false_te-false_vp-1.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--use-distributed-optimizer-overlap-grad-reduce_mcore-false_te-false_vp-1.json new file mode 100644 index 0000000000..d2758ca67b --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--use-distributed-optimizer-overlap-grad-reduce_mcore-false_te-false_vp-1.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.8214, 10.8607, 10.8735, 10.85187, 10.84091, 10.80629, 10.6169, 10.59573, 10.50423, 10.22237]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2381.0, 2498.0, 2552.0, 2166.0, 2258.0, 2542.0, 2425.0]}, "iteration_timing_avg": 0.07675470588235295} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false_vp-1.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false_vp-1.json new file mode 100644 index 0000000000..ad49a6aa83 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false_vp-1.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.8214, 10.8607, 10.8735, 10.85187, 10.84091, 10.80628, 10.6169, 10.59573, 10.50423, 10.22238]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2348.0, 2524.0, 2517.0, 2205.0, 2198.0, 2558.0, 2398.0]}, "iteration_timing_avg": 0.07661735294117648} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false_vp-1.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false_vp-1.json new file mode 100644 index 0000000000..f2b584f1a7 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false_vp-1.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84693, 10.89428, 10.88918, 10.82635, 10.70816, 10.61006, 10.11963, 10.22999, 10.15774, 9.83337]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1636.0, 1747.0, 1846.0, 1868.0, 1856.0, 1652.0, 1638.0, 1903.0, 2315.0, 
2381.0]}, "iteration_timing_avg": 0.07899852941176469} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args--sequence-parallel-num-experts-8-expert-model-parallel-size-2-_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args--sequence-parallel-num-experts-8-expert-model-parallel-size-2-_mcore-true_te-false.json new file mode 100644 index 0000000000..8c98a7e5ab --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args--sequence-parallel-num-experts-8-expert-model-parallel-size-2-_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79006, 10.84111, 10.85509, 10.77861, 10.65335, 10.5612, 10.0453, 10.17548, 10.08263, 9.73342]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [62799.0, 65700.0, 66095.0, 65614.0, 64292.0, 65219.0, 63857.0, 66058.0, 67089.0, 67822.0]}, "iteration_timing_avg": 0.30804088235294114} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-false.json new file mode 100644 index 0000000000..9f7df4510a --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85706, 10.8832, 10.88511, 10.87562, 10.8708, 10.83108, 10.65065, 10.63723, 10.53201, 10.25681]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2479.0, 2534.0, 2786.0, 2310.0, 2385.0, 2586.0, 2472.0]}, "iteration_timing_avg": 0.0920511764705882} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-true.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-true.json new file mode 100644 index 0000000000..4b0cfd6b44 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-true.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85899, 10.88286, 10.87687, 10.82429, 10.69664, 10.60784, 10.11662, 10.2347, 10.14673, 9.82429]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1627.0, 1874.0, 1894.0, 1862.0, 1901.0, 1649.0, 1553.0, 1949.0, 2281.0, 2225.0]}, "iteration_timing_avg": 0.09437176470588234} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-true_te-false.json new file mode 100644 index 0000000000..92e1f21efc --- /dev/null +++ 
b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.86873, 10.891, 10.89716, 10.84022, 10.70435, 10.61599, 10.11661, 10.23183, 10.14875, 9.82429]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1619.0, 1839.0, 1712.0, 1853.0, 1810.0, 1682.0, 1567.0, 1997.0, 2186.0, 2376.0]}, "iteration_timing_avg": 0.0935938235294118} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args--use-distributed-optimizer-overlap-grad-reduce_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args--use-distributed-optimizer-overlap-grad-reduce_mcore-false_te-false.json new file mode 100644 index 0000000000..4d473a5e7e --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args--use-distributed-optimizer-overlap-grad-reduce_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84879, 10.88953, 10.88082, 10.88243, 10.86947, 10.83539, 10.64785, 10.63863, 10.52242, 10.23812]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2301.0, 2328.0, 2414.0, 1973.0, 2168.0, 2471.0, 2419.0]}, "iteration_timing_avg": 0.120935} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--overlap-grad-reduce_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--overlap-grad-reduce_mcore-false_te-false.json new file mode 100644 index 0000000000..a042df661f --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--overlap-grad-reduce_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.83801, 10.8696, 10.87494, 10.85972, 10.85916, 10.81678, 10.65633, 10.6236, 10.52854, 10.29768]}, "num-zeros": {"start_step": 0, "end_step": 32, "step_interval": 5, "values": [1304.0, 1403.0, 1377.0, 1380.0, 1272.0, 1176.0, 1272.0]}, "iteration_timing_avg": 0.04439352941176471} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--use-distributed-optimizer-overlap-grad-reduce-_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--use-distributed-optimizer-overlap-grad-reduce-_mcore-false_te-false.json new file mode 100644 index 0000000000..35f8847c88 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--use-distributed-optimizer-overlap-grad-reduce-_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.83801, 10.8696, 10.87494, 10.85972, 10.85916, 10.81678, 10.65633, 10.6236, 10.52854, 10.29768]}, "num-zeros": {"start_step": 0, "end_step": 32, "step_interval": 5, "values": 
[1227.0, 1343.0, 1547.0, 1357.0, 1571.0, 1230.0, 1219.0]}, "iteration_timing_avg": 0.03908823529411766} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_mcore-false_te-false.json new file mode 100644 index 0000000000..d1b26c3e5a --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79022, 10.84034, 10.85603, 10.82319, 10.83355, 10.78173, 10.59641, 10.58331, 10.49545, 10.22799]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2130.0, 2531.0, 2368.0, 2204.0, 2141.0, 2068.0, 2772.0]}, "iteration_timing_avg": 0.05724441176470588} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_mcore-true_te-false.json new file mode 100644 index 0000000000..49c0ec8442 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85892, 10.88861, 10.86994, 10.82442, 10.69985, 10.60452, 10.11465, 10.21649, 10.13247, 9.80078]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1630.0, 1743.0, 1840.0, 1746.0, 1857.0, 1749.0, 1522.0, 1957.0, 2244.0, 2275.0]}, "iteration_timing_avg": 0.05806264705882354} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--overlap-grad-reduce_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--overlap-grad-reduce_mcore-false_te-false.json new file mode 100644 index 0000000000..33edc35038 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--overlap-grad-reduce_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.76735, 10.82061, 10.85176, 10.80762, 10.80235, 10.75942, 10.55108, 10.55646, 10.48053, 10.18986]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2463.0, 2560.0, 2625.0, 2343.0, 2301.0, 2659.0, 2515.0]}, "iteration_timing_avg": 0.07604500000000002} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--overlap-grad-reduce_mcore-false_te-false_vp-1.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--overlap-grad-reduce_mcore-false_te-false_vp-1.json new file mode 100644 index 0000000000..9caed9a476 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--overlap-grad-reduce_mcore-false_te-false_vp-1.json @@ -0,0 +1 @@ 
+{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.8214, 10.8607, 10.8735, 10.85187, 10.84091, 10.80628, 10.6169, 10.59573, 10.50423, 10.22238]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2348.0, 2524.0, 2517.0, 2205.0, 2198.0, 2558.0, 2398.0]}, "iteration_timing_avg": 0.07640823529411767} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false.json new file mode 100644 index 0000000000..c9fed16590 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.76735, 10.82061, 10.85176, 10.80762, 10.80235, 10.75942, 10.55108, 10.55646, 10.48053, 10.18986]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2463.0, 2560.0, 2625.0, 2343.0, 2301.0, 2659.0, 2515.0]}, "iteration_timing_avg": 0.07574117647058824} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false.json new file mode 100644 index 0000000000..f78097878b --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85534, 10.88947, 10.8806, 10.8283, 10.70687, 10.60921, 10.11533, 10.22106, 10.13408, 9.80477]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1669.0, 1816.0, 1897.0, 1831.0, 1824.0, 1649.0, 1484.0, 1877.0, 2140.0, 2202.0]}, "iteration_timing_avg": 0.07627117647058825} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--num-experts-2-_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--num-experts-2-_mcore-true_te-false.json new file mode 100644 index 0000000000..198829bc86 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--num-experts-2-_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.78716, 10.84699, 10.85759, 10.78461, 10.67832, 10.57601, 10.12353, 10.23947, 10.14691, 9.8453]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [2854.0, 3564.0, 3434.0, 3325.0, 3414.0, 3098.0, 2890.0, 3447.0, 3763.0, 3722.0]}, "iteration_timing_avg": 0.1694220588235294} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--num-experts-4-_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--num-experts-4-_mcore-false_te-false.json new file mode 100644 
index 0000000000..e9f91c3218 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--num-experts-4-_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.83396, 10.86879, 10.87134, 10.85907, 10.8533, 10.82064, 10.63379, 10.6223, 10.54684, 10.28702]}, "num-zeros": {"start_step": 0, "end_step": 34, "step_interval": 5, "values": [8033.0, 8627.0, 7962.0, 8736.0, 9022.0, 8598.0, 9184.0]}, "iteration_timing_avg": 0.24976352941176466} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--overlap-grad-reduce_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--overlap-grad-reduce_mcore-false_te-false.json new file mode 100644 index 0000000000..66db39da61 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--overlap-grad-reduce_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85706, 10.8832, 10.88511, 10.87562, 10.8708, 10.83108, 10.65065, 10.63723, 10.53201, 10.25681]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2479.0, 2534.0, 2786.0, 2310.0, 2385.0, 2586.0, 2472.0]}, "iteration_timing_avg": 0.08829235294117646} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--sequence-parallel-num-experts-4-expert-model-parallel-size-2-_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--sequence-parallel-num-experts-4-expert-model-parallel-size-2-_mcore-true_te-false.json new file mode 100644 index 0000000000..8406f71c56 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--sequence-parallel-num-experts-4-expert-model-parallel-size-2-_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.82019, 10.86146, 10.84723, 10.80694, 10.71538, 10.62576, 10.19501, 10.29544, 10.20202, 9.89846]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [7232.0, 8819.0, 8924.0, 8402.0, 7411.0, 8004.0, 6922.0, 8255.0, 8761.0, 8825.0]}, "iteration_timing_avg": 0.18263705882352937} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args--overlap-grad-reduce_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args--overlap-grad-reduce_mcore-false_te-false.json new file mode 100644 index 0000000000..241acc5584 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args--overlap-grad-reduce_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84879, 10.88953, 10.88082, 10.88243, 10.86947, 10.8354, 10.64786, 10.63862, 10.52242, 10.23812]}, "num-zeros": {"start_step": 0, "end_step": 33, 
"step_interval": 5, "values": [2301.0, 2328.0, 2442.0, 1993.0, 2210.0, 2464.0, 2376.0]}, "iteration_timing_avg": 0.12472558823529412} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_mcore-false_te-false.json new file mode 100644 index 0000000000..cf0bfe8b21 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84879, 10.88953, 10.88082, 10.88243, 10.86947, 10.8354, 10.64786, 10.63862, 10.52242, 10.23812]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2301.0, 2328.0, 2442.0, 1993.0, 2210.0, 2464.0, 2376.0]}, "iteration_timing_avg": 0.1177205882352941} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_mcore-true_te-false.json new file mode 100644 index 0000000000..65ce4c00d4 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84429, 10.86285, 10.86177, 10.81154, 10.69313, 10.61794, 10.16497, 10.25034, 10.15227, 9.83669]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1932.0, 2302.0, 2138.0, 2132.0, 2358.0, 2122.0, 1902.0, 2296.0, 2565.0, 2589.0]}, "iteration_timing_avg": 0.13276323529411763} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/t5_220m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-1_mcore-true_te-true_vp-1.json b/tests/functional_tests/test_results/jet/t5_220m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-1_mcore-true_te-true_vp-1.json new file mode 100644 index 0000000000..8257f4c707 --- /dev/null +++ b/tests/functional_tests/test_results/jet/t5_220m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-1_mcore-true_te-true_vp-1.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [10.34848, 9.45337, 8.89369, 8.56467, 8.28131, 8.12832, 7.82238, 7.55462, 7.42172, 7.28716, 7.32811, 7.22045, 7.11648, 7.03859, 6.87728, 6.94356, 6.94705, 7.02828, 6.71597, 6.9486]}, "num-zeros": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [43307.0, 40999.0, 44043.0, 41749.0, 44811.0, 44001.0, 41304.0, 42490.0, 44698.0, 43956.0, 41137.0, 43230.0, 39726.0, 45427.0, 43358.0, 43930.0, 45426.0, 45701.0, 46301.0, 44734.0]}, "iteration_timing_avg": 0.1228444776119403} \ No newline at end of file diff --git a/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_resume_checkpoint_test.sh b/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_resume_checkpoint_test.sh index 48dccc39d6..1b1920f7ac 100755 --- a/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_resume_checkpoint_test.sh +++ 
b/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_resume_checkpoint_test.sh @@ -13,6 +13,8 @@ do done echo "---------------------------------" +if [[ -z $VOCAB_FILE ]]; then VOCAB_FILE="/workspace/data/bert_data/vocab.txt" ; fi + GPUS_PER_NODE=8 # Change for multinode config MASTER_ADDR=localhost @@ -48,7 +50,7 @@ torchrun $DISTRIBUTED_ARGS \ --save $CHECKPOINT_PATH \ --load $CHECKPOINT_PATH \ --data-path $DATA_PATH \ - --vocab-file /workspace/data/bert_data/vocab.txt \ + --vocab-file $VOCAB_FILE \ --split 949,50,1 \ --distributed-backend nccl \ --lr 0.0001 \ @@ -61,6 +63,7 @@ torchrun $DISTRIBUTED_ARGS \ --tensor-model-parallel-size $TP_SIZE \ --pipeline-model-parallel-size $PP_SIZE \ --no-gradient-accumulation-fusion \ + ${DATA_CACHE:+--data-cache-path "$DATA_CACHE"} \ --fp16 echo 50 > $CHECKPOINT_PATH/latest_checkpointed_iteration.txt @@ -88,7 +91,7 @@ torchrun $DISTRIBUTED_ARGS \ --save $CHECKPOINT_PATH \ --load $CHECKPOINT_PATH \ --data-path $DATA_PATH \ - --vocab-file /workspace/data/bert_data/vocab.txt \ + --vocab-file $VOCAB_FILE \ --split 949,50,1 \ --distributed-backend nccl \ --lr 0.0001 \ @@ -101,4 +104,5 @@ torchrun $DISTRIBUTED_ARGS \ --tensor-model-parallel-size $TP_SIZE \ --pipeline-model-parallel-size $PP_SIZE \ --no-gradient-accumulation-fusion \ - --fp16 \ No newline at end of file + ${DATA_CACHE:+--data-cache-path "$DATA_CACHE"} \ + --fp16 diff --git a/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_test.sh b/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_test.sh index 11f427276c..23508c3290 100755 --- a/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_test.sh +++ b/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_test.sh @@ -15,6 +15,7 @@ echo "---------------------------------" set -x if [[ -z $MBS ]]; then MBS=4; fi if [[ -z $GBS ]]; then GBS=128; fi +if [[ -z $VOCAB_FILE ]]; then VOCAB_FILE="/workspace/data/bert_data/vocab.txt" ; fi # Change for multinode config GPUS_PER_NODE=8 @@ -58,7 +59,7 @@ torch_run_cmd="torchrun $DISTRIBUTED_ARGS \ --save $CHECKPOINT_PATH \ --load $CHECKPOINT_PATH \ --data-path $DATA_PATH \ - --vocab-file /workspace/data/bert_data/vocab.txt \ + --vocab-file $VOCAB_FILE \ --split 949,50,1 \ --distributed-backend nccl \ --lr 0.0001 \ @@ -74,6 +75,7 @@ torch_run_cmd="torchrun $DISTRIBUTED_ARGS \ ${USE_MCORE:+--use-mcore-models} \ ${ADDITIONAL_PARAMS:+$ADDITIONAL_PARAMS} \ --no-gradient-accumulation-fusion \ + ${DATA_CACHE:+--data-cache-path "$DATA_CACHE"} \ --${TRAINING_DTYPE}" if [[ "${TRAINING_DTYPE}" == "fp16" ]]; then diff --git a/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_resume_checkpoint_test.sh b/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_resume_checkpoint_test.sh index c38cdf5b01..cb9ccf68f0 100755 --- a/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_resume_checkpoint_test.sh +++ b/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_resume_checkpoint_test.sh @@ -12,6 +12,9 @@ do done echo "---------------------------------" +if [[ -z $VOCAB_FILE ]]; then VOCAB_FILE="/workspace/data/gpt3_data/vocab.json" ; fi +if [[ -z $MERGE_FILE ]]; then MERGE_FILE="/workspace/data/gpt3_data/merges.txt" ; fi + GPUS_PER_NODE=8 # Change for multinode config MASTER_ADDR=localhost @@ -47,8 +50,8 @@ torchrun $DISTRIBUTED_ARGS \ --save $CHECKPOINT_PATH \ --load $CHECKPOINT_PATH \ --data-path $DATA_PATH \ - --vocab-file /workspace/data/gpt3_data/gpt2-vocab.json \ - --merge-file 
/workspace/data/gpt3_data/gpt2-merges.txt \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ --split 949,50,1 \ --distributed-backend nccl \ --lr 0.00015 \ @@ -66,6 +69,7 @@ torchrun $DISTRIBUTED_ARGS \ --no-gradient-accumulation-fusion \ --no-bias-swiglu-fusion \ --no-rope-fusion \ + ${DATA_CACHE:+--data-cache-path "$DATA_CACHE"} \ --fp16 echo 50 > $CHECKPOINT_PATH/latest_checkpointed_iteration.txt @@ -93,8 +97,8 @@ torchrun $DISTRIBUTED_ARGS \ --save $CHECKPOINT_PATH \ --load $CHECKPOINT_PATH \ --data-path $DATA_PATH \ - --vocab-file /workspace/data/gpt3_data/gpt2-vocab.json \ - --merge-file /workspace/data/gpt3_data/gpt2-merges.txt \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ --split 949,50,1 \ --distributed-backend nccl \ --lr 0.00015 \ @@ -110,5 +114,6 @@ torchrun $DISTRIBUTED_ARGS \ --tensor-model-parallel-size $TP_SIZE \ --pipeline-model-parallel-size $PP_SIZE \ --no-gradient-accumulation-fusion \ + ${DATA_CACHE:+--data-cache-path "$DATA_CACHE"} \ --fp16 diff --git a/tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_resume_checkpoint_test.sh b/tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_resume_checkpoint_test.sh index fa4d62667a..dc5bdbab3b 100755 --- a/tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_resume_checkpoint_test.sh +++ b/tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_resume_checkpoint_test.sh @@ -15,6 +15,7 @@ echo "---------------------------------" set -x if [[ -z $MBS ]]; then MBS=4; fi if [[ -z $GBS ]]; then GBS=32; fi +if [[ -z $VOCAB_PATH ]]; then VOCAB_PATH="/workspace/data/t5_data/bert-large-cased-vocab.txt"; fi GPUS_PER_NODE=8 # Change for multinode config @@ -76,7 +77,7 @@ torch_run_cmd="torchrun $DISTRIBUTED_ARGS \ --global-batch-size ${GBS:-32} \ --lr 0.0001 \ --train-iters 100 \ - --lr-decay-iters $MAX_STEPS \ + --lr-decay-iters 100 \ --lr-decay-style linear \ --min-lr 0.00001 \ --weight-decay 1e-2 \ @@ -104,6 +105,7 @@ torch_run_cmd="torchrun $DISTRIBUTED_ARGS \ --eval-interval 1000 \ --eval-iters 10 \ --distributed-backend nccl \ + ${DATA_CACHE:+--data-cache-path "$DATA_CACHE"} \ ${ADDITIONAL_PARAMS:+$ADDITIONAL_PARAMS}" command1="$command $torch_run_cmd" @@ -133,7 +135,7 @@ torch_run_cmd="torchrun $DISTRIBUTED_ARGS \ --global-batch-size ${GBS:-32} \ --lr 0.0001 \ --train-iters 100 \ - --lr-decay-iters $MAX_STEPS \ + --lr-decay-iters 100 \ --lr-decay-style linear \ --min-lr 0.00001 \ --weight-decay 1e-2 \ @@ -161,6 +163,7 @@ torch_run_cmd="torchrun $DISTRIBUTED_ARGS \ --eval-interval 1000 \ --eval-iters 10 \ --distributed-backend nccl \ + ${DATA_CACHE:+--data-cache-path "$DATA_CACHE"} \ ${ADDITIONAL_PARAMS:+$ADDITIONAL_PARAMS}" command2="$command $torch_run_cmd" @@ -169,4 +172,4 @@ echo "$command2" echo "-----------------------------------------------------------------------------" echo "$command2" >> $SCRIPTS_DIR/pretrain_t5_distributed_command.sh -eval $command2 \ No newline at end of file +eval $command2 diff --git a/tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_test.sh b/tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_test.sh index 90d78f4917..fae02fb755 100755 --- a/tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_test.sh +++ b/tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_test.sh @@ -15,6 +15,7 @@ echo "---------------------------------" set -x if [[ -z $MBS ]]; then MBS=4; fi if [[ -z $GBS ]]; then GBS=32; fi +if [[ -z $VOCAB_PATH ]]; then VOCAB_PATH="/workspace/data/t5_data/bert-large-cased-vocab.txt"; 
fi GPUS_PER_NODE=8 # Change for multinode config @@ -103,6 +104,7 @@ torch_run_cmd="torchrun $DISTRIBUTED_ARGS \ --eval-interval 1000 \ --eval-iters 10 \ --distributed-backend nccl \ + ${DATA_CACHE:+--data-cache-path "$DATA_CACHE"} \ ${ADDITIONAL_PARAMS:+$ADDITIONAL_PARAMS}" command="$command $torch_run_cmd" @@ -111,4 +113,4 @@ echo "$command" echo "-----------------------------------------------------------------------------" echo "$command" > $SCRIPTS_DIR/pretrain_t5_distributed_command.sh -eval $command \ No newline at end of file +eval $command From 50f83844c198254aa084c7bd17f443ce897891cb Mon Sep 17 00:00:00 2001 From: shanmugamr Date: Thu, 1 Feb 2024 12:28:34 -0800 Subject: [PATCH 248/296] Fixing bugs in inference and adding mcore support --- examples/detxoify_lm/generate_samples_gpt.py | 57 ++++++++++++++++++-- megatron/model/transformer.py | 6 +-- 2 files changed, 55 insertions(+), 8 deletions(-) diff --git a/examples/detxoify_lm/generate_samples_gpt.py b/examples/detxoify_lm/generate_samples_gpt.py index 47e1590ea5..8c5b621510 100644 --- a/examples/detxoify_lm/generate_samples_gpt.py +++ b/examples/detxoify_lm/generate_samples_gpt.py @@ -18,14 +18,61 @@ from megatron.model import GPTModel from megatron.training import get_model from megatron.text_generation import generate_and_post_process +from megatron.arguments import core_transformer_config_from_args +from megatron.core.models.gpt import GPTModel +from typing import Union +import megatron.model +from megatron.core.transformer.spec_utils import import_module +from megatron.arguments import core_transformer_config_from_args +from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec +def model_provider(pre_process=True, post_process=True) -> Union[GPTModel, megatron.model.GPTModel]: """Builds the model. -def model_provider(pre_process=True, post_process=True): - """Build the model.""" + If you set use_mcore_models to True, it will return the mcore GPT model; if not, the legacy GPT model. + + Args: + pre_process (bool, optional): Set to true if you need to compute embeddings. Defaults to True. + post_process (bool, optional): Set to true if you want to compute output logits/loss. Defaults to True. + + + Returns: + Union[GPTModel, megatron.model.GPTModel]: The returned model + """ + args = get_args() print_rank_0('building GPT model ...') - model = GPTModel(num_tokentypes=0, parallel_output=False, - pre_process=pre_process, post_process=post_process) + config = core_transformer_config_from_args(get_args()) + + if args.use_mcore_models: + if args.spec is not None: + transformer_layer_spec = import_module(args.spec) + else: + transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec(args.num_experts, args.moe_grouped_gemm) + + model = GPTModel( + config=config, + transformer_layer_spec=transformer_layer_spec, + vocab_size=args.padded_vocab_size, + max_sequence_length=args.max_position_embeddings, + pre_process=pre_process, + post_process=post_process, + fp16_lm_cross_entropy=args.fp16_lm_cross_entropy, + parallel_output=True, + share_embeddings_and_output_weights=not args.untie_embeddings_and_output_weights, + position_embedding_type=args.position_embedding_type, + rotary_percent=args.rotary_percent + ) + else: + assert(args.context_parallel_size == 1), "Context parallelism is only supported with Megatron Core!" 
+ + model = megatron.model.GPTModel( + config, + num_tokentypes=0, + parallel_output=True, + pre_process=pre_process, + post_process=post_process + ) return model @@ -103,7 +150,7 @@ def generate_samples_conditional(model): fname = open(args.sample_input_file, "r") lines = fname.readlines() - all_raw_text = [json.loads(line)['prompt']['text'] for line in lines] + all_raw_text = lines #[json.loads(line)['prompt']['text'] for line in lines] input_count = len(all_raw_text) input_pos = 0 diff --git a/megatron/model/transformer.py b/megatron/model/transformer.py index 8a47171d38..c4a221fe9a 100644 --- a/megatron/model/transformer.py +++ b/megatron/model/transformer.py @@ -509,7 +509,7 @@ def __init__(self, config, layer_number, self.attn_mask_type = attn_mask_type self.params_dtype = config.params_dtype self.sequence_parallel = config.sequence_parallel - + self.config = config self.group_query_attention = args.group_query_attention self.num_query_groups = args.num_query_groups @@ -783,8 +783,8 @@ def forward(self, hidden_states, attention_mask, # apply relative positional encoding (rotary embedding) if rotary_pos_emb is not None: q_pos_emb, k_pos_emb = rotary_pos_emb - query_layer = apply_rotary_pos_emb(query_layer, q_pos_emb) - key_layer = apply_rotary_pos_emb(key_layer, k_pos_emb) + query_layer = apply_rotary_pos_emb(query_layer, q_pos_emb,self.config) + key_layer = apply_rotary_pos_emb(key_layer, k_pos_emb,self.config) # TODO, can apply positional embedding to value_layer so it has # absolute positional embedding. # otherwise, only relative positional embedding takes effect From 7329f7386ee7d3ac06c147cb4b94b705ac662aff Mon Sep 17 00:00:00 2001 From: shanmugamr Date: Thu, 1 Feb 2024 12:29:52 -0800 Subject: [PATCH 249/296] Fixing bugs in inference and adding mcore support --- examples/detxoify_lm/generate_samples_gpt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/detxoify_lm/generate_samples_gpt.py b/examples/detxoify_lm/generate_samples_gpt.py index 8c5b621510..f308c6e854 100644 --- a/examples/detxoify_lm/generate_samples_gpt.py +++ b/examples/detxoify_lm/generate_samples_gpt.py @@ -150,7 +150,7 @@ def generate_samples_conditional(model): fname = open(args.sample_input_file, "r") lines = fname.readlines() - all_raw_text = lines #[json.loads(line)['prompt']['text'] for line in lines] + all_raw_text = [json.loads(line)['prompt']['text'] for line in lines] input_count = len(all_raw_text) input_pos = 0 From 376337d41477f1f2c2787476062b4d48c813cd21 Mon Sep 17 00:00:00 2001 From: shanmugamr Date: Thu, 1 Feb 2024 12:30:24 -0800 Subject: [PATCH 250/296] Fixing bugs in inference and adding mcore support --- examples/detxoify_lm/generate_samples_gpt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/detxoify_lm/generate_samples_gpt.py b/examples/detxoify_lm/generate_samples_gpt.py index f308c6e854..cb5a731e11 100644 --- a/examples/detxoify_lm/generate_samples_gpt.py +++ b/examples/detxoify_lm/generate_samples_gpt.py @@ -42,7 +42,7 @@ def model_provider(pre_process=True, post_process=True) -> Union[GPTModel, megat args = get_args() print_rank_0('building GPT model ...') - config = core_transformer_config_from_args(get_args()) + config = core_transformer_config_from_args(args) if args.use_mcore_models: if args.spec is not None: From d91c5a60fd4ea12d41e98b3f4c75ce9e8210e242 Mon Sep 17 00:00:00 2001 From: shanmugamr Date: Thu, 1 Feb 2024 14:22:56 -0800 Subject: [PATCH 251/296] Fixing bugs in inference and adding mcore support --- 
examples/detxoify_lm/generate_samples_gpt.py | 25 ++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/examples/detxoify_lm/generate_samples_gpt.py b/examples/detxoify_lm/generate_samples_gpt.py index cb5a731e11..da12bbd7dc 100644 --- a/examples/detxoify_lm/generate_samples_gpt.py +++ b/examples/detxoify_lm/generate_samples_gpt.py @@ -24,7 +24,7 @@ import megatron.model from megatron.core.transformer.spec_utils import import_module from megatron.arguments import core_transformer_config_from_args -from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec +from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec, get_gpt_layer_local_spec def model_provider(pre_process=True, post_process=True) -> Union[GPTModel, megatron.model.GPTModel]: """Builds the model. @@ -45,10 +45,27 @@ def model_provider(pre_process=True, post_process=True) -> Union[GPTModel, megat config = core_transformer_config_from_args(args) if args.use_mcore_models: - if args.spec is not None: - transformer_layer_spec = import_module(args.spec) + + if args.spec is None: + if args.transformer_impl == 'local': + transformer_layer_spec = get_gpt_layer_local_spec( + num_experts=args.num_experts, + moe_grouped_gemm=args.moe_grouped_gemm + ) + elif args.transformer_impl == 'transformer_engine': + transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec( + num_experts=args.num_experts, + moe_grouped_gemm=args.moe_grouped_gemm + ) + else: + raise ValueError(f"Invalid transformer_impl {args.transformer_impl}") + elif args.spec[0] == 'local': + transformer_layer_spec = get_gpt_layer_local_spec( + num_experts=args.num_experts, + moe_grouped_gemm=args.moe_grouped_gemm + ) else: - transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec(args.num_experts, args.moe_grouped_gemm) + transformer_layer_spec = import_module(args.spec) model = GPTModel( config=config, From 075d5b0216ef8b88ef46b3ee3b934e0b1f4dc168 Mon Sep 17 00:00:00 2001 From: Shiqing Fan Date: Thu, 1 Feb 2024 16:40:27 -0800 Subject: [PATCH 252/296] rename test_switch_mlp to test_sequential_mlp --- .../models/{test_switch_mlp.py => test_sequential_mlp.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename tests/unit_tests/dist_checkpointing/models/{test_switch_mlp.py => test_sequential_mlp.py} (100%) diff --git a/tests/unit_tests/dist_checkpointing/models/test_switch_mlp.py b/tests/unit_tests/dist_checkpointing/models/test_sequential_mlp.py similarity index 100% rename from tests/unit_tests/dist_checkpointing/models/test_switch_mlp.py rename to tests/unit_tests/dist_checkpointing/models/test_sequential_mlp.py From 680b67c881b7b14a7bda32228f739fc27e88b429 Mon Sep 17 00:00:00 2001 From: Aishwarya Bhandare Date: Thu, 1 Feb 2024 17:44:27 -0800 Subject: [PATCH 253/296] Move Megatron timer to core --- megatron/core/__init__.py | 2 + megatron/{ => core}/timers.py | 244 ++++++++++++++++++++++------------ megatron/global_vars.py | 2 +- 3 files changed, 165 insertions(+), 83 deletions(-) rename megatron/{ => core}/timers.py (55%) diff --git a/megatron/core/__init__.py b/megatron/core/__init__.py index 2858dc692d..b4165eb23d 100644 --- a/megatron/core/__init__.py +++ b/megatron/core/__init__.py @@ -4,6 +4,7 @@ from megatron.core.distributed import DistributedDataParallel from megatron.core.inference_params import InferenceParams from megatron.core.model_parallel_config import ModelParallelConfig +from megatron.core.timers import Timers # Alias parallel_state 
as mpu, its legacy name mpu = parallel_state @@ -15,4 +16,5 @@ "DistributedDataParallel", "InferenceParams", "ModelParallelConfig", + "Timers", ] diff --git a/megatron/timers.py b/megatron/core/timers.py similarity index 55% rename from megatron/timers.py rename to megatron/core/timers.py index e64d41e044..672a79f531 100644 --- a/megatron/timers.py +++ b/megatron/core/timers.py @@ -2,16 +2,14 @@ """Megatron timers.""" -from abc import ABC -from abc import abstractmethod import time +from abc import ABC, abstractmethod +from typing import List import torch - class TimerBase(ABC): - def __init__(self, name): self.name = name @@ -32,9 +30,7 @@ def elapsed(self, reset=True, barrier=False): pass - class DummyTimer(TimerBase): - def __init__(self): super().__init__('dummy timer') @@ -48,13 +44,13 @@ def reset(self): return def elapsed(self, reset=True, barrier=False): - raise Exception('dummy timer should not be used to ' - 'calculate elapsed time') - + raise Exception('dummy timer should not be used to calculate elapsed time') class Timer(TimerBase): """ + Timer class with ability to start/stop. + Comment on using `barrier`: If this flag is passed, then all the caller processes will wait till all reach the timing routine. It is up to the user to make sure all the ranks in `barrier_group` @@ -64,21 +60,32 @@ class Timer(TimerBase): """ def __init__(self, name): + """Initialize Timer. + + Args: + name (str): Name of the timer. + """ super().__init__(name) self._elapsed = 0.0 - self._active_time = 0.0 self._started = False # Note that None will default to the global process group self._barrier_group = None self._start_time = time.time() - def set_barrier_group(self, barrier_group): - self._barrier_group = barrier_group + """Sets barrier group. + Args: + barrier_group (ProcessGroup): Torch ProcessGroup for barrier. + """ + self._barrier_group = barrier_group def start(self, barrier=False): - """Start the timer.""" + """Start the timer. + + Args: + barrier (bool, optional): Synchronizes ranks before starting. Defaults to False. + """ assert not self._started, 'timer has already been started' if barrier: torch.distributed.barrier(group=self._barrier_group) @@ -86,28 +93,35 @@ def start(self, barrier=False): self._start_time = time.time() self._started = True - def stop(self, barrier=False): - """Stop the timer.""" + """Stop the timer. + + Args: + barrier (bool, optional): Synchronizes ranks before stopping. Defaults to False. + """ assert self._started, 'timer is not started' if barrier: torch.distributed.barrier(group=self._barrier_group) torch.cuda.synchronize() - elapsed = time.time() - self._start_time - self._elapsed += elapsed - self._active_time += elapsed + self._elapsed += time.time() - self._start_time self._started = False - def reset(self): - """Reset timer.""" - # Don't reset _active_time + """Reset timer. + """ self._elapsed = 0.0 self._started = False - def elapsed(self, reset=True, barrier=False): - """Calculate the elapsed time.""" + """Calculates the elapsed time and restarts timer. + + Args: + reset (bool, optional): Resets timer before restarting. Defaults to True. + barrier (bool, optional): Synchronizes ranks before stopping. Defaults to False. + + Returns: + float: Elapsed time. + """ _started = self._started # If the timing in progress, end it first. 
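A minimal usage sketch of the Timer class being moved here, assuming a single-process run on a CUDA-capable machine so the optional barrier/group arguments can stay at their defaults; the timer name and the sleep call are placeholders for real work, and the import path follows the new megatron/core/timers.py location introduced by this commit.

import time
from megatron.core.timers import Timer

t = Timer('forward')
t.start()          # optional dist barrier, then torch.cuda.synchronize() and a time.time() stamp
time.sleep(0.1)    # stand-in for the timed region
t.stop()           # synchronizes again and accumulates the elapsed seconds
# elapsed() returns the accumulated seconds; with reset=True it zeroes the
# accumulator and restarts the timer if it was still running when called.
print('{}: {:.2f} ms'.format(t.name, t.elapsed(reset=True) * 1000))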
if self._started: @@ -122,40 +136,51 @@ def elapsed(self, reset=True, barrier=False): self.start(barrier=barrier) return _elapsed - def active_time(self): - return self._active_time - - class Timers: - """Group of timers.""" + """Class for a group of Timers. + """ def __init__(self, log_level, log_option): + """Initialize group of timers. + + Args: + log_level (int): Log level to control what timers are enabled. + log_option (str): Setting for logging statistics over ranks for all the timers. Allowed: ['max', 'minmax', 'all']. + """ self._log_level = log_level + allowed_log_options = set(['max', 'minmax', 'all']) + assert ( + log_option in allowed_log_options + ), 'input log option {} is invalid. It must be one of {}'.format( + log_option, allowed_log_options + ) self._log_option = log_option self._timers = {} self._log_levels = {} self._dummy_timer = DummyTimer() self._max_log_level = 2 - def __call__(self, name, log_level=None): + """Call timer with name and log level.""" # If the timer has already been set, then check if the log-level # is provided, it matches the one that the timer was created with. if name in self._timers: if log_level is not None: - assert log_level == self._log_levels[name], \ - 'input log level {} does not match already existing '\ - 'log level {} for {} timer'.format( - log_level, self._log_levels[name], name) + assert log_level == self._log_levels[name], ( + 'input log level {} does not match already existing ' + 'log level {} for {} timer'.format(log_level, self._log_levels[name], name) + ) return self._timers[name] # If timer does not exist and no log level is provided, # set it to the max log level which is 2. if log_level is None: log_level = self._max_log_level - assert log_level <= self._max_log_level, \ - 'log level {} is larger than max supported log level {}'.format( - log_level, self._max_log_level) + assert ( + log_level <= self._max_log_level + ), 'log level {} is larger than max supported log level {}'.format( + log_level, self._max_log_level + ) # Now if the input log level is larger than the one set for # the timers class, just ignore it and return a dummy timer. if log_level > self._log_level: @@ -165,18 +190,21 @@ def __call__(self, name, log_level=None): self._log_levels[name] = log_level return self._timers[name] - def _get_elapsed_time_all_ranks(self, names, reset, barrier): - """ + """Returns elapsed times of timers in names. Assumptions: - All the ranks call this function. - `names` are identical on all ranks. If the above assumptions are not met, calling this function will result in hang. - Arguments: - - names: list of timer names - - reset: reset the timer after recording the elapsed time - - barrier: if set, do a global barrier before time measurments + + Args: + names (List[str]): list of timer names + reset (bool): reset the timer after recording the elapsed time + barrier (bool): if set, do a global barrier before time measurments + + Returns: + torch.tensor: Tensor of size [world_size, len(names)] with times in float. """ # First make sure all the callers are in sync. @@ -191,30 +219,28 @@ def _get_elapsed_time_all_ranks(self, names, reset, barrier): # pytorch yet. It is simpler to deal with a single tensor # and since we are only gathering a small amount of data, # it should be ok to use all-gather instead of gather. 
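To make the comment above concrete, a hedged standalone sketch of the same pattern: every rank fills only its own row of a preallocated [world_size, N] tensor, and a single flattened all-gather leaves every rank holding the full table. It assumes torch.distributed has already been initialized with a CUDA backend such as NCCL; _all_gather_base is the private call this file uses (recent PyTorch versions expose the same operation publicly as all_gather_into_tensor).

import torch
import torch.distributed as dist

world_size = dist.get_world_size()
rank = dist.get_rank()
num_values = 3  # e.g. one slot per timer name

table = torch.zeros((world_size, num_values),
                    dtype=torch.float,
                    device=torch.cuda.current_device())
table[rank] = torch.rand(num_values, device=table.device)  # this rank's measurements

# One flat all-gather instead of a gather to rank 0: cheap for small tables,
# and every rank can then format or log the result locally.
torch.distributed._all_gather_base(table.view(-1), table[rank].view(-1))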
- rank_name_to_time = torch.zeros((world_size, len(names)), - dtype=torch.float, - device=torch.cuda.current_device()) + rank_name_to_time = torch.zeros( + (world_size, len(names)), dtype=torch.float, device=torch.cuda.current_device() + ) for i, name in enumerate(names): if name in self._timers: # Here we don't need to pass the barrier flag as all # the processes are already in sync. This avoids the # issue of different timers having different barrier # groups inside their class. - rank_name_to_time[rank, i] = self._timers[name].elapsed( - reset=reset) + rank_name_to_time[rank, i] = self._timers[name].elapsed(reset=reset) # See the note above for why we are not using gather. - torch.distributed._all_gather_base(rank_name_to_time.view(-1), - rank_name_to_time[rank, :].view(-1)) + torch.distributed._all_gather_base( + rank_name_to_time.view(-1), rank_name_to_time[rank, :].view(-1) + ) return rank_name_to_time - def _get_global_min_max_time(self, names, reset, barrier, normalizer): """Report only min and max times across all ranks.""" - rank_name_to_time = self._get_elapsed_time_all_ranks(names, reset, - barrier) + rank_name_to_time = self._get_elapsed_time_all_ranks(names, reset, barrier) name_to_min_max_time = {} for i, name in enumerate(names): rank_to_time = rank_name_to_time[:, i] @@ -224,32 +250,32 @@ def _get_global_min_max_time(self, names, reset, barrier, normalizer): if rank_to_time.numel() > 0: name_to_min_max_time[name] = ( rank_to_time.min().item() / normalizer, - rank_to_time.max().item() / normalizer) + rank_to_time.max().item() / normalizer, + ) return name_to_min_max_time - - def _get_global_min_max_time_string(self, names, reset, barrier, - normalizer, max_only): - name_to_min_max_time = self._get_global_min_max_time( - names, reset, barrier, normalizer) + def _get_global_min_max_time_string(self, names, reset, barrier, normalizer, max_only): + """Report strings for max/minmax times across all ranks.""" + name_to_min_max_time = self._get_global_min_max_time(names, reset, barrier, normalizer) if not name_to_min_max_time: return None - output_string = '(min, max) time across ranks (ms):' + if max_only: + output_string = 'max time across ranks (ms):' + else: + output_string = '(min, max) time across ranks (ms):' for name in name_to_min_max_time: min_time, max_time = name_to_min_max_time[name] if max_only: - output_string += '\n {}: {:.2f}'.format( - (name+' ').ljust(48, '.'), max_time) + output_string += '\n {}: {:.2f}'.format((name + ' ').ljust(48, '.'), max_time) else: output_string += '\n {}: ({:.2f}, {:.2f})'.format( - (name+' ').ljust(48, '.'), min_time, max_time) + (name + ' ').ljust(48, '.'), min_time, max_time + ) return output_string - def _get_all_ranks_time_string(self, names, reset, barrier, normalizer): """Report times across all ranks.""" - rank_name_to_time = self._get_elapsed_time_all_ranks(names, reset, - barrier) + rank_name_to_time = self._get_elapsed_time_all_ranks(names, reset, barrier) output_string = 'times across ranks (ms):' no_reported_timing = True @@ -262,49 +288,103 @@ def _get_all_ranks_time_string(self, names, reset, barrier, normalizer): not_yet_found = False output_string += '\n {}:'.format(name) output_string += '\n rank {:2d}: {:.2f}'.format( - rank, rank_name_to_time[rank, i] / normalizer) + rank, rank_name_to_time[rank, i] / normalizer + ) if no_reported_timing: return None return output_string + def get_all_timers_string( + self, + names: List[str] = None, + normalizer: float = 1.0, + reset: bool = True, + barrier: bool = False, + ): + 
"""Returns the output string with logged timer values according to configured options. + + Args: + names (List[str]): Names of the timers to log. If None, all registered timers are fetched. Defaults to None. + normalizer (float, optional): Normalizes the timer values by the factor. Defaults to 1.0. + reset (bool, optional): Whether to reset timer values after logging. Defaults to True. + barrier (bool, optional): Whether to do a global barrier before time measurments. Defaults to False. + + Raises: + Exception: Raises if log option is invalid. + + Returns: + str: Formatted string with the timer values. + """ - def log(self, names, rank=None, normalizer=1.0, reset=True, barrier=False): - """Log a group of timers.""" + if names == None: # get all registered timers + names = self._timers.keys() - # Print. assert normalizer > 0.0 if self._log_option in ['max', 'minmax']: max_only = False if self._log_option == 'max': max_only = True output_string = self._get_global_min_max_time_string( - names, reset, barrier, normalizer/1000.0, max_only) + names, reset, barrier, normalizer / 1000.0, max_only + ) elif self._log_option == 'all': - output_string = self._get_all_ranks_time_string(names, - reset, barrier, - normalizer/1000.0) + output_string = self._get_all_ranks_time_string( + names, reset, barrier, normalizer / 1000.0 + ) else: - raise Exception('unknown timing log option {}'.format( - self._log_option)) + raise Exception('unknown timing log option {}'.format(self._log_option)) + return output_string + def log( + self, + names: List[str], + rank: int = None, + normalizer: float = 1.0, + reset: bool = True, + barrier: bool = False, + ): + """logs the timers passed in names to stdout. Example usage is to log average per step value for timer 'foo', + this function can be called with normalizer factor set to logging interval. + + Args: + names (List[str]): Names of the timers to log. + rank (int, optional): logs the timers to a specific rank. If set to None, logs to the last rank. Defaults to None. + normalizer (float, optional): Normalizes the timer values by the factor. Defaults to 1.0. + reset (bool, optional): Whether to reset timer values after logging. Defaults to True. + barrier (bool, optional): Whether to do a global barrier before time measurments. Defaults to False. + """ + + output_string = self.get_all_timers_string(names, normalizer, reset, barrier) # If no input rank is provided, log on last rank. if rank is None: rank = torch.distributed.get_world_size() - 1 if rank == torch.distributed.get_rank() and output_string is not None: print(output_string, flush=True) - - def write(self, names, writer, iteration, normalizer=1.0, - reset=False, barrier=False): - """Write timers to a tensorboard writer - Note that we only report maximum time across ranks to tensorboard. + def write( + self, + names: List[str], + writer, + iteration: int, + normalizer: float = 1.0, + reset: bool = True, + barrier: bool = False, + ): + """Write timers to a tensorboard writer. Note that we only report maximum time across ranks to tensorboard. + + Args: + names (List[str]): Names of the timers to log. + writer (SummaryWriter): Tensorboard SummaryWriter object + iteration (int): Current iteration. + normalizer (float, optional): Normalizes the timer values by the factor. Defaults to 1.0. + reset (bool, optional): Whether to reset timer values after logging. Defaults to True. + barrier (bool, optional): Whether to do a global barrier before time measurments. Defaults to False. 
""" # currently when using add_scalars, # torch.utils.add_scalars makes each timer its own run, which # polutes the runs list, so we just add each as a scalar assert normalizer > 0.0 - name_to_min_max_time = self._get_global_min_max_time( - names, reset, barrier, normalizer) + name_to_min_max_time = self._get_global_min_max_time(names, reset, barrier, normalizer) if writer is not None: for name in name_to_min_max_time: _, max_time = name_to_min_max_time[name] diff --git a/megatron/global_vars.py b/megatron/global_vars.py index e1fd67faa6..45e7723860 100644 --- a/megatron/global_vars.py +++ b/megatron/global_vars.py @@ -7,9 +7,9 @@ import torch from megatron import dist_signal_handler +from megatron.core import Timers from megatron.tokenizer import build_tokenizer from .microbatches import build_num_microbatches_calculator -from .timers import Timers _GLOBAL_ARGS = None _GLOBAL_RETRO_ARGS = None From aa96ab735361de65ddf1e2050e3b1e969b6a33d1 Mon Sep 17 00:00:00 2001 From: Maanu Grover Date: Fri, 2 Feb 2024 23:38:41 -0800 Subject: [PATCH 254/296] JET fix: Migrate tests and run functional results always not on success --- .gitlab-ci.yml | 789 ------------------ jet-tests.yml | 8 +- .../functional_tests/jet_recipes/MR-bert.yaml | 7 +- .../functional_tests/jet_recipes/MR-gpt.yaml | 33 +- tests/functional_tests/jet_recipes/MR-t5.yaml | 3 +- .../jet_recipes/monthly-t5.yaml | 20 +- .../jet_recipes/nightly-bert.yaml | 8 +- .../jet_recipes/nightly-gpt.yaml | 20 +- .../python_test_utils/jet_test_pipeline.py | 33 +- ...eps-50_tp-1_pp-2_mcore-false_te-false.json | 2 +- ...0_tp-1_pp-4_mcore-false_te-false_vp-2.json | 2 +- ...2_args-local-spec_mcore-true_te-false.json | 1 + ...eps-50_tp-2_pp-2_mcore-false_te-false.json | 2 +- ...teps-50_tp-2_pp-2_mcore-true_te-false.json | 2 +- ...ps-100_tp-1_pp-2_mcore-false_te-false.json | 2 +- ...s-dist-optimizer_mcore-false_te-false.json | 1 + ...rm-full-recompute_mcore-true_te-false.json | 1 + ...s-rope-embeddings_mcore-true_te-false.json | 1 + ...sable-bias-linear_mcore-true_te-false.json | 1 + ...aram-gather_mcore-false_te-false_vp-1.json | 1 + ...grad-reduce_mcore-false_te-false_vp-1.json | 1 + ...sequence-parallel_mcore-true_te-false.json | 1 + ..._pp-4_args-swiglu_mcore-true_te-false.json | 1 + ...dings-and-outputs_mcore-true_te-false.json | 1 + ...0_tp-1_pp-4_mcore-false_te-false_vp-1.json | 2 +- ...50_tp-1_pp-4_mcore-true_te-false_vp-1.json | 2 +- ...allel-groupedgemm_mcore-true_te-false.json | 1 + ...rallel-top2router_mcore-true_te-false.json | 1 + ...8experts2parallel_mcore-true_te-false.json | 1 + ...eps-50_tp-2_pp-2_mcore-false_te-false.json | 2 +- ...teps-50_tp-2_pp-2_mcore-false_te-true.json | 2 +- ...teps-50_tp-2_pp-2_mcore-true_te-false.json | 2 +- ...uce-param-gather_mcore-false_te-false.json | 1 + ...rlap-grad-reduce_mcore-false_te-false.json | 1 + ...100_tp-1_pp-1_mcore-true_te-true_vp-1.json | 2 +- 35 files changed, 108 insertions(+), 850 deletions(-) create mode 100644 tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_args-local-spec_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-false_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-uniform-full-recompute_mcore-true_te-false.json create mode 100644 
tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-disable-bias-linear_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-false_te-false_vp-1.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-false_te-false_vp-1.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-sequence-parallel_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-swiglu_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-untie-embeddings-and-outputs_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-groupedgemm_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-top2router_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-false_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-false_te-false.json diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 4983188e29..3f218047fd 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -116,674 +116,6 @@ formatting: allow_failure: false retry: 2 -train.te_gpt3.345m_tp2_pp2_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 1 - TP_SIZE: 2 - PP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - TIME_LIMIT: "20:00" - TEST_LEVEL: MR_TESTS - PYTORCH_IMAGE: nvcr.io/nvidia/pytorch:23.07-py3 - -train.gpt3_core.345m_tp4_pp1_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 4 - PP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 1 - TEST_LEVEL: NIGHTLY_TESTS - -train.gpt3_core.345m_tp2_pp2_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 2 - PP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 1 - TEST_LEVEL: MR_TESTS - -train.gpt3_core.345m_tp1_pp2_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 1 - TIME_LIMIT: "10:00" - TEST_LEVEL: NIGHTLY_TESTS - 
-train.gpt3_core.345m_tp1_pp4_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 4 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 1 - TEST_LEVEL: NIGHTLY_TESTS - -train.gpt3_core.345m_tp1_pp4_interleaved_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 4 - VP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 1 - TEST_LEVEL: MR_TESTS - -train.gpt3_core.345m_tp1_pp2_1node_50steps_rope: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 1 - TEST_LEVEL: MR_TESTS - METADATA: rope_embeddings - ADDITIONAL_PARAMS: "--position-embedding-type rope" - -train.gpt3_core.345m_tp1_pp4_1node_50steps_swiglu: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 4 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 1 - TEST_LEVEL: MR_TESTS - METADATA: swiglu - ADDITIONAL_PARAMS: "--swiglu" - -train.gpt3_core.345m_tp1_pp4_1node_50steps_disable_bias_linear: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 4 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 1 - TEST_LEVEL: MR_TESTS - METADATA: disable_bias_linear - ADDITIONAL_PARAMS: "--disable-bias-linear" - -train.gpt3_core.345m_tp1_pp4_1node_50steps_untie_embeddings_and_outputs: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 4 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 1 - TEST_LEVEL: MR_TESTS - METADATA: untie_embeddings_and_outputs - ADDITIONAL_PARAMS: "--untie-embeddings-and-output-weights" - -train.gpt3_core.345m_tp1_pp4_1node_50steps_sequence_parallel: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 4 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 1 - TEST_LEVEL: MR_TESTS - METADATA: sequence_parallel - ADDITIONAL_PARAMS: "--sequence-parallel" - -train.gpt3.345m_tp4_pp1_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 4 - PP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: NIGHTLY_TESTS - -train.gpt3.345m_tp2_pp2_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 2 - PP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: MR_TESTS - -train.gpt3.345m_tp1_pp2_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: NIGHTLY_TESTS - -train.gpt3.345m_tp1_pp4_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 4 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: NIGHTLY_TESTS - -train.gpt3.345m_tp1_pp4_interleaved_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 4 - VP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: MR_TESTS - -resume.checkpoint.gpt3.345m_tp1_pp2_1node: - <<: *selene-test-resume-checkpoint-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - TP_SIZE: 1 - PP_SIZE: 2 - NUM_NODES: 1 - TIME_LIMIT: "15:00" - TEST_LEVEL: MR_TESTS - -train.gpt3.345m_tp1_pp1_1node_50steps_dist_optimizer: - <<: *selene-test-launcher - variables: - <<: [*VARS] 
- RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: MR_TESTS - METADATA: dist_optimizer - ADDITIONAL_PARAMS: "--use-distributed-optimizer" - -train.gpt3.345m_tp1_pp1_1node_50steps_overlap_grad_reduce: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: NIGHTLY_TESTS - METADATA: overlap_grad_reduce - ADDITIONAL_PARAMS: "--overlap-grad-reduce" - -train.gpt3.345m_tp1_pp1_1node_50steps_dist_optimizer_overlap_grad_reduce: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: NIGHTLY_TESTS - METADATA: dist_optimizer_overlap_grad_reduce - ADDITIONAL_PARAMS: "--use-distributed-optimizer --overlap-grad-reduce" - -train.gpt3.345m_tp1_pp1_1node_50steps_dist_optimizer_overlap_grad_reduce_param_gather: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: NIGHTLY_TESTS - METADATA: dist_optimizer_overlap_grad_reduce_param_gather - ADDITIONAL_PARAMS: "--use-distributed-optimizer --overlap-grad-reduce --overlap-param-gather" - -train.gpt3.345m_tp4_pp1_1node_50steps_overlap_grad_reduce: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 4 - PP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: NIGHTLY_TESTS - METADATA: overlap_grad_reduce - ADDITIONAL_PARAMS: "--overlap-grad-reduce" - -train.gpt3.345m_tp4_pp1_1node_50steps_dist_optimizer_overlap_grad_reduce: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 4 - PP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: MR_TESTS - METADATA: dist_optimizer_overlap_grad_reduce - ADDITIONAL_PARAMS: "--use-distributed-optimizer --overlap-grad-reduce" - -train.gpt3.345m_tp4_pp1_1node_50steps_dist_optimizer_overlap_grad_reduce_param_gather: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 4 - PP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: MR_TESTS - METADATA: dist_optimizer_overlap_grad_reduce_param_gather - ADDITIONAL_PARAMS: "--use-distributed-optimizer --overlap-grad-reduce --overlap-param-gather" - -train.gpt3.345m_tp1_pp4_1node_50steps_overlap_grad_reduce: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 4 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: NIGHTLY_TESTS - METADATA: overlap_grad_reduce - ADDITIONAL_PARAMS: "--overlap-grad-reduce" - -train.gpt3.345m_tp1_pp4_interleaved_1node_50steps_overlap_grad_reduce: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 4 - VP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: NIGHTLY_TESTS - METADATA: overlap_grad_reduce - ADDITIONAL_PARAMS: "--overlap-grad-reduce" - -train.gpt3.345m_tp1_pp4_interleaved_1node_50steps_dist_optimizer_overlap_grad_reduce: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 4 - VP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: MR_TESTS - METADATA: dist_optimizer_overlap_grad_reduce - ADDITIONAL_PARAMS: "--use-distributed-optimizer --overlap-grad-reduce" - 
-train.gpt3.345m_tp1_pp4_interleaved_1node_50steps_dist_optimizer_overlap_grad_reduce_param_gather: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 1 - PP_SIZE: 4 - VP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: MR_TESTS - METADATA: dist_optimizer_overlap_grad_reduce_param_gather - ADDITIONAL_PARAMS: "--use-distributed-optimizer --overlap-grad-reduce --overlap-param-gather" - -train.gpt3.345m_tp2_pp2_1node_50steps_overlap_grad_reduce: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 2 - PP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: NIGHTLY_TESTS - METADATA: overlap_grad_reduce - ADDITIONAL_PARAMS: "--overlap-grad-reduce" - -train.gpt3_core.345m_cp2_tp2_pp1_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 2 - PP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 1 - TIME_LIMIT: "20:00" - TEST_LEVEL: MR_TESTS - METADATA: "context_parallelism_cp2" - PYTORCH_IMAGE: "/lustre/fsw/adlr/adlr-nlp/adlr_ci/megatron/pytorch_23.10_flash_attn_1.0.9_context_parallelism.sqsh" - ADDITIONAL_PARAMS: "--context-parallel-size 2 --sequence-parallel --hidden-dropout 0.0 --attention-dropout 0.0" - -train.gpt3_core.345m_cp2_tp2_pp2_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 2 - PP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 1 - TIME_LIMIT: "20:00" - TEST_LEVEL: MR_TESTS - METADATA: "context_parallelism_cp2" - PYTORCH_IMAGE: "/lustre/fsw/adlr/adlr-nlp/adlr_ci/megatron/pytorch_23.10_flash_attn_1.0.9_context_parallelism.sqsh" - ADDITIONAL_PARAMS: "--context-parallel-size 2 --sequence-parallel --hidden-dropout 0.0 --attention-dropout 0.0" - -# Note: Core MoE models currently will run TE by default -train.te_core_moe_gpt3.345m_tp2_pp2_2experts_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 2 - PP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 1 - TEST_LEVEL: NIGHTLY_TESTS - METADATA: "te_2experts" - ADDITIONAL_PARAMS: "--num-experts 2 --sequence-parallel --moe-router-load-balancing-type sinkhorn --moe-router-topk 1" - -train.te_core_moe_gpt3.345m_tp2_pp2_4experts2parallel_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 2 - PP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 1 - TEST_LEVEL: NIGHTLY_TESTS - METADATA: "te_4experts2parallel" - ADDITIONAL_PARAMS: "--sequence-parallel --num-experts 4 --expert-model-parallel-size 2 --moe-router-load-balancing-type sinkhorn --moe-router-topk 1" - -train.te_core_moe_gpt3.345m_tp2_pp1_8experts2parallel_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 2 - PP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 1 - TEST_LEVEL: MR_TESTS - METADATA: "te_8experts2parallel" - ADDITIONAL_PARAMS: "--sequence-parallel --num-experts 8 --expert-model-parallel-size 2 --moe-router-load-balancing-type sinkhorn --moe-router-topk 1" - -train.te_core_moe_gpt3.345m_tp2_pp1_8experts2parallel_groupedGEMM_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 2 - PP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 1 - MOE_GROUPED_GEMM: 1 - TEST_LEVEL: MR_TESTS - METADATA: "te_8experts2parallel_groupedGEMM" - ADDITIONAL_PARAMS: "--moe-grouped-gemm --disable-bias-linear 
--sequence-parallel --num-experts 8 --expert-model-parallel-size 2 --moe-router-load-balancing-type sinkhorn --moe-router-topk 1" - -train.te_core_moe_gpt3.345m_tp2_pp1_8experts2parallel_top2_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 2 - PP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 1 - MOE_GROUPED_GEMM: 1 - TEST_LEVEL: MR_TESTS - METADATA: "te_8experts2parallel_top2router" - ADDITIONAL_PARAMS: "--disable-bias-linear --sequence-parallel --num-experts 8 --expert-model-parallel-size 2 --moe-router-load-balancing-type aux_loss --moe-router-topk 2 --moe-aux-loss-coeff 1e-2" - -train.moe_gpt3.345m_tp2_pp2_4experts_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: gpt3 - USE_TE: 0 - TP_SIZE: 2 - PP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - USE_CORE: 0 - TEST_LEVEL: NIGHTLY_TESTS - METADATA: "4experts" - ADDITIONAL_PARAMS: "--sequence-parallel --num-experts 4 --moe-router-load-balancing-type sinkhorn --moe-router-topk 1" - -train.bert.345m_tp4_pp1_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: bert - TP_SIZE: 4 - PP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - TIME_LIMIT: "10:00" - TEST_LEVEL: NIGHTLY_TESTS - -train.bert.345m_tp2_pp2_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: bert - TP_SIZE: 2 - PP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - TEST_LEVEL: MR_TESTS - -train.bert.345m_tp1_pp2_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: bert - TP_SIZE: 1 - PP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - TEST_LEVEL: NIGHTLY_TESTS - -train.bert.345m_tp1_pp4_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: bert - TP_SIZE: 1 - PP_SIZE: 4 - NUM_NODES: 1 - MAX_STEPS: 50 - TEST_LEVEL: NIGHTLY_TESTS - -train.bert.345m_tp1_pp4_interleaved_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: bert - TP_SIZE: 1 - PP_SIZE: 4 - VP_SIZE: 2 - NUM_NODES: 1 - MAX_STEPS: 50 - TEST_LEVEL: MR_TESTS - -train.bert_core.345m_tp4_pp1_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: bert - TP_SIZE: 4 - PP_SIZE: 1 - NUM_NODES: 1 - USE_CORE: 1 - MAX_STEPS: 50 - TIME_LIMIT: "20:00" - TEST_LEVEL: NIGHTLY_TESTS - -train.bert_core.345m_tp2_pp2_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: bert - TP_SIZE: 2 - PP_SIZE: 2 - NUM_NODES: 1 - USE_CORE: 1 - MAX_STEPS: 50 - TIME_LIMIT: "20:00" - TEST_LEVEL: MR_TESTS - -train.bert_core.345m_tp2_pp2_1node_50steps_local_spec: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: bert - TP_SIZE: 2 - PP_SIZE: 2 - NUM_NODES: 1 - USE_CORE: 1 - MAX_STEPS: 50 - TIME_LIMIT: "20:00" - TEST_LEVEL: MR_TESTS - METADATA: local_spec - ADDITIONAL_PARAMS: "--spec local" - -train.bert_core.345m_tp1_pp2_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: bert - TP_SIZE: 1 - PP_SIZE: 2 - NUM_NODES: 1 - USE_CORE: 1 - MAX_STEPS: 50 - TIME_LIMIT: "20:00" - TEST_LEVEL: NIGHTLY_TESTS - -train.bert_core.345m_tp1_pp4_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: bert - TP_SIZE: 1 - PP_SIZE: 4 - VP_SIZE: 2 - NUM_NODES: 1 - USE_CORE: 1 - MAX_STEPS: 50 - TIME_LIMIT: "20:00" - TEST_LEVEL: NIGHTLY_TESTS - train.bert_core.345m_tp1_pp2_1node_50steps_rope: <<: *selene-test-launcher variables: @@ -814,16 +146,6 @@ train.bert_core.345m_tp1_pp2_1node_50steps_sequence_parallel: METADATA: sequence_parallel 
ADDITIONAL_PARAMS: "--sequence-parallel" -resume.checkpoint.bert.345m_tp1_pp2_1node: - <<: *selene-test-resume-checkpoint-launcher - variables: - <<: [*VARS] - RUN_MODEL: bert - TP_SIZE: 1 - PP_SIZE: 2 - NUM_NODES: 1 - TEST_LEVEL: MR_TESTS - train.retro_core.tp1_pp1_1node_50steps: <<: *selene-test-launcher variables: @@ -838,117 +160,6 @@ train.retro_core.tp1_pp1_1node_50steps: TIME_LIMIT: "20:00" TEST_LEVEL: MONTHLY_TESTS -train.t5_core.220m_tp1_pp1_1node_100steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: t5 - USE_TE: 0 - USE_CORE: 1 - TP_SIZE: 1 - PP_SIZE: 1 - VP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 100 - TIME_LIMIT: "30:00" - TEST_LEVEL: MONTHLY_TESTS - PYTORCH_IMAGE: nvcr.io/nvidia/pytorch:23.07-py3 - -train.t5_core.220m_tp2_pp1_1node_100steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: t5 - USE_TE: 0 - USE_CORE: 1 - TP_SIZE: 2 - PP_SIZE: 1 - VP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 100 - TIME_LIMIT: "30:00" - TEST_LEVEL: MONTHLY_TESTS - PYTORCH_IMAGE: nvcr.io/nvidia/pytorch:23.07-py3 - -train.t5_core.220m_te_tp1_pp1_1node_100steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: t5 - USE_TE: 1 - USE_CORE: 1 - TP_SIZE: 1 - PP_SIZE: 1 - VP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 100 - TIME_LIMIT: "30:00" - TEST_LEVEL: MR_TESTS - PYTORCH_IMAGE: nvcr.io/nvidia/pytorch:23.07-py3 - -train.t5_core.220m_te_tp2_pp1_1node_100steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: t5 - USE_TE: 1 - USE_CORE: 1 - TP_SIZE: 2 - PP_SIZE: 1 - VP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 100 - TIME_LIMIT: "30:00" - TEST_LEVEL: MONTHLY_TESTS - PYTORCH_IMAGE: nvcr.io/nvidia/pytorch:23.07-py3 - -train.t5_core.220m_te_tp2_pp1_sp_1node_100steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: t5 - USE_TE: 1 - USE_CORE: 1 - TP_SIZE: 2 - PP_SIZE: 1 - VP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 100 - TIME_LIMIT: "30:00" - TEST_LEVEL: MONTHLY_TESTS - PYTORCH_IMAGE: nvcr.io/nvidia/pytorch:23.07-py3 - ADDITIONAL_PARAMS: "--sequence-parallel" - -resume.checkpoint.t5_core.220m_tp1_pp1_1node: - <<: *selene-test-resume-checkpoint-launcher - variables: - <<: [*VARS] - RUN_MODEL: t5 - USE_TE: 0 - USE_CORE: 1 - TP_SIZE: 1 - PP_SIZE: 1 - VP_SIZE: 1 - NUM_NODES: 1 - TIME_LIMIT: "30:00" - TEST_LEVEL: MONTHLY_TESTS - PYTORCH_IMAGE: nvcr.io/nvidia/pytorch:23.07-py3 - -resume.checkpoint.t5_core.220m_te_tp1_pp1_1node: - <<: *selene-test-resume-checkpoint-launcher - variables: - <<: [*VARS] - RUN_MODEL: t5 - USE_TE: 1 - USE_CORE: 1 - TP_SIZE: 1 - PP_SIZE: 1 - VP_SIZE: 1 - NUM_NODES: 1 - TIME_LIMIT: "30:00" - TEST_LEVEL: MONTHLY_TESTS - PYTORCH_IMAGE: nvcr.io/nvidia/pytorch:23.07-py3 - cleanup.selene: tags: - ssh_selene_runner diff --git a/jet-tests.yml b/jet-tests.yml index ae77f14b4a..45085451eb 100644 --- a/jet-tests.yml +++ b/jet-tests.yml @@ -62,7 +62,7 @@ jet-trigger: jet-functional-results: - extends: .jet_common + stage: jet tags: - docker_local_runner image: gitlab-master.nvidia.com:5005/dl/jet/api:latest @@ -72,6 +72,12 @@ jet-functional-results: script: - python -m pip install -U --no-cache-dir prettytable - python tests/functional_tests/python_test_utils/jet_test_pipeline.py ${CI_PIPELINE_ID} --test exit + rules: + - if: $CI_PIPELINE_SOURCE == 'merge_request_event' && ( $CI_MERGE_REQUEST_APPROVED || $CI_MERGE_REQUEST_LABELS =~ "READY FOR REVIEW" ) + when: always + - if: $JET_CUSTOM_FILTER != "" && $CI_PIPELINE_SOURCE != 'merge_request_event' && $CI_PIPELINE_SOURCE != 'schedule' + when: always + - when: never 
jet-compare-metrics: extends: .jet_common diff --git a/tests/functional_tests/jet_recipes/MR-bert.yaml b/tests/functional_tests/jet_recipes/MR-bert.yaml index 4c9a6cbfaf..edfe09371b 100644 --- a/tests/functional_tests/jet_recipes/MR-bert.yaml +++ b/tests/functional_tests/jet_recipes/MR-bert.yaml @@ -15,6 +15,7 @@ spec: use_mcore: True vp_size: null extra_args: null + args_meta: null micro_batch_size: 4 # MBS batch_size: 128 # GBS, JET schema requires 'batch_size' precision: bf16 @@ -44,6 +45,7 @@ spec: products: # MCore - {tp_size: [2], pp_size: [2]} + - {tp_size: [2], pp_size: [2], extra_args: ['"--spec local"'], args_meta: ["local_spec"]} # Non-MCore - {use_mcore: [False], tp_size: [2], pp_size: [2]} - {use_mcore: [False], tp_size: [1], pp_size: [4], vp_size: [2]} @@ -51,7 +53,7 @@ key_segments: vp_size: vp use_mcore: mcore use_te: te - extra_args: args + args_meta: args --- @@ -73,6 +75,7 @@ spec: use_mcore: True vp_size: null extra_args: null + args_meta: null micro_batch_size: 4 # MBS batch_size: 128 # GBS, JET schema requires 'batch_size' precision: bf16 @@ -105,4 +108,4 @@ key_segments: vp_size: vp use_mcore: mcore use_te: te - extra_args: args + args_meta: args diff --git a/tests/functional_tests/jet_recipes/MR-gpt.yaml b/tests/functional_tests/jet_recipes/MR-gpt.yaml index e0d5b982f8..2f615240e0 100644 --- a/tests/functional_tests/jet_recipes/MR-gpt.yaml +++ b/tests/functional_tests/jet_recipes/MR-gpt.yaml @@ -15,8 +15,10 @@ spec: use_mcore: True vp_size: null extra_args: null + args_meta: null micro_batch_size: 4 # MBS batch_size: 32 # GBS, JET schema requires 'batch_size' + moe_grouped_gemm: 0 precision: bf16 time_limit: 1200 artifacts: {/workspace/data/gpt3_data: text/the_pile/shard00} @@ -40,6 +42,7 @@ spec: VP_SIZE={vp_size if vp_size is not None else '""'} \ MBS={micro_batch_size} \ GBS={batch_size} \ + MOE_GROUPED_GEMM={moe_grouped_gemm} \ ADDITIONAL_PARAMS={extra_args if extra_args is not None else '""'} && \ python3 ./tests/functional_tests/python_test_utils/get_test_results_from_tensorboard_logs.py {assets_dir} "" | \ tee {assets_dir}/results.json @@ -47,24 +50,29 @@ products: # MCore - {tp_size: [2], pp_size: [2]} - {tp_size: [1], pp_size: [4], vp_size: [1]} - - {tp_size: [1], pp_size: [2], extra_args: ['"--position-embedding-type rope"']} - - tp_size: [1] - pp_size: [4] - extra_args: ["--swiglu", "--disable-bias-linear", "--untie-embeddings-and-output-weights", "--sequence-parallel"] - - {tp_size: [1], pp_size: [1], extra_args: ['"--recompute-granularity full --recompute-method uniform --recompute-num-layers 1"']} - # - {tp_size: [2], pp_size: [1], extra_args: ['"--context-parallel-size 2 --sequence-parallel --hidden-dropout 0.0 --attention-dropout 0.0"']} # TODO: need updated container with TE > 1.0.0 - - {tp_size: [2], pp_size: [1], extra_args: ['"--sequence-parallel --num-experts 8 --expert-model-parallel-size 2"']} + - {tp_size: [1], pp_size: [2], extra_args: ['"--position-embedding-type rope"'], args_meta: ["rope_embeddings"]} + - {tp_size: [1], pp_size: [4], extra_args: ["--swiglu"], args_meta: ["swiglu"]} + - {tp_size: [1], pp_size: [4], extra_args: ["--disable-bias-linear"], args_meta: ["disable_bias_linear"]} + - {tp_size: [1], pp_size: [4], extra_args: ["--untie-embeddings-and-output-weights"], args_meta: ["untie_embeddings_and_outputs"]} + - {tp_size: [1], pp_size: [4], extra_args: ["--sequence-parallel"], args_meta: ["sequence_parallel"]} + - {tp_size: [1], pp_size: [1], extra_args: ['"--recompute-granularity full --recompute-method uniform 
--recompute-num-layers 1"'], args_meta: ["uniform_full_recompute"]} + # - {tp_size: [2], pp_size: [1,2], extra_args: ['"--context-parallel-size 2 --sequence-parallel --hidden-dropout 0.0 --attention-dropout 0.0"']} # TODO: need updated container with TE > 1.0.0 + - {tp_size: [2], pp_size: [1], extra_args: ['"--sequence-parallel --num-experts 8 --expert-model-parallel-size 2 --moe-router-load-balancing-type sinkhorn --moe-router-topk 1"'], args_meta: ["te_8experts2parallel"]} + - {tp_size: [2], pp_size: [1], extra_args: ['"--moe-grouped-gemm --disable-bias-linear --sequence-parallel --num-experts 8 --expert-model-parallel-size 2 --moe-router-load-balancing-type sinkhorn --moe-router-topk 1"'], moe_grouped_gemm: [1], args_meta: ["te_8experts2parallel_groupedGEMM"]} + - {tp_size: [2], pp_size: [1], extra_args: ['"--disable-bias-linear --sequence-parallel --num-experts 8 --expert-model-parallel-size 2 --moe-router-load-balancing-type aux_loss --moe-router-topk 2 --moe-aux-loss-coeff 1e-2"'], moe_grouped_gemm: [1], args_meta: ["te_8experts2parallel_top2router"]} # Non-MCore - {use_mcore: [False], use_te: [False, True], tp_size: [2], pp_size: [2]} - {use_mcore: [False], tp_size: [1], pp_size: [4], vp_size: [1]} - - {use_mcore: [False], tp_size: [1], pp_size: [1], extra_args: ["--use-distributed-optimizer"]} - - {use_mcore: [False], tp_size: [4], pp_size: [1], extra_args: ["--use-distributed-optimizer --overlap-grad-reduce"]} - - {use_mcore: [False], tp_size: [1], pp_size: [4], vp_size: [1], extra_args: ["--use-distributed-optimizer --overlap-grad-reduce"]} + - {use_mcore: [False], tp_size: [1], pp_size: [1], extra_args: ["--use-distributed-optimizer"], args_meta: ["dist_optimizer"]} + - {use_mcore: [False], tp_size: [4], pp_size: [1], extra_args: ['"--use-distributed-optimizer --overlap-grad-reduce"'], args_meta: ["dist_optimizer_overlap_grad_reduce"]} + - {use_mcore: [False], tp_size: [4], pp_size: [1], extra_args: ['"--use-distributed-optimizer --overlap-grad-reduce --overlap-param-gather"'], args_meta: ["dist_optimizer_overlap_grad_reduce_param_gather"]} + - {use_mcore: [False], tp_size: [1], pp_size: [4], vp_size: [1], extra_args: ['"--use-distributed-optimizer --overlap-grad-reduce"'], args_meta: ["dist_optimizer_overlap_grad_reduce"]} + - {use_mcore: [False], tp_size: [1], pp_size: [4], vp_size: [1], extra_args: ['"--use-distributed-optimizer --overlap-grad-reduce --overlap-param-gather"'], args_meta: ["dist_optimizer_overlap_grad_reduce_param_gather"]} key_segments: vp_size: vp use_mcore: mcore use_te: te - extra_args: args + args_meta: args --- @@ -86,6 +94,7 @@ spec: use_mcore: True vp_size: null extra_args: null + args_meta: null micro_batch_size: 4 # MBS batch_size: 32 # GBS, JET schema requires 'batch_size' precision: 16 @@ -119,4 +128,4 @@ key_segments: vp_size: vp use_mcore: mcore use_te: te - extra_args: args + args_meta: args diff --git a/tests/functional_tests/jet_recipes/MR-t5.yaml b/tests/functional_tests/jet_recipes/MR-t5.yaml index a7895effa3..9d8490b130 100644 --- a/tests/functional_tests/jet_recipes/MR-t5.yaml +++ b/tests/functional_tests/jet_recipes/MR-t5.yaml @@ -15,6 +15,7 @@ spec: use_mcore: True vp_size: null extra_args: null + args_meta: null micro_batch_size: 4 # MBS batch_size: 32 # GBS, JET schema requires 'batch_size' precision: bf16 @@ -47,4 +48,4 @@ key_segments: vp_size: vp use_mcore: mcore use_te: te - extra_args: args + args_meta: args diff --git a/tests/functional_tests/jet_recipes/monthly-t5.yaml b/tests/functional_tests/jet_recipes/monthly-t5.yaml index 
65269b7006..6eb3490fe8 100644 --- a/tests/functional_tests/jet_recipes/monthly-t5.yaml +++ b/tests/functional_tests/jet_recipes/monthly-t5.yaml @@ -15,6 +15,7 @@ spec: use_mcore: True vp_size: 1 extra_args: null + args_meta: null micro_batch_size: 4 # MBS batch_size: 32 # GBS, JET schema requires 'batch_size' precision: bf16 @@ -42,16 +43,14 @@ spec: python3 ./tests/functional_tests/python_test_utils/get_test_results_from_tensorboard_logs.py {assets_dir} "" | \ tee {assets_dir}/results.json products: - - { tp_size: [1,2], pp_size: [1] } - - use_te: [True] - tp_size: [2] - pp_size: [1] - extra_args: [null, "--sequence-parallel"] + - { tp_size: [1,2], pp_size: [1], vp_size: [1] } + - {use_te: [True], tp_size: [2], pp_size: [1], vp_size: [1]} + - {use_te: [True], tp_size: [2], pp_size: [1], vp_size: [1], extra_args: ["--sequence-parallel"], args_meta: ["sequence_parallel"]} key_segments: - # vp_size: vp + vp_size: vp use_mcore: mcore use_te: te - extra_args: args + args_meta: args --- @@ -73,6 +72,7 @@ spec: use_mcore: True vp_size: 1 extra_args: null + args_meta: null micro_batch_size: 4 # MBS batch_size: 32 # GBS, JET schema requires 'batch_size' precision: bf16 @@ -100,9 +100,9 @@ spec: python3 ./tests/functional_tests/python_test_utils/get_test_results_from_tensorboard_logs.py {assets_dir} "" | \ tee {assets_dir}/results.json products: - - {use_te: [False, True], tp_size: [1], pp_size: [1]} + - {use_te: [False, True], tp_size: [1], pp_size: [1], vp_size: [1]} key_segments: - # vp_size: vp + vp_size: vp use_mcore: mcore use_te: te - extra_args: args + args_meta: args diff --git a/tests/functional_tests/jet_recipes/nightly-bert.yaml b/tests/functional_tests/jet_recipes/nightly-bert.yaml index 2569833aaf..6641d7926a 100644 --- a/tests/functional_tests/jet_recipes/nightly-bert.yaml +++ b/tests/functional_tests/jet_recipes/nightly-bert.yaml @@ -15,6 +15,7 @@ spec: use_mcore: True vp_size: null extra_args: null + args_meta: null micro_batch_size: 4 # MBS batch_size: 128 # GBS, JET schema requires 'batch_size' precision: bf16 @@ -42,10 +43,11 @@ spec: python3 ./tests/functional_tests/python_test_utils/get_test_results_from_tensorboard_logs.py {assets_dir} "" | \ tee {assets_dir}/results.json products: + - {tp_size: [1], pp_size: [4], vp_size: [2]} - {use_mcore: [True, False], tp_size: [4], pp_size: [1]} - - {use_mcore: [True, False], tp_size: [1], pp_size: [2,4]} + - {use_mcore: [True, False], tp_size: [1], pp_size: [2]} key_segments: - # vp_size: vp + vp_size: vp use_mcore: mcore use_te: te - extra_args: args + args_meta: args diff --git a/tests/functional_tests/jet_recipes/nightly-gpt.yaml b/tests/functional_tests/jet_recipes/nightly-gpt.yaml index 5cc8c6444f..b00de0da54 100644 --- a/tests/functional_tests/jet_recipes/nightly-gpt.yaml +++ b/tests/functional_tests/jet_recipes/nightly-gpt.yaml @@ -15,8 +15,10 @@ spec: use_mcore: True vp_size: null extra_args: null + args_meta: null micro_batch_size: 4 # MBS batch_size: 32 # GBS, JET schema requires 'batch_size' + moe_grouped_gemm: 0 precision: bf16 time_limit: 1200 artifacts: {/workspace/data/gpt3_data: text/the_pile/shard00} @@ -40,22 +42,24 @@ spec: VP_SIZE={vp_size if vp_size is not None else '""'} \ MBS={micro_batch_size} \ GBS={batch_size} \ + MOE_GROUPED_GEMM={moe_grouped_gemm} \ ADDITIONAL_PARAMS={extra_args if extra_args is not None else '""'} && \ python3 ./tests/functional_tests/python_test_utils/get_test_results_from_tensorboard_logs.py {assets_dir} "" | \ tee {assets_dir}/results.json products: - {use_mcore: [True, False], tp_size: 
[4], pp_size: [1]} - {use_mcore: [True, False], tp_size: [1], pp_size: [2,4]} - - tp_size: [2] - pp_size: [2] - extra_args: ['"--num-experts 2"', '"--sequence-parallel --num-experts 4 --expert-model-parallel-size 2"'] + - {tp_size: [2], pp_size: [2], extra_args: ['"--num-experts 2 --sequence-parallel --moe-router-load-balancing-type sinkhorn --moe-router-topk 1"'], args_meta: ["te_2experts"]} + - {tp_size: [2], pp_size: [2], extra_args: ['"--sequence-parallel --num-experts 4 --expert-model-parallel-size 2 --moe-router-load-balancing-type sinkhorn --moe-router-topk 1"'], args_meta: ["te_4experts2parallel"]} # Non-MCore - - {use_mcore: [False], tp_size: [1,4], pp_size: [1], extra_args: ["--overlap-grad-reduce"]} - - {use_mcore: [False], tp_size: [1], pp_size: [1], extra_args: ['"--use-distributed-optimizer --overlap-grad-reduce"']} - - {use_mcore: [False], tp_size: [1], pp_size: [4], vp_size: [null, 1], extra_args: ["--overlap-grad-reduce"]} - - {use_mcore: [False], tp_size: [2], pp_size: [2], extra_args: ["--overlap-grad-reduce", '"--num-experts 4"']} + - {use_mcore: [False], tp_size: [1,4], pp_size: [1], extra_args: ["--overlap-grad-reduce"], args_meta: ["overlap_grad_reduce"]} + - {use_mcore: [False], tp_size: [1], pp_size: [1], extra_args: ['"--use-distributed-optimizer --overlap-grad-reduce"'], args_meta: ["dist_optimizer_overlap_grad_reduce"]} + - {use_mcore: [False], tp_size: [1], pp_size: [1], extra_args: ['"--use-distributed-optimizer --overlap-grad-reduce --overlap-param-gather"'], args_meta: ["dist_optimizer_overlap_grad_reduce_param_gather"]} + - {use_mcore: [False], tp_size: [1], pp_size: [4], vp_size: [null, 1], extra_args: ["--overlap-grad-reduce"], args_meta: ["overlap_grad_reduce"]} + - {use_mcore: [False], tp_size: [2], pp_size: [2], extra_args: ["--overlap-grad-reduce"], args_meta: ["overlap_grad_reduce"]} + - {use_mcore: [False], tp_size: [2], pp_size: [2], extra_args: ['"--sequence-parallel --num-experts 4 --moe-router-load-balancing-type sinkhorn --moe-router-topk 1"'], args_meta: ["4experts"]} key_segments: vp_size: vp use_mcore: mcore use_te: te - extra_args: args + args_meta: args diff --git a/tests/functional_tests/python_test_utils/jet_test_pipeline.py b/tests/functional_tests/python_test_utils/jet_test_pipeline.py index 6ab4ac5666..9b20fd59bc 100644 --- a/tests/functional_tests/python_test_utils/jet_test_pipeline.py +++ b/tests/functional_tests/python_test_utils/jet_test_pipeline.py @@ -5,10 +5,13 @@ from jet.logs.queries import JETLogsQuery, Field -def select_asset(assets, prefix): - for asset in assets: - if asset['s_name'].startswith(prefix): - return asset['s_url'] +def select_asset(result_obj, prefix): + if result_obj['obj_ci']['s_job_status'] != "skipped": + assets = result_obj['nested_assets'] + for asset in assets: + if asset['s_name'].startswith(prefix): + return asset['s_url'] + return 'not found' def query_results(triggering_pipeline_id): @@ -17,7 +20,7 @@ def query_results(triggering_pipeline_id): JETLogsQuery() .filter(Field('obj_ci.obj_upstream.l_pipeline_id') == triggering_pipeline_id) .filter(Field('obj_workload.s_type') == 'recipe') - .select('l_exit_code', 'nested_assets', 'obj_workload.s_key', 'obj_workload.obj_spec', 'ts_created') + .select('l_exit_code', 'nested_assets', 'obj_workload.s_key', 'obj_workload.obj_spec', 'obj_ci', 'ts_created') .orderby('ts_created') # increasing (least recent in case of timestamp) ) return service.query(query, flatten=False) @@ -26,25 +29,29 @@ def query_results(triggering_pipeline_id): def 
check_exitcodes(results): from prettytable import PrettyTable + all_keys = [] exit_codes = {} log_urls = {} names = {} for result in results: key = result['obj_workload']['s_key'] + all_keys.append(key) - exit_codes[key] = result['l_exit_code'] - log_urls[key] = select_asset(result['nested_assets'], 'output_script-0.log') + exit_codes[key] = result.get('l_exit_code', -1) + log_urls[key] = select_asset(result, 'output_script-0.log') name = result['obj_workload']['s_key'].lstrip('recipe/') remove_substr = result['obj_workload']['obj_spec']['s_build'] + \ '_' + result['obj_workload']['obj_spec']['s_scope'] names[key] = ''.join(name.split(remove_substr)) table = PrettyTable() - table.add_column("Job Key", list(names.values())) - table.add_column("Exit Code", list(exit_codes.values())) - table.add_column("Log URL", list(log_urls.values())) + table.add_column("Job Key", [names[k] for k in all_keys]) + table.add_column("Exit Code", [exit_codes[k] for k in all_keys]) + table.add_column("Log URL", [log_urls[k] for k in all_keys]) exit_codes_good = [ec == 0 for ec in exit_codes.values()] - if not all(exit_codes_good): + if exit_codes_good == []: + raise Exception("Can't find any jobs, something went wrong.\n" + table.get_string()) + if exit_codes_good == [] or not all(exit_codes_good): raise Exception("Some jobs failed to complete successfully\n" + table.get_string()) else: print(table) @@ -72,7 +79,7 @@ def check_baselines(results): with TemporaryDirectory() as tmpdir: # Download TB event logs for result in results: - event_log_url = select_asset(result['nested_assets'], 'events.out.tfevents') + event_log_url = select_asset(result, 'events.out.tfevents') target_dir = result['obj_workload']['s_key'].lstrip('recipe/') target_dir = os.path.join(tmpdir, target_dir) _download_log(event_log_url, target_dir) @@ -86,7 +93,7 @@ def check_baselines(results): def fetch_metrics_files(results, save_dir): for result in results: - metrics_url = select_asset(result['nested_assets'], 'results.json') + metrics_url = select_asset(result, 'results.json') if metrics_url is not None: cfg = result['obj_workload']['s_key'].lstrip('recipe/') target_dir = os.path.join(save_dir, cfg) diff --git a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-2_mcore-false_te-false.json index f38be476c4..9ee243fd58 100644 --- a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-2_mcore-false_te-false.json +++ b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-2_mcore-false_te-false.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [10.51553, 10.51031, 10.52063, 10.52246, 10.51819, 10.50918, 10.43691, 10.29866, 10.16894, 9.98642, 9.91462, 9.78574, 9.67453, 9.55759, 9.50386, 9.35031, 9.34045, 9.27913, 9.27768, 9.20723]}, "num-zeros": {"start_step": 0, "end_step": 84, "step_interval": 5, "values": [21436.0, 21632.0, 23818.0, 19149.0, 23732.0, 18947.0, 19899.0, 26923.0, 24942.0, 25962.0, 15012.0, 34688.0, 26498.0, 21937.0, 37472.0, 28599.0, 23063.0]}, "iteration_timing_avg": 0.25193253731343285} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [10.51553, 10.51031, 
10.52063, 10.52246, 10.51819, 10.50918, 10.43691, 10.29866, 10.16894, 9.98642, 9.91462, 9.78574, 9.67453, 9.55759, 9.50386, 9.35031, 9.34045, 9.27913, 9.27768, 9.20723]}, "num-zeros": {"start_step": 0, "end_step": 84, "step_interval": 5, "values": [21436.0, 21632.0, 23818.0, 19149.0, 23732.0, 18947.0, 19899.0, 26923.0, 24942.0, 25962.0, 15012.0, 34688.0, 26498.0, 21937.0, 37472.0, 28599.0, 23063.0]}, "iteration_timing_avg": 0.24888507462686574} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-4_mcore-false_te-false_vp-2.json b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-4_mcore-false_te-false_vp-2.json index 941af1117d..a8886517f5 100644 --- a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-4_mcore-false_te-false_vp-2.json +++ b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-4_mcore-false_te-false_vp-2.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.42108, 10.43552, 10.43934, 10.43349, 10.42826, 10.42499, 10.37549, 10.2337, 10.1091, 9.93972]}, "num-zeros": {"start_step": 0, "end_step": 34, "step_interval": 5, "values": [19496.0, 22201.0, 23780.0, 21779.0, 22701.0, 20018.0, 22409.0]}, "iteration_timing_avg": 0.6054652941176473} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.42108, 10.43552, 10.43934, 10.43349, 10.42826, 10.42499, 10.37549, 10.2337, 10.1091, 9.93972]}, "num-zeros": {"start_step": 0, "end_step": 34, "step_interval": 5, "values": [19496.0, 22201.0, 23780.0, 21779.0, 22701.0, 20018.0, 22409.0]}, "iteration_timing_avg": 0.5799538235294118} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_args-local-spec_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_args-local-spec_mcore-true_te-false.json new file mode 100644 index 0000000000..163496d61e --- /dev/null +++ b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_args-local-spec_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.47903, 10.47213, 10.46828, 10.4513, 10.4294, 10.35818, 10.16921, 10.09081, 9.918, 9.74324]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [2380.0, 1691.0, 2420.0, 2698.0, 2183.0, 2873.0, 2112.0, 3007.0, 1784.0, 2883.0]}, "iteration_timing_avg": 0.48770147058823515} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-false_te-false.json index 681919dd63..e3733adeb7 100644 --- a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-false_te-false.json +++ 
b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-false_te-false.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.46209, 10.46586, 10.47036, 10.48285, 10.46953, 10.4551, 10.4144, 10.27757, 10.15408, 9.98652]}, "num-zeros": {"start_step": 0, "end_step": 34, "step_interval": 5, "values": [19468.0, 20366.0, 23078.0, 23209.0, 20501.0, 21956.0, 23051.0]}, "iteration_timing_avg": 0.48852117647058824} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.46209, 10.46586, 10.47036, 10.48285, 10.46953, 10.4551, 10.4144, 10.27757, 10.15408, 9.98652]}, "num-zeros": {"start_step": 0, "end_step": 34, "step_interval": 5, "values": [19468.0, 20366.0, 23078.0, 23209.0, 20501.0, 21956.0, 23051.0]}, "iteration_timing_avg": 0.47122588235294105} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-true_te-false.json index 5022434376..2936e747d2 100644 --- a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-true_te-false.json +++ b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-true_te-false.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.4791, 10.47202, 10.4682, 10.45128, 10.42934, 10.35805, 10.16903, 10.0907, 9.91791, 9.7432]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [2250.0, 1699.0, 2376.0, 2808.0, 2117.0, 2783.0, 2170.0, 2896.0, 1835.0, 2867.0]}, "iteration_timing_avg": 0.63432} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.4791, 10.47202, 10.4682, 10.45128, 10.42934, 10.35805, 10.16903, 10.0907, 9.91791, 9.7432]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [2250.0, 1699.0, 2376.0, 2808.0, 2117.0, 2783.0, 2170.0, 2896.0, 1835.0, 2867.0]}, "iteration_timing_avg": 0.6237708823529412} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request-resume_16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request-resume_16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json index 330e0b9c3b..5d41fc6f1c 100644 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request-resume_16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request-resume_16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [10.79022, 10.84034, 10.85603, 10.8232, 10.83355, 10.78173, 10.59641, 10.58331, 10.49545, 10.22799, 10.1995, 9.94815, 9.94997, 9.91997, 9.79865, 9.25224, 9.61409, 9.19153, 9.46281, 9.62472]}, "num-zeros": {"start_step": 0, "end_step": 83, "step_interval": 5, "values": [2085.0, 2613.0, 2387.0, 2215.0, 2074.0, 2039.0, 2766.0, 2722.0, 2763.0, 2395.0, 2859.0, 3089.0, 3405.0, 2982.0, 3134.0, 
2896.0, 3986.0]}, "iteration_timing_avg": 0.057955522388059705} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [10.79022, 10.84034, 10.85603, 10.8232, 10.83355, 10.78173, 10.59641, 10.58331, 10.49545, 10.22799, 10.1995, 9.94815, 9.94997, 9.91997, 9.79865, 9.25224, 9.61409, 9.19153, 9.46281, 9.62472]}, "num-zeros": {"start_step": 0, "end_step": 83, "step_interval": 5, "values": [2085.0, 2613.0, 2387.0, 2215.0, 2074.0, 2039.0, 2766.0, 2722.0, 2763.0, 2395.0, 2859.0, 3089.0, 3405.0, 2982.0, 3134.0, 2896.0, 3986.0]}, "iteration_timing_avg": 0.06181014925373134} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-false_te-false.json new file mode 100644 index 0000000000..2b13d0e4e2 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.83801, 10.8696, 10.87494, 10.85972, 10.85916, 10.81678, 10.65633, 10.6236, 10.52854, 10.29768]}, "num-zeros": {"start_step": 0, "end_step": 32, "step_interval": 5, "values": [1227.0, 1343.0, 1547.0, 1357.0, 1571.0, 1230.0, 1219.0]}, "iteration_timing_avg": 0.04080235294117647} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-uniform-full-recompute_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-uniform-full-recompute_mcore-true_te-false.json new file mode 100644 index 0000000000..b68287b6eb --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-uniform-full-recompute_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.89293, 10.8995, 10.87875, 10.855, 10.73496, 10.63535, 10.1566, 10.24211, 10.15574, 9.82117]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1653.0, 1779.0, 1911.0, 1928.0, 1880.0, 1881.0, 1618.0, 1983.0, 2375.0, 2352.0]}, "iteration_timing_avg": 0.06516882352941178} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings_mcore-true_te-false.json new file mode 100644 index 0000000000..2dcc249220 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85699, 10.89518, 10.87243, 10.82432, 10.68786, 10.58313, 10.08482, 10.18068, 10.10597, 9.75607]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1858.0, 1946.0, 2096.0, 1900.0, 2011.0, 1803.0, 
1737.0, 2092.0, 2335.0, 2201.0]}, "iteration_timing_avg": 0.07560441176470588} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-disable-bias-linear_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-disable-bias-linear_mcore-true_te-false.json new file mode 100644 index 0000000000..018a6ecd39 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-disable-bias-linear_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85535, 10.89042, 10.88142, 10.82973, 10.70858, 10.61199, 10.1184, 10.22418, 10.13702, 9.80781]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1629.0, 1692.0, 1882.0, 1929.0, 1936.0, 1669.0, 1603.0, 1903.0, 2128.0, 2278.0]}, "iteration_timing_avg": 0.0864920588235294} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-false_te-false_vp-1.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-false_te-false_vp-1.json new file mode 100644 index 0000000000..7dd1291c75 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-false_te-false_vp-1.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.8214, 10.8607, 10.8735, 10.85187, 10.84091, 10.80629, 10.6169, 10.59573, 10.50423, 10.22237]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2381.0, 2498.0, 2552.0, 2166.0, 2258.0, 2542.0, 2425.0]}, "iteration_timing_avg": 0.08087911764705882} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-false_te-false_vp-1.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-false_te-false_vp-1.json new file mode 100644 index 0000000000..a2df49d42a --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-false_te-false_vp-1.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.8214, 10.8607, 10.8735, 10.85187, 10.84091, 10.80629, 10.6169, 10.59573, 10.50423, 10.22237]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2381.0, 2498.0, 2552.0, 2166.0, 2258.0, 2542.0, 2425.0]}, "iteration_timing_avg": 0.07611323529411766} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-sequence-parallel_mcore-true_te-false.json 
b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-sequence-parallel_mcore-true_te-false.json new file mode 100644 index 0000000000..e4c1262364 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-sequence-parallel_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85534, 10.88947, 10.8806, 10.8283, 10.70687, 10.60921, 10.11533, 10.22106, 10.13408, 9.80477]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1669.0, 1816.0, 1897.0, 1831.0, 1824.0, 1649.0, 1484.0, 1877.0, 2140.0, 2202.0]}, "iteration_timing_avg": 0.0912420588235294} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-swiglu_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-swiglu_mcore-true_te-false.json new file mode 100644 index 0000000000..6775db704b --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-swiglu_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.78152, 10.8477, 10.85991, 10.80229, 10.72398, 10.64556, 10.25979, 10.36953, 10.30726, 9.969]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [2441.0, 2962.0, 2986.0, 2963.0, 2701.0, 2657.0, 2300.0, 2619.0, 2655.0, 2484.0]}, "iteration_timing_avg": 0.09503617647058824} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-untie-embeddings-and-outputs_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-untie-embeddings-and-outputs_mcore-true_te-false.json new file mode 100644 index 0000000000..cc1244e378 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-untie-embeddings-and-outputs_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.91778, 10.93688, 10.92414, 10.85264, 10.74695, 10.66448, 10.16759, 10.27157, 10.17695, 9.86116]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [22728092.0, 23020904.0, 22500632.0, 22830582.0, 22739828.0, 22547742.0, 22955712.0, 22588520.0, 22658932.0, 22885368.0]}, "iteration_timing_avg": 0.09069441176470588} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false_vp-1.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false_vp-1.json index ad49a6aa83..61d841b3d7 100644 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false_vp-1.json +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false_vp-1.json 
@@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.8214, 10.8607, 10.8735, 10.85187, 10.84091, 10.80628, 10.6169, 10.59573, 10.50423, 10.22238]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2348.0, 2524.0, 2517.0, 2205.0, 2198.0, 2558.0, 2398.0]}, "iteration_timing_avg": 0.07661735294117648} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.8214, 10.8607, 10.8735, 10.85187, 10.84091, 10.80628, 10.6169, 10.59573, 10.50423, 10.22238]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2348.0, 2524.0, 2517.0, 2205.0, 2198.0, 2558.0, 2398.0]}, "iteration_timing_avg": 0.07500764705882351} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false_vp-1.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false_vp-1.json index f2b584f1a7..a99307432e 100644 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false_vp-1.json +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false_vp-1.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84693, 10.89428, 10.88918, 10.82635, 10.70816, 10.61006, 10.11963, 10.22999, 10.15774, 9.83337]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1636.0, 1747.0, 1846.0, 1868.0, 1856.0, 1652.0, 1638.0, 1903.0, 2315.0, 2381.0]}, "iteration_timing_avg": 0.07899852941176469} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84693, 10.89428, 10.88918, 10.82635, 10.70816, 10.61006, 10.11963, 10.22999, 10.15774, 9.83337]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1636.0, 1747.0, 1846.0, 1868.0, 1856.0, 1652.0, 1638.0, 1903.0, 2315.0, 2381.0]}, "iteration_timing_avg": 0.08791117647058823} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-groupedgemm_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-groupedgemm_mcore-true_te-false.json new file mode 100644 index 0000000000..f464650d3b --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-groupedgemm_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.80426, 10.84849, 10.86146, 10.81012, 10.72201, 10.64589, 10.2092, 10.32252, 10.23908, 9.92465]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16350.0, 19608.0, 19689.0, 19043.0, 17602.0, 17956.0, 15632.0, 18288.0, 18606.0, 19277.0]}, "iteration_timing_avg": 0.13919470588235297} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-top2router_mcore-true_te-false.json 
b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-top2router_mcore-true_te-false.json new file mode 100644 index 0000000000..c3f6400d8c --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-top2router_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.78922, 10.84167, 10.85605, 10.78017, 10.65475, 10.56494, 10.04887, 10.17872, 10.08664, 9.73742]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [62363.0, 65732.0, 66174.0, 65596.0, 64478.0, 64769.0, 63854.0, 66376.0, 67110.0, 67650.0]}, "iteration_timing_avg": 0.21506794117647057} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel_mcore-true_te-false.json new file mode 100644 index 0000000000..f58d4c4ceb --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79066, 10.83467, 10.85342, 10.77851, 10.70005, 10.61316, 10.15957, 10.27971, 10.19511, 9.87028]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16055.0, 19166.0, 19161.0, 18797.0, 17405.0, 17721.0, 15678.0, 18223.0, 18580.0, 19742.0]}, "iteration_timing_avg": 0.20099058823529406} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-false.json index 9f7df4510a..a465e34711 100644 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-false.json +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-false.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85706, 10.8832, 10.88511, 10.87562, 10.8708, 10.83108, 10.65065, 10.63723, 10.53201, 10.25681]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2479.0, 2534.0, 2786.0, 2310.0, 2385.0, 2586.0, 2472.0]}, "iteration_timing_avg": 0.0920511764705882} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85706, 10.8832, 10.88511, 10.87562, 10.8708, 10.83108, 10.65065, 10.63723, 10.53201, 10.25681]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2479.0, 2534.0, 2786.0, 2310.0, 2385.0, 2586.0, 2472.0]}, "iteration_timing_avg": 0.09594764705882353} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-true.json 
b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-true.json index 4b0cfd6b44..c218a0ad40 100644 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-true.json +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-true.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85899, 10.88286, 10.87687, 10.82429, 10.69664, 10.60784, 10.11662, 10.2347, 10.14673, 9.82429]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1627.0, 1874.0, 1894.0, 1862.0, 1901.0, 1649.0, 1553.0, 1949.0, 2281.0, 2225.0]}, "iteration_timing_avg": 0.09437176470588234} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85899, 10.88286, 10.87687, 10.82429, 10.69664, 10.60784, 10.11662, 10.2347, 10.14673, 9.82429]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1627.0, 1874.0, 1894.0, 1862.0, 1901.0, 1649.0, 1553.0, 1949.0, 2281.0, 2225.0]}, "iteration_timing_avg": 0.10429970588235296} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-true_te-false.json index 92e1f21efc..79db29b177 100644 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-true_te-false.json +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-true_te-false.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.86873, 10.891, 10.89716, 10.84022, 10.70435, 10.61599, 10.11661, 10.23183, 10.14875, 9.82429]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1619.0, 1839.0, 1712.0, 1853.0, 1810.0, 1682.0, 1567.0, 1997.0, 2186.0, 2376.0]}, "iteration_timing_avg": 0.0935938235294118} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.86873, 10.891, 10.89716, 10.84022, 10.70435, 10.61599, 10.11661, 10.23183, 10.14875, 9.82429]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1619.0, 1839.0, 1712.0, 1853.0, 1810.0, 1682.0, 1567.0, 1997.0, 2186.0, 2376.0]}, "iteration_timing_avg": 0.1169185294117647} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-false_te-false.json new file mode 100644 index 0000000000..ba026bbe85 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": 
[10.84879, 10.88953, 10.88082, 10.88243, 10.86947, 10.83539, 10.64785, 10.63863, 10.52242, 10.23812]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2301.0, 2328.0, 2414.0, 1973.0, 2168.0, 2471.0, 2419.0]}, "iteration_timing_avg": 0.1338870588235294} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-false_te-false.json new file mode 100644 index 0000000000..8b9cb738c6 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84879, 10.88953, 10.88082, 10.88243, 10.86947, 10.83539, 10.64785, 10.63863, 10.52242, 10.23812]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2301.0, 2328.0, 2414.0, 1973.0, 2168.0, 2471.0, 2419.0]}, "iteration_timing_avg": 0.13206588235294117} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/t5_220m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-1_mcore-true_te-true_vp-1.json b/tests/functional_tests/test_results/jet/t5_220m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-1_mcore-true_te-true_vp-1.json index 8257f4c707..5b613dea44 100644 --- a/tests/functional_tests/test_results/jet/t5_220m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-1_mcore-true_te-true_vp-1.json +++ b/tests/functional_tests/test_results/jet/t5_220m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-1_mcore-true_te-true_vp-1.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [10.34848, 9.45337, 8.89369, 8.56467, 8.28131, 8.12832, 7.82238, 7.55462, 7.42172, 7.28716, 7.32811, 7.22045, 7.11648, 7.03859, 6.87728, 6.94356, 6.94705, 7.02828, 6.71597, 6.9486]}, "num-zeros": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [43307.0, 40999.0, 44043.0, 41749.0, 44811.0, 44001.0, 41304.0, 42490.0, 44698.0, 43956.0, 41137.0, 43230.0, 39726.0, 45427.0, 43358.0, 43930.0, 45426.0, 45701.0, 46301.0, 44734.0]}, "iteration_timing_avg": 0.1228444776119403} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [10.34848, 9.45337, 8.89369, 8.56467, 8.28131, 8.12832, 7.82238, 7.55462, 7.42172, 7.28716, 7.32811, 7.22045, 7.11648, 7.03859, 6.87728, 6.94356, 6.94705, 7.02828, 6.71597, 6.9486]}, "num-zeros": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [43307.0, 40999.0, 44043.0, 41749.0, 44811.0, 44001.0, 41304.0, 42490.0, 44698.0, 43956.0, 41137.0, 43230.0, 39726.0, 45427.0, 43358.0, 43930.0, 45426.0, 45701.0, 46301.0, 44734.0]}, "iteration_timing_avg": 0.12808164179104478} \ No newline at end of file From f89f388639cff4566bed1279616edaedf00bd8d6 Mon Sep 17 00:00:00 2001 From: Alexandros Koumparoulis Date: Mon, 5 Feb 2024 23:04:02 -0800 Subject: [PATCH 255/296] MoE argument sanity checks --- megatron/core/transformer/moe/experts.py | 1 - megatron/core/transformer/moe/moe_layer.py | 2 ++ megatron/core/transformer/moe/token_dispatcher.py | 2 ++ 
megatron/core/transformer/transformer_config.py | 3 +++ 4 files changed, 7 insertions(+), 1 deletion(-) diff --git a/megatron/core/transformer/moe/experts.py b/megatron/core/transformer/moe/experts.py index b7c4118d49..1f0ea46cb5 100644 --- a/megatron/core/transformer/moe/experts.py +++ b/megatron/core/transformer/moe/experts.py @@ -30,7 +30,6 @@ def __init__(self, num_local_experts: int, config: TransformerConfig): super().__init__(config=config) self.config: TransformerConfig = config self.num_local_experts = num_local_experts - gg.assert_grouped_gemm_is_available() assert ( config.add_bias_linear == False diff --git a/megatron/core/transformer/moe/moe_layer.py b/megatron/core/transformer/moe/moe_layer.py index fe89d64766..42cadb3428 100644 --- a/megatron/core/transformer/moe/moe_layer.py +++ b/megatron/core/transformer/moe/moe_layer.py @@ -24,6 +24,7 @@ def __init__(self, config: TransformerConfig): super(BaseMoELayer, self).__init__(config) self.config = config self.expert_parallel_size = parallel_state.get_expert_model_parallel_world_size() + assert self.expert_parallel_size > 0, "Expected positive expert parallel size" assert self.config.num_moe_experts % self.expert_parallel_size == 0 self.num_local_experts = self.config.num_moe_experts // self.expert_parallel_size local_expert_indices_offset = ( @@ -32,6 +33,7 @@ def __init__(self, config: TransformerConfig): self.local_expert_indices = [ local_expert_indices_offset + i for i in range(self.num_local_experts) ] + assert all(map(lambda x: x < self.config.num_moe_experts, self.local_expert_indices)) self.router = None self.experts = None self.token_dispatcher = None diff --git a/megatron/core/transformer/moe/token_dispatcher.py b/megatron/core/transformer/moe/token_dispatcher.py index 1b7857b6b2..e99c40fbde 100644 --- a/megatron/core/transformer/moe/token_dispatcher.py +++ b/megatron/core/transformer/moe/token_dispatcher.py @@ -66,7 +66,9 @@ def __init__( """ super().__init__(config=config) self.num_local_experts = num_local_experts + assert self.num_local_experts > 0, "Expected at least one expert" self.local_expert_indices = local_expert_indices + assert len(self.local_expert_indices) > 0, "Expected at least one local expert index" self.router_topk = config.moe_router_topk self.add_bias = config.add_bias_linear diff --git a/megatron/core/transformer/transformer_config.py b/megatron/core/transformer/transformer_config.py index 988c7212f3..1e7c16f1ff 100644 --- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -171,6 +171,9 @@ def __post_init__(self): if self.expert_model_parallel_size > 1 and self.num_moe_experts is None: raise ValueError(f'num_moe_experts must be non None to use expert-parallel.') + if self.num_moe_experts is not None and self.num_moe_experts <= 0: + raise ValueError(f'num_moe_experts must be positive.') + if self.cpu_offloading_num_layers < 0 or self.cpu_offloading_num_layers >= self.num_layers: raise ValueError( f'CPU offloading can be done only for layers less than {self.num_layers}' From f6995e5c9bbfda37bea19a86d2c44e7b60612f64 Mon Sep 17 00:00:00 2001 From: Xue Huang Date: Tue, 6 Feb 2024 09:59:29 -0800 Subject: [PATCH 256/296] Add add_qkv_bias config --- megatron/arguments.py | 3 +++ megatron/checkpointing.py | 1 + megatron/core/transformer/attention.py | 2 +- megatron/core/transformer/transformer_config.py | 2 ++ megatron/model/transformer.py | 2 +- 5 files changed, 8 insertions(+), 2 deletions(-) diff --git a/megatron/arguments.py
b/megatron/arguments.py index 68727010b3..51406f9594 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -927,6 +927,9 @@ def _add_training_args(parser): group.add_argument('--disable-bias-linear', action='store_false', help='Disable bias in the linear layers', dest='add_bias_linear') + group.add_argument('--add-qkv-bias', action='store_true', + help='Enable bias only in the QKV linear layers', + dest='add_qkv_bias') group.add_argument('--optimizer', type=str, default='adam', choices=['adam', 'sgd'], help='Optimizer function') diff --git a/megatron/checkpointing.py b/megatron/checkpointing.py index f181794b46..d85ae25e4b 100644 --- a/megatron/checkpointing.py +++ b/megatron/checkpointing.py @@ -507,6 +507,7 @@ def _set_arg(arg_name, old_arg_name=None, force=False): _set_arg('use_rotary_position_embeddings', force=True) _set_arg('rotary_percent', force=True) _set_arg('add_bias_linear', force=True) + _set_arg('add_qkv_bias', force=True) _set_arg('swiglu', force=True) _set_arg('untie_embeddings_and_output_weights', force=True) _set_arg('apply_layernorm_1p', force=True) diff --git a/megatron/core/transformer/attention.py b/megatron/core/transformer/attention.py index d677003c50..1d5fbbff79 100644 --- a/megatron/core/transformer/attention.py +++ b/megatron/core/transformer/attention.py @@ -353,7 +353,7 @@ def __init__( config=self.config, init_method=self.config.init_method, gather_output=False, - bias=self.config.add_bias_linear, + bias=self.config.add_bias_linear or self.config.add_qkv_bias, skip_bias_add=False, is_expert=False, tp_comm_buffer_name='qkv', diff --git a/megatron/core/transformer/transformer_config.py b/megatron/core/transformer/transformer_config.py index 988c7212f3..d0eac5ea26 100644 --- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -29,6 +29,7 @@ class TransformerConfig(ModelParallelConfig): layernorm_epsilon (float): Layernorm epsilon. Defaults to 1e-5. layernorm_zero_centered_gamma (bool): if set to 'True', the LayerNorm is adjusted to center the gamma values around 0. This improves numerical stability. Defaults to False. add_bias_linear (bool): Include a bias term in all linear layers (QKV projections, after core attention, and two in MLP layer). Default is True. + add_qkv_bias (bool): Add a bias term only for QKV projections. Default is False. gated_linear_unit (bool): Use a gated linear unit for the first linear layer in the MLP. Defaults to False. activation_func (Callable): Activation function to use for the non-linearity in the MLP. Defaults to F.gelu. num_moe_experts (int): Number of experts to use for MoE layer. When set, it replaces MLP with MoE layer. Defaults to None (no MoE). 
@@ -81,6 +82,7 @@ class TransformerConfig(ModelParallelConfig): layernorm_epsilon: float = 1e-5 layernorm_zero_centered_gamma: bool = False add_bias_linear: bool = True + add_qkv_bias: bool = False gated_linear_unit: bool = False activation_func: Callable = F.gelu num_moe_experts: int = None diff --git a/megatron/model/transformer.py b/megatron/model/transformer.py index c4a221fe9a..c90307f0ce 100644 --- a/megatron/model/transformer.py +++ b/megatron/model/transformer.py @@ -556,7 +556,7 @@ def __init__(self, config, layer_number, query_projection_size + 2 * kv_projection_size, config=config, init_method=config.init_method, - bias=args.add_bias_linear, + bias=args.add_bias_linear or args.add_qkv_bias, gather_output=False) else: assert attention_type == AttnType.cross_attn From c8f50b4c829ba0612060060af307a08051f82287 Mon Sep 17 00:00:00 2001 From: Maanu Grover Date: Tue, 6 Feb 2024 11:03:43 -0800 Subject: [PATCH 257/296] Minor fixes for JET CI --- .gitlab-ci.yml | 16 ------- jet-tests.yml | 13 +++--- .../python_test_utils/jet_test_pipeline.py | 45 +++++++++++-------- 3 files changed, 35 insertions(+), 39 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 3f218047fd..f1f9117af1 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -159,19 +159,3 @@ train.retro_core.tp1_pp1_1node_50steps: MAX_STEPS: 50 TIME_LIMIT: "20:00" TEST_LEVEL: MONTHLY_TESTS - -cleanup.selene: - tags: - - ssh_selene_runner - stage: cleanup - variables: - <<: [*VARS] - script: - - set +e - - NUM_CLEANUP=`find ${SELENE_ADLR_CI_PATH}/* -type d -ctime +20 | grep -v data | wc -l` - - find ${SELENE_ADLR_CI_PATH}/* -type d -ctime +20 | grep -v data | xargs rm -rf - - find ${SELENE_ADLR_CI_PATH}/* -type d -name "checkpoints" -ctime +2 | grep -v data | xargs rm -rf - - echo "Finished cleaning $NUM_CLEANUP directories older than 20 days everything in Selene" - allow_failure: true - rules: - - when: always diff --git a/jet-tests.yml b/jet-tests.yml index 45085451eb..8bba162ae8 100644 --- a/jet-tests.yml +++ b/jet-tests.yml @@ -1,8 +1,9 @@ .jet_common: stage: jet rules: - - if: $CI_PIPELINE_SOURCE == 'merge_request_event' && ( $CI_MERGE_REQUEST_APPROVED || $CI_MERGE_REQUEST_LABELS =~ "READY FOR REVIEW" ) - - if: $JET_CUSTOM_FILTER != "" && $CI_PIPELINE_SOURCE != 'merge_request_event' && $CI_PIPELINE_SOURCE != 'schedule' + - if: $CI_PIPELINE_SOURCE == "merge_request_event" && $CI_MERGE_REQUEST_APPROVED + - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && $CI_MERGE_REQUEST_LABELS =~ /READY FOR REVIEW/' + - if: $JET_CUSTOM_FILTER != "" && $CI_PIPELINE_SOURCE != 'merge_request_event' - when: never include: @@ -19,7 +20,7 @@ jet-setup: - | if [[ $CI_PIPELINE_SOURCE == "merge_request_event" ]] && [[ $CI_MERGE_REQUEST_APPROVED || $CI_MERGE_REQUEST_LABELS =~ "READY FOR REVIEW" ]]; then JET_FILTER="type == 'build' or 'merge-request' in spec.scope" - elif [[ -n $JET_CUSTOM_FILTER && $CI_PIPELINE_SOURCE != 'merge_request_event' && $CI_PIPELINE_SOURCE != 'schedule' ]]; then + elif [[ -n $JET_CUSTOM_FILTER && $CI_PIPELINE_SOURCE != 'merge_request_event' ]]; then JET_FILTER=$JET_CUSTOM_FILTER else JET_FILTER="False" @@ -73,9 +74,11 @@ jet-functional-results: - python -m pip install -U --no-cache-dir prettytable - python tests/functional_tests/python_test_utils/jet_test_pipeline.py ${CI_PIPELINE_ID} --test exit rules: - - if: $CI_PIPELINE_SOURCE == 'merge_request_event' && ( $CI_MERGE_REQUEST_APPROVED || $CI_MERGE_REQUEST_LABELS =~ "READY FOR REVIEW" ) + - if: $CI_PIPELINE_SOURCE == "merge_request_event" && 
$CI_MERGE_REQUEST_APPROVED when: always - - if: $JET_CUSTOM_FILTER != "" && $CI_PIPELINE_SOURCE != 'merge_request_event' && $CI_PIPELINE_SOURCE != 'schedule' + - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && $CI_MERGE_REQUEST_LABELS =~ /READY FOR REVIEW/' + when: always + - if: $JET_CUSTOM_FILTER != "" && $CI_PIPELINE_SOURCE != 'merge_request_event' when: always - when: never diff --git a/tests/functional_tests/python_test_utils/jet_test_pipeline.py b/tests/functional_tests/python_test_utils/jet_test_pipeline.py index 9b20fd59bc..ce5957dd20 100644 --- a/tests/functional_tests/python_test_utils/jet_test_pipeline.py +++ b/tests/functional_tests/python_test_utils/jet_test_pipeline.py @@ -7,10 +7,11 @@ def select_asset(result_obj, prefix): if result_obj['obj_ci']['s_job_status'] != "skipped": - assets = result_obj['nested_assets'] - for asset in assets: - if asset['s_name'].startswith(prefix): - return asset['s_url'] + assets = result_obj.get('nested_assets', None) + if assets is not None: + for asset in assets: + if asset['s_name'].startswith(prefix): + return asset['s_url'] return 'not found' @@ -25,30 +26,37 @@ def query_results(triggering_pipeline_id): ) return service.query(query, flatten=False) +def dedupe_results(results): + deduped = {} + for result in results: + key = result['obj_workload']['s_key'] + if key not in deduped: + deduped[key] = result + else: + if result['ts_created'] > deduped[key]['ts_created']: + deduped[key] = result + + return deduped.values() def check_exitcodes(results): from prettytable import PrettyTable - all_keys = [] - exit_codes = {} - log_urls = {} - names = {} + exit_codes = [] + log_urls = [] + names = [] for result in results: - key = result['obj_workload']['s_key'] - all_keys.append(key) - - exit_codes[key] = result.get('l_exit_code', -1) - log_urls[key] = select_asset(result, 'output_script-0.log') + exit_codes.append(result.get('l_exit_code', -1)) + log_urls.append(select_asset(result, 'output_script-0.log')) name = result['obj_workload']['s_key'].lstrip('recipe/') remove_substr = result['obj_workload']['obj_spec']['s_build'] + \ '_' + result['obj_workload']['obj_spec']['s_scope'] - names[key] = ''.join(name.split(remove_substr)) + names.append(''.join(name.split(remove_substr))) table = PrettyTable() - table.add_column("Job Key", [names[k] for k in all_keys]) - table.add_column("Exit Code", [exit_codes[k] for k in all_keys]) - table.add_column("Log URL", [log_urls[k] for k in all_keys]) - exit_codes_good = [ec == 0 for ec in exit_codes.values()] + table.add_column("Job Key", names) + table.add_column("Exit Code", exit_codes) + table.add_column("Log URL", log_urls) + exit_codes_good = [ec == 0 for ec in exit_codes] if exit_codes_good == []: raise Exception("Can't find any jobs, something went wrong.\n" + table.get_string()) if exit_codes_good == [] or not all(exit_codes_good): @@ -114,6 +122,7 @@ def fetch_metrics_files(results, save_dir): args = parser.parse_args() results = query_results(args.pipeline_id) + results = dedupe_results(results) if args.download_metrics_dir: fetch_metrics_files(results, args.download_metrics_dir) From 9760e119b5f4d81187eb314dde28d4bdcb6c2e8c Mon Sep 17 00:00:00 2001 From: Joel Lamy-Poirier Date: Tue, 6 Feb 2024 17:32:39 -0500 Subject: [PATCH 258/296] Tokenizer fix --- megatron/core/datasets/gpt_dataset.py | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/megatron/core/datasets/gpt_dataset.py b/megatron/core/datasets/gpt_dataset.py index b12fdedf8e..ff5216373a 100644 --- 
a/megatron/core/datasets/gpt_dataset.py +++ b/megatron/core/datasets/gpt_dataset.py @@ -72,15 +72,17 @@ def __init__( self.tokenizer = get_tokenizer() self.np_rng = numpy.random.RandomState(seed=self.config.random_seed) # rng state for FIM - self.fim_rate = self.args.fim_rate - self.fim_spm_rate = self.args.fim_spm_rate - self.fragment_fim_rate = self.args.fragment_fim_rate - self.fim_split_sample = self.tokenizer.vocab[self.args.fim_split_sample] if self.args.fim_split_sample is not None else None - - try: - self.suffix_tok_id, self.prefix_tok_id, self.middle_tok_id, self.pad_tok_id = (self.tokenizer.special_tokens[tok] for tok in [FIM_SUFFIX, FIM_PREFIX, FIM_MIDDLE, FIM_PAD]) - except KeyError: - self.suffix_tok_id, self.prefix_tok_id, self.middle_tok_id, self.pad_tok_id = (self.tokenizer.vocab[tok] for tok in [FIM_SUFFIX, FIM_PREFIX, FIM_MIDDLE, FIM_PAD]) + self.use_fim = self.args.fim_rate!=0 + if self.use_fim: + self.fim_rate = self.args.fim_rate + self.fim_spm_rate = self.args.fim_spm_rate + self.fragment_fim_rate = self.args.fragment_fim_rate + self.fim_split_sample = self.tokenizer.vocab[self.args.fim_split_sample] if self.args.fim_split_sample is not None else None + + try: + self.suffix_tok_id, self.prefix_tok_id, self.middle_tok_id, self.pad_tok_id = (self.tokenizer.special_tokens[tok] for tok in [FIM_SUFFIX, FIM_PREFIX, FIM_MIDDLE, FIM_PAD]) + except KeyError: + self.suffix_tok_id, self.prefix_tok_id, self.middle_tok_id, self.pad_tok_id = (self.tokenizer.vocab[tok] for tok in [FIM_SUFFIX, FIM_PREFIX, FIM_MIDDLE, FIM_PAD]) def _finalize(self) -> None: """Abstract method implementation @@ -216,7 +218,7 @@ def _query_document_sample_shuffle_indices( eod = self.tokenizer.eod segment_breaks = numpy.argwhere(sample == eod) # split sample by document - if self.fim_rate == 0: + if not self.use_fim: return ( numpy.array(sample, dtype=numpy.int64), numpy.array(document_ids, dtype=numpy.int64), From bb235cca3c1575ed08b438bca5b18719c4384dbe Mon Sep 17 00:00:00 2001 From: Alexandros Koumparoulis Date: Tue, 6 Feb 2024 15:12:10 -0800 Subject: [PATCH 259/296] Check if config has num_moe_experts --- megatron/core/pipeline_parallel/schedules.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/megatron/core/pipeline_parallel/schedules.py b/megatron/core/pipeline_parallel/schedules.py index 79939f3797..6dc4011fe2 100644 --- a/megatron/core/pipeline_parallel/schedules.py +++ b/megatron/core/pipeline_parallel/schedules.py @@ -210,7 +210,7 @@ def forward_step( # Set the loss scale for the auxiliary loss of the MoE layer. # Since we use a trick to do backward on the auxiliary loss, we need to set the scale explicitly. - if config.num_moe_experts is not None: + if hasattr(config, 'num_moe_experts') and config.num_moe_experts is not None: # Calculate the loss scale based on the grad_scale_func if available, else default to 1. 
loss_scale = ( config.grad_scale_func(torch.tensor(1.0, device=loss.device)) From 548e57a00d985c15d5c56fb4749656cf011be9ee Mon Sep 17 00:00:00 2001 From: Mikolaj Blaz Date: Tue, 6 Feb 2024 15:14:20 -0800 Subject: [PATCH 260/296] Add dist ckpt package docs for Sphinx documentation --- docs/source/api-guide/dist_checkpointing.rst | 38 +++++++++------ .../dist_checkpointing.strategies.rst | 5 ++ megatron/core/dist_checkpointing/core.py | 38 ++++++++++++++- .../core/dist_checkpointing/dict_utils.py | 47 ++++++++++++------- megatron/core/dist_checkpointing/mapping.py | 47 +++++++++++++++++-- megatron/core/dist_checkpointing/optimizer.py | 41 +++++++++++++++- .../core/dist_checkpointing/serialization.py | 42 ++++++++++++++++- .../dist_checkpointing/strategies/base.py | 15 ++++++ megatron/core/dist_checkpointing/utils.py | 43 +++++++++++++++++ 9 files changed, 278 insertions(+), 38 deletions(-) diff --git a/docs/source/api-guide/dist_checkpointing.rst b/docs/source/api-guide/dist_checkpointing.rst index 67c4f6f525..7e384a08a3 100644 --- a/docs/source/api-guide/dist_checkpointing.rst +++ b/docs/source/api-guide/dist_checkpointing.rst @@ -1,6 +1,15 @@ dist\_checkpointing package =========================== +A library for saving and loading the distributed checkpoints. +A "distributed checkpoint" can have various underlying formats (current default format is based on Zarr) +but has a distinctive property - the checkpoint saved in one parallel configuration (tensor/pipeline/data parallelism) +can be loaded in a different parallel configuration. + +Using the library requires defining sharded state_dict dictionaries with functions from *mapping* and *optimizer* modules. +Those state dicts can be saved or loaded with a *serialization* module using strategies from *strategies* module. + + Subpackages ----------- @@ -12,18 +21,10 @@ Subpackages Submodules ---------- -dist\_checkpointing.core module -------------------------------- - -.. automodule:: core.dist_checkpointing.core - :members: - :undoc-members: - :show-inheritance: - -dist\_checkpointing.dict\_utils module --------------------------------------- +dist\_checkpointing.serialization module +---------------------------------------- -.. automodule:: core.dist_checkpointing.dict_utils +.. automodule:: core.dist_checkpointing.serialization :members: :undoc-members: :show-inheritance: @@ -44,14 +45,23 @@ dist\_checkpointing.optimizer module :undoc-members: :show-inheritance: -dist\_checkpointing.serialization module ----------------------------------------- +dist\_checkpointing.core module +------------------------------- -.. automodule:: core.dist_checkpointing.serialization +.. automodule:: core.dist_checkpointing.core + :members: + :undoc-members: + :show-inheritance: + +dist\_checkpointing.dict\_utils module +-------------------------------------- + +.. automodule:: core.dist_checkpointing.dict_utils :members: :undoc-members: :show-inheritance: + dist\_checkpointing.utils module -------------------------------- diff --git a/docs/source/api-guide/dist_checkpointing.strategies.rst b/docs/source/api-guide/dist_checkpointing.strategies.rst index c18d2464c2..41e674c761 100644 --- a/docs/source/api-guide/dist_checkpointing.strategies.rst +++ b/docs/source/api-guide/dist_checkpointing.strategies.rst @@ -1,6 +1,11 @@ dist\_checkpointing.strategies package ====================================== +Package defining different checkpoint formats (backends) and saving/loading algorithms (strategies). 
+ +Strategies can be used for implementing new checkpoint formats or implementing new (more optimal for a given use case) ways of saving/loading of existing formats. +Strategies are passed to `dist_checkpointing.load` and `dist_checkpointing.save` functions and control the actual saving/loading procedure. + Submodules ---------- diff --git a/megatron/core/dist_checkpointing/core.py b/megatron/core/dist_checkpointing/core.py index f20a0c3a2d..50384e661b 100644 --- a/megatron/core/dist_checkpointing/core.py +++ b/megatron/core/dist_checkpointing/core.py @@ -1,5 +1,7 @@ # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. +""" Module for managing distributed checkpoints metadata. """ + import json from dataclasses import asdict, dataclass from pathlib import Path @@ -9,12 +11,21 @@ class CheckpointingException(Exception): + """ Base checkpointing related exception """ + pass @dataclass class CheckpointingConfig: - """ Documents backends used in the checkpoint. """ + """ Documents backends used in the checkpoint. + + Checkpoint config keeps track of formats used for storing the sharded tensors + (sharded_backend) and other objects (common_backend). + + Note that versioning is not for the checkpoint content (which is application specific), + but for the checkpoint format itself. + """ sharded_backend: str sharded_backend_version: int = 1 @@ -23,10 +34,26 @@ class CheckpointingConfig: def check_is_distributed_checkpoint(checkpoint_dir): + """ Checks if `metadata.json` exists in the checkpoint and is a valid config. + + Args: + checkpoint_dir: checkpoint directory + + Returns: + bool: True if `metadata.json` exists in the checkpoint and is a valid config. + """ return maybe_load_config(checkpoint_dir) is not None def maybe_load_config(checkpoint_dir: str) -> Optional[CheckpointingConfig]: + """ Returns checkpoint config if `checkpoint_dir` is a distributed checkpoint and None otherwise + + Args: + checkpoint_dir: checkpoint directory + + Returns: + CheckpointingConfig (optional): None if checkpoint is not a valid distributed checkpoint + """ config_path = Path(checkpoint_dir, CONFIG_FNAME) if not config_path.exists(): return None @@ -36,6 +63,15 @@ def maybe_load_config(checkpoint_dir: str) -> Optional[CheckpointingConfig]: def save_config(config: CheckpointingConfig, checkpoint_dir: str): + """ Save given config to checkpoint directory. + + Args: + config: checkpoint config + checkpoint_dir: checkpoint directory + + Returns: + None + """ config_path = Path(checkpoint_dir, CONFIG_FNAME) with config_path.open('w') as f: json.dump(asdict(config), f) diff --git a/megatron/core/dist_checkpointing/dict_utils.py b/megatron/core/dist_checkpointing/dict_utils.py index 493a61c91a..95591cd99e 100644 --- a/megatron/core/dist_checkpointing/dict_utils.py +++ b/megatron/core/dist_checkpointing/dict_utils.py @@ -1,6 +1,10 @@ # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. -""" Utilities for operating with dicts and lists. """ +""" Utilities for operating with dicts and lists. + +All functions in this module handle nesting of dicts and lists. +Other objects (e.g. tuples) are treated as atomic leaf types that cannot be traversed. +""" from collections import defaultdict from typing import Any, Callable, Iterable, Optional, Tuple, Union @@ -13,7 +17,7 @@ def extract_matching_values( ) -> Tuple[Union[dict, list], Union[dict, list]]: """ Return matching and nonmatching values. Keeps hierarchy. - Arguments: + Args: x (Union[dict, list]) : state dict to process. 
Top-level argument must be a dict or list predicate (object -> bool): determines matching values return_lists_as_dicts (bool): if True, matching lists will be turned @@ -60,6 +64,21 @@ def _set_elem(target, k, v): def diff(x1: Any, x2: Any, prefix: Tuple = ()) -> Tuple[list, list, list]: + """ Recursive diff of dicts. + + Args: + x1 (object): left dict + x2 (object): right dict + prefix (tuple): tracks recursive calls. Used for reporting differing keys. + + Returns: + Tuple[list, list, list]: tuple of: + - only_left: Prefixes present only in left dict + - only_right: Prefixes present only in right dict + - mismatch: values present in both dicts but not equal across dicts. + For tensors equality of all elems is checked. + Each element is a tuple (prefix, type of left value, type of right value). + """ mismatch = [] if isinstance(x1, dict) and isinstance(x2, dict): only_left = [prefix + (k,) for k in x1.keys() - x2.keys()] @@ -94,22 +113,8 @@ def diff(x1: Any, x2: Any, prefix: Tuple = ()) -> Tuple[list, list, list]: return only_left, only_right, mismatch -def inspect_keys_types(d: dict, prefix: Tuple = (), indent: int = 4): - print_indent = lambda: print(' ' * indent * len(prefix), end='') - for k, v in d.items(): - if isinstance(v, dict): - print_indent() - print(f'> {k}:') - inspect_keys_types(v, prefix + (k,), indent) - else: - print_indent() - if isinstance(v, torch.Tensor): - print(f'> {k}: {type(v)} of shape {v.shape}') - else: - print(f'> {k}: {type(v)}') - - def inspect_types(x: Any, prefix: Tuple = (), indent: int = 4): + """ Helper to print types of (nested) dict values. """ print_indent = lambda: print(' ' * indent * len(prefix), end='') if isinstance(x, dict): print() @@ -137,6 +142,7 @@ def inspect_types(x: Any, prefix: Tuple = (), indent: int = 4): def nested_values(x: Union[dict, list]): + """ Returns iterator over (nested) values of a given dict or list. """ x_iter = x.values() if isinstance(x, dict) else x for v in x_iter: if isinstance(v, (dict, list)): @@ -146,6 +152,7 @@ def nested_values(x: Union[dict, list]): def nested_items_iter(x: Union[dict, list]): + """ Returns iterator over (nested) tuples (container, key, value) of a given dict or list. """ x_iter = x.items() if isinstance(x, dict) else enumerate(x) for k, v in x_iter: if isinstance(v, (dict, list)): @@ -155,16 +162,19 @@ def nested_items_iter(x: Union[dict, list]): def dict_map(f: Callable, d: dict): + """ `map` equivalent for dicts. """ for sub_d, k, v in nested_items_iter(d): sub_d[k] = f(v) def dict_map_with_key(f: Callable, d: dict): + """ `map` equivalent for dicts with a function that accepts tuple (key, value). """ for sub_d, k, v in nested_items_iter(d): sub_d[k] = f(k, v) def dict_list_map_inplace(f: Callable, x: Union[dict, list]): + """ Maps dicts and lists *in-place* with a given function. """ if isinstance(x, dict): for k, v in x.items(): x[k] = dict_list_map_inplace(f, v) @@ -176,6 +186,7 @@ def dict_list_map_inplace(f: Callable, x: Union[dict, list]): def dict_list_map_outplace(f: Callable, x: Union[dict, list]): + """ Maps dicts and lists *out-of-place* with a given function. """ if isinstance(x, dict): return {k: dict_list_map_outplace(f, v) for k, v in x.items()} elif isinstance(x, list): @@ -185,6 +196,7 @@ def dict_list_map_outplace(f: Callable, x: Union[dict, list]): def merge(x1: dict, x2: dict, key: Tuple[str, ...] = ()): + """ Merges dicts and lists recursively. 
""" if isinstance(x1, dict) and isinstance(x2, dict): for k, v2 in x2.items(): if k not in x1: @@ -211,6 +223,7 @@ def map_reduce( value_fn: Callable = lambda x: x, reduce_fn: Callable = lambda x: x, ) -> dict: + """ Simple map-reduce implementation following `more_itertools.map_reduce` interface. """ res = defaultdict(list) for x in xs: res[key_fn(x)].append(value_fn(x)) diff --git a/megatron/core/dist_checkpointing/mapping.py b/megatron/core/dist_checkpointing/mapping.py index cb4c4d7a47..362ffd4a8e 100644 --- a/megatron/core/dist_checkpointing/mapping.py +++ b/megatron/core/dist_checkpointing/mapping.py @@ -1,6 +1,11 @@ # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. -""" Core library classes. """ +""" Core library classes for representing sharding of tensors and objects. + +The main expected usage is wrapping torch.Tensors in state dicts with +ShardedTensor class (mostly with the ShardedTensor.from_rank_offsets classmethod). +""" + import logging from dataclasses import dataclass, replace from itertools import chain @@ -172,7 +177,21 @@ def __str__(self): return f'{self.__class__.__name__}(key=\'{self.key}\')' -def is_main_replica(replica_id): +def is_main_replica(replica_id: ReplicaId): + """ Checks if given `replica_id` is considered as main. + + "Main" replica is: + - integer 0 + - or an iterable with all 0 elements + + It is the application responsibility to set correct replicas for sharded tensors. + + Args: + replica_id (Union[int, Tuple[int, ...]]): replica id + + Returns: + (bool): True for a "main" replica + """ if isinstance(replica_id, int): return replica_id == 0 return all(r == 0 for r in replica_id) @@ -259,6 +278,15 @@ def build(self): def apply_factories(sharded_state_dict: ShardedStateDict): + """ Turn ShardedTensorFactories into ShardedTensors *in-place*. + + Args: + sharded_state_dict (ShardedStateDict): state dict possibly containing ShardedTensorFactory objects + + Returns: + None: state dict is modified in place + """ + def apply(x): if isinstance(x, ShardedTensorFactory): x = x.build() @@ -267,7 +295,20 @@ def apply(x): dict_list_map_inplace(apply, sharded_state_dict) -def apply_factory_merges(x1: StateDict, x2: ShardedStateDict, key: Tuple[str, ...] = ()): +def apply_factory_merges( + x1: StateDict, x2: ShardedStateDict, key: Tuple[str, ...] = () +) -> StateDict: + """ Apply merges defined by ShardedTensorFactories *in-place*. + + Args: + x1 (StateDict): state dict loaded from the checkpoint + x2 (ShardedStateDict): subset of `x1` (in terms of dict keys) with ShardedTensorFactory + as (possibly nested) values that define how to merge objects from the `x1` state dict + key (Tuple[str, ...]): current key in a recursive call. Used only for reporting meaningful errors + + Returns: + StateDict: `x1` modified in-place + """ if isinstance(x2, ShardedTensorFactory): return x2.merge_fn(x1) diff --git a/megatron/core/dist_checkpointing/optimizer.py b/megatron/core/dist_checkpointing/optimizer.py index d1c698787c..bec174209e 100644 --- a/megatron/core/dist_checkpointing/optimizer.py +++ b/megatron/core/dist_checkpointing/optimizer.py @@ -1,6 +1,6 @@ # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. -""" Optimizer related helpers. """ +""" Helpers for defining sharding for optimizer states based on existing sharding for model parameters. 
""" import logging from copy import deepcopy @@ -20,7 +20,7 @@ ShardedTensorFactory, StateDict, ) -from .utils import extract_sharded_tensors, extract_sharded_tensors_and_factories +from .utils import extract_sharded_tensors_and_factories def get_optim_param_to_id_map(optim_params_iter: Iterable[torch.nn.Parameter]) -> Dict[int, int]: @@ -34,6 +34,17 @@ def get_optim_param_to_id_map(optim_params_iter: Iterable[torch.nn.Parameter]) - def get_param_id_to_sharded_param_map( model_sharded_state_dict: ShardedStateDict, optim_params_iter: Iterable[torch.nn.Parameter] ) -> Dict[int, Union[ShardedTensor, ShardedTensorFactory]]: + """ Generate mapping from optimizer state ids to model sharded parameters. + + Args: + model_sharded_state_dict: sharded state dict with all model sharded tensors (can have any structure) + optim_params_iter: iterable which iterates over model parameters tracked by the optimizer. + The iteration must be in the same order as in the optimizer parameters. + + Returns: + Dict[int, Union[ShardedTensor, ShardedTensorFactory]]: mapping from optimizer state ids + to model sharded parameters. + """ model_sharded_state_dict, _ = extract_sharded_tensors_and_factories(model_sharded_state_dict) id_to_sharded_param_map = {} param_to_id_map = get_optim_param_to_id_map(optim_params_iter) @@ -55,6 +66,16 @@ def get_param_id_to_sharded_param_map( def make_sharded_optimizer_tensor( model_param: Union[ShardedTensor, ShardedTensorFactory], optim_param: torch.Tensor, prefix: str ) -> Union[ShardedTensor, ShardedTensorFactory]: + """ Build a ShardedTensor or ShardedTensorFactory for optimizer param based on model param + + Args: + model_param (Union[ShardedTensor, ShardedTensorFactory]): model param + optim_param (torch.Tensor): corresponding optimizer param + prefix (str): optimizer prefix for the ShardedTensor or ShardedTensorFactory + + Returns: + Union[ShardedTensor, ShardedTensorFactory]: wrapped optimizer parameter + """ if isinstance(model_param, ShardedTensorFactory): return replace(model_param, key=f'{prefix}.{model_param.key}', data=optim_param) @@ -71,6 +92,22 @@ def optim_state_to_sharding_state( id_to_sharded_param_map: Dict[int, ShardedTensor], exclude_keys: Tuple[str] = (), ): + """ Turn optimizer state dict to sharded state dict based on model state dict *in-place*. + + Can be used to add sharding information to most common optimizer state dict. + Creates separate ShardedTensors for each key in `optim_state_dict['state']` + (e.g. for torch.optim.Adam there will be separate tensors for `exp_avg` and `exp_avg_sq`) + + Args: + optim_state_dict (StateDict): optimizer state dict with + state parameters under `state` key and group hyperparameters under `param_groups` -> `params` key. + id_to_sharded_param_map (Dict[int, ShardedTensor]): mapping from optimizer param ids to model sharded tensors. + Can be generated with `get_param_id_to_sharded_param_map` function + exclude_keys (Tuple[str]): optimizer state keys to exclude from the final state dict. + + Returns: + None: state dict is modified in place + """ sharded_state = {} for param_id, param_state in optim_state_dict['state'].items(): sharded_state[param_id] = {} diff --git a/megatron/core/dist_checkpointing/serialization.py b/megatron/core/dist_checkpointing/serialization.py index dfc710a559..96eb54b977 100644 --- a/megatron/core/dist_checkpointing/serialization.py +++ b/megatron/core/dist_checkpointing/serialization.py @@ -1,5 +1,12 @@ # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. 
+""" Entrypoints for saving and loading the distributed checkpoints. + +Functions `load` and `save` are equivalents of `torch.load` and `torch.save` +but expect torch.Tensors to be wrapped with classes from the `mapping module`. +Additionally, `load` expects the sharded state dict argument as a guidance for loading the sharded tensors. +""" + import logging import os from collections import Counter, defaultdict @@ -131,7 +138,15 @@ def _verify_checkpoint_and_load_strategy( # TODO: implement it as common torch strategy -def load_common_state_dict(checkpoint_dir: Path): +def load_common_state_dict(checkpoint_dir: Path) -> StateDict: + """ Load common (non-sharded) objects state dict from the checkpoint. + + Args: + checkpoint_dir (Path): checkpoint directory + + Returns: + StateDict: state dict with non-sharded objects from the checkpoint + """ load_path = Path(checkpoint_dir) / COMMON_STATE_FNAME try: return torch.load(load_path, map_location='cpu') @@ -143,6 +158,15 @@ def load_common_state_dict(checkpoint_dir: Path): def load_sharded_objects(sharded_state_dict: ShardedStateDict, checkpoint_dir: Path): + """ Replaces all ShardedObject from a given state dict with values loaded from the checkpoint. + + Args: + sharded_state_dict (ShardedStateDict): sharded state dict defining what objects should be loaded. + checkpoint_dir (Path): checkpoint directory + + Returns: + None: state dict is modified in place + """ sharded_objects, sharded_state_dict = extract_matching_values( sharded_state_dict, lambda v: isinstance(v, ShardedObject) ) @@ -292,6 +316,22 @@ def _extract_and_save_sharded_objects( def validate_sharding_integrity(sharded_tensors: Iterable[ShardedTensor]): + """ Validate if the ShardedTensors from multiple processes define correct sharding of a global tensor. + + Local ShardedTensors metadata is exchanged with `torch.distributed.all_gather_object` + and then process with global rank 0 checks if main replicas of the shards: + - cover the whole global tensors + - don't overlap + + Args: + sharded_tensors (Iterable[ShardedTensor]): sharded tensors local to this process + + Returns: + None + + Raises: + CheckpointingException for invalid access pattern + """ sharding = [ten.without_data() for ten in sharded_tensors] all_sharding = [None] * torch.distributed.get_world_size() torch.distributed.all_gather_object(all_sharding, sharding) diff --git a/megatron/core/dist_checkpointing/strategies/base.py b/megatron/core/dist_checkpointing/strategies/base.py index 3989ea74a2..3af945900f 100644 --- a/megatron/core/dist_checkpointing/strategies/base.py +++ b/megatron/core/dist_checkpointing/strategies/base.py @@ -1,5 +1,7 @@ # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. +""" Strategies base interfaces. """ + from abc import ABC, abstractmethod from collections import defaultdict from enum import Enum @@ -20,6 +22,7 @@ class StrategyAction(Enum): def get_default_strategy(action: StrategyAction, backend: str, version: int): + """ Retrieves a default strategy for a given action, backend and version. """ try: return default_strategies[action.value][(backend, version)] except KeyError as e: @@ -36,6 +39,8 @@ def get_default_strategy(action: StrategyAction, backend: str, version: int): class LoadStrategyBase(ABC): + """ Base class for a load strategy. Requires implementing checks for compatibility with a given checkpoint version. 
""" + @abstractmethod def check_backend_compatibility(self, loaded_version): raise NotImplementedError @@ -46,18 +51,24 @@ def check_version_compatibility(self, loaded_version): class SaveStrategyBase(ABC): + """ Base class for a save strategy. Requires defining a backend type and version of the saved format. """ + def __init__(self, backend: str, version: int): self.backend = backend self.version = version class LoadCommonStrategy(LoadStrategyBase): + """ Load strategy for common (non-sharded) objects """ + @abstractmethod def load(self, checkpoint_dir: Path): raise NotImplementedError class LoadShardedStrategy(LoadStrategyBase): + """ Load strategy for sharded tensors """ + @abstractmethod def load(self, sharded_state_dict: ShardedStateDict, checkpoint_dir: Path): raise NotImplementedError @@ -79,12 +90,16 @@ def load_tensors_metadata(self, checkpoint_dir: Path): class SaveCommonStrategy(SaveStrategyBase): + """ Save strategy for common (non-sharded) objects """ + @abstractmethod def save(self, common_state_dict: StateDict, checkpoint_dir: Path): raise NotImplementedError class SaveShardedStrategy(SaveStrategyBase): + """ Save strategy for sharded tensors """ + @abstractmethod def save(self, sharded_tensors: List[ShardedTensor], checkpoint_dir: Path): raise NotImplementedError diff --git a/megatron/core/dist_checkpointing/utils.py b/megatron/core/dist_checkpointing/utils.py index a234a4ced6..ad22fe77b9 100644 --- a/megatron/core/dist_checkpointing/utils.py +++ b/megatron/core/dist_checkpointing/utils.py @@ -1,5 +1,7 @@ # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. +""" Helpers for manipulating sharded tensors and sharded state dicts. """ + from typing import Dict, Tuple from .dict_utils import dict_list_map_inplace, extract_matching_values @@ -16,12 +18,32 @@ def extract_sharded_tensors( sharded_state_dict: ShardedStateDict, ) -> Tuple[ShardedStateDict, StateDict]: + """ Extract a dict consisting of only ShardedTensor objects from a given state dict with any objects. + + Args: + sharded_state_dict: state dict possibly containing ShardedTensor objects + + Returns: + Tuple[ShardedStateDict, StateDict]: tuple of: + - state dict with all ShardedTensor (keeping the original state dict structure) + - state dict with all objects other than ShardedTensor (keeping the original state dict structure) + """ return extract_matching_values(sharded_state_dict, lambda v: isinstance(v, ShardedTensor)) def extract_sharded_tensors_and_factories( sharded_state_dict: ShardedStateDict, ) -> Tuple[ShardedStateDict, StateDict]: + """ Extract a dict consisting of only ShardedTensor and ShardedTensorFactory objects from a given state dict with any objects. + + Args: + sharded_state_dict: state dict possibly containing ShardedTensor and ShardedTensorFactory objects + + Returns: + Tuple[ShardedStateDict, StateDict]: tuple of: + - state dict with all ShardedTensor and ShardedTensorFactory (keeping the original state dict structure) + - state dict with all other objects (keeping the original state dict structure) + """ return extract_matching_values( sharded_state_dict, lambda v: isinstance(v, (ShardedTensor, ShardedTensorFactory)) ) @@ -30,6 +52,17 @@ def extract_sharded_tensors_and_factories( def extract_sharded_tensors_or_nonpersistent( sharded_state_dict: ShardedStateDict, ) -> Tuple[ShardedStateDict, StateDict]: + """ Extract a dict consisting of only ShardedTensor, ShardedTensorFactory and LocalNonpersitentObject + objects from a given state dict with any objects. 
+ + Args: + sharded_state_dict: state dict possibly containing ShardedTensor, ShardedTensorFactory and LocalNonpersitentObject objects + + Returns: + Tuple[ShardedStateDict, StateDict]: tuple of: + - state dict with all ShardedTensor, ShardedTensorFactory and LocalNonpersitentObject (keeping the original state dict structure) + - state dict with all other objects (keeping the original state dict structure) + """ return extract_matching_values( sharded_state_dict, lambda v: isinstance(v, (ShardedTensor, LocalNonpersitentObject, ShardedTensorFactory)), @@ -37,6 +70,16 @@ def extract_sharded_tensors_or_nonpersistent( def add_prefix_for_sharding(sharded_state_dict: ShardedStateDict, prefix: str): + """ Prepend a given prefix to all ShardedTensor objects in a given state dict *in-place*. + + Args: + sharded_state_dict (ShardedStateDict): sharded state dict + prefix (str): prefix to be prepended + + Returns: + None: state dict is modified in-place + """ + def add_prefix(t): if isinstance(t, ShardedTensor): t.key = f'{prefix}.{t.key}' From 960c06b972fd7813d39eced3079a50038207bbcc Mon Sep 17 00:00:00 2001 From: Peter Dykas Date: Tue, 6 Feb 2024 15:28:06 -0800 Subject: [PATCH 261/296] Fix oob perf --- megatron/core/datasets/gpt_dataset.py | 12 ++++++++++++ megatron/core/tensor_parallel/layers.py | 3 --- pretrain_gpt.py | 1 + 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/megatron/core/datasets/gpt_dataset.py b/megatron/core/datasets/gpt_dataset.py index a8737a5e1f..a5c4083636 100644 --- a/megatron/core/datasets/gpt_dataset.py +++ b/megatron/core/datasets/gpt_dataset.py @@ -2,6 +2,7 @@ import logging import os +import sys import time from dataclasses import dataclass from typing import Dict, Tuple @@ -27,6 +28,9 @@ class GPTDatasetConfig(BlendedMegatronDatasetConfig): reset_attention_mask (bool): Option to reset the attention mask from the dataset eod_mask_loss (bool): Option to enable the EOD mask loss + + vocab_size (int): Size of vocabulary + """ reset_position_ids: bool = None @@ -35,6 +39,8 @@ class GPTDatasetConfig(BlendedMegatronDatasetConfig): eod_mask_loss: bool = None + vocab_size: int = sys.maxsize + def __post_init__(self) -> None: """Do asserts and set fields post init """ @@ -126,6 +132,8 @@ def __init__( indexed_dataset, dataset_path, indexed_indices, num_samples, index_split, config ) + self.vocab_size = config.vocab_size + def _finalize(self) -> None: """Abstract method implementation @@ -189,6 +197,10 @@ def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]: labels = text[1:].contiguous() tokens = text[:-1].contiguous() + assert not torch.any( + tokens >= self.vocab_size + ), "An input token is out of bounds of the tokenizer vocabulary" + attention_mask, loss_mask, position_ids = _get_ltor_masks_and_position_ids( tokens, self.config.tokenizer.eod, diff --git a/megatron/core/tensor_parallel/layers.py b/megatron/core/tensor_parallel/layers.py index ea13029e6d..a73803a5a3 100644 --- a/megatron/core/tensor_parallel/layers.py +++ b/megatron/core/tensor_parallel/layers.py @@ -206,9 +206,6 @@ def __init__( _initialize_affine_weight_gpu(self.weight, init_method, partition_dim=0, stride=1) def forward(self, input_): - assert not torch.any( - (input_ < 0) | (input_ >= self.num_embeddings) - ), "An input token is out of bounds of the embedding table" if self.tensor_model_parallel_size > 1: # Build the mask. 
input_mask = (input_ < self.vocab_start_index) | (input_ >= self.vocab_end_index) diff --git a/pretrain_gpt.py b/pretrain_gpt.py index 3c978518c0..8eb8cee212 100644 --- a/pretrain_gpt.py +++ b/pretrain_gpt.py @@ -167,6 +167,7 @@ def core_gpt_dataset_config_from_args(args): reset_position_ids=args.reset_position_ids, reset_attention_mask=args.reset_attention_mask, eod_mask_loss=args.eod_mask_loss, + vocab_size=get_tokenizer().vocab_size, ) From 260c4f242d99ff81d1097f2c9fdccd2b1c7b0e8d Mon Sep 17 00:00:00 2001 From: Xue Huang Date: Tue, 6 Feb 2024 15:40:01 -0800 Subject: [PATCH 262/296] Add interleaved rotary embedding in MCore --- megatron/arguments.py | 9 ++++- megatron/checkpointing.py | 1 + megatron/core/models/T5/t5_model.py | 7 +++- megatron/core/models/bert/bert_model.py | 5 ++- .../common/embeddings/rotary_pos_embedding.py | 39 +++++++++++++------ megatron/core/models/gpt/gpt_model.py | 1 + megatron/core/transformer/attention.py | 7 +++- .../core/transformer/transformer_config.py | 4 ++ megatron/model/language_model.py | 6 +-- pretrain_gpt.py | 2 +- .../functional_tests/jet_recipes/MR-gpt.yaml | 1 + ...rleaved-no-fusion_mcore-true_te-false.json | 1 + 12 files changed, 62 insertions(+), 21 deletions(-) create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings-interleaved-no-fusion_mcore-true_te-false.json diff --git a/megatron/arguments.py b/megatron/arguments.py index 51406f9594..847b188b8a 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -388,6 +388,10 @@ def validate_args(args, defaults={}): # Legacy RoPE arguments if args.use_rotary_position_embeddings: args.position_embedding_type = 'rope' + if args.rotary_interleaved and args.apply_rope_fusion: + raise RuntimeError('--rotary-interleaved does not work with rope_fusion.') + if args.rotary_interleaved and not args.use_mcore_models: + raise RuntimeError('--rotary-interleaved only support Megatron Core, please add --use-mcore-models.') # Would just need to add 'NoPE' as a position_embedding_type to support this, but for now # don't allow it to keep things simple @@ -448,8 +452,9 @@ def core_transformer_config_from_args(args): kw_args['layernorm_epsilon'] = args.norm_epsilon kw_args['deallocate_pipeline_outputs'] = True kw_args['pipeline_dtype'] = args.params_dtype - kw_args['batch_p2p_comm'] = not args.overlap_p2p_comm + kw_args['batch_p2p_comm'] = not args.overlap_p2p_comm kw_args['num_moe_experts'] = args.num_experts + kw_args['rotary_interleaved'] = args.rotary_interleaved if args.swiglu: kw_args['activation_func'] = F.silu kw_args['gated_linear_unit'] = True @@ -619,6 +624,8 @@ def _add_network_size_args(parser): 'Deprecated: use --position-embedding-type') group.add_argument('--rotary-percent', type=float, default=1.0, help='Percent of rotary dimension to use, default 100%%') + group.add_argument('--rotary-interleaved', action='store_true', + help='Use interleaved rotary embedding.') group.add_argument('--rotary-seq-len-interpolation-factor', type=int, default=None, help='Sequence length interpolation factor for rotary embeddings.') group.add_argument('--no-position-embedding', diff --git a/megatron/checkpointing.py b/megatron/checkpointing.py index d85ae25e4b..d21ed3f146 100644 --- a/megatron/checkpointing.py +++ b/megatron/checkpointing.py @@ -506,6 +506,7 @@ def _set_arg(arg_name, old_arg_name=None, force=False): _set_arg('add_position_embedding', force=True) _set_arg('use_rotary_position_embeddings', 
force=True) _set_arg('rotary_percent', force=True) + _set_arg('rotary_interleaved', force=True) _set_arg('add_bias_linear', force=True) _set_arg('add_qkv_bias', force=True) _set_arg('swiglu', force=True) diff --git a/megatron/core/models/T5/t5_model.py b/megatron/core/models/T5/t5_model.py index 5ad6b26dcc..d6010a116f 100644 --- a/megatron/core/models/T5/t5_model.py +++ b/megatron/core/models/T5/t5_model.py @@ -78,7 +78,7 @@ class T5Model(LanguageModule): transformer_encoder_layer_spec (ModuleSpec): transformer layer customization specs for encoder transformer_decoder_layer_spec (ModuleSpec): transformer layer customization specs for decoder - + vocab_size (int): vocabulary size max_sequence_length (int): maximum size of sequence. This is used for positional embedding @@ -151,7 +151,10 @@ def __init__( # Rotary Position Embeddings if self.position_embedding_type == 'rope': self.rotary_pos_emb = RotaryEmbedding( - self.config.kv_channels, rotary_percent, seq_len_interpolation_factor + kv_channels=self.config.kv_channels, + rotary_percent=rotary_percent, + rotary_interleaved=self.config.rotary_interleaved, + seq_len_interpolation_factor=seq_len_interpolation_factor, ) # Transformer encoder diff --git a/megatron/core/models/bert/bert_model.py b/megatron/core/models/bert/bert_model.py index 14eabf1737..15c49d2a50 100644 --- a/megatron/core/models/bert/bert_model.py +++ b/megatron/core/models/bert/bert_model.py @@ -93,7 +93,10 @@ def __init__( if self.position_embedding_type == 'rope': self.rotary_pos_emb = RotaryEmbedding( - self.config.kv_channels, rotary_percent, seq_len_interpolation_factor + kv_channels=self.config.kv_channels, + rotary_percent=rotary_percent, + rotary_interleaved=self.config.rotary_interleaved, + seq_len_interpolation_factor=seq_len_interpolation_factor, ) # Transformer. 
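
For context, a minimal construction sketch of the rotary embedding with the new flag, mirroring the keyword arguments used in the T5/BERT call sites above. This is not part of the patch: the kv_channels value is illustrative, the import path is taken from the diff headers, and a CUDA device is assumed to be available since the module allocates its inverse frequencies on the current CUDA device.

import torch
from megatron.core.models.common.embeddings.rotary_pos_embedding import RotaryEmbedding

# Interleaved (RoFormer-style) rotary embedding; the default stays the
# half/half (LLaMa-style) layout when rotary_interleaved is left as False.
rotary_pos_emb = RotaryEmbedding(
    kv_channels=64,                    # illustrative per-head channel count
    rotary_percent=1.0,
    rotary_interleaved=True,
    seq_len_interpolation_factor=None,
)
# rotary_pos_emb(max_seq_len) then returns the [max_seq_len, 1, 1, dim]
# frequency tensor consumed by apply_rotary_pos_emb.
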
diff --git a/megatron/core/models/common/embeddings/rotary_pos_embedding.py b/megatron/core/models/common/embeddings/rotary_pos_embedding.py index 2ab5164d57..238838fa6b 100644 --- a/megatron/core/models/common/embeddings/rotary_pos_embedding.py +++ b/megatron/core/models/common/embeddings/rotary_pos_embedding.py @@ -57,6 +57,7 @@ def __init__( self, kv_channels: int, rotary_percent: float, + rotary_interleaved: bool = False, seq_len_interpolation_factor: float = None, rotary_base: int = 10000, ) -> None: @@ -65,6 +66,7 @@ def __init__( dim = kv_channels if rotary_percent < 1.0: dim = int(dim * rotary_percent) + self.rotary_interleaved = rotary_interleaved self.seq_len_interpolation_factor = seq_len_interpolation_factor self.inv_freq = 1.0 / ( @@ -96,7 +98,12 @@ def forward(self, max_seq_len: int, offset: int = 0) -> Tensor: freqs = torch.outer(seq, self.inv_freq) # first part even vector components, second part odd vector components, # 2 * dim in dimension size - emb = torch.cat((freqs, freqs), dim=-1) + if not self.rotary_interleaved: + emb = torch.cat((freqs, freqs), dim=-1) + else: + emb = torch.stack((freqs.view(-1, 1), freqs.view(-1, 1)), dim=-1).view( + freqs.shape[0], -1 + ) # emb [seq_length, .., dim] emb = emb[:, None, None, :] if parallel_state.get_context_parallel_world_size() > 1: @@ -142,7 +149,7 @@ def get_rotary_seq_len( return rotary_seq_len -def _rotate_half(x: Tensor) -> Tensor: +def _rotate_half(x: Tensor, rotary_interleaved: bool) -> Tensor: """Change sign so the last dimension becomes [-odd, +even] Args: @@ -151,12 +158,17 @@ def _rotate_half(x: Tensor) -> Tensor: Returns: Tensor: Tensor rotated half """ - - x1, x2 = torch.chunk(x, 2, dim=-1) - return torch.cat((-x2, x1), dim=-1) + if not rotary_interleaved: + x1, x2 = torch.chunk(x, 2, dim=-1) + return torch.cat((-x2, x1), dim=-1) + else: + x1 = x[:, :, :, ::2] + x2 = x[:, :, :, 1::2] + x_new = torch.stack((-x2, x1), dim=-1) + return x_new.view(x_new.shape[0], x_new.shape[1], x_new.shape[2], -1) -def apply_rotary_pos_emb_bshd(t: Tensor, freqs: Tensor) -> Tensor: +def apply_rotary_pos_emb_bshd(t: Tensor, freqs: Tensor, rotary_interleaved: bool = False) -> Tensor: """Apply rotary positional embedding to input tensor T. check https://kexue.fm/archives/8265 for detailed formulas @@ -178,11 +190,14 @@ def apply_rotary_pos_emb_bshd(t: Tensor, freqs: Tensor) -> Tensor: cos_ = torch.cos(freqs).to(t.dtype) sin_ = torch.sin(freqs).to(t.dtype) - t = (t * cos_) + (_rotate_half(t) * sin_) + t = (t * cos_) + (_rotate_half(t, rotary_interleaved) * sin_) return torch.cat((t, t_pass), dim=-1) -def apply_rotary_pos_emb_thd(t: Tensor, cu_seqlens: Tensor, freqs: Tensor) -> Tensor: +def apply_rotary_pos_emb_thd( + t: Tensor, cu_seqlens: Tensor, freqs: Tensor, rotary_interleaved: bool = False +) -> Tensor: + """A baseline implementation of applying RoPE for `thd` format. 
Args: @@ -205,7 +220,7 @@ def apply_rotary_pos_emb_thd(t: Tensor, cu_seqlens: Tensor, freqs: Tensor) -> Te def apply_rotary_pos_emb( - t: Tensor, freqs: Tensor, config: TransformerConfig, cu_seqlens: Optional[Tensor] = None + t: Tensor, freqs: Tensor, config: TransformerConfig, cu_seqlens: Optional[Tensor] = None, ): """ Reroute to the appropriate apply_rotary_pos_emb function depending on @@ -227,6 +242,8 @@ def apply_rotary_pos_emb( return fused_apply_rotary_pos_emb_thd(t, cu_seqlens, freqs) else: if cu_seqlens is None: - return apply_rotary_pos_emb_bshd(t, freqs) + return apply_rotary_pos_emb_bshd(t, freqs, rotary_interleaved=config.rotary_interleaved) else: - return apply_rotary_pos_emb_thd(t, cu_seqlens, freqs) + return apply_rotary_pos_emb_thd( + t, cu_seqlens, freqs, rotary_interleaved=config.rotary_interleaved + ) diff --git a/megatron/core/models/gpt/gpt_model.py b/megatron/core/models/gpt/gpt_model.py index a6384d70c6..d096b47c22 100644 --- a/megatron/core/models/gpt/gpt_model.py +++ b/megatron/core/models/gpt/gpt_model.py @@ -82,6 +82,7 @@ def __init__( self.rotary_pos_emb = RotaryEmbedding( kv_channels=self.config.kv_channels, rotary_percent=rotary_percent, + rotary_interleaved=self.config.rotary_interleaved, seq_len_interpolation_factor=seq_len_interpolation_factor, rotary_base=rotary_base, ) diff --git a/megatron/core/transformer/attention.py b/megatron/core/transformer/attention.py index 1d5fbbff79..111f1008b5 100644 --- a/megatron/core/transformer/attention.py +++ b/megatron/core/transformer/attention.py @@ -277,9 +277,12 @@ def forward( else: cu_seqlens_q = cu_seqlens_kv = None query = apply_rotary_pos_emb( - query, q_pos_emb, config=self.config, cu_seqlens=cu_seqlens_q + query, q_pos_emb, config=self.config, cu_seqlens=cu_seqlens_q, ) - key = apply_rotary_pos_emb(key, k_pos_emb, config=self.config, cu_seqlens=cu_seqlens_kv) + key = apply_rotary_pos_emb( + key, k_pos_emb, config=self.config, cu_seqlens=cu_seqlens_kv, + ) + # TODO, can apply positional embedding to value_layer so it has # absolute positional embedding. # otherwise, only relative positional embedding takes effect diff --git a/megatron/core/transformer/transformer_config.py b/megatron/core/transformer/transformer_config.py index 25169765c8..8437f4b85c 100644 --- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -33,6 +33,7 @@ class TransformerConfig(ModelParallelConfig): gated_linear_unit (bool): Use a gated linear unit for the first linear layer in the MLP. Defaults to False. activation_func (Callable): Activation function to use for the non-linearity in the MLP. Defaults to F.gelu. num_moe_experts (int): Number of experts to use for MoE layer. When set, it replaces MLP with MoE layer. Defaults to None (no MoE). + rotary_interleaved (bool): True is rotate pairs of even and odd dimensions (RoFormer style), False is rotate pairs of first half and second half (LLaMa style). Default to False. init_method (Callable): Method to initialize weights. Note that bias is always set to zero. Should be a function that takes a single Tensor and initializes it. Defaults to megatron.core.utils.init_method_normal(init_method_std) which is torch nn init normal with mean=0.0 and std=init_method_Std. output_layer_init_method (Callable): Method to initialize weights of the output layer of both attention and MLP blocks. 
Defaults to megatron.core.utils.scaled_init_method_normal(init_method_std) which is torch nn init normal with mean=0.0 and std=init_method_std / math.sqrt(2.0 * num_layers). init_method_std (float): Standard deviation of the zero mean normal for the default initialization method, not used if init_method and output_layer_init_method are provided. Defaults to 0.02. @@ -86,6 +87,7 @@ class TransformerConfig(ModelParallelConfig): gated_linear_unit: bool = False activation_func: Callable = F.gelu num_moe_experts: int = None + rotary_interleaved: bool = False window_size: Optional[Tuple[int, int]] = None # initialization @@ -242,6 +244,8 @@ def __post_init__(self): raise ValueError( "When bias_activation_fusion is True and activation function is gelu, add_bias_linear must also be True." ) + if self.apply_rope_fusion and self.rotary_interleaved: + raise ValueError(f'rotary_interleaved does not work with apply_rope_fusion.') if self.init_method is None: self.init_method = init_method_normal(self.init_method_std) diff --git a/megatron/model/language_model.py b/megatron/model/language_model.py index 69bfa2e801..948d1c3cc5 100644 --- a/megatron/model/language_model.py +++ b/megatron/model/language_model.py @@ -376,9 +376,9 @@ def __init__(self, # Wang and Komatsuzaki et al # https://github.com/kingoflolz/mesh-transformer-jax/ self.rotary_pos_emb = RotaryEmbedding( - rotary_dim, - args.rotary_percent, - seq_len_interpolation_factor=args.rotary_seq_len_interpolation_factor + kv_channels=rotary_dim, + rotary_percent=args.rotary_percent, + seq_len_interpolation_factor=args.rotary_seq_len_interpolation_factor, ) # Encoder (usually set to True, False if part of an encoder-decoder diff --git a/pretrain_gpt.py b/pretrain_gpt.py index 3c978518c0..8c9504e15c 100644 --- a/pretrain_gpt.py +++ b/pretrain_gpt.py @@ -62,7 +62,7 @@ def model_provider(pre_process=True, post_process=True) -> Union[GPTModel, megat parallel_output=True, share_embeddings_and_output_weights=not args.untie_embeddings_and_output_weights, position_embedding_type=args.position_embedding_type, - rotary_percent=args.rotary_percent + rotary_percent=args.rotary_percent, ) else: assert(args.context_parallel_size == 1), "Context parallelism is only supported with Megatron Core!" 
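
To make the difference between the two layouts concrete, here is a self-contained sketch (not part of the patch) that re-implements the `_rotate_half` branch logic from the hunk above on a toy [1, 1, 1, 8] tensor; the function name `rotate_half` is local to this example.

import torch

def rotate_half(x: torch.Tensor, rotary_interleaved: bool) -> torch.Tensor:
    if not rotary_interleaved:
        # LLaMa-style: rotate the first half against the second half.
        x1, x2 = torch.chunk(x, 2, dim=-1)
        return torch.cat((-x2, x1), dim=-1)
    # RoFormer-style: rotate each (even, odd) channel pair.
    x1 = x[:, :, :, ::2]
    x2 = x[:, :, :, 1::2]
    x_new = torch.stack((-x2, x1), dim=-1)
    return x_new.view(x_new.shape[0], x_new.shape[1], x_new.shape[2], -1)

t = torch.arange(8.0).view(1, 1, 1, 8)
print(rotate_half(t, rotary_interleaved=False))  # [-4., -5., -6., -7.,  0.,  1.,  2.,  3.]
print(rotate_half(t, rotary_interleaved=True))   # [-1.,  0., -3.,  2., -5.,  4., -7.,  6.]
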
diff --git a/tests/functional_tests/jet_recipes/MR-gpt.yaml b/tests/functional_tests/jet_recipes/MR-gpt.yaml index 2f615240e0..5a093e6c94 100644 --- a/tests/functional_tests/jet_recipes/MR-gpt.yaml +++ b/tests/functional_tests/jet_recipes/MR-gpt.yaml @@ -51,6 +51,7 @@ products: - {tp_size: [2], pp_size: [2]} - {tp_size: [1], pp_size: [4], vp_size: [1]} - {tp_size: [1], pp_size: [2], extra_args: ['"--position-embedding-type rope"'], args_meta: ["rope_embeddings"]} + - {tp_size: [1], pp_size: [2], extra_args: ['"--position-embedding-type rope --rotary-interleaved --no-rope-fusion"'], args_meta: ["rope_embeddings_interleaved_no_fusion"]} - {tp_size: [1], pp_size: [4], extra_args: ["--swiglu"], args_meta: ["swiglu"]} - {tp_size: [1], pp_size: [4], extra_args: ["--disable-bias-linear"], args_meta: ["disable_bias_linear"]} - {tp_size: [1], pp_size: [4], extra_args: ["--untie-embeddings-and-output-weights"], args_meta: ["untie_embeddings_and_outputs"]} diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings-interleaved-no-fusion_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings-interleaved-no-fusion_mcore-true_te-false.json new file mode 100644 index 0000000000..345d7fcc5f --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings-interleaved-no-fusion_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.858, 10.89563, 10.87285, 10.8249, 10.68816, 10.58405, 10.08513, 10.18125, 10.1058, 9.75605]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1864.0, 2004.0, 2086.0, 1978.0, 1975.0, 1889.0, 1656.0, 2059.0, 2227.0, 2306.0]}, "iteration_timing_avg": 0.08140323529411765} \ No newline at end of file From 6fdbfa73cdd2e8cdbf7d4b5a00255ffecb59041c Mon Sep 17 00:00:00 2001 From: Gerald Shen Date: Tue, 6 Feb 2024 15:40:10 -0800 Subject: [PATCH 263/296] fix activation checkpointing mutation --- megatron/core/transformer/attention.py | 2 +- megatron/core/transformer/transformer_block.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/megatron/core/transformer/attention.py b/megatron/core/transformer/attention.py index d677003c50..883c2dcb21 100644 --- a/megatron/core/transformer/attention.py +++ b/megatron/core/transformer/attention.py @@ -289,7 +289,7 @@ def forward( # core attention computation # ================================== - if self.checkpoint_core_attention: + if self.checkpoint_core_attention and self.training: core_attn_out = self._checkpointed_attention_forward( query, key, diff --git a/megatron/core/transformer/transformer_block.py b/megatron/core/transformer/transformer_block.py index a60351cb25..09f6c1033a 100644 --- a/megatron/core/transformer/transformer_block.py +++ b/megatron/core/transformer/transformer_block.py @@ -353,7 +353,7 @@ def forward( with rng_context and fp8_context: # Forward pass. 
- if self.config.recompute_granularity == 'full': + if self.config.recompute_granularity == 'full' and self.training: hidden_states = self._checkpointed_forward( hidden_states=hidden_states, attention_mask=attention_mask, From b22634d4f1a8dea88b28aca51798512d733980a9 Mon Sep 17 00:00:00 2001 From: Joel Lamy-Poirier Date: Tue, 6 Feb 2024 18:42:48 -0500 Subject: [PATCH 264/296] fix --- megatron/training.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/megatron/training.py b/megatron/training.py index 2a7db889f4..a095a39133 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -956,8 +956,6 @@ def trace_fn(p: torch.profiler.profile): profiler = None num_microbatches = get_num_microbatches() - with contextlib.nullcontext() if profiler is None else profiler: - while iteration < args.train_iters: eval_duration = 0.0 eval_iterations = 0 def track_e2e_metrics(): @@ -981,7 +979,8 @@ def track_e2e_metrics(): 'validation_iterations_time_msecs_avg': validation_iterations_time_msecs_avg }) - while iteration < args.train_iters: + with contextlib.nullcontext() if profiler is None else profiler: + while iteration < args.train_iters: if args.profile and \ iteration == args.profile_step_start and \ torch.distributed.get_rank() in args.profile_ranks: From 21659193c66f7e57ae8e261bee0350142d697cf7 Mon Sep 17 00:00:00 2001 From: Joel Lamy-Poirier Date: Tue, 6 Feb 2024 19:17:11 -0500 Subject: [PATCH 265/296] Better wandb --- megatron/training.py | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/megatron/training.py b/megatron/training.py index a095a39133..e062622c44 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -779,13 +779,14 @@ def training_log(loss_dict, total_loss_dict, learning_rate, iteration, wandb_writer.log({'tokens_per_sec_per_gpu': tokens_per_sec_per_gpu}, iteration) log_string += ' learning rate: {:.3E} |'.format(learning_rate) log_string += ' global batch size: {:5d} |'.format(batch_size) + loss_dict_avg={} for key in total_loss_dict: if key not in [advanced_iters_key, skipped_iters_key, nan_iters_key]: - avg = total_loss_dict[key].item() / \ + loss_dict_avg[key] = total_loss_dict[key].item() / \ float(max(1, total_loss_dict[advanced_iters_key])) - if avg > 0.0: - log_string += ' {}: {:.6E} |'.format(key, avg) + if loss_dict_avg[key] > 0.0: + log_string += ' {}: {:.6E} |'.format(key, loss_dict_avg[key]) total_loss_dict[key] = torch.tensor([0.0], dtype=torch.float, device='cuda') log_string += ' loss scale: {:.1f} |'.format(loss_scale) if grad_norm is not None: @@ -811,6 +812,19 @@ def training_log(loss_dict, total_loss_dict, learning_rate, iteration, report_memory_flag = False timers.log(timers_to_log, normalizer=args.log_interval) + # Weights and biases reporting + if is_last_rank() and wandb_writer is not None: + metrics = { + 'learning_rate': learning_rate, + 'consumed_samples': args.consumed_train_samples, + 'loss_scale': loss_scale, + 'grad_norm': grad_norm, + 'model_tflops': throughput, + 'tokens_per_sec_per_gpu': tokens_per_sec_per_gpu, + **loss_dict_avg + } + wandb_writer.log({"Training":metrics}, step=iteration) + return report_memory_flag From c478f4846b62c8618d5b66382860e026808dddd5 Mon Sep 17 00:00:00 2001 From: Joel Lamy-Poirier Date: Tue, 6 Feb 2024 22:19:43 -0500 Subject: [PATCH 266/296] misc --- megatron/training.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/megatron/training.py b/megatron/training.py index e062622c44..55ed93e4d5 100644 --- a/megatron/training.py +++ 
b/megatron/training.py @@ -819,7 +819,7 @@ def training_log(loss_dict, total_loss_dict, learning_rate, iteration, 'consumed_samples': args.consumed_train_samples, 'loss_scale': loss_scale, 'grad_norm': grad_norm, - 'model_tflops': throughput, + 'tflops': throughput, 'tokens_per_sec_per_gpu': tokens_per_sec_per_gpu, **loss_dict_avg } From b6ce19388894d5588e779daa9d288e9e72792b18 Mon Sep 17 00:00:00 2001 From: Zijie Yan Date: Tue, 6 Feb 2024 23:21:41 -0800 Subject: [PATCH 267/296] [MoE] fix the convergence issue when EP>1 and K>1 --- megatron/arguments.py | 4 +- megatron/core/parallel_state.py | 8 ---- megatron/core/transformer/moe/README.md | 8 ++-- megatron/core/transformer/moe/moe_layer.py | 4 +- megatron/core/transformer/moe/router.py | 12 +++-- .../core/transformer/moe/token_dispatcher.py | 44 +++++++------------ .../core/transformer/transformer_config.py | 2 +- ...rallel-top2router_mcore-true_te-false.json | 2 +- .../transformer/moe/test_token_dispatcher.py | 2 - 9 files changed, 30 insertions(+), 56 deletions(-) diff --git a/megatron/arguments.py b/megatron/arguments.py index 847b188b8a..d10b4f3020 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -1444,9 +1444,9 @@ def _add_moe_args(parser): group.add_argument('--num-experts', type=int, default=None, help='Number of Experts in MoE (None means no MoE)') group.add_argument('--moe-router-load-balancing-type', type=str, - choices=['aux_loss', 'sinkhorn', None], + choices=['aux_loss', 'sinkhorn', "none"], default='aux_loss', - help='Determines the load balancing strategy for the router. "aux_loss" corresponds to the load balancing loss used in GShard and SwitchTransformer, "sinkhorn" corresponds to the balancing algorithm used in S-BASE, and "None" implies no load balancing. The default is "aux_loss".') + help='Determines the load balancing strategy for the router. "aux_loss" corresponds to the load balancing loss used in GShard and SwitchTransformer, "sinkhorn" corresponds to the balancing algorithm used in S-BASE, and "none" implies no load balancing. The default is "aux_loss".') group.add_argument('--moe-router-topk', type=int, default=2, help='Number of experts to route to for each token. The default is 2.') group.add_argument('--moe-grouped-gemm', action='store_true', diff --git a/megatron/core/parallel_state.py b/megatron/core/parallel_state.py index ef62e76969..4307f629d2 100644 --- a/megatron/core/parallel_state.py +++ b/megatron/core/parallel_state.py @@ -687,14 +687,6 @@ def set_pipeline_model_parallel_split_rank(rank): _PIPELINE_MODEL_PARALLEL_SPLIT_RANK = rank -def get_expert_model_parallel_rank(): - """Return my rank for the tensor model parallel group.""" - global _MPU_EXPERT_MODEL_PARALLEL_RANK - if _MPU_EXPERT_MODEL_PARALLEL_RANK is not None: - return _MPU_EXPERT_MODEL_PARALLEL_RANK - return torch.distributed.get_rank(group=get_tensor_and_expert_parallel_group()) - - def get_tensor_model_parallel_rank(): """Return my rank for the tensor model parallel group.""" global _MPU_TENSOR_MODEL_PARALLEL_RANK diff --git a/megatron/core/transformer/moe/README.md b/megatron/core/transformer/moe/README.md index 56cae2f586..8e53c723e5 100644 --- a/megatron/core/transformer/moe/README.md +++ b/megatron/core/transformer/moe/README.md @@ -54,7 +54,7 @@ | num-experts | Number of Experts in MoE (None means no MoE) | | expert-model-parallel-size | Degree of expert model parallelism. 
| | moe-grouped-gemm | When there are multiple experts per rank, compress multiple local gemms into a single kernel launch to improve the utilization and performance by leveraging the Grouped GEMM feature introduced since CUTLASS 2.8 | -| moe-router-load-balancing-type | Determines the load balancing strategy for the router. "aux_loss" corresponds to the load balancing loss used in GShard and SwitchTransformer, "sinkhorn" corresponds to the balancing algorithm used in S-BASE, and "None" implies no load balancing. The default is "aux_loss". | +| moe-router-load-balancing-type | Determines the load balancing strategy for the router. "aux_loss" corresponds to the load balancing loss used in GShard and SwitchTransformer, "sinkhorn" corresponds to the balancing algorithm used in S-BASE, and "none" implies no load balancing. The default is "aux_loss". | | moe-router-topk | Number of experts to route to for each token. The default is 2. | | moe-aux-loss-coeff | Scaling coefficient for the aux loss: a starting value of 1e-2 is recommended. | | moe-z-loss-coeff | Scaling coefficient for the z-loss: a starting value of 1e-3 is recommended. | @@ -69,7 +69,7 @@ To train a top-2 MoE model with an auxiliary loss, include the following argumen --num-experts 8 --expert-model-parallel-size 8 --moe-grouped-gemm ---moe-router-load-balancing-type aux_loss # options: aux_loss, sinkhorn, None. Default is aux_loss. +--moe-router-load-balancing-type aux_loss # options: aux_loss, sinkhorn, none. Default is aux_loss. --moe-router-topk 2 --moe-aux-loss-coeff 1e-2 --use-distributed-optimizer @@ -129,9 +129,11 @@ MODEL_ARGS=( MOE_ARGS=( --num-experts 8 + --expert-model-parallel-size 4 --moe-router-load-balancing-type aux_loss # options: aux_loss, sinkhorn, None. Default is aux_loss. --moe-router-topk 2 --moe-aux-loss-coeff 1e-2 + --moe-grouped-gemm ) DATA_ARGS=( @@ -158,8 +160,8 @@ TRAINING_ARGS=( MODEL_PARALLEL_ARGS=( --tensor-model-parallel-size 4 --pipeline-model-parallel-size 1 - --expert-model-parallel-size 4 --sequence-parallel + --use-distributed-optimizer ) LOGGING_ARGS=( diff --git a/megatron/core/transformer/moe/moe_layer.py b/megatron/core/transformer/moe/moe_layer.py index 42cadb3428..6b10f6c4b0 100644 --- a/megatron/core/transformer/moe/moe_layer.py +++ b/megatron/core/transformer/moe/moe_layer.py @@ -53,9 +53,7 @@ class MoELayer(BaseMoELayer): def __init__(self, config: TransformerConfig, submodules: MLPSubmodules = None): self.submodules = submodules super(MoELayer, self).__init__(config=config) - self.router = TopKRouter( - self.num_local_experts, self.local_expert_indices, config=self.config - ) + self.router = TopKRouter(config=self.config) if self.config.moe_grouped_gemm: self.experts = GroupedMLP(self.num_local_experts, self.config) else: diff --git a/megatron/core/transformer/moe/router.py b/megatron/core/transformer/moe/router.py index c4470fab6c..672565192f 100644 --- a/megatron/core/transformer/moe/router.py +++ b/megatron/core/transformer/moe/router.py @@ -93,14 +93,10 @@ def forward(self, input: torch.Tensor): class TopKRouter(Router): """Route each token to the top-k experts.""" - def __init__( - self, num_local_experts: int, local_expert_indices: List[int], config: TransformerConfig, - ) -> None: + def __init__(self, config: TransformerConfig,) -> None: """Initialize the zero token dropping router. Args: - num_local_experts (int): The number of local experts. - local_expert_indices (List[int]): The indices of the local experts. 
config (TransformerConfig): The configuration for the transformer model. """ super().__init__(config=config) @@ -236,9 +232,11 @@ def routing(self, logits: torch.Tensor): scores, indices = self.sinkhorn_load_balancing(logits) elif self.routing_type == "aux_loss": scores, indices = self.aux_loss_load_balancing(logits) - elif self.routing_type is None: + elif self.routing_type == "none": # A naive top-k routing without load balancing - top_logits, indices = torch.topk(logits, k=self.k, dim=1) + top_logits, indices = torch.topk(logits, k=self.topk, dim=1) scores = torch.softmax(top_logits, dim=-1, dtype=torch.float32).type_as(logits) + else: + raise ValueError(f"Unsupported MoE routing type: {self.routing_type}") return scores, indices diff --git a/megatron/core/transformer/moe/token_dispatcher.py b/megatron/core/transformer/moe/token_dispatcher.py index e99c40fbde..69bace767e 100644 --- a/megatron/core/transformer/moe/token_dispatcher.py +++ b/megatron/core/transformer/moe/token_dispatcher.py @@ -72,24 +72,6 @@ def __init__( self.router_topk = config.moe_router_topk self.add_bias = config.add_bias_linear - def gather_indices(self, local_indices: torch.Tensor): - """ Gather tensors and concatenate along the first dimension.""" - group = get_tensor_and_expert_parallel_group() - world_size = torch.distributed.get_world_size(group=group) - # Bypass the function if we are using only 1 GPU. - if world_size == 1: - return local_indices - - dim_size = list(local_indices.size()) - dim_size[0] = dim_size[0] * world_size - - # TODO pre allocate memory - output = torch.empty( - dim_size, dtype=local_indices.dtype, device=torch.cuda.current_device() - ) - torch.distributed._all_gather_base(output, local_indices.contiguous(), group=group) - return output - def token_permutation( self, hidden_states: torch.Tensor, max_prob: torch.Tensor, max_ind: torch.Tensor ): @@ -126,21 +108,25 @@ def token_permutation( hidden_states ) with torch.no_grad(): - global_indices = self.gather_indices(max_ind) + global_indices = tensor_parallel.gather_from_sequence_parallel_region_to_moe( + max_ind + ) # Create a mask of mapping between global and local tokens where each # element is True if it's between the local_expert_indices - global_local_map = (global_indices >= self.local_expert_indices[0]) & ( + global_local_mask = (global_indices >= self.local_expert_indices[0]) & ( global_indices <= self.local_expert_indices[-1] ) - local_indices = global_indices.masked_select(global_local_map) - if self.router_topk > 1: # k > 1 - global_probs = self.gather_indices(max_prob) - local_probs = global_probs.masked_select(global_local_map) - else: - local_probs = max_prob - # Reshape global_local_map to be compatible with Tensor.gather - global_local_map = global_local_map.nonzero()[:, 0] - global_local_map = global_local_map.view(-1, 1).expand(-1, hidden_states.shape[-1]) + local_indices = global_indices.masked_select(global_local_mask) + + if self.router_topk > 1: # k > 1 + global_probs = tensor_parallel.gather_from_sequence_parallel_region_to_moe(max_prob) + local_probs = global_probs.masked_select(global_local_mask) + else: + local_probs = max_prob + + # Reshape global_local_mask to be compatible with Tensor.gather + global_local_map = global_local_mask.nonzero()[:, 0] + global_local_map = global_local_map.view(-1, 1).expand(-1, hidden_states.shape[-1]) local_hidden_states = torch.gather(global_hidden_states, 0, global_local_map) else: if self.router_topk > 1: diff --git a/megatron/core/transformer/transformer_config.py 
b/megatron/core/transformer/transformer_config.py index 8437f4b85c..cba3454a6a 100644 --- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -57,7 +57,7 @@ class TransformerConfig(ModelParallelConfig): clone_scatter_output_in_embedding (bool): When set to true, clone the output of scatter_to_sequence_parallel_region in embedding layer to facilitate garbage collection of input. normalization (str): Swtich b/w `LayerNorm` and `RMSNorm` as normalization layers. For now, these are primarily used by Transformer-Engine's layers like `LayerNormLinear`. Default value is `LayerNorm`. window_size ((int,int) or None): If not None, then will use sliding window attention. The size of the window is specified by the numbers inside the tuple; -1 is special value meaning "infinite window size". - moe_router_load_balancing_type (str): Determines the load balancing strategy for the router. "aux_loss" corresponds to the load balancing loss used in GShard and SwitchTransformer, "sinkhorn" corresponds to the balancing algorithm used in S-BASE, and "None" implies no load balancing. The default is "aux_loss". + moe_router_load_balancing_type (str): Determines the load balancing strategy for the router. "aux_loss" corresponds to the load balancing loss used in GShard and SwitchTransformer, "sinkhorn" corresponds to the balancing algorithm used in S-BASE, and "none" implies no load balancing. The default is "aux_loss". moe_router_topk (int): Number of experts to route to for each token. The default is 2. moe_grouped_gemm (bool): When there are multiple experts per rank, compress multiple local (potentially small) gemms in a single kernel launch to improve the utilization and performance by leveraging the Grouped GEMM feature introduced since CUTLASS 2.8 (https://github.com/fanshiqing/grouped_gemm). 
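
As a quick illustration of the new "none" option (no load balancing), the sketch below reproduces the plain top-k branch added to TopKRouter above: softmax is applied only to the selected logits and no auxiliary loss is attached. The shapes and the helper name `naive_topk_routing` are illustrative, not part of the patch.

import torch

def naive_topk_routing(logits: torch.Tensor, topk: int = 2):
    # logits: [num_tokens, num_experts] router outputs
    top_logits, indices = torch.topk(logits, k=topk, dim=1)
    scores = torch.softmax(top_logits, dim=-1, dtype=torch.float32).type_as(logits)
    return scores, indices

logits = torch.randn(4, 8)                       # 4 tokens routed over 8 experts
scores, indices = naive_topk_routing(logits, topk=2)
print(scores.shape, indices.shape)               # torch.Size([4, 2]) torch.Size([4, 2])
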
diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-top2router_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-top2router_mcore-true_te-false.json index c3f6400d8c..761c53aecb 100644 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-top2router_mcore-true_te-false.json +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-top2router_mcore-true_te-false.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.78922, 10.84167, 10.85605, 10.78017, 10.65475, 10.56494, 10.04887, 10.17872, 10.08664, 9.73742]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [62363.0, 65732.0, 66174.0, 65596.0, 64478.0, 64769.0, 63854.0, 66376.0, 67110.0, 67650.0]}, "iteration_timing_avg": 0.21506794117647057} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.78922, 10.8416, 10.85552, 10.77966, 10.65528, 10.56398, 10.04054, 10.17415, 10.08488, 9.73406]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [13541.0, 16797.0, 17213.0, 16564.0, 15382.0, 15817.0, 14915.0, 17089.0, 17939.0, 18387.0]}, "iteration_timing_avg": 0.21506794117647057} \ No newline at end of file diff --git a/tests/unit_tests/transformer/moe/test_token_dispatcher.py b/tests/unit_tests/transformer/moe/test_token_dispatcher.py index 40b49d0d75..ec067a41fb 100644 --- a/tests/unit_tests/transformer/moe/test_token_dispatcher.py +++ b/tests/unit_tests/transformer/moe/test_token_dispatcher.py @@ -27,8 +27,6 @@ def setup_method(self, method): moe_router_topk=2, ) self.router = TopKRouter( - num_local_experts=num_moe_experts, - local_expert_indices=range(num_moe_experts), config=transformer_config, ) self.token_dispatcher = MoEDroplessTokenDispatcher( From 84c7af234d9ba962a1031bba0d3b545b2198eb5c Mon Sep 17 00:00:00 2001 From: WangXi Date: Tue, 26 Dec 2023 16:28:24 +0800 Subject: [PATCH 268/296] Use view() to set param_buffer from grad_buffer Move away from storage(); this helps reduce peak storage --- megatron/optimizer/distrib_optimizer.py | 27 +++---------------------- 1 file changed, 3 insertions(+), 24 deletions(-) diff --git a/megatron/optimizer/distrib_optimizer.py b/megatron/optimizer/distrib_optimizer.py index 16e0742229..9152ba5476 100644 --- a/megatron/optimizer/distrib_optimizer.py +++ b/megatron/optimizer/distrib_optimizer.py @@ -455,32 +455,11 @@ def __init__( self.param_buffers = [] for gbuf_index, grad_buffer in enumerate(self.grad_buffers): size_ratio = torch.finfo(grad_buffer.dtype).bits // torch.finfo(params_dtype).bits + assert size_ratio >= 1, "param_dtype size should be smaller than or equal to grad_dtype size" current_param_buffers = [] for bucket in grad_buffer.buckets: - - # Handle older/newer method for getting untyped storage. - try: - storage = bucket.data.untyped_storage() - except: - try: - storage = bucket.data.storage()._untyped() - except: - storage = bucket.data.storage().untyped() - - # Typed param buffer. 
- param_buffer = torch.tensor(storage, dtype=params_dtype, device=bucket.data.device) - - # .storage() ignores views / slices, so param_buffer now points to the start - # of the grad_buffer instead of to the start of each bucket. As a result, - # add bucket.offset to make sure param_buffers point to the right region of - # memory. - # Since we want the start of each bucket's param_buffer to coincide with the - # start of the same bucket's grad_buffer (this ensures that zeroing the grad - # buffer does not zero out params in the param_buffer before they are copied - # into the model_params), multiply the offset by the size ratio of grads and - # params. - offset = bucket.offset * size_ratio - param_buffer = param_buffer[offset : offset + bucket.data.numel()] + param_buffer = bucket.data.view(dtype=params_dtype) + param_buffer = param_buffer[:bucket.data.numel()] assert ( param_buffer.data_ptr() == bucket.data.data_ptr() ), "param_buffer and grad_buffer for same bucket should start at the same byte address" From 2fb398cfe34c319ef4ff0b63bb6e860f9ac694d0 Mon Sep 17 00:00:00 2001 From: Deepak Narayanan Date: Wed, 7 Feb 2024 12:03:59 -0800 Subject: [PATCH 269/296] Add missing num_floating_point_operations_so_far argument to save_checkpoint_and_time call --- megatron/training.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/megatron/training.py b/megatron/training.py index f2f0819e49..6402182bee 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -954,7 +954,8 @@ def track_e2e_metrics(): assert get_num_microbatches() > num_microbatches, \ "number of microbatches should be increasing due to batch size rampup" save_checkpoint_and_time(iteration, model, optimizer, - opt_param_scheduler) + opt_param_scheduler, + num_floating_point_operations_so_far) num_microbatches = get_num_microbatches() update_num_microbatches(args.consumed_train_samples, consistency_check=True) From 6e25554133d2887eea8c358c7b77783fb847bf3f Mon Sep 17 00:00:00 2001 From: Ankur Joshi Date: Fri, 9 Feb 2024 04:03:03 -0800 Subject: [PATCH 270/296] Adding back the changes needed in timers.py for E2E work --- megatron/core/timers.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/megatron/core/timers.py b/megatron/core/timers.py index 672a79f531..1447feaea3 100644 --- a/megatron/core/timers.py +++ b/megatron/core/timers.py @@ -67,6 +67,7 @@ def __init__(self, name): """ super().__init__(name) self._elapsed = 0.0 + self._active_time = 0.0 self._started = False # Note that None will default to the global process group self._barrier_group = None @@ -103,12 +104,15 @@ def stop(self, barrier=False): if barrier: torch.distributed.barrier(group=self._barrier_group) torch.cuda.synchronize() - self._elapsed += time.time() - self._start_time + elapsed = time.time() - self._start_time + self._elapsed += elapsed + self._active_time += elapsed self._started = False def reset(self): """Reset timer. """ + # Don't reset _active_time self._elapsed = 0.0 self._started = False @@ -136,6 +140,8 @@ def elapsed(self, reset=True, barrier=False): self.start(barrier=barrier) return _elapsed + def active_time(self): + return self._active_time class Timers: """Class for a group of Timers. 
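
A hedged usage sketch of the `active_time` accounting restored above: unlike the per-interval `_elapsed` counter, which `reset()` clears, `_active_time` keeps accumulating across start/stop cycles. The `Timer` class name and import path are assumed from the surrounding code, and a CUDA device is assumed since the timer synchronizes the GPU on start/stop.

from megatron.core.timers import Timer   # assumed class name / import path

t = Timer('interval-time')
t.start()
# ... timed work ...
t.stop()
print(t.elapsed(reset=True))   # per-interval elapsed time; resets the accumulator
print(t.active_time())         # cumulative active time; unaffected by reset()
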
From a8182eeea8ed9ef2f9a898822493587d7931b62e Mon Sep 17 00:00:00 2001 From: Selvaraj Anandaraj Date: Fri, 9 Feb 2024 18:31:27 -0800 Subject: [PATCH 271/296] Fixed atomic gemm defaults/fixed the offloading check Signed-off-by: Selvaraj Anandaraj --- megatron/core/model_parallel_config.py | 8 ++++---- megatron/core/transformer/transformer_config.py | 4 +++- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/megatron/core/model_parallel_config.py b/megatron/core/model_parallel_config.py index 15995f9ecb..144fa2d0f0 100644 --- a/megatron/core/model_parallel_config.py +++ b/megatron/core/model_parallel_config.py @@ -72,13 +72,13 @@ class ModelParallelConfig: and All-Gather splits. Don't care if tp_comm_overlap is False. Defaults to True. tp_comm_atomic_ag (bool, optional): If true, allows All-Gather overlap with Fprop GEMM by pipelining the GEMM - and All-Gather both done atomically. Don't care if tp_comm_overlap is False. Defaults to True. + and All-Gather both done atomically. Don't care if tp_comm_overlap is False. Defaults to False. tp_comm_split_rs (bool, optional): If true, allows Reduce-Scatter overlap with Fprop GEMM by pipelining the GEMM and Reduce-Scatter splits. Don't care if tp_comm_overlap is False. Defaults to True. tp_comm_atomic_rs (bool, optional): If true, allows Reduce-Scatter overlap with Fprop GEMM by pipelining the - GEMM and Reduce-Scatter both done atomically. Don't care if tp_comm_overlap is False. Defaults to True. + GEMM and Reduce-Scatter both done atomically. Don't care if tp_comm_overlap is False. Defaults to False. tp_comm_bulk_dgrad (bool, optional): If true, allows All-Gather overlap with Bprop activation gradient GEMM. Don't care if tp_comm_overlap is False. Defaults to True. @@ -184,9 +184,9 @@ class ModelParallelConfig: # Debug Options tp_comm_split_ag: bool = True - tp_comm_atomic_ag: bool = True + tp_comm_atomic_ag: bool = False tp_comm_split_rs: bool = True - tp_comm_atomic_rs: bool = True + tp_comm_atomic_rs: bool = False tp_comm_bulk_wgrad: bool = True tp_comm_bulk_dgrad: bool = True diff --git a/megatron/core/transformer/transformer_config.py b/megatron/core/transformer/transformer_config.py index cba3454a6a..ce6d38aba8 100644 --- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -178,7 +178,9 @@ def __post_init__(self): if self.num_moe_experts is not None and self.num_moe_experts <= 0: raise ValueError(f'num_moe_experts must be non-negative.') - if self.cpu_offloading_num_layers < 0 or self.cpu_offloading_num_layers >= self.num_layers: + if self.cpu_offloading and ( + self.cpu_offloading_num_layers < 0 or self.cpu_offloading_num_layers >= self.num_layers + ): raise ValueError( f'CPU offloading can be done only for layers less than {self.num_layers}' ) From daf000673726b7dee40c834f181f76703808b2fc Mon Sep 17 00:00:00 2001 From: Deepak Narayanan Date: Sun, 28 Jan 2024 11:50:17 -0800 Subject: [PATCH 272/296] Put embedding layers in separate buckets to make sure embedding tying works --- megatron/arguments.py | 2 + megatron/core/distributed/grad_buffer.py | 61 ++++++++++--------- .../common/language_module/language_module.py | 5 ++ megatron/model/module.py | 4 ++ 4 files changed, 44 insertions(+), 28 deletions(-) diff --git a/megatron/arguments.py b/megatron/arguments.py index d10b4f3020..535190e693 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -173,6 +173,8 @@ def validate_args(args, defaults={}): '--overlap-param-gather only supported with distributed optimizer' 
assert args.overlap_grad_reduce, \ '--overlap-grad-reduce should be turned on when using --overlap-param-gather' + assert args.use_mcore_models, \ + '--overlap-param-gather only supported with MCore models' # Parameters dtype. args.params_dtype = torch.float diff --git a/megatron/core/distributed/grad_buffer.py b/megatron/core/distributed/grad_buffer.py index e60d40dd80..ebb422140e 100644 --- a/megatron/core/distributed/grad_buffer.py +++ b/megatron/core/distributed/grad_buffer.py @@ -218,6 +218,16 @@ def _pad_if_needed(data_index: int): self.bucket_indices = [] per_bucket_numel_unpadded = [] bucket_id = 0 + + def _create_new_bucket(data_end_index: int): + nonlocal bucket_data_start_index, bucket_params, bucket_id + per_bucket_numel_unpadded.append(data_end_index - bucket_data_start_index) + data_end_index = _pad_if_needed(data_end_index) + self.bucket_indices.append((bucket_data_start_index, data_end_index)) + bucket_data_start_index = data_end_index + bucket_params = set() + bucket_id += 1 + for param in params[::-1]: # Iterate through parameters in reverse order to roughly follow backprop order, # and skip parameters that don't require gradients. @@ -225,6 +235,21 @@ def _pad_if_needed(data_index: int): continue this_numel = param.data.nelement() data_end_index = data_start_index + this_numel + + def _does_param_require_new_bucket(param): + # Split shared embedding parameters into separate bucket if using distributed + # optimizer that makes use of reduce-scatters instead of all-reduces. + # This ensures that the first and last pipeline stage partition optimizer state + # for the shared embedding parameters the same way across DP replicas, allowing + # the DP reduce-scatter to be before the embedding all-reduce. + return getattr(param, "shared_embedding", False) and self.use_distributed_optimizer + + # Create bucket with already collected parameters if current param needs its own bucket. + if _does_param_require_new_bucket(param) and len(bucket_params) > 0: + # We are creating a bucket for the already accumulated parameters, whose params + # end at the current data_start_index. + _create_new_bucket(data_start_index) + self.param_index_map[param] = ( data_start_index, data_end_index, @@ -232,33 +257,18 @@ def _pad_if_needed(data_index: int): ) bucket_params.add(param) - # If we have enough elements already, form a new bucket. - # If bucket_size is None, accumulate everything into a single bucket. - - # TODO: Remove len(bucket_params) > 1 when the final head that transforms token - # representations from hidden space to vocabulary space is in a PyTorch module - # whose forward method is called. If it is not and a bucket contains only this - # one parameter, we get incorrect behavior (i.e., higher losses) since we do not - # call the wait function on the bucket's all_gather_handle (we use forward pre- - # hooks on PyTorch modules to do this when --overlap-param-gather is used). - # As a temporary workaround, we make sure that no bucket has only one parameter. 
- if bucket_size is not None: - if (data_end_index - bucket_data_start_index) >= bucket_size and len( - bucket_params - ) > 1: - per_bucket_numel_unpadded.append(data_end_index - bucket_data_start_index) - data_end_index = _pad_if_needed(data_end_index) - self.bucket_indices.append((bucket_data_start_index, data_end_index)) - bucket_data_start_index = data_end_index - bucket_params = set() - bucket_id += 1 + # If we have enough elements already or the current param is part of the shared embedding + # layer and needs a separate bucket, form a new bucket. + if ( + bucket_size is not None + and (data_end_index - bucket_data_start_index) >= bucket_size + ) or _does_param_require_new_bucket(param): + _create_new_bucket(data_end_index) data_start_index = data_end_index # Add remaining params to a new bucket. if len(bucket_params) > 0: - per_bucket_numel_unpadded.append(data_end_index - bucket_data_start_index) - data_end_index = _pad_if_needed(data_end_index) - self.bucket_indices.append((bucket_data_start_index, data_end_index)) + _create_new_bucket(data_end_index) # Next, create underlying storage for buffer (with numel elements that includes # padding as necessary). @@ -305,11 +315,6 @@ def _pad_if_needed(data_index: int): bucket_id=cur_bucket_id, ) - if not overlap_grad_reduce: - assert len(bucket_params) == len( - params - ), 'All params should be in one bucket when overlap_grad_reduce is False' - # Log buckets for all PP stages. if ( parallel_state.get_data_parallel_rank(with_context_parallel=True) == 0 diff --git a/megatron/core/models/common/language_module/language_module.py b/megatron/core/models/common/language_module/language_module.py index 3883b7acd1..1e8b510824 100644 --- a/megatron/core/models/common/language_module/language_module.py +++ b/megatron/core/models/common/language_module/language_module.py @@ -53,12 +53,17 @@ def initialize_last_stage_with_word_embeddings(self) -> None: self.shared_embedding_or_output_weight().zero_out_wgrad = True return + if self.pre_process and not self.post_process: + assert parallel_state.is_pipeline_first_stage() + self.shared_embedding_or_output_weight().shared_embedding = True + if self.post_process and not self.pre_process: assert not parallel_state.is_pipeline_first_stage() # set word_embeddings weights to 0 here, then copy first # stage's weights using all_reduce below. self.output_layer.weight.data.fill_(0) self.output_layer.weight.shared = True + self.output_layer.weight.shared_embedding = True # Parameters are shared between the word embeddings layers, and the # heads at the end of the model. In a pipelined setup with more than diff --git a/megatron/model/module.py b/megatron/model/module.py index dfd01f5667..1741d4b850 100644 --- a/megatron/model/module.py +++ b/megatron/model/module.py @@ -63,6 +63,9 @@ def initialize_word_embeddings(self): self.shared_embedding_or_output_weight().zero_out_wgrad = True return + if mpu.is_pipeline_first_stage() and self.pre_process and not self.post_process: + self.shared_embedding_or_output_weight().shared_embedding = True + # Parameters are shared between the word embeddings layers, and the # heads at the end of the model. 
In a pipelined setup with more than # one stage, the initial embedding layer and the head are on different @@ -85,6 +88,7 @@ def initialize_word_embeddings(self): config=self.config, init_method=self.config.init_method) self.word_embeddings.weight.data.fill_(0) self.word_embeddings.weight.shared = True + self.word_embeddings.weight.shared_embedding = True # Zero out initial weights for decoder embedding. # NOTE: We don't currently support T5 with the interleaved schedule. From a73b1139c627858ff90ac3005f2e9a2763b2f3ce Mon Sep 17 00:00:00 2001 From: Ankur Joshi Date: Sun, 11 Feb 2024 20:29:48 -0800 Subject: [PATCH 273/296] Ran black(19.10b0) on megatron/core --- megatron/core/timers.py | 1 + 1 file changed, 1 insertion(+) diff --git a/megatron/core/timers.py b/megatron/core/timers.py index 1447feaea3..b61eb4ed22 100644 --- a/megatron/core/timers.py +++ b/megatron/core/timers.py @@ -143,6 +143,7 @@ def elapsed(self, reset=True, barrier=False): def active_time(self): return self._active_time + class Timers: """Class for a group of Timers. """ From 2482a4ae38f0ff88004283f7edeb196c159b16f1 Mon Sep 17 00:00:00 2001 From: Deepak Narayanan Date: Fri, 9 Feb 2024 13:10:02 -0800 Subject: [PATCH 274/296] Use MCore for distributed optimizer tests --- tests/functional_tests/jet_recipes/MR-gpt.yaml | 11 ++++++----- ...se-distributed-optimizer_mcore-false_te-false.json | 1 - ...pp-1_args-dist-optimizer_mcore-false_te-false.json | 1 - ..._pp-1_args-dist-optimizer_mcore-true_te-false.json | 1 + ...overlap-grad-reduce_mcore-false_te-false_vp-1.json | 1 - ...reduce-param-gather_mcore-false_te-false_vp-1.json | 1 - ...-reduce-param-gather_mcore-true_te-false_vp-1.json | 1 + ...p-grad-reduce-untied_mcore-true_te-false_vp-1.json | 1 + ...overlap-grad-reduce_mcore-false_te-false_vp-1.json | 1 - ...-overlap-grad-reduce_mcore-true_te-false_vp-1.json | 1 + ...izer-overlap-grad-reduce_mcore-false_te-false.json | 1 - ...grad-reduce-param-gather_mcore-false_te-false.json | 1 - ...-grad-reduce-param-gather_mcore-true_te-false.json | 1 + ...izer-overlap-grad-reduce_mcore-false_te-false.json | 1 - ...mizer-overlap-grad-reduce_mcore-true_te-false.json | 1 + 15 files changed, 12 insertions(+), 13 deletions(-) delete mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--use-distributed-optimizer_mcore-false_te-false.json delete mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-false_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-true_te-false.json delete mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--use-distributed-optimizer-overlap-grad-reduce_mcore-false_te-false_vp-1.json delete mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-false_te-false_vp-1.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false_vp-1.json create mode 100644 
tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-untied_mcore-true_te-false_vp-1.json delete mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-false_te-false_vp-1.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false_vp-1.json delete mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args--use-distributed-optimizer-overlap-grad-reduce_mcore-false_te-false.json delete mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-false_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false.json delete mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-false_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false.json diff --git a/tests/functional_tests/jet_recipes/MR-gpt.yaml b/tests/functional_tests/jet_recipes/MR-gpt.yaml index 5a093e6c94..4c03391c57 100644 --- a/tests/functional_tests/jet_recipes/MR-gpt.yaml +++ b/tests/functional_tests/jet_recipes/MR-gpt.yaml @@ -61,14 +61,15 @@ products: - {tp_size: [2], pp_size: [1], extra_args: ['"--sequence-parallel --num-experts 8 --expert-model-parallel-size 2 --moe-router-load-balancing-type sinkhorn --moe-router-topk 1"'], args_meta: ["te_8experts2parallel"]} - {tp_size: [2], pp_size: [1], extra_args: ['"--moe-grouped-gemm --disable-bias-linear --sequence-parallel --num-experts 8 --expert-model-parallel-size 2 --moe-router-load-balancing-type sinkhorn --moe-router-topk 1"'], moe_grouped_gemm: [1], args_meta: ["te_8experts2parallel_groupedGEMM"]} - {tp_size: [2], pp_size: [1], extra_args: ['"--disable-bias-linear --sequence-parallel --num-experts 8 --expert-model-parallel-size 2 --moe-router-load-balancing-type aux_loss --moe-router-topk 2 --moe-aux-loss-coeff 1e-2"'], moe_grouped_gemm: [1], args_meta: ["te_8experts2parallel_top2router"]} + - {tp_size: [1], pp_size: [1], extra_args: ["--use-distributed-optimizer"], args_meta: ["dist_optimizer"]} + - {tp_size: [4], pp_size: [1], extra_args: ['"--use-distributed-optimizer --overlap-grad-reduce"'], args_meta: ["dist_optimizer_overlap_grad_reduce"]} + - {tp_size: [4], pp_size: [1], extra_args: ['"--use-distributed-optimizer --overlap-grad-reduce --overlap-param-gather"'], args_meta: ["dist_optimizer_overlap_grad_reduce_param_gather"]} + - {tp_size: [1], pp_size: [4], vp_size: [1], extra_args: ['"--use-distributed-optimizer --overlap-grad-reduce"'], args_meta: ["dist_optimizer_overlap_grad_reduce"]} + - {tp_size: [1], pp_size: [4], vp_size: [1], extra_args: ['"--use-distributed-optimizer --overlap-grad-reduce --untie-embeddings-and-output-weights"'], args_meta: 
["dist_optimizer_overlap_grad_reduce_untied"]} + - {tp_size: [1], pp_size: [4], vp_size: [1], extra_args: ['"--use-distributed-optimizer --overlap-grad-reduce --overlap-param-gather"'], args_meta: ["dist_optimizer_overlap_grad_reduce_param_gather"]} # Non-MCore - {use_mcore: [False], use_te: [False, True], tp_size: [2], pp_size: [2]} - {use_mcore: [False], tp_size: [1], pp_size: [4], vp_size: [1]} - - {use_mcore: [False], tp_size: [1], pp_size: [1], extra_args: ["--use-distributed-optimizer"], args_meta: ["dist_optimizer"]} - - {use_mcore: [False], tp_size: [4], pp_size: [1], extra_args: ['"--use-distributed-optimizer --overlap-grad-reduce"'], args_meta: ["dist_optimizer_overlap_grad_reduce"]} - - {use_mcore: [False], tp_size: [4], pp_size: [1], extra_args: ['"--use-distributed-optimizer --overlap-grad-reduce --overlap-param-gather"'], args_meta: ["dist_optimizer_overlap_grad_reduce_param_gather"]} - - {use_mcore: [False], tp_size: [1], pp_size: [4], vp_size: [1], extra_args: ['"--use-distributed-optimizer --overlap-grad-reduce"'], args_meta: ["dist_optimizer_overlap_grad_reduce"]} - - {use_mcore: [False], tp_size: [1], pp_size: [4], vp_size: [1], extra_args: ['"--use-distributed-optimizer --overlap-grad-reduce --overlap-param-gather"'], args_meta: ["dist_optimizer_overlap_grad_reduce_param_gather"]} key_segments: vp_size: vp use_mcore: mcore diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--use-distributed-optimizer_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--use-distributed-optimizer_mcore-false_te-false.json deleted file mode 100644 index 6db1c6fba9..0000000000 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--use-distributed-optimizer_mcore-false_te-false.json +++ /dev/null @@ -1 +0,0 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.83801, 10.8696, 10.87494, 10.85972, 10.85916, 10.81678, 10.65633, 10.6236, 10.52854, 10.29768]}, "num-zeros": {"start_step": 0, "end_step": 32, "step_interval": 5, "values": [1227.0, 1343.0, 1547.0, 1357.0, 1571.0, 1230.0, 1219.0]}, "iteration_timing_avg": 0.038630588235294125} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-false_te-false.json deleted file mode 100644 index 2b13d0e4e2..0000000000 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-false_te-false.json +++ /dev/null @@ -1 +0,0 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.83801, 10.8696, 10.87494, 10.85972, 10.85916, 10.81678, 10.65633, 10.6236, 10.52854, 10.29768]}, "num-zeros": {"start_step": 0, "end_step": 32, "step_interval": 5, "values": [1227.0, 1343.0, 1547.0, 1357.0, 1571.0, 1230.0, 1219.0]}, "iteration_timing_avg": 0.04080235294117647} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-true_te-false.json 
b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-true_te-false.json new file mode 100644 index 0000000000..8abb3869de --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.89293, 10.89952, 10.87875, 10.85504, 10.73491, 10.63533, 10.15658, 10.2421, 10.15573, 9.82116]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1608.0, 1717.0, 1868.0, 1920.0, 1891.0, 1766.0, 1630.0, 1955.0, 2416.0, 2390.0]}, "iteration_timing_avg": 0.04569411764705883} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--use-distributed-optimizer-overlap-grad-reduce_mcore-false_te-false_vp-1.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--use-distributed-optimizer-overlap-grad-reduce_mcore-false_te-false_vp-1.json deleted file mode 100644 index d2758ca67b..0000000000 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--use-distributed-optimizer-overlap-grad-reduce_mcore-false_te-false_vp-1.json +++ /dev/null @@ -1 +0,0 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.8214, 10.8607, 10.8735, 10.85187, 10.84091, 10.80629, 10.6169, 10.59573, 10.50423, 10.22237]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2381.0, 2498.0, 2552.0, 2166.0, 2258.0, 2542.0, 2425.0]}, "iteration_timing_avg": 0.07675470588235295} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-false_te-false_vp-1.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-false_te-false_vp-1.json deleted file mode 100644 index 7dd1291c75..0000000000 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-false_te-false_vp-1.json +++ /dev/null @@ -1 +0,0 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.8214, 10.8607, 10.8735, 10.85187, 10.84091, 10.80629, 10.6169, 10.59573, 10.50423, 10.22237]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2381.0, 2498.0, 2552.0, 2166.0, 2258.0, 2542.0, 2425.0]}, "iteration_timing_avg": 0.08087911764705882} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false_vp-1.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false_vp-1.json new file mode 100644 index 0000000000..23a753821c --- /dev/null +++ 
b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false_vp-1.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84693, 10.89428, 10.88916, 10.82637, 10.70814, 10.61007, 10.11963, 10.22997, 10.15772, 9.83339]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1636.0, 1747.0, 1801.0, 1862.0, 1827.0, 1711.0, 1708.0, 1954.0, 2328.0, 2335.0]}, "iteration_timing_avg": 0.09368529411764706} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-untied_mcore-true_te-false_vp-1.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-untied_mcore-true_te-false_vp-1.json new file mode 100644 index 0000000000..4113dfc61d --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-untied_mcore-true_te-false_vp-1.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.92853, 10.937, 10.92943, 10.87789, 10.75133, 10.67044, 10.17418, 10.27899, 10.1883, 9.87023]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [22727964.0, 23020600.0, 22500812.0, 22830580.0, 22739790.0, 22548252.0, 22955676.0, 22589500.0, 22659010.0, 22884684.0]}, "iteration_timing_avg": 0.085995} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-false_te-false_vp-1.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-false_te-false_vp-1.json deleted file mode 100644 index a2df49d42a..0000000000 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-false_te-false_vp-1.json +++ /dev/null @@ -1 +0,0 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.8214, 10.8607, 10.8735, 10.85187, 10.84091, 10.80629, 10.6169, 10.59573, 10.50423, 10.22237]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2381.0, 2498.0, 2552.0, 2166.0, 2258.0, 2542.0, 2425.0]}, "iteration_timing_avg": 0.07611323529411766} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false_vp-1.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false_vp-1.json new file mode 100644 index 0000000000..262b2c579e --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false_vp-1.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, 
"values": [10.84693, 10.89428, 10.88916, 10.82637, 10.70814, 10.61007, 10.11963, 10.22997, 10.15772, 9.83339]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1636.0, 1747.0, 1801.0, 1862.0, 1827.0, 1711.0, 1708.0, 1954.0, 2328.0, 2335.0]}, "iteration_timing_avg": 0.08397176470588234} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args--use-distributed-optimizer-overlap-grad-reduce_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args--use-distributed-optimizer-overlap-grad-reduce_mcore-false_te-false.json deleted file mode 100644 index 4d473a5e7e..0000000000 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args--use-distributed-optimizer-overlap-grad-reduce_mcore-false_te-false.json +++ /dev/null @@ -1 +0,0 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84879, 10.88953, 10.88082, 10.88243, 10.86947, 10.83539, 10.64785, 10.63863, 10.52242, 10.23812]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2301.0, 2328.0, 2414.0, 1973.0, 2168.0, 2471.0, 2419.0]}, "iteration_timing_avg": 0.120935} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-false_te-false.json deleted file mode 100644 index ba026bbe85..0000000000 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-false_te-false.json +++ /dev/null @@ -1 +0,0 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84879, 10.88953, 10.88082, 10.88243, 10.86947, 10.83539, 10.64785, 10.63863, 10.52242, 10.23812]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2301.0, 2328.0, 2414.0, 1973.0, 2168.0, 2471.0, 2419.0]}, "iteration_timing_avg": 0.1338870588235294} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false.json new file mode 100644 index 0000000000..baf2c64a93 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84429, 10.86285, 10.86177, 10.81156, 10.69316, 10.61799, 10.16498, 10.25035, 10.15231, 9.83669]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1932.0, 2302.0, 2138.0, 2193.0, 2254.0, 2189.0, 1844.0, 2313.0, 2538.0, 2473.0]}, "iteration_timing_avg": 
0.16636205882352936} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-false_te-false.json deleted file mode 100644 index 8b9cb738c6..0000000000 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-false_te-false.json +++ /dev/null @@ -1 +0,0 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84879, 10.88953, 10.88082, 10.88243, 10.86947, 10.83539, 10.64785, 10.63863, 10.52242, 10.23812]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2301.0, 2328.0, 2414.0, 1973.0, 2168.0, 2471.0, 2419.0]}, "iteration_timing_avg": 0.13206588235294117} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false.json new file mode 100644 index 0000000000..5db54e4e03 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84429, 10.86285, 10.86177, 10.81156, 10.69316, 10.61799, 10.16498, 10.25035, 10.15231, 9.83669]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1932.0, 2302.0, 2138.0, 2193.0, 2254.0, 2189.0, 1844.0, 2313.0, 2538.0, 2473.0]}, "iteration_timing_avg": 0.1574994117647059} \ No newline at end of file From 9e17a15e4416dd314d96eb9cb3d7b453f2cf449b Mon Sep 17 00:00:00 2001 From: Lawrence McAfee Date: Tue, 13 Feb 2024 11:03:37 -0800 Subject: [PATCH 275/296] Condition TE init_method on config.perform_initialization. 
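With this change, Transformer-Engine layers receive a no-op weight initializer whenever `config.perform_initialization` is False, so no random-initialization work is spent on parameters that will be overwritten anyway (for example when weights are loaded from a checkpoint). A hypothetical illustration of the gating helper added below, with `SimpleNamespace` standing in for a real `TransformerConfig`:

    from types import SimpleNamespace
    import torch

    def condition_init_method(config, init_method):
        # Return the configured initializer, or a no-op when initialization is disabled.
        return init_method if config.perform_initialization else (lambda w: None)

    weight = torch.empty(4, 4)

    cfg = SimpleNamespace(perform_initialization=False)
    condition_init_method(cfg, torch.nn.init.xavier_uniform_)(weight)   # weight left as-is

    cfg = SimpleNamespace(perform_initialization=True)
    condition_init_method(cfg, torch.nn.init.xavier_uniform_)(weight)   # weight initialized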
--- .../transformer/custom_layers/transformer_engine.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/megatron/core/transformer/custom_layers/transformer_engine.py b/megatron/core/transformer/custom_layers/transformer_engine.py index f4b0c78ddb..eb4b917227 100644 --- a/megatron/core/transformer/custom_layers/transformer_engine.py +++ b/megatron/core/transformer/custom_layers/transformer_engine.py @@ -35,6 +35,10 @@ def _get_extra_te_kwargs(config: TransformerConfig): return extra_transformer_engine_kwargs +def condition_init_method(config, init_method): + return init_method if config.perform_initialization else (lambda w: None) + + class TENorm: """ A conditional wrapper to initialize an instance of Transformer-Engine's @@ -129,7 +133,7 @@ def __init__( tp_group=get_tensor_model_parallel_group(check_initialized=False), tp_size=self.config.tensor_model_parallel_size, get_rng_state_tracker=get_cuda_rng_tracker, - init_method=init_method, + init_method=condition_init_method(config, init_method), bias=bias, return_bias=self.te_return_bias, parallel_mode=parallel_mode, @@ -220,7 +224,7 @@ def __init__( tp_group=get_tensor_model_parallel_group(check_initialized=False), tp_size=self.config.tensor_model_parallel_size, get_rng_state_tracker=get_cuda_rng_tracker, - init_method=init_method, + init_method=condition_init_method(config, init_method), bias=bias, return_bias=self.te_return_bias, parallel_mode="column", @@ -279,7 +283,7 @@ def __init__( output_size=output_size, parallel_mode="column", config=config, - init_method=init_method, + init_method=condition_init_method(config, init_method), bias=bias, skip_bias_add=skip_bias_add, skip_weight_param_allocation=skip_weight_param_allocation, @@ -326,7 +330,7 @@ def __init__( output_size=output_size, parallel_mode="row", config=config, - init_method=init_method, + init_method=condition_init_method(config, init_method), bias=bias, skip_bias_add=skip_bias_add, skip_weight_param_allocation=False, # We don't currently use this for row parallel layers From 32f91553f6b28bc0c44a9267cd72c0795a73813f Mon Sep 17 00:00:00 2001 From: Deepak Narayanan Date: Tue, 13 Feb 2024 14:41:09 -0800 Subject: [PATCH 276/296] Move optimizers to MCore --- megatron/arguments.py | 4 +- megatron/core/distributed/__init__.py | 3 + .../distributed/distributed_data_parallel.py | 2 +- .../core/distributed/finalize_model_grads.py | 2 +- megatron/core/distributed/grad_buffer.py | 2 +- megatron/core/model_parallel_config.py | 5 +- megatron/{ => core}/optimizer/__init__.py | 84 ++++++------- megatron/{ => core}/optimizer/clip_grads.py | 11 +- .../{ => core}/optimizer/distrib_optimizer.py | 78 +++++++----- megatron/{ => core}/optimizer/grad_scaler.py | 5 +- megatron/{ => core}/optimizer/optimizer.py | 33 +++-- megatron/core/optimizer/optimizer_config.py | 116 ++++++++++++++++++ megatron/optimizer/utils.py | 19 --- megatron/training.py | 10 +- 14 files changed, 246 insertions(+), 128 deletions(-) rename megatron/{ => core}/optimizer/__init__.py (76%) rename megatron/{ => core}/optimizer/clip_grads.py (96%) rename megatron/{ => core}/optimizer/distrib_optimizer.py (95%) rename megatron/{ => core}/optimizer/grad_scaler.py (97%) rename megatron/{ => core}/optimizer/optimizer.py (97%) create mode 100644 megatron/core/optimizer/optimizer_config.py delete mode 100644 megatron/optimizer/utils.py diff --git a/megatron/arguments.py b/megatron/arguments.py index d10b4f3020..aa4ea33254 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -1006,7 +1006,7 @@ 
def _add_learning_rate_args(parser): group.add_argument('--lr', type=float, default=None, help='Initial learning rate. Depending on decay style ' - 'and initial warmup, the learing rate at each ' + 'and initial warmup, the learning rate at each ' 'iteration would be different.') group.add_argument('--lr-decay-style', type=str, default='linear', choices=['constant', 'linear', 'cosine', 'inverse-square-root'], @@ -1101,7 +1101,7 @@ def _add_mixed_precision_args(parser): group.add_argument('--initial-loss-scale', type=float, default=2**32, help='Initial loss-scale for dynamic loss scaling.') group.add_argument('--min-loss-scale', type=float, default=1.0, - help='Minimum loss scale for dynamic loss scale.') + help='Minimum loss scale for dynamic loss scaling.') group.add_argument('--loss-scale-window', type=float, default=1000, help='Window over which to raise/lower dynamic scale.') group.add_argument('--hysteresis', type=int, default=2, diff --git a/megatron/core/distributed/__init__.py b/megatron/core/distributed/__init__.py index 34c7209a27..328c3101eb 100644 --- a/megatron/core/distributed/__init__.py +++ b/megatron/core/distributed/__init__.py @@ -1,2 +1,5 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + from .distributed_data_parallel import DistributedDataParallel from .finalize_model_grads import finalize_model_grads +from .grad_buffer import shard_buffer diff --git a/megatron/core/distributed/distributed_data_parallel.py b/megatron/core/distributed/distributed_data_parallel.py index e09564b396..c1d9dc11c0 100644 --- a/megatron/core/distributed/distributed_data_parallel.py +++ b/megatron/core/distributed/distributed_data_parallel.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. from contextlib import contextmanager from typing import Dict diff --git a/megatron/core/distributed/finalize_model_grads.py b/megatron/core/distributed/finalize_model_grads.py index 632ef49e3a..587a59e247 100644 --- a/megatron/core/distributed/finalize_model_grads.py +++ b/megatron/core/distributed/finalize_model_grads.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. from typing import List diff --git a/megatron/core/distributed/grad_buffer.py b/megatron/core/distributed/grad_buffer.py index e60d40dd80..14ae2191ea 100644 --- a/megatron/core/distributed/grad_buffer.py +++ b/megatron/core/distributed/grad_buffer.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. import math from logging import getLogger diff --git a/megatron/core/model_parallel_config.py b/megatron/core/model_parallel_config.py index 15995f9ecb..4a34c79d13 100644 --- a/megatron/core/model_parallel_config.py +++ b/megatron/core/model_parallel_config.py @@ -48,9 +48,10 @@ class ModelParallelConfig: bf16 (bool): If true, train with bf16 mixed precision training. Defaults to False. - params_dtype (torch.dtype): dtype used when intializing the weights. Defaults to torch.float32 + params_dtype (torch.dtype): dtype used when intializing the weights. Defaults to torch.float32. + + timers (optional, default=None): TODO. 
- timers (optional, default=None): TODO Optimizations ------------- diff --git a/megatron/optimizer/__init__.py b/megatron/core/optimizer/__init__.py similarity index 76% rename from megatron/optimizer/__init__.py rename to megatron/core/optimizer/__init__.py index 395485bf00..a8fb749bd3 100644 --- a/megatron/optimizer/__init__.py +++ b/megatron/core/optimizer/__init__.py @@ -1,24 +1,19 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. from apex.optimizers import FusedAdam as Adam from apex.optimizers import FusedSGD as SGD -from megatron import get_args - from .distrib_optimizer import DistributedOptimizer from .grad_scaler import ConstantGradScaler, DynamicGradScaler -from .optimizer import ( - Float16OptimizerWithFloat16Params, - FP32Optimizer, - ChainedOptimizer, -) +from .optimizer import ChainedOptimizer, Float16OptimizerWithFloat16Params, FP32Optimizer +from .optimizer_config import OptimizerConfig def get_param_groups(model_chunks, no_weight_decay_cond, scale_lr_cond, lr_mult): """Create parameter groups for optimizer. Creates parameter groups based on weight decay condition (regularized vs - non regularized), learning rate scale condition (args.lr vs lr_mult * args.lr), + non regularized), learning rate scale condition (lr vs lr_mult * lr), and whether it is expert parameters. scale_lr_cond is used during finetuning where head of the network requires a scaled version of the base learning rate. @@ -89,7 +84,7 @@ def get_param_groups(model_chunks, no_weight_decay_cond, scale_lr_cond, lr_mult) return param_groups -def get_megatron_optimizer_based_on_param_groups(param_groups, grad_buffers=None): +def get_megatron_optimizer_based_on_param_groups(config, param_groups, grad_buffers=None): """Get megatron optimizer based on parameter groups. For distributed optimizer, we need the parameter gradients to be stored in a @@ -99,22 +94,23 @@ def get_megatron_optimizer_based_on_param_groups(param_groups, grad_buffers=None param_groups (list): list of parameter groups. grad_buffers (list, optional): list of gradient buffers. Defaults to None. """ - args = get_args() - - if args.optimizer == 'adam': + if config.optimizer == 'adam': optimizer = Adam( param_groups, - lr=args.lr, - weight_decay=args.weight_decay, - betas=(args.adam_beta1, args.adam_beta2), - eps=args.adam_eps, + lr=config.lr, + weight_decay=config.weight_decay, + betas=(config.adam_beta1, config.adam_beta2), + eps=config.adam_eps, ) - elif args.optimizer == 'sgd': + elif config.optimizer == 'sgd': optimizer = SGD( - param_groups, lr=args.lr, weight_decay=args.weight_decay, momentum=args.sgd_momentum + param_groups, + lr=config.lr, + weight_decay=config.weight_decay, + momentum=config.sgd_momentum, ) else: - raise Exception('{} optimizer is not supported.'.format(args.optimizer)) + raise Exception('{} optimizer is not supported.'.format(config.optimizer)) # Determine whether the params have main-grad field. params_have_main_grad = True @@ -122,7 +118,7 @@ def get_megatron_optimizer_based_on_param_groups(param_groups, grad_buffers=None # If it is expert parameters, we do not use the distributed optimizer. # TODO: enable support for distributed optimizer with expert parameters # (need to support DistOpt across process group with size dp_size / ep_size). 
- use_distributed_optimizer = args.use_distributed_optimizer and not any( + use_distributed_optimizer = config.use_distributed_optimizer and not any( [pg['is_expert_parallel'] for pg in param_groups] ) @@ -130,7 +126,7 @@ def get_megatron_optimizer_based_on_param_groups(param_groups, grad_buffers=None # - Note: both the Float16Optimizer and the DistributedOptimizer inherit # from the MixedPrecisionOptimizer, which manages any optimizer where # the model params and main params are distinct. - if args.fp16 or args.bf16 or use_distributed_optimizer: + if config.fp16 or config.bf16 or use_distributed_optimizer: # Grad scaler: # if loss-scale is provided, instantiate the constant scaler. @@ -141,34 +137,36 @@ def get_megatron_optimizer_based_on_param_groups(param_groups, grad_buffers=None grad_scaler = None # Constant loss scale. - if args.loss_scale: - grad_scaler = ConstantGradScaler(args.loss_scale) + if config.loss_scale: + grad_scaler = ConstantGradScaler(config.loss_scale) # Dynamic loss scale. else: - if args.fp16: + if config.fp16: grad_scaler = DynamicGradScaler( - initial_scale=args.initial_loss_scale, - min_scale=args.min_loss_scale, + initial_scale=config.initial_loss_scale, + min_scale=config.min_loss_scale, growth_factor=2.0, backoff_factor=0.5, - growth_interval=args.loss_scale_window, - hysteresis=args.hysteresis, + growth_interval=config.loss_scale_window, + hysteresis=config.hysteresis, ) optimizer_args = [ optimizer, - args.clip_grad, - args.log_num_zeros_in_grad, - args.check_for_nan_in_loss_and_grad, + config.clip_grad, + config.log_num_zeros_in_grad, + config.check_for_nan_in_loss_and_grad, params_have_main_grad, - args.fp16, - args.bf16, - args.params_dtype, + config.fp16, + config.bf16, + config.params_dtype, grad_scaler, ] if use_distributed_optimizer: - optimizer = DistributedOptimizer(*optimizer_args, grad_buffers) + optimizer = DistributedOptimizer( + *optimizer_args, grad_buffers, config.overlap_param_gather + ) else: optimizer = Float16OptimizerWithFloat16Params(*optimizer_args) @@ -177,15 +175,15 @@ def get_megatron_optimizer_based_on_param_groups(param_groups, grad_buffers=None # FP32. return FP32Optimizer( optimizer, - args.clip_grad, - args.log_num_zeros_in_grad, - args.check_for_nan_in_loss_and_grad, + config.clip_grad, + config.log_num_zeros_in_grad, + config.check_for_nan_in_loss_and_grad, params_have_main_grad, ) def get_megatron_optimizer( - model_chunks, no_weight_decay_cond=None, scale_lr_cond=None, lr_mult=1.0 + config, model_chunks, no_weight_decay_cond=None, scale_lr_cond=None, lr_mult=1.0 ): """Retrieve the Megatron optimizer for model chunks. @@ -215,10 +213,12 @@ def get_megatron_optimizer( # Create optimizers. optimizers = [ - get_megatron_optimizer_based_on_param_groups(dense_param_groups, per_model_grad_buffers) + get_megatron_optimizer_based_on_param_groups( + config, dense_param_groups, per_model_grad_buffers + ) ] if len(moe_param_groups): - optimizers.append(get_megatron_optimizer_based_on_param_groups(moe_param_groups)) + optimizers.append(get_megatron_optimizer_based_on_param_groups(config, moe_param_groups)) if len(optimizers) == 1: return optimizers[0] diff --git a/megatron/optimizer/clip_grads.py b/megatron/core/optimizer/clip_grads.py similarity index 96% rename from megatron/optimizer/clip_grads.py rename to megatron/core/optimizer/clip_grads.py index 904502e3dc..4ad2445a89 100644 --- a/megatron/optimizer/clip_grads.py +++ b/megatron/core/optimizer/clip_grads.py @@ -1,17 +1,16 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. 
All rights reserved. +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. """Gradient clipping.""" import os +import amp_C import torch -from torch import inf - from apex.multi_tensor_apply import multi_tensor_applier -import amp_C +from torch import inf -from megatron.model.module import param_is_not_shared -from megatron.core.tensor_parallel import param_is_not_tensor_parallel_duplicate +from ..tensor_parallel import param_is_not_tensor_parallel_duplicate +from ..transformer.module import param_is_not_shared def clip_grad_norm_fp32( diff --git a/megatron/optimizer/distrib_optimizer.py b/megatron/core/optimizer/distrib_optimizer.py similarity index 95% rename from megatron/optimizer/distrib_optimizer.py rename to megatron/core/optimizer/distrib_optimizer.py index 9152ba5476..3e5943c0b1 100644 --- a/megatron/optimizer/distrib_optimizer.py +++ b/megatron/core/optimizer/distrib_optimizer.py @@ -1,20 +1,19 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. """Megatron distributed optimizer.""" -from apex.optimizers import FusedAdam as Adam -import math -import torch import itertools +from logging import getLogger -from megatron import get_args -from megatron import get_timers -from megatron import print_rank_0 -from megatron.core import mpu, tensor_parallel +import torch +from apex.optimizers import FusedAdam as Adam +from .. import parallel_state, tensor_parallel +from ..distributed import shard_buffer from .optimizer import MixedPrecisionOptimizer, _zero_grad_group_helper -from .utils import shard_buffer + +logger = getLogger(__name__) class Range: @@ -141,8 +140,10 @@ def build_model_gbuf_range(cls, grad_buffer, bucket_index): reduce-scatter and all-gather. """ - data_parallel_rank = mpu.get_data_parallel_rank(with_context_parallel=True) - data_parallel_world_size = mpu.get_data_parallel_world_size(with_context_parallel=True) + data_parallel_rank = parallel_state.get_data_parallel_rank(with_context_parallel=True) + data_parallel_world_size = parallel_state.get_data_parallel_world_size( + with_context_parallel=True + ) bucket = grad_buffer.buckets[bucket_index] bucket_buffer = bucket.data @@ -382,6 +383,7 @@ def __init__( params_dtype, grad_scaler, per_model_grad_buffers, + overlap_param_gather, ): """ See top of class definition for argument descriptions. 
@@ -455,11 +457,13 @@ def __init__( self.param_buffers = [] for gbuf_index, grad_buffer in enumerate(self.grad_buffers): size_ratio = torch.finfo(grad_buffer.dtype).bits // torch.finfo(params_dtype).bits - assert size_ratio >= 1, "param_dtype size should be smaller than or equal to grad_dtype size" + assert ( + size_ratio >= 1 + ), "param_dtype size should be smaller than or equal to grad_dtype size" current_param_buffers = [] for bucket in grad_buffer.buckets: param_buffer = bucket.data.view(dtype=params_dtype) - param_buffer = param_buffer[:bucket.data.numel()] + param_buffer = param_buffer[: bucket.data.numel()] assert ( param_buffer.data_ptr() == bucket.data.data_ptr() ), "param_buffer and grad_buffer for same bucket should start at the same byte address" @@ -498,7 +502,7 @@ def __init__( self.param_buffer_copied.append(False) self.num_all_gather_handles = len(self.all_gather_handle_index_to_bucket_index_map) - self.overlap_param_gather = get_args().overlap_param_gather + self.overlap_param_gather = overlap_param_gather self.remove_pre_hook_handle = None if self.overlap_param_gather: self.enable_pre_hook() @@ -644,14 +648,14 @@ def load_state_dict(self, state_dict): # Grad scaler. if 'grad_scaler' not in state_dict: if self.fp16: - print_rank_0( + logger.info( '***WARNING*** found an old checkpoint, will not ' 'load grad scaler ...' ) else: if self.grad_scaler: self.grad_scaler.load_state_dict(state_dict['grad_scaler']) else: - print_rank_0( + logger.info( '***WARNING*** fould the grad scaler in the ' 'checkpoint but it is None in the class. ' 'Skipping loading grad scaler ...' @@ -669,10 +673,14 @@ def get_parameter_state(self): """ # Data parallelism variables. - data_parallel_world_size = mpu.get_data_parallel_world_size(with_context_parallel=True) - data_parallel_rank = mpu.get_data_parallel_rank(with_context_parallel=True) - data_parallel_group_gloo = mpu.get_data_parallel_group_gloo(with_context_parallel=True) - data_parallel_global_ranks = list(mpu._DATA_PARALLEL_GLOBAL_RANKS_WITH_CP) + data_parallel_world_size = parallel_state.get_data_parallel_world_size( + with_context_parallel=True + ) + data_parallel_rank = parallel_state.get_data_parallel_rank(with_context_parallel=True) + data_parallel_group_gloo = parallel_state.get_data_parallel_group_gloo( + with_context_parallel=True + ) + data_parallel_global_ranks = list(parallel_state._DATA_PARALLEL_GLOBAL_RANKS_WITH_CP) # Collect param states. state = { @@ -757,7 +765,7 @@ def save_parameter_state(self, filename): filename (str): path to save parameter state to. """ - data_parallel_rank = mpu.get_data_parallel_rank(with_context_parallel=True) + data_parallel_rank = parallel_state.get_data_parallel_rank(with_context_parallel=True) state_dict = self.get_parameter_state() if data_parallel_rank == 0: torch.save(state_dict, filename) @@ -774,10 +782,14 @@ def load_parameter_state_from_state_dict(self, state_dict): """ # Data parallelism variables. 
- data_parallel_world_size = mpu.get_data_parallel_world_size(with_context_parallel=True) - data_parallel_rank = mpu.get_data_parallel_rank(with_context_parallel=True) - data_parallel_group_gloo = mpu.get_data_parallel_group_gloo(with_context_parallel=True) - data_parallel_global_ranks = list(mpu._DATA_PARALLEL_GLOBAL_RANKS_WITH_CP) + data_parallel_world_size = parallel_state.get_data_parallel_world_size( + with_context_parallel=True + ) + data_parallel_rank = parallel_state.get_data_parallel_rank(with_context_parallel=True) + data_parallel_group_gloo = parallel_state.get_data_parallel_group_gloo( + with_context_parallel=True + ) + data_parallel_global_ranks = list(parallel_state._DATA_PARALLEL_GLOBAL_RANKS_WITH_CP) # Scatter tensors to all DP ranks. for gbuf_idx, gbuf_range_maps in enumerate(self.gbuf_ranges): @@ -827,7 +839,7 @@ def load_parameter_state_from_state_dict(self, state_dict): ) if world_tensor.numel() > numel: # Truncate extra values, which are padding anyway. - print_rank_0( + logger.info( f"Truncating extra values from checkpoint (numel_in_checkpoint={numel_in_checkpoint}, " f"numel={numel}, numel_unpadded={numel_unpadded})" ) @@ -835,7 +847,7 @@ def load_parameter_state_from_state_dict(self, state_dict): elif world_tensor.numel() < numel: # In this case, numel > world_tensor.numel() (which is numel_in_checkpoint). # Create new tensor with right number of values, then copy and use new tensor. - print_rank_0( + logger.info( f"Expanding tensor from checkpoint (numel_in_checkpoint={numel_in_checkpoint}, " f"numel={numel}, numel_unpadded={numel_unpadded})" ) @@ -847,7 +859,7 @@ def load_parameter_state_from_state_dict(self, state_dict): world_tensor_reshaped[:numel_in_checkpoint].copy_(world_tensor) world_tensor = world_tensor_reshaped else: - print_rank_0( + logger.info( "***WARNING*** Using older checkpoint so skipping padding checks" ) gbuf_start_idxs = list(range(0, gbuf_world_numel, gbuf_local_numel)) @@ -893,7 +905,7 @@ def load_parameter_state(self, filename): filename (str): path to load parameter state from. """ - data_parallel_rank = mpu.get_data_parallel_rank(with_context_parallel=True) + data_parallel_rank = parallel_state.get_data_parallel_rank(with_context_parallel=True) state_dict = None if data_parallel_rank == 0: state_dict = torch.load(filename) @@ -964,7 +976,9 @@ def get_model_param_buffer_dp_views(self): view_items_per_model_chunk = [] dtype = self.grad_buffers[gbuf_index].dtype for bucket_index, buf in enumerate(buffers): - buf_views = shard_buffer(buf) + buf_views = shard_buffer( + buf, parallel_state.get_data_parallel_world_size(with_context_parallel=True) + ) view_items_per_model_chunk.insert( 0, (gbuf_index, dtype, bucket_index, buf, buf_views) ) @@ -982,8 +996,8 @@ def _dispatch_gather_model_params(self, all_gather_handle_index, force_sync=Fals """ async_op = self.overlap_param_gather and not force_sync if self.update_successful: - data_parallel_rank = mpu.get_data_parallel_rank(with_context_parallel=True) - data_parallel_group = mpu.get_data_parallel_group(with_context_parallel=True) + data_parallel_rank = parallel_state.get_data_parallel_rank(with_context_parallel=True) + data_parallel_group = parallel_state.get_data_parallel_group(with_context_parallel=True) # All-gather updated main params. 
# All param_buf views are guaranteed to have the same number of elements diff --git a/megatron/optimizer/grad_scaler.py b/megatron/core/optimizer/grad_scaler.py similarity index 97% rename from megatron/optimizer/grad_scaler.py rename to megatron/core/optimizer/grad_scaler.py index 4d5d302ea1..d9ef633b23 100644 --- a/megatron/optimizer/grad_scaler.py +++ b/megatron/core/optimizer/grad_scaler.py @@ -1,9 +1,8 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. """Megatron grad scaler.""" -from abc import ABC -from abc import abstractmethod +from abc import ABC, abstractmethod import torch diff --git a/megatron/optimizer/optimizer.py b/megatron/core/optimizer/optimizer.py similarity index 97% rename from megatron/optimizer/optimizer.py rename to megatron/core/optimizer/optimizer.py index 6afb888f52..843f83f0ce 100644 --- a/megatron/optimizer/optimizer.py +++ b/megatron/core/optimizer/optimizer.py @@ -1,22 +1,21 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. """Megatron optimizer.""" -from abc import ABC -from abc import abstractmethod -from apex.multi_tensor_apply import multi_tensor_applier -import amp_C -import torch import math +from abc import ABC, abstractmethod +from logging import getLogger -from megatron import get_timers -from megatron import print_rank_0 -from megatron.core import mpu, tensor_parallel -from megatron.model import Float16Module -from megatron.model.module import param_is_not_shared +import amp_C +import torch +from apex.multi_tensor_apply import multi_tensor_applier +from .. import parallel_state, tensor_parallel +from ..transformer.module import param_is_not_shared from .clip_grads import clip_grad_norm_fp32, count_zeros_fp32 +logger = getLogger(__name__) + def _zero_grad_group_helper(group, set_to_none): """Zero out the gradient for a group of parameters. @@ -93,7 +92,7 @@ def get_main_grads_for_grad_norm(self): def get_model_parallel_group(self): """Default returned here, but the distributed optimizer overrides this.""" - return mpu.get_model_parallel_group() + return parallel_state.get_model_parallel_group() def clip_grad_norm(self, clip_grad, check_for_nan_in_grad): params = self.get_parameters() @@ -524,20 +523,20 @@ def load_state_dict(self, state_dict): optimizer_key = 'optimizer' if optimizer_key not in state_dict: optimizer_key = 'optimizer_state_dict' - print_rank_0('***WARNING*** loading optimizer from ' 'an old checkpoint ...') + logger.info('***WARNING*** loading optimizer from ' 'an old checkpoint ...') self.optimizer.load_state_dict(state_dict[optimizer_key]) # Grad scaler. if 'grad_scaler' not in state_dict: if self.fp16: - print_rank_0( + logger.info( '***WARNING*** found an old checkpoint, will not ' 'load grad scaler ...' ) else: if self.grad_scaler: self.grad_scaler.load_state_dict(state_dict['grad_scaler']) else: - print_rank_0( + logger.info( '***WARNING*** fould the grad scaler in the ' 'checkpoint but it is None in the class. ' 'Skipping loading grad scaler ...' @@ -690,7 +689,7 @@ def save_parameter_state(self, filename): Args: filename (str): path to save parameter state to. 
""" - data_parallel_rank = mpu.get_data_parallel_rank(with_context_parallel=True) + data_parallel_rank = parallel_state.get_data_parallel_rank(with_context_parallel=True) states = [] for optimizer in self.chained_optimizers: @@ -708,7 +707,7 @@ def load_parameter_state(self, filename): Args: filename (str): path to load parameter state from. """ - data_parallel_rank = mpu.get_data_parallel_rank(with_context_parallel=True) + data_parallel_rank = parallel_state.get_data_parallel_rank(with_context_parallel=True) num_of_optimizers = len(self.chained_optimizers) if data_parallel_rank == 0: states = torch.load(filename) diff --git a/megatron/core/optimizer/optimizer_config.py b/megatron/core/optimizer/optimizer_config.py new file mode 100644 index 0000000000..2689d667bd --- /dev/null +++ b/megatron/core/optimizer/optimizer_config.py @@ -0,0 +1,116 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + +from dataclasses import dataclass +from typing import Optional + +import torch + + +@dataclass +class OptimizerConfig: + """ + Configuration for optimizer. + + + Precision + --------- + + fp16 (bool): If true, train with fp16 mixed precision training. Defaults to False. + + bf16 (bool): If true, train with bf16 mixed precision training. Defaults to False. + + params_dtype (torch.dtype): dtype used when intializing the weights. Defaults to torch.float32. + + + General Optimizer + ----------------- + + optimizer (str): Optimizer to use (one of Adam or SGD). + + lr (float, optional): Initial learning rate. Depending on decay style and initial warmup, the learning + rate at each iteration would be different. + + + Loss Scaler + ----------- + + loss_scale (float, optional): Static loss scaling, positive power of 2 values can improve fp16 convergence. + If None, dynamic loss scaling is used. + + initial_loss_scale (float): Initial loss-scale for dynamic loss scaling. + + min_loss_scale (float): Minimum loss scale for dynamic loss scaling. + + loss_scale_window (float): Window over which to raise/lower dynamic scale. + + hysteresis (int): Hysteresis for dynamic loss scaling. + + + Weight Decay + ------------ + + weight_decay (float): Weight decay coefficient for L2 regularization. + + + Base Optimizer + -------------- + + adam_beta1 (float): First coefficient for computing running averages of gradient and its square in Adam optimizer. + + adam_beta2 (float): Second coefficient for computing running averages of gradient and its square in Adam optimizer. + + adam_eps (float): Term added to the denominator to improve numerical stability in Adam optimizer. + + sgd_momentum (float): Momentum factor for SGD optimizer. + + + Distributed Optimizer + --------------------- + + use_distributed_optimizer (bool): Distribute optimizer state over data-parallel replicas. + + overlap_param_gather (bool): If true, overlap param all-gather with forward compute in distributed optimizer. + + + Miscellaneous + ------------- + + clip_grad (float): Gradient clipping based on global L2 norm. + + log_num_zeros_in_grad (bool): If true, calculate and log the number of zeros in gradient. + + check_for_nan_in_loss_and_grad (bool): If true, check for NaNs in loss and gradient. + """ + + # Precision. + fp16: bool = False + bf16: bool = False + params_dtype: torch.dtype = torch.float32 + + optimizer: str = 'adam' + lr: Optional[float] = None + + # Loss scaling. 
+ loss_scale: Optional[float] = None + initial_loss_scale: float = 2 ** 32 + min_loss_scale: float = 1.0 + loss_scale_window: float = 1000 + hysteresis: int = 2 + + weight_decay: float = 0.01 + + # Adam. + adam_beta1: float = 0.9 + adam_beta2: float = 0.999 + adam_eps: float = 1e-08 + # SGD. + sgd_momentum: float = 0.9 + + # Distributed optimizer. + use_distributed_optimizer: bool = False + overlap_param_gather: bool = False + + # Miscellaneous. + clip_grad: float = 1.0 + log_num_zeros_in_grad: bool = False + check_for_nan_in_loss_and_grad: bool = False diff --git a/megatron/optimizer/utils.py b/megatron/optimizer/utils.py deleted file mode 100644 index 6376f45de8..0000000000 --- a/megatron/optimizer/utils.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. - -"""Utility functions for Megatron optimizer.""" - - -from megatron.core import mpu - - -def shard_buffer(buffer): - """ - Shard buffer into dp_size chunks of equal size. - """ - data_parallel_world_size = mpu.get_data_parallel_world_size(with_context_parallel=True) - assert buffer.numel() % data_parallel_world_size == 0 - shard_size = buffer.numel() // data_parallel_world_size - sharded_buffer = [ - buffer[(r * shard_size) : ((r + 1) * shard_size)] for r in range(data_parallel_world_size) - ] - return sharded_buffer diff --git a/megatron/training.py b/megatron/training.py index 6402182bee..9b80971bbc 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -3,6 +3,7 @@ """Pretrain utilities.""" import gc +import dataclasses from datetime import datetime import math import logging @@ -38,7 +39,7 @@ from megatron.core.distributed import DistributedDataParallel as DDP from megatron.core.distributed import finalize_model_grads from megatron.core.enums import ModelType -from megatron.optimizer import get_megatron_optimizer +from megatron.core.optimizer import get_megatron_optimizer, OptimizerConfig from megatron.initialize import initialize_megatron from megatron.initialize import write_args_to_tensorboard from megatron.initialize import set_jit_fusion_options @@ -483,7 +484,12 @@ def setup_model_and_optimizer(model_provider_func, model = get_model(model_provider_func, model_type) unwrapped_model = unwrap_model(model) - optimizer = get_megatron_optimizer(model, no_wd_decay_cond, + kwargs = {} + for f in dataclasses.fields(OptimizerConfig): + if hasattr(args, f.name): + kwargs[f.name] = getattr(args, f.name) + config = OptimizerConfig(**kwargs) + optimizer = get_megatron_optimizer(config, model, no_wd_decay_cond, scale_lr_cond, lr_mult) opt_param_scheduler = get_optimizer_param_scheduler(optimizer) From 5b4bbd5905142ba8a6c8abdea04681ea3e43415a Mon Sep 17 00:00:00 2001 From: Sudhakar Singh Date: Wed, 14 Feb 2024 12:32:12 -0800 Subject: [PATCH 277/296] add support wrapper for TE TransformerLayer in mcore --- megatron/core/models/gpt/gpt_layer_specs.py | 1 + .../core/transformer/transformer_block.py | 8 +- .../core/transformer/transformer_layer.py | 19 +++- pretrain_gpt.py | 4 +- .../transformer/test_spec_customization.py | 99 +++++++++++++------ 5 files changed, 96 insertions(+), 35 deletions(-) mode change 100644 => 100755 megatron/core/transformer/transformer_block.py diff --git a/megatron/core/models/gpt/gpt_layer_specs.py b/megatron/core/models/gpt/gpt_layer_specs.py index c76a842c77..ef9b5a5184 100755 --- a/megatron/core/models/gpt/gpt_layer_specs.py +++ b/megatron/core/models/gpt/gpt_layer_specs.py @@ -16,6 +16,7 @@ from megatron.core.transformer.mlp import MLP, MLPSubmodules from 
megatron.core.transformer.moe.moe_layer import MoELayer from megatron.core.transformer.spec_utils import ModuleSpec +from megatron.core.transformer.transformer_block import TransformerBlockSubmodules from megatron.core.transformer.transformer_layer import TransformerLayer, TransformerLayerSubmodules diff --git a/megatron/core/transformer/transformer_block.py b/megatron/core/transformer/transformer_block.py old mode 100644 new mode 100755 index 09f6c1033a..8b8dad0c4e --- a/megatron/core/transformer/transformer_block.py +++ b/megatron/core/transformer/transformer_block.py @@ -21,7 +21,7 @@ from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.spec_utils import ModuleSpec, build_module from megatron.core.transformer.transformer_config import TransformerConfig -from megatron.core.transformer.transformer_layer import TransformerLayer +from megatron.core.transformer.transformer_layer import BaseTransformerLayer, TransformerLayer from megatron.core.transformer.utils import sharded_state_dict_default from megatron.core.utils import make_sharded_tensor_for_checkpoint, make_viewless_tensor @@ -73,11 +73,13 @@ def _get_block_submodules( if isinstance(spec, TransformerBlockSubmodules): return spec - # ModuleSpec here is generally assumed to be for a transformer layer. + # ModuleSpec here is generally assumed to be for a transformer layer that + # is implemented in `transformer_layer.py` or if it subclasses + # `BaseTransformerLayer` from the `transformer_layer.py` file. elif isinstance(spec, ModuleSpec): if issubclass(spec.module, TransformerBlock): return spec.submodules - elif issubclass(spec.module, TransformerLayer): + elif issubclass(spec.module, BaseTransformerLayer): num_layers = get_num_layers_to_build(config) return TransformerBlockSubmodules(layer_specs=[spec] * num_layers) else: diff --git a/megatron/core/transformer/transformer_layer.py b/megatron/core/transformer/transformer_layer.py index 140f651469..edc45bbec4 100644 --- a/megatron/core/transformer/transformer_layer.py +++ b/megatron/core/transformer/transformer_layer.py @@ -1,5 +1,6 @@ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +from abc import ABC from dataclasses import dataclass, field from typing import Dict, Union @@ -34,7 +35,23 @@ class TransformerLayerSubmodules: sharded_state_dict_keys_map: Dict[str, str] = field(default_factory=dict) -class TransformerLayer(MegatronModule): +class BaseTransformerLayer(ABC): + """ A common parent class for `TransformerLayer` like implementations. + + A dummy class that is subclassed by similar `TransformerLayer`s e.g. the + `TransformerLayer` in this file and possibly other `TransformerLayer` + implementations that aim to use `TransformerBlock` as the base module. + The main purpose is to check if any layer (or module) provided in the spec + is a subclass of this class to allow fanning-out of that spec for all the + layers in the `TransformerBlock`. See `_get_block_submodules` method + implementation in `transformer_block.py` file for more details. + """ + + def __init__(self): + pass + + +class TransformerLayer(MegatronModule, BaseTransformerLayer): """A single transformer layer. 
Transformer layer takes input with size [s, b, h] and returns an diff --git a/pretrain_gpt.py b/pretrain_gpt.py index b7d38dab8e..03764030fa 100644 --- a/pretrain_gpt.py +++ b/pretrain_gpt.py @@ -86,7 +86,7 @@ def get_batch(data_iterator): return None, None, None, None, None # get batches based on the TP rank you are on - batch = get_batch_on_this_tp_rank(data_iterator) + batch = get_batch_on_this_tp_rank(data_iterator) # slice batch along sequence dimension for context parallelism batch = get_batch_on_this_cp_rank(batch) @@ -99,7 +99,7 @@ def loss_func(loss_mask: torch.Tensor, output_tensor: torch.Tensor): Args: loss_mask (torch.Tensor): Used to mask out some portions of the loss output_tensor (torch.Tensor): The tensor with the losses - """ + """ args = get_args() losses = output_tensor.float() diff --git a/tests/unit_tests/transformer/test_spec_customization.py b/tests/unit_tests/transformer/test_spec_customization.py index c13b5a6482..ebefe5de5b 100755 --- a/tests/unit_tests/transformer/test_spec_customization.py +++ b/tests/unit_tests/transformer/test_spec_customization.py @@ -10,6 +10,7 @@ from pkg_resources import packaging from megatron.core.fusions.fused_bias_dropout import get_bias_dropout_add +from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_local_spec from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed from megatron.core.transformer.attention import SelfAttention, SelfAttentionSubmodules from megatron.core.transformer.custom_layers.transformer_engine import ( @@ -22,8 +23,9 @@ from megatron.core.transformer.enums import AttnMaskType from megatron.core.transformer.identity_op import IdentityFuncOp, IdentityOp from megatron.core.transformer.spec_utils import ModuleSpec, build_module, import_module +from megatron.core.transformer.transformer_block import TransformerBlock, TransformerBlockSubmodules from megatron.core.transformer.transformer_config import TransformerConfig -from megatron.core.transformer.transformer_layer import TransformerLayerSubmodules +from megatron.core.transformer.transformer_layer import TransformerLayer, TransformerLayerSubmodules from tests.unit_tests.test_utilities import Utils @@ -45,7 +47,7 @@ def setup_method(self, method): submodules=SelfAttentionSubmodules( linear_qkv=TELayerNormColumnParallelLinear, core_attention=TEDotProductAttention, - linear_proj=TERowParallelLinear + linear_proj=TERowParallelLinear, ), ) @@ -93,9 +95,7 @@ def test_build_module(self): assert x == random_input # Check SelfAttention - self_attention = build_module( - self.attention_spec, config=self.config, layer_number=1, - ) + self_attention = build_module(self.attention_spec, config=self.config, layer_number=1,) assert isinstance(self_attention, SelfAttention) assert self_attention.layer_number == 1 assert self_attention.attn_mask_type == self.attention_spec.params['attn_mask_type'] @@ -131,31 +131,24 @@ def test_build_module(self): bda_op = build_module(self.bda_spec) assert id(bda_op) == id(get_bias_dropout_add) - - def test_sliding_window_attention(self): te_version = packaging.version.Version(version("transformer-engine")) - if te_version < packaging.version.Version( - "1.2.0" - ): - print("SWA not tested because TE version is not >= 1.2.0", file=sys.stderr) - return + if te_version < packaging.version.Version("1.2.0"): + print("SWA not tested because TE version is not >= 1.2.0", file=sys.stderr) + return config = TransformerConfig( num_layers=2, hidden_size=12, num_attention_heads=4, use_cpu_initialization=True, - 
window_size=[10,0] + window_size=[10, 0], ) # Make sure DotProductAttention throws (swa unsupported). threw = False try: attn = DotProductAttention( - config, - layer_number=1, - attn_mask_type=AttnMaskType.causal, - attention_type='self' + config, layer_number=1, attn_mask_type=AttnMaskType.causal, attention_type='self' ) except: threw = True @@ -164,10 +157,7 @@ def test_sliding_window_attention(self): # Test TEDotProductAttention attn = TEDotProductAttention( - config, - layer_number=1, - attn_mask_type=AttnMaskType.causal, - attention_type='self' + config, layer_number=1, attn_mask_type=AttnMaskType.causal, attention_type='self' ) # Make sure window-size is what we expect. assert attn.window_size == config.window_size @@ -177,10 +167,7 @@ def test_sliding_window_attention(self): try: config.window_size = 11 attn = TEDotProductAttention( - config, - layer_number=1, - attn_mask_type=AttnMaskType.causal, - attention_type='self' + config, layer_number=1, attn_mask_type=AttnMaskType.causal, attention_type='self' ) except: threw = True @@ -190,10 +177,64 @@ def test_sliding_window_attention(self): # `None` makes this causal. config.window_size = None attn = TEDotProductAttention( - config, - layer_number=1, - attn_mask_type=AttnMaskType.causal, - attention_type='self' + config, layer_number=1, attn_mask_type=AttnMaskType.causal, attention_type='self' ) # Make sure it's causal. assert attn.window_size == (-1, 0) + + def test_transformer_block_custom(self): + """ + This test checks that the two ways of passing `layer_spec` to a + `TransformerBlock` result in an identical model: + 1. ModuleSpec(module=..., submodules=...) + 2. TransformerBlockSubmodules(layer_specs=...) + """ + + transformer_config = TransformerConfig( + num_layers=2, hidden_size=12, num_attention_heads=4, use_cpu_initialization=True + ) + layer_local_spec = get_gpt_layer_local_spec() + + # The following way can be used to pass a different `TransformerLayer` + # and internally the `TransformerBlock` would fan out the single + # `ModuleSpec` layer spec provided to all the layers of the block. 
+ layer_spec1 = ModuleSpec(module=TransformerLayer, submodules=layer_local_spec.submodules) + model_parallel_cuda_manual_seed(123) + torch.manual_seed(0) + parallel_transformer_block1 = TransformerBlock(transformer_config, layer_spec1) + + layer_spec2 = TransformerBlockSubmodules( + layer_specs=[ + ModuleSpec(module=TransformerLayer, submodules=layer_local_spec.submodules) + ] + * transformer_config.num_layers + ) + # make sure the model init conditions are identical + model_parallel_cuda_manual_seed(123) + torch.manual_seed(0) + parallel_transformer_block2 = TransformerBlock(transformer_config, layer_spec2) + + sequence_length = 32 + micro_batch_size = 2 + parallel_transformer_block1.cuda() + parallel_transformer_block2.cuda() + + # [sequence length, batch size, hidden size] + hidden_states = torch.ones( + (sequence_length, micro_batch_size, transformer_config.hidden_size) + ) + hidden_states = hidden_states.cuda() + + attention_mask = torch.ones((1, 1, sequence_length, sequence_length), dtype=bool).cuda() + + out1 = parallel_transformer_block1( + hidden_states=hidden_states, attention_mask=attention_mask + ) + out2 = parallel_transformer_block2( + hidden_states=hidden_states, attention_mask=attention_mask + ) + + assert torch.all(torch.eq(out1, out2)) + assert out1.shape[0] == sequence_length == out2.shape[0] + assert out1.shape[1] == micro_batch_size == out2.shape[1] + assert out1.shape[2] == transformer_config.hidden_size == out2.shape[2] From 1b6ae2705270731df9d0192f8e31cdc028c2d9f2 Mon Sep 17 00:00:00 2001 From: Shanmugam Ramasamy Date: Wed, 14 Feb 2024 21:38:03 -0800 Subject: [PATCH 278/296] Fixing examples --- examples/bert/train_bert_340m_distributed.sh | 6 +++--- examples/gpt3/train_gpt3_175b_distributed.sh | 10 +++++----- examples/t5/train_t5_220m_distributed.sh | 8 ++++---- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/examples/bert/train_bert_340m_distributed.sh b/examples/bert/train_bert_340m_distributed.sh index b9019fcecf..7d489917e5 100644 --- a/examples/bert/train_bert_340m_distributed.sh +++ b/examples/bert/train_bert_340m_distributed.sh @@ -12,9 +12,9 @@ NUM_NODES=1 NODE_RANK=0 WORLD_SIZE=$(($GPUS_PER_NODE*$NUM_NODES)) -CHECKPOINT_PATH=$0 # -TENSORBOARD_LOGS_PATH=$1 # -VOCAB_FILE=$2 #/bert-vocab.json +CHECKPOINT_PATH=$1 # +TENSORBOARD_LOGS_PATH=$2 # +VOCAB_FILE=$3 #/bert-vocab.json DATA_PATH=$4 #_text_document DISTRIBUTED_ARGS=( diff --git a/examples/gpt3/train_gpt3_175b_distributed.sh b/examples/gpt3/train_gpt3_175b_distributed.sh index 01ca2e0309..ccba78784b 100755 --- a/examples/gpt3/train_gpt3_175b_distributed.sh +++ b/examples/gpt3/train_gpt3_175b_distributed.sh @@ -12,11 +12,11 @@ NUM_NODES=1 NODE_RANK=0 WORLD_SIZE=$(($GPUS_PER_NODE*$NUM_NODES)) -CHECKPOINT_PATH=$0 # -TENSORBOARD_LOGS_PATH=$1 # -VOCAB_FILE=$2 #/gpt2-vocab.json -MERGE_FILE=$3 #/gpt2-merges.txt -DATA_PATH=$4 #_text_document +CHECKPOINT_PATH=$1 # +TENSORBOARD_LOGS_PATH=$2 # +VOCAB_FILE=$3 #/gpt2-vocab.json +MERGE_FILE=$4 #/gpt2-merges.txt +DATA_PATH=$5 #_text_document DISTRIBUTED_ARGS=( --nproc_per_node $GPUS_PER_NODE diff --git a/examples/t5/train_t5_220m_distributed.sh b/examples/t5/train_t5_220m_distributed.sh index 9385e390ed..4a55bb6e95 100755 --- a/examples/t5/train_t5_220m_distributed.sh +++ b/examples/t5/train_t5_220m_distributed.sh @@ -12,10 +12,10 @@ NUM_NODES=1 NODE_RANK=0 WORLD_SIZE=$(($GPUS_PER_NODE*$NUM_NODES)) -CHECKPOINT_PATH=$0 # -TENSORBOARD_DIR=$1 # -VOCAB_FILE=$2 #/bert-large-cased-vocab.txt -DATA_PATH=$3 #_text_document +CHECKPOINT_PATH=$1 # +TENSORBOARD_DIR=$2 
# +VOCAB_FILE=$3 #/bert-large-cased-vocab.txt +DATA_PATH=$4 #_text_document DISTRIBUTED_ARGS=" --nproc_per_node $GPUS_PER_NODE \ From 72a255a7a418e432695878f76f771d11165b8166 Mon Sep 17 00:00:00 2001 From: Jianbin Chang Date: Tue, 20 Feb 2024 16:06:58 -0800 Subject: [PATCH 279/296] [MoE] Expert data parallel w/ ZeRO-1 support --- .../distributed/distributed_data_parallel.py | 129 +++++++++++------- .../core/distributed/finalize_model_grads.py | 36 +---- megatron/core/distributed/grad_buffer.py | 13 +- megatron/core/optimizer/__init__.py | 54 ++++++-- megatron/core/optimizer/distrib_optimizer.py | 52 ++++--- megatron/core/optimizer/optimizer.py | 39 +++--- megatron/core/parallel_state.py | 11 ++ megatron/training.py | 1 + .../functional_tests/jet_recipes/MR-gpt.yaml | 1 + ...el-dist-optimizer_mcore-true_te-false.json | 1 + 10 files changed, 191 insertions(+), 146 deletions(-) create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-dist-optimizer_mcore-true_te-false.json diff --git a/megatron/core/distributed/distributed_data_parallel.py b/megatron/core/distributed/distributed_data_parallel.py index c1d9dc11c0..e3c8ece83a 100644 --- a/megatron/core/distributed/distributed_data_parallel.py +++ b/megatron/core/distributed/distributed_data_parallel.py @@ -1,7 +1,7 @@ # Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. from contextlib import contextmanager -from typing import Dict +from typing import Dict, Optional import torch @@ -44,6 +44,7 @@ def __init__( accumulate_allreduce_grads_in_fp32: bool, overlap_grad_reduce: bool, use_distributed_optimizer: bool, + expert_data_parallel_group: Optional[torch.distributed.ProcessGroup] = None, disable_bucketing: bool = False, bucket_size: int = 40000000, ): @@ -68,53 +69,75 @@ def __init__( self.bucket_size = bucket_size self.module = module - self.grad_buffers = {} - self.expert_grads = [] - self.grad_buffer_param_index_map = {} self.param_to_grad_buffer = {} # Group parameters by their gradient type. - grad_dtype_to_params = {} param_to_name = {} + dense_params = [] + expert_parallel_params = [] for name, param in self.module.named_parameters(): - if param.requires_grad and getattr(param, 'allreduce', True): - param.grad_added_to_main_grad = False - param_to_name[param] = name + if not param.requires_grad: + continue + + param.grad_added_to_main_grad = False + param_to_name[param] = name + + if getattr(param, 'allreduce', True): + dense_params.append(param) + else: + expert_parallel_params.append(param) + + def allocate_grad_buffers_for_parameters( + input_params, data_parallel_group, gradient_scaling_factor=1.0, + ): + grad_dtype_to_params = {} + + # Group parameters by their gradient type. + for param in input_params: + if not param.requires_grad: + continue + dtype = torch.float if accumulate_allreduce_grads_in_fp32 else param.dtype params = grad_dtype_to_params.get(dtype, []) params.append(param) grad_dtype_to_params[dtype] = params - # Allocate the grad buffers and map the grads. - # The grad buffer under the hood creates buckets as appropriate based on bucket_size. 
- self.data_parallel_world_size = torch.distributed.get_world_size(group=data_parallel_group) - for dtype, params in grad_dtype_to_params.items(): - self.grad_buffers[dtype] = GradBuffer( - dtype, - params, - data_parallel_group, - bucket_size, - param_to_name, - self.overlap_grad_reduce, - self.use_distributed_optimizer, - ) - self.grad_buffer_param_index_map[dtype] = self.grad_buffers[dtype].param_index_map - for param in params: - self.param_to_grad_buffer[param] = self.grad_buffers[dtype] - - # Allocate separate buffer for MoE params' grads. - for param in self.module.parameters(): - if param.requires_grad and not getattr(param, 'allreduce', True): - param.grad_added_to_main_grad = False - dtype = torch.float if accumulate_allreduce_grads_in_fp32 else param.dtype - param.main_grad = torch.zeros( - param.data.shape, - dtype=dtype, - device=torch.cuda.current_device(), - requires_grad=False, + # Allocate the grad buffers and map the grads. + grad_buffers = [] + for dtype, params in grad_dtype_to_params.items(): + grad_buffers.append( + GradBuffer( + dtype, + params, + data_parallel_group, + bucket_size, + param_to_name, + self.overlap_grad_reduce, + self.use_distributed_optimizer, + gradient_scaling_factor=gradient_scaling_factor, + ) ) - self.expert_grads.append(param.main_grad) + for param in params: + self.param_to_grad_buffer[param] = grad_buffers[-1] + + return grad_buffers + + data_parallel_world_size = torch.distributed.get_world_size(data_parallel_group) + + # Allocate the grad buffers for dense params' grads. + self.grad_buffers = allocate_grad_buffers_for_parameters( + dense_params, + data_parallel_group, + gradient_scaling_factor=1.0 / data_parallel_world_size, + ) + + # Allocate separate grad buffers for expert parallel params' grads. + self.expert_parallel_grad_buffers = allocate_grad_buffers_for_parameters( + expert_parallel_params, + expert_data_parallel_group, + gradient_scaling_factor=1.0 / data_parallel_world_size, + ) # Register backward hook. # Accumulation function for the gradients need to be stored so they @@ -163,12 +186,12 @@ def no_sync(self): """ Context manager that turns off gradient synchronization. """ - for grad_buffer in self.grad_buffers.values(): + for grad_buffer in self.grad_buffers + self.expert_parallel_grad_buffers: grad_buffer.is_last_microbatch = False try: yield finally: - for grad_buffer in self.grad_buffers.values(): + for grad_buffer in self.grad_buffers + self.expert_parallel_grad_buffers: grad_buffer.is_last_microbatch = True def start_grad_sync(self, *unused): @@ -180,7 +203,7 @@ def start_grad_sync(self, *unused): calls. When overlap_grad_reduce is set to False, calls synchronous communication ops. """ - for grad_buffer in self.grad_buffers.values(): + for grad_buffer in self.grad_buffers + self.expert_parallel_grad_buffers: grad_buffer.start_grad_sync() def finish_grad_sync(self): @@ -192,12 +215,9 @@ def finish_grad_sync(self): calls to complete. When overlap_grad_reduce is set to False, calls synchronous communication ops. """ - for grad_buffer in self.grad_buffers.values(): + for grad_buffer in self.grad_buffers + self.expert_parallel_grad_buffers: grad_buffer.finish_grad_sync() - for expert_grad in self.expert_grads: - expert_grad /= self.data_parallel_world_size - def zero_grad_buffer(self, zero_buffer): """ Zeros out all grad buffers. 
Needs to be called at the beginning of each @@ -208,21 +228,28 @@ def zero_grad_buffer(self, zero_buffer): for param in self.module.parameters(): if param.requires_grad: param.grad_added_to_main_grad = False - for grad_buffer in self.grad_buffers.values(): + for grad_buffer in self.grad_buffers + self.expert_parallel_grad_buffers: grad_buffer.reset(zero_buffer) - for expert_grad in self.expert_grads: - expert_grad.zero_() def broadcast_params(self): """ Syncs parameters across all DP ranks. """ for param in self.module.parameters(): - torch.distributed.broadcast( - param.data, - src=parallel_state.get_data_parallel_src_rank(with_context_parallel=True), - group=parallel_state.get_data_parallel_group(with_context_parallel=True), - ) + is_expert_parallel = not getattr(param, 'allreduce', True) + + if is_expert_parallel: + torch.distributed.broadcast( + param.data, + src=torch.distributed.get_process_group_ranks(self.expert_data_parallel_group), + group=self.expert_data_parallel_group, + ) + else: + torch.distributed.broadcast( + param.data, + src=torch.distributed.get_process_group_ranks(self.data_parallel_group), + group=self.data_parallel_group, + ) def state_dict(self, prefix='', keep_vars=False): """ diff --git a/megatron/core/distributed/finalize_model_grads.py b/megatron/core/distributed/finalize_model_grads.py index 587a59e247..f6387b85c4 100644 --- a/megatron/core/distributed/finalize_model_grads.py +++ b/megatron/core/distributed/finalize_model_grads.py @@ -89,35 +89,10 @@ def _allreduce_layernorm_grads(model: List[torch.nn.Module], config: Transformer buf.copy_(synced) -def _allreduce_expert_grads(model: List[torch.nn.Module], config: TransformerConfig): - """ - All-reduce expert grads (for expert parallelism). - """ - - # All-reduce MoE parameters across data modulo expert parallel nodes - if ( - config.expert_model_parallel_size > 1 - and config.expert_model_parallel_size < parallel_state.get_data_parallel_world_size() - ): - grads = [] - for model_chunk in model: - for param in get_attr_wrapped_model(model_chunk, 'parameters')(): - if not getattr(param, 'allreduce', True): - grad = param.main_grad - grads.append(grad.data) - coalesced = _flatten_dense_tensors(grads) - torch.distributed.all_reduce( - coalesced, group=parallel_state.get_data_modulo_expert_parallel_group() - ) - for buf, synced in zip(grads, _unflatten_dense_tensors(coalesced, grads)): - buf.copy_(synced) - - def finalize_model_grads(model: List[torch.nn.Module]): """ All-reduce all model grads across DP replicas, layernorm grads for sequence parallelism, - embedding grads across first and last pipeline stages (if not tied), and expert grads - for expert parallelism. + embedding grads across first and last pipeline stages (if not tied). """ config = get_model_config(model[0]) @@ -147,12 +122,3 @@ def finalize_model_grads(model: List[torch.nn.Module]): _allreduce_embedding_grads(model, config) if config.timers is not None: config.timers('embedding-grads-all-reduce').stop() - - # All-reduce expert grads (for expert parallelism). 
- if config.timers is not None: - config.timers('expert-grads-all-reduce', log_level=1).start( - barrier=config.barrier_with_L1_time - ) - _allreduce_expert_grads(model, config) - if config.timers is not None: - config.timers('expert-grads-all-reduce').stop() diff --git a/megatron/core/distributed/grad_buffer.py b/megatron/core/distributed/grad_buffer.py index 9a6506957f..949bc9468c 100644 --- a/megatron/core/distributed/grad_buffer.py +++ b/megatron/core/distributed/grad_buffer.py @@ -41,6 +41,9 @@ class Bucket: is used instead. use_distributed_optimizer: If true, issue reduce-scatter communication calls as part of distributed optimizer. If false, issue all-reduce communication calls. + gradient_scaling_factor: This factor is utilized to scale gradients prior to their + communication. Its application is twofold: it facilitates the averaging of gradients + and the scaling of gradients in the context of the Mixture of Experts (MoE) model. """ def __init__( @@ -53,6 +56,7 @@ def __init__( data_parallel_world_size: int, overlap_grad_reduce: bool, use_distributed_optimizer: bool, + gradient_scaling_factor: float, ): # State for bookkeeping: params is the set of parameters this bucket is # responsible for, params_with_grad is the set of parameters with grads @@ -71,6 +75,7 @@ def __init__( self.data_parallel_rank = torch.distributed.get_rank(group=data_parallel_group) self.overlap_grad_reduce = overlap_grad_reduce self.use_distributed_optimizer = use_distributed_optimizer + self.gradient_scaling_factor = gradient_scaling_factor self.reset() @@ -95,7 +100,7 @@ def start_grad_sync(self): self.communication_handle is None and not self.communication_issued ), 'Should not have multiple communication calls in flight at once' - self.data /= self.data_parallel_world_size + self.data *= self.gradient_scaling_factor # Use async_op only when overlap_grad_reduce is True. if self.use_distributed_optimizer: local_data_view = shard_buffer(self.data, self.data_parallel_world_size)[ @@ -165,6 +170,9 @@ class GradBuffer: is used instead. use_distributed_optimizer: If true, issue reduce-scatter communication calls as part of distributed optimizer. If false, issue all-reduce communication calls. + gradient_scaling_factor: This factor is utilized to scale gradients prior to their + communication. Its application is twofold: it facilitates the averaging of gradients + and the scaling of gradients in the context of the Mixture of Experts (MoE) model. """ def __init__( @@ -176,6 +184,7 @@ def __init__( param_to_name: Dict[torch.nn.Parameter, str], overlap_grad_reduce: bool, use_distributed_optimizer: bool, + gradient_scaling_factor: float, ): # Check that params are unique. @@ -193,6 +202,7 @@ def __init__( ) self.overlap_grad_reduce = overlap_grad_reduce self.use_distributed_optimizer = use_distributed_optimizer + self.gradient_scaling_factor = gradient_scaling_factor self.is_last_microbatch = True # Data structures to store underlying buckets and relevant indexing data. 
@@ -373,6 +383,7 @@ def _set_bucket( data_parallel_world_size=self.data_parallel_world_size, overlap_grad_reduce=self.overlap_grad_reduce, use_distributed_optimizer=self.use_distributed_optimizer, + gradient_scaling_factor=self.gradient_scaling_factor, ) self.buckets.append(bucket) for bucket_param in bucket_params: diff --git a/megatron/core/optimizer/__init__.py b/megatron/core/optimizer/__init__.py index a8fb749bd3..b3461f9032 100644 --- a/megatron/core/optimizer/__init__.py +++ b/megatron/core/optimizer/__init__.py @@ -3,6 +3,8 @@ from apex.optimizers import FusedAdam as Adam from apex.optimizers import FusedSGD as SGD +from megatron.core import mpu + from .distrib_optimizer import DistributedOptimizer from .grad_scaler import ConstantGradScaler, DynamicGradScaler from .optimizer import ChainedOptimizer, Float16OptimizerWithFloat16Params, FP32Optimizer @@ -84,7 +86,13 @@ def get_param_groups(model_chunks, no_weight_decay_cond, scale_lr_cond, lr_mult) return param_groups -def get_megatron_optimizer_based_on_param_groups(config, param_groups, grad_buffers=None): +def get_megatron_optimizer_based_on_param_groups( + config, + param_groups, + per_model_grad_buffers=None, + data_parallel_group=None, + data_parallel_group_gloo=None, +): """Get megatron optimizer based on parameter groups. For distributed optimizer, we need the parameter gradients to be stored in a @@ -92,7 +100,12 @@ def get_megatron_optimizer_based_on_param_groups(config, param_groups, grad_buff Args: param_groups (list): list of parameter groups. - grad_buffers (list, optional): list of gradient buffers. Defaults to None. + per_model_grad_buffers (list, optional): list of gradient buffers for + distributed optimizer. Defaults to None. + data_parallel_group (ProcessGroup, optional): data parallel group for + distributed optimizer. Defaults to None. + data_parallel_group_gloo (ProcessGroup, optional): data parallel + group-gloo for distributed optimizer. Defaults to None. """ if config.optimizer == 'adam': optimizer = Adam( @@ -115,18 +128,11 @@ def get_megatron_optimizer_based_on_param_groups(config, param_groups, grad_buff # Determine whether the params have main-grad field. params_have_main_grad = True - # If it is expert parameters, we do not use the distributed optimizer. - # TODO: enable support for distributed optimizer with expert parameters - # (need to support DistOpt across process group with size dp_size / ep_size). - use_distributed_optimizer = config.use_distributed_optimizer and not any( - [pg['is_expert_parallel'] for pg in param_groups] - ) - # Mixed precision optimizer. # - Note: both the Float16Optimizer and the DistributedOptimizer inherit # from the MixedPrecisionOptimizer, which manages any optimizer where # the model params and main params are distinct. - if config.fp16 or config.bf16 or use_distributed_optimizer: + if config.fp16 or config.bf16 or config.use_distributed_optimizer: # Grad scaler: # if loss-scale is provided, instantiate the constant scaler. 
@@ -163,9 +169,13 @@ def get_megatron_optimizer_based_on_param_groups(config, param_groups, grad_buff config.params_dtype, grad_scaler, ] - if use_distributed_optimizer: + if config.use_distributed_optimizer: optimizer = DistributedOptimizer( - *optimizer_args, grad_buffers, config.overlap_param_gather + *optimizer_args, + per_model_grad_buffers=per_model_grad_buffers, + data_parallel_group=data_parallel_group, + data_parallel_group_gloo=data_parallel_group_gloo, + overlap_param_gather=config.overlap_param_gather, ) else: optimizer = Float16OptimizerWithFloat16Params(*optimizer_args) @@ -203,9 +213,11 @@ def get_megatron_optimizer( # Collect grad buffers for distributed optimizer. per_model_grad_buffers = {} + per_model_ep_grad_buffers = {} for model_idx, model_chunk in enumerate(model_chunks): if hasattr(model_chunk, 'grad_buffers'): - per_model_grad_buffers[model_idx] = list(model_chunk.grad_buffers.values()) + per_model_grad_buffers[model_idx] = model_chunk.grad_buffers + per_model_ep_grad_buffers[model_idx] = model_chunk.expert_parallel_grad_buffers # Split param groups into dense and moe. dense_param_groups = list(filter(lambda g: not g['is_expert_parallel'], param_groups)) @@ -214,11 +226,23 @@ def get_megatron_optimizer( # Create optimizers. optimizers = [ get_megatron_optimizer_based_on_param_groups( - config, dense_param_groups, per_model_grad_buffers + config, + param_groups=dense_param_groups, + per_model_grad_buffers=per_model_grad_buffers, + data_parallel_group=mpu.get_data_parallel_group(with_context_parallel=True), + data_parallel_group_gloo=mpu.get_data_parallel_group_gloo(with_context_parallel=True), ) ] if len(moe_param_groups): - optimizers.append(get_megatron_optimizer_based_on_param_groups(config, moe_param_groups)) + optimizers.append( + get_megatron_optimizer_based_on_param_groups( + config, + param_groups=moe_param_groups, + per_model_grad_buffers=per_model_ep_grad_buffers, + data_parallel_group=mpu.get_data_modulo_expert_parallel_group(), + data_parallel_group_gloo=mpu.get_data_modulo_expert_parallel_group_gloo(), + ) + ) if len(optimizers) == 1: return optimizers[0] diff --git a/megatron/core/optimizer/distrib_optimizer.py b/megatron/core/optimizer/distrib_optimizer.py index 3e5943c0b1..1423a6abb6 100644 --- a/megatron/core/optimizer/distrib_optimizer.py +++ b/megatron/core/optimizer/distrib_optimizer.py @@ -9,7 +9,7 @@ import torch from apex.optimizers import FusedAdam as Adam -from .. import parallel_state, tensor_parallel +from .. import tensor_parallel from ..distributed import shard_buffer from .optimizer import MixedPrecisionOptimizer, _zero_grad_group_helper @@ -140,10 +140,8 @@ def build_model_gbuf_range(cls, grad_buffer, bucket_index): reduce-scatter and all-gather. """ - data_parallel_rank = parallel_state.get_data_parallel_rank(with_context_parallel=True) - data_parallel_world_size = parallel_state.get_data_parallel_world_size( - with_context_parallel=True - ) + data_parallel_rank = torch.distributed.get_rank(grad_buffer.data_parallel_group) + data_parallel_world_size = grad_buffer.data_parallel_group.size() bucket = grad_buffer.buckets[bucket_index] bucket_buffer = bucket.data @@ -384,6 +382,8 @@ def __init__( grad_scaler, per_model_grad_buffers, overlap_param_gather, + data_parallel_group, + data_parallel_group_gloo, ): """ See top of class definition for argument descriptions. 
@@ -415,6 +415,8 @@ def __init__( assert per_model_grad_buffers, "grad_buffers must be provided" self.grad_buffers = list(itertools.chain(*per_model_grad_buffers.values())) self.per_model_grad_buffers = per_model_grad_buffers + self.data_parallel_group = data_parallel_group + self.data_parallel_group_gloo = data_parallel_group_gloo self.gbuf_idx_to_model_idx_map = {} gbuf_idx = 0 for model_idx, grad_buffers in self.per_model_grad_buffers.items(): @@ -673,14 +675,12 @@ def get_parameter_state(self): """ # Data parallelism variables. - data_parallel_world_size = parallel_state.get_data_parallel_world_size( - with_context_parallel=True - ) - data_parallel_rank = parallel_state.get_data_parallel_rank(with_context_parallel=True) - data_parallel_group_gloo = parallel_state.get_data_parallel_group_gloo( - with_context_parallel=True + data_parallel_world_size = self.data_parallel_group_gloo.size() + data_parallel_rank = torch.distributed.get_rank(self.data_parallel_group_gloo) + data_parallel_group_gloo = self.data_parallel_group_gloo + data_parallel_global_ranks = torch.distributed.get_process_group_ranks( + self.data_parallel_group_gloo ) - data_parallel_global_ranks = list(parallel_state._DATA_PARALLEL_GLOBAL_RANKS_WITH_CP) # Collect param states. state = { @@ -765,9 +765,8 @@ def save_parameter_state(self, filename): filename (str): path to save parameter state to. """ - data_parallel_rank = parallel_state.get_data_parallel_rank(with_context_parallel=True) state_dict = self.get_parameter_state() - if data_parallel_rank == 0: + if torch.distributed.get_rank(self.data_parallel_group) == 0: torch.save(state_dict, filename) def load_parameter_state_from_state_dict(self, state_dict): @@ -782,14 +781,12 @@ def load_parameter_state_from_state_dict(self, state_dict): """ # Data parallelism variables. - data_parallel_world_size = parallel_state.get_data_parallel_world_size( - with_context_parallel=True + data_parallel_world_size = self.data_parallel_group_gloo.size() + data_parallel_rank = torch.distributed.get_rank(self.data_parallel_group_gloo) + data_parallel_group_gloo = self.data_parallel_group_gloo + data_parallel_global_ranks = torch.distributed.get_process_group_ranks( + self.data_parallel_group_gloo ) - data_parallel_rank = parallel_state.get_data_parallel_rank(with_context_parallel=True) - data_parallel_group_gloo = parallel_state.get_data_parallel_group_gloo( - with_context_parallel=True - ) - data_parallel_global_ranks = list(parallel_state._DATA_PARALLEL_GLOBAL_RANKS_WITH_CP) # Scatter tensors to all DP ranks. for gbuf_idx, gbuf_range_maps in enumerate(self.gbuf_ranges): @@ -904,10 +901,8 @@ def load_parameter_state(self, filename): Args: filename (str): path to load parameter state from. 
""" - - data_parallel_rank = parallel_state.get_data_parallel_rank(with_context_parallel=True) state_dict = None - if data_parallel_rank == 0: + if torch.distributed.get_rank(self.data_parallel_group) == 0: state_dict = torch.load(filename) if "per_bucket_numel_unpadded" in state_dict: per_bucket_numel_unpadded_in_checkpoint = state_dict["per_bucket_numel_unpadded"] @@ -976,9 +971,10 @@ def get_model_param_buffer_dp_views(self): view_items_per_model_chunk = [] dtype = self.grad_buffers[gbuf_index].dtype for bucket_index, buf in enumerate(buffers): - buf_views = shard_buffer( - buf, parallel_state.get_data_parallel_world_size(with_context_parallel=True) + data_parallel_world_size = torch.distributed.get_world_size( + self.data_parallel_group ) + buf_views = shard_buffer(buf, data_parallel_world_size) view_items_per_model_chunk.insert( 0, (gbuf_index, dtype, bucket_index, buf, buf_views) ) @@ -996,8 +992,8 @@ def _dispatch_gather_model_params(self, all_gather_handle_index, force_sync=Fals """ async_op = self.overlap_param_gather and not force_sync if self.update_successful: - data_parallel_rank = parallel_state.get_data_parallel_rank(with_context_parallel=True) - data_parallel_group = parallel_state.get_data_parallel_group(with_context_parallel=True) + data_parallel_group = self.data_parallel_group + data_parallel_rank = torch.distributed.get_rank(data_parallel_group) # All-gather updated main params. # All param_buf views are guaranteed to have the same number of elements diff --git a/megatron/core/optimizer/optimizer.py b/megatron/core/optimizer/optimizer.py index 843f83f0ce..a3a431d6ae 100644 --- a/megatron/core/optimizer/optimizer.py +++ b/megatron/core/optimizer/optimizer.py @@ -10,6 +10,9 @@ import torch from apex.multi_tensor_apply import multi_tensor_applier +from megatron.core import tensor_parallel +from megatron.model.module import param_is_not_shared + from .. import parallel_state, tensor_parallel from ..transformer.module import param_is_not_shared from .clip_grads import clip_grad_norm_fp32, count_zeros_fp32 @@ -689,16 +692,23 @@ def save_parameter_state(self, filename): Args: filename (str): path to save parameter state to. """ - data_parallel_rank = parallel_state.get_data_parallel_rank(with_context_parallel=True) - + save_states = False states = [] for optimizer in self.chained_optimizers: if hasattr(optimizer, 'get_parameter_state'): - states.append(optimizer.get_parameter_state()) + state_dict = optimizer.get_parameter_state() + + # Save checkpoint economically, only when DP rank = 0, state dict + # needs to be saved. + if torch.distributed.get_rank(optimizer.data_parallel_group) == 0: + states.append(state_dict) + save_states = True + else: + states.append(None) else: states.append(None) - if data_parallel_rank == 0: + if save_states: torch.save(states, filename) def load_parameter_state(self, filename): @@ -707,20 +717,17 @@ def load_parameter_state(self, filename): Args: filename (str): path to load parameter state from. """ - data_parallel_rank = parallel_state.get_data_parallel_rank(with_context_parallel=True) - num_of_optimizers = len(self.chained_optimizers) - if data_parallel_rank == 0: - states = torch.load(filename) - else: - states = [None] * num_of_optimizers + states = None + for idx, optimizer in enumerate(self.chained_optimizers): + if not hasattr(optimizer, 'load_parameter_state_from_state_dict'): + continue - assert len(states) == num_of_optimizers, ( - "Number of optimizers in " "checkpoint does not match number of optimizers in model." 
- ) + # Lazy loading checkpoint, state dict is needed only when DP rank = 0. + if torch.distributed.get_rank(optimizer.data_parallel_group) == 0 and states is None: + states = torch.load(filename) - for optimizer, state in zip(self.chained_optimizers, states): - if hasattr(optimizer, 'load_parameter_state_from_state_dict'): - optimizer.load_parameter_state_from_state_dict(state) + state_dict = states[idx] if states else None + optimizer.load_parameter_state_from_state_dict(state_dict) def finish_param_sync(self, model_index): """Finish parameter synchronization for all optimizers. diff --git a/megatron/core/parallel_state.py b/megatron/core/parallel_state.py index 4307f629d2..45cccc6463 100644 --- a/megatron/core/parallel_state.py +++ b/megatron/core/parallel_state.py @@ -28,6 +28,7 @@ # Expert parallel group that the current rank belongs to. _TENSOR_AND_EXPERT_PARALLEL_GROUP = None _DATA_MODULO_EXPERT_PARALLEL_GROUP = None +_DATA_MODULO_EXPERT_PARALLEL_GROUP_GLOO = None _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK = None @@ -458,6 +459,7 @@ def initialize_model_parallel( assert ( _DATA_MODULO_EXPERT_PARALLEL_GROUP is None ), 'Data modulo expert group is already initialized' + global _DATA_MODULO_EXPERT_PARALLEL_GROUP_GLOO tensor_and_data_group_size: int = tensor_model_parallel_size * data_parallel_size num_tensor_and_data_groups: int = world_size // tensor_and_data_group_size tensor_and_expert_group_size: int = tensor_model_parallel_size * expert_model_parallel_size @@ -481,8 +483,10 @@ def initialize_model_parallel( group = torch.distributed.new_group( ranks, pg_options=get_nccl_options('dp_modulo_exp', nccl_comm_cfgs) ) + group_gloo = torch.distributed.new_group(ranks, backend="gloo") if rank in ranks: _DATA_MODULO_EXPERT_PARALLEL_GROUP = group + _DATA_MODULO_EXPERT_PARALLEL_GROUP_GLOO = group_gloo # Initialize global memory buffer # This isn't really "parallel state" but there isn't another good place to @@ -624,6 +628,13 @@ def get_data_modulo_expert_parallel_group(): return _DATA_MODULO_EXPERT_PARALLEL_GROUP +def get_data_modulo_expert_parallel_group_gloo(): + assert ( + _DATA_MODULO_EXPERT_PARALLEL_GROUP_GLOO is not None + ), 'data modulo expert parallel group-gloo is not initialized' + return _DATA_MODULO_EXPERT_PARALLEL_GROUP_GLOO + + def set_expert_model_parallel_world_size(world_size): global _MPU_EXPERT_MODEL_PARALLEL_WORLD_SIZE _MPU_EXPERT_MODEL_PARALLEL_WORLD_SIZE = world_size diff --git a/megatron/training.py b/megatron/training.py index 9b80971bbc..d604e6c489 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -407,6 +407,7 @@ def get_model(model_provider_func, model_type=ModelType.encoder_or_decoder, wrap model = [DDP(config, model_chunk, data_parallel_group=mpu.get_data_parallel_group(with_context_parallel=True), + expert_data_parallel_group=mpu.get_data_modulo_expert_parallel_group(), accumulate_allreduce_grads_in_fp32=args.accumulate_allreduce_grads_in_fp32, overlap_grad_reduce=args.overlap_grad_reduce, use_distributed_optimizer=args.use_distributed_optimizer, diff --git a/tests/functional_tests/jet_recipes/MR-gpt.yaml b/tests/functional_tests/jet_recipes/MR-gpt.yaml index 4c03391c57..6b9e2558dc 100644 --- a/tests/functional_tests/jet_recipes/MR-gpt.yaml +++ b/tests/functional_tests/jet_recipes/MR-gpt.yaml @@ -59,6 +59,7 @@ products: - {tp_size: [1], pp_size: [1], extra_args: ['"--recompute-granularity full --recompute-method uniform --recompute-num-layers 1"'], args_meta: ["uniform_full_recompute"]} # - {tp_size: [2], pp_size: [1,2], extra_args: 
['"--context-parallel-size 2 --sequence-parallel --hidden-dropout 0.0 --attention-dropout 0.0"']} # TODO: need updated container with TE > 1.0.0 - {tp_size: [2], pp_size: [1], extra_args: ['"--sequence-parallel --num-experts 8 --expert-model-parallel-size 2 --moe-router-load-balancing-type sinkhorn --moe-router-topk 1"'], args_meta: ["te_8experts2parallel"]} + - {tp_size: [2], pp_size: [1], extra_args: ['"--sequence-parallel --num-experts 8 --expert-model-parallel-size 2 --use-distributed-optimizer --moe-router-load-balancing-type sinkhorn --moe-router-topk 1"'], args_meta: ["te_8experts2parallel_dist_optimizer"]} - {tp_size: [2], pp_size: [1], extra_args: ['"--moe-grouped-gemm --disable-bias-linear --sequence-parallel --num-experts 8 --expert-model-parallel-size 2 --moe-router-load-balancing-type sinkhorn --moe-router-topk 1"'], moe_grouped_gemm: [1], args_meta: ["te_8experts2parallel_groupedGEMM"]} - {tp_size: [2], pp_size: [1], extra_args: ['"--disable-bias-linear --sequence-parallel --num-experts 8 --expert-model-parallel-size 2 --moe-router-load-balancing-type aux_loss --moe-router-topk 2 --moe-aux-loss-coeff 1e-2"'], moe_grouped_gemm: [1], args_meta: ["te_8experts2parallel_top2router"]} - {tp_size: [1], pp_size: [1], extra_args: ["--use-distributed-optimizer"], args_meta: ["dist_optimizer"]} diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-dist-optimizer_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-dist-optimizer_mcore-true_te-false.json new file mode 100644 index 0000000000..04eb336aac --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-dist-optimizer_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79066, 10.83474, 10.85443, 10.77921, 10.69997, 10.61398, 10.15871, 10.27978, 10.19497, 9.86981]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [30950.0, 37387.0, 37772.0, 36424.0, 33230.0, 34567.0, 30132.0, 34960.0, 36224.0, 37476.0]}, "iteration_timing_avg": 0.20243735294117646} \ No newline at end of file From a67ffda5a322610b1510b3fca1fffb85496c78b0 Mon Sep 17 00:00:00 2001 From: Deepak Narayanan Date: Fri, 16 Feb 2024 11:30:55 -0800 Subject: [PATCH 280/296] Make sure data_end_index is padded when creating new buckets --- megatron/core/distributed/grad_buffer.py | 35 +++++++++++++++++------- 1 file changed, 25 insertions(+), 10 deletions(-) diff --git a/megatron/core/distributed/grad_buffer.py b/megatron/core/distributed/grad_buffer.py index 9a6506957f..fe96c8fad1 100644 --- a/megatron/core/distributed/grad_buffer.py +++ b/megatron/core/distributed/grad_buffer.py @@ -200,8 +200,10 @@ def __init__( self.param_to_bucket = {} # Param -> bucket mapping. self.param_index_map = {} # Param -> location in buffer mapping (used in dist. optimizer). - def _pad_if_needed(data_index: int): - """Pads data indices if using distributed optimizer (to ensure uniform sharding).""" + def _pad_if_needed(data_index: int) -> int: + """ + Pads data indices if using distributed optimizer (to ensure uniform sharding). 
+ """ if use_distributed_optimizer: return ( int(math.ceil(data_index / self.data_parallel_world_size)) @@ -219,14 +221,22 @@ def _pad_if_needed(data_index: int): per_bucket_numel_unpadded = [] bucket_id = 0 - def _create_new_bucket(data_end_index: int): + def _create_new_bucket(data_end_index: int) -> int: + """ + Create the bucket_id'th bucket with collected bucket_params, starting at + bucket_data_start_index. + """ nonlocal bucket_data_start_index, bucket_params, bucket_id per_bucket_numel_unpadded.append(data_end_index - bucket_data_start_index) data_end_index = _pad_if_needed(data_end_index) + # Update bucket metadata. self.bucket_indices.append((bucket_data_start_index, data_end_index)) bucket_data_start_index = data_end_index + # Re-set bucket_params and increment bucket_id for next bucket. bucket_params = set() bucket_id += 1 + # Return the potentially padded data_end_index. + return data_end_index for param in params[::-1]: # Iterate through parameters in reverse order to roughly follow backprop order, @@ -237,17 +247,22 @@ def _create_new_bucket(data_end_index: int): data_end_index = data_start_index + this_numel def _does_param_require_new_bucket(param): - # Split shared embedding parameters into separate bucket if using distributed - # optimizer that makes use of reduce-scatters instead of all-reduces. - # This ensures that the first and last pipeline stage partition optimizer state - # for the shared embedding parameters the same way across DP replicas, allowing - # the DP reduce-scatter to be before the embedding all-reduce. + """ + Split shared embedding parameters into separate bucket if using distributed + optimizer that makes use of reduce-scatters instead of all-reduces. + This ensures that the first and last pipeline stage partition optimizer state + for the shared embedding parameters the same way across DP replicas, allowing + the DP reduce-scatter to be before the embedding all-reduce. + """ return getattr(param, "shared_embedding", False) and self.use_distributed_optimizer # Create bucket with already collected parameters if current param needs its own bucket. if _does_param_require_new_bucket(param) and len(bucket_params) > 0: # We are creating a bucket for the already accumulated parameters, whose params # end at the current data_start_index. + if use_distributed_optimizer: + # data_start_index should already be padded. + assert data_start_index % self.data_parallel_world_size == 0 _create_new_bucket(data_start_index) self.param_index_map[param] = ( @@ -263,12 +278,12 @@ def _does_param_require_new_bucket(param): bucket_size is not None and (data_end_index - bucket_data_start_index) >= bucket_size ) or _does_param_require_new_bucket(param): - _create_new_bucket(data_end_index) + data_end_index = _create_new_bucket(data_end_index) data_start_index = data_end_index # Add remaining params to a new bucket. if len(bucket_params) > 0: - _create_new_bucket(data_end_index) + data_end_index = _create_new_bucket(data_end_index) # Next, create underlying storage for buffer (with numel elements that includes # padding as necessary). 
From 5afa5da17d0e2154d861cab1a00ef8e67945b3ba Mon Sep 17 00:00:00 2001 From: Tuomas Rintamaki Date: Fri, 23 Feb 2024 17:10:40 -0800 Subject: [PATCH 281/296] Mcore CLIP ViT model --- .../models/common/vision_module/__init__.py | 0 .../common/vision_module/vision_module.py | 17 +++ megatron/core/models/vision/__init__.py | 0 megatron/core/models/vision/clip_vit_model.py | 139 ++++++++++++++++++ .../unit_tests/models/test_clip_vit_model.py | 55 +++++++ 5 files changed, 211 insertions(+) create mode 100644 megatron/core/models/common/vision_module/__init__.py create mode 100644 megatron/core/models/common/vision_module/vision_module.py create mode 100644 megatron/core/models/vision/__init__.py create mode 100644 megatron/core/models/vision/clip_vit_model.py create mode 100644 tests/unit_tests/models/test_clip_vit_model.py diff --git a/megatron/core/models/common/vision_module/__init__.py b/megatron/core/models/common/vision_module/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/megatron/core/models/common/vision_module/vision_module.py b/megatron/core/models/common/vision_module/vision_module.py new file mode 100644 index 0000000000..5dc51873a4 --- /dev/null +++ b/megatron/core/models/common/vision_module/vision_module.py @@ -0,0 +1,17 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +"""Megatron Vision Module.""" + +from megatron.core.transformer.module import MegatronModule +from megatron.core.transformer.transformer_config import TransformerConfig + + +# Note: This is only a stub at the moment. This will be expanded in follow-up changes. +class VisionModule(MegatronModule): + """Base vision module that has common helper functions used across CLIP, ViT, etc. + + Args: + config (TransformerConfig): Input transformer config for the model + """ + + def __init__(self, config: TransformerConfig) -> None: + super().__init__(config=config) diff --git a/megatron/core/models/vision/__init__.py b/megatron/core/models/vision/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/megatron/core/models/vision/clip_vit_model.py b/megatron/core/models/vision/clip_vit_model.py new file mode 100644 index 0000000000..f898f1e54a --- /dev/null +++ b/megatron/core/models/vision/clip_vit_model.py @@ -0,0 +1,139 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + +from typing import Optional + +import torch + +from megatron.core import tensor_parallel +from megatron.core.models.common.vision_module.vision_module import VisionModule +from megatron.core.transformer.custom_layers.transformer_engine import TENorm +from megatron.core.transformer.enums import ModelType +from megatron.core.transformer.spec_utils import ModuleSpec +from megatron.core.transformer.transformer_block import TransformerBlock +from megatron.core.transformer.transformer_config import TransformerConfig + + +# Note: This is unused at the moment and is missing features like position embedding interpolation. +# Follow-up changes will use this and expand the functionality. +class CLIPViTModel(VisionModule): + """CLIP ViT vision model. + + Args: + transformer_config (TransformerConfig): Transformer config + transformer_layer_spec (ModuleSpec): Specifies module to use for transformer layers + patch_dim (int): Image patch size. + img_h (int): Input image height. + img_w (int): Input image width. + add_class_token (bool, optional): Include a class token. Defaults to True. + class_token_len (int): Class token length. Defaults to 1 but 8 may be faster. 
+ """ + + def __init__( + self, + transformer_config: TransformerConfig, + transformer_layer_spec: ModuleSpec, + patch_dim: int = 14, + img_h: int = 336, + img_w: int = 336, + add_class_token: bool = True, + class_token_len: int = 1, + ) -> None: + super().__init__(config=transformer_config) + + self.visual_hidden_size = transformer_config.hidden_size + self.patch_dim = patch_dim + self.img_h = img_h + self.img_w = img_w + assert self.img_h % self.patch_dim == 0 + assert self.img_w % self.patch_dim == 0 + self.num_patches_per_dim_h = self.img_h // self.patch_dim + self.num_patches_per_dim_w = self.img_w // self.patch_dim + self.num_patches = self.num_patches_per_dim_h * self.num_patches_per_dim_w + + self.add_class_token = add_class_token + self.class_token_len = class_token_len + + self.seq_length = self.num_patches + (self.class_token_len if self.add_class_token else 0) + + self.conv1 = torch.nn.Conv2d( + in_channels=3, + out_channels=self.visual_hidden_size, + kernel_size=self.patch_dim, + stride=self.patch_dim, + bias=False, + ) + + self.position_ids = torch.arange(self.seq_length).expand(1, -1).cuda() + + self.position_embeddings = torch.nn.Embedding(self.seq_length, self.visual_hidden_size) + + self.add_class_token = add_class_token + if self.add_class_token: + self.class_token = torch.nn.Parameter( + torch.randn(1, self.class_token_len, self.visual_hidden_size) + ) + + self.ln_pre = TENorm( + config=self.config, + hidden_size=self.visual_hidden_size, + eps=self.config.layernorm_epsilon, + ) + + self.model_type = ModelType.encoder_or_decoder + + # Transformer + final layer norm (via post_process) + # TODO: Follow-up changes will make pre and post_process configurable. They are needed for supporting pipeline parallelism. + self.transformer = TransformerBlock( + config=transformer_config, + spec=transformer_layer_spec, + pre_process=True, + post_process=True, + ) + + # Note: a final linear layer present in some implementations is omitted here. It can be added separately where needed. + + def set_input_tensor(self, input_tensor: torch.Tensor) -> None: + """Sets input tensor to the model. + + Args: + input_tensor (Tensor): Sets the input tensor for the model. + """ + self.transformer.set_input_tensor(input_tensor) + + def forward( + self, x: torch.Tensor, attention_mask: Optional[torch.Tensor] = None + ) -> torch.Tensor: + """Forward function of the CLIP ViT Model. This function passes the input tensors + through the embedding layer and then the transformer. + + Args: + x (torch.Tensor): input data of shape [batch, img_h, img_w] + attention_mask (torch.Tensor with dtype=bool): Attention mask to use. If none, all ones. + + Returns: + x (torch.Tensor): output after final transformer block of shape [b, s, h]. 
+ """ + x = self.conv1(x) # shape = [batch, hidden_size, grid, grid] + x = x.reshape(x.shape[0], x.shape[1], -1) # [batch, hidden_size, grid ** 2] + x = x.permute(0, 2, 1) # [batch, grid ** 2, hidden_size] + + if self.add_class_token: + class_token = self.class_token.expand( + x.shape[0], -1, -1 + ) # [batch, class_token_len, hidden_size] + x = torch.cat( + [class_token, x], dim=1 + ) # [batch, grid ** 2 + class_token_len, hidden_size] + + x = x + self.position_embeddings(self.position_ids) + x = self.ln_pre(x) + + x = x.permute(1, 0, 2) # [b, s, h] -> [s, b, h] + if attention_mask is None: + attention_mask = torch.ones(1, 1, x.shape[0], x.shape[0]).cuda() # [1, 1, s, s] + attention_mask = attention_mask < 0.5 # to bool + x = self.transformer(x.contiguous(), attention_mask) + x = x.permute(1, 0, 2) # [s, b, h] -> [b, s, h] + x = x.contiguous() + + return x diff --git a/tests/unit_tests/models/test_clip_vit_model.py b/tests/unit_tests/models/test_clip_vit_model.py new file mode 100644 index 0000000000..3c15684fb4 --- /dev/null +++ b/tests/unit_tests/models/test_clip_vit_model.py @@ -0,0 +1,55 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + +import pytest +import torch + +from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec +from megatron.core.models.vision.clip_vit_model import CLIPViTModel +from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed +from megatron.core.transformer.transformer_config import TransformerConfig +from tests.unit_tests.test_utilities import Utils + + +class TestCLIPViTModel: + """Test CLIP ViT model.""" + + def setup_method(self, method): + Utils.initialize_model_parallel(1, 1) + model_parallel_cuda_manual_seed(123) + transformer_config = TransformerConfig( + num_layers=2, hidden_size=64, num_attention_heads=4, use_cpu_initialization=True + ) + transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec() + self.model = CLIPViTModel(transformer_config, transformer_layer_spec) + + def teardown_method(self, method): + Utils.destroy_model_parallel() + + def test_constructor(self): + assert isinstance(self.model, CLIPViTModel) + + num_weights = sum([p.numel() for p in self.model.parameters()]) + assert num_weights == 174848 + + def test_set_input_tensor(self): + # [s, b, h] expected to the transformer. 
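+        # With the CLIPViTModel defaults (patch_dim=14, img_h=img_w=336, one class
+        # token), seq_length = (336 // 14) ** 2 + 1 = 577; hidden_size=64 comes from
+        # the test config above, and the batch size of 2 is arbitrary.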
+ expected_shape = (577, 2, 64) + input_tensor = torch.zeros(expected_shape) + + self.model.set_input_tensor(input_tensor) + + assert self.model.transformer.input_tensor.shape == torch.Size(expected_shape) + + def test_forward(self): + self.model.cuda() + + img = torch.zeros((2, 3, 336, 336)).cuda() + + out = self.model.forward(img) + assert out.shape == torch.Size([2, 577, 64]) + + def test_save_load(self, tmp_path): + path = tmp_path / "model.pt" + torch.save(self.model.state_dict(), path) + + self.model.load_state_dict(torch.load(path)) From 9530e19988832b909c1c181200a0dc40b536cb08 Mon Sep 17 00:00:00 2001 From: Deepak Narayanan Date: Sun, 25 Feb 2024 23:00:57 -0800 Subject: [PATCH 282/296] Print number of transformer and embedding parameters separately --- megatron/theoretical_memory_usage.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/megatron/theoretical_memory_usage.py b/megatron/theoretical_memory_usage.py index 1a6fb6b5b3..642fa0d831 100644 --- a/megatron/theoretical_memory_usage.py +++ b/megatron/theoretical_memory_usage.py @@ -26,15 +26,18 @@ def compute_weight_and_optimizer_memory(args, verbose=False): ) embedding_size = args.hidden_size * args.padded_vocab_size if args.untie_embeddings_and_output_weights: - num_total_parameters_with_embeddings = num_parameters_in_transformer_layers + ( - 2 * embedding_size - ) + num_parameters_in_embedding_layers = 2 * embedding_size else: - num_total_parameters_with_embeddings = num_parameters_in_transformer_layers + embedding_size + num_parameters_in_embedding_layers = embedding_size + num_total_parameters = num_parameters_in_transformer_layers + num_parameters_in_embedding_layers if verbose: print( - f"Number of parameters in billions: {num_total_parameters_with_embeddings / 10**9:.2f}" + f"Number of parameters in transformer layers in billions: {num_parameters_in_transformer_layers / 10**9: .2f}" + ) + print( + f"Number of parameters in embedding layers in billions: {num_parameters_in_embedding_layers / 10**9:.2f}" ) + print(f"Total number of parameters in billions: {num_total_parameters / 10**9:.2f}") # Most loaded model shard has (1/pp_size transformer layers + 1 embedding layer) / tp_size. 
num_parameters_on_most_loaded_model_shard = ( From 5f1f81303adc16c7e7b96c7e1195a0b03f41d7f8 Mon Sep 17 00:00:00 2001 From: Mikolaj Blaz Date: Tue, 27 Feb 2024 13:05:39 -0800 Subject: [PATCH 283/296] Unify resume and correctness functional tests --- .gitlab-ci.yml | 25 +-- .../functional_tests/jet_recipes/MR-bert.yaml | 61 +----- .../functional_tests/jet_recipes/MR-gpt.yaml | 60 +----- .../jet_recipes/monthly-t5.yaml | 59 +----- .../test_resume_checkpoint_pipeline.py | 32 ++-- ...ps-100_tp-1_pp-2_mcore-false_te-false.json | 1 + ...eps-50_tp-1_pp-2_mcore-false_te-false.json | 1 - ...ps-100_tp-1_pp-2_mcore-false_te-false.json | 1 - ...ps-100_tp-1_pp-2_mcore-false_te-false.json | 1 + ...bert_distributed_resume_checkpoint_test.sh | 108 ----------- .../bert/pretrain_bert_distributed_test.sh | 17 +- ...gpt3_distributed_resume_checkpoint_test.sh | 119 ------------ .../gpt3/pretrain_gpt3_distributed_test.sh | 17 +- ...etro_distributed_resume_checkpoint_test.sh | 127 ------------- .../retro/pretrain_retro_distributed_test.sh | 27 ++- ...n_t5_distributed_resume_checkpoint_test.sh | 175 ------------------ .../t5/pretrain_t5_distributed_test.sh | 16 +- 17 files changed, 108 insertions(+), 739 deletions(-) create mode 100644 tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-100_tp-1_pp-2_mcore-false_te-false.json delete mode 100644 tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-2_mcore-false_te-false.json delete mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request-resume_16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json delete mode 100755 tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_resume_checkpoint_test.sh delete mode 100755 tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_resume_checkpoint_test.sh delete mode 100755 tests/functional_tests/test_scripts/retro/pretrain_retro_distributed_resume_checkpoint_test.sh delete mode 100755 tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_resume_checkpoint_test.sh diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index f1f9117af1..3c2d3fef3a 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -18,7 +18,7 @@ variables: &VARS DISPLAY_OUTPUT: "True" # Set to true for new tests to copy the logs for creating golden truth file TIME_LIMIT: "10:00" # Default time limit for all jobs MOE_GROUPED_GEMM: 0 # Set to 1 to enable grouped gemm for MoE - + include: - jet-tests.yml @@ -70,29 +70,6 @@ formatting: rules: - when: always -.selene_test_resume_checkpoint_launcher: &selene-test-resume-checkpoint-launcher - tags: - - ssh_selene_runner - stage: test - script: &selene-test-resume-launcher-script - - echo "Running selene resume from checkpoint test. 
" - - pwd - - run_cmd="bash tests/functional_tests/shell_test_utils/run_selene_test_resume_checkpoint_launcher_script.sh RUN_MODEL=$RUN_MODEL TP_SIZE=$TP_SIZE PP_SIZE=$PP_SIZE VP_SIZE=$VP_SIZE NUM_NODES=$NUM_NODES SELENE_ADLR_CI_PATH=$SELENE_ADLR_CI_PATH CI_PIPELINE_ID=$CI_PIPELINE_ID RUN_NAME=$RUN_NAME PYTORCH_IMAGE=$PYTORCH_IMAGE DATA_DIR=$DATA_DIR TIME_LIMIT=$TIME_LIMIT" - - echo "$run_cmd" - - ${run_cmd} - - echo "Completed the job" - rules: - - if: $TEST_LEVEL =~ $TESTS_TO_RUN_ON_THIS_COMMIT || $CI_JOB_NAME =~ $TESTS_TO_RUN_ON_THIS_COMMIT || $CI_JOB_NAME =~ $TEST_REGEX_ON_THIS_COMMIT - when: always - - if: '$CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGING' - when: always - - if: $CI_PIPELINE_SOURCE == "merge_request_event" && $CI_MERGE_REQUEST_APPROVED && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED - when: always - - if: $CI_PIPELINE_SOURCE == "merge_request_event" && $CI_MERGE_REQUEST_LABELS =~ "READY FOR REVIEW" && $TEST_LEVEL =~ $TESTS_TO_RUN_AFTER_MERGE_REQ_APPROVED - when: always - allow_failure: false - retry: 2 - .selene_test_launcher: &selene-test-launcher tags: - ssh_selene_runner diff --git a/tests/functional_tests/jet_recipes/MR-bert.yaml b/tests/functional_tests/jet_recipes/MR-bert.yaml index edfe09371b..28c4e3f68d 100644 --- a/tests/functional_tests/jet_recipes/MR-bert.yaml +++ b/tests/functional_tests/jet_recipes/MR-bert.yaml @@ -5,7 +5,7 @@ loggers: [stdout] spec: model: bert variant: 345m - build: mcore-pyt + build: mcore-pyt scope: merge-request nodes: 1 gpus: 8 @@ -21,6 +21,7 @@ spec: precision: bf16 time_limit: 1200 artifacts: {/workspace/data/bert_data: text/the_pile/bert_shard00} + checkpoint_resume_test: 0 script: |- ls cd /workspace/megatron-lm @@ -39,6 +40,7 @@ spec: VP_SIZE={vp_size if vp_size is not None else '""'} \ MBS={micro_batch_size} \ GBS={batch_size} \ + CHECKPOINT_RESUME_TEST={checkpoint_resume_test} \ ADDITIONAL_PARAMS={extra_args if extra_args is not None else '""'} && \ python3 ./tests/functional_tests/python_test_utils/get_test_results_from_tensorboard_logs.py {assets_dir} "" | \ tee {assets_dir}/results.json @@ -49,61 +51,8 @@ products: # Non-MCore - {use_mcore: [False], tp_size: [2], pp_size: [2]} - {use_mcore: [False], tp_size: [1], pp_size: [4], vp_size: [2]} -key_segments: - vp_size: vp - use_mcore: mcore - use_te: te - args_meta: args - - ---- -### Resume from ckpt ### -type: recipe -format_version: 1 -maintainers: [maanug] -loggers: [stdout] -spec: - model: bert - variant: 345m - build: mcore-pyt - scope: merge-request-resume - nodes: 1 - gpus: 8 - platforms: [dgx_h100] - steps: 50 - use_te: False - use_mcore: True - vp_size: null - extra_args: null - args_meta: null - micro_batch_size: 4 # MBS - batch_size: 128 # GBS, JET schema requires 'batch_size' - precision: bf16 - time_limit: 1200 - artifacts: {/workspace/data/bert_data: text/the_pile/bert_shard00} - script: |- - ls - cd /workspace/megatron-lm - - ./tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_resume_checkpoint_test.sh \ - DATA_PATH=/workspace/data/bert_data/my-bert_00_text_sentence \ - CHECKPOINT_PATH=/workspace/checkpoints \ - TENSORBOARD_DIR={assets_dir} \ - DATA_CACHE=/workspace/data/index-cache \ - USE_TE={"1" if use_te else "0"} \ - TP_SIZE={tp_size} \ - PP_SIZE={pp_size} \ - NUM_NODES={nodes} \ - MAX_STEPS={steps} \ - USE_CORE={"1" if use_mcore else "0"} \ - VP_SIZE={vp_size if vp_size is not None else '""'} \ - MBS={micro_batch_size} \ - GBS={batch_size} \ - ADDITIONAL_PARAMS={extra_args if extra_args is 
not None else '""'} && \ - python3 ./tests/functional_tests/python_test_utils/get_test_results_from_tensorboard_logs.py {assets_dir} "" | \ - tee {assets_dir}/results.json -products: - - {use_mcore: [False], tp_size: [1], pp_size: [2]} + # Checkpoint resume + - {checkpoint_resume_test: [1], scope: [merge-request-resume], steps: [100], use_mcore: [False], tp_size: [1], pp_size: [2]} key_segments: vp_size: vp use_mcore: mcore diff --git a/tests/functional_tests/jet_recipes/MR-gpt.yaml b/tests/functional_tests/jet_recipes/MR-gpt.yaml index 6b9e2558dc..a708fea315 100644 --- a/tests/functional_tests/jet_recipes/MR-gpt.yaml +++ b/tests/functional_tests/jet_recipes/MR-gpt.yaml @@ -22,6 +22,7 @@ spec: precision: bf16 time_limit: 1200 artifacts: {/workspace/data/gpt3_data: text/the_pile/shard00} + checkpoint_resume_test: 0 script: |- ls cd /workspace/megatron-lm @@ -43,6 +44,7 @@ spec: MBS={micro_batch_size} \ GBS={batch_size} \ MOE_GROUPED_GEMM={moe_grouped_gemm} \ + CHECKPOINT_RESUME_TEST={checkpoint_resume_test} \ ADDITIONAL_PARAMS={extra_args if extra_args is not None else '""'} && \ python3 ./tests/functional_tests/python_test_utils/get_test_results_from_tensorboard_logs.py {assets_dir} "" | \ tee {assets_dir}/results.json @@ -71,62 +73,8 @@ products: # Non-MCore - {use_mcore: [False], use_te: [False, True], tp_size: [2], pp_size: [2]} - {use_mcore: [False], tp_size: [1], pp_size: [4], vp_size: [1]} -key_segments: - vp_size: vp - use_mcore: mcore - use_te: te - args_meta: args - - ---- -### Resume from ckpt ### -type: recipe -format_version: 1 -maintainers: [maanug] -loggers: [stdout] -spec: - model: gpt3 - variant: 345m - build: mcore-pyt - scope: merge-request-resume - nodes: 1 - gpus: 8 - platforms: [dgx_h100] - steps: 100 - use_te: False - use_mcore: True - vp_size: null - extra_args: null - args_meta: null - micro_batch_size: 4 # MBS - batch_size: 32 # GBS, JET schema requires 'batch_size' - precision: 16 - time_limit: 1200 - artifacts: {/workspace/data/gpt3_data: text/the_pile/shard00} - script: |- - ls - cd /workspace/megatron-lm - - ./tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_resume_checkpoint_test.sh \ - DATA_PATH=/workspace/data/gpt3_data/my-gpt3_00_text_document \ - CHECKPOINT_PATH=/workspace/checkpoints \ - TENSORBOARD_DIR={assets_dir} \ - VOCAB_FILE=/workspace/data/gpt3_data/bpe/vocab.json \ - MERGE_FILE=/workspace/data/gpt3_data/bpe/merges.txt \ - DATA_CACHE=/workspace/data/index-cache \ - USE_TE={"1" if use_te else "0"} \ - TP_SIZE={tp_size} \ - PP_SIZE={pp_size} \ - NUM_NODES={nodes} \ - USE_CORE={"1" if use_mcore else "0"} \ - VP_SIZE={vp_size if vp_size is not None else '""'} \ - MBS={micro_batch_size} \ - GBS={batch_size} \ - ADDITIONAL_PARAMS={extra_args if extra_args is not None else '""'} && \ - python3 ./tests/functional_tests/python_test_utils/get_test_results_from_tensorboard_logs.py {assets_dir} "" | \ - tee {assets_dir}/results.json -products: - - {use_mcore: [False], tp_size: [1], pp_size: [2]} + # Checkpoint resume + - {checkpoint_resume_test: [1], scope: [merge-request-resume], steps: [100], use_mcore: [False], tp_size: [1], pp_size: [2]} key_segments: vp_size: vp use_mcore: mcore diff --git a/tests/functional_tests/jet_recipes/monthly-t5.yaml b/tests/functional_tests/jet_recipes/monthly-t5.yaml index 6eb3490fe8..d99bf92b9c 100644 --- a/tests/functional_tests/jet_recipes/monthly-t5.yaml +++ b/tests/functional_tests/jet_recipes/monthly-t5.yaml @@ -21,6 +21,7 @@ spec: precision: bf16 time_limit: 1800 artifacts: {/workspace/data/t5_data: 
text/the_pile/t5_shard00} + checkpoint_resume_test: 0 script: |- ls cd /workspace/megatron-lm @@ -39,6 +40,7 @@ spec: VP_SIZE={vp_size if vp_size is not None else '""'} \ MBS={micro_batch_size} \ GBS={batch_size} \ + CHECKPOINT_RESUME_TEST={checkpoint_resume_test} \ ADDITIONAL_PARAMS={extra_args if extra_args is not None else '""'} && \ python3 ./tests/functional_tests/python_test_utils/get_test_results_from_tensorboard_logs.py {assets_dir} "" | \ tee {assets_dir}/results.json @@ -46,61 +48,8 @@ products: - { tp_size: [1,2], pp_size: [1], vp_size: [1] } - {use_te: [True], tp_size: [2], pp_size: [1], vp_size: [1]} - {use_te: [True], tp_size: [2], pp_size: [1], vp_size: [1], extra_args: ["--sequence-parallel"], args_meta: ["sequence_parallel"]} -key_segments: - vp_size: vp - use_mcore: mcore - use_te: te - args_meta: args - - ---- -### Resume from ckpt ### -type: recipe -format_version: 1 -maintainers: [maanug] -loggers: [stdout] -spec: - model: t5 - variant: 220m - build: mcore-pyt - scope: monthly-resume - nodes: 1 - gpus: 8 - platforms: [dgx_h100] - steps: 100 - use_te: False - use_mcore: True - vp_size: 1 - extra_args: null - args_meta: null - micro_batch_size: 4 # MBS - batch_size: 32 # GBS, JET schema requires 'batch_size' - precision: bf16 - time_limit: 1800 - artifacts: {/workspace/data/t5_data: text/the_pile/t5_shard00} - script: |- - ls - cd /workspace/megatron-lm - - ./tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_resume_checkpoint_test.sh \ - DATA_PATH="/workspace/data/t5_data/my-t5_00_text_document" \ - CHECKPOINT_PATH=/workspace/checkpoints \ - TENSORBOARD_DIR={assets_dir} \ - VOCAB_PATH="/workspace/data/t5_data/bert-large-cased-vocab.txt" \ - DATA_CACHE=/workspace/data/index-cache \ - USE_TE={"1" if use_te else "0"} \ - TP_SIZE={tp_size} \ - PP_SIZE={pp_size} \ - NUM_NODES={nodes} \ - USE_CORE={"1" if use_mcore else "0"} \ - VP_SIZE={vp_size if vp_size is not None else '""'} \ - MBS={micro_batch_size} \ - GBS={batch_size} \ - ADDITIONAL_PARAMS={extra_args if extra_args is not None else '""'} && \ - python3 ./tests/functional_tests/python_test_utils/get_test_results_from_tensorboard_logs.py {assets_dir} "" | \ - tee {assets_dir}/results.json -products: - - {use_te: [False, True], tp_size: [1], pp_size: [1], vp_size: [1]} + # Checkpoint resume + - {checkpoint_resume_test: [1], scope: [monthly-resume], use_te: [False, True], tp_size: [1], pp_size: [1], vp_size: [1]} key_segments: vp_size: vp use_mcore: mcore diff --git a/tests/functional_tests/python_test_utils/test_resume_checkpoint_pipeline.py b/tests/functional_tests/python_test_utils/test_resume_checkpoint_pipeline.py index 41b7a0e7d8..417297eaff 100644 --- a/tests/functional_tests/python_test_utils/test_resume_checkpoint_pipeline.py +++ b/tests/functional_tests/python_test_utils/test_resume_checkpoint_pipeline.py @@ -1,11 +1,16 @@ import os + os.environ['OPENBLAS_NUM_THREADS'] = '1' -import sys +import glob import json import shutil -import glob +import sys + +import pytest from tensorboard.backend.event_processing import event_accumulator +from tests.functional_tests.python_test_utils.common import TypeOfTest + LOGS_DIR = os.getenv('LOGS_DIR') STEP_INTERVAL = 5 @@ -36,10 +41,11 @@ def collect_train_test_metrics(logs_dir, index): class TestCIPipeline: + margin_loss = 0.05 train_metrics_100 = collect_train_test_metrics(LOGS_DIR, 0) train_metrics_50_to_100 = collect_train_test_metrics(LOGS_DIR, 1) - def _test_helper(self, loss_type): + def _test_helper(self, loss_type, test_type): expected = 
self.train_metrics_100[loss_type] assert len(expected) == 100 // STEP_INTERVAL, \ f"Train metrics from first run (before checkpoint load) should have {100 // STEP_INTERVAL} elements" @@ -48,14 +54,18 @@ def _test_helper(self, loss_type): assert len(actual) == 50 // STEP_INTERVAL, \ f"Train metrics from second run (after checkpoint load) should have {50 // STEP_INTERVAL} elements" print('actual : ' + str(actual)) - # NOTE : Doing this way because in gpt3 model when I run from 0 - 100 directly, it produces 1 extra element - # i.e expected is [10.84266, 10.89696, 10.90542, 10.87498, 10.86265, 10.83608, 10.64368, 10.62319, 10.53908, 10.25005, 10.20907, 9.96542, 9.96802, 9.92436, 9.79086, 9.26718, 9.61784, 9.19018, 9.45986, 9.62168, 9.73772, 8.85732, 9.43185, 9.27912, 9.6832, 9.5127, 9.5419, 9.02549, 8.55077, 8.91355, 8.83375, 9.17722, 9.22436, 9.19436, 9.11323, 9.09711, 9.04421, 9.36795] - # actual is : [9.73772, 8.85732, 9.43185, 9.27912, 9.6832, 9.5127, 9.5419, 9.02549, 8.55077, 8.91355, 8.83375, 9.17722, 9.22435, 9.19435, 9.11322, 9.09711, 9.04422] - # That extra element in expected is causing some issues. So doing it this way. Need to figure out whats happening - start_idx_expected = expected.index(actual[0]) # First element of actual + start_idx_expected = len(expected) - len(actual) + print('start_idx_expected:', start_idx_expected) # Here we will just be comparing values of actual and second half (50-100) of expected - for i in range(len(actual)): - assert actual[i] == expected[start_idx_expected + i], f"The value at step {i} should be {expected[start_idx_expected + i]} but it is {actual[i]}." + for i, (expected_val, actual_val) in enumerate(zip(expected[start_idx_expected:], actual)): + step = start_idx_expected + i * STEP_INTERVAL + if test_type == TypeOfTest.APPROX: + assert actual_val == pytest.approx(expected=expected_val, rel=self.margin_loss), f"The loss at step {step} should be approximately {expected_val} but it is {actual_val}." + else: + assert actual_val == expected_val, f"The value at step {step} should be {expected_val} but it is {actual_val}." 
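+        # The loop above aligns the resumed run (which only logs the 50->100 half of
+        # training) with the matching tail of the full 0->100 run. APPROX comparisons
+        # allow a relative tolerance of margin_loss via pytest.approx, while
+        # DETERMINISTIC comparisons require exact equality.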
def test_lm_loss_deterministic(self): - self._test_helper("lm loss") + self._test_helper("lm loss", TypeOfTest.DETERMINISTIC) + + def test_lm_loss_approx(self): + self._test_helper("lm loss", TypeOfTest.APPROX) diff --git a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-100_tp-1_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-100_tp-1_pp-2_mcore-false_te-false.json new file mode 100644 index 0000000000..bf335a35d0 --- /dev/null +++ b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-100_tp-1_pp-2_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [10.51554, 10.51032, 10.52063, 10.52247, 10.51818, 10.5092, 10.43695, 10.29864, 10.16893, 9.98643, 9.9146, 9.78576, 9.67452, 9.55758, 9.50388, 9.35033, 9.34043, 9.27911, 9.27768, 9.20722]}, "num-zeros": {"start_step": 0, "end_step": 84, "step_interval": 5, "values": [21174.0, 21615.0, 24124.0, 18698.0, 23551.0, 18803.0, 19627.0, 27198.0, 25001.0, 25778.0, 15220.0, 35074.0, 26410.0, 22075.0, 37860.0, 28583.0, 23027.0]}, "iteration_timing_avg": 0.24888507462686574} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-2_mcore-false_te-false.json deleted file mode 100644 index 9ee243fd58..0000000000 --- a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-2_mcore-false_te-false.json +++ /dev/null @@ -1 +0,0 @@ -{"lm loss": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [10.51553, 10.51031, 10.52063, 10.52246, 10.51819, 10.50918, 10.43691, 10.29866, 10.16894, 9.98642, 9.91462, 9.78574, 9.67453, 9.55759, 9.50386, 9.35031, 9.34045, 9.27913, 9.27768, 9.20723]}, "num-zeros": {"start_step": 0, "end_step": 84, "step_interval": 5, "values": [21436.0, 21632.0, 23818.0, 19149.0, 23732.0, 18947.0, 19899.0, 26923.0, 24942.0, 25962.0, 15012.0, 34688.0, 26498.0, 21937.0, 37472.0, 28599.0, 23063.0]}, "iteration_timing_avg": 0.24888507462686574} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request-resume_16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request-resume_16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json deleted file mode 100644 index 5d41fc6f1c..0000000000 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request-resume_16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json +++ /dev/null @@ -1 +0,0 @@ -{"lm loss": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [10.79022, 10.84034, 10.85603, 10.8232, 10.83355, 10.78173, 10.59641, 10.58331, 10.49545, 10.22799, 10.1995, 9.94815, 9.94997, 9.91997, 9.79865, 9.25224, 9.61409, 9.19153, 9.46281, 9.62472]}, "num-zeros": {"start_step": 0, "end_step": 83, "step_interval": 5, "values": [2085.0, 2613.0, 2387.0, 2215.0, 2074.0, 2039.0, 2766.0, 2722.0, 2763.0, 2395.0, 2859.0, 3089.0, 3405.0, 2982.0, 3134.0, 2896.0, 3986.0]}, "iteration_timing_avg": 
0.06181014925373134} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json new file mode 100644 index 0000000000..583d5ed358 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [10.79022, 10.84034, 10.85603, 10.82319, 10.83355, 10.78173, 10.59641, 10.58331, 10.49545, 10.22799, 10.19949, 9.94816, 9.94997, 9.91997, 9.79865, 9.25223, 9.61408, 9.19153, 9.46281, 9.62472]}, "num-zeros": {"start_step": 0, "end_step": 83, "step_interval": 5, "values": [2130.0, 2531.0, 2368.0, 2204.0, 2141.0, 2068.0, 2772.0, 2715.0, 2831.0, 2384.0, 2870.0, 2893.0, 3396.0, 3064.0, 3136.0, 2916.0, 3917.0]}, "iteration_timing_avg": 0.06181014925373134} \ No newline at end of file diff --git a/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_resume_checkpoint_test.sh b/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_resume_checkpoint_test.sh deleted file mode 100755 index 1b1920f7ac..0000000000 --- a/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_resume_checkpoint_test.sh +++ /dev/null @@ -1,108 +0,0 @@ -#! /bin/bash - -echo "------ARGUMENTS LIST --------" -for ARGUMENT in "$@" -do - KEY=$(echo $ARGUMENT | cut -f1 -d=) - - KEY_LENGTH=${#KEY} - VALUE="${ARGUMENT:$KEY_LENGTH+1}" - - export "$KEY"="$VALUE" - echo "$KEY=$VALUE" -done -echo "---------------------------------" - -if [[ -z $VOCAB_FILE ]]; then VOCAB_FILE="/workspace/data/bert_data/vocab.txt" ; fi - -GPUS_PER_NODE=8 -# Change for multinode config -MASTER_ADDR=localhost -MASTER_PORT=6000 -NODE_RANK=0 -WORLD_SIZE=$(($GPUS_PER_NODE*$NUM_NODES)) -export CUDA_DEVICE_MAX_CONNECTIONS=1 - - -# Runs the "345M" parameter model -DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NUM_NODES" - -# Run for 100 iterations -torchrun $DISTRIBUTED_ARGS \ - pretrain_bert.py \ - --use-checkpoint-args \ - --use-checkpoint-opt_param-scheduler \ - --num-layers 24 \ - --hidden-size 1024 \ - --num-attention-heads 16 \ - --log-params-norm \ - --log-num-zeros-in-grad \ - --log-validation-ppl-to-tensorboard \ - --log-timers-to-tensorboard \ - --tensorboard-dir ${TENSORBOARD_DIR} \ - --micro-batch-size 4 \ - --global-batch-size 128 \ - --seq-length 512 \ - --max-position-embeddings 512 \ - --train-iters 100 \ - --timing-log-level 2 \ - --lr-decay-iters 990000 \ - --save $CHECKPOINT_PATH \ - --load $CHECKPOINT_PATH \ - --data-path $DATA_PATH \ - --vocab-file $VOCAB_FILE \ - --split 949,50,1 \ - --distributed-backend nccl \ - --lr 0.0001 \ - --min-lr 0.00001 \ - --lr-warmup-fraction 0.01 \ - --log-interval 1 \ - --save-interval 50 \ - --eval-interval 1000 \ - --eval-iters 10 \ - --tensor-model-parallel-size $TP_SIZE \ - --pipeline-model-parallel-size $PP_SIZE \ - --no-gradient-accumulation-fusion \ - ${DATA_CACHE:+--data-cache-path "$DATA_CACHE"} \ - --fp16 - -echo 50 > $CHECKPOINT_PATH/latest_checkpointed_iteration.txt - -# Resume from 50th iteration ckpt and continue to 100 iterations -torchrun $DISTRIBUTED_ARGS \ - pretrain_bert.py \ - --use-checkpoint-args \ - --use-checkpoint-opt_param-scheduler \ - --num-layers 24 \ - 
--hidden-size 1024 \ - --num-attention-heads 16 \ - --log-params-norm \ - --log-num-zeros-in-grad \ - --log-validation-ppl-to-tensorboard \ - --log-timers-to-tensorboard \ - --tensorboard-dir ${TENSORBOARD_DIR} \ - --micro-batch-size 4 \ - --global-batch-size 128 \ - --seq-length 512 \ - --max-position-embeddings 512 \ - --train-iters 100 \ - --timing-log-level 2 \ - --lr-decay-iters 990000 \ - --save $CHECKPOINT_PATH \ - --load $CHECKPOINT_PATH \ - --data-path $DATA_PATH \ - --vocab-file $VOCAB_FILE \ - --split 949,50,1 \ - --distributed-backend nccl \ - --lr 0.0001 \ - --min-lr 0.00001 \ - --lr-warmup-fraction 0.01 \ - --log-interval 1 \ - --save-interval 10000 \ - --eval-interval 1000 \ - --eval-iters 10 \ - --tensor-model-parallel-size $TP_SIZE \ - --pipeline-model-parallel-size $PP_SIZE \ - --no-gradient-accumulation-fusion \ - ${DATA_CACHE:+--data-cache-path "$DATA_CACHE"} \ - --fp16 diff --git a/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_test.sh b/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_test.sh index 23508c3290..e2abaa51fc 100755 --- a/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_test.sh +++ b/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_test.sh @@ -35,7 +35,17 @@ if [[ $USE_CORE -eq 1 ]]; then command="$command export NVTE_ALLOW_NONDETERMINISTIC_ALGO=0;" USE_MCORE=1 fi - +if [[ $CHECKPOINT_RESUME_TEST -eq 1 ]]; then + echo "Running checkpoint resume test..." + __SAVE_INTERVAL=50 + ADDITIONAL_PARAMS+=" --use-checkpoint-args --use-checkpoint-opt_param-scheduler" + if [[ $MAX_STEPS -ne 100 ]]; then + echo "Overriding MAX_STEPS=100" + MAX_STEPS=100 + fi +else + __SAVE_INTERVAL=10000 # inf +fi # Runs the "345M" parameter model DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NUM_NODES" @@ -66,7 +76,7 @@ torch_run_cmd="torchrun $DISTRIBUTED_ARGS \ --min-lr 0.00001 \ --lr-warmup-fraction 0.01 \ --log-interval 1 \ - --save-interval 10000 \ + --save-interval $__SAVE_INTERVAL \ --eval-interval 1000 \ --eval-iters 10 \ --tensor-model-parallel-size $TP_SIZE \ @@ -83,6 +93,9 @@ if [[ "${TRAINING_DTYPE}" == "fp16" ]]; then fi command="$command $torch_run_cmd" +if [[ $CHECKPOINT_RESUME_TEST -eq 1 ]]; then + command="$command; rm -rf $CHECKPOINT_PATH/iter_0000100; echo 50 > $CHECKPOINT_PATH/latest_checkpointed_iteration.txt; $torch_run_cmd" +fi echo "-------------------- THE FINAL PRETRAIN SCRIPT COMMAND THAT WILL BE RUN ------------" echo "$command" echo "-----------------------------------------------------------------------------" diff --git a/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_resume_checkpoint_test.sh b/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_resume_checkpoint_test.sh deleted file mode 100755 index cb9ccf68f0..0000000000 --- a/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_resume_checkpoint_test.sh +++ /dev/null @@ -1,119 +0,0 @@ -#! 
/bin/bash -echo "------ARGUMENTS LIST --------" -for ARGUMENT in "$@" -do - KEY=$(echo $ARGUMENT | cut -f1 -d=) - - KEY_LENGTH=${#KEY} - VALUE="${ARGUMENT:$KEY_LENGTH+1}" - - export "$KEY"="$VALUE" - echo "$KEY=$VALUE" -done -echo "---------------------------------" - -if [[ -z $VOCAB_FILE ]]; then VOCAB_FILE="/workspace/data/gpt3_data/vocab.json" ; fi -if [[ -z $MERGE_FILE ]]; then MERGE_FILE="/workspace/data/gpt3_data/merges.txt" ; fi - -GPUS_PER_NODE=8 -# Change for multinode config -MASTER_ADDR=localhost -MASTER_PORT=6000 -NODE_RANK=0 -WORLD_SIZE=$(($GPUS_PER_NODE*$NUM_NODES)) -export CUDA_DEVICE_MAX_CONNECTIONS=1 - - -# Runs the "345M" parameter model -DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NUM_NODES" - -# Run for 100 iterations and save checkpoint at 50 -torchrun $DISTRIBUTED_ARGS \ - pretrain_gpt.py \ - --use-checkpoint-args \ - --use-checkpoint-opt_param-scheduler \ - --num-layers 12 \ - --hidden-size 512 \ - --num-attention-heads 8 \ - --log-params-norm \ - --log-num-zeros-in-grad \ - --log-validation-ppl-to-tensorboard \ - --log-timers-to-tensorboard \ - --tensorboard-dir ${TENSORBOARD_DIR} \ - --micro-batch-size 4 \ - --global-batch-size 32 \ - --seq-length 1024 \ - --max-position-embeddings 1024 \ - --train-iters 100 \ - --timing-log-level 2 \ - --lr-decay-iters 320000 \ - --save $CHECKPOINT_PATH \ - --load $CHECKPOINT_PATH \ - --data-path $DATA_PATH \ - --vocab-file $VOCAB_FILE \ - --merge-file $MERGE_FILE \ - --split 949,50,1 \ - --distributed-backend nccl \ - --lr 0.00015 \ - --lr-decay-style cosine \ - --min-lr 1.0e-5 \ - --weight-decay 1e-2 \ - --clip-grad 1.0 \ - --lr-warmup-fraction .01 \ - --log-interval 1 \ - --save-interval 50 \ - --eval-interval 1000 \ - --eval-iters 10 \ - --tensor-model-parallel-size $TP_SIZE \ - --pipeline-model-parallel-size $PP_SIZE \ - --no-gradient-accumulation-fusion \ - --no-bias-swiglu-fusion \ - --no-rope-fusion \ - ${DATA_CACHE:+--data-cache-path "$DATA_CACHE"} \ - --fp16 - -echo 50 > $CHECKPOINT_PATH/latest_checkpointed_iteration.txt - -# Resume from 50th iteration ckpt and continue to 100 iterations -torchrun $DISTRIBUTED_ARGS \ - pretrain_gpt.py \ - --use-checkpoint-args \ - --use-checkpoint-opt_param-scheduler \ - --num-layers 12 \ - --hidden-size 512 \ - --num-attention-heads 8 \ - --log-params-norm \ - --log-num-zeros-in-grad \ - --log-validation-ppl-to-tensorboard \ - --log-timers-to-tensorboard \ - --tensorboard-dir ${TENSORBOARD_DIR} \ - --micro-batch-size 4 \ - --global-batch-size 32 \ - --seq-length 1024 \ - --max-position-embeddings 1024 \ - --train-iters 100 \ - --timing-log-level 2 \ - --lr-decay-iters 320000 \ - --save $CHECKPOINT_PATH \ - --load $CHECKPOINT_PATH \ - --data-path $DATA_PATH \ - --vocab-file $VOCAB_FILE \ - --merge-file $MERGE_FILE \ - --split 949,50,1 \ - --distributed-backend nccl \ - --lr 0.00015 \ - --lr-decay-style cosine \ - --min-lr 1.0e-5 \ - --weight-decay 1e-2 \ - --clip-grad 1.0 \ - --lr-warmup-fraction .01 \ - --log-interval 1 \ - --save-interval 10000 \ - --eval-interval 1000 \ - --eval-iters 10 \ - --tensor-model-parallel-size $TP_SIZE \ - --pipeline-model-parallel-size $PP_SIZE \ - --no-gradient-accumulation-fusion \ - ${DATA_CACHE:+--data-cache-path "$DATA_CACHE"} \ - --fp16 - diff --git a/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh b/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh index c5961c8f17..07439bc56f 100755 --- a/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh +++ 
b/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh @@ -53,6 +53,18 @@ if [[ $USE_TE -eq 1 ]]; then else echo "Running with local transformer implementation ..." fi + +if [[ $CHECKPOINT_RESUME_TEST -eq 1 ]]; then + echo "Running checkpoint resume test..." + __SAVE_INTERVAL=50 + ADDITIONAL_PARAMS+=" --use-checkpoint-args --use-checkpoint-opt_param-scheduler" + if [[ $MAX_STEPS -ne 100 ]]; then + echo "Overriding MAX_STEPS=100" + MAX_STEPS=100 + fi +else + __SAVE_INTERVAL=10000 # inf +fi set +x # Runs the "345M" parameter model DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NUM_NODES" @@ -88,7 +100,7 @@ torch_run_cmd="torchrun $DISTRIBUTED_ARGS \ --clip-grad 1.0 \ --lr-warmup-fraction .01 \ --log-interval 1 \ - --save-interval 10000 \ + --save-interval $__SAVE_INTERVAL \ --eval-interval 1000 \ --eval-iters 10 \ --transformer-impl $TRANSFORMER_IMPL \ @@ -108,6 +120,9 @@ if [[ "${TRAINING_DTYPE}" == "fp16" ]]; then fi command="$command $torch_run_cmd" +if [[ $CHECKPOINT_RESUME_TEST -eq 1 ]]; then + command="$command; rm -rf $CHECKPOINT_PATH/iter_0000100; echo 50 > $CHECKPOINT_PATH/latest_checkpointed_iteration.txt; $torch_run_cmd" +fi echo "-------------------- THE FINAL PRETRAIN SCRIPT COMMAND THAT WILL BE RUN ------------" echo "$command" echo "-----------------------------------------------------------------------------" diff --git a/tests/functional_tests/test_scripts/retro/pretrain_retro_distributed_resume_checkpoint_test.sh b/tests/functional_tests/test_scripts/retro/pretrain_retro_distributed_resume_checkpoint_test.sh deleted file mode 100755 index c62fea1aad..0000000000 --- a/tests/functional_tests/test_scripts/retro/pretrain_retro_distributed_resume_checkpoint_test.sh +++ /dev/null @@ -1,127 +0,0 @@ -#! /bin/bash - -echo "------ARGUMENTS LIST --------" -for ARGUMENT in "$@" -do - KEY=$(echo $ARGUMENT | cut -f1 -d=) - - KEY_LENGTH=${#KEY} - VALUE="${ARGUMENT:$KEY_LENGTH+1}" - - export "$KEY"="$VALUE" - echo "$KEY=$VALUE" -done -echo "---------------------------------" - -set -x -if [[ -z $MBS ]]; then MBS=4; fi - -GPUS_PER_NODE=8 -# Change for multinode config -MASTER_ADDR=localhost -MASTER_PORT=6000 -NODE_RANK=0 -WORLD_SIZE=$(($GPUS_PER_NODE*$NUM_NODES)) -export CUDA_DEVICE_MAX_CONNECTIONS=1 - -TRANSFORMER_IMPL=local -TRAINING_DTYPE=bf16 - -if [[ $USE_CORE -eq 1 ]]; then - echo "Running using megatron core" - TRANSFORMER_IMPL=local - TRAINING_DTYPE=bf16 - command="$command export NVTE_ALLOW_NONDETERMINISTIC_ALGO=0;" - USE_MCORE=1 - export NVTE_ALLOW_NONDETERMINISTIC_ALGO=0 -fi - -if [[ $USE_TE -eq 1 ]]; then - echo "Running with TransformerEngine ..." - TRANSFORMER_IMPL=transformer_engine - TRAINING_DTYPE=bf16 -else - echo "Running with local transformer implementation ..." -fi -set +x - -# Runs the "345M" parameter model -DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NUM_NODES" - -# Arguments. 
-ARGS=" \ - --recompute-activations \ - --use-flash-attn \ - --apply-layernorm-1p \ - --untie-embeddings-and-output-weights \ - --disable-bias-linear \ - --no-position-embedding \ - --use-rotary-position-embeddings \ - --rotary-percent 0.5 \ - --swiglu \ - --attention-dropout 0.0 \ - --hidden-dropout 0.0 \ - --exit-duration-in-mins 220 \ - --tensor-model-parallel-size $TP_SIZE \ - --pipeline-model-parallel-size 1 \ - --num-layers 24 \ - --hidden-size 1024 \ - --num-attention-heads 16 \ - --seq-length 2048 \ - --max-position-embeddings 2048 \ - --micro-batch-size $MBS \ - --global-batch-size 256 \ - --train-samples 100000 \ - --lr-decay-samples 99000 \ - --lr-warmup-samples 1000 \ - --lr 2.5e-5 \ - --min-lr 2.5e-6 \ - --lr-decay-style cosine \ - --log-interval 5 \ - --eval-iters 100 \ - --eval-interval 2000 \ - --tokenizer-type GPT2BPETokenizer \ - --vocab-file /workspace/data/retro_data/vocab/gpt2-vocab.json \ - --merge-file /workspace/data/retro_data/vocab/gpt2-merges.txt \ - --data-path /workspace/data/retro_data/inputs/wiki-200k_text_document \ - --split 98,2,0 \ - --clip-grad 1.0 \ - --weight-decay 0.1 \ - --adam-beta1 0.9 \ - --adam-beta2 0.95 \ - --init-method-std 0.007 \ - --log-params-norm \ - --log-num-zeros-in-grad \ - --log-validation-ppl-to-tensorboard \ - --log-timers-to-tensorboard \ - --tensorboard-dir ${TENSORBOARD_DIR} \ - --save-interval 50 \ - --save $CHECKPOINT_PATH \ - --load $CHECKPOINT_PATH \ - --bf16 \ - --transformer-impl $TRANSFORMER_IMPL \ - --${TRAINING_DTYPE} \ - ${USE_MCORE:+--use-mcore-models} \ - ${ADDITIONAL_PARAMS:+$ADDITIONAL_PARAMS} \ - --retro-workdir /workspace/data/retro_data/neighbors - --retro-add-retriever \ - --num-workers 32 \ -" - -pip install h5py -pip install transformers -pip install faiss-gpu - -# Run for 100 iterations and save checkpoint at 50 -torchrun $DISTRIBUTED_ARGS \ - pretrain_retro.py \ - $ARGS \ - --exit-interval 100 - -echo 50 > $CHECKPOINT_PATH/latest_checkpointed_iteration.txt - -# Resume from 50th iteration ckpt and continue to 100 iterations -torchrun $DISTRIBUTED_ARGS \ - pretrain_retro.py \ - $ARGS \ - --exit-interval 50 diff --git a/tests/functional_tests/test_scripts/retro/pretrain_retro_distributed_test.sh b/tests/functional_tests/test_scripts/retro/pretrain_retro_distributed_test.sh index fe3271cb46..7e1a81ad82 100755 --- a/tests/functional_tests/test_scripts/retro/pretrain_retro_distributed_test.sh +++ b/tests/functional_tests/test_scripts/retro/pretrain_retro_distributed_test.sh @@ -44,11 +44,23 @@ if [[ $USE_TE -eq 1 ]]; then else echo "Running with local transformer implementation ..." fi + +if [[ $CHECKPOINT_RESUME_TEST -eq 1 ]]; then + echo "Running checkpoint resume test..." 
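+  # Checkpoint-resume flow: the first run trains to MAX_STEPS=100, saving every 50
+  # iterations; the command assembled further below then deletes iter_0000100,
+  # rewinds latest_checkpointed_iteration.txt to 50, and reruns pretrain_retro.py so
+  # the second run restarts from the iteration-50 checkpoint.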
+ __SAVE_INTERVAL=50 + if [[ $MAX_STEPS -ne 100 ]]; then + echo "Overriding MAX_STEPS=100" + MAX_STEPS=100 + fi +else + __SAVE_INTERVAL=10000 # inf +fi set +x # Runs the "345M" parameter model DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NUM_NODES" -ARGS=" \ +build_args() { + ARGS=" \ --exit-interval $MAX_STEPS \ \ --recompute-activations \ @@ -96,7 +108,7 @@ ARGS=" \ --log-validation-ppl-to-tensorboard \ --log-timers-to-tensorboard \ --tensorboard-dir ${TENSORBOARD_DIR} \ - --save-interval 10000 \ + --save-interval $__SAVE_INTERVAL \ --save $CHECKPOINT_PATH \ --load $CHECKPOINT_PATH \ --bf16 \ @@ -108,12 +120,23 @@ ARGS=" \ --retro-add-retriever \ --num-workers 32 \ " +} +build_args torch_run_cmd="torchrun $DISTRIBUTED_ARGS \ pretrain_retro.py \ ${ARGS}" command="$command $torch_run_cmd" + +if [[ $CHECKPOINT_RESUME_TEST -eq 1 ]]; then + MAX_STEPS=50 + build_args + torch_run_cmd="torchrun $DISTRIBUTED_ARGS \ + pretrain_retro.py \ + ${ARGS}" + command="$command; rm -rf $CHECKPOINT_PATH/iter_0000100; echo 50 > $CHECKPOINT_PATH/latest_checkpointed_iteration.txt; $torch_run_cmd" +fi echo "-------------------- THE FINAL PRETRAIN SCRIPT COMMAND THAT WILL BE RUN ------------" echo "$command" echo "-----------------------------------------------------------------------------" diff --git a/tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_resume_checkpoint_test.sh b/tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_resume_checkpoint_test.sh deleted file mode 100755 index dc5bdbab3b..0000000000 --- a/tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_resume_checkpoint_test.sh +++ /dev/null @@ -1,175 +0,0 @@ -#! /bin/bash -echo "------ARGUMENTS LIST --------" -for ARGUMENT in "$@" -do - KEY=$(echo $ARGUMENT | cut -f1 -d=) - - KEY_LENGTH=${#KEY} - VALUE="${ARGUMENT:$KEY_LENGTH+1}" - - export "$KEY"="$VALUE" - echo "$KEY=$VALUE" -done -echo "---------------------------------" - -set -x -if [[ -z $MBS ]]; then MBS=4; fi -if [[ -z $GBS ]]; then GBS=32; fi -if [[ -z $VOCAB_PATH ]]; then VOCAB_PATH="/workspace/data/t5_data/bert-large-cased-vocab.txt"; fi - -GPUS_PER_NODE=8 -# Change for multinode config -MASTER_ADDR=localhost -MASTER_PORT=6000 -NODE_RANK=0 -WORLD_SIZE=$(($GPUS_PER_NODE*$NUM_NODES)) - -command="export CUDA_DEVICE_MAX_CONNECTIONS=1;" - -TRANSFORMER_IMPL=local -TRAINING_DTYPE=fp16 - -if [[ $USE_CORE -eq 1 ]]; then - echo "Running using megatron core" - TRANSFORMER_IMPL=local - TRAINING_DTYPE=bf16 - command="$command export NVTE_ALLOW_NONDETERMINISTIC_ALGO=0;" - USE_MCORE=1 - export NVTE_ALLOW_NONDETERMINISTIC_ALGO=0 -fi - -if [[ $NO_FA -eq 1 ]]; then - echo "Turn off flash attention environment variable" - export NVTE_FLASH_ATTN=0 - export NVTE_FUSED_ATTN=0 -fi - -if [[ $USE_TE -eq 1 ]]; then - echo "Running with TransformerEngine ..." - TRANSFORMER_IMPL=transformer_engine - TRAINING_DTYPE=bf16 -else - echo "Running with local transformer implementation ..." 
-fi -set +x - -# install neccessary library -pip install pydantic==2.2.1 - -# Runs the "220M" parameter model -DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NUM_NODES" - -# Run for 100 iterations and save checkpoint at 50 -torch_run_cmd="torchrun $DISTRIBUTED_ARGS \ - pretrain_t5.py \ - --encoder-num-layers 12 \ - --decoder-num-layers 12 \ - --hidden-size 768 \ - --num-attention-heads 12 \ - --kv-channels 64 \ - --ffn-hidden-size 3072 \ - --encoder-seq-length 512 \ - --decoder-seq-length 128 \ - --max-position-embeddings 512 \ - --tensor-model-parallel-size $TP_SIZE \ - --pipeline-model-parallel-size $PP_SIZE \ - --micro-batch-size ${MBS:-4} \ - --global-batch-size ${GBS:-32} \ - --lr 0.0001 \ - --train-iters 100 \ - --lr-decay-iters 100 \ - --lr-decay-style linear \ - --min-lr 0.00001 \ - --weight-decay 1e-2 \ - --lr-warmup-fraction .01 \ - --clip-grad 1.0 \ - --${TRAINING_DTYPE} \ - --vocab-extra-ids 100 \ - --init-method-std 0.015 \ - --transformer-impl $TRANSFORMER_IMPL \ - --use-mcore-models \ - --data-path $DATA_PATH \ - --vocab-file $VOCAB_PATH \ - --tokenizer-type BertWordPieceCase \ - --split 99982,9,9 \ - --save $CHECKPOINT_PATH \ - --load $CHECKPOINT_PATH \ - --tensorboard-dir ${TENSORBOARD_DIR} \ - --log-params-norm \ - --log-num-zeros-in-grad \ - --log-validation-ppl-to-tensorboard \ - --log-timers-to-tensorboard \ - --timing-log-level 2 \ - --log-interval 1 \ - --save-interval 50 \ - --eval-interval 1000 \ - --eval-iters 10 \ - --distributed-backend nccl \ - ${DATA_CACHE:+--data-cache-path "$DATA_CACHE"} \ - ${ADDITIONAL_PARAMS:+$ADDITIONAL_PARAMS}" - -command1="$command $torch_run_cmd" -echo "-------------------- THE FINAL PRETRAIN SCRIPT COMMAND THAT WILL BE RUN ------------" -echo "$command1" -echo "-----------------------------------------------------------------------------" -echo "$command1" >> $SCRIPTS_DIR/pretrain_t5_distributed_command.sh -eval $command1 - -echo 50 > $CHECKPOINT_PATH/latest_checkpointed_iteration.txt - -# Resume from 50th iteration ckpt and continue to 100 iterations -torch_run_cmd="torchrun $DISTRIBUTED_ARGS \ - pretrain_t5.py \ - --encoder-num-layers 12 \ - --decoder-num-layers 12 \ - --hidden-size 768 \ - --num-attention-heads 12 \ - --kv-channels 64 \ - --ffn-hidden-size 3072 \ - --encoder-seq-length 512 \ - --decoder-seq-length 128 \ - --max-position-embeddings 512 \ - --tensor-model-parallel-size $TP_SIZE \ - --pipeline-model-parallel-size $PP_SIZE \ - --micro-batch-size ${MBS:-4} \ - --global-batch-size ${GBS:-32} \ - --lr 0.0001 \ - --train-iters 100 \ - --lr-decay-iters 100 \ - --lr-decay-style linear \ - --min-lr 0.00001 \ - --weight-decay 1e-2 \ - --lr-warmup-fraction .01 \ - --clip-grad 1.0 \ - --${TRAINING_DTYPE} \ - --vocab-extra-ids 100 \ - --init-method-std 0.015 \ - --transformer-impl $TRANSFORMER_IMPL \ - --use-mcore-models \ - --data-path $DATA_PATH \ - --vocab-file $VOCAB_PATH \ - --tokenizer-type BertWordPieceCase \ - --split 99982,9,9 \ - --save $CHECKPOINT_PATH \ - --load $CHECKPOINT_PATH \ - --tensorboard-dir ${TENSORBOARD_DIR} \ - --log-params-norm \ - --log-num-zeros-in-grad \ - --log-validation-ppl-to-tensorboard \ - --log-timers-to-tensorboard \ - --timing-log-level 2 \ - --log-interval 1 \ - --save-interval 50 \ - --eval-interval 1000 \ - --eval-iters 10 \ - --distributed-backend nccl \ - ${DATA_CACHE:+--data-cache-path "$DATA_CACHE"} \ - ${ADDITIONAL_PARAMS:+$ADDITIONAL_PARAMS}" - -command2="$command $torch_run_cmd" -echo "-------------------- THE FINAL PRETRAIN SCRIPT COMMAND THAT WILL BE RUN ------------" 
-echo "$command2" -echo "-----------------------------------------------------------------------------" - -echo "$command2" >> $SCRIPTS_DIR/pretrain_t5_distributed_command.sh -eval $command2 diff --git a/tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_test.sh b/tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_test.sh index fae02fb755..e84fda8c19 100755 --- a/tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_test.sh +++ b/tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_test.sh @@ -51,6 +51,17 @@ if [[ $USE_TE -eq 1 ]]; then else echo "Running with local transformer implementation ..." fi + +if [[ $CHECKPOINT_RESUME_TEST -eq 1 ]]; then + echo "Running checkpoint resume test..." + __SAVE_INTERVAL=50 + if [[ $MAX_STEPS -ne 100 ]]; then + echo "Overriding MAX_STEPS=100" + MAX_STEPS=100 + fi +else + __SAVE_INTERVAL=10000 # inf +fi set +x # install neccessary library @@ -100,7 +111,7 @@ torch_run_cmd="torchrun $DISTRIBUTED_ARGS \ --log-timers-to-tensorboard \ --timing-log-level 2 \ --log-interval 1 \ - --save-interval 5000 \ + --save-interval $__SAVE_INTERVAL \ --eval-interval 1000 \ --eval-iters 10 \ --distributed-backend nccl \ @@ -108,6 +119,9 @@ torch_run_cmd="torchrun $DISTRIBUTED_ARGS \ ${ADDITIONAL_PARAMS:+$ADDITIONAL_PARAMS}" command="$command $torch_run_cmd" +if [[ $CHECKPOINT_RESUME_TEST -eq 1 ]]; then + command="$command; rm -rf $CHECKPOINT_PATH/iter_0000100; echo 50 > $CHECKPOINT_PATH/latest_checkpointed_iteration.txt; $torch_run_cmd" +fi echo "-------------------- THE FINAL PRETRAIN SCRIPT COMMAND THAT WILL BE RUN ------------" echo "$command" echo "-----------------------------------------------------------------------------" From 1fcdc95ed996aa6eaeb1626a12f53efb86ba3e86 Mon Sep 17 00:00:00 2001 From: Tuomas Rintamaki Date: Tue, 27 Feb 2024 15:22:26 -0800 Subject: [PATCH 284/296] Mcore mock multimodal dataset --- megatron/core/datasets/gpt_dataset.py | 2 +- megatron/core/datasets/multimodal_dataset.py | 58 +++++++++++++++++++ tests/unit_tests/data/__init__.py | 0 ...pt_dataset.py => test_mock_gpt_dataset.py} | 0 .../data/test_multimodal_dataset.py | 33 +++++++++++ 5 files changed, 92 insertions(+), 1 deletion(-) create mode 100644 megatron/core/datasets/multimodal_dataset.py create mode 100644 tests/unit_tests/data/__init__.py rename tests/unit_tests/data/{test_builder_mock_gpt_dataset.py => test_mock_gpt_dataset.py} (100%) create mode 100644 tests/unit_tests/data/test_multimodal_dataset.py diff --git a/megatron/core/datasets/gpt_dataset.py b/megatron/core/datasets/gpt_dataset.py index a5c4083636..81bde5dc88 100644 --- a/megatron/core/datasets/gpt_dataset.py +++ b/megatron/core/datasets/gpt_dataset.py @@ -57,7 +57,7 @@ class MockGPTDataset(MockDataset): """The mock GPT dataset """ - def __getitem__(self, idx: int) -> Dict[str, numpy.ndarray]: + def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]: """Return a sequence_length + 1 token sequence consisting of the following: - (1) S, the RNG length-sentinel in the range [0, sequence_length) - (S) tokens diff --git a/megatron/core/datasets/multimodal_dataset.py b/megatron/core/datasets/multimodal_dataset.py new file mode 100644 index 0000000000..3cfd011c77 --- /dev/null +++ b/megatron/core/datasets/multimodal_dataset.py @@ -0,0 +1,58 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. 
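+"""Mock multimodal dataset utilities.
+
+MultimodalDatasetConfig extends GPTDatasetConfig with an image height/width, and
+MockMultimodalDataset extends MockGPTDataset by attaching a zero-filled dummy image
+to every mock text sample.
+"""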
+
+from dataclasses import dataclass
+from typing import Dict
+
+import numpy
+import torch
+
+from megatron.core.datasets.gpt_dataset import GPTDatasetConfig, MockGPTDataset
+
+
+@dataclass
+class MultimodalDatasetConfig(GPTDatasetConfig):
+    """Configuration object for Megatron Core Multimodal datasets.
+
+
+    Note: This is unused at the moment and may be missing features. Follow-up changes will use this.
+
+    Attributes:
+        image_h (int): Image height.
+        image_w (int): Image width.
+    """
+
+    image_h: int = None
+    image_w: int = None
+
+    def __post_init__(self) -> None:
+        super().__post_init__()
+
+        assert self.image_h is not None
+        assert self.image_w is not None
+
+
+class MockMultimodalDataset(MockGPTDataset):
+    """Mock multimodal dataset.
+
+
+    This is unused at the moment and may be missing features. Follow-up changes will use this.
+    """
+
+    def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:
+        """Return a sample that contains a dummy image, text sequence and the associated labels and loss and attention masks.
+
+        Args:
+            idx (int): The integer seed for mock data generation.
+
+        Returns:
+            Dict[str, torch.Tensor]: The mock data.
+        """
+        # Get a text sample.
+        sample = super().__getitem__(idx)
+
+        # Add mock input image.
+        sample["image"] = torch.zeros(
+            (3, self.config.image_h, self.config.image_w), dtype=torch.float32
+        )
+
+        return sample
diff --git a/tests/unit_tests/data/__init__.py b/tests/unit_tests/data/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/unit_tests/data/test_builder_mock_gpt_dataset.py b/tests/unit_tests/data/test_mock_gpt_dataset.py
similarity index 100%
rename from tests/unit_tests/data/test_builder_mock_gpt_dataset.py
rename to tests/unit_tests/data/test_mock_gpt_dataset.py
diff --git a/tests/unit_tests/data/test_multimodal_dataset.py b/tests/unit_tests/data/test_multimodal_dataset.py
new file mode 100644
index 0000000000..70c6fbf63c
--- /dev/null
+++ b/tests/unit_tests/data/test_multimodal_dataset.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
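+# Smoke test: building MockMultimodalDataset through BlendedMegatronDatasetBuilder
+# should yield samples that contain the usual mock GPT "tokens" entry plus a dummy
+# "image" tensor of shape [3, image_h, image_w].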
+ +from types import SimpleNamespace + +import torch + +from megatron.core.datasets.blended_megatron_dataset_builder import BlendedMegatronDatasetBuilder +from megatron.core.datasets.multimodal_dataset import MockMultimodalDataset, MultimodalDatasetConfig + + +def test_mock_multimodal_dataset(): + config = MultimodalDatasetConfig( + is_built_on_rank=lambda: True, + random_seed=1234, + sequence_length=1024, + mock=True, + reset_position_ids=False, + reset_attention_mask=False, + eod_mask_loss=True, + tokenizer=SimpleNamespace(), + image_h=336, + image_w=336, + ) + + datasets = BlendedMegatronDatasetBuilder( + MockMultimodalDataset, [None, None, None], config + ).build() + + for ds in datasets: + sample = ds[0] + assert "image" in sample + assert sample["image"].shape == torch.Size([3, 336, 336]) + assert "tokens" in sample From d66807759cb070b09c5eda9f31f0663a9858b50e Mon Sep 17 00:00:00 2001 From: Deepak Narayanan Date: Tue, 5 Dec 2023 10:56:54 -0800 Subject: [PATCH 285/296] Fix NaN checking in grads: should be performed before data-parallel communication Compute norm once per batch (instead of once per microbatch) and once per bucket (instead of once per param) --- megatron/arguments.py | 7 +++ .../distributed/distributed_data_parallel.py | 8 +++- megatron/core/distributed/grad_buffer.py | 19 ++++++++ megatron/core/optimizer/__init__.py | 7 +-- megatron/core/optimizer/clip_grads.py | 18 +------- megatron/core/optimizer/distrib_optimizer.py | 3 -- megatron/core/optimizer/optimizer.py | 44 ++++--------------- megatron/core/optimizer/optimizer_config.py | 3 -- megatron/training.py | 3 +- 9 files changed, 45 insertions(+), 67 deletions(-) diff --git a/megatron/arguments.py b/megatron/arguments.py index bffb098818..d481a0781c 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -181,6 +181,13 @@ def validate_args(args, defaults={}): if args.fp16: assert not args.bf16 args.params_dtype = torch.half + # Turn off checking for NaNs in loss and grads if using dynamic loss scaling, + # where NaNs in grads / loss are signal to the loss scaler. + if not args.loss_scale: + args.check_for_nan_in_loss_and_grad = False + if args.rank == 0: + print('WARNING: Setting args.check_for_nan_in_loss_and_grad to False since ' + 'dynamic loss scaling is being used') if args.bf16: assert not args.fp16 args.params_dtype = torch.bfloat16 diff --git a/megatron/core/distributed/distributed_data_parallel.py b/megatron/core/distributed/distributed_data_parallel.py index e3c8ece83a..d8cc637236 100644 --- a/megatron/core/distributed/distributed_data_parallel.py +++ b/megatron/core/distributed/distributed_data_parallel.py @@ -33,6 +33,7 @@ class DistributedDataParallel(MegatronModule): disable_bucketing: If true, force assign all parameters to a single bucket. If false, use standard bucketing policy: assign parameters to smaller buckets and all-reduce per bucket _if_ overlap_grad_reduce is True and pp_rank is 0. + check_for_nan_in_grad: If true, check if local grad norm is NaN. 
""" @@ -46,6 +47,7 @@ def __init__( use_distributed_optimizer: bool, expert_data_parallel_group: Optional[torch.distributed.ProcessGroup] = None, disable_bucketing: bool = False, + check_for_nan_in_grad: bool = False, bucket_size: int = 40000000, ): super().__init__(config=config) @@ -66,6 +68,8 @@ def __init__( bucket_size = None if disable_bucketing: bucket_size = None + + self.check_for_nan_in_grad = check_for_nan_in_grad self.bucket_size = bucket_size self.module = module @@ -115,7 +119,8 @@ def allocate_grad_buffers_for_parameters( param_to_name, self.overlap_grad_reduce, self.use_distributed_optimizer, - gradient_scaling_factor=gradient_scaling_factor, + gradient_scaling_factor, + self.check_for_nan_in_grad, ) ) for param in params: @@ -176,6 +181,7 @@ def param_hook(*unused): ): param.main_grad.add_(param.grad.data) param.grad = None + if self.overlap_grad_reduce: param_to_grad_buffer[param].register_grad_ready(param) diff --git a/megatron/core/distributed/grad_buffer.py b/megatron/core/distributed/grad_buffer.py index 949bc9468c..17d77c270d 100644 --- a/megatron/core/distributed/grad_buffer.py +++ b/megatron/core/distributed/grad_buffer.py @@ -1,6 +1,7 @@ # Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. import math +import os from logging import getLogger from typing import Dict, List @@ -44,6 +45,7 @@ class Bucket: gradient_scaling_factor: This factor is utilized to scale gradients prior to their communication. Its application is twofold: it facilitates the averaging of gradients and the scaling of gradients in the context of the Mixture of Experts (MoE) model. + check_for_nan_in_grad: If true, check if local grad norm is NaN. """ def __init__( @@ -57,6 +59,7 @@ def __init__( overlap_grad_reduce: bool, use_distributed_optimizer: bool, gradient_scaling_factor: float, + check_for_nan_in_grad: bool, ): # State for bookkeeping: params is the set of parameters this bucket is # responsible for, params_with_grad is the set of parameters with grads @@ -76,6 +79,7 @@ def __init__( self.overlap_grad_reduce = overlap_grad_reduce self.use_distributed_optimizer = use_distributed_optimizer self.gradient_scaling_factor = gradient_scaling_factor + self.check_for_nan_in_grad = check_for_nan_in_grad self.reset() @@ -100,6 +104,17 @@ def start_grad_sync(self): self.communication_handle is None and not self.communication_issued ), 'Should not have multiple communication calls in flight at once' + # Make sure norm of grads in bucket are not NaN + # prior to data-parallel all-reduce / reduce-scatter. + if self.check_for_nan_in_grad: + global_rank = torch.distributed.get_rank() + norm = self.data.norm(p=2) + assert not norm.isnan(), ( + f'Rank {global_rank}: found NaN in local grad norm in ' + f'backward pass before data-parallel communication collective. ' + f'Device: {torch.cuda.current_device()}, node: {os.uname()[1]}' + ) + self.data *= self.gradient_scaling_factor # Use async_op only when overlap_grad_reduce is True. if self.use_distributed_optimizer: @@ -173,6 +188,7 @@ class GradBuffer: gradient_scaling_factor: This factor is utilized to scale gradients prior to their communication. Its application is twofold: it facilitates the averaging of gradients and the scaling of gradients in the context of the Mixture of Experts (MoE) model. + check_for_nan_in_grad: If true, check if local grad norm is NaN. 
""" def __init__( @@ -185,6 +201,7 @@ def __init__( overlap_grad_reduce: bool, use_distributed_optimizer: bool, gradient_scaling_factor: float, + check_for_nan_in_grad: bool, ): # Check that params are unique. @@ -203,6 +220,7 @@ def __init__( self.overlap_grad_reduce = overlap_grad_reduce self.use_distributed_optimizer = use_distributed_optimizer self.gradient_scaling_factor = gradient_scaling_factor + self.check_for_nan_in_grad = check_for_nan_in_grad self.is_last_microbatch = True # Data structures to store underlying buckets and relevant indexing data. @@ -384,6 +402,7 @@ def _set_bucket( overlap_grad_reduce=self.overlap_grad_reduce, use_distributed_optimizer=self.use_distributed_optimizer, gradient_scaling_factor=self.gradient_scaling_factor, + check_for_nan_in_grad=self.check_for_nan_in_grad, ) self.buckets.append(bucket) for bucket_param in bucket_params: diff --git a/megatron/core/optimizer/__init__.py b/megatron/core/optimizer/__init__.py index b3461f9032..231d986fb7 100644 --- a/megatron/core/optimizer/__init__.py +++ b/megatron/core/optimizer/__init__.py @@ -162,7 +162,6 @@ def get_megatron_optimizer_based_on_param_groups( optimizer, config.clip_grad, config.log_num_zeros_in_grad, - config.check_for_nan_in_loss_and_grad, params_have_main_grad, config.fp16, config.bf16, @@ -184,11 +183,7 @@ def get_megatron_optimizer_based_on_param_groups( # FP32. return FP32Optimizer( - optimizer, - config.clip_grad, - config.log_num_zeros_in_grad, - config.check_for_nan_in_loss_and_grad, - params_have_main_grad, + optimizer, config.clip_grad, config.log_num_zeros_in_grad, params_have_main_grad, ) diff --git a/megatron/core/optimizer/clip_grads.py b/megatron/core/optimizer/clip_grads.py index 4ad2445a89..0f94754c9d 100644 --- a/megatron/core/optimizer/clip_grads.py +++ b/megatron/core/optimizer/clip_grads.py @@ -14,12 +14,7 @@ def clip_grad_norm_fp32( - parameters, - grads_for_norm, - max_norm, - check_for_nan_in_grad, - norm_type=2, - model_parallel_group=None, + parameters, grads_for_norm, max_norm, norm_type=2, model_parallel_group=None, ): """Clips gradient norm of an iterable of parameters whose gradients are in fp32. @@ -34,7 +29,6 @@ def clip_grad_norm_fp32( grads_for_norm (Iterable[Tensor]): an iterable of Tensors or a single Tensor that will be used for calculating the grad norm. max_norm (float or int): max norm of the gradients. - check_for_nan_in_grad (bool): check if gradients have a NaN. norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for infinity norm. model_parallel_group (group): given the nature of the distributed @@ -95,16 +89,6 @@ def clip_grad_norm_fp32( grad_norm = torch.norm(grad, norm_type) total_norm += grad_norm ** norm_type - # Check individual rank grad norms are not NaN - # prior to model-parallel all-reduce. - if check_for_nan_in_grad: - global_rank = torch.distributed.get_rank() - assert not total_norm.isnan(), ( - f'Rank {global_rank}: found NaN in local grad norm in ' - f'backwards pass. Device: {torch.cuda.current_device()}, ' - f'node: {os.uname()[1]}' - ) - # Sum across all model-parallel GPUs. 
torch.distributed.all_reduce( total_norm, op=torch.distributed.ReduceOp.SUM, group=model_parallel_group diff --git a/megatron/core/optimizer/distrib_optimizer.py b/megatron/core/optimizer/distrib_optimizer.py index 1423a6abb6..3eb66d7b90 100644 --- a/megatron/core/optimizer/distrib_optimizer.py +++ b/megatron/core/optimizer/distrib_optimizer.py @@ -45,7 +45,6 @@ class DistributedOptimizer(MixedPrecisionOptimizer): clip_grad: clip gradeints with this global L2 norm. Note that clipping is ignored if clip_grad == 0 log_num_zeros_in_grad: return number of zeros in the gradients. - check_for_nan_in_grad: check if gradients have a NaN. params_have_main_grad: flag indicating if parameters have a `main_grad` field. If this is set, we are assuming that the model parameters are store in the `main_grad` @@ -374,7 +373,6 @@ def __init__( optimizer, clip_grad, log_num_zeros_in_grad, - check_for_nan_in_grad, params_have_main_grad, fp16, bf16, @@ -399,7 +397,6 @@ def __init__( optimizer, clip_grad, log_num_zeros_in_grad, - check_for_nan_in_grad, params_have_main_grad, fp16, bf16, diff --git a/megatron/core/optimizer/optimizer.py b/megatron/core/optimizer/optimizer.py index a3a431d6ae..5caa6b96d5 100644 --- a/megatron/core/optimizer/optimizer.py +++ b/megatron/core/optimizer/optimizer.py @@ -51,12 +51,7 @@ def _multi_tensor_copy_this_to_that(this, that, overflow_buf=None): class MegatronOptimizer(ABC): def __init__( - self, - optimizer, - clip_grad, - log_num_zeros_in_grad, - check_for_nan_in_grad, - params_have_main_grad, + self, optimizer, clip_grad, log_num_zeros_in_grad, params_have_main_grad, ): """Input optimizer is the base optimizer for example Adam.""" @@ -65,7 +60,6 @@ def __init__( # Set gradient clipping and logging params. self.clip_grad = clip_grad self.log_num_zeros_in_grad = log_num_zeros_in_grad - self.check_for_nan_in_grad = check_for_nan_in_grad self.params_have_main_grad = params_have_main_grad def get_parameters(self): @@ -97,15 +91,11 @@ def get_model_parallel_group(self): """Default returned here, but the distributed optimizer overrides this.""" return parallel_state.get_model_parallel_group() - def clip_grad_norm(self, clip_grad, check_for_nan_in_grad): + def clip_grad_norm(self, clip_grad): params = self.get_parameters() grads_for_norm = self.get_main_grads_for_grad_norm() return clip_grad_norm_fp32( - params, - grads_for_norm, - clip_grad, - check_for_nan_in_grad, - model_parallel_group=self.get_model_parallel_group(), + params, grads_for_norm, clip_grad, model_parallel_group=self.get_model_parallel_group(), ) def count_zeros(self): @@ -176,7 +166,6 @@ class MixedPrecisionOptimizer(MegatronOptimizer): clip_grad: clip gradeints with this global L2 norm. Note that clipping is ignored if clip_grad == 0 log_num_zeros_in_grad: return number of zeros in the gradients. - check_for_nan_in_grad: check if gradients have a NaN. params_have_main_grad: flag indicating if parameters have a `main_grad` field. 
If this is set, we are assuming that the model parameters are store in the `main_grad` @@ -201,7 +190,6 @@ def __init__( optimizer, clip_grad, log_num_zeros_in_grad, - check_for_nan_in_grad, params_have_main_grad, fp16, bf16, @@ -210,11 +198,7 @@ def __init__( ): super().__init__( - optimizer, - clip_grad, - log_num_zeros_in_grad, - check_for_nan_in_grad, - params_have_main_grad, + optimizer, clip_grad, log_num_zeros_in_grad, params_have_main_grad, ) self.fp16 = fp16 @@ -307,7 +291,7 @@ def step(self, args, timers): timers('optimizer-clip-main-grad', log_level=1).start(barrier=args.barrier_with_L1_time) grad_norm = None if self.clip_grad > 0.0: - grad_norm = self.clip_grad_norm(self.clip_grad, self.check_for_nan_in_grad) + grad_norm = self.clip_grad_norm(self.clip_grad) timers('optimizer-clip-main-grad').stop() # Count the zeros in the grads. @@ -339,7 +323,6 @@ class Float16OptimizerWithFloat16Params(MixedPrecisionOptimizer): clip_grad: clip gradeints with this global L2 norm. Note that clipping is ignored if clip_grad == 0 log_num_zeros_in_grad: return number of zeros in the gradients. - check_for_nan_in_grad: check if gradients have a NaN. params_have_main_grad: flag indicating if parameters have a `main_grad` field. If this is set, we are assuming that the model parameters are store in the `main_grad` @@ -363,7 +346,6 @@ def __init__( optimizer, clip_grad, log_num_zeros_in_grad, - check_for_nan_in_grad, params_have_main_grad, fp16, bf16, @@ -375,7 +357,6 @@ def __init__( optimizer, clip_grad, log_num_zeros_in_grad, - check_for_nan_in_grad, params_have_main_grad, fp16, bf16, @@ -558,20 +539,11 @@ def load_state_dict(self, state_dict): class FP32Optimizer(MegatronOptimizer): def __init__( - self, - optimizer, - clip_grad, - log_num_zeros_in_grad, - check_for_nan_in_grad, - params_have_main_grad, + self, optimizer, clip_grad, log_num_zeros_in_grad, params_have_main_grad, ): super(FP32Optimizer, self).__init__( - optimizer, - clip_grad, - log_num_zeros_in_grad, - check_for_nan_in_grad, - params_have_main_grad, + optimizer, clip_grad, log_num_zeros_in_grad, params_have_main_grad, ) self._scale = torch.tensor([1.0], dtype=torch.float, device='cuda') @@ -603,7 +575,7 @@ def step(self, args, timers): timers('optimizer-clip-main-grad', log_level=1).start(barrier=args.barrier_with_L1_time) grad_norm = None if self.clip_grad > 0.0: - grad_norm = self.clip_grad_norm(self.clip_grad, self.check_for_nan_in_grad) + grad_norm = self.clip_grad_norm(self.clip_grad) timers('optimizer-clip-main-grad').stop() # count the zeros in the grads diff --git a/megatron/core/optimizer/optimizer_config.py b/megatron/core/optimizer/optimizer_config.py index 2689d667bd..664e7c9036 100644 --- a/megatron/core/optimizer/optimizer_config.py +++ b/megatron/core/optimizer/optimizer_config.py @@ -78,8 +78,6 @@ class OptimizerConfig: clip_grad (float): Gradient clipping based on global L2 norm. log_num_zeros_in_grad (bool): If true, calculate and log the number of zeros in gradient. - - check_for_nan_in_loss_and_grad (bool): If true, check for NaNs in loss and gradient. """ # Precision. @@ -113,4 +111,3 @@ class OptimizerConfig: # Miscellaneous. 
clip_grad: float = 1.0 log_num_zeros_in_grad: bool = False - check_for_nan_in_loss_and_grad: bool = False diff --git a/megatron/training.py b/megatron/training.py index d604e6c489..e39d13e2e7 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -413,7 +413,8 @@ def get_model(model_provider_func, model_type=ModelType.encoder_or_decoder, wrap use_distributed_optimizer=args.use_distributed_optimizer, # Turn off bucketing for model_chunk 2 onwards, since communication for these # model chunks is overlapped with compute anyway. - disable_bucketing=(model_chunk_idx > 0)) + disable_bucketing=(model_chunk_idx > 0), + check_for_nan_in_grad=args.check_for_nan_in_loss_and_grad) for (model_chunk_idx, model_chunk) in enumerate(model)] # Broadcast params from data parallel src rank to other data parallel ranks. From 9677b3bbc20b0f569bb6f14dfe6b76ccef29095d Mon Sep 17 00:00:00 2001 From: Deepak Narayanan Date: Wed, 28 Feb 2024 17:29:45 -0800 Subject: [PATCH 286/296] Make throughput and memory footprint formulae compatible with arbitrary ffn_hidden_size --- megatron/theoretical_memory_usage.py | 6 ++++-- megatron/training.py | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/megatron/theoretical_memory_usage.py b/megatron/theoretical_memory_usage.py index 1a6fb6b5b3..8eeaa71ad4 100644 --- a/megatron/theoretical_memory_usage.py +++ b/megatron/theoretical_memory_usage.py @@ -18,7 +18,7 @@ def compute_weight_and_optimizer_memory(args, verbose=False): * args.hidden_size * args.hidden_size * ( - 1 + ((1 + (args.ffn_hidden_size / args.hidden_size)) / 5.0) + (args.num_query_groups / (5.0 * args.num_attention_heads)) + (2 / (5 * args.hidden_size)) + (1 / (5 * args.num_layers * args.hidden_size)) @@ -75,7 +75,9 @@ def compute_activation_memory(args, num_microbatches, verbose=False): # are for the first pipeline stage. # Memory footprint from transformer layer (self-attention and MLP). 
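The formula updates in this patch replace the implicit 4x MLP width with an explicit args.ffn_hidden_size / args.hidden_size ratio; the activation-memory hunk that follows does the same for the per-layer coefficient (the hard-coded 34 becomes 18 + 4 * ffn/h). A rough sanity check under hypothetical GPT-style sizes, ignoring the parallelism and recompute factors the script applies elsewhere:

def activation_footprint_per_layer(seq_length, micro_batch_size, hidden_size, ffn_hidden_size):
    # Per-transformer-layer activation footprint, generalized to any ffn_hidden_size.
    return seq_length * micro_batch_size * hidden_size * (
        18 + 4 * (ffn_hidden_size / hidden_size)
    )

# With the conventional ffn_hidden_size = 4 * hidden_size, the coefficient collapses
# back to the previously hard-coded 34.
s, b, h = 2048, 1, 4096  # hypothetical settings, not taken from the patch
assert activation_footprint_per_layer(s, b, h, 4 * h) == 34 * s * b * h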
- activation_memory = (args.seq_length * args.micro_batch_size * args.hidden_size) * 34 + activation_memory = (args.seq_length * args.micro_batch_size * args.hidden_size) * ( + 18 + (4 * (args.ffn_hidden_size / args.hidden_size)) + ) if verbose: print( f"Activation memory footprint per transformer layer: " diff --git a/megatron/training.py b/megatron/training.py index e39d13e2e7..40d9081e12 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -71,7 +71,7 @@ def num_floating_point_operations(args, batch_size): * args.hidden_size * args.hidden_size * ( - 1 + ((1 + (args.ffn_hidden_size / args.hidden_size)) / 5.0) + (args.num_query_groups / (5 * args.num_attention_heads)) + (args.seq_length / (5 * args.hidden_size)) + (args.padded_vocab_size / (10 * args.num_layers * args.hidden_size)) From 3dafc0ed24b4748e73a65bd913d9f590927b07f5 Mon Sep 17 00:00:00 2001 From: Maanu Grover Date: Wed, 28 Feb 2024 21:56:19 -0800 Subject: [PATCH 287/296] Move to Draco OCI --- .gitlab-ci.yml | 52 +++---------------- jet-tests.yml | 3 +- .../functional_tests/jet_recipes/MR-bert.yaml | 2 +- .../functional_tests/jet_recipes/MR-gpt.yaml | 2 +- tests/functional_tests/jet_recipes/MR-t5.yaml | 2 +- .../python_test_utils/jet_test_pipeline.py | 5 +- ...ps-100_tp-1_pp-2_mcore-false_te-false.json | 2 +- ...eps-50_tp-1_pp-2_mcore-false_te-false.json | 1 + ...0_tp-1_pp-4_mcore-false_te-false_vp-2.json | 2 +- ...2_args-local-spec_mcore-true_te-false.json | 2 +- ...eps-50_tp-2_pp-2_mcore-false_te-false.json | 2 +- ...teps-50_tp-2_pp-2_mcore-true_te-false.json | 2 +- ...ps-100_tp-1_pp-2_mcore-false_te-false.json | 1 + ...0_tp-1_pp-4_mcore-false_te-false_vp-2.json | 1 + ...2_args-local-spec_mcore-true_te-false.json | 1 + ...eps-50_tp-2_pp-2_mcore-false_te-false.json | 1 + ...teps-50_tp-2_pp-2_mcore-true_te-false.json | 1 + ...ps-100_tp-1_pp-2_mcore-false_te-false.json | 1 + ...ute-num-layers-1-_mcore-true_te-false.json | 0 ...gs-dist-optimizer_mcore-true_te-false.json | 1 + ...rm-full-recompute_mcore-true_te-false.json | 1 + ...edding-type-rope-_mcore-true_te-false.json | 0 ...rleaved-no-fusion_mcore-true_te-false.json | 1 + ...s-rope-embeddings_mcore-true_te-false.json | 1 + ...sable-bias-linear_mcore-true_te-false.json | 0 ...sequence-parallel_mcore-true_te-false.json | 0 ...pp-4_args--swiglu_mcore-true_te-false.json | 0 ...nd-output-weights_mcore-true_te-false.json | 0 ...sable-bias-linear_mcore-true_te-false.json | 1 + ...param-gather_mcore-true_te-false_vp-1.json | 1 + ...educe-untied_mcore-true_te-false_vp-1.json | 1 + ...-grad-reduce_mcore-true_te-false_vp-1.json | 1 + ...sequence-parallel_mcore-true_te-false.json | 1 + ..._pp-4_args-swiglu_mcore-true_te-false.json | 1 + ...dings-and-outputs_mcore-true_te-false.json | 1 + ...0_tp-1_pp-4_mcore-false_te-false_vp-1.json | 1 + ...50_tp-1_pp-4_mcore-true_te-false_vp-1.json | 1 + ...-parallel-size-2-_mcore-true_te-false.json | 0 ...el-dist-optimizer_mcore-true_te-false.json | 1 + ...allel-groupedgemm_mcore-true_te-false.json | 1 + ...rallel-top2router_mcore-true_te-false.json | 1 + ...8experts2parallel_mcore-true_te-false.json | 1 + ...eps-50_tp-2_pp-2_mcore-false_te-false.json | 1 + ...teps-50_tp-2_pp-2_mcore-false_te-true.json | 1 + ...teps-50_tp-2_pp-2_mcore-true_te-false.json | 1 + ...duce-param-gather_mcore-true_te-false.json | 1 + ...erlap-grad-reduce_mcore-true_te-false.json | 1 + ...rlap-grad-reduce_mcore-false_te-false.json | 0 ...lap-grad-reduce-_mcore-false_te-false.json | 0 ...eps-50_tp-1_pp-2_mcore-false_te-false.json | 0 
...teps-50_tp-1_pp-2_mcore-true_te-false.json | 0 ...rlap-grad-reduce_mcore-false_te-false.json | 0 ...grad-reduce_mcore-false_te-false_vp-1.json | 0 ...eps-50_tp-1_pp-4_mcore-false_te-false.json | 0 ...teps-50_tp-1_pp-4_mcore-true_te-false.json | 0 ...s--num-experts-2-_mcore-true_te-false.json | 0 ...--num-experts-4-_mcore-false_te-false.json | 0 ...rlap-grad-reduce_mcore-false_te-false.json | 0 ...-parallel-size-2-_mcore-true_te-false.json | 0 ...rlap-grad-reduce_mcore-false_te-false.json | 0 ...eps-50_tp-4_pp-1_mcore-false_te-false.json | 0 ...teps-50_tp-4_pp-1_mcore-true_te-false.json | 0 ...100_tp-1_pp-1_mcore-true_te-true_vp-1.json | 1 + ...ps-100_tp-1_pp-2_mcore-false_te-false.json | 1 + ...ps-100_tp-1_pp-2_mcore-false_te-false.json | 2 +- ...s-dist-optimizer_mcore-false_te-false.json | 1 + ...gs-dist-optimizer_mcore-true_te-false.json | 2 +- ...rm-full-recompute_mcore-true_te-false.json | 2 +- ...rleaved-no-fusion_mcore-true_te-false.json | 2 +- ...s-rope-embeddings_mcore-true_te-false.json | 2 +- ...sable-bias-linear_mcore-true_te-false.json | 2 +- ...aram-gather_mcore-false_te-false_vp-1.json | 1 + ...param-gather_mcore-true_te-false_vp-1.json | 2 +- ...educe-untied_mcore-true_te-false_vp-1.json | 2 +- ...grad-reduce_mcore-false_te-false_vp-1.json | 1 + ...-grad-reduce_mcore-true_te-false_vp-1.json | 2 +- ...sequence-parallel_mcore-true_te-false.json | 2 +- ..._pp-4_args-swiglu_mcore-true_te-false.json | 2 +- ...dings-and-outputs_mcore-true_te-false.json | 2 +- ...0_tp-1_pp-4_mcore-false_te-false_vp-1.json | 2 +- ...50_tp-1_pp-4_mcore-true_te-false_vp-1.json | 2 +- ...el-dist-optimizer_mcore-true_te-false.json | 2 +- ...allel-groupedgemm_mcore-true_te-false.json | 2 +- ...rallel-top2router_mcore-true_te-false.json | 2 +- ...8experts2parallel_mcore-true_te-false.json | 2 +- ...eps-50_tp-2_pp-2_mcore-false_te-false.json | 2 +- ...teps-50_tp-2_pp-2_mcore-false_te-true.json | 2 +- ...teps-50_tp-2_pp-2_mcore-true_te-false.json | 2 +- ...uce-param-gather_mcore-false_te-false.json | 1 + ...duce-param-gather_mcore-true_te-false.json | 2 +- ...rlap-grad-reduce_mcore-false_te-false.json | 1 + ...erlap-grad-reduce_mcore-true_te-false.json | 2 +- ...100_tp-1_pp-1_mcore-true_te-true_vp-1.json | 2 +- .../bert/pretrain_bert_distributed_test.sh | 2 +- .../gpt3/pretrain_gpt3_distributed_test.sh | 2 +- .../retro/pretrain_retro_distributed_test.sh | 2 +- .../t5/pretrain_t5_distributed_test.sh | 2 +- 97 files changed, 82 insertions(+), 86 deletions(-) create mode 100644 tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-2_mcore-false_te-false.json create mode 100644 tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-100_tp-1_pp-2_mcore-false_te-false.json create mode 100644 tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-4_mcore-false_te-false_vp-2.json create mode 100644 tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_args-local-spec_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-false_te-false.json create mode 100644 tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-true_te-false.json 
create mode 100644 tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json rename tests/functional_tests/test_results/jet/{ => dgx_h100}/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--recompute-granularity-full-recompute-method-uniform-recompute-num-layers-1-_mcore-true_te-false.json (100%) create mode 100644 tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-uniform-full-recompute_mcore-true_te-false.json rename tests/functional_tests/test_results/jet/{ => dgx_h100}/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args--position-embedding-type-rope-_mcore-true_te-false.json (100%) create mode 100644 tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings-interleaved-no-fusion_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings_mcore-true_te-false.json rename tests/functional_tests/test_results/jet/{ => dgx_h100}/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--disable-bias-linear_mcore-true_te-false.json (100%) rename tests/functional_tests/test_results/jet/{ => dgx_h100}/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--sequence-parallel_mcore-true_te-false.json (100%) rename tests/functional_tests/test_results/jet/{ => dgx_h100}/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--swiglu_mcore-true_te-false.json (100%) rename tests/functional_tests/test_results/jet/{ => dgx_h100}/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--untie-embeddings-and-output-weights_mcore-true_te-false.json (100%) create mode 100644 tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-disable-bias-linear_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false_vp-1.json create mode 100644 tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-untied_mcore-true_te-false_vp-1.json create mode 100644 tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false_vp-1.json create mode 100644 tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-sequence-parallel_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-swiglu_mcore-true_te-false.json create mode 100644 
tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-untie-embeddings-and-outputs_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false_vp-1.json create mode 100644 tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false_vp-1.json rename tests/functional_tests/test_results/jet/{ => dgx_h100}/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args--sequence-parallel-num-experts-8-expert-model-parallel-size-2-_mcore-true_te-false.json (100%) create mode 100644 tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-dist-optimizer_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-groupedgemm_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-top2router_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-false.json create mode 100644 tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-true.json create mode 100644 tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false.json create mode 100644 tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false.json rename tests/functional_tests/test_results/jet/{ => dgx_h100}/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--overlap-grad-reduce_mcore-false_te-false.json (100%) rename tests/functional_tests/test_results/jet/{ => dgx_h100}/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--use-distributed-optimizer-overlap-grad-reduce-_mcore-false_te-false.json (100%) rename tests/functional_tests/test_results/jet/{ => dgx_h100}/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_mcore-false_te-false.json (100%) rename tests/functional_tests/test_results/jet/{ => dgx_h100}/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_mcore-true_te-false.json (100%) rename tests/functional_tests/test_results/jet/{ => dgx_h100}/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--overlap-grad-reduce_mcore-false_te-false.json (100%) rename 
tests/functional_tests/test_results/jet/{ => dgx_h100}/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--overlap-grad-reduce_mcore-false_te-false_vp-1.json (100%) rename tests/functional_tests/test_results/jet/{ => dgx_h100}/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false.json (100%) rename tests/functional_tests/test_results/jet/{ => dgx_h100}/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false.json (100%) rename tests/functional_tests/test_results/jet/{ => dgx_h100}/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--num-experts-2-_mcore-true_te-false.json (100%) rename tests/functional_tests/test_results/jet/{ => dgx_h100}/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--num-experts-4-_mcore-false_te-false.json (100%) rename tests/functional_tests/test_results/jet/{ => dgx_h100}/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--overlap-grad-reduce_mcore-false_te-false.json (100%) rename tests/functional_tests/test_results/jet/{ => dgx_h100}/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--sequence-parallel-num-experts-4-expert-model-parallel-size-2-_mcore-true_te-false.json (100%) rename tests/functional_tests/test_results/jet/{ => dgx_h100}/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args--overlap-grad-reduce_mcore-false_te-false.json (100%) rename tests/functional_tests/test_results/jet/{ => dgx_h100}/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_mcore-false_te-false.json (100%) rename tests/functional_tests/test_results/jet/{ => dgx_h100}/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_mcore-true_te-false.json (100%) create mode 100644 tests/functional_tests/test_results/jet/dgx_h100/t5_220m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-1_mcore-true_te-true_vp-1.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request-resume_16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-false_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-false_te-false_vp-1.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-false_te-false_vp-1.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-false_te-false.json create mode 100644 tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-false_te-false.json diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 3c2d3fef3a..f432c7f210 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -18,7 +18,13 @@ variables: &VARS DISPLAY_OUTPUT: "True" # Set to true for new tests to copy the logs for creating golden truth file TIME_LIMIT: "10:00" # Default time limit for all jobs 
MOE_GROUPED_GEMM: 0 # Set to 1 to enable grouped gemm for MoE - + JET_CLUSTER_BRANCH: + value: "mcore/draco-oci" + options: + - "mcore/draco-oci" + - "mcore/eos" + description: '"mcore/draco-oci" for OCI-IAD, "mcore/eos" for EOS' + include: - jet-tests.yml @@ -92,47 +98,3 @@ formatting: when: always allow_failure: false retry: 2 - -train.bert_core.345m_tp1_pp2_1node_50steps_rope: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: bert - TP_SIZE: 1 - PP_SIZE: 2 - NUM_NODES: 1 - USE_CORE: 1 - MAX_STEPS: 50 - TIME_LIMIT: "20:00" - TEST_LEVEL: L0 - METADATA: rope_embeddings - ADDITIONAL_PARAMS: "--position-embedding-type rope" - -train.bert_core.345m_tp1_pp2_1node_50steps_sequence_parallel: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: bert - TP_SIZE: 1 - PP_SIZE: 2 - NUM_NODES: 1 - USE_CORE: 1 - MAX_STEPS: 50 - TIME_LIMIT: "20:00" - TEST_LEVEL: L0 - METADATA: sequence_parallel - ADDITIONAL_PARAMS: "--sequence-parallel" - -train.retro_core.tp1_pp1_1node_50steps: - <<: *selene-test-launcher - variables: - <<: [*VARS] - RUN_MODEL: retro - USE_TE: 0 - USE_CORE: 1 - TP_SIZE: 1 - PP_SIZE: 1 - NUM_NODES: 1 - MAX_STEPS: 50 - TIME_LIMIT: "20:00" - TEST_LEVEL: MONTHLY_TESTS diff --git a/jet-tests.yml b/jet-tests.yml index 8bba162ae8..e23f9cc98f 100644 --- a/jet-tests.yml +++ b/jet-tests.yml @@ -53,11 +53,12 @@ jet-trigger: needs: [ jet-configure, jet-setup ] trigger: project: dl/jet/ci - branch: mcore/eos + branch: $JET_CLUSTER_BRANCH strategy: depend inherit: variables: - JET_CUSTOM_FILTER + - JET_CLUSTER_BRANCH variables: JET_WORKLOADS_FILTER: "$_JET_FILTER" diff --git a/tests/functional_tests/jet_recipes/MR-bert.yaml b/tests/functional_tests/jet_recipes/MR-bert.yaml index 28c4e3f68d..7fb5baf561 100644 --- a/tests/functional_tests/jet_recipes/MR-bert.yaml +++ b/tests/functional_tests/jet_recipes/MR-bert.yaml @@ -9,7 +9,7 @@ spec: scope: merge-request nodes: 1 gpus: 8 - platforms: [dgx_h100] + platforms: [dgx_a100] steps: 50 use_te: False use_mcore: True diff --git a/tests/functional_tests/jet_recipes/MR-gpt.yaml b/tests/functional_tests/jet_recipes/MR-gpt.yaml index a708fea315..81ac77fc28 100644 --- a/tests/functional_tests/jet_recipes/MR-gpt.yaml +++ b/tests/functional_tests/jet_recipes/MR-gpt.yaml @@ -9,7 +9,7 @@ spec: scope: merge-request nodes: 1 gpus: 8 - platforms: [dgx_h100] + platforms: [dgx_a100] steps: 50 use_te: False use_mcore: True diff --git a/tests/functional_tests/jet_recipes/MR-t5.yaml b/tests/functional_tests/jet_recipes/MR-t5.yaml index 9d8490b130..adf22b987c 100644 --- a/tests/functional_tests/jet_recipes/MR-t5.yaml +++ b/tests/functional_tests/jet_recipes/MR-t5.yaml @@ -9,7 +9,7 @@ spec: scope: merge-request nodes: 1 gpus: 8 - platforms: [dgx_h100] + platforms: [dgx_a100] steps: 100 use_te: False use_mcore: True diff --git a/tests/functional_tests/python_test_utils/jet_test_pipeline.py b/tests/functional_tests/python_test_utils/jet_test_pipeline.py index ce5957dd20..27d00df49f 100644 --- a/tests/functional_tests/python_test_utils/jet_test_pipeline.py +++ b/tests/functional_tests/python_test_utils/jet_test_pipeline.py @@ -47,10 +47,7 @@ def check_exitcodes(results): for result in results: exit_codes.append(result.get('l_exit_code', -1)) log_urls.append(select_asset(result, 'output_script-0.log')) - name = result['obj_workload']['s_key'].lstrip('recipe/') - remove_substr = result['obj_workload']['obj_spec']['s_build'] + \ - '_' + result['obj_workload']['obj_spec']['s_scope'] - names.append(''.join(name.split(remove_substr))) + 
names.append(result['obj_workload']['s_key'].lstrip('recipe/')) table = PrettyTable() table.add_column("Job Key", names) diff --git a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-100_tp-1_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-100_tp-1_pp-2_mcore-false_te-false.json index bf335a35d0..b1917e084a 100644 --- a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-100_tp-1_pp-2_mcore-false_te-false.json +++ b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-100_tp-1_pp-2_mcore-false_te-false.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [10.51554, 10.51032, 10.52063, 10.52247, 10.51818, 10.5092, 10.43695, 10.29864, 10.16893, 9.98643, 9.9146, 9.78576, 9.67452, 9.55758, 9.50388, 9.35033, 9.34043, 9.27911, 9.27768, 9.20722]}, "num-zeros": {"start_step": 0, "end_step": 84, "step_interval": 5, "values": [21174.0, 21615.0, 24124.0, 18698.0, 23551.0, 18803.0, 19627.0, 27198.0, 25001.0, 25778.0, 15220.0, 35074.0, 26410.0, 22075.0, 37860.0, 28583.0, 23027.0]}, "iteration_timing_avg": 0.24888507462686574} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [10.50685, 10.49816, 10.47982, 10.48566, 10.49533, 10.46662, 10.42394, 10.30694, 10.15979, 9.96957, 9.87618, 9.75265, 9.63628, 9.54661, 9.49972, 9.35969, 9.33181, 9.26258, 9.26438, 9.21491]}, "num-zeros": {"start_step": 0, "end_step": 84, "step_interval": 5, "values": [18772.0, 19035.0, 22296.0, 18412.0, 20887.0, 23006.0, 22439.0, 26762.0, 24562.0, 25459.0, 17508.0, 32488.0, 28332.0, 20718.0, 37258.0, 30914.0, 26407.0]}, "iteration_timing_avg": 0.394903880597015} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-2_mcore-false_te-false.json new file mode 100644 index 0000000000..021bbc8a4b --- /dev/null +++ b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-2_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [10.50685, 10.49817, 10.47983, 10.48565, 10.49536, 10.46664, 10.42393, 10.30694, 10.15981, 9.96956, 9.87619, 9.75265, 9.63628, 9.54659, 9.49972, 9.35968, 9.33181, 9.26259, 9.26438, 9.21492]}, "num-zeros": {"start_step": 0, "end_step": 84, "step_interval": 5, "values": [18721.0, 19240.0, 22286.0, 18535.0, 20820.0, 23201.0, 22673.0, 26963.0, 24453.0, 25622.0, 17093.0, 32342.0, 27958.0, 20877.0, 37551.0, 30594.0, 26468.0]}, "iteration_timing_avg": 0.37912223880597} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-4_mcore-false_te-false_vp-2.json b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-4_mcore-false_te-false_vp-2.json index a8886517f5..39bb4585d2 100644 --- 
a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-4_mcore-false_te-false_vp-2.json +++ b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-4_mcore-false_te-false_vp-2.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.42108, 10.43552, 10.43934, 10.43349, 10.42826, 10.42499, 10.37549, 10.2337, 10.1091, 9.93972]}, "num-zeros": {"start_step": 0, "end_step": 34, "step_interval": 5, "values": [19496.0, 22201.0, 23780.0, 21779.0, 22701.0, 20018.0, 22409.0]}, "iteration_timing_avg": 0.5799538235294118} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.54837, 10.54636, 10.55694, 10.54151, 10.53088, 10.48503, 10.46275, 10.31499, 10.17122, 9.97326]}, "num-zeros": {"start_step": 0, "end_step": 34, "step_interval": 5, "values": [22606.0, 20619.0, 26292.0, 23607.0, 21666.0, 21672.0, 23313.0]}, "iteration_timing_avg": 0.7795826470588233} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_args-local-spec_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_args-local-spec_mcore-true_te-false.json index 163496d61e..9afb0ee0df 100644 --- a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_args-local-spec_mcore-true_te-false.json +++ b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_args-local-spec_mcore-true_te-false.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.47903, 10.47213, 10.46828, 10.4513, 10.4294, 10.35818, 10.16921, 10.09081, 9.918, 9.74324]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [2380.0, 1691.0, 2420.0, 2698.0, 2183.0, 2873.0, 2112.0, 3007.0, 1784.0, 2883.0]}, "iteration_timing_avg": 0.48770147058823515} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.49849, 10.48909, 10.48383, 10.45052, 10.4396, 10.34793, 10.13229, 10.03818, 9.86253, 9.67165]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [2210.0, 2505.0, 2330.0, 2235.0, 2290.0, 2400.0, 2866.0, 3249.0, 3522.0, 2958.0]}, "iteration_timing_avg": 0.7140176470588235} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-false_te-false.json index e3733adeb7..5a553ebb81 100644 --- a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-false_te-false.json +++ b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-false_te-false.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.46209, 10.46586, 10.47036, 10.48285, 10.46953, 10.4551, 10.4144, 10.27757, 10.15408, 9.98652]}, "num-zeros": {"start_step": 0, "end_step": 34, "step_interval": 5, 
"values": [19468.0, 20366.0, 23078.0, 23209.0, 20501.0, 21956.0, 23051.0]}, "iteration_timing_avg": 0.47122588235294105} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.44877, 10.43852, 10.44018, 10.44113, 10.45623, 10.44143, 10.39045, 10.25681, 10.13301, 9.95744]}, "num-zeros": {"start_step": 0, "end_step": 34, "step_interval": 5, "values": [27844.0, 20265.0, 28481.0, 26139.0, 24126.0, 21087.0, 21026.0]}, "iteration_timing_avg": 0.7523635294117648} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-true_te-false.json index 2936e747d2..d411d8c1a7 100644 --- a/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-true_te-false.json +++ b/tests/functional_tests/test_results/jet/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-true_te-false.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.4791, 10.47202, 10.4682, 10.45128, 10.42934, 10.35805, 10.16903, 10.0907, 9.91791, 9.7432]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [2250.0, 1699.0, 2376.0, 2808.0, 2117.0, 2783.0, 2170.0, 2896.0, 1835.0, 2867.0]}, "iteration_timing_avg": 0.6237708823529412} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.49838, 10.48932, 10.4839, 10.45043, 10.43933, 10.34765, 10.1322, 10.03809, 9.86242, 9.67174]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [2309.0, 2556.0, 2286.0, 2336.0, 2345.0, 2428.0, 2974.0, 3161.0, 3625.0, 2918.0]}, "iteration_timing_avg": 0.8110379411764704} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-100_tp-1_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-100_tp-1_pp-2_mcore-false_te-false.json new file mode 100644 index 0000000000..bf335a35d0 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-128_steps-100_tp-1_pp-2_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [10.51554, 10.51032, 10.52063, 10.52247, 10.51818, 10.5092, 10.43695, 10.29864, 10.16893, 9.98643, 9.9146, 9.78576, 9.67452, 9.55758, 9.50388, 9.35033, 9.34043, 9.27911, 9.27768, 9.20722]}, "num-zeros": {"start_step": 0, "end_step": 84, "step_interval": 5, "values": [21174.0, 21615.0, 24124.0, 18698.0, 23551.0, 18803.0, 19627.0, 27198.0, 25001.0, 25778.0, 15220.0, 35074.0, 26410.0, 22075.0, 37860.0, 28583.0, 23027.0]}, "iteration_timing_avg": 0.24888507462686574} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-4_mcore-false_te-false_vp-2.json b/tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-4_mcore-false_te-false_vp-2.json new file mode 100644 index 
0000000000..a8886517f5 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-1_pp-4_mcore-false_te-false_vp-2.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.42108, 10.43552, 10.43934, 10.43349, 10.42826, 10.42499, 10.37549, 10.2337, 10.1091, 9.93972]}, "num-zeros": {"start_step": 0, "end_step": 34, "step_interval": 5, "values": [19496.0, 22201.0, 23780.0, 21779.0, 22701.0, 20018.0, 22409.0]}, "iteration_timing_avg": 0.5799538235294118} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_args-local-spec_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_args-local-spec_mcore-true_te-false.json new file mode 100644 index 0000000000..163496d61e --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_args-local-spec_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.47903, 10.47213, 10.46828, 10.4513, 10.4294, 10.35818, 10.16921, 10.09081, 9.918, 9.74324]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [2380.0, 1691.0, 2420.0, 2698.0, 2183.0, 2873.0, 2112.0, 3007.0, 1784.0, 2883.0]}, "iteration_timing_avg": 0.48770147058823515} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-false_te-false.json new file mode 100644 index 0000000000..e3733adeb7 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.46209, 10.46586, 10.47036, 10.48285, 10.46953, 10.4551, 10.4144, 10.27757, 10.15408, 9.98652]}, "num-zeros": {"start_step": 0, "end_step": 34, "step_interval": 5, "values": [19468.0, 20366.0, 23078.0, 23209.0, 20501.0, 21956.0, 23051.0]}, "iteration_timing_avg": 0.47122588235294105} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-true_te-false.json new file mode 100644 index 0000000000..2936e747d2 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/bert_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-128_steps-50_tp-2_pp-2_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.4791, 10.47202, 10.4682, 10.45128, 10.42934, 10.35805, 10.16903, 10.0907, 9.91791, 9.7432]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [2250.0, 1699.0, 2376.0, 2808.0, 2117.0, 2783.0, 2170.0, 2896.0, 1835.0, 2867.0]}, "iteration_timing_avg": 0.6237708823529412} \ No newline at end of file diff 
--git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json new file mode 100644 index 0000000000..583d5ed358 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [10.79022, 10.84034, 10.85603, 10.82319, 10.83355, 10.78173, 10.59641, 10.58331, 10.49545, 10.22799, 10.19949, 9.94816, 9.94997, 9.91997, 9.79865, 9.25223, 9.61408, 9.19153, 9.46281, 9.62472]}, "num-zeros": {"start_step": 0, "end_step": 83, "step_interval": 5, "values": [2130.0, 2531.0, 2368.0, 2204.0, 2141.0, 2068.0, 2772.0, 2715.0, 2831.0, 2384.0, 2870.0, 2893.0, 3396.0, 3064.0, 3136.0, 2916.0, 3917.0]}, "iteration_timing_avg": 0.06181014925373134} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--recompute-granularity-full-recompute-method-uniform-recompute-num-layers-1-_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--recompute-granularity-full-recompute-method-uniform-recompute-num-layers-1-_mcore-true_te-false.json similarity index 100% rename from tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--recompute-granularity-full-recompute-method-uniform-recompute-num-layers-1-_mcore-true_te-false.json rename to tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--recompute-granularity-full-recompute-method-uniform-recompute-num-layers-1-_mcore-true_te-false.json diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-true_te-false.json new file mode 100644 index 0000000000..8abb3869de --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.89293, 10.89952, 10.87875, 10.85504, 10.73491, 10.63533, 10.15658, 10.2421, 10.15573, 9.82116]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1608.0, 1717.0, 1868.0, 1920.0, 1891.0, 1766.0, 1630.0, 1955.0, 2416.0, 2390.0]}, "iteration_timing_avg": 0.04569411764705883} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-uniform-full-recompute_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-uniform-full-recompute_mcore-true_te-false.json new file mode 100644 index 
0000000000..b68287b6eb --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-uniform-full-recompute_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.89293, 10.8995, 10.87875, 10.855, 10.73496, 10.63535, 10.1566, 10.24211, 10.15574, 9.82117]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1653.0, 1779.0, 1911.0, 1928.0, 1880.0, 1881.0, 1618.0, 1983.0, 2375.0, 2352.0]}, "iteration_timing_avg": 0.06516882352941178} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args--position-embedding-type-rope-_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args--position-embedding-type-rope-_mcore-true_te-false.json similarity index 100% rename from tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args--position-embedding-type-rope-_mcore-true_te-false.json rename to tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args--position-embedding-type-rope-_mcore-true_te-false.json diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings-interleaved-no-fusion_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings-interleaved-no-fusion_mcore-true_te-false.json new file mode 100644 index 0000000000..345d7fcc5f --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings-interleaved-no-fusion_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.858, 10.89563, 10.87285, 10.8249, 10.68816, 10.58405, 10.08513, 10.18125, 10.1058, 9.75605]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1864.0, 2004.0, 2086.0, 1978.0, 1975.0, 1889.0, 1656.0, 2059.0, 2227.0, 2306.0]}, "iteration_timing_avg": 0.08140323529411765} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings_mcore-true_te-false.json new file mode 100644 index 0000000000..2dcc249220 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85699, 10.89518, 10.87243, 10.82432, 10.68786, 10.58313, 10.08482, 10.18068, 10.10597, 9.75607]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1858.0, 1946.0, 2096.0, 1900.0, 2011.0, 1803.0, 1737.0, 2092.0, 2335.0, 2201.0]}, "iteration_timing_avg": 0.07560441176470588} \ No 
newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--disable-bias-linear_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--disable-bias-linear_mcore-true_te-false.json similarity index 100% rename from tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--disable-bias-linear_mcore-true_te-false.json rename to tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--disable-bias-linear_mcore-true_te-false.json diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--sequence-parallel_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--sequence-parallel_mcore-true_te-false.json similarity index 100% rename from tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--sequence-parallel_mcore-true_te-false.json rename to tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--sequence-parallel_mcore-true_te-false.json diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--swiglu_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--swiglu_mcore-true_te-false.json similarity index 100% rename from tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--swiglu_mcore-true_te-false.json rename to tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--swiglu_mcore-true_te-false.json diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--untie-embeddings-and-output-weights_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--untie-embeddings-and-output-weights_mcore-true_te-false.json similarity index 100% rename from tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--untie-embeddings-and-output-weights_mcore-true_te-false.json rename to tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--untie-embeddings-and-output-weights_mcore-true_te-false.json diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-disable-bias-linear_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-disable-bias-linear_mcore-true_te-false.json new file mode 100644 index 0000000000..018a6ecd39 --- /dev/null +++ 
b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-disable-bias-linear_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85535, 10.89042, 10.88142, 10.82973, 10.70858, 10.61199, 10.1184, 10.22418, 10.13702, 9.80781]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1629.0, 1692.0, 1882.0, 1929.0, 1936.0, 1669.0, 1603.0, 1903.0, 2128.0, 2278.0]}, "iteration_timing_avg": 0.0864920588235294} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false_vp-1.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false_vp-1.json new file mode 100644 index 0000000000..23a753821c --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false_vp-1.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84693, 10.89428, 10.88916, 10.82637, 10.70814, 10.61007, 10.11963, 10.22997, 10.15772, 9.83339]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1636.0, 1747.0, 1801.0, 1862.0, 1827.0, 1711.0, 1708.0, 1954.0, 2328.0, 2335.0]}, "iteration_timing_avg": 0.09368529411764706} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-untied_mcore-true_te-false_vp-1.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-untied_mcore-true_te-false_vp-1.json new file mode 100644 index 0000000000..4113dfc61d --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-untied_mcore-true_te-false_vp-1.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.92853, 10.937, 10.92943, 10.87789, 10.75133, 10.67044, 10.17418, 10.27899, 10.1883, 9.87023]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [22727964.0, 23020600.0, 22500812.0, 22830580.0, 22739790.0, 22548252.0, 22955676.0, 22589500.0, 22659010.0, 22884684.0]}, "iteration_timing_avg": 0.085995} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false_vp-1.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false_vp-1.json new file mode 100644 index 0000000000..262b2c579e --- /dev/null +++ 
b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false_vp-1.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84693, 10.89428, 10.88916, 10.82637, 10.70814, 10.61007, 10.11963, 10.22997, 10.15772, 9.83339]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1636.0, 1747.0, 1801.0, 1862.0, 1827.0, 1711.0, 1708.0, 1954.0, 2328.0, 2335.0]}, "iteration_timing_avg": 0.08397176470588234} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-sequence-parallel_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-sequence-parallel_mcore-true_te-false.json new file mode 100644 index 0000000000..e4c1262364 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-sequence-parallel_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85534, 10.88947, 10.8806, 10.8283, 10.70687, 10.60921, 10.11533, 10.22106, 10.13408, 9.80477]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1669.0, 1816.0, 1897.0, 1831.0, 1824.0, 1649.0, 1484.0, 1877.0, 2140.0, 2202.0]}, "iteration_timing_avg": 0.0912420588235294} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-swiglu_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-swiglu_mcore-true_te-false.json new file mode 100644 index 0000000000..6775db704b --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-swiglu_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.78152, 10.8477, 10.85991, 10.80229, 10.72398, 10.64556, 10.25979, 10.36953, 10.30726, 9.969]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [2441.0, 2962.0, 2986.0, 2963.0, 2701.0, 2657.0, 2300.0, 2619.0, 2655.0, 2484.0]}, "iteration_timing_avg": 0.09503617647058824} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-untie-embeddings-and-outputs_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-untie-embeddings-and-outputs_mcore-true_te-false.json new file mode 100644 index 0000000000..cc1244e378 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-untie-embeddings-and-outputs_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.91778, 10.93688, 10.92414, 10.85264, 10.74695, 10.66448, 10.16759, 10.27157, 10.17695, 9.86116]}, "num-zeros": {"start_step": 0, "end_step": 50, 
"step_interval": 5, "values": [22728092.0, 23020904.0, 22500632.0, 22830582.0, 22739828.0, 22547742.0, 22955712.0, 22588520.0, 22658932.0, 22885368.0]}, "iteration_timing_avg": 0.09069441176470588} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false_vp-1.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false_vp-1.json new file mode 100644 index 0000000000..61d841b3d7 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false_vp-1.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.8214, 10.8607, 10.8735, 10.85187, 10.84091, 10.80628, 10.6169, 10.59573, 10.50423, 10.22238]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2348.0, 2524.0, 2517.0, 2205.0, 2198.0, 2558.0, 2398.0]}, "iteration_timing_avg": 0.07500764705882351} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false_vp-1.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false_vp-1.json new file mode 100644 index 0000000000..a99307432e --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false_vp-1.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84693, 10.89428, 10.88918, 10.82635, 10.70816, 10.61006, 10.11963, 10.22999, 10.15774, 9.83337]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1636.0, 1747.0, 1846.0, 1868.0, 1856.0, 1652.0, 1638.0, 1903.0, 2315.0, 2381.0]}, "iteration_timing_avg": 0.08791117647058823} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args--sequence-parallel-num-experts-8-expert-model-parallel-size-2-_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args--sequence-parallel-num-experts-8-expert-model-parallel-size-2-_mcore-true_te-false.json similarity index 100% rename from tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args--sequence-parallel-num-experts-8-expert-model-parallel-size-2-_mcore-true_te-false.json rename to tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args--sequence-parallel-num-experts-8-expert-model-parallel-size-2-_mcore-true_te-false.json diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-dist-optimizer_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-dist-optimizer_mcore-true_te-false.json new file mode 100644 index 0000000000..04eb336aac --- 
/dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-dist-optimizer_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79066, 10.83474, 10.85443, 10.77921, 10.69997, 10.61398, 10.15871, 10.27978, 10.19497, 9.86981]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [30950.0, 37387.0, 37772.0, 36424.0, 33230.0, 34567.0, 30132.0, 34960.0, 36224.0, 37476.0]}, "iteration_timing_avg": 0.20243735294117646} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-groupedgemm_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-groupedgemm_mcore-true_te-false.json new file mode 100644 index 0000000000..f464650d3b --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-groupedgemm_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.80426, 10.84849, 10.86146, 10.81012, 10.72201, 10.64589, 10.2092, 10.32252, 10.23908, 9.92465]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16350.0, 19608.0, 19689.0, 19043.0, 17602.0, 17956.0, 15632.0, 18288.0, 18606.0, 19277.0]}, "iteration_timing_avg": 0.13919470588235297} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-top2router_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-top2router_mcore-true_te-false.json new file mode 100644 index 0000000000..761c53aecb --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-top2router_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.78922, 10.8416, 10.85552, 10.77966, 10.65528, 10.56398, 10.04054, 10.17415, 10.08488, 9.73406]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [13541.0, 16797.0, 17213.0, 16564.0, 15382.0, 15817.0, 14915.0, 17089.0, 17939.0, 18387.0]}, "iteration_timing_avg": 0.21506794117647057} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel_mcore-true_te-false.json new file mode 100644 index 0000000000..f58d4c4ceb --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79066, 
10.83467, 10.85342, 10.77851, 10.70005, 10.61316, 10.15957, 10.27971, 10.19511, 9.87028]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16055.0, 19166.0, 19161.0, 18797.0, 17405.0, 17721.0, 15678.0, 18223.0, 18580.0, 19742.0]}, "iteration_timing_avg": 0.20099058823529406} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-false.json new file mode 100644 index 0000000000..a465e34711 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85706, 10.8832, 10.88511, 10.87562, 10.8708, 10.83108, 10.65065, 10.63723, 10.53201, 10.25681]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2479.0, 2534.0, 2786.0, 2310.0, 2385.0, 2586.0, 2472.0]}, "iteration_timing_avg": 0.09594764705882353} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-true.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-true.json new file mode 100644 index 0000000000..c218a0ad40 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-true.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85899, 10.88286, 10.87687, 10.82429, 10.69664, 10.60784, 10.11662, 10.2347, 10.14673, 9.82429]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1627.0, 1874.0, 1894.0, 1862.0, 1901.0, 1649.0, 1553.0, 1949.0, 2281.0, 2225.0]}, "iteration_timing_avg": 0.10429970588235296} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-true_te-false.json new file mode 100644 index 0000000000..79db29b177 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.86873, 10.891, 10.89716, 10.84022, 10.70435, 10.61599, 10.11661, 10.23183, 10.14875, 9.82429]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1619.0, 1839.0, 1712.0, 1853.0, 1810.0, 1682.0, 1567.0, 1997.0, 2186.0, 2376.0]}, "iteration_timing_avg": 0.1169185294117647} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false.json 
b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false.json new file mode 100644 index 0000000000..baf2c64a93 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84429, 10.86285, 10.86177, 10.81156, 10.69316, 10.61799, 10.16498, 10.25035, 10.15231, 9.83669]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1932.0, 2302.0, 2138.0, 2193.0, 2254.0, 2189.0, 1844.0, 2313.0, 2538.0, 2473.0]}, "iteration_timing_avg": 0.16636205882352936} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false.json new file mode 100644 index 0000000000..5db54e4e03 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84429, 10.86285, 10.86177, 10.81156, 10.69316, 10.61799, 10.16498, 10.25035, 10.15231, 9.83669]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1932.0, 2302.0, 2138.0, 2193.0, 2254.0, 2189.0, 1844.0, 2313.0, 2538.0, 2473.0]}, "iteration_timing_avg": 0.1574994117647059} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--overlap-grad-reduce_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--overlap-grad-reduce_mcore-false_te-false.json similarity index 100% rename from tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--overlap-grad-reduce_mcore-false_te-false.json rename to tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--overlap-grad-reduce_mcore-false_te-false.json diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--use-distributed-optimizer-overlap-grad-reduce-_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--use-distributed-optimizer-overlap-grad-reduce-_mcore-false_te-false.json similarity index 100% rename from tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--use-distributed-optimizer-overlap-grad-reduce-_mcore-false_te-false.json rename to 
tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args--use-distributed-optimizer-overlap-grad-reduce-_mcore-false_te-false.json diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_mcore-false_te-false.json similarity index 100% rename from tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_mcore-false_te-false.json rename to tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_mcore-false_te-false.json diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_mcore-true_te-false.json similarity index 100% rename from tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_mcore-true_te-false.json rename to tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_mcore-true_te-false.json diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--overlap-grad-reduce_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--overlap-grad-reduce_mcore-false_te-false.json similarity index 100% rename from tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--overlap-grad-reduce_mcore-false_te-false.json rename to tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--overlap-grad-reduce_mcore-false_te-false.json diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--overlap-grad-reduce_mcore-false_te-false_vp-1.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--overlap-grad-reduce_mcore-false_te-false_vp-1.json similarity index 100% rename from tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--overlap-grad-reduce_mcore-false_te-false_vp-1.json rename to tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args--overlap-grad-reduce_mcore-false_te-false_vp-1.json diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false.json similarity index 100% rename from tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false.json rename to 
tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false.json diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false.json similarity index 100% rename from tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false.json rename to tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false.json diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--num-experts-2-_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--num-experts-2-_mcore-true_te-false.json similarity index 100% rename from tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--num-experts-2-_mcore-true_te-false.json rename to tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--num-experts-2-_mcore-true_te-false.json diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--num-experts-4-_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--num-experts-4-_mcore-false_te-false.json similarity index 100% rename from tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--num-experts-4-_mcore-false_te-false.json rename to tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--num-experts-4-_mcore-false_te-false.json diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--overlap-grad-reduce_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--overlap-grad-reduce_mcore-false_te-false.json similarity index 100% rename from tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--overlap-grad-reduce_mcore-false_te-false.json rename to tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--overlap-grad-reduce_mcore-false_te-false.json diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--sequence-parallel-num-experts-4-expert-model-parallel-size-2-_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--sequence-parallel-num-experts-4-expert-model-parallel-size-2-_mcore-true_te-false.json similarity index 100% rename from 
tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--sequence-parallel-num-experts-4-expert-model-parallel-size-2-_mcore-true_te-false.json rename to tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_args--sequence-parallel-num-experts-4-expert-model-parallel-size-2-_mcore-true_te-false.json diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args--overlap-grad-reduce_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args--overlap-grad-reduce_mcore-false_te-false.json similarity index 100% rename from tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args--overlap-grad-reduce_mcore-false_te-false.json rename to tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args--overlap-grad-reduce_mcore-false_te-false.json diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_mcore-false_te-false.json similarity index 100% rename from tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_mcore-false_te-false.json rename to tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_mcore-false_te-false.json diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_mcore-true_te-false.json similarity index 100% rename from tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_mcore-true_te-false.json rename to tests/functional_tests/test_results/jet/dgx_h100/gpt3_345m_mcore-pyt_nightly_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_mcore-true_te-false.json diff --git a/tests/functional_tests/test_results/jet/dgx_h100/t5_220m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-1_mcore-true_te-true_vp-1.json b/tests/functional_tests/test_results/jet/dgx_h100/t5_220m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-1_mcore-true_te-true_vp-1.json new file mode 100644 index 0000000000..5b613dea44 --- /dev/null +++ b/tests/functional_tests/test_results/jet/dgx_h100/t5_220m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-1_mcore-true_te-true_vp-1.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [10.34848, 9.45337, 8.89369, 8.56467, 8.28131, 8.12832, 7.82238, 7.55462, 7.42172, 7.28716, 7.32811, 7.22045, 7.11648, 7.03859, 6.87728, 6.94356, 6.94705, 7.02828, 6.71597, 6.9486]}, "num-zeros": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [43307.0, 40999.0, 44043.0, 41749.0, 44811.0, 44001.0, 41304.0, 42490.0, 44698.0, 43956.0, 41137.0, 43230.0, 39726.0, 45427.0, 43358.0, 43930.0, 45426.0, 45701.0, 46301.0, 44734.0]}, "iteration_timing_avg": 
0.12808164179104478} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request-resume_16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request-resume_16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json new file mode 100644 index 0000000000..cb29680bfe --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request-resume_16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [10.84009, 10.89053, 10.90905, 10.87933, 10.86561, 10.83752, 10.64582, 10.62396, 10.53554, 10.25187, 10.20873, 9.96714, 9.96605, 9.92368, 9.79178, 9.26741, 9.61926, 9.18974, 9.46019, 9.62277]}, "num-zeros": {"start_step": 0, "end_step": 83, "step_interval": 5, "values": [2118.0, 2371.0, 2498.0, 2225.0, 2122.0, 2090.0, 2315.0, 2784.0, 2701.0, 2324.0, 2745.0, 2871.0, 3475.0, 3095.0, 3249.0, 3160.0, 3877.0]}, "iteration_timing_avg": 0.09977388059701493} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json index 583d5ed358..a7699776dd 100644 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request-resume_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-2_mcore-false_te-false.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [10.79022, 10.84034, 10.85603, 10.82319, 10.83355, 10.78173, 10.59641, 10.58331, 10.49545, 10.22799, 10.19949, 9.94816, 9.94997, 9.91997, 9.79865, 9.25223, 9.61408, 9.19153, 9.46281, 9.62472]}, "num-zeros": {"start_step": 0, "end_step": 83, "step_interval": 5, "values": [2130.0, 2531.0, 2368.0, 2204.0, 2141.0, 2068.0, 2772.0, 2715.0, 2831.0, 2384.0, 2870.0, 2893.0, 3396.0, 3064.0, 3136.0, 2916.0, 3917.0]}, "iteration_timing_avg": 0.06181014925373134} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [10.84008, 10.89053, 10.90905, 10.87934, 10.86562, 10.83752, 10.64582, 10.62396, 10.53554, 10.25187, 10.20874, 9.96714, 9.96605, 9.92367, 9.79178, 9.26741, 9.61926, 9.18973, 9.46019, 9.62277]}, "num-zeros": {"start_step": 0, "end_step": 83, "step_interval": 5, "values": [2078.0, 2328.0, 2420.0, 2256.0, 2180.0, 2078.0, 2313.0, 2857.0, 2696.0, 2315.0, 2912.0, 2942.0, 3493.0, 3045.0, 3229.0, 3100.0, 3718.0]}, "iteration_timing_avg": 0.10716462686567164} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-false_te-false.json new file mode 100644 index 0000000000..c92bb929d1 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-false_te-false.json @@ 
-0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.87174, 10.89545, 10.88847, 10.88533, 10.893, 10.84895, 10.70048, 10.64124, 10.53839, 10.3107]}, "num-zeros": {"start_step": 0, "end_step": 32, "step_interval": 5, "values": [1238.0, 1318.0, 1648.0, 1423.0, 1535.0, 1350.0, 1271.0]}, "iteration_timing_avg": 0.06317382352941177} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-true_te-false.json index 8abb3869de..633847bc15 100644 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-true_te-false.json +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-dist-optimizer_mcore-true_te-false.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.89293, 10.89952, 10.87875, 10.85504, 10.73491, 10.63533, 10.15658, 10.2421, 10.15573, 9.82116]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1608.0, 1717.0, 1868.0, 1920.0, 1891.0, 1766.0, 1630.0, 1955.0, 2416.0, 2390.0]}, "iteration_timing_avg": 0.04569411764705883} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.83721, 10.87648, 10.85327, 10.79634, 10.67874, 10.60491, 10.12636, 10.22252, 10.13977, 9.82346]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1640.0, 1873.0, 1930.0, 1910.0, 1936.0, 1807.0, 1630.0, 1962.0, 2317.0, 2314.0]}, "iteration_timing_avg": 0.06904588235294119} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-uniform-full-recompute_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-uniform-full-recompute_mcore-true_te-false.json index b68287b6eb..2b29a51a27 100644 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-uniform-full-recompute_mcore-true_te-false.json +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-1_args-uniform-full-recompute_mcore-true_te-false.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.89293, 10.8995, 10.87875, 10.855, 10.73496, 10.63535, 10.1566, 10.24211, 10.15574, 9.82117]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1653.0, 1779.0, 1911.0, 1928.0, 1880.0, 1881.0, 1618.0, 1983.0, 2375.0, 2352.0]}, "iteration_timing_avg": 0.06516882352941178} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.83721, 10.87648, 10.85329, 10.79637, 10.67873, 10.60491, 10.12635, 10.22253, 10.13979, 9.82348]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1589.0, 1913.0, 1924.0, 1876.0, 2005.0, 1749.0, 1631.0, 1981.0, 2346.0, 2380.0]}, "iteration_timing_avg": 0.09164500000000002} \ No newline at end of file diff --git 
a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings-interleaved-no-fusion_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings-interleaved-no-fusion_mcore-true_te-false.json index 345d7fcc5f..4357d8badf 100644 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings-interleaved-no-fusion_mcore-true_te-false.json +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings-interleaved-no-fusion_mcore-true_te-false.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.858, 10.89563, 10.87285, 10.8249, 10.68816, 10.58405, 10.08513, 10.18125, 10.1058, 9.75605]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1864.0, 2004.0, 2086.0, 1978.0, 1975.0, 1889.0, 1656.0, 2059.0, 2227.0, 2306.0]}, "iteration_timing_avg": 0.08140323529411765} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84407, 10.87551, 10.90356, 10.81577, 10.67451, 10.60208, 10.06584, 10.19215, 10.11381, 9.76133]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1717.0, 2136.0, 2046.0, 1923.0, 2052.0, 1910.0, 1717.0, 2008.0, 2269.0, 2231.0]}, "iteration_timing_avg": 0.11052176470588236} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings_mcore-true_te-false.json index 2dcc249220..b4db7bde9b 100644 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings_mcore-true_te-false.json +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-2_args-rope-embeddings_mcore-true_te-false.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85699, 10.89518, 10.87243, 10.82432, 10.68786, 10.58313, 10.08482, 10.18068, 10.10597, 9.75607]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1858.0, 1946.0, 2096.0, 1900.0, 2011.0, 1803.0, 1737.0, 2092.0, 2335.0, 2201.0]}, "iteration_timing_avg": 0.07560441176470588} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84608, 10.87634, 10.90424, 10.81754, 10.67579, 10.60283, 10.06667, 10.19261, 10.11413, 9.7617]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1709.0, 2192.0, 2059.0, 1960.0, 2164.0, 1846.0, 1614.0, 2074.0, 2176.0, 2249.0]}, "iteration_timing_avg": 0.11051617647058823} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-disable-bias-linear_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-disable-bias-linear_mcore-true_te-false.json index 
018a6ecd39..eedf2baa8b 100644 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-disable-bias-linear_mcore-true_te-false.json +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-disable-bias-linear_mcore-true_te-false.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85535, 10.89042, 10.88142, 10.82973, 10.70858, 10.61199, 10.1184, 10.22418, 10.13702, 9.80781]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1629.0, 1692.0, 1882.0, 1929.0, 1936.0, 1669.0, 1603.0, 1903.0, 2128.0, 2278.0]}, "iteration_timing_avg": 0.0864920588235294} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79374, 10.86745, 10.89179, 10.78304, 10.66262, 10.58362, 10.08688, 10.19342, 10.13764, 9.81438]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1567.0, 1904.0, 1912.0, 1931.0, 1799.0, 1722.0, 1591.0, 1950.0, 2428.0, 2378.0]}, "iteration_timing_avg": 0.12243558823529416} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-false_te-false_vp-1.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-false_te-false_vp-1.json new file mode 100644 index 0000000000..6362aacb7c --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-false_te-false_vp-1.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.7951, 10.84939, 10.87411, 10.83459, 10.82865, 10.78676, 10.56492, 10.57063, 10.48544, 10.19547]}, "num-zeros": {"start_step": 0, "end_step": 34, "step_interval": 5, "values": [2586.0, 2828.0, 2105.0, 2725.0, 2711.0, 2428.0, 2946.0]}, "iteration_timing_avg": 0.12451529411764707} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false_vp-1.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false_vp-1.json index 23a753821c..cd7044ddda 100644 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false_vp-1.json +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false_vp-1.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84693, 10.89428, 10.88916, 10.82637, 10.70814, 10.61007, 10.11963, 10.22997, 10.15772, 9.83339]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1636.0, 1747.0, 1801.0, 1862.0, 1827.0, 1711.0, 1708.0, 1954.0, 2328.0, 2335.0]}, 
"iteration_timing_avg": 0.09368529411764706} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.82096, 10.87269, 10.88192, 10.79677, 10.68633, 10.59654, 10.09782, 10.21295, 10.13917, 9.80682]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1501.0, 1749.0, 1845.0, 1786.0, 1912.0, 1741.0, 1567.0, 1927.0, 2280.0, 2405.0]}, "iteration_timing_avg": 0.12873676470588236} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-untied_mcore-true_te-false_vp-1.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-untied_mcore-true_te-false_vp-1.json index 4113dfc61d..d8ea1345ac 100644 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-untied_mcore-true_te-false_vp-1.json +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce-untied_mcore-true_te-false_vp-1.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.92853, 10.937, 10.92943, 10.87789, 10.75133, 10.67044, 10.17418, 10.27899, 10.1883, 9.87023]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [22727964.0, 23020600.0, 22500812.0, 22830580.0, 22739790.0, 22548252.0, 22955676.0, 22589500.0, 22659010.0, 22884684.0]}, "iteration_timing_avg": 0.085995} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.9362, 10.93543, 10.9456, 10.87817, 10.75688, 10.66385, 10.16947, 10.27156, 10.19469, 9.85867]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [22727572.0, 23021722.0, 22500652.0, 22830476.0, 22739252.0, 22547046.0, 22954704.0, 22589164.0, 22659710.0, 22883876.0]}, "iteration_timing_avg": 0.12799705882352944} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-false_te-false_vp-1.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-false_te-false_vp-1.json new file mode 100644 index 0000000000..11b747f2d3 --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-false_te-false_vp-1.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.7951, 10.84939, 10.87411, 10.83459, 10.82865, 10.78676, 10.56492, 10.57063, 10.48544, 10.19547]}, "num-zeros": {"start_step": 0, "end_step": 34, "step_interval": 5, "values": [2586.0, 2828.0, 2105.0, 2725.0, 2711.0, 2428.0, 2946.0]}, "iteration_timing_avg": 0.11798852941176469} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false_vp-1.json 
b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false_vp-1.json index 262b2c579e..c9e2aa6032 100644 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false_vp-1.json +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false_vp-1.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84693, 10.89428, 10.88916, 10.82637, 10.70814, 10.61007, 10.11963, 10.22997, 10.15772, 9.83339]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1636.0, 1747.0, 1801.0, 1862.0, 1827.0, 1711.0, 1708.0, 1954.0, 2328.0, 2335.0]}, "iteration_timing_avg": 0.08397176470588234} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.82096, 10.87269, 10.88192, 10.79677, 10.68633, 10.59654, 10.09782, 10.21295, 10.13917, 9.80682]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1501.0, 1749.0, 1845.0, 1786.0, 1912.0, 1741.0, 1567.0, 1927.0, 2280.0, 2405.0]}, "iteration_timing_avg": 0.12168999999999999} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-sequence-parallel_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-sequence-parallel_mcore-true_te-false.json index e4c1262364..ac3c1f57f2 100644 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-sequence-parallel_mcore-true_te-false.json +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-sequence-parallel_mcore-true_te-false.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85534, 10.88947, 10.8806, 10.8283, 10.70687, 10.60921, 10.11533, 10.22106, 10.13408, 9.80477]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1669.0, 1816.0, 1897.0, 1831.0, 1824.0, 1649.0, 1484.0, 1877.0, 2140.0, 2202.0]}, "iteration_timing_avg": 0.0912420588235294} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79373, 10.86651, 10.89091, 10.78164, 10.66101, 10.58089, 10.08413, 10.19034, 10.13461, 9.81138]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1670.0, 1864.0, 1826.0, 1965.0, 1861.0, 1605.0, 1609.0, 1931.0, 2343.0, 2347.0]}, "iteration_timing_avg": 0.12348235294117646} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-swiglu_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-swiglu_mcore-true_te-false.json index 6775db704b..a2d5ed7952 100644 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-swiglu_mcore-true_te-false.json +++ 
b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-swiglu_mcore-true_te-false.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.78152, 10.8477, 10.85991, 10.80229, 10.72398, 10.64556, 10.25979, 10.36953, 10.30726, 9.969]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [2441.0, 2962.0, 2986.0, 2963.0, 2701.0, 2657.0, 2300.0, 2619.0, 2655.0, 2484.0]}, "iteration_timing_avg": 0.09503617647058824} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.73353, 10.81676, 10.83941, 10.7586, 10.70146, 10.62786, 10.20836, 10.36754, 10.26496, 9.94346]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [2536.0, 2988.0, 2925.0, 2895.0, 2617.0, 2603.0, 2325.0, 2704.0, 2592.0, 2406.0]}, "iteration_timing_avg": 0.12725500000000006} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-untie-embeddings-and-outputs_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-untie-embeddings-and-outputs_mcore-true_te-false.json index cc1244e378..e294c75c0f 100644 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-untie-embeddings-and-outputs_mcore-true_te-false.json +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_args-untie-embeddings-and-outputs_mcore-true_te-false.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.91778, 10.93688, 10.92414, 10.85264, 10.74695, 10.66448, 10.16759, 10.27157, 10.17695, 9.86116]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [22728092.0, 23020904.0, 22500632.0, 22830582.0, 22739828.0, 22547742.0, 22955712.0, 22588520.0, 22658932.0, 22885368.0]}, "iteration_timing_avg": 0.09069441176470588} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.8968, 10.90735, 10.91688, 10.84693, 10.70699, 10.63243, 10.15516, 10.26078, 10.15949, 9.83311]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [22727844.0, 23021590.0, 22500488.0, 22830910.0, 22739472.0, 22546526.0, 22955764.0, 22588942.0, 22658932.0, 22884080.0]}, "iteration_timing_avg": 0.1246464705882353} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false_vp-1.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false_vp-1.json index 61d841b3d7..c051895065 100644 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false_vp-1.json +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-false_te-false_vp-1.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.8214, 10.8607, 10.8735, 10.85187, 10.84091, 10.80628, 10.6169, 10.59573, 10.50423, 10.22238]}, "num-zeros": 
{"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2348.0, 2524.0, 2517.0, 2205.0, 2198.0, 2558.0, 2398.0]}, "iteration_timing_avg": 0.07500764705882351} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.7951, 10.84939, 10.87411, 10.83459, 10.82865, 10.78676, 10.56492, 10.57063, 10.48545, 10.19548]}, "num-zeros": {"start_step": 0, "end_step": 34, "step_interval": 5, "values": [2561.0, 2771.0, 2141.0, 2656.0, 2737.0, 2472.0, 2991.0]}, "iteration_timing_avg": 0.12433176470588231} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false_vp-1.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false_vp-1.json index a99307432e..3da54b9c18 100644 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false_vp-1.json +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-1_pp-4_mcore-true_te-false_vp-1.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84693, 10.89428, 10.88918, 10.82635, 10.70816, 10.61006, 10.11963, 10.22999, 10.15774, 9.83337]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1636.0, 1747.0, 1846.0, 1868.0, 1856.0, 1652.0, 1638.0, 1903.0, 2315.0, 2381.0]}, "iteration_timing_avg": 0.08791117647058823} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.82096, 10.87269, 10.88192, 10.79677, 10.68633, 10.59654, 10.09776, 10.21294, 10.13909, 9.80679]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1501.0, 1749.0, 1794.0, 1829.0, 1913.0, 1793.0, 1585.0, 1815.0, 2296.0, 2266.0]}, "iteration_timing_avg": 0.12502588235294115} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-dist-optimizer_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-dist-optimizer_mcore-true_te-false.json index 04eb336aac..1818cb41de 100644 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-dist-optimizer_mcore-true_te-false.json +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-dist-optimizer_mcore-true_te-false.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79066, 10.83474, 10.85443, 10.77921, 10.69997, 10.61398, 10.15871, 10.27978, 10.19497, 9.86981]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [30950.0, 37387.0, 37772.0, 36424.0, 33230.0, 34567.0, 30132.0, 34960.0, 36224.0, 37476.0]}, "iteration_timing_avg": 0.20243735294117646} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79896, 10.8594, 10.87122, 10.79881, 10.71717, 10.6354, 10.19743, 10.30887, 10.2168, 9.90751]}, "num-zeros": {"start_step": 0, "end_step": 50, 
"step_interval": 5, "values": [30665.0, 37001.0, 37644.0, 35953.0, 33382.0, 35191.0, 30525.0, 35253.0, 36653.0, 37931.0]}, "iteration_timing_avg": 0.2890776470588235} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-groupedgemm_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-groupedgemm_mcore-true_te-false.json index f464650d3b..f45f321721 100644 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-groupedgemm_mcore-true_te-false.json +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-groupedgemm_mcore-true_te-false.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.80426, 10.84849, 10.86146, 10.81012, 10.72201, 10.64589, 10.2092, 10.32252, 10.23908, 9.92465]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16350.0, 19608.0, 19689.0, 19043.0, 17602.0, 17956.0, 15632.0, 18288.0, 18606.0, 19277.0]}, "iteration_timing_avg": 0.13919470588235297} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.80961, 10.86075, 10.86755, 10.80331, 10.71906, 10.64746, 10.21053, 10.32037, 10.22013, 9.92387]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16604.0, 19509.0, 19801.0, 18644.0, 17084.0, 17721.0, 14980.0, 17754.0, 18357.0, 18520.0]}, "iteration_timing_avg": 0.19267441176470584} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-top2router_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-top2router_mcore-true_te-false.json index 761c53aecb..ade8011335 100644 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-top2router_mcore-true_te-false.json +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel-top2router_mcore-true_te-false.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.78922, 10.8416, 10.85552, 10.77966, 10.65528, 10.56398, 10.04054, 10.17415, 10.08488, 9.73406]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [13541.0, 16797.0, 17213.0, 16564.0, 15382.0, 15817.0, 14915.0, 17089.0, 17939.0, 18387.0]}, "iteration_timing_avg": 0.21506794117647057} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.80682, 10.86708, 10.88001, 10.79339, 10.66648, 10.57654, 10.05866, 10.18464, 10.10235, 9.76286]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [13270.0, 16578.0, 17037.0, 16415.0, 15006.0, 15965.0, 14350.0, 17035.0, 17408.0, 18260.0]}, "iteration_timing_avg": 0.3051714705882352} \ No newline at end of file diff --git 
a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel_mcore-true_te-false.json index f58d4c4ceb..8f14311c51 100644 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel_mcore-true_te-false.json +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-1_args-te-8experts2parallel_mcore-true_te-false.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79066, 10.83467, 10.85342, 10.77851, 10.70005, 10.61316, 10.15957, 10.27971, 10.19511, 9.87028]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16055.0, 19166.0, 19161.0, 18797.0, 17405.0, 17721.0, 15678.0, 18223.0, 18580.0, 19742.0]}, "iteration_timing_avg": 0.20099058823529406} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.79896, 10.8601, 10.87152, 10.79856, 10.71624, 10.6355, 10.19683, 10.30917, 10.21632, 9.90782]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [16152.0, 19202.0, 19645.0, 18594.0, 17375.0, 17768.0, 15576.0, 17888.0, 18387.0, 18810.0]}, "iteration_timing_avg": 0.29991823529411765} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-false.json index a465e34711..457294168c 100644 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-false.json +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-false.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85706, 10.8832, 10.88511, 10.87562, 10.8708, 10.83108, 10.65065, 10.63723, 10.53201, 10.25681]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2479.0, 2534.0, 2786.0, 2310.0, 2385.0, 2586.0, 2472.0]}, "iteration_timing_avg": 0.09594764705882353} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85543, 10.89355, 10.87608, 10.87365, 10.88042, 10.84182, 10.67177, 10.62853, 10.52511, 10.2523]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2472.0, 2462.0, 2480.0, 2235.0, 2268.0, 2619.0, 2429.0]}, "iteration_timing_avg": 0.14061323529411762} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-true.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-true.json index c218a0ad40..ddd7132a35 100644 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-true.json +++ 
b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-false_te-true.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85899, 10.88286, 10.87687, 10.82429, 10.69664, 10.60784, 10.11662, 10.2347, 10.14673, 9.82429]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1627.0, 1874.0, 1894.0, 1862.0, 1901.0, 1649.0, 1553.0, 1949.0, 2281.0, 2225.0]}, "iteration_timing_avg": 0.10429970588235296} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85632, 10.88791, 10.86527, 10.81439, 10.69842, 10.61079, 10.109, 10.21405, 10.12865, 9.80275]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1714.0, 1877.0, 1928.0, 1863.0, 1960.0, 1646.0, 1648.0, 2023.0, 2318.0, 2333.0]}, "iteration_timing_avg": 0.14203264705882354} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-true_te-false.json index 79db29b177..e5c571448d 100644 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-true_te-false.json +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-2_pp-2_mcore-true_te-false.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.86873, 10.891, 10.89716, 10.84022, 10.70435, 10.61599, 10.11661, 10.23183, 10.14875, 9.82429]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1619.0, 1839.0, 1712.0, 1853.0, 1810.0, 1682.0, 1567.0, 1997.0, 2186.0, 2376.0]}, "iteration_timing_avg": 0.1169185294117647} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.92392, 10.93645, 10.89657, 10.86919, 10.74782, 10.658, 10.15864, 10.24906, 10.15088, 9.83933]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1735.0, 1861.0, 2111.0, 1844.0, 1762.0, 1858.0, 1554.0, 2031.0, 2309.0, 2225.0]}, "iteration_timing_avg": 0.15396205882352942} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-false_te-false.json new file mode 100644 index 0000000000..5ead3b3cae --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85921, 10.8797, 10.87381, 10.88658, 10.88912, 10.84826, 10.68571, 10.62947, 10.54289, 10.26918]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2288.0, 2326.0, 2454.0, 2011.0, 2111.0, 2436.0, 2446.0]}, "iteration_timing_avg": 0.2084426470588236} \ No newline at end of file diff --git 
a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false.json index baf2c64a93..ef3ee44978 100644 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false.json +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce-param-gather_mcore-true_te-false.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.84429, 10.86285, 10.86177, 10.81156, 10.69316, 10.61799, 10.16498, 10.25035, 10.15231, 9.83669]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1932.0, 2302.0, 2138.0, 2193.0, 2254.0, 2189.0, 1844.0, 2313.0, 2538.0, 2473.0]}, "iteration_timing_avg": 0.16636205882352936} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.86174, 10.88685, 10.8766, 10.83063, 10.71362, 10.60782, 10.13037, 10.2308, 10.15865, 9.83394]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1747.0, 2204.0, 2128.0, 2098.0, 2033.0, 1943.0, 1761.0, 2152.0, 2427.0, 2590.0]}, "iteration_timing_avg": 0.22043823529411763} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-false_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-false_te-false.json new file mode 100644 index 0000000000..9c4d0796ed --- /dev/null +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-false_te-false.json @@ -0,0 +1 @@ +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.85921, 10.8797, 10.87381, 10.88658, 10.88912, 10.84826, 10.68571, 10.62947, 10.54289, 10.26918]}, "num-zeros": {"start_step": 0, "end_step": 33, "step_interval": 5, "values": [2288.0, 2326.0, 2454.0, 2011.0, 2111.0, 2436.0, 2446.0]}, "iteration_timing_avg": 0.20483676470588236} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false.json b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false.json index 5db54e4e03..447f6efaf8 100644 --- a/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false.json +++ b/tests/functional_tests/test_results/jet/gpt3_345m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-50_tp-4_pp-1_args-dist-optimizer-overlap-grad-reduce_mcore-true_te-false.json @@ -1 +1 @@ -{"lm loss": {"start_step": 
0, "end_step": 50, "step_interval": 5, "values": [10.84429, 10.86285, 10.86177, 10.81156, 10.69316, 10.61799, 10.16498, 10.25035, 10.15231, 9.83669]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1932.0, 2302.0, 2138.0, 2193.0, 2254.0, 2189.0, 1844.0, 2313.0, 2538.0, 2473.0]}, "iteration_timing_avg": 0.1574994117647059} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [10.86174, 10.88685, 10.8766, 10.83063, 10.71362, 10.60782, 10.13037, 10.2308, 10.15865, 9.83394]}, "num-zeros": {"start_step": 0, "end_step": 50, "step_interval": 5, "values": [1747.0, 2204.0, 2128.0, 2098.0, 2033.0, 1943.0, 1761.0, 2152.0, 2427.0, 2590.0]}, "iteration_timing_avg": 0.2256223529411765} \ No newline at end of file diff --git a/tests/functional_tests/test_results/jet/t5_220m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-1_mcore-true_te-true_vp-1.json b/tests/functional_tests/test_results/jet/t5_220m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-1_mcore-true_te-true_vp-1.json index 5b613dea44..e0b067d9f2 100644 --- a/tests/functional_tests/test_results/jet/t5_220m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-1_mcore-true_te-true_vp-1.json +++ b/tests/functional_tests/test_results/jet/t5_220m_mcore-pyt_merge-request_bf16_nodes-1_gpus-8_bs-32_steps-100_tp-1_pp-1_mcore-true_te-true_vp-1.json @@ -1 +1 @@ -{"lm loss": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [10.34848, 9.45337, 8.89369, 8.56467, 8.28131, 8.12832, 7.82238, 7.55462, 7.42172, 7.28716, 7.32811, 7.22045, 7.11648, 7.03859, 6.87728, 6.94356, 6.94705, 7.02828, 6.71597, 6.9486]}, "num-zeros": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [43307.0, 40999.0, 44043.0, 41749.0, 44811.0, 44001.0, 41304.0, 42490.0, 44698.0, 43956.0, 41137.0, 43230.0, 39726.0, 45427.0, 43358.0, 43930.0, 45426.0, 45701.0, 46301.0, 44734.0]}, "iteration_timing_avg": 0.12808164179104478} \ No newline at end of file +{"lm loss": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [10.33235, 9.41913, 8.85861, 8.55638, 8.28439, 8.11201, 7.83824, 7.54562, 7.41436, 7.31027, 7.34805, 7.22802, 7.12902, 7.06142, 6.91137, 6.96105, 6.96531, 7.04832, 6.7364, 6.97504]}, "num-zeros": {"start_step": 0, "end_step": 100, "step_interval": 5, "values": [43300.0, 40964.0, 44028.0, 41739.0, 44792.0, 43949.0, 41300.0, 42529.0, 44700.0, 43963.0, 41174.0, 43285.0, 39762.0, 45371.0, 43317.0, 43929.0, 45404.0, 45705.0, 46310.0, 44692.0]}, "iteration_timing_avg": 0.17640776119402987} \ No newline at end of file diff --git a/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_test.sh b/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_test.sh index e2abaa51fc..3dbfd683ec 100755 --- a/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_test.sh +++ b/tests/functional_tests/test_scripts/bert/pretrain_bert_distributed_test.sh @@ -12,7 +12,7 @@ do done echo "---------------------------------" -set -x +set -exo pipefail if [[ -z $MBS ]]; then MBS=4; fi if [[ -z $GBS ]]; then GBS=128; fi if [[ -z $VOCAB_FILE ]]; then VOCAB_FILE="/workspace/data/bert_data/vocab.txt" ; fi diff --git a/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh b/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh index 07439bc56f..b6ef7f2ce5 100755 --- a/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh +++ 
b/tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh @@ -12,7 +12,7 @@ do done echo "---------------------------------" -set -x +set -exo pipefail if [[ -z $MBS ]]; then MBS=4; fi if [[ -z $GBS ]]; then GBS=32; fi if [[ -z $MOE_GROUPED_GEMM ]]; then MOE_GROUPED_GEMM=0; fi diff --git a/tests/functional_tests/test_scripts/retro/pretrain_retro_distributed_test.sh b/tests/functional_tests/test_scripts/retro/pretrain_retro_distributed_test.sh index 7e1a81ad82..b06dc336f8 100755 --- a/tests/functional_tests/test_scripts/retro/pretrain_retro_distributed_test.sh +++ b/tests/functional_tests/test_scripts/retro/pretrain_retro_distributed_test.sh @@ -13,7 +13,7 @@ do done echo "---------------------------------" -set -x +set -exo pipefail if [[ -z $MBS ]]; then MBS=4; fi GPUS_PER_NODE=8 diff --git a/tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_test.sh b/tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_test.sh index e84fda8c19..241d844839 100755 --- a/tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_test.sh +++ b/tests/functional_tests/test_scripts/t5/pretrain_t5_distributed_test.sh @@ -12,7 +12,7 @@ do done echo "---------------------------------" -set -x +set -exo pipefail if [[ -z $MBS ]]; then MBS=4; fi if [[ -z $GBS ]]; then GBS=32; fi if [[ -z $VOCAB_PATH ]]; then VOCAB_PATH="/workspace/data/t5_data/bert-large-cased-vocab.txt"; fi From 7bc3c7412bcc17d0acf226a57f937fad1a7b1e8a Mon Sep 17 00:00:00 2001 From: Tuomas Rintamaki Date: Fri, 1 Mar 2024 12:13:12 -0800 Subject: [PATCH 288/296] Mcore LLaVA model --- megatron/core/models/multimodal/__init__.py | 0 .../core/models/multimodal/llava_model.py | 119 ++++++++++++++++++ tests/unit_tests/models/test_llava_model.py | 71 +++++++++++ 3 files changed, 190 insertions(+) create mode 100644 megatron/core/models/multimodal/__init__.py create mode 100644 megatron/core/models/multimodal/llava_model.py create mode 100644 tests/unit_tests/models/test_llava_model.py diff --git a/megatron/core/models/multimodal/__init__.py b/megatron/core/models/multimodal/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/megatron/core/models/multimodal/llava_model.py b/megatron/core/models/multimodal/llava_model.py new file mode 100644 index 0000000000..3ab4d1a98c --- /dev/null +++ b/megatron/core/models/multimodal/llava_model.py @@ -0,0 +1,119 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + +import torch + +from megatron.core import parallel_state, tensor_parallel +from megatron.core.models.gpt import GPTModel +from megatron.core.models.vision.clip_vit_model import CLIPViTModel +from megatron.core.transformer import MegatronModule +from megatron.core.transformer.spec_utils import ModuleSpec +from megatron.core.transformer.transformer_config import TransformerConfig + + +# Note: This is unused at the moment and may be missing features. Follow-up changes will use this. +class LLaVAModel(MegatronModule): + """LLaVA multi-modal model. + + Args: + language_transformer_config (TransformerConfig): Transformer config for the language model. + language_transformer_layer_spec (ModuleSpec): Specifies module to use for transformer layers of the language model. + vocab_size (int): Vocabulary size. + max_sequence_length (int): maximum sequence length. This is used for positional embedding. + vision_transformer_config (TransformerConfig): Transformer config for the vision model. 
+ vision_transformer_layer_spec (ModuleSpec): Specifies module to use for transformer layers of the vision model. + """ + + def __init__( + self, + language_transformer_config: TransformerConfig, + language_transformer_layer_spec: ModuleSpec, + vocab_size: int, + max_sequence_length: int, + vision_transformer_config: TransformerConfig, + vision_transformer_layer_spec: ModuleSpec, + ) -> None: + super().__init__(config=language_transformer_config) + + if parallel_state.get_pipeline_model_parallel_world_size() > 1: + raise NotImplementedError("pipeline parallelism is not supported in this model yet.") + + self.language_model = GPTModel( + language_transformer_config, + language_transformer_layer_spec, + vocab_size, + max_sequence_length, + ) + + self.vision_model = CLIPViTModel(vision_transformer_config, vision_transformer_layer_spec) + + # Map (intermediate) vision model outputs to the language model input dimension. + # TODO: Separate work is adding a configurable multimodal projection layer. Replace this with that one. + self._vision_projection = tensor_parallel.ColumnParallelLinear( + vision_transformer_config.hidden_size, + language_transformer_config.hidden_size, + config=vision_transformer_config, + init_method=vision_transformer_config.init_method, + bias=False, + skip_bias_add=True, + gather_output=True, + ) + + def set_input_tensor(self, input_tensor: torch.Tensor) -> None: + """Sets input tensor to the model. + + NOTE: Pipeline parallelism is not supported in this model yet. This is just a placeholder implementation. + + Args: + input_tensor (Tensor): Sets the input tensor for the model. + """ + self.vision_model.set_input_tensor(input_tensor) + + def forward( + self, + image: torch.Tensor, + input_ids: torch.Tensor, + position_ids: torch.Tensor, + attention_mask: torch.Tensor, + labels: torch.Tensor = None, + ) -> torch.Tensor: + """Forward function of the LLaVA model. + + Args: + image (torch.Tensor): input image of shape [batch, img_h, img_w]. + input_ids (torch.Tensor): input text ids [batch, text_seq_len]. + position_ids (torch.Tensor): input text position ids [batch, text_seq_len]. + attention_mask (torch.Tensor): attention mask for the language model [batch, 1, combined_seq_len, combined_seq_len]. + labels (torch.Tensor): Optional target text labels [batch, combined_seq_len]. + + Returns: + output (torch.Tensor): Loss of shape [b, s] if labels are provided, otherwise logits of shape [b, s, vocab_size]. + """ + image_embeddings = self.vision_model(image) # [b, img_seq_len, h_vision] + + # map vision model output size to language model input size. + image_embeddings, _ = self._vision_projection( + image_embeddings + ) # [b, img_seq_len, h_language] + + image_embeddings = image_embeddings.permute(1, 0, 2) # [img_seq_len, b, h_language] + language_embeddings = self.language_model.embedding( + input_ids=input_ids, position_ids=position_ids + ) # [text_seq_len, b, h_language] + combined_embeddings = torch.cat( + [image_embeddings, language_embeddings], dim=0 + ) # [combined_seq_len, b, h_language] + + # Embedding is computed above so we can discard input and position ids. + input_ids = None + position_ids = None + + # Note: This returns loss if labels are provided, otherwise logits. 
+ output = self.language_model( + input_ids, + position_ids, + attention_mask, + decoder_input=combined_embeddings, + labels=labels, + ) + + return output diff --git a/tests/unit_tests/models/test_llava_model.py b/tests/unit_tests/models/test_llava_model.py new file mode 100644 index 0000000000..4f947ba681 --- /dev/null +++ b/tests/unit_tests/models/test_llava_model.py @@ -0,0 +1,71 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + +import pytest +import torch + +from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec +from megatron.core.models.multimodal.llava_model import LLaVAModel +from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed +from megatron.core.transformer.transformer_config import TransformerConfig +from tests.unit_tests.test_utilities import Utils + + +class TestLLaVAModel: + def setup_method(self, method): + Utils.initialize_model_parallel(1, 1) + model_parallel_cuda_manual_seed(123) + language_config = TransformerConfig( + num_layers=3, hidden_size=128, num_attention_heads=8, use_cpu_initialization=True + ) + vision_config = TransformerConfig( + num_layers=2, hidden_size=64, num_attention_heads=4, use_cpu_initialization=True + ) + layer_spec = get_gpt_layer_with_transformer_engine_spec() + self.model = LLaVAModel( + language_transformer_config=language_config, + language_transformer_layer_spec=layer_spec, + vocab_size=2048, + max_sequence_length=1024, + vision_transformer_config=vision_config, + vision_transformer_layer_spec=layer_spec, + ) + + def teardown_method(self, method): + Utils.destroy_model_parallel() + + def test_constructor(self): + assert isinstance(self.model, LLaVAModel) + + num_weights = sum([p.numel() for p in self.model.parameters()]) + assert num_weights == 1433472 + + def test_set_input_tensor(self): + expected_shape = (1, 2, 3, 4) + input_tensor = torch.zeros(expected_shape) + self.model.set_input_tensor(input_tensor) + assert self.model.vision_model.transformer.input_tensor.shape == expected_shape + + def test_forward(self): + self.model.cuda() + + img = torch.randn((2, 3, 336, 336)).cuda() + input_ids = torch.randint(0, 2048, (2, 1024)).cuda() + position_ids = torch.arange(0, 1024, dtype=torch.int).cuda() + position_ids = position_ids.expand(2, 1024) + # With default image and patch sizes of 336 and 14, respectively, and a class token, the combined sequence length is 1024 + (336/14) ** 2 + 1 = 1601. + attention_mask = torch.tril(torch.ones((2, 1, 1601, 1601))).cuda() + attention_mask = attention_mask < 0.5 + labels = torch.randint(0, 2048, (2, 1601)).cuda() + + # Try with and without labels. 
+ loss = self.model.forward(img, input_ids, position_ids, attention_mask, labels) + assert loss.shape == torch.Size((2, 1601)) + + logits = self.model.forward(img, input_ids, position_ids, attention_mask, labels=None) + assert logits.shape == torch.Size((2, 1601, 2048)) + + def test_save_load(self, tmp_path): + path = tmp_path / "model.pt" + torch.save(self.model.state_dict(), path) + + self.model.load_state_dict(torch.load(path)) From 80e180d7e26da12c9717d589e26de25614330829 Mon Sep 17 00:00:00 2001 From: Chenhan Yu Date: Fri, 1 Mar 2024 15:19:36 -0800 Subject: [PATCH 289/296] [OMNIML-614] AMMO ptq + TensorRT-LLM export examples for megatron-lm --- README.md | 6 + examples/deploy/README.md | 132 +++++++++ examples/deploy/ptq_trtllm_llama_7b.sh | 79 +++++ examples/deploy/ptq_trtllm_nemotron3_8b.sh | 75 +++++ examples/deploy/text_generation_ptq.py | 273 ++++++++++++++++++ examples/deploy/trtllm_text_generation.py | 93 ++++++ megatron/core/deploy/__init__.py | 1 + megatron/core/deploy/gpt/__init__.py | 1 + megatron/core/deploy/gpt/model_specs.py | 50 ++++ megatron/core/deploy/gpt/state_dict_hooks.py | 126 ++++++++ megatron/core/models/gpt/gpt_model.py | 4 + .../core/transformer/transformer_config.py | 4 + megatron/deploy/__init__.py | 1 + megatron/deploy/arguments.py | 25 ++ megatron/deploy/gpt/__init__.py | 1 + megatron/deploy/gpt/model_provider.py | 73 +++++ 16 files changed, 944 insertions(+) create mode 100644 examples/deploy/README.md create mode 100644 examples/deploy/ptq_trtllm_llama_7b.sh create mode 100644 examples/deploy/ptq_trtllm_nemotron3_8b.sh create mode 100644 examples/deploy/text_generation_ptq.py create mode 100644 examples/deploy/trtllm_text_generation.py create mode 100644 megatron/core/deploy/__init__.py create mode 100644 megatron/core/deploy/gpt/__init__.py create mode 100644 megatron/core/deploy/gpt/model_specs.py create mode 100644 megatron/core/deploy/gpt/state_dict_hooks.py create mode 100644 megatron/deploy/__init__.py create mode 100644 megatron/deploy/arguments.py create mode 100644 megatron/deploy/gpt/__init__.py create mode 100644 megatron/deploy/gpt/model_provider.py diff --git a/README.md b/README.md index bc8f93bb90..a7a06c621d 100644 --- a/README.md +++ b/README.md @@ -519,6 +519,12 @@ The Llama-2 [family of models](https://ai.meta.com/llama/) are an open-source se The Llama-2 checkpoints can be loaded into Megatron for inference and finetuning. See documentation [here](docs/llama2.md). +# Model Optimization and Deployment +Megatron-Core (MCore) `GPTModel` family supports advanced quantization algorithms and high-performance deployment through TensorRT-LLM. + +## Quantization and TensorRT-LLM Deployment +See [Megatron Model Optimization and Deployment](examples/modelopt/README.md) for `llama2` and `nemotron3` examples. + # Datasets We do not host any datasets for GPT or BERT training, however, we detail their collection so that our results may be reproduced. 
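Before the new `examples/deploy` files below, here is a minimal sketch of the core step they implement: wrapping Megatron's text-generation forward pass in a calibration loop and handing it to `atq.quantize`. The sketch is distilled from the `text_generation_ptq.py` script added below; `model` (a one-element list holding the unwrapped MCore `GPTModel`) and `calib_batches` (an iterable of prompt lists) are hypothetical placeholders for objects produced by the usual Megatron setup, not part of the patch. The real script adds rank checks, calibration-dataset loading, checkpoint saving, and TensorRT-LLM engine export on top of this call.

```python
# Minimal PTQ sketch, assuming Megatron is already initialized and `model` /
# `calib_batches` are provided by the surrounding setup (placeholders here).
import ammo.torch.quantization as atq
from megatron.text_generation import generate_and_post_process

def forward_loop():
    # Forward-only calibration passes (tokens_to_generate=0) so the inserted
    # quantizers can record activation statistics on the calibration prompts.
    for prompts in calib_batches:
        generate_and_post_process(model[0], prompts=prompts, tokens_to_generate=0)

quant_cfg = atq.FP8_DEFAULT_CFG  # or atq.INT8_SMOOTHQUANT_CFG, atq.INT4_AWQ_CFG, ...
atq.quantize(model[0], quant_cfg, forward_loop)
```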
diff --git a/examples/deploy/README.md b/examples/deploy/README.md
new file mode 100644
index 0000000000..c63993e9ca
--- /dev/null
+++ b/examples/deploy/README.md
@@ -0,0 +1,132 @@
+# Megatron Model Optimization and Deployment
+
+## Installation
+We recommend that users follow TensorRT-LLM's official installation guide to build it from source
+and proceed with a containerized environment (`docker.io/tensorrt_llm/release:latest`):
+
+```
+git clone https://github.com/NVIDIA/TensorRT-LLM.git
+cd TensorRT-LLM
+git checkout v0.7.1
+make -C docker release_build
+```
+
+> **TROUBLESHOOTING:** rather than copying each folder separately in `docker/Dockerfile.multi`,
+> you may need to copy the entire directory with `COPY ./ /src/tensorrt_llm`, since a `git submodule`
+> command is called later and requires `.git` to be present.
+
+Once the container is built, install `nvidia-ammo` and additional dependencies for sharded checkpoint support:
+```
+pip install --no-cache-dir --extra-index-url https://pypi.nvidia.com nvidia-ammo
+pip install zarr tensorstore==0.1.45
+```
+TensorRT-LLM quantization functionalities are currently packaged in `nvidia-ammo`.
+You can find more documentation about `nvidia-ammo` in [TensorRT-LLM's quantization
+examples](https://github.com/NVIDIA/TensorRT-LLM/tree/main/examples/quantization).
+
+## Support Matrix
+
+The following matrix shows the current support for the PTQ + TensorRT-LLM export flow.
+
+| model           | fp16 | int8_sq | fp8 | int4_awq |
+|-----------------|------|---------|-----|----------|
+| nextllm-2b      | x    | x       | x   |          |
+| nemotron3-8b    | x    |         | x   |          |
+| nemotron3-15b   | x    |         | x   |          |
+| llama2-text-7b  | x    | x       | x   | TP2      |
+| llama2-chat-70b | x    | x       | x   | TP4      |
+
+Our PTQ + TensorRT-LLM flow has native support on MCore `GPTModel` with a mixed layer spec (native ParallelLinear
+and Transformer-Engine Norm, `TENorm`). Note that this is not the default mcore gpt spec. You can still load the
+following checkpoint formats with some remedies:
+
+| GPTModel                          | sharded | remedy arguments                        |
+|-----------------------------------|---------|-----------------------------------------|
+| megatron.model                    |         | `--ammo-load-classic-megatron-to-mcore` |
+| TE-Fused (default mcore gpt spec) |         | `--ammo-convert-te-to-local-spec`       |
+| TE-Fused (default mcore gpt spec) | x       |                                         |
+
+> **TROUBLESHOOTING:** If you are trying to load an unpacked `.nemo` sharded checkpoint, then typically you will
+> need to add `additional_sharded_prefix="model."` to `ammo_load_checkpoint()` since NeMo has an additional
+> `model.` wrapper on top of the `GPTModel`.
+
+> **NOTE:** flag `--ammo-load-classic-megatron-to-mcore` may not work on all legacy checkpoint versions.
+
+## Examples
+
+> **NOTE:** We only provide a simple text generation script to test the generated TensorRT-LLM engines. For
+> a production-level API server or enterprise support, see [NeMo](https://github.com/NVIDIA/NeMo) and TensorRT-LLM's
+> backend for [NVIDIA Triton Inference Server](https://developer.nvidia.com/nvidia-triton-inference-server).
+
+### nemotron3-8B FP8 Quantization and TensorRT-LLM Deployment
+First download the nemotron checkpoint from https://huggingface.co/nvidia/nemotron-3-8b-base-4k, extract the
+sharded checkpoint from the `.nemo` tarball, and fix the tokenizer file name.
+
+> **NOTE:** The following cloning method uses `ssh` and assumes you have registered your `ssh-key` with Hugging Face.
+> If you want to clone with `https`, use `git clone https://huggingface.co/nvidia/nemotron-3-8b-base-4k` with an access token.
+
+```sh
+git lfs install
+git clone git@hf.co:nvidia/nemotron-3-8b-base-4k
+cd nemotron-3-8b-base-4k
+tar -xvf Nemotron-3-8B-Base-4k.nemo
+mv 586f3f51a9cf43bc9369bd53fa08868c_a934dc7c3e1e46a6838bb63379916563_3feba89c944047c19d5a1d0c07a85c32_mt_nlg_plus_multilingual_ja_zh_the_stack_frac_015_256k.model mt_nlg_plus_multilingual_ja_zh_the_stack_frac_015_256k.model
+cd ..
+```
+
+Now launch the PTQ + TensorRT-LLM export script:
+```
+bash examples/deploy/ptq_trtllm_nemotron3_8b.sh ./nemotron-3-8b-base-4k None
+```
+By default, `cnn_dailymail` is used for calibration. The `GPTModel` will have quantizers for simulating the
+quantization effect. The checkpoint can optionally be saved (with quantizers as additional states) and can
+be restored for further evaluation. The TensorRT-LLM engine is exported to `/tmp/ammo` by default.
+
+The script expects `${CHECKPOINT_DIR}` (`./nemotron-3-8b-base-4k`) to have the following structure:
+```
+├── model_weights
+│   ├── common.pt
+│   ...
+│
+├── model_config.yaml
+├── mt_nlg_plus_multilingual_ja_zh_the_stack_frac_015_256k.model
+```
+
+> **NOTE:** The script uses `TP=8`. Change `$TP` in the script if your checkpoint has a different tensor
+> model parallelism.
+
+> **KNOWN ISSUES:** The `mt_nlg_plus_multilingual_ja_zh_the_stack_frac_015_256k.model` in the checkpoint is for
+> Megatron-LM's `GPTSentencePiece` tokenizer.
+> For TensorRT-LLM, we are trying to load this tokenizer as a Hugging Face `T5Tokenizer` by changing
+> some special tokens, `encode`, and `batch_decode`. As a result, the tokenizer behavior in the TensorRT-LLM engine may
+> not match exactly.
+
+> **TROUBLESHOOTING:** If you are loading a `.nemo` sharded checkpoint here, call
+> `ammo_load_checkpoint(..., additional_sharded_prefix="model.")` with the additional sharded prefix in
+> `text_generation_ptq.py` to align the sharded keys.
+
+### llama2-text-7b INT8 SmoothQuant and TensorRT-LLM Deployment
+> **NOTE:** Due to licensing, we do not provide an MCore checkpoint to download. Users can follow
+> the instructions in `docs/llama2.md` to convert the checkpoint to the classic Megatron `GPTModel` format and
+> use the `--ammo-load-classic-megatron-to-mcore` flag, which remaps the checkpoint to the MCore `GPTModel` spec
+> that we support.
+
+```sh
+bash examples/deploy/ptq_trtllm_llama_7b.sh ${CHECKPOINT_DIR}
+```
+
+The script expects `${CHECKPOINT_DIR}` to have the following structure:
+```
+├── hf
+│   ├── tokenizer.config
+│   ├── tokenizer.model
+│   ...
+│
+├── iter_0000001
+│   ├── mp_rank_00
+│   ...
+│
+├── latest_checkpointed_iteration.txt
+```
+In short, in addition to the converted Llama Megatron checkpoint, also place the Hugging Face checkpoint inside as
+the source of the tokenizer.
diff --git a/examples/deploy/ptq_trtllm_llama_7b.sh b/examples/deploy/ptq_trtllm_llama_7b.sh
new file mode 100644
index 0000000000..dc936c82ac
--- /dev/null
+++ b/examples/deploy/ptq_trtllm_llama_7b.sh
@@ -0,0 +1,79 @@
+#!/bin/bash
+DEFAULT_NAME="/checkpoints/llama2-text-7b_v0.2.0"
+NAME="${1:-$DEFAULT_NAME}"
+
+DEFAULT_QUANT_CFG="int8_sq"
+QUANT_CFG="${2:-$DEFAULT_QUANT_CFG}"
+
+# CHANGE THE FOLLOWING IF YOU MOUNT YOUR DATA AND CHECKPOINTS DIFFERENTLY IN THE CONTAINER.
+TP="8"
+PP=1
+INFERENCE_TP=${TP}
+DECODER_TYPE="llama"
+CHECKPOINT_LOAD_DIR="${NAME}"
+TOKENIZER_MODEL="${CHECKPOINT_LOAD_DIR}/hf/tokenizer.model"
+
+# LLaMA2 text 7b has ffn_hidden_size 11008.
int4_awq requires a block_size of 128 as a result the TP can at most be 2 +if [ "$QUANT_CFG" = "int4_awq" ]; then + INFERENCE_TP="2" +fi + +additional_options=" \ + --ammo-quant-cfg ${QUANT_CFG} \ + --ammo-load-classic-megatron-to-mcore \ + --decoder ${DECODER_TYPE} \ + --engine-dir /tmp/ammo \ + --max-input-len 2048 \ + --max-output-len 512 \ + --max-batch-size 8 \ + --inference-tensor-parallel ${INFERENCE_TP} " + +trtllm_options=" \ + --engine-dir /tmp/ammo \ + --tokenizer ${CHECKPOINT_LOAD_DIR}/hf \ + --max-output-len 512 " + +# DO NOT CHANGE THE SETTING BELOW UNLESS YOU KNOW WHAT YOU ARE DOING!!! +export CUDA_DEVICE_MAX_CONNECTIONS=1 + +options=" \ + --disable-bias-linear \ + --swiglu \ + --untie-embeddings-and-output-weights \ + --use-rotary-position-embeddings \ + --normalization RMSNorm \ + --norm-epsilon 1e-5 \ + --no-position-embedding \ + --no-masked-softmax-fusion \ + --no-bias-gelu-fusion \ + --no-bias-dropout-fusion \ + --no-async-tensor-model-parallel-allreduce \ + --tensor-model-parallel-size ${TP} \ + --pipeline-model-parallel-size 1 \ + --num-layers 32 \ + --hidden-size 4096 \ + --ffn-hidden-size 11008 \ + --num-attention-heads 32 \ + --seq-length 2048 \ + --max-position-embeddings 4096 \ + --micro-batch-size 1 \ + --make-vocab-size-divisible-by 1 \ + --tokenizer-type Llama2Tokenizer \ + --tokenizer-model ${TOKENIZER_MODEL} \ + --save-interval 1000000 \ + --bf16 \ + --use-mcore-models " + +set +x + +# Precompile CUDA extentions +python -c "import ammo.torch.quantization.extensions as ext; print(ext.cuda_ext); print(ext.cuda_ext_fp8)" + +# Acquire launch configuration where variable launch_config will be set +launch_config="--nproc_per_node=${TP}" + +# Launch multi-process with torchrun +torchrun ${launch_config} examples/deploy/text_generation_ptq.py ${options} ${additional_options} --load ${CHECKPOINT_LOAD_DIR} + +# This script is using mpi4py which will fork multiple processes. +python examples/deploy/trtllm_text_generation.py ${trtllm_options} diff --git a/examples/deploy/ptq_trtllm_nemotron3_8b.sh b/examples/deploy/ptq_trtllm_nemotron3_8b.sh new file mode 100644 index 0000000000..418021b102 --- /dev/null +++ b/examples/deploy/ptq_trtllm_nemotron3_8b.sh @@ -0,0 +1,75 @@ +#!/bin/bash +DEFAULT_NAME="/checkpoints/nemotron3-8b_v0.2.0" +NAME="${1:-$DEFAULT_NAME}" + +DEFAULT_QUANT_CFG="fp8" +QUANT_CFG="${2:-$DEFAULT_QUANT_CFG}" + +# CHANGE THE FOLLOWING IF YOU MOUNT YOUR DATA AND CHECKPOINTS DIFFERENTLY IN THE CONTAINER. +TP="8" +INFERENCE_TP=${TP} +DECODER_TYPE="gptnext" +CHECKPOINT_LOAD_DIR="${NAME}" +TOKENIZER_MODEL="${CHECKPOINT_LOAD_DIR}/mt_nlg_plus_multilingual_ja_zh_the_stack_frac_015_256k.model" + +if [ "$QUANT_CFG" = "int4_awq" ]; then + INFERENCE_TP="1" +fi + +additional_options=" \ + --ammo-quant-cfg ${QUANT_CFG} \ + --ammo-load-classic-megatron-to-mcore \ + --decoder ${DECODER_TYPE} \ + --engine-dir /tmp/ammo \ + --max-input-len 2048 \ + --max-output-len 512 \ + --max-batch-size 8 \ + --inference-tensor-parallel ${INFERENCE_TP} " + +trtllm_options=" \ + --engine-dir /tmp/ammo \ + --tokenizer ${TOKENIZER_MODEL} \ + --max-output-len 512 " + +# DO NOT CHANGE THE SETTING BELOW UNLESS YOU KNOW WHAT YOU ARE DOING!!! 
+export CUDA_DEVICE_MAX_CONNECTIONS=1 + +options=" \ + --apply-layernorm-1p \ + --untie-embeddings-and-output-weights \ + --disable-bias-linear \ + --no-position-embedding \ + --use-rotary-position-embeddings \ + --rotary-percent 0.5 \ + --squared-relu \ + --attention-dropout 0.0 \ + --hidden-dropout 0.0 \ + --tensor-model-parallel-size ${TP} \ + --pipeline-model-parallel-size 1 \ + --num-layers 32 \ + --hidden-size 4096 \ + --num-attention-heads 32 \ + --seq-length 4096 \ + --max-position-embeddings 4096 \ + --micro-batch-size 1 \ + --tokenizer-type GPTSentencePieceTokenizer \ + --tokenizer-model ${TOKENIZER_MODEL} \ + --save-interval 1000000 \ + --load ${CHECKPOINT_LOAD_DIR} \ + --bf16 \ + --use-mcore-models " + +set +x + +# Precompile CUDA extentions +python -c "import ammo.torch.quantization.extensions as ext; print(ext.cuda_ext); print(ext.cuda_ext_fp8)" + +# Acquire launch configuration where variable launch_config will be set +launch_config="--nproc_per_node=${TP}" + +# Launch multi-process with torchrun +torchrun ${launch_config} examples/deploy/text_generation_ptq.py ${options} ${additional_options} --load ${CHECKPOINT_LOAD_DIR} + +# This script is using mpi4py which will fork multiple processes. +python examples/deploy/trtllm_text_generation.py ${trtllm_options} + diff --git a/examples/deploy/text_generation_ptq.py b/examples/deploy/text_generation_ptq.py new file mode 100644 index 0000000000..db25a5a4c7 --- /dev/null +++ b/examples/deploy/text_generation_ptq.py @@ -0,0 +1,273 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + +"""Sample Generate GPT.""" +import functools +import os +import sys +from pathlib import Path + +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../../"))) + +import ammo.torch.quantization as atq +import torch +from datasets import load_dataset + +# [ModelOpt]: changing the default model provider to the AMMO version +from megatron import get_args, print_rank_0 +from megatron.checkpointing import load_checkpoint, save_checkpoint +from megatron.core import mpu +from megatron.core.dist_checkpointing import load +from megatron.deploy.arguments import add_ammo_args +from megatron.deploy.gpt.model_provider import model_provider +from megatron.initialize import initialize_megatron +from megatron.text_generation import generate_and_post_process +from megatron.training import get_model +from megatron.utils import unwrap_model + +QUANT_CFG_CHOICES = { + "int8": atq.INT8_DEFAULT_CFG, + "int8_sq": atq.INT8_SMOOTHQUANT_CFG, + "fp8": atq.FP8_DEFAULT_CFG, + "int4_awq": atq.INT4_AWQ_CFG, + "w4a8_awq": atq.W4A8_AWQ_BETA_CFG, +} + + +def add_trtllm_args(parser): + """Add additional arguments for TensorRT-LLM.""" + group = parser.add_argument_group(title="trtllm") + + group.add_argument( + "--engine-dir", type=str, help="The output TensorRT-LLM engine dir.", + ) + group.add_argument( + "--decoder", type=str, choices=["gptnext", 'llama'], help="The decoder type of the model.", + ) + group.add_argument("--max-input-len", type=int, help="Max input sequence length.", default=2048) + group.add_argument( + "--max-output-len", type=int, help="Max output sequence length.", default=512 + ) + group.add_argument("--max-batch-size", type=int, help="Max batch size.", default=32) + group.add_argument( + "--inference-tensor-parallel", + type=int, + help="Tensor parallel for the inference time, can be different from the training config.", + default=1, + ) + + +def add_text_generate_ptq_args(parser): + """Add additional arguments for AMMO text 
generation PTQ.""" + group = parser.add_argument_group(title='AMMO text generation ptq') + group.add_argument( + "--calib-dataset", + type=str, + default="cnn_dailymail", + help="Calibration datasets from HuggingFace datasets.", + ) + group.add_argument( + "--calib-steps", type=int, default=512, help="Steps to perform atq.quantize calibration." + ) + parser.add_argument( + "--prompts", + type=str, + default=( + "Born in north-east France, Soyer trained as a|Born in California, Soyer trained as a" + ), + help="Input texts. Please use | to separate different batches.", + ) + add_ammo_args(parser) + add_trtllm_args(parser) + return parser + + +def get_calib_dataloader( + data="cnn_dailymail", batch_size=4, calib_size=512, max_sequence_length=512 +): + if data == "wikitext": + dataset = load_dataset("wikitext", "wikitext-103-v1", split="train") + text_column = "text" + elif data == "cnn_dailymail": + dataset = load_dataset("cnn_dailymail", name="3.0.0", split="train") + text_column = "article" + + calib_size = max(min(len(dataset), calib_size), batch_size) + for i in range(calib_size // batch_size): + batch = dataset[i * batch_size : (i + 1) * batch_size][text_column] + for j in range(len(batch)): + batch[j] = batch[j][:max_sequence_length] + yield batch + + +def ammo_load_checkpoint( + model, optimizer=None, opt_param_scheduler=None, strict=True, additional_sharded_prefix="" +): + """Load a megatron checkpoint depending its format. + + Args: + model: MCoreGPTModel instance + optimizer: Megatron optimizer instance + opt_param_scheduler: Megatron scheduler instance + strict: if True, no extra or missing keys are allowed while loading the state_dict + additional_sharded_prefix (str): Append additional prefix to align the sharded checkpoint keys. When loading + an .nemo sharded checkpoint, this is usually `model.`. Otherwise, this is typically an empty string. 
+ """ + + def _remove_prefix_state_dict_pre_hook( + state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs, + ): + """Pytorch _load_state_dict_pre_hook to remap the state_dict with the additional sharded prefix.""" + if additional_sharded_prefix is None: + return + key_rewrite_list = [] + for key, _ in state_dict.items(): + if key.startswith(additional_sharded_prefix): + key_rewrite_list.append(key) + for old_key in key_rewrite_list: + new_key = old_key[len(additional_sharded_prefix) :] + state_dict[new_key] = state_dict.pop(old_key) + + args = get_args() + load_dir = args.load + + shared_model_state_dir = "model_weights" + sharded_load_dir = Path(load_dir + "/" + shared_model_state_dir) + + if sharded_load_dir.exists() and optimizer is None and opt_param_scheduler is None: + unwrapped_model = unwrap_model(model) + shareded_state_dict = unwrapped_model[0].sharded_state_dict( + prefix=additional_sharded_prefix + ) + if additional_sharded_prefix: + unwrapped_model[0]._register_load_state_dict_pre_hook( + _remove_prefix_state_dict_pre_hook + ) + unwrapped_model[0].load_state_dict(load(shareded_state_dict, sharded_load_dir)) + else: + _ = load_checkpoint(model, optimizer, opt_param_scheduler, strict=strict) + + +if __name__ == "__main__": + initialize_megatron( + extra_args_provider=add_text_generate_ptq_args, + args_defaults={ + 'tokenizer_type': 'GPT2BPETokenizer', + 'no_load_rng': True, + 'no_load_optim': True, + }, + ) + + args = get_args() + if args.num_layers_per_virtual_pipeline_stage is not None: + print("Interleaved pipeline schedule is not yet supported for text generation.") + exit() + + text_generation_model_provider = functools.partial(model_provider, parallel_output=False) + model = get_model(text_generation_model_provider, wrap_with_ddp=False) + assert len(model) == 1, "Above condition should have caught this" + + if args.load is not None: + _ = ammo_load_checkpoint( + model, + None, + None, + strict=not args.untie_embeddings_and_output_weights, + additional_sharded_prefix="model.", + ) + else: + print_rank_0("WARNING: No checkpoint is loaded for PTQ! 
The process will still continue.") + + all_prompts = args.prompts.split("|") + + def custom_prompt_forward_loop_func(): + for prompt in all_prompts: + if mpu.is_pipeline_first_stage() and mpu.get_tensor_model_parallel_rank() == 0: + ( + prompts_plus_generations, + prompts_plus_generations_segments, + logprobs, + _, + ) = generate_and_post_process( + model[0], + prompts=[prompt], + tokens_to_generate=128, + return_output_log_probs=True, + temperature=1.0, + ) + print_rank_0(prompts_plus_generations) + else: + generate_and_post_process(model[0]) + + def hf_dataset_forword_loop_func(): + dataloader = get_calib_dataloader(args.calib_dataset, calib_size=args.calib_steps) + for prompts in dataloader: + if mpu.is_pipeline_first_stage() and mpu.get_tensor_model_parallel_rank() == 0: + ( + prompts_plus_generations, + prompts_plus_generations_segments, + logprobs, + _, + ) = generate_and_post_process( + model[0], + prompts=prompts, + tokens_to_generate=0, + return_output_log_probs=True, + temperature=1.0, + ) + else: + generate_and_post_process(model[0]) + + ptq_forward_loop_func = custom_prompt_forward_loop_func + if args.calib_dataset is not None: + ptq_forward_loop_func = hf_dataset_forword_loop_func + + if args.ammo_quant_cfg in QUANT_CFG_CHOICES: + atq_config = QUANT_CFG_CHOICES[args.ammo_quant_cfg] + if "awq" in args.ammo_quant_cfg: + weight_quantizer = atq_config["quant_cfg"]["*weight_quantizer"] # type: ignore + if isinstance(weight_quantizer, list): + weight_quantizer = weight_quantizer[0] + weight_quantizer["block_sizes"][-1] = 128 + atq_config["quant_cfg"]["*.output_layer.*"] = {"enable": False} + print_rank_0("atq.quantize: output_layer quantization is disable") + atq.quantize(model[0], atq_config, ptq_forward_loop_func) + custom_prompt_forward_loop_func() + if args.save: + save_checkpoint(1, model, None, None) + else: + custom_prompt_forward_loop_func() + + if args.engine_dir: + from ammo.deploy.llm import model_config_to_tensorrt_llm + from ammo.torch.export import torch_to_model_config + + assert args.decoder in ["gptnext", "llama"], f"Decoder type {args.decoder} not supported." + + Path(args.engine_dir).mkdir(parents=True, exist_ok=True) + + print_rank_0("Exporting model_configs for TRT LLM.") + model = unwrap_model(model) + model = model[0] + + # In TRT LLM, squared relu activation does not support bf16. So we use fp16 by default. + model_configs = torch_to_model_config( + model, + args.decoder, + torch.float16, + inference_tensor_parallel=args.inference_tensor_parallel, + ) + + print_rank_0("Building TRT LLM engines.") + for model_config in model_configs: + model_config_to_tensorrt_llm( + model_config, + args.engine_dir, + max_input_len=args.max_input_len, + max_output_len=args.max_output_len, + max_batch_size=args.max_batch_size, + max_beam_width=1, + num_build_workers=1, + inflight_batching=False, + enable_sparsity=False, + ) + print_rank_0(f"TRT LLM engines saved to {args.engine_dir}") diff --git a/examples/deploy/trtllm_text_generation.py b/examples/deploy/trtllm_text_generation.py new file mode 100644 index 0000000000..c6c0098f20 --- /dev/null +++ b/examples/deploy/trtllm_text_generation.py @@ -0,0 +1,93 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. 
+ +"""An example script to run the tensorrt_llm engine.""" + +import argparse +from pathlib import Path + +import numpy as np +import torch +from ammo.deploy.llm import generate, load, unload +from transformers import AutoTokenizer, T5Tokenizer + + +class CustomSentencePieceTokenizer(T5Tokenizer): + """This is a custom GPTSentencePiece Tokenizer modified from the T5Tokenizer. + + Note: + The modification is kept minimal to make `encode` and `batch_decode` working + properly (used in TensorRT-LLM engine). Other functions have not been tested. + """ + + def __init__(self, model): + super().__init__(model, extra_ids=0, bos_token="", pad_token="") + + def encode(self, text, add_special_tokens: bool = True, **kwargs): + return self.sp_model.encode_as_ids(text) + + def batch_decode(self, sequences, skip_special_tokens: bool = False, **kwargs): + if isinstance(sequences, np.ndarray) or torch.is_tensor(sequences): + sequences = sequences.tolist() + return self.sp_model.decode(sequences) + + +def parse_arguments(): + parser = argparse.ArgumentParser() + parser.add_argument("--tokenizer", type=str, default="") + parser.add_argument("--max-output-len", type=int, default=100) + parser.add_argument("--engine-dir", type=str, default="/tmp/ammo") + parser.add_argument( + "--input-texts", + type=str, + default=( + "Born in north-east France, Soyer trained as a|Born in California, Soyer trained as a" + ), + help="Input texts. Please use | to separate different batches.", + ) + parser.add_argument("--max-num-beams", type=int, default=1) + parser.add_argument("--profiler-output", type=str, default="") + return parser.parse_args() + + +def run(args): + tokenizer_path = Path(args.tokenizer) + + if tokenizer_path.is_dir(): + # For llama models, use local HF tokenizer which is a folder. + tokenizer = AutoTokenizer.from_pretrained(args.tokenizer, trust_remote_code=True) + elif tokenizer_path.is_file(): + # For nextllm and nemotron models, use local Megatron GPTSentencePiece tokenizer which is a model file. + tokenizer = CustomSentencePieceTokenizer(args.tokenizer) + else: + raise ValueError( + "arg.tokenizer must be a dir to a hf tokenizer checkpoint for llama or a SentencePiece .model file for gptnext" + ) + + if not hasattr(args, "profiler_output"): + args.profiler_output = "" + + input_texts = args.input_texts.split("|") + assert input_texts, "input_text not specified" + print(input_texts) + + free_memory_before = torch.cuda.mem_get_info() + + host_context = load( + tokenizer=tokenizer, engine_dir=args.engine_dir, num_beams=args.max_num_beams + ) + torch.cuda.cudart().cudaProfilerStart() + outputs = generate(input_texts, args.max_output_len, host_context, None, args.profiler_output) + print(outputs) + torch.cuda.cudart().cudaProfilerStop() + + free_memory_after = torch.cuda.mem_get_info() + print( + f"Use GPU memory: {(free_memory_before[0] - free_memory_after[0]) / 1024 / 1024 / 1024} GB" + ) + + unload(host_context) + + +if __name__ == "__main__": + args = parse_arguments() + run(args) diff --git a/megatron/core/deploy/__init__.py b/megatron/core/deploy/__init__.py new file mode 100644 index 0000000000..f8011007a5 --- /dev/null +++ b/megatron/core/deploy/__init__.py @@ -0,0 +1 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. diff --git a/megatron/core/deploy/gpt/__init__.py b/megatron/core/deploy/gpt/__init__.py new file mode 100644 index 0000000000..f8011007a5 --- /dev/null +++ b/megatron/core/deploy/gpt/__init__.py @@ -0,0 +1 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. 
All rights reserved. diff --git a/megatron/core/deploy/gpt/model_specs.py b/megatron/core/deploy/gpt/model_specs.py new file mode 100644 index 0000000000..50467ef414 --- /dev/null +++ b/megatron/core/deploy/gpt/model_specs.py @@ -0,0 +1,50 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + +from megatron.core.fusions.fused_bias_dropout import get_bias_dropout_add +from megatron.core.tensor_parallel.layers import ColumnParallelLinear, RowParallelLinear +from megatron.core.transformer.attention import SelfAttention, SelfAttentionSubmodules +from megatron.core.transformer.custom_layers.transformer_engine import TENorm +from megatron.core.transformer.dot_product_attention import DotProductAttention +from megatron.core.transformer.enums import AttnMaskType +from megatron.core.transformer.mlp import MLP, MLPSubmodules +from megatron.core.transformer.spec_utils import ModuleSpec +from megatron.core.transformer.transformer_layer import TransformerLayer, TransformerLayerSubmodules + + +# Use this spec for AMMO PTQ and TensorRT-LLM export +def get_gpt_layer_ammo_spec() -> ModuleSpec: + """Mix the native spec with TENorm. + + This is essentially the native local spec except for the layernorm implementation + is using TENorm from Transformer-Engine. This TENorm supports both FusedLayerNorm and RMSNorm and + prevents the apex dependency. + """ + return ModuleSpec( + module=TransformerLayer, + submodules=TransformerLayerSubmodules( + input_layernorm=TENorm, + self_attention=ModuleSpec( + module=SelfAttention, + params={"attn_mask_type": AttnMaskType.causal}, + submodules=SelfAttentionSubmodules( + linear_qkv=ColumnParallelLinear, + core_attention=DotProductAttention, + linear_proj=RowParallelLinear, + ), + ), + self_attn_bda=get_bias_dropout_add, + pre_mlp_layernorm=TENorm, + mlp=ModuleSpec( + module=MLP, + submodules=MLPSubmodules( + linear_fc1=ColumnParallelLinear, linear_fc2=RowParallelLinear, + ), + ), + mlp_bda=get_bias_dropout_add, + # Map TE-layernorm-fusion keys back + sharded_state_dict_keys_map={ + 'input_layernorm.': 'self_attention.linear_qkv.layer_norm_', + 'pre_mlp_layernorm.': 'mlp.linear_fc1.layer_norm_', + }, + ), + ) diff --git a/megatron/core/deploy/gpt/state_dict_hooks.py b/megatron/core/deploy/gpt/state_dict_hooks.py new file mode 100644 index 0000000000..cf1565af89 --- /dev/null +++ b/megatron/core/deploy/gpt/state_dict_hooks.py @@ -0,0 +1,126 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + +from megatron import print_rank_0 + + +def mcore_gpt_load_classic_state_dict_pre_hook( + state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs, +): + """Register a pre-hook to fix the state_dict key difference. + + This prehook is used when trying to load the classic Megatron-LM GPTModel into its + megatron/core variant that uses native ParallelLinear and Transformer-Engine Norm. + Only this particular spec supports post-training quantization and TensorRT-LLM + config export through `nvidia-ammo` package. 
+ + Args: + state_dict: state dictionary + prefix: module name prefix + local_metadata: local metatdata + strict: whether is in strict mode + missing_keys: missing state dict keys + unexpected_keys: unexpected state dict keys + error_msgs: error messages + """ + if "modelopt_state" in state_dict: + state_dict.pop("modelopt_state") + + if "language_model" in state_dict: + language_model_state_dict = state_dict.pop("language_model") + if "embedding" in language_model_state_dict: + if "word_embeddings" in language_model_state_dict["embedding"]: + for key, param in language_model_state_dict["embedding"]["word_embeddings"].items(): + state_dict.update({"embedding.word_embeddings." + key: param}) + if "position_embeddings" in language_model_state_dict["embedding"]: + for key, param in language_model_state_dict["embedding"][ + "position_embeddings" + ].items(): + state_dict.update({"embedding.position_embeddings." + key: param}) + if "transformer" in language_model_state_dict: + for key, param in language_model_state_dict["transformer"].items(): + state_dict.update({"decoder." + key: param}) + else: + for key, param in language_model_state_dict["encoder"].items(): + state_dict.update({"decoder." + key: param}) + if "output_layer" in language_model_state_dict: + for key, param in language_model_state_dict["output_layer"].items(): + state_dict.update({"output_layer." + key: param}) + + print_rank_0("ModelOptGPTModel {}".format(state_dict.keys())) + + module_name_rewrite_list = [ + ("input_norm", "input_layernorm"), + (".attention.query_key_value", ".self_attention.linear_qkv"), + (".attention.dense", ".self_attention.linear_proj"), + ("self_attention.query_key_value", "self_attention.linear_qkv"), + ("self_attention.dense", "self_attention.linear_proj"), + ("post_attention_layernorm", "pre_mlp_layernorm"), + ("post_attention_norm", "pre_mlp_layernorm"), + ("dense_h_to_4h", "linear_fc1"), + ("dense_4h_to_h", "linear_fc2"), + ("final_norm", "final_layernorm"), + ] + + key_rewrite_list = [] + + for key, _ in state_dict.items(): + for old_name, new_name in module_name_rewrite_list: + if old_name in key: + key_rewrite_list += [(key, key.replace(old_name, new_name))] + + for old_key, new_key in key_rewrite_list: + print_rank_0("replace {} with {}".format(old_key, new_key)) + state_dict[new_key] = state_dict[old_key] + state_dict.pop(old_key) + + +def mcore_gpt_load_te_state_dict_pre_hook( + state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs, +): + """Register a pre-hook to fix the state_dict key difference of. + + This prehook is used when trying to load the megatron/core GPTModel that uses a + fused Transformer-Engine ParallelLinear into the variant that uses native ParallelLinear + and Transformer-Engine Norm (effectively to restore the fusion). + Only this particular spec supports post-training quantization and TensorRT-LLM + config export through `nvidia-ammo` package. 
+ + Args: + state_dict: state dictionary + prefix: module name prefix + local_metadata: local metatdata + strict: whether is in strict mode + missing_keys: missing state dict keys + unexpected_keys: unexpected state dict keys + error_msgs: error messages + """ + if "modelopt_state" in state_dict: + state_dict.pop("modelopt_state") + + key_with_te_extra_state_to_pop = [] + + for key, _ in state_dict.items(): + if "_extra_state" in key: + key_with_te_extra_state_to_pop += [key] + + for key in key_with_te_extra_state_to_pop: + state_dict.pop(key) + + module_name_rewrite_list = [ + ("self_attention.linear_qkv.layer_norm_weight", "input_layernorm.weight"), + ("self_attention.linear_qkv.layer_norm_bias", "input_layernorm.bias"), + ("mlp.linear_fc1.layer_norm_weight", "pre_mlp_layernorm.weight"), + ("mlp.linear_fc1.layer_norm_bias", "pre_mlp_layernorm.bias"), + ] + + key_rewrite_list = [] + + for key, _ in state_dict.items(): + for old_name, new_name in module_name_rewrite_list: + if old_name in key: + key_rewrite_list += [(key, key.replace(old_name, new_name))] + + for old_key, new_key in key_rewrite_list: + print_rank_0("replace {} with {}".format(old_key, new_key)) + state_dict[new_key] = state_dict[old_key] + state_dict.pop(old_key) diff --git a/megatron/core/models/gpt/gpt_model.py b/megatron/core/models/gpt/gpt_model.py index d096b47c22..16a5b351cc 100644 --- a/megatron/core/models/gpt/gpt_model.py +++ b/megatron/core/models/gpt/gpt_model.py @@ -70,6 +70,10 @@ def __init__( # TODO: remove this dependency ? self.model_type = ModelType.encoder_or_decoder + # These 2 attributes are needed for TensorRT-LLM export. + self.max_position_embeddings = max_sequence_length + self.rotary_percent = rotary_percent + if self.pre_process: self.embedding = LanguageModelEmbedding( config=self.config, diff --git a/megatron/core/transformer/transformer_config.py b/megatron/core/transformer/transformer_config.py index ce6d38aba8..d85473c948 100644 --- a/megatron/core/transformer/transformer_config.py +++ b/megatron/core/transformer/transformer_config.py @@ -138,6 +138,10 @@ class TransformerConfig(ModelParallelConfig): moe_input_jitter_eps: float = None moe_token_dropping: bool = False # TODO: Support token dropping. + # These 2 attributes are WAR for TRTLLM export. DO NOT USE!! WILL BE DEPRECATED SOON!! + max_position_embeddings: int = 0 + rotary_percent: float = 0 + def __post_init__(self): """ Python dataclass method that is used to modify attributes after initialization. See https://docs.python.org/3/library/dataclasses.html#post-init-processing for more details. diff --git a/megatron/deploy/__init__.py b/megatron/deploy/__init__.py new file mode 100644 index 0000000000..f8011007a5 --- /dev/null +++ b/megatron/deploy/__init__.py @@ -0,0 +1 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. diff --git a/megatron/deploy/arguments.py b/megatron/deploy/arguments.py new file mode 100644 index 0000000000..c03e70cdb6 --- /dev/null +++ b/megatron/deploy/arguments.py @@ -0,0 +1,25 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. 
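The two load-time pre-hooks above mutate the incoming state dict before load_state_dict consumes it. The following is a minimal, self-contained sketch of that mechanism on a toy module; the module and checkpoint key names are illustrative only, not taken from Megatron.

# Hedged sketch of the load_state_dict pre-hook pattern used above.
import torch


class Tiny(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.input_layernorm = torch.nn.LayerNorm(4)


def rename_hook(state_dict, prefix, local_metadata, strict,
                missing_keys, unexpected_keys, error_msgs):
    # Rewrite fused TE-style keys to the unfused local-spec names in place,
    # mirroring the rewrite lists in the hooks above.
    for old_key in list(state_dict.keys()):
        if "self_attention.linear_qkv.layer_norm_" in old_key:
            new_key = old_key.replace(
                "self_attention.linear_qkv.layer_norm_", "input_layernorm.")
            state_dict[new_key] = state_dict.pop(old_key)


model = Tiny()
model._register_load_state_dict_pre_hook(rename_hook)
checkpoint = {
    "self_attention.linear_qkv.layer_norm_weight": torch.ones(4),
    "self_attention.linear_qkv.layer_norm_bias": torch.zeros(4),
}
model.load_state_dict(checkpoint)  # keys are rewritten by the hook before loading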
+
+def add_ammo_args(parser):
+    """Add additional arguments for ammo."""
+    group = parser.add_argument_group(title="ammo-generic")
+
+    group.add_argument(
+        "--ammo-load-classic-megatron-to-mcore",
+        action="store_true",
+        help="Load a classic megatron-lm checkpoint to a new megatron-core model.",
+    )
+    group.add_argument(
+        "--ammo-convert-te-to-local-spec",
+        action="store_true",
+        help="Load a megatron-core transformer-engine checkpoint to a model with local spec.",
+    )
+    group.add_argument(
+        "--ammo-quant-cfg",
+        type=str,
+        default=None,
+        choices=["int8_sq", "fp8", "int4_awq", "None"],
+        help="Algorithms supported by atq.quantize.",
+    )
+
+    return parser
diff --git a/megatron/deploy/gpt/__init__.py b/megatron/deploy/gpt/__init__.py
new file mode 100644
index 0000000000..f8011007a5
--- /dev/null
+++ b/megatron/deploy/gpt/__init__.py
@@ -0,0 +1 @@
+# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
diff --git a/megatron/deploy/gpt/model_provider.py b/megatron/deploy/gpt/model_provider.py
new file mode 100644
index 0000000000..39fb49f8c3
--- /dev/null
+++ b/megatron/deploy/gpt/model_provider.py
@@ -0,0 +1,73 @@
+# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+
+"""ModelOpt GPT model provider."""
+
+from typing import Union
+
+from megatron import get_args, print_rank_0
+from megatron.arguments import core_transformer_config_from_args
+from megatron.core.deploy.gpt.model_specs import get_gpt_layer_ammo_spec
+from megatron.core.deploy.gpt.state_dict_hooks import (
+    mcore_gpt_load_classic_state_dict_pre_hook,
+    mcore_gpt_load_te_state_dict_pre_hook,
+)
+from megatron.core.models.gpt import GPTModel as MCoreGPTModel
+
+
+def model_provider(
+    pre_process=True, post_process=True, parallel_output=True,
+) -> Union[MCoreGPTModel]:
+    """Builds the GPT model.
+
+    This model_provider only supports use_mcore_models=True.
+
+    Args:
+        pre_process (bool, optional): Set to true if you need to compute embeddings. Defaults to True.
+        post_process (bool, optional): Set to true if you want to compute output logits/loss. Defaults to True.
+        parallel_output (bool): whether to allgather the output logits? This must be
+            True if `model_provider` is called in text_generation_server.
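As a quick illustration of the new argument group from megatron/deploy/arguments.py above, a bare parser can be extended as follows; the flag values are arbitrary examples, not recommended settings.

# Hedged sketch: exercising add_ammo_args on a stand-alone argparse parser.
import argparse

from megatron.deploy.arguments import add_ammo_args  # module added above

parser = argparse.ArgumentParser()
parser = add_ammo_args(parser)
args = parser.parse_args(
    ["--ammo-quant-cfg", "fp8", "--ammo-load-classic-megatron-to-mcore"]
)
assert args.ammo_quant_cfg == "fp8"
assert args.ammo_load_classic_megatron_to_mcore is True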
+ + Returns: + Union[MCoreGPTModel]: The returned model + """ + args = get_args() + + print_rank_0("building GPT model ...") + config = core_transformer_config_from_args(get_args()) + + if args.use_mcore_models: + if args.spec is not None: + raise ValueError("Custom layer specs are not supported!") + else: + if args.num_experts is None: + transformer_layer_spec = get_gpt_layer_ammo_spec() + else: + raise ValueError("MoE is not supported for now!") + + model_type = MCoreGPTModel + model_kwargs = { + "config": config, + "transformer_layer_spec": transformer_layer_spec, + "vocab_size": args.padded_vocab_size, + "max_sequence_length": args.max_position_embeddings, + "pre_process": pre_process, + "post_process": post_process, + "fp16_lm_cross_entropy": args.fp16_lm_cross_entropy, + "parallel_output": parallel_output, + "share_embeddings_and_output_weights": not args.untie_embeddings_and_output_weights, + "position_embedding_type": args.position_embedding_type, + "rotary_percent": args.rotary_percent, + } + else: + raise ValueError("Classic Megatron-LM models are not supported!") + + model = model_type(**model_kwargs) + print_rank_0(str(model)) + + if args.use_mcore_models: + if args.ammo_load_classic_megatron_to_mcore: + model._register_load_state_dict_pre_hook(mcore_gpt_load_classic_state_dict_pre_hook) + elif args.ammo_convert_te_to_local_spec: + model._register_load_state_dict_pre_hook(mcore_gpt_load_te_state_dict_pre_hook) + + return model From 47cb6301a24607aece4d75d8ac4d88c85bb05926 Mon Sep 17 00:00:00 2001 From: William Dykas Date: Tue, 5 Mar 2024 13:13:31 -0800 Subject: [PATCH 290/296] Experimental Yaml configs --- examples/gpt3/gpt_config.yaml | 303 ++++++++++++++++++++++ megatron/arguments.py | 11 +- megatron/global_vars.py | 1 - megatron/initialize.py | 7 +- megatron/training.py | 4 +- megatron/yaml_arguments.py | 476 ++++++++++++++++++++++++++++++++++ pretrain_gpt.py | 7 +- 7 files changed, 803 insertions(+), 6 deletions(-) create mode 100644 examples/gpt3/gpt_config.yaml create mode 100644 megatron/yaml_arguments.py diff --git a/examples/gpt3/gpt_config.yaml b/examples/gpt3/gpt_config.yaml new file mode 100644 index 0000000000..652cd4d43e --- /dev/null +++ b/examples/gpt3/gpt_config.yaml @@ -0,0 +1,303 @@ +# WARNING: Yaml configs is currently an experimental feature +language_model: + # model architecture + num_layers: 24 + hidden_size: 1024 + num_attention_heads: 16 + num_query_groups: null + + ffn_hidden_size: null + kv_channels: null + hidden_dropout: 0.0 + attention_dropout: 0.0 + fp32_residual_connection: False + + apply_residual_connection_post_layernorm: False + layernorm_epsilon: 1.e-5 + layernorm_zero_centered_gamma: True + add_bias_linear: False + bias_activation_fusion: False + add_qkv_bias: False + gated_linear_unit: False + activation_func: swiglu + num_moe_experts: null + rotary_interleaved: False + window_size: null + + # initialization + init_method: null + init_method_std: 0.02 + output_layer_init_method: null + + # mixed-precision + apply_query_key_layer_scaling: False + attention_softmax_in_fp32: False + + # fusion + bias_swiglu_fusion: True + masked_softmax_fusion: True + persist_layer_norm: False + memory_efficient_layer_norm: False + bias_dropout_fusion: True + apply_rope_fusion: True + + # activation recomputation + recompute_granularity: null + recompute_method: null + recompute_num_layers: null + distribute_saved_activations: null + + # fp8 related + fp8: null + fp8_margin: 0 + fp8_interval: 1 + fp8_amax_history_len: 1 + fp8_amax_compute_algo: "most_recent" 
+ fp8_wgrad: True + + # miscellaneous + clone_scatter_output_in_embedding: True + + normalization: "LayerNorm" # alt value supported by TE: "RMSNorm" + + # MoE related + moe_router_load_balancing_type: "aux_loss" + moe_router_topk: 2 + moe_grouped_gemm: False + moe_aux_loss_coeff: 0 # 1e-2 would be a good start value for load balance loss. + moe_z_loss_coeff: null # 1e-3 would be a good start value for z-loss + moe_input_jitter_eps: null + moe_token_dropping: False + +model_parallel: + # Model parallelism + tensor_model_parallel_size: 1 + context_parallel_size: 1 + pipeline_model_parallel_size: 1 + virtual_pipeline_model_parallel_size: null + sequence_parallel: True + expert_model_parallel_size: 1 + + # Initialization + perform_initialization: True + use_cpu_initialization: null + + # Training + fp16: False + bf16: True + params_dtype: null # Set from above arguments for core + timers: null + + # Optimizations + gradient_accumulation_fusion: True + async_tensor_model_parallel_allreduce: True + tp_comm_overlap: False + + # Debug Options + tp_comm_split_ag: True + tp_comm_atomic_ag: True + tp_comm_split_rs: True + tp_comm_atomic_rs: True + tp_comm_bulk_wgrad: True + tp_comm_bulk_dgrad: True + + # Parallelism + finalize_model_grads_func: null + + # Pipeline Parallel + pipeline_dtype: null + grad_scale_func: null + enable_autocast: False + autocast_dtype: null + variable_seq_lengths: False + num_microbatches_with_partial_activation_checkpoints: null + overlap_p2p_comm: False + batch_p2p_comm: True + batch_p2p_sync: True + use_ring_exchange_p2p: False + deallocate_pipeline_outputs: False + no_sync_func: null + grad_sync_func: null + param_sync_func: null + pipeline_model_parallel_split_rank: null + + # CPU Offloading + cpu_offloading: False + cpu_offloading_num_layers: 0 + _cpu_offloading_context: null + cpu_offloading_weights: False + cpu_offloading_activations: True + + # Timing + barrier_with_L1_time: True + +# training: +use_mcore_models: True +spec: null +micro_batch_size: 2 +global_batch_size: 128 +rampup_batch_size: [32, 32, 65324160] +check_for_nan_in_loss_and_grad: True +num_layers_per_virtual_pipeline_stage: null + +encoder_num_layers: null +decoder_num_layers: null +rotary_seq_len_interpolation_factor: null +add_position_embedding: False +make_vocab_size_divisible_by: 128 +group_query_attention: False + + +exit_signal_handler: False +exit_duration_in_mins: null +exit_interval: null + +untie_embeddings_and_output_weights: True +position_embedding_type: rope +rotary_percent: 0.5 +openai_gelu: False +squared_relu: False +swiglu: True +onnx_safe: null +bert_binary_head: True +max_position_embeddings: 4096 + +transformer_impl: local +use_flash_attn: False +seed: 1234 +data_parallel_random_init: False + +# Optimizer +optimizer: adam +lr: 2.5e-4 +lr_decay_style: cosine +lr_decay_iters: null +lr_decay_samples: 255126953 +lr_warmup_fraction: null +lr_warmup_iters: 0 +lr_warmup_samples: 81381 +lr_warmup_init: 0.0 +min_lr: 2.5e-5 +weight_decay: 0.1 +start_weight_decay: null +end_weight_decay: null +weight_decay_incr_style: constant +clip_grad: 1.0 +adam_beta1: 0.9 +adam_beta2: 0.95 +adam_eps: 1.e-08 +sgd_momentum: 0.9 +override_opt_param_scheduler: False +use_checkpoint_opt_param_scheduler: False + +# checkpointing arguments +save: null +save_interval: 20000 +no_save_optim: null +no_save_rng: null +load: null +no_load_optim: null +no_load_rng: null +finetune: False +use_checkpoint_args: False +exit_on_missing_checkpoint: False + +# loss arguments +loss_scale: null +initial_loss_scale: 
4294967296 +min_loss_scale: 1.0 +loss_scale_window: 1000 +hysteresis: 2 +accumulate_allreduce_grads_in_fp32: False +fp16_lm_cross_entropy: False + +# distributed arguments +distributed_backend: nccl +distributed_timeout_minutes: 10 +overlap_grad_reduce: False +delay_grad_reduce: True +overlap_param_gather: False +delay_param_gather: False +scatter_gather_tensors_in_pipeline: True +local_rank: null +lazy_mpu_init: null +empty_unused_memory_level: 0 +standalone_embedding_stage: False +use_distributed_optimizer: False +nccl_communicator_config_path: null + +train_iters: null +eval_iters: 32 +eval_interval: 2000 +skip_train: False + +adlr_autoresume: False +adlr_autoresume_interval: 1000 + +# garbage collection +manual_gc: False +manual_gc_interval: 0 +manual_gc_eval: True + +tp_comm_overlap_cfg: null + +#data +data_path: null +split: '99,1,0' +train_data_path: null +valid_data_path: null +test_data_path: null +data_cache_path: null +mock_data: False +vocab_size: null +vocab_file: null +merge_file: null +vocab_extra_ids: 0 +seq_length: 4096 +encoder_seq_length: null +decoder_seq_length: null +retriever_seq_length: 256 +sample_rate: 1.0 +mask_prob: 0.15 +short_seq_prob: 0.1 +num_workers: 2 +tokenizer_type: GPTSentencePieceTokenizer +tokenizer_model: null +reset_position_ids: False +reset_attention_mask: False +eod_mask_loss: False +train_samples: 268554688 +dataloader_type: null + +#profile: +profile: False +profile_ranks: [0] +profile_step_end: 12 +profile_step_start: 10 + +#logging: +log_params_norm: True +log_num_zeros_in_grad: True +log_throughput: False +log_progress: False +timing_log_level: 0 +timing_log_option: minmax +tensorboard_log_interval: 1 +tensorboard_queue_size: 1000 +log_timers_to_tensorboard: False +log_batch_size_to_tensorboard: False +log_learning_rate_to_tensorboard: True +log_learning_rate_to_tensorboard: True +log_validation_ppl_to_tensorboard: False +log_memory_to_tensorboard: False +log_world_size_to_tensorboard: False +log_loss_scale_to_tensorboard: True +wandb_project: '' +wandb_exp_name: '' +wandb_save_dir: '' +enable_one_logger: False +one_logger_project: e2e-tracking +one_logger_entity: hwinf_dcm +one_logger_run_name: null +log_interval: 100 +tensorboard_dir: null diff --git a/megatron/arguments.py b/megatron/arguments.py index bffb098818..b901d10586 100644 --- a/megatron/arguments.py +++ b/megatron/arguments.py @@ -46,13 +46,20 @@ def parse_args(extra_args_provider=None, ignore_unknown_args=False): # Custom arguments. if extra_args_provider is not None: parser = extra_args_provider(parser) - + # Parse. if ignore_unknown_args: args, _ = parser.parse_known_args() else: args = parser.parse_args() + # Experimental yaml + if args.yaml_cfg is not None: + from .yaml_arguments import load_yaml + assert args.yaml_cfg and args.use_mcore_models, "To use yaml, mcore must be enabled" + args = load_yaml(args.yaml_cfg) + + # Args from environment args.rank = int(os.getenv('RANK', '0')) args.world_size = int(os.getenv("WORLD_SIZE", '1')) @@ -1474,5 +1481,7 @@ def _add_experimental_args(parser): 'To use local spec specify local as the argument.' 
'For more details, see the model class, ' '`transformer_block.py`, or `transformer_layer.py`') + group.add_argument('--yaml-cfg', type=str, default=None, + help = 'Config file to add additional arguments') return parser diff --git a/megatron/global_vars.py b/megatron/global_vars.py index 45e7723860..b7e19fe434 100644 --- a/megatron/global_vars.py +++ b/megatron/global_vars.py @@ -247,4 +247,3 @@ def _ensure_var_is_not_initialized(var, name): assert var is None, '{} is already initialized.'.format(name) - diff --git a/megatron/initialize.py b/megatron/initialize.py index fb7866ab03..8eb88d482e 100644 --- a/megatron/initialize.py +++ b/megatron/initialize.py @@ -16,6 +16,7 @@ from megatron import get_tensorboard_writer from megatron.core import mpu, tensor_parallel from megatron.arguments import parse_args, validate_args +from megatron.yaml_arguments import validate_yaml from megatron.checkpointing import load_args_from_checkpoint from megatron.global_vars import set_global_variables from megatron.model.transformer import bias_dropout_add_fused_train @@ -47,7 +48,11 @@ def initialize_megatron( assert args.load is not None, "--use-checkpoints-args requires --load argument" load_args_from_checkpoint(args) - validate_args(args, args_defaults) + if args.yaml_cfg is not None: + args = validate_yaml(args, args_defaults) + else: + validate_args(args, args_defaults) + # set global args, build tokenizer, and set adlr-autoresume, # tensorboard-writer, and timers. diff --git a/megatron/training.py b/megatron/training.py index d604e6c489..ab74cee269 100644 --- a/megatron/training.py +++ b/megatron/training.py @@ -548,7 +548,7 @@ def train_step(forward_step_func, data_iterator, torch.cuda.empty_cache() # Vision gradients. - if args.vision_pretraining and args.vision_pretraining_type == "dino": + if getattr(args, 'vision_pretraining', False) and args.vision_pretraining_type == "dino": unwrapped_model = unwrap_model(model[0]) unwrapped_model.cancel_gradients_last_layer(args.curr_iteration) @@ -558,7 +558,7 @@ def train_step(forward_step_func, data_iterator, timers('optimizer').stop() # Vision momentum. - if args.vision_pretraining and args.vision_pretraining_type == "dino": + if getattr(args, 'vision_pretraining', False) and args.vision_pretraining_type == "dino": unwrapped_model = unwrap_model(model[0]) unwrapped_model.update_momentum(args.curr_iteration) diff --git a/megatron/yaml_arguments.py b/megatron/yaml_arguments.py new file mode 100644 index 0000000000..5601e2ee67 --- /dev/null +++ b/megatron/yaml_arguments.py @@ -0,0 +1,476 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. 
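A hedged usage sketch for the experimental YAML path added in this patch: load_yaml, defined later in this module, returns a nested namespace built from examples/gpt3/gpt_config.yaml, and ${VAR} values in a YAML file are resolved from the environment by the constructor registered below. The environment-variable line is an assumption about intended use; the sample config itself does not use ${...}.

# Hedged sketch of the experimental YAML flow (see load_yaml/validate_yaml below).
import os

# Values written as ${VAR} in the YAML are substituted from the environment
# by the custom constructor registered in this module, e.g.
# `data_path: ${MY_DATA_PATH}` (illustrative, not part of the sample config).
os.environ["MY_DATA_PATH"] = "/tmp/data"

from megatron.yaml_arguments import load_yaml

cfg = load_yaml("examples/gpt3/gpt_config.yaml")
print(cfg.language_model.hidden_size)                 # 1024 in the sample config
print(cfg.model_parallel.tensor_model_parallel_size)  # 1 in the sample config
# In training, the same file is selected on the command line via --yaml-cfg.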
+ +"""Megatron arguments.""" + +import argparse +import dataclasses +import json +import os +import torch +import types + +from itertools import chain, starmap +from types import SimpleNamespace +import yaml, re, os +from types import SimpleNamespace + +import torch.nn.functional as F +from megatron.global_vars import set_retro_args, get_retro_args +from tools.retro.utils import get_args_path as get_retro_args_path + +from megatron.core.models.retro import RetroConfig +from megatron.core.transformer import TransformerConfig + +# Taken from https://stackoverflow.com/questions/65414773/parse-environment-variable-from-yaml-with-pyyaml +# Allows for yaml to use environment variables +env_pattern = re.compile(r".*?\${(.*?)}.*?") +def env_constructor(loader, node): + value = loader.construct_scalar(node) + for group in env_pattern.findall(value): + assert os.environ.get(group) is not None, f"environment variable {group} in yaml not found" + value = value.replace(f"${{{group}}}", os.environ.get(group)) + return value +yaml.add_implicit_resolver("!pathex", env_pattern) +yaml.add_constructor("!pathex", env_constructor) + + +str_dtype_to_torch = { + "float32" : torch.float32, + "float16" : torch.float16, + "bfloat16" : torch.bfloat16 +} + +def validate_yaml(args, defaults={}): + + # This is for legacy script env var setting + if type(args.data_path) is str: + # If no white space its a single path + split_data_path = args.data_path.split() + if len(split_data_path) != 1: + args.data_path = split_data_path + + # Tensor model parallel size. + args.model_parallel.tensor_model_parallel_size = min( + args.model_parallel.tensor_model_parallel_size, args.world_size) + assert args.world_size % args.model_parallel.tensor_model_parallel_size == 0, 'world size'\ + ' ({}) is not divisible by tensor model parallel size ({})'.format( + args.world_size, args.model_parallel.tensor_model_parallel_size) + # Pipeline model parallel size. + args.model_parallel.pipeline_model_parallel_size = min( + args.model_parallel.pipeline_model_parallel_size, + (args.world_size // args.model_parallel.tensor_model_parallel_size)) + args.model_parallel.transformer_pipeline_model_parallel_size = ( + args.model_parallel.pipeline_model_parallel_size - 1 + if args.standalone_embedding_stage else + args.model_parallel.pipeline_model_parallel_size + ) + # Checks. 
+ model_parallel_size = args.model_parallel.pipeline_model_parallel_size * \ + args.model_parallel.tensor_model_parallel_size + assert args.world_size % (model_parallel_size * args.model_parallel.context_parallel_size) == 0, \ + 'world size ({}) is not divisible by tensor parallel size ({}) times ' \ + 'pipeline parallel size ({}) times context parallel size ({})'.format( + args.world_size, args.model_parallel.tensor_model_parallel_size, + args.model_parallel.pipeline_model_parallel_size, args.model_parallel.context_parallel_size) + + # data_parallel_size is not in model parallel config + args.data_parallel_size = args.world_size // (model_parallel_size * args.model_parallel.context_parallel_size) + if args.rank == 0: + print('using world size: {}, data-parallel size: {}, ' + 'context-parallel size: {} ' + 'tensor-model-parallel size: {}, ' + 'pipeline-model-parallel size: {} '.format( + args.world_size, args.data_parallel_size, + args.model_parallel.context_parallel_size, + args.model_parallel.tensor_model_parallel_size, + args.model_parallel.pipeline_model_parallel_size), flush=True) + if args.model_parallel.pipeline_model_parallel_size > 1: + if args.model_parallel.pipeline_model_parallel_split_rank is not None: + assert args.model_parallel.pipeline_model_parallel_split_rank < \ + args.model_parallel.pipeline_model_parallel_size, 'split rank needs'\ + ' to be less than pipeline model parallel size ({})'.format( + args.model_parallel.pipeline_model_parallel_size) + + if args.model_parallel.tp_comm_overlap: + assert args.model_parallel.sequence_parallel == True, 'Tensor parallel communication/GEMM overlap can happen only when sequence parallelism is enabled' + + # Set input defaults. + for key in defaults: + # For default to be valid, it should not be provided in the + # arguments that are passed to the program. We check this by + # ensuring the arg is set to None. + if getattr(args, key, None) is not None: + if args.rank == 0: + print('WARNING: overriding default arguments for {key}:{v} \ + with {key}:{v2}'.format(key=key, v=defaults[key], + v2=getattr(args, key)), + flush=True) + else: + setattr(args, key, defaults[key]) + + # Batch size. 
+ assert args.micro_batch_size is not None + assert args.micro_batch_size > 0 + if args.global_batch_size is None: + args.global_batch_size = args.micro_batch_size * args.data_parallel_size + if args.rank == 0: + print('setting global batch size to {}'.format( + args.global_batch_size), flush=True) + assert args.global_batch_size > 0 + + # num_layers_per_virtual_pipeline_stage is not insde model parallel for checkpointing + if args.num_layers_per_virtual_pipeline_stage is not None: + assert args.model_parallel.pipeline_model_parallel_size > 2, \ + 'pipeline-model-parallel size should be greater than 2 with ' \ + 'interleaved schedule' + assert args.language_model.num_layers % args.model_parallel.transformer_pipeline_model_parallel_size == 0, \ + 'number of layers should be divisible by the pipeline parallel size' + num_layers_per_pipeline_stage = args.language_model.num_layers // args.model_parallel.transformer_pipeline_model_parallel_size + assert num_layers_per_pipeline_stage % args.num_layers_per_virtual_pipeline_stage == 0, \ + 'number of layers per pipeline stage must be divisible number of layers per virtual pipeline stage' + args.model_parallel.virtual_pipeline_model_parallel_size = num_layers_per_pipeline_stage // \ + args.num_layers_per_virtual_pipeline_stage + else: + args.model_parallel.virtual_pipeline_model_parallel_size = None + # Overlap P2P communication is disabled if not using the interleaved schedule. + args.model_parallel.overlap_p2p_comm = False + if args.rank == 0: + print('WARNING: Setting args.overlap_p2p_comm to False since non-interleaved ' + 'schedule does not support overlapping p2p communication') + + if args.overlap_param_gather: + assert args.use_distributed_optimizer, \ + '--overlap-param-gather only supported with distributed optimizer' + assert args.overlap_grad_reduce, \ + '--overlap-grad-reduce should be turned on when using --overlap-param-gather' + + # Parameters dtype. + if args.model_parallel.fp16: + assert not args.model_parallel.bf16 + args.model_parallel.params_dtype = torch.half + if args.model_parallel.bf16: + assert not args.model_parallel.fp16 + args.model_parallel.params_dtype = torch.bfloat16 + # bfloat16 requires gradient accumulation and all-reduce to + # be done in fp32. + if not args.accumulate_allreduce_grads_in_fp32: + args.accumulate_allreduce_grads_in_fp32 = True + if args.rank == 0: + print('accumulate and all-reduce gradients in fp32 for ' + 'bfloat16 data type.', flush=True) + + if args.rank == 0: + print('using {} for parameters ...'.format(args.model_parallel.params_dtype), + flush=True) + + if args.dataloader_type is None: + args.dataloader_type = 'single' + + # Consumed tokens. + args.consumed_train_samples = 0 + args.consumed_valid_samples = 0 + + # Support for variable sequence lengths across batches/microbatches. + # set it if the dataloader supports generation of variable sequence lengths + # across batches/microbatches. Due to additional communication overhead + # during pipeline parallelism, it should not be set if sequence length + # is constant during training. + args.model_parallel.variable_seq_lengths = False + + # Iteration-based training. + if args.train_iters: + # If we use iteration-based training, make sure the + # sample-based options are off. 
+ assert args.train_samples is None, \ + 'expected iteration-based training' + assert args.lr_decay_samples is None, \ + 'expected iteration-based learning rate decay' + assert args.lr_warmup_samples == 0, \ + 'expected iteration-based learning rate warmup' + assert args.rampup_batch_size is None, \ + 'expected no batch-size rampup for iteration-based training' + if args.lr_warmup_fraction is not None: + assert args.lr_warmup_iters == 0, \ + 'can only specify one of lr-warmup-fraction and lr-warmup-iters' + + # Sample-based training. + if args.train_samples: + # If we use sample-based training, make sure the + # iteration-based options are off. + assert args.train_iters is None, \ + 'expected sample-based training' + assert args.lr_decay_iters is None, \ + 'expected sample-based learning rate decay' + assert args.lr_warmup_iters == 0, \ + 'expected sample-based learnig rate warmup' + if args.lr_warmup_fraction is not None: + assert args.lr_warmup_samples == 0, \ + 'can only specify one of lr-warmup-fraction ' \ + 'and lr-warmup-samples' + + # How to handle this better + if args.language_model.num_layers is not None: + assert args.encoder_num_layers is None, \ + 'cannot have both num-layers and encoder-num-layers specified' + args.encoder_num_layers = args.language_model.num_layers + else: + assert args.encoder_num_layers is not None, \ + 'either num-layers or encoder-num-layers should be specified' + args.language_model.num_layers = args.encoder_num_layers + + # Check required arguments. + # removed max_position_embeddings from reqs + required_args = ['num_layers', 'hidden_size', 'num_attention_heads'] + for req_arg in required_args: + _check_arg_is_not_none(args.language_model, req_arg) + + # Checks. + if args.language_model.ffn_hidden_size is None: + if args.language_model.activation_func == "swiglu": + # reduce the dimnesion for MLP since projections happens on + # two linear layers. this keeps the number of paramters in + # the same ballpark as the counterpart with 4*h size + # we keep it a multiple of 64, which means the actual tensor size + # will be a multiple of 64 / tp_size + args.language_model.ffn_hidden_size = int((4 * args.language_model.hidden_size * 2 / 3) / 64) * 64 + else: + args.language_model.ffn_hidden_size = 4 * args.language_model.hidden_size + + if args.language_model.kv_channels is None: + assert args.language_model.hidden_size % args.language_model.num_attention_heads == 0 + args.language_model.kv_channels = args.language_model.hidden_size // args.language_model.num_attention_heads + + #TODO: Implement arguments for encoder-decoder + if args.seq_length is not None: + assert args.encoder_seq_length is None + args.encoder_seq_length = args.seq_length + else: + assert args.encoder_seq_length is not None + args.seq_length = args.encoder_seq_length + + if args.seq_length is not None: + assert args.max_position_embeddings >= args.seq_length + if args.decoder_seq_length is not None: + assert args.max_position_embeddings >= args.decoder_seq_length + if args.lr is not None: + assert args.min_lr <= args.lr + if args.save is not None: + assert args.save_interval is not None + # Mixed precision checks. + if args.fp16_lm_cross_entropy: + assert args.fp16, 'lm cross entropy in fp16 only support in fp16 mode.' + if args.language_model.fp32_residual_connection: + assert args.model_parallel.fp16 or args.model_parallel.bf16, \ + 'residual connection in fp32 only supported when using fp16 or bf16.' 
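Two of the derived quantities computed in validate_yaml above are easy to check by hand; the numbers below are illustrative rather than defaults from this patch.

# Worked example of the bookkeeping in validate_yaml above (illustrative numbers).
world_size = 16
tensor_model_parallel_size = 2
pipeline_model_parallel_size = 2
context_parallel_size = 1

model_parallel_size = tensor_model_parallel_size * pipeline_model_parallel_size
assert world_size % (model_parallel_size * context_parallel_size) == 0
data_parallel_size = world_size // (model_parallel_size * context_parallel_size)
assert data_parallel_size == 4

# SwiGLU default ffn_hidden_size: 2/3 of 4*h, rounded down to a multiple of 64.
hidden_size = 1024
ffn_hidden_size = int((4 * hidden_size * 2 / 3) / 64) * 64
assert ffn_hidden_size == 2688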
+ + if args.language_model.moe_grouped_gemm: + assert args.model_parallel.bf16, 'Currently GroupedGEMM for MoE only supports bf16 dtype.' + dc = torch.cuda.get_device_capability() + assert dc[0] >= 8, "Unsupported compute capability for GroupedGEMM kernels." + + if args.weight_decay_incr_style == 'constant': + assert args.start_weight_decay is None + assert args.end_weight_decay is None + args.start_weight_decay = args.weight_decay + args.end_weight_decay = args.weight_decay + else: + assert args.start_weight_decay is not None + assert args.end_weight_decay is not None + + TORCH_MAJOR = int(torch.__version__.split('.')[0]) + TORCH_MINOR = int(torch.__version__.split('.')[1]) + # Persistent fused layer norm. + if TORCH_MAJOR < 1 or (TORCH_MAJOR == 1 and TORCH_MINOR < 11): + args.language_model.persist_layer_norm = False + if args.rank == 0: + print('Persistent fused layer norm kernel is supported from ' + 'pytorch v1.11 (nvidia pytorch container paired with v1.11). ' + 'Defaulting to no_persist_layer_norm=True') + + # Activation recomputing. + if args.language_model.distribute_saved_activations: + assert args.model_parallel.tensor_model_parallel_size > 1, 'can distribute ' \ + 'recomputed activations only across tensor model ' \ + 'parallel groups' + assert args.language_model.recompute_granularity == 'full', \ + 'distributed recompute activations is only '\ + 'application to full recompute granularity' + assert args.language_model.recompute_method is not None, \ + 'for distributed recompute activations to work you '\ + 'need to use a recompute method ' + assert (TORCH_MAJOR, TORCH_MINOR) >= (1, 10), \ + 'distributed recompute activations are supported for pytorch ' \ + 'v1.10 and above (Nvidia Pytorch container >= 21.07). Current ' \ + 'pytorch version is v%s.%s.' % (TORCH_MAJOR, TORCH_MINOR) + + if args.language_model.recompute_granularity == 'selective': + assert args.language_model.recompute_method is None, \ + 'recompute method is not yet supported for ' \ + 'selective recomputing granularity' + + # disable sequence parallelism when tp=1 + # to avoid change in numerics when + # sequence_parallelism is enabled. + if args.model_parallel.tensor_model_parallel_size == 1: + args.model_parallel.sequence_parallel = False + + # disable async_tensor_model_parallel_allreduce when + # model parallel memory optimization is enabled + if args.model_parallel.sequence_parallel: + args.model_parallel.async_tensor_model_parallel_allreduce = False + + if os.environ.get('CUDA_DEVICE_MAX_CONNECTIONS') != "1": + if args.model_parallel.sequence_parallel: + raise RuntimeError( + "Using sequence parallelism requires setting the environment variable " + "CUDA_DEVICE_MAX_CONNECTIONS to 1") + if args.model_parallel.async_tensor_model_parallel_allreduce: + raise RuntimeError( + "Using async gradient all reduce requires setting the environment " + "variable CUDA_DEVICE_MAX_CONNECTIONS to 1") + + # Retro checks. + if getattr(args, 'retro_add_retriever', False): + + # Sequence parallelism unsupported. + assert not args.sequence_parallel, \ + "retro currently does not support sequence parallelism." + + # Pipeline parallelism unsupported. + assert args.pipeline_model_parallel_size == 1, \ + "retro currently does not support pipeline parallelism." + + #TODO: Retro args loading not tested + # Load retro args (used by both Retro & GPT). 
+ if getattr(args, 'retro_workdir', None) is not None: + retro_args_path = get_retro_args_path(args.retro_workdir) + assert os.path.exists(retro_args_path), "retro workdir missing args.json" + with open(retro_args_path) as f: + retro_args = types.SimpleNamespace(**json.load(f)) + retro_args.retro_return_doc_ids = args.retro_return_doc_ids + retro_args.retro_gpt_retrieved_length = \ + args.retro_num_retrieved_chunks * \ + retro_args.retro_gpt_chunk_length + set_retro_args(retro_args) + + if args.language_model.rotary_interleaved and args.language_model.apply_rope_fusion: + raise RuntimeError('--rotary-interleaved does not work with rope_fusion.') + + # MoE Spec check + if args.language_model.num_moe_experts is not None: + assert args.spec is None, "Model Spec must be None when using MoEs" + if args.model_parallel.tensor_model_parallel_size > 1: + assert args.model_parallel.sequence_parallel, \ + "When using MoE and tensor parallelism, sequence parallelism must be used." + + # Expert parallelism check + if args.model_parallel.expert_model_parallel_size > 1: + assert args.language_model.num_moe_experts is not None, "num_experts must be non None to use expert model parallelism" + assert args.language_model.num_moe_experts % args.model_parallel.expert_model_parallel_size == 0, \ + "Number of experts should be a multiple of expert model parallel_size." + assert not args.model_parallel.fp16, \ + "Expert parallelism is not supported with fp16 training." + + # Print arguments. + _print_args("arguments", args) + retro_args = get_retro_args() + if retro_args and args != retro_args: + _print_args("retro arguments", types.SimpleNamespace(**{k:v for k,v in vars(retro_args).items() if k.startswith("retro")}, rank=args.rank)) + + #TODO: Added as much of the global initialization requires the model parallel arguments + args = SimpleNamespace(**args.__dict__, **args.model_parallel.__dict__) + args = SimpleNamespace(**args.__dict__, **args.language_model.__dict__) + # For GPT Layer spec in pretrain_gpt + args.num_experts = args.language_model.num_moe_experts + + return args + +def _print_args(title, args): + """Print arguments.""" + if args.rank == 0: + print(f'------------------------ {title} ------------------------', + flush=True) + str_list = [] + for arg in vars(args): + dots = '.' 
* (48 - len(arg)) + str_list.append(' {} {} {}'.format(arg, dots, getattr(args, arg))) + for arg in sorted(str_list, key=lambda x: x.lower()): + print(arg, flush=True) + print(f'-------------------- end of {title} ---------------------', + flush=True) + +def core_config_from_args(args, dataclass=TransformerConfig): + """Builds core config object from namespace args from given dataclass + + Raises exception if argument missing in args + + Args: + args(SimpleNamespace, optional): Namespace to pull argument values from + dataclass (dataclass, optional): Core dataclass config to pull argument names from + + + Returns: + SimpleNamespace: The returned namespace to build core config from + """ + kw_args = {} + for f in dataclasses.fields(dataclass): + if hasattr(args, f.name): + kw_args[f.name] = getattr(args, f.name) + else: + raise Exception(f"Missing argument {f.name} for {str(dataclass)} config") + return kw_args + +def _check_arg_is_not_none(args, arg): + assert getattr(args, arg) is not None, '{} argument is None'.format(arg) + +def core_transformer_config_from_yaml(args, transfomer_key = "language_model"): + # Combine transfomer config with model parallel args + args = SimpleNamespace(**vars(getattr(args, transfomer_key)), **vars(args.model_parallel)) + # Translate args to core transformer configuration + kw_args = core_config_from_args(args, TransformerConfig) + + # Hardcoded + kw_args['deallocate_pipeline_outputs'] = True + kw_args['pipeline_dtype'] = kw_args['params_dtype'] + kw_args['batch_p2p_comm'] = not args.overlap_p2p_comm + + assert args.activation_func in ["swiglu","squaredrelu","gelu"], f"{args.activation_func} is not a supported activation function" + if args.activation_func == "swiglu": + kw_args['activation_func'] = F.silu + kw_args['gated_linear_unit'] = True + kw_args['bias_activation_fusion'] = args.bias_swiglu_fusion + elif args.activation_func == "squaredrelu": + def squared_relu(x): + return torch.pow(F.relu(x), 2) + kw_args['activation_func'] = squared_relu + elif args.activation_func == "gelu": + kw_args['activation_func'] = F.gelu + if args.add_bias_linear: + kw_args['bias_activation_fusion'] = False + else: + kw_args['bias_activation_fusion'] = args.bias_activation_fusion + + if args.init_method == "xavier_uniform": + kw_args['init_method'] = torch.nn.init.xavier_uniform_ + kw_args['scaled_init_method'] = torch.nn.init.xavier_uniform_ + + #TODO: untested handling of retro + # If using Retro, return Retro config. + retro_args = get_retro_args() + if retro_args: + kw_args['retro_preprocess'] = retro_args + return RetroConfig(**kw_args) + + # Return Transformer config. 
+ return TransformerConfig(**kw_args) + +def load_yaml(yaml_path): + print(f"warning using experimental yaml arguments feature, argparse arguments will be ignored") + with open(yaml_path, "r") as f: + config = yaml.load(f,Loader=yaml.FullLoader) + # Convert to nested namespace + config_namespace = json.loads(json.dumps(config), object_hook=lambda item: SimpleNamespace(**item)) + # Add config location to namespace + config_namespace.yaml_cfg = yaml_path + return config_namespace + diff --git a/pretrain_gpt.py b/pretrain_gpt.py index 03764030fa..af296c7167 100644 --- a/pretrain_gpt.py +++ b/pretrain_gpt.py @@ -24,6 +24,7 @@ average_losses_across_data_parallel_group ) from megatron.arguments import core_transformer_config_from_args +from megatron.yaml_arguments import core_transformer_config_from_yaml from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec @@ -43,7 +44,11 @@ def model_provider(pre_process=True, post_process=True) -> Union[GPTModel, megat args = get_args() print_rank_0('building GPT model ...') - config = core_transformer_config_from_args(get_args()) + # Experimental loading arguments from yaml + if args.yaml_cfg is not None: + config = core_transformer_config_from_yaml(args, "language_model") + else: + config = core_transformer_config_from_args(args) if args.use_mcore_models: if args.spec is not None: From 63d9d3e8bc88cd9e043a07a3d80ff34ca23dbd67 Mon Sep 17 00:00:00 2001 From: Joel Lamy-Poirier Date: Fri, 8 Mar 2024 16:39:59 -0500 Subject: [PATCH 291/296] MOE support --- megatron/model/language_model.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/megatron/model/language_model.py b/megatron/model/language_model.py index b351158ece..9786502b13 100644 --- a/megatron/model/language_model.py +++ b/megatron/model/language_model.py @@ -442,10 +442,17 @@ def __init__(self, key[2]="norm_2" elif key[2]=="self_attention": key[2]="self_attn" - elif key[3]=="dense_h_to_4h": - key[3]="layer_1" - elif key[3]=="dense_4h_to_h": - key[3]="layer_2" + elif key[2]=="mlp": + mlp_key=3 + if key[3] in ("local_experts","router"): + key[2]="mixture_of_experts" + if key[3]=="local_experts": + key[3]="experts" + mlp_key=5 + if key[mlp_key]=="dense_h_to_4h": + key[mlp_key]="layer_1" + elif key[mlp_key]=="dense_4h_to_h": + key[mlp_key]="layer_2" else: assert key[0]=="final_norm", key[0] key=["layers",str(args.encoder_num_layers+1), "final_norm"]+key[1:] From 40a134a64b0c5452d5dcffdc994c4bd3ea10249c Mon Sep 17 00:00:00 2001 From: Joel Lamy-Poirier Date: Fri, 8 Mar 2024 18:06:30 -0500 Subject: [PATCH 292/296] stuff --- megatron/model/transformer.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/megatron/model/transformer.py b/megatron/model/transformer.py index 3a8df1bbad..394090a16d 100644 --- a/megatron/model/transformer.py +++ b/megatron/model/transformer.py @@ -231,6 +231,7 @@ def forward(self, hidden_states): b = hidden_states.size(1) h = hidden_states.size(2) route = self.router(hidden_states).view(-1, args.num_experts) + log_tensor("LOGITS", route.unflatten(0, (b,s)).transpose(0,1).flatten(0,2), level=5) # TODO (rprenger) Right now we're just using the sinkhorn algorithm # for load balancing. 
There should be an option to do no load balancing @@ -248,6 +249,9 @@ def forward(self, hidden_states): max_prob = torch.unsqueeze(max_prob, 1) hidden_states = hidden_states.view(-1, hidden_states.size(2)) + log_tensor("SCORES", max_prob.unflatten(0, (b,s)).transpose(0,1).flatten(0,2), level=5) + log_tensor("INDICES", max_ind.unflatten(0, (b,s)).transpose(0,1).flatten(0,2), level=5) + # TODO (rprenger) TODO this could be made easier to read # Converting [s, b, h] to [s*b, h]. # Each vector could be routed differently @@ -1191,7 +1195,7 @@ def forward(self, hidden_states, attention_mask, if self._debug_transformer: log_tensor( f"Layer {self.layer_number} Attn output", - (hidden_states + attention_bias).transpose(0,1), + (hidden_states if attention_bias is None else hidden_states + attention_bias).transpose(0,1), level=self._debug_transformer ) @@ -1279,7 +1283,7 @@ def forward(self, hidden_states, attention_mask, if self._debug_transformer: log_tensor( f"Layer {self.layer_number} MLP output", - (mlp_output + mlp_bias).transpose(0,1), + (mlp_output if mlp_bias is None else mlp_output + mlp_bias).transpose(0,1), level=self._debug_transformer ) From fdd668c4727111e63d35f0f8033e3be555a4d1a6 Mon Sep 17 00:00:00 2001 From: Joel Lamy-Poirier Date: Mon, 11 Mar 2024 18:40:02 -0400 Subject: [PATCH 293/296] Support megatron core models --- megatron/core/models/gpt/gpt_model.py | 64 +++++++++++++++++++ megatron/core/transformer/attention.py | 12 +++- .../core/transformer/transformer_block.py | 19 +++++- .../core/transformer/transformer_layer.py | 31 +++++++++ megatron/model/transformer.py | 4 -- megatron/yaml_arguments.py | 5 +- 6 files changed, 128 insertions(+), 7 deletions(-) diff --git a/megatron/core/models/gpt/gpt_model.py b/megatron/core/models/gpt/gpt_model.py index 16a5b351cc..8431216d87 100644 --- a/megatron/core/models/gpt/gpt_model.py +++ b/megatron/core/models/gpt/gpt_model.py @@ -17,6 +17,8 @@ from megatron.core.transformer.transformer_block import TransformerBlock from megatron.core.transformer.transformer_config import TransformerConfig from megatron.core.utils import make_tp_sharded_tensor_for_checkpoint +from megatron.tensor_logging import log_tensor +from megatron import get_args class GPTModel(LanguageModule): @@ -116,6 +118,60 @@ def __init__( if self.share_embeddings_and_output_weights and (self.pre_process or self.post_process): self.initialize_last_stage_with_word_embeddings() + from megatron import print_rank_0 + print_rank_0(self) + for i, (key, value) in enumerate(self.named_parameters()): + # Store standardized parameter names for debug purposes. + key=key.split(".") + if key[0]=="decoder": + # Remove "encoder" prefix. + key=key[1:] + if key[0]=="layers": + # Shift layer index. 
+ key[1]=str(int(key[1])+1) + if key[2]=="input_layernorm": + key[2]="norm_1" + elif key[2]=="pre_mlp_layernorm": + key[2]="norm_2" + elif key[2]=="self_attention": + if "layer_norm" in key[-1]: + key=[*key[:2], "norm_1", key[-1].split("_")[-1]] + else: + key[2]="self_attn" + if key[3]=="linear_qkv": + key[3]="query_key_value" + elif key[3]=="linear_proj": + key[3]="dense" + + elif key[2]=="mlp": + mlp_key=3 + if "layer_norm" in key[-1]: + key=[*key[:2], "norm_2", key[-1].split("_")[-1]] + else: + if key[3] in ("experts","router"): + key[2]="mixture_of_experts" + if key[3]=="experts": + key.pop(4) + mlp_key=5 + if key[mlp_key]=="linear_fc1": + key[mlp_key]="layer_1" + elif key[mlp_key]=="linear_fc2": + key[mlp_key]="layer_2" + else: + assert key[0]=="final_layernorm", key[0] + key=["layers",str(self.config.num_layers+1), "final_norm"]+key[1:] + elif key[0]=="embedding": + key=["layers", "0", "_".join(key[1:])] + elif key[0] == "output_layer": + key = ["layers", str(self.config.num_layers+1), "output_weights"] + else: + # Not implemented but still ok + pass + + value.param_name = ".".join(key) + value.param_idx = i + + def set_input_tensor(self, input_tensor: Tensor) -> None: """Sets input tensor to the model. @@ -157,6 +213,14 @@ def forward( pass elif self.pre_process: decoder_input = self.embedding(input_ids=input_ids, position_ids=position_ids) + args = get_args() + if args.debug_layer_outputs: + log_tensor(f"Global layer 0 fw: Embedding output", decoder_input.transpose(0, 1), level=args.debug_layer_outputs) + if args.debug_layer_gradients: + decoder_input.register_hook(lambda grad: log_tensor( + f"Global layer 1 bw: Embedding output", + grad.transpose(0, 1), level=args.debug_layer_gradients + )) else: # intermediate stage of pipeline # decoder will get hidden_states from encoder.input_tensor diff --git a/megatron/core/transformer/attention.py b/megatron/core/transformer/attention.py index a67c753751..fba2c7af29 100644 --- a/megatron/core/transformer/attention.py +++ b/megatron/core/transformer/attention.py @@ -19,7 +19,7 @@ from .enums import AttnMaskType from .transformer_config import TransformerConfig - +from megatron.tensor_logging import log_tensor @dataclass class SelfAttentionSubmodules: @@ -58,6 +58,10 @@ def __init__( self.attn_mask_type = attn_mask_type self.attention_type = attention_type + from megatron import get_args + args = get_args() + self._debug_transformer=args.debug_transformer + # For normal attention without groups, num_query_groups == num_attention_heads, # so these two will be the same self.query_projection_size = self.config.kv_channels * self.config.num_attention_heads @@ -324,6 +328,12 @@ def forward( output, bias = self.linear_proj(core_attn_out) + if self._debug_transformer: + log_tensor(f"Layer {self.layer_number} Query", query.transpose(0,1), level=self._debug_transformer) + log_tensor(f"Layer {self.layer_number} Key", key.transpose(0,1), level=self._debug_transformer) + log_tensor(f"Layer {self.layer_number} Value", value.transpose(0,1), level=self._debug_transformer) + log_tensor(f"Layer {self.layer_number} Attn context", core_attn_out.transpose(0,1), level=self._debug_transformer) + return output, bias diff --git a/megatron/core/transformer/transformer_block.py b/megatron/core/transformer/transformer_block.py index 8b8dad0c4e..a4d4da760c 100755 --- a/megatron/core/transformer/transformer_block.py +++ b/megatron/core/transformer/transformer_block.py @@ -24,6 +24,7 @@ from megatron.core.transformer.transformer_layer import BaseTransformerLayer, 
TransformerLayer from megatron.core.transformer.utils import sharded_state_dict_default from megatron.core.utils import make_sharded_tensor_for_checkpoint, make_viewless_tensor +from megatron.tensor_logging import log_tensor def get_num_layers_to_build(config: TransformerConfig) -> int: @@ -101,6 +102,11 @@ def __init__( ): super().__init__(config=config) + from megatron import get_args + args = get_args() + self._debug_layer_outputs = args.debug_layer_outputs + self._debug_layer_gradients = args.debug_layer_gradients + self.submodules = _get_block_submodules(config, spec) self.post_layer_norm = post_layer_norm self.pre_process = pre_process @@ -365,7 +371,7 @@ def forward( packed_seq_params=packed_seq_params, ) else: - for layer in self.layers: + for index, layer in enumerate(self.layers): with self.offload_context: hidden_states, context = layer( hidden_states=hidden_states, @@ -376,6 +382,17 @@ def forward( inference_params=inference_params, packed_seq_params=packed_seq_params, ) + if self._debug_layer_outputs: + log_tensor( + f"Global layer {index + 1} fw: Transformer layer {index+1} output", + hidden_states.transpose(0, 1), level=self._debug_layer_outputs + ) + if self._debug_layer_gradients: + fn=lambda idx:(lambda grad: log_tensor( + f"Global layer {idx + 2} bw: Transformer layer {idx+1} output", + grad.transpose(0, 1), level=self._debug_layer_gradients + )) + hidden_states.register_hook(fn(index)) if ( torch.is_grad_enabled() diff --git a/megatron/core/transformer/transformer_layer.py b/megatron/core/transformer/transformer_layer.py index edc45bbec4..5170aec41b 100644 --- a/megatron/core/transformer/transformer_layer.py +++ b/megatron/core/transformer/transformer_layer.py @@ -15,6 +15,7 @@ from megatron.core.transformer.spec_utils import ModuleSpec, build_module from megatron.core.transformer.transformer_config import TransformerConfig from megatron.core.utils import make_viewless_tensor +from megatron.tensor_logging import log_tensor @dataclass @@ -68,6 +69,10 @@ def __init__( super().__init__(config=config) self.submodules_config = submodules + from megatron import get_args + args = get_args() + self._debug_layer_outputs=args.debug_layer_outputs + self.layer_number = layer_number + self._get_layer_offset() self.hidden_dropout = config.hidden_dropout if hidden_dropout is None else hidden_dropout @@ -191,6 +196,19 @@ def forward( # Residual connection. 
residual = hidden_states + if self._debug_layer_outputs: + attention_output, attention_bias = attention_output_with_bias + log_tensor( + f"Layer {self.layer_number} norm 1", + input_layernorm_output.transpose(0,1), + level=self._debug_layer_outputs + ) + log_tensor( + f"Layer {self.layer_number} Attn output", + (attention_output if attention_bias is None else attention_output + attention_bias).transpose(0,1), + level=self._debug_layer_outputs + ) + log_tensor(f"Layer {self.layer_number} Attn residual", residual.transpose(0,1), level=self._debug_layer_outputs) # Optional Layer norm after self-attention pre_cross_attn_layernorm_output = self.pre_cross_attn_layernorm(hidden_states) @@ -238,6 +256,19 @@ def forward( inp=hidden_states, requires_grad=hidden_states.requires_grad, keep_graph=True ) + if self._debug_layer_outputs: + mlp_output, mlp_bias = mlp_output_with_bias if isinstance(mlp_output_with_bias, tuple) else (mlp_output_with_bias, None) + log_tensor( + f"Layer {self.layer_number} norm 2", + pre_mlp_layernorm_output.transpose(0,1), + level=self._debug_layer_outputs + ) + log_tensor( + f"Layer {self.layer_number} MLP output", + (mlp_output if mlp_bias is None else mlp_output + mlp_bias).transpose(0,1), + level=self._debug_layer_outputs + ) + return output, context def sharded_state_dict(self, prefix: str = '', sharded_offsets: tuple = ()) -> ShardedStateDict: diff --git a/megatron/model/transformer.py b/megatron/model/transformer.py index 394090a16d..09963fcc3d 100644 --- a/megatron/model/transformer.py +++ b/megatron/model/transformer.py @@ -231,7 +231,6 @@ def forward(self, hidden_states): b = hidden_states.size(1) h = hidden_states.size(2) route = self.router(hidden_states).view(-1, args.num_experts) - log_tensor("LOGITS", route.unflatten(0, (b,s)).transpose(0,1).flatten(0,2), level=5) # TODO (rprenger) Right now we're just using the sinkhorn algorithm # for load balancing. There should be an option to do no load balancing @@ -249,9 +248,6 @@ def forward(self, hidden_states): max_prob = torch.unsqueeze(max_prob, 1) hidden_states = hidden_states.view(-1, hidden_states.size(2)) - log_tensor("SCORES", max_prob.unflatten(0, (b,s)).transpose(0,1).flatten(0,2), level=5) - log_tensor("INDICES", max_ind.unflatten(0, (b,s)).transpose(0,1).flatten(0,2), level=5) - # TODO (rprenger) TODO this could be made easier to read # Converting [s, b, h] to [s*b, h]. 
# Each vector could be routed differently diff --git a/megatron/yaml_arguments.py b/megatron/yaml_arguments.py index 5601e2ee67..f86c74efb7 100644 --- a/megatron/yaml_arguments.py +++ b/megatron/yaml_arguments.py @@ -16,11 +16,14 @@ import torch.nn.functional as F from megatron.global_vars import set_retro_args, get_retro_args -from tools.retro.utils import get_args_path as get_retro_args_path from megatron.core.models.retro import RetroConfig from megatron.core.transformer import TransformerConfig +def get_retro_args_path(workdir): + '''Argument copy stored within retro workdir.''' + return os.path.join(workdir, "args.json") + # Taken from https://stackoverflow.com/questions/65414773/parse-environment-variable-from-yaml-with-pyyaml # Allows for yaml to use environment variables env_pattern = re.compile(r".*?\${(.*?)}.*?") From 4238a80cd661351e395244cae4420fa4f379c5a4 Mon Sep 17 00:00:00 2001 From: Joel Lamy-Poirier Date: Mon, 11 Mar 2024 18:56:47 -0400 Subject: [PATCH 294/296] Fix arg --- pretrain_gpt.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/pretrain_gpt.py b/pretrain_gpt.py index 514c76507a..8d424644f9 100644 --- a/pretrain_gpt.py +++ b/pretrain_gpt.py @@ -26,7 +26,7 @@ from megatron.tensor_logging import log_tensor, run_and_log_exception from megatron.arguments import core_transformer_config_from_args from megatron.yaml_arguments import core_transformer_config_from_yaml -from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec +from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec, get_gpt_layer_local_spec def model_provider(pre_process=True, post_process=True) -> Union[GPTModel, megatron.model.GPTModel]: @@ -54,8 +54,12 @@ def model_provider(pre_process=True, post_process=True) -> Union[GPTModel, megat if args.use_mcore_models: if args.spec is not None: transformer_layer_spec = import_module(args.spec) - else: + elif args.transformer_impl=="local": + transformer_layer_spec = get_gpt_layer_local_spec(args.num_experts, args.moe_grouped_gemm) + elif args.transformer_impl=="transformer_engine": transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec(args.num_experts, args.moe_grouped_gemm) + else: + raise NotImplementedError() model = GPTModel( config=config, From fe38434e650698706c94654dbd969158408058a3 Mon Sep 17 00:00:00 2001 From: Joel Lamy-Poirier Date: Tue, 12 Mar 2024 18:50:22 -0400 Subject: [PATCH 295/296] fixes --- megatron/core/optimizer/optimizer.py | 2 ++ megatron/core/transformer/transformer_layer.py | 16 ++++++++-------- megatron/tensor_logging.py | 2 +- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/megatron/core/optimizer/optimizer.py b/megatron/core/optimizer/optimizer.py index 41a1e87fd4..5ad9a903f7 100644 --- a/megatron/core/optimizer/optimizer.py +++ b/megatron/core/optimizer/optimizer.py @@ -65,9 +65,11 @@ def __init__( args=get_args() if args.debug_param_init: + log_generator("CPU generator after reset", torch.random.default_generator) log_generator("PP init generator after reset") with tensor_parallel.get_cuda_rng_tracker().fork(): log_generator("TP init generator after reset") + for param in sorted(self.get_parameters(), key=lambda p: p.param_idx): log_tensor(f"Global param: {param.param_name}", param, level=args.debug_param_init) diff --git a/megatron/core/transformer/transformer_layer.py b/megatron/core/transformer/transformer_layer.py index 5170aec41b..75f26b3f0c 100644 --- 
a/megatron/core/transformer/transformer_layer.py +++ b/megatron/core/transformer/transformer_layer.py @@ -71,7 +71,7 @@ def __init__( from megatron import get_args args = get_args() - self._debug_layer_outputs=args.debug_layer_outputs + self._debug_transformer=args.debug_transformer self.layer_number = layer_number + self._get_layer_offset() self.hidden_dropout = config.hidden_dropout if hidden_dropout is None else hidden_dropout @@ -196,19 +196,19 @@ def forward( # Residual connection. residual = hidden_states - if self._debug_layer_outputs: + if self._debug_transformer: attention_output, attention_bias = attention_output_with_bias log_tensor( f"Layer {self.layer_number} norm 1", input_layernorm_output.transpose(0,1), - level=self._debug_layer_outputs + level=self._debug_transformer ) log_tensor( f"Layer {self.layer_number} Attn output", (attention_output if attention_bias is None else attention_output + attention_bias).transpose(0,1), - level=self._debug_layer_outputs + level=self._debug_transformer ) - log_tensor(f"Layer {self.layer_number} Attn residual", residual.transpose(0,1), level=self._debug_layer_outputs) + log_tensor(f"Layer {self.layer_number} Attn residual", residual.transpose(0,1), level=self._debug_transformer) # Optional Layer norm after self-attention pre_cross_attn_layernorm_output = self.pre_cross_attn_layernorm(hidden_states) @@ -256,17 +256,17 @@ def forward( inp=hidden_states, requires_grad=hidden_states.requires_grad, keep_graph=True ) - if self._debug_layer_outputs: + if self._debug_transformer: mlp_output, mlp_bias = mlp_output_with_bias if isinstance(mlp_output_with_bias, tuple) else (mlp_output_with_bias, None) log_tensor( f"Layer {self.layer_number} norm 2", pre_mlp_layernorm_output.transpose(0,1), - level=self._debug_layer_outputs + level=self._debug_transformer ) log_tensor( f"Layer {self.layer_number} MLP output", (mlp_output if mlp_bias is None else mlp_output + mlp_bias).transpose(0,1), - level=self._debug_layer_outputs + level=self._debug_transformer ) return output, context diff --git a/megatron/tensor_logging.py b/megatron/tensor_logging.py index b5d2d399a8..44b9b90794 100644 --- a/megatron/tensor_logging.py +++ b/megatron/tensor_logging.py @@ -132,4 +132,4 @@ def log_generator( if generator is None: generator = torch.cuda.default_generators[torch.cuda.current_device()] tensor = generator.get_state() if isinstance(generator, torch.Generator) else generator - return log_fn(f"{name} {tensor.flatten()[-16:].tolist()}") + return log_fn(f"{name} {tensor.view(dtype=torch.int64)[-8:].tolist()}") From 3c6652e3d9f3eb6b274f71e2b9cbc8acce8649ce Mon Sep 17 00:00:00 2001 From: Joel Lamy-Poirier Date: Wed, 29 May 2024 14:24:12 -0400 Subject: [PATCH 296/296] fix --- megatron/core/models/gpt/gpt_model.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/megatron/core/models/gpt/gpt_model.py b/megatron/core/models/gpt/gpt_model.py index 8431216d87..ba3dbc9eb7 100644 --- a/megatron/core/models/gpt/gpt_model.py +++ b/megatron/core/models/gpt/gpt_model.py @@ -148,11 +148,9 @@ def __init__( if "layer_norm" in key[-1]: key=[*key[:2], "norm_2", key[-1].split("_")[-1]] else: - if key[3] in ("experts","router"): - key[2]="mixture_of_experts" - if key[3]=="experts": - key.pop(4) - mlp_key=5 + if key[3]=="experts": + key.pop(4) + mlp_key=5 if key[mlp_key]=="linear_fc1": key[mlp_key]="layer_1" elif key[mlp_key]=="linear_fc2":
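To make the intent of this final fix concrete, the following is a hedged trace of the parameter-name standardization from the earlier gpt_model.py hunk, with the corrected expert branch applied to one hypothetical MoE parameter name.

# Hedged trace of the standardized-name mapping with the corrected expert branch.
# The starting key is a hypothetical megatron-core MoE parameter name.
key = "decoder.layers.0.mlp.experts.local_experts.3.linear_fc1.weight".split(".")
key = key[1:]                  # drop the "decoder" prefix
key[1] = str(int(key[1]) + 1)  # shift the layer index by one
key.pop(4)                     # drop "local_experts", as in the fixed branch above
key[5] = "layer_1"             # linear_fc1 -> layer_1
print(".".join(key))           # layers.1.mlp.experts.3.layer_1.weight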