TP does not work on Flan-T5 #215

Open
shreyansh26 opened this issue Nov 16, 2023 · 0 comments
How to reproduce

Using almost the same code as in the OSLO tutorials:

from tqdm import tqdm
from torch.optim import Adam
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
import oslo
from oslo.torch.distributed import ParallelContext, ParallelMode
from oslo.torch.nn.parallel import TensorParallel
from datasets import load_dataset
from torch.utils.data import DataLoader, DistributedSampler

BATCH_SIZE = 4
SEQ_LEN = 64
SAVE_INTERVAL = 50
TRAIN_STEP = 100

model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-small")
optimizer = Adam(model.parameters(), lr=3e-5)
tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-small")

# Carried over from the GPT-2 tutorial, where the tokenizer has no pad token;
# the T5 tokenizer already defines one, so this only swaps it for the EOS token.
tokenizer.pad_token = tokenizer.eos_token

tp_size = 2
tp_depth = 1
dp_size = 2

parallel_context = ParallelContext.from_torch(
    data_parallel_size=dp_size,
    pipeline_parallel_size=1,
    tensor_parallel_size=tp_size,
    tensor_parallel_mode=ParallelMode.TENSOR_1D,
    tensor_parallel_depth=tp_depth,
)
model = TensorParallel(model, parallel_context)
oslo.ready(model, parallel_context)

datasets = load_dataset("squad").data["train"]["context"]
datasets = [str(_) for _ in datasets[: TRAIN_STEP * BATCH_SIZE]]

rank = parallel_context.get_local_rank(ParallelMode.DATA)

train_sampler = DistributedSampler(datasets, num_replicas=dp_size, rank=rank)
dataloader = DataLoader(datasets, batch_size=BATCH_SIZE, sampler=train_sampler, shuffle=False)


for step, batch in enumerate(tqdm(dataloader)):
    optimizer.zero_grad()

    # Make batch
    input_batch = tokenizer(
        batch,
        return_tensors="pt",
        padding=True,
        truncation=True,
        max_length=SEQ_LEN,
    ).to("cuda")

    # Forward-Backward-Step
    loss = model(**input_batch, labels=input_batch["input_ids"]).loss
    loss.backward()
    optimizer.step()

# Save the merged model using `save_pretrained`
model.save_pretrained(
    save_directory="./parallel_ckpt",
    merge_checkpoints=True 
)
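
With data_parallel_size=2 and tensor_parallel_size=2 this needs a world size of 4; a standard torchrun launch should match, e.g. (the script name is a placeholder):

torchrun --nproc_per_node=4 reproduce_tp_flan_t5.py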

Error

    loss = model(**input_batch, labels=input_batch["input_ids"]).loss
  File "/home/shreyansh/miniconda3/envs/shreyansh-env-py10/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/home/shreyansh/miniconda3/envs/shreyansh-env-py10/lib/python3.10/site-packages/oslo_core-3.0.0-py3.10.egg/oslo/torch/nn/parallel/tensor_parallel/tensor_parallel.py", line 95, in forward
    return self.module_forward(*args, **kwargs)
  File "/home/shreyansh/miniconda3/envs/shreyansh-env-py10/lib/python3.10/site-packages/oslo_core-3.0.0-py3.10.egg/oslo/torch/nn/parallel/tensor_parallel/_1d/_wrapper.py", line 61, in forward
    return self.module_forward(*args, **kwargs)
  File "/home/shreyansh/miniconda3/envs/shreyansh-env-py10/lib/python3.10/site-packages/transformers/models/t5/modeling_t5.py", line 1709, in forward
    encoder_outputs = self.encoder(
  File "/home/shreyansh/miniconda3/envs/shreyansh-env-py10/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/home/shreyansh/miniconda3/envs/shreyansh-env-py10/lib/python3.10/site-packages/transformers/models/t5/modeling_t5.py", line 1123, in forward
    layer_outputs = layer_module(
  File "/home/shreyansh/miniconda3/envs/shreyansh-env-py10/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/home/shreyansh/miniconda3/envs/shreyansh-env-py10/lib/python3.10/site-packages/transformers/models/t5/modeling_t5.py", line 755, in forward
    hidden_states = self.layer[-1](hidden_states)
  File "/home/shreyansh/miniconda3/envs/shreyansh-env-py10/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/home/shreyansh/miniconda3/envs/shreyansh-env-py10/lib/python3.10/site-packages/transformers/models/t5/modeling_t5.py", line 344, in forward
    forwarded_states = self.DenseReluDense(forwarded_states)
  File "/home/shreyansh/miniconda3/envs/shreyansh-env-py10/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/home/shreyansh/miniconda3/envs/shreyansh-env-py10/lib/python3.10/site-packages/transformers/models/t5/modeling_t5.py", line 327, in forward
    hidden_states = self.wo(hidden_states)
  File "/home/shreyansh/miniconda3/envs/shreyansh-env-py10/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/home/shreyansh/miniconda3/envs/shreyansh-env-py10/lib/python3.10/site-packages/oslo_core-3.0.0-py3.10.egg/oslo/torch/nn/modules/linear.py", line 149, in forward
    outputs = F.linear(input, self.weight)
RuntimeError: mat1 and mat2 shapes cannot be multiplied (256x1024 and 512x512)
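
A note on the shapes (my reading of the failure, not verified against OSLO internals): flan-t5-small uses T5 v1.1's gated feed-forward (T5DenseGatedActDense, with wi_0/wi_1 instead of the single wi of the original T5), with d_model=512 and d_ff=1024. Here mat1 is 256x1024, i.e. BATCH_SIZE * SEQ_LEN = 256 rows at the full d_ff width, while mat2 is 512x512, i.e. wo sharded from (512, 1024) down to d_ff/2 along its input dimension. That would be consistent with wo being row-parallelized while wi_0/wi_1 were left unsharded, as if the 1D TP mapping does not cover the gated variant. A minimal diagnostic sketch to check which projections were actually sharded (the attribute path through the wrapper and the TENSOR_1D rank lookup are assumptions on my part):

# Hypothetical diagnostic, run right after oslo.ready(...); prints the
# per-rank feed-forward weight shapes of the first encoder block.
tp_rank = parallel_context.get_local_rank(ParallelMode.TENSOR_1D)  # assumed lookup
ff = model.module.encoder.block[0].layer[-1].DenseReluDense  # path may differ after wrapping
for name in ("wi", "wi_0", "wi_1", "wo"):
    proj = getattr(ff, name, None)
    if proj is not None:
        print(f"tp rank {tp_rank}: {name}.weight {tuple(proj.weight.shape)}")

If wi_0/wi_1 still report the full (1024, 512) shape while wo reports (512, 512), only wo was split.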

Environment

  • OS : Ubuntu 22.04
  • Python version : 3.10.12
  • Transformers version : 4.34.0
  • Whether to use Docker: No
  • Misc.: