
Commit

fix moe_align_block_size (#2615)
HandH1998 authored Dec 27, 2024
1 parent 70dc2fb commit 77d1210
Showing 4 changed files with 24 additions and 18 deletions.
2 changes: 1 addition & 1 deletion sgl-kernel/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

 [project]
 name = "sgl-kernel"
-version = "0.0.2.post9"
+version = "0.0.2.post10"
 description = "Kernel Library for SGLang"
 readme = "README.md"
 requires-python = ">=3.8"

20 changes: 4 additions & 16 deletions sgl-kernel/src/sgl-kernel/csrc/moe_align_kernel.cu
@@ -118,31 +118,19 @@ __global__ void moe_align_block_size_kernel(scalar_t* __restrict__ topk_ids, int
 }

 void moe_align_block_size(torch::Tensor topk_ids, int64_t num_experts, int64_t block_size,
-                          torch::Tensor sorted_token_ids, torch::Tensor experts_ids,
-                          torch::Tensor num_tokens_post_pad) {
+                          torch::Tensor sorted_token_ids, torch::Tensor experts_ids, torch::Tensor num_tokens_post_pad,
+                          torch::Tensor token_cnts_buffer, torch::Tensor cumsum_buffer) {
   const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
   DISPATCH_INTEGRAL_TYPES(topk_ids.scalar_type(), "moe_align_block_size_kernel", [&] {
     // calc needed amount of shared mem for `tokens_cnts` and `cumsum`
     // tensors
     const int32_t num_thread = max((int32_t)num_experts, WARP_SIZE);

-    const int32_t mem_tokens_cnts = ((num_experts + 1) * num_experts) * sizeof(int32_t);
-    const int32_t mem_cumsum = (num_experts + 1) * sizeof(int32_t);
-
-    // allocate global memory
-    int32_t* tokens_cnts;
-    int32_t* cumsum;
-    cudaMalloc(&tokens_cnts, mem_tokens_cnts);
-    cudaMalloc(&cumsum, mem_cumsum);
-
     // set dynamic shared mem
     auto kernel = moe_align_block_size_kernel<scalar_t>;
     kernel<<<1, num_thread, 0, stream>>>(topk_ids.data_ptr<scalar_t>(), sorted_token_ids.data_ptr<int32_t>(),
                                          experts_ids.data_ptr<int32_t>(), num_tokens_post_pad.data_ptr<int32_t>(),
-                                         num_experts, block_size, topk_ids.numel(), tokens_cnts, cumsum);
-
-    cudaFree(tokens_cnts);
-    cudaFree(cumsum);
+                                         num_experts, block_size, topk_ids.numel(),
+                                         token_cnts_buffer.data_ptr<int32_t>(), cumsum_buffer.data_ptr<int32_t>());
   });
 }

4 changes: 4 additions & 0 deletions sgl-kernel/src/sgl-kernel/ops/__init__.py
@@ -8,6 +8,8 @@ def moe_align_block_size(
     sorted_token_ids,
     experts_ids,
     num_tokens_post_pad,
+    token_cnts_buffer,
+    cumsum_buffer,
 ):
     _moe_align_block_size(
         topk_ids,
@@ -16,4 +18,6 @@ def moe_align_block_size(
         sorted_token_ids,
         experts_ids,
         num_tokens_post_pad,
+        token_cnts_buffer,
+        cumsum_buffer,
     )

16 changes: 15 additions & 1 deletion sgl-kernel/tests/test_moe_align.py
@@ -18,8 +18,22 @@ def test_moe_align_block_size():
     )
     num_tokens_post_pad = torch.empty((1), dtype=torch.int32, device=topk_ids.device)

+    token_cnts_buffer = torch.empty(
+        (num_experts + 1) * num_experts, dtype=torch.int32, device=topk_ids.device
+    )
+    cumsum_buffer = torch.empty(
+        num_experts + 1, dtype=torch.int32, device=topk_ids.device
+    )
+
     moe_align_block_size(
-        topk_ids, num_experts, block_size, sorted_ids, expert_ids, num_tokens_post_pad
+        topk_ids,
+        num_experts,
+        block_size,
+        sorted_ids,
+        expert_ids,
+        num_tokens_post_pad,
+        token_cnts_buffer,
+        cumsum_buffer,
     )
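
For reference, here is a minimal end-to-end sketch of calling the updated op with the new caller-provided scratch buffers. The scratch-buffer shapes mirror the test above; the `sgl_kernel` import path and the sizing of `sorted_ids`/`expert_ids` follow common MoE-alignment conventions and are assumptions, not something this diff specifies.

```python
# Sketch only: exercises the new moe_align_block_size signature that takes
# caller-provided token_cnts_buffer and cumsum_buffer, replacing the per-call
# cudaMalloc/cudaFree the old kernel wrapper performed.
import torch

from sgl_kernel import moe_align_block_size  # assumed public import path

num_experts = 256
block_size = 128
num_tokens, topk = 1024, 8

topk_ids = torch.randint(
    0, num_experts, (num_tokens, topk), dtype=torch.int32, device="cuda"
)

# Output buffers. The padded-size formula is the usual upper bound used by
# MoE alignment kernels; it is assumed here, not taken from this commit.
max_num_tokens_padded = topk_ids.numel() + num_experts * (block_size - 1)
sorted_ids = torch.empty(max_num_tokens_padded, dtype=torch.int32, device="cuda")
expert_ids = torch.empty(
    (max_num_tokens_padded + block_size - 1) // block_size,
    dtype=torch.int32,
    device="cuda",
)
num_tokens_post_pad = torch.empty(1, dtype=torch.int32, device="cuda")

# New in this commit: scratch buffers are allocated by the caller, with the
# same element counts the removed cudaMalloc calls used.
token_cnts_buffer = torch.empty(
    (num_experts + 1) * num_experts, dtype=torch.int32, device="cuda"
)
cumsum_buffer = torch.empty(num_experts + 1, dtype=torch.int32, device="cuda")

moe_align_block_size(
    topk_ids,
    num_experts,
    block_size,
    sorted_ids,
    expert_ids,
    num_tokens_post_pad,
    token_cnts_buffer,
    cumsum_buffer,
)
print("tokens after padding:", num_tokens_post_pad.item())
```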


