diff --git a/ggml-cann.cpp b/ggml-cann.cpp
index c15f0f63494f4..a4e904b946521 100644
--- a/ggml-cann.cpp
+++ b/ggml-cann.cpp
@@ -488,7 +488,15 @@ GGML_CALL static size_t ggml_backend_cann_buffer_type_get_alloc_size(
     size_t size = ggml_nbytes(tensor);
     int64_t ne0 = tensor->ne[0];
 
+    // The last row must be padded to at least 32 bytes, because every
+    // single op processes at least 32 bytes at a time.
+    // TODO: what about quantized types?
+    // int64_t line_size = ne0 * ggml_element_size(tensor);
+    // int64_t line_size_align_32 = (line_size + 31) & ~31;
+    // size += (line_size_align_32 - line_size);
+    // TODO: quantized types are not supported yet.
+    // TODO: consider non-contiguous tensors.
     if (ggml_is_quantized(tensor->type)) {
         if (ne0 % MATRIX_ROW_PADDING != 0) {
             size += ggml_row_size(
@@ -925,6 +933,7 @@ GGML_CALL static bool ggml_backend_cann_supports_op(ggml_backend_t backend,
         // embedding
         case GGML_OP_GET_ROWS: {
             switch (op->src[0]->type) {
+                case GGML_TYPE_F32:
                 case GGML_TYPE_Q4_0:
                 case GGML_TYPE_Q8_0:
                     return true;
diff --git a/ggml-cann/aclnn_ops.cpp b/ggml-cann/aclnn_ops.cpp
index cbae073fe9082..4ce4eabefd7d7 100644
--- a/ggml-cann/aclnn_ops.cpp
+++ b/ggml-cann/aclnn_ops.cpp
@@ -1611,9 +1611,18 @@ void ggml_cann_get_rows(ggml_backend_cann_context& ctx, ggml_tensor* dst) {
     ggml_tensor* src1 = dst->src[1];
 
     switch (src0->type) {
+        case GGML_TYPE_F32:
+            aclrtlaunch_ascendc_get_row_f32(
+                1, ctx.stream(), src0->data, src1->data, dst->data,
+                ((ggml_tensor*)src0->extra)->ne,
+                ((ggml_tensor*)src0->extra)->nb,
+                ((ggml_tensor*)src1->extra)->ne,
+                ((ggml_tensor*)src1->extra)->nb, ((ggml_tensor*)dst->extra)->ne,
+                ((ggml_tensor*)dst->extra)->nb);
+            break;
         case GGML_TYPE_Q4_0:
             aclrtlaunch_ascendc_get_row_q4_0(
-                1, ctx.stream(), src0->data, src1->data, dst->data,
+                24, ctx.stream(), src0->data, src1->data, dst->data,
                 ((ggml_tensor*)src0->extra)->ne,
                 ((ggml_tensor*)src1->extra)->ne, ((ggml_tensor*)src1->extra)->nb,
                 ((ggml_tensor*)dst->extra)->ne,
diff --git a/ggml-cann/kernels/CMakeLists.txt b/ggml-cann/kernels/CMakeLists.txt
index 275586bbb70fa..26f9ac33247a0 100644
--- a/ggml-cann/kernels/CMakeLists.txt
+++ b/ggml-cann/kernels/CMakeLists.txt
@@ -3,6 +3,7 @@ if (NOT SOC_TYPE)
 endif()
 
 file(GLOB SRC_FILES
+    get_row_f32.cpp
     get_row_q4_0.cpp
     get_row_q8_0.cpp
 )
diff --git a/ggml-cann/kernels/ascendc_kernels.h b/ggml-cann/kernels/ascendc_kernels.h
index ad3f2bf2e5b89..2d61e93c8a84b 100644
--- a/ggml-cann/kernels/ascendc_kernels.h
+++ b/ggml-cann/kernels/ascendc_kernels.h
@@ -1,6 +1,7 @@
 #ifndef ASCENDC_KERNELS_H
 #define ASCENDC_KERNELS_H
 
+#include "aclrtlaunch_ascendc_get_row_f32.h"
#include "aclrtlaunch_ascendc_get_row_q8_0.h"
 #include "aclrtlaunch_ascendc_get_row_q4_0.h"
diff --git a/ggml-cann/kernels/get_row_f32.cpp b/ggml-cann/kernels/get_row_f32.cpp
new file mode 100644
index 0000000000000..0c16f67581871
--- /dev/null
+++ b/ggml-cann/kernels/get_row_f32.cpp
@@ -0,0 +1,161 @@
+#include "kernel_operator.h"
+
+// Optimize me: use a template to avoid duplicating this code.
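+// Note: each row of ne0 floats is staged through a UB buffer whose size is
+// rounded up to a 32-byte multiple; e.g. ne0 = 5 gives
+// (5 * sizeof(float) + 31) & ~31 = 32 bytes (8 float elements), so the
+// DataCopy calls below stay 32-byte aligned.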
+using namespace AscendC;
+
+#define BUFFER_NUM 2
+
+class GET_ROW_F32 {
+   public:
+    __aicore__ inline GET_ROW_F32() {}
+    __aicore__ inline void init(GM_ADDR input, GM_ADDR indices, GM_ADDR output,
+                                int64_t *input_ne_ub, size_t *input_nb_ub,
+                                int64_t *indices_ne_ub, size_t *indices_nb_ub,
+                                int64_t *output_ne_ub, size_t *output_nb_ub) {
+        int64_t op_block_num = GetBlockNum();
+        int64_t op_block_idx = GetBlockIdx();
+
+        for (int i = 0; i < 4; i++) {
+            input_ne[i] = input_ne_ub[i];
+            input_stride[i] = input_nb_ub[i] / input_nb_ub[0];
+
+            indices_ne[i] = indices_ne_ub[i];
+            indices_stride[i] = indices_nb_ub[i] / indices_nb_ub[0];
+
+            output_ne[i] = output_ne_ub[i];
+            output_stride[i] = output_nb_ub[i] / output_nb_ub[0];
+        }
+
+        // The indices tensor has two dims: n_elements is the total number of
+        // rows to gather, and dr is the number of rows this block handles.
+        uint64_t n_elements =
+            indices_ne[0] * indices_ne[1] * indices_ne[2] * indices_ne[3];
+        dr = n_elements / op_block_num;
+
+        uint64_t tails = n_elements % op_block_num;
+        if (op_block_idx < tails) {
+            dr += 1;
+            ir = dr * op_block_idx;
+        } else {
+            ir = dr * op_block_idx + tails;
+        }
+
+        input_gm.SetGlobalBuffer((__gm__ float *)input);
+        indices_gm.SetGlobalBuffer((__gm__ int32_t *)indices);
+        output_gm.SetGlobalBuffer((__gm__ float *)output);
+
+        uint64_t local_buffer_size = ((input_ne[0] * sizeof(float) + 31) & ~31);
+        local_buffer_elems = local_buffer_size / sizeof(float);
+
+        // TODO: handle rows that are too long to fit into the UB.
+        // Every copy must be 32-byte aligned; this holds because the buffer
+        // size above is rounded up to a multiple of 32.
+        pipe.InitBuffer(input_queue, BUFFER_NUM, local_buffer_size);
+        pipe.InitBuffer(output_queue, BUFFER_NUM, local_buffer_size);
+    }
+
+    __aicore__ inline void copy_in(uint32_t offset) {
+        LocalTensor<float> input_local = input_queue.AllocTensor<float>();
+        DataCopy(input_local, input_gm[offset], local_buffer_elems);
+        input_queue.EnQue(input_local);
+    }
+
+    __aicore__ inline void copy_out(uint32_t offset) {
+        LocalTensor<float> output_local = output_queue.DeQue<float>();
+        DataCopy(output_gm[offset], output_local, local_buffer_elems);
+        output_queue.FreeTensor(output_local);
+    }
+
+    __aicore__ inline void calculate_row(int64_t idx) {
+        const int64_t indices_ne2_idx = idx / (indices_ne[0] * indices_ne[1]);
+        const int64_t indices_ne1_idx =
+            (idx - indices_ne2_idx * indices_ne[0] * indices_ne[1]) /
+            indices_ne[0];
+        const int64_t indices_ne0_idx =
+            (idx - indices_ne2_idx * indices_ne[0] * indices_ne[1] -
+             indices_ne1_idx * indices_ne[0]);
+
+        const int64_t indices_offset = indices_ne0_idx * indices_stride[0] +
+                                       indices_ne1_idx * indices_stride[1] +
+                                       indices_ne2_idx * indices_stride[2];
+        const int32_t selected_row_idx = indices_gm.GetValue(indices_offset);
+
+        const int64_t input_offset = selected_row_idx * input_stride[1] +
+                                     indices_ne1_idx * input_stride[2] +
+                                     indices_ne2_idx * input_stride[3];
+
+        const int64_t output_offset = indices_ne0_idx * output_stride[1] +
+                                      indices_ne1_idx * output_stride[2] +
+                                      indices_ne2_idx * output_stride[3];
+
+        copy_in(input_offset);
+        LocalTensor<float> input_local = input_queue.DeQue<float>();
+        LocalTensor<float> output_local = output_queue.AllocTensor<float>();
+
+        DataCopy(output_local, input_local, local_buffer_elems);
+        output_queue.EnQue(output_local);
+        copy_out(output_offset);
+
+        input_queue.FreeTensor(input_local);
+    }
+
+    __aicore__ inline void calculate() {
+        for (int64_t i = ir; i < ir + dr; i++) {
+            calculate_row(i);
+        }
+    }
+
+   private:
+    int64_t input_ne[4];
+    size_t input_stride[4];
+
+    int64_t indices_ne[4];
+    size_t indices_stride[4];
+
+    int64_t output_ne[4];
+    size_t output_stride[4];
+
+    size_t local_buffer_elems;
+
+    int64_t ir;
+    int64_t dr;
+
+    TPipe pipe;
+    GlobalTensor<float> input_gm;
+    GlobalTensor<int32_t> indices_gm;
+    GlobalTensor<float> output_gm;
+    TQue<QuePosition::VECIN, BUFFER_NUM> input_queue;
+    TQue<QuePosition::VECOUT, BUFFER_NUM> output_queue;
+};
+
+template <typename T>
+__aicore__ inline void copy_to_ub(GM_ADDR gm, T *ub, size_t size) {
+    auto gm_ptr = (__gm__ uint8_t *)gm;
+    auto ub_ptr = (uint8_t *)(ub);
+    for (int32_t i = 0; i < size; ++i, ++ub_ptr, ++gm_ptr) {
+        *ub_ptr = *gm_ptr;
+    }
+}
+
+extern "C" __global__ __aicore__ void ascendc_get_row_f32(
+    GM_ADDR input_gm, GM_ADDR indices_gm, GM_ADDR output_gm,
+    GM_ADDR input_ne_gm, GM_ADDR input_nb_gm, GM_ADDR indices_ne_gm,
+    GM_ADDR indices_nb_gm, GM_ADDR output_ne_gm, GM_ADDR output_nb_gm) {
+    int64_t input_ne_ub[4];
+    size_t input_nb_ub[4];
+    int64_t indices_ne_ub[4];
+    size_t indices_nb_ub[4];
+    int64_t output_ne_ub[4];
+    size_t output_nb_ub[4];
+
+    copy_to_ub(input_ne_gm, input_ne_ub, 32);
+    copy_to_ub(input_nb_gm, input_nb_ub, 32);
+    copy_to_ub(indices_ne_gm, indices_ne_ub, 32);
+    copy_to_ub(indices_nb_gm, indices_nb_ub, 32);
+    copy_to_ub(output_ne_gm, output_ne_ub, 32);
+    copy_to_ub(output_nb_gm, output_nb_ub, 32);
+
+    GET_ROW_F32 op;
+    op.init(input_gm, indices_gm, output_gm, input_ne_ub, input_nb_ub,
+            indices_ne_ub, indices_nb_ub, output_ne_ub, output_nb_ub);
+    op.calculate();
+}
\ No newline at end of file
diff --git a/ggml-cann/kernels/get_row_q4_0.cpp b/ggml-cann/kernels/get_row_q4_0.cpp
index 00644b503f3e3..d3f229c1980b5 100644
--- a/ggml-cann/kernels/get_row_q4_0.cpp
+++ b/ggml-cann/kernels/get_row_q4_0.cpp
@@ -67,6 +67,7 @@ class GET_ROW_Q4_0 {
 
     __aicore__ inline void copy_in(uint32_t offset) {
         LocalTensor<int4b_t> input_local = input_queue.AllocTensor<int4b_t>();
+        // 32 * sizeof(int4b_t) = 16 bytes, which is not 32-byte aligned; why is there no error?
         DataCopy(input_local, input_gm[offset], QK4_0);
         input_queue.EnQue(input_local);
     }
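For reference, a minimal host-side sketch (plain C++, not part of the patch; the sizes are made up) of the work split computed in GET_ROW_F32::init: the first n_elements % op_block_num cores each take one extra row, and ir is offset to account for those extras, so the per-core row ranges stay contiguous and disjoint.

#include <cinttypes>
#include <cstdint>
#include <cstdio>

// Illustration only: mirrors the dr/ir computation in GET_ROW_F32::init.
int main() {
    const int64_t n_elements = 10;   // total rows to gather (made-up value)
    const int64_t op_block_num = 4;  // number of AI cores (made-up value)
    for (int64_t idx = 0; idx < op_block_num; ++idx) {
        int64_t dr = n_elements / op_block_num;  // base rows per core
        const int64_t tails = n_elements % op_block_num;
        int64_t ir;
        if (idx < tails) {
            dr += 1;                // the first `tails` cores take one extra row
            ir = dr * idx;          // all earlier cores carry the extra row too
        } else {
            ir = dr * idx + tails;  // skip the rows absorbed by the tail cores
        }
        printf("core %" PRId64 ": rows [%" PRId64 ", %" PRId64 ")\n",
               idx, ir, ir + dr);
    }
    return 0;  // prints [0,3) [3,6) [6,8) [8,10): disjoint and exhaustive
}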
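Similarly, a small self-check (again plain C++ with made-up extents) of the flat-index decomposition in calculate_row: dividing by indices_ne[0] * indices_ne[1] and then by indices_ne[0] recovers the (ne2, ne1, ne0) coordinates, and re-flattening them reproduces the original row index iterated by calculate().

#include <cassert>
#include <cstdint>

int main() {
    const int64_t ne0 = 4, ne1 = 3, ne2 = 2;  // made-up indices extents
    for (int64_t idx = 0; idx < ne0 * ne1 * ne2; ++idx) {
        const int64_t i2 = idx / (ne0 * ne1);                 // slowest dim
        const int64_t i1 = (idx - i2 * ne0 * ne1) / ne0;      // middle dim
        const int64_t i0 = idx - i2 * ne0 * ne1 - i1 * ne0;   // fastest dim
        // Re-flattening must reproduce the original flat index.
        assert(idx == i0 + i1 * ne0 + i2 * ne0 * ne1);
    }
    return 0;
}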