add cpy from fp32 to q8_0
hipudding committed May 16, 2024
1 parent 0274c5d commit 1ccd1bd
Showing 5 changed files with 197 additions and 22 deletions.
12 changes: 5 additions & 7 deletions ggml-cann.cpp
@@ -198,8 +198,6 @@ GGML_CALL static void ggml_backend_cann_transform_back_q4_0(
GGML_CALL static void ggml_backend_cann_transform_q8_0(ggml_tensor* tensor,
const void* src,
void* dst) {
GGML_ASSERT(tensor->op == GGML_OP_NONE);

size_t n_bytes = ggml_nbytes(tensor);
int64_t n_elems = ggml_nelements(tensor);
int64_t groups = n_elems / QK8_0;
@@ -220,8 +218,6 @@ GGML_CALL static void ggml_backend_cann_transform_q8_0(ggml_tensor* tensor,

GGML_CALL static void ggml_backend_cann_transform_back_q8_0(
const ggml_tensor* tensor, const void* src, void* dst) {
GGML_ASSERT(tensor->op == GGML_OP_NONE);

size_t n_bytes = ggml_nbytes(tensor);
int64_t n_elems = ggml_nelements(tensor);
int64_t groups = n_elems / QK8_0;
@@ -943,10 +939,12 @@ GGML_CALL static bool ggml_backend_cann_supports_op(ggml_backend_t backend,
}
} break;
case GGML_OP_CPY: {
switch (op->src[0]->type) {
// case GGML_TYPE_Q4_0:
switch (op->type) {
case GGML_TYPE_Q8_0:
return true;
if (op->src[0]->type == GGML_TYPE_F32)
return true;
else
return false;
default:
return false;
}
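With this change the CANN backend claims GGML_OP_CPY only when the destination type is Q8_0 and the source is F32; every other copy falls through to return false. As a reference for the math the new device kernel later in this commit implements, here is a minimal host-side sketch of ggml's Q8_0 quantization for one 32-element group (the helper name is illustrative, not part of this commit):

#include <cmath>
#include <cstdint>

// One Q8_0 group: d = max(|x|) / 127, q[i] = round(x[i] / d).
// Returns the scale d; the CANN buffers store it as fp16 after the quants.
static float quantize_group_q8_0_ref(const float* x, int8_t* q) {
    float amax = 0.0f;
    for (int i = 0; i < 32; i++) {
        amax = std::fmax(amax, std::fabs(x[i]));
    }
    const float d  = amax / 127.0f;
    const float id = d != 0.0f ? 1.0f / d : 0.0f;
    for (int i = 0; i < 32; i++) {
        q[i] = (int8_t)std::lroundf(x[i] * id);
    }
    return d;
}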
19 changes: 4 additions & 15 deletions ggml-cann/aclnn_ops.cpp
@@ -1477,20 +1477,9 @@ void ggml_cann_alibi(ggml_backend_cann_context& ctx, ggml_tensor* dst) {
void ggml_cann_cpy(ggml_backend_cann_context& ctx, ggml_tensor* dst) {
ggml_tensor* src = dst->src[0];

aclTensor* acl_src = create_acl_tensor(src);
aclTensor* acl_dst = create_acl_tensor(dst);

if (!ggml_is_quantized(dst->type)) {
cann_copy(ctx, dst, acl_src, acl_dst);
} else {
uint8_t* size = (uint8_t*)ctx.alloc_buffer(dst, sizeof(size_t));
size_t ne = ggml_nelements(src);
aclrtMemcpy(size, sizeof(size_t), &ne, sizeof(size_t),
ACL_MEMCPY_HOST_TO_DEVICE);
size_t ne1;
aclrtMemcpy(&ne1, sizeof(size_t), size, sizeof(size_t),
ACL_MEMCPY_DEVICE_TO_HOST);
}
aclrtlaunch_ascendc_quantize_q8_0(
24, ctx.stream(), src->data, dst->data, ((ggml_tensor*)src->extra)->ne,
((ggml_tensor*)src->extra)->nb, ((ggml_tensor*)dst->extra)->ne);
}

void aclnn_inplace_add(ggml_backend_cann_context& ctx, aclTensor* acl_src,
@@ -1622,7 +1611,7 @@ void ggml_cann_get_rows(ggml_backend_cann_context& ctx, ggml_tensor* dst) {
break;
case GGML_TYPE_F16:
aclrtlaunch_ascendc_get_row_f16(
1, ctx.stream(), src0->data, src1->data, dst->data,
24, ctx.stream(), src0->data, src1->data, dst->data,
((ggml_tensor*)src0->extra)->ne,
((ggml_tensor*)src0->extra)->nb,
((ggml_tensor*)src1->extra)->ne,
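The rewritten ggml_cann_cpy above drops the cann_copy branch for non-quantized destinations and always launches the AscendC quantize kernel on 24 blocks, passing the ne/nb arrays that were staged on the device through the tensors' extra fields. The kernel places the fp16 group scales directly after the int8 quants (scale_gm is set to output + output_size), so the destination buffer layout it assumes looks like the sketch below; the struct and helper are illustrative only, not part of this commit:

#include <cstddef>
#include <cstdint>

// Assumed CANN device layout for a Q8_0 tensor, consistent with the kernel's
// scale_gm = output + output_size: all int8 quants first, then one fp16
// scale per 32-element group.
struct q8_0_dev_layout {
    size_t quants_bytes;   // n_elems * sizeof(int8_t)
    size_t scales_offset;  // scales start right after the quants
    size_t scales_bytes;   // (n_elems / 32) * sizeof(uint16_t)
};

static q8_0_dev_layout q8_0_layout(int64_t n_elems) {
    q8_0_dev_layout l;
    l.quants_bytes  = (size_t)n_elems;
    l.scales_offset = l.quants_bytes;
    l.scales_bytes  = (size_t)(n_elems / 32) * sizeof(uint16_t);
    return l;
}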
1 change: 1 addition & 0 deletions ggml-cann/kernels/CMakeLists.txt
@@ -7,6 +7,7 @@ file(GLOB SRC_FILES
get_row_f16.cpp
get_row_q4_0.cpp
get_row_q8_0.cpp
quantize_q8_0.cpp
)

string(TOLOWER ${SOC_TYPE} SOC_VERSION)
2 changes: 2 additions & 0 deletions ggml-cann/kernels/ascendc_kernels.h
@@ -6,4 +6,6 @@
#include "aclrtlaunch_ascendc_get_row_q8_0.h"
#include "aclrtlaunch_ascendc_get_row_q4_0.h"

#include "aclrtlaunch_ascendc_quantize_q8_0.h"

#endif // ASCENDC_KERNELS_H
185 changes: 185 additions & 0 deletions ggml-cann/kernels/quantize_q8_0.cpp
@@ -0,0 +1,185 @@
#include "kernel_operator.h"

using namespace AscendC;

#define BUFFER_NUM 2
#define QK8_0 32

class QUANTIZE_Q8_0 {
public:
__aicore__ inline QUANTIZE_Q8_0() {}
__aicore__ inline void init(GM_ADDR input, GM_ADDR output,
int64_t *input_ne_ub, size_t *input_nb_ub,
int64_t *output_ne_ub) {
int64_t op_block_num = GetBlockNum();
int64_t op_block_idx = GetBlockIdx();

for (int i = 0; i < 4; i++) {
input_ne[i] = input_ne_ub[i];
input_stride[i] = input_nb_ub[i] / input_nb_ub[0];

output_ne[i] = output_ne_ub[i];
}

output_stride[0] = 1;
for (int i = 1; i < 4; i++) {
output_stride[i] = output_stride[i - 1] * output_ne[i - 1];
}

scale_ne = input_ne;
scale_stride[0] = 1;
scale_stride[1] = input_ne[0] / QK8_0;
for (int i = 2; i < 4; i++) {
scale_stride[i] = scale_stride[i - 1] * scale_ne[i - 1];
}

// split input tensor by rows.
uint64_t nr = input_ne[1] * input_ne[2] * input_ne[3];
dr = nr / op_block_num;

uint64_t tails = nr % op_block_num;
if (op_block_idx < tails) {
dr += 1;
ir = dr * op_block_idx;
} else {
ir = dr * op_block_idx + tails;
}

group_size_in_row = scale_stride[1];
int64_t output_size = output_ne[0] * output_ne[1] * output_ne[2] *
output_ne[3] * sizeof(uint8_t);

input_gm.SetGlobalBuffer((__gm__ float *)input);
output_gm.SetGlobalBuffer((__gm__ int8_t *)output);
scale_gm.SetGlobalBuffer((__gm__ half *)(output + output_size));

pipe.InitBuffer(input_queue, BUFFER_NUM, QK8_0 * sizeof(float));
pipe.InitBuffer(output_queue, BUFFER_NUM, QK8_0 * sizeof(int8_t));
pipe.InitBuffer(work_queue, BUFFER_NUM, 32);
pipe.InitBuffer(max_queue, BUFFER_NUM, 32);
pipe.InitBuffer(abs_queue, BUFFER_NUM, QK8_0 * sizeof(float));
pipe.InitBuffer(cast_queue, BUFFER_NUM, QK8_0 * sizeof(half));
}

__aicore__ inline void copy_in(uint32_t offset) {
LocalTensor<float> input_local = input_queue.AllocTensor<float>();
DataCopy(input_local, input_gm[offset], QK8_0);
input_queue.EnQue(input_local);
}

__aicore__ inline void copy_out(uint32_t offset) {
LocalTensor<int8_t> output_local = output_queue.DeQue<int8_t>();
DataCopy(output_gm[offset], output_local, QK8_0);
output_queue.FreeTensor(output_local);
}

__aicore__ inline void calculate_group(int64_t row, int64_t group) {
const int64_t i3 = row / (input_ne[1] * input_ne[2]);
const int64_t i2 = (row - i3 * input_ne[1] * input_ne[2]) / input_ne[1];
const int64_t i1 =
row - i3 * input_ne[1] * input_ne[2] - i2 * input_ne[1];

const int64_t input_offset = i1 * input_stride[1] +
i2 * input_stride[2] +
i3 * input_stride[3] + QK8_0 * group;

const int64_t output_offset = i1 * output_stride[1] +
i2 * output_stride[2] +
i3 * output_stride[3] + QK8_0 * group;

const int64_t scale_offset = i1 * scale_stride[1] +
i2 * scale_stride[2] +
i3 * scale_stride[3] + group;

copy_in(input_offset);
LocalTensor<float> input_local = input_queue.DeQue<float>();
LocalTensor<int8_t> output_local = output_queue.AllocTensor<int8_t>();
LocalTensor<float> work_local = work_queue.AllocTensor<float>();
LocalTensor<float> abs_local = abs_queue.AllocTensor<float>();
LocalTensor<float> max_local = max_queue.AllocTensor<float>();
LocalTensor<half> cast_local = cast_queue.AllocTensor<half>();

Abs(abs_local, input_local, QK8_0);
ReduceMax(max_local, abs_local, work_local, QK8_0);
// Per-group scale: d = max(|x|) / 127.
float d = max_local.GetValue(0);
d = d / ((1 << 7) - 1);

if (d != 0) {
    Muls(input_local, input_local, 1.0f / d, QK8_0);
}

// Round x / d to the nearest integer, then narrow fp32 -> fp16 -> int8.
Cast(input_local, input_local, RoundMode::CAST_ROUND, QK8_0);
Cast(cast_local, input_local, RoundMode::CAST_ROUND, QK8_0);
Cast(output_local, cast_local, RoundMode::CAST_ROUND, QK8_0);

// Store the group's scale as fp16 in the scale region after the quants.
scale_gm.SetValue(scale_offset, (half)d);

output_queue.EnQue(output_local);
copy_out(output_offset);

input_queue.FreeTensor(input_local);
work_queue.FreeTensor(work_local);
abs_queue.FreeTensor(abs_local);
max_queue.FreeTensor(max_local);
cast_queue.FreeTensor(cast_local);
}

__aicore__ inline void calculate() {
for (int64_t i = ir; i < ir + dr; i++) {
for (int64_t j = 0; j < group_size_in_row; j++) {
calculate_group(i, j);
}
}
}

private:
int64_t input_ne[4];
size_t input_stride[4];

int64_t *scale_ne;
size_t scale_stride[4];

int64_t output_ne[4];
size_t output_stride[4];

int64_t group_size_in_row;

int64_t ir;
int64_t dr;

TPipe pipe;
GlobalTensor<float> input_gm;
GlobalTensor<half> scale_gm;
GlobalTensor<int8_t> output_gm;
TQue<QuePosition::VECIN, BUFFER_NUM> input_queue;
TQue<QuePosition::VECOUT, BUFFER_NUM> output_queue;
TQue<QuePosition::VECIN, 1> work_queue;
TQue<QuePosition::VECOUT, 1> max_queue;
TQue<QuePosition::VECIN, 1> abs_queue;
TQue<QuePosition::VECIN, 1> cast_queue;
};

template <typename T>
__aicore__ inline void copy_to_ub(GM_ADDR gm, T *ub, size_t size) {
auto gm_ptr = (__gm__ uint8_t *)gm;
auto ub_ptr = (uint8_t *)(ub);
for (int32_t i = 0; i < size; ++i, ++ub_ptr, ++gm_ptr) {
*ub_ptr = *gm_ptr;
}
}

extern "C" __global__ __aicore__ void ascendc_quantize_q8_0(
GM_ADDR input_gm, GM_ADDR output_gm, GM_ADDR input_ne_gm,
GM_ADDR input_nb_gm, GM_ADDR output_ne_gm) {
int64_t input_ne_ub[4];
size_t input_nb_ub[4];
int64_t output_ne_ub[4];

// Each ne/nb array has 4 entries of 8 bytes, i.e. 32 bytes per copy.
copy_to_ub(input_ne_gm, input_ne_ub, 32);
copy_to_ub(input_nb_gm, input_nb_ub, 32);
copy_to_ub(output_ne_gm, output_ne_ub, 32);

QUANTIZE_Q8_0 op;
op.init(input_gm, output_gm, input_ne_ub, input_nb_ub, output_ne_ub);
op.calculate();
}
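The only non-obvious part of init() is how work is distributed: the nr = ne[1] * ne[2] * ne[3] rows are split across the launched blocks, with the first nr % block_num blocks taking one extra row. A standalone sketch of that partition (hypothetical helper, not part of the kernel):

#include <cstdint>
#include <utility>

// Returns {first_row, row_count} for a given block, mirroring the dr/ir
// computation in QUANTIZE_Q8_0::init().
static std::pair<int64_t, int64_t> split_rows(int64_t nr, int64_t block_num,
                                              int64_t block_idx) {
    int64_t dr    = nr / block_num;
    int64_t tails = nr % block_num;
    int64_t ir;
    if (block_idx < tails) {
        dr += 1;
        ir  = dr * block_idx;
    } else {
        ir  = dr * block_idx + tails;
    }
    return {ir, dr};
}

// Example: 10 rows over 4 blocks -> {0,3}, {3,3}, {6,2}, {8,2}.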
