// matmul_tiled.cu: shared-memory tiled matrix multiplication (fp16 inputs, fp32 accumulation)
#include <stdio.h>
#include <stdlib.h> // malloc / free / srand
#include <time.h>   // time
#include <cuda_fp16.h>
#include "utils.cu" // CEIL_DIV, nanos, matrix_random_fp16valued, matmul_c, matrix_eq
#define N 1024
#define WARP_SIZE 32
#define BLOCK_SIZE 32
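// Tiled matmul: each thread block computes one BLOCK_SIZE x BLOCK_SIZE tile of
// the output C. The shared (k) dimension is walked in BLOCK_SIZE-wide steps; at
// every step the block cooperatively stages one tile of a and one tile of b in
// shared memory, so each global element is read once per tile instead of once
// per multiply-add. Inputs are fp16; accumulation happens in fp32.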
__global__ void matmul(__half *a, __half *b, float *c, int n)
{
    int block_i = blockIdx.y;                // block index along row (y) axis
    int block_j = blockIdx.x;                // block index along col (x) axis
    int thread_i = threadIdx.x / BLOCK_SIZE; // thread row inside the 32x32 tile
    int thread_j = threadIdx.x % BLOCK_SIZE; // thread col inside the 32x32 tile
    int row = block_i * BLOCK_SIZE + thread_i;
    int col = block_j * BLOCK_SIZE + thread_j;

    __shared__ __half tile_a[BLOCK_SIZE * BLOCK_SIZE];
    __shared__ __half tile_b[BLOCK_SIZE * BLOCK_SIZE];

    // No early return for out-of-range threads: every thread in the block must
    // reach the __syncthreads() barriers below, so out-of-range threads (possible
    // only when n is not a multiple of BLOCK_SIZE) load zeros and skip the store.
    float acc = 0.0f;
    for (int block_start_k = 0; block_start_k < n; block_start_k += BLOCK_SIZE)
    {
        // Stage one 32x32 tile of a and of b in shared memory. Consecutive
        // threads (consecutive thread_j) read consecutive addresses, so both
        // global loads coalesce.
        int a_col = block_start_k + thread_j;
        int b_row = block_start_k + thread_i;
        tile_a[thread_i * BLOCK_SIZE + thread_j] =
            (row < n && a_col < n) ? a[row * n + a_col] : __float2half(0.0f);
        tile_b[thread_i * BLOCK_SIZE + thread_j] =
            (b_row < n && col < n) ? b[b_row * n + col] : __float2half(0.0f);
        __syncthreads(); // wait for the whole block to finish staging the tiles into smem
        for (int k = 0; k < BLOCK_SIZE; k++)
        {
            acc += __half2float(tile_a[thread_i * BLOCK_SIZE + k] * tile_b[k * BLOCK_SIZE + thread_j]);
        }
        __syncthreads(); // don't overwrite the tiles while some threads are still accumulating
    }
    if (row < n && col < n)
    {
        c[row * n + col] = acc;
    }
}
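// Note: the __half * __half operator in the inner loop is only defined in device
// code for compute capability 5.3 and up, so compile with -arch=sm_53 or newer
// (on older targets, convert with __half2float first and multiply in fp32).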
int main()
{
    srand(time(NULL));
    float *a = (float *)malloc(N * N * sizeof(float));
    float *b = (float *)malloc(N * N * sizeof(float));
    float *c = (float *)malloc(N * N * sizeof(float));
    // fill a & b with random values that are exactly representable in fp16,
    // so the fp32 CPU reference and the fp16 kernel see identical inputs
    matrix_random_fp16valued(a, N * N);
    matrix_random_fp16valued(b, N * N);
    // convert the inputs to half precision for the GPU
    __half *a_h = (__half *)malloc(N * N * sizeof(__half));
    __half *b_h = (__half *)malloc(N * N * sizeof(__half));
    for (int i = 0; i < N * N; i++)
    {
        a_h[i] = __float2half(a[i]);
        b_h[i] = __float2half(b[i]);
    }
    // allocate device buffers and upload the half-precision inputs
    __half *d_a, *d_b;
    float *d_c;
    cudaMalloc(&d_a, N * N * sizeof(__half));
    cudaMalloc(&d_b, N * N * sizeof(__half));
    cudaMalloc(&d_c, N * N * sizeof(float));
    cudaMemcpy(d_a, a_h, N * N * sizeof(__half), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b_h, N * N * sizeof(__half), cudaMemcpyHostToDevice);
    // one block per 32x32 output tile, one thread per element of the tile
    dim3 grid_dim(CEIL_DIV(N, BLOCK_SIZE), CEIL_DIV(N, BLOCK_SIZE));
    dim3 block_dim(BLOCK_SIZE * BLOCK_SIZE); // 1024 threads, flattened to 1-D
    printf("LAUNCHING with grid_dim: (%d, %d) and block_dim: %d\n", grid_dim.x, grid_dim.y, block_dim.x);
    uint64_t start = nanos();
    matmul<<<grid_dim, block_dim>>>(d_a, d_b, d_c, N);
    cudaDeviceSynchronize(); // kernel launches are async; wait for completion before stopping the clock
    uint64_t end = nanos();
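    // Sanity check: surface any launch or execution error before trusting the
    // timing or the results (standard runtime-API check).
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        return 1;
    }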
    cudaMemcpy(c, d_c, N * N * sizeof(float), cudaMemcpyDeviceToHost);
    // a square matmul does N*N*N multiply-adds = 2*N^3 floating point ops
    double gflop = (2.0 * N * N * N) * 1e-9;
    double s = (end - start) * 1e-9;
    printf("%f GFLOP/S -- %.2f ms\n", gflop / s, s * 1e3);
    {
        // compute naive reference matmul on cpu
        printf("Computing reference matmul result on cpu\n");
        float *reference_c = (float *)malloc(N * N * sizeof(float));
        matmul_c(a, b, reference_c, N);
        // check each item
        printf("Comparing reference result with gpu result\n");
        matrix_eq(reference_c, c, N);
        printf("ALL GOOD\n");
        free(reference_c);
    }
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(a_h);
    free(b_h);
    free(a);
    free(b);
    free(c);
}
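// Build/run sketch (any arch >= sm_53 works for the __half math; sm_70 is just an example):
//   nvcc -O2 -arch=sm_70 matmul_tiled.cu -o matmul_tiled && ./matmul_tiled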