1、使用全局内存数组
矩阵乘法,即用矩阵A的每一行与矩阵B的每一列,依次作乘积累加,就能够获得结果矩阵的各个元素的值。在CPU上用三层循环实现。这里是将二维数组用一维的形式表示,即按行存储。实现代码如下:
/* Host-side setup: allocate the four host buffers, initialize the input
 * matrices, and compute the CPU reference product C = A * B.
 * NOTE(review): WIDTH is assumed to be a compile-time matrix dimension
 * defined elsewhere in this file — confirm. */
size_t size = WIDTH*WIDTH * sizeof(int);
int *h_A = (int *)malloc(size);          /* input matrix A, row-major      */
int *h_B = (int *)malloc(size);          /* input matrix B, row-major      */
int *h_C = (int *)malloc(size);          /* CPU reference result           */
int *h_d_Answer = (int *)malloc(size);   /* receives the GPU result later  */
if (h_A == NULL || h_B == NULL || h_C == NULL || h_d_Answer == NULL) {
    exit(EXIT_FAILURE);                  /* allocation failed — bail out   */
}

/* A holds distinct values (its own flat index); B is all ones. With B = 1,
 * every element of a row of C equals the sum of the matching row of A,
 * which makes the result easy to eyeball. */
for (int e = 0; e < WIDTH * WIDTH; e++) {
    h_A[e] = e;
    h_B[e] = 1;
}

/* Classic triple-loop multiply: C[r][c] = sum over k of A[r][k] * B[k][c]. */
for (int r = 0; r < WIDTH; r++) {
    for (int c = 0; c < WIDTH; c++) {
        int acc = 0;
        for (int k = 0; k < WIDTH; k++) {
            acc += h_A[r*WIDTH + k] * h_B[k*WIDTH + c];
        }
        h_C[r*WIDTH + c] = acc;
    }
}
printf("CPU answer:\n");
在GPU上对应的kernel函数如下:
__global__ void kernelMatrix(int *A, int *B, int *C){ int idx = threadIdx.x + blockDim.x*blockIdx.x;//col number int idy = threadIdx.y + blockDim.y*blockIdx.y;//row number if (idx < WIDTH && idy < WIDTH) { int sum = 0; for (int k = 0; k < WIDTH; k++) { sum += A[idy*WIDTH + k] * B[k*WIDTH + idx]; } C[idy*WIDTH + idx] = sum; } }
每一个Block计算结果方阵的一个子矩阵,大小为BLOCKDIM×BLOCKDIM,由Block的共享内存装载数据。
线程配置示意图:
数据装载过程示意图:
======================================================================
实现代码如下:
__global__ void kernelMatrixShare(int *A, int *B, int *C) { __shared__ int ds_M[BLOCKDIM][BLOCKDIM]; __shared__ int ds_N[BLOCKDIM][BLOCKDIM]; int idx = threadIdx.x + BLOCKDIM*blockIdx.x; int idy = threadIdx.y + BLOCKDIM*blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int sum = 0; for (int m = 0; m < WIDTH / BLOCKDIM; m++) { ds_M[ty][tx] = A[idy*WIDTH + m*BLOCKDIM+tx];//A[idy][m*BLOCKDIM + tx]; ds_N[ty][tx] = B[idx + (m*BLOCKDIM + ty)* WIDTH]; __syncthreads(); for (int k = 0; k < BLOCKDIM; k++) { sum += ds_M[ty][k] * ds_N[k][tx]; } __syncthreads(); } //get one value C[idy*WIDTH + idx] = sum; }
实验对比结果: