
Shared memory matrix multiplication kernel

I am trying to implement the shared-memory-based matrix multiplication kernel outlined in the CUDA C Programming Guide. Here is the kernel:

__global__ void matrixMultiplyShared(float * A, float * B, float * C,
                     int ARows, int AColumns,
                     int BRows, int BColumns,
                     int CRows, int CColumns) {
    float * CSub = &C[CColumns * 16 * blockIdx.y + 16 * blockIdx.x];
    float CValue = 0;
    for (int k = 0; k < (AColumns / 16); ++k) {
        float * ASub = &A[AColumns * 16 * blockIdx.y + 16 * k];
        float * BSub = &B[AColumns*16*k + 16*blockIdx.y];
        __shared__ float As[16][16];
        __shared__ float Bs[16][16];
        As[threadIdx.y][threadIdx.x] = ASub[threadIdx.y*AColumns+threadIdx.x];
        Bs[threadIdx.y][threadIdx.x] = BSub[threadIdx.y*AColumns+threadIdx.x];
        __syncthreads();
        for (int n = 0; n < 16; ++n)
            CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x];
        __syncthreads();
    }
    CSub[threadIdx.x*CColumns+threadIdx.y]=CValue;
}

And here is the invocation of the kernel:

 dim3 dimBlock(16, 16, 1);
 dim3 dimGrid;
 dimGrid.x = (CColumns + dimBlock.x - 1)/dimBlock.x;
 dimGrid.y = (CRows + dimBlock.y - 1)/dimBlock.y;
 matrixMultiplyShared<<<dimGrid , dimBlock>>>(deviceA , deviceB , deviceC , ARows , AColumns, BRows ,BColumns , CRows , CColumns);

Unfortunately, this appears to be producing incorrect results.

Any help/explanation would be greatly appreciated.

There are at least 2 basic errors in your kernel, both of them trivial. Where you have this:

     float * BSub = &B[AColumns*16*k + 16*blockIdx.y];

You should use this:

     float * BSub = &B[AColumns*16*k + 16*blockIdx.x];

And where you have this:

 CSub[threadIdx.x*CColumns+threadIdx.y]=CValue;

You should use this:

 CSub[threadIdx.y*CColumns+threadIdx.x]=CValue;

This should give you basic correctness under the following conditions:

  1. square matrices
  2. matrix dimensions that are evenly divisible by the tile dimension

Fixing the square-matrix limitation is not difficult. Fixing the requirement that the dimensions be divisible by the tile dimension involves significant changes to the kernel, in order to:

  1. not process out-of-range elements
  2. properly populate the shared memory areas with appropriate values in the "border" regions

Since your code didn't seem to comprehend any of this, I wasn't sure whether you were asking about it, and chose not to address those items specifically (a sketch of such a kernel follows below).
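For reference, a minimal sketch of how those two items could be handled for arbitrary sizes might look like the kernel below. This is my own illustration, not code from the programming guide or from your question; the 16x16 tile and the reduced parameter list are assumptions made only to keep the example short.

 __global__ void matMulSharedGeneral(const float * A, const float * B, float * C,
                      int ARows, int AColumns, int BColumns) {
     __shared__ float As[16][16];
     __shared__ float Bs[16][16];
     int row = blockIdx.y * 16 + threadIdx.y;   // row of C this thread computes
     int col = blockIdx.x * 16 + threadIdx.x;   // column of C this thread computes
     float CValue = 0.0f;
     // iterate over tiles along the shared dimension, rounding up
     for (int k = 0; k < (AColumns + 15) / 16; ++k) {
         int aCol = k * 16 + threadIdx.x;       // column of A being loaded
         int bRow = k * 16 + threadIdx.y;       // row of B being loaded
         // item 2: out-of-range entries are padded with zero, so they
         // contribute nothing to the dot product
         As[threadIdx.y][threadIdx.x] = (row < ARows && aCol < AColumns)
                                        ? A[row * AColumns + aCol] : 0.0f;
         Bs[threadIdx.y][threadIdx.x] = (bRow < AColumns && col < BColumns)
                                        ? B[bRow * BColumns + col] : 0.0f;
         __syncthreads();
         for (int n = 0; n < 16; ++n)
             CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x];
         __syncthreads();
     }
     // item 1: only threads that map to a valid C element write a result
     if (row < ARows && col < BColumns)
         C[row * BColumns + col] = CValue;
 }

Note that every thread in the block executes the tile loop and both __syncthreads() calls; only the loads and the final store are guarded, which keeps the synchronization safe.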

I was able to get the following adaptation of your code working as a complete basic example. (Note that, to reduce the amount of code to look at, I have dispensed with the usual CUDA error checking; please don't take this as a representative example of good coding. The point of this answer is to demonstrate a correct algorithm, not proper error checking; a sketch of the usual checking pattern appears after the listing.)

#include <stdio.h>
#include <math.h>
#define TILE_DIM 16
#define DIMX 256
#define DIMY 256
#define RES 0.1

__global__ void matrixMultiplyShared(float * A, float * B, float * C,
                     int ARows, int AColumns,
                     int BRows, int BColumns,
                     int CRows, int CColumns) {
     float CValue = 0;
     if (((blockIdx.y * blockDim.y + threadIdx.y)< CRows) && ((blockIdx.x * blockDim.x + threadIdx.x) < CColumns)) {
       for (int k = 0; k < (AColumns / TILE_DIM); ++k) {
         float * ASub =  &A[AColumns * TILE_DIM * blockIdx.y + TILE_DIM * k];
         float * BSub = &B[AColumns*TILE_DIM*k + TILE_DIM*blockIdx.x];
         __shared__ float As[TILE_DIM][TILE_DIM];
         __shared__ float Bs[TILE_DIM][TILE_DIM];
         As[threadIdx.y][threadIdx.x] = ASub[threadIdx.y*AColumns+threadIdx.x];
         Bs[threadIdx.y][threadIdx.x] = BSub[threadIdx.y*AColumns+threadIdx.x];
         __syncthreads();
         for (int n = 0; n < TILE_DIM; ++n)
         CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x];
         __syncthreads();
       }
       C[((blockIdx.y * blockDim.y + threadIdx.y)*CColumns)+(blockIdx.x*blockDim.x)+threadIdx.x]=CValue;
     }
 }


void matrixMultiplyCPU(float * A, float * B, float * C,
                     int ARows, int AColumns,
                     int BRows, int BColumns,
                     int CRows, int CColumns) {
  for (int i = 0; i<ARows; i++)
    for (int j=0; j<BColumns; j++){
      float Ctemp = 0.0;
      for (int k=0; k<AColumns; k++)
        Ctemp += A[i*AColumns + k] * B[k*BColumns+j];
      C[i*CColumns+j] = Ctemp;
      }

}
int main(){
 int CColumns = DIMY, CRows=DIMX, AColumns=DIMY, ARows=DIMX, BColumns=DIMY, BRows=DIMX;
 dim3 dimBlock(TILE_DIM, TILE_DIM, 1);
 dim3 dimGrid;
 dimGrid.x = (CColumns + dimBlock.x - 1)/dimBlock.x;
 dimGrid.y = (CRows + dimBlock.y - 1)/dimBlock.y;
 float *deviceA, *deviceB, *deviceC;
 float hostA[DIMY][DIMX];
 float hostB[DIMY][DIMX];
 float hostC[DIMY][DIMX];
 float hostCp[DIMY][DIMX];
 for (int x = 0; x<DIMX; x++)
   for (int y = 0; y<DIMY; y++) {
     hostA[y][x] = rand()/(float)RAND_MAX;
     hostB[y][x] = rand()/(float)RAND_MAX;
     }
  cudaMalloc((void **)&deviceA, DIMX*DIMY*sizeof(float));
  cudaMalloc((void **)&deviceB, DIMX*DIMY*sizeof(float));
  cudaMalloc((void **)&deviceC, DIMX*DIMY*sizeof(float));
  cudaMemcpy(deviceA, hostA, DIMX*DIMY*sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(deviceB, hostB, DIMX*DIMY*sizeof(float), cudaMemcpyHostToDevice);
  matrixMultiplyShared<<<dimGrid , dimBlock>>>(deviceA , deviceB , deviceC , ARows , AColumns, BRows ,BColumns , CRows , CColumns);
  cudaMemcpy(hostC, deviceC, DIMX*DIMY*sizeof(float), cudaMemcpyDeviceToHost);
  matrixMultiplyCPU(&(hostA[0][0]) , &(hostB[0][0]) , &(hostCp[0][0]) , ARows , AColumns, BRows ,BColumns , CRows , CColumns);

 for (int y = 0; y<DIMY; y++)
   for (int x = 0; x<DIMX; x++)
     if (fabs(hostCp[y][x] - hostC[y][x]) > RES)
       {
       printf("Error at offset y=%d,x=%d, CPU = %f, GPU = %f\n", y, x, hostCp[y][x], hostC[y][x]);
       return 1;
       }
 printf("Finished!\n");
 return 0;
}
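For completeness, the error-checking pattern alluded to above looks roughly like this. The cudaCheck macro name is just an illustrative choice, not part of the code above:

 #include <stdio.h>
 #include <stdlib.h>

 // Illustrative error-checking macro: wrap each CUDA runtime call and abort
 // with file/line information if it reports a failure.
 #define cudaCheck(call)                                                   \
     do {                                                                  \
         cudaError_t err = (call);                                         \
         if (err != cudaSuccess) {                                         \
             fprintf(stderr, "CUDA error %s at %s:%d\n",                   \
                     cudaGetErrorString(err), __FILE__, __LINE__);         \
             exit(EXIT_FAILURE);                                           \
         }                                                                 \
     } while (0)

 // Example usage:
 //   cudaCheck(cudaMalloc((void **)&deviceA, DIMX*DIMY*sizeof(float)));
 //   matrixMultiplyShared<<<dimGrid, dimBlock>>>(...);
 //   cudaCheck(cudaGetLastError());        // catch launch configuration errors
 //   cudaCheck(cudaDeviceSynchronize());   // catch asynchronous execution errors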
