How to perform multiple matrix multiplications in CUDA?
I have an array of square matrices int *M[10]; such that M[i] points to the first element of the i-th matrix. I want to multiply all the matrices M[i] by another matrix N, so that I receive an array of square matrices int *P[10] as output.
I see different possibilities:

1. Assign the computation of the different elements of each M[i] to different threads; for example, I have 10 matrices of size 4x4, so the number of threads involved would be 160. How can I use CUDA to implement this approach?
2. Build a composite matrix of size 40x40 (i.e., collecting the 10 matrices of size 4x4 together) and use 40x40 threads; but this approach seems to require more time. I am trying to work with the array of matrices, but I think I am doing something wrong. How can I use this approach with the 10 matrices, and how do I code it in the kernel function?

This is what I am trying:
void GPU_Multi(int *M[2], int *N, int *P[2], size_t width)
{
    int *devM[2];
    int *devN[2];
    int *devP[2];
    size_t allocasize = sizeof(int) * width * width;

    for (int i = 0; i < 10; i++)
    {
        cudaMalloc((void**)&devM[i], allocasize);
        cudaMalloc((void**)&devP[i], allocasize);
    }
    cudaMalloc((void**)&devN, allocasize);

    for (int i = 0; i < 10; i++) {
        cudaMemcpy(devM[i], M[i], allocasize, cudaMemcpyHostToDevice);
        cudaMemcpy(devN, N, allocasize, cudaMemcpyHostToDevice);

        dim3 block(width*2, width*2);
        dim3 grid(1, 1, 1);
        Kernel_Function<<<grid, block>>>(devM[2], devN, devP[2], width);

        for (int i = 0; i < 10; i++)
        {
            cudaMemcpy(P[i], P[i], allocatesize, cudaMemcpyDeviceToHost);
            cudaFree(devM[i]);
            cudaFree(devP[i]);
        }
    }
I think the fastest performance is likely to be achieved by using the cuBLAS batched gemm functions, which were designed specifically for this purpose (performing a large number of "relatively small" matrix-matrix multiply operations).
Even though you want to multiply an array of matrices (M[]) by a single matrix (N), the batched gemm function will require you to pass an array of matrices for N as well (i.e., N[]), which will all be the same in your case.
EDIT: Now that I have worked through an example, it seems clear to me that, with a modification to the example below, we could pass a single N matrix and have the GPU_Multi function simply send that single N matrix to the device once, while still passing an array of pointers for N, i.e. d_Narray in the example below, with all of the pointers pointing to the same N matrix on the device.
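A minimal sketch of that modification (the names devN_single and h_Nptrs are illustrative and not part of the worked example below), assuming GPU_Multi receives a single host matrix mytype *N instead of an array:

// Sketch only: one device copy of N, aliased num_mat times in the pointer array.
mytype *devN_single;
cudaMalloc((void **)&devN_single, n_size);
cudaMemcpy(devN_single, N, n_size, cudaMemcpyHostToDevice);

mytype *h_Nptrs[num_mat];              // host-side array of device pointers
for (int i = 0; i < num_mat; i++)
    h_Nptrs[i] = devN_single;          // every batch entry uses the same N

cudaMemcpy(d_Narray, h_Nptrs, num_mat * sizeof(mytype *), cudaMemcpyHostToDevice);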
Here is a fully worked batched GEMM example:
#include <stdio.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <assert.h>
#define ROWM 4
#define COLM 3
#define COLN 5
#define cudaCheckErrors(msg) \
    do { \
        cudaError_t __err = cudaGetLastError(); \
        if (__err != cudaSuccess) { \
            fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
                msg, cudaGetErrorString(__err), \
                __FILE__, __LINE__); \
            fprintf(stderr, "*** FAILED - ABORTING\n"); \
            exit(1); \
        } \
    } while (0)
typedef float mytype;
// Pi = Mi x Ni
// pr = P rows = M rows
// pc = P cols = N cols
// mc = M cols = N rows
void GPU_Multi(mytype **M, mytype **N, mytype **P
             , size_t pr, size_t pc, size_t mc
             , size_t num_mat, mytype alpha, mytype beta)
{
    mytype *devM[num_mat];
    mytype *devN[num_mat];
    mytype *devP[num_mat];
    size_t p_size = sizeof(mytype) * pr * pc;
    size_t m_size = sizeof(mytype) * pr * mc;
    size_t n_size = sizeof(mytype) * mc * pc;
    const mytype **d_Marray, **d_Narray;
    mytype **d_Parray;
    cublasHandle_t myhandle;
    cublasStatus_t cublas_result;

    // allocate per-matrix device storage
    for (int i = 0; i < num_mat; i++)
    {
        cudaMalloc((void**)&devM[i], m_size);
        cudaMalloc((void**)&devN[i], n_size);
        cudaMalloc((void**)&devP[i], p_size);
    }
    // allocate device-side arrays of pointers, as required by the batched interface
    cudaMalloc((void**)&d_Marray, num_mat*sizeof(mytype *));
    cudaMalloc((void**)&d_Narray, num_mat*sizeof(mytype *));
    cudaMalloc((void**)&d_Parray, num_mat*sizeof(mytype *));
    cudaCheckErrors("cudaMalloc fail");

    // copy the matrix data and the pointer arrays to the device
    for (int i = 0; i < num_mat; i++) {
        cudaMemcpy(devM[i], M[i], m_size, cudaMemcpyHostToDevice);
        cudaMemcpy(devN[i], N[i], n_size, cudaMemcpyHostToDevice);
        cudaMemcpy(devP[i], P[i], p_size, cudaMemcpyHostToDevice);
    }
    cudaMemcpy(d_Marray, devM, num_mat*sizeof(mytype *), cudaMemcpyHostToDevice);
    cudaMemcpy(d_Narray, devN, num_mat*sizeof(mytype *), cudaMemcpyHostToDevice);
    cudaMemcpy(d_Parray, devP, num_mat*sizeof(mytype *), cudaMemcpyHostToDevice);
    cudaCheckErrors("cudaMemcpy H2D fail");

    cublas_result = cublasCreate(&myhandle);
    assert(cublas_result == CUBLAS_STATUS_SUCCESS);
    // change to cublasDgemmBatched for double
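    // Note: cuBLAS assumes column-major storage, while the host arrays in this
    // test are C (row-major) 2D arrays. Because every matrix here is filled
    // with a single constant value, the row-/column-major distinction does not
    // affect the verified result; the second example further below shows how
    // to handle genuinely row-major data by swapping the operand order.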
    cublas_result = cublasSgemmBatched(myhandle, CUBLAS_OP_N, CUBLAS_OP_N
                                     , pr, pc, mc
                                     , &alpha, d_Marray, pr, d_Narray, mc
                                     , &beta, d_Parray, pr
                                     , num_mat);
    assert(cublas_result == CUBLAS_STATUS_SUCCESS);

    // copy the results back to the host and free device storage
    for (int i = 0; i < num_mat; i++)
    {
        cudaMemcpy(P[i], devP[i], p_size, cudaMemcpyDeviceToHost);
        cudaFree(devM[i]);
        cudaFree(devN[i]);
        cudaFree(devP[i]);
    }
    cudaFree(d_Marray);
    cudaFree(d_Narray);
    cudaFree(d_Parray);
    cudaCheckErrors("cudaMemcpy D2H fail");
}
int main(){

    mytype h_M1[ROWM][COLM], h_M2[ROWM][COLM];
    mytype h_N1[COLM][COLN], h_N2[COLM][COLN];
    mytype h_P1[ROWM][COLN], h_P2[ROWM][COLN];
    mytype *h_Marray[2], *h_Narray[2], *h_Parray[2];

    for (int i = 0; i < ROWM; i++)
      for (int j = 0; j < COLM; j++){
        h_M1[i][j] = 1.0f; h_M2[i][j] = 2.0f;}
    for (int i = 0; i < COLM; i++)
      for (int j = 0; j < COLN; j++){
        h_N1[i][j] = 1.0f; h_N2[i][j] = 1.0f;}
    for (int i = 0; i < ROWM; i++)
      for (int j = 0; j < COLN; j++){
        h_P1[i][j] = 0.0f; h_P2[i][j] = 0.0f;}

    h_Marray[0] = &(h_M1[0][0]);
    h_Marray[1] = &(h_M2[0][0]);
    h_Narray[0] = &(h_N1[0][0]);
    h_Narray[1] = &(h_N2[0][0]);
    h_Parray[0] = &(h_P1[0][0]);
    h_Parray[1] = &(h_P2[0][0]);

    GPU_Multi(h_Marray, h_Narray, h_Parray, ROWM, COLN, COLM, 2, 1.0f, 0.0f);

    for (int i = 0; i < ROWM; i++)
      for (int j = 0; j < COLN; j++){
        if (h_P1[i][j] != COLM*1.0f)
        {
          printf("h_P1 mismatch at %d,%d was: %f should be: %f\n"
                 , i, j, h_P1[i][j], COLM*1.0f); return 1;
        }
        if (h_P2[i][j] != COLM*2.0f)
        {
          printf("h_P2 mismatch at %d,%d was: %f should be: %f\n"
                 , i, j, h_P2[i][j], COLM*2.0f); return 1;
        }
      }
    printf("Success!\n");
    return 0;
}
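For reference, assuming the source above is saved as batched_gemm.cu (file name assumed), a typical compile line linking against cuBLAS would be:

nvcc -o batched_gemm batched_gemm.cu -lcublas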
As it emerges from the comments above and from Robert Crovella's answer, there are different possible approaches. Each approach can be better suited to a different case, i.e., to a different number N of matrices to multiply and to different matrix dimensions MxM. Let me summarize them below:

1. If N is small and M is large, perhaps the best approach would be to use cublas<t>gemm called from host code;
2. If N is moderate and M is moderate, and if a device with compute capability of at least 3.5 is available, then a good possibility would be to use dynamic parallelism, namely, to create a thread grid of N threads and launch a cublas<t>gemm from within a kernel; this approach may fail for large N or M because of the large number of threads required;
3. If N is large and M is small, then the cuBLAS batched approach linked by Robert Crovella could be of interest;
4. Similarly, if N is large and M is small, a cuBLAS stream-based approach would be worth a try, as mentioned in Robert's comment;
5. If N is large and M is small, an approach using a thread grid of N threads, each "manually" computing an optimized matrix multiplication, could be appealing; for example, if one has to construct a matrix multiplication algorithm for 4x4 matrices, the multiplication performed by each thread could be optimized according to the minimum number of scalar multiplications needed for 4x4 matrices.

Minimal sketches of possibilities 4 and 5 are given right after this list.
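A rough sketch of the stream-based possibility (4), assuming device buffers d_M[i], d_N[i], d_P[i] already hold NUM_MAT square matrices of size m in column-major order (all of these names are illustrative, not from the examples in this thread):

// Hypothetical sketch: one SGEMM per matrix, each issued on its own stream.
cublasHandle_t handle;
cublasCreate(&handle);
cudaStream_t streams[NUM_MAT];
float alpha = 1.0f, beta = 0.0f;
for (int i = 0; i < NUM_MAT; i++) {
    cudaStreamCreate(&streams[i]);
    cublasSetStream(handle, streams[i]);   // subsequent cuBLAS call runs on stream i
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                m, m, m, &alpha, d_M[i], m, d_N[i], m, &beta, d_P[i], m);
}
cudaDeviceSynchronize();

And a rough sketch of the "one thread per product" idea (5), again only an illustration rather than a tuned implementation; the matrices are assumed to be stored back to back in row-major order:

// Hypothetical sketch: each thread multiplies one dim x dim matrix pair.
__global__ void smallMatMulKernel(const float *M, const float *N, float *P,
                                  int dim, int num_mat)
{
    int t = blockIdx.x * blockDim.x + threadIdx.x;
    if (t >= num_mat) return;
    const float *Mi = M + t * dim * dim;   // t-th input matrix of M
    const float *Ni = N + t * dim * dim;   // t-th input matrix of N
    float       *Pi = P + t * dim * dim;   // t-th output matrix
    for (int r = 0; r < dim; r++)
        for (int c = 0; c < dim; c++) {
            float acc = 0.0f;
            for (int k = 0; k < dim; k++)
                acc += Mi[r * dim + k] * Ni[k * dim + c];
            Pi[r * dim + c] = acc;
        }
}
// Possible launch for 10 matrices of size 4x4 (one thread per matrix):
// smallMatMulKernel<<<1, 10>>>(d_M, d_N, d_P, 4, 10);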
If the data is stored in host memory in row-major order, and we wish to perform the matrix multiplications and retrieve the results in row-major order as well, the following code does that:
#include <stdio.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <assert.h>
#define ROWM 4
#define COLM 3
#define COLN 5
#define cudaCheckErrors(msg) \
    do { \
        cudaError_t __err = cudaGetLastError(); \
        if (__err != cudaSuccess) { \
            fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
                msg, cudaGetErrorString(__err), \
                __FILE__, __LINE__); \
            fprintf(stderr, "*** FAILED - ABORTING\n"); \
            exit(1); \
        } \
    } while (0)
void printArrayS(float *ptr, int rows, int cols, char mode, char *name)
{
    printf("%s\n", name);
    for (int i = 0; i < rows; i++)
    {
        for (int j = 0; j < cols; j++)
        {
            if (mode == 'N') /* Normal mode */
            {
                if (ptr[i * cols + j] >= 0)
                    printf(" %3.6f ", ptr[i * cols + j]);
                else
                    printf("%3.6f ", ptr[i * cols + j]);
            }
            else /* Transpose mode */
            {
                if (ptr[j * rows + i] >= 0)
                    printf("%3.6f ", ptr[j * rows + i]);
                else
                    printf("%3.6f ", ptr[j * rows + i]);
            }
        }
        printf("\n");
    }
}
typedef float mytype;
// Pi = Mi x Ni
// pr = P rows = M rows
// pc = P cols = N cols
// mc = M cols = N rows
void GPU_Multi(mytype **M, mytype **N, mytype **P,
               size_t pr, size_t pc, size_t mc,
               size_t num_mat, mytype alpha, mytype beta)
{
#define NUM_MAT 2

    mytype *devM[NUM_MAT];
    mytype *devN[NUM_MAT];
    mytype *devP[NUM_MAT];
    size_t p_size = sizeof(mytype) * pr * pc;
    size_t m_size = sizeof(mytype) * pr * mc;
    size_t n_size = sizeof(mytype) * mc * pc;
    const mytype **d_Marray, **d_Narray;
    mytype **d_Parray;
    cublasHandle_t myhandle;
    cublasStatus_t cublas_result;

    for (int i = 0; i < NUM_MAT; i++)
    {
        cudaMalloc((void **)&devM[i], m_size);
        cudaMalloc((void **)&devN[i], n_size);
        cudaMalloc((void **)&devP[i], p_size);
    }
    cudaMalloc((void **)&d_Marray, NUM_MAT * sizeof(mytype *));
    cudaMalloc((void **)&d_Narray, NUM_MAT * sizeof(mytype *));
    cudaMalloc((void **)&d_Parray, NUM_MAT * sizeof(mytype *));
    cudaCheckErrors("cudaMalloc fail");

    for (int i = 0; i < NUM_MAT; i++) {
        cudaMemcpy(devM[i], M[i], m_size, cudaMemcpyHostToDevice);
        cudaMemcpy(devN[i], N[i], n_size, cudaMemcpyHostToDevice);
        cudaMemcpy(devP[i], P[i], p_size, cudaMemcpyHostToDevice);
    }
    cudaMemcpy(d_Marray, devM, NUM_MAT * sizeof(mytype *), cudaMemcpyHostToDevice);
    cudaMemcpy(d_Narray, devN, NUM_MAT * sizeof(mytype *), cudaMemcpyHostToDevice);
    cudaMemcpy(d_Parray, devP, NUM_MAT * sizeof(mytype *), cudaMemcpyHostToDevice);
    cudaCheckErrors("cudaMemcpy H2D fail");

    cublas_result = cublasCreate(&myhandle);
    assert(cublas_result == CUBLAS_STATUS_SUCCESS);

    // change to cublasDgemmBatched for double
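    // Because cuBLAS expects column-major storage while the host data here is
    // row-major, the call below effectively computes P^T = N^T * M^T: the
    // operand order is swapped and the dimensions are passed as (pc, pr, mc)
    // with leading dimensions pc, mc and pc. The buffer written through
    // d_Parray is then exactly P = M x N laid out in row-major order.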
    cublas_result = cublasSgemmBatched(myhandle, CUBLAS_OP_N, CUBLAS_OP_N
                                     , pc, pr, mc
                                     , &alpha, d_Narray, pc, d_Marray, mc
                                     , &beta, d_Parray, pc
                                     , NUM_MAT);
    assert(cublas_result == CUBLAS_STATUS_SUCCESS);

    for (int i = 0; i < NUM_MAT; i++)
    {
        cudaMemcpy(P[i], devP[i], p_size, cudaMemcpyDeviceToHost);
        cudaFree(devM[i]);
        cudaFree(devN[i]);
        cudaFree(devP[i]);
    }
    cudaFree(d_Marray);
    cudaFree(d_Narray);
    cudaFree(d_Parray);
    cudaCheckErrors("cudaMemcpy D2H fail");
}
int main() {

    mytype h_M1[ROWM][COLM], h_M2[ROWM][COLM];
    mytype h_N1[COLM][COLN], h_N2[COLM][COLN];
    mytype h_P1[ROWM][COLN], h_P2[ROWM][COLN];
    mytype *h_Marray[2], *h_Narray[2], *h_Parray[2];

    for (int i = 0; i < ROWM; i++)
        for (int j = 0; j < COLM; j++) {
            h_M1[i][j] = (i + j) * 1.0f; h_M2[i][j] = (i - j) * 2.0f;
        }
    for (int i = 0; i < COLM; i++)
        for (int j = 0; j < COLN; j++) {
            h_N1[i][j] = (i + j) * 1.0f; h_N2[i][j] = (i - j) * 1.0f;
        }
    for (int i = 0; i < ROWM; i++)
        for (int j = 0; j < COLN; j++) {
            h_P1[i][j] = 0.0f; h_P2[i][j] = 0.0f;
        }

    printArrayS((float *)h_M1, ROWM, COLM, 'N', "h_M1");
    printArrayS((float *)h_N1, COLM, COLN, 'N', "h_N1");
    printArrayS((float *)h_M2, ROWM, COLM, 'N', "h_M2");
    printArrayS((float *)h_N2, COLM, COLN, 'N', "h_N2");

    h_Marray[0] = &(h_M1[0][0]);
    h_Marray[1] = &(h_M2[0][0]);
    h_Narray[0] = &(h_N1[0][0]);
    h_Narray[1] = &(h_N2[0][0]);
    h_Parray[0] = &(h_P1[0][0]);
    h_Parray[1] = &(h_P2[0][0]);

    GPU_Multi(h_Marray, h_Narray, h_Parray, ROWM, COLN, COLM, 2, 1.0f, 0.0f);

    printArrayS((float *)h_P1, ROWM, COLN, 'N', "h_P1");
    printArrayS((float *)h_P2, ROWM, COLN, 'N', "h_P2");

    return 0;
}
Result:
h_M1
0.000000 1.000000 2.000000
1.000000 2.000000 3.000000
2.000000 3.000000 4.000000
3.000000 4.000000 5.000000
h_N1
0.000000 1.000000 2.000000 3.000000 4.000000
1.000000 2.000000 3.000000 4.000000 5.000000
2.000000 3.000000 4.000000 5.000000 6.000000
h_M2
0.000000 -2.000000 -4.000000
2.000000 0.000000 -2.000000
4.000000 2.000000 0.000000
6.000000 4.000000 2.000000
h_N2
0.000000 -1.000000 -2.000000 -3.000000 -4.000000
1.000000 0.000000 -1.000000 -2.000000 -3.000000
2.000000 1.000000 0.000000 -1.000000 -2.000000
h_P1
5.000000 8.000000 11.000000 14.000000 17.000000
8.000000 14.000000 20.000000 26.000000 32.000000
11.000000 20.000000 29.000000 38.000000 47.000000
14.000000 26.000000 38.000000 50.000000 62.000000
h_P2
-10.000000 -4.000000 2.000000 8.000000 14.000000
-4.000000 -4.000000 -4.000000 -4.000000 -4.000000
2.000000 -4.000000 -10.000000 -16.000000 -22.000000
8.000000 -4.000000 -16.000000 -28.000000 -40.000000