
CUDA Zero Copy vs. CudaMemcpy on Jetson TK1

My question: I am looking for someone either to point out a mistake in the way I am trying to use zero copy in CUDA, or to offer a more "behind the scenes" perspective on why the zero-copy approach would not be faster than the memcpy approach. By the way, I am running my tests on NVIDIA's TK1 processor, under Ubuntu.

My question relates to using CUDA to make efficient use of the NVIDIA TK1's (physically) unified memory architecture. NVIDIA provides two methods of abstracting GPU/CPU memory transfers:

  1. Unified memory abstraction (using cudaHostAlloc & cudaHostGetDevicePointer)
  2. Explicit copies to host and to device (using cudaMalloc() & cudaMemcpy)

A short description of my test code: I test the same CUDA kernel using both method 1 and method 2. Given that with method 1 there is no copy of the source data to the device and no copy of the result data back from the device, I expected #1 to be faster. However, the results were the opposite of my assumption (method #1 is 50% slower). Here is my code for this test:

#include <libfreenect/libfreenect.hpp>
#include <iostream>
#include <vector>
#include <cmath>
#include <pthread.h>
#include <cxcore.h>
#include <time.h>
#include <sys/time.h>
#include <memory.h>
///CUDA///
#include <cuda.h>
#include <cuda_runtime.h>

 ///OpenCV 2.4
#include <highgui.h>
#include <cv.h>
#include <opencv2/gpu/gpu.hpp>

using namespace cv;
using namespace std;

///The Test Kernel///
__global__ void cudaCalcXYZ( float *dst, float *src, float *M, int height, int width, float scaleFactor, int minDistance)
{
    float nx,ny,nz, nzpminD, jFactor;
    int heightCenter = height / 2;
    int widthCenter = width / 2;
    //int j = blockIdx.x;   //Represents which row we are in
    int index = blockIdx.x*width;
    jFactor = (blockIdx.x - heightCenter)*scaleFactor;
    for(int i= 0; i < width; i++)
    {
        nz = src[index];
        nzpminD = nz + minDistance;
        nx = (i - widthCenter )*(nzpminD)*scaleFactor;      
        ny = (jFactor)*(nzpminD);   
        //Solve for only Y matrix (height values)
        dst[index++] = nx*M[4] + ny*M[5] + nz*M[6];
        //dst[index++] = 1 + 2 + 3;
    }
}

//Function fwd declarations
double getMillis();
double getMicros();
void runCudaTestZeroCopy(int iter, int cols, int rows);
void runCudaTestDeviceCopy(int iter, int cols, int rows);

int main(int argc, char **argv) {

    //ZERO COPY FLAG (allows runCudaTestZeroCopy to run without fail)
    cudaSetDeviceFlags(cudaDeviceMapHost);

    //Runs kernel using explicit data copy to 'device' and back from 'device'
    runCudaTestDeviceCopy(20, 640,480);
    //Uses 'unified memory' cuda abstraction so device can directly work from host data
    runCudaTestZeroCopy(20,640, 480);

    std::cout << "Stopping test" << std::endl;

    return 0;
}

void runCudaTestZeroCopy(int iter, int cols, int rows)
{
    cout << "CUDA Test::ZEROCOPY" << endl;
        int src_rows = rows;
        int src_cols = cols;
        int m_rows = 4;
        int m_cols = 4;
        int dst_rows = src_rows;
        int dst_cols = src_cols;
        //Create and allocate memory for host mats pointers
        float *psrcMat;
        float *pmMat;
        float *pdstMat;
        cudaHostAlloc((void **)&psrcMat, src_rows*src_cols*sizeof(float), cudaHostAllocMapped);
        cudaHostAlloc((void **)&pmMat, m_rows*m_cols*sizeof(float), cudaHostAllocMapped);
        cudaHostAlloc((void **)&pdstMat, dst_rows*dst_cols*sizeof(float), cudaHostAllocMapped);
        //Create mats using host pointers
        Mat src_mat = Mat(cvSize(src_cols, src_rows), CV_32FC1, psrcMat);
        Mat m_mat   = Mat(cvSize(m_cols, m_rows), CV_32FC1, pmMat);
        Mat dst_mat = Mat(cvSize(dst_cols, dst_rows), CV_32FC1, pdstMat);

        //configure src and m mats
        for(int i = 0; i < src_rows*src_cols; i++)
        {
            psrcMat[i] = (float)i;
        }
        for(int i = 0; i < m_rows*m_cols; i++)
        {
            pmMat[i] = 0.1234;
        }
        //Create pointers to dev mats
        float *d_psrcMat;
        float *d_pmMat;
        float *d_pdstMat;
        //Map device to host pointers
        cudaHostGetDevicePointer((void **)&d_psrcMat, (void *)psrcMat, 0);
        //cudaHostGetDevicePointer((void **)&d_pmMat, (void *)pmMat, 0);
        cudaHostGetDevicePointer((void **)&d_pdstMat, (void *)pdstMat, 0);
        //Copy matrix M to device
        cudaMalloc( (void **)&d_pmMat, sizeof(float)*4*4 ); //4x4 matrix
        cudaMemcpy( d_pmMat, pmMat, sizeof(float)*m_rows*m_cols, cudaMemcpyHostToDevice);

        //Additional Variables for kernels
        float scaleFactor = 0.0021;
        int minDistance = -10;

        //Run kernel! //cudaSimpleMult( float *dst, float *src, float *M, int width, int height)
        int blocks = src_rows;
        const int numTests = iter;
        double perfStart = getMillis();

        for(int i = 0; i < numTests; i++)
        {           
            //cudaSimpleMult<<<blocks,1>>>(d_pdstMat, d_psrcMat, d_pmMat, src_cols, src_rows);
            cudaCalcXYZ<<<blocks,1>>>(d_pdstMat, d_psrcMat, d_pmMat, src_rows, src_cols, scaleFactor, minDistance);
            cudaDeviceSynchronize();
        }
        double perfStop = getMillis();
        double perfDelta = perfStop - perfStart;
        cout << "Ran " << numTests << " iterations totaling " << perfDelta << "ms" << endl;
        cout << " Average time per iteration: " << (perfDelta/(float)numTests) << "ms" << endl;

        //Copy result back to host
        //cudaMemcpy(pdstMat, d_pdstMat, sizeof(float)*src_rows*src_cols, cudaMemcpyDeviceToHost);
        //cout << "Printing results" << endl;
        //for(int i = 0; i < 16*16; i++)
        //{
        //  cout << "src[" << i << "]= " << psrcMat[i] << " dst[" << i << "]= " << pdstMat[i] << endl;
        //}

        cudaFree(d_psrcMat);
        cudaFree(d_pmMat);
        cudaFree(d_pdstMat);
        cudaFreeHost(psrcMat);
        cudaFreeHost(pmMat);
        cudaFreeHost(pdstMat);
}

void runCudaTestDeviceCopy(int iter, int cols, int rows)
{
        cout << "CUDA Test::DEVICE COPY" << endl;
        int src_rows = rows;
        int src_cols = cols;
        int m_rows = 4;
        int m_cols = 4;
        int dst_rows = src_rows;
        int dst_cols = src_cols;
        //Create and allocate memory for host mats pointers
        float *psrcMat;
        float *pmMat;
        float *pdstMat;
        cudaHostAlloc((void **)&psrcMat, src_rows*src_cols*sizeof(float), cudaHostAllocMapped);
        cudaHostAlloc((void **)&pmMat, m_rows*m_cols*sizeof(float), cudaHostAllocMapped);
        cudaHostAlloc((void **)&pdstMat, dst_rows*dst_cols*sizeof(float), cudaHostAllocMapped);
        //Create pointers to dev mats
        float *d_psrcMat;
        float *d_pmMat;
        float *d_pdstMat;
        cudaMalloc( (void **)&d_psrcMat, sizeof(float)*src_rows*src_cols ); 
        cudaMalloc( (void **)&d_pdstMat, sizeof(float)*src_rows*src_cols );
        cudaMalloc( (void **)&d_pmMat, sizeof(float)*4*4 ); //4x4 matrix
        //Create mats using host pointers
        Mat src_mat = Mat(cvSize(src_cols, src_rows), CV_32FC1, psrcMat);
        Mat m_mat   = Mat(cvSize(m_cols, m_rows), CV_32FC1, pmMat);
        Mat dst_mat = Mat(cvSize(dst_cols, dst_rows), CV_32FC1, pdstMat);

        //configure src and m mats
        for(int i = 0; i < src_rows*src_cols; i++)
        {
            psrcMat[i] = (float)i;
        }
        for(int i = 0; i < m_rows*m_cols; i++)
        {
            pmMat[i] = 0.1234;
        }

        //Additional Variables for kernels
        float scaleFactor = 0.0021;
        int minDistance = -10;

        //Run kernel! //cudaSimpleMult( float *dst, float *src, float *M, int width, int height)
        int blocks = src_rows;

        double perfStart = getMillis();
        for(int i = 0; i < iter; i++)
        {           
            //Copy from host to device
            cudaMemcpy( d_psrcMat, psrcMat, sizeof(float)*src_rows*src_cols, cudaMemcpyHostToDevice);
            cudaMemcpy( d_pmMat, pmMat, sizeof(float)*m_rows*m_cols, cudaMemcpyHostToDevice);
            //Run Kernel
            //cudaSimpleMult<<<blocks,1>>>(d_pdstMat, d_psrcMat, d_pmMat, src_cols, src_rows);
            cudaCalcXYZ<<<blocks,1>>>(d_pdstMat, d_psrcMat, d_pmMat, src_rows, src_cols, scaleFactor, minDistance);
            //Copy from device to host
            cudaMemcpy( pdstMat, d_pdstMat, sizeof(float)*src_rows*src_cols, cudaMemcpyDeviceToHost);
        }
        double perfStop = getMillis();
        double perfDelta = perfStop - perfStart;
        cout << "Ran " << iter << " iterations totaling " << perfDelta << "ms" << endl;
        cout << " Average time per iteration: " << (perfDelta/(float)iter) << "ms" << endl;

        cudaFree(d_psrcMat);
        cudaFree(d_pmMat);
        cudaFree(d_pdstMat);
        cudaFreeHost(psrcMat);
        cudaFreeHost(pmMat);
        cudaFreeHost(pdstMat);
}

//Timing functions for performance measurements
double getMicros()
{
    timespec ts;
    //double t_ns, t_s;
    long t_ns;
    double t_s;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    t_s = (double)ts.tv_sec;
    t_ns = ts.tv_nsec;
    //return( (t_s *1000.0 * 1000.0) + (double)(t_ns / 1000.0) );
    return ((double)t_ns / 1000.0);
}

double getMillis()
{
    timespec ts;
    double t_ns, t_s;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    t_s = (double)ts.tv_sec;
    t_ns = (double)ts.tv_nsec;
    return( (t_s * 1000.0) + (t_ns / 1000000.0) );
}

I have already looked at the post CUDA zero copy performance, but I felt it was not relevant for the following reason: the GPU and CPU here have a physically unified memory architecture.

Thanks

When you use ZeroCopy, a read of the memory goes through a path where it queries the memory unit to fetch the data from system memory. That operation has some latency.

When using direct access to memory, the memory unit gathers the data from global memory, which has a different access pattern and latency.

Actually seeing this difference would require some form of profiling.
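One way to see the per-kernel cost directly (a sketch added for illustration, not part of the original answer) is to time the launches with CUDA events, which measure elapsed time on the GPU rather than host wall-clock time. The variable names are taken from the question's test code:

//Sketch: timing one kernel launch with CUDA events (variable names from the question's test code)
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);

cudaEventRecord(start, 0);
cudaCalcXYZ<<<blocks, 1>>>(d_pdstMat, d_psrcMat, d_pmMat, src_rows, src_cols, scaleFactor, minDistance);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);               //wait until the kernel and the stop event have completed

float ms = 0.0f;
cudaEventElapsedTime(&ms, start, stop);   //elapsed GPU time in milliseconds
cout << "Kernel time: " << ms << " ms" << endl;

cudaEventDestroy(start);
cudaEventDestroy(stop);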

That said, your call to the global function uses a single thread per block:

cudaCalcXYZ<<< blocks,1 >>> (...

In that case, the GPU has little opportunity to hide the latency when memory is gathered from system memory (or global memory). I would recommend that you use more threads (some multiple of 64, at least 128 in total) and run the profiler on it to get the cost of the memory accesses. Your algorithm appears to be separable, so changing the loop from

for(int i= 0; i < width; i++)

to

for (int i = threadIdx.x ; i < width ; i += blockDim.x)

would probably improve overall performance. With an image width of 640, that becomes 5 iterations of 128 threads:

cudaCalcXYZ<<< blocks,128 >>> (...

I believe this would bring some performance improvement.
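Putting those two snippets together, the rewritten kernel might look like the following sketch (an illustration of the suggestion above, not code from the question; note that the original index++ pattern no longer works once several threads share a row, so the destination index has to include i explicitly):

//Sketch: one block per row, blockDim.x threads striding across the columns of that row
__global__ void cudaCalcXYZ( float *dst, float *src, float *M, int height, int width, float scaleFactor, int minDistance)
{
    int heightCenter = height / 2;
    int widthCenter  = width / 2;
    int rowStart = blockIdx.x*width;                        //first element of the row this block handles
    float jFactor = (blockIdx.x - heightCenter)*scaleFactor;
    for(int i = threadIdx.x; i < width; i += blockDim.x)    //each thread covers every blockDim.x-th column
    {
        float nz = src[rowStart + i];
        float nzpminD = nz + minDistance;
        float nx = (i - widthCenter)*nzpminD*scaleFactor;
        float ny = jFactor*nzpminD;
        dst[rowStart + i] = nx*M[4] + ny*M[5] + nz*M[6];    //index now includes i explicitly
    }
}

//Launched with 128 threads per block:
//cudaCalcXYZ<<<blocks, 128>>>(d_pdstMat, d_psrcMat, d_pmMat, src_rows, src_cols, scaleFactor, minDistance);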

The ZeroCopy feature allows us to run computations on data on the device without manually copying it into device memory the way the cudaMemcpy function does. Zero-copy memory only passes the host address to the device, where it is read and written by the kernel. Therefore, the more thread blocks you declare for the kernel, the more of that data is read and written by the kernel through those host addresses, and the greater the performance gain compared with declaring only a few thread blocks for the kernel.
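For reference, a minimal sketch of the zero-copy pattern described above (N, myKernel, numBlocks, and threadsPerBlock are placeholders, not names from the question):

//Minimal zero-copy sketch: no cudaMemcpy anywhere, only a mapped host pointer
cudaSetDeviceFlags(cudaDeviceMapHost);       //must run before the CUDA context is created

float *h_buf, *d_buf;
cudaHostAlloc((void **)&h_buf, N * sizeof(float), cudaHostAllocMapped);
cudaHostGetDevicePointer((void **)&d_buf, (void *)h_buf, 0);

//The kernel reads and writes system memory directly through d_buf
myKernel<<<numBlocks, threadsPerBlock>>>(d_buf, N);
cudaDeviceSynchronize();                     //after this, the results are visible in h_buf

cudaFreeHost(h_buf);                         //only the host allocation needs to be freed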
