
OpenCL - GPU sum and CPU sum not the same

I am new to OpenCL.

I wrote a program that is supposed to perform a parallel reduction over a 1,000,000-element array. In the last part of the code I compare the CPU sum and the GPU sum, and they are not the same. My local size is 64. From index 90 onward, the GPU sum starts to grow larger than the CPU sum.

EDIT: If I sum smaller numbers (right now I am summing 0-1M), the final sum comes out correct.

Kernel:

__kernel void gpuSumfunc(__global float *vec, __global float *sum, int n)
{
    __local float tempSum[64];

    int i;                                 //loop variable must not be const
    const int globalID = get_global_id(0); //BLOCK_DIM*BLOCK_IND+THREAD_ID
    const int tid = get_local_id(0);       //THREAD_ID
    const int BlockDIM = get_local_size(0);//BLOCK_DIM=64

    if (globalID < n)
    {
        tempSum[tid] = vec[globalID];      //Copy this work-item's element into local memory
    }
    else
    {
        tempSum[tid] = 0;
    }
    barrier(CLK_LOCAL_MEM_FENCE);          //Wait for all threads to copy their data

    //Tree reduction: halve the number of active threads each pass
    for (i = BlockDIM / 2; i > 0; i /= 2)
    {
        if (tid < i)
        {
            tempSum[tid] += tempSum[tid + i];
        }
        barrier(CLK_LOCAL_MEM_FENCE);
    }

    //Thread 0 writes this work-group's partial sum
    if (tid == 0)
    {
        sum[get_group_id(0)] = tempSum[0];
    }
}

Main:

//HOST-cpu
    float *h_a;//input
    float *h_b;//output
    float *h_s;
    //DEVICE-gpu
    cl_mem d_a;//input buffer
    cl_mem d_b;//Output

    //Kernel file
    FILE* fileKernel;

    //Memory allocation - cpu input 
    vector = (float*)malloc(n * sizeof(float));
    h_a = (float*)malloc(n * sizeof(float));
    h_b = (float*)malloc(n * sizeof(float));
    h_s = (float*)malloc(n * sizeof(float));

    //Zero the host buffers ("*h_a = { 0 };" would only zero the
    //first element, so use memset from <string.h> instead)
    memset(vector, 0, n * sizeof(float));
    memset(h_a, 0, n * sizeof(float));
    memset(h_b, 0, n * sizeof(float));
    memset(h_s, 0, n * sizeof(float));


    //Initializing Data for gpu
    for (i = 0; i < n; i++) {
        h_a[i] = i;//(float)i;
    }


    //Initializing Data for cpu
    for (i = 0; i < n; i++) {
        vector[i] = i;//(float)i;
    }
    fileKernel = fopen("KernelCode.cl", "r");
    if (!fileKernel)
    {
        printf("Cannot open kernel file!\n");
        exit(1);
    }

    // Read kernel code
    kernelSource = (char*)malloc(MAX_SOURCE_SIZE);
    source_size = fread(kernelSource, 1, MAX_SOURCE_SIZE, fileKernel);
    fclose(fileKernel);


    error = clGetPlatformIDs(2, cp_Platform, NULL); //array with two platforms

    error = clGetDeviceIDs(cp_Platform[1], CL_DEVICE_TYPE_GPU, 1, &Device_ID, NULL); // cp_Platform[1] = Nvidia GPU

    context = clCreateContext(NULL, 1, &Device_ID, NULL, NULL, &error); // creating openCL context 

    queue = clCreateCommandQueue(context, Device_ID, 0, &error); // creating command queue, executing openCL context on device cp_Platform[1] 


    globalSize = ceil(n / (float)localSize)*localSize;

    d_a = clCreateBuffer(context, CL_MEM_READ_ONLY, n * sizeof(float), NULL, NULL);
    d_b = clCreateBuffer(context, CL_MEM_WRITE_ONLY, n * sizeof(float), NULL, NULL); //output buffer: the kernel writes to it, so not CL_MEM_READ_ONLY

    error = clEnqueueWriteBuffer(queue, d_a, CL_TRUE, 0, n * sizeof(float), h_a, 0, NULL, NULL); //Enqueue commands to write to a buffer object from host memory.
    error |= clEnqueueWriteBuffer(queue, d_b, CL_TRUE, 0,n * sizeof(float), h_s, 0, NULL, NULL); //Enqueue commands to write to a buffer object from host memory.


    program = clCreateProgramWithSource(context, 1, (const char **)& kernelSource, (const size_t *)&source_size, &error); //this function creates a program object for this specific openCL context
    error = clBuildProgram(program, 0, NULL, NULL, NULL, NULL); //compiles and links a program executable from the program source


    kernel = clCreateKernel(program, "gpuSumfunc", &error); //creating kernel object 
    error = clGetKernelWorkGroupInfo(kernel, Device_ID, CL_KERNEL_WORK_GROUP_SIZE, sizeof(size_t), (void*)&workGroupSize, NULL);
    error = clGetKernelWorkGroupInfo(kernel, Device_ID, CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE, sizeof(size_t), (void*)&pWorkGroupSize, NULL);
    error = clGetDeviceInfo(Device_ID, CL_DEVICE_MAX_COMPUTE_UNITS, sizeof(NumOfCU), &NumOfCU, NULL);

    error |= clSetKernelArg(kernel, 0, sizeof(cl_mem), &d_a); //Used to set the argument value for a specific argument of a kernel.
    error |= clSetKernelArg(kernel, 1, sizeof(cl_mem), &d_b);
    error |= clSetKernelArg(kernel, 2, sizeof(int), &n);
    error |= clEnqueueNDRangeKernel(queue, kernel, 1, NULL, &globalSize, &localSize, 0, NULL, NULL); // Enqueues a command to execute a kernel on a device.

    clFinish(queue);

    clEnqueueReadBuffer(queue, d_b, CL_TRUE, 0, n * sizeof(float), h_b, 0, NULL, NULL); //reading data from the device (d_b) back to the host (h_b)
    clock_t end = clock();

    for (i = 0; i < (n + localSize - 1) / localSize; i++)
    {
        gpuSum += h_b[i];
        cpuSum = cpuSumfunc(vector, 64 * (i + 1));
        if (fabs(gpuSum - cpuSum) > Tolerance) //compare magnitudes, not a signed difference
        {
            printf("\nfailed! for index:%d", i);
            printf("\nCPU sum = %f", cpuSum);
            printf("\nGPU sum = %f\n", gpuSum);
        }
        else
        {
            printf("\nPassed! for index:%d", i);
            printf("\nCPU sum: %.2f", cpuSum);
            printf("\nGPU sum: %.2f\n", gpuSum);
        }
    }


    // cpu


    time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
    //printf("\nTotal program's running time is: %.2f\n", time_spent);

    free(h_a);
    free(h_b);
    free(h_s);
    free(vector);
    //free(kernelSource);
    clReleaseProgram(program);
    clReleaseContext(context);
    clReleaseKernel(kernel);
    clReleaseCommandQueue(queue);
}

float cpuSumfunc(float * vec, int n)
{

    float sum = 0;
    int i;

    for (i = 0; i < n; i++)
    {
        sum += vec[i];

    }
    return sum;
}

Float32 values do not have enough precision for this summation and produce rounding errors, and those rounding errors are different on the CPU and on the GPU.

16956560 needs 25 bits to be represented exactly. Float32 provides only 23 explicit mantissa bits (24 bits of precision counting the implicit leading bit). That means: 16956560 + 1 = 16956560 when the addition is performed in Float32.
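A minimal standalone C demo of this effect (not part of the original program):

#include <stdio.h>

int main(void)
{
    float f = 16956560.0f;  /* needs 25 bits; above 2^24 float32 only stores even integers */
    float g = f + 1.0f;     /* 16956561 is not representable and rounds back to 16956560 */

    printf("f     = %.1f\n", f);
    printf("f + 1 = %.1f\n", g);
    printf("equal = %s\n", (f == g) ? "yes" : "no");   /* prints "yes" */
    return 0;
}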

The difference between the two devices comes from:

  • Order: the CPU and the GPU add the values in a different order, so the rounding errors are different (see the sketch after this list).
  • Precision: most CPUs (x86 etc.) use internal 48-bit floating-point math and only then store the result as 32 bits, whereas the GPU does all of its math in plain 32 bits.
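Here is a small standalone C sketch (hypothetical, not from the question's code) showing that the order alone changes a float32 sum:

#include <stdio.h>

int main(void)
{
    const int n = 1000000;
    float ascending = 0.0f, descending = 0.0f;
    int i;

    for (i = 0; i < n; i++)        /* sum 0 + 1 + 2 + ... */
        ascending += (float)i;
    for (i = n - 1; i >= 0; i--)   /* same values, reverse order */
        descending += (float)i;

    /* The exact sum n*(n-1)/2 = 499999500000 needs about 39 bits,
     * so both results are rounded -- and, because the order differs,
     * they are usually rounded differently. */
    printf("ascending  = %f\n", ascending);
    printf("descending = %f\n", descending);
    return 0;
}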

You can work around it by using Float64 (double) or by using integers (int64_t = long).
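For instance, the host-side reference sum could accumulate in double; a minimal sketch of that fix (the name cpuSumfunc64 is hypothetical):

/* Accumulating in double keeps enough precision for sums of this
 * size even when the inputs stay float. */
double cpuSumfunc64(float *vec, int n)
{
    double sum = 0.0;
    int i;

    for (i = 0; i < n; i++)
        sum += (double)vec[i];

    return sum;
}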

Note: your GPU sum is actually more accurate than the CPU sum, because it first adds the small values together and only then adds those larger partial results into the final total, as the sketch below illustrates.
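A sketch of the idea behind that note: pairwise (tree) summation, which mirrors the GPU work-group reduction, keeps each addition between operands of similar magnitude (the helper name pairwiseSum is hypothetical):

/* Pairwise (tree) summation: recursively sum each half, like the
 * GPU reduction. Each addition combines values of similar magnitude,
 * so less precision is lost than in a single left-to-right
 * accumulation. Assumes n >= 1. */
float pairwiseSum(const float *vec, int n)
{
    if (n == 1)
        return vec[0];

    int half = n / 2;
    return pairwiseSum(vec, half) + pairwiseSum(vec + half, n - half);
}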
