
Thrust CUDA find maximum per each group (segment)

My data looks like

value = [1, 2, 3, 4, 5, 6]
key =   [0, 1, 0, 2, 1, 2]

I need to know the maximum (value and index) for each group (key). So the result should be

max = [3, 5, 6]
index = [2, 4, 5]
key = [0, 1, 2]

How can I get it with CUDA thrust? I can do sort -> reduce_by_key, but it's not really efficient. In my case the vector size is > 10M and the key space is ~1K (starting from 0, without gaps).
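
For reference, a minimal sketch of that sort -> reduce_by_key baseline, shown here for the maximum value only (tracking the index as well requires a zip iterator and a custom functor, as in the full code further below):

#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <iostream>

int main(){
  int h_keys[] = {0, 1, 0, 2, 1, 2};
  int h_vals[] = {1, 2, 3, 4, 5, 6};
  thrust::device_vector<int> keys(h_keys, h_keys+6);
  thrust::device_vector<int> vals(h_vals, h_vals+6);
  thrust::device_vector<int> keys_out(3), max_out(3);
  // group equal keys together, carrying the values along
  thrust::sort_by_key(keys.begin(), keys.end(), vals.begin());
  // one reduction per key-group, keeping the larger value
  thrust::reduce_by_key(keys.begin(), keys.end(), vals.begin(),
                        keys_out.begin(), max_out.begin(),
                        thrust::equal_to<int>(), thrust::maximum<int>());
  for (int i = 0; i < 3; i++)   // expected: 0:3  1:5  2:6
    std::cout << keys_out[i] << ":" << max_out[i] << std::endl;
  return 0;
}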

Since the original question focused on thrust, I didn't have any suggestions other than what I mentioned in the comments.

However, based on further dialog in the comments, I thought I would post an answer that covers both CUDA and thrust.

The thrust method uses a sort_by_key operation to group like keys together, followed by a reduce_by_key operation to find the max + index for each key-group.

The CUDA method uses a custom atomic approach I describe here to find a 32-bit max plus 32-bit index (for each key-group).

The CUDA method is substantially (~10x) faster for this specific test case. I used a vector size of 10M and a key size of 10K for this test.

My test platform was CUDA 8RC, RHEL 7, and a Tesla K20X GPU. The K20X is a member of the Kepler generation, which has much faster global atomics than previous GPU generations.

Here's a fully worked example, covering both cases, and providing a timing comparison:

$ cat t1234.cu
#include <iostream>
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/sequence.h>
#include <thrust/functional.h>
#include <cstdlib>

#include <time.h>
#include <sys/time.h>
#define USECPSEC 1000000ULL

unsigned long long dtime_usec(unsigned long long start){

  timeval tv;
  gettimeofday(&tv, 0);
  return ((tv.tv_sec*USECPSEC)+tv.tv_usec)-start;
}

const size_t ksize = 10000;
const size_t vsize = 10000000;
const int nTPB = 256;

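// reduction functor for reduce_by_key: given two (value, index) tuples,
// return the tuple holding the larger value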
struct my_max_func
{

  template <typename T1, typename T2>
  __host__ __device__
  T1 operator()(const T1 t1, const T2 t2){
    T1 res;
    if (thrust::get<0>(t1) > thrust::get<0>(t2)){
      thrust::get<0>(res) = thrust::get<0>(t1);
      thrust::get<1>(res) = thrust::get<1>(t1);}
    else {
      thrust::get<0>(res) = thrust::get<0>(t2);
      thrust::get<1>(res) = thrust::get<1>(t2);}
    return res;
    }
};

typedef union  {
  float floats[2];                 // floats[0] = maxvalue
  int ints[2];                     // ints[1] = maxindex
  unsigned long long int ulong;    // for atomic update
} my_atomics;


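// atomically update a packed (float max, int index) pair: retry the CAS
// as long as the currently stored max is still smaller than val1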
__device__ unsigned long long int my_atomicMax(unsigned long long int* address, float val1, int val2)
{
    my_atomics loc, loctest;
    loc.floats[0] = val1;
    loc.ints[1] = val2;
    loctest.ulong = *address;
    while (loctest.floats[0] <  val1)
      loctest.ulong = atomicCAS(address, loctest.ulong,  loc.ulong);
    return loctest.ulong;
}


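// one thread per input element: each thread folds its (value, index) into
// the result slot for its key via the custom 64-bit atomic max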
__global__ void my_max_idx(const float *data, const int *keys,const int ds, my_atomics *res)
{

    int idx = (blockDim.x * blockIdx.x) + threadIdx.x;
    if (idx < ds)
      my_atomicMax(&(res[keys[idx]].ulong), data[idx],idx);
}


int main(){

  float *h_vals = new float[vsize];
  int   *h_keys = new int[vsize];
  for (int i = 0; i < vsize; i++) {h_vals[i] = rand(); h_keys[i] = rand()%ksize;}
// thrust method
  thrust::device_vector<float> d_vals(h_vals, h_vals+vsize);
  thrust::device_vector<int> d_keys(h_keys, h_keys+vsize);
  thrust::device_vector<int> d_keys_out(ksize);
  thrust::device_vector<float> d_vals_out(ksize);
  thrust::device_vector<int> d_idxs(vsize);
  thrust::device_vector<int> d_idxs_out(ksize);

  thrust::sequence(d_idxs.begin(), d_idxs.end());
  cudaDeviceSynchronize();
  unsigned long long et = dtime_usec(0);

  thrust::sort_by_key(d_keys.begin(), d_keys.end(), thrust::make_zip_iterator(thrust::make_tuple(d_vals.begin(), d_idxs.begin())));
  thrust::reduce_by_key(d_keys.begin(), d_keys.end(), thrust::make_zip_iterator(thrust::make_tuple(d_vals.begin(),d_idxs.begin())), d_keys_out.begin(), thrust::make_zip_iterator(thrust::make_tuple(d_vals_out.begin(), d_idxs_out.begin())), thrust::equal_to<int>(), my_max_func());
  cudaDeviceSynchronize();
  et = dtime_usec(et);
  std::cout << "Thrust time: " << et/(float)USECPSEC << "s" << std::endl;

// cuda method

  float *vals;
  int *keys;
  my_atomics *results;
  cudaMalloc(&keys, vsize*sizeof(int));
  cudaMalloc(&vals, vsize*sizeof(float));
  cudaMalloc(&results, ksize*sizeof(my_atomics));

  cudaMemset(results, 0, ksize*sizeof(my_atomics)); // works because vals are all positive
  cudaMemcpy(keys, h_keys, vsize*sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(vals, h_vals, vsize*sizeof(float), cudaMemcpyHostToDevice);
  et = dtime_usec(0);

  my_max_idx<<<(vsize+nTPB-1)/nTPB, nTPB>>>(vals, keys, vsize, results);
  cudaDeviceSynchronize();
  et = dtime_usec(et);
  std::cout << "CUDA time: " << et/(float)USECPSEC << "s" << std::endl;

// verification

  my_atomics *h_results = new my_atomics[ksize];
  cudaMemcpy(h_results, results, ksize*sizeof(my_atomics), cudaMemcpyDeviceToHost);
  for (int i = 0; i < ksize; i++){
    if (h_results[i].floats[0] != d_vals_out[i]) {std::cout << "value mismatch at index: " << i << " thrust: " << d_vals_out[i] << " CUDA: " << h_results[i].floats[0] << std::endl; return -1;}
    if (h_results[i].ints[1] != d_idxs_out[i]) {std::cout << "index mismatch at index: " << i << " thrust: " << d_idxs_out[i] << " CUDA: " << h_results[i].ints[1] << std::endl; return -1;}
    }

  std::cout << "Success!" << std::endl;
  return 0;
}

$ nvcc -arch=sm_35 -o t1234 t1234.cu
$ ./t1234
Thrust time: 0.026593s
CUDA time: 0.002451s
Success!
$
