
Optimizing an algorithm using map::count

I currently have an algorithm that hashes a key and checks for its uniqueness using map::count. How could this be optimized? I also forgot to mention that this is threaded.

int coll = 0;
map<long, bool> mymap;
#pragma omp parallel for
for (int i = 0; i < 256; i++)
  for (int j = 0; j < 256; j++)
    for (int k = 0; k < 256; k++)
    {
      string temp;
      temp = i;
      temp += j;
      temp += k;
      temp += temp;
      long myhash = hash(temp.c_str());

      if (mymap.count(myhash))
      {
        #pragma omp atomic
        coll++;
        cout << "Collision at " << i << " " << j << " " << k << endl;
      }
      else
      {
        #pragma omp critical
        mymap[myhash] = true;
      }
    }

cout << "Number of collisions: " << coll << endl;
cout << "Map size: " << mymap.size() << endl;

After much trial and error, here is the best version I could produce, generating 4294967296 keys in 82.5 seconds using 1GB of RAM.

#include <iostream>
#include <string>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <sys/time.h>
#include <iomanip>
#include <omp.h>
#include <vector>
#include <fstream>
#include <ios>
#include <unistd.h>
using namespace std;

class Timer 
{
private:
  timeval startTime;

public:

  void start()
  {
    gettimeofday(&startTime, NULL);
  }

  double stop()
  {
    timeval endTime;
    long seconds, useconds;
    double duration;

    gettimeofday(&endTime, NULL);

    seconds  = endTime.tv_sec  - startTime.tv_sec;
    useconds = endTime.tv_usec - startTime.tv_usec;

    duration = seconds + useconds/1000000.0;

    return duration;
  }

  static void printTime(double duration)
  {
    cout << setprecision(10) << fixed << duration << " seconds" << endl;
  }
};

static inline long hash(const char* str)
{
  return (*(long*)str)>> 0;
}  
int coll;
vector<bool> test;

void process_mem_usage(double& vm_usage, double& resident_set)
{
  using std::ios_base;
  using std::ifstream;
  using std::string;

  vm_usage     = 0.0;
  resident_set = 0.0;

  // 'file' stat seems to give the most reliable results
  //
  ifstream stat_stream("/proc/self/stat",ios_base::in);

  // dummy vars for leading entries in stat that we don't care about
  //
  string pid, comm, state, ppid, pgrp, session, tty_nr;
  string tpgid, flags, minflt, cminflt, majflt, cmajflt;
  string utime, stime, cutime, cstime, priority, nice;
  string O, itrealvalue, starttime;

  // the two fields we want
  //
  unsigned long vsize;
  long rss;

  stat_stream >> pid >> comm >> state >> ppid >> pgrp >> session >> tty_nr
          >> tpgid >> flags >> minflt >> cminflt >> majflt >> cmajflt
          >> utime >> stime >> cutime >> cstime >> priority >> nice
          >> O >> itrealvalue >> starttime >> vsize >> rss; // don't care about the rest

  stat_stream.close();

  long page_size_kb = sysconf(_SC_PAGE_SIZE) / 1024; // in case x86-64 is configured to use 2MB pages
  vm_usage     = vsize / 1024.0;
  resident_set = rss * page_size_kb;
}
Timer timer;
void signal_handlerkill(int sig)
{
  cout << "Number of collisions: " << coll << endl;
  //cout << test.size() << endl;
  double vm, rss;
  process_mem_usage(vm, rss);
  vm /= 1024.0;
  rss /= 1024.0;
  cout << "VM: " << vm << "MB" << endl;
  timer.printTime(timer.stop());
  exit(1);
}

int main()
{
  signal(SIGINT, signal_handlerkill);
  timer = Timer();
  timer.start();
  coll = 0;

  for (long i = 0; i < 4294967296+1; i++)
  {
    test.push_back(0); //Set up the vector
  }

  #pragma omp parallel for 
  for (int i = 0; i < 256; i++)
    for (int j = 0; j < 256; j++)
      for (int k = 0; k < 256; k++)
        for (int l = 0; l < 256; l++)
        {
          const char temp[4] = {i, j, k, l};
          // reinterprets the 4 bytes as an integer index; this assumes long is
          // 4 bytes, otherwise the read goes past the end of temp
          long myhash = (*(long*)temp);

          if(test.at(myhash))
          {
            #pragma omp atomic
            coll++;
          }
          else
          {
            test[myhash].flip();
          }
        }

  cout << "Number of collisions: " << coll << endl;
  double vm, rss;
  process_mem_usage(vm, rss);
  vm /= 1024.0;
  rss /= 1024.0;
  cout << "VM: " << vm << "MB" << endl;
  timer.printTime(timer.stop());

  return 0;
}

In terms of space, you could use a set instead of a map, since the bool value is useless.

Also, if you're using C++11, an unordered_set will probably give better performance.
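
For example, a minimal sketch of the unordered_set idea, assuming C++11 and using std::hash<std::string> as a stand-in for the hash() function from the question (whose implementation isn't shown):

#include <iostream>
#include <string>
#include <unordered_set>
using namespace std;

int main()
{
    unordered_set<size_t> seen;       // stores only the keys, no bool payload
    std::hash<std::string> hasher;    // stand-in for the question's hash()
    long coll = 0;

    for (int i = 0; i < 256; i++)
        for (int j = 0; j < 256; j++)
            for (int k = 0; k < 256; k++)
            {
                string temp;
                temp = i;
                temp += j;
                temp += k;
                temp += temp;

                // insert() returns pair<iterator, bool>; .second is false
                // when the key is already present, i.e. a collision
                if (!seen.insert(hasher(temp)).second)
                    coll++;
            }

    cout << "Number of collisions: " << coll << endl;
    return 0;
}

Like the original map, the unordered_set would still need synchronization (or one container per thread) if you keep the OpenMP parallel loop.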

Also,

temp = i;
temp += j;
temp += k;
temp += temp;

probably has a bigger overhead than using a stringstream or even char arrays.

Use insert instead of operator[]. The insert function returns a pair; the second value indicates whether the value was actually inserted, i.e. you can rewrite your code as follows:

if (!mymap.insert(std::make_pair(myhash, true)).second) {
    coll++;
    cout << "Collision at " << i << " " << j << " " << k << endl;
}

Well, I answered this here: https://stackoverflow.com/a/10606381/389833, and it went something like this:

int coll = 0;
typedef map<long, bool> MY_MAP_TYPE;
MY_MAP_TYPE mymap;
string temp;
long myhash;
for (int i = 0; i < 256; i++)
    for (int j = 0; j < 256; j++)
        for (int k = 0; k < 256; k++)
        {
            temp = i;
            temp += j;
            temp += k;
            temp += temp;
            myhash = hash(temp.c_str());
            if( mymap.insert( MY_MAP_TYPE::value_type( myhash, true ) ).second == false)
            {
                coll++;
                cout << "Collision at " << i << " " << j << " " << k << endl;
            }
        }

Depending on the size of your hash you could trade space for CPU time and just use a bool vector rather than a map for constant-time lookup. If the range is 0 to 256^3 (the number of unique values here), it should only take about 2 MB, since STL vectors in many implementations will internally compact bool vectors to bits. Of course this won't be efficient (or perhaps work at all) if your hash function can return very large values like 2^32 or even 2^64.
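
A minimal sketch of that idea for the 256^3 case; a trivial 3-byte packing stands in here for the real hash function, under the assumption that the hash values fit in the range [0, 256^3):

#include <iostream>
#include <vector>
using namespace std;

int main()
{
    const size_t range = 256UL * 256UL * 256UL;   // 256^3 possible keys
    vector<bool> seen(range, false);              // bit-packed: roughly 2 MB
    long coll = 0;

    for (int i = 0; i < 256; i++)
        for (int j = 0; j < 256; j++)
            for (int k = 0; k < 256; k++)
            {
                // stand-in "hash": pack the three bytes into one index
                size_t myhash = (size_t(i) << 16) | (size_t(j) << 8) | size_t(k);

                if (seen[myhash])
                    coll++;                // constant-time lookup, no tree traversal
                else
                    seen[myhash] = true;
            }

    cout << "Number of collisions: " << coll << endl;
    return 0;
}

Note that vector<bool> packs bits into shared words, so under OpenMP concurrent writes to neighbouring bits would race without synchronization.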

If you are only concerned with 6-character strings, then you can easily optimize the string generation in your loops as follows:

for (int i = 0; i < 256; i++)
  for (int j = 0; j < 256; j++)
    for (int k = 0; k < 256; k++)
    {
/*
      string temp;
      temp = i;
      temp += j;
      temp += k;
      temp += temp;
      myhash = hash(temp.c_str());
*/  
      // effectively, the same as above
      const char temp[7] = {i, j, k, i, j, k, '\0'};
      myhash = hash(temp);
    }

The above, combined with the use of insert as also suggested, should provide a nice performance boost.
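
Putting the two suggestions together, a sketch of what the combined loop might look like; std::hash<std::string> is used as a stand-in, since the question's hash() implementation isn't shown:

#include <functional>
#include <iostream>
#include <map>
#include <string>
using namespace std;

int main()
{
    typedef map<long, bool> MY_MAP_TYPE;
    MY_MAP_TYPE mymap;
    std::hash<std::string> hasher;   // stand-in for the question's hash()
    int coll = 0;

    for (int i = 0; i < 256; i++)
        for (int j = 0; j < 256; j++)
            for (int k = 0; k < 256; k++)
            {
                // build the 6-character key in place, no string concatenation
                const char temp[7] = { char(i), char(j), char(k),
                                       char(i), char(j), char(k), '\0' };
                long myhash = (long)hasher(string(temp, 6));

                // a failed insert means the key was already there: a collision,
                // and the map is only searched once
                if (!mymap.insert(MY_MAP_TYPE::value_type(myhash, true)).second)
                {
                    coll++;
                    cout << "Collision at " << i << " " << j << " " << k << endl;
                }
            }

    cout << "Number of collisions: " << coll << endl;
    return 0;
}

The single insert per key replaces the count-then-assign pair from the original code, so each key touches the map only once.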

EDIT:

So, your comment below about this version being "slower" makes me really question:

  1. How you are profiling
  2. The implementation of your hash function

These are questionable, because running this code on my machine (ignore the 3.3GHz magic number for now, as that is the speed of my CPU):

#include <iostream>
#include <vector>
#include <cstdint>
#include <boost/functional/hash.hpp>
#include <x86intrin.h>

using namespace std;

uint64_t f(std::vector<uint64_t>& values)
{
    boost::hash<std::string> hasher;

    uint64_t start = __rdtsc();
    int z = 0;

    for (int i = 0; i < 256; i++)
    {
      for (int j = 0; j < 256; j++)
      {
        for (int k = 0; k < 256; k++)
        {
          string temp;
          temp = i;
          temp += j;
          temp += k;
          temp += temp;

          values[z++] = hasher(temp);
        }
      }
    }

    return (__rdtsc()) - start;
}

uint64_t g(std::vector<uint64_t>& values)
{
    boost::hash<std::string> hasher;

    uint64_t start = __rdtsc();
    int z = 0;

    for (int i = 0; i < 256; i++)
    {
      for (int j = 0; j < 256; j++)
      {
        for (int k = 0; k < 256; k++)
        {
          const char temp[7] = {i, j, k, i, j, k, '\0'};
          values[z++] = hasher(std::string(temp, 6));
        }
      }
    }

    return (__rdtsc()) - start;
}

static const double freq = 3300000000.0;
static const int elements = 1024 * 1024 * 16;

int main()
{
    std::vector<uint64_t> values_f(elements);
    std::vector<uint64_t> values_g(elements);

    uint64_t delta_f = f(values_f);
    uint64_t delta_g = g(values_g);

    cout << "F: " << (delta_f * 1000.0) / freq << "ms \n";
    cout << "G: " << (delta_g * 1000.0) / freq << "ms \n";

    for(int x = 0; x < elements; ++x)
    {
        if(values_f[x] != values_g[x])
        {
            cout << "Error: Expected "
                 << values_f[x] << " received "
                 << values_g[x] << "!\n";
        }
    }

    return 0;
}

Gives this output:

F: 3297.17ms 
G: 736.444ms 

This shows that the version that constructs the std::string from a char buffer (which wouldn't even technically be necessary) performs much better than the version that does the string concatenation. The difference in my case is the use of boost::hash (and obviously a std::vector instead of a std::map or std::set, but that doesn't bias the test towards either result).
