[英]C++ Lock-Free Queue
我設計了這個function,用來實現Lock-Free隊列,但是在實際執行過程中(dequeue)存在死鎖問題。 我檢查了很多次,我認為這很好。 我在x86平台上運行,有12個線程可以讀寫。
現在我想弄清楚是什么導致了這種情況,我想知道這是否是線程安全的設計,或者它需要在哪里繼續優化以獲得更高的性能。
12 個線程出隊和 12 個線程入隊。
開發工具:Visual Studio 2019
我非常期待您的回復。 感謝你。
#include <iostream>
#include <functional>
#include<atomic>
#include<cassert>
#include<thread>
#include<vector>
template<typename T>
class mpmc_queue_t
{
public:
mpmc_queue_t(size_t size) :
_size(size),
_mask(_size - 1),
_buffer((node_t*)(new aligned_node_t[_size]))
{
assert((_size != 0) && ((_size & (~_size + 1)) == _size));
_read.store(0, std::memory_order_relaxed);
_write.store(0, std::memory_order_relaxed);
for (size_t i = 0; i < _size; ++i)
{
_buffer[i].status.store(false, std::memory_order_relaxed);
}
}
~mpmc_queue_t()
{
delete[] _buffer;
}
bool enqueue(const T& data)
{
auto write = _write.fetch_add(1, std::memory_order_relaxed);
node_t* node = &_buffer[write & _mask];
while (true)
{
if (!node->status.load(std::memory_order_acquire))
{
node->data = data;
node->status.store(true, std::memory_order_release);
return true;
}
std::this_thread::yield();
}
}
bool dequeue(T& data)
{
auto read = _read.fetch_add(1, std::memory_order_relaxed);
node_t* node = &_buffer[read & _mask];
while (true)
{
if (node->status.load(std::memory_order_acquire))
{
data = node->data;
node->status.store(false, std::memory_order_release);
return true;
}
std::this_thread::yield();
}
}
private:
struct node_t
{
T data;
std::atomic_bool status;
};
typedef typename std::aligned_storage<sizeof(node_t), std::alignment_of<node_t>::value>::type aligned_node_t;
typedef char cache_line_pad_t[64];
cache_line_pad_t _pad0;
size_t _size;
size_t _mask;
node_t* const _buffer;
cache_line_pad_t _pad1;
std::atomic_size_t _read;
cache_line_pad_t _pad2;
std::atomic_size_t _write;
cache_line_pad_t _pad3;
};
#define COUNT 100000000
#define THREAD 12
typedef mpmc_queue_t<size_t> queue_t;
template<typename T>
void consumer_func(T* queue)
{
size_t count = COUNT;
size_t value = 0;
while (count > 0) {
if (queue->dequeue(value)) {
--count;
}
}
std::cout << "consumer_func ID: " << std::this_thread::get_id() << " ok" << std::endl;
}
template<typename T>
void producer_func(T* queue)
{
size_t count = COUNT;
while (count > 0) {
if (queue->enqueue(count)) {
--count;
}
}
std::cout << "producer_func ID: " << std::this_thread::get_id() << " ok" << std::endl;
}
template<typename T>
long double
run_test(
T producer_func,
T consumer_func)
{
typedef std::chrono::high_resolution_clock clock_t;
typedef std::chrono::time_point<clock_t> time_t;
time_t start;
time_t end;
start = clock_t::now();
std::thread producer0(producer_func);
std::thread producer1(producer_func);
std::thread producer2(producer_func);
std::thread producer3(producer_func);
std::thread producer4(producer_func);
std::thread producer5(producer_func);
std::thread producer6(producer_func);
std::thread producer7(producer_func);
std::thread producer8(producer_func);
std::thread producer9(producer_func);
std::thread producer10(producer_func);
std::thread producer11(producer_func);
std::thread consumer0(consumer_func);
std::thread consumer1(consumer_func);
std::thread consumer2(consumer_func);
std::thread consumer3(consumer_func);
std::thread consumer4(consumer_func);
std::thread consumer5(consumer_func);
std::thread consumer6(consumer_func);
std::thread consumer7(consumer_func);
std::thread consumer8(consumer_func);
std::thread consumer9(consumer_func);
std::thread consumer10(consumer_func);
std::thread consumer11(consumer_func);
producer0.join();
producer1.join();
producer2.join();
producer3.join();
producer4.join();
producer5.join();
producer6.join();
producer7.join();
producer8.join();
producer9.join();
producer10.join();
producer11.join();
consumer0.join();
consumer1.join();
consumer2.join();
consumer3.join();
consumer4.join();
consumer5.join();
consumer6.join();
consumer7.join();
consumer8.join();
consumer9.join();
consumer10.join();
consumer11.join();
end = clock_t::now();
return
(end - start).count()
* ((double)std::chrono::high_resolution_clock::period::num
/ std::chrono::high_resolution_clock::period::den);
}
int main()
{
{
queue_t queue(65536);
long double seconds = run_test(std::bind(&producer_func<queue_t>, &queue),
std::bind(&consumer_func<queue_t>, &queue));
std::cout << "The control group completed "
<< COUNT * THREAD
<< " iterations in "
<< seconds
<< " seconds. "
<< ((long double)COUNT * THREAD / seconds) / 1000000
<< " million enqueue/dequeue pairs per second."
<< std::endl;
}
return 0;
}
這種設計不是無鎖的,而是“無鎖”的,因為出隊中的線程可能必須等待對該項目的入隊操作完成(通過status
發出信號),即它不提供鎖所需的進度保證-自由。
正如 Matt Timmermans 已經指出的那樣,索引環繞時會出現問題。 不能保證節點的status
已經被更新,或者由於對status
的操作不是順序一致的,所以這個更新是否可見。 當兩個線程(在不同的輪次中)嘗試推送到同一個節點時,這可能會導致數據競爭,因為兩個線程都觀察到node->status.load()
返回 false。
為了解決這個問題,您可以在節點中使用計數器而不是布爾值來跟蹤節點所屬的當前輪次(類似於 Dmitry Vukov 在此隊列中的處理方式: http://www.1024cores.net/home/無鎖算法/隊列/有界-mpmc-隊列)
聲明:本站的技術帖子網頁,遵循CC BY-SA 4.0協議,如果您需要轉載,請注明本站網址或者原文地址。任何問題請咨詢:yoyou2525@163.com.