boost 库中提供了 boost::lockfree::queue 类型的无锁队列,对于一般需要队列的程序,其效率都不错,下面使用一个用例来说明。
程序是一个典型的生产者—消费者模型,生产者和消费者都可以使用多线程。它的效率比使用上层互斥锁的实现快很多,因为它直接使用底层的原子操作来同步数据。
freedeque.h
#pragma once
#ifndef INCLUDED_UTILS_LFRINGQUEUE
#define INCLUDED_UTILS_LFRINGQUEUE

// Work around MSVC alignment errors on std::atomic types (no effect on
// other compilers).
#define _ENABLE_ATOMIC_ALIGNMENT_FIX

#include <vector>
#include <mutex>
#include <thread>
#include <atomic>
#include <chrono>
#include <cstring>
#include <iostream>

// Lock-free fixed-capacity ring queue of raw pointers.
//
// Multiple producers and consumers synchronize through CAS on the head and
// tail indices. A slot holding NULL is free; a non-NULL slot is occupied.
// The queue never owns the pointed-to objects.
//
//   _TyData  - element type; the queue stores _TyData* pointers.
//   _uiCount - default capacity (slot count) used when the constructor
//              argument is omitted.
//
// NOTE(review): the original listing lost its numeric literals during
// extraction; the restored values (0-wraparound, 1000 retries, 100000
// default capacity) follow the algorithm's obvious intent. The original
// also redefined the standard macro ATOMIC_FLAG_INIT, which is undefined
// behavior; the standard <atomic> definition is used instead.
template <typename _TyData, long _uiCount = 100000>
class lfringqueue
{
public:
    // Allocate the slot array; all slots start empty (NULL).
    explicit lfringqueue(long uiCount = _uiCount)
        : m_lHeadIterator(0), m_lTailIterator(0), m_uiCount(uiCount)
    {
        m_queue = new _TyData*[m_uiCount];
        std::memset(m_queue, 0, sizeof(_TyData*) * m_uiCount);
    }

    // Frees only the slot array, never the queued objects.
    ~lfringqueue()
    {
        delete[] m_queue;
    }

    // Owning a raw array makes copying unsafe (double delete) - forbid it.
    lfringqueue(const lfringqueue&) = delete;
    lfringqueue& operator=(const lfringqueue&) = delete;

    // Append pdata. Returns false for NULL input, or when the queue stays
    // full for uiRetries consecutive checks.
    bool enqueue(_TyData *pdata, unsigned int uiRetries = 1000)
    {
        if (NULL == pdata)
        {
            // Null enqueues are not allowed: NULL marks a free slot.
            return false;
        }

        unsigned int uiCurrRetries = 0;
        while (uiCurrRetries < uiRetries)
        {
            // Release fence in order to prevent memory reordering
            // of any read or write with the following write.
            std::atomic_thread_fence(std::memory_order_release);

            long lHeadIterator = m_lHeadIterator;
            if (NULL == m_queue[lHeadIterator])
            {
                long lHeadIteratorOrig = lHeadIterator;
                ++lHeadIterator;
                if (lHeadIterator >= m_uiCount)
                    lHeadIterator = 0; // wrap around

                // Don't worry if this CAS fails: it only means another
                // producer already claimed the slot; retry with the new head.
                if (std::atomic_compare_exchange_strong(&m_lHeadIterator, &lHeadIteratorOrig, lHeadIterator))
                {
                    // Pointer-sized stores are atomic (you won't set a
                    // partial pointer); publishing the pointer makes the
                    // slot visible to consumers.
                    m_queue[lHeadIteratorOrig] = pdata;
                    if (m_lEventSet.test_and_set())
                    {
                        m_bHasItem.test_and_set();
                    }
                    return true;
                }
            }
            else
            {
                // The queue looks full. Spin a bounded number of times to
                // see if a consumer pops an item.
                ++uiCurrRetries;
            }
        }
        return false;
    }

    // Pop the oldest item into *ppdata. Returns false (with *ppdata = NULL)
    // when the queue is empty; returns false for a NULL ppdata.
    bool dequeue(_TyData **ppdata)
    {
        if (!ppdata)
        {
            // Null dequeues are not allowed!
            return false;
        }

        bool bDone = false;
        while (!bDone)
        {
            // Acquire fence in order to prevent memory reordering
            // of any read or write with the following read.
            std::atomic_thread_fence(std::memory_order_acquire);

            long lTailIterator = m_lTailIterator;
            _TyData *pdata = m_queue[lTailIterator];
            if (NULL != pdata)
            {
                long lTailIteratorOrig = lTailIterator;
                ++lTailIterator;
                if (lTailIterator >= m_uiCount)
                    lTailIterator = 0; // wrap around

                // Winning the CAS gives this consumer exclusive ownership
                // of the slot; losers simply retry with the updated tail.
                if (std::atomic_compare_exchange_strong(&m_lTailIterator, &lTailIteratorOrig, lTailIterator))
                {
                    // Pointer-sized loads/stores are atomic (you won't get
                    // a partial pointer).
                    m_queue[lTailIteratorOrig] = NULL;
                    *ppdata = pdata;
                    return true;
                }
            }
            else
            {
                bDone = true; // queue is empty
                m_lEventSet.clear();
            }
        }
        *ppdata = NULL;
        return false;
    }

    // Best-effort size estimate; inherently racy under concurrency.
    long countguess() const
    {
        long lCount = trycount();
        if (0 != lCount)
            return lCount;

        // trycount() returns 0 both when empty and when full. Disambiguate:
        // if the slot right before the tail is occupied, the queue is full;
        // if it is NULL, the queue is empty.
        long lLastInsert = m_lTailIterator - 1;
        if (lLastInsert < 0)
            lLastInsert = m_uiCount - 1;

        _TyData *pdata = m_queue[lLastInsert];
        if (pdata != NULL)
            return m_uiCount;
        return 0;
    }

    // Capacity in slots.
    long getmaxsize() const
    {
        return m_uiCount;
    }

    // Test-and-set the "has item" flag; returns its previous state.
    bool HasItem()
    {
        return m_bHasItem.test_and_set();
    }

    // Clear the "has item" flag.
    void SetItemFlagBack()
    {
        m_bHasItem.clear();
    }

private:
    // Racy size computation: a result of 0 means either empty or full
    // (head == tail in both cases) - see countguess().
    long trycount() const
    {
        long lHeadIterator = m_lHeadIterator;
        long lTailIterator = m_lTailIterator;

        if (lTailIterator > lHeadIterator)
            return m_uiCount - lTailIterator + lHeadIterator;

        // NOTE: returns 0 when the queue is full as well as when empty.
        return lHeadIterator - lTailIterator;
    }

private:
    std::atomic<long> m_lHeadIterator;               // enqueue index (next slot to fill)
    std::atomic<long> m_lTailIterator;               // dequeue index (next slot to drain)
    _TyData **m_queue;                               // array of pointers to the data
    long m_uiCount;                                  // size of the array
    std::atomic_flag m_lEventSet = ATOMIC_FLAG_INIT; // guards updates of m_bHasItem
    std::atomic_flag m_bHasItem = ATOMIC_FLAG_INIT;  // set when an item has been enqueued
};

#endif // INCLUDED_UTILS_LFRINGQUEUE
/*
* File: main.cpp
* Author: Peng
*
* Created on February 22, 2014, 9:55 PM
*/
#include <iostream>
#include <string>
#include "freedeque.h"
#include <sstream>
#include <boost/thread/thread.hpp>
#include <boost/lockfree/queue.hpp>
#include <boost/atomic.hpp>
#include<boost/thread/lock_guard.hpp>
#include<boost/thread/mutex.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>

// Test configuration: 5 producers x 50000 items each, consumed by 10 threads.
const int NUM_ENQUEUE_THREAD = 5;
const int NUM_DEQUEUE_THREAD = 10;
const long NUM_ITEM = 50000;
const long NUM_DATA = NUM_ENQUEUE_THREAD * NUM_ITEM;

// Payload type: an integer plus its decimal string representation.
class Data {
public:
    Data(int i = 0) : m_iData(i)
    {
        std::stringstream ss;
        ss << i;
        m_szDataString = ss.str();
    }

    // Order strictly by the numeric member.
    bool operator< (const Data & aData) const
    {
        return m_iData < aData.m_iData;
    }

    // Mutable access to the numeric member.
    int& GetData()
    {
        return m_iData;
    }

private:
    int m_iData;
    std::string m_szDataString;
};

// Shared pool holding every item the producers will enqueue (freed in main()).
Data* g_arrData = new Data[NUM_DATA];
boost::mutex mtx;                      // serializes std::cout access in print()
constexpr long size = 0.5 * NUM_DATA;  // kept for source compatibility (unused)

lfringqueue<Data, 10000> LockFreeQueue;
boost::lockfree::queue<Data*> BoostQueue(10000);

// Count of items consumed so far across all consumer threads. Lets the
// consumers terminate once everything produced has been drained; the
// original Dequeue() spun in while(true) forever, so the joins in main()
// never returned and the program could not exit.
std::atomic<long> g_lConsumed(0);

// Producer n: enqueue pointers to this thread's NUM_ITEM slice of g_arrData.
bool GenerateRandomNumber_FindPointerToTheNumber_EnQueue(int n)
{
    for (long i = 0; i < NUM_ITEM; i++)
    {
        int x = i + NUM_ITEM * n;
        Data* pData = g_arrData + x;
        // Retry until accepted: enqueue() fails while the ring is full
        // (capacity 10000 < NUM_DATA). The original ignored the return
        // value, silently dropping items.
        while (!LockFreeQueue.enqueue(pData))
        {
            std::this_thread::yield();
        }
    }
    return true;
}

// Print one item's value; the mutex keeps output lines from interleaving.
void print(Data* pData)
{
    if (!pData)
        return;
    boost::lock_guard<boost::mutex> lock(mtx);
    std::cout << pData->GetData() << std::endl;
}

// Consumer: drain the queue until the pool has consumed all NUM_DATA items,
// then return so main() can join this thread.
bool Dequeue()
{
    Data *pData = NULL;
    while (g_lConsumed.load() < NUM_DATA)
    {
        if (LockFreeQueue.dequeue(&pData) && pData)
        {
            ++g_lConsumed;
            print(pData);
        }
        else
        {
            // Queue momentarily empty - back off briefly.
            std::this_thread::sleep_for(std::chrono::milliseconds(5));
        }
    }
    return true;
}

int main(int argc, char** argv)
{
    // Fill the shared pool with values 0 .. NUM_DATA-1.
    for (int i = 0; i < NUM_DATA; ++i)
    {
        *(g_arrData + i) = Data(i);
    }

    std::thread PublishThread[NUM_ENQUEUE_THREAD];
    std::thread ConsumerThread[NUM_DEQUEUE_THREAD];

    for (int i = 0; i < NUM_ENQUEUE_THREAD; i++)
    {
        PublishThread[i] = std::thread(GenerateRandomNumber_FindPointerToTheNumber_EnQueue, i);
    }
    for (int i = 0; i < NUM_DEQUEUE_THREAD; i++)
    {
        ConsumerThread[i] = std::thread{ Dequeue };
    }

    // Producers finish first; consumers exit once g_lConsumed reaches NUM_DATA.
    for (int i = 0; i < NUM_ENQUEUE_THREAD; i++)
    {
        PublishThread[i].join();
    }
    for (int i = 0; i < NUM_DEQUEUE_THREAD; i++)
    {
        ConsumerThread[i].join();
    }

    delete[] g_arrData;
    return 0;
}
说明:模板文件是原作者写的。为了验证其正确性,我改写了后面的测试程序。最终测试程序无法退出:消费者线程在 while(true) 中永远循环不会返回,main 中的 join 因而一直阻塞。这里只是测试,没有进一步完善。
在测试中还发现该队列是有容量限制的:当 data 的数量超过队列容量(10000)时,enqueue 可能失败,而生产者忽略了其返回值,数据会被悄悄丢掉,程序因此会阻塞在某个地方。以后有时间再修改,对一般工程已够用。