//  lock-free queue from
//  Michael, M. M. and Scott, M. L.,
//  "Simple, Fast, and Practical Non-Blocking and Blocking Concurrent Queue Algorithms"
//
//  Copyright (C) 2008-2013 Tim Blechmann
//
//  Distributed under the Boost Software License, Version 1.0. (See
//  accompanying file LICENSE_1_0.txt or copy at
//  http://www.boost.org/LICENSE_1_0.txt)

#ifndef BOOST_LOCKFREE_FIFO_HPP_INCLUDED
#define BOOST_LOCKFREE_FIFO_HPP_INCLUDED

#include <boost/assert.hpp>
#include <boost/static_assert.hpp>
#include <boost/type_traits/has_trivial_assign.hpp>
#include <boost/type_traits/has_trivial_destructor.hpp>
#include <boost/config.hpp> // for BOOST_LIKELY & BOOST_ALIGNMENT

#include <boost/lockfree/detail/atomic.hpp>
#include <boost/lockfree/detail/copy_payload.hpp>
#include <boost/lockfree/detail/freelist.hpp>
#include <boost/lockfree/detail/parameter.hpp>
#include <boost/lockfree/detail/tagged_ptr.hpp>

#include <boost/lockfree/lockfree_forward.hpp>

#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif


#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning(disable: 4324) // structure was padded due to __declspec(align())
#endif


namespace boost    {
namespace lockfree {
namespace detail   {

typedef parameter::parameters<boost::parameter::optional<tag::allocator>,
                              boost::parameter::optional<tag::capacity>
                             > queue_signature;

} /* namespace detail */


/** The queue class provides a multi-writer/multi-reader queue whose push and pop operations are lock-free;
 *  construction and destruction have to be synchronized. It uses a freelist for memory management:
 *  freed nodes are pushed to the freelist and are not returned to the OS before the queue is destroyed.
 *
 *  \b Policies:
 *  - \ref boost::lockfree::fixed_sized, defaults to \c boost::lockfree::fixed_sized<false> \n
 *    Can be used to completely disable dynamic memory allocations during push in order to ensure lockfree behavior. \n
 *    If the data structure is configured as fixed-sized, the internal nodes are stored inside an array and they are addressed
 *    by array indexing. This limits the possible size of the queue to the number of elements that can be addressed by the index
 *    type (usually 2**16-2), but on platforms that lack double-width compare-and-exchange instructions, this is the best way
 *    to achieve lock-freedom.
 *
 *  - \ref boost::lockfree::capacity, optional \n
 *    If this template argument is passed to the options, the size of the queue is set at compile-time.\n
 *    This option implies \c fixed_sized<true>
 *
 *  - \ref boost::lockfree::allocator, defaults to \c boost::lockfree::allocator<std::allocator<void>> \n
 *    Specifies the allocator that is used for the internal freelist
 *
 *  \b Requirements:
 *   - T must have a copy constructor
 *   - T must have a trivial assignment operator
 *   - T must have a trivial destructor
 *
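 *  \b Example: \n
 *    A minimal usage sketch; the element type, freelist size, and \c process() are illustrative:
 *  \code
 *  boost::lockfree::queue<int> q(128);   // preallocate 128 nodes on the freelist
 *  q.push(42);                           // lock-free; may allocate if the freelist is exhausted
 *  int value;
 *  while (q.pop(value))
 *      process(value);                   // process() is a user-supplied function
 *  \endcode
 *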
 * */
#ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES
template <typename T, class A0, class A1, class A2>
#else
template <typename T, typename ...Options>
#endif
class queue
{
private:
#ifndef BOOST_DOXYGEN_INVOKED

#ifdef BOOST_HAS_TRIVIAL_DESTRUCTOR
    BOOST_STATIC_ASSERT((boost::has_trivial_destructor<T>::value));
#endif

#ifdef BOOST_HAS_TRIVIAL_ASSIGN
    BOOST_STATIC_ASSERT((boost::has_trivial_assign<T>::value));
#endif

#ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES
    typedef typename detail::queue_signature::bind<A0, A1, A2>::type bound_args;
#else
    typedef typename detail::queue_signature::bind<Options...>::type bound_args;
#endif

    static const bool has_capacity = detail::extract_capacity<bound_args>::has_capacity;
    static const size_t capacity = detail::extract_capacity<bound_args>::capacity + 1; // the queue uses one dummy node
    static const bool fixed_sized = detail::extract_fixed_sized<bound_args>::value;
    static const bool node_based = !(has_capacity || fixed_sized);
    static const bool compile_time_sized = has_capacity;

    struct BOOST_ALIGNMENT(BOOST_LOCKFREE_CACHELINE_BYTES) node
    {
        typedef typename detail::select_tagged_handle<node, node_based>::tagged_handle_type tagged_node_handle;
        typedef typename detail::select_tagged_handle<node, node_based>::handle_type handle_type;

        node(T const & v, handle_type null_handle):
            next(tagged_node_handle(null_handle, 0)), data(v)
        {
            /* increment tag to avoid ABA problem */
            tagged_node_handle old_next = next.load(memory_order_relaxed);
            tagged_node_handle new_next (null_handle, old_next.get_next_tag());
            next.store(new_next, memory_order_release);
        }

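        /* the following constructors create dummy/freelist nodes: the payload
         * 'data' is deliberately left uninitialized, since it is never read
         * before a node is reused and its data member is assigned */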
        node (handle_type null_handle):
            next(tagged_node_handle(null_handle, 0))
        {}

        node(void)
        {}

        atomic<tagged_node_handle> next;
        T data;
    };

    typedef typename detail::extract_allocator<bound_args, node>::type node_allocator;
    typedef typename detail::select_freelist<node, node_allocator, compile_time_sized, fixed_sized, capacity>::type pool_t;
    typedef typename pool_t::tagged_node_handle tagged_node_handle;
    typedef typename detail::select_tagged_handle<node, node_based>::handle_type handle_type;

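    /* establish the Michael/Scott invariant: head_ and tail_ both point to a
     * single dummy node, so the list is never structurally empty */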
    void initialize(void)
    {
        node * n = pool.template construct<true, false>(pool.null_handle());
        tagged_node_handle dummy_node(pool.get_handle(n), 0);
        head_.store(dummy_node, memory_order_relaxed);
        tail_.store(dummy_node, memory_order_release);
    }

    struct implementation_defined
    {
        typedef node_allocator allocator;
        typedef std::size_t size_type;
    };

#endif

    BOOST_DELETED_FUNCTION(queue(queue const&))
    BOOST_DELETED_FUNCTION(queue& operator= (queue const&))

public:
    typedef T value_type;
    typedef typename implementation_defined::allocator allocator;
    typedef typename implementation_defined::size_type size_type;

    /**
     * \return true, if the implementation is lock-free.
     *
     * \warning It only checks whether the queue head and tail nodes and the freelist can be modified in a lock-free manner.
     *       On most platforms the whole implementation is lock-free if this is true. Using C++11-style atomics, there is
     *       no way to provide a completely accurate implementation, because one would need to test every internal
     *       node, which is impossible if further nodes will be allocated from the operating system.
     * */
    bool is_lock_free (void) const
    {
        return head_.is_lock_free() && tail_.is_lock_free() && pool.is_lock_free();
    }

    //! Construct queue
    // @{
    queue(void):
        head_(tagged_node_handle(0, 0)),
        tail_(tagged_node_handle(0, 0)),
        pool(node_allocator(), capacity)
    {
        BOOST_ASSERT(has_capacity);
        initialize();
    }

    template <typename U>
    explicit queue(typename node_allocator::template rebind<U>::other const & alloc):
        head_(tagged_node_handle(0, 0)),
        tail_(tagged_node_handle(0, 0)),
        pool(alloc, capacity)
    {
        BOOST_STATIC_ASSERT(has_capacity);
        initialize();
    }

    explicit queue(allocator const & alloc):
        head_(tagged_node_handle(0, 0)),
        tail_(tagged_node_handle(0, 0)),
        pool(alloc, capacity)
    {
        BOOST_ASSERT(has_capacity);
        initialize();
    }
    // @}

    //! Construct a queue, allocating n nodes for the freelist.
    // @{
    explicit queue(size_type n):
        head_(tagged_node_handle(0, 0)),
        tail_(tagged_node_handle(0, 0)),
        pool(node_allocator(), n + 1)
    {
        BOOST_ASSERT(!has_capacity);
        initialize();
    }

    template <typename U>
    queue(size_type n, typename node_allocator::template rebind<U>::other const & alloc):
        head_(tagged_node_handle(0, 0)),
        tail_(tagged_node_handle(0, 0)),
        pool(alloc, n + 1)
    {
        BOOST_STATIC_ASSERT(!has_capacity);
        initialize();
    }
    // @}

    /** \copydoc boost::lockfree::stack::reserve
     * */
    void reserve(size_type n)
    {
        pool.template reserve<true>(n);
    }

    /** \copydoc boost::lockfree::stack::reserve_unsafe
     * */
    void reserve_unsafe(size_type n)
    {
        pool.template reserve<false>(n);
    }

    /** Destroys the queue and frees all nodes from the freelist.
     * */
    ~queue(void)
    {
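        /* drain any remaining elements, then release the dummy node that
         * head_ still points to */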
        T dummy;
        while(unsynchronized_pop(dummy))
        {}

        pool.template destruct<false>(head_.load(memory_order_relaxed));
    }

    /** Check if the queue is empty
     *
     * \return true, if the queue is empty, false otherwise
     * \note The result is only accurate if no other thread modifies the queue. Therefore it is rarely practical to use this
     *       value in program logic.
     * */
    bool empty(void) const
    {
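        /* due to the dummy node, a quiescent queue holds no element exactly
         * when head_ and tail_ refer to the same node */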
        return pool.get_handle(head_.load()) == pool.get_handle(tail_.load());
    }

    /** Pushes object t to the queue.
     *
     * \post object will be pushed to the queue, if an internal node can be allocated
     * \returns true, if the push operation is successful.
     *
     * \note Thread-safe. If the internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be
     *       allocated from the OS. This may not be lock-free.
     * */
    bool push(T const & t)
    {
        return do_push<false>(t);
    }

    /** Pushes object t to the queue.
     *
     * \post object will be pushed to the queue, if an internal node can be allocated
     * \returns true, if the push operation is successful.
     *
     * \note Thread-safe and non-blocking. If the internal memory pool is exhausted, the operation will fail
     * \throws if memory allocator throws
     * */
    bool bounded_push(T const & t)
    {
        return do_push<true>(t);
    }


private:
#ifndef BOOST_DOXYGEN_INVOKED
    template <bool Bounded>
    bool do_push(T const & t)
    {
        node * n = pool.template construct<true, Bounded>(t, pool.null_handle());
        handle_type node_handle = pool.get_handle(n);

        if (n == NULL)
            return false;

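        /* enqueue loop of the Michael/Scott algorithm: read tail and its next
         * pointer; if tail has not changed in between and has no successor,
         * link the new node behind it and try to swing tail forward. If tail
         * already has a successor, help the other thread by advancing tail
         * before retrying. */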
        for (;;) {
            tagged_node_handle tail = tail_.load(memory_order_acquire);
            node * tail_node = pool.get_pointer(tail);
            tagged_node_handle next = tail_node->next.load(memory_order_acquire);
            node * next_ptr = pool.get_pointer(next);

            tagged_node_handle tail2 = tail_.load(memory_order_acquire);
            if (BOOST_LIKELY(tail == tail2)) {
                if (next_ptr == 0) {
                    tagged_node_handle new_tail_next(node_handle, next.get_next_tag());
                    if ( tail_node->next.compare_exchange_weak(next, new_tail_next) ) {
                        tagged_node_handle new_tail(node_handle, tail.get_next_tag());
                        tail_.compare_exchange_strong(tail, new_tail);
                        return true;
                    }
                }
                else {
                    tagged_node_handle new_tail(pool.get_handle(next_ptr), tail.get_next_tag());
                    tail_.compare_exchange_strong(tail, new_tail);
                }
            }
        }
    }
#endif

public:

    /** Pushes object t to the queue.
     *
     * \post object will be pushed to the queue, if an internal node can be allocated
     * \returns true, if the push operation is successful.
     *
     * \note Not thread-safe. If the internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be
     *       allocated from the OS. This may not be lock-free.
     * \throws if memory allocator throws
     * */
    bool unsynchronized_push(T const & t)
    {
        node * n = pool.template construct<false, false>(t, pool.null_handle());

        if (n == NULL)
            return false;

        /* single-producer version of the enqueue loop: the handle-based
         * accessors match do_push (required for fixed-sized configurations,
         * where handles are array indices rather than pointers), but plain
         * stores replace the CAS, since no other thread may interfere */
        for (;;) {
            tagged_node_handle tail = tail_.load(memory_order_relaxed);
            node * tail_node = pool.get_pointer(tail);
            tagged_node_handle next = tail_node->next.load(memory_order_relaxed);
            node * next_ptr = pool.get_pointer(next);

            if (next_ptr == 0) {
                tail_node->next.store(tagged_node_handle(pool.get_handle(n), next.get_next_tag()), memory_order_relaxed);
                tail_.store(tagged_node_handle(pool.get_handle(n), tail.get_next_tag()), memory_order_relaxed);
                return true;
            }
            else
                tail_.store(tagged_node_handle(pool.get_handle(next_ptr), tail.get_next_tag()), memory_order_relaxed);
        }
    }

    /** Pops object from queue.
     *
     * \post if the pop operation is successful, the object will be copied to ret.
     * \returns true, if the pop operation is successful, false if the queue was empty.
     *
     * \note Thread-safe and non-blocking
     * */
    bool pop (T & ret)
    {
        return pop<T>(ret);
    }

    /** Pops object from queue.
     *
     * \pre type U must be constructible from T and copyable, or T must be convertible to U
     * \post if the pop operation is successful, the object will be copied to ret.
     * \returns true, if the pop operation is successful, false if the queue was empty.
     *
     * \note Thread-safe and non-blocking
     * */
    template <typename U>
    bool pop (U & ret)
    {
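        /* dequeue loop of the Michael/Scott algorithm: read head, tail, and
         * head->next; if head has not changed in between, either report an
         * empty queue / help advance a lagging tail, or copy the payload of
         * head->next and try to swing head forward */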
        for (;;) {
            tagged_node_handle head = head_.load(memory_order_acquire);
            node * head_ptr = pool.get_pointer(head);

            tagged_node_handle tail = tail_.load(memory_order_acquire);
            tagged_node_handle next = head_ptr->next.load(memory_order_acquire);
            node * next_ptr = pool.get_pointer(next);

            tagged_node_handle head2 = head_.load(memory_order_acquire);
            if (BOOST_LIKELY(head == head2)) {
                if (pool.get_handle(head) == pool.get_handle(tail)) {
                    if (next_ptr == 0)
                        return false;

                    tagged_node_handle new_tail(pool.get_handle(next), tail.get_next_tag());
                    tail_.compare_exchange_strong(tail, new_tail);

                } else {
                    if (next_ptr == 0)
                        /* this check is not part of the original algorithm as published by Michael and Scott;
                         *
                         * however, we reuse the tagged_ptr part for the freelist and clear the next part during node
                         * allocation, so we can observe a null pointer here.
                         * */
                        continue;
                    detail::copy_payload(next_ptr->data, ret);

                    tagged_node_handle new_head(pool.get_handle(next), head.get_next_tag());
                    if (head_.compare_exchange_weak(head, new_head)) {
                        pool.template destruct<true>(head);
                        return true;
                    }
                }
            }
        }
    }

    /** Pops object from queue.
     *
     * \post if the pop operation is successful, the object will be copied to ret.
     * \returns true, if the pop operation is successful, false if the queue was empty.
     *
     * \note Not thread-safe, but non-blocking
     *
     * */
    bool unsynchronized_pop (T & ret)
    {
        return unsynchronized_pop<T>(ret);
    }

    /** Pops object from queue.
     *
     * \pre type U must be constructible from T and copyable, or T must be convertible to U
     * \post if the pop operation is successful, the object will be copied to ret.
     * \returns true, if the pop operation is successful, false if the queue was empty.
     *
     * \note Not thread-safe, but non-blocking
     *
     * */
    template <typename U>
    bool unsynchronized_pop (U & ret)
    {
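        /* single-consumer version of the dequeue loop: the same steps as pop,
         * but with relaxed loads and plain stores instead of compare-exchange */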
        for (;;) {
            tagged_node_handle head = head_.load(memory_order_relaxed);
            node * head_ptr = pool.get_pointer(head);
            tagged_node_handle tail = tail_.load(memory_order_relaxed);
            tagged_node_handle next = head_ptr->next.load(memory_order_relaxed);
            node * next_ptr = pool.get_pointer(next);

            if (pool.get_handle(head) == pool.get_handle(tail)) {
                if (next_ptr == 0)
                    return false;

                tagged_node_handle new_tail(pool.get_handle(next), tail.get_next_tag());
                tail_.store(new_tail);
            } else {
                if (next_ptr == 0)
                    /* this check is not part of the original algorithm as published by Michael and Scott;
                     *
                     * however, we reuse the tagged_ptr part for the freelist and clear the next part during node
                     * allocation, so we can observe a null pointer here.
                     * */
                    continue;
                detail::copy_payload(next_ptr->data, ret);
                tagged_node_handle new_head(pool.get_handle(next), head.get_next_tag());
                head_.store(new_head);
                pool.template destruct<false>(head);
                return true;
            }
        }
    }

    /** consumes one element via a functor
     *
     *  pops one element from the queue and applies the functor to it
     *
     * \returns true, if one element was consumed
     *
     * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
     * */
    template <typename Functor>
    bool consume_one(Functor & f)
    {
        T element;
        bool success = pop(element);
        if (success)
            f(element);

        return success;
    }

    /// \copydoc boost::lockfree::queue::consume_one(Functor & f)
    template <typename Functor>
    bool consume_one(Functor const & f)
    {
        T element;
        bool success = pop(element);
        if (success)
            f(element);

        return success;
    }

    /** consumes all elements via a functor
     *
     * sequentially pops all elements from the queue and applies the functor to each object
     *
     * \returns number of elements that were consumed
     *
     * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
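     *
     * \code
     * // a minimal sketch; handle() stands in for user code (requires C++11 lambdas):
     * std::size_t n = q.consume_all([](int v) { handle(v); });
     * \endcode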
     * */
    template <typename Functor>
    size_t consume_all(Functor & f)
    {
        size_t element_count = 0;
        while (consume_one(f))
            element_count += 1;

        return element_count;
    }

    /// \copydoc boost::lockfree::queue::consume_all(Functor & f)
    template <typename Functor>
    size_t consume_all(Functor const & f)
    {
        size_t element_count = 0;
        while (consume_one(f))
            element_count += 1;

        return element_count;
    }

private:
#ifndef BOOST_DOXYGEN_INVOKED
    atomic<tagged_node_handle> head_;
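    /* padding keeps head_ and tail_ on different cache lines to avoid
     * false sharing between producers and consumers */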
    static const int padding_size = BOOST_LOCKFREE_CACHELINE_BYTES - sizeof(tagged_node_handle);
    char padding1[padding_size];
    atomic<tagged_node_handle> tail_;
    char padding2[padding_size];

    pool_t pool;
#endif
};

} /* namespace lockfree */
} /* namespace boost */

#if defined(_MSC_VER)
#pragma warning(pop)
#endif

#endif /* BOOST_LOCKFREE_FIFO_HPP_INCLUDED */