Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
_concurrent_unordered_impl.h
/*
    Copyright (c) 2005-2019 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

/* Container implementations in this header are based on PPL implementations
   provided by Microsoft. */

#ifndef __TBB__concurrent_unordered_impl_H
#define __TBB__concurrent_unordered_impl_H
#if !defined(__TBB_concurrent_unordered_map_H) && !defined(__TBB_concurrent_unordered_set_H) && !defined(__TBB_concurrent_hash_map_H)
#error Do not #include this internal file directly; use public TBB headers instead.
#endif

#include "../tbb_stddef.h"

#include <iterator>
#include <utility>      // Need std::pair
#include <functional>   // Need std::equal_to (in ../concurrent_unordered_*.h)
#include <string>       // For tbb_hasher
#include <cstring>      // Need std::memset
#include __TBB_STD_SWAP_HEADER

#include "../atomic.h"
#include "../tbb_exception.h"
#include "../tbb_allocator.h"

#if __TBB_INITIALIZER_LISTS_PRESENT
    #include <initializer_list>
#endif

#if __TBB_CPP11_RVALUE_REF_PRESENT && !__TBB_IMPLICIT_COPY_DELETION_BROKEN
    #define __TBB_UNORDERED_NODE_HANDLE_PRESENT 1
#endif

#include "_allocator_traits.h"
#include "_tbb_hash_compare_impl.h"
#include "_template_helpers.h"

namespace tbb {
namespace interface5 {
//! @cond INTERNAL
namespace internal {

template <typename T, typename Allocator>
class split_ordered_list;
template <typename Traits>
class concurrent_unordered_base;

// Forward list iterators (without skipping dummy elements)
template<class Solist, typename Value>
class flist_iterator : public std::iterator<std::forward_iterator_tag, Value>
{
    template <typename T, typename Allocator>
    friend class split_ordered_list;
    template <typename Traits>
    friend class concurrent_unordered_base;
    template<class M, typename V>
    friend class flist_iterator;

    typedef typename Solist::nodeptr_t nodeptr_t;
public:
    typedef typename Solist::value_type value_type;
    typedef typename Solist::difference_type difference_type;
    typedef typename Solist::pointer pointer;
    typedef typename Solist::reference reference;

    flist_iterator() : my_node_ptr(0) {}
    flist_iterator( const flist_iterator<Solist, typename Solist::value_type> &other )
        : my_node_ptr(other.my_node_ptr) {}

    reference operator*() const { return my_node_ptr->my_element; }
    pointer operator->() const { return &**this; }

    flist_iterator& operator++() {
        my_node_ptr = my_node_ptr->my_next;
        return *this;
    }

    flist_iterator operator++(int) {
        flist_iterator tmp = *this;
        ++*this;
        return tmp;
    }

protected:
    flist_iterator(nodeptr_t pnode) : my_node_ptr(pnode) {}
    nodeptr_t get_node_ptr() const { return my_node_ptr; }

    nodeptr_t my_node_ptr;

    template<typename M, typename T, typename U>
    friend bool operator==( const flist_iterator<M,T> &i, const flist_iterator<M,U> &j );
    template<typename M, typename T, typename U>
    friend bool operator!=( const flist_iterator<M,T>& i, const flist_iterator<M,U>& j );
};

template<typename Solist, typename T, typename U>
bool operator==( const flist_iterator<Solist,T> &i, const flist_iterator<Solist,U> &j ) {
    return i.my_node_ptr == j.my_node_ptr;
}
template<typename Solist, typename T, typename U>
bool operator!=( const flist_iterator<Solist,T>& i, const flist_iterator<Solist,U>& j ) {
    return i.my_node_ptr != j.my_node_ptr;
}

// Split-order list iterators, needed to skip dummy elements
template<class Solist, typename Value>
class solist_iterator : public flist_iterator<Solist, Value>
{
    typedef flist_iterator<Solist, Value> base_type;
    typedef typename Solist::nodeptr_t nodeptr_t;

    template <typename T, typename Allocator>
    friend class split_ordered_list;
    template<class M, typename V>
    friend class solist_iterator;
    template <typename Traits>
    friend class concurrent_unordered_base;
    template<typename M, typename T, typename U>
    friend bool operator==( const solist_iterator<M,T> &i, const solist_iterator<M,U> &j );
    template<typename M, typename T, typename U>
    friend bool operator!=( const solist_iterator<M,T>& i, const solist_iterator<M,U>& j );

    const Solist *my_list_ptr;
    solist_iterator(nodeptr_t pnode, const Solist *plist) : base_type(pnode), my_list_ptr(plist) {}

public:
    typedef typename Solist::value_type value_type;
    typedef typename Solist::difference_type difference_type;
    typedef typename Solist::pointer pointer;
    typedef typename Solist::reference reference;

    solist_iterator() {}
    solist_iterator( const solist_iterator<Solist, typename Solist::value_type> &other )
        : base_type(other), my_list_ptr(other.my_list_ptr) {}

    reference operator*() const {
        return this->base_type::operator*();
    }

    pointer operator->() const {
        return (&**this);
    }

    solist_iterator& operator++() {
        do ++(*(base_type *)this);
        while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());

        return (*this);
    }

    solist_iterator operator++(int) {
        solist_iterator tmp = *this;
        do ++*this;
        while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());

        return (tmp);
    }
};

template<typename Solist, typename T, typename U>
bool operator==( const solist_iterator<Solist,T> &i, const solist_iterator<Solist,U> &j ) {
    return i.my_node_ptr == j.my_node_ptr && i.my_list_ptr == j.my_list_ptr;
}
template<typename Solist, typename T, typename U>
bool operator!=( const solist_iterator<Solist,T>& i, const solist_iterator<Solist,U>& j ) {
    return i.my_node_ptr != j.my_node_ptr || i.my_list_ptr != j.my_list_ptr;
}

// Forward type and class definitions
typedef size_t sokey_t;


// Forward list in which elements are sorted in a split-order
template <typename T, typename Allocator>
class split_ordered_list
{
public:
    typedef split_ordered_list<T, Allocator> self_type;

    typedef typename tbb::internal::allocator_rebind<Allocator, T>::type allocator_type;

    struct node;
    typedef node *nodeptr_t;

    typedef typename tbb::internal::allocator_traits<allocator_type>::value_type value_type;
    typedef typename tbb::internal::allocator_traits<allocator_type>::size_type size_type;
    typedef typename tbb::internal::allocator_traits<allocator_type>::difference_type difference_type;
    typedef typename tbb::internal::allocator_traits<allocator_type>::pointer pointer;
    typedef typename tbb::internal::allocator_traits<allocator_type>::const_pointer const_pointer;
    // No support for reference/const_reference in allocator traits
    typedef value_type& reference;
    typedef const value_type& const_reference;

    typedef solist_iterator<self_type, const value_type> const_iterator;
    typedef solist_iterator<self_type, value_type> iterator;
    typedef flist_iterator<self_type, const value_type> raw_const_iterator;
    typedef flist_iterator<self_type, value_type> raw_iterator;

    // Node that holds the element in a split-ordered list
    struct node : tbb::internal::no_assign
    {
    private:
        // for compilers that try to generate default constructors though they are not needed.
        node(); // VS 2008, 2010, 2012
    public:
        // Initialize the node with the given order key
        void init(sokey_t order_key) {
            my_order_key = order_key;
            my_next = NULL;
        }

        // Return the order key (needed for hashing)
        sokey_t get_order_key() const { // TODO: remove
            return my_order_key;
        }

        // Inserts the new element in the list in an atomic fashion
        nodeptr_t atomic_set_next(nodeptr_t new_node, nodeptr_t current_node)
        {
            // Try to change the next pointer on the current element to a new element, only if it still points to the cached next
            nodeptr_t exchange_node = tbb::internal::as_atomic(my_next).compare_and_swap(new_node, current_node);

            if (exchange_node == current_node) // TODO: why this branch?
            {
                // Operation succeeded, return the new node
                return new_node;
            }
            else
            {
                // Operation failed, return the "interfering" node
                return exchange_node;
            }
        }

        // Checks if this element in the list is a dummy, order-enforcing node. Dummy nodes are used by buckets
        // in the hash table to quickly index into the right subsection of the split-ordered list.
        bool is_dummy() const {
            return (my_order_key & 0x1) == 0;
        }


        nodeptr_t my_next;      // Next element in the list
        value_type my_element;  // Element storage
        sokey_t my_order_key;   // Order key for this element
    };
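
    // Worked example (illustrative, not part of the original source): assume
    // 8-bit order keys and 4 buckets. A dummy key is the bit-reversed bucket
    // index with the LSB cleared; a regular key is the bit-reversed hash with
    // the LSB set:
    //
    //     bucket 0 -> reverse(00000000) = 0x00     (dummy, even)
    //     bucket 2 -> reverse(00000010) = 0x40     (dummy, even)
    //     bucket 1 -> reverse(00000001) = 0x80     (dummy, even)
    //     bucket 3 -> reverse(00000011) = 0xC0     (dummy, even)
    //     hash 6   -> reverse(00000110) | 1 = 0x61 (regular, odd)
    //
    // The element with hash 6 maps to bucket 6 % 4 == 2, and its order key
    // 0x61 falls between the dummies 0x40 (bucket 2) and 0x80 (bucket 1), so
    // a scan starting at bucket 2's dummy node finds it. Doubling the table
    // to 8 buckets merely inserts a new dummy (bucket 6 -> 0x60) inside that
    // range; existing nodes never move.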

    // Allocate a new node with the given order key; used to allocate dummy nodes
    nodeptr_t create_node(sokey_t order_key) {
        nodeptr_t pnode = my_node_allocator.allocate(1);
        pnode->init(order_key);
        return (pnode);
    }

    // Allocate a new node with the given order key and value
    template<typename Arg>
    nodeptr_t create_node(sokey_t order_key, __TBB_FORWARDING_REF(Arg) t,
                          tbb::internal::true_type = tbb::internal::true_type()) {
        nodeptr_t pnode = my_node_allocator.allocate(1);

        //TODO: use RAII scoped guard instead of explicit catch
        __TBB_TRY {
            new(static_cast<void*>(&pnode->my_element)) T(tbb::internal::forward<Arg>(t));
            pnode->init(order_key);
        } __TBB_CATCH(...) {
            my_node_allocator.deallocate(pnode, 1);
            __TBB_RETHROW();
        }

        return (pnode);
    }
    // A helper to avoid excessive requirements in internal_insert
    template<typename Arg>
    nodeptr_t create_node(sokey_t, __TBB_FORWARDING_REF(Arg),
                          /*AllowCreate=*/tbb::internal::false_type) {
        __TBB_ASSERT(false, "This compile-time helper should never get called");
        return nodeptr_t();
    }

    // Allocate a new node with the given parameters for constructing value
    template<typename __TBB_PARAMETER_PACK Args>
    nodeptr_t create_node_v(__TBB_FORWARDING_REF(Args) __TBB_PARAMETER_PACK args) {
        nodeptr_t pnode = my_node_allocator.allocate(1);

        //TODO: use RAII scoped guard instead of explicit catch
        __TBB_TRY {
            new(static_cast<void*>(&pnode->my_element)) T(__TBB_PACK_EXPANSION(tbb::internal::forward<Args>(args)));
        } __TBB_CATCH(...) {
            my_node_allocator.deallocate(pnode, 1);
            __TBB_RETHROW();
        }

        return (pnode);
    }

    split_ordered_list(allocator_type a = allocator_type())
        : my_node_allocator(a), my_element_count(0)
    {
        // Immediately allocate a dummy node with order key of 0. This node
        // will always be the head of the list.
        my_head = create_node(sokey_t(0));
    }

    ~split_ordered_list()
    {
        // Clear the list
        clear();

        // Remove the head element which is not cleared by clear()
        nodeptr_t pnode = my_head;
        my_head = NULL;

        __TBB_ASSERT(pnode != NULL && pnode->my_next == NULL, "Invalid head list node");

        destroy_node(pnode);
    }

    // Common forward list functions

    allocator_type get_allocator() const {
        return (my_node_allocator);
    }

    void clear() {
        nodeptr_t pnext;
        nodeptr_t pnode = my_head;

        __TBB_ASSERT(my_head != NULL, "Invalid head list node");
        pnext = pnode->my_next;
        pnode->my_next = NULL;
        pnode = pnext;

        while (pnode != NULL)
        {
            pnext = pnode->my_next;
            destroy_node(pnode);
            pnode = pnext;
        }

        my_element_count = 0;
    }

    // Returns a first non-dummy element in the SOL
    iterator begin() {
        return first_real_iterator(raw_begin());
    }

    // Returns a first non-dummy element in the SOL
    const_iterator begin() const {
        return first_real_iterator(raw_begin());
    }

    iterator end() {
        return (iterator(0, this));
    }

    const_iterator end() const {
        return (const_iterator(0, this));
    }

    const_iterator cbegin() const {
        return (((const self_type *)this)->begin());
    }

    const_iterator cend() const {
        return (((const self_type *)this)->end());
    }

    // Checks if the number of elements (non-dummy) is 0
    bool empty() const {
        return (my_element_count == 0);
    }

    // Returns the number of non-dummy elements in the list
    size_type size() const {
        return my_element_count;
    }

    // Returns the maximum size of the list, determined by the allocator
    size_type max_size() const {
        return my_node_allocator.max_size();
    }

    // Swaps 'this' list with the passed in one
    void swap(self_type& other)
    {
        if (this == &other)
        {
            // Nothing to do
            return;
        }

        std::swap(my_element_count, other.my_element_count);
        std::swap(my_head, other.my_head);
    }

    // Split-order list functions

    // Returns a first element in the SOL, which is always a dummy
    raw_iterator raw_begin() {
        return raw_iterator(my_head);
    }

    // Returns a first element in the SOL, which is always a dummy
    raw_const_iterator raw_begin() const {
        return raw_const_iterator(my_head);
    }

    raw_iterator raw_end() {
        return raw_iterator(0);
    }

    raw_const_iterator raw_end() const {
        return raw_const_iterator(0);
    }

    static sokey_t get_order_key(const raw_const_iterator& it) {
        return it.get_node_ptr()->get_order_key();
    }

    static sokey_t get_safe_order_key(const raw_const_iterator& it) {
        if( !it.get_node_ptr() ) return ~sokey_t(0);
        return it.get_node_ptr()->get_order_key();
    }

    // Returns a public iterator version of the internal iterator. Public iterator must not
    // be a dummy private iterator.
    iterator get_iterator(raw_iterator it) {
        __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
        return iterator(it.get_node_ptr(), this);
    }

    // Returns a public iterator version of the internal iterator. Public iterator must not
    // be a dummy private iterator.
    const_iterator get_iterator(raw_const_iterator it) const {
        __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
        return const_iterator(it.get_node_ptr(), this);
    }

    // Returns a non-const version of the raw_iterator
    raw_iterator get_iterator(raw_const_iterator it) {
        return raw_iterator(it.get_node_ptr());
    }

    // Returns a non-const version of the iterator
    static iterator get_iterator(const_iterator it) {
        return iterator(it.my_node_ptr, it.my_list_ptr);
    }

    // Returns a public iterator version of a first non-dummy internal iterator at or after
    // the passed in internal iterator.
    iterator first_real_iterator(raw_iterator it)
    {
        // Skip all dummy, internal only iterators
        while (it != raw_end() && it.get_node_ptr()->is_dummy())
            ++it;

        return iterator(it.get_node_ptr(), this);
    }

    // Returns a public iterator version of a first non-dummy internal iterator at or after
    // the passed in internal iterator.
    const_iterator first_real_iterator(raw_const_iterator it) const
    {
        // Skip all dummy, internal only iterators
        while (it != raw_end() && it.get_node_ptr()->is_dummy())
            ++it;

        return const_iterator(it.get_node_ptr(), this);
    }

    // Erase an element using the allocator
    void destroy_node(nodeptr_t pnode) {
        if (!pnode->is_dummy()) my_node_allocator.destroy(pnode);
        my_node_allocator.deallocate(pnode, 1);
    }

    // Try to insert a new element in the list.
    // If insert fails, return the node that was inserted instead.
    static nodeptr_t try_insert_atomic(nodeptr_t previous, nodeptr_t new_node, nodeptr_t current_node) {
        new_node->my_next = current_node;
        return previous->atomic_set_next(new_node, current_node);
    }

    // Insert a new element between passed in iterators
    std::pair<iterator, bool> try_insert(raw_iterator it, raw_iterator next, nodeptr_t pnode, size_type *new_count)
    {
        nodeptr_t inserted_node = try_insert_atomic(it.get_node_ptr(), pnode, next.get_node_ptr());

        if (inserted_node == pnode)
        {
            // If the insert succeeded, check that the order is correct and increment the element count
            check_range(it, next);
            *new_count = tbb::internal::as_atomic(my_element_count).fetch_and_increment();
            return std::pair<iterator, bool>(iterator(pnode, this), true);
        }
        else
        {
            return std::pair<iterator, bool>(end(), false);
        }
    }
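
    // A minimal standalone analogue of the lock-free linking performed by
    // try_insert_atomic above, written with C++11 std::atomic instead of the
    // tbb::atomic machinery (illustrative sketch, not part of the original
    // source). The new node is published only if prev->next still equals the
    // value observed when the insertion point was chosen:
    //
    //     #include <atomic>
    //
    //     struct Node { std::atomic<Node*> next; int value; };
    //
    //     // Returns true if 'n' was linked after 'prev'; false means another
    //     // thread changed prev->next first and the caller must re-search.
    //     bool try_link(Node* prev, Node* n, Node* expected_next) {
    //         n->next.store(expected_next, std::memory_order_relaxed);
    //         return prev->next.compare_exchange_strong(expected_next, n);
    //     }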

    // Insert a new dummy element, starting search at a parent dummy element
    raw_iterator insert_dummy(raw_iterator it, sokey_t order_key)
    {
        raw_iterator last = raw_end();
        raw_iterator where = it;

        __TBB_ASSERT(where != last, "Invalid head node");

        ++where;

        // Create a dummy element up front, even though it may be discarded (due to concurrent insertion)
        nodeptr_t dummy_node = create_node(order_key);

        for (;;)
        {
            __TBB_ASSERT(it != last, "Invalid head list node");

            // If the head iterator is at the end of the list, or past the point where this dummy
            // node needs to be inserted, then try to insert it.
            if (where == last || get_order_key(where) > order_key)
            {
                __TBB_ASSERT(get_order_key(it) < order_key, "Invalid node order in the list");

                // Try to insert it in the right place
                nodeptr_t inserted_node = try_insert_atomic(it.get_node_ptr(), dummy_node, where.get_node_ptr());

                if (inserted_node == dummy_node)
                {
                    // Insertion succeeded, check the list for order violations
                    check_range(it, where);
                    return raw_iterator(dummy_node);
                }
                else
                {
                    // Insertion failed: either dummy node was inserted by another thread, or
                    // a real element was inserted at exactly the same place as dummy node.
                    // Proceed with the search from the previous location where order key was
                    // known to be larger (note: this is legal only because there is no safe
                    // concurrent erase operation supported).
                    where = it;
                    ++where;
                    continue;
                }
            }
            else if (get_order_key(where) == order_key)
            {
                // Another dummy node with the same value found, discard the new one.
                destroy_node(dummy_node);
                return where;
            }

            // Move the iterator forward
            it = where;
            ++where;
        }
    }

    nodeptr_t erase_node_impl(raw_iterator previous, raw_const_iterator& where) {
        nodeptr_t pnode = (where++).get_node_ptr();
        nodeptr_t prevnode = previous.get_node_ptr();
        __TBB_ASSERT(prevnode->my_next == pnode, "Erase must take consecutive iterators");
        prevnode->my_next = pnode->my_next;
        return pnode;
    }

    // This erase function can handle both real and dummy nodes
    void erase_node(raw_iterator previous, raw_const_iterator& where,
                    /*allow_destroy*/tbb::internal::true_type)
    {
        nodeptr_t pnode = erase_node_impl(previous, where);
        destroy_node(pnode);
    }

    void erase_node(raw_iterator previous, raw_const_iterator& where,
                    /*allow_destroy*/tbb::internal::false_type)
    {
        erase_node_impl(previous, where);
    }

    void erase_node(raw_iterator previous, raw_const_iterator& where) {
        erase_node(previous, where, /*allow_destroy*/tbb::internal::true_type());
    }

    // Erase the element (previous node needs to be passed because this is a forward only list)
    template<typename AllowDestroy>
    iterator erase_node(raw_iterator previous, const_iterator where, AllowDestroy)
    {
        raw_const_iterator it = where;
        erase_node(previous, it, AllowDestroy());

        return get_iterator(first_real_iterator(it));
    }

    iterator erase_node(raw_iterator previous, const_iterator& where) {
        return erase_node(previous, where, /*allow_destroy*/tbb::internal::true_type());
    }


    // Move all elements from the passed in split-ordered list to this one
    void move_all(self_type& source)
    {
        raw_const_iterator first = source.raw_begin();
        raw_const_iterator last = source.raw_end();

        if (first == last)
            return;

        nodeptr_t previous_node = my_head;
        raw_const_iterator begin_iterator = first++;

        // Move all elements one by one, including dummy ones
        for (raw_const_iterator it = first; it != last;)
        {
            nodeptr_t pnode = it.get_node_ptr();

            nodeptr_t dummy_node = pnode->is_dummy() ? create_node(pnode->get_order_key()) : create_node(pnode->get_order_key(), pnode->my_element);
            previous_node = try_insert_atomic(previous_node, dummy_node, NULL);
            __TBB_ASSERT(previous_node != NULL, "Insertion must succeed");
            raw_const_iterator where = it++;
            source.erase_node(get_iterator(begin_iterator), where);
        }
        check_range();
    }


private:
    // Need to setup private fields of split_ordered_list in move constructor and assignment of concurrent_unordered_base
    template <typename Traits>
    friend class concurrent_unordered_base;

    // Check the list for order violations
    void check_range( raw_iterator first, raw_iterator last )
    {
#if TBB_USE_ASSERT
        for (raw_iterator it = first; it != last; ++it)
        {
            raw_iterator next = it;
            ++next;

            __TBB_ASSERT(next == raw_end() || get_order_key(next) >= get_order_key(it), "!!! List order inconsistency !!!");
        }
#else
        tbb::internal::suppress_unused_warning(first, last);
#endif
    }
    void check_range()
    {
#if TBB_USE_ASSERT
        check_range( raw_begin(), raw_end() );
#endif
    }

    typename tbb::internal::allocator_rebind<allocator_type, node>::type my_node_allocator; // Allocator object for nodes
    size_type my_element_count;  // Total item count, not counting dummy nodes
    nodeptr_t my_head;           // pointer to head node
};

#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
#pragma warning(push)
#pragma warning(disable: 4127) // warning C4127: conditional expression is constant
#endif

template <typename Traits>
class concurrent_unordered_base : public Traits
{
protected:
    // Type definitions
    typedef concurrent_unordered_base<Traits> self_type;
    typedef typename Traits::value_type value_type;
    typedef typename Traits::key_type key_type;
    typedef typename Traits::hash_compare hash_compare;
    typedef typename Traits::allocator_type allocator_type;
    typedef typename hash_compare::hasher hasher;
    typedef typename hash_compare::key_equal key_equal;

    typedef typename tbb::internal::allocator_traits<allocator_type>::size_type size_type;
    typedef typename tbb::internal::allocator_traits<allocator_type>::difference_type difference_type;
    typedef typename tbb::internal::allocator_traits<allocator_type>::pointer pointer;
    typedef typename tbb::internal::allocator_traits<allocator_type>::const_pointer const_pointer;
    // No support for reference/const_reference in allocator
    typedef value_type& reference;
    typedef const value_type& const_reference;

    typedef split_ordered_list<value_type, typename Traits::allocator_type> solist_t;
    typedef typename solist_t::nodeptr_t nodeptr_t;
    // Iterators that walk the entire split-order list, including dummy nodes
    typedef typename solist_t::raw_iterator raw_iterator;
    typedef typename solist_t::raw_const_iterator raw_const_iterator;
    typedef typename solist_t::iterator iterator; // TODO: restore const iterator for unordered_sets
    typedef typename solist_t::const_iterator const_iterator;
    typedef iterator local_iterator;
    typedef const_iterator const_local_iterator;
#if __TBB_UNORDERED_NODE_HANDLE_PRESENT
    typedef typename Traits::node_type node_type;
#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
    using Traits::my_hash_compare;
    using Traits::get_key;
    using Traits::allow_multimapping;

    static const size_type initial_bucket_number = 8; // Initial number of buckets

private:
    template<typename OtherTraits>
    friend class concurrent_unordered_base;

    typedef std::pair<iterator, iterator> pairii_t;
    typedef std::pair<const_iterator, const_iterator> paircc_t;

    static size_type const pointers_per_table = sizeof(size_type) * 8; // One bucket segment per bit
    static const size_type initial_bucket_load = 4;                    // Initial maximum number of elements per bucket

    // RAII helper: clears the bucket table if construction fails partway
    struct call_internal_clear_on_exit{
        concurrent_unordered_base* my_instance;
        call_internal_clear_on_exit(concurrent_unordered_base* instance) : my_instance(instance) {}
        void dismiss(){ my_instance = NULL;}
        ~call_internal_clear_on_exit(){
            if (my_instance){
                my_instance->internal_clear();
            }
        }
    };
protected:
    // Constructors/Destructors
    concurrent_unordered_base(size_type n_of_buckets = initial_bucket_number,
        const hash_compare& hc = hash_compare(), const allocator_type& a = allocator_type())
        : Traits(hc), my_solist(a),
          my_allocator(a), my_maximum_bucket_size((float) initial_bucket_load)
    {
        if( n_of_buckets == 0) ++n_of_buckets;
        my_number_of_buckets = size_type(1)<<__TBB_Log2((uintptr_t)n_of_buckets*2-1); // round up to power of 2
        internal_init();
    }

    concurrent_unordered_base(const concurrent_unordered_base& right, const allocator_type& a)
        : Traits(right.my_hash_compare), my_solist(a), my_allocator(a)
    {
        internal_init();
        internal_copy(right);
    }

    concurrent_unordered_base(const concurrent_unordered_base& right)
        : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator())
    {
        //FIXME: exception safety seems to be broken here
        internal_init();
        internal_copy(right);
    }

#if __TBB_CPP11_RVALUE_REF_PRESENT
    concurrent_unordered_base(concurrent_unordered_base&& right)
        : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator()),
          my_maximum_bucket_size(float(initial_bucket_load))
    {
        my_number_of_buckets = initial_bucket_number;
        internal_init();
        swap(right);
    }

    concurrent_unordered_base(concurrent_unordered_base&& right, const allocator_type& a)
        : Traits(right.my_hash_compare), my_solist(a), my_allocator(a)
    {
        call_internal_clear_on_exit clear_buckets_on_exception(this);

        internal_init();
        if (a == right.get_allocator()){
            my_number_of_buckets = initial_bucket_number;
            my_maximum_bucket_size = float(initial_bucket_load);
            this->swap(right);
        }else{
            my_maximum_bucket_size = right.my_maximum_bucket_size;
            my_number_of_buckets = right.my_number_of_buckets;
            my_solist.my_element_count = right.my_solist.my_element_count;

            if (! right.my_solist.empty()){
                nodeptr_t previous_node = my_solist.my_head;

                // Move all elements one by one, including dummy ones
                for (raw_const_iterator it = ++(right.my_solist.raw_begin()), last = right.my_solist.raw_end(); it != last; ++it)
                {
                    const nodeptr_t pnode = it.get_node_ptr();
                    nodeptr_t node;
                    if (pnode->is_dummy()) {
                        node = my_solist.create_node(pnode->get_order_key());
                        size_type bucket = __TBB_ReverseBits(pnode->get_order_key()) % my_number_of_buckets;
                        set_bucket(bucket, node);
                    }else{
                        node = my_solist.create_node(pnode->get_order_key(), std::move(pnode->my_element));
                    }

                    previous_node = my_solist.try_insert_atomic(previous_node, node, NULL);
                    __TBB_ASSERT(previous_node != NULL, "Insertion of node failed. Concurrent inserts in constructor ?");
                }
                my_solist.check_range();
            }
        }

        clear_buckets_on_exception.dismiss();
    }

#endif // __TBB_CPP11_RVALUE_REF_PRESENT

    concurrent_unordered_base& operator=(const concurrent_unordered_base& right) {
        if (this != &right)
            internal_copy(right);
        return (*this);
    }

#if __TBB_CPP11_RVALUE_REF_PRESENT
    concurrent_unordered_base& operator=(concurrent_unordered_base&& other)
    {
        if(this != &other){
            typedef typename tbb::internal::allocator_traits<allocator_type>::propagate_on_container_move_assignment pocma_t;
            if(pocma_t::value || this->my_allocator == other.my_allocator) {
                concurrent_unordered_base trash (std::move(*this));
                swap(other);
                if (pocma_t::value) {
                    using std::swap;
                    //TODO: swapping allocators here may be a problem, replace with single direction moving
                    swap(this->my_solist.my_node_allocator, other.my_solist.my_node_allocator);
                    swap(this->my_allocator, other.my_allocator);
                }
            } else {
                concurrent_unordered_base moved_copy(std::move(other), this->my_allocator);
                this->swap(moved_copy);
            }
        }
        return *this;
    }

#endif // __TBB_CPP11_RVALUE_REF_PRESENT

#if __TBB_INITIALIZER_LISTS_PRESENT
    concurrent_unordered_base& operator=(std::initializer_list<value_type> il)
    {
        this->clear();
        this->insert(il.begin(), il.end());
        return (*this);
    }
#endif // __TBB_INITIALIZER_LISTS_PRESENT


    ~concurrent_unordered_base() {
        // Delete all node segments
        internal_clear();
    }

#if __TBB_UNORDERED_NODE_HANDLE_PRESENT
    template<typename SourceType>
    void internal_merge(SourceType& source) {
        typedef typename SourceType::iterator source_iterator;
        __TBB_STATIC_ASSERT((tbb::internal::is_same_type<node_type,
                            typename SourceType::node_type>::value),
                            "Incompatible containers cannot be merged");

        for(source_iterator it = source.begin(); it != source.end();) {
            source_iterator where = it++;
            if (allow_multimapping || find(get_key(*where)) == end()) {
                std::pair<node_type, raw_iterator> extract_result = source.internal_extract(where);

                // If the insertion fails, ownership of the node returns to extract_result.first,
                // which remains a valid node handle
                if (!insert(std::move(extract_result.first)).second) {
                    raw_iterator next = extract_result.second;
                    raw_iterator current = next++;

                    __TBB_ASSERT(extract_result.first.my_node->get_order_key() >= current.get_node_ptr()->get_order_key(),
                                 "Wrong nodes order in source container");
                    __TBB_ASSERT(next==source.my_solist.raw_end() ||
                                 extract_result.first.my_node->get_order_key() <= next.get_node_ptr()->get_order_key(),
                                 "Wrong nodes order in source container");

                    size_t new_count = 0; // To use try_insert()
                    bool insert_result =
                        source.my_solist.try_insert(current, next, extract_result.first.my_node, &new_count).second;
                    __TBB_ASSERT_EX(insert_result, "Return to source must be successful. "
                                                   "Changing source container while merging is unsafe.");
                }
                extract_result.first.deactivate();
            }
        }
    }
#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
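
    // Usage sketch, assuming the C++17-style merge() that the public
    // concurrent_unordered_map/set wrappers implement on top of
    // internal_merge(). Nodes are moved rather than copied, and on a key
    // clash with unique keys the node stays in the source; per the assertion
    // above, the source must not be modified concurrently while merging:
    //
    //     tbb::concurrent_unordered_map<int, int> dst, src;
    //     dst.insert(std::make_pair(1, 10));
    //     src.insert(std::make_pair(1, 99)); // key clash with dst
    //     src.insert(std::make_pair(2, 20));
    //     dst.merge(src);
    //     // dst == {1:10, 2:20}, src == {1:99}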

public:
    allocator_type get_allocator() const {
        return my_solist.get_allocator();
    }

    // Size and capacity function
    bool empty() const {
        return my_solist.empty();
    }

    size_type size() const {
        return my_solist.size();
    }

    size_type max_size() const {
        return my_solist.max_size();
    }

    // Iterators
    iterator begin() {
        return my_solist.begin();
    }

    const_iterator begin() const {
        return my_solist.begin();
    }

    iterator end() {
        return my_solist.end();
    }

    const_iterator end() const {
        return my_solist.end();
    }

    const_iterator cbegin() const {
        return my_solist.cbegin();
    }

    const_iterator cend() const {
        return my_solist.cend();
    }

    // Parallel traversal support
    class const_range_type : tbb::internal::no_assign {
        const concurrent_unordered_base &my_table;
        raw_const_iterator my_begin_node;
        raw_const_iterator my_end_node;
        mutable raw_const_iterator my_midpoint_node;
    public:
        //! Type for size of a range
        typedef typename concurrent_unordered_base::size_type size_type;
        typedef typename concurrent_unordered_base::value_type value_type;
        typedef typename concurrent_unordered_base::reference reference;
        typedef typename concurrent_unordered_base::difference_type difference_type;
        typedef typename concurrent_unordered_base::const_iterator iterator;

        //! True if range is empty.
        bool empty() const {return my_begin_node == my_end_node;}

        //! True if range can be partitioned into two subranges.
        bool is_divisible() const {
            return my_midpoint_node != my_end_node;
        }
        //! Split range.
        const_range_type( const_range_type &r, split ) :
            my_table(r.my_table), my_end_node(r.my_end_node)
        {
            r.my_end_node = my_begin_node = r.my_midpoint_node;
            __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" );
            __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" );
            set_midpoint();
            r.set_midpoint();
        }
        //! Init range with container and grainsize specified
        const_range_type( const concurrent_unordered_base &a_table ) :
            my_table(a_table), my_begin_node(a_table.my_solist.begin()),
            my_end_node(a_table.my_solist.end())
        {
            set_midpoint();
        }

        iterator begin() const { return my_table.my_solist.get_iterator(my_begin_node); }
        iterator end() const { return my_table.my_solist.get_iterator(my_end_node); }
        //! The grain size for this range.
        size_type grainsize() const { return 1; }

        //! Set my_midpoint_node to point approximately half way between my_begin_node and my_end_node.
        void set_midpoint() const {
            if( my_begin_node == my_end_node ) // not divisible
                my_midpoint_node = my_end_node;
            else {
                sokey_t begin_key = solist_t::get_safe_order_key(my_begin_node);
                sokey_t end_key = solist_t::get_safe_order_key(my_end_node);
                size_t mid_bucket = __TBB_ReverseBits( begin_key + (end_key-begin_key)/2 ) % my_table.my_number_of_buckets;
                while ( !my_table.is_initialized(mid_bucket) ) mid_bucket = my_table.get_parent(mid_bucket);
                if(__TBB_ReverseBits(mid_bucket) > begin_key) {
                    // found a dummy_node between begin and end
                    my_midpoint_node = my_table.my_solist.first_real_iterator(my_table.get_bucket(mid_bucket));
                }
                else {
                    // didn't find a dummy node between begin and end.
                    my_midpoint_node = my_end_node;
                }
#if TBB_USE_ASSERT
                {
                    sokey_t mid_key = solist_t::get_safe_order_key(my_midpoint_node);
                    __TBB_ASSERT( begin_key < mid_key, "my_begin_node is after my_midpoint_node" );
                    __TBB_ASSERT( mid_key <= end_key, "my_midpoint_node is after my_end_node" );
                }
#endif // TBB_USE_ASSERT
            }
        }
    };

    class range_type : public const_range_type {
    public:
        range_type( range_type &r, split ) : const_range_type( r, split() ) {}
        //! Init range with container and grainsize specified
        range_type( const concurrent_unordered_base &a_table ) : const_range_type(a_table) {}

        typedef typename concurrent_unordered_base::iterator iterator;
        iterator begin() const { return solist_t::get_iterator( const_range_type::begin() ); }
        iterator end() const { return solist_t::get_iterator( const_range_type::end() ); }
    };

    range_type range() {
        return range_type( *this );
    }

    const_range_type range() const {
        return const_range_type( *this );
    }
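
    // Usage sketch, assuming the public wrappers and tbb/parallel_for.h:
    // range() lets parallel algorithms walk the table, splitting at dummy
    // nodes via set_midpoint():
    //
    //     #include "tbb/concurrent_unordered_map.h"
    //     #include "tbb/parallel_for.h"
    //
    //     typedef tbb::concurrent_unordered_map<int, long> table_t;
    //     long parallel_sum(table_t& t) {
    //         tbb::atomic<long> total;
    //         total = 0;
    //         tbb::parallel_for(t.range(), [&](table_t::range_type& r) {
    //             long local = 0;
    //             for (table_t::iterator it = r.begin(); it != r.end(); ++it)
    //                 local += it->second;
    //             total += local;
    //         });
    //         return total;
    //     }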

    // Modifiers
    std::pair<iterator, bool> insert(const value_type& value) {
        return internal_insert</*AllowCreate=*/tbb::internal::true_type,
                               /*AllowDestroy=*/tbb::internal::true_type>(value);
    }

    iterator insert(const_iterator, const value_type& value) {
        // Ignore hint
        return insert(value).first;
    }

#if __TBB_CPP11_RVALUE_REF_PRESENT
    std::pair<iterator, bool> insert(value_type&& value) {
        return internal_insert</*AllowCreate=*/tbb::internal::true_type,
                               /*AllowDestroy=*/tbb::internal::true_type>(std::move(value));
    }

    iterator insert(const_iterator, value_type&& value) {
        // Ignore hint
        return insert(std::move(value)).first;
    }
#endif /*__TBB_CPP11_RVALUE_REF_PRESENT*/

#if __TBB_UNORDERED_NODE_HANDLE_PRESENT
    std::pair<iterator, bool> insert(node_type&& nh) {
        if (!nh.empty()) {
            nodeptr_t handled_node = nh.my_node;
            std::pair<iterator, bool> insert_result =
                internal_insert</*AllowCreate=*/tbb::internal::false_type,
                                /*AllowDestroy=*/tbb::internal::false_type>
                                (handled_node->my_element, handled_node);
            if (insert_result.second)
                nh.deactivate();
            return insert_result;
        }
        return std::pair<iterator, bool>(end(), false);
    }

    iterator insert(const_iterator, node_type&& nh) {
        return insert(std::move(nh)).first;
    }
#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT

#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT
    template<typename... Args>
    std::pair<iterator, bool> emplace(Args&&... args) {
        nodeptr_t pnode = my_solist.create_node_v(tbb::internal::forward<Args>(args)...);

        return internal_insert</*AllowCreate=*/tbb::internal::false_type,
                               /*AllowDestroy=*/tbb::internal::true_type>(pnode->my_element, pnode);
    }

    template<typename... Args>
    iterator emplace_hint(const_iterator, Args&&... args) {
        // Ignore hint
        return emplace(tbb::internal::forward<Args>(args)...).first;
    }
#endif // __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT


    template<class Iterator>
    void insert(Iterator first, Iterator last) {
        for (Iterator it = first; it != last; ++it)
            insert(*it);
    }

#if __TBB_INITIALIZER_LISTS_PRESENT
    void insert(std::initializer_list<value_type> il) {
        insert(il.begin(), il.end());
    }
#endif
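
    // Usage sketch (assumes the public wrappers): insert() and emplace() are
    // the thread-safe modifiers, so several threads may populate one table
    // concurrently without external locking:
    //
    //     #include <thread>
    //     #include "tbb/concurrent_unordered_map.h"
    //
    //     tbb::concurrent_unordered_map<int, int> table;
    //
    //     void writer(int base) {
    //         for (int i = 0; i < 1000; ++i)
    //             table.insert(std::make_pair(base + i, i));
    //     }
    //
    //     // std::thread t1(writer, 0), t2(writer, 1000);
    //     // t1.join(); t2.join();   // table.size() == 2000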

    iterator unsafe_erase(const_iterator where) {
        return internal_erase(where);
    }

    iterator unsafe_erase(const_iterator first, const_iterator last) {
        while (first != last)
            unsafe_erase(first++);
        return my_solist.get_iterator(first);
    }

    size_type unsafe_erase(const key_type& key) {
        pairii_t where = equal_range(key);
        size_type item_count = internal_distance(where.first, where.second);
        unsafe_erase(where.first, where.second);
        return item_count;
    }

#if __TBB_UNORDERED_NODE_HANDLE_PRESENT
    node_type unsafe_extract(const_iterator where) {
        return internal_extract(where).first;
    }

    node_type unsafe_extract(const key_type& key) {
        pairii_t where = equal_range(key);
        if (where.first == end()) return node_type(); // element was not found
        return internal_extract(where.first).first;
    }
#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
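
    // Usage sketch (assumes the public wrappers): unsafe_extract() detaches a
    // node and returns an owning node handle; re-inserting the handle moves
    // the node without copying the element. As the "unsafe_" prefix signals,
    // extraction must not race with other operations on the container:
    //
    //     tbb::concurrent_unordered_map<int, int> a, b;
    //     a.insert(std::make_pair(1, 10));
    //     auto nh = a.unsafe_extract(1);   // 'a' no longer owns the node
    //     if (!nh.empty()) {
    //         nh.mapped() = 11;            // mutate the element in place
    //         b.insert(std::move(nh));     // node now lives in 'b'
    //     }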

    void swap(concurrent_unordered_base& right) {
        if (this != &right) {
            std::swap(my_hash_compare, right.my_hash_compare); // TODO: check what ADL meant here
            my_solist.swap(right.my_solist);
            internal_swap_buckets(right);
            std::swap(my_number_of_buckets, right.my_number_of_buckets);
            std::swap(my_maximum_bucket_size, right.my_maximum_bucket_size);
        }
    }

    // Observers
    hasher hash_function() const {
        return my_hash_compare.my_hash_object;
    }

    key_equal key_eq() const {
        return my_hash_compare.my_key_compare_object;
    }

    void clear() {
        // Clear list
        my_solist.clear();

        // Clear buckets
        internal_clear();

        // Initialize bucket 0
        __TBB_ASSERT(my_buckets[0] == NULL, NULL);
        raw_iterator dummy_node = my_solist.raw_begin();
        set_bucket(0, dummy_node);
    }

    // Lookup
    iterator find(const key_type& key) {
        return internal_find(key);
    }

    const_iterator find(const key_type& key) const {
        return const_cast<self_type*>(this)->internal_find(key);
    }

    size_type count(const key_type& key) const {
        if(allow_multimapping) {
            paircc_t answer = equal_range(key);
            size_type item_count = internal_distance(answer.first, answer.second);
            return item_count;
        } else {
            return const_cast<self_type*>(this)->internal_find(key) == end() ? 0 : 1;
        }
    }

    std::pair<iterator, iterator> equal_range(const key_type& key) {
        return internal_equal_range(key);
    }

    std::pair<const_iterator, const_iterator> equal_range(const key_type& key) const {
        return const_cast<self_type*>(this)->internal_equal_range(key);
    }
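
    // Usage sketch (assumes the public concurrent_unordered_multimap
    // wrapper): with allow_multimapping, equal_range() yields the contiguous
    // run of elements that share a key:
    //
    //     tbb::concurrent_unordered_multimap<int, int> m;
    //     m.insert(std::make_pair(1, 10));
    //     m.insert(std::make_pair(1, 11));
    //     m.insert(std::make_pair(2, 20));
    //     auto r = m.equal_range(1);       // [first, last) of elements with key 1
    //     for (auto it = r.first; it != r.second; ++it)
    //         ;                            // visits {1,10} and {1,11}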

    // Bucket interface - for debugging
    size_type unsafe_bucket_count() const {
        return my_number_of_buckets;
    }

    size_type unsafe_max_bucket_count() const {
        return segment_size(pointers_per_table-1);
    }

    size_type unsafe_bucket_size(size_type bucket) {
        size_type item_count = 0;
        if (is_initialized(bucket)) {
            raw_iterator it = get_bucket(bucket);
            ++it;
            for (; it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy(); ++it)
                ++item_count;
        }
        return item_count;
    }

    size_type unsafe_bucket(const key_type& key) const {
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;
        return bucket;
    }

    // If the bucket is initialized, return a first non-dummy element in it
    local_iterator unsafe_begin(size_type bucket) {
        if (!is_initialized(bucket))
            return end();

        raw_iterator it = get_bucket(bucket);
        return my_solist.first_real_iterator(it);
    }

    // If the bucket is initialized, return a first non-dummy element in it
    const_local_iterator unsafe_begin(size_type bucket) const
    {
        if (!is_initialized(bucket))
            return end();

        raw_const_iterator it = get_bucket(bucket);
        return my_solist.first_real_iterator(it);
    }

    // @REVIEW: Takes O(n)
    // Returns the iterator after the last non-dummy element in the bucket
    local_iterator unsafe_end(size_type bucket)
    {
        if (!is_initialized(bucket))
            return end();

        raw_iterator it = get_bucket(bucket);

        // Find the end of the bucket, denoted by the dummy element
        do ++it;
        while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());

        // Return the first real element past the end of the bucket
        return my_solist.first_real_iterator(it);
    }

    // @REVIEW: Takes O(n)
    // Returns the iterator after the last non-dummy element in the bucket
    const_local_iterator unsafe_end(size_type bucket) const
    {
        if (!is_initialized(bucket))
            return end();

        raw_const_iterator it = get_bucket(bucket);

        // Find the end of the bucket, denoted by the dummy element
        do ++it;
        while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());

        // Return the first real element past the end of the bucket
        return my_solist.first_real_iterator(it);
    }

    const_local_iterator unsafe_cbegin(size_type bucket) const {
        return ((const self_type *) this)->unsafe_begin(bucket);
    }

    const_local_iterator unsafe_cend(size_type bucket) const {
        return ((const self_type *) this)->unsafe_end(bucket);
    }

    // Hash policy
    float load_factor() const {
        return (float) size() / (float) unsafe_bucket_count();
    }

    float max_load_factor() const {
        return my_maximum_bucket_size;
    }

    void max_load_factor(float newmax) {
        if (newmax != newmax || newmax < 0)
            tbb::internal::throw_exception(tbb::internal::eid_invalid_load_factor);
        my_maximum_bucket_size = newmax;
    }

    // Rehashing never moves elements: the underlying split-ordered list is
    // already sorted, so an increase in the bucket count takes effect lazily,
    // the next time each new bucket is touched.
    void rehash(size_type buckets) {
        size_type current_buckets = my_number_of_buckets;
        if (current_buckets >= buckets)
            return;
        my_number_of_buckets = size_type(1)<<__TBB_Log2((uintptr_t)buckets*2-1); // round up to power of 2
    }
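
    // Worked example (illustrative): with initial_bucket_load == 4, the table
    // doubles once size() exceeds 4 * unsafe_bucket_count(). rehash() rounds
    // its argument up to a power of two, e.g. for rehash(100):
    //
    //     my_number_of_buckets = size_type(1) << __TBB_Log2(100*2 - 1)
    //                          = size_type(1) << 7      // __TBB_Log2(199) == 7
    //                          = 128
    //
    // and rehash(n) for n <= the current bucket count is a no-op.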

private:

    // Initialize the hash and keep the first bucket open
    void internal_init() {
        // Initialize the array of segment pointers
        memset(my_buckets, 0, sizeof(my_buckets));

        // Initialize bucket 0
        raw_iterator dummy_node = my_solist.raw_begin();
        set_bucket(0, dummy_node);
    }

    void internal_clear() {
        for (size_type index = 0; index < pointers_per_table; ++index) {
            if (my_buckets[index] != NULL) {
                size_type sz = segment_size(index);
                for (size_type index2 = 0; index2 < sz; ++index2)
                    my_allocator.destroy(&my_buckets[index][index2]);
                my_allocator.deallocate(my_buckets[index], sz);
                my_buckets[index] = 0;
            }
        }
    }

    void internal_copy(const self_type& right) {
        clear();

        my_maximum_bucket_size = right.my_maximum_bucket_size;
        my_number_of_buckets = right.my_number_of_buckets;

        __TBB_TRY {
            insert(right.begin(), right.end());
            my_hash_compare = right.my_hash_compare;
        } __TBB_CATCH(...) {
            my_solist.clear();
            __TBB_RETHROW();
        }
    }

    void internal_swap_buckets(concurrent_unordered_base& right)
    {
        // Swap all node segments
        for (size_type index = 0; index < pointers_per_table; ++index)
        {
            raw_iterator * iterator_pointer = my_buckets[index];
            my_buckets[index] = right.my_buckets[index];
            right.my_buckets[index] = iterator_pointer;
        }
    }

    //TODO: why not use std::distance?
    // Hash APIs
    static size_type internal_distance(const_iterator first, const_iterator last)
    {
        size_type num = 0;

        for (const_iterator it = first; it != last; ++it)
            ++num;

        return num;
    }

    // Insert an element in the hash given its value
    template<typename AllowCreate, typename AllowDestroy, typename ValueType>
    std::pair<iterator, bool> internal_insert(__TBB_FORWARDING_REF(ValueType) value, nodeptr_t pnode = NULL)
    {
        const key_type *pkey = &get_key(value);
        sokey_t hash_key = (sokey_t) my_hash_compare(*pkey);
        size_type new_count = 0;
        sokey_t order_key = split_order_key_regular(hash_key);
        raw_iterator previous = prepare_bucket(hash_key);
        raw_iterator last = my_solist.raw_end();
        __TBB_ASSERT(previous != last, "Invalid head node");

        // First node is a dummy node
        for (raw_iterator where = previous;;)
        {
            ++where;
            if (where == last || solist_t::get_order_key(where) > order_key ||
                // if multimapped, stop at the first item equal to us.
                (allow_multimapping && solist_t::get_order_key(where) == order_key &&
                 !my_hash_compare(get_key(*where), *pkey))) // TODO: fix negation
            {
                if (!pnode) {
                    pnode = my_solist.create_node(order_key, tbb::internal::forward<ValueType>(value), AllowCreate());
                    // If the value was moved, the known reference to key might be invalid
                    pkey = &get_key(pnode->my_element);
                }
                else
                {
                    // Set new order_key to node
                    pnode->init(order_key);
                }

                // Try to insert 'pnode' between 'previous' and 'where'
                std::pair<iterator, bool> result = my_solist.try_insert(previous, where, pnode, &new_count);

                if (result.second)
                {
                    // Insertion succeeded, adjust the table size, if needed
                    adjust_table_size(new_count, my_number_of_buckets);
                    return result;
                }
                else
                {
                    // Insertion failed: either the same node was inserted by another thread, or
                    // another element was inserted at exactly the same place as this node.
                    // Proceed with the search from the previous location where order key was
                    // known to be larger (note: this is legal only because there is no safe
                    // concurrent erase operation supported).
                    where = previous;
                    continue;
                }
            }
            else if (!allow_multimapping && solist_t::get_order_key(where) == order_key &&
                     !my_hash_compare(get_key(*where), *pkey)) // TODO: fix negation
            {   // Element already in the list, return it
                if (pnode && AllowDestroy::value)
                    my_solist.destroy_node(pnode);
                return std::pair<iterator, bool>(my_solist.get_iterator(where), false);
            }
            // Move the iterator forward
            previous = where;
        }
    }

    // Find the element in the split-ordered list
    iterator internal_find(const key_type& key)
    {
        sokey_t hash_key = (sokey_t) my_hash_compare(key);
        sokey_t order_key = split_order_key_regular(hash_key);
        raw_iterator last = my_solist.raw_end();

        for (raw_iterator it = prepare_bucket(hash_key); it != last; ++it)
        {
            if (solist_t::get_order_key(it) > order_key)
            {
                // If the order key is smaller than the current order key, the element
                // is not in the hash.
                return end();
            }
            else if (solist_t::get_order_key(it) == order_key)
            {
                // The fact that order keys match does not mean that the element is found.
                // Key function comparison has to be performed to check whether this is the
                // right element. If not, keep searching while order key is the same.
                if (!my_hash_compare(get_key(*it), key)) // TODO: fix negation
                    return my_solist.get_iterator(it);
            }
        }

        return end();
    }

    // Erase an element from the list. This is not a concurrency safe function.
    iterator internal_erase(const_iterator it)
    {
        sokey_t hash_key = (sokey_t) my_hash_compare(get_key(*it));
        raw_iterator previous = prepare_bucket(hash_key);
        raw_iterator last = my_solist.raw_end();
        __TBB_ASSERT(previous != last, "Invalid head node");

        // First node is a dummy node
        for (raw_iterator where = previous; where != last; previous = where) {
            ++where;
            if (my_solist.get_iterator(where) == it)
                return my_solist.erase_node(previous, it);
        }
        return end();
    }

#if __TBB_UNORDERED_NODE_HANDLE_PRESENT
    std::pair<node_type, raw_iterator> internal_extract(const_iterator it) {
        sokey_t hash_key = sokey_t(my_hash_compare(get_key(*it)));
        raw_iterator previous = prepare_bucket(hash_key);
        raw_iterator last = my_solist.raw_end();
        __TBB_ASSERT(previous != last, "Invalid head node");

        for(raw_iterator where = previous; where != last; previous = where) {
            ++where;
            if (my_solist.get_iterator(where) == it) {
                const_iterator result = it;
                my_solist.erase_node(previous, it, /*allow_destroy*/tbb::internal::false_type());
                return std::pair<node_type, raw_iterator>( node_type(result.get_node_ptr()),
                                                           previous);
            }
        }
        return std::pair<node_type, iterator>(node_type(), end());
    }
#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT

    // Return the [begin, end) pair of iterators with the same key values.
    // This operation makes sense only if mapping is many-to-one.
    pairii_t internal_equal_range(const key_type& key)
    {
        sokey_t hash_key = (sokey_t) my_hash_compare(key);
        sokey_t order_key = split_order_key_regular(hash_key);
        raw_iterator end_it = my_solist.raw_end();

        for (raw_iterator it = prepare_bucket(hash_key); it != end_it; ++it)
        {
            if (solist_t::get_order_key(it) > order_key)
            {
                // There is no element with the given key
                return pairii_t(end(), end());
            }
            else if (solist_t::get_order_key(it) == order_key &&
                     !my_hash_compare(get_key(*it), key)) // TODO: fix negation; also below
            {
                iterator first = my_solist.get_iterator(it);
                iterator last = first;
                do ++last; while( allow_multimapping && last != end() && !my_hash_compare(get_key(*last), key) );
                return pairii_t(first, last);
            }
        }

        return pairii_t(end(), end());
    }

    // Bucket APIs
    void init_bucket(size_type bucket)
    {
        // Bucket 0 has no parent.
        __TBB_ASSERT( bucket != 0, "The first bucket must always be initialized");

        size_type parent_bucket = get_parent(bucket);

        // All parent_bucket buckets have to be initialized before this bucket is
        if (!is_initialized(parent_bucket))
            init_bucket(parent_bucket);

        raw_iterator parent = get_bucket(parent_bucket);

        // Create a dummy first node in this bucket
        raw_iterator dummy_node = my_solist.insert_dummy(parent, split_order_key_dummy(bucket));
        set_bucket(bucket, dummy_node);
    }

    void adjust_table_size(size_type total_elements, size_type current_size)
    {
        // Grow the table by a factor of 2 if possible and needed
        if ( ((float) total_elements / (float) current_size) > my_maximum_bucket_size )
        {
            // Double the size of the hash only if size has not changed in between loads
            my_number_of_buckets.compare_and_swap(2u*current_size, current_size);
            // Simple "my_number_of_buckets.compare_and_swap( current_size<<1, current_size );" does not work for VC8
            // due to overzealous compiler warnings in /Wp64 mode
        }
    }

    static size_type get_parent(size_type bucket)
    {
        // Unsets bucket's most significant turned-on bit
        size_type msb = __TBB_Log2((uintptr_t)bucket);
        return bucket & ~(size_type(1) << msb);
    }
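
    // Worked example (illustrative): clearing the most significant set bit
    // yields the bucket whose dummy node precedes this one in split order:
    //
    //     get_parent(6)  == 0b110  & ~(1 << 2) == 2
    //     get_parent(5)  == 0b101  & ~(1 << 2) == 1
    //     get_parent(12) == 0b1100 & ~(1 << 3) == 4
    //
    // The recursion in init_bucket() (e.g. 12 -> 4 -> 0) therefore always
    // terminates at bucket 0, which internal_init() creates eagerly.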


    // Dynamic sized array (segments)
    //! @return segment index of given index in the array
    static size_type segment_index_of( size_type index ) {
        return size_type( __TBB_Log2( uintptr_t(index|1) ) );
    }

    //! @return the first array index of given segment
    static size_type segment_base( size_type k ) {
        return (size_type(1)<<k & ~size_type(1));
    }

    //! @return segment size
    static size_type segment_size( size_type k ) {
        return k? size_type(1)<<k : 2;
    }
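
    // Worked example (illustrative): buckets live in segments of doubling
    // size, one segment per pointer in my_buckets:
    //
    //     bucket index:       0  1 | 2  3 | 4  5  6  7 | 8 .. 15 | ...
    //     segment_index_of:   0  0 | 1  1 | 2  2  2  2 | 3 ..  3 | ...
    //     segment_base(k):    0    | 2    | 4          | 8       | ...
    //     segment_size(k):    2    | 2    | 4          | 8       | ...
    //
    // so bucket b is stored at
    //     my_buckets[segment_index_of(b)][b - segment_base(segment_index_of(b))].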

    raw_iterator get_bucket(size_type bucket) const {
        size_type segment = segment_index_of(bucket);
        bucket -= segment_base(segment);
        __TBB_ASSERT( my_buckets[segment], "bucket must be in an allocated segment" );
        return my_buckets[segment][bucket];
    }

    raw_iterator prepare_bucket(sokey_t hash_key) {
        size_type bucket = hash_key % my_number_of_buckets;
        size_type segment = segment_index_of(bucket);
        size_type index = bucket - segment_base(segment);
        if (my_buckets[segment] == NULL || my_buckets[segment][index].get_node_ptr() == NULL)
            init_bucket(bucket);
        return my_buckets[segment][index];
    }

    void set_bucket(size_type bucket, raw_iterator dummy_head) {
        size_type segment = segment_index_of(bucket);
        bucket -= segment_base(segment);

        if (my_buckets[segment] == NULL) {
            size_type sz = segment_size(segment);
            raw_iterator * new_segment = my_allocator.allocate(sz);
            std::memset(static_cast<void*>(new_segment), 0, sz*sizeof(raw_iterator));

            if (my_buckets[segment].compare_and_swap( new_segment, NULL) != NULL)
                my_allocator.deallocate(new_segment, sz);
        }

        my_buckets[segment][bucket] = dummy_head;
    }

    bool is_initialized(size_type bucket) const {
        size_type segment = segment_index_of(bucket);
        bucket -= segment_base(segment);

        if (my_buckets[segment] == NULL)
            return false;

        raw_iterator it = my_buckets[segment][bucket];
        return (it.get_node_ptr() != NULL);
    }

    // Utilities for keys

    // A regular order key has its original hash value reversed and the last bit set
    static sokey_t split_order_key_regular(sokey_t order_key) {
        return __TBB_ReverseBits(order_key) | 0x1;
    }

    // A dummy order key has its original hash value reversed and the last bit unset
    static sokey_t split_order_key_dummy(sokey_t order_key) {
        return __TBB_ReverseBits(order_key) & ~sokey_t(0x1);
    }
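
    // Worked example (illustrative, 8-bit keys): for a hash value 0x2A
    // (00101010b):
    //
    //     split_order_key_regular(0x2A) == reverse(00101010b) | 1  == 0x55
    //     split_order_key_dummy(0x2A)   == reverse(00101010b) & ~1 == 0x54
    //
    // Bit reversal puts the low-order hash bits (the ones that pick the
    // bucket) in front, so all elements of a bucket sit contiguously in the
    // list, and the even/odd LSB makes each dummy sort immediately before
    // the regular elements that share its prefix.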

    // Shared variables
    atomic<size_type> my_number_of_buckets;  // Current table size
    solist_t my_solist;                      // List where all the elements are kept
    typename tbb::internal::allocator_rebind<allocator_type, raw_iterator>::type my_allocator; // Allocator object for segments
    float my_maximum_bucket_size;            // Maximum size of the bucket
    atomic<raw_iterator*> my_buckets[pointers_per_table]; // The segment table
};
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
#pragma warning(pop) // warning 4127 is back
#endif

#if __TBB_UNORDERED_NODE_HANDLE_PRESENT

template<typename Value, typename Allocator>
class node_handle_base {
public:
    typedef Allocator allocator_type;
protected:
    typedef typename split_ordered_list<Value, allocator_type>::node node;
public:

    node_handle_base() : my_node(NULL), my_allocator() {}
    node_handle_base(node_handle_base&& nh) : my_node(nh.my_node),
                     my_allocator(std::move(nh.my_allocator)) {
        nh.my_node = NULL;
    }

    bool empty() const { return my_node == NULL; }
    explicit operator bool() const { return my_node != NULL; }

    ~node_handle_base() { internal_destroy(); }

    node_handle_base& operator=(node_handle_base&& nh) {
        internal_destroy();
        my_node = nh.my_node;
        typedef typename tbb::internal::allocator_traits<allocator_type>::
                         propagate_on_container_move_assignment pocma_type;
        tbb::internal::allocator_move_assignment(my_allocator, nh.my_allocator, pocma_type());
        nh.deactivate();
        return *this;
    }

    void swap(node_handle_base& nh) {
        std::swap(my_node, nh.my_node);
        typedef typename tbb::internal::allocator_traits<allocator_type>::
                         propagate_on_container_swap pocs_type;
        tbb::internal::allocator_swap(my_allocator, nh.my_allocator, pocs_type());
    }

    allocator_type get_allocator() const {
        return my_allocator;
    }

protected:
    node_handle_base(node* n) : my_node(n) {}

    void internal_destroy() {
        if(my_node) {
            my_allocator.destroy(&(my_node->my_element));
            // TODO: Consider using node_allocator from the container
            typename tbb::internal::allocator_rebind<allocator_type, node>::type node_allocator;
            node_allocator.deallocate(my_node, 1);
        }
    }

    void deactivate() { my_node = NULL; }

    node* my_node;
    allocator_type my_allocator;
};

// Node handle for concurrent_unordered maps
template<typename Key, typename Value, typename Allocator>
class node_handle : public node_handle_base<Value, Allocator> {
    typedef node_handle_base<Value, Allocator> base_type;
public:
    typedef Key key_type;
    typedef typename Value::second_type mapped_type;
    typedef typename base_type::allocator_type allocator_type;

    node_handle() : base_type() {}

    key_type& key() const {
        __TBB_ASSERT(!this->empty(), "Cannot get key from the empty node_type object");
        return *const_cast<key_type*>(&(this->my_node->my_element.first));
    }

    mapped_type& mapped() const {
        __TBB_ASSERT(!this->empty(), "Cannot get mapped value from the empty node_type object");
        return this->my_node->my_element.second;
    }

private:
    template<typename T, typename A>
    friend class split_ordered_list;

    template<typename Traits>
    friend class concurrent_unordered_base;

    node_handle(typename base_type::node* n) : base_type(n) {}
};

// Node handle for concurrent_unordered sets
template<typename Key, typename Allocator>
class node_handle<Key, Key, Allocator> : public node_handle_base<Key, Allocator> {
    typedef node_handle_base<Key, Allocator> base_type;
public:
    typedef Key value_type;
    typedef typename base_type::allocator_type allocator_type;

    node_handle() : base_type() {}

    value_type& value() const {
        __TBB_ASSERT(!this->empty(), "Cannot get value from the empty node_type object");
        return *const_cast<value_type*>(&(this->my_node->my_element));
    }

private:
    template<typename T, typename A>
    friend class split_ordered_list;

    template<typename Traits>
    friend class concurrent_unordered_base;

    node_handle(typename base_type::node* n) : base_type(n) {}
};

#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT

} // namespace internal
//! @endcond
} // namespace interface5
} // namespace tbb

#endif // __TBB__concurrent_unordered_impl_H