Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
concurrent_hash_map.h
1 /*
2  Copyright (c) 2005-2019 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 
16 
17 
18 
19 */
20 
21 #ifndef __TBB_concurrent_hash_map_H
22 #define __TBB_concurrent_hash_map_H
23 
24 #include "tbb_stddef.h"
25 #include <iterator>
26 #include <utility> // Need std::pair
27 #include <cstring> // Need std::memset
28 #include __TBB_STD_SWAP_HEADER
29 
30 #include "tbb_allocator.h"
31 #include "spin_rw_mutex.h"
32 #include "atomic.h"
33 #include "tbb_exception.h"
34 #include "tbb_profiling.h"
35 #include "aligned_space.h"
39 #if __TBB_INITIALIZER_LISTS_PRESENT
40 #include <initializer_list>
41 #endif
42 #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
43 #include <typeinfo>
44 #endif
45 #if __TBB_STATISTICS
46 #include <stdio.h>
47 #endif
48 #if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TUPLE_PRESENT
49 // Definition of __TBB_CPP11_RVALUE_REF_PRESENT includes __TBB_CPP11_TUPLE_PRESENT
50 // for most platforms; the tuple-present macro was added for logical correctness
51 #include <tuple>
52 #endif
53 
54 namespace tbb {
55 
56 namespace interface5 {
57 
58  template<typename Key, typename T, typename HashCompare = tbb_hash_compare<Key>, typename A = tbb_allocator<std::pair<const Key, T> > >
60 
62  namespace internal {
63  using namespace tbb::internal;
64 
65 
67  typedef size_t hashcode_t;
77  };
79  static hash_map_node_base *const rehash_req = reinterpret_cast<hash_map_node_base*>(size_t(3));
81  static hash_map_node_base *const empty_rehashed = reinterpret_cast<hash_map_node_base*>(size_t(0));
83  class hash_map_base {
84  public:
86  typedef size_t size_type;
88  typedef size_t hashcode_t;
90  typedef size_t segment_index_t;
101  };
103  static size_type const embedded_block = 1;
105  static size_type const embedded_buckets = 1<<embedded_block;
107  static size_type const first_block = 8; // includes embedded_block; works well with a 16-byte bucket, so allocation sizes are multiples of 4096
109  static size_type const pointers_per_table = sizeof(segment_index_t) * 8; // one segment per bit
113  typedef segment_ptr_t segments_table_t[pointers_per_table];
117  segments_table_t my_table;
119  atomic<size_type> my_size; // Must be in a separate cache line from my_mask for performance reasons
121  bucket my_embedded_segment[embedded_buckets];
122 #if __TBB_STATISTICS
123  atomic<unsigned> my_info_resizes; // concurrent ones
124  mutable atomic<unsigned> my_info_restarts; // race collisions
125  atomic<unsigned> my_info_rehashes; // invocations of rehash_bucket
126 #endif
127  hash_map_base() {
129  std::memset( this, 0, pointers_per_table*sizeof(segment_ptr_t) // 32*4=128 or 64*8=512
130  + sizeof(my_size) + sizeof(my_mask) // 4+4 or 8+8
131  + embedded_buckets*sizeof(bucket) ); // n*8 or n*16
132  for( size_type i = 0; i < embedded_block; i++ ) // fill the table
133  my_table[i] = my_embedded_segment + segment_base(i);
134  my_mask = embedded_buckets - 1;
135  __TBB_ASSERT( embedded_block <= first_block, "The first block number must include embedded blocks");
136 #if __TBB_STATISTICS
137  my_info_resizes = 0; // concurrent ones
138  my_info_restarts = 0; // race collisions
139  my_info_rehashes = 0; // invocations of rehash_bucket
140 #endif
141  }
142 
145  return segment_index_t( __TBB_Log2( index|1 ) );
146  }
147 
150  return (segment_index_t(1)<<k & ~segment_index_t(1));
151  }
152 
155  return size_type(1)<<k; // fake value for k==0
156  }
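 // Worked example (illustrative note, not from the original source) of the segment
 // math above, with the defaults in this header (embedded_block == 1, hence
 // embedded_buckets == 2):
 //   segment_index_of(i) == Log2(i|1), segment_base(k) == (1<<k) & ~1, segment_size(k) == 1<<k
 //   buckets 0..1  -> segment 0, base 0 (the embedded segment; size value 1 for k==0 is the "fake" case)
 //   buckets 2..3  -> segment 1, base 2, size 2
 //   buckets 4..7  -> segment 2, base 4, size 4
 //   buckets 8..15 -> segment 3, base 8, size 8
 // So bucket i lives in my_table[segment_index_of(i)] at offset i - segment_base(segment_index_of(i)),
 // and for k >= first_block enabling segment k doubles the table's capacity.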
157 
159  static bool is_valid( void *ptr ) {
160  return reinterpret_cast<uintptr_t>(ptr) > uintptr_t(63);
161  }
162 
164  static void init_buckets( segment_ptr_t ptr, size_type sz, bool is_initial ) {
165  if( is_initial ) std::memset( static_cast<void*>(ptr), 0, sz*sizeof(bucket) );
166  else for(size_type i = 0; i < sz; i++, ptr++) {
167  *reinterpret_cast<intptr_t*>(&ptr->mutex) = 0;
168  ptr->node_list = rehash_req;
169  }
170  }
171 
173  static void add_to_bucket( bucket *b, node_base *n ) {
174  __TBB_ASSERT(b->node_list != rehash_req, NULL);
175  n->next = b->node_list;
176  b->node_list = n; // it's under lock and the flag is set
177  }
178 
182  enable_segment_failsafe(segments_table_t &table, segment_index_t k) : my_segment_ptr(&table[k]) {}
184  if( my_segment_ptr ) *my_segment_ptr = 0; // indicate no allocation in progress
185  }
186  };
187 
189  template<typename Allocator>
190  void enable_segment( segment_index_t k, const Allocator& allocator, bool is_initial = false ) {
191  typedef typename tbb::internal::allocator_rebind<Allocator, bucket>::type bucket_allocator_type;
192  typedef tbb::internal::allocator_traits<bucket_allocator_type> bucket_allocator_traits;
193  bucket_allocator_type bucket_allocator(allocator);
194  __TBB_ASSERT( k, "Zero segment must be embedded" );
195  enable_segment_failsafe watchdog( my_table, k );
196  size_type sz;
197  __TBB_ASSERT( !is_valid(my_table[k]), "Wrong concurrent assignment");
198  if( k >= first_block ) {
199  sz = segment_size( k );
200  segment_ptr_t ptr = bucket_allocator_traits::allocate(bucket_allocator, sz);
201  init_buckets( ptr, sz, is_initial );
202  itt_hide_store_word( my_table[k], ptr );
203  sz <<= 1;// double it to get entire capacity of the container
204  } else { // the first block
205  __TBB_ASSERT( k == embedded_block, "Wrong segment index" );
206  sz = segment_size( first_block );
207  segment_ptr_t ptr = bucket_allocator_traits::allocate(bucket_allocator, sz - embedded_buckets);
208  init_buckets( ptr, sz - embedded_buckets, is_initial );
209  ptr -= segment_base(embedded_block);
210  for(segment_index_t i = embedded_block; i < first_block; i++) // calc the offsets
211  itt_hide_store_word( my_table[i], ptr + segment_base(i) );
212  }
213  itt_store_word_with_release( my_mask, sz-1 );
214  watchdog.my_segment_ptr = 0;
215  }
216 
217  template<typename Allocator>
218  void delete_segment(segment_index_t s, const Allocator& allocator) {
219  typedef typename tbb::internal::allocator_rebind<Allocator, bucket>::type bucket_allocator_type;
220  typedef tbb::internal::allocator_traits<bucket_allocator_type> bucket_allocator_traits;
221  bucket_allocator_type bucket_allocator(allocator);
222  segment_ptr_t buckets_ptr = my_table[s];
223  size_type sz = segment_size( s ? s : 1 );
224 
225  if( s >= first_block) // the first segment or the next
226  bucket_allocator_traits::deallocate(bucket_allocator, buckets_ptr, sz);
227  else if( s == embedded_block && embedded_block != first_block )
228  bucket_allocator_traits::deallocate(bucket_allocator, buckets_ptr,
229  segment_size(first_block) - embedded_buckets);
230  if( s >= embedded_block ) my_table[s] = 0;
231  }
232 
234  bucket *get_bucket( hashcode_t h ) const throw() { // TODO: add throw() everywhere?
235  segment_index_t s = segment_index_of( h );
236  h -= segment_base(s);
237  segment_ptr_t seg = my_table[s];
238  __TBB_ASSERT( is_valid(seg), "hashcode must be cut by valid mask for allocated segments" );
239  return &seg[h];
240  }
241 
242  // internal serial rehashing helper
243  void mark_rehashed_levels( hashcode_t h ) throw () {
244  segment_index_t s = segment_index_of( h );
245  while( segment_ptr_t seg = my_table[++s] )
246  if( seg[h].node_list == rehash_req ) {
247  seg[h].node_list = empty_rehashed;
248  mark_rehashed_levels( h + ((hashcode_t)1<<s) ); // optimized segment_base(s)
249  }
250  }
251 
253  // Splitting into two functions should help inlining
254  inline bool check_mask_race( const hashcode_t h, hashcode_t &m ) const {
255  hashcode_t m_now, m_old = m;
256  m_now = (hashcode_t) itt_load_word_with_acquire( my_mask );
257  if( m_old != m_now )
258  return check_rehashing_collision( h, m_old, m = m_now );
259  return false;
260  }
261 
264  __TBB_ASSERT(m_old != m, NULL); // TODO?: m arg could be optimized out by passing h = h&m
265  if( (h & m_old) != (h & m) ) { // mask changed for this hashcode, rare event
266  // condition above proves that 'h' has some other bits set beside 'm_old'
267  // find next applicable mask after m_old //TODO: look at bsl instruction
268  for( ++m_old; !(h & m_old); m_old <<= 1 ) // at most a few rounds, depending on the first block size
269  ;
270  m_old = (m_old<<1) - 1; // get full mask from a bit
271  __TBB_ASSERT((m_old&(m_old+1))==0 && m_old <= m, NULL);
272  // check whether it is rehashing/ed
273  if( itt_load_word_with_acquire(get_bucket(h & m_old)->node_list) != rehash_req )
274  {
275 #if __TBB_STATISTICS
276  my_info_restarts++; // race collisions
277 #endif
278  return true;
279  }
280  }
281  return false;
282  }
283 
286  size_type sz = ++my_size; // prefix form is to enforce allocation after the first item inserted
287  add_to_bucket( b, n );
288  // check load factor
289  if( sz >= mask ) { // TODO: add custom load_factor
290  segment_index_t new_seg = __TBB_Log2( mask+1 ); //optimized segment_index_of
291  __TBB_ASSERT( is_valid(my_table[new_seg-1]), "new allocations must not publish new mask until segment has allocated");
292  static const segment_ptr_t is_allocating = (segment_ptr_t)2;
293  if( !itt_hide_load_word(my_table[new_seg])
294  && as_atomic(my_table[new_seg]).compare_and_swap(is_allocating, NULL) == NULL )
295  return new_seg; // The value must be processed
296  }
297  return 0;
298  }
299 
301  template<typename Allocator>
302  void reserve(size_type buckets, const Allocator& allocator) {
303  if( !buckets-- ) return;
304  bool is_initial = !my_size;
305  for( size_type m = my_mask; buckets > m; m = my_mask )
306  enable_segment( segment_index_of( m+1 ), allocator, is_initial );
307  }
310  using std::swap;
311  swap(this->my_mask, table.my_mask);
312  swap(this->my_size, table.my_size);
313  for(size_type i = 0; i < embedded_buckets; i++)
314  swap(this->my_embedded_segment[i].node_list, table.my_embedded_segment[i].node_list);
315  for(size_type i = embedded_block; i < pointers_per_table; i++)
316  swap(this->my_table[i], table.my_table[i]);
317  }
318  };
319 
320  template<typename Iterator>
322 
324 
326  template<typename Container, typename Value>
328  : public std::iterator<std::forward_iterator_tag,Value>
329  {
330  typedef Container map_type;
331  typedef typename Container::node node;
334 
335  template<typename C, typename T, typename U>
336  friend bool operator==( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
337 
338  template<typename C, typename T, typename U>
339  friend bool operator!=( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
340 
341  template<typename C, typename T, typename U>
342  friend ptrdiff_t operator-( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
343 
344  template<typename C, typename U>
345  friend class hash_map_iterator;
346 
347  template<typename I>
348  friend class hash_map_range;
349 
350  void advance_to_next_bucket() { // TODO?: refactor to iterator_base class
351  size_t k = my_index+1;
352  __TBB_ASSERT( my_bucket, "advancing an invalid iterator?");
353  while( k <= my_map->my_mask ) {
354  // Following test uses 2's-complement wizardry
355  if( k&(k-2) ) // not the beginning of a segment
356  ++my_bucket;
357  else my_bucket = my_map->get_bucket( k );
358  my_node = static_cast<node*>( my_bucket->node_list );
359  if( hash_map_base::is_valid(my_node) ) {
360  my_index = k; return;
361  }
362  ++k;
363  }
364  my_bucket = 0; my_node = 0; my_index = k; // the end
365  }
366 #if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
367  template<typename Key, typename T, typename HashCompare, typename A>
369 #else
370  public: // workaround
371 #endif
372  const Container *my_map;
374 
376  size_t my_index;
377 
380 
383 
384  hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n );
385 
386  public:
388  hash_map_iterator(): my_map(), my_index(), my_bucket(), my_node() {}
390  my_map(other.my_map),
391  my_index(other.my_index),
392  my_bucket(other.my_bucket),
393  my_node(other.my_node)
394  {}
395  Value& operator*() const {
396  __TBB_ASSERT( hash_map_base::is_valid(my_node), "iterator uninitialized or at end of container?" );
397  return my_node->value();
398  }
399  Value* operator->() const {return &operator*();}
400  hash_map_iterator& operator++();
401 
404  hash_map_iterator old(*this);
405  operator++();
406  return old;
407  }
408  };
409 
410  template<typename Container, typename Value>
411  hash_map_iterator<Container,Value>::hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n ) :
412  my_map(&map),
413  my_index(index),
414  my_bucket(b),
415  my_node( static_cast<node*>(n) )
416  {
417  if( b && !hash_map_base::is_valid(n) )
419  }
420 
421  template<typename Container, typename Value>
423  my_node = static_cast<node*>( my_node->next );
424  if( !my_node ) advance_to_next_bucket();
425  return *this;
426  }
427 
428  template<typename Container, typename T, typename U>
430  return i.my_node == j.my_node && i.my_map == j.my_map;
431  }
432 
433  template<typename Container, typename T, typename U>
435  return i.my_node != j.my_node || i.my_map != j.my_map;
436  }
437 
439 
440  template<typename Iterator>
441  class hash_map_range {
442  typedef typename Iterator::map_type map_type;
443  Iterator my_begin;
444  Iterator my_end;
445  mutable Iterator my_midpoint;
446  size_t my_grainsize;
448  void set_midpoint() const;
449  template<typename U> friend class hash_map_range;
450  public:
452  typedef std::size_t size_type;
453  typedef typename Iterator::value_type value_type;
454  typedef typename Iterator::reference reference;
455  typedef typename Iterator::difference_type difference_type;
456  typedef Iterator iterator;
457 
459  bool empty() const {return my_begin==my_end;}
460 
462  bool is_divisible() const {
463  return my_midpoint!=my_end;
464  }
467  my_end(r.my_end),
469  {
470  r.my_end = my_begin = r.my_midpoint;
471  __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" );
472  __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" );
473  set_midpoint();
474  r.set_midpoint();
475  }
477  template<typename U>
479  my_begin(r.my_begin),
480  my_end(r.my_end),
483  {}
485  hash_map_range( const map_type &map, size_type grainsize_ = 1 ) :
486  my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment->node_list ) ),
487  my_end( Iterator( map, map.my_mask + 1, 0, 0 ) ),
488  my_grainsize( grainsize_ )
489  {
490  __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
491  set_midpoint();
492  }
493  const Iterator& begin() const {return my_begin;}
494  const Iterator& end() const {return my_end;}
496  size_type grainsize() const {return my_grainsize;}
497  };
498 
499  template<typename Iterator>
501  // Split by groups of nodes
502  size_t m = my_end.my_index-my_begin.my_index;
503  if( m > my_grainsize ) {
504  m = my_begin.my_index + m/2u;
505  hash_map_base::bucket *b = my_begin.my_map->get_bucket(m);
506  my_midpoint = Iterator(*my_begin.my_map,m,b,b->node_list);
507  } else {
508  my_midpoint = my_end;
509  }
510  __TBB_ASSERT( my_begin.my_index <= my_midpoint.my_index,
511  "my_begin is after my_midpoint" );
512  __TBB_ASSERT( my_midpoint.my_index <= my_end.my_index,
513  "my_midpoint is after my_end" );
514  __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end,
515  "[my_begin, my_midpoint) range should not be empty" );
516  }
517 
518  } // internal
520 
521 #if _MSC_VER && !defined(__INTEL_COMPILER)
522  // Suppress "conditional expression is constant" warning.
523  #pragma warning( push )
524  #pragma warning( disable: 4127 )
525 #endif
526 
528 
557 template<typename Key, typename T, typename HashCompare, typename Allocator>
558 class concurrent_hash_map : protected internal::hash_map_base {
559  template<typename Container, typename Value>
561 
562  template<typename I>
564 
565 public:
566  typedef Key key_type;
567  typedef T mapped_type;
568  typedef std::pair<const Key,T> value_type;
569  typedef hash_map_base::size_type size_type;
570  typedef ptrdiff_t difference_type;
571  typedef value_type *pointer;
572  typedef const value_type *const_pointer;
574  typedef const value_type &const_reference;
575  typedef internal::hash_map_iterator<concurrent_hash_map,value_type> iterator;
576  typedef internal::hash_map_iterator<concurrent_hash_map,const value_type> const_iterator;
577  typedef internal::hash_map_range<iterator> range_type;
578  typedef internal::hash_map_range<const_iterator> const_range_type;
579  typedef Allocator allocator_type;
580 
581 protected:
582  friend class const_accessor;
583  class node;
587  HashCompare my_hash_compare;
588 
589  class node : public node_base {
591  public:
592  value_type* storage() { return my_value.begin(); }
593  value_type& value() { return *storage(); }
594  };
595 
596  void delete_node( node_base *n ) {
597  node_allocator_traits::destroy(my_allocator, static_cast<node*>(n)->storage());
598  node_allocator_traits::destroy(my_allocator, static_cast<node*>(n));
599  node_allocator_traits::deallocate(my_allocator, static_cast<node*>(n), 1);
600  }
601 
605 
608  if(my_node) {
611  }
612  }
613  void dismiss() { my_node = NULL; }
614  };
615 
616 #if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
617  template<typename... Args>
618  static node* create_node(node_allocator_type& allocator, Args&&... args)
619 #else
620  template<typename Arg1, typename Arg2>
621  static node* create_node(node_allocator_type& allocator, __TBB_FORWARDING_REF(Arg1) arg1, __TBB_FORWARDING_REF(Arg2) arg2)
622 #endif
623  {
624  node* node_ptr = node_allocator_traits::allocate(allocator, 1);
625  node_scoped_guard guard(node_ptr, allocator);
626  node_allocator_traits::construct(allocator, node_ptr);
627 #if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
628  node_allocator_traits::construct(allocator, node_ptr->storage(), std::forward<Args>(args)...);
629 #else
630  node_allocator_traits::construct(allocator, node_ptr->storage(), tbb::internal::forward<Arg1>(arg1), tbb::internal::forward<Arg2>(arg2));
631 #endif
632  guard.dismiss();
633  return node_ptr;
634  }
635 
636  static node* allocate_node_copy_construct(node_allocator_type& allocator, const Key &key, const T * t){
637  return create_node(allocator, key, *t);
638  }
639 
640 #if __TBB_CPP11_RVALUE_REF_PRESENT
641  static node* allocate_node_move_construct(node_allocator_type& allocator, const Key &key, const T * t){
642  return create_node(allocator, key, std::move(*const_cast<T*>(t)));
643  }
644 #endif
645 
646  static node* allocate_node_default_construct(node_allocator_type& allocator, const Key &key, const T * ){
647 #if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TUPLE_PRESENT
648  // Emplace construct an empty T object inside the pair
649  return create_node(allocator, std::piecewise_construct,
650  std::forward_as_tuple(key), std::forward_as_tuple());
651 #else
652  T obj; // Use of a temporary object is impossible, because create_node takes a non-const reference
653  return create_node(allocator, key, tbb::internal::move(obj));
654 #endif
655  }
656 
657  static node* do_not_allocate_node(node_allocator_type& , const Key &, const T * ){
658  __TBB_ASSERT(false,"this dummy function should not be called");
659  return NULL;
660  }
661 
662  node *search_bucket( const key_type &key, bucket *b ) const {
663  node *n = static_cast<node*>( b->node_list );
664  while( is_valid(n) && !my_hash_compare.equal(key, n->value().first) )
665  n = static_cast<node*>( n->next );
666  __TBB_ASSERT(n != internal::rehash_req, "Search can be executed only for rehashed bucket");
667  return n;
668  }
669 
672  bucket *my_b;
673  public:
674  bucket_accessor( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) { acquire( base, h, writer ); }
676  inline void acquire( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) {
677  my_b = base->get_bucket( h );
678  // TODO: actually, notification is unnecessary here, just hiding double-check
680  && try_acquire( my_b->mutex, /*write=*/true ) )
681  {
682  if( my_b->node_list == internal::rehash_req ) base->rehash_bucket( my_b, h ); //recursive rehashing
683  }
684  else bucket::scoped_t::acquire( my_b->mutex, writer );
685  __TBB_ASSERT( my_b->node_list != internal::rehash_req, NULL);
686  }
688  bool is_writer() { return bucket::scoped_t::is_writer; }
690  bucket *operator() () { return my_b; }
691  };
692 
693  // TODO refactor to hash_base
694  void rehash_bucket( bucket *b_new, const hashcode_t h ) {
695  __TBB_ASSERT( *(intptr_t*)(&b_new->mutex), "b_new must be locked (for write)");
696  __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
698  hashcode_t mask = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
699 #if __TBB_STATISTICS
700  my_info_rehashes++; // invocations of rehash_bucket
701 #endif
702 
703  bucket_accessor b_old( this, h & mask );
704 
705  mask = (mask<<1) | 1; // get full mask for new bucket
706  __TBB_ASSERT( (mask&(mask+1))==0 && (h & mask) == h, NULL );
707  restart:
708  for( node_base **p = &b_old()->node_list, *n = __TBB_load_with_acquire(*p); is_valid(n); n = *p ) {
709  hashcode_t c = my_hash_compare.hash( static_cast<node*>(n)->value().first );
710 #if TBB_USE_ASSERT
711  hashcode_t bmask = h & (mask>>1);
712  bmask = bmask==0? 1 : ( 1u<<(__TBB_Log2( bmask )+1 ) ) - 1; // minimal mask of parent bucket
713  __TBB_ASSERT( (c & bmask) == (h & bmask), "hash() function changed for key in table" );
714 #endif
715  if( (c & mask) == h ) {
716  if( !b_old.is_writer() )
717  if( !b_old.upgrade_to_writer() ) {
718  goto restart; // node ptr can be invalid due to concurrent erase
719  }
720  *p = n->next; // exclude from b_old
721  add_to_bucket( b_new, n );
722  } else p = &n->next; // iterate to next item
723  }
724  }
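 // How the lazy rehashing above works (illustrative summary, not from the original source):
 // when the table grows, buckets of the new segment are merely marked rehash_req; their
 // items still reside in the "parent" bucket whose index is h with the topmost set bit
 // cleared, i.e. h & ((1u<<__TBB_Log2(h)) - 1). The first operation that hits such a
 // bucket write-locks it and calls rehash_bucket(), which locks the parent and moves
 // every node whose hash, masked with the new (doubled) mask, now maps to the new bucket.
 // If upgrading the parent lock fails, the scan restarts because a concurrent erase may
 // have invalidated the node pointer under inspection.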
725 
729  void dismiss() {my_ch_map = 0;}
731  if (my_ch_map){
732  my_ch_map->clear();
733  }
734  }
735  };
736 public:
737 
738  class accessor;
740  class const_accessor : private node::scoped_t /*which derived from no_copy*/ {
741  friend class concurrent_hash_map<Key,T,HashCompare,Allocator>;
742  friend class accessor;
743  public:
746 
748  bool empty() const { return !my_node; }
749 
751  void release() {
752  if( my_node ) {
754  my_node = 0;
755  }
756  }
757 
760  __TBB_ASSERT( my_node, "attempt to dereference empty accessor" );
761  return my_node->value();
762  }
763 
766  return &operator*();
767  }
768 
770  const_accessor() : my_node(NULL) {}
771 
774  my_node = NULL; // scoped lock's release() is called in its destructor
775  }
776  protected:
777  bool is_writer() { return node::scoped_t::is_writer; }
780  };
781 
783  class accessor: public const_accessor {
784  public:
787 
790  __TBB_ASSERT( this->my_node, "attempt to dereference empty accessor" );
791  return this->my_node->value();
792  }
793 
795  pointer operator->() const {
796  return &operator*();
797  }
798  };
799 
802  : internal::hash_map_base(), my_allocator(a)
803  {}
804 
805  explicit concurrent_hash_map( const HashCompare& compare, const allocator_type& a = allocator_type() )
806  : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
807  {}
808 
811  : internal::hash_map_base(), my_allocator(a)
812  {
813  reserve( n, my_allocator );
814  }
815 
816  concurrent_hash_map( size_type n, const HashCompare& compare, const allocator_type& a = allocator_type() )
817  : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
818  {
819  reserve( n, my_allocator );
820  }
821 
824  : internal::hash_map_base(), my_allocator(a)
825  {
826  call_clear_on_leave scope_guard(this);
827  internal_copy(table);
828  scope_guard.dismiss();
829  }
830 
831 #if __TBB_CPP11_RVALUE_REF_PRESENT
834  : internal::hash_map_base(), my_allocator(std::move(table.get_allocator()))
835  {
836  swap(table);
837  }
838 
841  : internal::hash_map_base(), my_allocator(a)
842  {
843  if (a == table.get_allocator()){
844  this->swap(table);
845  }else{
846  call_clear_on_leave scope_guard(this);
847  internal_copy(std::make_move_iterator(table.begin()), std::make_move_iterator(table.end()), table.size());
848  scope_guard.dismiss();
849  }
850  }
851 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
852 
854  template<typename I>
856  : internal::hash_map_base(), my_allocator(a)
857  {
858  call_clear_on_leave scope_guard(this);
859  internal_copy(first, last, std::distance(first, last));
860  scope_guard.dismiss();
861  }
862 
863  template<typename I>
864  concurrent_hash_map( I first, I last, const HashCompare& compare, const allocator_type& a = allocator_type() )
865  : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
866  {
867  call_clear_on_leave scope_guard(this);
868  internal_copy(first, last, std::distance(first, last));
869  scope_guard.dismiss();
870  }
871 
872 #if __TBB_INITIALIZER_LISTS_PRESENT
873  concurrent_hash_map( std::initializer_list<value_type> il, const allocator_type &a = allocator_type() )
875  : internal::hash_map_base(), my_allocator(a)
876  {
877  call_clear_on_leave scope_guard(this);
878  internal_copy(il.begin(), il.end(), il.size());
879  scope_guard.dismiss();
880  }
881 
882  concurrent_hash_map( std::initializer_list<value_type> il, const HashCompare& compare, const allocator_type& a = allocator_type() )
883  : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
884  {
885  call_clear_on_leave scope_guard(this);
886  internal_copy(il.begin(), il.end(), il.size());
887  scope_guard.dismiss();
888  }
889 
890 #endif //__TBB_INITIALIZER_LISTS_PRESENT
891 
894  if( this!=&table ) {
895  clear();
896  internal_copy(table);
897  }
898  return *this;
899  }
900 
901 #if __TBB_CPP11_RVALUE_REF_PRESENT
904  if(this != &table) {
906  if(pocma_t::value || this->my_allocator == table.my_allocator) {
907  concurrent_hash_map trash (std::move(*this));
908  //TODO: swapping allocators here may be a problem, replace with single direction moving iff pocma is set
909  this->swap(table);
910  } else {
911  //do per element move
912  concurrent_hash_map moved_copy(std::move(table), this->my_allocator);
913  this->swap(moved_copy);
914  }
915  }
916  return *this;
917  }
918 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
919 
920 #if __TBB_INITIALIZER_LISTS_PRESENT
921  concurrent_hash_map& operator=( std::initializer_list<value_type> il ) {
923  clear();
924  internal_copy(il.begin(), il.end(), il.size());
925  return *this;
926  }
927 #endif //__TBB_INITIALIZER_LISTS_PRESENT
928 
929 
931 
933  void rehash(size_type n = 0);
934 
936  void clear();
937 
940 
941  //------------------------------------------------------------------------
942  // Parallel algorithm support
943  //------------------------------------------------------------------------
944  range_type range( size_type grainsize=1 ) {
945  return range_type( *this, grainsize );
946  }
947  const_range_type range( size_type grainsize=1 ) const {
948  return const_range_type( *this, grainsize );
949  }
950 
951  //------------------------------------------------------------------------
952  // STL support - not thread-safe methods
953  //------------------------------------------------------------------------
955  iterator end() { return iterator( *this, 0, 0, 0 ); }
957  const_iterator end() const { return const_iterator( *this, 0, 0, 0 ); }
958  std::pair<iterator, iterator> equal_range( const Key& key ) { return internal_equal_range( key, end() ); }
959  std::pair<const_iterator, const_iterator> equal_range( const Key& key ) const { return internal_equal_range( key, end() ); }
960 
962  size_type size() const { return my_size; }
963 
965  bool empty() const { return my_size == 0; }
966 
968  size_type max_size() const {return (~size_type(0))/sizeof(node);}
969 
971  size_type bucket_count() const { return my_mask+1; }
972 
974  allocator_type get_allocator() const { return this->my_allocator; }
975 
977  void swap( concurrent_hash_map &table );
978 
979  //------------------------------------------------------------------------
980  // concurrent map operations
981  //------------------------------------------------------------------------
982 
984  size_type count( const Key &key ) const {
985  return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, NULL, /*write=*/false, &do_not_allocate_node );
986  }
987 
989 
990  bool find( const_accessor &result, const Key &key ) const {
991  result.release();
992  return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, &result, /*write=*/false, &do_not_allocate_node );
993  }
994 
996 
997  bool find( accessor &result, const Key &key ) {
998  result.release();
999  return lookup(/*insert*/false, key, NULL, &result, /*write=*/true, &do_not_allocate_node );
1000  }
1001 
1003 
1004  bool insert( const_accessor &result, const Key &key ) {
1005  result.release();
1006  return lookup(/*insert*/true, key, NULL, &result, /*write=*/false, &allocate_node_default_construct );
1007  }
1008 
1010 
1011  bool insert( accessor &result, const Key &key ) {
1012  result.release();
1013  return lookup(/*insert*/true, key, NULL, &result, /*write=*/true, &allocate_node_default_construct );
1014  }
1015 
1017 
1018  bool insert( const_accessor &result, const value_type &value ) {
1019  result.release();
1020  return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/false, &allocate_node_copy_construct );
1021  }
1022 
1024 
1025  bool insert( accessor &result, const value_type &value ) {
1026  result.release();
1027  return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/true, &allocate_node_copy_construct );
1028  }
1029 
1031 
1032  bool insert( const value_type &value ) {
1033  return lookup(/*insert*/true, value.first, &value.second, NULL, /*write=*/false, &allocate_node_copy_construct );
1034  }
1035 
1036 #if __TBB_CPP11_RVALUE_REF_PRESENT
1037 
1039  bool insert( const_accessor &result, value_type && value ) {
1040  return generic_move_insert(result, std::move(value));
1041  }
1042 
1044 
1045  bool insert( accessor &result, value_type && value ) {
1046  return generic_move_insert(result, std::move(value));
1047  }
1048 
1050 
1051  bool insert( value_type && value ) {
1053  }
1054 
1055 #if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1056 
1058  template<typename... Args>
1059  bool emplace( const_accessor &result, Args&&... args ) {
1060  return generic_emplace(result, std::forward<Args>(args)...);
1061  }
1062 
1064 
1065  template<typename... Args>
1066  bool emplace( accessor &result, Args&&... args ) {
1067  return generic_emplace(result, std::forward<Args>(args)...);
1068  }
1069 
1071 
1072  template<typename... Args>
1073  bool emplace( Args&&... args ) {
1074  return generic_emplace(accessor_not_used(), std::forward<Args>(args)...);
1075  }
1076 #endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1077 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
1078 
1080  template<typename I>
1081  void insert( I first, I last ) {
1082  for ( ; first != last; ++first )
1083  insert( *first );
1084  }
1085 
1086 #if __TBB_INITIALIZER_LISTS_PRESENT
1087  void insert( std::initializer_list<value_type> il ) {
1089  insert( il.begin(), il.end() );
1090  }
1091 #endif //__TBB_INITIALIZER_LISTS_PRESENT
1092 
1094 
1095  bool erase( const Key& key );
1096 
1098 
1099  bool erase( const_accessor& item_accessor ) {
1100  return exclude( item_accessor );
1101  }
1102 
1104 
1105  bool erase( accessor& item_accessor ) {
1106  return exclude( item_accessor );
1107  }
1108 
1109 protected:
1111  bool lookup(bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node* (*allocate_node)(node_allocator_type& , const Key &, const T * ), node *tmp_n = 0 ) ;
1112 
1113  struct accessor_not_used { void release(){}};
1114  friend const_accessor* accessor_location( accessor_not_used const& ){ return NULL;}
1115  friend const_accessor* accessor_location( const_accessor & a ) { return &a;}
1116 
1117  friend bool is_write_access_needed( accessor const& ) { return true;}
1118  friend bool is_write_access_needed( const_accessor const& ) { return false;}
1119  friend bool is_write_access_needed( accessor_not_used const& ) { return false;}
1120 
1121 #if __TBB_CPP11_RVALUE_REF_PRESENT
1122  template<typename Accessor>
1123  bool generic_move_insert( Accessor && result, value_type && value ) {
1124  result.release();
1125  return lookup(/*insert*/true, value.first, &value.second, accessor_location(result), is_write_access_needed(result), &allocate_node_move_construct );
1126  }
1127 
1128 #if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1129  template<typename Accessor, typename... Args>
1130  bool generic_emplace( Accessor && result, Args &&... args ) {
1131  result.release();
1132  node * node_ptr = create_node(my_allocator, std::forward<Args>(args)...);
1133  return lookup(/*insert*/true, node_ptr->value().first, NULL, accessor_location(result), is_write_access_needed(result), &do_not_allocate_node, node_ptr );
1134  }
1135 #endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1136 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
1137 
1139  bool exclude( const_accessor &item_accessor );
1140 
1142  template<typename I>
1143  std::pair<I, I> internal_equal_range( const Key& key, I end ) const;
1144 
1146  void internal_copy( const concurrent_hash_map& source );
1147 
1148  template<typename I>
1149  void internal_copy( I first, I last, size_type reserve_size );
1150 
1152 
1154  const_pointer internal_fast_find( const Key& key ) const {
1155  hashcode_t h = my_hash_compare.hash( key );
1157  node *n;
1158  restart:
1159  __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1160  bucket *b = get_bucket( h & m );
1161  // TODO: actually, notification is unnecessary here, just hiding double-check
1163  {
1165  if( lock.try_acquire( b->mutex, /*write=*/true ) ) {
1166  if( b->node_list == internal::rehash_req)
1167  const_cast<concurrent_hash_map*>(this)->rehash_bucket( b, h & m ); //recursive rehashing
1168  }
1169  else lock.acquire( b->mutex, /*write=*/false );
1171  }
1172  n = search_bucket( key, b );
1173  if( n )
1174  return &n->item;
1175  else if( check_mask_race( h, m ) )
1176  goto restart;
1177  return 0;
1178  }
1179 };
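
The class above exposes accessor-based operations rather than raw iterators for concurrent use. A minimal usage sketch follows (not part of the header itself); it assumes TBB is installed and the program links against the tbb library.

    #include <iostream>
    #include <string>
    #include "tbb/concurrent_hash_map.h"

    typedef tbb::concurrent_hash_map<std::string, int> StringTable;

    int main() {
        StringTable table;

        {   // insert() with an accessor: creates the element if absent and
            // holds a write lock on it for the accessor's lifetime
            StringTable::accessor a;
            if( table.insert( a, "apples" ) )   // true => the key was new
                a->second = 5;
        }   // lock released here

        {   // find() with a const_accessor: read lock, no insertion
            StringTable::const_accessor ca;
            if( table.find( ca, "apples" ) )
                std::cout << ca->first << " -> " << ca->second << "\n";
        }

        table.erase( "apples" );    // erase by key is safe to call concurrently
        return 0;
    }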
1180 
1181 #if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
1182 namespace internal {
1183 using namespace tbb::internal;
1184 
1185 template<template<typename...> typename Map, typename Key, typename T, typename... Args>
1186 using hash_map_t = Map<
1187  Key, T,
1188  std::conditional_t< (sizeof...(Args)>0) && !is_allocator_v< pack_element_t<0, Args...> >,
1189  pack_element_t<0, Args...>, tbb_hash_compare<Key> >,
1190  std::conditional_t< (sizeof...(Args)>0) && is_allocator_v< pack_element_t<sizeof...(Args)-1, Args...> >,
1191  pack_element_t<sizeof...(Args)-1, Args...>, tbb_allocator<std::pair<const Key, T> > >
1192 >;
1193 }
1194 
1195 // Deduction guide for the constructor from two iterators and hash_compare/ allocator
1196 template<typename I, typename... Args>
1197 concurrent_hash_map(I, I, Args...)
1198 -> internal::hash_map_t<concurrent_hash_map, internal::iterator_key_t<I>,internal::iterator_mapped_t<I>, Args...>;
1199 
1200 // Deduction guide for the constructor from an initializer_list and hash_compare/ allocator
1201 // Deduction guide for an initializer_list, hash_compare and allocator is implicit
1202 template<typename Key, typename T, typename CompareOrAllocator>
1203 concurrent_hash_map(std::initializer_list<std::pair<const Key, T>>, CompareOrAllocator)
1204 -> internal::hash_map_t<concurrent_hash_map, Key, T, CompareOrAllocator>;
1205 
1206 #endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */
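
A hedged illustration of the deduction guides above; it requires C++17 and a compiler for which __TBB_CPP17_DEDUCTION_GUIDES_PRESENT is defined. The function name below is illustrative only.

    #include <string>
    #include <utility>
    #include <vector>
    #include "tbb/concurrent_hash_map.h"

    void deduction_guide_examples() {
        std::vector<std::pair<std::string, int>> src{ {"a", 1}, {"b", 2} };

        // Key and T are deduced from the iterator's value_type; HashCompare and
        // the allocator fall back to tbb_hash_compare / tbb_allocator.
        tbb::concurrent_hash_map m1( src.begin(), src.end() );

        // Key and T are deduced from the initializer_list; the trailing argument
        // is recognized as an allocator and lands in the allocator slot.
        tbb::concurrent_hash_map m2( { std::pair<const int, int>{1, 1},
                                       std::pair<const int, int>{2, 4} },
                                     tbb::tbb_allocator<std::pair<const int, int> >() );
    }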
1207 
1208 template<typename Key, typename T, typename HashCompare, typename A>
1209 bool concurrent_hash_map<Key,T,HashCompare,A>::lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node* (*allocate_node)(node_allocator_type& , const Key&, const T*), node *tmp_n ) {
1210  __TBB_ASSERT( !result || !result->my_node, NULL );
1211  bool return_value;
1212  hashcode_t const h = my_hash_compare.hash( key );
1214  segment_index_t grow_segment = 0;
1215  node *n;
1216  restart:
1217  {//lock scope
1218  __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1219  return_value = false;
1220  // get bucket
1221  bucket_accessor b( this, h & m );
1222 
1223  // find a node
1224  n = search_bucket( key, b() );
1225  if( op_insert ) {
1226  // [opt] insert a key
1227  if( !n ) {
1228  if( !tmp_n ) {
1229  tmp_n = allocate_node(my_allocator, key, t);
1230  }
1231  if( !b.is_writer() && !b.upgrade_to_writer() ) { // TODO: improved insertion
1232  // Rerun search_list, in case another thread inserted the item during the upgrade.
1233  n = search_bucket( key, b() );
1234  if( is_valid(n) ) { // unfortunately, it did
1235  b.downgrade_to_reader();
1236  goto exists;
1237  }
1238  }
1239  if( check_mask_race(h, m) )
1240  goto restart; // b.release() is done in ~b().
1241  // insert and set flag to grow the container
1242  grow_segment = insert_new_node( b(), n = tmp_n, m );
1243  tmp_n = 0;
1244  return_value = true;
1245  }
1246  } else { // find or count
1247  if( !n ) {
1248  if( check_mask_race( h, m ) )
1249  goto restart; // b.release() is done in ~b(). TODO: replace by continue
1250  return false;
1251  }
1252  return_value = true;
1253  }
1254  exists:
1255  if( !result ) goto check_growth;
1256  // TODO: the following looks like a generic/regular operation
1257  // acquire the item
1258  if( !result->try_acquire( n->mutex, write ) ) {
1259  for( tbb::internal::atomic_backoff backoff(true);; ) {
1260  if( result->try_acquire( n->mutex, write ) ) break;
1261  if( !backoff.bounded_pause() ) {
1262  // the wait takes really long, restart the operation
1263  b.release();
1264  __TBB_ASSERT( !op_insert || !return_value, "Can't acquire new item in locked bucket?" );
1265  __TBB_Yield();
1266  m = (hashcode_t) itt_load_word_with_acquire( my_mask );
1267  goto restart;
1268  }
1269  }
1270  }
1271  }//lock scope
1272  result->my_node = n;
1273  result->my_hash = h;
1274 check_growth:
1275  // [opt] grow the container
1276  if( grow_segment ) {
1277 #if __TBB_STATISTICS
1278  my_info_resizes++; // concurrent ones
1279 #endif
1280  enable_segment( grow_segment, my_allocator );
1281  }
1282  if( tmp_n ) // if op_insert only
1283  delete_node( tmp_n );
1284  return return_value;
1285 }
1286 
1287 template<typename Key, typename T, typename HashCompare, typename A>
1288 template<typename I>
1289 std::pair<I, I> concurrent_hash_map<Key,T,HashCompare,A>::internal_equal_range( const Key& key, I end_ ) const {
1290  hashcode_t h = my_hash_compare.hash( key );
1291  hashcode_t m = my_mask;
1292  __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1293  h &= m;
1294  bucket *b = get_bucket( h );
1295  while( b->node_list == internal::rehash_req ) {
1296  m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
1297  b = get_bucket( h &= m );
1298  }
1299  node *n = search_bucket( key, b );
1300  if( !n )
1301  return std::make_pair(end_, end_);
1302  iterator lower(*this, h, b, n), upper(lower);
1303  return std::make_pair(lower, ++upper);
1304 }
1305 
1306 template<typename Key, typename T, typename HashCompare, typename A>
1308  __TBB_ASSERT( item_accessor.my_node, NULL );
1309  node_base *const n = item_accessor.my_node;
1310  hashcode_t const h = item_accessor.my_hash;
1312  do {
1313  // get bucket
1314  bucket_accessor b( this, h & m, /*writer=*/true );
1315  node_base **p = &b()->node_list;
1316  while( *p && *p != n )
1317  p = &(*p)->next;
1318  if( !*p ) { // someone else was first
1319  if( check_mask_race( h, m ) )
1320  continue;
1321  item_accessor.release();
1322  return false;
1323  }
1324  __TBB_ASSERT( *p == n, NULL );
1325  *p = n->next; // remove from container
1326  my_size--;
1327  break;
1328  } while(true);
1329  if( !item_accessor.is_writer() ) // need to get exclusive lock
1330  item_accessor.upgrade_to_writer(); // return value means nothing here
1331  item_accessor.release();
1332  delete_node( n ); // Only one thread can delete it
1333  return true;
1334 }
1335 
1336 template<typename Key, typename T, typename HashCompare, typename A>
1338  node_base *n;
1339  hashcode_t const h = my_hash_compare.hash( key );
1341 restart:
1342  {//lock scope
1343  // get bucket
1344  bucket_accessor b( this, h & m );
1345  search:
1346  node_base **p = &b()->node_list;
1347  n = *p;
1348  while( is_valid(n) && !my_hash_compare.equal(key, static_cast<node*>(n)->value().first ) ) {
1349  p = &n->next;
1350  n = *p;
1351  }
1352  if( !n ) { // not found, but mask could be changed
1353  if( check_mask_race( h, m ) )
1354  goto restart;
1355  return false;
1356  }
1357  else if( !b.is_writer() && !b.upgrade_to_writer() ) {
1358  if( check_mask_race( h, m ) ) // contended upgrade, check mask
1359  goto restart;
1360  goto search;
1361  }
1362  *p = n->next;
1363  my_size--;
1364  }
1365  {
1366  typename node::scoped_t item_locker( n->mutex, /*write=*/true );
1367  }
1368  // note: no other thread should be trying to acquire this mutex again; do not try to upgrade const_accessor!
1369  delete_node( n ); // Only one thread can delete it due to write lock on the bucket
1370  return true;
1371 }
1372 
1373 template<typename Key, typename T, typename HashCompare, typename A>
1375  //TODO: respect C++11 allocator_traits<A>::propagate_on_container_swap
1376  using std::swap;
1377  swap(this->my_allocator, table.my_allocator);
1378  swap(this->my_hash_compare, table.my_hash_compare);
1379  internal_swap(table);
1380 }
1381 
1382 template<typename Key, typename T, typename HashCompare, typename A>
1384  reserve( sz, my_allocator ); // TODO: add reduction of number of buckets as well
1385  hashcode_t mask = my_mask;
1386  hashcode_t b = (mask+1)>>1; // size or first index of the last segment
1387  __TBB_ASSERT((b&(b-1))==0, NULL); // zero or power of 2
1388  bucket *bp = get_bucket( b ); // only the last segment should be scanned for rehashing
1389  for(; b <= mask; b++, bp++ ) {
1390  node_base *n = bp->node_list;
1391  __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
1392  __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
1393  if( n == internal::rehash_req ) { // rehash bucket, conditional because rehashing of a previous bucket may affect this one
1394  hashcode_t h = b; bucket *b_old = bp;
1395  do {
1396  __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
1397  hashcode_t m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
1398  b_old = get_bucket( h &= m );
1399  } while( b_old->node_list == internal::rehash_req );
1400  // now h is the index of the root rehashed bucket b_old
1401  mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments
1402  for( node_base **p = &b_old->node_list, *q = *p; is_valid(q); q = *p ) {
1403  hashcode_t c = my_hash_compare.hash( static_cast<node*>(q)->value().first );
1404  if( (c & mask) != h ) { // should be rehashed
1405  *p = q->next; // exclude from b_old
1406  bucket *b_new = get_bucket( c & mask );
1407  __TBB_ASSERT( b_new->node_list != internal::rehash_req, "hash() function changed for key in table or internal error" );
1408  add_to_bucket( b_new, q );
1409  } else p = &q->next; // iterate to next item
1410  }
1411  }
1412  }
1413 #if TBB_USE_PERFORMANCE_WARNINGS
1414  int current_size = int(my_size), buckets = int(mask)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
1415  static bool reported = false;
1416 #endif
1417 #if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
1418  for( b = 0; b <= mask; b++ ) {// only last segment should be scanned for rehashing
1419  if( b & (b-2) ) ++bp; // not the beginning of a segment
1420  else bp = get_bucket( b );
1421  node_base *n = bp->node_list;
1422  __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
1423  __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed, "Broken internal structure" );
1424 #if TBB_USE_PERFORMANCE_WARNINGS
1425  if( n == internal::empty_rehashed ) empty_buckets++;
1426  else if( n->next ) overpopulated_buckets++;
1427 #endif
1428 #if TBB_USE_ASSERT
1429  for( ; is_valid(n); n = n->next ) {
1430  hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->value().first ) & mask;
1431  __TBB_ASSERT( h == b, "hash() function changed for key in table or internal error" );
1432  }
1433 #endif
1434  }
1435 #endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
1436 #if TBB_USE_PERFORMANCE_WARNINGS
1437  if( buckets > current_size) empty_buckets -= buckets - current_size;
1438  else overpopulated_buckets -= current_size - buckets; // TODO: load_factor?
1439  if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
1441  "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
1443  typeid(*this).name(),
1444 #else
1445  "concurrent_hash_map",
1446 #endif
1447  current_size, empty_buckets, overpopulated_buckets );
1448  reported = true;
1449  }
1450 #endif
1451 }
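
rehash() and reserve() are serial operations; a small hedged sketch of the usual pattern (the function name below is illustrative) is:

    // Pre-size the table and finish any pending lazy rehashing while no other
    // thread is using it, e.g. between a bulk load and a concurrent phase.
    void presize( tbb::concurrent_hash_map<int, int> &table, std::size_t n ) {
        table.rehash( n );   // reserves at least n buckets, then rehashes
                             // buckets still marked rehash_req
    }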
1452 
1453 template<typename Key, typename T, typename HashCompare, typename A>
1455  hashcode_t m = my_mask;
1456  __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1457 #if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1458 #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1459  int current_size = int(my_size), buckets = int(m)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
1460  static bool reported = false;
1461 #endif
1462  bucket *bp = 0;
1463  // check consistency
1464  for( segment_index_t b = 0; b <= m; b++ ) {
1465  if( b & (b-2) ) ++bp; // not the beginning of a segment
1466  else bp = get_bucket( b );
1467  node_base *n = bp->node_list;
1468  __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
1469  __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during clear() execution" );
1470 #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1471  if( n == internal::empty_rehashed ) empty_buckets++;
1472  else if( n == internal::rehash_req ) buckets--;
1473  else if( n->next ) overpopulated_buckets++;
1474 #endif
1475 #if __TBB_EXTRA_DEBUG
1476  for(; is_valid(n); n = n->next ) {
1477  hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->value().first );
1478  h &= m;
1479  __TBB_ASSERT( h == b || get_bucket(h)->node_list == internal::rehash_req, "hash() function changed for key in table or internal error" );
1480  }
1481 #endif
1482  }
1483 #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1484 #if __TBB_STATISTICS
1485  printf( "items=%d buckets: capacity=%d rehashed=%d empty=%d overpopulated=%d"
1486  " concurrent: resizes=%u rehashes=%u restarts=%u\n",
1487  current_size, int(m+1), buckets, empty_buckets, overpopulated_buckets,
1488  unsigned(my_info_resizes), unsigned(my_info_rehashes), unsigned(my_info_restarts) );
1489  my_info_resizes = 0; // concurrent ones
1490  my_info_restarts = 0; // race collisions
1491  my_info_rehashes = 0; // invocations of rehash_bucket
1492 #endif
1493  if( buckets > current_size) empty_buckets -= buckets - current_size;
1494  else overpopulated_buckets -= current_size - buckets; // TODO: load_factor?
1495  if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
1497  "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
1499  typeid(*this).name(),
1500 #else
1501  "concurrent_hash_map",
1502 #endif
1503  current_size, empty_buckets, overpopulated_buckets );
1504  reported = true;
1505  }
1506 #endif
1507 #endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1508  my_size = 0;
1509  segment_index_t s = segment_index_of( m );
1510  __TBB_ASSERT( s+1 == pointers_per_table || !my_table[s+1], "wrong mask or concurrent grow" );
1511  do {
1512  __TBB_ASSERT( is_valid( my_table[s] ), "wrong mask or concurrent grow" );
1513  segment_ptr_t buckets_ptr = my_table[s];
1514  size_type sz = segment_size( s ? s : 1 );
1515  for( segment_index_t i = 0; i < sz; i++ )
1516  for( node_base *n = buckets_ptr[i].node_list; is_valid(n); n = buckets_ptr[i].node_list ) {
1517  buckets_ptr[i].node_list = n->next;
1518  delete_node( n );
1519  }
1520  delete_segment(s, my_allocator);
1521  } while(s-- > 0);
1522  my_mask = embedded_buckets - 1;
1523 }
1524 
1525 template<typename Key, typename T, typename HashCompare, typename A>
1527  hashcode_t mask = source.my_mask;
1528  if( my_mask == mask ) { // optimized version
1529  reserve( source.my_size, my_allocator ); // TODO: load_factor?
1530  bucket *dst = 0, *src = 0;
1531  bool rehash_required = false;
1532  for( hashcode_t k = 0; k <= mask; k++ ) {
1533  if( k & (k-2) ) ++dst,src++; // not the beginning of a segment
1534  else { dst = get_bucket( k ); src = source.get_bucket( k ); }
1535  __TBB_ASSERT( dst->node_list != internal::rehash_req, "Invalid bucket in destination table");
1536  node *n = static_cast<node*>( src->node_list );
1537  if( n == internal::rehash_req ) { // source is not rehashed, items are in previous buckets
1538  rehash_required = true;
1540  } else for(; n; n = static_cast<node*>( n->next ) ) {
1541  node* node_ptr = create_node(my_allocator, n->value().first, n->value().second);
1542  add_to_bucket( dst, node_ptr);
1543  ++my_size; // TODO: replace by non-atomic op
1544  }
1545  }
1546  if( rehash_required ) rehash();
1547  } else internal_copy( source.begin(), source.end(), source.my_size );
1548 }
1549 
1550 template<typename Key, typename T, typename HashCompare, typename A>
1551 template<typename I>
1553  reserve( reserve_size, my_allocator ); // TODO: load_factor?
1554  hashcode_t m = my_mask;
1555  for(; first != last; ++first) {
1556  hashcode_t h = my_hash_compare.hash( (*first).first );
1557  bucket *b = get_bucket( h & m );
1558  __TBB_ASSERT( b->node_list != internal::rehash_req, "Invalid bucket in destination table");
1559  node* node_ptr = create_node(my_allocator, (*first).first, (*first).second);
1560  add_to_bucket( b, node_ptr );
1561  ++my_size; // TODO: replace by non-atomic op
1562  }
1563 }
1564 
1565 } // namespace interface5
1566 
1568 
1569 
1570 template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
1572  if(a.size() != b.size()) return false;
1575  for(; i != i_end; ++i) {
1576  j = b.equal_range(i->first).first;
1577  if( j == j_end || !(i->second == j->second) ) return false;
1578  }
1579  return true;
1580 }
1581 
1582 template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
1584 { return !(a == b); }
1585 
1586 template<typename Key, typename T, typename HashCompare, typename A>
1588 { a.swap( b ); }
1589 
1590 #if _MSC_VER && !defined(__INTEL_COMPILER)
1591  #pragma warning( pop )
1592 #endif // warning 4127 is back
1593 
1594 } // namespace tbb
1595 
1596 #endif /* __TBB_concurrent_hash_map_H */
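
A final hedged end-to-end sketch (not part of the header) of parallel traversal via range(), assuming tbb/parallel_for.h is available and that no thread inserts or erases during the traversal:

    #include <atomic>
    #include <cstdio>
    #include "tbb/concurrent_hash_map.h"
    #include "tbb/parallel_for.h"

    typedef tbb::concurrent_hash_map<int, int> IntTable;

    int main() {
        IntTable table;
        for( int i = 0; i < 1000; ++i )
            table.insert( IntTable::value_type( i, i * i ) );

        std::atomic<long long> sum( 0 );
        tbb::parallel_for( table.range(), [&]( const IntTable::range_type &r ) {
            long long local = 0;
            for( IntTable::range_type::iterator it = r.begin(); it != r.end(); ++it )
                local += it->second;
            sum += local;
        } );

        std::printf( "items=%zu sum=%lld\n", table.size(), static_cast<long long>( sum ) );
        return 0;
    }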
Meets requirements of a forward iterator for STL */.
~const_accessor()
Destroy result after releasing the underlying reference.
T itt_hide_load_word(const T &src)
void __TBB_EXPORTED_FUNC runtime_warning(const char *format,...)
Report a runtime warning.
Fast, unfair, spinning reader-writer lock with backoff and writer-preference.
Definition: spin_rw_mutex.h:42
Allows write access to elements and combines data access, locking, and garbage collection.
void acquire(concurrent_hash_map *base, const hashcode_t h, bool writer=false)
find a bucket by masked hashcode, optionally rehash, and acquire the lock
bool operator==(const cache_aligned_allocator< T > &, const cache_aligned_allocator< U > &)
internal::hash_map_range< const_iterator > const_range_type
node * my_node
Pointer to node that has current item.
bucket accessor is to find, rehash, acquire a lock, and access a bucket
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function h
concurrent_hash_map(size_type n, const HashCompare &compare, const allocator_type &a=allocator_type())
allocator_type get_allocator() const
return allocator object
friend bool is_write_access_needed(const_accessor const &)
bool check_rehashing_collision(const hashcode_t h, hashcode_t m_old, hashcode_t m) const
Process mask race, check for rehashing collision.
const Container * my_map
concurrent_hash_map over which we are iterating.
size_type size() const
Number of items in table.
friend bool is_write_access_needed(accessor_not_used const &)
T __TBB_load_with_acquire(const volatile T &location)
Definition: tbb_machine.h:713
static node * allocate_node_move_construct(node_allocator_type &allocator, const Key &key, const T *t)
const concurrent_hash_map::value_type value_type
Type of value.
const_pointer operator->() const
Return pointer to associated value in hash table.
T * begin() const
Pointer to beginning of array.
Definition: aligned_space.h:39
bool emplace(Args &&... args)
Insert item by copying if there is no such key present already.
std::pair< const_iterator, const_iterator > equal_range(const Key &key) const
void set_midpoint() const
Set my_midpoint to point approximately half way between my_begin and my_end.
bool is_writer()
check whether bucket is locked for write
bool generic_move_insert(Accessor &&result, value_type &&value)
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:169
tbb::aligned_space< value_type > my_value
const_pointer internal_fast_find(const Key &key) const
Fast find when no concurrent erasure is used. For internal use inside TBB only!
size_type count(const Key &key) const
Return count of items (0 or 1)
concurrent_hash_map(I first, I last, const allocator_type &a=allocator_type())
Construction with copying iteration range and given allocator instance.
std::pair< iterator, iterator > equal_range(const Key &key)
tick_count::interval_t operator-(const tick_count &t1, const tick_count &t0)
Definition: tick_count.h:130
size_type grainsize() const
The grain size for this range.
pointer operator->() const
Return pointer to associated value in hash table.
void itt_store_word_with_release(tbb::atomic< T > &dst, U src)
auto first(Container &c) -> decltype(begin(c))
Unordered map from Key to T.
hash_map_range(hash_map_range &r, split)
Split range.
Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5.
Definition: tbb_allocator.h:62
bool erase(const_accessor &item_accessor)
Erase item by const_accessor.
allocator_traits< Alloc >::template rebind_alloc< T >::other type
void insert(I first, I last)
Insert range [first, last)
Base class for types that should not be copied or assigned.
Definition: tbb_stddef.h:335
friend const_accessor * accessor_location(const_accessor &a)
T itt_load_word_with_acquire(const tbb::atomic< T > &src)
Range class used with concurrent_hash_map.
static void add_to_bucket(bucket *b, node_base *n)
Add node.
#define __TBB_Yield()
Definition: ibm_aix51.h:48
bool generic_emplace(Accessor &&result, Args &&... args)
size_t my_index
Index in hash table for current item.
base class of concurrent_hash_map
tbb::internal::allocator_rebind< Allocator, node >::type node_allocator_type
bool find(const_accessor &result, const Key &key) const
Find item and acquire a read lock on the item.
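A sketch of a concurrency-safe read through a const_accessor, assuming a hypothetical std::string-to-int instantiation; the read lock is held for the lifetime of the accessor:

    #include <string>
    #include "tbb/concurrent_hash_map.h"

    typedef tbb::concurrent_hash_map<std::string, int> table_t;  // hypothetical instantiation

    int read_value( const table_t &table, const std::string &key, int fallback ) {
        table_t::const_accessor a;          // holds a read (shared) lock on the found element
        if( table.find( a, key ) )
            return a->second;               // other readers may access the element concurrently
        return fallback;                    // the lock is released when 'a' is destroyed
    }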
~concurrent_hash_map()
Clear table and destroy it.
static pointer allocate(Alloc &a, size_type n)
hash_map_range(const map_type &map, size_type grainsize_=1)
Init range with container and grainsize specified.
hash_compare that is the default argument for concurrent_hash_map.
Acquire.
Definition: atomic.h:47
bool insert(accessor &result, value_type &&value)
Insert item by moving if there is no such key present already and acquire a write lock on the item.
static void deallocate(Alloc &a, pointer p, size_type n)
const_range_type range(size_type grainsize=1) const
static void construct(Alloc &, PT *p)
void delete_segment(segment_index_t s, const Allocator &allocator)
node * search_bucket(const key_type &key, bucket *b) const
atomic< size_type > my_size
Size of container in stored items.
bool emplace(accessor &result, Args &&... args)
Insert item constructed in place from the given arguments if there is no such key present already, and acquire a write lock on the item.
concurrent_hash_map(concurrent_hash_map &&table, const allocator_type &a)
Move constructor.
auto last(Container &c) -> decltype(begin(c))
concurrent_hash_map(const concurrent_hash_map &table, const allocator_type &a=allocator_type())
Copy constructor.
void internal_swap(hash_map_base &table)
Swap hash_map_bases.
intptr_t __TBB_Log2(uintptr_t x)
Definition: tbb_machine.h:864
bool find(accessor &result, const Key &key)
Find item and acquire a write lock on the item.
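A sketch of an in-place update under the accessor's write lock, assuming the same hypothetical std::string-to-int instantiation:

    #include <string>
    #include "tbb/concurrent_hash_map.h"

    typedef tbb::concurrent_hash_map<std::string, int> table_t;  // hypothetical instantiation

    bool increment_if_present( table_t &table, const std::string &key ) {
        table_t::accessor a;                // holds a write (exclusive) lock on the found element
        if( !table.find( a, key ) )
            return false;                   // key absent, nothing locked
        ++a->second;                        // no other thread can access this element here
        return true;                        // lock released by the accessor's destructor
    }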
bool exclude(const_accessor &item_accessor)
Delete item by accessor.
bool empty() const
True if size()==0.
internal::hash_map_range< iterator > range_type
const bucket * my_bucket
Pointer to bucket.
concurrent_hash_map(const allocator_type &a=allocator_type())
Construct empty table.
static hash_map_node_base *const empty_rehashed
Rehashed empty bucket flag.
concurrent_hash_map & operator=(const concurrent_hash_map &table)
Assignment.
concurrent_hash_map(size_type n, const allocator_type &a=allocator_type())
Construct empty table with n preallocated buckets. This number also serves as the initial concurrency level.
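A sketch of preallocating buckets at construction to reduce rehashing during a concurrent filling phase (the table type and the value 1000 are illustrative):

    #include <string>
    #include "tbb/concurrent_hash_map.h"

    typedef tbb::concurrent_hash_map<std::string, int> table_t;  // hypothetical instantiation

    void build() {
        table_t table( 1000 );              // roughly 1000 buckets preallocated up front
        table.insert( table_t::value_type( "answer", 42 ) );
    }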
reference operator*() const
Return reference to associated value in hash table.
mutex_t::scoped_lock scoped_t
Scoped lock type for mutex.
#define __TBB_FORWARDING_REF(A)
Definition: tbb_stddef.h:500
bool insert(const_accessor &result, const Key &key)
Insert item (if not already present) and acquire a read lock on the item.
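A sketch of read-or-create access with insert(const_accessor&, key), assuming the hypothetical std::string-to-int instantiation; a default-constructed value is inserted when the key is absent:

    #include <string>
    #include "tbb/concurrent_hash_map.h"

    typedef tbb::concurrent_hash_map<std::string, int> table_t;  // hypothetical instantiation

    int read_or_create( table_t &table, const std::string &key ) {
        table_t::const_accessor a;
        table.insert( a, key );             // inserts {key, int()} if absent; 'a' holds a read lock
        return a->second;
    }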
hash_map_range(hash_map_range< U > &r)
Type conversion.
tbb::internal::allocator_traits< node_allocator_type > node_allocator_traits
bucket my_embedded_segment[embedded_buckets]
Zero segment.
const_reference operator*() const
Return reference to associated value in hash table.
The graph class.
void swap(concurrent_hash_map< Key, T, HashCompare, A > &a, concurrent_hash_map< Key, T, HashCompare, A > &b)
static node * allocate_node_default_construct(node_allocator_type &allocator, const Key &key, const T *)
mutex_t::scoped_lock scoped_t
Scoped lock type for mutex.
bool erase(const Key &key)
Erase item.
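A sketch of erasing by key, assuming the hypothetical std::string-to-int instantiation; the call may be issued concurrently with other operations on the table:

    #include <string>
    #include "tbb/concurrent_hash_map.h"

    typedef tbb::concurrent_hash_map<std::string, int> table_t;  // hypothetical instantiation

    bool remove_key( table_t &table, const std::string &key ) {
        return table.erase( key );          // true if the element existed and was removed
    }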
static void init_buckets(segment_ptr_t ptr, size_type sz, bool is_initial)
Initialize buckets.
bool operator==(const hash_map_iterator< Container, T > &i, const hash_map_iterator< Container, U > &j)
segments_table_t my_table
Segment pointers table. Also prevents false sharing between my_mask and my_size.
friend const_accessor * accessor_location(accessor_not_used const &)
Combines data access, locking, and garbage collection.
Dummy type that distinguishes splitting constructor from copy constructor.
Definition: tbb_stddef.h:399
bool emplace(const_accessor &result, Args &&... args)
Insert item constructed in place from the given arguments if there is no such key present already, and acquire a read lock on the item.
friend bool is_write_access_needed(accessor const &)
bool erase(accessor &item_accessor)
Erase item by accessor.
void swap(concurrent_hash_map &table)
Swap two instances. Iterators are invalidated.
bool insert(value_type &&value)
Insert item by moving if there is no such key present already.
spin_rw_mutex mutex_t
Mutex type for buckets.
static node * do_not_allocate_node(node_allocator_type &, const Key &, const T *)
bool check_mask_race(const hashcode_t h, hashcode_t &m) const
Check for mask race.
bool operator!=(const hash_map_iterator< Container, T > &i, const hash_map_iterator< Container, U > &j)
void rehash_bucket(bucket *b_new, const hashcode_t h)
static hash_map_node_base *const rehash_req
Incompleteness flag value.
Class that implements exponential backoff.
Definition: tbb_machine.h:349
bool insert(accessor &result, const value_type &value)
Insert item by copying if there is no such key present already and acquire a write lock on the item.
hash_map_node_base * next
Next node in chain.
void move(tbb_thread &t1, tbb_thread &t2)
Definition: tbb_thread.h:309
bool operator!=(const cache_aligned_allocator< T > &, const cache_aligned_allocator< U > &)
void itt_hide_store_word(T &dst, T src)
#define __TBB_USE_OPTIONAL_RTTI
Definition: tbb_config.h:129
atomic< hashcode_t > my_mask
Hash mask = sum of allocated segment sizes - 1.
bool insert(accessor &result, const Key &key)
Insert item (if not already present) and acquire a write lock on the item.
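A sketch of the common counting idiom built on insert(accessor&, key), assuming a hypothetical std::string-to-int instantiation; the element is created on first use and updated under the accessor's write lock:

    #include <string>
    #include "tbb/concurrent_hash_map.h"

    typedef tbb::concurrent_hash_map<std::string, int> counter_t;  // hypothetical instantiation

    void count_word( counter_t &counters, const std::string &word ) {
        counter_t::accessor a;              // write lock on the element while 'a' is alive
        counters.insert( a, word );         // inserts {word, 0} if absent, then locks the element
        ++a->second;                        // safe: exclusive access under the accessor
    }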
hash_map_iterator(const hash_map_iterator< Container, typename Container::value_type > &other)
segment_index_t insert_new_node(bucket *b, node_base *n, hashcode_t mask)
Insert a node and check for load factor.
static segment_index_t segment_base(segment_index_t k)
hash_map_iterator()
Construct undefined iterator.
range_type range(size_type grainsize=1)
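A sketch of parallel traversal with tbb::parallel_for over the table's range, assuming a hypothetical std::string-to-int instantiation; no thread may insert into or erase from the table while it is traversed this way:

    #include <string>
    #include "tbb/concurrent_hash_map.h"
    #include "tbb/parallel_for.h"

    typedef tbb::concurrent_hash_map<std::string, int> table_t;  // hypothetical instantiation

    struct DoubleValues {
        void operator()( const table_t::range_type &r ) const {
            for( table_t::range_type::iterator i = r.begin(); i != r.end(); ++i )
                i->second *= 2;             // each element is visited by exactly one subrange
        }
    };

    void double_all( table_t &table ) {
        tbb::parallel_for( table.range(), DoubleValues() );
    }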
bool lookup(bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node *(*allocate_node)(node_allocator_type &, const Key &, const T *), node *tmp_n=0)
Insert or find item and optionally acquire a lock on the item.
bool is_divisible() const
True if range can be partitioned into two subranges.
concurrent_hash_map::value_type value_type
Type of value.
internal::hash_map_iterator< concurrent_hash_map, const value_type > const_iterator
static segment_index_t segment_index_of(size_type index)
bucket * get_bucket(hashcode_t h) const
Get bucket by (masked) hashcode.
concurrent_hash_map(const HashCompare &compare, const allocator_type &a=allocator_type())
concurrent_hash_map(I first, I last, const HashCompare &compare, const allocator_type &a=allocator_type())
static void destroy(Alloc &, T *p)
size_type bucket_count() const
Returns the current number of buckets.
void enable_segment(segment_index_t k, const Allocator &allocator, bool is_initial=false)
Enable segment.
node_scoped_guard(node *n, node_allocator_type &alloc)
bool empty() const
True if range is empty.
enable_segment_failsafe(segments_table_t &table, segment_index_t k)
std::size_t size_type
Type for size of a range.
std::pair< I, I > internal_equal_range(const Key &key, I end) const
Returns an iterator for an item defined by the key, or for the next item after it (if upper==true)
The scoped locking pattern.
Definition: spin_rw_mutex.h:90
void reserve(size_type buckets, const Allocator &allocator)
Prepare enough segments for number of buckets.
Class for determining type of std::allocator<T>::value_type.
Definition: tbb_stddef.h:454
hash_map_iterator operator++(int)
Post increment.
static node * allocate_node_copy_construct(node_allocator_type &allocator, const Key &key, const T *t)
Identifiers declared inside namespace internal should never be used directly by client code.
Definition: atomic.h:55
hash_map_node_base node_base
Node base type.
bool insert(const value_type &value)
Insert item by copying if there is no such key present already.
internal::hash_map_iterator< concurrent_hash_map, value_type > iterator
Release.
Definition: atomic.h:49
static size_type segment_size(segment_index_t k)
void __TBB_store_with_release(volatile T &location, V value)
Definition: tbb_machine.h:717
atomic< T > & as_atomic(T &t)
Definition: atomic.h:547
size_type max_size() const
Upper bound on size.
bucket_accessor(concurrent_hash_map *base, const hashcode_t h, bool writer=false)
concurrent_hash_map(std::initializer_list< value_type > il, const HashCompare &compare, const allocator_type &a=allocator_type())
size_t hashcode_t
Type of a hash code.
bool insert(const_accessor &result, const value_type &value)
Insert item by copying if there is no such key present already and acquire a read lock on the item.
void rehash(size_type n=0)
Rehashes and optionally resizes the whole table.
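A sketch of an explicit rehash, assuming the hypothetical std::string-to-int instantiation; this is a serial operation, so no other thread should be using the table while it runs:

    #include <string>
    #include "tbb/concurrent_hash_map.h"

    typedef tbb::concurrent_hash_map<std::string, int> table_t;  // hypothetical instantiation

    void prepare_for_lookup_heavy_phase( table_t &table ) {
        // Completes any deferred per-bucket rehashing; the argument is a bucket-count hint.
        table.rehash( 4096 );
    }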
void internal_copy(const concurrent_hash_map &source)
Copy "source" to *this, where *this must start out empty.
static node * create_node(node_allocator_type &allocator, Args &&... args)
bool insert(const_accessor &result, value_type &&value)
Insert item by moving if there is no such key present already and acquire a read lock on the item.
