Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
enumerable_thread_specific.h
Go to the documentation of this file.
1 /*
2  Copyright (c) 2005-2019 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 
16 
17 
18 
19 */
20 
21 #ifndef __TBB_enumerable_thread_specific_H
22 #define __TBB_enumerable_thread_specific_H
23 
24 #include "atomic.h"
25 #include "concurrent_vector.h"
26 #include "tbb_thread.h"
27 #include "tbb_allocator.h"
29 #include "aligned_space.h"
32 #include "tbb_profiling.h"
33 #include <string.h> // for memcpy
34 
35 #if _WIN32||_WIN64
36 #include "machine/windows_api.h"
37 #else
38 #include <pthread.h>
39 #endif
40 
41 #define __TBB_ETS_USE_CPP11 \
42  (__TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT \
43  && __TBB_CPP11_DECLTYPE_PRESENT && __TBB_CPP11_LAMBDAS_PRESENT)
44 
45 namespace tbb {
46 
49 
50 namespace interface6 {
51 
52  // Forward declaration to use in internal classes
53  template <typename T, typename Allocator, ets_key_usage_type ETS_key_type>
55 
57  namespace internal {
58 
59  using namespace tbb::internal;
60 
//! Lock-free hash table mapping thread id -> pointer-to-thread-local-element.
/** Shared base for all ETS key-usage flavors.  Keeps an intrusive list of
    open-addressed hash arrays; my_root is the newest (largest) array and
    lookups search from it toward the older, smaller ones. */
template<ets_key_usage_type ETS_key_type>
class ets_base: tbb::internal::no_copy {
protected:
    typedef tbb_thread::id key_type;
#if __TBB_PROTECTED_NESTED_CLASS_BROKEN
public:
#endif
    struct slot;

    //! Header of one open-addressed hash array; slot storage follows it contiguously.
    struct array {
        array* next;     // next (older, smaller) array in the search chain
        size_t lg_size;  // log2 of the number of slots
        //! k-th slot; slots live immediately after this header (see allocate()).
        slot& at( size_t k ) {
            return ((slot*)(void*)(this+1))[k];
        }
        size_t size() const {return size_t(1)<<lg_size;}
        size_t mask() const {return size()-1;}
        //! Preferred slot index for hash h: the top lg_size bits of h.
        size_t start( size_t h ) const {
            return h>>(8*sizeof(size_t)-lg_size);
        }
    };
    //! One table entry: owning thread id plus pointer to that thread's element.
    struct slot {
        key_type key;
        void* ptr;
        bool empty() const {return key == key_type();}
        bool match( key_type k ) const {return key == k;}
        //! Atomically try to take an empty slot for key k; true on success.
        bool claim( key_type k ) {
            // TODO: maybe claim ptr, because key_type is not guaranteed to fit into word size
            return atomic_compare_and_swap(key, k, key_type()) == key_type();
        }
    };
#if __TBB_PROTECTED_NESTED_CLASS_BROKEN
protected:
#endif

    //! Head of the array chain (current, largest array); NULL when empty.
    atomic<array*> my_root;
    //! Total number of elements inserted across all arrays in the chain.
    atomic<size_t> my_count;
    // Element/array storage is provided by the derived container class.
    virtual void* create_local() = 0;
    virtual void* create_array(size_t _size) = 0; // _size in bytes
    virtual void free_array(void* ptr, size_t _size) = 0; // _size in bytes
    //! Allocate an array with 2^lg_size slots and zero the slot storage.
    array* allocate( size_t lg_size ) {
        size_t n = size_t(1)<<lg_size;
        array* a = static_cast<array*>(create_array( sizeof(array)+n*sizeof(slot) ));
        a->lg_size = lg_size;
        std::memset( a+1, 0, n*sizeof(slot) );
        return a;
    }
    //! Release an array (header + slot storage) allocated by allocate().
    void free(array* a) {
        size_t n = size_t(1)<<(a->lg_size);
        free_array( (void *)a, size_t(sizeof(array)+n*sizeof(slot)) );
    }

    ets_base() {my_root=NULL; my_count=0;}
    virtual ~ets_base(); // g++ complains if this is not virtual
    void* table_lookup( bool& exists );
    void table_clear();
    // The following functions are not used in concurrent context,
    // so we don't need synchronization and ITT annotations there.
    //! Rebuild this (empty) table as a copy of 'other', cloning each element via add_element.
    void table_elementwise_copy( const ets_base& other,
                                 void*(*add_element)(ets_base&, void*) ) {
        __TBB_ASSERT(!my_root,NULL);
        __TBB_ASSERT(!my_count,NULL);
        if( !other.my_root ) return;
        // A single array as large as other's largest can hold all of other's entries.
        array* root = my_root = allocate(other.my_root->lg_size);
        root->next = NULL;
        my_count = other.my_count;
        size_t mask = root->mask();
        for( array* r=other.my_root; r; r=r->next ) {
            for( size_t i=0; i<r->size(); ++i ) {
                slot& s1 = r->at(i);
                if( !s1.empty() ) {
                    // Linear-probe for the key's slot; a match means a newer
                    // array in other's chain already supplied this thread's element.
                    for( size_t j = root->start(tbb::tbb_hash<key_type>()(s1.key)); ; j=(j+1)&mask ) {
                        slot& s2 = root->at(j);
                        if( s2.empty() ) {
                            s2.ptr = add_element(*this, s1.ptr);
                            s2.key = s1.key;
                            break;
                        }
                        else if( s2.match(s1.key) )
                            break;
                    }
                }
            }
        }
    }
    //! Exchange table state with 'other' (not for use in concurrent context).
    void table_swap( ets_base& other ) {
       __TBB_ASSERT(this!=&other, "Don't swap an instance with itself");
       tbb::internal::swap<relaxed>(my_root, other.my_root);
       tbb::internal::swap<relaxed>(my_count, other.my_count);
    }
};
155 
//! Out-of-line virtual destructor.
/** The derived class must already have emptied the table via table_clear()
    (it owns create_array/free_array, which are gone by the time we get here). */
template<ets_key_usage_type ETS_key_type>
ets_base<ETS_key_type>::~ets_base() {
    __TBB_ASSERT(!my_root, NULL);
}
160 
//! Free every array in the chain and reset the element count.
// NOTE(review): appears to assume no concurrent table_lookup is in flight
// (no synchronization beyond the atomic loads/stores) — verify callers.
template<ets_key_usage_type ETS_key_type>
void ets_base<ETS_key_type>::table_clear() {
    while( array* r = my_root ) {
        my_root = r->next;   // unlink the head, then release it
        free(r);
    }
    my_count = 0;
}
169 
//! Return the calling thread's element, creating it on first access.
/** Sets 'exists' to false exactly when the element had to be created.
    Lock-free: a thread that loses the root-swap race frees its candidate array.
    NOTE(review): this scraped excerpt appears to drop a few ITT-annotation
    lines inside the loops — verify against the full header. */
template<ets_key_usage_type ETS_key_type>
void* ets_base<ETS_key_type>::table_lookup( bool& exists ) {
    const key_type k = tbb::this_tbb_thread::get_id();

    __TBB_ASSERT(k != key_type(),NULL);
    void* found;
    size_t h = tbb::tbb_hash<key_type>()(k);
    // Search every array in the chain, newest (root) first, by linear probing
    // from the hash's preferred slot until an empty slot ends the cluster.
    for( array* r=my_root; r; r=r->next ) {
        size_t mask=r->mask();
        for(size_t i = r->start(h); ;i=(i+1)&mask) {
            slot& s = r->at(i);
            if( s.empty() ) break;
            if( s.match(k) ) {
                if( r==my_root ) {
                    // Success at top level
                    exists = true;
                    return s.ptr;
                } else {
                    // Success at some other level. Need to insert at top level.
                    exists = true;
                    found = s.ptr;
                    goto insert;
                }
            }
        }
    }
    // Key does not yet exist. The density of slots in the table does not exceed 0.5,
    // for if this will occur a new table is allocated with double the current table
    // size, which is swapped in as the new root table. So an empty slot is guaranteed.
    exists = false;
    found = create_local();
    {
        size_t c = ++my_count;
        array* r = my_root;
        if( !r || c>r->size()/2 ) {
            // Grow: pick the smallest lg_size s with 2^s >= 2*c (density <= 0.5).
            size_t s = r ? r->lg_size : 2;
            while( c>size_t(1)<<(s-1) ) ++s;
            array* a = allocate(s);
            for(;;) {
                a->next = r;
                // Publish our array as the new root unless someone beat us to it.
                array* new_r = my_root.compare_and_swap(a,r);
                if( new_r==r ) break;
                call_itt_notify(acquired, new_r);
                if( new_r->lg_size>=s ) {
                    // Another thread inserted an equal or bigger array, so our array is superfluous.
                    free(a);
                    break;
                }
                // The competing root is smaller than ours: retry on top of it.
                r = new_r;
            }
        }
    }
    insert:
    // Whether a slot has been found in an older table, or if it has been inserted at this level,
    // it has already been accounted for in the total. Guaranteed to be room for it, and it is
    // not present, so search for empty slot and use it.
    array* ir = my_root;
    size_t mask = ir->mask();
    for(size_t i = ir->start(h);;i=(i+1)&mask) {
        slot& s = ir->at(i);
        if( s.empty() ) {
            if( s.claim(k) ) {
                s.ptr = found;
                return found;
            }
        }
    }
}
242 
//! Specialization for ets_key_per_instance: caches lookups in a native TLS slot.
/** Each container instance owns one OS TLS key.  get_tls() is the fast path;
    a miss falls back to the shared hash table and then caches the pointer. */
template <>
class ets_base<ets_key_per_instance>: protected ets_base<ets_no_key> {
    typedef ets_base<ets_no_key> super;
#if _WIN32||_WIN64
#if __TBB_WIN8UI_SUPPORT
    // Windows Store apps: fiber-local storage API only.
    typedef DWORD tls_key_t;
    void create_key() { my_key = FlsAlloc(NULL); }
    void destroy_key() { FlsFree(my_key); }
    void set_tls(void * value) { FlsSetValue(my_key, (LPVOID)value); }
    void* get_tls() { return (void *)FlsGetValue(my_key); }
#else
    typedef DWORD tls_key_t;
    void create_key() { my_key = TlsAlloc(); }
    void destroy_key() { TlsFree(my_key); }
    void set_tls(void * value) { TlsSetValue(my_key, (LPVOID)value); }
    void* get_tls() { return (void *)TlsGetValue(my_key); }
#endif
#else
    // NOTE(review): key-allocation failure (e.g. key limit exhausted or
    // TLS_OUT_OF_INDEXES) is not checked on any platform here.
    typedef pthread_key_t tls_key_t;
    void create_key() { pthread_key_create(&my_key, NULL); }
    void destroy_key() { pthread_key_delete(my_key); }
    void set_tls( void * value ) const { pthread_setspecific(my_key, value); }
    void* get_tls() const { return pthread_getspecific(my_key); }
#endif
    tls_key_t my_key;
    virtual void* create_local() __TBB_override = 0;
    virtual void* create_array(size_t _size) __TBB_override = 0; // _size in bytes
    virtual void free_array(void* ptr, size_t _size) __TBB_override = 0; // size in bytes
protected:
    ets_base() {create_key();}
    ~ets_base() {destroy_key();}
    //! Fast path via TLS; on a miss, consult the shared table and cache the result.
    void* table_lookup( bool& exists ) {
        void* found = get_tls();
        if( found ) {
            exists=true;
        } else {
            found = super::table_lookup(exists);
            set_tls(found);
        }
        return found;
    }
    //! Recycle the TLS key so stale cached pointers cannot be observed afterwards.
    void table_clear() {
        destroy_key();
        create_key();
        super::table_clear();
    }
    void table_swap( ets_base& other ) {
       using std::swap;
       __TBB_ASSERT(this!=&other, "Don't swap an instance with itself");
       swap(my_key, other.my_key);
       super::table_swap(other);
    }
};
297 
//! Random-access iterator over the flat collection of thread-local elements.
/** Container's elements expose a value() member function returning Value*.
    The element address is cached in my_value and invalidated by every
    operation that changes the position.
    Fix: operator[] used '.value' (a member function name) where the element's
    value() member function must be *called*, as operator*() does; the original
    form could not compile if ever instantiated. */
template< typename Container, typename Value >
class enumerable_thread_specific_iterator
#if defined(_WIN64) && defined(_MSC_VER)
    // Ensure that Microsoft's internal template function _Val_type works correctly.
    : public std::iterator<std::random_access_iterator_tag,Value>
#endif /* defined(_WIN64) && defined(_MSC_VER) */
{
    Container *my_container;                  // underlying collection (never owned)
    typename Container::size_type my_index;   // current position
    mutable Value *my_value;                  // lazy cache of the element's payload address

    template<typename C, typename T>
    friend enumerable_thread_specific_iterator<C,T>
    operator+( ptrdiff_t offset, const enumerable_thread_specific_iterator<C,T>& v );

    template<typename C, typename T, typename U>
    friend bool operator==( const enumerable_thread_specific_iterator<C,T>& i,
                            const enumerable_thread_specific_iterator<C,U>& j );

    template<typename C, typename T, typename U>
    friend bool operator<( const enumerable_thread_specific_iterator<C,T>& i,
                           const enumerable_thread_specific_iterator<C,U>& j );

    template<typename C, typename T, typename U>
    friend ptrdiff_t operator-( const enumerable_thread_specific_iterator<C,T>& i,
                                const enumerable_thread_specific_iterator<C,U>& j );

    template<typename C, typename U>
    friend class enumerable_thread_specific_iterator;

public:

    //! Iterator over 'container' positioned at 'index'; constness is cast away internally.
    enumerable_thread_specific_iterator( const Container &container, typename Container::size_type index ) :
        my_container(&const_cast<Container &>(container)), my_index(index), my_value(NULL) {}

    //! Default constructor: singular iterator.
    enumerable_thread_specific_iterator() : my_container(NULL), my_index(0), my_value(NULL) {}

    //! Converting constructor (e.g. iterator -> const_iterator).
    template<typename U>
    enumerable_thread_specific_iterator( const enumerable_thread_specific_iterator<Container, U>& other ) :
        my_container( other.my_container ), my_index( other.my_index), my_value( const_cast<Value *>(other.my_value) ) {}

    enumerable_thread_specific_iterator operator+( ptrdiff_t offset ) const {
        return enumerable_thread_specific_iterator(*my_container, my_index + offset);
    }

    enumerable_thread_specific_iterator &operator+=( ptrdiff_t offset ) {
        my_index += offset;
        my_value = NULL;    // position changed: drop the cached element address
        return *this;
    }

    enumerable_thread_specific_iterator operator-( ptrdiff_t offset ) const {
        return enumerable_thread_specific_iterator( *my_container, my_index-offset );
    }

    enumerable_thread_specific_iterator &operator-=( ptrdiff_t offset ) {
        my_index -= offset;
        my_value = NULL;
        return *this;
    }

    Value& operator*() const {
        Value* value = my_value;
        if( !value ) {
            value = my_value = (*my_container)[my_index].value();
        }
        __TBB_ASSERT( value==(*my_container)[my_index].value(), "corrupt cache" );
        return *value;
    }

    Value& operator[]( ptrdiff_t k ) const {
        // value() must be called (cf. operator*); the cache is deliberately not used
        // because this accesses an element other than the current one.
        return *(*my_container)[my_index + k].value();
    }

    Value* operator->() const {return &operator*();}

    enumerable_thread_specific_iterator& operator++() {
        ++my_index;
        my_value = NULL;
        return *this;
    }

    enumerable_thread_specific_iterator& operator--() {
        --my_index;
        my_value = NULL;
        return *this;
    }

    //! Post increment
    enumerable_thread_specific_iterator operator++(int) {
        enumerable_thread_specific_iterator result = *this;
        ++my_index;
        my_value = NULL;
        return result;
    }

    //! Post decrement
    enumerable_thread_specific_iterator operator--(int) {
        enumerable_thread_specific_iterator result = *this;
        --my_index;
        my_value = NULL;
        return result;
    }

    // STL support
    typedef ptrdiff_t difference_type;
    typedef Value value_type;
    typedef Value* pointer;
    typedef Value& reference;
    typedef std::random_access_iterator_tag iterator_category;
};
413 
414  template<typename Container, typename T>
415  enumerable_thread_specific_iterator<Container,T>
416  operator+( ptrdiff_t offset, const enumerable_thread_specific_iterator<Container,T>& v ) {
417  return enumerable_thread_specific_iterator<Container,T>( v.my_container, v.my_index + offset );
418  }
419 
420  template<typename Container, typename T, typename U>
421  bool operator==( const enumerable_thread_specific_iterator<Container,T>& i,
422  const enumerable_thread_specific_iterator<Container,U>& j ) {
423  return i.my_index==j.my_index && i.my_container == j.my_container;
424  }
425 
426  template<typename Container, typename T, typename U>
427  bool operator!=( const enumerable_thread_specific_iterator<Container,T>& i,
428  const enumerable_thread_specific_iterator<Container,U>& j ) {
429  return !(i==j);
430  }
431 
432  template<typename Container, typename T, typename U>
433  bool operator<( const enumerable_thread_specific_iterator<Container,T>& i,
434  const enumerable_thread_specific_iterator<Container,U>& j ) {
435  return i.my_index<j.my_index;
436  }
437 
438  template<typename Container, typename T, typename U>
439  bool operator>( const enumerable_thread_specific_iterator<Container,T>& i,
440  const enumerable_thread_specific_iterator<Container,U>& j ) {
441  return j<i;
442  }
443 
444  template<typename Container, typename T, typename U>
445  bool operator>=( const enumerable_thread_specific_iterator<Container,T>& i,
446  const enumerable_thread_specific_iterator<Container,U>& j ) {
447  return !(i<j);
448  }
449 
450  template<typename Container, typename T, typename U>
451  bool operator<=( const enumerable_thread_specific_iterator<Container,T>& i,
452  const enumerable_thread_specific_iterator<Container,U>& j ) {
453  return !(j<i);
454  }
455 
456  template<typename Container, typename T, typename U>
457  ptrdiff_t operator-( const enumerable_thread_specific_iterator<Container,T>& i,
458  const enumerable_thread_specific_iterator<Container,U>& j ) {
459  return i.my_index-j.my_index;
460  }
461 
//! Input iterator that flattens a container of containers into one sequence.
/** Invariant maintained by the code below: inner_iter is only meaningful while
    my_segcont != NULL and outer_iter != my_segcont->end(); in that state it
    points into a non-empty inner container. */
template<typename SegmentedContainer, typename Value >
class segmented_iterator
#if defined(_WIN64) && defined(_MSC_VER)
    // Ensure that Microsoft's internal template function _Val_type works correctly.
    : public std::iterator<std::input_iterator_tag, Value>
#endif
{
    template<typename C, typename T, typename U>
    friend bool operator==(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);

    template<typename C, typename T, typename U>
    friend bool operator!=(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);

    template<typename C, typename U>
    friend class segmented_iterator;

public:

    //! Universal "end"/unbound iterator: not attached to any container.
    segmented_iterator() {my_segcont = NULL;}

    //! Bound to a container but positioned at its outer end; assign an outer
    //! iterator (operator= below) to start traversal.
    segmented_iterator( const SegmentedContainer& _segmented_container ) :
        my_segcont(const_cast<SegmentedContainer*>(&_segmented_container)),
        outer_iter(my_segcont->end()) { }

    ~segmented_iterator() {}

    typedef typename SegmentedContainer::iterator outer_iterator;
    typedef typename SegmentedContainer::value_type InnerContainer;
    typedef typename InnerContainer::iterator inner_iterator;

    // STL support
    typedef ptrdiff_t difference_type;
    typedef Value value_type;
    typedef typename SegmentedContainer::size_type size_type;
    typedef Value* pointer;
    typedef Value& reference;
    typedef std::input_iterator_tag iterator_category;

    // Copy Constructor
    template<typename U>
    segmented_iterator(const segmented_iterator<SegmentedContainer, U>& other) :
        my_segcont(other.my_segcont),
        outer_iter(other.outer_iter),
        // can we assign a default-constructed iterator to inner if we're at the end?
        // NOTE(review): unlike operator= below, this copies inner_iter unconditionally,
        // even when other is at the outer end and inner_iter may be singular.
        inner_iter(other.inner_iter)
    {}

    // assignment
    template<typename U>
    segmented_iterator& operator=( const segmented_iterator<SegmentedContainer, U>& other) {
        if(this != &other) {
            my_segcont = other.my_segcont;
            outer_iter = other.outer_iter;
            // inner_iter is only copied when valid (see class invariant above).
            if(outer_iter != my_segcont->end()) inner_iter = other.inner_iter;
        }
        return *this;
    }

    // allow assignment of outer iterator to segmented iterator. Once it is
    // assigned, move forward until a non-empty inner container is found or
    // the end of the outer container is reached.
    segmented_iterator& operator=(const outer_iterator& new_outer_iter) {
        __TBB_ASSERT(my_segcont != NULL, NULL);
        // check that this iterator points to something inside the segmented container
        for(outer_iter = new_outer_iter ;outer_iter!=my_segcont->end(); ++outer_iter) {
            if( !outer_iter->empty() ) {
                inner_iter = outer_iter->begin();
                break;
            }
        }
        return *this;
    }

    // pre-increment
    segmented_iterator& operator++() {
        advance_me();
        return *this;
    }

    // post-increment
    segmented_iterator operator++(int) {
        segmented_iterator tmp = *this;
        operator++();
        return tmp;
    }

    //! Compare against a raw outer iterator: positions match and, unless both
    //! are at the outer end, we are at the start of that inner container.
    bool operator==(const outer_iterator& other_outer) const {
        __TBB_ASSERT(my_segcont != NULL, NULL);
        return (outer_iter == other_outer &&
                (outer_iter == my_segcont->end() || inner_iter == outer_iter->begin()));
    }

    bool operator!=(const outer_iterator& other_outer) const {
        return !operator==(other_outer);

    }

    // (i)* RHS
    reference operator*() const {
        __TBB_ASSERT(my_segcont != NULL, NULL);
        __TBB_ASSERT(outer_iter != my_segcont->end(), "Dereferencing a pointer at end of container");
        __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // should never happen
        return *inner_iter;
    }

    // i->
    pointer operator->() const { return &operator*();}

private:
    SegmentedContainer* my_segcont;   // bound container, or NULL when unbound
    outer_iterator outer_iter;        // current inner container
    inner_iterator inner_iter;        // current element (valid per class invariant)

    //! Step to the next element, skipping over empty inner containers.
    void advance_me() {
        __TBB_ASSERT(my_segcont != NULL, NULL);
        __TBB_ASSERT(outer_iter != my_segcont->end(), NULL); // not true if there are no inner containers
        __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // not true if the inner containers are all empty.
        ++inner_iter;
        while(inner_iter == outer_iter->end() && ++outer_iter != my_segcont->end()) {
            inner_iter = outer_iter->begin();
        }
    }
}; // segmented_iterator
584 
585  template<typename SegmentedContainer, typename T, typename U>
586  bool operator==( const segmented_iterator<SegmentedContainer,T>& i,
587  const segmented_iterator<SegmentedContainer,U>& j ) {
588  if(i.my_segcont != j.my_segcont) return false;
589  if(i.my_segcont == NULL) return true;
590  if(i.outer_iter != j.outer_iter) return false;
591  if(i.outer_iter == i.my_segcont->end()) return true;
592  return i.inner_iter == j.inner_iter;
593  }
594 
595  // !=
596  template<typename SegmentedContainer, typename T, typename U>
597  bool operator!=( const segmented_iterator<SegmentedContainer,T>& i,
598  const segmented_iterator<SegmentedContainer,U>& j ) {
599  return !(i==j);
600  }
601 
602  template<typename T>
603  struct construct_by_default: tbb::internal::no_assign {
604  void construct(void*where) {new(where) T();} // C++ note: the () in T() ensure zero initialization.
605  construct_by_default( int ) {}
606  };
607 
608  template<typename T>
609  struct construct_by_exemplar: tbb::internal::no_assign {
610  const T exemplar;
611  void construct(void*where) {new(where) T(exemplar);}
612  construct_by_exemplar( const T& t ) : exemplar(t) {}
613 #if __TBB_ETS_USE_CPP11
614  construct_by_exemplar( T&& t ) : exemplar(std::move(t)) {}
615 #endif
616  };
617 
618  template<typename T, typename Finit>
619  struct construct_by_finit: tbb::internal::no_assign {
620  Finit f;
621  void construct(void* where) {new(where) T(f());}
622  construct_by_finit( const Finit& f_ ) : f(f_) {}
623 #if __TBB_ETS_USE_CPP11
624  construct_by_finit( Finit&& f_ ) : f(std::move(f_)) {}
625 #endif
626  };
627 
#if __TBB_ETS_USE_CPP11
//! Construction recipe: build each thread-local element as T(args...)
//! from an argument pack captured when the container was constructed.
template<typename T, typename... P>
struct construct_by_args: tbb::internal::no_assign {
    internal::stored_pack<P...> pack;   // the captured constructor arguments
    void construct(void* where) {
        // Unpack the stored arguments and forward them, as const refs,
        // to T's constructor via placement new at 'where'.
        internal::call( [where](const typename strip<P>::type&... args ){
            new(where) T(args...);
        }, pack );
    }
    construct_by_args( P&& ... args ) : pack(std::forward<P>(args)...) {}
};
#endif
640 
// Type-erased storage for the element-construction recipe.
// TODO: consider removing the template parameter T here and in callback_leaf
template<typename T>
class callback_base {
public:
    //! Virtual destructor (also keeps GCC's warning quiet); real cleanup is destroy().
    virtual ~callback_base() { }
    //! Placement-construct a T at 'where' according to the stored recipe.
    virtual void construct(void* where) = 0;
    //! Produce a heap-allocated copy of *this.
    virtual callback_base* clone() const = 0;
    //! Destruct *this and release its storage.
    virtual void destroy() = 0;
};
655 
//! Concrete callback: marries the callback_base interface to one Constructor policy.
/** Instances must be created via make() and released via destroy(); both use
    tbb_allocator for the storage of the leaf object itself. */
template <typename T, typename Constructor>
class callback_leaf: public callback_base<T>, Constructor {
#if __TBB_ETS_USE_CPP11
    template<typename... P> callback_leaf( P&& ... params ) : Constructor(std::forward<P>(params)...) {}
#else
    template<typename X> callback_leaf( const X& x ) : Constructor(x) {}
#endif
    // TODO: make the construction/destruction consistent (use allocator.construct/destroy)
    typedef typename tbb::tbb_allocator<callback_leaf> my_allocator_type;

    callback_base<T>* clone() const __TBB_override {
        // Copy-construct a fresh leaf from *this (duplicates the Constructor state).
        return make(*this);
    }

    void destroy() __TBB_override {
        // Run the destructor, then free the raw storage — mirror image of make().
        my_allocator_type().destroy(this);
        my_allocator_type().deallocate(this,1);
    }

    void construct(void* where) __TBB_override {
        // Delegate element construction to the policy base.
        Constructor::construct(where);
    }
public:
#if __TBB_ETS_USE_CPP11
    //! Allocate and placement-construct a leaf, forwarding args to the Constructor policy.
    template<typename... P>
    static callback_base<T>* make( P&& ... params ) {
        void* where = my_allocator_type().allocate(1);
        return new(where) callback_leaf( std::forward<P>(params)... );
    }
#else
    template<typename X>
    static callback_base<T>* make( const X& x ) {
        void* where = my_allocator_type().allocate(1);
        return new(where) callback_leaf(x);
    }
#endif
};
693 
695 
703  template<typename U>
704  struct ets_element {
705  tbb::aligned_space<U> my_space;
706  bool is_built;
707  ets_element() { is_built = false; } // not currently-built
708  U* value() { return my_space.begin(); }
709  U* value_committed() { is_built = true; return my_space.begin(); }
710  ~ets_element() {
711  if(is_built) {
712  my_space.begin()->~U();
713  is_built = false;
714  }
715  }
716  };
717 
718  // A predicate that can be used for a compile-time compatibility check of ETS instances
719  // Ideally, it should have been declared inside the ETS class, but unfortunately
720  // in that case VS2013 does not enable the variadic constructor.
// Primary template: an arbitrary type is not a compatible ETS.
template<typename T, typename ETS> struct is_compatible_ets { static const bool value = false; };
// Compatible only when the ETS's element type U is exactly T
// (allocator and key-usage parameters are allowed to differ).
template<typename T, typename U, typename A, ets_key_usage_type C>
struct is_compatible_ets< T, enumerable_thread_specific<U,A,C> > { static const bool value = internal::is_same_type<T,U>::value; };
724 
#if __TBB_ETS_USE_CPP11
// A predicate that checks whether, for a variable 'foo' of type T, foo() is a valid expression
template <typename T>
class is_callable_no_args {
private:
    // Two return types of distinguishable size, inspected via sizeof below.
    typedef char yes[1];
    typedef char no [2];

    // Preferred overload: participates only when declval<U>()() is well-formed
    // (SFINAE on the pointer parameter's type).
    template<typename U> static yes& decide( decltype(declval<U>()())* );
    // Fallback overload, chosen when the first is discarded.
    template<typename U> static no& decide(...);
public:
    // Overload resolution happens at compile time; no code is ever executed.
    static const bool value = (sizeof(decide<T>(NULL)) == sizeof(yes));
};
#endif
739 
740  } // namespace internal
742 
744 
//! The enumerable_thread_specific container: one lazily-created element of type T per thread.
/** NOTE(review): this excerpt is a doxygen scrape with several declaration
    lines dropped; each gap is flagged below and must be reconciled against
    the full header before this text is trusted as complete. */
template <typename T,
    typename Allocator=cache_aligned_allocator<T>,
    ets_key_usage_type ETS_key_type=ets_no_key >
class enumerable_thread_specific: internal::ets_base<ETS_key_type> {

    template<typename U, typename A, ets_key_usage_type C> friend class enumerable_thread_specific;

    //! Elements are padded (presumably to avoid false sharing — see internal::padded).
    typedef internal::padded< internal::ets_element<T> > padded_element;

    //! Nested range class built on blocked_range<I> (its ctors delegate to it).
    template<typename I>
    // NOTE(review): the nested class head (generic_range_type : blocked_range<I>) is not visible in this excerpt.
    public:
        typedef T value_type;
        typedef T& reference;
        typedef const T& const_reference;
        typedef I iterator;
        typedef ptrdiff_t difference_type;
        generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range<I>(begin_,end_,grainsize_) {}
        template<typename U>
        generic_range_type( const generic_range_type<U>& r) : blocked_range<I>(r.begin(),r.end(),r.grainsize()) {}
    };

    typedef typename Allocator::template rebind< padded_element >::other padded_allocator_type;
    // NOTE(review): the internal_collection_type typedef (used by the iterator
    // typedefs below) is not visible in this excerpt.

    //! Recipe used to construct each new thread-local element.
    internal::callback_base<T> *my_construct_callback;

    // NOTE(review): the declaration of my_locals (the collection grown by the
    // create_local* functions below) is not visible in this excerpt.

    // TODO: consider unifying the callback mechanism for all create_local* methods below
    // (likely non-compatible and requires interface version increase)
    // NOTE(review): the create_local() signature line is not visible in this excerpt.
        padded_element& lref = *my_locals.grow_by(1);
        my_construct_callback->construct(lref.value());
        return lref.value_committed();
    }

    //! Hook for table_elementwise_copy: copy-construct from *p into a fresh slot of 'base'.
    static void* create_local_by_copy( internal::ets_base<ets_no_key>& base, void* p ) {
        enumerable_thread_specific& ets = static_cast<enumerable_thread_specific&>(base);
        padded_element& lref = *ets.my_locals.grow_by(1);
        new(lref.value()) T(*static_cast<T*>(p));
        return lref.value_committed();
    }

#if __TBB_ETS_USE_CPP11
    //! Same as create_local_by_copy, but move-constructs (used by internal_move).
    static void* create_local_by_move( internal::ets_base<ets_no_key>& base, void* p ) {
        enumerable_thread_specific& ets = static_cast<enumerable_thread_specific&>(base);
        padded_element& lref = *ets.my_locals.grow_by(1);
        new(lref.value()) T(std::move(*static_cast<T*>(p)));
        return lref.value_committed();
    }
#endif

    typedef typename Allocator::template rebind< uintptr_t >::other array_allocator_type;

    // _size is in bytes
    //! Backing storage for the thread-id hash table, rounded up to whole uintptr_t's.
    void* create_array(size_t _size) __TBB_override {
        size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t);
        return array_allocator_type().allocate(nelements);
    }

    void free_array( void* _ptr, size_t _size) __TBB_override {
        size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t);
        array_allocator_type().deallocate( reinterpret_cast<uintptr_t *>(_ptr),nelements);
    }

public:

    // Basic types
    typedef Allocator allocator_type;
    typedef T value_type;
    typedef T& reference;
    typedef const T& const_reference;
    typedef T* pointer;
    typedef const T* const_pointer;
    // NOTE(review): size_type/difference_type typedefs are not visible in this excerpt.

    // Iterator types
    typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, value_type > iterator;
    typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, const value_type > const_iterator;

    // Parallel range types
    typedef generic_range_type< iterator > range_type;
    typedef generic_range_type< const_iterator > const_range_type;

    //! Default constructor: each thread-local element will be default (value) constructed.
    enumerable_thread_specific() : my_construct_callback(
        internal::callback_leaf<T,internal::construct_by_default<T> >::make(/*dummy argument*/0)
    ){}

    //! Constructor taking a factory functor: each element is built as T(finit()).
    template <typename Finit
#if __TBB_ETS_USE_CPP11
    // NOTE(review): an enable_if-style constraint line is dropped from this excerpt here.
#endif
    >
    explicit enumerable_thread_specific( Finit finit ) : my_construct_callback(
        internal::callback_leaf<T,internal::construct_by_finit<T,Finit> >::make( tbb::internal::move(finit) )
    ){}

    //! Constructor taking an exemplar: each element is copy-constructed from it.
    explicit enumerable_thread_specific( const T& exemplar ) : my_construct_callback(
        internal::callback_leaf<T,internal::construct_by_exemplar<T> >::make( exemplar )
    ){}

#if __TBB_ETS_USE_CPP11
    //! Move-in exemplar overload.
    explicit enumerable_thread_specific( T&& exemplar ) : my_construct_callback(
        internal::callback_leaf<T,internal::construct_by_exemplar<T> >::make( std::move(exemplar) )
    ){}

    //! Variadic constructor: each element is built as T(arg1, args...).
    template <typename P1, typename... P,
    // NOTE(review): enable_if constraint lines (excluding exemplar/finit-like
    // first arguments) are dropped from this excerpt here.
    >::type>
    enumerable_thread_specific( P1&& arg1, P&& ... args ) : my_construct_callback(
        internal::callback_leaf<T,internal::construct_by_args<T,P1,P...> >::make( std::forward<P1>(arg1), std::forward<P>(args)... )
    ){}
#endif

    //! Destructor.
    // NOTE(review): the destructor signature line is not visible in this excerpt.
        // Callback may be NULL after a move (see internal_move below).
        if(my_construct_callback) my_construct_callback->destroy();
        // Deallocate the hash table before overridden free_array() becomes inaccessible
        this->internal::ets_base<ets_no_key>::table_clear();
    }

    //! Returns a reference to the calling thread's element, creating it if necessary.
    // NOTE(review): the 'reference local()' signature line is not visible in this excerpt.
        bool exists;
        return local(exists);
    }

    //! Same as local(); 'exists' reports whether the element already existed.
    reference local(bool& exists) {
        void* ptr = this->table_lookup(exists);
        return *(T*)ptr;
    }

    //! Number of thread-local elements created so far.
    size_type size() const { return my_locals.size(); }

    //! True if no thread-local element has been created yet.
    bool empty() const { return my_locals.empty(); }

    // Iteration over all thread-local elements.
    iterator begin() { return iterator( my_locals, 0 ); }
    iterator end() { return iterator(my_locals, my_locals.size() ); }

    const_iterator begin() const { return const_iterator(my_locals, 0); }

    const_iterator end() const { return const_iterator(my_locals, my_locals.size()); }

    //! Range over the elements, for parallel algorithms.
    range_type range( size_t grainsize=1 ) { return range_type( begin(), end(), grainsize ); }

    const_range_type range( size_t grainsize=1 ) const { return const_range_type( begin(), end(), grainsize ); }

    //! Destroys all thread-local elements (the construction callback is kept).
    void clear() {
        my_locals.clear();
        this->table_clear();
        // callback is not destroyed
    }

private:

    template<typename A2, ets_key_usage_type C2>
    // NOTE(review): the internal_copy signature line is not visible in this excerpt.
#if __TBB_ETS_USE_CPP11 && TBB_USE_ASSERT
        // this tests is_compatible_ets
        __TBB_STATIC_ASSERT( (internal::is_compatible_ets<T, typename internal::strip<decltype(other)>::type>::value), "is_compatible_ets fails" );
#endif
        // Initialize my_construct_callback first, so that it is valid even if rest of this routine throws an exception.
        my_construct_callback = other.my_construct_callback->clone();
        __TBB_ASSERT(my_locals.size()==0,NULL);
        my_locals.reserve(other.size());
        this->table_elementwise_copy( other, create_local_by_copy );
    }

    // NOTE(review): the internal_swap signature line is not visible in this excerpt.
        using std::swap;
        __TBB_ASSERT( this!=&other, NULL );
        swap(my_construct_callback, other.my_construct_callback);
        // concurrent_vector::swap() preserves storage space,
        // so addresses to the vector kept in ETS hash table remain valid.
        swap(my_locals, other.my_locals);
        this->internal::ets_base<ETS_key_type>::table_swap(other);
    }

#if __TBB_ETS_USE_CPP11
    template<typename A2, ets_key_usage_type C2>
    // NOTE(review): the internal_move signature line is not visible in this excerpt.
#if TBB_USE_ASSERT
        // this tests is_compatible_ets
        __TBB_STATIC_ASSERT( (internal::is_compatible_ets<T, typename internal::strip<decltype(other)>::type>::value), "is_compatible_ets fails" );
#endif
        // Steal the callback; the source is left NULL (its destructor checks for that).
        my_construct_callback = other.my_construct_callback;
        other.my_construct_callback = NULL;
        __TBB_ASSERT(my_locals.size()==0,NULL);
        my_locals.reserve(other.size());
        this->table_elementwise_copy( other, create_local_by_move );
    }
#endif

public:

    //! Copy constructor.
    // NOTE(review): the copy-constructor signature line is not visible in this excerpt.
        : internal::ets_base<ETS_key_type>() /* prevents GCC warnings with -Wextra */
    {
        internal_copy(other);
    }

    //! Cross-type copy constructor (different allocator / key usage).
    template<typename Alloc, ets_key_usage_type Cachetype>
    // NOTE(review): its signature line is not visible in this excerpt.
    {
        internal_copy(other);
    }

#if __TBB_ETS_USE_CPP11
    //! Move constructor: callback is NULL-initialized, then state is swapped in.
    enumerable_thread_specific( enumerable_thread_specific&& other ) : my_construct_callback()
    {
        internal_swap(other);
    }

    //! Cross-type move constructor.
    template<typename Alloc, ets_key_usage_type Cachetype>
    // NOTE(review): its signature line is not visible in this excerpt.
    {
        internal_move(std::move(other));
    }
#endif

    //! Copy assignment.
    // NOTE(review): the operator= signature line is not visible in this excerpt.
    {
        if( this != &other ) {
            this->clear();
            my_construct_callback->destroy();
            internal_copy( other );
        }
        return *this;
    }

    //! Cross-type copy assignment.
    template<typename Alloc, ets_key_usage_type Cachetype>
    // NOTE(review): its signature line is not visible in this excerpt.
    {
        __TBB_ASSERT( static_cast<void*>(this)!=static_cast<const void*>(&other), NULL ); // Objects of different types
        this->clear();
        my_construct_callback->destroy();
        internal_copy(other);
        return *this;
    }

#if __TBB_ETS_USE_CPP11
    //! Move assignment.
    // NOTE(review): its signature line is not visible in this excerpt.
    {
        if( this != &other )
            internal_swap(other);
        return *this;
    }

    //! Cross-type move assignment.
    template<typename Alloc, ets_key_usage_type Cachetype>
    // NOTE(review): its signature line is not visible in this excerpt.
    {
        __TBB_ASSERT( static_cast<void*>(this)!=static_cast<const void*>(&other), NULL ); // Objects of different types
        this->clear();
        my_construct_callback->destroy();
        internal_move(std::move(other));
        return *this;
    }
#endif

    // combine_func_t has signature T(T,T) or T(const T&, const T&)
    //! Left-fold of all thread-local elements with f_combine.
    template <typename combine_func_t>
    T combine(combine_func_t f_combine) {
        if(begin() == end()) {
            // No element was ever created: synthesize one with the stored recipe.
            internal::ets_element<T> location;
            my_construct_callback->construct(location.value());
            return *location.value_committed();
        }
        const_iterator ci = begin();
        T my_result = *ci;
        while(++ci != end())
            my_result = f_combine( my_result, *ci );
        return my_result;
    }

    // combine_func_t takes T by value or by [const] reference, and returns nothing
    //! Apply f_combine to every thread-local element.
    template <typename combine_func_t>
    void combine_each(combine_func_t f_combine) {
        for(iterator ci = begin(); ci != end(); ++ci) {
            f_combine( *ci );
        }
    }

}; // enumerable_thread_specific
1066 
1067  template< typename Container >
1068  class flattened2d {
1069 
1070  // This intermediate typedef is to address issues with VC7.1 compilers
1071  typedef typename Container::value_type conval_type;
1072 
1073  public:
1074 
1076  typedef typename conval_type::size_type size_type;
1077  typedef typename conval_type::difference_type difference_type;
1078  typedef typename conval_type::allocator_type allocator_type;
1079  typedef typename conval_type::value_type value_type;
1080  typedef typename conval_type::reference reference;
1081  typedef typename conval_type::const_reference const_reference;
1082  typedef typename conval_type::pointer pointer;
1083  typedef typename conval_type::const_pointer const_pointer;
1084 
1085  typedef typename internal::segmented_iterator<Container, value_type> iterator;
1086  typedef typename internal::segmented_iterator<Container, const value_type> const_iterator;
1087 
1088  flattened2d( const Container &c, typename Container::const_iterator b, typename Container::const_iterator e ) :
1089  my_container(const_cast<Container*>(&c)), my_begin(b), my_end(e) { }
1090 
1091  explicit flattened2d( const Container &c ) :
1092  my_container(const_cast<Container*>(&c)), my_begin(c.begin()), my_end(c.end()) { }
1093 
1094  iterator begin() { return iterator(*my_container) = my_begin; }
1095  iterator end() { return iterator(*my_container) = my_end; }
1096  const_iterator begin() const { return const_iterator(*my_container) = my_begin; }
1097  const_iterator end() const { return const_iterator(*my_container) = my_end; }
1098 
1099  size_type size() const {
1100  size_type tot_size = 0;
1101  for(typename Container::const_iterator i = my_begin; i != my_end; ++i) {
1102  tot_size += i->size();
1103  }
1104  return tot_size;
1105  }
1106 
1107  private:
1108 
1109  Container *my_container;
1110  typename Container::const_iterator my_begin;
1111  typename Container::const_iterator my_end;
1112 
1113  };
1114 
1115  template <typename Container>
1116  flattened2d<Container> flatten2d(const Container &c, const typename Container::const_iterator b, const typename Container::const_iterator e) {
1117  return flattened2d<Container>(c, b, e);
1118  }
1119 
1120  template <typename Container>
1121  flattened2d<Container> flatten2d(const Container &c) {
1122  return flattened2d<Container>(c);
1123  }
1124 
1125 } // interface6
1126 
1127 namespace internal {
1128 using interface6::internal::segmented_iterator;
1129 }
1130 
1131 using interface6::enumerable_thread_specific;
1132 using interface6::flattened2d;
1133 using interface6::flatten2d;
1134 
1135 } // namespace tbb
1136 
1137 #endif
bool operator<=(const concurrent_vector< T, A1 > &a, const concurrent_vector< T, A2 > &b)
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp end
void reserve(size_type n)
Allocate enough space to grow to size n without having to allocate more memory later.
conval_type::size_type size_type
Basic types.
#define __TBB_override
Definition: tbb_stddef.h:244
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function h
void internal_copy(const enumerable_thread_specific< T, A2, C2 > &other)
bool operator>=(const concurrent_vector< T, A1 > &a, const concurrent_vector< T, A2 > &b)
generic_range_type< const_iterator > const_range_type
enumerable_thread_specific & operator=(enumerable_thread_specific &&other)
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id id
internal::padded< internal::ets_element< T > > padded_element
T * begin() const
Pointer to beginning of array.
Definition: aligned_space.h:39
enumerable_thread_specific(const enumerable_thread_specific< T, Alloc, Cachetype > &other)
A range over which to iterate.
Definition: blocked_range.h:49
#define __TBB_STATIC_ASSERT(condition, msg)
Definition: tbb_stddef.h:536
flattened2d(const Container &c, typename Container::const_iterator b, typename Container::const_iterator e)
ets_key_usage_type
enum for selecting between single key and key-per-instance versions
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:169
reference local()
returns reference to local, discarding exists
bool operator<(const concurrent_vector< T, A1 > &a, const concurrent_vector< T, A2 > &b)
tick_count::interval_t operator-(const tick_count &t1, const tick_count &t0)
Definition: tick_count.h:130
enumerable_thread_specific(const T &exemplar)
Constructor with exemplar. Each local instance of T is copy-constructed from the exemplar.
enumerable_thread_specific(enumerable_thread_specific< T, Alloc, Cachetype > &&other)
internal::segmented_iterator< Container, value_type > iterator
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp begin
void internal_swap(enumerable_thread_specific &other)
Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5.
Definition: tbb_allocator.h:62
const_iterator end() const
end const iterator
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type type
Base class for types that should not be copied or assigned.
Definition: tbb_stddef.h:335
bool operator>(const concurrent_vector< T, A1 > &a, const concurrent_vector< T, A2 > &b)
void swap(atomic< T > &lhs, atomic< T > &rhs)
Definition: atomic.h:539
const_iterator begin() const
begin const iterator
A generic range, used to create range objects from the iterators.
void internal_move(enumerable_thread_specific< T, A2, C2 > &&other)
vector_iterator< Container, T > operator+(ptrdiff_t offset, const vector_iterator< Container, T > &v)
void const char const char int ITT_FORMAT __itt_group_sync p
Base class for types that should not be assigned.
Definition: tbb_stddef.h:324
reference local(bool &exists)
Returns reference to calling thread's local copy, creating one if necessary.
internal::enumerable_thread_specific_iterator< internal_collection_type, const value_type > const_iterator
bool empty() const
true if there have been no local copies created
enumerable_thread_specific(Finit finit)
Constructor with initializer functor. Each local instance of T is constructed by T(finit()).
tbb::concurrent_vector< padded_element, padded_allocator_type > internal_collection_type
void free_array(void *_ptr, size_t _size) __TBB_override
Allocator::template rebind< padded_element >::other padded_allocator_type
enumerable_thread_specific(P1 &&arg1, P &&... args)
Variadic constructor with initializer arguments. Each local instance of T is constructed by T(args....
range_type range(size_t grainsize=1)
Get range for parallel algorithms.
bool operator!=(const memory_pool_allocator< T, P > &a, const memory_pool_allocator< U, P > &b)
Definition: memory_pool.h:187
conval_type::const_pointer const_pointer
const_range_type range(size_t grainsize=1) const
Get const range for parallel algorithms.
static void * create_local_by_copy(internal::ets_base< ets_no_key > &base, void *p)
The graph class.
void swap(concurrent_hash_map< Key, T, HashCompare, A > &a, concurrent_hash_map< Key, T, HashCompare, A > &b)
internal_collection_type::difference_type difference_type
conval_type::difference_type difference_type
Allocator::template rebind< uintptr_t >::other array_allocator_type
size_type size() const
Get the number of local copies.
conval_type::const_reference const_reference
Dummy type that distinguishes splitting constructor from copy constructor.
Definition: tbb_stddef.h:399
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long value
void call(F &&f, Pack &&p)
Calls the given function with arguments taken from a stored_pack.
void call_itt_notify(notify_type, void *)
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle * key
enumerable_thread_specific(const enumerable_thread_specific &other)
tbb_thread::id get_id()
Definition: tbb_thread.h:321
enumerable_thread_specific & operator=(enumerable_thread_specific< T, Alloc, Cachetype > &&other)
conval_type::allocator_type allocator_type
void move(tbb_thread &t1, tbb_thread &t2)
Definition: tbb_thread.h:309
bool empty() const
Return false if vector is not empty or has elements under construction at least.
void const char const char int ITT_FORMAT __itt_group_sync s
internal::concurrent_vector_base_v3::size_type size_type
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t size
enumerable_thread_specific & operator=(const enumerable_thread_specific &other)
flattened2d< Container > flatten2d(const Container &c, const typename Container::const_iterator b, const typename Container::const_iterator e)
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int mask
bool operator==(const memory_pool_allocator< T, P > &a, const memory_pool_allocator< U, P > &b)
Definition: memory_pool.h:184
size_type size() const
Return size of vector. It may include elements under construction.
enumerable_thread_specific(enumerable_thread_specific &&other)
static void * create_local_by_move(internal::ets_base< ets_no_key > &base, void *p)
void * create_array(size_t _size) __TBB_override
internal::enumerable_thread_specific_iterator< internal_collection_type, value_type > iterator
iterator grow_by(size_type delta)
Grow by "delta" elements.
The enumerable_thread_specific container.
Identifiers declared inside namespace internal should never be used directly by client code.
Definition: atomic.h:55
Block of space aligned sufficiently to construct an array T with N elements.
Definition: aligned_space.h:33
enumerable_thread_specific & operator=(const enumerable_thread_specific< T, Alloc, Cachetype > &other)
internal::segmented_iterator< Container, const value_type > const_iterator
enumerable_thread_specific()
Default constructor. Each local instance of T is default constructed.
void clear()
Clear container while keeping memory allocated.

Copyright © 2005-2019 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.