Intel(R) Threading Building Blocks Doxygen Documentation, version 4.2.3
concurrent_vector.h
1 /*
2  Copyright (c) 2005-2019 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 
16 
17 
18 
19 */
20 
21 #ifndef __TBB_concurrent_vector_H
22 #define __TBB_concurrent_vector_H
23 
24 #include "tbb_stddef.h"
25 #include "tbb_exception.h"
26 #include "atomic.h"
28 #include "blocked_range.h"
29 #include "tbb_machine.h"
30 #include "tbb_profiling.h"
31 #include <new>
32 #include <cstring> // for memset()
33 #include __TBB_STD_SWAP_HEADER
34 #include <algorithm>
35 #include <iterator>
36 
38 
39 #if _MSC_VER==1500 && !__INTEL_COMPILER
40  // VS2008/VC9 seems to have an issue; limits pull in math.h
41  #pragma warning( push )
42  #pragma warning( disable: 4985 )
43 #endif
44 #include <limits> /* std::numeric_limits */
45 #if _MSC_VER==1500 && !__INTEL_COMPILER
46  #pragma warning( pop )
47 #endif
48 
49 #if __TBB_INITIALIZER_LISTS_PRESENT
50  #include <initializer_list>
51 #endif
52 
53 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
54  // Workaround for overzealous compiler warnings in /Wp64 mode
55  #pragma warning (push)
56 #if defined(_Wp64)
57  #pragma warning (disable: 4267)
58 #endif
59  #pragma warning (disable: 4127) //warning C4127: conditional expression is constant
60 #endif
61 
62 namespace tbb {
63 
64 template<typename T, class A = cache_aligned_allocator<T> >
66 
68 namespace internal {
69 
70  template<typename Container, typename Value>
72 
74  static void *const vector_allocation_error_flag = reinterpret_cast<void*>(size_t(63));
75 
77  template<typename T>
78  void handle_unconstructed_elements(T* array, size_t n_of_elements){
79  std::memset( static_cast<void*>(array), 0, n_of_elements * sizeof( T ) );
80  }
81 
83 
85  protected:
86 
87  // Basic types declarations
88  typedef size_t segment_index_t;
89  typedef size_t size_type;
90 
91  // Using enumerations due to Mac linking problems with static const variables
92  enum {
93  // Size constants
94  default_initial_segments = 1, // 2 initial items
96  pointers_per_short_table = 3, // to fit into 8 words of entire structure
97  pointers_per_long_table = sizeof(segment_index_t) * 8 // one segment per bit
98  };
99 
100  struct segment_not_used {};
101  struct segment_allocated {};
103 
104  class segment_t;
106  void* array;
107  private:
108  //TODO: More elegant way to grant access to selected functions _only_?
109  friend class segment_t;
110  explicit segment_value_t(void* an_array):array(an_array) {}
111  public:
112  friend bool operator==(segment_value_t const& lhs, segment_not_used ) { return lhs.array == 0;}
115  template<typename argument_type>
116  friend bool operator!=(segment_value_t const& lhs, argument_type arg) { return ! (lhs == arg);}
117 
118  template<typename T>
119  T* pointer() const { return static_cast<T*>(const_cast<void*>(array)); }
120  };
121 
123  if(s != segment_allocated()){
124  internal::throw_exception(exception);
125  }
126  }
127 
128  // Segment pointer.
129  class segment_t {
131  public:
132  segment_t(){ store<relaxed>(segment_not_used());}
133  //Copy ctor and assignment operator are defined to ease the use of STL algorithms.
134  //These algorithms are usually not a synchronization point, so the semantics are
135  //intentionally relaxed here.
137 
138  void swap(segment_t & rhs ){
139  tbb::internal::swap<relaxed>(array, rhs.array);
140  }
141 
143  array.store<relaxed>(rhs.array.load<relaxed>());
144  return *this;
145  }
146 
147  template<memory_semantics M>
148  segment_value_t load() const { return segment_value_t(array.load<M>());}
149 
150  template<memory_semantics M>
152  array.store<M>(0);
153  }
154 
155  template<memory_semantics M>
157  __TBB_ASSERT(load<relaxed>() != segment_allocated(),"transition from \"allocated\" to \"allocation failed\" state looks non-logical");
159  }
160 
161  template<memory_semantics M>
162  void store(void* allocated_segment_pointer) __TBB_NOEXCEPT(true) {
163  __TBB_ASSERT(segment_value_t(allocated_segment_pointer) == segment_allocated(),
164  "other overloads of store should be used for marking segment as not_used or allocation_failed" );
165  array.store<M>(allocated_segment_pointer);
166  }
167 
168 #if TBB_USE_ASSERT
169  ~segment_t() {
170  __TBB_ASSERT(load<relaxed>() != segment_allocated(), "should have been freed by clear" );
171  }
172 #endif /* TBB_USE_ASSERT */
173  };
174  friend void swap(segment_t & , segment_t & ) __TBB_NOEXCEPT(true);
175 
176  // Data fields
177 
179  void* (*vector_allocator_ptr)(concurrent_vector_base_v3 &, size_t);
180 
183 
186 
189 
192 
193  // Methods
194 
196  //Here the semantics are intentionally relaxed.
197  //The reason is the following:
198  //An object that is in the middle of construction (i.e. its constructor has not yet finished)
199  //cannot be used concurrently until the construction is finished.
200  //Thus, to signal other threads that construction is finished, synchronization with
201  //acquire-release semantics should be done by the (external) code that uses the vector.
202  //So there is no need to do the synchronization inside the vector (see the illustrative publication sketch below).
203 
205  my_first_block.store<relaxed>(0); // deliberately 0 here, not default_initial_segments
206  my_segment.store<relaxed>(my_storage);
207  }
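 // Illustrative publication pattern matching the rationale above (hypothetical user code,
 // not part of this header); tbb::atomic's store<release>/load<acquire> supply the external
 // synchronization that pairs with the relaxed stores done inside the constructor:
 //   tbb::atomic<tbb::concurrent_vector<int>*> shared_vec;           // initially NULL
 //   // producer thread:
 //   tbb::concurrent_vector<int>* v = new tbb::concurrent_vector<int>(); // relaxed stores inside the ctor
 //   shared_vec.store<tbb::release>(v);                                   // release-publish the finished object
 //   // consumer thread:
 //   if( tbb::concurrent_vector<int>* p = shared_vec.load<tbb::acquire>() )
 //       p->push_back(42);                                                // safe: acquire pairs with the release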
208 
210 
211  //These helper methods use the fact that segments are allocated so
212  //that every segment size is an (increasing) power of 2,
213  //with one exception: segment 0 has size 2, the same as segment 1;
214  //e.g. the segment with index 3 has size 2^3=8 (a worked mapping is sketched after segment_size() below).
216  return segment_index_t( __TBB_Log2( index|1 ) );
217  }
218 
220  return (segment_index_t(1)<<k & ~segment_index_t(1));
221  }
222 
224  segment_index_t k = segment_index_of( index );
225  index -= segment_base(k);
226  return k;
227  }
228 
230  return segment_index_t(1)<<k; // fake value for k==0
231  }
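 // Worked mapping implied by the helpers above (illustrative):
 //   indices 0..1   -> segment 0 (base 0, holds 2 elements; segment_size(0) returns the fake value 1)
 //   indices 2..3   -> segment 1 (base 2, size 2)
 //   indices 4..7   -> segment 2 (base 4, size 4)
 //   indices 8..15  -> segment 3 (base 8, size 8)
 //   e.g. segment_index_of(5) == 2, segment_base(2) == 4, segment_size(2) == 4.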
232 
233 
234  static bool is_first_element_in_segment(size_type element_index){
235  //check if element_index is a power of 2 that is at least 2.
236  //The idea is to detect if the iterator crosses a segment boundary,
237  //and 2 is the minimal index for which it's true
238  __TBB_ASSERT(element_index, "there should be no need to call "
239  "is_first_element_in_segment for 0th element" );
240  return is_power_of_two_at_least( element_index, 2 );
241  }
242 
245 
247  typedef void (__TBB_EXPORTED_FUNC *internal_array_op2)(void* dst, const void* src, size_type n );
248 
253  };
254 
255  void __TBB_EXPORTED_METHOD internal_reserve( size_type n, size_type element_size, size_type max_size );
257  void internal_grow( size_type start, size_type finish, size_type element_size, internal_array_op2 init, const void *src );
258  size_type __TBB_EXPORTED_METHOD internal_grow_by( size_type delta, size_type element_size, internal_array_op2 init, const void *src );
259  void* __TBB_EXPORTED_METHOD internal_push_back( size_type element_size, size_type& index );
261  void* __TBB_EXPORTED_METHOD internal_compact( size_type element_size, void *table, internal_array_op1 destroy, internal_array_op2 copy );
268 
269  void __TBB_EXPORTED_METHOD internal_resize( size_type n, size_type element_size, size_type max_size, const void *src,
270  internal_array_op1 destroy, internal_array_op2 init );
272 
275 private:
277  class helper;
278  friend class helper;
279 
280  template<typename Container, typename Value>
281  friend class vector_iterator;
282 
283  };
284 
286  lhs.swap(rhs);
287  }
288 
290 
292 
294  template<typename Container, typename Value>
295  class vector_iterator
296  {
298  Container* my_vector;
299 
301  size_t my_index;
302 
304 
305  mutable Value* my_item;
306 
307  template<typename C, typename T>
308  friend vector_iterator<C,T> operator+( ptrdiff_t offset, const vector_iterator<C,T>& v );
309 
310  template<typename C, typename T, typename U>
311  friend bool operator==( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );
312 
313  template<typename C, typename T, typename U>
314  friend bool operator<( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );
315 
316  template<typename C, typename T, typename U>
317  friend ptrdiff_t operator-( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );
318 
319  template<typename C, typename U>
321 
322 #if !__TBB_TEMPLATE_FRIENDS_BROKEN
323  template<typename T, class A>
325 #else
326 public:
327 #endif
328 
329  vector_iterator( const Container& vector, size_t index, void *ptr = 0 ) :
330  my_vector(const_cast<Container*>(&vector)),
331  my_index(index),
332  my_item(static_cast<Value*>(ptr))
333  {}
334 
335  public:
337  vector_iterator() : my_vector(NULL), my_index(~size_t(0)), my_item(NULL) {}
338 
340  my_vector(other.my_vector),
341  my_index(other.my_index),
342  my_item(other.my_item)
343  {}
344 
345  vector_iterator operator+( ptrdiff_t offset ) const {
346  return vector_iterator( *my_vector, my_index+offset );
347  }
348  vector_iterator &operator+=( ptrdiff_t offset ) {
349  my_index+=offset;
350  my_item = NULL;
351  return *this;
352  }
353  vector_iterator operator-( ptrdiff_t offset ) const {
354  return vector_iterator( *my_vector, my_index-offset );
355  }
356  vector_iterator &operator-=( ptrdiff_t offset ) {
357  my_index-=offset;
358  my_item = NULL;
359  return *this;
360  }
361  Value& operator*() const {
362  Value* item = my_item;
363  if( !item ) {
364  item = my_item = &my_vector->internal_subscript(my_index);
365  }
366  __TBB_ASSERT( item==&my_vector->internal_subscript(my_index), "corrupt cache" );
367  return *item;
368  }
369  Value& operator[]( ptrdiff_t k ) const {
370  return my_vector->internal_subscript(my_index+k);
371  }
372  Value* operator->() const {return &operator*();}
373 
376  size_t element_index = ++my_index;
377  if( my_item ) {
378  //TODO: consider using knowledge about the "first_block optimization" here as well?
380  //if the iterator crosses a segment boundary, the pointer becomes invalid,
381  //as the next segment may be in another memory location
382  my_item = NULL;
383  } else {
384  ++my_item;
385  }
386  }
387  return *this;
388  }
389 
392  __TBB_ASSERT( my_index>0, "operator--() applied to iterator already at beginning of concurrent_vector" );
393  size_t element_index = my_index--;
394  if( my_item ) {
396  //if the iterator crosses a segment boundary, the pointer becomes invalid,
397  //as the next segment may be in another memory location
398  my_item = NULL;
399  } else {
400  --my_item;
401  }
402  }
403  return *this;
404  }
405 
408  vector_iterator result = *this;
409  operator++();
410  return result;
411  }
412 
415  vector_iterator result = *this;
416  operator--();
417  return result;
418  }
419 
420  // STL support
421 
422  typedef ptrdiff_t difference_type;
423  typedef Value value_type;
424  typedef Value* pointer;
425  typedef Value& reference;
426  typedef std::random_access_iterator_tag iterator_category;
427  };
428 
429  template<typename Container, typename T>
431  return vector_iterator<Container,T>( *v.my_vector, v.my_index+offset );
432  }
433 
434  template<typename Container, typename T, typename U>
436  return i.my_index==j.my_index && i.my_vector == j.my_vector;
437  }
438 
439  template<typename Container, typename T, typename U>
441  return !(i==j);
442  }
443 
444  template<typename Container, typename T, typename U>
446  return i.my_index<j.my_index;
447  }
448 
449  template<typename Container, typename T, typename U>
451  return j<i;
452  }
453 
454  template<typename Container, typename T, typename U>
456  return !(i<j);
457  }
458 
459  template<typename Container, typename T, typename U>
461  return !(j<i);
462  }
463 
464  template<typename Container, typename T, typename U>
466  return ptrdiff_t(i.my_index)-ptrdiff_t(j.my_index);
467  }
468 
469  template<typename T, class A>
471  public:
475  };
476 
477 } // namespace internal
479 
481 
542 template<typename T, class A>
543 class concurrent_vector: protected internal::allocator_base<T, A>,
545 private:
546  template<typename I>
548  public:
549  typedef T value_type;
550  typedef T& reference;
551  typedef const T& const_reference;
552  typedef I iterator;
553  typedef ptrdiff_t difference_type;
554  generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range<I>(begin_,end_,grainsize_) {}
555  template<typename U>
558  };
559 
560  template<typename C, typename U>
561  friend class internal::vector_iterator;
562 
563 public:
564  //------------------------------------------------------------------------
565  // STL compatible types
566  //------------------------------------------------------------------------
569 
570  typedef T value_type;
571  typedef ptrdiff_t difference_type;
572  typedef T& reference;
573  typedef const T& const_reference;
574  typedef T *pointer;
575  typedef const T *const_pointer;
576 
577  typedef internal::vector_iterator<concurrent_vector,T> iterator;
578  typedef internal::vector_iterator<concurrent_vector,const T> const_iterator;
579 
580 #if !defined(_MSC_VER) || _CPPLIB_VER>=300
581  // Assume ISO standard definition of std::reverse_iterator
582  typedef std::reverse_iterator<iterator> reverse_iterator;
583  typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
584 #else
585  // Use non-standard std::reverse_iterator
586  typedef std::reverse_iterator<iterator,T,T&,T*> reverse_iterator;
587  typedef std::reverse_iterator<const_iterator,T,const T&,const T*> const_reverse_iterator;
588 #endif /* defined(_MSC_VER) && (_MSC_VER<1300) */
589 
590  //------------------------------------------------------------------------
591  // Parallel algorithm support
592  //------------------------------------------------------------------------
593  typedef generic_range_type<iterator> range_type;
594  typedef generic_range_type<const_iterator> const_range_type;
595 
596  //------------------------------------------------------------------------
597  // STL compatible constructors & destructors
598  //------------------------------------------------------------------------
599 
602  : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
603  {
605  }
606 
607  //Constructors are not required to have synchronization
608  //(for more details see comment in the concurrent_vector_base constructor).
609 #if __TBB_INITIALIZER_LISTS_PRESENT
610  concurrent_vector(std::initializer_list<T> init_list, const allocator_type &a = allocator_type())
612  : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
613  {
615  __TBB_TRY {
616  internal_assign_iterators(init_list.begin(), init_list.end());
617  } __TBB_CATCH(...) {
618  segment_t *table = my_segment.load<relaxed>();
620  __TBB_RETHROW();
621  }
622 
623  }
624 #endif //# __TBB_INITIALIZER_LISTS_PRESENT
625 
628  : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
629  {
631  __TBB_TRY {
632  internal_copy(vector, sizeof(T), &copy_array);
633  } __TBB_CATCH(...) {
634  segment_t *table = my_segment.load<relaxed>();
636  __TBB_RETHROW();
637  }
638  }
639 
640 #if __TBB_CPP11_RVALUE_REF_PRESENT
641  //TODO add __TBB_NOEXCEPT(true) and static_assert(std::has_nothrow_move_constructor<A>::value)
644  : internal::allocator_base<T, A>(std::move(source)), internal::concurrent_vector_base()
645  {
648  }
649 
651  : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
652  {
654  //The C++ standard requires allocator instances to be comparable for equality,
655  //which means that memory allocated by one instance can be deallocated via the other one.
656  if (a == source.my_allocator) {
658  } else {
659  __TBB_TRY {
660  internal_copy(source, sizeof(T), &move_array);
661  } __TBB_CATCH(...) {
662  segment_t *table = my_segment.load<relaxed>();
664  __TBB_RETHROW();
665  }
666  }
667  }
668 
669 #endif
670 
672  template<class M>
674  : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
675  {
677  __TBB_TRY {
678  internal_copy(vector.internal_vector_base(), sizeof(T), &copy_array);
679  } __TBB_CATCH(...) {
680  segment_t *table = my_segment.load<relaxed>();
682  __TBB_RETHROW();
683  }
684  }
685 
688  {
690  __TBB_TRY {
691  internal_resize( n, sizeof(T), max_size(), NULL, &destroy_array, &initialize_array );
692  } __TBB_CATCH(...) {
693  segment_t *table = my_segment.load<relaxed>();
695  __TBB_RETHROW();
696  }
697  }
698 
701  : internal::allocator_base<T, A>(a)
702  {
704  __TBB_TRY {
705  internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );
706  } __TBB_CATCH(...) {
707  segment_t *table = my_segment.load<relaxed>();
709  __TBB_RETHROW();
710  }
711  }
712 
714  template<class I>
716  : internal::allocator_base<T, A>(a)
717  {
719  __TBB_TRY {
720  internal_assign_range(first, last, static_cast<is_integer_tag<std::numeric_limits<I>::is_integer> *>(0) );
721  } __TBB_CATCH(...) {
722  segment_t *table = my_segment.load<relaxed>();
724  __TBB_RETHROW();
725  }
726  }
727 
730  if( this != &vector )
731  internal_assign(vector, sizeof(T), &destroy_array, &assign_array, &copy_array);
732  return *this;
733  }
734 
735 #if __TBB_CPP11_RVALUE_REF_PRESENT
736  //TODO: add __TBB_NOEXCEPT()
739  __TBB_ASSERT(this != &other, "Move assignment to itself is prohibited ");
741  if(pocma_t::value || this->my_allocator == other.my_allocator) {
742  concurrent_vector trash (std::move(*this));
743  internal_swap(other);
744  tbb::internal::allocator_move_assignment(this->my_allocator, other.my_allocator, pocma_t());
745  } else {
747  }
748  return *this;
749  }
750 #endif
751  //TODO: add a template assignment operator? (i.e. with a different element type)
752 
754  template<class M>
756  if( static_cast<void*>( this ) != static_cast<const void*>( &vector ) )
757  internal_assign(vector.internal_vector_base(),
758  sizeof(T), &destroy_array, &assign_array, &copy_array);
759  return *this;
760  }
761 
762 #if __TBB_INITIALIZER_LISTS_PRESENT
763  concurrent_vector& operator=( std::initializer_list<T> init_list ) {
766  internal_assign_iterators(init_list.begin(), init_list.end());
767  return *this;
768  }
769 #endif //#if __TBB_INITIALIZER_LISTS_PRESENT
770 
771  //------------------------------------------------------------------------
772  // Concurrent operations
773  //------------------------------------------------------------------------
775 
777  return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array, NULL ) : my_early_size.load());
778  }
779 
781 
783  return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array_by, static_cast<const void*>(&t) ) : my_early_size.load());
784  }
785 
787  template<typename I>
789  typename std::iterator_traits<I>::difference_type delta = std::distance(first, last);
790  __TBB_ASSERT( delta >= 0, NULL);
791 
792  return iterator(*this, delta ? internal_grow_by(delta, sizeof(T), &copy_range<I>, static_cast<const void*>(&first)) : my_early_size.load());
793  }
794 
795 #if __TBB_INITIALIZER_LISTS_PRESENT
796 
797  iterator grow_by( std::initializer_list<T> init_list ) {
798  return grow_by( init_list.begin(), init_list.end() );
799  }
800 #endif //#if __TBB_INITIALIZER_LISTS_PRESENT
801 
803 
808  size_type m=0;
809  if( n ) {
810  m = internal_grow_to_at_least_with_result( n, sizeof(T), &initialize_array, NULL );
811  if( m>n ) m=n;
812  }
813  return iterator(*this, m);
814  };
815 
819  size_type m=0;
820  if( n ) {
822  if( m>n ) m=n;
823  }
824  return iterator(*this, m);
825  };
826 
828 
830  {
831  push_back_helper prolog(*this);
832  new(prolog.internal_push_back_result()) T(item);
833  return prolog.return_iterator_and_dismiss();
834  }
835 
836 #if __TBB_CPP11_RVALUE_REF_PRESENT
837 
839  iterator push_back( T&& item )
840  {
841  push_back_helper prolog(*this);
842  new(prolog.internal_push_back_result()) T(std::move(item));
843  return prolog.return_iterator_and_dismiss();
844  }
845 #if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
846 
848  template<typename... Args>
849  iterator emplace_back( Args&&... args )
850  {
851  push_back_helper prolog(*this);
852  new(prolog.internal_push_back_result()) T(std::forward<Args>(args)...);
853  return prolog.return_iterator_and_dismiss();
854  }
855 #endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
856 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
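 // Illustrative concurrent-growth usage (hypothetical user code, not part of this header):
 //   tbb::concurrent_vector<int> v;
 //   tbb::parallel_for( 0, 1000, [&]( int i ) {
 //       v.push_back( i );                        // push_back may be called concurrently
 //   } );
 //   // grow_by() returns an iterator to the first appended element, so each thread
 //   // can initialize the block it appended without racing with other growers:
 //   tbb::concurrent_vector<int>::iterator it = v.grow_by( 10 );
 //   for( int j = 0; j < 10; ++j, ++it ) *it = j;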
857 
861  return internal_subscript(index);
862  }
863 
866  return internal_subscript(index);
867  }
868 
870  reference at( size_type index ) {
872  }
873 
875  const_reference at( size_type index ) const {
877  }
878 
880  range_type range( size_t grainsize = 1 ) {
881  return range_type( begin(), end(), grainsize );
882  }
883 
885  const_range_type range( size_t grainsize = 1 ) const {
886  return const_range_type( begin(), end(), grainsize );
887  }
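 // Illustrative parallel traversal via range() (hypothetical user code; process() is a placeholder):
 //   tbb::parallel_for( v.range(), []( const tbb::concurrent_vector<int>::range_type &r ) {
 //       for( tbb::concurrent_vector<int>::iterator i = r.begin(); i != r.end(); ++i )
 //           process( *i );
 //   } );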
888 
889  //------------------------------------------------------------------------
890  // Capacity
891  //------------------------------------------------------------------------
893  size_type size() const {
895  return cp < sz ? cp : sz;
896  }
897 
899  bool empty() const {return !my_early_size;}
900 
903 
905 
907  void reserve( size_type n ) {
908  if( n )
909  internal_reserve(n, sizeof(T), max_size());
910  }
911 
913  void resize( size_type n ) {
914  internal_resize( n, sizeof(T), max_size(), NULL, &destroy_array, &initialize_array );
915  }
916 
919  internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );
920  }
921 
923  void shrink_to_fit();
924 
926  size_type max_size() const {return (~size_type(0))/sizeof(T);}
927 
928  //------------------------------------------------------------------------
929  // STL support
930  //------------------------------------------------------------------------
931 
933  iterator begin() {return iterator(*this,0);}
935  iterator end() {return iterator(*this,size());}
937  const_iterator begin() const {return const_iterator(*this,0);}
939  const_iterator end() const {return const_iterator(*this,size());}
941  const_iterator cbegin() const {return const_iterator(*this,0);}
943  const_iterator cend() const {return const_iterator(*this,size());}
958  __TBB_ASSERT( size()>0, NULL);
959  const segment_value_t& segment_value = my_segment[0].template load<relaxed>();
960  return (segment_value.template pointer<T>())[0];
961  }
964  __TBB_ASSERT( size()>0, NULL);
965  const segment_value_t& segment_value = my_segment[0].template load<relaxed>();
966  return (segment_value.template pointer<const T>())[0];
967  }
970  __TBB_ASSERT( size()>0, NULL);
971  return internal_subscript( size()-1 );
972  }
975  __TBB_ASSERT( size()>0, NULL);
976  return internal_subscript( size()-1 );
977  }
979  allocator_type get_allocator() const { return this->my_allocator; }
980 
982  void assign(size_type n, const_reference t) {
983  clear();
984  internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );
985  }
986 
988  template<class I>
989  void assign(I first, I last) {
990  clear(); internal_assign_range( first, last, static_cast<is_integer_tag<std::numeric_limits<I>::is_integer> *>(0) );
991  }
992 
993 #if __TBB_INITIALIZER_LISTS_PRESENT
994  void assign(std::initializer_list<T> init_list) {
996  clear(); internal_assign_iterators( init_list.begin(), init_list.end());
997  }
998 #endif //# __TBB_INITIALIZER_LISTS_PRESENT
999 
1001  void swap(concurrent_vector &vector) {
1003  if( this != &vector && (this->my_allocator == vector.my_allocator || pocs_t::value) ) {
1004  concurrent_vector_base_v3::internal_swap(static_cast<concurrent_vector_base_v3&>(vector));
1005  tbb::internal::allocator_swap(this->my_allocator, vector.my_allocator, pocs_t());
1006  }
1007  }
1008 
1010 
1011  void clear() {
1013  }
1014 
1017  segment_t *table = my_segment.load<relaxed>();
1019  // the base class destructor is called after this
1020  }
1021 
1022  const internal::concurrent_vector_base_v3 &internal_vector_base() const { return *this; }
1023 private:
1025  static void *internal_allocator(internal::concurrent_vector_base_v3 &vb, size_t k) {
1026  return static_cast<concurrent_vector<T, A>&>(vb).my_allocator.allocate(k);
1027  }
1029  void internal_free_segments(segment_t table[], segment_index_t k, segment_index_t first_block);
1030 
1032  T& internal_subscript( size_type index ) const;
1033 
1036 
1039  internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(p), &destroy_array, p? &initialize_array_by : &initialize_array );
1040  }
1041 
1043  /* Function declarations:
1044  * void foo(is_integer_tag<true>*);
1045  * void foo(is_integer_tag<false>*);
1046  * Usage example:
1047  * foo(static_cast<is_integer_tag<std::numeric_limits<T>::is_integer>*>(0));
1048  */
1049  template<bool B> class is_integer_tag;
1050 
1052  template<class I>
1053  void internal_assign_range(I first, I last, is_integer_tag<true> *) {
1054  internal_assign_n(static_cast<size_type>(first), &static_cast<T&>(last));
1055  }
1057  template<class I>
1058  void internal_assign_range(I first, I last, is_integer_tag<false> *) {
1060  }
1062  template<class I>
1064 
1065  //these functions are marked __TBB_EXPORTED_FUNC as they are called from within the library
1066 
1068  static void __TBB_EXPORTED_FUNC initialize_array( void* begin, const void*, size_type n );
1069 
1071  static void __TBB_EXPORTED_FUNC initialize_array_by( void* begin, const void* src, size_type n );
1072 
1074  static void __TBB_EXPORTED_FUNC copy_array( void* dst, const void* src, size_type n );
1075 
1076 #if __TBB_MOVE_IF_NOEXCEPT_PRESENT
1077  static void __TBB_EXPORTED_FUNC move_array_if_noexcept( void* dst, const void* src, size_type n );
1079 #endif //__TBB_MOVE_IF_NO_EXCEPT_PRESENT
1080 
1081 #if __TBB_CPP11_RVALUE_REF_PRESENT
1082  static void __TBB_EXPORTED_FUNC move_array( void* dst, const void* src, size_type n );
1084 
1086  static void __TBB_EXPORTED_FUNC move_assign_array( void* dst, const void* src, size_type n );
1087 #endif
1088  template<typename Iterator>
1090  static void __TBB_EXPORTED_FUNC copy_range( void* dst, const void* p_type_erased_iterator, size_type n );
1091 
1093  static void __TBB_EXPORTED_FUNC assign_array( void* dst, const void* src, size_type n );
1094 
1096  static void __TBB_EXPORTED_FUNC destroy_array( void* begin, size_type n );
1097 
1099  class internal_loop_guide : internal::no_copy {
1100  public:
1102  const size_type n;
1104 
1105  static const T* as_const_pointer(const void *ptr) { return static_cast<const T *>(ptr); }
1106  static T* as_pointer(const void *src) { return static_cast<T*>(const_cast<void *>(src)); }
1107 
1108  internal_loop_guide(size_type ntrials, void *ptr)
1109  : array(as_pointer(ptr)), n(ntrials), i(0) {}
1110  void init() { for(; i < n; ++i) new( &array[i] ) T(); }
1111  void init(const void *src) { for(; i < n; ++i) new( &array[i] ) T(*as_const_pointer(src)); }
1112  void copy(const void *src) { for(; i < n; ++i) new( &array[i] ) T(as_const_pointer(src)[i]); }
1113  void assign(const void *src) { for(; i < n; ++i) array[i] = as_const_pointer(src)[i]; }
1114 #if __TBB_CPP11_RVALUE_REF_PRESENT
1115  void move_assign(const void *src) { for(; i < n; ++i) array[i] = std::move(as_pointer(src)[i]); }
1116  void move_construct(const void *src) { for(; i < n; ++i) new( &array[i] ) T( std::move(as_pointer(src)[i]) ); }
1117 #endif
1118 #if __TBB_MOVE_IF_NOEXCEPT_PRESENT
1119  void move_construct_if_noexcept(const void *src) { for(; i < n; ++i) new( &array[i] ) T( std::move_if_noexcept(as_pointer(src)[i]) ); }
1120 #endif //__TBB_MOVE_IF_NOEXCEPT_PRESENT
1121 
1122  //TODO: rename to construct_range
1123  template<class I> void iterate(I &src) { for(; i < n; ++i, ++src) new( &array[i] ) T( *src ); }
1125  if(i < n) {// if an exception was raised, fill the rest of the items with zeros
1127  }
1128  }
1129  };
1130 
1131  struct push_back_helper : internal::no_copy{
1132  struct element_construction_guard : internal::no_copy{
1134 
1135  element_construction_guard(pointer an_element) : element (an_element){}
1136  void dismiss(){ element = NULL; }
1138  if (element){
1140  }
1141  }
1142  };
1143 
1147 
1149  v(vector),
1150  g (static_cast<T*>(v.internal_push_back(sizeof(T),k)))
1151  {}
1152 
1155  pointer ptr = g.element;
1156  g.dismiss();
1157  return iterator(v, k, ptr);
1158  }
1159  };
1160 };
1161 
1162 #if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
1163 // Deduction guide for the constructor from two iterators
1164 template<typename I,
1165  typename T = typename std::iterator_traits<I>::value_type,
1166  typename A = cache_aligned_allocator<T>
1167 > concurrent_vector(I, I, const A& = A())
1168 -> concurrent_vector<T, A>;
1169 
1170 // Deduction guide for the constructor from a vector and allocator
1171 template<typename T, typename A1, typename A2>
1172 concurrent_vector(const concurrent_vector<T, A1> &, const A2 &)
1173 -> concurrent_vector<T, A2>;
1174 
1175 // Deduction guide for the constructor from an initializer_list
1176 template<typename T, typename A = cache_aligned_allocator<T>
1177 > concurrent_vector(std::initializer_list<T>, const A& = A())
1178 -> concurrent_vector<T, A>;
1179 #endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */
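// Illustrative use of the deduction guides above (hypothetical user code, C++17):
//   std::vector<int> src{1, 2, 3};
//   tbb::concurrent_vector cv ( src.begin(), src.end() );  // deduces concurrent_vector<int>
//   tbb::concurrent_vector cv2{ 1.0, 2.0 };                // deduces concurrent_vector<double>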
1180 
1181 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1182 #pragma warning (push)
1183 #pragma warning (disable: 4701) // potentially uninitialized local variable "old"
1184 #endif
1185 template<typename T, class A>
1188  __TBB_TRY {
1189  internal_array_op2 copy_or_move_array =
1190 #if __TBB_MOVE_IF_NOEXCEPT_PRESENT
1191  &move_array_if_noexcept
1192 #else
1193  &copy_array
1194 #endif
1195  ;
1196  if( internal_compact( sizeof(T), &old, &destroy_array, copy_or_move_array ) )
1197  internal_free_segments( old.table, pointers_per_long_table, old.first_block ); // free joined and unnecessary segments
1198  } __TBB_CATCH(...) {
1199  if( old.first_block ) // free segment allocated for compacting. Only for support of exceptions in ctor of user T[ype]
1200  internal_free_segments( old.table, 1, old.first_block );
1201  __TBB_RETHROW();
1202  }
1203 }
1204 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1205 #pragma warning (pop)
1206 #endif // warning 4701 is back
1207 
1208 template<typename T, class A>
1210  // Free the arrays
1211  while( k > first_block ) {
1212  --k;
1213  segment_value_t segment_value = table[k].load<relaxed>();
1214  table[k].store<relaxed>(segment_not_used());
1215  if( segment_value == segment_allocated() ) // check for correct segment pointer
1216  this->my_allocator.deallocate( (segment_value.pointer<T>()), segment_size(k) );
1217  }
1218  segment_value_t segment_value = table[0].load<relaxed>();
1219  if( segment_value == segment_allocated() ) {
1220  __TBB_ASSERT( first_block > 0, NULL );
1221  while(k > 0) table[--k].store<relaxed>(segment_not_used());
1222  this->my_allocator.deallocate( (segment_value.pointer<T>()), segment_size(first_block) );
1223  }
1224 }
1225 
1226 template<typename T, class A>
1228  //TODO: unify both versions of internal_subscript
1229  __TBB_ASSERT( index < my_early_size, "index out of bounds" );
1230  size_type j = index;
1231  segment_index_t k = segment_base_index_of( j );
1232  __TBB_ASSERT( my_segment.load<acquire>() != my_storage || k < pointers_per_short_table, "index is being allocated" );
1233  //no need for a load with acquire (load<acquire>) since the thread works in its own space or gets
1234  //the information about added elements via some form of external synchronization
1235  //TODO: why not make a load of my_segment relaxed as well ?
1236  //TODO: add an assertion that my_segment[k] is properly aligned to please ITT
1237  segment_value_t segment_value = my_segment[k].template load<relaxed>();
1238  __TBB_ASSERT( segment_value != segment_allocation_failed(), "the instance is broken by bad allocation. Use at() instead" );
1239  __TBB_ASSERT( segment_value != segment_not_used(), "index is being allocated" );
1240  return (( segment_value.pointer<T>()))[j];
1241 }
1242 
1243 template<typename T, class A>
1245  if( index >= my_early_size )
1246  internal::throw_exception(internal::eid_out_of_range); // throw std::out_of_range
1247  size_type j = index;
1248  segment_index_t k = segment_base_index_of( j );
1249  //TODO: refactor this condition into separate helper function, e.g. fits_into_small_table
1250  if( my_segment.load<acquire>() == my_storage && k >= pointers_per_short_table )
1252  // no need for a load with acquire (load<acquire>) since the thread works in its own space or gets
1253  //the information about added elements via some form of external synchronization
1254  //TODO: why not make a load of my_segment relaxed as well ?
1255  //TODO: add an assertion that my_segment[k] is properly aligned to please ITT
1256  segment_value_t segment_value = my_segment[k].template load<relaxed>();
1257  enforce_segment_allocated(segment_value, internal::eid_index_range_error);
1258  return (segment_value.pointer<T>())[j];
1259 }
1260 
1261 template<typename T, class A> template<class I>
1263  __TBB_ASSERT(my_early_size == 0, NULL);
1264  size_type n = std::distance(first, last);
1265  if( !n ) return;
1266  internal_reserve(n, sizeof(T), max_size());
1267  my_early_size = n;
1268  segment_index_t k = 0;
1269  //TODO: unify segment iteration code with concurrent_base_v3::helper
1270  size_type sz = segment_size( my_first_block );
1271  while( sz < n ) {
1272  internal_loop_guide loop(sz, my_segment[k].template load<relaxed>().template pointer<void>());
1273  loop.iterate(first);
1274  n -= sz;
1275  if( !k ) k = my_first_block;
1276  else { ++k; sz <<= 1; }
1277  }
1278  internal_loop_guide loop(n, my_segment[k].template load<relaxed>().template pointer<void>());
1279  loop.iterate(first);
1280 }
1281 
1282 template<typename T, class A>
1284  internal_loop_guide loop(n, begin); loop.init();
1285 }
1286 
1287 template<typename T, class A>
1289  internal_loop_guide loop(n, begin); loop.init(src);
1290 }
1291 
1292 template<typename T, class A>
1293 void concurrent_vector<T, A>::copy_array( void* dst, const void* src, size_type n ) {
1294  internal_loop_guide loop(n, dst); loop.copy(src);
1295 }
1296 
1297 #if __TBB_CPP11_RVALUE_REF_PRESENT
1298 template<typename T, class A>
1299 void concurrent_vector<T, A>::move_array( void* dst, const void* src, size_type n ) {
1300  internal_loop_guide loop(n, dst); loop.move_construct(src);
1301 }
1302 template<typename T, class A>
1303 void concurrent_vector<T, A>::move_assign_array( void* dst, const void* src, size_type n ) {
1304  internal_loop_guide loop(n, dst); loop.move_assign(src);
1305 }
1306 #endif
1307 
1308 #if __TBB_MOVE_IF_NOEXCEPT_PRESENT
1309 template<typename T, class A>
1310 void concurrent_vector<T, A>::move_array_if_noexcept( void* dst, const void* src, size_type n ) {
1311  internal_loop_guide loop(n, dst); loop.move_construct_if_noexcept(src);
1312 }
1313 #endif //__TBB_MOVE_IF_NOEXCEPT_PRESENT
1314 
1315 template<typename T, class A>
1316 template<typename I>
1317 void concurrent_vector<T, A>::copy_range( void* dst, const void* p_type_erased_iterator, size_type n ){
1318  internal_loop_guide loop(n, dst);
1319  loop.iterate( *(static_cast<I*>(const_cast<void*>(p_type_erased_iterator))) );
1320 }
1321 
1322 template<typename T, class A>
1323 void concurrent_vector<T, A>::assign_array( void* dst, const void* src, size_type n ) {
1324  internal_loop_guide loop(n, dst); loop.assign(src);
1325 }
1326 
1327 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1328  // Workaround for overzealous compiler warning
1329  #pragma warning (push)
1330  #pragma warning (disable: 4189)
1331 #endif
1332 template<typename T, class A>
1334  T* array = static_cast<T*>(begin);
1335  for( size_type j=n; j>0; --j )
1336  array[j-1].~T(); // destructors are supposed to not throw any exceptions
1337 }
1338 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1339  #pragma warning (pop)
1340 #endif // warning 4189 is back
1341 
1342 // concurrent_vector's template functions
1343 template<typename T, class A1, class A2>
1345  //TODO: call size() only once per vector (in operator==)
1346  // Simply: return a.size() == b.size() && std::equal(a.begin(), a.end(), b.begin());
1347  if(a.size() != b.size()) return false;
1348  typename concurrent_vector<T, A1>::const_iterator i(a.begin());
1349  typename concurrent_vector<T, A2>::const_iterator j(b.begin());
1350  for(; i != a.end(); ++i, ++j)
1351  if( !(*i == *j) ) return false;
1352  return true;
1353 }
1354 
1355 template<typename T, class A1, class A2>
1357 { return !(a == b); }
1358 
1359 template<typename T, class A1, class A2>
1361 { return (std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end())); }
1362 
1363 template<typename T, class A1, class A2>
1365 { return b < a; }
1366 
1367 template<typename T, class A1, class A2>
1369 { return !(b < a); }
1370 
1371 template<typename T, class A1, class A2>
1373 { return !(a < b); }
1374 
1375 template<typename T, class A>
1377 { a.swap( b ); }
1378 
1379 } // namespace tbb
1380 
1381 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1382  #pragma warning (pop)
1383 #endif // warning 4267,4127 are back
1384 
1385 #endif /* __TBB_concurrent_vector_H */