Intel(R) Threading Building Blocks Doxygen Documentation version 4.2.3
task.h
1 /*
2  Copyright (c) 2005-2019 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 
16 
17 
18 
19 */
20 
21 #ifndef __TBB_task_H
22 #define __TBB_task_H
23 
24 #include "tbb_stddef.h"
25 #include "tbb_machine.h"
26 #include "tbb_profiling.h"
27 #include <climits>
28 
29 typedef struct ___itt_caller *__itt_caller;
30 
31 namespace tbb {
32 
33 class task;
34 class task_list;
35 class task_group_context;
36 
37 // MSVC does not allow taking the address of a member that was defined
38 // privately in task_base and made public in class task via a using declaration.
39 #if _MSC_VER || (__GNUC__==3 && __GNUC_MINOR__<3)
40 #define __TBB_TASK_BASE_ACCESS public
41 #else
42 #define __TBB_TASK_BASE_ACCESS private
43 #endif
44 
45 namespace internal { //< @cond INTERNAL
46 
49  task* self;
51  public:
52  explicit allocate_additional_child_of_proxy( task& parent_ ) : self(NULL), parent(parent_) {
54  }
55  task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
56  void __TBB_EXPORTED_METHOD free( task& ) const;
57  };
58 
59  struct cpu_ctl_env_space { int space[sizeof(internal::uint64_t)/sizeof(int)]; };
60 } //< namespace internal @endcond
61 
62 namespace interface5 {
63  namespace internal {
65 
72  friend class tbb::task;
73 
75  static void spawn( task& t );
76 
78  static void spawn( task_list& list );
79 
81 
85  }
86 
88 
92  static void __TBB_EXPORTED_FUNC destroy( task& victim );
93  };
94  } // internal
95 } // interface5
96 
98 namespace internal {
99 
101  public:
103  virtual void spawn( task& first, task*& next ) = 0;
104 
106  virtual void wait_for_all( task& parent, task* child ) = 0;
107 
109  virtual void spawn_root_and_wait( task& first, task*& next ) = 0;
110 
112  // Have to have it just to shut up overzealous compilation warnings
113  virtual ~scheduler() = 0;
114 
116  virtual void enqueue( task& t, void* reserved ) = 0;
117  };
118 
120 
121  typedef intptr_t reference_count;
122 
124  typedef unsigned short affinity_id;
125 
126 #if __TBB_TASK_ISOLATION
127  typedef intptr_t isolation_tag;
130 #endif /* __TBB_TASK_ISOLATION */
131 
132 #if __TBB_TASK_GROUP_CONTEXT
133  class generic_scheduler;
134 
137  *my_next;
138  };
139 
142  public:
144  task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
145  void __TBB_EXPORTED_METHOD free( task& ) const;
146  };
147 #endif /* __TBB_TASK_GROUP_CONTEXT */
148 
150  public:
151  static task& __TBB_EXPORTED_FUNC allocate( size_t size );
152  static void __TBB_EXPORTED_FUNC free( task& );
153  };
154 
156  public:
157  task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
158  void __TBB_EXPORTED_METHOD free( task& ) const;
159  };
160 
162  public:
163  task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
164  void __TBB_EXPORTED_METHOD free( task& ) const;
165  };
166 
167 #if __TBB_PREVIEW_CRITICAL_TASKS
168  // TODO: move to class methods when critical task API becomes public
169  void make_critical( task& t );
170  bool is_critical( task& t );
171 #endif
172 
174 
188  class task_prefix {
189  private:
190  friend class tbb::task;
192  friend class tbb::task_list;
193  friend class internal::scheduler;
198 #if __TBB_PREVIEW_CRITICAL_TASKS
199  friend void make_critical( task& );
200  friend bool is_critical( task& );
201 #endif
202 
203 #if __TBB_TASK_ISOLATION
206 #else
207  intptr_t reserved_space_for_task_isolation_tag;
208 #endif /* __TBB_TASK_ISOLATION */
209 
210 #if __TBB_TASK_GROUP_CONTEXT
211 
216 #endif /* __TBB_TASK_GROUP_CONTEXT */
217 
219 
225 
226 #if __TBB_TASK_PRIORITY
227  union {
228 #endif /* __TBB_TASK_PRIORITY */
229 
233 
234 #if __TBB_TASK_PRIORITY
235 
238  };
239 #endif /* __TBB_TASK_PRIORITY */
240 
242 
246 
248 
253 
255 
257  int depth;
258 
260 
261  unsigned char state;
262 
264 
270  unsigned char extra_state;
271 
273 
276 
278  tbb::task& task() {return *reinterpret_cast<tbb::task*>(this+1);}
279  };
280 
281 } // namespace internal
283 
284 #if __TBB_TASK_GROUP_CONTEXT
285 
286 #if __TBB_TASK_PRIORITY
287 namespace internal {
288  static const int priority_stride_v4 = INT_MAX / 4;
289 #if __TBB_PREVIEW_CRITICAL_TASKS
290  // TODO: move into priority_t enum when critical tasks become public feature
291  static const int priority_critical = priority_stride_v4 * 3 + priority_stride_v4 / 3 * 2;
292 #endif
293 }
294 
299 };
300 
301 #endif /* __TBB_TASK_PRIORITY */
302 
303 #if TBB_USE_CAPTURED_EXCEPTION
304  class tbb_exception;
305 #else
306  namespace internal {
307  class tbb_exception_ptr;
308  }
309 #endif /* !TBB_USE_CAPTURED_EXCEPTION */
310 
311 class task_scheduler_init;
312 namespace interface7 { class task_arena; }
313 
315 
335 class task_group_context : internal::no_copy {
336 private:
338  friend class task_scheduler_init;
340 
341 #if TBB_USE_CAPTURED_EXCEPTION
343 #else
344  typedef internal::tbb_exception_ptr exception_container_type;
345 #endif
346 
349  version_mask = 0xFFFF,
351  };
352 
353 public:
354  enum kind_type {
357  };
358 
359  enum traits_type {
361 #if __TBB_FP_CONTEXT
362  fp_settings = 0x0002ul << traits_offset,
363 #endif
365 #if TBB_USE_CAPTURED_EXCEPTION
366  default_traits = 0
367 #else
369 #endif /* !TBB_USE_CAPTURED_EXCEPTION */
370  };
371 
372 private:
373  enum state {
375  // the following enumerations must be the last, new 2^x values must go above
377  };
378 
379  union {
381  // TODO: describe asynchronous use, and whether any memory semantics are needed
383  uintptr_t _my_kind_aligner;
384  };
385 
388 
390 
392  internal::context_list_node_t my_node;
393 
396 
398 
402  - 2 * sizeof(uintptr_t)- sizeof(void*) - sizeof(internal::context_list_node_t)
403  - sizeof(__itt_caller)
404 #if __TBB_FP_CONTEXT
405  - sizeof(internal::cpu_ctl_env_space)
406 #endif
407  ];
408 
409 #if __TBB_FP_CONTEXT
410 
413  internal::cpu_ctl_env_space my_cpu_ctl_env;
414 #endif
415 
418 
420 
424 
427 
430 
432  uintptr_t my_state;
433 
434 #if __TBB_TASK_PRIORITY
435  intptr_t my_priority;
437 #endif /* __TBB_TASK_PRIORITY */
438 
441 
443 
444  char _trailing_padding[internal::NFS_MaxLineSize - 2 * sizeof(uintptr_t) - 2 * sizeof(void*)
445 #if __TBB_TASK_PRIORITY
446  - sizeof(intptr_t)
447 #endif /* __TBB_TASK_PRIORITY */
448  - sizeof(internal::string_index)
449  ];
450 
451 public:
453 
481  task_group_context ( kind_type relation_with_parent = bound,
482  uintptr_t t = default_traits )
483  : my_kind(relation_with_parent)
484  , my_version_and_traits(3 | t)
485  , my_name(internal::CUSTOM_CTX)
486  {
487  init();
488  }
489 
490  // Custom constructor for instrumentation of tbb algorithm
492  : my_kind(bound)
494  , my_name(name)
495  {
496  init();
497  }
498 
499  // Do not introduce standalone unbind method since it will break state propagation assumptions
501 
503 
511 
513 
521 
524 
526 
533 
534 #if __TBB_FP_CONTEXT
535 
544 #endif
545 
546 #if __TBB_TASK_PRIORITY
547  void set_priority ( priority_t );
549 
551  priority_t priority () const;
552 #endif /* __TBB_TASK_PRIORITY */
553 
555  uintptr_t traits() const { return my_version_and_traits & traits_mask; }
556 
557 protected:
559 
560  void __TBB_EXPORTED_METHOD init ();
561 
562 private:
563  friend class task;
565 
569  static const kind_type dying = kind_type(detached+1);
570 
572  template <typename T>
573  void propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state );
574 
576  void bind_to ( internal::generic_scheduler *local_sched );
577 
579  void register_with ( internal::generic_scheduler *local_sched );
580 
581 #if __TBB_FP_CONTEXT
582  // TODO: Consider adding #else stub in order to omit #if sections in other code
584  void copy_fp_settings( const task_group_context &src );
585 #endif /* __TBB_FP_CONTEXT */
586 }; // class task_group_context
587 
588 #endif /* __TBB_TASK_GROUP_CONTEXT */
589 
591 
593 
596 
599 
600 protected:
602  task() {prefix().extra_state=1;}
603 
604 public:
606  virtual ~task() {}
607 
609  virtual task* execute() = 0;
610 
612  enum state_type {
625 #if __TBB_RECYCLE_TO_ENQUEUE
626  ,to_enqueue
628 #endif
629  };
630 
631  //------------------------------------------------------------------------
632  // Allocating tasks
633  //------------------------------------------------------------------------
634 
638  }
639 
640 #if __TBB_TASK_GROUP_CONTEXT
641  static internal::allocate_root_with_context_proxy allocate_root( task_group_context& ctx ) {
643  return internal::allocate_root_with_context_proxy(ctx);
644  }
645 #endif /* __TBB_TASK_GROUP_CONTEXT */
646 
648 
650  return *reinterpret_cast<internal::allocate_continuation_proxy*>(this);
651  }
652 
655  return *reinterpret_cast<internal::allocate_child_proxy*>(this);
656  }
657 
659  using task_base::allocate_additional_child_of;
660 
661 #if __TBB_DEPRECATED_TASK_INTERFACE
662 
667  void __TBB_EXPORTED_METHOD destroy( task& t );
668 #else /* !__TBB_DEPRECATED_TASK_INTERFACE */
669  using task_base::destroy;
671 #endif /* !__TBB_DEPRECATED_TASK_INTERFACE */
672 
673  //------------------------------------------------------------------------
674  // Recycling of tasks
675  //------------------------------------------------------------------------
676 
678 
685  __TBB_ASSERT( prefix().state==executing, "execute not running?" );
686  prefix().state = allocated;
687  }
688 
690 
693  __TBB_ASSERT( prefix().state==executing, "execute not running?" );
694  prefix().state = recycle;
695  }
696 
698  void recycle_as_child_of( task& new_parent ) {
699  internal::task_prefix& p = prefix();
700  __TBB_ASSERT( prefix().state==executing||prefix().state==allocated, "execute not running, or already recycled" );
701  __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled as a child" );
702  __TBB_ASSERT( p.parent==NULL, "parent must be null" );
703  __TBB_ASSERT( new_parent.prefix().state<=recycle, "corrupt parent's state" );
704  __TBB_ASSERT( new_parent.prefix().state!=freed, "parent already freed" );
705  p.state = allocated;
706  p.parent = &new_parent;
707 #if __TBB_TASK_GROUP_CONTEXT
708  p.context = new_parent.prefix().context;
709 #endif /* __TBB_TASK_GROUP_CONTEXT */
710  }
711 
713 
715  __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" );
716  __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled for reexecution" );
717  prefix().state = reexecute;
718  }
719 
720 #if __TBB_RECYCLE_TO_ENQUEUE
721 
723  void recycle_to_enqueue() {
724  __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" );
725  prefix().state = to_enqueue;
726  }
727 #endif /* __TBB_RECYCLE_TO_ENQUEUE */
728 
729  //------------------------------------------------------------------------
730  // Spawning and blocking
731  //------------------------------------------------------------------------
732 
734  void set_ref_count( int count ) {
735 #if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
737 #else
738  prefix().ref_count = count;
739 #endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
740  }
741 
743 
746  }
747 
749 
750  int add_ref_count( int count ) {
752  internal::reference_count k = count+__TBB_FetchAndAddW( &prefix().ref_count, count );
753  __TBB_ASSERT( k>=0, "task's reference count underflowed" );
754  if( k==0 )
756  return int(k);
757  }
758 
760 
762 #if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
764 #else
766 #endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
767  }
768 
770  using task_base::spawn;
771 
773  void spawn_and_wait_for_all( task& child ) {
774  prefix().owner->wait_for_all( *this, &child );
775  }
776 
779 
781  static void spawn_root_and_wait( task& root ) {
782  root.prefix().owner->spawn_root_and_wait( root, root.prefix().next );
783  }
784 
786 
788  static void spawn_root_and_wait( task_list& root_list );
789 
791 
792  void wait_for_all() {
793  prefix().owner->wait_for_all( *this, NULL );
794  }
795 
797 #if __TBB_TASK_PRIORITY
798 
808 #endif /* __TBB_TASK_PRIORITY */
809  static void enqueue( task& t ) {
810  t.prefix().owner->enqueue( t, NULL );
811  }
812 
813 #if __TBB_TASK_PRIORITY
814  static void enqueue( task& t, priority_t p ) {
816 #if __TBB_PREVIEW_CRITICAL_TASKS
818  || p == internal::priority_critical, "Invalid priority level value");
819 #else
820  __TBB_ASSERT(p == priority_low || p == priority_normal || p == priority_high, "Invalid priority level value");
821 #endif
822  t.prefix().owner->enqueue( t, (void*)p );
823  }
824 #endif /* __TBB_TASK_PRIORITY */
825 
827  static task& __TBB_EXPORTED_FUNC self();
828 
830  task* parent() const {return prefix().parent;}
831 
833  void set_parent(task* p) {
834 #if __TBB_TASK_GROUP_CONTEXT
835  __TBB_ASSERT(!p || prefix().context == p->prefix().context, "The tasks must be in the same context");
836 #endif
837  prefix().parent = p;
838  }
839 
840 #if __TBB_TASK_GROUP_CONTEXT
841 
843  task_group_context* context() {return prefix().context;}
844 
846  task_group_context* group () { return prefix().context; }
847 #endif /* __TBB_TASK_GROUP_CONTEXT */
848 
850  bool is_stolen_task() const {
851  return (prefix().extra_state & 0x80)!=0;
852  }
853 
854  //------------------------------------------------------------------------
855  // Debugging
856  //------------------------------------------------------------------------
857 
859  state_type state() const {return state_type(prefix().state);}
860 
862  int ref_count() const {
863 #if TBB_USE_ASSERT
864  internal::reference_count ref_count_ = prefix().ref_count;
865  __TBB_ASSERT( ref_count_==int(ref_count_), "integer overflow error");
866 #endif
867  return int(prefix().ref_count);
868  }
869 
872 
873  //------------------------------------------------------------------------
874  // Affinity
875  //------------------------------------------------------------------------
876 
878 
880 
882  void set_affinity( affinity_id id ) {prefix().affinity = id;}
883 
885  affinity_id affinity() const {return prefix().affinity;}
886 
888 
893 
894 #if __TBB_TASK_GROUP_CONTEXT
895 
907 
909 
910  bool cancel_group_execution () { return prefix().context->cancel_group_execution(); }
911 
913  bool is_cancelled () const { return prefix().context->is_group_execution_cancelled(); }
914 #else
915  bool is_cancelled () const { return false; }
916 #endif /* __TBB_TASK_GROUP_CONTEXT */
917 
918 #if __TBB_TASK_PRIORITY
919  void set_group_priority ( priority_t p ) { prefix().context->set_priority(p); }
921 
923  priority_t group_priority () const { return prefix().context->priority(); }
924 
925 #endif /* __TBB_TASK_PRIORITY */
926 
927 private:
929  friend class task_list;
930  friend class internal::scheduler;
932 #if __TBB_TASK_GROUP_CONTEXT
934 #endif /* __TBB_TASK_GROUP_CONTEXT */
938 
940 
941  internal::task_prefix& prefix( internal::version_tag* = NULL ) const {
942  return reinterpret_cast<internal::task_prefix*>(const_cast<task*>(this))[-1];
943  }
944 #if __TBB_PREVIEW_CRITICAL_TASKS
945  friend void internal::make_critical( task& );
946  friend bool internal::is_critical( task& );
947 #endif
948 }; // class task
949 
950 #if __TBB_PREVIEW_CRITICAL_TASKS
951 namespace internal {
952 inline void make_critical( task& t ) { t.prefix().extra_state |= 0x8; }
953 inline bool is_critical( task& t ) { return bool((t.prefix().extra_state & 0x8) != 0); }
954 } // namespace internal
955 #endif /* __TBB_PREVIEW_CRITICAL_TASKS */
956 
958 
959 class empty_task: public task {
961  return NULL;
962  }
963 };
964 
966 namespace internal {
967  template<typename F>
968  class function_task : public task {
969 #if __TBB_ALLOW_MUTABLE_FUNCTORS
970  F my_func;
971 #else
972  const F my_func;
973 #endif
975  my_func();
976  return NULL;
977  }
978  public:
979  function_task( const F& f ) : my_func(f) {}
980 #if __TBB_CPP11_RVALUE_REF_PRESENT
981  function_task( F&& f ) : my_func( std::move(f) ) {}
982 #endif
983  };
984 } // namespace internal
986 
988 
990 class task_list: internal::no_copy {
991 private:
994  friend class task;
996 public:
998  task_list() : first(NULL), next_ptr(&first) {}
999 
1002 
1004  bool empty() const {return !first;}
1005 
1007  void push_back( task& task ) {
1008  task.prefix().next = NULL;
1009  *next_ptr = &task;
1010  next_ptr = &task.prefix().next;
1011  }
1012 #if __TBB_TODO
1013  // TODO: add this method and implement&document the local execution ordering. See more in generic_scheduler::local_spawn
1015  void push_front( task& task ) {
1016  if( empty() ) {
1017  push_back(task);
1018  } else {
1019  task.prefix().next = first;
1020  first = &task;
1021  }
1022  }
1023 #endif
1024  task& pop_front() {
1026  __TBB_ASSERT( !empty(), "attempt to pop item from empty task_list" );
1027  task* result = first;
1028  first = result->prefix().next;
1029  if( !first ) next_ptr = &first;
1030  return *result;
1031  }
1032 
1034  void clear() {
1035  first=NULL;
1036  next_ptr=&first;
1037  }
1038 };
1039 
1041  t.prefix().owner->spawn( t, t.prefix().next );
1042 }
1043 
1045  if( task* t = list.first ) {
1046  t->prefix().owner->spawn( *t, *list.next_ptr );
1047  list.clear();
1048  }
1049 }
1050 
1051 inline void task::spawn_root_and_wait( task_list& root_list ) {
1052  if( task* t = root_list.first ) {
1053  t->prefix().owner->spawn_root_and_wait( *t, *root_list.next_ptr );
1054  root_list.clear();
1055  }
1056 }
1057 
1058 } // namespace tbb
1059 
1060 inline void *operator new( size_t bytes, const tbb::internal::allocate_root_proxy& ) {
1062 }
1063 
1064 inline void operator delete( void* task, const tbb::internal::allocate_root_proxy& ) {
1065  tbb::internal::allocate_root_proxy::free( *static_cast<tbb::task*>(task) );
1066 }
1067 
1068 #if __TBB_TASK_GROUP_CONTEXT
1069 inline void *operator new( size_t bytes, const tbb::internal::allocate_root_with_context_proxy& p ) {
1070  return &p.allocate(bytes);
1071 }
1072 
1073 inline void operator delete( void* task, const tbb::internal::allocate_root_with_context_proxy& p ) {
1074  p.free( *static_cast<tbb::task*>(task) );
1075 }
1076 #endif /* __TBB_TASK_GROUP_CONTEXT */
1077 
1078 inline void *operator new( size_t bytes, const tbb::internal::allocate_continuation_proxy& p ) {
1079  return &p.allocate(bytes);
1080 }
1081 
1082 inline void operator delete( void* task, const tbb::internal::allocate_continuation_proxy& p ) {
1083  p.free( *static_cast<tbb::task*>(task) );
1084 }
1085 
1086 inline void *operator new( size_t bytes, const tbb::internal::allocate_child_proxy& p ) {
1087  return &p.allocate(bytes);
1088 }
1089 
1090 inline void operator delete( void* task, const tbb::internal::allocate_child_proxy& p ) {
1091  p.free( *static_cast<tbb::task*>(task) );
1092 }
1093 
1094 inline void *operator new( size_t bytes, const tbb::internal::allocate_additional_child_of_proxy& p ) {
1095  return &p.allocate(bytes);
1096 }
1097 
1098 inline void operator delete( void* task, const tbb::internal::allocate_additional_child_of_proxy& p ) {
1099  p.free( *static_cast<tbb::task*>(task) );
1100 }
1101 
1102 #endif /* __TBB_task_H */
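
For illustration, a minimal blocking-style usage sketch of the API declared above, modeled on the classic Fibonacci example (FibTask and parallel_fib are illustrative names, not part of task.h): allocate children with allocate_child(), set the reference count to the number of children plus one for the wait, then block in spawn_and_wait_for_all().

#include "tbb/task.h"

class FibTask : public tbb::task {
    const long n_;
    long* const sum_;
public:
    FibTask( long n, long* sum ) : n_(n), sum_(sum) {}
    tbb::task* execute() __TBB_override {
        if( n_ < 2 ) {
            *sum_ = n_;
        } else {
            long x, y;
            FibTask& a = *new( allocate_child() ) FibTask( n_ - 1, &x );
            FibTask& b = *new( allocate_child() ) FibTask( n_ - 2, &y );
            set_ref_count( 3 );              // two children + one for the wait below
            spawn( b );
            spawn_and_wait_for_all( a );     // run a here, wait until b also finishes
            *sum_ = x + y;
        }
        return NULL;
    }
};

long parallel_fib( long n ) {
    long sum;
    FibTask& root = *new( tbb::task::allocate_root() ) FibTask( n, &sum );
    tbb::task::spawn_root_and_wait( root ); // run the root and reclaim it when done
    return sum;
}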
task & pop_front()
Pop the front task from the list.
Definition: task.h:1025
void __TBB_EXPORTED_METHOD free(task &) const
Definition: task.cpp:138
bool empty() const
True if list is empty; false otherwise.
Definition: task.h:1004
__TBB_EXPORTED_METHOD ~task_group_context()
task_group_context(kind_type relation_with_parent=bound, uintptr_t t=default_traits)
Default & binding constructor.
Definition: task.h:481
friend class internal::allocate_root_proxy
Definition: task.h:931
#define __TBB_override
Definition: tbb_stddef.h:244
internal::allocate_child_proxy & allocate_child()
Returns proxy for overloaded new that allocates a child task of *this.
Definition: task.h:654
void __TBB_EXPORTED_METHOD register_pending_exception()
Records the pending exception, and cancels the task group.
task * self
No longer used, but retained for binary layout compatibility. Always NULL.
Definition: task.h:49
__TBB_atomic reference_count ref_count
Reference count used for synchronization.
Definition: task.h:252
static const kind_type detached
Definition: task.h:568
void clear()
Clear the list.
Definition: task.h:1034
Memory prefix to a task object.
Definition: task.h:188
friend class internal::scheduler
Definition: task.h:193
friend class internal::allocate_root_with_context_proxy
Definition: task.h:564
unsigned short affinity_id
An id as used for specifying affinity.
Definition: task.h:124
friend class internal::allocate_child_proxy
Definition: task.h:936
void __TBB_EXPORTED_METHOD internal_set_ref_count(int count)
Set reference count.
Definition: task.cpp:187
uintptr_t my_state
Internal state (combination of state flags, currently only may_have_children).
Definition: task.h:432
int depth
Obsolete. Used to be scheduling depth before TBB 2.2.
Definition: task.h:257
char _leading_padding[internal::NFS_MaxLineSize - 2 *sizeof(uintptr_t) - sizeof(void *) - sizeof(internal::context_list_node_t) - sizeof(__itt_caller) - sizeof(internal::cpu_ctl_env_space)]
Leading padding protecting accesses to frequently used members from false sharing.
Definition: task.h:407
friend class internal::allocate_additional_child_of_proxy
Definition: task.h:197
static void __TBB_EXPORTED_FUNC destroy(task &victim)
Destroy a task.
Definition: task.cpp:216
#define __TBB_FetchAndDecrementWrelease(P)
Definition: tbb_machine.h:315
friend bool is_critical(task &)
Definition: task.h:953
virtual task * execute()=0
Should be overridden by derived classes.
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:169
const isolation_tag no_isolation
Definition: task.h:129
priority_t priority() const
Retrieves current priority of the current task group.
task object is freshly allocated or recycled.
Definition: task.h:620
void __TBB_EXPORTED_METHOD free(task &) const
Definition: task.cpp:125
task &__TBB_EXPORTED_METHOD allocate(size_t size) const
Definition: task.cpp:118
task that does nothing. Useful for synchronization.
Definition: task.h:959
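
A common idiom suggested by this class is a dummy root that exists only to be waited on; since the dummy never runs, it must be destroyed explicitly. A minimal sketch (WorkTask is a hypothetical task subclass):

tbb::empty_task& dummy = *new( tbb::task::allocate_root() ) tbb::empty_task;
dummy.set_ref_count( 2 );                    // one child + one for the wait
tbb::task& work = *new( dummy.allocate_child() ) WorkTask();
dummy.spawn_and_wait_for_all( work );        // returns once work has completed
dummy.destroy( dummy );                      // dummy never executes, so free it by hand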
task ** next_ptr
Definition: task.h:993
affinity_id affinity() const
Current affinity of this task.
Definition: task.h:885
allocate_root_with_context_proxy(task_group_context &ctx)
Definition: task.h:143
Used to form groups of tasks.
Definition: task.h:335
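
A minimal sketch of grouping work under an explicit context so the whole task tree can be cancelled together (RootTask is a hypothetical task subclass):

tbb::task_group_context ctx;
tbb::task& root = *new( tbb::task::allocate_root( ctx ) ) RootTask();
tbb::task::spawn_root_and_wait( root );      // every task in the tree shares ctx
if( ctx.is_group_execution_cancelled() ) {
    // some task called ctx.cancel_group_execution(), or an exception was thrown
}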
void suppress_unused_warning(const T1 &)
Utility template function to prevent "unused" warnings by various compilers.
Definition: tbb_stddef.h:381
internal::reference_count __TBB_EXPORTED_METHOD internal_decrement_ref_count()
Decrement reference count and return its new value.
Definition: task.cpp:196
exception_container_type * my_exception
Pointer to the container storing exception being propagated across this task group.
Definition: task.h:426
#define __TBB_TASK_BASE_ACCESS
Definition: task.h:42
tbb::task * next
"next" field for list of task
Definition: task.h:275
friend class internal::scheduler
Definition: task.h:930
static void spawn_root_and_wait(task &root)
Spawn task allocated by allocate_root, wait for it to complete, and deallocate it.
Definition: task.h:781
~task_list()
Destroys the list, but does not destroy the task objects.
Definition: task.h:1001
friend class internal::allocate_root_with_context_proxy
Definition: task.h:933
friend class task
Definition: task.h:994
Base class for types that should not be copied or assigned.
Definition: tbb_stddef.h:335
task &__TBB_EXPORTED_METHOD allocate(size_t size) const
Definition: task.cpp:132
tbb::task & task()
The task corresponding to this task_prefix.
Definition: task.h:278
static void __TBB_EXPORTED_FUNC free(task &)
Definition: task.cpp:51
A list of children.
Definition: task.h:990
#define __TBB_FetchAndIncrementWacquire(P)
Definition: tbb_machine.h:314
void recycle_as_child_of(task &new_parent)
Change this to be a child of new_parent.
Definition: task.h:698
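
A minimal sketch of the recycling idiom this method enables (CountTask is illustrative): call allocate_continuation() first, because it clears this task's parent pointer, then recycle this object as a child of the continuation and return this so the scheduler re-runs the recycled body immediately.

class CountTask : public tbb::task {
    int n_;
public:
    explicit CountTask( int n ) : n_(n) {}
    tbb::task* execute() __TBB_override {
        if( n_ <= 0 )
            return NULL;                     // done: task is destroyed normally
        tbb::empty_task& c = *new( allocate_continuation() ) tbb::empty_task;
        c.set_ref_count( 1 );                // exactly one child: the recycled *this
        recycle_as_child_of( c );            // reuse this object instead of freeing it
        --n_;
        return this;                         // scheduler bypass: run the recycled task next
    }
};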
friend class internal::allocate_continuation_proxy
Definition: task.h:196
__itt_caller itt_caller
Used to set and maintain stack stitching point for Intel Performance Tools.
Definition: task.h:395
bool __TBB_EXPORTED_METHOD cancel_group_execution()
Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
Base class for user-defined tasks.
Definition: task.h:592
Work stealing task scheduler.
Definition: scheduler.h:124
char _trailing_padding[internal::NFS_MaxLineSize - 2 *sizeof(uintptr_t) - 2 *sizeof(void *) - sizeof(intptr_t) - sizeof(internal::string_index)]
Trailing padding protecting accesses to frequently used members from false sharing.
Definition: task.h:449
void set_parent(task *p)
Sets the parent task pointer to the specified value.
Definition: task.h:833
internal::string_index my_name
Description of algorithm for scheduler-based instrumentation.
Definition: task.h:440
context_list_node_t * my_prev
Definition: task.h:136
version_tag_v3 version_tag
Definition: tbb_stddef.h:390
intptr_t isolation_tag
A tag for task isolation.
Definition: task.h:128
uintptr_t _my_kind_aligner
Definition: task.h:383
internal::generic_scheduler * my_owner
Scheduler instance that registered this context in its thread specific list.
Definition: task.h:429
static const kind_type binding_required
Definition: task.h:566
task is running, and will be destroyed after method execute() completes.
Definition: task.h:614
void bind_to(internal::generic_scheduler *local_sched)
Registers this context with the local scheduler and binds it to its parent context.
function_task(const F &f)
Definition: task.h:979
void __TBB_EXPORTED_METHOD reset()
Forcefully reinitializes the context after the task tree it was associated with is completed.
task * first
Definition: task.h:992
Interface to be implemented by all exceptions TBB recognizes and propagates across the threads.
bool is_critical(task &t)
Definition: task.h:953
virtual void wait_for_all(task &parent, task *child)=0
For internal use only.
static void enqueue(task &t)
Enqueue task for starvation-resistant execution.
Definition: task.h:809
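
A minimal fire-and-forget sketch (BackgroundTask is hypothetical): an enqueued root task has no parent waiting on it, so the scheduler simply reclaims it after execute() returns.

tbb::task& t = *new( tbb::task::allocate_root() ) BackgroundTask();
tbb::task::enqueue( t );                     // returns immediately; a worker runs t later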
task * execute() __TBB_override
Should be overridden by derived classes.
Definition: task.h:974
Base class for types that should not be assigned.
Definition: tbb_stddef.h:324
void spawn_and_wait_for_all(task &child)
Similar to spawn followed by wait_for_all, but more efficient.
Definition: task.h:773
internal::affinity_id affinity_id
An id as used for specifying affinity.
Definition: task.h:879
task()
Default constructor.
Definition: task.h:602
task_list()
Construct empty list.
Definition: task.h:998
task * parent() const
task on whose behalf this task is working, or NULL if this is a root.
Definition: task.h:830
void wait_for_all()
Wait for reference count to become one, and set reference count to zero.
Definition: task.h:792
int space[sizeof(internal::uint64_t)/sizeof(int)]
Definition: task.h:59
friend class internal::allocate_continuation_proxy
Definition: task.h:935
priority_t
Definition: task.h:295
internal::allocate_continuation_proxy & allocate_continuation()
Returns proxy for overloaded new that allocates a continuation task of *this.
Definition: task.h:649
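
A continuation-passing variant of the FibTask sketch shown after the listing (FibContinuation is likewise illustrative): instead of blocking, the running task hands its place in the tree to a continuation and returns without calling wait_for_all().

struct FibContinuation : public tbb::task {
    long* const sum_;
    long x_, y_;
    explicit FibContinuation( long* sum ) : sum_(sum) {}
    tbb::task* execute() __TBB_override { *sum_ = x_ + y_; return NULL; }
};

tbb::task* FibTask::execute() {              // continuation-passing version
    if( n_ < 2 ) { *sum_ = n_; return NULL; }
    FibContinuation& c = *new( allocate_continuation() ) FibContinuation( sum_ );
    FibTask& a = *new( c.allocate_child() ) FibTask( n_ - 1, &c.x_ );
    FibTask& b = *new( c.allocate_child() ) FibTask( n_ - 2, &c.y_ );
    c.set_ref_count( 2 );                    // exactly two children: nothing waits on c
    spawn( b );
    return &a;                               // scheduler bypass: execute a immediately
}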
void __TBB_EXPORTED_METHOD free(task &) const
Definition: task.cpp:109
context_list_node_t * my_next
Definition: task.h:136
void __TBB_EXPORTED_METHOD free(task &) const
#define __TBB_EXPORTED_METHOD
Definition: tbb_stddef.h:102
static tbb::internal::allocate_additional_child_of_proxy allocate_additional_child_of(task &t)
Like allocate_child, except that task's parent becomes "t", not this.
Definition: task.h:83
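
A minimal sketch of adding a child to a parent that may already be running (parent and ExtraTask are assumed names): the proxy atomically increments parent's reference count, so no set_ref_count() adjustment is needed, but the parent must still be guaranteed not to finish its wait before the extra child is spawned.

tbb::task& extra = *new( tbb::task::allocate_additional_child_of( parent ) ) ExtraTask();
tbb::task::spawn( extra );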
friend class internal::generic_scheduler
Definition: task.h:337
state_type
Enumeration of task states that the scheduler considers.
Definition: task.h:612
void set_priority(priority_t)
Changes priority of the task group.
__TBB_atomic kind_type my_kind
Flavor of this context: bound or isolated.
Definition: task.h:382
static const int priority_stride_v4
Definition: task.h:288
uintptr_t my_cancellation_requested
Specifies whether cancellation was requested for this task group.
Definition: task.h:417
priority_t group_priority() const
Retrieves current priority of the task group this task belongs to.
Definition: task.h:923
task to be recycled as continuation
Definition: task.h:624
virtual void enqueue(task &t, void *reserved)=0
For internal use only.
void set_affinity(affinity_id id)
Set affinity for this task.
Definition: task.h:882
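
A sketch of the affinity replay idiom (ChunkTask and record are illustrative): record where a task actually ran by overriding note_affinity(), then pass that id to set_affinity() when allocating the corresponding task on a later pass.

class ChunkTask : public tbb::task {
    affinity_id& record_;                    // storage that outlives the task
public:
    explicit ChunkTask( affinity_id& record ) : record_(record) {}
    void note_affinity( affinity_id id ) __TBB_override {
        record_ = id;                        // remember which worker ran this chunk
    }
    tbb::task* execute() __TBB_override {
        /* process the chunk */
        return NULL;
    }
};

// Later, before spawning the chunk again:
// ChunkTask& t = *new( some_parent.allocate_child() ) ChunkTask( record );
// t.set_affinity( record );                 // 0 means "no affinity"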
int decrement_ref_count()
Atomically decrement reference count and returns its new value.
Definition: task.h:761
void call_itt_notify(notify_type, void *)
void push_back(task &task)
Push task onto back of list.
Definition: task.h:1007
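
A minimal sketch of batching children through a task_list inside a parent task's execute() (ParentTask and ChildTask are hypothetical): set the reference count to the number of children plus one for the wait, push each allocate_child() result, then spawn the whole list at once.

tbb::task* ParentTask::execute() {
    const int kChildren = 4;                 // illustrative fan-out
    tbb::task_list list;
    set_ref_count( kChildren + 1 );          // children + one for wait_for_all()
    for( int i = 0; i < kChildren; ++i )
        list.push_back( *new( allocate_child() ) ChildTask( i ) );
    spawn( list );                           // spawns every task in the list and empties it
    wait_for_all();                          // blocks until all children have completed
    return NULL;
}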
intptr_t my_priority
Priority level of the task group (in normalized representation)
Definition: task.h:436
task &__TBB_EXPORTED_METHOD allocate(size_t size) const
Definition: task.cpp:100
internal::tbb_exception_ptr exception_container_type
Definition: task.h:344
task is in ready pool, or is going to be put there, or was just taken off.
Definition: task.h:618
void propagate_task_group_state(T task_group_context::*mptr_state, task_group_context &src, T new_state)
Propagates any state change detected to *this, and as an optimisation possibly also upward along the ...
friend class internal::allocate_child_proxy
Definition: task.h:195
task object is on free list, or is going to be put there, or was just taken off.
Definition: task.h:622
virtual void spawn(task &first, task *&next)=0
For internal use only.
void recycle_to_reexecute()
Schedule this for reexecution after current execute() returns.
Definition: task.h:714
static task &__TBB_EXPORTED_FUNC allocate(size_t size)
Definition: task.cpp:39
task_group_context * group()
Pointer to the task group descriptor.
Definition: task.h:846
virtual ~task()
Destructor.
Definition: task.h:606
Class delimiting the scope of task scheduler activity.
uintptr_t my_version_and_traits
Version for run-time checks and behavioral traits of the context.
Definition: task.h:423
friend void make_critical(task &)
Definition: task.h:952
bool __TBB_EXPORTED_METHOD is_group_execution_cancelled() const
Returns true if the context received cancellation request.
virtual void spawn_root_and_wait(task &first, task *&next)=0
For internal use only.
void increment_ref_count()
Atomically increment reference count.
Definition: task.h:744
#define __TBB_EXPORTED_FUNC
Base class for methods that became static in TBB 3.0.
Definition: task.h:70
scheduler * owner
Obsolete. The scheduler that owns the task.
Definition: task.h:232
isolation_tag isolation
The tag used for task isolation.
Definition: task.h:205
bool __TBB_EXPORTED_METHOD is_owned_by_current_thread() const
Obsolete, and only retained for the sake of backward compatibility. Always returns true.
Definition: task.cpp:212
const size_t NFS_MaxLineSize
Compile-time constant that is upper bound on cache line/sector size.
Definition: tbb_stddef.h:220
task * next_offloaded
Pointer to the next offloaded lower priority task.
Definition: task.h:237
friend class internal::allocate_root_proxy
Definition: task.h:194
void __TBB_EXPORTED_METHOD change_group(task_group_context &ctx)
Moves this task from its current group into another one.
static const kind_type binding_completed
Definition: task.h:567
task_group_context * my_parent
Pointer to the context of the parent cancellation group. NULL for isolated contexts.
Definition: task.h:387
void recycle_as_safe_continuation()
The recommended, safe variant of recycle_as_continuation.
Definition: task.h:692
static void spawn(task &t)
Schedule task for execution when a worker becomes available.
Definition: task.h:1040
bool cancel_group_execution()
Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
Definition: task.h:910
bool is_cancelled() const
Returns true if the context has received cancellation request.
Definition: task.h:913
internal::context_list_node_t my_node
Used to form the thread specific list of contexts without additional memory allocation.
Definition: task.h:392
#define __TBB_atomic
Definition: tbb_stddef.h:241
unsigned char state
A task::state_type, stored as a byte for compactness.
Definition: task.h:261
void set_ref_count(int count)
Set reference count.
Definition: task.h:734
task_group_context * context
Shared context that is used to communicate asynchronous state changes.
Definition: task.h:215
void __TBB_EXPORTED_METHOD init()
Out-of-line part of the constructor.
friend class internal::allocate_additional_child_of_proxy
Definition: task.h:937
void copy_fp_settings(const task_group_context &src)
Copies FPU control setting from another context.
void recycle_as_continuation()
Change this to be a continuation of its former self.
Definition: task.h:684
virtual ~scheduler()=0
Pure virtual destructor.
Definition: scheduler.cpp:76
state_type state() const
Current execution state.
Definition: task.h:859
task_group_context * context()
This method is deprecated and will be removed in the future.
Definition: task.h:843
static internal::allocate_root_proxy allocate_root()
Returns proxy for overloaded new that allocates a root task.
Definition: task.h:636
void make_critical(task &t)
Definition: task.h:952
intptr_t reference_count
A reference count.
Definition: task.h:121
static const int priority_critical
Definition: task.h:291
internal::task_prefix & prefix(internal::version_tag *=NULL) const
Get reference to corresponding task_prefix.
Definition: task.h:941
void register_with(internal::generic_scheduler *local_sched)
Registers this context with the local scheduler.
unsigned char extra_state
Miscellaneous state that is not directly visible to users, stored as a byte for compactness.
Definition: task.h:270
bool is_stolen_task() const
True if task was stolen from the task pool of another thread.
Definition: task.h:850
scheduler * origin
The scheduler that allocated the task, or NULL if the task is big.
Definition: task.h:224
int add_ref_count(int count)
Atomically adds to reference count and returns its new value.
Definition: task.h:750
struct ___itt_caller * __itt_caller
Definition: task.h:29
internal::cpu_ctl_env_space my_cpu_ctl_env
Space for platform-specific FPU settings.
Definition: task.h:413
affinity_id affinity
Definition: task.h:272
void set_group_priority(priority_t p)
Changes priority of the task group this task belongs to.
Definition: task.h:920
void __TBB_EXPORTED_METHOD capture_fp_settings()
Captures the current FPU control settings to the context.
task_group_context(internal::string_index name)
Definition: task.h:491
virtual void __TBB_EXPORTED_METHOD note_affinity(affinity_id id)
Invoked by scheduler to notify task that it ran on unexpected thread.
Definition: task.cpp:249
task &__TBB_EXPORTED_METHOD allocate(size_t size) const
task * execute() __TBB_override
Should be overridden by derived classes.
Definition: task.h:960
tbb::task * parent
The task whose reference count includes me.
Definition: task.h:245
task to be rescheduled.
Definition: task.h:616
static const kind_type dying
Definition: task.h:569
uintptr_t traits() const
Returns the context's traits.
Definition: task.h:555
int ref_count() const
The internal reference count.
Definition: task.h:862
