34 #if __TBB_TASK_PRIORITY 36 arena *&next = my_priority_levels[a.my_top_priority].next_arena;
42 if ( arenas.
size() == 1 )
43 next = &*arenas.
begin();
47 #if __TBB_TASK_PRIORITY 49 arena *&next = my_priority_levels[a.my_top_priority].next_arena;
54 arena_list_type::iterator it = next;
57 if ( ++it == arenas.
end() && arenas.
size() > 1 )
68 market::market (
unsigned workers_soft_limit,
unsigned workers_hard_limit,
size_t stack_size )
69 : my_num_workers_hard_limit(workers_hard_limit)
70 , my_num_workers_soft_limit(workers_soft_limit)
72 , my_global_top_priority(normalized_normal_priority)
73 , my_global_bottom_priority(normalized_normal_priority)
76 , my_stack_size(stack_size)
77 , my_workers_soft_limit_to_report(workers_soft_limit)
79 #if __TBB_TASK_PRIORITY 92 workers_soft_limit = soft_limit-1;
95 if( workers_soft_limit >= workers_hard_limit )
96 workers_soft_limit = workers_hard_limit-1;
97 return workers_soft_limit;
107 if( old_public_count==0 )
113 "skip_soft_limit_warning must be larger than any valid workers_requested" );
115 if( soft_limit_to_report < workers_requested ) {
117 "The request for %u workers is ignored. Further requests for more workers " 118 "will be silently ignored until the limit changes.\n",
119 soft_limit_to_report, workers_requested );
129 "The request for larger stack (%u) cannot be satisfied.\n",
134 if( stack_size == 0 )
148 #if __TBB_TASK_GROUP_CONTEXT 150 "my_workers must be the last data field of the market class");
155 memset( storage, 0,
size );
157 m =
new (storage)
market( workers_soft_limit, workers_hard_limit, stack_size );
163 runtime_warning(
"RML might limit the number of workers to %u while %u is requested.\n" 164 , m->
my_server->default_concurrency(), workers_soft_limit );
170 #if __TBB_COUNT_TASK_NODES 171 if ( my_task_node_count )
172 runtime_warning(
"Leaked %ld task objects\n", (
long)my_task_node_count );
174 this->market::~market();
181 bool do_release =
false;
184 if ( blocking_terminate ) {
185 __TBB_ASSERT( is_public,
"Only an object with a public reference can request the blocking terminate" );
216 return blocking_terminate;
222 int old_requested=0, requested=0;
223 bool need_mandatory =
false;
241 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY 244 if( !(m->my_mandatory_num_requested && !soft_limit) )
248 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY 249 m->my_mandatory_num_requested? 0 :
252 requested =
min(demand, (
int)soft_limit);
255 #if __TBB_TASK_PRIORITY 256 m->my_priority_levels[m->my_global_top_priority].workers_available = soft_limit;
262 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY 263 if( !m->my_mandatory_num_requested && !soft_limit ) {
266 #if __TBB_TASK_PRIORITY 267 for(
int p = m->my_global_top_priority;
p >= m->my_global_bottom_priority; --
p ) {
268 priority_level_info &pl = m->my_priority_levels[
p];
274 for( arena_list_type::iterator it = arenas.
begin(); it != arenas.
end(); ++it ) {
275 if( !it->my_task_stream.empty(
p) ) {
277 if( m->mandatory_concurrency_enable_impl( &*it ) )
278 need_mandatory =
true;
281 #if __TBB_TASK_PRIORITY 288 int delta = requested - old_requested;
289 if( need_mandatory ) ++delta;
291 m->
my_server->adjust_job_count_estimate( delta );
297 return ((
const market&)client).must_join_workers();
330 #if __TBB_TASK_PRIORITY 334 priority_level_info &pl = my_priority_levels[
p];
340 if ( it->my_aba_epoch == aba_epoch ) {
356 #if __TBB_TASK_PRIORITY 364 if ( arenas.
empty() )
366 arena_list_type::iterator it = hint;
370 if ( ++it == arenas.
end() )
373 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY 374 && !a.recall_by_mandatory_request()
380 }
while ( it != hint );
386 max_workers =
min(workers_demand, max_workers);
389 arena_list_type::iterator it = arenas.
begin();
390 for ( ; it != arenas.
end(); ++it ) {
397 int allotted = tmp / workers_demand;
398 carry = tmp % workers_demand;
401 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY 402 if ( !allotted && a.must_have_concurrency() )
406 assigned += allotted;
408 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY 419 for ( arena_list_type::iterator it = arenas.
begin(); it != arenas.
end(); ++it )
426 #if __TBB_TASK_PRIORITY 427 inline void market::update_global_top_priority ( intptr_t newPriority ) {
429 my_global_top_priority = newPriority;
430 my_priority_levels[newPriority].workers_available =
431 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY 435 advance_global_reload_epoch();
438 inline void market::reset_global_priority () {
439 my_global_bottom_priority = normalized_normal_priority;
440 update_global_top_priority(normalized_normal_priority);
448 int p = my_global_top_priority;
456 while ( !a &&
p >= my_global_bottom_priority ) {
457 priority_level_info &pl = my_priority_levels[
p--];
470 intptr_t i = highest_affected_priority;
471 int available = my_priority_levels[i].workers_available;
472 for ( ; i >= my_global_bottom_priority; --i ) {
473 priority_level_info &pl = my_priority_levels[i];
474 pl.workers_available = available;
475 if ( pl.workers_requested ) {
476 available -=
update_allotment( pl.arenas, pl.workers_requested, available );
477 if ( available < 0 ) {
483 __TBB_ASSERT( i <= my_global_bottom_priority || !available, NULL );
484 for ( --i; i >= my_global_bottom_priority; --i ) {
485 priority_level_info &pl = my_priority_levels[i];
486 pl.workers_available = 0;
487 arena_list_type::iterator it = pl.arenas.begin();
488 for ( ; it != pl.arenas.end(); ++it ) {
489 __TBB_ASSERT( it->my_num_workers_requested >= 0 || !it->my_num_workers_allotted, NULL );
490 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY 491 it->my_num_workers_allotted = it->must_have_concurrency() ? 1 : 0;
493 it->my_num_workers_allotted = 0;
500 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY 501 bool market::mandatory_concurrency_enable_impl ( arena *a,
bool *enabled ) {
502 if( a->my_concurrency_mode==arena_base::cm_enforced_global ) {
509 a->my_max_num_workers = 1;
510 a->my_concurrency_mode = arena_base::cm_enforced_global;
511 #if __TBB_TASK_PRIORITY 512 priority_level_info &pl = my_priority_levels[a->my_top_priority];
513 pl.workers_requested++;
514 if( my_global_top_priority < a->my_top_priority ) {
515 my_global_top_priority = a->my_top_priority;
516 advance_global_reload_epoch();
519 a->my_num_workers_requested++;
520 a->my_num_workers_allotted++;
521 if( 1 == ++my_mandatory_num_requested ) {
528 bool market::mandatory_concurrency_enable ( arena *a ) {
533 add_thread = mandatory_concurrency_enable_impl(a, &enabled);
536 my_server->adjust_job_count_estimate( 1 );
540 void market::mandatory_concurrency_disable ( arena *a ) {
541 bool remove_thread =
false;
542 int delta_adjust_demand = 0;
547 if( a->my_concurrency_mode!=arena_base::cm_enforced_global )
550 a->my_max_num_workers = 0;
551 #if __TBB_TASK_PRIORITY 552 if ( a->my_top_priority != normalized_normal_priority ) {
553 update_arena_top_priority( *a, normalized_normal_priority );
555 a->my_bottom_priority = normalized_normal_priority;
558 int val = --my_mandatory_num_requested;
562 remove_thread =
true;
564 a->my_num_workers_requested--;
565 if (a->my_num_workers_requested > 0)
566 delta_adjust_demand = a->my_num_workers_requested;
568 a->my_num_workers_allotted = 0;
570 #if __TBB_TASK_PRIORITY 571 priority_level_info &pl = my_priority_levels[a->my_top_priority];
572 pl.workers_requested--;
573 intptr_t
p = my_global_top_priority;
574 for (; !my_priority_levels[
p].workers_requested &&
p>0;
p--)
577 reset_global_priority();
578 else if(
p!= my_global_top_priority )
579 update_global_top_priority(
p);
581 a->my_concurrency_mode = arena::cm_normal;
583 if( delta_adjust_demand )
586 my_server->adjust_job_count_estimate( -1 );
598 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY 600 if ( a.
my_market->my_mandatory_num_requested && a.my_concurrency_mode!=arena_base::cm_normal )
605 if ( prev_req <= 0 ) {
611 else if ( prev_req < 0 ) {
615 #if !__TBB_TASK_PRIORITY 618 intptr_t
p = a.my_top_priority;
619 priority_level_info &pl = my_priority_levels[
p];
620 pl.workers_requested += delta;
623 if ( a.my_top_priority != normalized_normal_priority ) {
625 update_arena_top_priority( a, normalized_normal_priority );
627 a.my_bottom_priority = normalized_normal_priority;
629 if (
p == my_global_top_priority ) {
630 if ( !pl.workers_requested ) {
631 while ( --
p >= my_global_bottom_priority && !my_priority_levels[
p].workers_requested )
633 if (
p < my_global_bottom_priority )
634 reset_global_priority();
636 update_global_top_priority(
p);
640 else if (
p > my_global_top_priority ) {
644 update_global_top_priority(
p);
646 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY 649 && a.
my_market->my_mandatory_num_requested && a.my_concurrency_mode!=arena_base::cm_normal )
655 else if (
p == my_global_bottom_priority ) {
656 if ( !pl.workers_requested ) {
657 while ( ++
p <= my_global_top_priority && !my_priority_levels[
p].workers_requested )
659 if (
p > my_global_top_priority )
660 reset_global_priority();
662 my_global_bottom_priority =
p;
667 else if (
p < my_global_bottom_priority ) {
668 int prev_bottom = my_global_bottom_priority;
669 my_global_bottom_priority =
p;
673 __TBB_ASSERT( my_global_bottom_priority <
p &&
p < my_global_top_priority, NULL );
694 my_server->adjust_job_count_estimate( delta );
704 for (
int i = 0; i < 2; ++i) {
744 #if __TBB_TASK_GROUP_CONTEXT 747 my_workers[index - 1] =
s;
752 #if __TBB_TASK_PRIORITY 753 void market::update_arena_top_priority (
arena& a, intptr_t new_priority ) {
755 __TBB_ASSERT( a.my_top_priority != new_priority, NULL );
756 priority_level_info &prev_level = my_priority_levels[a.my_top_priority],
757 &new_level = my_priority_levels[new_priority];
759 a.my_top_priority = new_priority;
764 __TBB_ASSERT( prev_level.workers_requested >= 0 && new_level.workers_requested >= 0, NULL );
767 bool market::lower_arena_priority (
arena& a, intptr_t new_priority, uintptr_t old_reload_epoch ) {
770 if ( a.my_reload_epoch != old_reload_epoch ) {
775 __TBB_ASSERT( my_global_top_priority >= a.my_top_priority, NULL );
777 intptr_t
p = a.my_top_priority;
778 update_arena_top_priority( a, new_priority );
780 if ( my_global_bottom_priority > new_priority ) {
781 my_global_bottom_priority = new_priority;
783 if (
p == my_global_top_priority && !my_priority_levels[
p].workers_requested ) {
785 for ( --
p;
p>my_global_bottom_priority && !my_priority_levels[
p].workers_requested; --
p )
continue;
786 update_global_top_priority(
p);
791 __TBB_ASSERT( my_global_top_priority >= a.my_top_priority, NULL );
796 bool market::update_arena_priority ( arena& a, intptr_t new_priority ) {
800 tbb::internal::assert_priority_valid(new_priority);
801 __TBB_ASSERT( my_global_top_priority >= a.my_top_priority || a.my_num_workers_requested <= 0, NULL );
803 if ( a.my_top_priority == new_priority ) {
806 else if ( a.my_top_priority > new_priority ) {
807 if ( a.my_bottom_priority > new_priority )
808 a.my_bottom_priority = new_priority;
811 else if ( a.my_num_workers_requested <= 0 ) {
815 __TBB_ASSERT( my_global_top_priority >= a.my_top_priority, NULL );
817 intptr_t
p = a.my_top_priority;
818 intptr_t highest_affected_level =
max(
p, new_priority);
819 update_arena_top_priority( a, new_priority );
821 if ( my_global_top_priority < new_priority ) {
822 update_global_top_priority(new_priority);
824 else if ( my_global_top_priority == new_priority ) {
825 advance_global_reload_epoch();
828 __TBB_ASSERT( new_priority < my_global_top_priority, NULL );
829 __TBB_ASSERT( new_priority > my_global_bottom_priority, NULL );
830 if (
p == my_global_top_priority && !my_priority_levels[
p].workers_requested ) {
833 for ( --
p; !my_priority_levels[
p].workers_requested; --
p )
continue;
835 update_global_top_priority(
p);
836 highest_affected_level =
p;
839 if (
p == my_global_bottom_priority ) {
842 __TBB_ASSERT( new_priority <= my_global_top_priority, NULL );
843 while ( my_global_bottom_priority < my_global_top_priority
844 && !my_priority_levels[my_global_bottom_priority].workers_requested )
845 ++my_global_bottom_priority;
846 __TBB_ASSERT( my_global_bottom_priority <= new_priority, NULL );
847 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY 848 const bool enforced_concurrency = my_mandatory_num_requested && a.must_have_concurrency();
850 const bool enforced_concurrency =
false;
852 __TBB_ASSERT_EX( enforced_concurrency || my_priority_levels[my_global_bottom_priority].workers_requested > 0, NULL );
856 __TBB_ASSERT( my_global_top_priority >= a.my_top_priority, NULL );
void __TBB_EXPORTED_FUNC runtime_warning(const char *format,...)
Report a runtime warning.
void *__TBB_EXPORTED_FUNC NFS_Allocate(size_t n_element, size_t element_size, void *hint)
Allocate memory on cache/sector line boundary.
unsigned my_workers_soft_limit_to_report
Either workers soft limit to be reported via runtime_warning() or skip_soft_limit_warning.
void free_arena()
Completes arena shutdown, destructs and deallocates it.
unsigned my_public_ref_count
Count of master threads attached.
bool my_join_workers
Shutdown mode.
uintptr_t my_arenas_aba_epoch
ABA prevention marker to assign to newly created arenas.
static const unsigned skip_soft_limit_warning
The value indicating that the soft limit warning is unnecessary.
static const unsigned ref_worker
unsigned my_num_workers_allotted
The number of workers that have been marked out by the resource manager to service the arena.
int my_total_demand
Number of workers that were requested by all arenas.
arena * my_next_arena
The first arena to be checked when idle worker seeks for an arena to enter.
T __TBB_load_with_acquire(const volatile T &location)
market * my_market
The market that owns this arena.
static market * theMarket
Currently active global market.
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
void adjust_demand(arena &, int delta)
Request that the arena's demand for workers be adjusted by delta.
static market & global_market(bool is_public, unsigned max_num_workers=0, size_t stack_size=0)
Factory method creating new market object.
static unsigned default_num_threads()
arena * arena_in_need(arena *)
Returns next arena that needs more workers, or NULL.
static void add_ref()
Add reference to resources. If first reference added, acquire the resources.
bool is_worker() const
True if running on a worker thread, false otherwise.
#define __TBB_TASK_PRIORITY
int my_num_workers_requested
Number of workers currently requested from RML.
static unsigned calc_workers_soft_limit(unsigned workers_soft_limit, unsigned workers_hard_limit)
T min(const T &val1, const T &val2)
Utility template function returning lesser of the two values.
static rml::tbb_server * create_rml_server(rml::tbb_client &)
static generic_scheduler * local_scheduler_if_initialized()
static arena * create_arena(int num_slots, int num_reserved_slots, size_t stack_size)
Creates an arena object.
#define __TBB_offsetof(class_name, member_name)
Extended variant of the standard offsetof macro.
Work stealing task scheduler.
static void assume_scheduler(generic_scheduler *s)
Temporarily set TLS slot to the given scheduler.
static bool does_client_join_workers(const tbb::internal::rml::tbb_client &client)
#define __TBB_ASSERT_EX(predicate, comment)
"Extended" version is useful to suppress warnings if a variable is only used with an assert
unsigned my_ref_count
Reference count controlling market object lifetime.
void detach_arena(arena &)
Removes the arena from the market's list.
generic_scheduler * my_scheduler
Scheduler of the thread attached to the slot.
arenas_list_mutex_type my_arenas_list_mutex
void const char const char int ITT_FORMAT __itt_group_sync p
static bool UsePrivateRML
static void cleanup_worker(void *arg, bool worker)
Perform necessary cleanup when a worker thread finishes.
#define ITT_THREAD_SET_NAME(name)
bool is_arena_in_list(arena_list_type &arenas, arena *a)
static size_t active_value(parameter p)
arena_list_type my_arenas
List of registered arenas.
static global_market_mutex_type theMarketMutex
Mutex guarding creation/destruction of theMarket, insertions/deletions in my_arenas,...
tbb::atomic< uintptr_t > my_pool_state
Current task pool state and estimate of available tasks amount.
T max(const T &val1, const T &val2)
Utility template function returning greater of the two values.
atomic< unsigned > my_references
Reference counter for the arena.
market(unsigned workers_soft_limit, unsigned workers_hard_limit, size_t stack_size)
Constructor.
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void * lock
unsigned my_max_num_workers
The number of workers requested by the master thread owning the arena.
static const pool_state_t SNAPSHOT_EMPTY
No tasks to steal since last snapshot was taken.
static const intptr_t num_priority_levels
static void remove_ref()
Remove reference to resources. If last reference removed, release the resources.
void update_allotment()
Recalculates the number of workers allotted to each arena in the list, distributing the available workers in proportion to each arena's demand.
job * create_one_job() __TBB_override
void destroy()
Destroys and deallocates market object created by market::create()
size_t my_stack_size
Stack size of worker threads.
int my_num_workers_requested
The number of workers that are currently requested from the resource manager.
#define GATHER_STATISTIC(x)
bool release(bool is_public, bool blocking_terminate)
Decrements the market's reference count and destroys the market once the count reaches zero.
#define _T(string_literal)
Standard Windows style macro to markup the string literals.
static arena & allocate_arena(market &, unsigned num_slots, unsigned num_reserved_slots)
Allocate an instance of arena.
atomic< unsigned > my_first_unused_worker_idx
First unused index of worker.
rml::tbb_server * my_server
Pointer to the RML server object that services this TBB instance.
static unsigned app_parallelism_limit()
Reports active parallelism level according to user's settings.
uintptr_t my_aba_epoch
ABA prevention marker.
void acknowledge_close_connection() __TBB_override
void lock()
Acquire writer lock.
void const char const char int ITT_FORMAT __itt_group_sync s
void unlock()
Release lock.
static bool is_set(generic_scheduler *s)
Used to check validity of the local scheduler TLS contents.
void __TBB_EXPORTED_FUNC NFS_Free(void *)
Free memory allocated by NFS_Allocate.
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t size
static generic_scheduler * create_worker(market &m, size_t index)
Initialize a scheduler for a worker thread.
void remove_arena_from_list(arena &a)
unsigned my_num_workers_hard_limit
Maximal number of workers allowed for use by the underlying resource manager.
static void set_active_num_workers(unsigned w)
Set number of active workers.
void cleanup(job &j) __TBB_override
void process(job &j) __TBB_override
The scoped locking pattern.
void try_destroy_arena(arena *, uintptr_t aba_epoch)
Removes the arena from the market's list.
unsigned num_workers_active()
The number of workers active in the arena.
atomic< T > & as_atomic(T &t)
void assert_market_valid() const
unsigned my_num_workers_soft_limit
Current application-imposed limit on the number of workers (see set_active_num_workers())
void process(generic_scheduler &)
Registers the worker with the arena and enters TBB scheduler dispatch loop.
void insert_arena_into_list(arena &a)