Intel(R) Threading Building Blocks Doxygen Documentation
version 4.2.3
A scheduler with a customized evaluation loop.
#include <custom_scheduler.h>
Member Functions

custom_scheduler (market &m, bool genuine)

void local_wait_for_all (task &parent, task *child) __TBB_override
    Scheduler loop that dispatches tasks.

void wait_for_all (task &parent, task *child) __TBB_override
    Entry point from client code to the scheduler loop that dispatches tasks.

void tally_completion_of_predecessor (task &s, __TBB_ISOLATION_ARG(task *&bypass_slot, isolation_tag isolation))
    Decrements ref_count of a predecessor.

bool process_bypass_loop (context_guard_helper< SchedulerTraits::itt_possible > &context_guard, __TBB_ISOLATION_ARG(task *t, isolation_tag isolation))
    Implements the bypass loop of the dispatch loop (local_wait_for_all).

bool is_task_pool_published () const

bool is_local_task_pool_quiescent () const

bool is_quiescent_local_task_pool_empty () const

bool is_quiescent_local_task_pool_reset () const

void attach_mailbox (affinity_id id)

void init_stack_info ()
    Sets up the data necessary for the stealing limiting heuristics.

bool can_steal ()
    Returns true if stealing is allowed.

void publish_task_pool ()
    Used by workers to enter the task pool.

void leave_task_pool ()
    Leaves the task pool.

void reset_task_pool_and_leave ()
    Resets head and tail indices to 0, and leaves the task pool.

task ** lock_task_pool (arena_slot *victim_arena_slot) const
    Locks the victim's task pool and returns a pointer to it. The pointer can be NULL.

void unlock_task_pool (arena_slot *victim_arena_slot, task **victim_task_pool) const
    Unlocks the victim's task pool.

void acquire_task_pool () const
    Locks the local task pool.

void release_task_pool () const
    Unlocks the local task pool.

task * prepare_for_spawning (task *t)
    Checks whether t is affinitized to another thread and, if so, bundles it as a proxy.

void commit_spawned_tasks (size_t new_tail)
    Makes newly spawned tasks visible to thieves.

void commit_relocated_tasks (size_t new_tail)
    Makes relocated tasks visible to thieves and releases the local task pool.

task * get_task (__TBB_ISOLATION_EXPR(isolation_tag isolation))
    Gets a task from the local pool.

task * get_task (size_t T)
    Gets a task from the local pool at the specified location T.

task * get_mailbox_task (__TBB_ISOLATION_EXPR(isolation_tag isolation))
    Attempts to get a task from the mailbox.

task * steal_task (__TBB_ISOLATION_EXPR(isolation_tag isolation))
    Attempts to steal a task from a randomly chosen thread/scheduler.

task * steal_task_from (__TBB_ISOLATION_ARG(arena_slot &victim_arena_slot, isolation_tag isolation))
    Steals a task from another scheduler's ready pool.

size_t prepare_task_pool (size_t n)
    Makes sure that the task pool can accommodate at least n more elements.

bool cleanup_master (bool blocking_terminate)
    Performs the necessary cleanup when a master thread stops using TBB.

void assert_task_pool_valid () const

void attach_arena (arena *, size_t index, bool is_master)

void nested_arena_entry (arena *, size_t)

void nested_arena_exit ()

void wait_until_empty ()

void spawn (task &first, task *&next) __TBB_override
    For internal use only.

void spawn_root_and_wait (task &first, task *&next) __TBB_override
    For internal use only.

void enqueue (task &, void *reserved) __TBB_override
    For internal use only.

void local_spawn (task *first, task *&next)

void local_spawn_root_and_wait (task *first, task *&next)

void destroy ()
    Destroys and deallocates this scheduler object.

void cleanup_scheduler ()
    Cleans up this scheduler (the scheduler might be destroyed).

task & allocate_task (size_t number_of_bytes, __TBB_CONTEXT_ARG(task *parent, task_group_context *context))
    Allocates a task object, either from the heap or a free list.

template<free_task_hint h>
void free_task (task &t)
    Puts a task on the free list.

void deallocate_task (task &t)
    Returns a task object to the memory allocator.

bool is_worker () const
    True if running on a worker thread, false otherwise.

bool outermost_level () const
    True if the scheduler is on the outermost dispatch level.

bool master_outermost_level () const
    True if the scheduler is on the outermost dispatch level in a master thread.

bool worker_outermost_level () const
    True if the scheduler is on the outermost dispatch level in a worker thread.

unsigned max_threads_in_arena ()
    Returns the concurrency limit of the current arena.

void free_nonlocal_small_task (task &t)
    Frees a small task t that was allocated by a different scheduler.

generic_scheduler (market &, bool)

virtual ~scheduler ()=0
    Pure virtual destructor.
template<typename SchedulerTraits>
class tbb::internal::custom_scheduler< SchedulerTraits >
A scheduler with a customized evaluation loop.
The customization can use SchedulerTraits to make decisions without needing a run-time check.
Definition at line 52 of file custom_scheduler.h.
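The effect of this compile-time customization can be illustrated with a small standalone sketch. The trait names itt_possible and has_slow_atomic appear in this class; everything else below (scheduler_sketch, the printf stand-in for ITT_NOTIFY) is hypothetical, not TBB's API:

// Minimal sketch of the traits idiom: the traits type carries compile-time
// constants, so branches on them are resolved by the optimizer and the hot
// path pays nothing when a feature is off.
#include <cstdio>

struct FastTraits {
    static const bool itt_possible = false;   // no ITT instrumentation
    static const bool has_slow_atomic = false;
};

struct InstrumentedTraits {
    static const bool itt_possible = true;    // emit sync notifications
    static const bool has_slow_atomic = false;
};

template<typename SchedulerTraits>
struct scheduler_sketch {
    void dispatch_one() {
        if (SchedulerTraits::itt_possible)
            std::printf("sync_prepare notification\n"); // stand-in for ITT_NOTIFY
        // ... actual dispatch work ...
    }
};

int main() {
    scheduler_sketch<FastTraits>().dispatch_one();         // branch compiled out
    scheduler_sketch<InstrumentedTraits>().dispatch_one(); // branch taken
}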
◆ scheduler_type
template<typename SchedulerTraits >
typedef custom_scheduler< SchedulerTraits > scheduler_type
◆ custom_scheduler()
template<typename SchedulerTraits >
custom_scheduler (market &m, bool genuine)
◆ allocate_scheduler()
template<typename SchedulerTraits >
static generic_scheduler * allocate_scheduler (market &m, bool genuine)
◆ local_wait_for_all()
template<typename SchedulerTraits >
Scheduler loop that dispatches tasks.
If child is non-NULL, it is dispatched first. Then, until "parent" has a reference count of 1, other tasks are dispatched or stolen.
Implements tbb::internal::generic_scheduler.
Definition at line 557 of file custom_scheduler.h.
The source listing (abridged by the documentation generator) shows the shape of the loop:

Under __TBB_TASK_PRIORITY, the scheduler saves my_ref_top_priority and my_ref_reload_epoch, retargets them at parent.prefix().context->my_priority and my_arena->my_reload_epoch, and refreshes my_local_reload_epoch when the epoch reference changes. Under __TBB_TASK_ISOLATION, the incoming task is stamped with the caller's isolation tag. Under __TBB_PREVIEW_RESUMABLE_TASKS, a local recall_flag is published through my_current_is_recalled so that a coroutine which leaves the dispatch loop can later be recalled with tbb::task::suspend(recall_functor(&recall_flag)); an assertion enforces that "Only a coroutine on outermost level can be left."

The loop dispatches, and failing that steals, tasks until parent.prefix().ref_count reaches 1; a worker exiting with any other value trips the assertion "Worker thread exits nested dispatch loop prematurely". On success the count is reset to 0. On the way out, the saved priority references are restored (rewinding my_local_reload_epoch when the epoch reference changed), and under __TBB_HOARD_NONLOCAL_TASKS the my_nonlocal_free_list chain is drained. Under TBB_USE_EXCEPTIONS, a cancellation recorded in the parent's task_group_context is consumed (my_cancellation_requested is cleared) and any captured exception is rethrown via TbbRethrowException after context_guard.restore_default(). Final assertions check that a worker's dummy task context was not modified and that there is no "Unexpected exception or cancellation data in the master's dummy task".
References __TBB_ASSERT, __TBB_control_consistency_helper, __TBB_FetchAndDecrementWrelease, __TBB_ISOLATION_ARG, __TBB_ISOLATION_EXPR, __TBB_RECYCLE_TO_ENQUEUE, tbb::task::allocated, tbb::internal::ConcurrentWaitsEnabled(), tbb::internal::es_ref_count_active, tbb::internal::is_critical(), tbb::internal::governor::is_set(), tbb::internal::task_prefix::isolation, ITT_NOTIFY, ITT_SYNC_CREATE, tbb::task_group_context::may_have_children, tbb::task_group_context::my_cancellation_requested, tbb::task_group_context::my_exception, tbb::task_group_context::my_state, tbb::internal::task_prefix::next, tbb::internal::no_isolation, tbb::internal::num_priority_levels, parent, tbb::task::parent(), tbb::task::prefix(), tbb::task::recycle, tbb::internal::task_prefix::ref_count, tbb::internal::task_prefix::state, tbb::task::state(), and sync_releasing.
Referenced by tbb::internal::custom_scheduler< SchedulerTraits >::wait_for_all().
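As a rough illustration of the loop's contract (dispatch the child first, then drain and steal until the parent's reference count drops to 1, then reset it to 0), here is a minimal self-contained sketch; the Task type, local_pool, and helper names are hypothetical stand-ins, not TBB's API, and real code steals and backs off instead of spinning:

#include <atomic>
#include <deque>
#include <cstdio>

// Hypothetical task type, standing in for tbb::task.
struct Task {
    std::atomic<long> ref_count{0};
    Task* parent_task = nullptr;
    virtual Task* execute() = 0;     // may return a task to run next (bypass)
    virtual ~Task() {}
};

static std::deque<Task*> local_pool; // stand-in for the thread's task pool

// Dispatch until 'parent' has a reference count of 1; run 'child' first.
void local_wait_for_all_sketch(Task& parent, Task* child) {
    Task* t = child;
    while (parent.ref_count.load(std::memory_order_acquire) > 1) {
        if (!t) {
            if (local_pool.empty()) continue;  // the real loop steals here
            t = local_pool.back();
            local_pool.pop_back();
        }
        Task* bypass = t->execute();           // bypass task runs immediately
        if (t->parent_task)                    // tally completion in the parent
            t->parent_task->ref_count.fetch_sub(1, std::memory_order_release);
        delete t;
        t = bypass;
    }
    parent.ref_count.store(0, std::memory_order_relaxed); // done waiting: reset
}

struct Leaf : Task {
    Task* execute() override { std::puts("leaf ran"); return nullptr; }
};

int main() {
    Task* root = new Leaf;   // acts as the waited-on parent
    root->ref_count = 2;     // one child + the wait itself
    Leaf* child = new Leaf;
    child->parent_task = root;
    local_wait_for_all_sketch(*root, child);
    delete root;
}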
◆ process_bypass_loop()
template<typename SchedulerTraits >
Implements the bypass loop of the dispatch loop (local_wait_for_all).
Definition at line 389 of file custom_scheduler.h.
The abridged listing shows the body of the loop:

Under __TBB_TASK_ISOLATION, an assertion guards against the case where "A task from another isolated region is going to be executed". With __TBB_TASK_GROUP_CONTEXT and TBB_USE_ASSERT, the task's context is validated, and a task whose context has my_cancellation_requested set is skipped rather than executed. Under __TBB_TASK_PRIORITY, priority(*t) is compared with *my_ref_top_priority; a non-enqueued task whose priority falls below the effective reference priority is moved to the my_offloaded_tasks list via offload_task(*t, p), maintaining my_offloaded_task_list_tail_link.

Otherwise context_guard.set_ctx(t->prefix().context) is applied and t_next = t->execute() runs, bracketed by ITT_STACK callee_enter/callee_leave notifications; a task returned from execute() must be marked as allocated. The subsequent switch on t->state() handles: executing (the normal case: fetch s = t->parent(), assert the reference count is zero, "Task still has children after it has been executed", and free_task<no_hint>(*t)); recycle; task::to_enqueue under __TBB_RECYCLE_TO_ENQUEUE; reexecute, which "requires that method execute() return another task" and asserts that "a task returned from method execute() can not be recycled in another way"; task::to_resume under __TBB_PREVIEW_RESUMABLE_TASKS (the task is freed, and "Only a coroutine on outermost level can be left"); and ready, which fails with "task is in READY state upon return from method execute()".
References __TBB_ASSERT, __TBB_ASSERT_EX, __TBB_fallthrough, __TBB_ISOLATION_ARG, __TBB_ISOLATION_EXPR, tbb::internal::task_prefix::affinity, tbb::task::allocated, tbb::internal::assert_task_valid(), tbb::internal::task_prefix::context, tbb::task::execute(), tbb::task::executing, GATHER_STATISTIC, tbb::internal::is_critical(), tbb::task::is_enqueued_task(), tbb::internal::task_prefix::isolation, tbb::task_group_context::itt_caller, ITT_NOTIFY, ITT_STACK, tbb::task_group_context::my_cancellation_requested, tbb::internal::task_prefix::next, tbb::internal::task_prefix::next_offloaded, tbb::internal::no_isolation, tbb::internal::task_prefix::owner, p, tbb::task::parent(), tbb::internal::poison_pointer(), tbb::task::prefix(), tbb::task::ready, tbb::task::recycle, tbb::task::reexecute, tbb::internal::task_prefix::ref_count, tbb::internal::reset_extra_state(), s, tbb::internal::context_guard_helper< T >::set_ctx(), tbb::internal::task_prefix::state, tbb::task::state(), tbb::internal::arena::wakeup, and tbb::task::~task().
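The bypass idea itself (a task returned from execute() is run immediately, skipping the pool round trip) can be sketched independently of TBB; the State enum and Chain type below are hypothetical, and the real loop additionally handles recycling, priorities, isolation, and contexts as listed above:

#include <cstdio>

enum class State { executing, recycled, reexecute };

struct Task {
    State state = State::executing;
    virtual Task* execute() = 0;
    virtual ~Task() {}
};

void process_bypass_loop_sketch(Task* t) {
    while (t) {
        t->state = State::executing;
        Task* t_next = t->execute();   // bypass candidate
        switch (t->state) {
        case State::executing:         // normal completion: destroy the task
            delete t;
            break;
        case State::recycled:          // task recycled itself; do not free
            break;
        case State::reexecute:         // real code asserts t_next != NULL and
            break;                     // t_next != t, then respawns t
        }
        t = t_next;                    // returned task is dispatched next
    }
}

struct Chain : Task {
    Task* next;
    explicit Chain(Task* n) : next(n) {}
    Task* execute() override { std::puts("task ran"); return next; }
};

int main() {
    // Two tasks run back to back via the bypass chain, no pool involved.
    process_bypass_loop_sketch(new Chain(new Chain(nullptr)));
}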
◆ receive_or_steal_task()
template<typename SchedulerTraits >
Try getting a task from the mailbox or stealing from another scheduler.
Returns the stolen task or NULL if all attempts fail.
Implements tbb::internal::generic_scheduler.
Definition at line 156 of file custom_scheduler.h.
The abridged listing shows the structure:

On the outermost dispatch level under __TBB_TASK_PRIORITY, a non-zero my_arena->my_skipped_fifo_priority is atomically swapped back to 0 and, if it exceeds my_arena->my_top_priority, the arena's priority is raised. The main polling loop runs for( int failure_count = -static_cast<int>(SchedulerTraits::itt_possible);; ++failure_count ): when completion_ref_count reaches 1, sync_prepare/sync_acquired are reported through ITT_NOTIFY (when instrumentation is possible) and the loop returns; a thread on the outermost worker level may instead give up and return NULL. Each iteration probes the mailbox, the starvation-resistant task stream, and, under __TBB_PREVIEW_CRITICAL_TASKS, the critical task stream; on entry, the arena observers (__TBB_ARENA_OBSERVER) and the global observer list (__TBB_SCHEDULER_OBSERVER) are notified with is_worker().

Failures are throttled: once failure_count reaches a threshold of 2*int(n+1) it is clamped there and the thread pauses, and after a yield threshold of 10 or 100 iterations (depending on configuration) it calls __TBB_Yield(); an outermost worker, or one in an arena where my_top_priority exceeds my_bottom_priority, may then leave. Under __TBB_TASK_PRIORITY the loop also adopts orphaned tasks: my_arena->my_orphaned_tasks is fetched-and-stored to 0, reloaded via reload_tasks at the effective reference priority, and spliced onto my_offloaded_tasks (updating my_offloaded_task_list_tail_link), with my_local_reload_epoch decremented to force a reload; a nested-level thread whose *my_ref_top_priority is stale retargets it at my_arena->my_top_priority.
References __TBB_ASSERT, __TBB_control_consistency_helper, __TBB_ISOLATION_ARG, __TBB_ISOLATION_EXPR, __TBB_PREVIEW_CRITICAL_TASKS, __TBB_Yield, GATHER_STATISTIC, int, tbb::internal::is_critical(), ITT_NOTIFY, tbb::internal::no_isolation, p, tbb::internal::prolonged_pause(), and sync_cancel.
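The failure-counted backoff structure can be sketched as follows; try_mailbox and try_steal_random_victim are hypothetical stubs, and the thresholds mirror the constants visible in the listing (failure_threshold = 2*(n+1), a yield threshold of 100):

#include <atomic>
#include <thread>

struct Task;
static Task* try_mailbox() { return nullptr; }             // stub: affinitized tasks
static Task* try_steal_random_victim() { return nullptr; } // stub: random-victim steal

static Task* receive_or_steal_sketch(std::atomic<long>& completion_ref_count,
                                     unsigned n_workers) {
    const int failure_threshold = 2 * int(n_workers + 1);
    const int yield_threshold = 100;
    int yield_count = 0;
    for (int failure_count = 0;; ++failure_count) {
        // The waited-on task graph completed: stop looking for work.
        if (completion_ref_count.load(std::memory_order_acquire) == 1)
            return nullptr;
        if (Task* t = try_mailbox()) return t;
        if (Task* t = try_steal_random_victim()) return t;
        if (failure_count >= failure_threshold) {
            failure_count = failure_threshold;     // clamp; keep backing off
            if (++yield_count >= yield_threshold) {
                std::this_thread::yield();         // stand-in for __TBB_Yield
                yield_count = 0;
            }
        }
    }
}

int main() {
    std::atomic<long> done{1};          // count already 1: returns immediately
    receive_or_steal_sketch(done, 4);
}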
◆ tally_completion_of_predecessor()
template<typename SchedulerTraits >
Decrements ref_count of a predecessor.
If the count reaches 0, the predecessor is scheduled for execution. When changing this function, remember that it is on a hot path.
Definition at line 72 of file custom_scheduler.h.
The abridged listing shows the hot path:

task_prefix& p = s.prefix() is fetched, and ITT_NOTIFY fires when SchedulerTraits::itt_possible. When SchedulerTraits::has_slow_atomic and p.ref_count is already 1, the atomic decrement is skipped; otherwise __TBB_FetchAndDecrementWrelease is used. Under __TBB_PREVIEW_RESUMABLE_TASKS, an old_ref_count equal to internal::abandon_flag + 2 means the waiting coroutine was abandoned, and it is revived via tbb::task::resume(p.abandoned_scheduler). If old_ref_count is greater than 1, the predecessor still has outstanding children and the function returns. Otherwise an assertion verifies p.ref_count == 0 ("completion of task caused predecessor's reference count to underflow"). Under __TBB_TASK_ISOLATION the predecessor inherits the completed task's isolation tag (p.isolation = isolation), and under __TBB_RECYCLE_TO_ENQUEUE a predecessor with p.state == task::to_enqueue is pushed to the starvation-resistant queue instead. Otherwise the predecessor is placed in bypass_slot when that slot is empty (bypass_slot == NULL); under __TBB_PREVIEW_CRITICAL_TASKS an occupied slot is vacated with local_spawn(bypass_slot, bypass_slot->prefix().next) so a critical predecessor can take it.
References __TBB_ASSERT, __TBB_control_consistency_helper, __TBB_FetchAndDecrementWrelease, tbb::internal::arena::enqueue_task(), tbb::internal::es_ref_count_active, tbb::internal::is_critical(), ITT_NOTIFY, tbb::internal::generic_scheduler::local_spawn(), tbb::internal::scheduler_state::my_arena, tbb::internal::generic_scheduler::my_random, tbb::internal::task_prefix::next, tbb::internal::no_isolation, p, tbb::task::prefix(), s, and sync_releasing.
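A sketch of the decrement's hot path, including the has_slow_atomic shortcut and the bypass-slot handoff; the Task type here is a hypothetical stand-in, and the real code uses __TBB_FetchAndDecrementWrelease plus the assertions quoted above:

#include <atomic>

struct Task {
    std::atomic<long> ref_count{0};
};

template<bool has_slow_atomic>
void tally_completion_sketch(Task& s, Task*& bypass_slot) {
    long old_ref_count;
    if (has_slow_atomic && s.ref_count.load(std::memory_order_acquire) == 1) {
        old_ref_count = 1;                 // sole reference: skip the atomic RMW
        s.ref_count.store(0, std::memory_order_relaxed);
    } else {
        old_ref_count = s.ref_count.fetch_sub(1, std::memory_order_release);
    }
    if (old_ref_count > 1)
        return;                            // predecessor still has children
    // Count hit zero: schedule the predecessor, preferring the bypass slot
    // over a spawn so it runs next without a pool round trip.
    if (bypass_slot == nullptr)
        bypass_slot = &s;
    // else: real code spawns it into the local pool instead
}

int main() {
    Task parent;
    parent.ref_count = 1;                  // one outstanding child
    Task* bypass = nullptr;
    tally_completion_sketch<false>(parent, bypass);
    return bypass == &parent ? 0 : 1;      // parent became the bypass task
}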
◆ wait_for_all()
template<typename SchedulerTraits >
Entry point from client code to the scheduler loop that dispatches tasks. Forwards to local_wait_for_all() (see above).
The documentation for this class was generated from the following file:
custom_scheduler.h
Copyright © 2005-2020 Intel Corporation. All Rights Reserved.
Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
registered trademarks or trademarks of Intel Corporation or its
subsidiaries in the United States and other countries.
* Other names and brands may be claimed as the property of others.