Intel(R) Threading Building Blocks Doxygen Documentation
version 4.2.3
File: scheduler.h (source fragments)
#ifndef _TBB_scheduler_H
#define _TBB_scheduler_H

#include "../rml/include/rml_tbb.h"

#if __TBB_SURVIVE_THREAD_SWITCH
#if __TBB_PREVIEW_RESUMABLE_TASKS

template<typename SchedulerTraits> class custom_scheduler;

#define EmptyTaskPool ((task**)0)
#define LockedTaskPool ((task**)~(intptr_t)0)
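These two sentinels encode a task pool's state in the pool pointer itself: a NULL pointer means the slot has no published pool, while an all-ones pointer marks a pool that some thread currently holds locked. A minimal sketch of how a thief could classify a victim slot before attempting a lock (the classify_pool helper is illustrative, not part of TBB):

    // Hypothetical helper: reading a slot's pool pointer under the
    // EmptyTaskPool / LockedTaskPool convention defined above.
    enum pool_state { pool_empty, pool_locked, pool_available };

    inline pool_state classify_pool(task** pool) {
        if (pool == EmptyTaskPool)  return pool_empty;   // nothing published; pick another victim
        if (pool == LockedTaskPool) return pool_locked;  // someone holds the lock; back off
        return pool_available;                           // real deque pointer; try to lock and steal
    }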
#if __TBB_PREVIEW_CRITICAL_TASKS
    bool has_taken_critical_task : 1;
#if __TBB_PREVIEW_RESUMABLE_TASKS
#if __TBB_PREVIEW_RESUMABLE_TASKS
#elif __TBB_PREVIEW_CRITICAL_TASKS
#if __TBB_SCHEDULER_OBSERVER
    observer_proxy* my_last_global_observer;
#if __TBB_ARENA_OBSERVER
    observer_proxy* my_last_local_observer;
#if __TBB_TASK_PRIORITY
    volatile intptr_t* my_ref_top_priority;
    volatile uintptr_t* my_ref_reload_epoch;
#if __TBB_PREVIEW_RESUMABLE_TASKS
    tbb::atomic<bool>* my_current_is_recalled;
#if __TBB_PREVIEW_CRITICAL_TASKS
    uintptr_t my_rsb_stealing_threshold;
#if __TBB_HOARD_NONLOCAL_TASKS
    task* my_nonlocal_free_list;
#if __TBB_COUNT_TASK_NODES
    intptr_t my_task_node_count;
#if __TBB_PREVIEW_RESUMABLE_TASKS
    enum post_resume_action {

    typedef void (*suspend_callback_t)(void*, task::suspend_point);

    suspend_callback_t suspend_callback;
    task::suspend_point tag;

    if (suspend_callback) {
        __TBB_ASSERT(suspend_callback && user_callback && tag, NULL);
        suspend_callback(user_callback, tag);
    }

    co_context my_co_context;
    post_resume_action my_post_resume_action;
    void* my_post_resume_arg;

    void set_post_resume_action(post_resume_action, void* arg);
    void do_post_resume_action();
    bool resume_original_scheduler();
    friend void recall_function(task::suspend_point tag);
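The loose lines above (suspend_callback, tag, and the guarded call) read like the pieces of a small wrapper that carries a user-supplied function through the C-style suspend_callback_t signature. The reconstruction below is an assumption based on those fragments; the user_callback member is inferred from the assertion:

    // Assumed enclosing wrapper for the suspend-callback fragment above.
    struct callback_t {
        suspend_callback_t suspend_callback; // type-erased user function
        void* user_callback;                 // user data handed back verbatim (inferred member)
        task::suspend_point tag;             // identifies the suspended scheduler

        void operator()() {
            if (suspend_callback) {
                __TBB_ASSERT(suspend_callback && user_callback && tag, NULL);
                suspend_callback(user_callback, tag); // runs only after the context is parked
            }
        }
    };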
#if __TBB_TASK_ISOLATION
#if __TBB_PREVIEW_CRITICAL_TASKS
    bool handled_as_critical( task& t );
#if TBB_USE_ASSERT > 1
    template<free_task_hint h>
#if __TBB_COUNT_TASK_NODES
    intptr_t get_task_node_count( bool count_arena_workers = false );
#if __TBB_TASK_GROUP_CONTEXT
    uintptr_t my_context_state_propagation_epoch;
    tbb::atomic<uintptr_t> my_local_ctx_list_update;
#if __TBB_TASK_PRIORITY
    inline intptr_t effective_reference_priority() const;
    task* my_offloaded_tasks;
    task** my_offloaded_task_list_tail_link;
    uintptr_t my_local_reload_epoch;
    volatile bool my_pool_reshuffling_pending;
    inline void offload_task( task& t, intptr_t task_priority );
    void cleanup_local_context_list();
    template <typename T>
    __TBB_ASSERT(is_alive(ctx), "referenced task_group_context was destroyed");
    static const char* msg = "task_group_context is invalid";
#if __TBB_TASK_PRIORITY
#if TBB_USE_ASSERT > 1
    ::rml::server::execution_resource_t master_exec_resource;
#if __TBB_TASK_GROUP_CONTEXT
    tbb::atomic<uintptr_t> my_nonlocal_ctx_list_update;
#if __TBB_SURVIVE_THREAD_SWITCH
    cilk_state_t my_cilk_state;
    mutable statistics_counters my_counters;
#if __TBB_TASK_GROUP_CONTEXT
    p.extra_state = 0xFF;
#if __TBB_COUNT_TASK_NODES
    --my_task_node_count;

#if __TBB_COUNT_TASK_NODES
inline intptr_t generic_scheduler::get_task_node_count( bool count_arena_workers ) {
    return my_task_node_count + (count_arena_workers ? my_arena->workers_task_node_count() : 0);
}

__TBB_ASSERT( new_tail <= my_arena_slot->my_task_pool_size, "task deque end was overwritten" );
    "Task pool must be locked when calling commit_relocated_tasks()" );
template<free_task_hint hint>
#if __TBB_HOARD_NONLOCAL_TASKS
#if __TBB_PREVIEW_RESUMABLE_TASKS
    } else if( !(h & local_task) && p.origin && uintptr_t(p.origin) < uintptr_t(4096) ) {
#if __TBB_HOARD_NONLOCAL_TASKS
        p.next = my_nonlocal_free_list;
        my_nonlocal_free_list = &t;
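Because the hint is a template parameter, tests such as h & local_task are compile-time constants, so the branches a call site has already ruled out fold away entirely. A simplified sketch of the idea (free_small_task is a hypothetical stand-in; the real free_task also poisons fields, checks task state, and gathers statistics):

    // Illustrative only: a free-list push whose branch is resolved at compile time.
    template<free_task_hint h>
    void free_small_task(task& t, task*& my_free_list, task*& my_nonlocal_free_list) {
        task_prefix& p = t.prefix();
        p.state = task::freed;
        if (h & local_task) {               // constant-folded: caller proved local origin
            p.next = my_free_list;          // reuse via this scheduler's own free list
            my_free_list = &t;
        } else {
            p.next = my_nonlocal_free_list; // hoard tasks owned by other schedulers
            my_nonlocal_free_list = &t;     // (cf. __TBB_HOARD_NONLOCAL_TASKS above)
        }
    }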
#if __TBB_TASK_PRIORITY
inline intptr_t generic_scheduler::effective_reference_priority() const {

inline void generic_scheduler::offload_task( task& t, intptr_t ) {
    __TBB_ASSERT( my_offloaded_task_list_tail_link && !*my_offloaded_task_list_tail_link, NULL );
    my_offloaded_tasks = &t;
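offload_task pushes lower-priority tasks onto an intrusive singly linked list headed by my_offloaded_tasks, while my_offloaded_task_list_tail_link stores the address of the NULL link terminating the list, which is exactly what the assertion checks. Keeping that tail link lets the whole list be spliced elsewhere in O(1). A self-contained model of the idiom (the offload_list type is hypothetical; next_offloaded is the task_prefix field listed later on this page):

    // Hypothetical model of the head-pointer + tail-link bookkeeping.
    struct offload_list {
        task*  head;      // most recently offloaded task
        task** tail_link; // address of the terminating NULL link

        offload_list() : head(NULL), tail_link(&head) {}

        void push(task& t) {
            t.prefix().next_offloaded = head;           // push on the front
            if (!head)
                tail_link = &t.prefix().next_offloaded; // first task is also the tail
            head = &t;
        }
    };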
#if __TBB_PREVIEW_RESUMABLE_TASKS
inline void generic_scheduler::set_post_resume_action(post_resume_action pra, void* arg) {
    __TBB_ASSERT(my_post_resume_action == PRA_NONE, "Post resume action has already been set.");
    my_post_resume_action = pra;
    my_post_resume_arg = arg;
}

inline bool generic_scheduler::prepare_resume(generic_scheduler& target) {
    target.set_post_resume_action(PRA_NOTIFY, my_current_is_recalled);
    target.set_post_resume_action(PRA_CLEANUP, this);
    my_target_on_exit = &target;
    my_wait_task->prefix().abandoned_scheduler = this;
    target.set_post_resume_action(PRA_ABANDON, my_wait_task);
inline bool generic_scheduler::resume_original_scheduler() {
    if (!prepare_resume(target)) {

inline void generic_scheduler::resume(generic_scheduler& target) {
        "The post resume action is not set. Has prepare_resume been called?");
#if __TBB_SCHEDULER_OBSERVER
    target.my_last_global_observer = my_last_global_observer;
#if __TBB_ARENA_OBSERVER
    target.my_last_local_observer = my_last_local_observer;
    target.attach_mailbox(affinity_id(target.my_arena_index + 1));
#if __TBB_TASK_PRIORITY
    if (my_offloaded_tasks)
        my_arena->orphan_offloaded_tasks(*this);
    my_co_context.resume(target.my_co_context);
    do_post_resume_action();
inline void generic_scheduler::do_post_resume_action() {
    __TBB_ASSERT(my_post_resume_action != PRA_NONE, "The post resume action is not set.");
    switch (my_post_resume_action) {

        task_prefix& wait_task_prefix = static_cast<task*>(my_post_resume_arg)->prefix();
        reference_count old_ref_count = __TBB_FetchAndAddW(&wait_task_prefix.ref_count, internal::abandon_flag);
        if (old_ref_count == 1) {
            tbb::task::resume(wait_task_prefix.abandoned_scheduler);

        callback_t callback = *static_cast<callback_t*>(my_post_resume_arg);

        to_cleanup->my_arena->my_co_cache.push(to_cleanup);

        tbb::atomic<bool>& scheduler_recall_flag = *static_cast<tbb::atomic<bool>*>(my_post_resume_arg);
        scheduler_recall_flag = true;

    my_post_resume_action = PRA_NONE;
    my_post_resume_arg = NULL;
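The PRA_ABANDON branch resolves a race between the scheduler abandoning a wait and the thread completing the waited-on task: merging abandon_flag into the reference count with a single atomic fetch-and-add guarantees that exactly one side observes both events have happened, and that side performs the resume. A stripped-down model of the handshake (std::atomic replaces __TBB_FetchAndAddW; the flag bit and both helpers are illustrative):

    #include <atomic>
    #include <cstdint>

    const std::intptr_t abandon_flag = std::intptr_t(1) << 30; // illustrative bit

    // Abandoning side: publish the flag; if the count had already dropped to 1,
    // the wait completed first, so the abandoner must trigger the resume.
    bool abandoner_resumes(std::atomic<std::intptr_t>& ref_count) {
        return ref_count.fetch_add(abandon_flag) == 1;
    }

    // Completing side: drop the last reference; if the flag is already set,
    // the abandoner went first, so the resume falls to the completer.
    bool completer_resumes(std::atomic<std::intptr_t>& ref_count) {
        return ref_count.fetch_sub(1) == abandon_flag + 1;
    }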
struct recall_functor {
    tbb::atomic<bool>* scheduler_recall_flag;

    recall_functor(tbb::atomic<bool>* recall_flag_) :
        scheduler_recall_flag(recall_flag_) {}

    void operator()(task::suspend_point) {
        *scheduler_recall_flag = true;
    }
};
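recall_functor is the canonical suspend callback: the scheduler invokes it only after the calling context is fully parked, so flipping the flag here cannot race with the coroutine still running. A hedged usage sketch, assuming the preview entry point tbb::task::suspend accepts any functor taking a task::suspend_point:

    // Usage model for recall_functor (illustrative wiring, not TBB source).
    tbb::atomic<bool> recall_flag;
    recall_flag = false;

    // Park the current context; once parked, the scheduler calls the functor,
    // which sets the flag that a watching thread may then act upon.
    tbb::task::suspend(recall_functor(&recall_flag));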
generic_scheduler& s = *static_cast<generic_scheduler*>(arg);
s.do_post_resume_action();
__TBB_ASSERT(s.my_innermost_running_task == s.my_dummy_task, NULL);
s.local_wait_for_all(*s.my_dummy_task, NULL);
s.resume(*s.my_target_on_exit);
#if __TBB_TASK_GROUP_CONTEXT
template <bool report_tasks>
class context_guard_helper {
    const task_group_context* curr_ctx;
    cpu_ctl_env guard_cpu_ctl_env;
    cpu_ctl_env curr_cpu_ctl_env;

    context_guard_helper() : curr_ctx( NULL ) {
        guard_cpu_ctl_env.get_env();
        curr_cpu_ctl_env = guard_cpu_ctl_env;
    }
    ~context_guard_helper() {
        if ( curr_cpu_ctl_env != guard_cpu_ctl_env )
            guard_cpu_ctl_env.set_env();
        if ( report_tasks && curr_ctx )

    void set_ctx( const task_group_context* ctx ) {
        generic_scheduler::assert_context_valid( ctx );
        const cpu_ctl_env& ctl = *punned_cast<cpu_ctl_env*>( &ctx->my_cpu_ctl_env );
        if ( ctl != curr_cpu_ctl_env ) {
            curr_cpu_ctl_env = ctl;
            curr_cpu_ctl_env.set_env();
        }
        if ( report_tasks && ctx != curr_ctx ) {

#if __TBB_FP_CONTEXT
    if ( curr_cpu_ctl_env != guard_cpu_ctl_env ) {
        guard_cpu_ctl_env.set_env();
        curr_cpu_ctl_env = guard_cpu_ctl_env;
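context_guard_helper is an RAII guard for per-thread CPU control state (floating-point rounding mode and exception masks): the constructor snapshots the current environment, set_ctx adopts a task group's captured environment only when it actually differs, and the destructor restores the snapshot. The same shape in portable C++ using <cfenv> (a simplified stand-in; cpu_ctl_env supports cheap comparison, which std::fenv_t does not, so this sketch restores unconditionally):

    #include <cfenv>

    // Simplified analogue of context_guard_helper built on the C fenv API.
    class fp_env_guard {
        std::fenv_t saved_; // environment captured at construction
    public:
        fp_env_guard()  { std::fegetenv(&saved_); } // like guard_cpu_ctl_env.get_env()
        ~fp_env_guard() { std::fesetenv(&saved_); } // restore the original environment
        void set_ctx(const std::fenv_t& ctx_env) {  // adopt a context's captured env
            std::fesetenv(&ctx_env);
        }
    };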
Symbols referenced in this file:

task** lock_task_pool(arena_slot* victim_arena_slot) const
Locks the victim's task pool and returns a pointer to it. The pointer can be NULL.
scheduler_properties my_properties
void nested_arena_entry(arena *, size_t)
intptr_t my_priority
Priority level of the task group (in normalized representation)
void attach_mailbox(affinity_id id)
bool outermost_level() const
True if the scheduler is on the outermost dispatch level.
virtual void local_wait_for_all(task &parent, task *child)=0
task_group_context * context
Shared context that is used to communicate asynchronous state changes.
size_t my_arena_index
Index of the arena slot the scheduler occupies now, or occupied last time.
void commit_relocated_tasks(size_t new_tail)
Makes relocated tasks visible to thieves and releases the local task pool.
bool my_auto_initialized
True if *this was created by automatic TBB initialization.
internal::generic_scheduler * my_owner
Scheduler instance that registered this context in its thread specific list.
#define __TBB_ISOLATION_EXPR(isolation)
class task_group_context
Used to form groups of tasks.
void free_nonlocal_small_task(task &t)
Free a small task t that was allocated by a different scheduler.
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
enum value executing
task is running, and will be destroyed after method execute() completes.
task * steal_task_from(__TBB_ISOLATION_ARG(arena_slot &victim_arena_slot, isolation_tag isolation))
Steal task from another scheduler's ready pool.
void init_stack_info()
Sets up the data necessary for the stealing limiting heuristics.
unsigned char extra_state
Miscellaneous state that is not directly visible to users, stored as a byte for compactness.
Bit-field representing properties of a scheduler.
static const size_t min_task_pool_size
bool master_outermost_level() const
True if the scheduler is on the outermost dispatch level in a master thread.
bool is_task_pool_published() const
task * steal_task(__TBB_ISOLATION_EXPR(isolation_tag isolation))
Attempts to steal a task from a randomly chosen thread/scheduler.
#define ITT_NOTIFY(name, obj)
bool is_quiescent_local_task_pool_empty() const
void release_task_pool() const
Unlocks the local task pool.
static void assume_scheduler(generic_scheduler *s)
Temporarily set TLS slot to the given scheduler.
static bool is_version_3_task(task &t)
struct intrusive_list_node
Data structure to be inherited by the types that can form intrusive lists.
void co_local_wait_for_all(void *)
void poison_pointer(T *__TBB_atomic &)
void enqueue(task &, void *reserved) __TBB_override
For internal use only.
task * my_return_list
List of small tasks that have been returned to this scheduler by other schedulers.
bool is_worker() const
True if running on a worker thread, false otherwise.
long my_ref_count
Reference count for scheduler.
void assert_task_pool_valid() const
Container::iterator first(Container &c)
affinity_id my_affinity_id
The mailbox id assigned to this scheduler.
class task
Base class for user-defined tasks.
void spawn_root_and_wait(task &first, task *&next) __TBB_override
For internal use only.
class FastRandom
A fast random number generator.
class generic_scheduler
Work stealing task scheduler.
enum value es_task_proxy
Tag for v3 task_proxy.
__TBB_atomic size_t head
Index of the first ready task in the deque.
__TBB_atomic kind_type my_kind
Flavor of this context: bound or isolated.
void publish_task_pool()
Used by workers to enter the task pool.
#define __TBB_store_release
__TBB_atomic size_t tail
Index of the element following the last ready task in the deque.
internal::task_prefix & prefix(internal::version_tag *=NULL) const
Get reference to corresponding task_prefix.
unsigned my_num_workers_allotted
The number of workers that have been marked out by the resource manager to service the arena.
generic_scheduler(market &, bool)
unsigned char state
A task::state_type, stored as a byte for compactness.
void spawn(task &first, task *&next) __TBB_override
For internal use only.
enum value small_local_task
Bitwise-OR of local_task and small_task.
void __TBB_store_relaxed(volatile T &location, V value)
generic_scheduler * my_scheduler
Scheduler of the thread attached to the slot.
arena * my_arena
The arena that I own (if master) or am servicing at the moment (if worker)
market * my_market
The market I am in.
#define GATHER_STATISTIC(x)
task * my_dummy_task
Fake root task created by slave threads.
unsigned short affinity_id
An id as used for specifying affinity.
bool is_quiescent_local_task_pool_reset() const
uintptr_t my_state
Internal state (combination of state flags, currently only may_have_children).
bool can_steal()
Returns true if stealing is allowed.
void attach(mail_outbox &putter)
Attach inbox to a corresponding outbox.
void __TBB_store_with_release(volatile T &location, V value)
unsigned max_threads_in_arena()
Returns the concurrency limit of the current arena.
FastRandom my_random
Random number generator used for picking a random victim from which to steal.
enum value freed
task object is on free list, or is going to be put there, or was just taken off.
context_list_node_t * my_prev
#define __TBB_ISOLATION_ARG(arg1, isolation)
uintptr_t my_cancellation_requested
Specifies whether cancellation was requested for this task group.
void destroy()
Destroy and deallocate this scheduler object.
void free_task(task &t)
Put task on free list.
task_group_context * my_parent
Pointer to the context of the parent cancellation group. NULL for isolated contexts.
task * my_free_list
Free list of small tasks that can be reused.
void local_spawn_root_and_wait(task *first, task *&next)
task * get_mailbox_task(__TBB_ISOLATION_EXPR(isolation_tag isolation))
Attempt to get a task from the mailbox.
const size_t NFS_MaxLineSize
Compile-time constant that is upper bound on cache line/sector size.
void attach_arena(arena *, size_t index, bool is_master)
context_list_node_t * my_next
static bool is_set(generic_scheduler *s)
Used to check validity of the local scheduler TLS contents.
size_t prepare_task_pool(size_t n)
Makes sure that the task pool can accommodate at least n more elements.
void cleanup_scheduler()
Cleans up this scheduler (the scheduler might be destroyed).
static void cleanup_worker(void *arg, bool worker)
Perform necessary cleanup when a worker thread finishes.
task * my_innermost_running_task
Innermost task whose task::execute() is running. A dummy task on the outermost level.
intptr_t isolation_tag
A tag for task isolation.
task **__TBB_atomic task_pool
void * __TBB_get_bsp()
Retrieves the current RSE backing store pointer. IA64 specific.
state_type state() const
Current execution state.
enum value no_cache
Disable caching for a small task.
#define ITT_TASK_BEGIN(type, name, id)
Class representing source of mail.
void local_spawn(task *first, task *&next)
mail_outbox & mailbox(affinity_id id)
Get reference to mailbox corresponding to given affinity_id.
void suppress_unused_warning(const T1 &)
Utility template function to prevent "unused" warnings by various compilers.
bool cleanup_master(bool blocking_terminate)
Perform necessary cleanup when a master thread stops using TBB.
void acquire_task_pool() const
Locks the local task pool.
static const size_t quick_task_size
If sizeof(task) is <=quick_task_size, it is handled on a free list instead of malloc'd.
static const kind_type dying
unsigned char (anonymous reserved bit-field)
Reserved bits.
task & allocate_task(size_t number_of_bytes, __TBB_CONTEXT_ARG(task *parent, task_group_context *context))
Allocate task object, either from the heap or a free list.
unsigned num_workers_active() const
The number of workers active in the arena.
bool type
Indicates that a scheduler acts as a master or a worker.
enum value local_task
Task is known to have been allocated by this scheduler.
enum value ready
task is in ready pool, or is going to be put there, or was just taken off.
task * get_task(__TBB_ISOLATION_EXPR(isolation_tag isolation))
Get a task from the local pool.
intptr_t reference_count
A reference count.
free_task_hint
Optimization hint to free_task that enables it omit unnecessary tests and code.
static generic_scheduler * create_master(arena *a)
Initialize a scheduler for a master thread.
static task * plugged_return_list()
Special value used to mark my_return_list as not taking any more entries.
class task_prefix
Memory prefix to a task object.
void reset_task_pool_and_leave()
Resets head and tail indices to 0, and leaves task pool.
class custom_scheduler
A scheduler with a customized evaluation loop.
static bool is_proxy(const task &t)
True if t is a task_proxy.
internal::context_list_node_t my_node
Used to form the thread specific list of contexts without additional memory allocation.
void unlock_task_pool(arena_slot *victim_arena_slot, task **victim_task_pool) const
Unlocks victim's task pool.
uintptr_t my_version_and_traits
Version for run-time checks and behavioral traits of the context.
void deallocate_task(task &t)
Return task object to the memory allocator.
task * prepare_for_spawning(task *t)
Checks if t is affinitized to another thread, and if so, bundles it as proxy.
void __TBB_EXPORTED_FUNC NFS_Free(void *)
Free memory allocated by NFS_Allocate.
virtual task * receive_or_steal_task(__TBB_ISOLATION_ARG(__TBB_atomic reference_count &completion_ref_count, isolation_tag isolation))=0
Try getting a task from other threads (via mailbox, stealing, FIFO queue, orphans adoption).
uintptr_t my_stealing_threshold
Position in the call stack specifying its maximal filling when stealing is still allowed.
static const size_t null_arena_index
static generic_scheduler * create_worker(market &m, size_t index, bool geniune)
Initialize a scheduler for a worker thread.
bool outermost
Indicates that a scheduler is on outermost level.
void leave_task_pool()
Leave the task pool.
class spin_mutex
A lock that occupies a single byte.
__TBB_atomic intptr_t my_small_task_count
Number of small tasks that have been allocated by this scheduler.
static const unsigned ref_external
Reference increment values for externals and workers.
task * next_offloaded
Pointer to the next offloaded lower priority task.
bool worker_outermost_level() const
True if the scheduler is on the outermost dispatch level in a worker thread.
unsigned my_num_slots
The number of slots in the arena.
const size_t task_prefix_reservation_size
Number of bytes reserved for a task prefix.
enum value small_task
Task is known to be a small task.
enum value allocated
task object is freshly allocated or recycled.
T __TBB_load_relaxed(const volatile T &location)
arena_slot * my_arena_slot
Pointer to the slot in the arena we own at the moment.
bool is_local_task_pool_quiescent() const
void commit_spawned_tasks(size_t new_tail)
Makes newly spawned tasks visible to thieves (see the deque publishing sketch at the end of this list).
#define __TBB_CONTEXT_ARG(arg1, context)
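Several entries above (head, tail, commit_spawned_tasks, __TBB_store_with_release) together describe the single-owner deque protocol: the owner writes new task pointers past tail and only then publishes the new tail with release semantics, so a thief that reads tail with acquire semantics is guaranteed to see fully initialized entries. A minimal model of the publish step (std::atomic stands in for TBB's load/store primitives; the deque_model type and its fixed capacity are illustrative):

    #include <atomic>
    #include <cstddef>

    struct task; // opaque here

    // Minimal model of an arena slot's deque and commit_spawned_tasks().
    struct deque_model {
        task* pool[64];                 // task_pool storage (capacity illustrative)
        std::atomic<std::size_t> head;  // thieves steal from here
        std::atomic<std::size_t> tail;  // owner spawns here

        void commit_spawned_tasks(std::size_t new_tail) {
            // Entries pool[tail..new_tail) were written before this call; the
            // release store makes them visible to acquire-readers of 'tail'.
            tail.store(new_tail, std::memory_order_release);
        }
    };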