Intel(R) Threading Building Blocks Doxygen Documentation
version 4.2.3
#if __TBB_PREVIEW_CRITICAL_TASKS && __TBB_CPF_BUILD
// ...
#endif

#include "../rml/include/rml_tbb.h"

#if __TBB_PREVIEW_RESUMABLE_TASKS
// ...
#endif

class task_group_context;
class allocate_root_with_context_proxy;

#if __TBB_NUMA_SUPPORT
class numa_binding_observer;
#endif /* __TBB_NUMA_SUPPORT */

#if __TBB_PREVIEW_RESUMABLE_TASKS
//! Bounded LIFO cache of schedulers (coroutines) owned by an arena.
//! Implemented as a fixed-capacity ring buffer; elided code is marked "...".
class arena_co_cache {
    //! Ring buffer of cached schedulers; NULL marks an empty slot.
    generic_scheduler** my_co_scheduler_cache;
    //! Next position to write (stack head).
    unsigned my_head;
    //! Cache capacity minus one (the highest valid index).
    unsigned my_max_index;

    unsigned next_index() {
        return ( my_head == my_max_index ) ? 0 : my_head + 1;
    }
    unsigned prev_index() {
        return ( my_head == 0 ) ? my_max_index : my_head - 1;
    }
    bool internal_empty() {
        return my_co_scheduler_cache[prev_index()] == NULL;
    }
    void internal_scheduler_cleanup(generic_scheduler* to_cleanup) {
        to_cleanup->my_arena_slot = NULL;
        // ... (release the scheduler and its coroutine)
    }
public:
    void init(unsigned cache_capacity) {
        size_t alloc_size = cache_capacity * sizeof(generic_scheduler*);
        my_co_scheduler_cache = (generic_scheduler**)NFS_Allocate(1, alloc_size, NULL);
        memset( my_co_scheduler_cache, 0, alloc_size );
        my_head = 0;
        my_max_index = cache_capacity - 1;
    }
    void cleanup() {
        // Drain the cache, cleaning up every scheduler still in it.
        while (generic_scheduler* to_cleanup = pop()) {
            internal_scheduler_cleanup(to_cleanup);
        }
        NFS_Free(my_co_scheduler_cache);
    }
    //! Store a scheduler; if the target slot is occupied, the old entry is evicted.
    void push(generic_scheduler* s) {
        generic_scheduler* to_cleanup = NULL;
        // ... (the following is done under an internal lock)
        // Check if we are replacing an existing cache entry.
        if (my_co_scheduler_cache[my_head] != NULL) {
            to_cleanup = my_co_scheduler_cache[my_head];
        }
        // Store the new value and advance the head.
        my_co_scheduler_cache[my_head] = s;
        my_head = next_index();
        // Clean up the evicted scheduler, if any, outside the lock.
        if (to_cleanup) {
            internal_scheduler_cleanup(to_cleanup);
        }
    }
    //! Retrieve the most recently cached scheduler, or NULL if the cache is empty.
    generic_scheduler* pop() {
        // ... (done under an internal lock)
        if (internal_empty())
            return NULL;
        // Move the head back to the most recently stored entry.
        my_head = prev_index();
        generic_scheduler* to_return = my_co_scheduler_cache[my_head];
        // Clear the slot that was just vacated.
        my_co_scheduler_cache[my_head] = NULL;
        return to_return;
    }
};
#endif // __TBB_PREVIEW_RESUMABLE_TASKS
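The class above is, in effect, a fixed-capacity LIFO ring: push writes at the head and evicts whatever the head wraps onto; pop walks the head backwards. The following standalone sketch illustrates that discipline with hypothetical names, int payloads instead of generic_scheduler*, plain new/delete instead of NFS_Allocate/NFS_Free, and no locking; it is an illustration, not TBB code.

#include <cassert>
#include <cstddef>

class co_cache_sketch {
    int** my_cache;          // ring buffer of cached items; NULL marks an empty slot
    unsigned my_head;        // next slot to write
    unsigned my_max_index;   // capacity - 1
    unsigned next_index() { return my_head == my_max_index ? 0 : my_head + 1; }
    unsigned prev_index() { return my_head == 0 ? my_max_index : my_head - 1; }
public:
    void init(unsigned capacity) {
        my_cache = new int*[capacity]();  // value-initialized: all slots NULL
        my_head = 0;
        my_max_index = capacity - 1;
    }
    // Push evicts the oldest entry when the buffer wraps onto an occupied slot.
    void push(int* item) {
        int* evicted = my_cache[my_head];
        my_cache[my_head] = item;
        my_head = next_index();
        delete evicted;  // cleanup happens after the slot is replaced
    }
    // Pop returns the most recently pushed item (LIFO), or NULL when empty.
    int* pop() {
        if (my_cache[prev_index()] == NULL) return NULL;
        my_head = prev_index();
        int* out = my_cache[my_head];
        my_cache[my_head] = NULL;
        return out;
    }
    void cleanup() { while (int* p = pop()) delete p; delete[] my_cache; }
};

int main() {
    co_cache_sketch c;
    c.init(2);
    c.push(new int(1));
    c.push(new int(2));
    c.push(new int(3));   // wraps around: evicts the entry holding 1
    int* a = c.pop();     // most recently cached value: 3
    assert(a && *a == 3);
    delete a;
    c.cleanup();          // drains and deletes the remaining entry (2)
    return 0;
}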
//! The structure of an arena, except the array of slots.
//! (Fragment: only the members visible in this listing are shown; elisions marked "...".)
struct arena_base {
#if __TBB_TASK_PRIORITY
    //! Highest priority of recently spawned or enqueued tasks.
    volatile intptr_t my_top_priority;
#endif
#if __TBB_PREVIEW_CRITICAL_TASKS && __TBB_CPF_BUILD
    // ...
#endif
#if __TBB_PREVIEW_CRITICAL_TASKS
    // ...
#endif
#if __TBB_ARENA_OBSERVER
    //! The list of local observers attached to this arena.
    observer_list my_observers;
#endif
#if __TBB_NUMA_SUPPORT
    numa_binding_observer* my_numa_binding_observer;
#endif
#if __TBB_TASK_PRIORITY
    intptr_t my_bottom_priority;
    uintptr_t my_reload_epoch;
    task* my_orphaned_tasks;
    tbb::atomic<uintptr_t> my_abandonment_epoch;
    tbb::atomic<intptr_t> my_skipped_fifo_priority;
#endif
#if !__TBB_FP_CONTEXT
    // ...
#endif
#if __TBB_TASK_GROUP_CONTEXT
    // ...
#endif
#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
    bool my_local_concurrency_mode;
    bool my_global_concurrency_mode;
#endif
#if __TBB_PREVIEW_RESUMABLE_TASKS
    //! Cache of schedulers (coroutines) owned by this arena.
    arena_co_cache my_co_cache;
#endif
    // ...
}; // struct arena_base
//! Fragment of the arena class (elisions marked "...").
class arena: public padded<arena_base> {
public:
    //! Constructor
    arena ( market&, unsigned max_num_workers, unsigned num_reserved_slots );

    //! The number of slots to allocate: never fewer than two.
    static unsigned num_arena_slots ( unsigned num_slots ) {
        return max(2u, num_slots);
    }

    //! Get reference to mailbox corresponding to given affinity_id.
    mail_outbox& mailbox( affinity_id id ) {
        __TBB_ASSERT( 0 < id, "affinity id must be positive integer" );
        // ...
    }

    //! Notification that worker or master leaves its arena.
    template<unsigned ref_param>
    inline void on_thread_leaving ();

    //! Outputs internal statistics accumulated by the arena.
    void dump_arena_statistics ();
#if __TBB_TASK_PRIORITY
    //! Check whether the given scheduler may have tasks to execute or dequeue.
    inline bool may_have_tasks ( generic_scheduler*, bool& tasks_present, bool& dequeuing_possible );
#endif /* __TBB_TASK_PRIORITY */

#if __TBB_COUNT_TASK_NODES
    //! Returns the number of task objects residing in worker threads.
    intptr_t workers_task_node_count();
#endif /* __TBB_COUNT_TASK_NODES */

    //! Tries to occupy a slot in the arena. On success, returns the slot index;
    //! if no slot is available, returns out_of_arena.
    template <bool as_worker>
    size_t occupy_free_slot( generic_scheduler& s );

    // ... (remaining members elided)
}; // class arena

//! Notification that worker or master leaves its arena.
template<unsigned ref_param>
inline void arena::on_thread_leaving ( ) {
    // ... (body elided)
#if __TBB_STATISTICS_EARLY_DUMP
// ... (early statistics-dump helpers elided)
#endif

#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
// ... (elided)
#endif

// Fragment of arena::advertise_new_work(): when enqueued work arrives while no
// workers can be allotted, the arena asks the market for mandatory concurrency.
#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
        // ...
        my_market->enable_mandatory_concurrency( this );
        // ...
        my_local_concurrency_mode = true;
        // ...
#endif
    }
    else if( work_type == wakeup ) {
        // ...
    }

// Fragment of the out-of-work path: once the arena runs out of work, the
// mandatory-concurrency request is withdrawn.
#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
    if( my_local_concurrency_mode ) {
        // ...
        my_local_concurrency_mode = false;
    }
    if ( as_atomic(my_global_concurrency_mode) == true )
        my_market->mandatory_concurrency_disable( this );
#endif
uintptr_t my_aba_epoch
ABA prevention marker.
market * my_market
The market that owns this arena.
void advertise_new_work()
If necessary, raise a flag that there is new work in the arena.
bool has_enqueued_tasks()
Check for the presence of enqueued tasks at all priority levels.
bool is_out_of_work()
Check whether there is any work anywhere in the arena.
unsigned my_num_reserved_slots
The number of reserved slots (can be occupied only by masters).
static const size_t out_of_arena
static const unsigned ref_worker
task_group_context
Used to form groups of tasks.
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
task_stream< num_priority_levels > my_task_stream
Task pool for the tasks scheduled via task::enqueue() method.
atomic< unsigned > my_references
Reference counter for the arena.
static void assume_scheduler(generic_scheduler *s)
Temporarily set TLS slot to the given scheduler.
task
Base class for user-defined tasks.
FastRandom
A fast random number generator.
generic_scheduler
Work stealing task scheduler.
static const pool_state_t SNAPSHOT_FULL
At least one task has been offered for stealing since the last snapshot started.
mail_outbox
Class representing where mail is put.
size_t occupy_free_slot(generic_scheduler &s)
Tries to occupy a slot in the arena. On success, returns the slot index; if no slot is available, returns out_of_arena.
spin_mutex::scoped_lock
Represents acquisition of a mutex.
unsigned my_num_workers_allotted
The number of workers that have been marked out by the resource manager to service the arena.
unsigned my_max_num_workers
The number of workers requested by the master thread owning the arena.
cpu_ctl_env my_cpu_ctl_env
FPU control settings of arena's master thread captured at the moment of arena instantiation.
tbb::atomic< uintptr_t > my_pool_state
Current task pool state and estimate of available tasks amount.
unsigned my_num_workers_soft_limit
Current application-imposed limit on the number of workers (see set_active_num_workers())
#define GATHER_STATISTIC(x)
void enqueue_task(task &, intptr_t, FastRandom &)
Enqueue a task into the starvation-resistant queue.
unsigned short affinity_id
An id as used for specifying affinity.
arena(market &, unsigned max_num_workers, unsigned num_reserved_slots)
Constructor.
new_work_type
Types of work advertised by advertise_new_work()
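For orientation, the advertise_new_work() fragment earlier on this page compares work_type against wakeup. A sketch of the enum follows; the enumerator set is recalled from the TBB 2020 sources and should be treated as an assumption here.

enum new_work_type {
    work_spawned,   // a task was spawned into some task pool
    wakeup,         // an explicit request to wake up workers
    work_enqueued   // a task was put into the arena's FIFO task stream
};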
static const unsigned ref_external_bits
The number of least significant bits for external references.
void process(generic_scheduler &)
Registers the worker with the arena and enters TBB scheduler dispatch loop.
static arena & allocate_arena(market &, unsigned num_slots, unsigned num_reserved_slots)
Allocate an instance of arena.
void on_thread_leaving()
Notification that worker or master leaves its arena.
static generic_scheduler * local_scheduler_if_initialized()
static void cleanup_worker(void *arg, bool worker)
Perform necessary cleanup when a worker thread finishes.
size_t occupy_free_slot_in_range(generic_scheduler &s, size_t lower, size_t upper)
Tries to occupy a slot in the specified range.
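The brief above suggests a linear probe over a slot range. Below is a minimal standalone sketch of that idea, claiming the first free slot with a compare-and-swap; the std::atomic<void*> slot array, the helper name, and the out_of_range sentinel are simplifications for illustration, not TBB internals.

#include <atomic>
#include <cstddef>

static const std::size_t out_of_range = ~std::size_t(0);  // cf. out_of_arena above

// Scan slots [lower, upper) and claim the first free one for `owner`.
std::size_t occupy_free_slot_in_range_sketch(std::atomic<void*>* slots,
                                             std::size_t lower, std::size_t upper,
                                             void* owner) {
    for (std::size_t i = lower; i < upper; ++i) {
        void* expected = NULL;  // a NULL slot is free
        if (slots[i].compare_exchange_strong(expected, owner))
            return i;           // success: slot i now belongs to `owner`
    }
    return out_of_range;        // every slot in the range was taken
}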
mail_outbox & mailbox(affinity_id id)
Get reference to mailbox corresponding to given affinity_id.
bool is_recall_requested() const
Check if the recall is requested by the market.
void free_arena()
Completes arena shutdown, destructs and deallocates it.
argument_integer_type modulo_power_of_two(argument_integer_type arg, divisor_integer_type divisor)
A function to compute arg modulo divisor where divisor is a power of 2.
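Given the brief above, the power-of-two restriction lets the modulo be computed with a bit mask instead of a division; a one-line check of the identity (plain C++, not the TBB template itself):

#include <cassert>

int main() {
    unsigned arg = 37, divisor = 8;                 // divisor: a power of two
    assert((arg & (divisor - 1)) == arg % divisor); // both evaluate to 5
    return 0;
}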
padded
Pads type T to fill out to a multiple of cache line size.
static unsigned num_arena_slots(unsigned num_slots)
static int allocation_size(unsigned num_slots)
arena_base
The structure of an arena, except the array of slots.
static const pool_state_t SNAPSHOT_EMPTY
No tasks to steal since last snapshot was taken.
concurrent_monitor my_exit_monitors
Waiting object for master threads that cannot join the arena.
T max(const T &val1, const T &val2)
Utility template function returning greater of the two values.
static const intptr_t num_priority_levels
task_stream
The container for "fairness-oriented" aka "enqueued" tasks.
unsigned num_workers_active() const
The number of workers active in the arena.
atomic< T > & as_atomic(T &t)
static generic_scheduler * local_scheduler()
Obtain the thread-local instance of the TBB scheduler.
void adjust_demand(arena &, int delta)
Request that the arena's demand for workers be adjusted.
static bool is_busy_or_empty(pool_state_t s)
No tasks to steal or snapshot is being taken.
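The two snapshot constants and is_busy_or_empty() fit together as a small state encoding. The sketch below reflects how the TBB 2020 sources define them (recalled from those sources, so treat the exact values as an assumption): SNAPSHOT_FULL is the all-ones value, so anything below it is either empty or a transient "busy" marker used while a snapshot is being taken.

#include <cstdint>

typedef std::uintptr_t pool_state_t;
static const pool_state_t SNAPSHOT_EMPTY = 0;               // no tasks to steal
static const pool_state_t SNAPSHOT_FULL = pool_state_t(-1); // tasks were offered

// True when there is nothing to steal or a snapshot is in progress.
static bool is_busy_or_empty(pool_state_t s) { return s < SNAPSHOT_FULL; }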
void atomic_fence()
Sequentially consistent full memory fence.
void __TBB_EXPORTED_FUNC NFS_Free(void *)
Free memory allocated by NFS_Allocate.
void restore_priority_if_need()
If enqueued tasks are found, restore the arena's priority and task presence status.
int my_num_workers_requested
The number of workers that are currently requested from the resource manager.
spin_mutex
A lock that occupies a single byte.
static const unsigned ref_external
Reference increment values for externals and workers.
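ref_external and ref_worker suggest that my_references packs both populations into one counter: external (master) references in the low ref_external_bits bits and worker references above them, so num_workers_active() is a shift. A sketch of that arithmetic; the 12-bit width is recalled from the TBB sources and is an assumption here.

#include <cassert>

static const unsigned ref_external_bits = 12;               // assumed width
static const unsigned ref_external = 1;                     // one master reference
static const unsigned ref_worker = 1 << ref_external_bits;  // one worker reference

int main() {
    unsigned my_references = 0;
    my_references += ref_external;      // a master joins the arena
    my_references += 2 * ref_worker;    // two workers join
    unsigned workers = my_references >> ref_external_bits;  // == 2
    unsigned externals = my_references & (ref_worker - 1);  // == 1
    assert(workers == 2 && externals == 1);
    return 0;
}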
atomic< unsigned > my_limit
The maximal number of currently busy slots.
void try_destroy_arena(arena *, uintptr_t aba_epoch)
Removes the arena from the market's list.
padded< arena_base > base_type
unsigned my_num_slots
The number of slots in the arena.
void *__TBB_EXPORTED_FUNC NFS_Allocate(size_t n_element, size_t element_size, void *hint)
Allocate memory on cache/sector line boundary.
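NFS here stands for "no false sharing": NFS_Allocate returns memory aligned to a cache-line boundary and is paired with NFS_Free (note that arena_co_cache::init() above memsets the buffer itself, since the allocator does not zero it). A POSIX-only stand-in with the same alignment contract, for illustration; the 128-byte bound and the helper name are assumptions.

#include <cstdlib>
#include <cstring>

// Allocate zeroed, cache-line-aligned storage for n_element items of element_size.
void* nfs_allocate_sketch(std::size_t n_element, std::size_t element_size) {
    std::size_t bytes = n_element * element_size;
    void* p = NULL;
    // 128 bytes is a conservative upper bound for a cache-line size.
    if (posix_memalign(&p, 128, bytes) != 0)
        return NULL;
    std::memset(p, 0, bytes);  // zero, as arena_co_cache::init() does after NFS_Allocate
    return p;                  // release with free()
}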
Copyright © 2005-2020 Intel Corporation. All Rights Reserved.
Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
registered trademarks or trademarks of Intel Corporation or its
subsidiaries in the United States and other countries.
* Other names and brands may be claimed as the property of others.