#if __TBB_TASK_GROUP_CONTEXT
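
//------------------------------------------------------------------------
// captured_exception
//------------------------------------------------------------------------

// Helper: make a heap-allocated copy of a C string so that a captured_exception
// can own its name and description independently of the thrower's buffers.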
inline char* duplicate_string ( const char* src ) {
    if ( !src )
        return NULL;
    size_t len = strlen(src) + 1;
    char* dst = (char*)allocate_via_handler_v3(len);  // allocation goes through TBB's internal handler
    strncpy (dst, src, len);
    return dst;
}
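
// captured_exception::set(): store private copies of the exception name and description.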
    my_exception_name = duplicate_string( a_name );
    my_exception_info = duplicate_string( info );
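
// captured_exception::move(): transfer ownership of the stored strings into the freshly
// allocated copy (e), leaving this object empty.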
    ::new (e) captured_exception();
    e->my_exception_name = my_exception_name;
    e->my_exception_info = my_exception_info;
    my_exception_name = my_exception_info = NULL;
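
// captured_exception::destroy(): only dynamically allocated instances may be destroyed this way.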
    __TBB_ASSERT ( my_dynamic, "Method destroy can be used only on objects created by clone or allocate" );
    ::new (e) captured_exception(a_name, info);
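
// The name() and what() accessors simply return the stored copies.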
    return my_exception_name;

    return my_exception_info;
#if !TBB_USE_CAPTURED_EXCEPTION
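
//------------------------------------------------------------------------
// tbb_exception_ptr
//------------------------------------------------------------------------

// When exact exception propagation is available, exceptions are carried between threads
// in a tbb_exception_ptr wrapping std::exception_ptr instead of a captured_exception.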
template<typename T>
tbb_exception_ptr* AllocateExceptionContainer( const T& src ) {
    tbb_exception_ptr *eptr = (tbb_exception_ptr*)allocate_via_handler_v3( sizeof(tbb_exception_ptr) );
    if ( eptr ) new (eptr) tbb_exception_ptr(src);
    return eptr;
}
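
// The allocate() overloads wrap whatever exception is currently being handled, as
// obtained from std::current_exception().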
    return AllocateExceptionContainer( std::current_exception() );   // in the parameterless allocate()

    return AllocateExceptionContainer( std::current_exception() );   // in allocate( const tbb_exception& )
    tbb_exception_ptr *res = AllocateExceptionContainer( src );
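
// tbb_exception_ptr::destroy(): run the destructor explicitly before the storage is released.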
    this->tbb_exception_ptr::~tbb_exception_ptr();
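
//------------------------------------------------------------------------
// task_group_context
//------------------------------------------------------------------------

// ~task_group_context(): a fully bound context must be unlinked from its owner scheduler's
// thread-local context list before it dies. The first path below runs when the owner
// thread itself destroys the context; the second when another thread does.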
        uintptr_t local_count_snapshot = my_owner->my_context_state_propagation_epoch;
        my_owner->my_local_ctx_list_update.store<relaxed>(1);
        if ( my_owner->my_nonlocal_ctx_list_update.load<relaxed>() ) {
            // A non-local list update is in flight; the unlink below is coordinated with it.
            my_node.my_prev->my_next = my_node.my_next;
            my_node.my_next->my_prev = my_node.my_prev;
            my_owner->my_local_ctx_list_update.store<relaxed>(0);
        }
        else {
            my_node.my_prev->my_next = my_node.my_next;
            my_node.my_next->my_prev = my_node.my_prev;
            // The release store publishes the unlinking before the update flag is cleared.
            my_owner->my_local_ctx_list_update.store<release>(0);
            if ( local_count_snapshot != the_context_state_propagation_epoch ) {
                // A concurrent state propagation was detected; it must be allowed to finish
                // with this context before the destructor may complete.
            }
        }
        // If the owner has already detached this context, a plain unlink suffices:
        my_node.my_prev->my_next = my_node.my_next;
        my_node.my_next->my_prev = my_node.my_prev;

        // Otherwise announce a non-local update, unlink under the owner's list mutex,
        // then retract the announcement:
        my_owner->my_nonlocal_ctx_list_update.fetch_and_increment<full_fence>();
        my_owner->my_context_list_mutex.lock();
        my_node.my_prev->my_next = my_node.my_next;
        my_node.my_next->my_prev = my_node.my_prev;
        my_owner->my_context_list_mutex.unlock();
        my_owner->my_nonlocal_ctx_list_update.fetch_and_decrement<full_fence>();
    // Release the captured FPU settings and any pending exception.
    internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env)->~cpu_ctl_env();
    if ( my_exception )
        my_exception->destroy();
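
// task_group_context::init(): basic field initialization performed by the constructors,
// including layout sanity checks and, if requested, capturing the current FPU settings.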
    // Contexts with a layout version below 3 carry no user-visible name; fall back to the generic one.
    if( ( my_version_and_traits & version_mask ) < 3 )
        my_name = internal::CUSTOM_CTX;
    __TBB_STATIC_ASSERT ( sizeof(my_version_and_traits) >= 4, "Layout of my_version_and_traits must be reconsidered on this platform" );
    __TBB_ASSERT ( (uintptr_t(this) & (sizeof(my_cancellation_requested) - 1)) == 0, "Context is improperly aligned" );
    my_node.my_next = NULL;
    my_node.my_prev = NULL;
    my_cancellation_requested = 0;
#if __TBB_TASK_PRIORITY
    my_priority = normalized_normal_priority;
#endif /* __TBB_TASK_PRIORITY */
    __TBB_STATIC_ASSERT( sizeof(my_cpu_ctl_env) == sizeof(internal::uint64_t), "The reserved space for FPU settings is not equal to sizeof(uint64_t)" );
    __TBB_STATIC_ASSERT( sizeof(cpu_ctl_env) <= sizeof(my_cpu_ctl_env), "FPU settings storage does not fit into uint64_t" );
    cpu_ctl_env &ctl = *internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env);
    new ( &ctl ) cpu_ctl_env;
    if ( my_version_and_traits & fp_settings )
        ctl.get_env();  // capture the calling thread's current FPU control state
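
// task_group_context::register_with(): link this context at the head of the given scheduler's
// thread-local context list so that state changes (cancellation, priority) can later be
// propagated to it.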
    my_owner = local_sched;
    // State propagation assumes new contexts are linked in at the head of the list.
    my_node.my_prev = &local_sched->my_context_list_head;
    // Announce to threads that may be concurrently destroying contexts registered in this
    // scheduler's list that a local list update is underway.
    local_sched->my_local_ctx_list_update.store<relaxed>(1);
    if ( local_sched->my_nonlocal_ctx_list_update.load<relaxed>() ) {
        // A concurrent non-local list update is in flight; coordinate with it.
        local_sched->my_context_list_head.my_next->my_prev = &my_node;
        my_node.my_next = local_sched->my_context_list_head.my_next;
        my_owner->my_local_ctx_list_update.store<relaxed>(0);
        local_sched->my_context_list_head.my_next = &my_node;
    }
    else {
        local_sched->my_context_list_head.my_next->my_prev = &my_node;
        my_node.my_next = local_sched->my_context_list_head.my_next;
        my_owner->my_local_ctx_list_update.store<release>(0);
        // Publish the new head with a release store so concurrent traversals see a fully
        // initialized node.
        __TBB_store_with_release(local_sched->my_context_list_head.my_next, &my_node);
    }
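
// task_group_context::bind_to(): attach this context to its parent (the context of the
// innermost task running on local_sched) and inherit the parent's state. The parent's
// state is first copied speculatively; the copy is redone under a lock if a concurrent
// state propagation is detected via the epoch counter.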
    __TBB_ASSERT ( !my_parent, "Parent is set before initial binding" );
    my_parent = local_sched->my_innermost_running_task->prefix().context;
    // Inherit FPU settings only if this context has not captured its own yet.
    if ( !(my_version_and_traits & fp_settings) )
        copy_fp_settings(*my_parent);
    // Avoid needlessly dirtying the parent's cache line.
    if ( !(my_parent->my_state & may_have_children) )
        my_parent->my_state |= may_have_children;
    if ( my_parent->my_parent ) {
        // Speculatively copy the parent's state, then validate the copy by checking that no
        // state propagation ran concurrently (the epoch counter is unchanged).
        uintptr_t local_count_snapshot = __TBB_load_with_acquire( my_parent->my_owner->my_context_state_propagation_epoch );
        my_cancellation_requested = my_parent->my_cancellation_requested;
#if __TBB_TASK_PRIORITY
        my_priority = my_parent->my_priority;
#endif /* __TBB_TASK_PRIORITY */
        register_with( local_sched );
        if ( local_count_snapshot != the_context_state_propagation_epoch ) {
            // A propagation may have been missed; redo the copy under the global lock.
            context_state_propagation_mutex_type::scoped_lock lock(the_context_state_propagation_mutex);
            my_cancellation_requested = my_parent->my_cancellation_requested;
#if __TBB_TASK_PRIORITY
            my_priority = my_parent->my_priority;
#endif /* __TBB_TASK_PRIORITY */
        }
    }
    else {
        register_with( local_sched );
        // With no grand-ancestors, concurrent propagation can only originate from the parent
        // itself, so its state can be copied directly.
        my_cancellation_requested = my_parent->my_cancellation_requested;
#if __TBB_TASK_PRIORITY
        my_priority = my_parent->my_priority;
#endif /* __TBB_TASK_PRIORITY */
    }
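
// task_group_context::propagate_task_group_state(): repaint this context with new_state
// if, and only if, it descends from src, by walking the ancestor chain.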
template <typename T>
void task_group_context::propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ) {
    if (this->*mptr_state == new_state) {
        // Nothing to do: this context already has the requested state.
    }
    else if ( this == &src) {
        // src itself: its state was already set by the initiator of the propagation.
    }
    else {
        // Walk up the ancestor chain looking for src; if found, repaint the chain from this
        // context up to (but not including) src.
        for ( task_group_context *ancestor = my_parent; ancestor != NULL; ancestor = ancestor->my_parent ) {
            __TBB_ASSERT(internal::is_alive(ancestor->my_version_and_traits), "context tree was corrupted");
            if ( ancestor == &src ) {
                for ( task_group_context *ctx = this; ctx != ancestor; ctx = ctx->my_parent )
                    ctx->*mptr_state = new_state;
                break;
            }
        }
    }
}
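
// generic_scheduler::propagate_task_group_state(): apply the state change to every context
// registered in this scheduler's thread-local context list.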
template <typename T>
void generic_scheduler::propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ) {
    context_list_node_t *node = my_context_list_head.my_next;
    while ( node != &my_context_list_head ) {
        // Recover the context that embeds this list node.
        task_group_context &ctx = __TBB_get_object_ref(task_group_context, my_node, node);
        if ( ctx.*mptr_state != new_state )
            ctx.propagate_task_group_state( mptr_state, src, new_state );
        node = node->my_next;
        __TBB_ASSERT( is_alive(ctx.my_version_and_traits), "Local context list contains destroyed object" );
    }
}
template <typename T>
bool market::propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ) {
    // The whole propagation runs under the global lock so that concurrent state changes at
    // different levels of the context tree stay consistent.
    context_state_propagation_mutex_type::scoped_lock lock(the_context_state_propagation_mutex);
    if ( src.*mptr_state != new_state )
        // Another thread has concurrently changed the state of src again; back down.
        return false;
    // Propagate to every worker thread's scheduler (s denotes the i-th worker's scheduler).
    for ( unsigned i = 0; i < num_workers; ++i ) {
        s->propagate_task_group_state( mptr_state, src, new_state );
    }
    // Propagate to every master thread's scheduler.
    for( scheduler_list_type::iterator it = my_masters.begin(); it != my_masters.end(); it++ )
        it->propagate_task_group_state( mptr_state, src, new_state );
    return true;
}
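
// task_group_context::cancel_group_execution(): atomically flip my_cancellation_requested
// from 0 to 1; returns false if the group was already being cancelled, otherwise the
// cancellation is propagated to descendant contexts through the market.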
    __TBB_ASSERT ( my_cancellation_requested == 0 || my_cancellation_requested == 1, "Invalid cancellation state");
    if ( my_cancellation_requested || as_atomic(my_cancellation_requested).compare_and_swap(1, 0) ) {
        // The group (and hence all of its descendants) is already being cancelled.
        return false;
    }
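
// task_group_context::is_group_execution_cancelled(): cheap polling accessor; long-running
// task bodies typically call it (or tbb::task::self().is_cancelled()) to bail out early.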
    return my_cancellation_requested != 0;
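
// task_group_context::reset(): clear the cancellation flag and any stored exception so the
// context can be reused; it is not meant to be called concurrently with other operations.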
    if ( my_exception ) {
        my_exception->destroy();
        my_exception = NULL;
    }
    my_cancellation_requested = 0;
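
// task_group_context::capture_fp_settings(): record the calling thread's current FPU
// control state in the context so that tasks running in it use these settings.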
    cpu_ctl_env &ctl = *internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env);
    if ( !(my_version_and_traits & fp_settings) ) {
        new ( &ctl ) cpu_ctl_env;
        my_version_and_traits |= fp_settings;
    }
    ctl.get_env();  // record the calling thread's current FPU control state
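
// task_group_context::copy_fp_settings(): copy captured FPU settings from another context
// (used during binding to inherit the parent's settings).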
    __TBB_ASSERT( !(my_version_and_traits & fp_settings), "The context already has FPU settings." );
    __TBB_ASSERT( src.my_version_and_traits & fp_settings, "The source context does not have FPU settings." );

    cpu_ctl_env &ctl = *internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env);
    cpu_ctl_env &src_ctl = *internal::punned_cast<cpu_ctl_env*>(&src.my_cpu_ctl_env);
    new (&ctl) cpu_ctl_env( src_ctl );
    my_version_and_traits |= fp_settings;
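
// task_group_context::register_pending_exception(): capture the exception currently being
// handled (unless the group is already cancelled) so it can be rethrown to the waiter.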
    if ( my_cancellation_requested )
        return;
#if TBB_USE_EXCEPTIONS
    try {
        throw;  // rethrow the in-flight exception so TbbCatchAll can capture it for this context
    } TbbCatchAll( this );
#endif /* TBB_USE_EXCEPTIONS */
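
// task_group_context::set_priority() / priority(): priorities are stored in a normalized
// internal representation and converted back to priority_t on the way out.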
#if __TBB_TASK_PRIORITY
    intptr_t p = normalize_priority(prio);
    // The normalized priority is stored in the context and propagated through the market;
    // finally the arena of the calling thread's scheduler (s) is switched to the new level.
    s->my_market->update_arena_priority( *s->my_arena, p );
    return static_cast<priority_t>(priority_from_normalized_rep[my_priority]);
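
// Usage sketch (illustrative only, not part of this translation unit). It shows how the
// cancellation machinery above is reached through the public API: parallel_for runs under
// an explicit task_group_context, and cancelling that context stops the remaining work.
// The vector contents and the searched value are arbitrary example data.
#include "tbb/task.h"
#include "tbb/parallel_for.h"
#include "tbb/blocked_range.h"
#include <vector>

bool contains_value( const std::vector<int>& data, int value ) {
    tbb::task_group_context ctx;
    tbb::parallel_for( tbb::blocked_range<size_t>( 0, data.size() ),
        [&]( const tbb::blocked_range<size_t>& r ) {
            for ( size_t i = r.begin(); i != r.end(); ++i )
                if ( data[i] == value )
                    ctx.cancel_group_execution();   // requests cancellation for the whole group
        },
        ctx );
    return ctx.is_group_execution_cancelled();      // true iff some task found the value
}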