Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
queuing_mutex.cpp
Go to the documentation of this file.
1 /*
2  Copyright (c) 2005-2020 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 */
16 
17 #include "tbb/queuing_mutex.h"
18 #include "tbb/tbb_machine.h"
19 #include "tbb/tbb_stddef.h"
20 #include "tbb_misc.h"
21 #include "itt_notify.h"
22 
23 namespace tbb {
24 
25 using namespace internal;
26 
29 {
30  __TBB_ASSERT( !this->mutex, "scoped_lock is already holding a mutex");
31 
32  // Must set all fields before the fetch_and_store, because once the
33  // fetch_and_store executes, *this becomes accessible to other threads.
34  mutex = &m;
35  next = NULL;
36  going = 0;
37 
38  // The fetch_and_store must have release semantics, because we are
39  // "sending" the fields initialized above to other processors.
40  scoped_lock* pred = m.q_tail.fetch_and_store<tbb::release>(this);
41  if( pred ) {
42  ITT_NOTIFY(sync_prepare, mutex);
43 #if TBB_USE_ASSERT
44  __TBB_control_consistency_helper(); // on "m.q_tail"
45  __TBB_ASSERT( !pred->next, "the predecessor has another successor!");
46 #endif
47  pred->next = this;
48  spin_wait_while_eq( going, 0ul );
49  }
50  ITT_NOTIFY(sync_acquired, mutex);
51 
52  // Force acquire so that user's critical section receives correct values
53  // from processor that was previously in the user's critical section.
54  __TBB_load_with_acquire(going);
55 }
56 
57 //! A method to acquire queuing_mutex if it is free
58 bool queuing_mutex::scoped_lock::try_acquire( queuing_mutex& m )
59 {
60  __TBB_ASSERT( !this->mutex, "scoped_lock is already holding a mutex");
61 
62  // Must set all fields before the fetch_and_store, because once the
63  // fetch_and_store executes, *this becomes accessible to other threads.
64  next = NULL;
65  going = 0;
66 
67  // The CAS must have release semantics, because we are
68  // "sending" the fields initialized above to other processors.
69  if( m.q_tail.compare_and_swap<tbb::release>(this, NULL) )
70  return false;
71 
72  // Force acquire so that user's critical section receives correct values
73  // from processor that was previously in the user's critical section.
74  __TBB_load_with_acquire(going);
75  mutex = &m;
76  ITT_NOTIFY(sync_acquired, mutex);
77  return true;
78 }
79 
80 //! A method to release queuing_mutex lock
81 void queuing_mutex::scoped_lock::release( )
82 {
83  __TBB_ASSERT(this->mutex!=NULL, "no lock acquired");
84 
85  ITT_NOTIFY(sync_releasing, mutex);
86  if( !next ) {
87  if( this == mutex->q_tail.compare_and_swap<tbb::release>(NULL, this) ) {
88  // this was the only item in the queue, and the queue is now empty.
89  goto done;
90  }
91  // Someone in the queue
92  spin_wait_while_eq( next, (scoped_lock*)0 );
93  }
94  __TBB_ASSERT(next,NULL);
95  __TBB_store_with_release(next->going, 1);
96 done:
97  initialize();
98 }
99 
100 void queuing_mutex::internal_construct() {
101  ITT_SYNC_CREATE(this, _T("tbb::queuing_mutex"), _T(""));
102 }
103 
104 } // namespace tbb
tbb::queuing_mutex::scoped_lock::next
scoped_lock * next
The pointer to the next competitor for a mutex.
Definition: queuing_mutex.h:84
tbb::queuing_mutex
Queuing mutex with local-only spinning.
Definition: queuing_mutex.h:31
tbb::internal::__TBB_load_with_acquire
T __TBB_load_with_acquire(const volatile T &location)
Definition: tbb_machine.h:709
tbb::queuing_mutex::scoped_lock::acquire
void __TBB_EXPORTED_METHOD acquire(queuing_mutex &m)
Acquire lock on given mutex.
Definition: queuing_mutex.cpp:28
internal
Definition: _flow_graph_async_msg_impl.h:24
__TBB_ASSERT
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165
ITT_NOTIFY
#define ITT_NOTIFY(name, obj)
Definition: itt_notify.h:112
tbb::queuing_mutex::scoped_lock::release
void __TBB_EXPORTED_METHOD release()
Release lock.
Definition: queuing_mutex.cpp:81
tbb
The graph class.
Definition: serial/tbb/parallel_for.h:46
tbb::internal::spin_wait_while_eq
void spin_wait_while_eq(const volatile T &location, U value)
Spin WHILE the value of the variable is equal to a given value.
Definition: tbb_machine.h:391
itt_notify.h
queuing_mutex.h
tbb::queuing_mutex::q_tail
atomic< scoped_lock * > q_tail
The last competitor requesting the lock.
Definition: queuing_mutex.h:102
tbb::queuing_mutex::internal_construct
void __TBB_EXPORTED_METHOD internal_construct()
Definition: queuing_mutex.cpp:100
ITT_SYNC_CREATE
#define ITT_SYNC_CREATE(obj, type, name)
Definition: itt_notify.h:115
tbb::internal::__TBB_store_with_release
void __TBB_store_with_release(volatile T &location, V value)
Definition: tbb_machine.h:713
__TBB_control_consistency_helper
#define __TBB_control_consistency_helper()
Definition: gcc_generic.h:60
_T
#define _T(string_literal)
Standard Windows style macro to markup the string literals.
Definition: itt_notify.h:59
tbb_misc.h
tbb_machine.h
tbb::queuing_mutex::scoped_lock
The scoped locking pattern.
Definition: queuing_mutex.h:44
sync_releasing
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p sync_releasing
Definition: ittnotify_static.h:104
tbb_stddef.h
tbb::queuing_mutex::scoped_lock::try_acquire
bool __TBB_EXPORTED_METHOD try_acquire(queuing_mutex &m)
Acquire lock on given mutex if free (i.e. non-blocking)
Definition: queuing_mutex.cpp:58
tbb::release
@ release
Release.
Definition: atomic.h:59

Copyright © 2005-2020 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.