Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
reader_writer_lock.cpp
Go to the documentation of this file.
1 /*
2  Copyright (c) 2005-2019 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 
16 
17 
18 
19 */
20 
21 #include "tbb/reader_writer_lock.h"
22 #include "tbb/tbb_machine.h"
23 #include "tbb/tbb_exception.h"
24 #include "itt_notify.h"
25 
26 #if defined(_MSC_VER) && defined(_Wp64)
27  // Workaround for overzealous compiler warnings in /Wp64 mode
28  #pragma warning (disable: 4244)
29 #endif
30 
31 namespace tbb {
32 namespace interface5 {
33 
// Bit layout of reader_writer_lock::rdr_count_and_flags: the three low bits
// are state flags; the remaining high bits count active readers in units of
// RC_INCR (so reader count == rdr_count_and_flags / RC_INCR).
const uintptr_t WFLAG1 = 0x1;  // writer interested or active
const uintptr_t WFLAG2 = 0x2;  // writers interested, no entering readers
const uintptr_t RFLAG = 0x4;   // reader interested but not active
const uintptr_t RC_INCR = 0x8; // to adjust reader count
38 
39 
40 // Perform an atomic bitwise-OR on the operand, and return its previous value.
41 inline uintptr_t fetch_and_or(atomic<uintptr_t>& operand, uintptr_t value) {
43  uintptr_t old = operand;
44  uintptr_t result = operand.compare_and_swap(old|value, old);
45  if (result==old) return result;
46  }
47 }
48 
49 // Perform an atomic bitwise-AND on the operand, and return its previous value.
50 inline uintptr_t fetch_and_and(atomic<uintptr_t>& operand, uintptr_t value) {
52  uintptr_t old = operand;
53  uintptr_t result = operand.compare_and_swap(old&value, old);
54  if (result==old) return result;
55  }
56 }
57 
59 
60 template<typename T, typename U>
61 void spin_wait_while_geq( const volatile T& location, U value ) {
63  while( location>=value ) backoff.pause();
64 }
65 
67 
68 template<typename T, typename U>
69 void spin_wait_until_and( const volatile T& location, U value ) {
71  while( !(location & value) ) backoff.pause();
72 }
73 
74 
76  reader_head = NULL;
77  writer_head = NULL;
78  writer_tail = NULL;
81 #if TBB_USE_THREADING_TOOLS
82  ITT_SYNC_CREATE(this, _T("tbb::reader_writer_lock"), _T(""));
83 #endif /* TBB_USE_THREADING_TOOLS */
84 }
85 
87  __TBB_ASSERT(rdr_count_and_flags==0, "reader_writer_lock destroyed with pending readers/writers.");
88  __TBB_ASSERT(reader_head==NULL, "reader_writer_lock destroyed with pending readers.");
89  __TBB_ASSERT(writer_tail==NULL, "reader_writer_lock destroyed with pending writers.");
90  __TBB_ASSERT(writer_head==NULL, "reader_writer_lock destroyed with pending/active writers.");
91 }
92 
93 // Acquires the reader_writer_lock for write. If the lock is currently held in write
94 // mode by another context, the writer will block by spinning on a local variable.
95 // Throws exception improper_lock if the context tries to acquire a
96 // reader_writer_lock that it already has write ownership of.
98  if (is_current_writer()) { // recursive lock attempt
99  // we don't support recursive writer locks; throw exception
101  }
102  else {
103  scoped_lock *a_writer_lock = new scoped_lock();
104  (void) start_write(a_writer_lock);
105  }
106 }
107 
108 // Tries to acquire the reader_writer_lock for write. This function does not block.
109 // Return Value: True or false, depending on whether the lock is acquired or not.
110 // If the lock is already held by this acquiring context, try_lock() returns false.
112  if (is_current_writer()) { // recursive lock attempt
113  return false;
114  }
115  else {
116  scoped_lock *a_writer_lock = new scoped_lock();
117  a_writer_lock->status = waiting_nonblocking;
118  return start_write(a_writer_lock);
119  }
120 }
121 
124  scoped_lock *pred = NULL;
125  if (I->status == waiting_nonblocking) {
126  if ((pred = writer_tail.compare_and_swap(I, NULL)) != NULL) {
127  delete I;
128  return false;
129  }
130  }
131  else {
132  ITT_NOTIFY(sync_prepare, this);
133  pred = writer_tail.fetch_and_store(I);
134  }
135  if (pred)
136  pred->next = I;
137  else {
138  set_next_writer(I);
139  if (I->status == waiting_nonblocking) {
140  if (I->next) { // potentially more writers
141  set_next_writer(I->next);
142  }
143  else { // no more writers
144  writer_head.fetch_and_store(NULL);
145  if (I != writer_tail.compare_and_swap(NULL, I)) { // an incoming writer is in the process of being added
146  spin_wait_while_eq(I->next, (scoped_lock *)NULL); // wait for new writer to be added
147  __TBB_ASSERT(I->next, "There should be a node following the last writer.");
148  set_next_writer(I->next);
149  }
150  }
151  delete I;
152  return false;
153  }
154  }
156  ITT_NOTIFY(sync_acquired, this);
158  return true;
159 }
160 
162  writer_head = W;
163  if (W->status == waiting_nonblocking) {
165  W->status = active;
166  }
167  }
168  else {
169  if (fetch_and_or(rdr_count_and_flags, WFLAG1) & RFLAG) { // reader present
170  spin_wait_until_and(rdr_count_and_flags, WFLAG2); // block until readers set WFLAG2
171  }
172  else { // no reader in timing window
174  }
175  spin_wait_while_geq(rdr_count_and_flags, RC_INCR); // block until readers finish
176  W->status = active;
177  }
178 }
179 
180 // Acquires the reader_writer_lock for read. If the lock is currently held by a writer,
181 // this reader will block and wait until the writers are done.
182 // Throws exception improper_lock when the context tries to acquire a reader_writer_lock
183 // that it already has write ownership of.
185  if (is_current_writer()) { // recursive lock attempt
186  // we don't support writer->reader downgrade; throw exception
188  }
189  else {
190  scoped_lock_read a_reader_lock;
191  start_read(&a_reader_lock);
192  }
193 }
194 
195 // Tries to acquire the reader_writer_lock for read. This function does not block.
196 // Return Value: True or false, depending on whether the lock is acquired or not.
198  if (is_current_writer()) { // recursive lock attempt
199  return false;
200  }
201  else {
202  if (rdr_count_and_flags.fetch_and_add(RC_INCR) & (WFLAG1+WFLAG2)) { // writers present
204  return false;
205  }
206  else { // no writers
207  ITT_NOTIFY(sync_acquired, this);
208  return true;
209  }
210  }
211 }
212 
214  ITT_NOTIFY(sync_prepare, this);
215  I->next = reader_head.fetch_and_store(I);
216  if (!I->next) { // first arriving reader in my group; set RFLAG, test writer flags
217  // unblock and/or update statuses of non-blocking readers
218  if (!(fetch_and_or(rdr_count_and_flags, RFLAG) & (WFLAG1+WFLAG2))) { // no writers
219  unblock_readers();
220  }
221  }
222  __TBB_ASSERT(I->status == waiting || I->status == active, "Lock requests should be waiting or active before blocking.");
223  spin_wait_while_eq(I->status, waiting); // block
224  if (I->next) {
225  __TBB_ASSERT(I->next->status == waiting, NULL);
227  I->next->status = active; // wake successor
228  }
229  ITT_NOTIFY(sync_acquired, this);
230 }
231 
233  // clear rdr interest flag, increment rdr count
237  // indicate clear of window
240  }
241  // unblock waiting readers
242  scoped_lock_read *head = reader_head.fetch_and_store(NULL);
243  __TBB_ASSERT(head, NULL);
244  __TBB_ASSERT(head->status == waiting, NULL);
245  head->status = active;
246 }
247 
248 // Releases the reader_writer_lock
251  // A writer owns the lock
252  __TBB_ASSERT(is_current_writer(), "caller of reader_writer_lock::unlock() does not own the lock.");
253  __TBB_ASSERT(writer_head, NULL);
254  __TBB_ASSERT(writer_head->status==active, NULL);
255  scoped_lock *a_writer_lock = writer_head;
256  end_write(a_writer_lock);
257  __TBB_ASSERT(a_writer_lock != writer_head, "Internal error: About to turn writer_head into dangling reference.");
258  delete a_writer_lock;
259  } else {
260  end_read();
261  }
262 }
263 
265  __TBB_ASSERT(I==writer_head, "Internal error: can't unlock a thread that is not holding the lock.");
267  ITT_NOTIFY(sync_releasing, this);
268  if (I->next) { // potentially more writers
269  writer_head = I->next;
270  writer_head->status = active;
271  }
272  else { // No more writers; clear writer flag, test reader interest flag
273  __TBB_ASSERT(writer_head, NULL);
275  unblock_readers();
276  }
277  writer_head.fetch_and_store(NULL);
278  if (I != writer_tail.compare_and_swap(NULL, I)) { // an incoming writer is in the process of being added
279  spin_wait_while_eq(I->next, (scoped_lock *)NULL); // wait for new writer to be added
280  __TBB_ASSERT(I->next, "There should be a node following the last writer.");
281  set_next_writer(I->next);
282  }
283  }
284 }
285 
287  ITT_NOTIFY(sync_releasing, this);
288  __TBB_ASSERT(rdr_count_and_flags >= RC_INCR, "unlock() called but no readers hold the lock.");
290 }
291 
294 }
295 
296 // Construct with a blocking attempt to acquire a write lock on the passed reader_writer_lock
298  mutex = &lock;
299  next = NULL;
300  status = waiting;
301  if (mutex->is_current_writer()) { // recursive lock attempt
302  // we don't support recursive writer locks; throw exception
304  }
305  else { // this thread holds no locks
306  (void) mutex->start_write(this);
307  }
308 }
309 
311  status = waiting;
312 }
313 
314 // Construct with a blocking attempt to acquire a write lock on the passed reader_writer_lock
316  mutex = &lock;
317  next = NULL;
318  status = waiting;
319  if (mutex->is_current_writer()) { // recursive lock attempt
320  // we don't support writer->reader downgrade; throw exception
322  }
323  else { // this thread holds no locks
324  mutex->start_read(this);
325  }
326 }
327 
329  status = waiting;
330 }
331 
333  if (mutex) {
334  __TBB_ASSERT(mutex->is_current_writer(), "~scoped_lock() destroyed by thread different than thread that holds lock.");
335  mutex->end_write(this);
336  }
337  status = invalid;
338 }
339 
341  if (mutex)
342  mutex->end_read();
343  status = invalid;
344 }
345 
346 } // namespace interface5
347 } // namespace tbb
uintptr_t fetch_and_and(atomic< uintptr_t > &operand, uintptr_t value)
const uintptr_t RFLAG
tbb_thread::id my_current_writer
Writer that owns the mutex; tbb_thread::id() otherwise.
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id id
atomic< scoped_lock * > writer_head
The list of pending writers.
scoped_lock()
Construct scoped_lock that is not holding lock.
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type size_t void ITT_FORMAT p const __itt_domain __itt_id __itt_string_handle const wchar_t size_t ITT_FORMAT lu const __itt_domain __itt_id head
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:169
void unblock_readers()
Unblocks pending readers.
#define ITT_SYNC_CREATE(obj, type, name)
Definition: itt_notify.h:123
void start_read(scoped_lock_read *)
Attempts to acquire read lock.
scoped_lock * next
The next queued competitor for the mutex.
void set_next_writer(scoped_lock *w)
Sets writer_head to w and attempts to unblock.
scoped_lock_read * next
The next queued competitor for the mutex.
bool start_write(scoped_lock *)
Attempts to acquire write lock.
bool is_current_writer()
Checks if current thread holds write lock.
void pause()
Pause for a while.
Definition: tbb_machine.h:364
The scoped lock pattern for write locks.
atomic< scoped_lock_read * > reader_head
The list of pending readers.
void end_read()
Relinquishes read lock by decrementing counter; last reader wakes pending writer.
void throw_exception(exception_id eid)
Versionless convenience wrapper for throw_exception_v4()
atomic< status_t > status
Status flag of the thread associated with this node.
void __TBB_EXPORTED_METHOD internal_construct()
void __TBB_EXPORTED_METHOD internal_construct(reader_writer_lock &)
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void * lock
bool __TBB_EXPORTED_METHOD try_lock()
Tries to acquire the reader_writer_lock for write.
The graph class.
const uintptr_t WFLAG1
void spin_wait_while_eq(const volatile T &location, U value)
Spin WHILE the value of the variable is equal to a given value.
Definition: tbb_machine.h:395
#define _T(string_literal)
Standard Windows style macro to markup the string literals.
Definition: itt_notify.h:66
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long value
void __TBB_EXPORTED_METHOD internal_construct(reader_writer_lock &)
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p sync_releasing
atomic< status_t > status
Status flag of the thread associated with this node.
Class that implements exponential backoff.
Definition: tbb_machine.h:349
tbb_thread::id get_id()
Definition: tbb_thread.h:321
void spin_wait_while_geq(const volatile T &location, U value)
Spin WHILE the value at the location is greater than or equal to a given value.
#define ITT_NOTIFY(name, obj)
Definition: itt_notify.h:120
void __TBB_EXPORTED_METHOD internal_destroy()
void __TBB_EXPORTED_METHOD lock_read()
Acquires the reader_writer_lock for read.
bool __TBB_EXPORTED_METHOD try_lock_read()
Tries to acquire the reader_writer_lock for read.
scoped_lock_read()
Construct scoped_lock_read that is not holding lock.
value_type compare_and_swap(value_type value, value_type comparand)
Definition: atomic.h:289
atomic< scoped_lock * > writer_tail
The last node in the list of pending writers.
uintptr_t fetch_and_or(atomic< uintptr_t > &operand, uintptr_t value)
void __TBB_EXPORTED_METHOD lock()
Acquires the reader_writer_lock for write.
Wrapper around the platform's native lock.
Definition: mutex.h:39
Writer-preference reader-writer lock with local-only spinning on readers.
const uintptr_t WFLAG2
void spin_wait_until_and(const volatile T &location, U value)
Spin UNTIL (location & value) is true.
void end_write(scoped_lock *)
Relinquishes write lock to next waiting writer or group of readers.
void __TBB_EXPORTED_METHOD unlock()
Releases the reader_writer_lock.
void __TBB_AtomicOR(volatile void *operand, uintptr_t addend)
Definition: tbb_machine.h:882
atomic< uintptr_t > rdr_count_and_flags
Status of mutex.
const uintptr_t RC_INCR

Copyright © 2005-2019 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.