libstdc++
shared_mutex
Go to the documentation of this file.
1// <shared_mutex> -*- C++ -*-
2
3// Copyright (C) 2013-2025 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file include/shared_mutex
26 * This is a Standard C++ Library header.
27 */
28
29#ifndef _GLIBCXX_SHARED_MUTEX
30#define _GLIBCXX_SHARED_MUTEX 1
31
32#ifdef _GLIBCXX_SYSHDR
33#pragma GCC system_header
34#endif
35
36#include <bits/requires_hosted.h> // concurrency
37
38#if __cplusplus >= 201402L
39
40#include <bits/chrono.h>
42#include <bits/functexcept.h>
43#include <bits/move.h> // move, __exchange
44#include <bits/std_mutex.h> // defer_lock_t
45
46#define __glibcxx_want_shared_mutex
47#define __glibcxx_want_shared_timed_mutex
48#include <bits/version.h>
49
50#if ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
51# include <condition_variable>
52#endif
53
54namespace std _GLIBCXX_VISIBILITY(default)
55{
56_GLIBCXX_BEGIN_NAMESPACE_VERSION
57
58 /**
59 * @addtogroup mutexes
60 * @{
61 */
62
63#ifdef _GLIBCXX_HAS_GTHREADS
64
65#ifdef __cpp_lib_shared_mutex // C++ >= 17 && hosted && gthread
66 class shared_mutex;
67#endif
68
70
71 /// @cond undocumented
72
#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
#ifdef __gthrw
// Weak-reference wrappers for the pthread_rwlock_* functions.  Each
// __glibcxx_NAME wrapper forwards to the real pthread function only when
// the process is actually multi-threaded (__gthread_active_p), and
// otherwise reports success without doing anything, so single-threaded
// programs avoid the cost of the real calls.
#define _GLIBCXX_GTHRW(name) \
  __gthrw(pthread_ ## name); \
  inline int \
  __glibcxx_ ## name (pthread_rwlock_t *__rwlock) \
  { \
    if (__gthread_active_p ()) \
      return __gthrw_(pthread_ ## name) (__rwlock); \
    else \
      return 0; \
  }
  _GLIBCXX_GTHRW(rwlock_rdlock)
  _GLIBCXX_GTHRW(rwlock_tryrdlock)
  _GLIBCXX_GTHRW(rwlock_wrlock)
  _GLIBCXX_GTHRW(rwlock_trywrlock)
  _GLIBCXX_GTHRW(rwlock_unlock)
# ifndef PTHREAD_RWLOCK_INITIALIZER
  // No static initializer is available, so the rwlock must be created and
  // destroyed explicitly; wrap init/destroy as well.  pthread_rwlock_init
  // takes an extra attributes argument, so it cannot use the macro above.
  _GLIBCXX_GTHRW(rwlock_destroy)
  __gthrw(pthread_rwlock_init);
  inline int
  __glibcxx_rwlock_init (pthread_rwlock_t *__rwlock)
  {
    if (__gthread_active_p ())
      return __gthrw_(pthread_rwlock_init) (__rwlock, NULL);
    else
      return 0;
  }
# endif
# if _GTHREAD_USE_MUTEX_TIMEDLOCK
  // Timed lock operations, used by shared_timed_mutex below.
  __gthrw(pthread_rwlock_timedrdlock);
  inline int
  __glibcxx_rwlock_timedrdlock (pthread_rwlock_t *__rwlock,
				const timespec *__ts)
  {
    if (__gthread_active_p ())
      return __gthrw_(pthread_rwlock_timedrdlock) (__rwlock, __ts);
    else
      return 0;
  }
  __gthrw(pthread_rwlock_timedwrlock);
  inline int
  __glibcxx_rwlock_timedwrlock (pthread_rwlock_t *__rwlock,
				const timespec *__ts)
  {
    if (__gthread_active_p ())
      return __gthrw_(pthread_rwlock_timedwrlock) (__rwlock, __ts);
    else
      return 0;
  }
# endif
#else
  // __gthrw is not available, so call the pthread functions directly.
  inline int
  __glibcxx_rwlock_rdlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_rdlock (__rwlock); }
  inline int
  __glibcxx_rwlock_tryrdlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_tryrdlock (__rwlock); }
  inline int
  __glibcxx_rwlock_wrlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_wrlock (__rwlock); }
  inline int
  __glibcxx_rwlock_trywrlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_trywrlock (__rwlock); }
  inline int
  __glibcxx_rwlock_unlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_unlock (__rwlock); }
  inline int
  __glibcxx_rwlock_destroy(pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_destroy (__rwlock); }
  inline int
  __glibcxx_rwlock_init(pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_init (__rwlock, NULL); }
# if _GTHREAD_USE_MUTEX_TIMEDLOCK
  inline int
  __glibcxx_rwlock_timedrdlock (pthread_rwlock_t *__rwlock,
				const timespec *__ts)
  { return pthread_rwlock_timedrdlock (__rwlock, __ts); }
  inline int
  __glibcxx_rwlock_timedwrlock (pthread_rwlock_t *__rwlock,
				const timespec *__ts)
  { return pthread_rwlock_timedwrlock (__rwlock, __ts); }
# endif
#endif
157
158 /// A shared mutex type implemented using pthread_rwlock_t.
159 class __shared_mutex_pthread
160 {
161 friend class shared_timed_mutex;
162
163#ifdef PTHREAD_RWLOCK_INITIALIZER
164 pthread_rwlock_t _M_rwlock = PTHREAD_RWLOCK_INITIALIZER;
165
166 public:
167 __shared_mutex_pthread() = default;
168 ~__shared_mutex_pthread() = default;
169#else
170 pthread_rwlock_t _M_rwlock;
171
172 public:
173 __shared_mutex_pthread()
174 {
175 int __ret = __glibcxx_rwlock_init(&_M_rwlock);
176 if (__ret == ENOMEM)
177 __throw_bad_alloc();
178 else if (__ret == EAGAIN)
179 __throw_system_error(int(errc::resource_unavailable_try_again));
180 else if (__ret == EPERM)
181 __throw_system_error(int(errc::operation_not_permitted));
182 // Errors not handled: EBUSY, EINVAL
183 __glibcxx_assert(__ret == 0);
184 }
185
186 ~__shared_mutex_pthread()
187 {
188 int __ret __attribute((__unused__)) = __glibcxx_rwlock_destroy(&_M_rwlock);
189 // Errors not handled: EBUSY, EINVAL
190 __glibcxx_assert(__ret == 0);
191 }
192#endif
193
194 __shared_mutex_pthread(const __shared_mutex_pthread&) = delete;
195 __shared_mutex_pthread& operator=(const __shared_mutex_pthread&) = delete;
196
197 void
198 lock()
199 {
200 int __ret = __glibcxx_rwlock_wrlock(&_M_rwlock);
201 if (__ret == EDEADLK)
202 __throw_system_error(int(errc::resource_deadlock_would_occur));
203 // Errors not handled: EINVAL
204 __glibcxx_assert(__ret == 0);
205 }
206
207 bool
208 try_lock()
209 {
210 int __ret = __glibcxx_rwlock_trywrlock(&_M_rwlock);
211 if (__ret == 0)
212 return true;
213 if (__ret == EBUSY)
214 return false;
215 // Errors not handled: EINVAL, EDEADLK
216 __glibcxx_assert(__ret == 0);
217 // try_lock() is not permitted to throw
218 return false;
219 }
220
221 void
222 unlock()
223 {
224 int __ret __attribute((__unused__)) = __glibcxx_rwlock_unlock(&_M_rwlock);
225 // Errors not handled: EPERM, EBUSY, EINVAL
226 __glibcxx_assert(__ret == 0);
227 }
228
229 // Shared ownership
230
231 void
232 lock_shared()
233 {
234 int __ret;
235 // We retry if we exceeded the maximum number of read locks supported by
236 // the POSIX implementation; this can result in busy-waiting, but this
237 // is okay based on the current specification of forward progress
238 // guarantees by the standard.
239 do
240 __ret = __glibcxx_rwlock_rdlock(&_M_rwlock);
241 while (__ret == EAGAIN);
242 if (__ret == EDEADLK)
243 __throw_system_error(int(errc::resource_deadlock_would_occur));
244 // Errors not handled: EINVAL
245 __glibcxx_assert(__ret == 0);
246 }
247
248 bool
249 try_lock_shared()
250 {
251 int __ret = __glibcxx_rwlock_tryrdlock(&_M_rwlock);
252 // If the maximum number of read locks has been exceeded, we just fail
253 // to acquire the lock. Unlike for lock(), we are not allowed to throw
254 // an exception.
255 if (__ret == EBUSY || __ret == EAGAIN) return false;
256 // Errors not handled: EINVAL
257 __glibcxx_assert(__ret == 0);
258 return true;
259 }
260
261 void
262 unlock_shared()
263 {
264 unlock();
265 }
266
267 void* native_handle() { return &_M_rwlock; }
268 };
269#endif
270
271#if ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
272 /// A shared mutex type implemented using std::condition_variable.
273 class __shared_mutex_cv
274 {
275 friend class shared_timed_mutex;
276
277 // Based on Howard Hinnant's reference implementation from N2406.
278
279 // The high bit of _M_state is the write-entered flag which is set to
280 // indicate a writer has taken the lock or is queuing to take the lock.
281 // The remaining bits are the count of reader locks.
282 //
283 // To take a reader lock, block on gate1 while the write-entered flag is
284 // set or the maximum number of reader locks is held, then increment the
285 // reader lock count.
286 // To release, decrement the count, then if the write-entered flag is set
287 // and the count is zero then signal gate2 to wake a queued writer,
288 // otherwise if the maximum number of reader locks was held signal gate1
289 // to wake a reader.
290 //
291 // To take a writer lock, block on gate1 while the write-entered flag is
292 // set, then set the write-entered flag to start queueing, then block on
293 // gate2 while the number of reader locks is non-zero.
294 // To release, unset the write-entered flag and signal gate1 to wake all
295 // blocked readers and writers.
296 //
297 // This means that when no reader locks are held readers and writers get
298 // equal priority. When one or more reader locks is held a writer gets
299 // priority and no more reader locks can be taken while the writer is
300 // queued.
301
302 // Only locked when accessing _M_state or waiting on condition variables.
303 mutex _M_mut;
304 // Used to block while write-entered is set or reader count at maximum.
305 condition_variable _M_gate1;
306 // Used to block queued writers while reader count is non-zero.
307 condition_variable _M_gate2;
308 // The write-entered flag and reader count.
309 unsigned _M_state;
310
311 static constexpr unsigned _S_write_entered
312 = 1U << (sizeof(unsigned)*__CHAR_BIT__ - 1);
313 static constexpr unsigned _S_max_readers = ~_S_write_entered;
314
315 // Test whether the write-entered flag is set. _M_mut must be locked.
316 bool _M_write_entered() const { return _M_state & _S_write_entered; }
317
318 // The number of reader locks currently held. _M_mut must be locked.
319 unsigned _M_readers() const { return _M_state & _S_max_readers; }
320
321 public:
322 __shared_mutex_cv() : _M_state(0) {}
323
324 ~__shared_mutex_cv()
325 {
326 __glibcxx_assert( _M_state == 0 );
327 }
328
329 __shared_mutex_cv(const __shared_mutex_cv&) = delete;
330 __shared_mutex_cv& operator=(const __shared_mutex_cv&) = delete;
331
332 // Exclusive ownership
333
334 void
335 lock()
336 {
337 unique_lock<mutex> __lk(_M_mut);
338 // Wait until we can set the write-entered flag.
339 _M_gate1.wait(__lk, [this]{ return !_M_write_entered(); });
340 _M_state |= _S_write_entered;
341 // Then wait until there are no more readers.
342 _M_gate2.wait(__lk, [this]{ return _M_readers() == 0; });
343 }
344
345 bool
346 try_lock()
347 {
348 unique_lock<mutex> __lk(_M_mut, try_to_lock);
349 if (__lk.owns_lock() && _M_state == 0)
350 {
351 _M_state = _S_write_entered;
352 return true;
353 }
354 return false;
355 }
356
357 void
358 unlock()
359 {
360 lock_guard<mutex> __lk(_M_mut);
361 __glibcxx_assert( _M_write_entered() );
362 _M_state = 0;
363 // call notify_all() while mutex is held so that another thread can't
364 // lock and unlock the mutex then destroy *this before we make the call.
365 _M_gate1.notify_all();
366 }
367
368 // Shared ownership
369
370 void
371 lock_shared()
372 {
373 unique_lock<mutex> __lk(_M_mut);
374 _M_gate1.wait(__lk, [this]{ return _M_state < _S_max_readers; });
375 ++_M_state;
376 }
377
378 bool
379 try_lock_shared()
380 {
381 unique_lock<mutex> __lk(_M_mut, try_to_lock);
382 if (!__lk.owns_lock())
383 return false;
384 if (_M_state < _S_max_readers)
385 {
386 ++_M_state;
387 return true;
388 }
389 return false;
390 }
391
392 void
393 unlock_shared()
394 {
395 lock_guard<mutex> __lk(_M_mut);
396 __glibcxx_assert( _M_readers() > 0 );
397 auto __prev = _M_state--;
398 if (_M_write_entered())
399 {
400 // Wake the queued writer if there are no more readers.
401 if (_M_readers() == 0)
402 _M_gate2.notify_one();
403 // No need to notify gate1 because we give priority to the queued
404 // writer, and that writer will eventually notify gate1 after it
405 // clears the write-entered flag.
406 }
407 else
408 {
409 // Wake any thread that was blocked on reader overflow.
410 if (__prev == _S_max_readers)
411 _M_gate1.notify_one();
412 }
413 }
414 };
415#endif
416 /// @endcond
417
#ifdef __cpp_lib_shared_mutex
  /// The standard shared mutex type.
  ///
  /// A thin wrapper that forwards every operation to the platform's best
  /// available implementation (pthread rwlock or condition-variable based).
  class shared_mutex
  {
  public:
    shared_mutex() = default;
    ~shared_mutex() = default;

    shared_mutex(const shared_mutex&) = delete;
    shared_mutex& operator=(const shared_mutex&) = delete;

    // Exclusive ownership

    void
    lock()
    { _M_impl.lock(); }

    [[nodiscard]]
    bool
    try_lock()
    { return _M_impl.try_lock(); }

    void
    unlock()
    { _M_impl.unlock(); }

    // Shared ownership

    void
    lock_shared()
    { _M_impl.lock_shared(); }

    [[nodiscard]]
    bool
    try_lock_shared()
    { return _M_impl.try_lock_shared(); }

    void
    unlock_shared()
    { _M_impl.unlock_shared(); }

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
    typedef void* native_handle_type;
    native_handle_type native_handle() { return _M_impl.native_handle(); }

  private:
    __shared_mutex_pthread _M_impl;
#else
  private:
    __shared_mutex_cv _M_impl;
#endif
  };
#endif // __cpp_lib_shared_mutex
453
  /// @cond undocumented
  // shared_timed_mutex needs timed rwlock operations, so the pthread-based
  // implementation can only be used when both pthread_rwlock_t and the
  // timed lock functions are available; otherwise fall back to the
  // condition-variable implementation.
#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  using __shared_timed_mutex_base = __shared_mutex_pthread;
#else
  using __shared_timed_mutex_base = __shared_mutex_cv;
#endif
  /// @endcond
461
462 /// The standard shared timed mutex type.
463 class shared_timed_mutex
464 : private __shared_timed_mutex_base
465 {
466 using _Base = __shared_timed_mutex_base;
467
468 // Must use the same clock as condition_variable for __shared_mutex_cv.
469#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
470 using __clock_t = chrono::steady_clock;
471#else
472 using __clock_t = chrono::system_clock;
473#endif
474
475 public:
476 shared_timed_mutex() = default;
477 ~shared_timed_mutex() = default;
478
479 shared_timed_mutex(const shared_timed_mutex&) = delete;
480 shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;
481
482 // Exclusive ownership
483
484 void lock() { _Base::lock(); }
485 _GLIBCXX_NODISCARD bool try_lock() { return _Base::try_lock(); }
486 void unlock() { _Base::unlock(); }
487
488 template<typename _Rep, typename _Period>
489 _GLIBCXX_NODISCARD
490 bool
491 try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
492 {
495 ++__rt;
496 return try_lock_until(__clock_t::now() + __rt);
497 }
498
499 // Shared ownership
500
501 void lock_shared() { _Base::lock_shared(); }
502 _GLIBCXX_NODISCARD
503 bool try_lock_shared() { return _Base::try_lock_shared(); }
504 void unlock_shared() { _Base::unlock_shared(); }
505
506 template<typename _Rep, typename _Period>
507 _GLIBCXX_NODISCARD
508 bool
509 try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rtime)
510 {
513 ++__rt;
514 return try_lock_shared_until(__clock_t::now() + __rt);
515 }
516
517#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
518
519 // Exclusive ownership
520
521 template<typename _Duration>
522 _GLIBCXX_NODISCARD
523 bool
524 try_lock_until(const chrono::time_point<chrono::system_clock,
525 _Duration>& __atime)
526 {
527 struct timespec __ts = chrono::__to_timeout_timespec(__atime);
528 int __ret = __glibcxx_rwlock_timedwrlock(&_M_rwlock, &__ts);
529 if (__ret == 0)
530 return true;
531 if (__ret == ETIMEDOUT)
532 return false;
533 // Errors not handled: EINVAL, EDEADLK
534 __glibcxx_assert(__ret == 0);
535 return false;
536 }
537
538#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
539 template<typename _Duration>
540 _GLIBCXX_NODISCARD
541 bool
542 try_lock_until(const chrono::time_point<chrono::steady_clock,
543 _Duration>& __atime)
544 {
545 struct timespec __ts = chrono::__to_timeout_timespec(__atime);
546 int __ret = pthread_rwlock_clockwrlock(&_M_rwlock, CLOCK_MONOTONIC,
547 &__ts);
548 if (__ret == 0)
549 return true;
550 if (__ret == ETIMEDOUT)
551 return false;
552 // Errors not handled: EINVAL, EDEADLK
553 __glibcxx_assert(__ret == 0);
554 return false;
555 }
556#endif
557
558 template<typename _Clock, typename _Duration>
559 _GLIBCXX_NODISCARD
560 bool
561 try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
562 {
563#if __cplusplus > 201703L
564 static_assert(chrono::is_clock_v<_Clock>);
565#endif
566 // The user-supplied clock may not tick at the same rate as
567 // steady_clock, so we must loop in order to guarantee that
568 // the timeout has expired before returning false.
569 typename _Clock::time_point __now = _Clock::now();
570 do {
571 auto __rtime = __atime - __now;
572 if (try_lock_for(__rtime))
573 return true;
574 __now = _Clock::now();
575 } while (__atime > __now);
576 return false;
577 }
578
579 // Shared ownership
580
581 template<typename _Duration>
582 _GLIBCXX_NODISCARD
583 bool
584 try_lock_shared_until(const chrono::time_point<chrono::system_clock,
585 _Duration>& __atime)
586 {
587 struct timespec __ts = chrono::__to_timeout_timespec(__atime);
588
589 int __ret;
590 // Unlike for lock(), we are not allowed to throw an exception so if
591 // the maximum number of read locks has been exceeded, or we would
592 // deadlock, we just try to acquire the lock again (and will time out
593 // eventually).
594 // In cases where we would exceed the maximum number of read locks
595 // throughout the whole time until the timeout, we will fail to
596 // acquire the lock even if it would be logically free; however, this
597 // is allowed by the standard, and we made a "strong effort"
598 // (see C++14 30.4.1.4p26).
599 do
600 __ret = __glibcxx_rwlock_timedrdlock(&_M_rwlock, &__ts);
601 while (__ret == EAGAIN);
602 if (__ret == 0)
603 return true;
604 if (__ret == ETIMEDOUT)
605 return false;
606 // Errors not handled: EINVAL, EDEADLK
607 __glibcxx_assert(__ret == 0);
608 return false;
609 }
610
611#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
612 template<typename _Duration>
613 _GLIBCXX_NODISCARD
614 bool
615 try_lock_shared_until(const chrono::time_point<chrono::steady_clock,
616 _Duration>& __atime)
617 {
618 struct timespec __ts = chrono::__to_timeout_timespec(__atime);
619 int __ret = pthread_rwlock_clockrdlock(&_M_rwlock, CLOCK_MONOTONIC,
620 &__ts);
621 // On self-deadlock, if _GLIBCXX_ASSERTIONS is not defined, we just
622 // fail to acquire the lock. Technically, the program violated the
623 // precondition.
624 if (__ret == 0)
625 return true;
626 if (__ret == ETIMEDOUT)
627 return false;
628 // Errors not handled: EINVAL, EDEADLK
629 __glibcxx_assert(__ret == 0);
630 return false;
631 }
632#endif
633
634 template<typename _Clock, typename _Duration>
635 _GLIBCXX_NODISCARD
636 bool
637 try_lock_shared_until(const chrono::time_point<_Clock,
638 _Duration>& __atime)
639 {
640#if __cplusplus > 201703L
641 static_assert(chrono::is_clock_v<_Clock>);
642#endif
643 // The user-supplied clock may not tick at the same rate as
644 // steady_clock, so we must loop in order to guarantee that
645 // the timeout has expired before returning false.
646 typename _Clock::time_point __now = _Clock::now();
647 do {
648 auto __rtime = __atime - __now;
649 if (try_lock_shared_for(__rtime))
650 return true;
651 __now = _Clock::now();
652 } while (__atime > __now);
653 return false;
654 }
655
656#else // ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
657
658 // Exclusive ownership
659
660 template<typename _Clock, typename _Duration>
661 _GLIBCXX_NODISCARD
662 bool
663 try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
664 {
665 unique_lock<mutex> __lk(_M_mut);
666 if (!_M_gate1.wait_until(__lk, __abs_time,
667 [this]{ return !_M_write_entered(); }))
668 {
669 return false;
670 }
671 _M_state |= _S_write_entered;
672 if (!_M_gate2.wait_until(__lk, __abs_time,
673 [this]{ return _M_readers() == 0; }))
674 {
675 _M_state ^= _S_write_entered;
676 // Wake all threads blocked while the write-entered flag was set.
677 _M_gate1.notify_all();
678 return false;
679 }
680 return true;
681 }
682
683 // Shared ownership
684
685 template <typename _Clock, typename _Duration>
686 _GLIBCXX_NODISCARD
687 bool
688 try_lock_shared_until(const chrono::time_point<_Clock,
689 _Duration>& __abs_time)
690 {
691 unique_lock<mutex> __lk(_M_mut);
692 if (!_M_gate1.wait_until(__lk, __abs_time,
693 [this]{ return _M_state < _S_max_readers; }))
694 {
695 return false;
696 }
697 ++_M_state;
698 return true;
699 }
700
701#endif // _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
702 };
703#endif // _GLIBCXX_HAS_GTHREADS
704
705 /// shared_lock
706 template<typename _Mutex>
707 class shared_lock
708 {
709 public:
710 typedef _Mutex mutex_type;
711
712 // Shared locking
713
714 shared_lock() noexcept : _M_pm(nullptr), _M_owns(false) { }
715
716 explicit
717 shared_lock(mutex_type& __m)
718 : _M_pm(std::__addressof(__m)), _M_owns(true)
719 { __m.lock_shared(); }
720
721 shared_lock(mutex_type& __m, defer_lock_t) noexcept
722 : _M_pm(std::__addressof(__m)), _M_owns(false) { }
723
724 shared_lock(mutex_type& __m, try_to_lock_t)
725 : _M_pm(std::__addressof(__m)), _M_owns(__m.try_lock_shared()) { }
726
727 shared_lock(mutex_type& __m, adopt_lock_t)
728 : _M_pm(std::__addressof(__m)), _M_owns(true) { }
729
730 template<typename _Clock, typename _Duration>
731 shared_lock(mutex_type& __m,
733 : _M_pm(std::__addressof(__m)),
734 _M_owns(__m.try_lock_shared_until(__abs_time)) { }
735
736 template<typename _Rep, typename _Period>
737 shared_lock(mutex_type& __m,
738 const chrono::duration<_Rep, _Period>& __rel_time)
739 : _M_pm(std::__addressof(__m)),
740 _M_owns(__m.try_lock_shared_for(__rel_time)) { }
741
742 ~shared_lock()
743 {
744 if (_M_owns)
745 _M_pm->unlock_shared();
746 }
747
748 shared_lock(shared_lock const&) = delete;
749 shared_lock& operator=(shared_lock const&) = delete;
750
751 shared_lock(shared_lock&& __sl) noexcept : shared_lock()
752 { swap(__sl); }
753
754 shared_lock&
755 operator=(shared_lock&& __sl) noexcept
756 {
757 // _GLIBCXX_RESOLVE_LIB_DEFECTS
758 // 4172. unique_lock self-move-assignment is broken
759 shared_lock(std::move(__sl)).swap(*this);
760 return *this;
761 }
762
763 void
764 lock()
765 {
766 _M_lockable();
767 _M_pm->lock_shared();
768 _M_owns = true;
769 }
770
771 _GLIBCXX_NODISCARD
772 bool
773 try_lock()
774 {
775 _M_lockable();
776 return _M_owns = _M_pm->try_lock_shared();
777 }
778
779 template<typename _Rep, typename _Period>
780 _GLIBCXX_NODISCARD
781 bool
782 try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
783 {
784 _M_lockable();
785 return _M_owns = _M_pm->try_lock_shared_for(__rel_time);
786 }
787
788 template<typename _Clock, typename _Duration>
789 _GLIBCXX_NODISCARD
790 bool
791 try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
792 {
793 _M_lockable();
794 return _M_owns = _M_pm->try_lock_shared_until(__abs_time);
795 }
796
797 void
798 unlock()
799 {
800 if (!_M_owns)
801 __throw_system_error(int(errc::operation_not_permitted));
802 _M_pm->unlock_shared();
803 _M_owns = false;
804 }
805
806 // Setters
807
808 void
809 swap(shared_lock& __u) noexcept
810 {
811 std::swap(_M_pm, __u._M_pm);
812 std::swap(_M_owns, __u._M_owns);
813 }
814
815 mutex_type*
816 release() noexcept
817 {
818 _M_owns = false;
819 return std::__exchange(_M_pm, nullptr);
820 }
821
822 // Getters
823
824 _GLIBCXX_NODISCARD
825 bool owns_lock() const noexcept { return _M_owns; }
826
827 explicit operator bool() const noexcept { return _M_owns; }
828
829 _GLIBCXX_NODISCARD
830 mutex_type* mutex() const noexcept { return _M_pm; }
831
832 private:
833 void
834 _M_lockable() const
835 {
836 if (_M_pm == nullptr)
837 __throw_system_error(int(errc::operation_not_permitted));
838 if (_M_owns)
839 __throw_system_error(int(errc::resource_deadlock_would_occur));
840 }
841
842 mutex_type* _M_pm;
843 bool _M_owns;
844 };
845
846 /// Swap specialization for shared_lock
847 /// @relates shared_mutex
848 template<typename _Mutex>
849 void
850 swap(shared_lock<_Mutex>& __x, shared_lock<_Mutex>& __y) noexcept
851 { __x.swap(__y); }
852
853 /// @} group mutexes
854_GLIBCXX_END_NAMESPACE_VERSION
855} // namespace
856
857#endif // C++14
858
859#endif // _GLIBCXX_SHARED_MUTEX
constexpr __enable_if_is_duration< _ToDur > duration_cast(const duration< _Rep, _Period > &__d)
Definition chrono.h:279
constexpr std::remove_reference< _Tp >::type && move(_Tp &&__t) noexcept
Convert a value to an rvalue.
Definition move.h:138
constexpr _Tp * __addressof(_Tp &__r) noexcept
Same as C++11 std::addressof.
Definition move.h:52
void lock(_L1 &__l1, _L2 &__l2, _L3 &... __l3)
Generic lock.
Definition mutex:686
int try_lock(_L1 &__l1, _L2 &__l2, _L3 &... __l3)
Generic try_lock.
Definition mutex:627
ISO C++ entities toplevel namespace is std.
ratio_greater
Definition ratio:462
The standard shared timed mutex type.
Definition shared_mutex:465
shared_lock
Definition shared_mutex:708
chrono::duration represents a distance between two points in time
Definition chrono.h:516
chrono::time_point represents a point in time as measured by a clock
Definition chrono.h:927
Monotonic clock.
Definition chrono.h:1273
Do not acquire ownership of the mutex.
Definition std_mutex.h:242
Try to acquire ownership of the mutex without blocking.
Definition std_mutex.h:245
Assume the calling thread has already obtained mutex ownership and manage it.
Definition std_mutex.h:249
A movable scoped lock type.
Definition unique_lock.h:63