libstdc++
mutex
Go to the documentation of this file.
1// <mutex> -*- C++ -*-
2
3// Copyright (C) 2003-2025 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file include/mutex
26 * This is a Standard C++ Library header.
27 */
28
29#ifndef _GLIBCXX_MUTEX
30#define _GLIBCXX_MUTEX 1
31
32#ifdef _GLIBCXX_SYSHDR
33#pragma GCC system_header
34#endif
35
36#include <bits/requires_hosted.h> // concurrency
37
38#if __cplusplus < 201103L
39# include <bits/c++0x_warning.h>
40#else
41
42#include <tuple> // std::tuple
43#include <type_traits> // is_same_v
44#include <errno.h> // EAGAIN, EDEADLK
45#include <bits/chrono.h> // duration, time_point, is_clock_v
46#include <bits/functexcept.h> // __throw_system_error
47#include <bits/invoke.h> // __invoke
48#include <bits/move.h> // std::forward
49#include <bits/std_mutex.h>
50#include <bits/unique_lock.h>
51#if ! _GTHREAD_USE_MUTEX_TIMEDLOCK
52# include <condition_variable>
53# include <thread>
54#endif
55#include <ext/atomicity.h> // __gnu_cxx::__is_single_threaded
56
57#if defined _GLIBCXX_HAS_GTHREADS && ! defined _GLIBCXX_HAVE_TLS
58# include <bits/std_function.h> // std::function
59#endif
60
61#define __glibcxx_want_scoped_lock
62#include <bits/version.h>
63
64namespace std _GLIBCXX_VISIBILITY(default)
65{
66_GLIBCXX_BEGIN_NAMESPACE_VERSION
67
68 /**
69 * @addtogroup mutexes
70 * @{
71 */
72
73#ifdef _GLIBCXX_HAS_GTHREADS
74 /// @cond undocumented
75
  // Common base class for std::recursive_mutex and std::recursive_timed_mutex.
  // Owns the native recursive mutex object and manages its lifetime; the
  // derived classes provide the standard lock/try_lock/unlock interface.
  class __recursive_mutex_base
  {
  protected:
    typedef __gthread_recursive_mutex_t		__native_type;

    // Non-copyable: the native mutex must be owned by exactly one object.
    __recursive_mutex_base(const __recursive_mutex_base&) = delete;
    __recursive_mutex_base& operator=(const __recursive_mutex_base&) = delete;

#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
    // The gthreads layer supports static initialization, so no constructor
    // or destructor calls into the native API are needed.
    __native_type  _M_mutex = __GTHREAD_RECURSIVE_MUTEX_INIT;

    __recursive_mutex_base() = default;
#else
    // No static initializer: the native mutex must be initialized and
    // destroyed dynamically.
    __native_type  _M_mutex;

    __recursive_mutex_base()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~__recursive_mutex_base()
    { __gthread_recursive_mutex_destroy(&_M_mutex); }
#endif
  };
102 /// @endcond
103
  /** The standard recursive mutex type.
   *
   * A recursive mutex can be locked more than once by the same thread.
   * Other threads cannot lock the mutex until the owning thread unlocks it
   * as many times as it was locked.
   *
   * @headerfile mutex
   * @since C++11
   */
  class recursive_mutex : private __recursive_mutex_base
  {
  public:
    typedef __native_type* 			native_handle_type;

    recursive_mutex() = default;
    ~recursive_mutex() = default;

    recursive_mutex(const recursive_mutex&) = delete;
    recursive_mutex& operator=(const recursive_mutex&) = delete;

    // Acquire the mutex (blocking), or increase the lock count if this
    // thread already owns it.  Throws std::system_error on failure.
    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    // Attempt to acquire the mutex without blocking; returns true on
    // success.  Any native error code is mapped to failure (false).
    _GLIBCXX_NODISCARD
    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    // Release one level of ownership.  Precondition: the calling thread
    // owns the mutex, so errors from the native call are ignored here.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    // Access to the underlying native mutex object.
    native_handle_type
    native_handle() noexcept
    { return &_M_mutex; }
  };
153
154#if _GTHREAD_USE_MUTEX_TIMEDLOCK
155 /// @cond undocumented
156
  // CRTP base class implementing the try_lock_for/try_lock_until logic
  // shared by timed_mutex and recursive_timed_mutex.  The derived class
  // supplies _M_timedlock (and _M_clocklock where pthread_mutex_clocklock
  // is available).
  template<typename _Derived>
    class __timed_mutex_impl
    {
    protected:
      // Convert a relative timeout into an absolute one against a clock
      // that the underlying OS primitive can actually wait on.
      template<typename _Rep, typename _Period>
	bool
	_M_try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
	{
#if _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
	  // pthread_mutex_clocklock can wait against CLOCK_MONOTONIC, which
	  // is immune to system clock adjustments.
	  using __clock = chrono::steady_clock;
#else
	  // Otherwise the native timed lock waits against the realtime clock.
	  using __clock = chrono::system_clock;
#endif

	  auto __rt = chrono::duration_cast<__clock::duration>(__rtime);
	  // Round up when converting to a coarser tick period, so we never
	  // wake before the requested duration has fully elapsed.
	  if (ratio_greater<__clock::period, _Period>())
	    ++__rt;
	  return _M_try_lock_until(__clock::now() + __rt);
	}

      // Absolute timeout against system_clock: convert to the native
      // seconds/nanoseconds representation and call the derived timedlock.
      template<typename _Duration>
	bool
	_M_try_lock_until(const chrono::time_point<chrono::system_clock,
						   _Duration>& __atime)
	{
	  auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	  auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	  __gthread_time_t __ts = {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	  return static_cast<_Derived*>(this)->_M_timedlock(__ts);
	}

#if _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
      // Absolute timeout against steady_clock: wait on CLOCK_MONOTONIC
      // directly instead of translating to the realtime clock.
      template<typename _Duration>
	bool
	_M_try_lock_until(const chrono::time_point<chrono::steady_clock,
						   _Duration>& __atime)
	{
	  auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	  auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	  __gthread_time_t __ts = {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	  return static_cast<_Derived*>(this)->_M_clocklock(CLOCK_MONOTONIC,
							    __ts);
	}
#endif

      // Absolute timeout against an arbitrary (possibly user-defined) clock.
      template<typename _Clock, typename _Duration>
	bool
	_M_try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
	{
#if __cplusplus > 201703L
	  static_assert(chrono::is_clock_v<_Clock>);
#endif
	  // The user-supplied clock may not tick at the same rate as
	  // steady_clock, so we must loop in order to guarantee that
	  // the timeout has expired before returning false.
	  auto __now = _Clock::now();
	  do {
	    auto __rtime = __atime - __now;
	    if (_M_try_lock_for(__rtime))
	      return true;
	    __now = _Clock::now();
	  } while (__atime > __now);
	  return false;
	}
    };
232 /// @endcond
233
  /** The standard timed mutex type.
   *
   * A non-recursive mutex that supports a timeout when trying to acquire the
   * lock.
   *
   * @headerfile mutex
   * @since C++11
   */
  class timed_mutex
  : private __mutex_base, public __timed_mutex_impl<timed_mutex>
  {
  public:
    typedef __native_type* 		  	native_handle_type;

    timed_mutex() = default;
    ~timed_mutex() = default;

    timed_mutex(const timed_mutex&) = delete;
    timed_mutex& operator=(const timed_mutex&) = delete;

    // Acquire the mutex, blocking indefinitely.  Throws std::system_error
    // on failure.
    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    // Attempt to acquire the mutex without blocking; true on success.
    _GLIBCXX_NODISCARD
    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    // Attempt to acquire the mutex within a relative timeout.
    // Implemented by __timed_mutex_impl::_M_try_lock_for.
    template <class _Rep, class _Period>
      _GLIBCXX_NODISCARD
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return _M_try_lock_for(__rtime); }

    // Attempt to acquire the mutex before an absolute time point.
    template <class _Clock, class _Duration>
      _GLIBCXX_NODISCARD
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      { return _M_try_lock_until(__atime); }

    // Release the mutex.  Precondition: the calling thread owns it, so
    // errors from the native call are ignored here.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle() noexcept
    { return &_M_mutex; }

  private:
    friend class __timed_mutex_impl<timed_mutex>;

    // Hook called by __timed_mutex_impl with a native absolute timespec.
    bool
    _M_timedlock(const __gthread_time_t& __ts)
    { return !__gthread_mutex_timedlock(&_M_mutex, &__ts); }

#if _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
    // Hook for waiting against an explicit clock (e.g. CLOCK_MONOTONIC).
    bool
    _M_clocklock(clockid_t __clockid, const __gthread_time_t& __ts)
    { return !pthread_mutex_clocklock(&_M_mutex, __clockid, &__ts); }
#endif
  };
308
  /** The standard recursive timed mutex type.
   *
   * A recursive mutex that supports a timeout when trying to acquire the
   * lock. A recursive mutex can be locked more than once by the same thread.
   * Other threads cannot lock the mutex until the owning thread unlocks it
   * as many times as it was locked.
   *
   * @headerfile mutex
   * @since C++11
   */
  class recursive_timed_mutex
  : private __recursive_mutex_base,
    public __timed_mutex_impl<recursive_timed_mutex>
  {
  public:
    typedef __native_type* 			native_handle_type;

    recursive_timed_mutex() = default;
    ~recursive_timed_mutex() = default;

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    // Acquire the mutex (blocking), or increase the lock count if this
    // thread already owns it.  Throws std::system_error on failure.
    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    // Attempt to acquire the mutex without blocking; true on success.
    _GLIBCXX_NODISCARD
    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    // Attempt to acquire the mutex within a relative timeout.
    // Implemented by __timed_mutex_impl::_M_try_lock_for.
    template <class _Rep, class _Period>
      _GLIBCXX_NODISCARD
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return _M_try_lock_for(__rtime); }

    // Attempt to acquire the mutex before an absolute time point.
    template <class _Clock, class _Duration>
      _GLIBCXX_NODISCARD
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      { return _M_try_lock_until(__atime); }

    // Release one level of ownership.  Precondition: the calling thread
    // owns the mutex, so errors from the native call are ignored here.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle() noexcept
    { return &_M_mutex; }

  private:
    friend class __timed_mutex_impl<recursive_timed_mutex>;

    // Hook called by __timed_mutex_impl with a native absolute timespec.
    bool
    _M_timedlock(const __gthread_time_t& __ts)
    { return !__gthread_recursive_mutex_timedlock(&_M_mutex, &__ts); }

#if _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
    // Hook for waiting against an explicit clock (e.g. CLOCK_MONOTONIC).
    bool
    _M_clocklock(clockid_t __clockid, const __gthread_time_t& __ts)
    { return !pthread_mutex_clocklock(&_M_mutex, __clockid, &__ts); }
#endif
  };
386
387#else // !_GTHREAD_USE_MUTEX_TIMEDLOCK
388
389 /// timed_mutex
390 class timed_mutex
391 {
392 mutex _M_mut;
393 condition_variable _M_cv;
394 bool _M_locked = false;
395
396 public:
397
398 timed_mutex() = default;
399 ~timed_mutex() { __glibcxx_assert( !_M_locked ); }
400
401 timed_mutex(const timed_mutex&) = delete;
402 timed_mutex& operator=(const timed_mutex&) = delete;
403
404 void
405 lock()
406 {
407 unique_lock<mutex> __lk(_M_mut);
408 _M_cv.wait(__lk, [&]{ return !_M_locked; });
409 _M_locked = true;
410 }
411
412 _GLIBCXX_NODISCARD
413 bool
414 try_lock()
415 {
416 lock_guard<mutex> __lk(_M_mut);
417 if (_M_locked)
418 return false;
419 _M_locked = true;
420 return true;
421 }
422
423 template<typename _Rep, typename _Period>
424 _GLIBCXX_NODISCARD
425 bool
426 try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
427 {
428 unique_lock<mutex> __lk(_M_mut);
429 if (!_M_cv.wait_for(__lk, __rtime, [&]{ return !_M_locked; }))
430 return false;
431 _M_locked = true;
432 return true;
433 }
434
435 template<typename _Clock, typename _Duration>
436 _GLIBCXX_NODISCARD
437 bool
438 try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
439 {
440 unique_lock<mutex> __lk(_M_mut);
441 if (!_M_cv.wait_until(__lk, __atime, [&]{ return !_M_locked; }))
442 return false;
443 _M_locked = true;
444 return true;
445 }
446
447 void
448 unlock()
449 {
450 lock_guard<mutex> __lk(_M_mut);
451 __glibcxx_assert( _M_locked );
452 _M_locked = false;
453 _M_cv.notify_one();
454 }
455 };
456
  /// recursive_timed_mutex
  // Fallback implementation for targets without a native timed lock:
  // a mutex/condition_variable pair guards the owner id and lock count.
  class recursive_timed_mutex
  {
    mutex		_M_mut;
    condition_variable	_M_cv;
    thread::id		_M_owner;      // default-constructed id when not owned
    unsigned		_M_count = 0;  // recursion depth; 0 means unlocked

    // Predicate type that tests whether the current thread can lock a mutex.
    struct _Can_lock
    {
      // Returns true if the mutex is unlocked or is locked by _M_caller.
      bool
      operator()() const noexcept
      { return _M_mx->_M_count == 0 || _M_mx->_M_owner == _M_caller; }

      const recursive_timed_mutex* _M_mx;
      thread::id _M_caller;
    };

  public:

    recursive_timed_mutex() = default;

    // Destroying a locked mutex is a precondition violation.
    ~recursive_timed_mutex() { __glibcxx_assert( _M_count == 0 ); }

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    // Block until this thread can lock (mutex free or already owned by us).
    void
    lock()
    {
      auto __id = this_thread::get_id();
      _Can_lock __can_lock{this, __id};
      unique_lock<mutex> __lk(_M_mut);
      _M_cv.wait(__lk, __can_lock);
      // The count is saturated: another lock level cannot be represented.
      if (_M_count == -1u)
	__throw_system_error(EAGAIN); // [thread.timedmutex.recursive]/3
      _M_owner = __id;
      ++_M_count;
    }

    _GLIBCXX_NODISCARD
    bool
    try_lock()
    {
      auto __id = this_thread::get_id();
      _Can_lock __can_lock{this, __id};
      lock_guard<mutex> __lk(_M_mut);
      if (!__can_lock())
	return false;
      // Saturated count: report failure rather than overflowing.
      if (_M_count == -1u)
	return false;
      _M_owner = __id;
      ++_M_count;
      return true;
    }

    // Wait up to the relative timeout for the mutex to become lockable.
    template<typename _Rep, typename _Period>
      _GLIBCXX_NODISCARD
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
	auto __id = this_thread::get_id();
	_Can_lock __can_lock{this, __id};
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_cv.wait_for(__lk, __rtime, __can_lock))
	  return false;
	if (_M_count == -1u)
	  return false;
	_M_owner = __id;
	++_M_count;
	return true;
      }

    // Wait until the absolute deadline for the mutex to become lockable.
    template<typename _Clock, typename _Duration>
      _GLIBCXX_NODISCARD
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
	auto __id = this_thread::get_id();
	_Can_lock __can_lock{this, __id};
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_cv.wait_until(__lk, __atime, __can_lock))
	  return false;
	if (_M_count == -1u)
	  return false;
	_M_owner = __id;
	++_M_count;
	return true;
      }

    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_owner == this_thread::get_id() );
      __glibcxx_assert( _M_count > 0 );
      // Only a full release (count back to zero) makes the mutex available
      // to other threads, so only then is the owner cleared and a waiter
      // woken.
      if (--_M_count == 0)
	{
	  _M_owner = {};
	  _M_cv.notify_one();
	}
    }
  };
561
562#endif
563#endif // _GLIBCXX_HAS_GTHREADS
564
565 /// @cond undocumented
566 namespace __detail
567 {
568 // Lock the last lockable, after all previous ones are locked.
569 template<typename _Lockable>
570 inline int
571 __try_lock_impl(_Lockable& __l)
572 {
573 if (unique_lock<_Lockable> __lock{__l, try_to_lock})
574 {
575 __lock.release();
576 return -1;
577 }
578 else
579 return 0;
580 }
581
582 // Lock each lockable in turn.
583 // Use iteration if all lockables are the same type, recursion otherwise.
584 template<typename _L0, typename... _Lockables>
585 inline int
586 __try_lock_impl(_L0& __l0, _Lockables&... __lockables)
587 {
588#if __cplusplus >= 201703L
589 if constexpr ((is_same_v<_L0, _Lockables> && ...))
590 {
591 constexpr int _Np = 1 + sizeof...(_Lockables);
592 unique_lock<_L0> __locks[_Np] = {
593 {__l0, defer_lock}, {__lockables, defer_lock}...
594 };
595 for (int __i = 0; __i < _Np; ++__i)
596 {
597 if (!__locks[__i].try_lock())
598 {
599 const int __failed = __i;
600 while (__i--)
601 __locks[__i].unlock();
602 return __failed;
603 }
604 }
605 for (auto& __l : __locks)
606 __l.release();
607 return -1;
608 }
609 else
610#endif
611 if (unique_lock<_L0> __lock{__l0, try_to_lock})
612 {
613 int __idx = __detail::__try_lock_impl(__lockables...);
614 if (__idx == -1)
615 {
616 __lock.release();
617 return -1;
618 }
619 return __idx + 1;
620 }
621 else
622 return 0;
623 }
624
625 } // namespace __detail
626 /// @endcond
627
628 /** @brief Generic try_lock.
629 * @param __l1 Meets Lockable requirements (try_lock() may throw).
630 * @param __l2 Meets Lockable requirements (try_lock() may throw).
631 * @param __l3 Meets Lockable requirements (try_lock() may throw).
632 * @return Returns -1 if all try_lock() calls return true. Otherwise returns
633 * a 0-based index corresponding to the argument that returned false.
634 * @post Either all arguments are locked, or none will be.
635 *
636 * Sequentially calls try_lock() on each argument.
637 */
638 template<typename _L1, typename _L2, typename... _L3>
639 _GLIBCXX_NODISCARD
640 inline int
641 try_lock(_L1& __l1, _L2& __l2, _L3&... __l3)
642 {
643 return __detail::__try_lock_impl(__l1, __l2, __l3...);
644 }
645
646 /// @cond undocumented
  namespace __detail
  {
    // This function can recurse up to N levels deep, for N = 1+sizeof...(L1).
    // On each recursion the lockables are rotated left one position,
    // e.g. depth 0: l0, l1, l2; depth 1: l1, l2, l0; depth 2: l2, l0, l1.
    // When a call to l_i.try_lock() fails it recurses/returns to depth=i
    // so that l_i is the first argument, and then blocks until l_i is locked.
    template<typename _L0, typename... _L1>
      void
      __lock_impl(int& __i, int __depth, _L0& __l0, _L1&... __l1)
      {
	// __i is the absolute index (in the caller's argument order) of the
	// lockable to block on next; -1 signals overall success.
	while (__i >= __depth)
	  {
	    if (__i == __depth)
	      {
		int __failed = 1; // index that couldn't be locked
		{
		  // Block until the first lockable is owned, then try the
		  // rest without blocking.
		  unique_lock<_L0> __first(__l0);
		  // __try_lock_impl returns -1 on success, making __failed
		  // zero; otherwise __failed is 1 + the failing offset.
		  __failed += __detail::__try_lock_impl(__l1...);
		  if (!__failed)
		    {
		      __i = -1; // finished
		      __first.release(); // keep all locks held on success
		      return;
		    }
		  // ~__first releases __l0 before we yield and retry.
		}
#if defined _GLIBCXX_HAS_GTHREADS && defined _GLIBCXX_USE_SCHED_YIELD
		// Give the owner of the contended lockable a chance to run.
		__gthread_yield();
#endif
		constexpr auto __n = 1 + sizeof...(_L1);
		// Translate the failing offset back to an absolute index,
		// so the next iteration blocks on that lockable first.
		__i = (__depth + __failed) % __n;
	      }
	    else // rotate left until l_i is first.
	      __detail::__lock_impl(__i, __depth + 1, __l1..., __l0);
	  }
      }

  } // namespace __detail
685 /// @endcond
686
  /** @brief Generic lock.
   * @param __l1 Meets Lockable requirements (try_lock() may throw).
   * @param __l2 Meets Lockable requirements (try_lock() may throw).
   * @param __l3 Meets Lockable requirements (try_lock() may throw).
   * @throw An exception thrown by an argument's lock() or try_lock() member.
   * @post All arguments are locked.
   *
   * All arguments are locked via a sequence of calls to lock(), try_lock()
   * and unlock(). If this function exits via an exception any locks that
   * were obtained will be released.
   */
  template<typename _L1, typename _L2, typename... _L3>
    void
    lock(_L1& __l1, _L2& __l2, _L3&... __l3)
    {
#if __cplusplus >= 201703L
      // Homogeneous case: iterate over an array of deferred guards instead
      // of using the recursive rotation in __detail::__lock_impl.
      if constexpr (is_same_v<_L1, _L2> && (is_same_v<_L1, _L3> && ...))
	{
	  constexpr int _Np = 2 + sizeof...(_L3);
	  unique_lock<_L1> __locks[] = {
	    {__l1, defer_lock}, {__l2, defer_lock}, {__l3, defer_lock}...
	  };
	  // Index of the lock acquired with a blocking lock() call; moves
	  // to whichever try_lock() failed last, to avoid livelock.
	  int __first = 0;
	  do {
	    __locks[__first].lock();
	    for (int __j = 1; __j < _Np; ++__j)
	      {
		const int __idx = (__first + __j) % _Np;
		if (!__locks[__idx].try_lock())
		  {
		    // Back off: release everything held (reverse order of
		    // acquisition) and block on the failed lock next time.
		    for (int __k = __j; __k != 0; --__k)
		      __locks[(__first + __k - 1) % _Np].unlock();
		    __first = __idx;
		    break;
		  }
	      }
	  } while (!__locks[__first].owns_lock());

	  // All locked: disengage the guards so the caller keeps ownership.
	  for (auto& __l : __locks)
	    __l.release();
	}
      else
#endif
      {
	int __i = 0;
	__detail::__lock_impl(__i, 0, __l1, __l2, __l3...);
      }
    }
735
736#ifdef __cpp_lib_scoped_lock // C++ >= 17
737 /** @brief A scoped lock type for multiple lockable objects.
738 *
739 * A scoped_lock controls mutex ownership within a scope, releasing
740 * ownership in the destructor.
741 *
742 * @headerfile mutex
743 * @since C++17
744 */
745 template<typename... _MutexTypes>
746 class scoped_lock
747 {
748 public:
749
750 [[nodiscard]]
751 explicit scoped_lock(_MutexTypes&... __m) : _M_devices(std::tie(__m...))
752 { std::lock(__m...); }
753
754 [[nodiscard]]
755 explicit scoped_lock(adopt_lock_t, _MutexTypes&... __m) noexcept
756 : _M_devices(std::tie(__m...))
757 { } // calling thread owns mutex
758
759 ~scoped_lock()
760 { std::apply([](auto&... __m) { (__m.unlock(), ...); }, _M_devices); }
761
762 scoped_lock(const scoped_lock&) = delete;
763 scoped_lock& operator=(const scoped_lock&) = delete;
764
765 private:
766 tuple<_MutexTypes&...> _M_devices;
767 };
768
769 template<>
770 class scoped_lock<>
771 {
772 public:
773 explicit scoped_lock() = default;
774 explicit scoped_lock(adopt_lock_t) noexcept { }
775 ~scoped_lock() = default;
776
777 scoped_lock(const scoped_lock&) = delete;
778 scoped_lock& operator=(const scoped_lock&) = delete;
779 };
780
781 template<typename _Mutex>
782 class scoped_lock<_Mutex>
783 {
784 public:
785 using mutex_type = _Mutex;
786
787 [[nodiscard]]
788 explicit scoped_lock(mutex_type& __m) : _M_device(__m)
789 { _M_device.lock(); }
790
791 [[nodiscard]]
792 explicit scoped_lock(adopt_lock_t, mutex_type& __m) noexcept
793 : _M_device(__m)
794 { } // calling thread owns mutex
795
796 ~scoped_lock()
797 { _M_device.unlock(); }
798
799 scoped_lock(const scoped_lock&) = delete;
800 scoped_lock& operator=(const scoped_lock&) = delete;
801
802 private:
803 mutex_type& _M_device;
804 };
805#endif // __cpp_lib_scoped_lock
806
807#ifdef _GLIBCXX_HAS_GTHREADS
  /// Flag type used by std::call_once
  struct once_flag
  {
    constexpr once_flag() noexcept = default;

    /// Deleted copy constructor
    once_flag(const once_flag&) = delete;
    /// Deleted assignment operator
    once_flag& operator=(const once_flag&) = delete;

  private:
    // For gthreads targets a pthread_once_t is used with pthread_once, but
    // for most targets this doesn't work correctly for exceptional executions.
    __gthread_once_t _M_once = __GTHREAD_ONCE_INIT;

    // RAII helper that publishes the type-erased callable for __once_proxy;
    // defined differently depending on whether TLS is available.
    struct _Prepare_execution;

    // call_once needs access to _M_once and _Prepare_execution.
    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
  };
829
830 /// @cond undocumented
831# ifdef _GLIBCXX_HAVE_TLS
  // If TLS is available use thread-local state for the type-erased callable
  // that is being run by std::call_once in the current thread.
  extern __thread void* __once_callable;
  extern __thread void (*__once_call)();

  // RAII type to set up state for pthread_once call.
  struct once_flag::_Prepare_execution
  {
    // Publish the callable through the thread-local pointers so that the
    // extern "C" __once_proxy function can find and invoke it.
    template<typename _Callable>
      explicit
      _Prepare_execution(_Callable& __c)
      {
	// Store address in thread-local pointer:
	__once_callable = std::__addressof(__c);
	// Trampoline function to invoke the closure via thread-local pointer:
	__once_call = [] { (*static_cast<_Callable*>(__once_callable))(); };
      }

    ~_Prepare_execution()
    {
      // PR libstdc++/82481
      // Clear the pointers on both normal and exceptional exit, so stale
      // state is never observed by a later call_once on this thread.
      __once_callable = nullptr;
      __once_call = nullptr;
    }

    _Prepare_execution(const _Prepare_execution&) = delete;
    _Prepare_execution& operator=(const _Prepare_execution&) = delete;
  };
860
861# else
  // Without TLS use a global std::mutex and store the callable in a
  // global std::function.
  extern function<void()> __once_functor;

  extern void
  __set_once_functor_lock_ptr(unique_lock<mutex>*);

  extern mutex&
  __get_once_mutex();

  // RAII type to set up state for pthread_once call.
  struct once_flag::_Prepare_execution
  {
    template<typename _Callable>
      explicit
      _Prepare_execution(_Callable& __c)
      {
	// Store the callable in the global std::function
	__once_functor = __c;
	// Hand our lock to the library; NOTE(review): presumably so the
	// library can release it once the functor has been consumed — the
	// other half of this protocol lives in src/c++11/mutex.cc.
	__set_once_functor_lock_ptr(&_M_functor_lock);
      }

    ~_Prepare_execution()
    {
      // If we still hold the lock, detach it from the library again.
      if (_M_functor_lock)
	__set_once_functor_lock_ptr(nullptr);
    }

  private:
    // Serializes all users of the global __once_functor object.
    // XXX This deadlocks if used recursively (PR 97949)
    unique_lock<mutex> _M_functor_lock{__get_once_mutex()};

    _Prepare_execution(const _Prepare_execution&) = delete;
    _Prepare_execution& operator=(const _Prepare_execution&) = delete;
  };
897# endif
898 /// @endcond
899
  // This function is passed to pthread_once by std::call_once.
  // It runs __once_call() or __once_functor().
  extern "C" void __once_proxy(void);

  /// Invoke a callable and synchronize with other calls using the same flag
  template<typename _Callable, typename... _Args>
    void
    call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
    {
      // Closure type that runs the function
      auto __callable = [&] {
	  std::__invoke(std::forward<_Callable>(__f),
			std::forward<_Args>(__args)...);
      };

      // Publish the closure (via TLS pointers or the global std::function)
      // so that __once_proxy can find and run it; unpublished on scope exit.
      once_flag::_Prepare_execution __exec(__callable);

      // XXX pthread_once does not reset the flag if an exception is thrown.
      if (int __e = __gthread_once(&__once._M_once, &__once_proxy))
	__throw_system_error(__e);
    }
921
922#else // _GLIBCXX_HAS_GTHREADS
923
  /// Flag type used by std::call_once
  struct once_flag
  {
    constexpr once_flag() noexcept = default;

    /// Deleted copy constructor
    once_flag(const once_flag&) = delete;
    /// Deleted assignment operator
    once_flag& operator=(const once_flag&) = delete;

  private:
    // There are two different std::once_flag interfaces, abstracting four
    // different implementations.
    // The single-threaded interface uses the _M_activate() and _M_finish(bool)
    // functions, which start and finish an active execution respectively.
    // See [thread.once.callonce] in C++11 for the definition of
    // active/passive/returning/exceptional executions.
    enum _Bits : int { _Init = 0, _Active = 1, _Done = 2 };

    // The three-state flag: _Init (never run), _Active (running now),
    // _Done (a returning execution has completed).
    int _M_once = _Bits::_Init;

    // Check to see if all executions will be passive now.
    bool
    _M_passive() const noexcept;

    // Attempts to begin an active execution.
    bool _M_activate();

    // Must be called to complete an active execution.
    // The argument is true if the active execution was a returning execution,
    // false if it was an exceptional execution.
    void _M_finish(bool __returning) noexcept;

    // RAII helper to call _M_finish.
    struct _Active_execution
    {
      explicit _Active_execution(once_flag& __flag) : _M_flag(__flag) { }

      // Runs on both normal and exceptional exit from call_once; an
      // exceptional execution resets the flag so a later call can retry.
      ~_Active_execution() { _M_flag._M_finish(_M_returning); }

      _Active_execution(const _Active_execution&) = delete;
      _Active_execution& operator=(const _Active_execution&) = delete;

      once_flag& _M_flag;
      // Set to true by call_once only after the callable returns normally.
      bool _M_returning = false;
    };

    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
  };
975
  // Inline definitions of std::once_flag members for single-threaded targets.

  // True when a returning execution has already completed: every subsequent
  // call_once on this flag is a passive execution (a no-op).
  inline bool
  once_flag::_M_passive() const noexcept
  { return _M_once == _Bits::_Done; }
981
982 inline bool
983 once_flag::_M_activate()
984 {
985 if (_M_once == _Bits::_Init) [[__likely__]]
986 {
987 _M_once = _Bits::_Active;
988 return true;
989 }
990 else if (_M_passive()) // Caller should have checked this already.
991 return false;
992 else
993 __throw_system_error(EDEADLK);
994 }
995
996 inline void
997 once_flag::_M_finish(bool __returning) noexcept
998 { _M_once = __returning ? _Bits::_Done : _Bits::_Init; }
999
1000 /// Invoke a callable and synchronize with other calls using the same flag
1001 template<typename _Callable, typename... _Args>
1002 inline void
1003 call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
1004 {
1005 if (__once._M_passive())
1006 return;
1007 else if (__once._M_activate())
1008 {
1009 once_flag::_Active_execution __exec(__once);
1010
1011 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1012 // 2442. call_once() shouldn't DECAY_COPY()
1013 std::__invoke(std::forward<_Callable>(__f),
1014 std::forward<_Args>(__args)...);
1015
1016 // __f(__args...) did not throw
1017 __exec._M_returning = true;
1018 }
1019 }
1020#endif // _GLIBCXX_HAS_GTHREADS
1021
1022 /// @} group mutexes
1023_GLIBCXX_END_NAMESPACE_VERSION
1024} // namespace
1025
1026#endif // C++11
1027
1028#endif // _GLIBCXX_MUTEX