#ifndef _GLIBCXX_BARRIER
#define _GLIBCXX_BARRIER 1
#pragma GCC system_header

#include <bits/requires_hosted.h> // <barrier> depends on concurrency support

#define __glibcxx_want_barrier
#include <bits/version.h>

#ifdef __cpp_lib_barrier

#include <bits/atomic_base.h>
#include <bits/std_thread.h>
#include <bits/unique_ptr.h>

#include <array>
namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
  struct __empty_completion
  {
    _GLIBCXX_ALWAYS_INLINE
    void
    operator()() noexcept
    { }
  };
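  // The implementation below is a classic tree barrier, flattened into
  // cache-line-sized arrays of ticket bytes to avoid cache-line thrashing.
  // Callers do not supply thread indices, so each arriving thread derives
  // a starting slot by hashing its thread id (see __tree_barrier::arrive).
  // A phase is identified by a single byte: an arrival advances a ticket
  // by one step when pairing up, and by two steps when a pair (or a lone
  // odd thread) completes a round.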
  enum class __barrier_phase_t : unsigned char { };
  struct __tree_barrier_base
  {
    static constexpr ptrdiff_t
    max() noexcept
    { return __PTRDIFF_MAX__ - 1; }

  protected:
    using __atomic_phase_ref_t = std::__atomic_ref<__barrier_phase_t>;
    using __atomic_phase_const_ref_t = std::__atomic_ref<const __barrier_phase_t>;
    static constexpr auto __phase_alignment =
      __atomic_phase_ref_t::required_alignment;
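    // Flattened tree state: one cache-line-aligned node per pair of
    // participating threads, with one ticket byte per reduction round.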
    using __tickets_t = std::array<__barrier_phase_t, 64>;
    struct alignas(64) __state_t
    {
      alignas(__phase_alignment) __tickets_t __tickets;
    };
    ptrdiff_t _M_expected;
    __atomic_base<__state_t*> _M_state{nullptr};
    __atomic_base<ptrdiff_t> _M_expected_adjustment{0};
    alignas(__phase_alignment) __barrier_phase_t _M_phase{};
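    // The constructor is usable in constant expressions, where the state
    // array cannot be heap-allocated; in that case allocation is deferred
    // to the first call to arrive() (see __tree_barrier::arrive below).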
    explicit constexpr
    __tree_barrier_base(ptrdiff_t __expected)
    : _M_expected(__expected)
    {
      __glibcxx_assert(__expected >= 0 && __expected <= max());

      if (!std::is_constant_evaluated())
        _M_state.store(_M_alloc_state().release(), memory_order_release);
    }
    ~__tree_barrier_base()
    { delete[] _M_state.load(memory_order_relaxed); }
    __tree_barrier_base(const __tree_barrier_base&) = delete;
    __tree_barrier_base& operator=(const __tree_barrier_base&) = delete;
    unique_ptr<__state_t[]>
    _M_alloc_state()
    {
      size_t const __count = (_M_expected + 1) >> 1;
      return std::make_unique<__state_t[]>(__count);
    }
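    // Arrive at the barrier for the phase identified by __old_phase,
    // starting at slot __current.  Returns true if the calling thread won
    // the final round and must run the completion step and release the
    // waiters, false if another thread carries the arrival forward.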
    bool
    _M_arrive(__barrier_phase_t __old_phase, size_t __current)
    {
      const auto __old_phase_val = static_cast<unsigned char>(__old_phase);
      const auto __half_step =
        static_cast<__barrier_phase_t>(__old_phase_val + 1);
      const auto __full_step =
        static_cast<__barrier_phase_t>(__old_phase_val + 2);
      size_t __current_expected = _M_expected;
      __current %= ((_M_expected + 1) >> 1);
      __state_t* const __state = _M_state.load(memory_order_relaxed);
      for (int __round = 0; ; ++__round)
        {
          if (__current_expected <= 1)
            return true;
          size_t const __end_node = ((__current_expected + 1) >> 1),
            __last_node = __end_node - 1;
          for ( ; ; ++__current)
            {
              if (__current == __end_node)
                __current = 0;
              auto __expect = __old_phase;
              __atomic_phase_ref_t __phase(&__state[__current]
                                             .__tickets[__round]);
              if (__current == __last_node && (__current_expected & 1))
                {
                  if (__phase.compare_exchange_strong(__expect, __full_step,
                                                      memory_order_acq_rel))
                    break; // 1 of 1 at this node: go to the next __round
                }
              else if (__phase.compare_exchange_strong(__expect, __half_step,
                                                       memory_order_acq_rel))
                {
                  return false; // 1 of 2 at this node: done with arrival
                }
              else if (__expect == __half_step)
                {
                  if (__phase.compare_exchange_strong(__expect, __full_step,
                                                      memory_order_acq_rel))
                    break; // 2 of 2 at this node: go to the next __round
                }
            }
          __current_expected = __last_node + 1;
          __current /= 2;
        }
    }
  };
  template<typename _CompletionF>
    class __tree_barrier : public __tree_barrier_base
    {
      [[no_unique_address]] _CompletionF _M_completion;
      void _M_invoke_completion() noexcept { _M_completion(); }

    public:
      using arrival_token = __barrier_phase_t;
      constexpr
      __tree_barrier(ptrdiff_t __expected, _CompletionF __completion)
      : __tree_barrier_base(__expected), _M_completion(std::move(__completion))
      { }
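      // Returns the phase observed on entry as the arrival token; the
      // thread that completes the phase advances _M_phase by two steps
      // and wakes all waiters.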
      [[nodiscard]] arrival_token
      arrive(ptrdiff_t __update)
      {
        __glibcxx_assert(__update > 0);

        std::hash<std::thread::id> __hasher;
        size_t __current = __hasher(std::this_thread::get_id());
        __atomic_phase_ref_t __phase(&_M_phase);
        const auto __old_phase = __phase.load(memory_order_relaxed);
        const auto __cur = static_cast<unsigned char>(__old_phase);
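        // If the barrier was constructed during constant evaluation the
        // state array does not exist yet; the first thread to arrive
        // allocates it and publishes it with a CAS, so exactly one
        // allocation survives.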
        if (__cur == 0 && !_M_state.load(memory_order_relaxed)) [[unlikely]]
          {
            auto __p = _M_alloc_state();
            __state_t* __val = nullptr;
            if (_M_state.compare_exchange_strong(__val, __p.get(),
                                                 memory_order_seq_cst,
                                                 memory_order_acquire))
              __p.release();
          }
        for (; __update; --__update)
          {
            if (_M_arrive(__old_phase, __current))
              {
                _M_invoke_completion();
                _M_expected += _M_expected_adjustment.load(memory_order_relaxed);
                _M_expected_adjustment.store(0, memory_order_relaxed);
                auto __new_phase = static_cast<__barrier_phase_t>(__cur + 2);
                __phase.store(__new_phase, memory_order_release);
                __phase.notify_all();
              }
          }
        return __old_phase;
      }
      void
      wait(arrival_token&& __old_phase) const
      {
        __atomic_phase_const_ref_t __phase(&_M_phase);
        __phase.wait(__old_phase, memory_order_acquire);
      }
      void
      arrive_and_drop()
      {
        _M_expected_adjustment.fetch_sub(1, memory_order_relaxed);
        (void)arrive(1);
      }
    };
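  // std::barrier: a reusable thread-coordination point.  A phase completes
  // when the expected number of threads has arrived; the completion
  // function then runs exactly once before any waiting thread is released
  // into the next phase.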
  template<typename _CompletionF = __empty_completion>
    class barrier
    {
      static_assert(is_invocable_v<_CompletionF&>);

      using __algorithm_t = __tree_barrier<_CompletionF>;
      __algorithm_t _M_b;

    public:
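      // Move-only token type returned by arrive(); it wraps the phase
      // value captured at arrival, which wait() compares against the
      // current phase.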
      class arrival_token final
      {
      public:
        arrival_token(arrival_token&&) = default;
        arrival_token& operator=(arrival_token&&) = default;
        ~arrival_token() = default;
      private:
        friend class barrier;
        using __token = typename __algorithm_t::arrival_token;
        explicit arrival_token(__token __tok) noexcept : _M_tok(__tok) { }
        __token _M_tok;
      };
      static constexpr ptrdiff_t
      max() noexcept
      { return __algorithm_t::max(); }
      explicit constexpr
      barrier(ptrdiff_t __count, _CompletionF __completion = _CompletionF())
      : _M_b(__count, std::move(__completion))
      { }
      barrier(barrier const&) = delete;
      barrier& operator=(barrier const&) = delete;
      [[nodiscard]] arrival_token
      arrive(ptrdiff_t __update = 1)
      { return arrival_token{_M_b.arrive(__update)}; }
      void
      wait(arrival_token&& __phase) const
      { _M_b.wait(std::move(__phase._M_tok)); }
      void
      arrive_and_wait()
      { wait(arrive()); }

      void
      arrive_and_drop()
      { _M_b.arrive_and_drop(); }
    };
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std
#endif // __cpp_lib_barrier
#endif // _GLIBCXX_BARRIER
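// A minimal usage sketch (illustrative, not part of this header), assuming a
// C++20 toolchain where <barrier> is available; the names `sync_point` and
// `worker` are hypothetical:
//
//   #include <barrier>
//   #include <cstdio>
//   #include <thread>
//
//   int main()
//   {
//     // The completion function runs exactly once per phase, after the
//     // last of the three threads arrives and before any of them resumes.
//     std::barrier sync_point(3, []() noexcept { std::puts("phase done"); });
//
//     auto worker = [&]
//     {
//       for (int step = 0; step < 2; ++step)
//         sync_point.arrive_and_wait(); // block until all three arrive
//     };
//
//     std::jthread t1(worker), t2(worker), t3(worker);
//   }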