// -*- C++ -*- header.

// Copyright (C) 2015-2021 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/atomic_futex.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly.
 */

#ifndef _GLIBCXX_ATOMIC_FUTEX_H
#define _GLIBCXX_ATOMIC_FUTEX_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <atomic>
#include <chrono>
#if ! (defined(_GLIBCXX_HAVE_LINUX_FUTEX) && ATOMIC_INT_LOCK_FREE > 1)
#include <mutex>
#include <condition_variable>
#endif

#ifndef _GLIBCXX_ALWAYS_INLINE
#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif
namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

#ifdef _GLIBCXX_HAS_GTHREADS
#if defined(_GLIBCXX_HAVE_LINUX_FUTEX) && ATOMIC_INT_LOCK_FREE > 1
  struct __atomic_futex_unsigned_base
  {
    // __s and __ns are measured against CLOCK_REALTIME. Returns false
    // iff a timeout occurred.
    bool
    _M_futex_wait_until(unsigned *__addr, unsigned __val, bool __has_timeout,
	chrono::seconds __s, chrono::nanoseconds __ns);

    // __s and __ns are measured against CLOCK_MONOTONIC. Returns
    // false iff a timeout occurred.
    bool
    _M_futex_wait_until_steady(unsigned *__addr, unsigned __val,
	bool __has_timeout, chrono::seconds __s, chrono::nanoseconds __ns);

    // This can be executed after the object has been destroyed.
    static void _M_futex_notify_all(unsigned* __addr);
  };
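
  // The members above are defined in the compiled library
  // (src/c++11/futex.cc).  As a rough sketch of the idea only -- not this
  // library's actual implementation -- a wait maps onto the Linux futex
  // syscall roughly as follows, with the conversion of (__s, __ns) to a
  // relative timeout elided and __futex_wait_sketch a hypothetical name:
  //
  // @code
  // // #include <cerrno>
  // // #include <unistd.h>
  // // #include <sys/syscall.h>
  // // #include <linux/futex.h>
  // bool
  // __futex_wait_sketch(unsigned* __addr, unsigned __val,
  //                     const struct timespec* __rel_timeout)
  // {
  //   // FUTEX_WAIT sleeps only while *__addr still equals __val, which
  //   // closes the race between the caller's last load and the sleep.
  //   long __r = syscall(SYS_futex, __addr, FUTEX_WAIT, __val,
  //                      __rel_timeout, nullptr, 0);
  //   if (__r == -1 && errno == ETIMEDOUT)
  //     return false;  // Timed out.
  //   // Woken explicitly, value already changed (EAGAIN), or a
  //   // spurious wake-up (EINTR); the caller re-checks the value.
  //   return true;
  // }
  // @endcode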

  template <unsigned _Waiter_bit = 0x80000000>
  class __atomic_futex_unsigned : __atomic_futex_unsigned_base
  {
    typedef chrono::steady_clock __clock_t;

    // This must be lock-free, and the contained unsigned must be at
    // offset 0 within the atomic: the futex operations cast &_M_data
    // to unsigned* and operate on it directly.
    atomic<unsigned> _M_data;

  public:
    explicit
    __atomic_futex_unsigned(unsigned __data) : _M_data(__data)
    { }

    _GLIBCXX_ALWAYS_INLINE unsigned
    _M_load(memory_order __mo)
    {
      return _M_data.load(__mo) & ~_Waiter_bit;
    }

  private:
    // If a timeout occurs, returns a current value after the timeout;
    // otherwise, returns the operand's value if equal is true or a different
    // value if equal is false.
    // The assumed value is the caller's assumption about the current value
    // when making the call.
    // __s and __ns are measured against CLOCK_REALTIME.
    unsigned
    _M_load_and_test_until(unsigned __assumed, unsigned __operand,
	bool __equal, memory_order __mo, bool __has_timeout,
	chrono::seconds __s, chrono::nanoseconds __ns)
    {
      for (;;)
	{
	  // Don't bother checking the value again because we expect the caller
	  // to have done it recently.
	  // memory_order_relaxed is sufficient because we can rely on just
	  // the modification order (_M_store_notify_all uses an atomic RMW
	  // operation too), and the futex syscalls synchronize between
	  // themselves.
	  _M_data.fetch_or(_Waiter_bit, memory_order_relaxed);
	  bool __ret = _M_futex_wait_until((unsigned*)(void*)&_M_data,
					   __assumed | _Waiter_bit,
					   __has_timeout, __s, __ns);
	  // Fetch the current value after waiting (_M_load masks
	  // _Waiter_bit out of the result).
	  __assumed = _M_load(__mo);
	  if (!__ret || ((__operand == __assumed) == __equal))
	    return __assumed;
	  // TODO adapt wait time
	}
    }

    // If a timeout occurs, returns a current value after the timeout;
    // otherwise, returns the operand's value if equal is true or a different
    // value if equal is false.
    // The assumed value is the caller's assumption about the current value
    // when making the call.
    // __s and __ns are measured against CLOCK_MONOTONIC.
    unsigned
    _M_load_and_test_until_steady(unsigned __assumed, unsigned __operand,
	bool __equal, memory_order __mo, bool __has_timeout,
	chrono::seconds __s, chrono::nanoseconds __ns)
    {
      for (;;)
	{
	  // Don't bother checking the value again because we expect the caller
	  // to have done it recently.
	  // memory_order_relaxed is sufficient because we can rely on just
	  // the modification order (_M_store_notify_all uses an atomic RMW
	  // operation too), and the futex syscalls synchronize between
	  // themselves.
	  _M_data.fetch_or(_Waiter_bit, memory_order_relaxed);
	  bool __ret = _M_futex_wait_until_steady((unsigned*)(void*)&_M_data,
					   __assumed | _Waiter_bit,
					   __has_timeout, __s, __ns);
	  // Fetch the current value after waiting (_M_load masks
	  // _Waiter_bit out of the result).
	  __assumed = _M_load(__mo);
	  if (!__ret || ((__operand == __assumed) == __equal))
	    return __assumed;
	  // TODO adapt wait time
	}
    }

    // Returns the operand's value if equal is true or a different value if
    // equal is false.
    // The assumed value is the caller's assumption about the current value
    // when making the call.
    unsigned
    _M_load_and_test(unsigned __assumed, unsigned __operand,
	bool __equal, memory_order __mo)
    {
      return _M_load_and_test_until(__assumed, __operand, __equal, __mo,
				    false, {}, {});
    }

    // If a timeout occurs, returns a current value after the timeout;
    // otherwise, returns the operand's value if equal is true or a different
    // value if equal is false.
    // The assumed value is the caller's assumption about the current value
    // when making the call.
    template<typename _Dur>
    unsigned
    _M_load_and_test_until_impl(unsigned __assumed, unsigned __operand,
	bool __equal, memory_order __mo,
	const chrono::time_point<std::chrono::system_clock, _Dur>& __atime)
    {
      auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
      auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
      // XXX correct?
      return _M_load_and_test_until(__assumed, __operand, __equal, __mo,
	  true, __s.time_since_epoch(), __ns);
    }
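
    // Worked example of the split above (illustrative): an __atime of
    // epoch + 2.25s yields __s.time_since_epoch() == 2s and
    // __ns == 250000000ns, i.e. the (seconds, nanoseconds) pair that
    // _M_futex_wait_until expects against CLOCK_REALTIME.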

    template<typename _Dur>
    unsigned
    _M_load_and_test_until_impl(unsigned __assumed, unsigned __operand,
	bool __equal, memory_order __mo,
	const chrono::time_point<std::chrono::steady_clock, _Dur>& __atime)
    {
      auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
      auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
      // XXX correct?
      return _M_load_and_test_until_steady(__assumed, __operand, __equal, __mo,
	  true, __s.time_since_epoch(), __ns);
    }

  public:

    _GLIBCXX_ALWAYS_INLINE unsigned
    _M_load_when_not_equal(unsigned __val, memory_order __mo)
    {
      unsigned __i = _M_load(__mo);
      if ((__i & ~_Waiter_bit) != __val)
	return (__i & ~_Waiter_bit);
      // TODO Spin-wait first.
      return _M_load_and_test(__i, __val, false, __mo);
    }

    _GLIBCXX_ALWAYS_INLINE void
    _M_load_when_equal(unsigned __val, memory_order __mo)
    {
      unsigned __i = _M_load(__mo);
      if ((__i & ~_Waiter_bit) == __val)
	return;
      // TODO Spin-wait first.
      _M_load_and_test(__i, __val, true, __mo);
    }

    // Returns false iff a timeout occurred.
    template<typename _Rep, typename _Period>
      _GLIBCXX_ALWAYS_INLINE bool
      _M_load_when_equal_for(unsigned __val, memory_order __mo,
	  const chrono::duration<_Rep, _Period>& __rtime)
      {
	using __dur = typename __clock_t::duration;
	return _M_load_when_equal_until(__val, __mo,
		    __clock_t::now() + chrono::__detail::ceil<__dur>(__rtime));
      }

    // Returns false iff a timeout occurred.
    template<typename _Clock, typename _Duration>
      _GLIBCXX_ALWAYS_INLINE bool
      _M_load_when_equal_until(unsigned __val, memory_order __mo,
	  const chrono::time_point<_Clock, _Duration>& __atime)
      {
	typename _Clock::time_point __c_entry = _Clock::now();
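	// The user-supplied _Clock need not tick at the same rate as
	// __clock_t, so a wait that times out against __clock_t is only
	// a hint: convert the remaining time, wait on __clock_t, and
	// re-check against _Clock before reporting a timeout.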
	do {
	  const __clock_t::time_point __s_entry = __clock_t::now();
	  const auto __delta = __atime - __c_entry;
	  const auto __s_atime = __s_entry +
	      chrono::__detail::ceil<__clock_t::duration>(__delta);
	  if (_M_load_when_equal_until(__val, __mo, __s_atime))
	    return true;
	  __c_entry = _Clock::now();
	} while (__c_entry < __atime);
	return false;
      }

    // Returns false iff a timeout occurred.
    template<typename _Duration>
    _GLIBCXX_ALWAYS_INLINE bool
    _M_load_when_equal_until(unsigned __val, memory_order __mo,
	const chrono::time_point<std::chrono::system_clock, _Duration>& __atime)
    {
      unsigned __i = _M_load(__mo);
      if ((__i & ~_Waiter_bit) == __val)
	return true;
      // TODO Spin-wait first.  Ignore effect on timeout.
      __i = _M_load_and_test_until_impl(__i, __val, true, __mo, __atime);
      return (__i & ~_Waiter_bit) == __val;
    }

    // Returns false iff a timeout occurred.
    template<typename _Duration>
    _GLIBCXX_ALWAYS_INLINE bool
    _M_load_when_equal_until(unsigned __val, memory_order __mo,
	const chrono::time_point<std::chrono::steady_clock, _Duration>& __atime)
    {
      unsigned __i = _M_load(__mo);
      if ((__i & ~_Waiter_bit) == __val)
	return true;
      // TODO Spin-wait first.  Ignore effect on timeout.
      __i = _M_load_and_test_until_impl(__i, __val, true, __mo, __atime);
      return (__i & ~_Waiter_bit) == __val;
    }

    _GLIBCXX_ALWAYS_INLINE void
    _M_store_notify_all(unsigned __val, memory_order __mo)
    {
      unsigned* __futex = (unsigned *)(void *)&_M_data;
      if (_M_data.exchange(__val, __mo) & _Waiter_bit)
	_M_futex_notify_all(__futex);
    }
  };
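
  // Usage sketch (illustrative; __ready is not a name from this header):
  // a one-shot "ready" flag in the style of the shared state that
  // <future> builds on top of this class.
  //
  // @code
  // __atomic_futex_unsigned<> __ready{0};
  // // Consumer: block until the flag becomes 1.
  // __ready._M_load_when_equal(1, memory_order_acquire);
  // // Producer: publish the new value and wake all waiters.
  // __ready._M_store_notify_all(1, memory_order_release);
  // @endcode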

#else // ! (_GLIBCXX_HAVE_LINUX_FUTEX && ATOMIC_INT_LOCK_FREE > 1)

  // If futexes are not available, use a mutex and a condvar to wait.
  // Because we access the data only within critical sections, all accesses
  // are sequentially consistent; thus, we satisfy any provided memory_order.
  template <unsigned _Waiter_bit = 0x80000000>
  class __atomic_futex_unsigned
  {
    typedef chrono::system_clock __clock_t;

    unsigned _M_data;
    mutex _M_mutex;
    condition_variable _M_condvar;

  public:
    explicit
    __atomic_futex_unsigned(unsigned __data) : _M_data(__data)
    { }

    _GLIBCXX_ALWAYS_INLINE unsigned
    _M_load(memory_order __mo)
    {
      unique_lock<mutex> __lock(_M_mutex);
      return _M_data;
    }

    _GLIBCXX_ALWAYS_INLINE unsigned
    _M_load_when_not_equal(unsigned __val, memory_order __mo)
    {
      unique_lock<mutex> __lock(_M_mutex);
      while (_M_data == __val)
	_M_condvar.wait(__lock);
      return _M_data;
    }

    _GLIBCXX_ALWAYS_INLINE void
    _M_load_when_equal(unsigned __val, memory_order __mo)
    {
      unique_lock<mutex> __lock(_M_mutex);
      while (_M_data != __val)
	_M_condvar.wait(__lock);
    }

    template<typename _Rep, typename _Period>
      _GLIBCXX_ALWAYS_INLINE bool
      _M_load_when_equal_for(unsigned __val, memory_order __mo,
	  const chrono::duration<_Rep, _Period>& __rtime)
      {
	unique_lock<mutex> __lock(_M_mutex);
	return _M_condvar.wait_for(__lock, __rtime,
				   [&] { return _M_data == __val; });
      }

    template<typename _Clock, typename _Duration>
      _GLIBCXX_ALWAYS_INLINE bool
      _M_load_when_equal_until(unsigned __val, memory_order __mo,
	  const chrono::time_point<_Clock, _Duration>& __atime)
      {
	unique_lock<mutex> __lock(_M_mutex);
	return _M_condvar.wait_until(__lock, __atime,
				     [&] { return _M_data == __val; });
      }

    _GLIBCXX_ALWAYS_INLINE void
    _M_store_notify_all(unsigned __val, memory_order __mo)
    {
      unique_lock<mutex> __lock(_M_mutex);
      _M_data = __val;
      _M_condvar.notify_all();
    }
  };

#endif // _GLIBCXX_HAVE_LINUX_FUTEX && ATOMIC_INT_LOCK_FREE > 1
#endif // _GLIBCXX_HAS_GTHREADS

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif