// <stop_token> -*- C++ -*-

// Copyright (C) 2019-2020 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file include/stop_token
 *  This is a Standard C++ Library header.
 */

#ifndef _GLIBCXX_STOP_TOKEN
#define _GLIBCXX_STOP_TOKEN

#if __cplusplus > 201703L

#include <atomic>

#ifdef _GLIBCXX_HAS_GTHREADS
# define __cpp_lib_jthread 201911L
# include <bits/gthr.h>
# if __has_include(<semaphore>)
#  include <semaphore>
# endif
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /// Tag type indicating a stop_source should have no shared stop state.
  struct nostopstate_t { explicit nostopstate_t() = default; };
  inline constexpr nostopstate_t nostopstate{};
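
  // Example (an illustrative sketch, not part of this header): passing
  // nostopstate to stop_source's constructor creates a source with no
  // shared stop state, so no allocation occurs and no stop request can
  // ever be made through it:
  //
  //   std::stop_source src{std::nostopstate};
  //   // src.stop_possible() is false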

  class stop_source;

  /// Allows testing whether a stop request has been made on an associated `stop_source`.
  class stop_token
  {
  public:
    stop_token() noexcept = default;

    stop_token(const stop_token&) noexcept = default;
    stop_token(stop_token&&) noexcept = default;

    ~stop_token() = default;

    stop_token&
    operator=(const stop_token&) noexcept = default;

    stop_token&
    operator=(stop_token&&) noexcept = default;

    [[nodiscard]]
    bool
    stop_possible() const noexcept
    {
      return static_cast<bool>(_M_state) && _M_state->_M_stop_possible();
    }

    [[nodiscard]]
    bool
    stop_requested() const noexcept
    {
      return static_cast<bool>(_M_state) && _M_state->_M_stop_requested();
    }

    void
    swap(stop_token& __rhs) noexcept
    { _M_state.swap(__rhs._M_state); }

    [[nodiscard]]
    friend bool
    operator==(const stop_token& __a, const stop_token& __b) noexcept
    { return __a._M_state == __b._M_state; }

    friend void
    swap(stop_token& __lhs, stop_token& __rhs) noexcept
    { __lhs.swap(__rhs); }

  private:
    friend class stop_source;
    template<typename _Callback>
      friend class stop_callback;

    static void
    _S_yield() noexcept
    {
#if defined __i386__ || defined __x86_64__
      __builtin_ia32_pause();
#elif defined _GLIBCXX_HAS_GTHREADS && defined _GLIBCXX_USE_SCHED_YIELD
      __gthread_yield();
#endif
    }

#ifndef __cpp_lib_semaphore
    // TODO: replace this with a real implementation of std::binary_semaphore
    struct binary_semaphore
    {
      explicit binary_semaphore(int __d) : _M_counter(__d > 0) { }

      void release() { _M_counter.fetch_add(1, memory_order::release); }

      void acquire()
      {
	int __old = 1;
	while (!_M_counter.compare_exchange_weak(__old, 0,
						 memory_order::acquire,
						 memory_order::relaxed))
	  {
	    __old = 1;
	    _S_yield();
	  }
      }

      atomic<int> _M_counter;
    };
#endif
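
    // Note: each _Stop_cb below holds one of these semaphores. The thread
    // running callbacks in _M_request_stop releases it after a callback
    // returns, and ~stop_callback (via _M_remove_callback) acquires it,
    // so destroying a stop_callback waits for an in-progress invocation
    // of its callback on another thread to finish.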

    struct _Stop_cb
    {
      using __cb_type = void(_Stop_cb*) noexcept;
      __cb_type* _M_callback;
      _Stop_cb* _M_prev = nullptr;
      _Stop_cb* _M_next = nullptr;
      bool* _M_destroyed = nullptr;
      binary_semaphore _M_done{0};

      [[__gnu__::__nonnull__]]
      explicit
      _Stop_cb(__cb_type* __cb)
      : _M_callback(__cb)
      { }

      void _M_run() noexcept { _M_callback(this); }
    };
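
    // _Stop_cb objects form an intrusive doubly-linked list headed by
    // _Stop_state_t::_M_head below. Insertion, removal, and traversal of
    // the list are serialized by the lock bit in _Stop_state_t::_M_value.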

    struct _Stop_state_t
    {
      using value_type = uint32_t;
      static constexpr value_type _S_stop_requested_bit = 1;
      static constexpr value_type _S_locked_bit = 2;
      static constexpr value_type _S_ssrc_counter_inc = 4;
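
      // _M_value layout: bit 0 is the stop-requested flag, bit 1 is a
      // spin-lock bit protecting the callback list, and the remaining
      // bits count the stop_source objects that share this state (in
      // units of _S_ssrc_counter_inc).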

      std::atomic<value_type> _M_owners{1};
      std::atomic<value_type> _M_value{_S_ssrc_counter_inc};
      _Stop_cb* _M_head = nullptr;
#ifdef _GLIBCXX_HAS_GTHREADS
      __gthread_t _M_requester;
#endif

      _Stop_state_t() noexcept { }

      bool
      _M_stop_possible() noexcept
      {
	// true if a stop request has already been made or there are still
	// stop_source objects that would allow one to be made.
	return _M_value.load(memory_order::acquire) & ~_S_locked_bit;
      }

      bool
      _M_stop_requested() noexcept
      {
	return _M_value.load(memory_order::acquire) & _S_stop_requested_bit;
      }

      void
      _M_add_owner() noexcept
      {
	_M_owners.fetch_add(1, memory_order::relaxed);
      }

      void
      _M_release_ownership() noexcept
      {
	if (_M_owners.fetch_sub(1, memory_order::acq_rel) == 1)
	  delete this;
      }

      void
      _M_add_ssrc() noexcept
      {
	_M_value.fetch_add(_S_ssrc_counter_inc, memory_order::relaxed);
      }

      void
      _M_sub_ssrc() noexcept
      {
	_M_value.fetch_sub(_S_ssrc_counter_inc, memory_order::release);
      }

      // Obtain lock.
      void
      _M_lock() noexcept
      {
	// Can use relaxed loads to get the current value.
	// The successful call to _M_try_lock is an acquire operation.
	auto __old = _M_value.load(memory_order::relaxed);
	while (!_M_try_lock(__old, memory_order::relaxed))
	  { }
      }

      // Precondition: calling thread holds the lock.
      void
      _M_unlock() noexcept
      {
	_M_value.fetch_sub(_S_locked_bit, memory_order::release);
      }

      bool
      _M_request_stop() noexcept
      {
	// obtain lock and set stop_requested bit
	auto __old = _M_value.load(memory_order::acquire);
	do
	  {
	    if (__old & _S_stop_requested_bit) // stop request already made
	      return false;
	  }
	while (!_M_try_lock_and_stop(__old));

#ifdef _GLIBCXX_HAS_GTHREADS
#ifdef _GLIBCXX_NATIVE_THREAD_ID
	_M_requester = _GLIBCXX_NATIVE_THREAD_ID;
#else
	_M_requester = __gthread_self();
#endif
#endif

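	// This thread now holds the lock and runs every registered
	// callback, unlocking around each invocation so that other
	// threads can unregister callbacks concurrently; see
	// _M_remove_callback.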
	while (_M_head)
	  {
	    bool __last_cb;
	    _Stop_cb* __cb = _M_head;
	    _M_head = _M_head->_M_next;
	    if (_M_head)
	      {
		_M_head->_M_prev = nullptr;
		__last_cb = false;
	      }
	    else
	      __last_cb = true;

	    // Allow other callbacks to be unregistered while __cb runs.
	    _M_unlock();

	    bool __destroyed = false;
	    __cb->_M_destroyed = &__destroyed;

	    // run callback
	    __cb->_M_run();

	    if (!__destroyed)
	      {
		__cb->_M_destroyed = nullptr;
#ifdef _GLIBCXX_HAS_GTHREADS
		// synchronize with destructor of stop_callback that owns *__cb
		__cb->_M_done.release();
#endif
	      }

	    // Avoid relocking if we already know there are no more callbacks.
	    if (__last_cb)
	      return true;

	    _M_lock();
	  }

	_M_unlock();
	return true;
      }

      [[__gnu__::__nonnull__]]
      bool
      _M_register_callback(_Stop_cb* __cb) noexcept
      {
	auto __old = _M_value.load(memory_order::acquire);
	do
	  {
	    if (__old & _S_stop_requested_bit) // stop request already made
	      {
		__cb->_M_run(); // run synchronously
		return false;
	      }

	    if (__old < _S_ssrc_counter_inc) // no stop_source owns *this
	      // No need to register callback if no stop request can be made.
	      // Returning false also means the stop_callback does not share
	      // ownership of this state, but that's not observable.
	      return false;
	  }
	while (!_M_try_lock(__old));

	__cb->_M_next = _M_head;
	if (_M_head)
	  {
	    _M_head->_M_prev = __cb;
	  }
	_M_head = __cb;
	_M_unlock();
	return true;
      }

      // Called by ~stop_callback just before destroying *__cb.
      [[__gnu__::__nonnull__]]
      void
      _M_remove_callback(_Stop_cb* __cb)
      {
	_M_lock();

	if (__cb == _M_head)
	  {
	    _M_head = _M_head->_M_next;
	    if (_M_head)
	      _M_head->_M_prev = nullptr;
	    _M_unlock();
	    return;
	  }
	else if (__cb->_M_prev)
	  {
	    __cb->_M_prev->_M_next = __cb->_M_next;
	    if (__cb->_M_next)
	      __cb->_M_next->_M_prev = __cb->_M_prev;
	    _M_unlock();
	    return;
	  }

	_M_unlock();

	// Callback is not in the list, so must have been removed by a call to
	// _M_request_stop.

#ifdef _GLIBCXX_HAS_GTHREADS
#ifdef _GLIBCXX_NATIVE_THREAD_ID
	auto __tid = _GLIBCXX_NATIVE_THREAD_ID;
#else
	auto __tid = __gthread_self();
#endif
	// Despite appearances there is no data race on _M_requester. The only
	// write to it happens before the callback is removed from the list,
	// and removing it from the list happens before this read.
	if (!__gthread_equal(_M_requester, __tid))
	  {
	    // Synchronize with completion of callback.
	    __cb->_M_done.acquire();
	    // Safe for ~stop_callback to destroy *__cb now.
	    return;
	  }
#endif
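	// We are on the thread that made the stop request, so the callback
	// is not running concurrently. If it is currently executing (i.e.
	// it is destroying its own stop_callback), set the flag so that
	// _M_request_stop does not touch *__cb after it returns.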
	if (__cb->_M_destroyed)
	  *__cb->_M_destroyed = true;
      }

      // Try to obtain the lock.
      // Returns true if the lock is acquired (with memory order acquire).
      // Otherwise, sets __curval = _M_value.load(__failure) and returns false.
      // Might fail spuriously, so must be called in a loop.
      bool
      _M_try_lock(value_type& __curval,
		  memory_order __failure = memory_order::acquire) noexcept
      {
	return _M_do_try_lock(__curval, 0, memory_order::acquire, __failure);
      }

      // Try to obtain the lock to make a stop request.
      // Returns true if the lock is acquired and the _S_stop_requested_bit is
      // set (with memory order acq_rel so that other threads see the request).
      // Otherwise, sets __curval = _M_value.load(memory_order::acquire) and
      // returns false.
      // Might fail spuriously, so must be called in a loop.
      bool
      _M_try_lock_and_stop(value_type& __curval) noexcept
      {
	return _M_do_try_lock(__curval, _S_stop_requested_bit,
			      memory_order::acq_rel, memory_order::acquire);
      }

      bool
      _M_do_try_lock(value_type& __curval, value_type __newbits,
		     memory_order __success, memory_order __failure) noexcept
      {
	if (__curval & _S_locked_bit)
	  {
	    _S_yield();
	    __curval = _M_value.load(__failure);
	    return false;
	  }
	__newbits |= _S_locked_bit;
	return _M_value.compare_exchange_weak(__curval, __curval | __newbits,
					      __success, __failure);
      }
    };

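    // A reference-counted handle to a heap-allocated _Stop_state_t.
    // stop_token, stop_source, and stop_callback objects share ownership
    // of the stop state through copies of this type.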
    struct _Stop_state_ref
    {
      _Stop_state_ref() = default;

      explicit
      _Stop_state_ref(const stop_source&)
      : _M_ptr(new _Stop_state_t())
      { }

      _Stop_state_ref(const _Stop_state_ref& __other) noexcept
      : _M_ptr(__other._M_ptr)
      {
	if (_M_ptr)
	  _M_ptr->_M_add_owner();
      }

      _Stop_state_ref(_Stop_state_ref&& __other) noexcept
      : _M_ptr(__other._M_ptr)
      {
	__other._M_ptr = nullptr;
      }

      _Stop_state_ref&
      operator=(const _Stop_state_ref& __other) noexcept
      {
	if (auto __ptr = __other._M_ptr; __ptr != _M_ptr)
	  {
	    if (__ptr)
	      __ptr->_M_add_owner();
	    if (_M_ptr)
	      _M_ptr->_M_release_ownership();
	    _M_ptr = __ptr;
	  }
	return *this;
      }

      _Stop_state_ref&
      operator=(_Stop_state_ref&& __other) noexcept
      {
	_Stop_state_ref(std::move(__other)).swap(*this);
	return *this;
      }

      ~_Stop_state_ref()
      {
	if (_M_ptr)
	  _M_ptr->_M_release_ownership();
      }

      void
      swap(_Stop_state_ref& __other) noexcept
      { std::swap(_M_ptr, __other._M_ptr); }

      explicit operator bool() const noexcept { return _M_ptr != nullptr; }

      _Stop_state_t* operator->() const noexcept { return _M_ptr; }

#if __cpp_impl_three_way_comparison >= 201907L
      friend bool
      operator==(const _Stop_state_ref&, const _Stop_state_ref&) = default;
#else
      friend bool
      operator==(const _Stop_state_ref& __lhs, const _Stop_state_ref& __rhs)
      noexcept
      { return __lhs._M_ptr == __rhs._M_ptr; }

      friend bool
      operator!=(const _Stop_state_ref& __lhs, const _Stop_state_ref& __rhs)
      noexcept
      { return __lhs._M_ptr != __rhs._M_ptr; }
#endif

    private:
      _Stop_state_t* _M_ptr = nullptr;
    };

    _Stop_state_ref _M_state;

    explicit
    stop_token(const _Stop_state_ref& __state) noexcept
    : _M_state{__state}
    { }
  };
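
  // Example usage (an illustrative sketch, not part of this header):
  // a worker loop polls its stop_token and returns once a stop request
  // has been made on the associated stop_source. `do_work` is a
  // hypothetical function standing in for real work.
  //
  //   void run(std::stop_token tok)
  //   {
  //     while (!tok.stop_requested())
  //       do_work();
  //   }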

  /// A type that allows a stop request to be made.
  class stop_source
  {
  public:
    stop_source() : _M_state(*this)
    { }

    explicit stop_source(std::nostopstate_t) noexcept
    { }

    stop_source(const stop_source& __other) noexcept
    : _M_state(__other._M_state)
    {
      if (_M_state)
	_M_state->_M_add_ssrc();
    }

    stop_source(stop_source&&) noexcept = default;

    stop_source&
    operator=(const stop_source& __other) noexcept
    {
      if (_M_state != __other._M_state)
	{
	  stop_source __sink(std::move(*this));
	  _M_state = __other._M_state;
	  if (_M_state)
	    _M_state->_M_add_ssrc();
	}
      return *this;
    }

    stop_source&
    operator=(stop_source&&) noexcept = default;

    ~stop_source()
    {
      if (_M_state)
	_M_state->_M_sub_ssrc();
    }

    [[nodiscard]]
    bool
    stop_possible() const noexcept
    {
      return static_cast<bool>(_M_state);
    }

    [[nodiscard]]
    bool
    stop_requested() const noexcept
    {
      return static_cast<bool>(_M_state) && _M_state->_M_stop_requested();
    }

    bool
    request_stop() const noexcept
    {
      if (stop_possible())
	return _M_state->_M_request_stop();
      return false;
    }

    [[nodiscard]]
    stop_token
    get_token() const noexcept
    {
      return stop_token{_M_state};
    }

    void
    swap(stop_source& __other) noexcept
    {
      _M_state.swap(__other._M_state);
    }

    [[nodiscard]]
    friend bool
    operator==(const stop_source& __a, const stop_source& __b) noexcept
    {
      return __a._M_state == __b._M_state;
    }

    friend void
    swap(stop_source& __lhs, stop_source& __rhs) noexcept
    {
      __lhs.swap(__rhs);
    }

  private:
    stop_token::_Stop_state_ref _M_state;
  };
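
  // Example usage (an illustrative sketch, not part of this header):
  // a stop request made through a stop_source is visible to every
  // stop_token obtained from it. `run` refers to the sketch after the
  // stop_token class above; std::thread requires <thread>.
  //
  //   std::stop_source src;
  //   std::thread worker(run, src.get_token());
  //   // ... later, from any thread:
  //   src.request_stop();   // run() observes stop_requested() == true
  //   worker.join();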

  /// A wrapper for callbacks to be run when a stop request is made.
  template<typename _Callback>
    class [[nodiscard]] stop_callback
    {
      static_assert(is_nothrow_destructible_v<_Callback>);
      static_assert(is_invocable_v<_Callback>);

    public:
      using callback_type = _Callback;

      template<typename _Cb,
	       enable_if_t<is_constructible_v<_Callback, _Cb>, int> = 0>
	explicit
	stop_callback(const stop_token& __token, _Cb&& __cb)
	noexcept(is_nothrow_constructible_v<_Callback, _Cb>)
	: _M_cb(std::forward<_Cb>(__cb))
	{
	  if (auto __state = __token._M_state)
	    {
	      if (__state->_M_register_callback(&_M_cb))
		_M_state.swap(__state);
	    }
	}

      template<typename _Cb,
	       enable_if_t<is_constructible_v<_Callback, _Cb>, int> = 0>
	explicit
	stop_callback(stop_token&& __token, _Cb&& __cb)
	noexcept(is_nothrow_constructible_v<_Callback, _Cb>)
	: _M_cb(std::forward<_Cb>(__cb))
	{
	  if (auto& __state = __token._M_state)
	    {
	      if (__state->_M_register_callback(&_M_cb))
		_M_state.swap(__state);
	    }
	}

      ~stop_callback()
      {
	if (_M_state)
	  {
	    _M_state->_M_remove_callback(&_M_cb);
	  }
      }

      stop_callback(const stop_callback&) = delete;
      stop_callback& operator=(const stop_callback&) = delete;
      stop_callback(stop_callback&&) = delete;
      stop_callback& operator=(stop_callback&&) = delete;

    private:
      struct _Cb_impl : stop_token::_Stop_cb
      {
	template<typename _Cb>
	  explicit
	  _Cb_impl(_Cb&& __cb)
	  : _Stop_cb(&_S_execute),
	    _M_cb(std::forward<_Cb>(__cb))
	  { }

	_Callback _M_cb;

	[[__gnu__::__nonnull__]]
	static void
	_S_execute(_Stop_cb* __that) noexcept
	{
	  _Callback& __cb = static_cast<_Cb_impl*>(__that)->_M_cb;
	  std::forward<_Callback>(__cb)();
	}
      };

      _Cb_impl _M_cb;
      stop_token::_Stop_state_ref _M_state;
    };

  template<typename _Callback>
    stop_callback(stop_token, _Callback) -> stop_callback<_Callback>;
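
  // Example usage (an illustrative sketch, not part of this header):
  // the callback is invoked when a stop is requested, or immediately on
  // the constructing thread if a stop request has already been made.
  // `notify` is a hypothetical function.
  //
  //   std::stop_source src;
  //   std::stop_callback cb(src.get_token(), [] { notify(); });
  //   src.request_stop();   // invokes the lambda before returning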

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace
#endif // __cplusplus > 201703L
#endif // _GLIBCXX_STOP_TOKEN
