// <mutex> -*- C++ -*-

// Copyright (C) 2003-2020 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file include/mutex
 *  This is a Standard C++ Library header.
 */

#ifndef _GLIBCXX_MUTEX
#define _GLIBCXX_MUTEX 1

#pragma GCC system_header

#if __cplusplus < 201103L
# include <bits/c++0x_warning.h>
#else

#include <tuple>
#include <chrono>
#include <exception>
#include <type_traits>
#include <system_error>
#include <bits/std_mutex.h>
#include <bits/unique_lock.h>
#if ! _GTHREAD_USE_MUTEX_TIMEDLOCK
# include <condition_variable>
# include <thread>
#endif
#ifndef _GLIBCXX_HAVE_TLS
# include <bits/std_function.h>
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @addtogroup mutexes
   * @{
   */

#ifdef _GLIBCXX_HAS_GTHREADS

  // Common base class for std::recursive_mutex and std::recursive_timed_mutex
  class __recursive_mutex_base
  {
  protected:
    typedef __gthread_recursive_mutex_t		__native_type;

    __recursive_mutex_base(const __recursive_mutex_base&) = delete;
    __recursive_mutex_base& operator=(const __recursive_mutex_base&) = delete;

#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
    __native_type  _M_mutex = __GTHREAD_RECURSIVE_MUTEX_INIT;

    __recursive_mutex_base() = default;
#else
    __native_type  _M_mutex;

    __recursive_mutex_base()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~__recursive_mutex_base()
    { __gthread_recursive_mutex_destroy(&_M_mutex); }
#endif
  };

  /// The standard recursive mutex type.
  class recursive_mutex : private __recursive_mutex_base
  {
  public:
    typedef __native_type*			native_handle_type;

    recursive_mutex() = default;
    ~recursive_mutex() = default;

    recursive_mutex(const recursive_mutex&) = delete;
    recursive_mutex& operator=(const recursive_mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle() noexcept
    { return &_M_mutex; }
  };
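  // A minimal usage sketch for recursive_mutex (the identifiers below are
  // illustrative only and not part of this header): the owning thread may
  // re-lock the mutex, which lets a locking function call another locking
  // function on the same object.
  //
  //   #include <mutex>
  //
  //   std::recursive_mutex mtx;
  //   int value = 0;
  //
  //   void increment()
  //   {
  //     std::lock_guard<std::recursive_mutex> l(mtx);
  //     ++value;
  //   }
  //
  //   void increment_twice()
  //   {
  //     std::lock_guard<std::recursive_mutex> l(mtx);
  //     increment();   // re-locking on the same thread is well-defined
  //     increment();
  //   }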

#if _GTHREAD_USE_MUTEX_TIMEDLOCK
  template<typename _Derived>
    class __timed_mutex_impl
    {
    protected:
      template<typename _Rep, typename _Period>
	bool
	_M_try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
	{
#if _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
	  using __clock = chrono::steady_clock;
#else
	  using __clock = chrono::system_clock;
#endif

	  auto __rt = chrono::duration_cast<__clock::duration>(__rtime);
	  if (ratio_greater<__clock::period, _Period>())
	    ++__rt;
	  return _M_try_lock_until(__clock::now() + __rt);
	}

      template<typename _Duration>
	bool
	_M_try_lock_until(const chrono::time_point<chrono::system_clock,
						   _Duration>& __atime)
	{
	  auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	  auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	  __gthread_time_t __ts = {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	  return static_cast<_Derived*>(this)->_M_timedlock(__ts);
	}

#ifdef _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
      template<typename _Duration>
	bool
	_M_try_lock_until(const chrono::time_point<chrono::steady_clock,
						   _Duration>& __atime)
	{
	  auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	  auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	  __gthread_time_t __ts = {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	  return static_cast<_Derived*>(this)->_M_clocklock(CLOCK_MONOTONIC,
							    __ts);
	}
#endif

      template<typename _Clock, typename _Duration>
	bool
	_M_try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
	{
#if __cplusplus > 201703L
	  static_assert(chrono::is_clock_v<_Clock>);
#endif
	  // The user-supplied clock may not tick at the same rate as
	  // steady_clock, so we must loop in order to guarantee that
	  // the timeout has expired before returning false.
	  auto __now = _Clock::now();
	  do {
	    auto __rtime = __atime - __now;
	    if (_M_try_lock_for(__rtime))
	      return true;
	    __now = _Clock::now();
	  } while (__atime > __now);
	  return false;
	}
    };

  /// The standard timed mutex type.
  class timed_mutex
  : private __mutex_base, public __timed_mutex_impl<timed_mutex>
  {
  public:
    typedef __native_type*			native_handle_type;

    timed_mutex() = default;
    ~timed_mutex() = default;

    timed_mutex(const timed_mutex&) = delete;
    timed_mutex& operator=(const timed_mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return _M_try_lock_for(__rtime); }

    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      { return _M_try_lock_until(__atime); }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle() noexcept
    { return &_M_mutex; }

    private:
      friend class __timed_mutex_impl<timed_mutex>;

      bool
      _M_timedlock(const __gthread_time_t& __ts)
      { return !__gthread_mutex_timedlock(&_M_mutex, &__ts); }

#if _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
      bool
      _M_clocklock(clockid_t clockid, const __gthread_time_t& __ts)
      { return !pthread_mutex_clocklock(&_M_mutex, clockid, &__ts); }
#endif
  };
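  // A minimal usage sketch for the timed locking interface (identifiers are
  // illustrative only and not part of this header):
  //
  //   #include <chrono>
  //   #include <mutex>
  //
  //   std::timed_mutex tm;
  //
  //   bool try_work()
  //   {
  //     // Give up if the mutex cannot be acquired within 50 milliseconds.
  //     if (!tm.try_lock_for(std::chrono::milliseconds(50)))
  //       return false;
  //     // Adopt the lock we already hold so it is released on scope exit.
  //     std::lock_guard<std::timed_mutex> l(tm, std::adopt_lock);
  //     // ... work under the lock ...
  //     return true;
  //   }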

  /// recursive_timed_mutex
  class recursive_timed_mutex
  : private __recursive_mutex_base,
    public __timed_mutex_impl<recursive_timed_mutex>
  {
  public:
    typedef __native_type*			native_handle_type;

    recursive_timed_mutex() = default;
    ~recursive_timed_mutex() = default;

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return _M_try_lock_for(__rtime); }

    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      { return _M_try_lock_until(__atime); }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle() noexcept
    { return &_M_mutex; }

    private:
      friend class __timed_mutex_impl<recursive_timed_mutex>;

      bool
      _M_timedlock(const __gthread_time_t& __ts)
      { return !__gthread_recursive_mutex_timedlock(&_M_mutex, &__ts); }

#ifdef _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
      bool
      _M_clocklock(clockid_t clockid, const __gthread_time_t& __ts)
      { return !pthread_mutex_clocklock(&_M_mutex, clockid, &__ts); }
#endif
  };

#else // !_GTHREAD_USE_MUTEX_TIMEDLOCK

  /// timed_mutex
  class timed_mutex
  {
    mutex		_M_mut;
    condition_variable	_M_cv;
    bool		_M_locked = false;

  public:

    timed_mutex() = default;
    ~timed_mutex() { __glibcxx_assert( !_M_locked ); }

    timed_mutex(const timed_mutex&) = delete;
    timed_mutex& operator=(const timed_mutex&) = delete;

    void
    lock()
    {
      unique_lock<mutex> __lk(_M_mut);
      _M_cv.wait(__lk, [&]{ return !_M_locked; });
      _M_locked = true;
    }

    bool
    try_lock()
    {
      lock_guard<mutex> __lk(_M_mut);
      if (_M_locked)
	return false;
      _M_locked = true;
      return true;
    }

    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_cv.wait_for(__lk, __rtime, [&]{ return !_M_locked; }))
	  return false;
	_M_locked = true;
	return true;
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_cv.wait_until(__lk, __atime, [&]{ return !_M_locked; }))
	  return false;
	_M_locked = true;
	return true;
      }

    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_locked );
      _M_locked = false;
      _M_cv.notify_one();
    }
  };

  /// recursive_timed_mutex
  class recursive_timed_mutex
  {
    mutex		_M_mut;
    condition_variable	_M_cv;
    thread::id		_M_owner;
    unsigned		_M_count = 0;

    // Predicate type that tests whether the current thread can lock a mutex.
    struct _Can_lock
    {
      // Returns true if the mutex is unlocked or is locked by _M_caller.
      bool
      operator()() const noexcept
      { return _M_mx->_M_count == 0 || _M_mx->_M_owner == _M_caller; }

      const recursive_timed_mutex* _M_mx;
      thread::id _M_caller;
    };

  public:

    recursive_timed_mutex() = default;
    ~recursive_timed_mutex() { __glibcxx_assert( _M_count == 0 ); }

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    void
    lock()
    {
      auto __id = this_thread::get_id();
      _Can_lock __can_lock{this, __id};
      unique_lock<mutex> __lk(_M_mut);
      _M_cv.wait(__lk, __can_lock);
      if (_M_count == -1u)
	__throw_system_error(EAGAIN); // [thread.timedmutex.recursive]/3
      _M_owner = __id;
      ++_M_count;
    }

    bool
    try_lock()
    {
      auto __id = this_thread::get_id();
      _Can_lock __can_lock{this, __id};
      lock_guard<mutex> __lk(_M_mut);
      if (!__can_lock())
	return false;
      if (_M_count == -1u)
	return false;
      _M_owner = __id;
      ++_M_count;
      return true;
    }

    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
	auto __id = this_thread::get_id();
	_Can_lock __can_lock{this, __id};
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_cv.wait_for(__lk, __rtime, __can_lock))
	  return false;
	if (_M_count == -1u)
	  return false;
	_M_owner = __id;
	++_M_count;
	return true;
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
	auto __id = this_thread::get_id();
	_Can_lock __can_lock{this, __id};
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_cv.wait_until(__lk, __atime, __can_lock))
	  return false;
	if (_M_count == -1u)
	  return false;
	_M_owner = __id;
	++_M_count;
	return true;
      }

    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_owner == this_thread::get_id() );
      __glibcxx_assert( _M_count > 0 );
      if (--_M_count == 0)
	{
	  _M_owner = {};
	  _M_cv.notify_one();
	}
    }
  };

#endif
#endif // _GLIBCXX_HAS_GTHREADS

  /// @cond undocumented
  template<typename _Lock>
    inline unique_lock<_Lock>
    __try_to_lock(_Lock& __l)
    { return unique_lock<_Lock>{__l, try_to_lock}; }

  template<int _Idx, bool _Continue = true>
    struct __try_lock_impl
    {
      template<typename... _Lock>
	static void
	__do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
	{
	  __idx = _Idx;
	  auto __lock = std::__try_to_lock(std::get<_Idx>(__locks));
	  if (__lock.owns_lock())
	    {
	      constexpr bool __cont = _Idx + 2 < sizeof...(_Lock);
	      using __try_locker = __try_lock_impl<_Idx + 1, __cont>;
	      __try_locker::__do_try_lock(__locks, __idx);
	      if (__idx == -1)
		__lock.release();
	    }
	}
    };

  template<int _Idx>
    struct __try_lock_impl<_Idx, false>
    {
      template<typename... _Lock>
	static void
	__do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
	{
	  __idx = _Idx;
	  auto __lock = std::__try_to_lock(std::get<_Idx>(__locks));
	  if (__lock.owns_lock())
	    {
	      __idx = -1;
	      __lock.release();
	    }
	}
    };
  /// @endcond

  /** @brief Generic try_lock.
   *  @param __l1 Meets Lockable requirements (try_lock() may throw).
   *  @param __l2 Meets Lockable requirements (try_lock() may throw).
   *  @param __l3 Meets Lockable requirements (try_lock() may throw).
   *  @return Returns -1 if all try_lock() calls return true. Otherwise returns
   *          a 0-based index corresponding to the argument that returned false.
   *  @post Either all arguments are locked, or none will be.
   *
   *  Sequentially calls try_lock() on each argument.
   */
  template<typename _Lock1, typename _Lock2, typename... _Lock3>
    int
    try_lock(_Lock1& __l1, _Lock2& __l2, _Lock3&... __l3)
    {
      int __idx;
      auto __locks = std::tie(__l1, __l2, __l3...);
      __try_lock_impl<0>::__do_try_lock(__locks, __idx);
      return __idx;
    }
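  // A short sketch of how the return value of std::try_lock is typically
  // used (identifiers are illustrative only and not part of this header):
  //
  //   std::mutex m1, m2;
  //
  //   void try_both()
  //   {
  //     int failed = std::try_lock(m1, m2);
  //     if (failed == -1)
  //       {
  //         // Both locks were acquired; adopt them so they are released
  //         // automatically at the end of the scope.
  //         std::lock_guard<std::mutex> g1(m1, std::adopt_lock);
  //         std::lock_guard<std::mutex> g2(m2, std::adopt_lock);
  //         // ... critical section ...
  //       }
  //     // Otherwise failed is the 0-based index of the lockable that could
  //     // not be locked, and nothing is held on return.
  //   }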

  /** @brief Generic lock.
   *  @param __l1 Meets Lockable requirements (try_lock() may throw).
   *  @param __l2 Meets Lockable requirements (try_lock() may throw).
   *  @param __l3 Meets Lockable requirements (try_lock() may throw).
   *  @throw An exception thrown by an argument's lock() or try_lock() member.
   *  @post All arguments are locked.
   *
   *  All arguments are locked via a sequence of calls to lock(), try_lock()
   *  and unlock().  If the call exits via an exception any locks that were
   *  obtained will be released.
   */
  template<typename _L1, typename _L2, typename... _L3>
    void
    lock(_L1& __l1, _L2& __l2, _L3&... __l3)
    {
      while (true)
	{
	  using __try_locker = __try_lock_impl<0, sizeof...(_L3) != 0>;
	  unique_lock<_L1> __first(__l1);
	  int __idx;
	  auto __locks = std::tie(__l2, __l3...);
	  __try_locker::__do_try_lock(__locks, __idx);
	  if (__idx == -1)
	    {
	      __first.release();
	      return;
	    }
	}
    }
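  // A minimal deadlock-avoiding sketch using std::lock together with
  // std::adopt_lock (identifiers are illustrative only and not part of this
  // header):
  //
  //   std::mutex a, b;
  //
  //   void transfer()
  //   {
  //     // Lock both mutexes without risking deadlock, regardless of the
  //     // order in which other threads lock a and b.
  //     std::lock(a, b);
  //     std::lock_guard<std::mutex> ga(a, std::adopt_lock);
  //     std::lock_guard<std::mutex> gb(b, std::adopt_lock);
  //     // ... update state protected by both mutexes ...
  //   }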

#if __cplusplus >= 201703L
#define __cpp_lib_scoped_lock 201703
  /** @brief A scoped lock type for multiple lockable objects.
   *
   * A scoped_lock controls mutex ownership within a scope, releasing
   * ownership in the destructor.
   */
  template<typename... _MutexTypes>
    class scoped_lock
    {
    public:
      explicit scoped_lock(_MutexTypes&... __m) : _M_devices(std::tie(__m...))
      { std::lock(__m...); }

      explicit scoped_lock(adopt_lock_t, _MutexTypes&... __m) noexcept
      : _M_devices(std::tie(__m...))
      { } // calling thread owns mutex

      ~scoped_lock()
      { std::apply([](auto&... __m) { (__m.unlock(), ...); }, _M_devices); }

      scoped_lock(const scoped_lock&) = delete;
      scoped_lock& operator=(const scoped_lock&) = delete;

    private:
      tuple<_MutexTypes&...> _M_devices;
    };

  template<>
    class scoped_lock<>
    {
    public:
      explicit scoped_lock() = default;
      explicit scoped_lock(adopt_lock_t) noexcept { }
      ~scoped_lock() = default;

      scoped_lock(const scoped_lock&) = delete;
      scoped_lock& operator=(const scoped_lock&) = delete;
    };

  template<typename _Mutex>
    class scoped_lock<_Mutex>
    {
    public:
      using mutex_type = _Mutex;

      explicit scoped_lock(mutex_type& __m) : _M_device(__m)
      { _M_device.lock(); }

      explicit scoped_lock(adopt_lock_t, mutex_type& __m) noexcept
      : _M_device(__m)
      { } // calling thread owns mutex

      ~scoped_lock()
      { _M_device.unlock(); }

      scoped_lock(const scoped_lock&) = delete;
      scoped_lock& operator=(const scoped_lock&) = delete;

    private:
      mutex_type&  _M_device;
    };
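  // In C++17 the same multi-mutex pattern can be written with scoped_lock,
  // which locks all of its arguments as if by std::lock and unlocks them in
  // its destructor (identifiers are illustrative only and not part of this
  // header):
  //
  //   std::mutex a, b;
  //
  //   void transfer()
  //   {
  //     std::scoped_lock l(a, b);  // CTAD deduces scoped_lock<mutex, mutex>
  //     // ... update state protected by both mutexes ...
  //   }                            // both mutexes released here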
#endif // C++17

#ifdef _GLIBCXX_HAS_GTHREADS
  /// Flag type used by std::call_once
  struct once_flag
  {
  private:
    typedef __gthread_once_t __native_type;
    __native_type  _M_once = __GTHREAD_ONCE_INIT;

  public:
    /// Constructor
    constexpr once_flag() noexcept = default;

    /// Deleted copy constructor
    once_flag(const once_flag&) = delete;
    /// Deleted assignment operator
    once_flag& operator=(const once_flag&) = delete;

    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
  };

  /// @cond undocumented
#ifdef _GLIBCXX_HAVE_TLS
  extern __thread void* __once_callable;
  extern __thread void (*__once_call)();
#else
  extern function<void()> __once_functor;

  extern void
  __set_once_functor_lock_ptr(unique_lock<mutex>*);

  extern mutex&
  __get_once_mutex();
#endif

  extern "C" void __once_proxy(void);
  /// @endcond

  /// Invoke a callable and synchronize with other calls using the same flag
  template<typename _Callable, typename... _Args>
    void
    call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
    {
      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 2442. call_once() shouldn't DECAY_COPY()
      auto __callable = [&] {
	  std::__invoke(std::forward<_Callable>(__f),
			std::forward<_Args>(__args)...);
      };
#ifdef _GLIBCXX_HAVE_TLS
      __once_callable = std::__addressof(__callable); // NOLINT: PR 82481
      __once_call = []{ (*(decltype(__callable)*)__once_callable)(); };
#else
      unique_lock<mutex> __functor_lock(__get_once_mutex());
      __once_functor = __callable;
      __set_once_functor_lock_ptr(&__functor_lock);
#endif

      int __e = __gthread_once(&__once._M_once, &__once_proxy);

#ifndef _GLIBCXX_HAVE_TLS
      if (__functor_lock)
	__set_once_functor_lock_ptr(0);
#endif

      if (__e)
	__throw_system_error(__e);
    }
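  // A minimal sketch of one-time initialisation with call_once (identifiers
  // are illustrative only and not part of this header):
  //
  //   std::once_flag table_flag;
  //   int* table = nullptr;
  //
  //   void init_table()
  //   { table = new int[256](); }
  //
  //   int lookup(int i)
  //   {
  //     // init_table() runs exactly once even if many threads race here;
  //     // every caller observes the effects of that single call.
  //     std::call_once(table_flag, init_table);
  //     return table[i];
  //   }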
#endif // _GLIBCXX_HAS_GTHREADS

  /// @} group mutexes
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif // C++11

#endif // _GLIBCXX_MUTEX