// <mutex> -*- C++ -*-

// Copyright (C) 2003-2015 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file include/mutex
 *  This is a Standard C++ Library header.
 */

#ifndef _GLIBCXX_MUTEX
#define _GLIBCXX_MUTEX 1

#pragma GCC system_header

#if __cplusplus < 201103L
# include <bits/c++0x_warning.h>
#else

#include <tuple>
#include <chrono>
#include <exception>
#include <type_traits>
#include <functional>
#include <system_error>
#include <bits/functexcept.h>
#include <bits/gthr.h>
#include <bits/move.h> // for std::swap
#include <bits/cxxabi_forced.h>

#ifdef _GLIBCXX_USE_C99_STDINT_TR1

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

#ifdef _GLIBCXX_HAS_GTHREADS
  // Common base class for std::mutex and std::timed_mutex
  class __mutex_base
  {
  protected:
    typedef __gthread_mutex_t			__native_type;

#ifdef __GTHREAD_MUTEX_INIT
    __native_type  _M_mutex = __GTHREAD_MUTEX_INIT;

    constexpr __mutex_base() noexcept = default;
#else
    __native_type  _M_mutex;

    __mutex_base() noexcept
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~__mutex_base() noexcept { __gthread_mutex_destroy(&_M_mutex); }
#endif

    __mutex_base(const __mutex_base&) = delete;
    __mutex_base& operator=(const __mutex_base&) = delete;
  };

  // Common base class for std::recursive_mutex and std::recursive_timed_mutex
  class __recursive_mutex_base
  {
  protected:
    typedef __gthread_recursive_mutex_t		__native_type;

    __recursive_mutex_base(const __recursive_mutex_base&) = delete;
    __recursive_mutex_base& operator=(const __recursive_mutex_base&) = delete;

#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
    __native_type  _M_mutex = __GTHREAD_RECURSIVE_MUTEX_INIT;

    __recursive_mutex_base() = default;
#else
    __native_type  _M_mutex;

    __recursive_mutex_base()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~__recursive_mutex_base()
    { __gthread_recursive_mutex_destroy(&_M_mutex); }
#endif
  };

  /**
   * @defgroup mutexes Mutexes
   * @ingroup concurrency
   *
   * Classes for mutex support.
   * @{
   */

  /// The standard mutex type.
  class mutex : private __mutex_base
  {
  public:
    typedef __native_type* 			native_handle_type;

#ifdef __GTHREAD_MUTEX_INIT
    constexpr
#endif
    mutex() noexcept = default;
    ~mutex() = default;

    mutex(const mutex&) = delete;
    mutex& operator=(const mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EPERM
      __gthread_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };
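
  // Example (illustrative only, not part of the library): the usual pattern
  // is to pair a std::mutex with the data it protects and hold the lock for
  // the duration of each access.  The names `counter' and `increment' are
  // purely hypothetical.
  //
  //   #include <mutex>
  //
  //   std::mutex counter_mutex;   // guards `counter'
  //   int counter = 0;
  //
  //   void increment()
  //   {
  //     counter_mutex.lock();     // blocks until the mutex is acquired
  //     ++counter;
  //     counter_mutex.unlock();   // must be called by the owning thread
  //   }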

  /// The standard recursive mutex type.
  class recursive_mutex : private __recursive_mutex_base
  {
  public:
    typedef __native_type* 			native_handle_type;

    recursive_mutex() = default;
    ~recursive_mutex() = default;

    recursive_mutex(const recursive_mutex&) = delete;
    recursive_mutex& operator=(const recursive_mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };
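
  // Example (illustrative only): a recursive_mutex may be re-locked by the
  // thread that already owns it, which is useful when one locking member
  // function calls another on the same object.  `Counter' is a hypothetical
  // class used only for this sketch.
  //
  //   #include <mutex>
  //
  //   class Counter
  //   {
  //     std::recursive_mutex _mtx;
  //     int _value = 0;
  //
  //   public:
  //     void add(int n)
  //     {
  //       std::lock_guard<std::recursive_mutex> lock(_mtx); // may re-lock
  //       _value += n;
  //     }
  //
  //     void add_twice(int n)
  //     {
  //       std::lock_guard<std::recursive_mutex> lock(_mtx); // first level
  //       add(n);                                           // re-locks, OK
  //       add(n);
  //     }
  //   };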

#if _GTHREAD_USE_MUTEX_TIMEDLOCK
  template<typename _Derived>
    class __timed_mutex_impl
    {
    protected:
      typedef chrono::high_resolution_clock 	__clock_t;

      template<typename _Rep, typename _Period>
	bool
	_M_try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
	{
	  using chrono::steady_clock;
	  auto __rt = chrono::duration_cast<steady_clock::duration>(__rtime);
	  if (ratio_greater<steady_clock::period, _Period>())
	    ++__rt;
	  return _M_try_lock_until(steady_clock::now() + __rt);
	}

      template<typename _Duration>
	bool
	_M_try_lock_until(const chrono::time_point<__clock_t,
						   _Duration>& __atime)
	{
	  auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	  auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	  __gthread_time_t __ts = {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	  auto __mutex = static_cast<_Derived*>(this)->native_handle();
	  return !__gthread_mutex_timedlock(__mutex, &__ts);
	}

      template<typename _Clock, typename _Duration>
	bool
	_M_try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
	{
	  auto __rtime = __atime - _Clock::now();
	  return _M_try_lock_until(__clock_t::now() + __rtime);
	}
    };

  /// The standard timed mutex type.
  class timed_mutex
  : private __mutex_base, public __timed_mutex_impl<timed_mutex>
  {
  public:
    typedef __native_type* 		  	native_handle_type;

    timed_mutex() = default;
    ~timed_mutex() = default;

    timed_mutex(const timed_mutex&) = delete;
    timed_mutex& operator=(const timed_mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return _M_try_lock_for(__rtime); }

    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      { return _M_try_lock_until(__atime); }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };
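
  // Example (illustrative only): try_lock_for gives up after a relative
  // timeout instead of blocking indefinitely.  `tm' and `do_work' are
  // hypothetical names used only for this sketch.
  //
  //   #include <mutex>
  //   #include <chrono>
  //
  //   std::timed_mutex tm;
  //
  //   bool do_work()
  //   {
  //     if (!tm.try_lock_for(std::chrono::milliseconds(100)))
  //       return false;             // could not acquire within 100ms
  //     // ... critical section ...
  //     tm.unlock();
  //     return true;
  //   }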

  /// The standard recursive timed mutex type.
  class recursive_timed_mutex
  : private __recursive_mutex_base,
    public __timed_mutex_impl<recursive_timed_mutex>
  {
  public:
    typedef __native_type* 			native_handle_type;

    recursive_timed_mutex() = default;
    ~recursive_timed_mutex() = default;

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return _M_try_lock_for(__rtime); }

    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      { return _M_try_lock_until(__atime); }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };
#endif
#endif // _GLIBCXX_HAS_GTHREADS

  /// Do not acquire ownership of the mutex.
  struct defer_lock_t { };

  /// Try to acquire ownership of the mutex without blocking.
  struct try_to_lock_t { };

  /// Assume the calling thread has already obtained mutex ownership
  /// and manage it.
  struct adopt_lock_t { };

  /// Tag used to prevent a scoped lock from acquiring ownership of a mutex.
  constexpr defer_lock_t	defer_lock { };

  /// Tag used to prevent a scoped lock from blocking if a mutex is locked.
  constexpr try_to_lock_t	try_to_lock { };

  /// Tag used to make a scoped lock take ownership of a locked mutex.
  constexpr adopt_lock_t	adopt_lock { };

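  // Example (illustrative only): the tag constants select a locking policy
  // at construction time.  `m1' and `m2' are hypothetical mutexes.
  //
  //   #include <mutex>
  //
  //   std::mutex m1, m2;
  //
  //   void f()
  //   {
  //     std::unique_lock<std::mutex> l1(m1, std::defer_lock); // m1 not locked yet
  //     l1.lock();                                            // lock it explicitly
  //
  //     m2.lock();
  //     std::lock_guard<std::mutex> l2(m2, std::adopt_lock);  // take over ownership
  //   }
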
  /** @brief A simple scoped lock type.
   *
   * A lock_guard controls mutex ownership within a scope, releasing
   * ownership in the destructor.
   */
  template<typename _Mutex>
    class lock_guard
    {
    public:
      typedef _Mutex mutex_type;

      explicit lock_guard(mutex_type& __m) : _M_device(__m)
      { _M_device.lock(); }

      lock_guard(mutex_type& __m, adopt_lock_t) : _M_device(__m)
      { } // calling thread owns mutex

      ~lock_guard()
      { _M_device.unlock(); }

      lock_guard(const lock_guard&) = delete;
      lock_guard& operator=(const lock_guard&) = delete;

    private:
      mutex_type&  _M_device;
    };

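  // Example (illustrative only): lock_guard provides purely scope-based
  // locking; the mutex is released when the guard goes out of scope, even
  // if an exception propagates.  `log_mutex', `log' and `append' are
  // hypothetical names.
  //
  //   #include <mutex>
  //   #include <string>
  //   #include <vector>
  //
  //   std::mutex log_mutex;
  //   std::vector<std::string> log;
  //
  //   void append(const std::string& line)
  //   {
  //     std::lock_guard<std::mutex> guard(log_mutex); // locks here
  //     log.push_back(line);                          // may throw; still unlocks
  //   }                                               // unlocks here
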
  /** @brief A movable scoped lock type.
   *
   * A unique_lock controls mutex ownership within a scope. Ownership of the
   * mutex can be delayed until after construction and can be transferred
   * to another unique_lock by move construction or move assignment. If a
   * mutex lock is owned when the destructor runs ownership will be released.
   */
  template<typename _Mutex>
    class unique_lock
    {
    public:
      typedef _Mutex mutex_type;

      unique_lock() noexcept
      : _M_device(0), _M_owns(false)
      { }

      explicit unique_lock(mutex_type& __m)
      : _M_device(std::__addressof(__m)), _M_owns(false)
      {
	lock();
	_M_owns = true;
      }

      unique_lock(mutex_type& __m, defer_lock_t) noexcept
      : _M_device(std::__addressof(__m)), _M_owns(false)
      { }

      unique_lock(mutex_type& __m, try_to_lock_t)
      : _M_device(std::__addressof(__m)), _M_owns(_M_device->try_lock())
      { }

      unique_lock(mutex_type& __m, adopt_lock_t)
      : _M_device(std::__addressof(__m)), _M_owns(true)
      {
	// XXX calling thread owns mutex
      }

      template<typename _Clock, typename _Duration>
	unique_lock(mutex_type& __m,
		    const chrono::time_point<_Clock, _Duration>& __atime)
	: _M_device(std::__addressof(__m)),
	  _M_owns(_M_device->try_lock_until(__atime))
	{ }

      template<typename _Rep, typename _Period>
	unique_lock(mutex_type& __m,
		    const chrono::duration<_Rep, _Period>& __rtime)
	: _M_device(std::__addressof(__m)),
	  _M_owns(_M_device->try_lock_for(__rtime))
	{ }

      ~unique_lock()
      {
	if (_M_owns)
	  unlock();
      }

      unique_lock(const unique_lock&) = delete;
      unique_lock& operator=(const unique_lock&) = delete;

      unique_lock(unique_lock&& __u) noexcept
      : _M_device(__u._M_device), _M_owns(__u._M_owns)
      {
	__u._M_device = 0;
	__u._M_owns = false;
      }

      unique_lock& operator=(unique_lock&& __u) noexcept
      {
	if(_M_owns)
	  unlock();

	unique_lock(std::move(__u)).swap(*this);

	__u._M_device = 0;
	__u._M_owns = false;

	return *this;
      }

      void
      lock()
      {
	if (!_M_device)
	  __throw_system_error(int(errc::operation_not_permitted));
	else if (_M_owns)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
	else
	  {
	    _M_device->lock();
	    _M_owns = true;
	  }
      }

      bool
      try_lock()
      {
	if (!_M_device)
	  __throw_system_error(int(errc::operation_not_permitted));
	else if (_M_owns)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
	else
	  {
	    _M_owns = _M_device->try_lock();
	    return _M_owns;
	  }
      }

      template<typename _Clock, typename _Duration>
	bool
	try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
	{
	  if (!_M_device)
	    __throw_system_error(int(errc::operation_not_permitted));
	  else if (_M_owns)
	    __throw_system_error(int(errc::resource_deadlock_would_occur));
	  else
	    {
	      _M_owns = _M_device->try_lock_until(__atime);
	      return _M_owns;
	    }
	}

      template<typename _Rep, typename _Period>
	bool
	try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
	{
	  if (!_M_device)
	    __throw_system_error(int(errc::operation_not_permitted));
	  else if (_M_owns)
	    __throw_system_error(int(errc::resource_deadlock_would_occur));
	  else
	    {
	      _M_owns = _M_device->try_lock_for(__rtime);
	      return _M_owns;
	    }
	}

      void
      unlock()
      {
	if (!_M_owns)
	  __throw_system_error(int(errc::operation_not_permitted));
	else if (_M_device)
	  {
	    _M_device->unlock();
	    _M_owns = false;
	  }
      }

      void
      swap(unique_lock& __u) noexcept
      {
	std::swap(_M_device, __u._M_device);
	std::swap(_M_owns, __u._M_owns);
      }

      mutex_type*
      release() noexcept
      {
	mutex_type* __ret = _M_device;
	_M_device = 0;
	_M_owns = false;
	return __ret;
      }

      bool
      owns_lock() const noexcept
      { return _M_owns; }

      explicit operator bool() const noexcept
      { return owns_lock(); }

      mutex_type*
      mutex() const noexcept
      { return _M_device; }

    private:
      mutex_type*	_M_device;
      bool		_M_owns; // XXX use atomic_bool
    };

  /// Swap overload for unique_lock objects.
  template<typename _Mutex>
    inline void
    swap(unique_lock<_Mutex>& __x, unique_lock<_Mutex>& __y) noexcept
    { __x.swap(__y); }
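
  // Example (illustrative only): unlike lock_guard, a unique_lock can be
  // unlocked early, re-locked, and moved to transfer ownership.  The names
  // below are hypothetical.
  //
  //   #include <mutex>
  //
  //   std::mutex m;
  //
  //   std::unique_lock<std::mutex> acquire()
  //   {
  //     std::unique_lock<std::mutex> l(m);  // locks m
  //     return l;                           // ownership moves to the caller
  //   }
  //
  //   void g()
  //   {
  //     std::unique_lock<std::mutex> l = acquire();
  //     // ... use the protected data ...
  //     l.unlock();             // release before scope exit if desired
  //   }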

  template<typename _Lock>
    inline unique_lock<_Lock>
    __try_to_lock(_Lock& __l)
    { return unique_lock<_Lock>{__l, try_to_lock}; }

  template<int _Idx, bool _Continue = true>
    struct __try_lock_impl
    {
      template<typename... _Lock>
	static void
	__do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
	{
          __idx = _Idx;
          auto __lock = std::__try_to_lock(std::get<_Idx>(__locks));
          if (__lock.owns_lock())
            {
	      constexpr bool __cont = _Idx + 2 < sizeof...(_Lock);
	      using __try_locker = __try_lock_impl<_Idx + 1, __cont>;
	      __try_locker::__do_try_lock(__locks, __idx);
              if (__idx == -1)
                __lock.release();
            }
	}
    };

  template<int _Idx>
    struct __try_lock_impl<_Idx, false>
    {
      template<typename... _Lock>
	static void
	__do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
	{
          __idx = _Idx;
          auto __lock = std::__try_to_lock(std::get<_Idx>(__locks));
          if (__lock.owns_lock())
            {
              __idx = -1;
              __lock.release();
            }
	}
    };

  /** @brief Generic try_lock.
   *  @param __l1 Meets Mutex requirements (try_lock() may throw).
   *  @param __l2 Meets Mutex requirements (try_lock() may throw).
   *  @param __l3 Meets Mutex requirements (try_lock() may throw).
   *  @return Returns -1 if all try_lock() calls return true. Otherwise returns
   *          a 0-based index corresponding to the argument that returned false.
   *  @post Either all arguments are locked, or none will be.
   *
   *  Sequentially calls try_lock() on each argument.
   */
  template<typename _Lock1, typename _Lock2, typename... _Lock3>
    int
    try_lock(_Lock1& __l1, _Lock2& __l2, _Lock3&... __l3)
    {
      int __idx;
      auto __locks = std::tie(__l1, __l2, __l3...);
      __try_lock_impl<0>::__do_try_lock(__locks, __idx);
      return __idx;
    }
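
  // Example (illustrative only): std::try_lock attempts to lock each
  // argument in turn and backs out completely on the first failure.
  // `m1', `m2' and `try_both' are hypothetical names.
  //
  //   #include <mutex>
  //
  //   std::mutex m1, m2;
  //
  //   bool try_both()
  //   {
  //     int failed = std::try_lock(m1, m2);
  //     if (failed != -1)
  //       return false;   // `failed' is the index of the mutex not acquired
  //     // ... both locked; do work ...
  //     m1.unlock();
  //     m2.unlock();
  //     return true;
  //   }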

  /** @brief Generic lock.
   *  @param __l1 Meets Mutex requirements (try_lock() may throw).
   *  @param __l2 Meets Mutex requirements (try_lock() may throw).
   *  @param __l3 Meets Mutex requirements (try_lock() may throw).
   *  @throw An exception thrown by an argument's lock() or try_lock() member.
   *  @post All arguments are locked.
   *
   *  All arguments are locked via a sequence of calls to lock(), try_lock()
   *  and unlock().  If the call exits via an exception any locks that were
   *  obtained will be released.
   */
  template<typename _L1, typename _L2, typename... _L3>
    void
    lock(_L1& __l1, _L2& __l2, _L3&... __l3)
    {
      while (true)
        {
          using __try_locker = __try_lock_impl<0, sizeof...(_L3) != 0>;
          unique_lock<_L1> __first(__l1);
          int __idx;
          auto __locks = std::tie(__l2, __l3...);
          __try_locker::__do_try_lock(__locks, __idx);
          if (__idx == -1)
            {
              __first.release();
              return;
            }
        }
    }
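
  // Example (illustrative only): std::lock acquires several mutexes without
  // deadlock regardless of the order in which other threads lock them; the
  // adopt_lock guards then take over ownership.  `Account' and `transfer'
  // are hypothetical, and the sketch assumes `from' and `to' are distinct.
  //
  //   #include <mutex>
  //
  //   struct Account
  //   {
  //     std::mutex mtx;
  //     long balance = 0;
  //   };
  //
  //   void transfer(Account& from, Account& to, long amount)
  //   {
  //     std::lock(from.mtx, to.mtx);                          // deadlock-free
  //     std::lock_guard<std::mutex> g1(from.mtx, std::adopt_lock);
  //     std::lock_guard<std::mutex> g2(to.mtx, std::adopt_lock);
  //     from.balance -= amount;
  //     to.balance += amount;
  //   }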

#ifdef _GLIBCXX_HAS_GTHREADS
  /// once_flag
  struct once_flag
  {
  private:
    typedef __gthread_once_t __native_type;
    __native_type  _M_once = __GTHREAD_ONCE_INIT;

  public:
    /// Constructor
    constexpr once_flag() noexcept = default;

    /// Deleted copy constructor
    once_flag(const once_flag&) = delete;
    /// Deleted assignment operator
    once_flag& operator=(const once_flag&) = delete;

    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
  };

#ifdef _GLIBCXX_HAVE_TLS
  extern __thread void* __once_callable;
  extern __thread void (*__once_call)();

  template<typename _Callable>
    inline void
    __once_call_impl()
    {
      (*(_Callable*)__once_callable)();
    }
#else
  extern function<void()> __once_functor;

  extern void
  __set_once_functor_lock_ptr(unique_lock<mutex>*);

  extern mutex&
  __get_once_mutex();
#endif

  extern "C" void __once_proxy(void);

  /// call_once
  template<typename _Callable, typename... _Args>
    void
    call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
    {
#ifdef _GLIBCXX_HAVE_TLS
      auto __bound_functor = std::__bind_simple(std::forward<_Callable>(__f),
          std::forward<_Args>(__args)...);
      __once_callable = std::__addressof(__bound_functor);
      __once_call = &__once_call_impl<decltype(__bound_functor)>;
#else
      unique_lock<mutex> __functor_lock(__get_once_mutex());
      auto __callable = std::__bind_simple(std::forward<_Callable>(__f),
          std::forward<_Args>(__args)...);
      __once_functor = [&]() { __callable(); };
      __set_once_functor_lock_ptr(&__functor_lock);
#endif

      int __e = __gthread_once(&__once._M_once, &__once_proxy);

#ifndef _GLIBCXX_HAVE_TLS
      if (__functor_lock)
        __set_once_functor_lock_ptr(0);
#endif

      if (__e)
	__throw_system_error(__e);
    }
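
  // Example (illustrative only): call_once runs the callable exactly once
  // even if several threads reach it concurrently; later callers wait for
  // the first call to finish.  `resource_flag', `init' and `use_resource'
  // are hypothetical names.
  //
  //   #include <mutex>
  //
  //   std::once_flag resource_flag;
  //
  //   void init() { /* expensive one-time setup */ }
  //
  //   void use_resource()
  //   {
  //     std::call_once(resource_flag, init); // init() runs at most once
  //     // ... resource is now initialized ...
  //   }
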
#endif // _GLIBCXX_HAS_GTHREADS

  // @} group mutexes
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace
#endif // _GLIBCXX_USE_C99_STDINT_TR1

#endif // C++11

#endif // _GLIBCXX_MUTEX
