// <shared_mutex> -*- C++ -*-

// Copyright (C) 2013-2014 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file include/shared_mutex
 *  This is a Standard C++ Library header.
 */

#ifndef _GLIBCXX_SHARED_MUTEX
#define _GLIBCXX_SHARED_MUTEX 1

#pragma GCC system_header

#if __cplusplus <= 201103L
# include <bits/c++14_warning.h>
#else

#include <bits/c++config.h>
#include <mutex>
#include <condition_variable>
#include <bits/functexcept.h>

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @ingroup mutexes
   * @{
   */

#ifdef _GLIBCXX_USE_C99_STDINT_TR1
#ifdef _GLIBCXX_HAS_GTHREADS

#define __cpp_lib_shared_timed_mutex 201402

  /// shared_timed_mutex
  class shared_timed_mutex
  {
    // Must use the same clock as condition_variable
    typedef chrono::system_clock	__clock_t;

    // Based on Howard Hinnant's reference implementation from N2406.

    // The high bit of _M_state is the write-entered flag which is set to
    // indicate a writer has taken the lock or is queuing to take the lock.
    // The remaining bits are the count of reader locks.
    //
    // To take a reader lock, block on gate1 while the write-entered flag is
    // set or the maximum number of reader locks is held, then increment the
    // reader lock count.
    // To release, decrement the count, then if the write-entered flag is set
    // and the count is zero then signal gate2 to wake a queued writer,
    // otherwise if the maximum number of reader locks was held signal gate1
    // to wake a reader.
    //
    // To take a writer lock, block on gate1 while the write-entered flag is
    // set, then set the write-entered flag to start queuing, then block on
    // gate2 while the number of reader locks is non-zero.
    // To release, unset the write-entered flag and signal gate1 to wake all
    // blocked readers and writers.
    //
    // This means that when no reader locks are held readers and writers get
    // equal priority. When one or more reader locks is held a writer gets
    // priority and no more reader locks can be taken while the writer is
    // queued.
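    //
    // As a concrete illustration (assuming a 32-bit unsigned, which is a
    // property of the target rather than a guarantee of this header):
    // _S_write_entered is 0x80000000 and _S_max_readers is 0x7fffffff, so
    // an _M_state value of 0x80000003 means the write-entered flag is set
    // while three reader locks are still held, i.e. a writer is queued on
    // gate2 waiting for the remaining readers to release.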

    // Only locked when accessing _M_state or waiting on condition variables.
    mutex		_M_mut;
    // Used to block while write-entered is set or reader count at maximum.
    condition_variable	_M_gate1;
    // Used to block queued writers while reader count is non-zero.
    condition_variable	_M_gate2;
    // The write-entered flag and reader count.
    unsigned		_M_state;

    static constexpr unsigned _S_write_entered
      = 1U << (sizeof(unsigned)*__CHAR_BIT__ - 1);
    static constexpr unsigned _S_max_readers = ~_S_write_entered;

    // Test whether the write-entered flag is set. _M_mut must be locked.
    bool _M_write_entered() const { return _M_state & _S_write_entered; }

    // The number of reader locks currently held. _M_mut must be locked.
    unsigned _M_readers() const { return _M_state & _S_max_readers; }

  public:
    shared_timed_mutex() : _M_state(0) {}

    ~shared_timed_mutex()
    {
      _GLIBCXX_DEBUG_ASSERT( _M_state == 0 );
    }

    shared_timed_mutex(const shared_timed_mutex&) = delete;
    shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;

    // Exclusive ownership

    void
    lock()
    {
      unique_lock<mutex> __lk(_M_mut);
      // Wait until we can set the write-entered flag.
      _M_gate1.wait(__lk, [=]{ return !_M_write_entered(); });
      _M_state |= _S_write_entered;
      // Then wait until there are no more readers.
      _M_gate2.wait(__lk, [=]{ return _M_readers() == 0; });
    }

    bool
    try_lock()
    {
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      if (__lk.owns_lock() && _M_state == 0)
	{
	  _M_state = _S_write_entered;
	  return true;
	}
      return false;
    }

    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
      {
	return try_lock_until(__clock_t::now() + __rel_time);
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_gate1.wait_until(__lk, __abs_time,
				 [=]{ return !_M_write_entered(); }))
	  {
	    return false;
	  }
	_M_state |= _S_write_entered;
	if (!_M_gate2.wait_until(__lk, __abs_time,
				 [=]{ return _M_readers() == 0; }))
	  {
	    _M_state ^= _S_write_entered;
	    // Wake all threads blocked while the write-entered flag was set.
	    _M_gate1.notify_all();
	    return false;
	  }
	return true;
      }

    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      _GLIBCXX_DEBUG_ASSERT( _M_write_entered() );
      _M_state = 0;
      // Call notify_all() while the mutex is still held, so that another
      // thread cannot lock and unlock the mutex and then destroy *this
      // before we make the call.
      _M_gate1.notify_all();
    }

    // Shared ownership

    void
    lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut);
      _M_gate1.wait(__lk, [=]{ return _M_state < _S_max_readers; });
      ++_M_state;
    }

    bool
    try_lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      if (!__lk.owns_lock())
	return false;
      if (_M_state < _S_max_readers)
	{
	  ++_M_state;
	  return true;
	}
      return false;
    }

    template<typename _Rep, typename _Period>
      bool
      try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rel_time)
      {
	return try_lock_shared_until(__clock_t::now() + __rel_time);
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
						     _Duration>& __abs_time)
      {
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_gate1.wait_until(__lk, __abs_time,
				 [=]{ return _M_state < _S_max_readers; }))
	  {
	    return false;
	  }
	++_M_state;
	return true;
      }

    void
    unlock_shared()
    {
      lock_guard<mutex> __lk(_M_mut);
      _GLIBCXX_DEBUG_ASSERT( _M_readers() > 0 );
      auto __prev = _M_state--;
      if (_M_write_entered())
	{
	  // Wake the queued writer if there are no more readers.
	  if (_M_readers() == 0)
	    _M_gate2.notify_one();
	  // No need to notify gate1 because we give priority to the queued
	  // writer, and that writer will eventually notify gate1 after it
	  // clears the write-entered flag.
	}
      else
	{
	  // Wake any thread that was blocked on reader overflow.
	  if (__prev == _S_max_readers)
	    _M_gate1.notify_one();
	}
    }
  };
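
  // Illustrative usage sketch, not part of this header: a shared_timed_mutex
  // lets many readers hold the lock concurrently while a writer gets
  // exclusive access. The names below (s_mutex, s_data, read_value,
  // write_value) are hypothetical.
  //
  //   #include <shared_mutex>
  //   #include <mutex>
  //
  //   std::shared_timed_mutex s_mutex;
  //   int s_data = 0;
  //
  //   int read_value()                 // may run in many threads at once
  //   {
  //     std::shared_lock<std::shared_timed_mutex> lk(s_mutex);
  //     return s_data;
  //   }
  //
  //   void write_value(int value)      // exclusive; blocks all readers
  //   {
  //     std::lock_guard<std::shared_timed_mutex> lk(s_mutex);
  //     s_data = value;
  //   }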
#endif // _GLIBCXX_HAS_GTHREADS

  /// shared_lock
  template<typename _Mutex>
    class shared_lock
    {
    public:
      typedef _Mutex mutex_type;

      // Shared locking

      shared_lock() noexcept : _M_pm(nullptr), _M_owns(false) { }

      explicit
      shared_lock(mutex_type& __m) : _M_pm(&__m), _M_owns(true)
      { __m.lock_shared(); }

      shared_lock(mutex_type& __m, defer_lock_t) noexcept
      : _M_pm(&__m), _M_owns(false) { }

      shared_lock(mutex_type& __m, try_to_lock_t)
      : _M_pm(&__m), _M_owns(__m.try_lock_shared()) { }

      shared_lock(mutex_type& __m, adopt_lock_t)
      : _M_pm(&__m), _M_owns(true) { }

      template<typename _Clock, typename _Duration>
	shared_lock(mutex_type& __m,
		    const chrono::time_point<_Clock, _Duration>& __abs_time)
      : _M_pm(&__m), _M_owns(__m.try_lock_shared_until(__abs_time)) { }

      template<typename _Rep, typename _Period>
	shared_lock(mutex_type& __m,
		    const chrono::duration<_Rep, _Period>& __rel_time)
      : _M_pm(&__m), _M_owns(__m.try_lock_shared_for(__rel_time)) { }

      ~shared_lock()
      {
	if (_M_owns)
	  _M_pm->unlock_shared();
      }

      shared_lock(shared_lock const&) = delete;
      shared_lock& operator=(shared_lock const&) = delete;

      shared_lock(shared_lock&& __sl) noexcept : shared_lock()
      { swap(__sl); }

      shared_lock&
      operator=(shared_lock&& __sl) noexcept
      {
	shared_lock(std::move(__sl)).swap(*this);
	return *this;
      }

      void
      lock()
      {
	_M_lockable();
	_M_pm->lock_shared();
	_M_owns = true;
      }

      bool
      try_lock()
      {
	_M_lockable();
	return _M_owns = _M_pm->try_lock_shared();
      }

      template<typename _Rep, typename _Period>
	bool
	try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
	{
	  _M_lockable();
	  return _M_owns = _M_pm->try_lock_shared_for(__rel_time);
	}

      template<typename _Clock, typename _Duration>
	bool
	try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
	{
	  _M_lockable();
	  return _M_owns = _M_pm->try_lock_shared_until(__abs_time);
	}

      void
      unlock()
      {
	if (!_M_owns)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
	_M_pm->unlock_shared();
	_M_owns = false;
      }

      // Setters

      void
      swap(shared_lock& __u) noexcept
      {
	std::swap(_M_pm, __u._M_pm);
	std::swap(_M_owns, __u._M_owns);
      }

      mutex_type*
      release() noexcept
      {
	_M_owns = false;
	return std::exchange(_M_pm, nullptr);
      }

      // Getters

      bool owns_lock() const noexcept { return _M_owns; }

      explicit operator bool() const noexcept { return _M_owns; }

      mutex_type* mutex() const noexcept { return _M_pm; }

    private:
      void
      _M_lockable() const
      {
	if (_M_pm == nullptr)
	  __throw_system_error(int(errc::operation_not_permitted));
	if (_M_owns)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
      }

      mutex_type*	_M_pm;
      bool		_M_owns;
    };

  /// Swap specialization for shared_lock
  template<typename _Mutex>
    void
    swap(shared_lock<_Mutex>& __x, shared_lock<_Mutex>& __y) noexcept
    { __x.swap(__y); }
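
  // Illustrative usage sketch, not part of this header: constructing a
  // shared_lock from a mutex and a duration calls try_lock_shared_for, so
  // ownership must be tested afterwards. The names below (s_mutex, poll)
  // are hypothetical.
  //
  //   #include <shared_mutex>
  //   #include <chrono>
  //
  //   std::shared_timed_mutex s_mutex;
  //
  //   bool poll()
  //   {
  //     std::shared_lock<std::shared_timed_mutex> lk(s_mutex,
  //                                                  std::chrono::milliseconds(10));
  //     return lk.owns_lock();   // true only if shared ownership was acquired
  //   }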

#endif // _GLIBCXX_USE_C99_STDINT_TR1

  // @} group mutexes
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif // C++14

#endif // _GLIBCXX_SHARED_MUTEX