// <shared_mutex> -*- C++ -*-

// Copyright (C) 2013-2017 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file include/shared_mutex
 *  This is a Standard C++ Library header.
 */

#ifndef _GLIBCXX_SHARED_MUTEX
#define _GLIBCXX_SHARED_MUTEX 1

#pragma GCC system_header

#if __cplusplus >= 201402L

#include <bits/c++config.h>
#include <condition_variable>
#include <bits/functexcept.h>

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @ingroup mutexes
   * @{
   */

#ifdef _GLIBCXX_USE_C99_STDINT_TR1
#ifdef _GLIBCXX_HAS_GTHREADS

#if __cplusplus >= 201703L
#define __cpp_lib_shared_mutex 201505
  class shared_mutex;
#endif

#define __cpp_lib_shared_timed_mutex 201402
  class shared_timed_mutex;

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
  /// A shared mutex type implemented using pthread_rwlock_t.
  class __shared_mutex_pthread
  {
    friend class shared_timed_mutex;

#ifdef PTHREAD_RWLOCK_INITIALIZER
    pthread_rwlock_t	_M_rwlock = PTHREAD_RWLOCK_INITIALIZER;

  public:
    __shared_mutex_pthread() = default;
    ~__shared_mutex_pthread() = default;
#else
    pthread_rwlock_t	_M_rwlock;

  public:
    __shared_mutex_pthread()
    {
      int __ret = pthread_rwlock_init(&_M_rwlock, NULL);
      if (__ret == ENOMEM)
	__throw_bad_alloc();
      else if (__ret == EAGAIN)
	__throw_system_error(int(errc::resource_unavailable_try_again));
      else if (__ret == EPERM)
	__throw_system_error(int(errc::operation_not_permitted));
      // Errors not handled: EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }

    ~__shared_mutex_pthread()
    {
      int __ret __attribute((__unused__)) = pthread_rwlock_destroy(&_M_rwlock);
      // Errors not handled: EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }
#endif

    __shared_mutex_pthread(const __shared_mutex_pthread&) = delete;
    __shared_mutex_pthread& operator=(const __shared_mutex_pthread&) = delete;

    void
    lock()
    {
      int __ret = pthread_rwlock_wrlock(&_M_rwlock);
      if (__ret == EDEADLK)
	__throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
    }

    bool
    try_lock()
    {
      int __ret = pthread_rwlock_trywrlock(&_M_rwlock);
      if (__ret == EBUSY) return false;
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
      return true;
    }

    void
    unlock()
    {
      int __ret __attribute((__unused__)) = pthread_rwlock_unlock(&_M_rwlock);
      // Errors not handled: EPERM, EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }

    // Shared ownership

    void
    lock_shared()
    {
      int __ret;
      // We retry if we exceeded the maximum number of read locks supported by
      // the POSIX implementation; this can result in busy-waiting, but this
      // is okay based on the current specification of forward progress
      // guarantees by the standard.
      do
	__ret = pthread_rwlock_rdlock(&_M_rwlock);
      while (__ret == EAGAIN);
      if (__ret == EDEADLK)
	__throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
    }

    bool
    try_lock_shared()
    {
      int __ret = pthread_rwlock_tryrdlock(&_M_rwlock);
      // If the maximum number of read locks has been exceeded, we just fail
      // to acquire the lock.  Unlike for lock(), we are not allowed to throw
      // an exception.
      if (__ret == EBUSY || __ret == EAGAIN) return false;
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
      return true;
    }

    void
    unlock_shared()
    {
      unlock();
    }

    void* native_handle() { return &_M_rwlock; }
  };
#endif

#if ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
  /// A shared mutex type implemented using std::condition_variable.
  class __shared_mutex_cv
  {
    friend class shared_timed_mutex;

    // Based on Howard Hinnant's reference implementation from N2406.

    // The high bit of _M_state is the write-entered flag which is set to
    // indicate a writer has taken the lock or is queuing to take the lock.
    // The remaining bits are the count of reader locks.
    //
    // To take a reader lock, block on gate1 while the write-entered flag is
    // set or the maximum number of reader locks is held, then increment the
    // reader lock count.
    // To release, decrement the count, then if the write-entered flag is set
    // and the count is zero then signal gate2 to wake a queued writer,
    // otherwise if the maximum number of reader locks was held signal gate1
    // to wake a reader.
    //
    // To take a writer lock, block on gate1 while the write-entered flag is
    // set, then set the write-entered flag to start queueing, then block on
    // gate2 while the number of reader locks is non-zero.
    // To release, unset the write-entered flag and signal gate1 to wake all
    // blocked readers and writers.
    //
    // This means that when no reader locks are held readers and writers get
    // equal priority. When one or more reader locks is held a writer gets
    // priority and no more reader locks can be taken while the writer is
    // queued.

    // Only locked when accessing _M_state or waiting on condition variables.
    mutex		_M_mut;
    // Used to block while write-entered is set or reader count at maximum.
    condition_variable	_M_gate1;
    // Used to block queued writers while reader count is non-zero.
    condition_variable	_M_gate2;
    // The write-entered flag and reader count.
    unsigned		_M_state;

    static constexpr unsigned _S_write_entered
      = 1U << (sizeof(unsigned)*__CHAR_BIT__ - 1);
    static constexpr unsigned _S_max_readers = ~_S_write_entered;

    // Test whether the write-entered flag is set. _M_mut must be locked.
    bool _M_write_entered() const { return _M_state & _S_write_entered; }

    // The number of reader locks currently held. _M_mut must be locked.
    unsigned _M_readers() const { return _M_state & _S_max_readers; }

  public:
    __shared_mutex_cv() : _M_state(0) {}

    ~__shared_mutex_cv()
    {
      __glibcxx_assert( _M_state == 0 );
    }

    __shared_mutex_cv(const __shared_mutex_cv&) = delete;
    __shared_mutex_cv& operator=(const __shared_mutex_cv&) = delete;

    // Exclusive ownership

    void
    lock()
    {
      unique_lock<mutex> __lk(_M_mut);
      // Wait until we can set the write-entered flag.
      _M_gate1.wait(__lk, [=]{ return !_M_write_entered(); });
      _M_state |= _S_write_entered;
      // Then wait until there are no more readers.
      _M_gate2.wait(__lk, [=]{ return _M_readers() == 0; });
    }

    bool
    try_lock()
    {
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      if (__lk.owns_lock() && _M_state == 0)
	{
	  _M_state = _S_write_entered;
	  return true;
	}
      return false;
    }

    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_write_entered() );
      _M_state = 0;
      // call notify_all() while mutex is held so that another thread can't
      // lock and unlock the mutex then destroy *this before we make the call.
      _M_gate1.notify_all();
    }

    // Shared ownership

    void
    lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut);
      _M_gate1.wait(__lk, [=]{ return _M_state < _S_max_readers; });
      ++_M_state;
    }

    bool
    try_lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      if (!__lk.owns_lock())
	return false;
      if (_M_state < _S_max_readers)
	{
	  ++_M_state;
	  return true;
	}
      return false;
    }

    void
    unlock_shared()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_readers() > 0 );
      auto __prev = _M_state--;
      if (_M_write_entered())
	{
	  // Wake the queued writer if there are no more readers.
	  if (_M_readers() == 0)
	    _M_gate2.notify_one();
	  // No need to notify gate1 because we give priority to the queued
	  // writer, and that writer will eventually notify gate1 after it
	  // clears the write-entered flag.
	}
      else
	{
	  // Wake any thread that was blocked on reader overflow.
	  if (__prev == _S_max_readers)
	    _M_gate1.notify_one();
	}
    }
  };
#endif

#if __cplusplus > 201402L
  /// The standard shared mutex type.
  class shared_mutex
  {
  public:
    shared_mutex() = default;
    ~shared_mutex() = default;

    shared_mutex(const shared_mutex&) = delete;
    shared_mutex& operator=(const shared_mutex&) = delete;

    // Exclusive ownership

    void lock() { _M_impl.lock(); }
    bool try_lock() { return _M_impl.try_lock(); }
    void unlock() { _M_impl.unlock(); }

    // Shared ownership

    void lock_shared() { _M_impl.lock_shared(); }
    bool try_lock_shared() { return _M_impl.try_lock_shared(); }
    void unlock_shared() { _M_impl.unlock_shared(); }

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
    typedef void* native_handle_type;
    native_handle_type native_handle() { return _M_impl.native_handle(); }

  private:
    __shared_mutex_pthread _M_impl;
#else
  private:
    __shared_mutex_cv _M_impl;
#endif
  };
#endif // C++17
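  // A minimal usage sketch for shared_mutex (illustrative only, not part
  // of the standard header; the names `cache`, `get` and `set` below are
  // hypothetical, and <map> is assumed to be included).  Many readers may
  // hold the lock concurrently via shared ownership, while a writer takes
  // exclusive ownership:
  //
  //   std::shared_mutex m;
  //   std::map<int, int> cache;
  //
  //   int get(int key)               // readers may run concurrently
  //   {
  //     std::shared_lock<std::shared_mutex> lk(m);  // lock_shared()
  //     auto it = cache.find(key);
  //     return it == cache.end() ? -1 : it->second;
  //   }
  //
  //   void set(int key, int value)   // a writer excludes everyone else
  //   {
  //     std::lock_guard<std::shared_mutex> lk(m);   // lock()
  //     cache[key] = value;
  //   }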
#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  using __shared_timed_mutex_base = __shared_mutex_pthread;
#else
  using __shared_timed_mutex_base = __shared_mutex_cv;
#endif

  /// The standard shared timed mutex type.
  class shared_timed_mutex
  : private __shared_timed_mutex_base
  {
    using _Base = __shared_timed_mutex_base;

    // Must use the same clock as condition_variable for __shared_mutex_cv.
    typedef chrono::system_clock	__clock_t;

  public:
    shared_timed_mutex() = default;
    ~shared_timed_mutex() = default;

    shared_timed_mutex(const shared_timed_mutex&) = delete;
    shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;

    // Exclusive ownership

    void lock() { _Base::lock(); }
    bool try_lock() { return _Base::try_lock(); }
    void unlock() { _Base::unlock(); }

    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
      {
	return try_lock_until(__clock_t::now() + __rel_time);
      }

    // Shared ownership

    void lock_shared() { _Base::lock_shared(); }
    bool try_lock_shared() { return _Base::try_lock_shared(); }
    void unlock_shared() { _Base::unlock_shared(); }

    template<typename _Rep, typename _Period>
      bool
      try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rel_time)
      {
	return try_lock_shared_until(__clock_t::now() + __rel_time);
      }

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK

    // Exclusive ownership

    template<typename _Duration>
      bool
      try_lock_until(const chrono::time_point<__clock_t, _Duration>& __atime)
      {
	auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts =
	  {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	int __ret = pthread_rwlock_timedwrlock(&_M_rwlock, &__ts);
	// On self-deadlock, we just fail to acquire the lock.  Technically,
	// the program violated the precondition.
	if (__ret == ETIMEDOUT || __ret == EDEADLK)
	  return false;
	// Errors not handled: EINVAL
	__glibcxx_assert(__ret == 0);
	return true;
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
	// DR 887 - Sync unknown clock to known clock.
	const typename _Clock::time_point __c_entry = _Clock::now();
	const __clock_t::time_point __s_entry = __clock_t::now();
	const auto __delta = __abs_time - __c_entry;
	const auto __s_atime = __s_entry + __delta;
	return try_lock_until(__s_atime);
      }

    // Shared ownership

    template<typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<__clock_t,
			    _Duration>& __atime)
      {
	auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts =
	  {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	int __ret;
	// Unlike for lock(), we are not allowed to throw an exception so if
	// the maximum number of read locks has been exceeded, or we would
	// deadlock, we just try to acquire the lock again (and will time out
	// eventually).
	// In cases where we would exceed the maximum number of read locks
	// throughout the whole time until the timeout, we will fail to
	// acquire the lock even if it would be logically free; however, this
	// is allowed by the standard, and we made a "strong effort"
	// (see C++14 30.4.1.4p26).
	// For cases where the implementation detects a deadlock we
	// intentionally block and timeout so that an early return isn't
	// mistaken for a spurious failure, which might help users realise
	// there is a deadlock.
	do
	  __ret = pthread_rwlock_timedrdlock(&_M_rwlock, &__ts);
	while (__ret == EAGAIN || __ret == EDEADLK);
	if (__ret == ETIMEDOUT)
	  return false;
	// Errors not handled: EINVAL
	__glibcxx_assert(__ret == 0);
	return true;
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
			    _Duration>& __abs_time)
      {
	// DR 887 - Sync unknown clock to known clock.
	const typename _Clock::time_point __c_entry = _Clock::now();
	const __clock_t::time_point __s_entry = __clock_t::now();
	const auto __delta = __abs_time - __c_entry;
	const auto __s_atime = __s_entry + __delta;
	return try_lock_shared_until(__s_atime);
      }

#else // ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)

    // Exclusive ownership

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_gate1.wait_until(__lk, __abs_time,
				 [=]{ return !_M_write_entered(); }))
	  {
	    return false;
	  }
	_M_state |= _S_write_entered;
	if (!_M_gate2.wait_until(__lk, __abs_time,
				 [=]{ return _M_readers() == 0; }))
	  {
	    _M_state ^= _S_write_entered;
	    // Wake all threads blocked while the write-entered flag was set.
	    _M_gate1.notify_all();
	    return false;
	  }
	return true;
      }

    // Shared ownership

    template <typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
			    _Duration>& __abs_time)
      {
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_gate1.wait_until(__lk, __abs_time,
				 [=]{ return _M_state < _S_max_readers; }))
	  {
	    return false;
	  }
	++_M_state;
	return true;
      }

#endif // _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  };
#endif // _GLIBCXX_HAS_GTHREADS
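  // A minimal usage sketch for shared_timed_mutex's timed operations
  // (illustrative only, not part of the standard header; `stm` is a
  // hypothetical variable).  try_lock_for bounds how long a writer waits,
  // and try_lock_shared_until waits for shared ownership up to an
  // absolute deadline:
  //
  //   std::shared_timed_mutex stm;
  //
  //   // Writer: give up after 10ms instead of blocking indefinitely.
  //   if (stm.try_lock_for(std::chrono::milliseconds(10)))
  //   {
  //     // ... exclusive access ...
  //     stm.unlock();
  //   }
  //
  //   // Reader: wait for shared ownership until an absolute deadline.
  //   auto deadline = std::chrono::system_clock::now()
  //                     + std::chrono::milliseconds(10);
  //   if (stm.try_lock_shared_until(deadline))
  //   {
  //     // ... shared access ...
  //     stm.unlock_shared();
  //   }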

  /// shared_lock
  template<typename _Mutex>
    class shared_lock
    {
    public:
      typedef _Mutex mutex_type;

      // Shared locking

      shared_lock() noexcept : _M_pm(nullptr), _M_owns(false) { }

      explicit
      shared_lock(mutex_type& __m)
      : _M_pm(std::__addressof(__m)), _M_owns(true)
      { __m.lock_shared(); }

      shared_lock(mutex_type& __m, defer_lock_t) noexcept
      : _M_pm(std::__addressof(__m)), _M_owns(false) { }

      shared_lock(mutex_type& __m, try_to_lock_t)
      : _M_pm(std::__addressof(__m)), _M_owns(__m.try_lock_shared()) { }

      shared_lock(mutex_type& __m, adopt_lock_t)
      : _M_pm(std::__addressof(__m)), _M_owns(true) { }

      template<typename _Clock, typename _Duration>
	shared_lock(mutex_type& __m,
		    const chrono::time_point<_Clock, _Duration>& __abs_time)
	: _M_pm(std::__addressof(__m)),
	  _M_owns(__m.try_lock_shared_until(__abs_time)) { }

      template<typename _Rep, typename _Period>
	shared_lock(mutex_type& __m,
		    const chrono::duration<_Rep, _Period>& __rel_time)
	: _M_pm(std::__addressof(__m)),
	  _M_owns(__m.try_lock_shared_for(__rel_time)) { }

      ~shared_lock()
      {
	if (_M_owns)
	  _M_pm->unlock_shared();
      }

      shared_lock(shared_lock const&) = delete;
      shared_lock& operator=(shared_lock const&) = delete;

      shared_lock(shared_lock&& __sl) noexcept : shared_lock()
      { swap(__sl); }

      shared_lock&
      operator=(shared_lock&& __sl) noexcept
      {
	shared_lock(std::move(__sl)).swap(*this);
	return *this;
      }

      void
      lock()
      {
	_M_lockable();
	_M_pm->lock_shared();
	_M_owns = true;
      }

      bool
      try_lock()
      {
	_M_lockable();
	return _M_owns = _M_pm->try_lock_shared();
      }

      template<typename _Rep, typename _Period>
	bool
	try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
	{
	  _M_lockable();
	  return _M_owns = _M_pm->try_lock_shared_for(__rel_time);
	}

      template<typename _Clock, typename _Duration>
	bool
	try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
	{
	  _M_lockable();
	  return _M_owns = _M_pm->try_lock_shared_until(__abs_time);
	}

      void
      unlock()
      {
	if (!_M_owns)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
	_M_pm->unlock_shared();
	_M_owns = false;
      }

      // Setters

      void
      swap(shared_lock& __u) noexcept
      {
	std::swap(_M_pm, __u._M_pm);
	std::swap(_M_owns, __u._M_owns);
      }

      mutex_type*
      release() noexcept
      {
	_M_owns = false;
	return std::exchange(_M_pm, nullptr);
      }

      // Getters

      bool owns_lock() const noexcept { return _M_owns; }

      explicit operator bool() const noexcept { return _M_owns; }

      mutex_type* mutex() const noexcept { return _M_pm; }

    private:
      void
      _M_lockable() const
      {
	if (_M_pm == nullptr)
	  __throw_system_error(int(errc::operation_not_permitted));
	if (_M_owns)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
      }

      mutex_type*	_M_pm;
      bool		_M_owns;
    };
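  // A minimal usage sketch for shared_lock (illustrative only, not part
  // of the standard header; `stm` is a hypothetical variable).  A lock
  // deferred at construction can be acquired later with a timeout, and
  // shared ownership is released automatically on scope exit:
  //
  //   std::shared_timed_mutex stm;
  //   std::shared_lock<std::shared_timed_mutex> lk(stm, std::defer_lock);
  //   if (lk.try_lock_for(std::chrono::milliseconds(5)))
  //   {
  //     // ... shared access; lk.owns_lock() is true ...
  //   } // ~shared_lock() calls unlock_shared() if still owned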

  /// Swap specialization for shared_lock
  template<typename _Mutex>
    void
    swap(shared_lock<_Mutex>& __x, shared_lock<_Mutex>& __y) noexcept
    { __x.swap(__y); }

#endif // _GLIBCXX_USE_C99_STDINT_TR1

  // @} group mutexes
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif // C++14

#endif // _GLIBCXX_SHARED_MUTEX