// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <lib/sync/mutex.h>

#include <stdatomic.h>
#include <zircon/syscalls.h>

// This mutex implementation is based on Ulrich Drepper's paper "Futexes
// Are Tricky" (dated November 5, 2011; see
// http://www.akkadia.org/drepper/futex.pdf).  We use the approach from
// "Mutex, Take 2", with one modification: We use an atomic swap in
// sync_mutex_unlock() rather than an atomic decrement.

16 // The value of UNLOCKED must be 0 to match C11's mtx.h and so that
17 // mutexes can be allocated in BSS segments (zero-initialized data).
18 enum {
19     UNLOCKED = 0,
20     LOCKED_WITHOUT_WAITERS = 1,
21     LOCKED_WITH_WAITERS = 2
22 };
23 
24 // On success, this will leave the mutex in the LOCKED_WITH_WAITERS state.
lock_slow_path(sync_mutex_t * mutex,zx_time_t deadline,int old_state)25 static zx_status_t lock_slow_path(sync_mutex_t* mutex, zx_time_t deadline,
26                                   int old_state) {
27     for (;;) {
28         // If the state shows there are already waiters, or we can update
29         // it to indicate that there are waiters, then wait.
30         if (old_state == LOCKED_WITH_WAITERS ||
31             (old_state == LOCKED_WITHOUT_WAITERS &&
32              atomic_compare_exchange_strong(&mutex->futex, &old_state,
33                                             LOCKED_WITH_WAITERS))) {
34             zx_status_t status = _zx_futex_wait(
35                     &mutex->futex, LOCKED_WITH_WAITERS, ZX_HANDLE_INVALID, deadline);
36             if (status == ZX_ERR_TIMED_OUT)
37                 return ZX_ERR_TIMED_OUT;
38         }
39 
40         // Try again to claim the mutex.  On this try, we must set the
41         // mutex state to LOCKED_WITH_WAITERS rather than
42         // LOCKED_WITHOUT_WAITERS.  This is because we could have been
43         // woken up when many threads are in the wait queue for the mutex.
44         old_state = UNLOCKED;
45         if (atomic_compare_exchange_strong(&mutex->futex, &old_state,
46                                            LOCKED_WITH_WAITERS)) {
47             return ZX_OK;
48         }
49     }
50 }
51 
sync_mutex_trylock(sync_mutex_t * mutex)52 zx_status_t sync_mutex_trylock(sync_mutex_t* mutex) {
53     int old_state = UNLOCKED;
54     if (atomic_compare_exchange_strong(&mutex->futex, &old_state,
55                                        LOCKED_WITHOUT_WAITERS)) {
56         return ZX_OK;
57     }
58     return ZX_ERR_BAD_STATE;
59 }
60 
sync_mutex_timedlock(sync_mutex_t * mutex,zx_time_t deadline)61 zx_status_t sync_mutex_timedlock(sync_mutex_t* mutex, zx_time_t deadline) {
62     // Try to claim the mutex.  This compare-and-swap executes the full
63     // memory barrier that locking a mutex is required to execute.
64     int old_state = UNLOCKED;
65     if (atomic_compare_exchange_strong(&mutex->futex, &old_state,
66                                        LOCKED_WITHOUT_WAITERS)) {
67         return ZX_OK;
68     }
69     return lock_slow_path(mutex, deadline, old_state);
70 }
71 
sync_mutex_lock(sync_mutex_t * mutex)72 void sync_mutex_lock(sync_mutex_t* mutex) __TA_NO_THREAD_SAFETY_ANALYSIS {
73     zx_status_t status = sync_mutex_timedlock(mutex, ZX_TIME_INFINITE);
74     if (status != ZX_OK) {
75         __builtin_trap();
76     }
77 }
78 
sync_mutex_lock_with_waiter(sync_mutex_t * mutex)79 void sync_mutex_lock_with_waiter(sync_mutex_t* mutex) __TA_NO_THREAD_SAFETY_ANALYSIS {
80     int old_state = UNLOCKED;
81     if (atomic_compare_exchange_strong(&mutex->futex, &old_state,
82                                        LOCKED_WITH_WAITERS)) {
83         return;
84     }
85     zx_status_t status = lock_slow_path(mutex, ZX_TIME_INFINITE, old_state);
86     if (status != ZX_OK) {
87         __builtin_trap();
88     }
89 }
90 
sync_mutex_unlock(sync_mutex_t * mutex)91 void sync_mutex_unlock(sync_mutex_t* mutex) __TA_NO_THREAD_SAFETY_ANALYSIS {
92     // Attempt to release the mutex.  This atomic swap executes the full
93     // memory barrier that unlocking a mutex is required to execute.
94     int old_state = atomic_exchange(&mutex->futex, UNLOCKED);
95 
96     // At this point, the mutex was unlocked.  In some usage patterns
97     // (e.g. for reference counting), another thread might now acquire the
98     // mutex and free the memory containing it.  This means we must not
99     // dereference |mutex| from this point onwards.
100 
101     switch (old_state) {
102         case LOCKED_WITHOUT_WAITERS:
103             // There were no waiters, so there is nothing more to do.
104             break;
105 
106         case LOCKED_WITH_WAITERS: {
107             // Note that the mutex's memory could have been freed and
108             // reused by this point, so this could cause a spurious futex
109             // wakeup for a unrelated user of the memory location.
110             zx_status_t status = _zx_futex_wake(&mutex->futex, 1);
111             if (status != ZX_OK) {
112                 __builtin_trap();
113             }
114             break;
115         }
116 
117         case UNLOCKED:
118         default:
119             // Either the mutex was unlocked (in which case the unlock call
120             // was invalid), or the mutex was in an invalid state.
121             __builtin_trap();
122             break;
123     }
124 }