// Copyright 2017 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <arch/ops.h>
#include <arch/spinlock.h>
#include <kernel/atomic.h>

// We need to disable thread safety analysis in this file, since we're
// implementing the locks themselves. Without this, the header-level
// annotations cause Clang to detect violations.

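// A lock's value is zero when it is free; when held, it stores the holding
// CPU's number plus one, so the owner can be identified from the lock word.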
void arch_spin_lock(spin_lock_t* lock) TA_NO_THREAD_SAFETY_ANALYSIS {
    unsigned long val = arch_curr_cpu_num() + 1;
    uint64_t temp;

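    // sevl sets the event register so the first wfe falls through immediately;
    // after that, wait until the lock word reads as zero (load-acquire
    // exclusive), then try to claim it with an exclusive store, retrying the
    // whole sequence if the store fails.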
    __asm__ volatile(
        "sevl;"
        "1: wfe;"
        "ldaxr %[temp], [%[lock]];"
        "cbnz %[temp], 1b;"
        "stxr %w[temp], %[val], [%[lock]];"
        "cbnz %w[temp], 1b;"
        : [temp] "=&r"(temp)
        : [lock] "r"(&lock->value), [val] "r"(val)
        : "cc", "memory");
}

int arch_spin_trylock(spin_lock_t* lock) TA_NO_THREAD_SAFETY_ANALYSIS {
    unsigned long val = arch_curr_cpu_num() + 1;
    uint64_t out;

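    // Single attempt: if the lock word is already nonzero, skip the store and
    // return that nonzero value; otherwise stxr's status (0 on success,
    // nonzero on failure) becomes the return value, so 0 means the lock was
    // acquired.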
    __asm__ volatile(
        "ldaxr %[out], [%[lock]];"
        "cbnz %[out], 1f;"
        "stxr %w[out], %[val], [%[lock]];"
        "1:"
        : [out] "=&r"(out)
        : [lock] "r"(&lock->value), [val] "r"(val)
        : "cc", "memory");

    return (int)out;
}

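// Releasing the lock is a single atomic store of zero; its ordering keeps the
// critical section's writes visible before the lock appears free, and the
// write to the lock word clears other CPUs' exclusive monitors, waking any
// waiters parked in wfe.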
void arch_spin_unlock(spin_lock_t* lock) TA_NO_THREAD_SAFETY_ANALYSIS {
    __atomic_store_n(&lock->value, 0UL, __ATOMIC_SEQ_CST);
}