// © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
//
// SPDX-License-Identifier: BSD-3-Clause

#include <hyptypes.h>

#include <atomic.h>
#include <preempt.h>
#include <spinlock.h>

#include <events/spinlock.h>

#include <asm/event.h>

// Ticket spinlock implementation, used for multiprocessor builds on
// architectures that have event-wait instructions (i.e. ARMv7 and ARMv8). If
// there is no event-wait then a more cache-efficient (but more complex) lock
// may be preferable.

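// Typical usage is the usual acquire/release pairing around a critical
// section; a minimal illustrative sketch (not part of this module's API)
// follows:
//
//	spinlock_t lock;
//
//	spinlock_init(&lock);
//	...
//	spinlock_acquire(&lock);
//	// critical section
//	spinlock_release(&lock);
//
// The _nopreempt variants assume the caller has already disabled preemption.
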
void
spinlock_init(spinlock_t *lock)
{
	atomic_init(&lock->now_serving, 0);
	atomic_init(&lock->next_ticket, 0);
	trigger_spinlock_init_event(lock);
}

void
spinlock_acquire(spinlock_t *lock)
{
	// Disable preemption for the duration of the critical section; it is
	// re-enabled by spinlock_release().
	preempt_disable();
	spinlock_acquire_nopreempt(lock);
}

void
spinlock_acquire_nopreempt(spinlock_t *lock) LOCK_IMPL
{
	trigger_spinlock_acquire_event(lock);

	// Take a ticket
	uint16_t my_ticket = atomic_fetch_add_explicit(&lock->next_ticket, 1,
						       memory_order_relaxed);

	// Wait until our ticket is being served
	while (asm_event_load_before_wait(&lock->now_serving) != my_ticket) {
		asm_event_wait(&lock->now_serving);
	}

	trigger_spinlock_acquired_event(lock);
}

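// As an illustration of the ticket scheme above: if three CPUs call
// spinlock_acquire() on a newly initialised lock, they take tickets 0, 1 and
// 2 from next_ticket. now_serving starts at 0, so the first caller acquires
// immediately; each release increments now_serving, granting the lock to the
// remaining waiters in ticket (FIFO) order.
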
bool
spinlock_trylock(spinlock_t *lock)
{
	bool success;

	preempt_disable();
	success = spinlock_trylock_nopreempt(lock);
	if (!success) {
		// The lock was not taken, so don't leave preemption disabled.
		preempt_enable();
	}

	return success;
}

bool
spinlock_trylock_nopreempt(spinlock_t *lock) LOCK_IMPL
{
	trigger_spinlock_acquire_event(lock);

	// See which ticket is being served
	uint16_t now_serving = atomic_load_relaxed(&lock->now_serving);

	// Take a ticket, but only if it's being served already
	bool success = atomic_compare_exchange_strong_explicit(
		&lock->next_ticket, &now_serving, now_serving + 1U,
		memory_order_acquire, memory_order_relaxed);

	if (success) {
		trigger_spinlock_acquired_event(lock);
	} else {
		trigger_spinlock_failed_event(lock);
	}
	return success;
}

void
spinlock_release(spinlock_t *lock)
{
	spinlock_release_nopreempt(lock);
	// Re-enable preemption now that the lock has been dropped; this pairs
	// with the preempt_disable() in spinlock_acquire().
	preempt_enable();
}

void
spinlock_release_nopreempt(spinlock_t *lock) LOCK_IMPL
{
	uint16_t now_serving = atomic_load_relaxed(&lock->now_serving);

	trigger_spinlock_release_event(lock);

	// Start serving the next ticket
	asm_event_store_and_wake(&lock->now_serving, now_serving + 1U);

	trigger_spinlock_released_event(lock);
}

void
assert_spinlock_held(const spinlock_t *lock)
{
	assert_preempt_disabled();
	trigger_spinlock_assert_held_event(lock);
}