/*
 * Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef _HARDWARE_SYNC_H
#define _HARDWARE_SYNC_H

#include "pico.h"
#include "hardware/address_mapped.h"
#include "hardware/regs/sio.h"

#ifdef __cplusplus
extern "C" {
#endif


/** \file hardware/sync.h
 *  \defgroup hardware_sync hardware_sync
 *
 * Low-level hardware spin lock, barrier and processor event API
 *
 * Functions for synchronisation between the cores, hardware, etc.
 *
 * The RP2040 provides 32 hardware spin locks, which can be used to manage mutually-exclusive access to shared software
 * resources.
 *
 * \note spin locks 0-15 are currently reserved for fixed uses by the SDK - i.e. if you use them, other
 * functionality may break or not function optimally
 */

/** \brief A spin lock identifier
 * \ingroup hardware_sync
 */
typedef volatile uint32_t spin_lock_t;

// PICO_CONFIG: PICO_SPINLOCK_ID_IRQ, Spinlock ID for IRQ protection, min=0, max=31, default=9, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_IRQ
#define PICO_SPINLOCK_ID_IRQ 9
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_TIMER, Spinlock ID for Timer protection, min=0, max=31, default=10, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_TIMER
#define PICO_SPINLOCK_ID_TIMER 10
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_HARDWARE_CLAIM, Spinlock ID for Hardware claim protection, min=0, max=31, default=11, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_HARDWARE_CLAIM
#define PICO_SPINLOCK_ID_HARDWARE_CLAIM 11
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_STRIPED_FIRST, Spinlock ID for striped first, min=16, max=31, default=16, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_STRIPED_FIRST
#define PICO_SPINLOCK_ID_STRIPED_FIRST 16
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_STRIPED_LAST, Spinlock ID for striped last, min=16, max=31, default=23, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_STRIPED_LAST
#define PICO_SPINLOCK_ID_STRIPED_LAST 23
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_CLAIM_FREE_FIRST, Spinlock ID for claim free first, min=16, max=31, default=24, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_CLAIM_FREE_FIRST
#define PICO_SPINLOCK_ID_CLAIM_FREE_FIRST 24
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_CLAIM_FREE_END, Spinlock ID for claim free end, min=16, max=31, default=31, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_CLAIM_FREE_END
#define PICO_SPINLOCK_ID_CLAIM_FREE_END 31
#endif

// PICO_CONFIG: PARAM_ASSERTIONS_ENABLED_SYNC, Enable/disable assertions in the HW sync module, type=bool, default=0, group=hardware_sync
#ifndef PARAM_ASSERTIONS_ENABLED_SYNC
#define PARAM_ASSERTIONS_ENABLED_SYNC 0
#endif
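
/*
 * Illustrative note (not from the SDK source): the IDs above are ordinary
 * preprocessor defaults, so a build may override them before this header is
 * first included, for example:
 *
 *     // hypothetical build tweak: shrink the striped range, leaving locks
 *     // 21-23 unused by the striped allocator
 *     #define PICO_SPINLOCK_ID_STRIPED_LAST 20
 *     #include "hardware/sync.h"
 */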


/*! \brief Insert a SEV instruction into the code path.
 *  \ingroup hardware_sync
 *
 * The SEV (send event) instruction sends an event to both cores.
 */
inline static void __sev(void) {
    __asm volatile ("sev");
}

/*! \brief Insert a WFE instruction into the code path.
 *  \ingroup hardware_sync
 *
 * The WFE (wait for event) instruction waits until one of a number of
 * events occurs, including events signalled by the SEV instruction on either core.
 */
inline static void __wfe(void) {
    __asm volatile ("wfe");
}
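
/*
 * Illustrative sketch (not part of this header): one core can sleep until the
 * other signals it. `flag` is a hypothetical volatile variable shared between
 * the cores.
 *
 *     // waiting core: sleep until the flag is observed set
 *     while (!flag) __wfe();
 *
 *     // signalling core: set the flag, then wake any core waiting in WFE
 *     flag = true;
 *     __sev();
 */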

/*! \brief Insert a WFI instruction into the code path.
 *  \ingroup hardware_sync
 *
 * The WFI (wait for interrupt) instruction waits for an interrupt to wake up the core.
 */
inline static void __wfi(void) {
    __asm volatile ("wfi");
}
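
/*
 * Illustrative sketch (not part of this header): idle the core between
 * interrupts instead of busy-waiting. `work_pending` is a hypothetical
 * volatile flag set from an interrupt handler.
 *
 *     while (!work_pending)
 *         __wfi(); // sleep until any enabled interrupt fires
 */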

/*! \brief Insert a DMB instruction into the code path.
 *  \ingroup hardware_sync
 *
 * The DMB (data memory barrier) acts as a memory barrier: all memory accesses prior to this
 * instruction will be observed before any explicit access after the instruction.
 */
inline static void __dmb(void) {
    __asm volatile ("dmb");
}
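
/*
 * Illustrative sketch (not part of this header): publish data to the other
 * core by writing the payload before the ready flag, with a DMB in between so
 * the flag can never be observed before the data. `buf` and `ready` are
 * hypothetical shared variables.
 *
 *     buf[0] = value;
 *     __dmb();      // payload is visible before the flag below
 *     ready = true;
 */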

/*! \brief Insert an ISB instruction into the code path.
 *  \ingroup hardware_sync
 *
 * ISB acts as an instruction synchronization barrier. It flushes the pipeline of the processor,
 * so that all instructions following the ISB are fetched from cache or memory again, after
 * the ISB instruction has completed.
 */
inline static void __isb(void) {
    __asm volatile ("isb");
}

/*! \brief Acquire a memory fence
 *  \ingroup hardware_sync
 */
inline static void __mem_fence_acquire(void) {
    // the original code below makes it hard for us to be included from C++ via a header
    // which itself is in an extern "C", so just use __dmb instead, which is what
    // is required on Cortex-M0+
    __dmb();
//#ifndef __cplusplus
//    atomic_thread_fence(memory_order_acquire);
//#else
//    std::atomic_thread_fence(std::memory_order_acquire);
//#endif
}

/*! \brief Release a memory fence
 *  \ingroup hardware_sync
 */
inline static void __mem_fence_release(void) {
    // the original code below makes it hard for us to be included from C++ via a header
    // which itself is in an extern "C", so just use __dmb instead, which is what
    // is required on Cortex-M0+
    __dmb();
//#ifndef __cplusplus
//    atomic_thread_fence(memory_order_release);
//#else
//    std::atomic_thread_fence(std::memory_order_release);
//#endif
}
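
/*
 * Illustrative sketch (not part of this header): the consumer side of the
 * __dmb() hand-off above, with an acquire fence between reading the flag and
 * reading the payload. `buf` and `ready` are the same hypothetical variables,
 * and use() is a hypothetical consumer.
 *
 *     while (!ready) tight_loop_contents(); // busy-wait for the flag
 *     __mem_fence_acquire();                // flag read ordered before payload read
 *     use(buf[0]);
 */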

/*! \brief Save and disable interrupts
 *  \ingroup hardware_sync
 *
 * \return The prior interrupt enable status for restoration later via restore_interrupts()
 */
inline static uint32_t save_and_disable_interrupts(void) {
    uint32_t status;
    __asm volatile ("mrs %0, PRIMASK" : "=r" (status)::);
    __asm volatile ("cpsid i");
    return status;
}

/*! \brief Restore interrupts to a specified state
 *  \ingroup hardware_sync
 *
 * \param status Previous interrupt status from save_and_disable_interrupts()
 */
inline static void restore_interrupts(uint32_t status) {
    __asm volatile ("msr PRIMASK,%0"::"r" (status) : );
}
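
/*
 * Illustrative sketch (not part of this header): a minimal single-core
 * critical section. Note this only masks interrupts on the calling core; it
 * does not protect against the other core (use a spin lock for that).
 *
 *     uint32_t save = save_and_disable_interrupts();
 *     counter++;  // hypothetical state shared with an IRQ handler
 *     restore_interrupts(save);
 */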

/*! \brief Get HW Spinlock instance from number
 *  \ingroup hardware_sync
 *
 * \param lock_num Spinlock ID
 * \return The spinlock instance
 */
inline static spin_lock_t *spin_lock_instance(uint lock_num) {
    return (spin_lock_t *) (SIO_BASE + SIO_SPINLOCK0_OFFSET + lock_num * 4);
}

/*! \brief Get HW Spinlock number from instance
 *  \ingroup hardware_sync
 *
 * \param lock The Spinlock instance
 * \return The Spinlock ID
 */
inline static uint spin_lock_get_num(spin_lock_t *lock) {
    return lock - (spin_lock_t *) (SIO_BASE + SIO_SPINLOCK0_OFFSET);
}
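
/*
 * Illustrative sketch (not part of this header): the two helpers above are
 * inverses of each other.
 *
 *     spin_lock_t *lock = spin_lock_instance(PICO_SPINLOCK_ID_STRIPED_FIRST);
 *     uint n = spin_lock_get_num(lock); // n == PICO_SPINLOCK_ID_STRIPED_FIRST
 */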

/*! \brief Acquire a spin lock without disabling interrupts (hence unsafe)
 *  \ingroup hardware_sync
 *
 * \param lock Spinlock instance
 */
inline static void spin_lock_unsafe_blocking(spin_lock_t *lock) {
    // Note we don't do a wfe or anything, because by convention these spin locks are VERY SHORT LIVED,
    // never block, and run with INTERRUPTS disabled (to ensure that)... therefore nothing on our core
    // could be blocking us, so we just need to wait on the other core, which should be finished soon
    while (__builtin_expect(!*lock, 0));
    __mem_fence_acquire();
}

/*! \brief Release a spin lock without re-enabling interrupts
 *  \ingroup hardware_sync
 *
 * \param lock Spinlock instance
 */
inline static void spin_unlock_unsafe(spin_lock_t *lock) {
    __mem_fence_release();
    *lock = 0;
}

/*! \brief Acquire a spin lock safely
 *  \ingroup hardware_sync
 *
 * This function will disable interrupts prior to acquiring the spinlock
 *
 * \param lock Spinlock instance
 * \return interrupt status to be used when unlocking, to restore to original state
 */
inline static uint32_t spin_lock_blocking(spin_lock_t *lock) {
    uint32_t save = save_and_disable_interrupts();
    spin_lock_unsafe_blocking(lock);
    return save;
}

/*! \brief Check to see if a spinlock is currently acquired elsewhere.
 *  \ingroup hardware_sync
 *
 * \param lock Spinlock instance
 * \return true if the spinlock is currently locked
 */
inline static bool is_spin_locked(const spin_lock_t *lock) {
    check_hw_size(spin_lock_t, 4);
    uint32_t lock_num = lock - spin_lock_instance(0);
    return 0 != (*(io_ro_32 *) (SIO_BASE + SIO_SPINLOCK_ST_OFFSET) & (1u << lock_num));
}

/*! \brief Release a spin lock safely
 *  \ingroup hardware_sync
 *
 * This function will re-enable interrupts according to the parameters.
 *
 * \param lock Spinlock instance
 * \param saved_irq Return value from the \ref spin_lock_blocking() function.
 *
 * \sa spin_lock_blocking()
 */
inline static void spin_unlock(spin_lock_t *lock, uint32_t saved_irq) {
    spin_unlock_unsafe(lock);
    restore_interrupts(saved_irq);
}
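
/*
 * Illustrative sketch (not part of this header): guarding a short critical
 * section against both the other core and local interrupts. Assumes lock_num
 * identifies a free user lock (e.g. obtained via spin_lock_claim_unused(),
 * declared below); `shared` is a hypothetical variable.
 *
 *     spin_lock_t *lock = spin_lock_init(lock_num); // once, at startup
 *     ...
 *     uint32_t save = spin_lock_blocking(lock);
 *     shared++;                                     // critical section
 *     spin_unlock(lock, save);
 */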

/*! \brief Get the current core number
 *  \ingroup hardware_sync
 *
 * \return The core number the call was made from
 */
static inline uint get_core_num(void) {
    return (*(uint32_t *) (SIO_BASE + SIO_CPUID_OFFSET));
}

/*! \brief Initialise a spin lock
 *  \ingroup hardware_sync
 *
 * The spin lock is initially unlocked
 *
 * \param lock_num The spin lock number
 * \return The spin lock instance
 */
spin_lock_t *spin_lock_init(uint lock_num);

/*! \brief Release all spin locks
 *  \ingroup hardware_sync
 */
void spin_locks_reset(void);

/*! \brief Return a spin lock number from the _striped_ range
 *  \ingroup hardware_sync
 *
 * Returns a spin lock number in the range PICO_SPINLOCK_ID_STRIPED_FIRST to
 * PICO_SPINLOCK_ID_STRIPED_LAST in an incrementing fashion.
 *
 * \note the returned spin lock number is not claimed
 *
 * \return a spin lock number
 */
uint next_striped_spin_lock_num(void);

/*! \brief Mark a spin lock as used
 *  \ingroup hardware_sync
 *
 * Method for cooperative claiming of hardware. Will cause a panic if the spin lock
 * is already claimed. Use of this method by libraries detects accidental
 * configurations that would fail in unpredictable ways.
 *
 * \param lock_num the spin lock number
 */
void spin_lock_claim(uint lock_num);

/*! \brief Mark multiple spin locks as used
 *  \ingroup hardware_sync
 *
 * Method for cooperative claiming of hardware. Will cause a panic if any of the spin locks
 * are already claimed. Use of this method by libraries detects accidental
 * configurations that would fail in unpredictable ways.
 *
 * \param lock_num_mask Bitfield of all required spin locks to claim (bit 0 == spin lock 0, bit 1 == spin lock 1 etc)
 */
void spin_lock_claim_mask(uint32_t lock_num_mask);

/*! \brief Mark a spin lock as no longer used
 *  \ingroup hardware_sync
 *
 * Method for cooperative claiming of hardware.
 *
 * \param lock_num the spin lock number to release
 */
void spin_lock_unclaim(uint lock_num);

/*! \brief Claim a free spin lock
 *  \ingroup hardware_sync
 *
 * \param required if true the function will panic if none are available
 * \return the spin lock number, or -1 if required was false and none were free
 */
int spin_lock_claim_unused(bool required);
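
/*
 * Illustrative sketch (not part of this header): claiming a free user spin
 * lock at startup and releasing the claim when it is no longer needed.
 *
 *     int num = spin_lock_claim_unused(true); // panics if none are free
 *     spin_lock_t *lock = spin_lock_init((uint) num);
 *     ...
 *     spin_lock_unclaim((uint) num);
 */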

#define remove_volatile_cast(t, x) ({__mem_fence_acquire(); (t)(x); })

#ifdef __cplusplus
}
#endif

#endif