/*
 * Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef _HARDWARE_SYNC_H
#define _HARDWARE_SYNC_H

#include "pico.h"
#include "hardware/address_mapped.h"
#include "hardware/regs/sio.h"

#ifdef __cplusplus
extern "C" {
#endif

/** \file hardware/sync.h
 * \defgroup hardware_sync hardware_sync
 *
 * Low level hardware spin lock, barrier and processor event APIs
 *
 * Spin Locks
 * ----------
 *
 * The RP2040 provides 32 hardware spin locks, which can be used to manage mutually-exclusive access to shared software
 * and hardware resources.
 *
 * Generally each spin lock itself is a shared resource, i.e. the same hardware spin lock can be used by multiple
 * higher level primitives (as long as the spin locks are neither held for long periods, nor held concurrently with
 * other spin locks by the same core, which could lead to deadlock). A hardware spin lock that is exclusively owned
 * can be used on its own without regard to other software. Note that no hardware spin lock may be acquired
 * re-entrantly (i.e. hardware spin locks are not on their own safe for use by both thread code and IRQs); however,
 * the default spin lock related methods here (e.g. \ref spin_lock_blocking) always disable interrupts while the lock
 * is held, since use by both IRQ handlers and user code is common/desirable, and spin locks are only expected to be
 * held for brief periods.
 *
 * The SDK uses the following default spin lock assignments, classifying which spin locks are reserved for
 * exclusive/special purposes and which are suitable for more general shared use:
 *
 * Number (ID) | Description
 * :---------: | -----------
 * 0-13        | Currently reserved for exclusive use by the SDK and other libraries. If you use these spin locks, you risk breaking SDK or other library functionality. Each reserved spin lock used individually has its own PICO_SPINLOCK_ID so you can search for those.
 * 14,15       | (\ref PICO_SPINLOCK_ID_OS1 and \ref PICO_SPINLOCK_ID_OS2). Currently reserved for exclusive use by an operating system (or other system level software) co-existing with the SDK.
 * 16-23       | (\ref PICO_SPINLOCK_ID_STRIPED_FIRST - \ref PICO_SPINLOCK_ID_STRIPED_LAST). Spin locks from this range are assigned in a round-robin fashion via \ref next_striped_spin_lock_num(). These spin locks are shared, but assigning numbers from a range reduces the probability that two higher level locking primitives using _striped_ spin locks will actually be using the same spin lock.
 * 24-31       | (\ref PICO_SPINLOCK_ID_CLAIM_FREE_FIRST - \ref PICO_SPINLOCK_ID_CLAIM_FREE_LAST). These are reserved for exclusive use and are allocated on a first come first served basis at runtime via \ref spin_lock_claim_unused()
 */
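
// Example (an illustrative sketch, not part of this header): statically claiming a
// spin lock number and using it to guard a counter shared between cores and IRQ
// handlers. The lock number 26 (from the claim-free range) and the names
// `counter_lock` and `shared_counter` are hypothetical.
//
//     static spin_lock_t *counter_lock;
//     static volatile uint32_t shared_counter;
//
//     void counter_setup(void) {
//         spin_lock_claim(26);                // panics if 26 is already claimed
//         counter_lock = spin_lock_init(26);  // initially unlocked
//     }
//
//     void counter_increment(void) {
//         uint32_t save = spin_lock_blocking(counter_lock);  // IRQs disabled while held
//         shared_counter++;
//         spin_unlock(counter_lock, save);
//     }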

// PICO_CONFIG: PARAM_ASSERTIONS_ENABLED_SYNC, Enable/disable assertions in the HW sync module, type=bool, default=0, group=hardware_sync
#ifndef PARAM_ASSERTIONS_ENABLED_SYNC
#define PARAM_ASSERTIONS_ENABLED_SYNC 0
#endif

/** \brief A spin lock identifier
 * \ingroup hardware_sync
 */
typedef volatile uint32_t spin_lock_t;

// PICO_CONFIG: PICO_SPINLOCK_ID_IRQ, Spinlock ID for IRQ protection, min=0, max=31, default=9, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_IRQ
#define PICO_SPINLOCK_ID_IRQ 9
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_TIMER, Spinlock ID for Timer protection, min=0, max=31, default=10, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_TIMER
#define PICO_SPINLOCK_ID_TIMER 10
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_HARDWARE_CLAIM, Spinlock ID for Hardware claim protection, min=0, max=31, default=11, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_HARDWARE_CLAIM
#define PICO_SPINLOCK_ID_HARDWARE_CLAIM 11
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_RAND, Spinlock ID for Random Number Generator, min=0, max=31, default=12, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_RAND
#define PICO_SPINLOCK_ID_RAND 12
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_OS1, First Spinlock ID reserved for use by low level OS style software, min=0, max=31, default=14, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_OS1
#define PICO_SPINLOCK_ID_OS1 14
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_OS2, Second Spinlock ID reserved for use by low level OS style software, min=0, max=31, default=15, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_OS2
#define PICO_SPINLOCK_ID_OS2 15
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_STRIPED_FIRST, Lowest Spinlock ID in the 'striped' range, min=0, max=31, default=16, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_STRIPED_FIRST
#define PICO_SPINLOCK_ID_STRIPED_FIRST 16
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_STRIPED_LAST, Highest Spinlock ID in the 'striped' range, min=0, max=31, default=23, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_STRIPED_LAST
#define PICO_SPINLOCK_ID_STRIPED_LAST 23
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_CLAIM_FREE_FIRST, Lowest Spinlock ID in the 'claim free' range, min=0, max=31, default=24, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_CLAIM_FREE_FIRST
#define PICO_SPINLOCK_ID_CLAIM_FREE_FIRST 24
#endif

#ifdef PICO_SPINLOCK_ID_CLAIM_FREE_END
#warning PICO_SPINLOCK_ID_CLAIM_FREE_END has been renamed to PICO_SPINLOCK_ID_CLAIM_FREE_LAST
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_CLAIM_FREE_LAST, Highest Spinlock ID in the 'claim free' range, min=0, max=31, default=31, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_CLAIM_FREE_LAST
#define PICO_SPINLOCK_ID_CLAIM_FREE_LAST 31
#endif

/*! \brief Insert a SEV instruction into the code path.
 * \ingroup hardware_sync
 *
 * The SEV (send event) instruction sends an event to both cores.
 */
#if !__has_builtin(__sev)
__force_inline static void __sev(void) {
    pico_default_asm_volatile ("sev");
}
#endif

/*! \brief Insert a WFE instruction into the code path.
 * \ingroup hardware_sync
 *
 * The WFE (wait for event) instruction waits until one of a number of
 * events occurs, including events signalled by the SEV instruction on either core.
 */
#if !__has_builtin(__wfe)
__force_inline static void __wfe(void) {
    pico_default_asm_volatile ("wfe");
}
#endif
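
// Example (an illustrative sketch): a low power wait on one core for a flag set by
// the other. `g_ready` is a hypothetical shared flag. WFE can return for events
// other than SEV, so the condition is always re-checked in a loop.
//
//     static volatile bool g_ready;
//
//     // core 0: publish and wake
//     g_ready = true;
//     __sev();                 // wake any core waiting in __wfe()
//
//     // core 1: sleep until woken, then re-check
//     while (!g_ready) {
//         __wfe();
//     }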

/*! \brief Insert a WFI instruction into the code path.
 * \ingroup hardware_sync
 *
 * The WFI (wait for interrupt) instruction waits for an interrupt to wake up the core.
 */
#if !__has_builtin(__wfi)
__force_inline static void __wfi(void) {
    pico_default_asm_volatile("wfi");
}
#endif
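
// Example (an illustrative sketch): sleeping between recurring interrupts, e.g. a
// repeating timer IRQ configured elsewhere. `work_pending` is a hypothetical flag
// set by the IRQ handler; the recurring interrupt guarantees the loop is re-woken
// even if the flag is set between the check and the WFI.
//
//     while (!work_pending) {
//         __wfi();             // sleep until the next interrupt wakes the core
//     }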

/*! \brief Insert a DMB instruction into the code path.
 * \ingroup hardware_sync
 *
 * The DMB (data memory barrier) acts as a memory barrier: all memory accesses prior to this
 * instruction will be observed before any explicit access after the instruction.
 */
__force_inline static void __dmb(void) {
    pico_default_asm_volatile("dmb" : : : "memory");
}

/*! \brief Insert a DSB instruction into the code path.
 * \ingroup hardware_sync
 *
 * The DSB (data synchronization barrier) acts as a special kind of data
 * memory barrier (DMB). The DSB operation completes when all explicit memory
 * accesses before this instruction complete.
 */
__force_inline static void __dsb(void) {
    pico_default_asm_volatile("dsb" : : : "memory");
}

/*! \brief Insert an ISB instruction into the code path.
 * \ingroup hardware_sync
 *
 * ISB acts as an instruction synchronization barrier. It flushes the pipeline of the processor,
 * so that all instructions following the ISB are fetched from cache or memory again, after
 * the ISB instruction has completed.
 */
__force_inline static void __isb(void) {
    pico_default_asm_volatile("isb" ::: "memory");
}
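
// Example (an illustrative sketch): using __dmb() to publish data between cores via
// a flag, so the data write is observable before the flag write (and, on the reader
// side, the flag read is ordered before the data read). `g_data`, `g_data_valid`
// and `process` are hypothetical.
//
//     // producer (core 0)
//     g_data = 42;
//     __dmb();                              // order data write before flag write
//     g_data_valid = true;
//
//     // consumer (core 1)
//     while (!g_data_valid) tight_loop_contents();
//     __dmb();                              // order flag read before data read
//     process(g_data);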

/*! \brief Acquire a memory fence
 * \ingroup hardware_sync
 */
__force_inline static void __mem_fence_acquire(void) {
    // the original code below makes it hard for us to be included from C++ via a header
    // which itself is in an extern "C", so just use __dmb instead, which is what
    // is required on Cortex M0+
    __dmb();
//#ifndef __cplusplus
//    atomic_thread_fence(memory_order_acquire);
//#else
//    std::atomic_thread_fence(std::memory_order_acquire);
//#endif
}

/*! \brief Release a memory fence
 * \ingroup hardware_sync
 *
 */
__force_inline static void __mem_fence_release(void) {
    // the original code below makes it hard for us to be included from C++ via a header
    // which itself is in an extern "C", so just use __dmb instead, which is what
    // is required on Cortex M0+
    __dmb();
//#ifndef __cplusplus
//    atomic_thread_fence(memory_order_release);
//#else
//    std::atomic_thread_fence(std::memory_order_release);
//#endif
}
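
// Example (an illustrative sketch): the acquire/release pair bracketing a critical
// section, mirroring how the spin lock functions later in this header use them.
// `try_take()` and `give()` stand in for a hypothetical take/release mechanism.
//
//     while (!try_take()) {}     // spin until the resource is taken
//     __mem_fence_acquire();     // accesses below stay after the take
//     // ... critical section ...
//     __mem_fence_release();     // accesses above stay before the release
//     give();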

/*! \brief Save and disable interrupts
 * \ingroup hardware_sync
 *
 * \return The prior interrupt enable status for restoration later via restore_interrupts()
 */
__force_inline static uint32_t save_and_disable_interrupts(void) {
    uint32_t status;
    pico_default_asm_volatile(
            "mrs %0, PRIMASK\n"
            "cpsid i"
            : "=r" (status) ::);
    return status;
}

/*! \brief Restore interrupts to a specified state
 * \ingroup hardware_sync
 *
 * \param status Previous interrupt status from save_and_disable_interrupts()
 */
__force_inline static void restore_interrupts(uint32_t status) {
    pico_default_asm_volatile("msr PRIMASK,%0"::"r" (status) : );
}
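
// Example (an illustrative sketch): a short critical section protecting data shared
// with an IRQ handler on the same core. `g_ticks` is hypothetical. Note this does
// not exclude the other core; use a spin lock for cross-core mutual exclusion.
//
//     uint32_t save = save_and_disable_interrupts();
//     g_ticks++;
//     restore_interrupts(save);   // restores the previous PRIMASK state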

/*! \brief Get HW Spinlock instance from number
 * \ingroup hardware_sync
 *
 * \param lock_num Spinlock ID
 * \return The spinlock instance
 */
__force_inline static spin_lock_t *spin_lock_instance(uint lock_num) {
    invalid_params_if(SYNC, lock_num >= NUM_SPIN_LOCKS);
    return (spin_lock_t *) (SIO_BASE + SIO_SPINLOCK0_OFFSET + lock_num * 4);
}

/*! \brief Get HW Spinlock number from instance
 * \ingroup hardware_sync
 *
 * \param lock The Spinlock instance
 * \return The Spinlock ID
 */
__force_inline static uint spin_lock_get_num(spin_lock_t *lock) {
    invalid_params_if(SYNC, (uint) lock < SIO_BASE + SIO_SPINLOCK0_OFFSET ||
                            (uint) lock >= NUM_SPIN_LOCKS * sizeof(spin_lock_t) + SIO_BASE + SIO_SPINLOCK0_OFFSET ||
                            ((uint) lock - (SIO_BASE + SIO_SPINLOCK0_OFFSET)) % sizeof(spin_lock_t) != 0);
    return (uint) (lock - (spin_lock_t *) (SIO_BASE + SIO_SPINLOCK0_OFFSET));
}

/*! \brief Acquire a spin lock without disabling interrupts (hence unsafe)
 * \ingroup hardware_sync
 *
 * \param lock Spinlock instance
 */
__force_inline static void spin_lock_unsafe_blocking(spin_lock_t *lock) {
    // Note we don't do a wfe or anything, because by convention these spin locks are very short lived and never
    // block, and are held with interrupts disabled (to ensure that). Therefore nothing on our own core can be
    // blocking us, so we need only wait on the other core, which should be finished soon.
    while (__builtin_expect(!*lock, 0));
    __mem_fence_acquire();
}

/*! \brief Release a spin lock without re-enabling interrupts
 * \ingroup hardware_sync
 *
 * \param lock Spinlock instance
 */
__force_inline static void spin_unlock_unsafe(spin_lock_t *lock) {
    __mem_fence_release();
    *lock = 0;
}
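
// Example (an illustrative sketch): the unsafe variants suit contexts where
// interrupts are already disabled, e.g. an IRQ handler, provided the lock is never
// held with interrupts enabled elsewhere and no higher priority code on the same
// core can try to take it. `my_lock` and the handler are hypothetical.
//
//     void my_irq_handler(void) {
//         spin_lock_unsafe_blocking(my_lock);
//         // ... very short critical section ...
//         spin_unlock_unsafe(my_lock);
//     }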

/*! \brief Acquire a spin lock safely
 * \ingroup hardware_sync
 *
 * This function will disable interrupts prior to acquiring the spinlock
 *
 * \param lock Spinlock instance
 * \return interrupt status to be used when unlocking, to restore to original state
 */
__force_inline static uint32_t spin_lock_blocking(spin_lock_t *lock) {
    uint32_t save = save_and_disable_interrupts();
    spin_lock_unsafe_blocking(lock);
    return save;
}

/*! \brief Check to see if a spinlock is currently acquired elsewhere.
 * \ingroup hardware_sync
 *
 * \param lock Spinlock instance
 * \return true if the spin lock is currently locked
 */
inline static bool is_spin_locked(spin_lock_t *lock) {
    check_hw_size(spin_lock_t, 4);
    uint lock_num = spin_lock_get_num(lock);
    return 0 != (*(io_ro_32 *) (SIO_BASE + SIO_SPINLOCK_ST_OFFSET) & (1u << lock_num));
}

/*! \brief Release a spin lock safely
 * \ingroup hardware_sync
 *
 * This function will re-enable interrupts according to the parameters.
 *
 * \param lock Spinlock instance
 * \param saved_irq Return value from the \ref spin_lock_blocking() function.
 *
 * \sa spin_lock_blocking()
 */
__force_inline static void spin_unlock(spin_lock_t *lock, uint32_t saved_irq) {
    spin_unlock_unsafe(lock);
    restore_interrupts(saved_irq);
}
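
// Example (an illustrative sketch): the canonical safe pattern. The saved interrupt
// state returned by spin_lock_blocking() must be passed back to spin_unlock() so
// that nesting works even if interrupts were already disabled on entry. `lock` is a
// hypothetical, previously initialised spin lock.
//
//     uint32_t save = spin_lock_blocking(lock);   // IRQs off, lock held
//     // ... update state shared across cores/IRQs ...
//     spin_unlock(lock, save);                    // lock released, IRQ state restored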

/*! \brief Initialise a spin lock
 * \ingroup hardware_sync
 *
 * The spin lock is initially unlocked
 *
 * \param lock_num The spin lock number
 * \return The spin lock instance
 */
spin_lock_t *spin_lock_init(uint lock_num);

/*! \brief Release all spin locks
 * \ingroup hardware_sync
 */
void spin_locks_reset(void);

/*! \brief Return a spin lock number from the _striped_ range
 * \ingroup hardware_sync
 *
 * Returns a spin lock number in the range PICO_SPINLOCK_ID_STRIPED_FIRST to PICO_SPINLOCK_ID_STRIPED_LAST
 * in a round robin fashion. This does not grant the caller exclusive access to the spin lock, so the caller
 * must:
 *
 * -# Abide (with other callers) by the contract of only holding this spin lock briefly (and with IRQs disabled - the default via \ref spin_lock_blocking()),
 *    and not whilst holding other spin locks.
 * -# Be OK with the contention - brief, due to the above requirement - caused by other possible users of the spin lock.
 *
 * \return lock_num a spin lock number the caller may use (non exclusively)
 * \see PICO_SPINLOCK_ID_STRIPED_FIRST
 * \see PICO_SPINLOCK_ID_STRIPED_LAST
 */
uint next_striped_spin_lock_num(void);
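
// Example (an illustrative sketch): giving each instance of a hypothetical data
// structure its own (possibly shared) striped spin lock, spreading contention
// across the striped range rather than funnelling it through one lock.
//
//     typedef struct {
//         spin_lock_t *lock;
//         // ... other fields ...
//     } my_queue_t;
//
//     void my_queue_init(my_queue_t *q) {
//         q->lock = spin_lock_instance(next_striped_spin_lock_num());
//     }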

/*! \brief Mark a spin lock as used
 * \ingroup hardware_sync
 *
 * Method for cooperative claiming of hardware. Will cause a panic if the spin lock
 * is already claimed. Use of this method by libraries detects accidental
 * configurations that would fail in unpredictable ways.
 *
 * \param lock_num the spin lock number
 */
void spin_lock_claim(uint lock_num);

/*! \brief Mark multiple spin locks as used
 * \ingroup hardware_sync
 *
 * Method for cooperative claiming of hardware. Will cause a panic if any of the spin locks
 * are already claimed. Use of this method by libraries detects accidental
 * configurations that would fail in unpredictable ways.
 *
 * \param lock_num_mask Bitfield of all required spin locks to claim (bit 0 == spin lock 0, bit 1 == spin lock 1, etc.)
 */
void spin_lock_claim_mask(uint32_t lock_num_mask);

/*! \brief Mark a spin lock as no longer used
 * \ingroup hardware_sync
 *
 * Method for cooperative claiming of hardware.
 *
 * \param lock_num the spin lock number to release
 */
void spin_lock_unclaim(uint lock_num);

/*! \brief Claim a free spin lock
 * \ingroup hardware_sync
 *
 * \param required if true the function will panic if none are available
 * \return the spin lock number, or -1 if required was false and none were free
 */
int spin_lock_claim_unused(bool required);
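
// Example (an illustrative sketch): claiming a free spin lock for exclusive use at
// runtime. With `required` true this panics rather than returning -1, so the cast
// below is safe under that assumption.
//
//     int lock_num = spin_lock_claim_unused(true);
//     spin_lock_t *my_lock = spin_lock_init((uint) lock_num);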

/*! \brief Determine if a spin lock is claimed
 * \ingroup hardware_sync
 *
 * \param lock_num the spin lock number
 * \return true if claimed, false otherwise
 * \see spin_lock_claim
 * \see spin_lock_claim_mask
 */
bool spin_lock_is_claimed(uint lock_num);

// no longer use __mem_fence_acquire here, as it is overkill on cortex M0+
#define remove_volatile_cast(t, x) ({__compiler_memory_barrier(); Clang_Pragma("clang diagnostic push"); Clang_Pragma("clang diagnostic ignored \"-Wcast-qual\""); (t)(x); Clang_Pragma("clang diagnostic pop"); })

#ifdef __cplusplus
}
#endif

#endif