/*
 * Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef _PICO_LOCK_CORE_H
#define _PICO_LOCK_CORE_H

#include "pico.h"
#include "pico/time.h"
#include "hardware/sync.h"

/** \file lock_core.h
 * \defgroup lock_core lock_core
 * \ingroup pico_sync
 * \brief base synchronization/lock primitive support
 *
 * Most of the pico_sync locking primitives contain a lock_core_t structure member. This currently just holds a spin
 * lock, which is used only to protect the contents of the rest of the structure as part of implementing the synchronization
 * primitive. As such, the spin_lock member of lock_core is never left held on return from any function for the primitive.
 *
 * \ref critical_section is an exceptional case in that it does not have a lock_core_t and simply wraps a spin lock, providing
 * methods to lock and unlock said spin lock.
 *
 * lock_core based structures work by locking the spin lock, checking state, and then deciding whether they additionally need to block
 * or notify when the spin lock is released. In the blocking case, they will wake up again in the future, and try the process again.
 *
 * By default the SDK just uses the processors' events via SEV and WFE for notification and blocking, as these are sufficient for
 * cross-core notification and for notification from interrupt handlers. However, macros are defined in this file that abstract the wait
 * and notify mechanisms, to allow the SDK locking functions to effectively be used within an RTOS or other environment.
 *
 * When implementing an RTOS, it is desirable for the SDK synchronization primitives that wait to block the calling task (and immediately yield),
 * and for those that notify to wake a blocked task which isn't on processor. At least the wait macro implementation needs to be atomic with the protecting
 * spin_lock unlock from the caller's point of view; i.e. the task should unlock the spin lock when it starts its wait. Such an implementation is
 * up to the RTOS integration; however, the macros are defined such that these operations are always combined into a single call
 * (so they can be performed atomically), even though the default implementation does not need this, as a WFE which starts
 * following the corresponding SEV is not missed.
 */

// PICO_CONFIG: PARAM_ASSERTIONS_ENABLED_LOCK_CORE, Enable/disable assertions in the lock core, type=bool, default=0, group=pico_sync
#ifndef PARAM_ASSERTIONS_ENABLED_LOCK_CORE
#define PARAM_ASSERTIONS_ENABLED_LOCK_CORE 0
#endif

/** \file lock_core.h
 * \ingroup lock_core
 *
 * Base implementation for locking primitives protected by a spin lock. The spin lock is only used to protect
 * access to the remaining lock state (in primitives using lock_core); it is never left locked outside
 * of the function implementations.
 */
struct lock_core {
    // spin lock protecting this lock's state
    spin_lock_t *spin_lock;

    // note any lock members in containing structures need not be volatile;
    // they are protected by memory/compiler barriers when acquiring and releasing spin locks
};

typedef struct lock_core lock_core_t;

/*! \brief Initialise a lock structure
 * \ingroup lock_core
 *
 * Initialize a lock structure, providing the spin lock number to use for protecting internal state.
 *
 * \param core Pointer to the lock_core to initialize
 * \param lock_num Spin lock number to use for the lock. As the spin lock is only used internally to the locking primitive
 *                 method implementations, this does not need to be globally unique; however, sharing a spin lock number may
 *                 increase contention
 */
void lock_init(lock_core_t *core, uint lock_num);
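
// As an illustration of the lock/check/block pattern described above, a minimal
// event-like primitive might be built on lock_core as sketched below. This is a
// hypothetical example, not an SDK API: simple_event_t and its functions do not
// exist in the SDK. It relies only on lock_init, the spin lock functions from
// hardware/sync.h, and the wait/notify macros defined later in this file.
//
//     typedef struct {
//         lock_core_t core;
//         bool signalled;
//     } simple_event_t;
//
//     void simple_event_init(simple_event_t *evt, uint lock_num) {
//         lock_init(&evt->core, lock_num);
//         evt->signalled = false;
//     }
//
//     void simple_event_wait(simple_event_t *evt) {
//         do {
//             uint32_t save = spin_lock_blocking(evt->core.spin_lock);
//             if (evt->signalled) {
//                 evt->signalled = false;
//                 spin_unlock(evt->core.spin_lock, save);
//                 return;
//             }
//             // atomically release the spin lock and wait for a notification
//             lock_internal_spin_unlock_with_wait(&evt->core, save);
//         } while (true);
//     }
//
//     void simple_event_signal(simple_event_t *evt) {
//         uint32_t save = spin_lock_blocking(evt->core.spin_lock);
//         evt->signalled = true;
//         // atomically release the spin lock and wake any waiter
//         lock_internal_spin_unlock_with_notify(&evt->core, save);
//     }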
#ifndef lock_owner_id_t
/*! \brief type to use to store the 'owner' of a lock.
 * \ingroup lock_core
 * By default this is int8_t as it only needs to store the core number or -1; however, it may be
 * overridden if a larger type is required (e.g. for an RTOS task id)
 */
#define lock_owner_id_t int8_t
#endif

#ifndef LOCK_INVALID_OWNER_ID
/*! \brief marker value to use for a lock_owner_id_t which does not refer to any valid owner
 * \ingroup lock_core
 */
#define LOCK_INVALID_OWNER_ID ((lock_owner_id_t)-1)
#endif

#ifndef lock_get_caller_owner_id
/*! \brief return the owner id for the caller
 * \ingroup lock_core
 * By default this returns the calling core number, but may be overridden (e.g. to return an RTOS task id)
 */
#define lock_get_caller_owner_id() ((lock_owner_id_t)get_core_num())
#ifndef lock_is_owner_id_valid
#define lock_is_owner_id_valid(id) ((id)>=0)
#endif
#endif

#ifndef lock_is_owner_id_valid
#define lock_is_owner_id_valid(id) ((id) != LOCK_INVALID_OWNER_ID)
#endif

#ifndef lock_internal_spin_unlock_with_wait
/*! \brief Atomically unlock the lock's spin lock, and wait for a notification.
 * \ingroup lock_core
 *
 * _Atomic_ here refers to the fact that it should not be possible for a concurrent lock_internal_spin_unlock_with_notify
 * to insert itself between the spin unlock and this wait in a way that the wait does not see the notification (i.e. causing
 * a missed notification). In other words this method should always wake up in response to a lock_internal_spin_unlock_with_notify
 * for the same lock which completes after this call starts.
 *
 * In an ideal implementation, this method would return exactly after the corresponding lock_internal_spin_unlock_with_notify
 * has subsequently been called on the same lock instance; however, this method is free to return at _any_ point before that.
 * This macro is _always_ used in a loop which locks the spin lock, checks the internal locking primitive state, and then
 * waits again if the calling thread should not proceed.
 *
 * By default this macro simply unlocks the spin lock, and then performs a WFE, but may be overridden
 * (e.g. to actually block the RTOS task).
 *
 * \param lock the lock_core for the primitive which needs to block
 * \param save the uint32_t value that should be passed to spin_unlock when the spin lock is unlocked (i.e. the `PRIMASK`
 *             state when the spin lock was acquired)
 */
#define lock_internal_spin_unlock_with_wait(lock, save) spin_unlock((lock)->spin_lock, save), __wfe()
#endif

#ifndef lock_internal_spin_unlock_with_notify
/*! \brief Atomically unlock the lock's spin lock, and send a notification
 * \ingroup lock_core
 *
 * _Atomic_ here refers to the fact that it should not be possible for this notification to happen during a
 * lock_internal_spin_unlock_with_wait in a way that the wait does not see the notification (i.e. causing
 * a missed notification). In other words this method should always wake up any lock_internal_spin_unlock_with_wait
 * which started before this call completes.
 *
 * In an ideal implementation, this method would wake up only the corresponding lock_internal_spin_unlock_with_wait
 * that has been called on the same lock instance; however, it is free to wake up any of them, as they will check
 * their condition and then re-wait if necessary.
 *
 * By default this macro simply unlocks the spin lock, and then performs a SEV, but may be overridden
 * (e.g. to actually un-block RTOS task(s)).
 *
 * \param lock the lock_core for the primitive which needs to notify
 * \param save the uint32_t value that should be passed to spin_unlock when the spin lock is unlocked (i.e. the PRIMASK
 *             state when the spin lock was acquired)
 */
#define lock_internal_spin_unlock_with_notify(lock, save) spin_unlock((lock)->spin_lock, save), __sev()
#endif
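
// An RTOS integration would typically override the two macros above so that a
// waiter blocks the calling task (and yields) and a notifier wakes blocked
// tasks. A minimal sketch, assuming hypothetical scheduler hooks
// my_rtos_block_and_unlock() and my_rtos_wake_waiters_and_unlock() (these are
// placeholders, not real APIs):
//
//     #define lock_internal_spin_unlock_with_wait(lock, save) \
//         my_rtos_block_and_unlock((lock)->spin_lock, save)
//
//     #define lock_internal_spin_unlock_with_notify(lock, save) \
//         my_rtos_wake_waiters_and_unlock((lock)->spin_lock, save)
//
// Because each macro receives the spin lock and the saved interrupt state in a
// single call, the RTOS can perform the unlock and the block/wake atomically
// from the caller's point of view, as required by the documentation above.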
#ifndef lock_internal_spin_unlock_with_best_effort_wait_or_timeout
/*! \brief Atomically unlock the lock's spin lock, and wait for a notification or a timeout
 * \ingroup lock_core
 *
 * _Atomic_ here refers to the fact that it should not be possible for a concurrent lock_internal_spin_unlock_with_notify
 * to insert itself between the spin unlock and this wait in a way that the wait does not see the notification (i.e. causing
 * a missed notification). In other words this method should always wake up in response to a lock_internal_spin_unlock_with_notify
 * for the same lock which completes after this call starts.
 *
 * In an ideal implementation, this method would return exactly after the corresponding lock_internal_spin_unlock_with_notify
 * has subsequently been called on the same lock instance or the timeout has been reached; however, this method is free to return
 * at _any_ point before that. This macro is _always_ used in a loop which locks the spin lock, checks the internal locking
 * primitive state, and then waits again if the calling thread should not proceed.
 *
 * By default this simply unlocks the spin lock, and then calls \ref best_effort_wfe_or_timeout,
 * but may be overridden (e.g. to actually block the RTOS task with a timeout).
 *
 * \param lock the lock_core for the primitive which needs to block
 * \param save the uint32_t value that should be passed to spin_unlock when the spin lock is unlocked (i.e. the PRIMASK
 *             state when the spin lock was acquired)
 * \param until the \ref absolute_time_t value
 * \return true if the timeout has been reached
 */
#define lock_internal_spin_unlock_with_best_effort_wait_or_timeout(lock, save, until) ({ \
    spin_unlock((lock)->spin_lock, save); \
    best_effort_wfe_or_timeout(until); \
})
#endif

#ifndef sync_internal_yield_until_before
/*! \brief yield to other processing until some time before the requested time
 * \ingroup lock_core
 *
 * This method is provided for cases where the caller has no useful work to do
 * until the specified time.
 *
 * By default this method does nothing, however it can be overridden (for example by an
 * RTOS which is able to block the current task until the scheduler tick before
 * the given time)
 *
 * \param until the \ref absolute_time_t value
 */
#define sync_internal_yield_until_before(until) ((void)0)
#endif
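
// Continuing the hypothetical simple_event_t sketch from earlier in this file, a
// timed wait built on lock_internal_spin_unlock_with_best_effort_wait_or_timeout
// might look like the following (illustrative only; not an SDK API):
//
//     bool simple_event_wait_until(simple_event_t *evt, absolute_time_t until) {
//         do {
//             uint32_t save = spin_lock_blocking(evt->core.spin_lock);
//             if (evt->signalled) {
//                 evt->signalled = false;
//                 spin_unlock(evt->core.spin_lock, save);
//                 return true;
//             }
//             // the macro returns true if the timeout was reached before a notification
//             if (lock_internal_spin_unlock_with_best_effort_wait_or_timeout(&evt->core, save, until)) {
//                 return false; // timed out before being signalled
//             }
//         } while (true);
//     }

#endif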