1 /*
2 * FreeRTOS Kernel <DEVELOPMENT BRANCH>
3 * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
4 * Copyright (c) 2021 Raspberry Pi (Trading) Ltd.
5 *
6 * SPDX-License-Identifier: MIT AND BSD-3-Clause
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy of
9 * this software and associated documentation files (the "Software"), to deal in
10 * the Software without restriction, including without limitation the rights to
11 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
12 * the Software, and to permit persons to whom the Software is furnished to do so,
13 * subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in all
16 * copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
20 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
21 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
22 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
23 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 * https://www.FreeRTOS.org
26 * https://github.com/FreeRTOS
27 *
28 */
29
30 /*----------------------------------------------------------------------
31 * Implementation of functions defined in portable.h for the RP2040 port.
32 *----------------------------------------------------------------------*/
33
34 #include "FreeRTOS.h"
35 #include "task.h"
36 #include "rp2040_config.h"
37 #include "hardware/clocks.h"
38 #include "hardware/exception.h"
39
40 /*
41 * LIB_PICO_MULTICORE == 1, if we are linked with pico_multicore (note that
42 * the non SMP FreeRTOS_Kernel is not linked with pico_multicore itself). We
43 * use this flag to determine if we need multi-core functionality.
44 */
45 #if ( LIB_PICO_MULTICORE == 1 )
46 #include "pico/multicore.h"
47 #endif /* LIB_PICO_MULTICORE */
48
49 /* Constants required to manipulate the NVIC. */
50 #define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) )
51 #define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) )
52 #define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) )
53 #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
54 #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) )
55 #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL )
56 #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL )
57 #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL )
58 #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL )
59 #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
60 #define portMIN_INTERRUPT_PRIORITY ( 255UL )
61 #define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL )
62 #define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL )
63
64 /* Constants required to set up the initial stack. */
65 #define portINITIAL_XPSR ( 0x01000000 )
66
67 /* The systick is a 24-bit counter. */
68 #define portMAX_24_BIT_NUMBER ( 0xffffffUL )
69
70 /* A fiddle factor to estimate the number of SysTick counts that would have
71 * occurred while the SysTick counter is stopped during tickless idle
72 * calculations. */
73 #ifndef portMISSED_COUNTS_FACTOR
74 #define portMISSED_COUNTS_FACTOR ( 45UL )
75 #endif
76
77 /* Let the user override the pre-loading of the initial LR with the address of
78 * prvTaskExitError() in case it messes up unwinding of the stack in the
79 * debugger. */
80 #ifdef configTASK_RETURN_ADDRESS
81 #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS
82 #else
83 #define portTASK_RETURN_ADDRESS prvTaskExitError
84 #endif
85
86 /*
87 * Setup the timer to generate the tick interrupts. The implementation in this
88 * file is weak to allow application writers to change the timer used to
89 * generate the tick interrupt.
90 */
91 void vPortSetupTimerInterrupt( void );
92
93 /*
94 * Exception handlers.
95 */
96 void xPortPendSVHandler( void ) __attribute__( ( naked ) );
97 void xPortSysTickHandler( void );
98 void vPortSVCHandler( void );
99
100 /*
101 * Start first task is a separate function so it can be tested in isolation.
102 */
103 static void vPortStartFirstTask( void ) __attribute__( ( naked ) );
104
105 /*
106 * Used to catch tasks that attempt to return from their implementing function.
107 */
108 static void prvTaskExitError( void );
109
110 /*-----------------------------------------------------------*/
111
112 /* Each task maintains its own interrupt status in the critical nesting
113 * variable. This is initialized to 0 to allow vPortEnter/ExitCritical
114 * to be called before the scheduler is started */
115 #if ( configNUMBER_OF_CORES == 1 )
116 static UBaseType_t uxCriticalNesting;
117 #else /* #if ( configNUMBER_OF_CORES == 1 ) */
118 UBaseType_t uxCriticalNestings[ configNUMBER_OF_CORES ] = { 0 };
119 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
120
121 /*-----------------------------------------------------------*/
122
123 #if ( configSUPPORT_PICO_SYNC_INTEROP == 1 || configNUMBER_OF_CORES > 1 )
124 #include "hardware/irq.h"
125 #endif /* ( configSUPPORT_PICO_SYNC_INTEROP == 1 || configNUMBER_OF_CORES > 1 ) */
126 #if ( configSUPPORT_PICO_SYNC_INTEROP == 1 )
127 #include "pico/lock_core.h"
128 #include "event_groups.h"
129 #if configSUPPORT_STATIC_ALLOCATION
130 static StaticEventGroup_t xStaticEventGroup;
131 #define pEventGroup ( &xStaticEventGroup )
132 #endif /* configSUPPORT_STATIC_ALLOCATION */
133 static EventGroupHandle_t xEventGroup;
134 #if ( configNUMBER_OF_CORES == 1 )
135 static EventBits_t uxCrossCoreEventBits;
136 static spin_lock_t * pxCrossCoreSpinLock; /* protects uxCrossCoreEventBits */
137 #endif
138 #endif /* configSUPPORT_PICO_SYNC_INTEROP */
139
140 /*
141 * The number of SysTick increments that make up one tick period.
142 */
143 #if ( configUSE_TICKLESS_IDLE == 1 )
144 static uint32_t ulTimerCountsForOneTick = 0;
145 #endif /* configUSE_TICKLESS_IDLE */
146
147 /*
148 * The maximum number of tick periods that can be suppressed is limited by the
149 * 24 bit resolution of the SysTick timer.
150 */
151 #if ( configUSE_TICKLESS_IDLE == 1 )
152 static uint32_t xMaximumPossibleSuppressedTicks = 0;
153 #endif /* configUSE_TICKLESS_IDLE */
154
/*
 * Compensate for the CPU cycles that pass while the SysTick is stopped (low
 * power functionality only).
 */
159 #if ( configUSE_TICKLESS_IDLE == 1 )
160 static uint32_t ulStoppedTimerCompensation = 0;
161 #endif /* configUSE_TICKLESS_IDLE */
162
163 /*-----------------------------------------------------------*/
164
165 #define INVALID_PRIMARY_CORE_NUM 0xffu
/* The primary core number (the one that owns the SysTick handler) */
167 static uint8_t ucPrimaryCoreNum = INVALID_PRIMARY_CORE_NUM;
168
169 /* Note: portIS_FREE_RTOS_CORE() also returns false until the scheduler is started */
170 #if ( configNUMBER_OF_CORES != 1 )
171 #define portIS_FREE_RTOS_CORE() ( ucPrimaryCoreNum != INVALID_PRIMARY_CORE_NUM )
172 #else
173 #define portIS_FREE_RTOS_CORE() ( ucPrimaryCoreNum == get_core_num() )
174 #endif
175
176 /*
177 * See header file for description.
178 */
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
                                     TaskFunction_t pxCode,
                                     void * pvParameters )
{
    /* Build the fake exception stack frame a context switch would restore,
     * so the task starts exactly as if it had been pre-empted. */
    StackType_t * pxFrame = pxTopOfStack;

    pxFrame--;                                          /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
    *pxFrame = portINITIAL_XPSR;                        /* xPSR - Thumb state bit must be set. */
    pxFrame--;
    *pxFrame = ( StackType_t ) pxCode;                  /* PC - the task entry point. */
    pxFrame--;
    *pxFrame = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR - traps tasks that try to return. */
    pxFrame -= 5;                                       /* Skip R12, R3, R2 and R1. */
    *pxFrame = ( StackType_t ) pvParameters;            /* R0 - the task parameter. */
    pxFrame -= 8;                                       /* Space for R11..R4. */

    return pxFrame;
}
197 /*-----------------------------------------------------------*/
198
static void prvTaskExitError( void )
{
    /* A function that implements a task must not exit or attempt to return to
     * its caller as there is nothing to return to. If a task wants to exit it
     * should instead call vTaskDelete( NULL ).
     *
     * This address is pre-loaded into each task's initial LR (unless the
     * application overrides configTASK_RETURN_ADDRESS), so falling off the
     * end of a task function lands here and halts via the SDK panic. */
    panic_unsupported();
}
206 /*-----------------------------------------------------------*/
207
void vPortSVCHandler( void )
{
    /* This function is no longer used, but retained for backward
     * compatibility. The first task is started by vPortStartFirstTask()
     * rather than via an SVC exception, so this handler is intentionally
     * empty. */
}
213 /*-----------------------------------------------------------*/
214
void vPortStartFirstTask( void )
{
    /* Naked start-up stub: loads the first task's stack pointer from its
     * TCB, switches the CPU to the process stack (PSP), manually unstacks
     * the hardware-saved frame that pxPortInitialiseStack() built, then
     * jumps to the task with interrupts enabled. */
    #if ( configNUMBER_OF_CORES == 1 )
        __asm volatile (
            " .syntax unified \n"
            " ldr r2, pxCurrentTCBConst1 \n" /* Obtain location of pxCurrentTCB. */
            " ldr r3, [r2] \n"
            " ldr r0, [r3] \n" /* The first item in pxCurrentTCB is the task top of stack. */
            " adds r0, #32 \n" /* Discard everything up to r0. */
            " msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
            " movs r0, #2 \n" /* Switch to the psp stack. */
            " msr CONTROL, r0 \n"
            " isb \n"
            " pop {r0-r5} \n" /* Pop the registers that are saved automatically. */
            " mov lr, r5 \n" /* lr is now in r5. */
            " pop {r3} \n" /* Return address is now in r3. */
            " pop {r2} \n" /* Pop and discard XPSR. */
            " cpsie i \n" /* The first task has its context and interrupts can be enabled. */
            " bx r3 \n" /* Finally, jump to the user defined task code. */
            " .align 4 \n"
            "pxCurrentTCBConst1: .word pxCurrentTCB\n"
            );
    #else /* if ( configNUMBER_OF_CORES == 1 ) */
        __asm volatile (
            " .syntax unified \n"
            #if configRESET_STACK_POINTER
                " ldr r0, =0xE000ED08 \n" /* Use the NVIC offset register to locate the stack. */
                " ldr r0, [r0] \n"
                " ldr r0, [r0] \n"
                " msr msp, r0 \n" /* Set the msp back to the start of the stack. */
            #endif /* configRESET_STACK_POINTER */
            #if ( configNUMBER_OF_CORES != 1 )
                " adr r1, ulAsmLocals \n" /* Get the location of the current TCB for the current core. */
                " ldmia r1!, {r2, r3} \n"
                " ldr r2, [r2] \n" /* r2 = Core number */
                " lsls r2, #2 \n"
                " ldr r3, [r3, r2] \n" /* r3 = pxCurrentTCBs[get_core_num()] */
            #else /* configNUMBER_OF_CORES != 1 */
                " ldr r3, =pxCurrentTCBs \n"
                " ldr r3, [r3] \n" /* r3 = pxCurrentTCBs[0] */
            #endif /* configNUMBER_OF_CORES != 1 */
            " ldr r0, [r3] \n" /* The first item in pxCurrentTCB is the task top of stack. */
            " adds r0, #32 \n" /* Discard everything up to r0. */
            " msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
            " movs r0, #2 \n" /* Switch to the psp stack. */
            " msr CONTROL, r0 \n"
            " isb \n"
            " pop {r0-r5} \n" /* Pop the registers that are saved automatically. */
            " mov lr, r5 \n" /* lr is now in r5. */
            " pop {r3} \n" /* Return address is now in r3. */
            " pop {r2} \n" /* Pop and discard XPSR. */
            " cpsie i \n" /* The first task has its context and interrupts can be enabled. */
            " bx r3 \n" /* Finally, jump to the user defined task code. */
            #if configNUMBER_OF_CORES != 1
                " \n"
                " .align 4 \n"
                "ulAsmLocals: \n"
                " .word 0xD0000000 \n" /* SIO base address - first word is the core number register. */
                " .word pxCurrentTCBs \n"
            #endif /* configNUMBER_OF_CORES != 1 */
            );
    #endif /* if ( configNUMBER_OF_CORES == 1 ) */
}
278 /*-----------------------------------------------------------*/
279
280 #if ( LIB_PICO_MULTICORE == 1 ) && ( configSUPPORT_PICO_SYNC_INTEROP == 1 )
prvFIFOInterruptHandler()281 static void prvFIFOInterruptHandler()
282 {
283 /* We must remove the contents (which we don't care about)
284 * to clear the IRQ */
285 multicore_fifo_drain();
286
287 /* And explicitly clear any other IRQ flags. */
288 multicore_fifo_clear_irq();
289
290 #if ( configNUMBER_OF_CORES != 1 )
291 portYIELD_FROM_ISR( pdTRUE );
292 #elif ( configSUPPORT_PICO_SYNC_INTEROP == 1 )
293 BaseType_t xHigherPriorityTaskWoken = pdFALSE;
294 uint32_t ulSave = spin_lock_blocking( pxCrossCoreSpinLock );
295 EventBits_t ulBits = uxCrossCoreEventBits;
296 uxCrossCoreEventBits &= ~ulBits;
297 spin_unlock( pxCrossCoreSpinLock, ulSave );
298 xEventGroupSetBitsFromISR( xEventGroup, ulBits, &xHigherPriorityTaskWoken );
299 portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
300 #endif /* configNUMBER_OF_CORES != 1 */
301 }
302 #endif /* if ( LIB_PICO_MULTICORE == 1 ) && ( configSUPPORT_PICO_SYNC_INTEROP == 1 ) */
303
304 #if ( configNUMBER_OF_CORES > 1 )
305
306 /*
307 * See header file for description.
308 */
xPortStartSchedulerOnCore()309 static BaseType_t xPortStartSchedulerOnCore()
310 {
311 if( ucPrimaryCoreNum == get_core_num() )
312 {
313 /* Start the timer that generates the tick ISR. Interrupts are disabled
314 * here already. */
315 vPortSetupTimerInterrupt();
316
317 /* Make PendSV, CallSV and SysTick the same priority as the kernel. */
318 portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI;
319 #if ( configUSE_DYNAMIC_EXCEPTION_HANDLERS == 1 )
320 exception_set_exclusive_handler( SYSTICK_EXCEPTION, xPortSysTickHandler );
321 #endif
322 }
323
324 portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI;
325
326 #if ( configUSE_DYNAMIC_EXCEPTION_HANDLERS == 1 )
327 exception_set_exclusive_handler( PENDSV_EXCEPTION, xPortPendSVHandler );
328 exception_set_exclusive_handler( SVCALL_EXCEPTION, vPortSVCHandler );
329 #endif
330
331 /* Install FIFO handler to receive interrupt from other core */
332 multicore_fifo_clear_irq();
333 multicore_fifo_drain();
334 uint32_t ulIRQNum = SIO_IRQ_PROC0 + get_core_num();
335 irq_set_priority( ulIRQNum, portMIN_INTERRUPT_PRIORITY );
336 irq_set_exclusive_handler( ulIRQNum, prvFIFOInterruptHandler );
337 irq_set_enabled( ulIRQNum, 1 );
338
339 /* Start the first task. */
340 vPortStartFirstTask();
341
342 /* Should never get here as the tasks will now be executing! Call the task
343 * exit error function to prevent compiler warnings about a static function
344 * not being called in the case that the application writer overrides this
345 * functionality by defining configTASK_RETURN_ADDRESS. Call
346 * vTaskSwitchContext() so link time optimization does not remove the
347 * symbol. */
348 vTaskSwitchContext( portGET_CORE_ID() );
349 prvTaskExitError();
350
351 /* Should not get here. */
352 return 0;
353 }
354
/* Entry point passed to multicore_launch_core1(): interrupts must be
 * disabled before the per-core scheduler start-up runs. */
static void prvDisableInterruptsAndPortStartSchedulerOnCore( void )
{
    portDISABLE_INTERRUPTS();
    xPortStartSchedulerOnCore(); /* Does not return. */
}
360
361 /*
362 * See header file for description.
363 */
BaseType_t xPortStartScheduler( void )
{
    /* The scheduler must not already have been started. */
    configASSERT( ucPrimaryCoreNum == INVALID_PRIMARY_CORE_NUM );

    /* No one else should use these! */
    spin_lock_claim( configSMP_SPINLOCK_0 );
    spin_lock_claim( configSMP_SPINLOCK_1 );

    #if configNUMBER_OF_CORES != 1
        /* Designate the tick core, then bring up core 1 with its own copy
         * of the per-core start-up routine. */
        ucPrimaryCoreNum = configTICK_CORE;
        configASSERT( get_core_num() == 0 ); /* we must be started on core 0 */
        multicore_reset_core1();
        multicore_launch_core1( prvDisableInterruptsAndPortStartSchedulerOnCore );
    #else
        ucPrimaryCoreNum = get_core_num();
    #endif
    xPortStartSchedulerOnCore();

    /* Should not get here! */
    return 0;
}
385
386 #else /* if ( configNUMBER_OF_CORES > 1 ) */
387
388 /*
389 * See header file for description.
390 */
BaseType_t xPortStartScheduler( void )
{
    /* Make PendSV, CallSV and SysTick the same priority as the kernel. */
    portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI;
    portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI;

    #if ( configUSE_DYNAMIC_EXCEPTION_HANDLERS == 1 )
        exception_set_exclusive_handler( PENDSV_EXCEPTION, xPortPendSVHandler );
        exception_set_exclusive_handler( SYSTICK_EXCEPTION, xPortSysTickHandler );
        exception_set_exclusive_handler( SVCALL_EXCEPTION, vPortSVCHandler );
    #endif

    /* Start the timer that generates the tick ISR. Interrupts are disabled
     * here already. */
    vPortSetupTimerInterrupt();

    /* Initialise the critical nesting count ready for the first task. */
    uxCriticalNesting = 0;

    /* From here on portIS_FREE_RTOS_CORE() reports true on this core. */
    ucPrimaryCoreNum = get_core_num();
    #if ( LIB_PICO_MULTICORE == 1 )
        #if ( configSUPPORT_PICO_SYNC_INTEROP == 1 )
            /* Install the FIFO IRQ handler used by the pico_sync interop so
             * the other (non-FreeRTOS) core can signal event bits. */
            multicore_fifo_clear_irq();
            multicore_fifo_drain();
            uint32_t irq_num = SIO_IRQ_PROC0 + get_core_num();
            irq_set_priority( irq_num, portMIN_INTERRUPT_PRIORITY );
            irq_set_exclusive_handler( irq_num, prvFIFOInterruptHandler );
            irq_set_enabled( irq_num, 1 );
        #endif
    #endif

    /* Start the first task. */
    vPortStartFirstTask();

    /* Should never get here as the tasks will now be executing! Call the task
     * exit error function to prevent compiler warnings about a static function
     * not being called in the case that the application writer overrides this
     * functionality by defining configTASK_RETURN_ADDRESS. Call
     * vTaskSwitchContext() so link time optimization does not remove the
     * symbol. */
    vTaskSwitchContext();
    prvTaskExitError();

    /* Should not get here! */
    return 0;
}
437 #endif /* if ( configNUMBER_OF_CORES > 1 ) */
438
439 /*-----------------------------------------------------------*/
440
void vPortEndScheduler( void )
{
    /* Not implemented in ports where there is nothing to return to.
     * Artificially force an assert - the comparison can never be true. */
    configASSERT( portGET_CORE_ID() == 1000UL );
}
447 /*-----------------------------------------------------------*/
448
void vPortYield( void )
{
    /* Set a PendSV to request a context switch. The switch itself happens
     * in xPortPendSVHandler() when the (lowest priority) PendSV exception
     * is taken. */
    portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;

    /* Barriers are normally not required but do ensure the code is completely
     * within the specified behaviour for the architecture. */
    __asm volatile ( "dsb" ::: "memory" );
    __asm volatile ( "isb" );
}
459
460 /*-----------------------------------------------------------*/
461
462 #if ( configNUMBER_OF_CORES == 1 )
    void vPortEnterCritical( void )
    {
        /* Disable interrupts first so the nesting count update cannot be
         * pre-empted; critical sections may nest. */
        portDISABLE_INTERRUPTS();
        uxCriticalNesting++;
        __asm volatile ( "dsb" ::: "memory" );
        __asm volatile ( "isb" );
    }
470 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
471 /*-----------------------------------------------------------*/
472
473 #if ( configNUMBER_OF_CORES == 1 )
    void vPortExitCritical( void )
    {
        /* Unbalanced exit calls are a caller bug. */
        configASSERT( uxCriticalNesting );
        uxCriticalNesting--;

        /* Only re-enable interrupts when the outermost critical section is
         * exited. */
        if( uxCriticalNesting == 0 )
        {
            portENABLE_INTERRUPTS();
        }
    }
484 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
485
486 /*-----------------------------------------------------------*/
487
uint32_t ulSetInterruptMaskFromISR( void )
{
    /* Returns the previous PRIMASK value and disables interrupts. There is
     * deliberately no C return statement: the asm leaves the old PRIMASK in
     * r0 (the AAPCS return register) and branches back to the caller via
     * lr, so the compiler epilogue is bypassed. */
    __asm volatile (
        " mrs r0, PRIMASK \n"
        " cpsid i \n"
        " bx lr "
        ::: "memory"
        );
}
497 /*-----------------------------------------------------------*/
498
void vClearInterruptMaskFromISR( __attribute__( ( unused ) ) uint32_t ulMask )
{
    /* Restores PRIMASK from ulMask. The parameter is "unused" only from
     * C's point of view: per the AAPCS the first argument arrives in r0,
     * which the asm writes straight back to PRIMASK before returning via
     * lr. */
    __asm volatile (
        " msr PRIMASK, r0 \n"
        " bx lr "
        ::: "memory"
        );
}
507
508 /*-----------------------------------------------------------*/
509
void vYieldCore( int xCoreID )
{
    /* Remove warning if configASSERT is not defined. xCoreID is otherwise
     * unused because this is a dual-core system: the core to yield is
     * necessarily the one core that is not the current core. */
    ( void ) xCoreID;

    configASSERT( xCoreID != ( int ) portGET_CORE_ID() );

    #if configNUMBER_OF_CORES != 1

        /* Non blocking, will cause interrupt on other core if the queue isn't already full,
         * in which case an IRQ must be pending */
        sio_hw->fifo_wr = 0;
    #endif
}
525
526 /*-----------------------------------------------------------*/
527
void xPortPendSVHandler( void )
{
    /* This is a naked function. */

    /* PendSV performs the context switch: it saves the callee-saved
     * registers (and, when portUSE_DIVIDER_SAVE_RESTORE is set, the SIO
     * hardware divider state) onto the current task's stack, calls
     * vTaskSwitchContext() with interrupts disabled, then restores the
     * context of the task selected to run next. */
    #if ( configNUMBER_OF_CORES == 1 )
        __asm volatile
        (
            " .syntax unified \n"
            " mrs r0, psp \n"
            " \n"
            " ldr r3, pxCurrentTCBConst2 \n" /* Get the location of the current TCB. */
            " ldr r2, [r3] \n"
            " \n"
            " subs r0, r0, #32 \n" /* Make space for the remaining low registers. */
            " str r0, [r2] \n" /* Save the new top of stack. */
            " stmia r0!, {r4-r7} \n" /* Store the low registers that are not saved automatically. */
            " mov r4, r8 \n" /* Store the high registers. */
            " mov r5, r9 \n"
            " mov r6, r10 \n"
            " mov r7, r11 \n"
            " stmia r0!, {r4-r7} \n"
            #if portUSE_DIVIDER_SAVE_RESTORE
                " movs r2, #0xd \n" /* Store the divider state. r2 = 0xD0000000 (SIO base). */
                " lsls r2, #28 \n"

                /* We expect that the divider is ready at this point (which is
                 * necessary to safely save/restore), because:
                 * a) if we have not been interrupted since we entered this method,
                 * then >8 cycles have clearly passed, so the divider is done
                 * b) if we were interrupted in the interim, then any "safe" - i.e.
                 * does the right thing in an IRQ - use of the divider should
                 * have waited for any in-process divide to complete, saved and
                 * then fully restored the result, thus the result is ready in
                 * that case too. */
                " ldr r4, [r2, #0x60] \n" /* SIO_DIV_UDIVIDEND_OFFSET */
                " ldr r5, [r2, #0x64] \n" /* SIO_DIV_UDIVISOR_OFFSET */
                " ldr r6, [r2, #0x74] \n" /* SIO_DIV_REMAINDER_OFFSET */
                " ldr r7, [r2, #0x70] \n" /* SIO_DIV_QUOTIENT_OFFSET */

                /* We actually save the divider state in the 4 words below
                 * our recorded stack pointer, so as not to disrupt the stack
                 * frame expected by debuggers - this is addressed by
                 * portEXTRA_STACK_SIZE */
                " subs r0, r0, #48 \n"
                " stmia r0!, {r4-r7} \n"
            #endif /* portUSE_DIVIDER_SAVE_RESTORE */
            " push {r3, r14} \n"
            " cpsid i \n"
            " bl vTaskSwitchContext \n"
            " cpsie i \n"
            " pop {r2, r3} \n" /* lr goes in r3. r2 now holds tcb pointer. */
            " \n"
            " ldr r1, [r2] \n"
            " ldr r0, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. */
            " adds r0, r0, #16 \n" /* Move to the high registers. */
            " ldmia r0!, {r4-r7} \n" /* Pop the high registers. */
            " mov r8, r4 \n"
            " mov r9, r5 \n"
            " mov r10, r6 \n"
            " mov r11, r7 \n"
            " \n"
            " msr psp, r0 \n" /* Remember the new top of stack for the task. */
            " \n"
            #if portUSE_DIVIDER_SAVE_RESTORE
                " movs r2, #0xd \n" /* Pop the divider state. r2 = 0xD0000000 (SIO base). */
                " lsls r2, #28 \n"
                " subs r0, r0, #48 \n" /* Go back for the divider state */
                " ldmia r0!, {r4-r7} \n" /* Pop the divider state. */

                /* Note always restore via SIO_DIV_UDIVI*, because we will overwrite the
                 * results stopping the calculation anyway, however the sign of results
                 * is adjusted by the h/w at read time based on whether the last started
                 * division was signed and the inputs' signs differed */
                " str r4, [r2, #0x60] \n" /* SIO_DIV_UDIVIDEND_OFFSET */
                " str r5, [r2, #0x64] \n" /* SIO_DIV_UDIVISOR_OFFSET */
                " str r6, [r2, #0x74] \n" /* SIO_DIV_REMAINDER_OFFSET */
                " str r7, [r2, #0x70] \n" /* SIO_DIV_QUOTIENT_OFFSET */
            #else /* if portUSE_DIVIDER_SAVE_RESTORE */
                " subs r0, r0, #32 \n" /* Go back for the low registers that are not automatically restored. */
            #endif /* portUSE_DIVIDER_SAVE_RESTORE */
            " ldmia r0!, {r4-r7} \n" /* Pop low registers. */
            " \n"
            " bx r3 \n"
            " .align 4 \n"
            "pxCurrentTCBConst2: .word pxCurrentTCB \n"
        );
    #else /* if ( configNUMBER_OF_CORES == 1 ) */
        __asm volatile
        (
            " .syntax unified \n"
            " mrs r1, psp \n"
            " \n"
            " adr r0, ulAsmLocals2 \n" /* Get the location of the current TCB for the current core. */
            " ldmia r0!, {r2, r3} \n"
            #if configNUMBER_OF_CORES != 1
                " ldr r0, [r2] \n" /* r0 = Core number */
                " lsls r0, r0, #2 \n"
                " adds r3, r0 \n" /* r3 = &pxCurrentTCBs[get_core_num()] */
            #else
                " \n" /* r3 = &pxCurrentTCBs[0] */
            #endif /* configNUMBER_OF_CORES != 1 */
            " ldr r0, [r3] \n" /* r0 = pxCurrentTCB */
            " \n"
            " subs r1, r1, #32 \n" /* Make space for the remaining low registers. */
            " str r1, [r0] \n" /* Save the new top of stack. */
            " stmia r1!, {r4-r7} \n" /* Store the low registers that are not saved automatically. */
            " mov r4, r8 \n" /* Store the high registers. */
            " mov r5, r9 \n"
            " mov r6, r10 \n"
            " mov r7, r11 \n"
            " stmia r1!, {r4-r7} \n"
            #if portUSE_DIVIDER_SAVE_RESTORE

                /* We expect that the divider is ready at this point (which is
                 * necessary to safely save/restore), because:
                 * a) if we have not been interrupted since we entered this method,
                 * then >8 cycles have clearly passed, so the divider is done
                 * b) if we were interrupted in the interim, then any "safe" - i.e.
                 * does the right thing in an IRQ - use of the divider should
                 * have waited for any in-process divide to complete, saved and
                 * then fully restored the result, thus the result is ready in
                 * that case too. */
                " ldr r4, [r2, #0x60] \n" /* SIO_DIV_UDIVIDEND_OFFSET */
                " ldr r5, [r2, #0x64] \n" /* SIO_DIV_UDIVISOR_OFFSET */
                " ldr r6, [r2, #0x74] \n" /* SIO_DIV_REMAINDER_OFFSET */
                " ldr r7, [r2, #0x70] \n" /* SIO_DIV_QUOTIENT_OFFSET */

                /* We actually save the divider state in the 4 words below
                 * our recorded stack pointer, so as not to disrupt the stack
                 * frame expected by debuggers - this is addressed by
                 * portEXTRA_STACK_SIZE */
                " subs r1, r1, #48 \n"
                " stmia r1!, {r4-r7} \n"
            #endif /* portUSE_DIVIDER_SAVE_RESTORE */
            #if configNUMBER_OF_CORES != 1
                " ldr r0, [r2] \n" /* r0 = Core number (argument to vTaskSwitchContext). */
            #else
                " movs r0, #0 \n"
            #endif /* configNUMBER_OF_CORES != 1 */
            " push {r3, r14} \n"
            " cpsid i \n"
            " bl vTaskSwitchContext \n"
            " cpsie i \n"
            " pop {r2, r3} \n" /* lr goes in r3. r2 now holds tcb pointer. */
            " \n"
            " ldr r1, [r2] \n"
            " ldr r0, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. */
            " adds r0, r0, #16 \n" /* Move to the high registers. */
            " ldmia r0!, {r4-r7} \n" /* Pop the high registers. */
            " mov r8, r4 \n"
            " mov r9, r5 \n"
            " mov r10, r6 \n"
            " mov r11, r7 \n"
            " \n"
            " msr psp, r0 \n" /* Remember the new top of stack for the task. */
            " \n"
            #if portUSE_DIVIDER_SAVE_RESTORE
                " movs r2, #0xd \n" /* Pop the divider state. r2 = 0xD0000000 (SIO base). */
                " lsls r2, #28 \n"
                " subs r0, r0, #48 \n" /* Go back for the divider state */
                " ldmia r0!, {r4-r7} \n" /* Pop the divider state. */

                /* Note always restore via SIO_DIV_UDIVI*, because we will overwrite the
                 * results stopping the calculation anyway, however the sign of results
                 * is adjusted by the h/w at read time based on whether the last started
                 * division was signed and the inputs' signs differed */
                " str r4, [r2, #0x60] \n" /* SIO_DIV_UDIVIDEND_OFFSET */
                " str r5, [r2, #0x64] \n" /* SIO_DIV_UDIVISOR_OFFSET */
                " str r6, [r2, #0x74] \n" /* SIO_DIV_REMAINDER_OFFSET */
                " str r7, [r2, #0x70] \n" /* SIO_DIV_QUOTIENT_OFFSET */
            #else /* if portUSE_DIVIDER_SAVE_RESTORE */
                " subs r0, r0, #32 \n" /* Go back for the low registers that are not automatically restored. */
            #endif /* portUSE_DIVIDER_SAVE_RESTORE */
            " ldmia r0!, {r4-r7} \n" /* Pop low registers. */
            " \n"
            " bx r3 \n"
            " \n"
            " .align 4 \n"
            "ulAsmLocals2: \n"
            " .word 0xD0000000 \n" /* SIO base address. */
            " .word pxCurrentTCBs \n"
        );
    #endif /* if ( configNUMBER_OF_CORES == 1 ) */
}
711 /*-----------------------------------------------------------*/
712
xPortSysTickHandler(void)713 void xPortSysTickHandler( void )
714 {
715 uint32_t ulPreviousMask;
716
717 ulPreviousMask = taskENTER_CRITICAL_FROM_ISR();
718 traceISR_ENTER();
719 {
720 /* Increment the RTOS tick. */
721 if( xTaskIncrementTick() != pdFALSE )
722 {
723 traceISR_EXIT_TO_SCHEDULER();
724 /* Pend a context switch. */
725 portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;
726 }
727 else
728 {
729 traceISR_EXIT();
730 }
731 }
732 taskEXIT_CRITICAL_FROM_ISR( ulPreviousMask );
733 }
734 /*-----------------------------------------------------------*/
735
736 /*
737 * Setup the systick timer to generate the tick interrupts at the required
738 * frequency.
739 */
vPortSetupTimerInterrupt(void)740 __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void )
741 {
742 /* Calculate the constants required to configure the tick interrupt. */
743 #if ( configUSE_TICKLESS_IDLE == 1 )
744 {
745 ulTimerCountsForOneTick = ( clock_get_hz( clk_sys ) / configTICK_RATE_HZ );
746 xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick;
747 ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR;
748 }
749 #endif /* configUSE_TICKLESS_IDLE */
750
751 /* Stop and reset the SysTick. */
752 portNVIC_SYSTICK_CTRL_REG = 0UL;
753 portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
754
755 /* Configure SysTick to interrupt at the requested rate. */
756 portNVIC_SYSTICK_LOAD_REG = ( clock_get_hz( clk_sys ) / configTICK_RATE_HZ ) - 1UL;
757 portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
758 }
759 /*-----------------------------------------------------------*/
760
761 #if ( configUSE_TICKLESS_IDLE == 1 )
762
vPortSuppressTicksAndSleep(TickType_t xExpectedIdleTime)763 __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime )
764 {
765 uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements;
766 TickType_t xModifiableIdleTime;
767
768 /* Make sure the SysTick reload value does not overflow the counter. */
769 if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks )
770 {
771 xExpectedIdleTime = xMaximumPossibleSuppressedTicks;
772 }
773
774 /* Stop the SysTick momentarily. The time the SysTick is stopped for
775 * is accounted for as best it can be, but using the tickless mode will
776 * inevitably result in some tiny drift of the time maintained by the
777 * kernel with respect to calendar time. */
778 portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT;
779
780 /* Calculate the reload value required to wait xExpectedIdleTime
781 * tick periods. -1 is used because this code will execute part way
782 * through one of the tick periods. */
783 ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) );
784
785 if( ulReloadValue > ulStoppedTimerCompensation )
786 {
787 ulReloadValue -= ulStoppedTimerCompensation;
788 }
789
790 /* Enter a critical section but don't use the taskENTER_CRITICAL()
791 * method as that will mask interrupts that should exit sleep mode. */
792 __asm volatile ( "cpsid i" ::: "memory" );
793 __asm volatile ( "dsb" );
794 __asm volatile ( "isb" );
795
796 /* If a context switch is pending or a task is waiting for the scheduler
797 * to be unsuspended then abandon the low power entry. */
798 if( eTaskConfirmSleepModeStatus() == eAbortSleep )
799 {
800 /* Restart from whatever is left in the count register to complete
801 * this tick period. */
802 portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG;
803
804 /* Restart SysTick. */
805 portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT;
806
807 /* Reset the reload register to the value required for normal tick
808 * periods. */
809 portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
810
811 /* Re-enable interrupts - see comments above the cpsid instruction()
812 * above. */
813 __asm volatile ( "cpsie i" ::: "memory" );
814 }
815 else
816 {
817 /* Set the new reload value. */
818 portNVIC_SYSTICK_LOAD_REG = ulReloadValue;
819
820 /* Clear the SysTick count flag and set the count value back to
821 * zero. */
822 portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
823
824 /* Restart SysTick. */
825 portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT;
826
827 /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can
828 * set its parameter to 0 to indicate that its implementation contains
829 * its own wait for interrupt or wait for event instruction, and so wfi
830 * should not be executed again. However, the original expected idle
831 * time variable must remain unmodified, so a copy is taken. */
832 xModifiableIdleTime = xExpectedIdleTime;
833 configPRE_SLEEP_PROCESSING( xModifiableIdleTime );
834
835 if( xModifiableIdleTime > 0 )
836 {
837 __asm volatile ( "dsb" ::: "memory" );
838 __asm volatile ( "wfi" );
839 __asm volatile ( "isb" );
840 }
841
842 configPOST_SLEEP_PROCESSING( xExpectedIdleTime );
843
844 /* Re-enable interrupts to allow the interrupt that brought the MCU
845 * out of sleep mode to execute immediately. see comments above
846 * __disable_interrupt() call above. */
847 __asm volatile ( "cpsie i" ::: "memory" );
848 __asm volatile ( "dsb" );
849 __asm volatile ( "isb" );
850
851 /* Disable interrupts again because the clock is about to be stopped
852 * and interrupts that execute while the clock is stopped will increase
853 * any slippage between the time maintained by the RTOS and calendar
854 * time. */
855 __asm volatile ( "cpsid i" ::: "memory" );
856 __asm volatile ( "dsb" );
857 __asm volatile ( "isb" );
858
859 /* Disable the SysTick clock without reading the
860 * portNVIC_SYSTICK_CTRL_REG register to ensure the
861 * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again,
862 * the time the SysTick is stopped for is accounted for as best it can
863 * be, but using the tickless mode will inevitably result in some tiny
864 * drift of the time maintained by the kernel with respect to calendar
865 * time*/
866 portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT );
867
868 /* Determine if the SysTick clock has already counted to zero and
869 * been set back to the current reload value (the reload back being
870 * correct for the entire expected idle time) or if the SysTick is yet
871 * to count to zero (in which case an interrupt other than the SysTick
872 * must have brought the system out of sleep mode). */
873 if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 )
874 {
875 uint32_t ulCalculatedLoadValue;
876
877 /* The tick interrupt is already pending, and the SysTick count
878 * reloaded with ulReloadValue. Reset the
879 * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick
880 * period. */
881 ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG );
882
883 /* Don't allow a tiny value, or values that have somehow
884 * underflowed because the post sleep hook did something
885 * that took too long. */
886 if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) )
887 {
888 ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL );
889 }
890
891 portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue;
892
893 /* As the pending tick will be processed as soon as this
894 * function exits, the tick value maintained by the tick is stepped
895 * forward by one less than the time spent waiting. */
896 ulCompleteTickPeriods = xExpectedIdleTime - 1UL;
897 }
898 else
899 {
900 /* Something other than the tick interrupt ended the sleep.
901 * Work out how long the sleep lasted rounded to complete tick
902 * periods (not the ulReload value which accounted for part
903 * ticks). */
904 ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG;
905
906 /* How many complete tick periods passed while the processor
907 * was waiting? */
908 ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick;
909
910 /* The reload value is set to whatever fraction of a single tick
911 * period remains. */
912 portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements;
913 }
914
915 /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG
916 * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard
917 * value. */
918 portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
919 portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT;
920 vTaskStepTick( ulCompleteTickPeriods );
921 portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
922
923 /* Exit with interrupts enabled. */
924 __asm volatile ( "cpsie i" ::: "memory" );
925 }
926 }
927
928 #endif /* configUSE_TICKLESS_IDLE */
929
930 #if ( configSUPPORT_PICO_SYNC_INTEROP == 1 ) || ( configSUPPORT_PICO_TIME_INTEROP == 1 )
prvGetTicksToWaitBefore(absolute_time_t t)931 static TickType_t prvGetTicksToWaitBefore( absolute_time_t t )
932 {
933 int64_t xDelay = absolute_time_diff_us( get_absolute_time(), t );
934 const uint32_t ulTickPeriod = 1000000 / configTICK_RATE_HZ;
935
936 xDelay -= ulTickPeriod;
937
938 if( xDelay >= ulTickPeriod )
939 {
940 return xDelay / ulTickPeriod;
941 }
942
943 return 0;
944 }
945 #endif /* if ( configSUPPORT_PICO_SYNC_INTEROP == 1 ) || ( configSUPPORT_PICO_TIME_INTEROP == 1 ) */
946
947 #if ( configSUPPORT_PICO_SYNC_INTEROP == 1 )
uint32_t ulPortLockGetCurrentOwnerId()
{
    /* Produce an owner id that is unique across tasks, cores and
     * exception handlers for the SDK lock_core bookkeeping. */
    if( !portIS_FREE_RTOS_CORE() )
    {
        /* Note: since ROM is at 0x00000000, this can't be confused with
         * valid task handles (pointers) in RAM */
        return get_core_num();
    }

    uint32_t ulException = __get_current_exception();

    if( ulException )
    {
        /* Note: since ROM is at 0x00000000, these can't be confused with
         * valid task handles (pointers) in RAM */
        /* We make all exception handler/core combinations distinct owners */
        return get_core_num() + ulException * 2;
    }

    /* Task context on the FreeRTOS core: the task handle itself is a
     * unique owner id. */
    return ( uintptr_t ) xTaskGetCurrentTaskHandle();
}
969
prvGetEventGroupBit(spin_lock_t * spinLock)970 static inline EventBits_t prvGetEventGroupBit( spin_lock_t * spinLock )
971 {
972 uint32_t ulBit;
973
974 #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
975 ulBit = 1u << ( spin_lock_get_num( spinLock ) & 0x7u );
976 #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
977 /* Avoid potential use of SIO divider for % here out of abundance of caution */
978 ulBit = spin_lock_get_num( spinLock );
979 if (ulBit >= 24) ulBit -= 24;
980 ulBit = 1u << ulBit;
981 #endif /* configTICK_TYPE_WIDTH_IN_BITS */
982 return ( EventBits_t ) ulBit;
983 }
984
/* Returns the mask of every event group bit used to multiplex spin lock
 * numbers: 8 bits with a 16 bit tick type, 24 bits with a 32 bit tick type
 * (matching the folding performed by prvGetEventGroupBit). */
static inline EventBits_t prvGetAllEventGroupBits()
{
    #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
        return ( EventBits_t ) 0xffu;
    #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
        return ( EventBits_t ) 0xffffffu;
    #endif /* configTICK_TYPE_WIDTH_IN_BITS */
}
993
vPortLockInternalSpinUnlockWithWait(struct lock_core * pxLock,uint32_t ulSave)994 void vPortLockInternalSpinUnlockWithWait( struct lock_core * pxLock,
995 uint32_t ulSave )
996 {
997 configASSERT( !portCHECK_IF_IN_ISR() );
998 configASSERT( pxLock->spin_lock );
999
1000 if( !portIS_FREE_RTOS_CORE() )
1001 {
1002 spin_unlock( pxLock->spin_lock, ulSave );
1003 __wfe();
1004 }
1005 else
1006 {
1007 /* The requirement (from the SDK) on this implementation is that this method
1008 * should always wake up from a corresponding call to vPortLockInternalSpinUnlockWithNotify
1009 * that happens after this method is called.
1010 *
1011 * The moment that we unlock the spin lock, we need to be sure that
1012 * there is no way that we end up blocking in xEventGroupWaitBits,
1013 * despite the fact that other tasks can now run, if the corresponding
1014 * unlock has occurred.
1015 *
1016 * Previously the RP2xxx ports used to attempt to disable IRQs until the
1017 * task actually (potentially) became blocked by hooking the IRQ re-enable
1018 * when xEventGroupWaitBits completes (or switches tasks), but this
1019 * was a broken hack, in that IRQs are re-enabled at other points during
1020 * that call.
1021 *
1022 * This deferred IRQ enable is not actually needed, because all we
1023 * care about is that:
1024 *
1025 * Even in the presence of other tasks acquiring then releasing
1026 * the lock, between the interrupt_enable and the xEventGroupWaitBits,
1027 * the corresponding bit will still be set.
1028 *
1029 * This is the case, even any intervening blocked lock (which
1030 * clears the event bit) will need to unlock it before we proceed,
1031 * which will set the event bit again.
1032 *
1033 * The multiplexing down of multiple spin lock numbers to fewer
1034 * event bits does not cause a possible race condition,
1035 * but it does mean that a task waiting for lock A can be
1036 * blocked by a task B which owns another lock.
1037 *
1038 * This could be fixed by using an array of event groups, however
1039 * since the SDK spin locks are generally intended for very short
1040 * term usage anyway, and rarely nested except in exotic cases
1041 * like video output, we'll leave it as one event group for now
1042 */
1043 spin_unlock( pxLock->spin_lock, ulSave);
1044 xEventGroupWaitBits( xEventGroup, prvGetEventGroupBit( pxLock->spin_lock ),
1045 pdTRUE, pdFALSE, portMAX_DELAY );
1046 }
1047 }
1048
vPortLockInternalSpinUnlockWithNotify(struct lock_core * pxLock,uint32_t ulSave)1049 void vPortLockInternalSpinUnlockWithNotify( struct lock_core * pxLock,
1050 uint32_t ulSave )
1051 {
1052 EventBits_t uxBits = prvGetEventGroupBit( pxLock->spin_lock );
1053
1054 if( portIS_FREE_RTOS_CORE() )
1055 {
1056 #if LIB_PICO_MULTICORE
1057 /* signal an event in case a regular core is waiting */
1058 __sev();
1059 #endif
1060 spin_unlock( pxLock->spin_lock, ulSave );
1061
1062 if( !portCHECK_IF_IN_ISR() )
1063 {
1064 xEventGroupSetBits( xEventGroup, uxBits );
1065 }
1066 else
1067 {
1068 BaseType_t xHigherPriorityTaskWoken = pdFALSE;
1069 xEventGroupSetBitsFromISR( xEventGroup, uxBits, &xHigherPriorityTaskWoken );
1070 portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
1071 }
1072 }
1073 else
1074 {
1075 __sev();
1076 #if ( configNUMBER_OF_CORES == 1 )
1077 if( pxCrossCoreSpinLock != pxLock->spin_lock )
1078 {
1079 spin_lock_unsafe_blocking( pxCrossCoreSpinLock );
1080 uxCrossCoreEventBits |= uxBits;
1081 spin_unlock_unsafe( pxCrossCoreSpinLock );
1082 }
1083 else
1084 {
1085 uxCrossCoreEventBits |= uxBits;
1086 }
1087
1088 /* This causes fifo irq on the other (FreeRTOS) core which will do the set the event bits */
1089 sio_hw->fifo_wr = 0;
1090 #endif /* configNUMBER_OF_CORES == 1 */
1091 spin_unlock( pxLock->spin_lock, ulSave );
1092 }
1093 }
1094
bool xPortLockInternalSpinUnlockWithBestEffortWaitOrTimeout( struct lock_core * pxLock,
                                                             uint32_t ulSave,
                                                             absolute_time_t uxUntil )
{
    configASSERT( !portCHECK_IF_IN_ISR() );
    configASSERT( pxLock->spin_lock );

    /* Note: no need to check LIB_PICO_MULTICORE here, as
     * portIS_FREE_RTOS_CORE() always returns true when it is not defined. */
    if( !portIS_FREE_RTOS_CORE() )
    {
        /* Not the FreeRTOS core: fall back to the SDK's own timed wait. */
        spin_unlock( pxLock->spin_lock, ulSave );
        return best_effort_wfe_or_timeout( uxUntil );
    }

    configASSERT( portIS_FREE_RTOS_CORE() );

    TickType_t uxTicksToWait = prvGetTicksToWaitBefore( uxUntil );

    /* The lock must be released before blocking either way; see the
     * comment in vPortLockInternalSpinUnlockWithWait for detail about
     * possible race conditions. */
    spin_unlock( pxLock->spin_lock, ulSave );

    if( uxTicksToWait != 0 )
    {
        xEventGroupWaitBits( xEventGroup,
                             prvGetEventGroupBit( pxLock->spin_lock ), pdTRUE,
                             pdFALSE, uxTicksToWait );
    }

    if( time_reached( uxUntil ) )
    {
        return true;
    }

    /* We do not want to hog the core */
    portYIELD();
    /* We aren't sure if we've reached the timeout yet; the caller will check */
    return false;
}
1141
1142 #if ( configSUPPORT_PICO_SYNC_INTEROP == 1 )
1143 /* runs before main */
/* Constructor: runs before main(), so the interop plumbing exists before any
 * SDK lock primitive can be exercised. */
static void __attribute__( ( constructor ) ) prvRuntimeInitializer( void )
{
    /* This must be done even before the scheduler is started, as the spin lock
     * is used by the overrides of the SDK wait/notify primitives */
    #if ( configNUMBER_OF_CORES == 1 )
        pxCrossCoreSpinLock = spin_lock_instance( next_striped_spin_lock_num() );
    #endif /* configNUMBER_OF_CORES == 1 */

    /* The event group is not used prior to scheduler init, but is initialized
     * here since it logically belongs with the spin lock */
    #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
        xEventGroup = xEventGroupCreateStatic( &xStaticEventGroup );
    #else

    /* Note that it is slightly dubious calling this here before the scheduler is initialized,
     * however the only thing it touches is the allocator which then calls vPortEnterCritical
     * and vPortExitCritical, and allocating here saves us checking the one time initialized variable in
     * some rather critical code paths */
        xEventGroup = xEventGroupCreate();
    #endif /* configSUPPORT_STATIC_ALLOCATION */
}
1165 #endif /* if ( configSUPPORT_PICO_SYNC_INTEROP == 1 ) */
1166 #endif /* configSUPPORT_PICO_SYNC_INTEROP */
1167
1168 #if ( configSUPPORT_PICO_TIME_INTEROP == 1 )
void xPortSyncInternalYieldUntilBefore( absolute_time_t t )
{
    /* Block the calling task until roughly one tick before t; any
     * sub-tick remainder is left for the SDK caller to handle. */
    const TickType_t uxTicksToWait = prvGetTicksToWaitBefore( t );

    if( uxTicksToWait > 0 )
    {
        vTaskDelay( uxTicksToWait );
    }
}
1178 #endif /* configSUPPORT_PICO_TIME_INTEROP */
1179