/*
 * SPDX-FileCopyrightText: Copyright The TrustedFirmware-M Contributors
 * Copyright (c) 2021-2024 Cypress Semiconductor Corporation (an Infineon
 * company) or an affiliate of Cypress Semiconductor Corporation. All rights
 * reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <stdint.h>
#include <assert.h>
#include "aapcs_local.h"
#include "async.h"
#include "config_spm.h"
#include "critical_section.h"
#include "compiler_ext_defs.h"
#include "ffm/psa_api.h"
#include "fih.h"
#include "runtime_defs.h"
#include "stack_watermark.h"
#include "spm.h"
#include "tfm_hal_isolation.h"
#include "tfm_hal_platform.h"
#include "tfm_nspm.h"
#include "tfm_rpc.h"
#include "ffm/backend.h"
#include "utilities.h"
#include "memory_symbols.h"
#include "load/partition_defs.h"
#include "load/service_defs.h"
#include "load/spm_load_api.h"
#include "psa/error.h"
#include "internal_status_code.h"
#include "sprt_partition_metadata_indicator.h"

#if TFM_PARTITION_NS_AGENT_MAILBOX == 1
#include "psa_manifest/ns_agent_mailbox.h"
#endif

/* Declare the global component list */
struct partition_head_t partition_listhead;

#ifdef CONFIG_TFM_USE_TRUSTZONE
/* Instance for SPM_THREAD_CONTEXT */
struct context_ctrl_t *p_spm_thread_context;
#else
/* If ns_agent_tz isn't used, we need to provide a stack for SPM to use */
static uint8_t spm_thread_stack[CONFIG_TFM_SPM_THREAD_STACK_SIZE] __aligned(8);
ARCH_CLAIM_CTXCTRL_INSTANCE(spm_thread_context,
                            spm_thread_stack,
                            sizeof(spm_thread_stack));

struct context_ctrl_t *p_spm_thread_context = &spm_thread_context;
#endif

#if (CONFIG_TFM_SECURE_THREAD_MASK_NS_INTERRUPT == 1) && defined(CONFIG_TFM_USE_TRUSTZONE)
static bool basepri_set_by_ipc_schedule;
#endif

/*
 * Query the state of the current thread.
 */
static uint32_t query_state(const struct thread_t *p_thrd, uint32_t *p_retval)
{
    struct critical_section_t cs_signal = CRITICAL_SECTION_STATIC_INIT;
    struct partition_t *p_pt = NULL;
    uint32_t state = p_thrd->state;
    psa_signal_t retval_signals = 0;

    /* Get current partition of thread. */
    p_pt = TO_CONTAINER(p_thrd->p_context_ctrl,
                        struct partition_t, ctx_ctrl);

    CRITICAL_SECTION_ENTER(cs_signal);

    retval_signals = p_pt->signals_waiting & p_pt->signals_asserted;

    if (retval_signals) {
        /*
         * Signal "ASYNC_MSG_REPLY" can only be waited for in one of the
         * cases below:
         *
         *   - An FF-M Secure Partition is calling the Client API and
         *     expecting a "handle/status" reply from RoT Services.
         *     FF-M Secure Partitions cannot use 'psa_wait' to wait
         *     on this signal because the signal is not set in FF-M
         *     Secure Partitions' "signals_allowed".
         *
         *   - A Mailbox NS Agent is calling "psa_wait" with a pattern
         *     containing "ASYNC_MSG_REPLY". The signal is set in
         *     Mailbox NS Agents' "signals_allowed".
         *
         * "signals_allowed" is therefore used to check whether the waiter
         * is an FF-M Secure Partition or a Mailbox NS Agent.
         */
        if ((retval_signals == ASYNC_MSG_REPLY) &&
            ((p_pt->signals_allowed & ASYNC_MSG_REPLY) != ASYNC_MSG_REPLY)) {
            p_pt->signals_asserted &= ~ASYNC_MSG_REPLY;

#ifndef NDEBUG
            assert(p_pt->p_replied->status < TFM_HANDLE_STATUS_MAX);
#endif

            /*
             * For an FF-M Secure Partition, the reply is synchronous and only
             * one replied handle node should be mounted. Take the reply value
             * from the node and then delete the node.
             */
            *p_retval = (uint32_t)p_pt->p_replied->replied_value;
            if (p_pt->p_replied->status == TFM_HANDLE_STATUS_TO_FREE) {
                spm_free_connection(p_pt->p_replied);
            }
            p_pt->p_replied = NULL;
        } else {
            *p_retval = retval_signals;
        }

        /* Clear 'signals_waiting' to indicate the component is not waiting. */
        p_pt->signals_waiting = 0;
        state = THRD_STATE_RET_VAL_AVAIL;
    } else if (p_pt->signals_waiting != 0) {
        /*
         * If the thread is waiting for some signals but none of them is
         * asserted, block the thread.
         */
        state = THRD_STATE_BLOCK;
    }

    CRITICAL_SECTION_LEAVE(cs_signal);
    return state;
}

extern struct psa_api_tbl_t psa_api_thread_fn_call;
extern struct psa_api_tbl_t psa_api_svc;

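/*
 * Allocate the runtime metadata (plus the SFN table for SFN partitions) on
 * the partition's stack, select the PSA API table (psa_api_thread_fn_call or
 * psa_api_svc, depending on whether a boundary switch towards SPM is needed)
 * and record the metadata in p_pt->p_metadata.
 */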
static void prv_process_metadata(struct partition_t *p_pt)
{
    const struct partition_load_info_t *p_pt_ldi;
    const struct service_load_info_t *p_srv_ldi;
    struct context_ctrl_t *ctx_ctrl;
    struct runtime_metadata_t *p_rt_meta;
    service_fn_t *p_sfn_table;
    uint32_t allocate_size;
#if TFM_ISOLATION_LEVEL != 1
    FIH_RET_TYPE(bool) fih_rc;
#endif

    p_pt_ldi = p_pt->p_ldinf;
    p_srv_ldi = LOAD_INFO_SERVICE(p_pt_ldi);
    ctx_ctrl = &p_pt->ctx_ctrl;

    /* Common runtime metadata */
    allocate_size = sizeof(*p_rt_meta);

    if (!IS_IPC_MODEL(p_pt_ldi)) {
        /* SFN specific metadata - SFN function table */
        allocate_size += sizeof(service_fn_t) * p_pt_ldi->nservices;
    }

    ARCH_CTXCTRL_ALLOCATE_STACK(ctx_ctrl, allocate_size);
    p_rt_meta = (struct runtime_metadata_t *)
                                    ARCH_CTXCTRL_ALLOCATED_PTR(ctx_ctrl);

#if TFM_ISOLATION_LEVEL == 1
    p_rt_meta->psa_fns = &psa_api_thread_fn_call;
#else
    FIH_CALL(tfm_hal_boundary_need_switch, fih_rc, get_spm_boundary(), p_pt->boundary);
    if (fih_not_eq(fih_rc, fih_int_encode(false))) {
        p_rt_meta->psa_fns = &psa_api_svc;
    } else {
        p_rt_meta->psa_fns = &psa_api_thread_fn_call;
    }
#endif

    p_rt_meta->entry = p_pt_ldi->entry;
    p_rt_meta->n_sfn = 0;
    p_sfn_table = p_rt_meta->sfn_table;

    if (!IS_IPC_MODEL(p_pt_ldi)) {
        /* SFN table. A service's signal bit index is also its index in the SFN table. */
        for (int i = 0; i < p_pt_ldi->nservices; i++) {
            p_sfn_table[i] = (service_fn_t)p_srv_ldi[i].sfn;
        }

        p_rt_meta->n_sfn = p_pt_ldi->nservices;
    }

    p_pt->p_metadata = p_rt_meta;
}

/*
 * Send a message and wake up the Secure Partition that is waiting on its
 * message queue, block the current thread and trigger the scheduler.
 */
psa_status_t backend_messaging(struct connection_t *p_connection)
{
    struct partition_t *p_owner = NULL;
    psa_signal_t signal = 0;
    psa_status_t ret = PSA_SUCCESS;

    if (!p_connection || !p_connection->service ||
        !p_connection->service->p_ldinf         ||
        !p_connection->service->partition) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    p_owner = p_connection->service->partition;
    signal = p_connection->service->p_ldinf->signal;

    UNI_LIST_INSERT_AFTER(p_owner, p_connection, p_reqs);

    /* The message has been queued. Update the signals. */
    ret = backend_assert_signal(p_owner, signal);

    /*
     * If the request comes from a Mailbox NS Agent, it is NOT necessary to
     * block the current thread.
     */
    if (IS_NS_AGENT_MAILBOX(p_connection->p_client->p_ldinf)) {
        ret = PSA_SUCCESS;
    } else {
        signal = backend_wait_signals(p_connection->p_client, ASYNC_MSG_REPLY);
        if (signal == (psa_signal_t)0) {
            ret = STATUS_NEED_SCHEDULE;
        }
    }

    return ret;
}

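/*
 * Record the reply value on the connection handle, mount the handle on the
 * client's replied list and assert "ASYNC_MSG_REPLY" so that the client can
 * fetch the result.
 */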
psa_status_t backend_replying(struct connection_t *handle, int32_t status)
{
    struct partition_t *client = handle->p_client;

    /* Prepare the replied handle. */
    handle->replied_value = (uintptr_t)status;

    /* Mount the replied handle. There are two modes of replying:
     *
     *  - For a synchronous reply, only one node is mounted.
     *  - For an asynchronous reply, the node mounted first ends up at the tail
     *    of the list and is replied to first.
     *    - Currently, this is used for the multi-core mailbox solution.
     */
    UNI_LIST_INSERT_AFTER(client, handle, p_replied);

    return backend_assert_signal(handle->p_client, ASYNC_MSG_REPLY);
}

extern void common_sfn_thread(void *param);

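/*
 * Common initialization for Secure Partitions: set up the allowed signals,
 * initialize the request and reply lists, and return the thread entry point
 * (the partition entry for IPC partitions, common_sfn_thread() for SFN
 * partitions).
 */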
static thrd_fn_t partition_init(struct partition_t *p_pt,
                                uint32_t service_setting, uint32_t *param)
{
    thrd_fn_t thrd_entry;

    (void)param;
    assert(p_pt);

#if CONFIG_TFM_DOORBELL_API == 1
    p_pt->signals_allowed |= PSA_DOORBELL;
#endif /* CONFIG_TFM_DOORBELL_API == 1 */

    p_pt->signals_allowed |= service_setting;

    /* Allow 'ASYNC_MSG_REPLY' for Mailbox NS Agent. */
    if (IS_NS_AGENT_MAILBOX(p_pt->p_ldinf)) {
        p_pt->signals_allowed |= ASYNC_MSG_REPLY;
    }

    UNI_LIST_INIT_NODE(p_pt, p_reqs);
    UNI_LIST_INIT_NODE(p_pt, p_replied);

    if (IS_IPC_MODEL(p_pt->p_ldinf)) {
        /* IPC Partition */
        thrd_entry = POSITION_TO_ENTRY(p_pt->p_ldinf->entry, thrd_fn_t);
    } else {
        /* SFN Partition */
        thrd_entry = (thrd_fn_t)common_sfn_thread;
    }
    return thrd_entry;
}

#ifdef CONFIG_TFM_USE_TRUSTZONE
static thrd_fn_t ns_agent_tz_init(struct partition_t *p_pt,
                                  uint32_t service_setting, uint32_t *param)
{
    thrd_fn_t thrd_entry;

    (void)service_setting;
    assert(p_pt);
    assert(param);

    tz_ns_agent_register_client_id_range(p_pt->p_ldinf->client_id_base,
                                         p_pt->p_ldinf->client_id_limit);

    /* SPM borrows the context (stack) of ns_agent_tz. */
    SPM_THREAD_CONTEXT = &p_pt->ctx_ctrl;

    thrd_entry = POSITION_TO_ENTRY(p_pt->p_ldinf->entry, thrd_fn_t);

    /* The TZ NS Agent expects the NSPE entry point as its parameter. */
    *param = tfm_hal_get_ns_entry_point();
    return thrd_entry;
}
#else
static thrd_fn_t ns_agent_tz_init(struct partition_t *p_pt,
                                  uint32_t service_setting, uint32_t *param)
{
    (void)p_pt;
    (void)service_setting;
    (void)param;

    return POSITION_TO_ENTRY(NULL, thrd_fn_t);
}
#endif

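/*
 * Component initialization routines, indexed by the partition type encoded in
 * the load info flags (see PARTITION_TYPE_TO_INDEX()).
 */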
typedef thrd_fn_t (*comp_init_fn_t)(struct partition_t *, uint32_t, uint32_t *);
static const comp_init_fn_t comp_init_fns[] = {
    partition_init,
    ns_agent_tz_init,
};

/* Parameters are treated as assured (trusted) and are not validated here. */
void backend_init_comp_assuredly(struct partition_t *p_pt, uint32_t service_setting)
{
    const struct partition_load_info_t *p_pldi = p_pt->p_ldinf;
    thrd_fn_t thrd_entry;
    uint32_t param;
    int32_t index = PARTITION_TYPE_TO_INDEX(p_pldi->flags);

    ARCH_CTXCTRL_INIT(&p_pt->ctx_ctrl,
                      LOAD_ALLOCED_STACK_ADDR(p_pldi),
                      p_pldi->stack_size);

    watermark_stack(p_pt);

    /*
     * Use the Secure Partition load order as the initial scheduling priority
     * in the IPC backend.
     */
    THRD_INIT(&p_pt->thrd, &p_pt->ctx_ctrl, p_pldi->load_order);

    thrd_entry = (comp_init_fns[index])(p_pt, service_setting, &param);

    prv_process_metadata(p_pt);

    thrd_start(&p_pt->thrd, thrd_entry, THRD_GENERAL_EXIT, (void *)param);
}

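/*
 * Register the thread state query callback, start the scheduler, activate the
 * isolation boundary of the partition that is scheduled first, and return the
 * 'control' value produced by thrd_start_scheduler() for that thread.
 */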
uint32_t backend_system_run(void)
{
    uint32_t control;
    const struct partition_t *p_cur_pt;
    fih_int fih_rc = FIH_FAILURE;

    assert(SPM_THREAD_CONTEXT);

#ifndef CONFIG_TFM_USE_TRUSTZONE
    /*
     * The TZ NS Agent is mandatory when TrustZone is enabled, and SPM borrows
     * its stack to improve stack usage efficiency.
     * Hence SPM needs a dedicated stack when TrustZone is not enabled, and
     * this stack has to be sealed before it is used.
     */
    watermark_spm_stack();
    ARCH_CTXCTRL_ALLOCATE_STACK(SPM_THREAD_CONTEXT, sizeof(uint64_t));
    arch_seal_thread_stack(ARCH_CTXCTRL_ALLOCATED_PTR(SPM_THREAD_CONTEXT));
#endif

    /* Init thread callback function. */
    thrd_set_query_callback(query_state);

    control = thrd_start_scheduler(&CURRENT_THREAD);

    p_cur_pt = TO_CONTAINER(CURRENT_THREAD->p_context_ctrl,
                            struct partition_t, ctx_ctrl);

    FIH_CALL(tfm_hal_activate_boundary, fih_rc, p_cur_pt->p_ldinf, p_cur_pt->boundary);
    if (fih_not_eq(fih_rc, fih_int_encode(TFM_HAL_SUCCESS))) {
        tfm_core_panic();
    }

    return control;
}

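/*
 * Return the subset of 'signals' that is already asserted for the partition.
 * If none is asserted, record 'signals' as the partition's waiting set so
 * that query_state() can block the thread until one of them arrives.
 */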
psa_signal_t backend_wait_signals(struct partition_t *p_pt, psa_signal_t signals)
{
    struct critical_section_t cs_signal = CRITICAL_SECTION_STATIC_INIT;
    psa_signal_t ret;

    if (!p_pt) {
        tfm_core_panic();
    }

    CRITICAL_SECTION_ENTER(cs_signal);

    ret = p_pt->signals_asserted & signals;
    if (ret == (psa_signal_t)0) {
        p_pt->signals_waiting = signals;
    }

    CRITICAL_SECTION_LEAVE(cs_signal);

    return ret;
}

#if (CONFIG_TFM_HYBRID_PLAT_SCHED_TYPE == TFM_HYBRID_PLAT_SCHED_NSPE)
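/*
 * Assert 'signal' for the partition, unless it is one of the Mailbox NS
 * Agent's interrupt signals: those are left for NSPE to drive via the mailbox
 * dedicated auxiliary service, as explained in the body below.
 */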
static void backend_assert_hybridplat_signal(
    struct partition_t *p_pt, psa_signal_t signal)
{
    const struct irq_load_info_t *irq_info;
    uint32_t irq_info_idx;
    uint32_t nirqs;

    if (IS_NS_AGENT_MAILBOX(p_pt->p_ldinf)) {
        nirqs = p_pt->p_ldinf->nirqs;
        irq_info = LOAD_INFO_IRQ(p_pt->p_ldinf);
        for (irq_info_idx = 0; irq_info_idx < nirqs; irq_info_idx++) {
            if (irq_info == NULL) {
                tfm_core_panic();
            }
            if (irq_info[irq_info_idx].signal == signal) {
                break;
            }
        }

        if (irq_info_idx < nirqs) {
            /*
             * The incoming signal is found in the irq_load_info_t,
             * do not assert the signal now.
             * NSPE will drive the processing for this request via the mailbox
             * dedicated auxiliary service.
             */
            return;
        }
    }

    p_pt->signals_asserted |= signal;
}
#endif

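/*
 * Assert 'signal' for the partition and return STATUS_NEED_SCHEDULE if the
 * partition is waiting for a signal that is now asserted, PSA_SUCCESS
 * otherwise.
 */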
psa_status_t backend_assert_signal(struct partition_t *p_pt, psa_signal_t signal)
{
    struct critical_section_t cs_signal = CRITICAL_SECTION_STATIC_INIT;
    psa_status_t ret = PSA_SUCCESS;

    if (!p_pt) {
        tfm_core_panic();
    }

    CRITICAL_SECTION_ENTER(cs_signal);

#if (CONFIG_TFM_HYBRID_PLAT_SCHED_TYPE == TFM_HYBRID_PLAT_SCHED_NSPE)
    backend_assert_hybridplat_signal(p_pt, signal);
#else
    p_pt->signals_asserted |= signal;
#endif

    if (p_pt->signals_asserted & p_pt->signals_waiting) {
        ret = STATUS_NEED_SCHEDULE;
    }
    CRITICAL_SECTION_LEAVE(cs_signal);

    return ret;
}

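/*
 * Called when a PSA ABI call enters SPM. Acquires the scheduler lock and
 * returns the SPM stack pointer and stack limit packed into a 64-bit value
 * when the caller's stack needs to be switched to the SPM stack, or zeros
 * when the caller is already running on it.
 */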
uint64_t backend_abi_entering_spm(void)
{
    struct partition_t *caller = GET_CURRENT_COMPONENT();
    uint32_t sp = 0;
    uint32_t sp_limit = 0;
    AAPCS_DUAL_U32_T spm_stack_info;

#if TFM_ISOLATION_LEVEL == 1
    /* PSA APIs must be called from Thread mode */
    if (__get_active_exc_num() != EXC_NUM_THREAD_MODE) {
        tfm_core_panic();
    }
#endif

    /*
     * Check if the caller's stack is within the SPM stack. If not, the stack
     * needs to be switched. Otherwise, return zeros.
     */
    if ((caller->ctx_ctrl.sp <= SPM_THREAD_CONTEXT->sp_limit) ||
        (caller->ctx_ctrl.sp >  SPM_THREAD_CONTEXT->sp_base)) {
        sp       = SPM_THREAD_CONTEXT->sp;
        sp_limit = SPM_THREAD_CONTEXT->sp_limit;
    }

    AAPCS_DUAL_U32_SET(spm_stack_info, sp, sp_limit);

    arch_acquire_sched_lock();

    return AAPCS_DUAL_U32_AS_U64(spm_stack_info);
}

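/*
 * Called when a PSA ABI call leaves SPM. Handles programmer errors carried in
 * 'result', releases the scheduler lock, and attempts a schedule if the call
 * requested one or if an attempt was recorded while the lock was held.
 */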
uint32_t backend_abi_leaving_spm(uint32_t result)
{
    uint32_t sched_attempted;

    spm_handle_programmer_errors(result);

    /* Release scheduler lock and check the record of schedule attempt. */
    sched_attempted = arch_release_sched_lock();

    /* Interrupts are masked, so PendSV will not happen immediately. */
    if (((psa_status_t)result == STATUS_NEED_SCHEDULE) ||
        (sched_attempted == SCHEDULER_ATTEMPTED)) {
        arch_attempt_schedule();
    }

    return result;
}

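/*
 * Scheduler body of the IPC backend. Saves the current thread's stack
 * pointer, picks the next runnable thread and, when the owning partition
 * changes, activates the next partition's isolation boundary and updates the
 * partition metadata indicator. Returns the current and next context-control
 * pointers packed into a 64-bit value (current in R0, next in R1) so that the
 * caller can perform the context switch.
 */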
uint64_t ipc_schedule(uint32_t exc_return)
{
    fih_int fih_rc = FIH_FAILURE;
    FIH_RET_TYPE(bool) fih_bool;
    AAPCS_DUAL_U32_T ctx_ctrls;
    const struct partition_t *p_part_curr;
    struct partition_t *p_part_next;
    struct context_ctrl_t *p_curr_ctx;
    struct thread_t *pth_next;
    struct critical_section_t cs = CRITICAL_SECTION_STATIC_INIT;

    /* Protect concurrent access to current thread/component and thread status */
    CRITICAL_SECTION_ENTER(cs);

#if (CONFIG_TFM_SECURE_THREAD_MASK_NS_INTERRUPT == 1) && defined(CONFIG_TFM_USE_TRUSTZONE)
    if (__get_BASEPRI() == 0) {
        /*
         * If BASEPRI is not set, an interrupt was taken while Non-Secure code
         * was executing, and scheduling is necessary because a secure
         * partition became runnable.
         */
        assert(!basepri_set_by_ipc_schedule);
        basepri_set_by_ipc_schedule = true;
        __set_BASEPRI(SECURE_THREAD_EXECUTION_PRIORITY);
    }
#endif

    p_curr_ctx = CURRENT_THREAD->p_context_ctrl;

    /*
     * Update the SP of the current thread, in case tfm_arch_set_context_ret_code()
     * has to update R0 in the current thread's saved context.
     */
    p_curr_ctx->sp = __get_PSP() -
        (is_default_stacking_rules_apply(exc_return) ?
            sizeof(struct tfm_additional_context_t) : 0) -
            TFM_FPU_CONTEXT_SIZE;

    pth_next = thrd_next();

    AAPCS_DUAL_U32_SET(ctx_ctrls, (uint32_t)p_curr_ctx, (uint32_t)p_curr_ctx);

    p_part_curr = GET_CURRENT_COMPONENT();
    p_part_next = GET_THRD_OWNER(pth_next);

    if ((pth_next != NULL) && (p_part_curr != p_part_next)) {
        /* Check if there is enough room on stack to save more context */
        if ((p_curr_ctx->sp_limit + TFM_FPU_CONTEXT_SIZE +
                sizeof(struct tfm_additional_context_t)) > __get_PSP()) {
            tfm_core_panic();
        }

        /*
         * If required, let the platform update boundary based on its
         * implementation. Change privilege, MPU or other configurations.
         */
        FIH_CALL(tfm_hal_boundary_need_switch, fih_bool,
                 p_part_curr->boundary, p_part_next->boundary);
        if (fih_not_eq(fih_bool, fih_int_encode(false))) {
            FIH_CALL(tfm_hal_activate_boundary, fih_rc,
                     p_part_next->p_ldinf, p_part_next->boundary);
            if (fih_not_eq(fih_rc, fih_int_encode(TFM_HAL_SUCCESS))) {
                tfm_core_panic();
            }
        }
        ARCH_FLUSH_FP_CONTEXT();

#if (CONFIG_TFM_SECURE_THREAD_MASK_NS_INTERRUPT == 1) && defined(CONFIG_TFM_USE_TRUSTZONE)
        if (IS_NS_AGENT_TZ(p_part_next->p_ldinf)) {
            /*
             * The Non-Secure Agent for TrustZone is about to be scheduled.
             * A secure partition was scheduled previously, so BASEPRI must be
             * non-zero. However, BASEPRI only needs to be reset to 0 if
             * Non-Secure code execution was interrupted (and did not reach
             * secure execution through a veneer call; veneers set and unset
             * BASEPRI on entry and exit). In that case
             * basepri_set_by_ipc_schedule is set, so it can be used as the
             * condition.
             */
            assert(__get_BASEPRI() == SECURE_THREAD_EXECUTION_PRIORITY);
            if (basepri_set_by_ipc_schedule) {
                basepri_set_by_ipc_schedule = false;
                __set_BASEPRI(0);
            }
        }
#endif

        AAPCS_DUAL_U32_SET_A1(ctx_ctrls, (uint32_t)pth_next->p_context_ctrl);

        CURRENT_THREAD = pth_next;
    }

    /* Update meta indicator */
    if (p_part_next->p_metadata == NULL) {
        tfm_core_panic();
    }
    p_partition_metadata = (uintptr_t)(p_part_next->p_metadata);

    /*
     * ctx_ctrls is set from struct thread_t's p_context_ctrl, and p_part_curr
     * and p_part_next are derived from the thread pointers.
     * struct partition_t's ctx_ctrl is what struct thread_t's p_context_ctrl
     * points to, but the optimiser doesn't know that when building this code.
     * Use that information to check that the context, thread and partition
     * are all consistent.
     */
    if (ctx_ctrls.u32_regs.r0 != (uint32_t)&p_part_curr->ctx_ctrl) {
        tfm_core_panic();
    }

    if (ctx_ctrls.u32_regs.r1 != (uint32_t)&p_part_next->ctx_ctrl) {
        tfm_core_panic();
    }

    if (&p_part_next->thrd != CURRENT_THREAD) {
        tfm_core_panic();
    }

    /* Also double-check the metadata. */
    if ((uintptr_t)GET_CTX_OWNER(ctx_ctrls.u32_regs.r1)->p_metadata != p_partition_metadata) {
        tfm_core_panic();
    }

    CRITICAL_SECTION_LEAVE(cs);

    return AAPCS_DUAL_U32_AS_U64(ctx_ctrls);
}