1 /*
2  * Copyright (c) 2021-2024, Arm Limited. All rights reserved.
3  * Copyright (c) 2022-2024 Cypress Semiconductor Corporation (an Infineon
4  * company) or an affiliate of Cypress Semiconductor Corporation. All rights
5  * reserved.
6  *
7  * SPDX-License-Identifier: BSD-3-Clause
8  *
9  */
10 
11 #include "interrupt.h"
12 
13 #include "bitops.h"
14 #include "current.h"
15 #include "fih.h"
16 #include "svc_num.h"
17 #include "tfm_arch.h"
18 #include "tfm_hal_interrupt.h"
19 #include "tfm_hal_isolation.h"
20 #include "tfm_svcalls.h"
21 #include "thread.h"
22 #include "utilities.h"
23 #include "load/spm_load_api.h"
24 #include "ffm/backend.h"
25 #include "internal_status_code.h"
26 
27 #if TFM_ISOLATION_LEVEL != 1
28 extern void tfm_flih_func_return(psa_flih_result_t result);
29 
/*
 * Trampoline that requests SPM to run a FLIH function deprivileged.
 *
 * Naked function: no prologue/epilogue is generated, so per the AAPCS the
 * three parameters remain in r0-r2 when the SVC is taken and are consumed
 * by the TFM_SVC_PREPARE_DEPRIV_FLIH handler (which stores curr_component,
 * in r2, on the MSP stack frame - see tfm_flih_return_to_isr). Execution
 * continues at "BX LR" once the deprivileged FLIH run completes, with the
 * psa_flih_result_t in r0 as this function's return value.
 */
__attribute__((naked))
static psa_flih_result_t tfm_flih_deprivileged_handling(struct partition_t *p_pt,
                                                        uintptr_t fn_flih,
                                                        void *curr_component)
{
    __ASM volatile("SVC "M2S(TFM_SVC_PREPARE_DEPRIV_FLIH)"           \n"
                   "BX LR                                            \n"
                   );
}
39 
tfm_flih_prepare_depriv_flih(struct partition_t * p_owner_sp,uintptr_t flih_func)40 uint32_t tfm_flih_prepare_depriv_flih(struct partition_t *p_owner_sp,
41                                       uintptr_t flih_func)
42 {
43     const struct partition_t *p_curr_sp;
44     uintptr_t sp_base, sp_limit, curr_stack, ctx_stack;
45     struct context_ctrl_t flih_ctx_ctrl;
46     fih_int fih_rc = FIH_FAILURE;
47     FIH_RET_TYPE(bool) fih_bool;
48 
49     /* Come too early before runtime setup, should not happen. */
50     if (!CURRENT_THREAD) {
51         tfm_core_panic();
52     }
53 
54     p_curr_sp = GET_CURRENT_COMPONENT();
55     sp_base  = LOAD_ALLOCED_STACK_ADDR(p_owner_sp->p_ldinf)
56                                               + p_owner_sp->p_ldinf->stack_size;
57     sp_limit = LOAD_ALLOCED_STACK_ADDR(p_owner_sp->p_ldinf);
58 
59     curr_stack = (uintptr_t)__get_PSP();
60     if (curr_stack < sp_base && curr_stack > sp_limit) {
61         /* The IRQ Partition's stack is being used */
62         ctx_stack = curr_stack;
63     } else {
64         ctx_stack = p_owner_sp->thrd.p_context_ctrl->sp;
65     }
66 
67     FIH_CALL(tfm_hal_boundary_need_switch, fih_bool,
68              p_curr_sp->boundary, p_owner_sp->boundary);
69     if (fih_not_eq(fih_bool, fih_int_encode(false))) {
70         /*
71          * FPU lazy stacking context preservation uses privilege and relative priorities
72          * recorded during original stacking. Thus it's important to flush FP context
73          * before boundary is changed for a new partition.
74          */
75         ARCH_FLUSH_FP_CONTEXT();
76 
77         FIH_CALL(tfm_hal_activate_boundary, fih_rc,
78                  p_owner_sp->p_ldinf, p_owner_sp->boundary);
79         if (fih_not_eq(fih_rc, fih_int_encode(TFM_HAL_SUCCESS))) {
80             tfm_core_panic();
81         }
82     }
83 
84     /*
85      * The CURRENT_COMPONENT has been stored on MSP by the SVC call, safe to
86      * update it.
87      */
88     SET_CURRENT_COMPONENT(p_owner_sp);
89 
90     flih_ctx_ctrl.sp_limit = sp_limit;
91     flih_ctx_ctrl.sp       = ctx_stack;
92 
93     tfm_arch_init_context(&flih_ctx_ctrl,
94                           flih_func, NULL,
95                           (uintptr_t)tfm_flih_func_return);
96 
97     (void)tfm_arch_refresh_hardware_context(&flih_ctx_ctrl);
98 
99     return flih_ctx_ctrl.exc_ret;
100 }
101 
/*
 * Go back to ISR from FLIH functions.
 *
 * Undoes what tfm_flih_prepare_depriv_flih set up: restores the previously
 * running partition's isolation boundary (if it was switched), restores the
 * interrupted process stack pointer/limit, and hands the FLIH result back
 * to the ISR via r0 of the saved state context.
 *
 * 'result' is the value returned by the FLIH function.
 * 'p_ctx_flih_ret' points at the frame saved on MSP when the
 * TFM_SVC_PREPARE_DEPRIV_FLIH SVC was taken; r2 of its state context holds
 * the partition that was current before the FLIH ran (passed as the third
 * argument of tfm_flih_deprivileged_handling).
 *
 * Returns the EXC_RETURN value to resume the interrupted ISR.
 */
uint32_t tfm_flih_return_to_isr(psa_flih_result_t result,
                                struct context_flih_ret_t *p_ctx_flih_ret)
{
    const struct partition_t *p_owner_sp;
    struct partition_t *p_prev_sp;
    FIH_RET_TYPE(bool) fih_bool;
    fih_int fih_rc = FIH_FAILURE;

    /* r2 carried the pre-FLIH CURRENT_COMPONENT across the SVC. */
    p_prev_sp = (struct partition_t *)(p_ctx_flih_ret->state_ctx.r2);
    p_owner_sp = GET_CURRENT_COMPONENT();

    /* Switch the boundary back only if the FLIH owner's differs from the
     * interrupted partition's - FIH-hardened comparison. */
    FIH_CALL(tfm_hal_boundary_need_switch, fih_bool,
             p_owner_sp->boundary, p_prev_sp->boundary);
    if (fih_not_eq(fih_bool, fih_int_encode(false))) {
        /*
         * FPU lazy stacking context preservation uses privilege and relative priorities
         * recorded during original stacking. Thus it's important to flush FP context
         * before boundary is changed for a new partition.
         */
        ARCH_FLUSH_FP_CONTEXT();

        FIH_CALL(tfm_hal_activate_boundary, fih_rc,
                 p_prev_sp->p_ldinf, p_prev_sp->boundary);
        if (fih_not_eq(fih_rc, fih_int_encode(TFM_HAL_SUCCESS))) {
            tfm_core_panic();
        }
    }

    /*
     * If the interrupted Thread mode context was running SPM code, then
     * Privileged thread mode needs to be restored.
     */
    if (tfm_svc_thread_mode_spm_active()) {
        __set_CONTROL_nPRIV(0);
    }

    /* Restore current component */
    SET_CURRENT_COMPONENT(p_prev_sp);

    /* Restore the interrupted PSP/PSPLIM saved at SVC entry. */
    arch_update_process_sp(p_ctx_flih_ret->psp, p_ctx_flih_ret->psplim);

    /* Set FLIH result to the ISR */
    p_ctx_flih_ret->state_ctx.r0 = (uint32_t)result;

    return p_ctx_flih_ret->exc_return;
}
149 #endif
150 
/*
 * Look up the IRQ load information bound to 'signal' in the partition load
 * info 'p_ldinf'.
 *
 * Returns a pointer to the matching irq_load_info_t entry, or NULL when the
 * signal is not a valid single-bit IRQ signal or no entry matches.
 */
const struct irq_load_info_t *get_irq_info_for_signal(
                                    const struct partition_load_info_t *p_ldinf,
                                    psa_signal_t signal)
{
    const struct irq_load_info_t *p_irq;
    size_t idx;

    /* A valid IRQ signal has exactly one bit set. */
    if (!IS_ONLY_ONE_BIT_IN_UINT32(signal)) {
        return NULL;
    }

    /* Walk the partition's IRQ info table looking for the signal. */
    for (p_irq = LOAD_INFO_IRQ(p_ldinf), idx = 0;
         idx < p_ldinf->nirqs;
         idx++, p_irq++) {
        if (p_irq->signal == signal) {
            return p_irq;
        }
    }

    return NULL;
}
171 
/*
 * Common interrupt entry for partition-owned interrupts, called from the
 * platform ISR with the owning partition 'p_pt' and the IRQ load info
 * 'p_ildi' of the interrupt source.
 *
 * SLIH model (no flih_func): the interrupt source is disabled and the
 * signal is asserted to the partition.
 * FLIH model: the FLIH function runs immediately - privileged when no
 * boundary switch is needed (or at isolation level 1), otherwise through
 * the deprivileged SVC path - and the signal is asserted only if it
 * returns PSA_FLIH_SIGNAL.
 */
void spm_handle_interrupt(struct partition_t *p_pt,
                          const struct irq_load_info_t *p_ildi)
{
    psa_flih_result_t flih_result;
    psa_status_t ret;
    FIH_RET_TYPE(bool) fih_bool;

    if (!p_pt || !p_ildi) {
        tfm_core_panic();
    }

    /* The IRQ info must belong to the partition it is delivered to. */
    if (p_ildi->pid != p_pt->p_ldinf->pid) {
        tfm_core_panic();
    }

    if (p_ildi->flih_func == NULL) {
        /* SLIH Model Handling */
        tfm_hal_irq_disable(p_ildi->source);
        flih_result = PSA_FLIH_SIGNAL;
    } else {
        /* FLIH Model Handling */
#if TFM_ISOLATION_LEVEL == 1
        /* Level 1: everything is privileged, call the FLIH directly. */
        flih_result = p_ildi->flih_func();
        (void)fih_bool;
#else
        /* Run privileged only if the partition shares the SPM boundary;
         * otherwise deprivilege via the SVC trampoline. */
        FIH_CALL(tfm_hal_boundary_need_switch, fih_bool,
                 get_spm_boundary(), p_pt->boundary);
        if (fih_eq(fih_bool, fih_int_encode(false))) {
            flih_result = p_ildi->flih_func();
        } else {
            flih_result = tfm_flih_deprivileged_handling(
                                                p_pt,
                                                (uintptr_t)p_ildi->flih_func,
                                                GET_CURRENT_COMPONENT());
        }
#endif
    }

    if (flih_result == PSA_FLIH_SIGNAL) {
        ret = backend_assert_signal(p_pt, p_ildi->signal);
        /* In SFN backend, there is only one thread, no thread switch. */
#if CONFIG_TFM_SPM_BACKEND_SFN != 1
        if (ret == STATUS_NEED_SCHEDULE) {
            arch_attempt_schedule();
        }
#else
        (void)ret;
#endif
    }
}
222