// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015-2021, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <config.h>
#include <kernel/boot.h>
#include <kernel/misc.h>
#include <kernel/notif.h>
#include <kernel/tee_l2cc_mutex.h>
#include <kernel/virtualization.h>
#include <mm/core_mmu.h>
#include <optee_msg.h>
#include <sm/optee_smc.h>
#include <tee/entry_fast.h>

#ifdef CFG_CORE_RESERVED_SHM
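/*
 * Report the physical address, size and cacheability of the reserved
 * non-secure shared memory carveout to the normal world.
 */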
static void tee_entry_get_shm_config(struct thread_smc_args *args)
{
	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = default_nsec_shm_paddr;
	args->a2 = default_nsec_shm_size;
	/* Should this be TEESMC cache attributes instead? */
	args->a3 = core_mmu_is_shm_cached();
}
#endif

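/*
 * Handle the OPTEE_SMC_L2CC_MUTEX fast call: get, set, enable or disable
 * the L2 cache controller mutex. Only supported on ARM32; other
 * configurations report the call as an unknown function.
 */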
static void tee_entry_fastcall_l2cc_mutex(struct thread_smc_args *args)
{
	TEE_Result ret;
#ifdef ARM32
	paddr_t pa = 0;

	switch (args->a1) {
	case OPTEE_SMC_L2CC_MUTEX_GET_ADDR:
		ret = tee_get_l2cc_mutex(&pa);
		reg_pair_from_64(pa, &args->a2, &args->a3);
		break;
	case OPTEE_SMC_L2CC_MUTEX_SET_ADDR:
		pa = reg_pair_to_64(args->a2, args->a3);
		ret = tee_set_l2cc_mutex(&pa);
		break;
	case OPTEE_SMC_L2CC_MUTEX_ENABLE:
		ret = tee_enable_l2cc_mutex();
		break;
	case OPTEE_SMC_L2CC_MUTEX_DISABLE:
		ret = tee_disable_l2cc_mutex();
		break;
	default:
		args->a0 = OPTEE_SMC_RETURN_EBADCMD;
		return;
	}
#else
	ret = TEE_ERROR_NOT_SUPPORTED;
#endif
	if (ret == TEE_ERROR_NOT_SUPPORTED)
		args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
	else if (ret)
		args->a0 = OPTEE_SMC_RETURN_EBADADDR;
	else
		args->a0 = OPTEE_SMC_RETURN_OK;
}

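/*
 * Negotiate capabilities with the normal world: report which kinds of
 * shared memory, virtualization and asynchronous notification support
 * this build of OP-TEE provides.
 */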
static void tee_entry_exchange_capabilities(struct thread_smc_args *args)
{
	bool res_shm_en = IS_ENABLED(CFG_CORE_RESERVED_SHM);
	bool dyn_shm_en __maybe_unused = false;

	/*
	 * Currently we ignore OPTEE_SMC_NSEC_CAP_UNIPROCESSOR.
	 *
	 * The memory mapping of shared memory is defined as normal
	 * shared memory for SMP systems and normal memory for UP
	 * systems. Currently we map all memory as shared in secure
	 * world.
	 *
	 * When translation tables are created with the shared bit
	 * cleared for uniprocessor systems we'll need to check
	 * OPTEE_SMC_NSEC_CAP_UNIPROCESSOR.
	 */

	if (args->a1 & ~OPTEE_SMC_NSEC_CAP_UNIPROCESSOR) {
		/* Unknown capability. */
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = 0;

	if (res_shm_en)
		args->a1 |= OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM;
	IMSG("Reserved shared memory is %sabled", res_shm_en ? "en" : "dis");

#if defined(CFG_CORE_DYN_SHM)
	dyn_shm_en = core_mmu_nsec_ddr_is_defined();
	if (dyn_shm_en)
		args->a1 |= OPTEE_SMC_SEC_CAP_DYNAMIC_SHM;
#endif
	IMSG("Dynamic shared memory is %sabled", dyn_shm_en ? "en" : "dis");

	if (IS_ENABLED(CFG_VIRTUALIZATION))
		args->a1 |= OPTEE_SMC_SEC_CAP_VIRTUALIZATION;
	IMSG("Normal World virtualization support is %sabled",
	     IS_ENABLED(CFG_VIRTUALIZATION) ? "en" : "dis");

	args->a1 |= OPTEE_SMC_SEC_CAP_MEMREF_NULL;

	if (IS_ENABLED(CFG_CORE_ASYNC_NOTIF)) {
		args->a1 |= OPTEE_SMC_SEC_CAP_ASYNC_NOTIF;
		args->a2 = NOTIF_VALUE_MAX;
	}
	IMSG("Asynchronous notifications are %sabled",
	     IS_ENABLED(CFG_CORE_ASYNC_NOTIF) ? "en" : "dis");

	args->a1 |= OPTEE_SMC_SEC_CAP_RPC_ARG;
	args->a3 = THREAD_RPC_MAX_NUM_PARAMS;
}

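/*
 * Disable the cache of preallocated RPC shared memory objects and hand
 * back the cookie of a cached object, if any, so the normal world can
 * free it. Reports EBUSY if the cache cannot be disabled right now and
 * ENOTAVAIL once no cached object remains.
 */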
static void tee_entry_disable_shm_cache(struct thread_smc_args *args)
{
	uint64_t cookie;

	if (!thread_disable_prealloc_rpc_cache(&cookie)) {
		args->a0 = OPTEE_SMC_RETURN_EBUSY;
		return;
	}

	if (!cookie) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = cookie >> 32;
	args->a2 = cookie;
}

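/* Re-enable the cache of preallocated RPC shared memory objects. */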
static void tee_entry_enable_shm_cache(struct thread_smc_args *args)
{
	if (thread_enable_prealloc_rpc_cache())
		args->a0 = OPTEE_SMC_RETURN_OK;
	else
		args->a0 = OPTEE_SMC_RETURN_EBUSY;
}

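/*
 * Release the secondary core selected by args->a1 so that it starts
 * executing at the address passed in args->a3, when
 * CFG_BOOT_SECONDARY_REQUEST is enabled.
 */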
static void tee_entry_boot_secondary(struct thread_smc_args *args)
{
#if defined(CFG_BOOT_SECONDARY_REQUEST)
	if (!boot_core_release(args->a1, (paddr_t)(args->a3)))
		args->a0 = OPTEE_SMC_RETURN_OK;
	else
		args->a0 = OPTEE_SMC_RETURN_EBADCMD;
#else
	args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
#endif
}

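/* Report how many standard call threads OP-TEE is configured with. */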
static void tee_entry_get_thread_count(struct thread_smc_args *args)
{
	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = CFG_NUM_THREADS;
}

#if defined(CFG_VIRTUALIZATION)
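/* Notification from the hypervisor that a new guest has been created. */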
static void tee_entry_vm_created(struct thread_smc_args *args)
{
	uint16_t guest_id = args->a1;

	/* Only hypervisor can issue this request */
	if (args->a7 != HYP_CLNT_ID) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	if (virt_guest_created(guest_id))
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
	else
		args->a0 = OPTEE_SMC_RETURN_OK;
}

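/* Notification from the hypervisor that a guest has been destroyed. */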
static void tee_entry_vm_destroyed(struct thread_smc_args *args)
{
	uint16_t guest_id = args->a1;

	/* Only hypervisor can issue this request */
	if (args->a7 != HYP_CLNT_ID) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	if (virt_guest_destroyed(guest_id))
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
	else
		args->a0 = OPTEE_SMC_RETURN_OK;
}
#endif

/* Note: this function is weak to let platforms add special handling */
void __weak tee_entry_fast(struct thread_smc_args *args)
{
	__tee_entry_fast(args);
}

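/*
 * Retrieve the current asynchronous notification value together with
 * flags telling whether the value is valid and whether more values are
 * pending.
 */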
static void get_async_notif_value(struct thread_smc_args *args)
{
	bool value_valid = false;
	bool value_pending = false;

	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = notif_get_value(&value_valid, &value_pending);
	args->a2 = 0;
	if (value_valid)
		args->a2 |= OPTEE_SMC_ASYNC_NOTIF_VALID;
	if (value_pending)
		args->a2 |= OPTEE_SMC_ASYNC_NOTIF_PENDING;
}

/*
 * If tee_entry_fast() is overridden, it's still supposed to call this
 * function.
 */
void __tee_entry_fast(struct thread_smc_args *args)
{
	switch (args->a0) {

	/* Generic functions */
	case OPTEE_SMC_CALLS_COUNT:
		tee_entry_get_api_call_count(args);
		break;
	case OPTEE_SMC_CALLS_UID:
		tee_entry_get_api_uuid(args);
		break;
	case OPTEE_SMC_CALLS_REVISION:
		tee_entry_get_api_revision(args);
		break;
	case OPTEE_SMC_CALL_GET_OS_UUID:
		tee_entry_get_os_uuid(args);
		break;
	case OPTEE_SMC_CALL_GET_OS_REVISION:
		tee_entry_get_os_revision(args);
		break;

	/* OP-TEE specific SMC functions */
#ifdef CFG_CORE_RESERVED_SHM
	case OPTEE_SMC_GET_SHM_CONFIG:
		tee_entry_get_shm_config(args);
		break;
#endif
	case OPTEE_SMC_L2CC_MUTEX:
		tee_entry_fastcall_l2cc_mutex(args);
		break;
	case OPTEE_SMC_EXCHANGE_CAPABILITIES:
		tee_entry_exchange_capabilities(args);
		break;
	case OPTEE_SMC_DISABLE_SHM_CACHE:
		tee_entry_disable_shm_cache(args);
		break;
	case OPTEE_SMC_ENABLE_SHM_CACHE:
		tee_entry_enable_shm_cache(args);
		break;
	case OPTEE_SMC_BOOT_SECONDARY:
		tee_entry_boot_secondary(args);
		break;
	case OPTEE_SMC_GET_THREAD_COUNT:
		tee_entry_get_thread_count(args);
		break;

#if defined(CFG_VIRTUALIZATION)
	case OPTEE_SMC_VM_CREATED:
		tee_entry_vm_created(args);
		break;
	case OPTEE_SMC_VM_DESTROYED:
		tee_entry_vm_destroyed(args);
		break;
#endif

	case OPTEE_SMC_ENABLE_ASYNC_NOTIF:
		if (IS_ENABLED(CFG_CORE_ASYNC_NOTIF)) {
			notif_deliver_atomic_event(NOTIF_EVENT_STARTED);
			args->a0 = OPTEE_SMC_RETURN_OK;
		} else {
			args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
		}
		break;
	case OPTEE_SMC_GET_ASYNC_NOTIF_VALUE:
		if (IS_ENABLED(CFG_CORE_ASYNC_NOTIF))
			get_async_notif_value(args);
		else
			args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
		break;

	default:
		args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
		break;
	}
}

size_t tee_entry_generic_get_api_call_count(void)
{
	/*
	 * Number of different calls handled in this file. A target with
	 * additional calls overrides tee_entry_get_api_call_count(),
	 * calls this function and adds the number of calls it handles
	 * itself.
	 */
	size_t ret = 12;

	if (IS_ENABLED(CFG_VIRTUALIZATION))
		ret += 2;

	return ret;
}

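/*
 * The entry points below are weak so that a platform can override what
 * is reported for the API call count, API UID/revision and OS
 * UUID/revision.
 */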
void __weak tee_entry_get_api_call_count(struct thread_smc_args *args)
{
	args->a0 = tee_entry_generic_get_api_call_count();
}

void __weak tee_entry_get_api_uuid(struct thread_smc_args *args)
{
	args->a0 = OPTEE_MSG_UID_0;
	args->a1 = OPTEE_MSG_UID_1;
	args->a2 = OPTEE_MSG_UID_2;
	args->a3 = OPTEE_MSG_UID_3;
}

void __weak tee_entry_get_api_revision(struct thread_smc_args *args)
{
	args->a0 = OPTEE_MSG_REVISION_MAJOR;
	args->a1 = OPTEE_MSG_REVISION_MINOR;
}

void __weak tee_entry_get_os_uuid(struct thread_smc_args *args)
{
	args->a0 = OPTEE_MSG_OS_OPTEE_UUID_0;
	args->a1 = OPTEE_MSG_OS_OPTEE_UUID_1;
	args->a2 = OPTEE_MSG_OS_OPTEE_UUID_2;
	args->a3 = OPTEE_MSG_OS_OPTEE_UUID_3;
}

void __weak tee_entry_get_os_revision(struct thread_smc_args *args)
{
	args->a0 = CFG_OPTEE_REVISION_MAJOR;
	args->a1 = CFG_OPTEE_REVISION_MINOR;
	args->a2 = TEE_IMPL_GIT_SHA1;
}