// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015-2022, Linaro Limited
 */

#include <arm.h>
#include <kernel/abort.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread_private.h>
#include <kernel/user_mode_ctx.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_pager.h>
#include <tee/tee_svc.h>
#include <trace.h>
#include <unw/unwind.h>

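/*
 * Classification of aborts used by abort_handler(): user mode faults that
 * make the TA panic, lazy VFP enablement for user mode, faults the pager
 * may be able to resolve, and faults that are simply ignored.
 */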
enum fault_type {
	FAULT_TYPE_USER_MODE_PANIC,
	FAULT_TYPE_USER_MODE_VFP,
	FAULT_TYPE_PAGEABLE,
	FAULT_TYPE_IGNORE,
};

#ifdef CFG_UNWIND

#ifdef ARM32
/*
 * Kernel mode unwind (32-bit execution state).
 */
static void __print_stack_unwind(struct abort_info *ai)
{
	struct unwind_state_arm32 state = { };
	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
	uint32_t sp = 0;
	uint32_t lr = 0;

	assert(!abort_is_user_exception(ai));

	if (mode == CPSR_MODE_SYS) {
		sp = ai->regs->usr_sp;
		lr = ai->regs->usr_lr;
	} else {
		sp = read_mode_sp(mode);
		lr = read_mode_lr(mode);
	}

	memset(&state, 0, sizeof(state));
	state.registers[0] = ai->regs->r0;
	state.registers[1] = ai->regs->r1;
	state.registers[2] = ai->regs->r2;
	state.registers[3] = ai->regs->r3;
	state.registers[4] = ai->regs->r4;
	state.registers[5] = ai->regs->r5;
	state.registers[6] = ai->regs->r6;
	state.registers[7] = ai->regs->r7;
	state.registers[8] = ai->regs->r8;
	state.registers[9] = ai->regs->r9;
	state.registers[10] = ai->regs->r10;
	state.registers[11] = ai->regs->r11;
	state.registers[13] = sp;
	state.registers[14] = lr;
	state.registers[15] = ai->pc;

	print_stack_arm32(&state, thread_stack_start(), thread_stack_size());
}
#endif /* ARM32 */

#ifdef ARM64
/* Kernel mode unwind (64-bit execution state) */
static void __print_stack_unwind(struct abort_info *ai)
{
	struct unwind_state_arm64 state = {
		.pc = ai->regs->elr,
		.fp = ai->regs->x29,
	};

	print_stack_arm64(&state, thread_stack_start(), thread_stack_size());
}
#endif /*ARM64*/

#else /* CFG_UNWIND */
static void __print_stack_unwind(struct abort_info *ai __unused)
{
}
#endif /* CFG_UNWIND */

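/* Return a short human-readable name for an ABORT_TYPE_* value. */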
static __maybe_unused const char *abort_type_to_str(uint32_t abort_type)
{
	if (abort_type == ABORT_TYPE_DATA)
		return "data";
	if (abort_type == ABORT_TYPE_PREFETCH)
		return "prefetch";
	return "undef";
}

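/*
 * Return a human-readable qualifier for the MMU fault encoded in
 * @fault_descr, or an empty string when it does not apply.
 */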
static __maybe_unused const char *fault_to_str(uint32_t abort_type,
					       uint32_t fault_descr)
{
	/* fault_descr is only valid for data or prefetch abort */
	if (abort_type != ABORT_TYPE_DATA && abort_type != ABORT_TYPE_PREFETCH)
		return "";

	switch (core_mmu_get_fault_type(fault_descr)) {
	case CORE_MMU_FAULT_ALIGNMENT:
		return " (alignment fault)";
	case CORE_MMU_FAULT_TRANSLATION:
		return " (translation fault)";
	case CORE_MMU_FAULT_READ_PERMISSION:
		return " (read permission fault)";
	case CORE_MMU_FAULT_WRITE_PERMISSION:
		return " (write permission fault)";
	case CORE_MMU_FAULT_TAG_CHECK:
		return " (tag check fault)";
	default:
		return "";
	}
}

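/*
 * Dump the abort type, fault address and saved register state to the
 * console. @ctx is a short prefix ("Core" or "User mode") naming the
 * execution context that took the abort.
 */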
static __maybe_unused void
__print_abort_info(struct abort_info *ai __maybe_unused,
		   const char *ctx __maybe_unused)
{
	__maybe_unused size_t core_pos = 0;
#ifdef ARM32
	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
	__maybe_unused uint32_t sp = 0;
	__maybe_unused uint32_t lr = 0;

	if (mode == CPSR_MODE_USR || mode == CPSR_MODE_SYS) {
		sp = ai->regs->usr_sp;
		lr = ai->regs->usr_lr;
		core_pos = thread_get_tsd()->abort_core;
	} else {
		sp = read_mode_sp(mode);
		lr = read_mode_lr(mode);
		core_pos = get_core_pos();
	}
#endif /*ARM32*/
#ifdef ARM64
	if (abort_is_user_exception(ai))
		core_pos = thread_get_tsd()->abort_core;
	else
		core_pos = get_core_pos();
#endif /*ARM64*/

	EMSG_RAW("");
	EMSG_RAW("%s %s-abort at address 0x%" PRIxVA "%s",
		 ctx, abort_type_to_str(ai->abort_type), ai->va,
		 fault_to_str(ai->abort_type, ai->fault_descr));
#ifdef ARM32
	EMSG_RAW(" fsr 0x%08x ttbr0 0x%08x ttbr1 0x%08x cidr 0x%X",
		 ai->fault_descr, read_ttbr0(), read_ttbr1(),
		 read_contextidr());
	EMSG_RAW(" cpu #%zu cpsr 0x%08x",
		 core_pos, ai->regs->spsr);
	EMSG_RAW(" r0 0x%08x r4 0x%08x r8 0x%08x r12 0x%08x",
		 ai->regs->r0, ai->regs->r4, ai->regs->r8, ai->regs->ip);
	EMSG_RAW(" r1 0x%08x r5 0x%08x r9 0x%08x sp 0x%08x",
		 ai->regs->r1, ai->regs->r5, ai->regs->r9, sp);
	EMSG_RAW(" r2 0x%08x r6 0x%08x r10 0x%08x lr 0x%08x",
		 ai->regs->r2, ai->regs->r6, ai->regs->r10, lr);
	EMSG_RAW(" r3 0x%08x r7 0x%08x r11 0x%08x pc 0x%08x",
		 ai->regs->r3, ai->regs->r7, ai->regs->r11, ai->pc);
#endif /*ARM32*/
#ifdef ARM64
	EMSG_RAW(" esr 0x%08x ttbr0 0x%08" PRIx64 " ttbr1 0x%08" PRIx64
		 " cidr 0x%X", ai->fault_descr, read_ttbr0_el1(),
		 read_ttbr1_el1(), read_contextidr_el1());
	EMSG_RAW(" cpu #%zu cpsr 0x%08x",
		 core_pos, (uint32_t)ai->regs->spsr);
	EMSG_RAW(" x0 %016" PRIx64 " x1 %016" PRIx64,
		 ai->regs->x0, ai->regs->x1);
	EMSG_RAW(" x2 %016" PRIx64 " x3 %016" PRIx64,
		 ai->regs->x2, ai->regs->x3);
	EMSG_RAW(" x4 %016" PRIx64 " x5 %016" PRIx64,
		 ai->regs->x4, ai->regs->x5);
	EMSG_RAW(" x6 %016" PRIx64 " x7 %016" PRIx64,
		 ai->regs->x6, ai->regs->x7);
	EMSG_RAW(" x8 %016" PRIx64 " x9 %016" PRIx64,
		 ai->regs->x8, ai->regs->x9);
	EMSG_RAW(" x10 %016" PRIx64 " x11 %016" PRIx64,
		 ai->regs->x10, ai->regs->x11);
	EMSG_RAW(" x12 %016" PRIx64 " x13 %016" PRIx64,
		 ai->regs->x12, ai->regs->x13);
	EMSG_RAW(" x14 %016" PRIx64 " x15 %016" PRIx64,
		 ai->regs->x14, ai->regs->x15);
	EMSG_RAW(" x16 %016" PRIx64 " x17 %016" PRIx64,
		 ai->regs->x16, ai->regs->x17);
	EMSG_RAW(" x18 %016" PRIx64 " x19 %016" PRIx64,
		 ai->regs->x18, ai->regs->x19);
	EMSG_RAW(" x20 %016" PRIx64 " x21 %016" PRIx64,
		 ai->regs->x20, ai->regs->x21);
	EMSG_RAW(" x22 %016" PRIx64 " x23 %016" PRIx64,
		 ai->regs->x22, ai->regs->x23);
	EMSG_RAW(" x24 %016" PRIx64 " x25 %016" PRIx64,
		 ai->regs->x24, ai->regs->x25);
	EMSG_RAW(" x26 %016" PRIx64 " x27 %016" PRIx64,
		 ai->regs->x26, ai->regs->x27);
	EMSG_RAW(" x28 %016" PRIx64 " x29 %016" PRIx64,
		 ai->regs->x28, ai->regs->x29);
	EMSG_RAW(" x30 %016" PRIx64 " elr %016" PRIx64,
		 ai->regs->x30, ai->regs->elr);
	EMSG_RAW(" sp_el0 %016" PRIx64, ai->regs->sp_el0);
#endif /*ARM64*/
}

/*
 * Print abort info and (optionally) stack dump to the console
 * @ai kernel-mode abort info.
 * @stack_dump true to show a stack trace
 */
static void __abort_print(struct abort_info *ai, bool stack_dump)
{
	assert(!abort_is_user_exception(ai));

	__print_abort_info(ai, "Core");

	if (stack_dump) {
		trace_printf_helper_raw(TRACE_ERROR, true,
					"TEE load address @ %#"PRIxVA,
					VCORE_START_VA);
		__print_stack_unwind(ai);
	}
}

void abort_print(struct abort_info *ai)
{
	__abort_print(ai, false);
}

void abort_print_error(struct abort_info *ai)
{
	__abort_print(ai, true);
}

/* This function must be called from a normal thread */
void abort_print_current_ts(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct abort_info ai = { };
	struct ts_session *s = ts_get_current_session();

	ai.abort_type = tsd->abort_type;
	ai.fault_descr = tsd->abort_descr;
	ai.va = tsd->abort_va;
	ai.pc = tsd->abort_regs.elr;
	ai.regs = &tsd->abort_regs;

	if (ai.abort_type != ABORT_TYPE_USER_MODE_PANIC)
		__print_abort_info(&ai, "User mode");

	s->ctx->ops->dump_state(s->ctx);

#if defined(CFG_FTRACE_SUPPORT)
	if (s->ctx->ops->dump_ftrace) {
		s->fbuf = NULL;
		s->ctx->ops->dump_ftrace(s->ctx);
	}
#endif
}

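/*
 * Stash the abort details in thread-specific data so that
 * abort_print_current_ts() can report them later from a normal thread.
 */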
static void save_abort_info_in_tsd(struct abort_info *ai)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	tsd->abort_type = ai->abort_type;
	tsd->abort_descr = ai->fault_descr;
	tsd->abort_va = ai->va;
	tsd->abort_regs = *ai->regs;
	tsd->abort_core = get_core_pos();
}

#ifdef ARM32
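/*
 * Fill in @ai from the fault status and address registers (DFSR/DFAR for
 * data aborts, IFSR/IFAR for prefetch aborts) selected by @abort_type.
 */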
static void set_abort_info(uint32_t abort_type, struct thread_abort_regs *regs,
			   struct abort_info *ai)
{
	switch (abort_type) {
	case ABORT_TYPE_DATA:
		ai->fault_descr = read_dfsr();
		ai->va = read_dfar();
		break;
	case ABORT_TYPE_PREFETCH:
		ai->fault_descr = read_ifsr();
		ai->va = read_ifar();
		break;
	default:
		ai->fault_descr = 0;
		ai->va = regs->elr;
		break;
	}
	ai->abort_type = abort_type;
	ai->pc = regs->elr;
	ai->regs = regs;
}
#endif /*ARM32*/

#ifdef ARM64
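/*
 * Fill in @ai by decoding the exception class in ESR_EL1; the abort_type
 * argument is ignored since ESR_EL1 carries all needed information.
 */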
static void set_abort_info(uint32_t abort_type __unused,
			   struct thread_abort_regs *regs, struct abort_info *ai)
{
	ai->fault_descr = read_esr_el1();
	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
	case ESR_EC_IABT_EL0:
	case ESR_EC_IABT_EL1:
		ai->abort_type = ABORT_TYPE_PREFETCH;
		ai->va = read_far_el1();
		break;
	case ESR_EC_DABT_EL0:
	case ESR_EC_DABT_EL1:
	case ESR_EC_SP_ALIGN:
		ai->abort_type = ABORT_TYPE_DATA;
		ai->va = read_far_el1();
		break;
	default:
		ai->abort_type = ABORT_TYPE_UNDEF;
		ai->va = regs->elr;
	}
	ai->pc = regs->elr;
	ai->regs = regs;
}
#endif /*ARM64*/

#ifdef ARM32
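/*
 * Rewrite the saved register state so that the exception return resumes in
 * thread_unwind_user_mode() with TEE_ERROR_TARGET_DEAD, stopping user mode
 * execution and returning to TEE Core.
 */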
static void handle_user_mode_panic(struct abort_info *ai)
{
	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->r0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->r1 = true;
	ai->regs->r2 = 0xdeadbeef;
	ai->regs->elr = (uint32_t)thread_unwind_user_mode;
	ai->regs->spsr &= CPSR_FIA;
	ai->regs->spsr &= ~CPSR_MODE_MASK;
	ai->regs->spsr |= CPSR_MODE_SVC;
	/* Select Thumb or ARM mode */
	if (ai->regs->elr & 1)
		ai->regs->spsr |= CPSR_T;
	else
		ai->regs->spsr &= ~CPSR_T;
}
#endif /*ARM32*/

#ifdef ARM64
static void handle_user_mode_panic(struct abort_info *ai)
{
	struct thread_ctx *tc __maybe_unused = NULL;
	uint32_t daif = 0;

	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->x0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->x1 = true;
	ai->regs->x2 = 0xdeadbeef;
	ai->regs->elr = (vaddr_t)thread_unwind_user_mode;
	ai->regs->sp_el0 = thread_get_saved_thread_sp();

#if defined(CFG_CORE_PAUTH)
	/*
	 * We're going to return to the privileged core thread, update the
	 * APIA key to match the key used by the thread.
	 */
	tc = threads + thread_get_id();
	ai->regs->apiakey_hi = tc->keys.apia_hi;
	ai->regs->apiakey_lo = tc->keys.apia_lo;
#endif

	daif = (ai->regs->spsr >> SPSR_32_AIF_SHIFT) & SPSR_32_AIF_MASK;
	/* XXX what about DAIF_D? */
	ai->regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, daif);
}
#endif /*ARM64*/

#ifdef CFG_WITH_VFP
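/*
 * Lazily enable VFP for the current user mode context; the trapped
 * instruction is then retried with VFP access allowed.
 */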
static void handle_user_mode_vfp(void)
{
	struct ts_session *s = ts_get_current_session();

	thread_user_enable_vfp(&to_user_mode_ctx(s->ctx)->vfp);
}
#endif /*CFG_WITH_VFP*/

#ifdef CFG_WITH_USER_TA
#ifdef ARM32
/* Returns true if the exception originated from user mode */
bool abort_is_user_exception(struct abort_info *ai)
{
	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
}
#endif /*ARM32*/

#ifdef ARM64
/* Returns true if the exception originated from user mode */
bool abort_is_user_exception(struct abort_info *ai)
{
	uint32_t spsr = ai->regs->spsr;

	if (spsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
		return true;
	if (((spsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
	    SPSR_64_MODE_EL0)
		return true;
	return false;
}
#endif /*ARM64*/
#else /*CFG_WITH_USER_TA*/
bool abort_is_user_exception(struct abort_info *ai __unused)
{
	return false;
}
#endif /*CFG_WITH_USER_TA*/

#if defined(CFG_WITH_VFP) && defined(CFG_WITH_USER_TA)
#ifdef ARM32
static bool is_vfp_fault(struct abort_info *ai)
{
	if ((ai->abort_type != ABORT_TYPE_UNDEF) || vfp_is_enabled())
		return false;

	/*
	 * Not entirely accurate, but if it's a truly undefined instruction
	 * we'll end up in this function again, except this time
	 * vfp_is_enabled() will return true so we'll return false.
	 */
	return true;
}
#endif /*ARM32*/

#ifdef ARM64
static bool is_vfp_fault(struct abort_info *ai)
{
	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
	case ESR_EC_FP_ASIMD:
	case ESR_EC_AARCH32_FP:
	case ESR_EC_AARCH64_FP:
		return true;
	default:
		return false;
	}
}
#endif /*ARM64*/
#else /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
static bool is_vfp_fault(struct abort_info *ai __unused)
{
	return false;
}
#endif /*CFG_WITH_VFP && CFG_WITH_USER_TA*/

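/*
 * The WnR (write not read) bit in the fault status tells whether a data
 * abort was caused by a write access: bit 11 in the ARM32 DFSR, bit 6 in
 * the ARM64 ESR ISS field.
 */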
bool abort_is_write_fault(struct abort_info *ai)
{
#ifdef ARM32
	unsigned int write_not_read = 11;
#endif
#ifdef ARM64
	unsigned int write_not_read = 6;
#endif

	return ai->abort_type == ABORT_TYPE_DATA &&
	       (ai->fault_descr & BIT(write_not_read));
}

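/*
 * Classify the abort for abort_handler(). Unrecoverable faults taken in
 * kernel mode are reported and cause a panic directly from here.
 */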
static enum fault_type get_fault_type(struct abort_info *ai)
{
	if (abort_is_user_exception(ai)) {
		if (is_vfp_fault(ai))
			return FAULT_TYPE_USER_MODE_VFP;
#ifndef CFG_WITH_PAGER
		return FAULT_TYPE_USER_MODE_PANIC;
#endif
	}

	if (thread_is_from_abort_mode()) {
		abort_print_error(ai);
		panic("[abort] abort in abort handler (trap CPU)");
	}

	if (ai->abort_type == ABORT_TYPE_UNDEF) {
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_MODE_PANIC;
		abort_print_error(ai);
		panic("[abort] undefined abort (trap CPU)");
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_ALIGNMENT:
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_MODE_PANIC;
		abort_print_error(ai);
		panic("[abort] alignment fault! (trap CPU)");
		break;

	case CORE_MMU_FAULT_ACCESS_BIT:
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_MODE_PANIC;
		abort_print_error(ai);
		panic("[abort] access bit fault! (trap CPU)");
		break;

	case CORE_MMU_FAULT_DEBUG_EVENT:
		if (!abort_is_user_exception(ai))
			abort_print(ai);
		DMSG("[abort] Ignoring debug event!");
		return FAULT_TYPE_IGNORE;

	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_WRITE_PERMISSION:
	case CORE_MMU_FAULT_READ_PERMISSION:
		return FAULT_TYPE_PAGEABLE;

	case CORE_MMU_FAULT_ASYNC_EXTERNAL:
		if (!abort_is_user_exception(ai))
			abort_print(ai);
		DMSG("[abort] Ignoring async external abort!");
		return FAULT_TYPE_IGNORE;

	case CORE_MMU_FAULT_TAG_CHECK:
		if (abort_is_user_exception(ai))
			return FAULT_TYPE_USER_MODE_PANIC;
		abort_print_error(ai);
		panic("[abort] Tag check fault! (trap CPU)");
		break;

	case CORE_MMU_FAULT_OTHER:
	default:
		if (!abort_is_user_exception(ai))
			abort_print(ai);
		DMSG("[abort] Unhandled fault!");
		return FAULT_TYPE_IGNORE;
	}
}

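/*
 * Main abort handler, entered with the register state saved at exception
 * entry. Depending on the fault classification it ignores the fault, makes
 * the user mode TA panic, enables VFP for user mode, or asks the pager to
 * resolve the fault.
 */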
void abort_handler(uint32_t abort_type, struct thread_abort_regs *regs)
{
	struct abort_info ai;
	bool handled;

	set_abort_info(abort_type, regs, &ai);

	switch (get_fault_type(&ai)) {
	case FAULT_TYPE_IGNORE:
		break;
	case FAULT_TYPE_USER_MODE_PANIC:
		DMSG("[abort] abort in User mode (TA will panic)");
		save_abort_info_in_tsd(&ai);
		vfp_disable();
		handle_user_mode_panic(&ai);
		break;
#ifdef CFG_WITH_VFP
	case FAULT_TYPE_USER_MODE_VFP:
		handle_user_mode_vfp();
		break;
#endif
	case FAULT_TYPE_PAGEABLE:
	default:
		if (thread_get_id_may_fail() < 0) {
			abort_print_error(&ai);
			panic("abort outside thread context");
		}
		thread_kernel_save_vfp();
		handled = tee_pager_handle_fault(&ai);
		thread_kernel_restore_vfp();
		if (!handled) {
			if (!abort_is_user_exception(&ai)) {
				abort_print_error(&ai);
				panic("unhandled pageable abort");
			}
			DMSG("[abort] abort in User mode (TA will panic)");
			save_abort_info_in_tsd(&ai);
			vfp_disable();
			handle_user_mode_panic(&ai);
		}
		break;
	}
}