/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/arch/barriers.h"
#include "hf/arch/gicv3.h"
#include "hf/arch/host_timer.h"
#include "hf/arch/memcpy_trapped.h"
#include "hf/arch/mmu.h"
#include "hf/arch/plat/smc.h"
#include "hf/arch/timer.h"
#include "hf/arch/vmid_base.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/ffa/cpu_cycles.h"
#include "hf/ffa/indirect_messaging.h"
#include "hf/ffa/interrupts.h"
#include "hf/ffa/notifications.h"
#include "hf/ffa/vm.h"
#include "hf/ffa_internal.h"
#include "hf/panic.h"
#include "hf/plat/interrupts.h"
#include "hf/timer_mgmt.h"
#include "hf/vm.h"

#include "debug_el1.h"
#include "el1_physical_timer.h"
#include "feature_id.h"
#include "perfmon.h"
#include "psci.h"
#include "psci_handler.h"
#include "smc.h"
#include "sysregs.h"
#include "sysregs_defs.h"

/**
 * Hypervisor Fault Address Register Non-Secure.
 */
#define HPFAR_EL2_NS (UINT64_C(0x1) << 63)

/**
 * Hypervisor Fault Address Register Faulting IPA.
 */
#define HPFAR_EL2_FIPA (UINT64_C(0xFFFFFFFFFF0))
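/*
 * Note: HPFAR_EL2.FIPA occupies bits [43:4] and holds bits [51:12] of the
 * faulting IPA, which is why the masked value is shifted left by 8 when the
 * IPA is reconstructed in fault_info_init().
 */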

/**
 * Gets the value to increment for the next PC.
 * The ESR encodes whether the instruction is 2 bytes or 4 bytes long.
 */
#define GET_NEXT_PC_INC(esr) (GET_ESR_IL(esr) ? 4 : 2)
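/*
 * ESR_ELx.IL is 0 for a trapped 16-bit instruction and 1 for a 32-bit
 * instruction, hence the increment of 2 or 4 bytes above.
 */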

/**
 * The Client ID field within X7 for an SMC64 call.
 */
#define CLIENT_ID_MASK UINT64_C(0xffff)

/**
 * Returns a reference to the currently executing vCPU.
 */
static struct vcpu *current(void)
{
        // NOLINTNEXTLINE(performance-no-int-to-ptr)
        return (struct vcpu *)read_msr(tpidr_el2);
}

/**
 * Saves the state of per-vCPU peripherals, such as the arch timer, and
 * informs the arch-independent sections that registers have been saved.
 */
void complete_saving_state(struct vcpu *vcpu)
{
        host_timer_save_arch_timer(&vcpu->regs.arch_timer);

        timer_vcpu_manage(vcpu);
        api_regs_state_saved(vcpu);

        /*
         * Since we are switching away from the current vCPU, disable the host
         * physical timer for now. If necessary, the host timer will be
         * reconfigured at the appropriate time to track the vCPU's timer
         * deadline.
         */
        host_timer_disable();
}

/**
 * Restores the state of per-vCPU peripherals, such as the arch timer.
 */
void begin_restoring_state(struct vcpu *vcpu)
{
        /*
         * If a vCPU's timer has expired while it was de-scheduled, the SPMC
         * will inject the virtual timer interrupt before resuming the vCPU.
         * If not, the timer state is still live and we need to configure the
         * host timer to track it again.
         */
        if (arch_timer_enabled(&vcpu->regs) &&
            (arch_timer_remaining_ns(&vcpu->regs) != 0)) {
                host_timer_track_deadline(&vcpu->regs.arch_timer);
        }
}

/**
 * Invalidate all stage 1 TLB entries on the current (physical) CPU for the
 * current VMID.
 */
static void invalidate_vm_tlb(void)
{
        /*
         * Ensure that the last VTTBR write has taken effect so we invalidate
         * the right set of TLB entries.
         */
        isb();

        tlbi(vmalle1);

        /*
         * Ensure that no instructions are fetched for the VM until after the
         * TLB invalidation has taken effect.
         */
        isb();

        /*
         * Ensure that no data reads or writes for the VM happen until after
         * the TLB invalidation has taken effect. Non-shareable is enough
         * because the TLB is local to the CPU.
         */
        dsb(nsh);
}

/**
 * Invalidates the TLB if a different vCPU is being run than the last vCPU of
 * the same VM which was run on the current pCPU.
 *
 * This is necessary because VMs may (contrary to the architecture
 * specification) use inconsistent ASIDs across vCPUs. c.f. KVM's similar
 * workaround:
 * https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=94d0e5980d6791b9
 */
void maybe_invalidate_tlb(struct vcpu *vcpu)
{
        size_t current_cpu_index = cpu_index(vcpu->cpu);
        ffa_vcpu_index_t new_vcpu_index = vcpu_index(vcpu);

        if (vcpu->vm->arch.last_vcpu_on_cpu[current_cpu_index] !=
            new_vcpu_index) {
                /*
                 * The vCPU has changed since the last time this VM was run on
                 * this pCPU, so we need to invalidate the TLB.
                 */
                invalidate_vm_tlb();

                /* Record the fact that this vCPU is now running on this CPU. */
                vcpu->vm->arch.last_vcpu_on_cpu[current_cpu_index] =
                        new_vcpu_index;
        }
}

[[noreturn]] void irq_current_exception_noreturn(uintreg_t elr, uintreg_t spsr)
{
        (void)elr;
        (void)spsr;

        panic("IRQ from current exception level.");
}

[[noreturn]] void fiq_current_exception_noreturn(uintreg_t elr, uintreg_t spsr)
{
        (void)elr;
        (void)spsr;

        panic("FIQ from current exception level.");
}

[[noreturn]] void serr_current_exception_noreturn(uintreg_t elr, uintreg_t spsr)
{
        (void)elr;
        (void)spsr;

        panic("SError from current exception level.");
}
/**
 * Returns true if ELR_EL2 is not to be restored from the stack.
 * Currently the function never returns false, as it panics in all other
 * cases.
 */
bool sync_current_exception(uintreg_t elr, uintreg_t spsr)
{
        uintreg_t esr = read_msr(esr_el2);
        uintreg_t ec = GET_ESR_EC(esr);
        (void)spsr;

        switch (ec) {
        case EC_DATA_ABORT_SAME_EL: {
                uint64_t iss = GET_ESR_ISS(esr);
                uint64_t dfsc = GET_ESR_ISS_DFSC(iss);
                uint64_t far = read_msr(far_el2);

                /* Handle Granule Protection Fault. */
                if (is_arch_feat_rme_supported() && dfsc == DFSC_GPF) {
                        dlog_verbose(
                                "Granule Protection Fault: esr=%#lx, ec=%#lx, "
                                "far=%#lx, elr=%#lx\n",
                                esr, ec, far, elr);

                        /*
                         * Change ELR_EL2 only if the fault happened whilst
                         * either reading or writing within 'memcpy_trapped'.
                         */
                        if (elr == (uintptr_t)memcpy_trapped_read ||
                            elr == (uintptr_t)memcpy_trapped_write) {
                                dlog_verbose(
                                        "GPF due to data abort on %s.\n",
                                        (elr == (uintptr_t)memcpy_trapped_read)
                                                ? "read"
                                                : "write");

                                /*
                                 * Update ELR_EL2 with the return address, to
                                 * return an error from the call to
                                 * 'memcpy_trapped'.
                                 */
                                write_msr(ELR_EL2, memcpy_trapped_aborted);
                                return true;
                        }
                }

#if ENABLE_MTE
                if (dfsc == DFSC_SYNC_TAG_CHECK_FAULT) {
                        dlog_error(
                                "Data abort due to synchronous tag check "
                                "fault: pc=%#lx, esr=%#lx, ec=%#lx, "
                                "far=%#lx, dfsc = %#lx\n",
                                elr, esr, ec, far, dfsc);
                }
#endif
                if (!GET_ESR_FNV(esr)) {
                        dlog_error(
                                "Data abort: pc=%#lx, esr=%#lx, ec=%#lx, "
                                "far=%#lx\n",
                                elr, esr, ec, far);
                } else {
                        dlog_error(
                                "Data abort: pc=%#lx, esr=%#lx, ec=%#lx, "
                                "far=invalid\n",
                                elr, esr, ec);
                }
        } break;
        default:
                dlog_error(
                        "Unknown current sync exception pc=%#lx, esr=%#lx, "
                        "ec=%#lx\n",
                        elr, esr, ec);
                break;
        }

        panic("EL2 exception");
}

/**
 * Sets or clears the VF bit in the HCR_EL2 register saved in the given
 * arch_regs.
 */
static void set_virtual_fiq(struct arch_regs *r, bool enable)
{
        if (enable) {
                r->hyp_state.hcr_el2 |= HCR_EL2_VF;
        } else {
                r->hyp_state.hcr_el2 &= ~HCR_EL2_VF;
        }
}

/**
 * Sets or clears the VI bit in the HCR_EL2 register saved in the given
 * arch_regs.
 */
static void set_virtual_irq(struct arch_regs *r, bool enable)
{
        if (enable) {
                r->hyp_state.hcr_el2 |= HCR_EL2_VI;
        } else {
                r->hyp_state.hcr_el2 &= ~HCR_EL2_VI;
        }
}

/**
 * Checks whether to block an SMC being forwarded from a VM.
 */
static bool smc_is_blocked(const struct vm *vm, uint32_t func)
{
        bool block_by_default = !vm->smc_whitelist.permissive;

        for (size_t i = 0; i < vm->smc_whitelist.smc_count; ++i) {
                if (func == vm->smc_whitelist.smcs[i]) {
                        return false;
                }
        }

        dlog_notice("SMC %#010x attempted from VM %#x, blocked=%u\n", func,
                    vm->id, block_by_default);

        /* Access is still allowed in permissive mode. */
        return block_by_default;
}

/**
 * Applies SMC access control according to manifest and forwards the call if
 * access is granted.
 */
static void smc_forwarder(const struct vm *vm, struct ffa_value *args)
{
        struct ffa_value ret;
        uint32_t client_id = vm->id;
        uintreg_t arg7 = args->arg7;

        if (smc_is_blocked(vm, args->func)) {
                args->func = SMCCC_ERROR_UNKNOWN;
                return;
        }

        /*
         * Set the Client ID but keep the existing Secure OS ID and anything
         * else (currently unspecified) that the client may have passed in the
         * upper bits.
         */
        args->arg7 = client_id | (arg7 & ~CLIENT_ID_MASK);
        ret = smc_forward(args->func, args->arg1, args->arg2, args->arg3,
                          args->arg4, args->arg5, args->arg6, args->arg7);

        /*
         * Preserve the value passed by the caller, rather than the generated
         * client_id. Note that this would also overwrite any return value
         * that may be in x7, but the SMCs that we are forwarding are legacy
         * calls from before SMCCC 1.2 so won't have more than 4 return values
         * anyway.
         */
        ret.arg7 = arg7;

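        /*
         * Allow platform-specific code to observe and, if needed, adjust the
         * result of the forwarded call.
         */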
        plat_smc_post_forward(*args, &ret);

        *args = ret;
}

/**
 * In the normal world, ffa_handler is always called from the virtual FF-A
 * instance (from a VM in EL1). In the secure world, ffa_handler may be called
 * from the virtual (a secure partition in S-EL1) or physical FF-A instance
 * (from the normal world via EL3). The function returns true when the call is
 * handled. The *next pointer is updated to the next vCPU to run, which might
 * be the 'other world' vCPU if the call originated from the virtual FF-A
 * instance and has to be forwarded down to EL3, or left as is to resume the
 * current vCPU.
 */
static bool ffa_handler(struct ffa_value *args, struct vcpu *current,
                        struct vcpu **next)
{
        uint32_t func = args->func;

        /*
         * NOTE: When adding new methods to this handler, update
         * api_ffa_features accordingly.
         */
        switch (func) {
        case FFA_VERSION_32:
                *args = api_ffa_version(current, args->arg1);
                return true;
        case FFA_PARTITION_INFO_GET_32: {
                struct ffa_uuid uuid;

                ffa_uuid_init(args->arg1, args->arg2, args->arg3, args->arg4,
                              &uuid);
                *args = api_ffa_partition_info_get(current, &uuid, args->arg5);
                return true;
        }
        case FFA_PARTITION_INFO_GET_REGS_64: {
                struct ffa_uuid uuid;
                uint16_t start_index;
                uint16_t tag;

                ffa_uuid_from_u64x2(args->arg1, args->arg2, &uuid);
                start_index = args->arg3 & 0xFFFF;
                tag = (args->arg3 >> 16) & 0xFFFF;
                *args = api_ffa_partition_info_get_regs(current, &uuid,
                                                        start_index, tag);
                return true;
        }
        case FFA_ID_GET_32:
                *args = api_ffa_id_get(current);
                return true;
        case FFA_SPM_ID_GET_32:
                *args = api_ffa_spm_id_get();
                return true;
        case FFA_FEATURES_32:
                *args = api_ffa_features(args->arg1, args->arg2, current);
                return true;
        case FFA_RX_RELEASE_32:
                *args = api_ffa_rx_release(ffa_receiver(*args), current);
                return true;
        case FFA_RXTX_MAP_64:
                *args = api_ffa_rxtx_map(ipa_init(args->arg1),
                                         ipa_init(args->arg2), args->arg3,
                                         current);
                return true;
        case FFA_RXTX_UNMAP_32:
                *args = api_ffa_rxtx_unmap(ffa_vm_id(*args), current);
                return true;
        case FFA_RX_ACQUIRE_32:
                *args = api_ffa_rx_acquire(ffa_receiver(*args), current);
                return true;
        case FFA_YIELD_32:
                *args = api_yield(current, next, args);
                return true;
        case FFA_MSG_SEND_32:
                *args = ffa_indirect_msg_send(
                        ffa_sender(*args), ffa_receiver(*args),
                        ffa_msg_send_size(*args), current, next);
                return true;
        case FFA_MSG_SEND2_32:
                *args = api_ffa_msg_send2(ffa_sender(*args),
                                          ffa_msg_send2_flags(*args), current);
                return true;
        case FFA_MSG_WAIT_32:
                *args = api_ffa_msg_wait(current, next, args);
                return true;
#if SECURE_WORLD == 0
        case FFA_MSG_POLL_32: {
                struct vcpu_locked current_locked;

                current_locked = vcpu_lock(current);
                *args = ffa_indirect_msg_recv(false, current_locked, next);
                vcpu_unlock(&current_locked);
                return true;
        }
#endif
        case FFA_RUN_32:
                /**
                 * Ensure that an FF-A v1.2 endpoint preserves the
                 * runtime state of the calling partition by setting
                 * the extended registers (x8-x17) to zero.
                 */
                if (current->vm->ffa_version >= FFA_VERSION_1_2 &&
                    !api_extended_args_are_zero(args)) {
                        *args = ffa_error(FFA_INVALID_PARAMETERS);
                        return false;
                }
                *args = api_ffa_run(ffa_vm_id(*args), ffa_vcpu_index(*args),
                                    current, next);
                return true;
        case FFA_MEM_DONATE_32:
        case FFA_MEM_DONATE_64:
        case FFA_MEM_LEND_32:
        case FFA_MEM_LEND_64:
        case FFA_MEM_SHARE_32:
        case FFA_MEM_SHARE_64:
                *args = api_ffa_mem_send(func, args->arg1, args->arg2,
                                         ipa_init(args->arg3), args->arg4,
                                         current);
                return true;
        case FFA_MEM_RETRIEVE_REQ_64:
        case FFA_MEM_RETRIEVE_REQ_32:
                *args = api_ffa_mem_retrieve_req(args->arg1, args->arg2,
                                                 ipa_init(args->arg3),
                                                 args->arg4, current);
                return true;
        case FFA_MEM_RELINQUISH_32:
                *args = api_ffa_mem_relinquish(current);
                return true;
        case FFA_MEM_RECLAIM_32:
                *args = api_ffa_mem_reclaim(
                        ffa_assemble_handle(args->arg1, args->arg2), args->arg3,
                        current);
                return true;
        case FFA_MEM_FRAG_RX_32:
                *args = api_ffa_mem_frag_rx(ffa_frag_handle(*args), args->arg3,
                                            (args->arg4 >> 16) & 0xffff,
                                            current);
                return true;
        case FFA_MEM_FRAG_TX_32:
                *args = api_ffa_mem_frag_tx(ffa_frag_handle(*args), args->arg3,
                                            (args->arg4 >> 16) & 0xffff,
                                            current);
                return true;
        case FFA_MSG_SEND_DIRECT_REQ_64:
        case FFA_MSG_SEND_DIRECT_REQ_32:
        case FFA_MSG_SEND_DIRECT_REQ2_64:
                *args = api_ffa_msg_send_direct_req(*args, current, next);
                return true;
        case FFA_MSG_SEND_DIRECT_RESP_64:
        case FFA_MSG_SEND_DIRECT_RESP_32:
        case FFA_MSG_SEND_DIRECT_RESP2_64:
                *args = api_ffa_msg_send_direct_resp(*args, current, next);
                return true;
        case FFA_SECONDARY_EP_REGISTER_64:
                /*
                 * DEN0077A FF-A v1.1 Beta0 section 18.3.2.1.1
                 * The callee must return NOT_SUPPORTED if this function is
                 * invoked by a caller that implements version v1.0 of
                 * the Framework.
                 */
                *args = api_ffa_secondary_ep_register(ipa_init(args->arg1),
                                                      current);
                return true;
        case FFA_NOTIFICATION_BITMAP_CREATE_32:
                *args = api_ffa_notification_bitmap_create(
                        (ffa_id_t)args->arg1, (ffa_vcpu_count_t)args->arg2,
                        current);
                return true;
        case FFA_NOTIFICATION_BITMAP_DESTROY_32:
                *args = api_ffa_notification_bitmap_destroy(
                        (ffa_id_t)args->arg1, current);
                return true;
        case FFA_NOTIFICATION_BIND_32:
                *args = api_ffa_notification_update_bindings(
                        ffa_sender(*args), ffa_receiver(*args), args->arg2,
                        ffa_notifications_bitmap(args->arg3, args->arg4), true,
                        current);
                return true;
        case FFA_NOTIFICATION_UNBIND_32:
                *args = api_ffa_notification_update_bindings(
                        ffa_sender(*args), ffa_receiver(*args), 0,
                        ffa_notifications_bitmap(args->arg3, args->arg4), false,
                        current);
                return true;
        case FFA_MEM_PERM_SET_32:
        case FFA_MEM_PERM_SET_64:
                *args = api_ffa_mem_perm_set(va_init(args->arg1), args->arg2,
                                             args->arg3, current);
                return true;
        case FFA_MEM_PERM_GET_32:
        case FFA_MEM_PERM_GET_64:
                *args = api_ffa_mem_perm_get(va_init(args->arg1), args->arg2,
                                             current);
                return true;
        case FFA_NOTIFICATION_SET_32:
                *args = api_ffa_notification_set(
                        ffa_sender(*args), ffa_receiver(*args), args->arg2,
                        ffa_notifications_bitmap(args->arg3, args->arg4),
                        current);
                return true;
        case FFA_NOTIFICATION_GET_32:
                *args = api_ffa_notification_get(
                        ffa_receiver(*args), ffa_notifications_get_vcpu(*args),
                        args->arg2, current);
                return true;
        case FFA_NOTIFICATION_INFO_GET_64:
                *args = api_ffa_notification_info_get(current);
                return true;
        case FFA_INTERRUPT_32:
                /*
                 * A malicious SP could invoke an HVC/SMC call with
                 * FFA_INTERRUPT_32 as the function argument. Return an error
                 * to avoid DoS.
                 */
                if (current->vm->id != HF_OTHER_WORLD_ID) {
                        *args = ffa_error(FFA_DENIED);
                        return true;
                }

                ffa_interrupts_handle_secure_interrupt(current, next);

                /*
                 * If the next vCPU belongs to an SP, the next time the NWd
                 * gets resumed these values will be overwritten by the ABI
                 * used to hand execution back to the NWd.
                 * If the NWd is to be resumed from here, then it will
                 * receive the FFA_NORMAL_WORLD_RESUME ABI, which signals
                 * that an interrupt has occurred, though it wasn't handled.
                 * This happens when the target vCPU was in the preempted
                 * state and the SP could not be resumed to handle the
                 * interrupt.
                 */
                *args = (struct ffa_value){.func = FFA_NORMAL_WORLD_RESUME};
                return true;
        case FFA_CONSOLE_LOG_32:
        case FFA_CONSOLE_LOG_64:
                *args = api_ffa_console_log(*args, current);
                return true;
        case FFA_ERROR_32:
                *args = ffa_cpu_cycles_error_32(current, next, args->arg2);
                return true;

        default:
                return false;
        }
}

/**
 * Set or clear VI/VF bits according to pending interrupts.
 * If `vcpu` is NULL, the function will set it to the currently running
 * vCPU.
 */
static void vcpu_update_virtual_interrupts(struct vcpu *vcpu)
{
        struct vcpu_locked vcpu_locked;

        if (vcpu == NULL) {
                vcpu = current();
        }

        /* Only update vCPUs at the virtual instance. */
        if (vcpu->vm->el0_partition || !vm_id_is_current_world(vcpu->vm->id)) {
                return;
        }

        vcpu_locked = vcpu_lock(vcpu);
        set_virtual_irq(&vcpu->regs,
                        vcpu_virt_interrupt_irq_count_get(vcpu_locked) > 0);
        set_virtual_fiq(&vcpu->regs,
                        vcpu_virt_interrupt_fiq_count_get(vcpu_locked) > 0);
        vcpu_unlock(&vcpu_locked);
}

/**
 * Handles PSCI and FF-A calls and writes the return value back to the
 * registers of the vCPU. This is shared between smc_handler and hvc_handler.
 *
 * Returns true if the call was handled.
 */
static bool hvc_smc_handler(struct ffa_value args, struct vcpu *vcpu,
                            struct vcpu **next)
{
        const uint32_t func = args.func;

        /* Do not expect PSCI calls emitted from within the secure world. */
#if SECURE_WORLD == 0
        if (psci_handler(vcpu, func, args.arg1, args.arg2, args.arg3,
                         &vcpu->regs.r[0], next)) {
                return true;
        }
#endif

        if (ffa_handler(&args, vcpu, next)) {
#if SECURE_WORLD == 1
                /*
                 * If giving back execution to the NWd, check if the Schedule
                 * Receiver Interrupt has been delayed, and trigger it on the
                 * current core if so.
                 */
                if ((*next != NULL && (*next)->vm->id == HF_OTHER_WORLD_ID) ||
                    (*next == NULL && vcpu->vm->id == HF_OTHER_WORLD_ID)) {
                        ffa_notifications_sri_trigger_if_delayed(vcpu->cpu);
                }
#endif
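                /*
                 * Any FF-A call other than FFA_VERSION that is handled here
                 * fixes the FF-A version negotiated with this VM.
                 */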
                if (func != FFA_VERSION_32) {
                        struct vm_locked vm_locked = vm_lock(vcpu->vm);

                        vm_locked.vm->ffa_version_negotiated = true;
                        vm_unlock(&vm_locked);
                }

                arch_regs_set_retval(&vcpu->regs, args);

                /*
                 * In case there has been an update while handling the last
                 * FF-A call, update the virtual interrupt bits of the next
                 * vCPU directly in the register.
                 */
                vcpu_update_virtual_interrupts(*next);
                return true;
        }

        return false;
}

/**
 * Processes SMC instruction calls.
 */
static struct vcpu *smc_handler(struct vcpu *vcpu)
{
        struct ffa_value args = arch_regs_get_args(&vcpu->regs);
        struct vcpu *next = NULL;

        /* Mask out SMCCC SVE hint bit from function id. */
        args.func &= ~SMCCC_SVE_HINT_MASK;

        if (hvc_smc_handler(args, vcpu, &next)) {
                return next;
        }

        smc_forwarder(vcpu->vm, &args);
        arch_regs_set_retval(&vcpu->regs, args);
        return NULL;
}

#if SECURE_WORLD == 1

/**
 * Called from the other world loop upon return from an SMC.
 * Processes SMC calls originating from the NWd.
 */
struct vcpu *smc_handler_from_nwd(struct vcpu *vcpu)
{
        struct ffa_value args = arch_regs_get_args(&vcpu->regs);
        struct vcpu *next = NULL;

        plat_save_ns_simd_context(vcpu);

        /* Mask out SMCCC SVE hint bit from function id. */
        args.func &= ~SMCCC_SVE_HINT_MASK;

        if (hvc_smc_handler(args, vcpu, &next)) {
                return next;
        }

        /*
         * If the SMC emitted by the normal world is not handled in the secure
         * world then return an error stating such ABI is not supported. Only
         * FF-A calls are supported. We cannot return SMCCC_ERROR_UNKNOWN
         * directly because the SPMD smc handler would not recognize it as a
         * standard FF-A call returning from the SPMC.
         */
        arch_regs_set_retval(&vcpu->regs, ffa_error(FFA_NOT_SUPPORTED));

        return NULL;
}

#endif

/*
 * Exception vector offsets.
 * See Arm Architecture Reference Manual Armv8-A, D1.10.2.
 */

/**
 * Offset for synchronous exceptions at current EL with SPx.
 */
#define OFFSET_CURRENT_SPX UINT64_C(0x200)

/**
 * Offset for synchronous exceptions at lower EL using AArch64.
 */
#define OFFSET_LOWER_EL_64 UINT64_C(0x400)

/**
 * Offset for synchronous exceptions at lower EL using AArch32.
 */
#define OFFSET_LOWER_EL_32 UINT64_C(0x600)
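/*
 * Each group of vector table entries spans 0x200 bytes and the synchronous
 * handler is the first 0x80-byte entry of its group, so the offsets above
 * point directly at the synchronous entry of the relevant group.
 */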

/**
 * Returns the address for the exception handler at EL1.
 */
static uintreg_t get_el1_exception_handler_addr(const struct vcpu *vcpu)
{
        uintreg_t base_addr = has_vhe_support() ? read_msr(MSR_VBAR_EL12)
                                                : read_msr(vbar_el1);
        uintreg_t pe_mode = vcpu->regs.spsr & PSR_PE_MODE_MASK;
        bool is_arch32 = vcpu->regs.spsr & PSR_ARCH_MODE_32;

        if (pe_mode == PSR_PE_MODE_EL0T) {
                if (is_arch32) {
                        base_addr += OFFSET_LOWER_EL_32;
                } else {
                        base_addr += OFFSET_LOWER_EL_64;
                }
        } else {
                CHECK(!is_arch32);
                base_addr += OFFSET_CURRENT_SPX;
        }

        return base_addr;
}

/**
 * Injects an exception with the specified Exception Syndrome Register value
 * into the EL1.
 *
 * NOTE: This function assumes that the lazy registers haven't been saved, and
 * writes to the lazy registers of the CPU directly instead of the vCPU.
 */
static void inject_el1_exception(struct vcpu *vcpu, uintreg_t esr_el1_value,
                                 uintreg_t far_el1_value)
{
        uintreg_t handler_address = get_el1_exception_handler_addr(vcpu);

        /* Update the CPU state to inject the exception. */
        if (has_vhe_support()) {
                write_msr(MSR_ESR_EL12, esr_el1_value);
                write_msr(MSR_FAR_EL12, far_el1_value);
                write_msr(MSR_ELR_EL12, vcpu->regs.pc);
                write_msr(MSR_SPSR_EL12, vcpu->regs.spsr);
        } else {
                write_msr(esr_el1, esr_el1_value);
                write_msr(far_el1, far_el1_value);
                write_msr(elr_el1, vcpu->regs.pc);
                write_msr(spsr_el1, vcpu->regs.spsr);
        }

        /*
         * Mask (disable) interrupts and run in EL1h mode.
         * EL1h mode is used because, by default, taking an exception selects
         * the stack pointer for the target exception level. The software can
         * change that later in the handler if needed.
         */
        vcpu->regs.spsr = PSR_D | PSR_A | PSR_I | PSR_F | PSR_PE_MODE_EL1H;

        /* Transfer control to the exception handler. */
        vcpu->regs.pc = handler_address;
}

/**
 * Injects a Data Abort exception (same exception level).
 */
static void inject_el1_data_abort_exception(struct vcpu *vcpu,
                                            uintreg_t esr_el2,
                                            uintreg_t far_el2)
{
        /*
         * ISS encoding remains the same, but the EC is changed to reflect
         * where the exception came from.
         * See Arm Architecture Reference Manual Armv8-A, pages D13-2943/2982.
         */
        uintreg_t esr_el1_value = GET_ESR_ISS(esr_el2) | GET_ESR_IL(esr_el2) |
                                  (EC_DATA_ABORT_SAME_EL << ESR_EC_OFFSET);

        dlog_notice("Injecting Data Abort exception into VM %#x.\n",
                    vcpu->vm->id);

        inject_el1_exception(vcpu, esr_el1_value, far_el2);
}

/**
 * Injects an Instruction Abort exception (same exception level).
 */
static void inject_el1_instruction_abort_exception(struct vcpu *vcpu,
                                                   uintreg_t esr_el2,
                                                   uintreg_t far_el2)
{
        /*
         * ISS encoding remains the same, but the EC is changed to reflect
         * where the exception came from.
         * See Arm Architecture Reference Manual Armv8-A, pages D13-2941/2980.
         */
        uintreg_t esr_el1_value =
                GET_ESR_ISS(esr_el2) | GET_ESR_IL(esr_el2) |
                (EC_INSTRUCTION_ABORT_SAME_EL << ESR_EC_OFFSET);

        dlog_notice("Injecting Instruction Abort exception into VM %#x.\n",
                    vcpu->vm->id);

        inject_el1_exception(vcpu, esr_el1_value, far_el2);
}

/**
 * Injects an exception with an unknown reason into the EL1.
 */
static void inject_el1_unknown_exception(struct vcpu *vcpu, uintreg_t esr_el2)
{
        uintreg_t esr_el1_value =
                GET_ESR_IL(esr_el2) | (EC_UNKNOWN << ESR_EC_OFFSET);

        dlog_notice("Injecting Unknown Reason exception into VM %#x.\n",
                    vcpu->vm->id);

        /*
         * The value of the far_el2 register is UNKNOWN in this case,
         * therefore, don't propagate it to avoid leaking sensitive
         * information.
         */
        inject_el1_exception(vcpu, esr_el1_value, 0);
}

/**
 * Injects an exception because of a system register trap.
 */
static void inject_el1_sysreg_trap_exception(struct vcpu *vcpu,
                                             uintreg_t esr_el2)
{
        char *direction_str = ISS_IS_READ(esr_el2) ? "read" : "write";

        dlog_notice(
                "Trapped access to system register %s: op0=%lu, op1=%lu, "
                "crn=%lu, crm=%lu, op2=%lu, rt=%lu.\n",
                direction_str, GET_ISS_OP0(esr_el2), GET_ISS_OP1(esr_el2),
                GET_ISS_CRN(esr_el2), GET_ISS_CRM(esr_el2),
                GET_ISS_OP2(esr_el2), GET_ISS_RT(esr_el2));

        inject_el1_unknown_exception(vcpu, esr_el2);
}

static struct vcpu *hvc_handler(struct vcpu *vcpu)
{
        struct ffa_value args = arch_regs_get_args(&vcpu->regs);
        struct vcpu *next = NULL;

        /* Mask out SMCCC SVE hint bit from function id. */
        args.func &= ~SMCCC_SVE_HINT_MASK;

        if (hvc_smc_handler(args, vcpu, &next)) {
                return next;
        }

        switch (args.func) {
#if SECURE_WORLD == 1
        case HF_INTERRUPT_DEACTIVATE:
                vcpu->regs.r[0] =
                        ffa_interrupts_deactivate(args.arg1, args.arg2, vcpu);
                break;

        case HF_INTERRUPT_RECONFIGURE:
                vcpu->regs.r[0] = ffa_interrupts_reconfigure(
                        args.arg1, args.arg2, args.arg3, vcpu);
                break;

        case HF_INTERRUPT_SEND_IPI:
                vcpu->regs.r[0] = api_hf_interrupt_send_ipi(args.arg1, vcpu);
                break;
#endif
        case HF_INTERRUPT_ENABLE:
                vcpu->regs.r[0] = api_interrupt_enable(args.arg1, args.arg2,
                                                       args.arg3, vcpu);
                break;

        case HF_INTERRUPT_GET: {
                struct vcpu_locked current_locked;

                current_locked = vcpu_lock(vcpu);
                vcpu->regs.r[0] = api_interrupt_get(current_locked);
                vcpu_unlock(&current_locked);
                break;
        }
        default:
                vcpu->regs.r[0] = SMCCC_ERROR_UNKNOWN;
                dlog_verbose("Unsupported function %#lx\n", args.func);
        }

        /*
         * In case there has been an update while handling the last
         * hypervisor call, update the virtual interrupt bits of the next
         * vCPU directly in the register.
         */
        vcpu_update_virtual_interrupts(next);

        return next;
}

struct vcpu *irq_lower(void)
{
#if SECURE_WORLD == 1
        struct vcpu *next = NULL;

        ffa_interrupts_handle_secure_interrupt(current(), &next);

        /*
         * Since we are in interrupt context, set the bit for the
         * next vCPU directly in the register.
         */
        vcpu_update_virtual_interrupts(next);

        return next;
#else
        /*
         * Switch back to primary VM, interrupts will be handled there.
         *
         * If the VM has aborted, this vCPU will be aborted when the scheduler
         * tries to run it again. This means the interrupt will not be delayed
         * by the aborted VM.
         *
         * TODO: Only switch when the interrupt isn't for the current VM.
         */
        return api_preempt(current());
#endif
}

#if SECURE_WORLD == 1
static void spmd_group0_intr_delegate(void)
{
        struct ffa_value ret;

        dlog_verbose("Delegating Group0 interrupt to SPMD\n");

        ret = smc_ffa_call((struct ffa_value){.func = FFA_EL3_INTR_HANDLE_32});

        /* Check if the Group0 interrupt was handled successfully. */
        CHECK(ret.func == FFA_SUCCESS_32);
}
#endif

struct vcpu *fiq_lower(void)
{
#if SECURE_WORLD == 1
        struct vcpu_locked current_locked;
        struct vcpu *current_vcpu = current();
        uint32_t intid;

        intid = get_highest_pending_g0_interrupt_id();

        /* Check for the highest priority pending Group0 interrupt. */
        if (intid != SPURIOUS_INTID_OTHER_WORLD) {
                /* Delegate handling of Group0 interrupt to EL3 firmware. */
                spmd_group0_intr_delegate();

                /* Resume current vCPU. */
                return NULL;
        }

        /*
         * A special interrupt indicating there is no pending interrupt
         * with sufficient priority for the current security state. This
         * means a non-secure interrupt is pending.
         */
        assert(current_vcpu->vm->ns_interrupts_action != NS_ACTION_QUEUED);

        if (ffa_vm_managed_exit_supported(current_vcpu->vm)) {
                uint8_t pmr = plat_interrupts_get_priority_mask();

                /*
                 * Mask the non-secure interrupt from triggering again until
                 * the vCPU completes the managed exit sequence.
                 */
                plat_interrupts_set_priority_mask(SWD_MASK_NS_INT);

                current_locked = vcpu_lock(current_vcpu);
                current_vcpu->prev_interrupt_priority = pmr;
                vcpu_virt_interrupt_inject(current_locked,
                                           HF_MANAGED_EXIT_INTID);

                /* Entering managed exit sequence. */
                current_vcpu->processing_managed_exit = true;

                vcpu_unlock(&current_locked);

                /*
                 * Since we are in interrupt context, set the bit for the
                 * current vCPU directly in the register.
                 */
                vcpu_update_virtual_interrupts(NULL);

                /* Resume current vCPU. */
                return NULL;
        }

        /*
         * Unwind Normal World Scheduled Call chain in response to NS
         * Interrupt.
         */
        return ffa_interrupts_unwind_nwd_call_chain(current_vcpu);
#else
        return irq_lower();
#endif
}

[[noreturn]] struct vcpu *serr_lower(void)
{
        /*
         * SError exceptions should be isolated and handled by the responsible
         * VM/exception level. Getting here indicates a bug, that isolation is
         * not working, or a processor that does not support ARMv8.2-IESB, in
         * which case Hafnium routes SError exceptions to EL2 (here).
         */
        panic("SError from a lower exception level.");
}

/**
 * Initialises a fault info structure. It assumes that an FnV bit exists at
 * bit offset 10 of the ESR, and that it is only valid when the bottom 6 bits
 * of the ESR (the fault status code) are 010000; this is the case for both
 * instruction and data aborts, but not necessarily for other exception
 * reasons.
 */
static struct vcpu_fault_info fault_info_init(uintreg_t esr,
                                              const struct vcpu *vcpu,
                                              mm_mode_t mode)
{
        uint32_t fsc = esr & 0x3f;
        struct vcpu_fault_info r;
        uint64_t hpfar_el2_val;
        uint64_t hpfar_el2_fipa;

        r.mode = mode;
        r.pc = va_init(vcpu->regs.pc);

        /* Get Hypervisor IPA Fault Address value. */
        hpfar_el2_val = read_msr(hpfar_el2);

        /* Extract Faulting IPA. */
        hpfar_el2_fipa = (hpfar_el2_val & HPFAR_EL2_FIPA) << 8;

#if SECURE_WORLD == 1

        /**
         * Determine if faulting IPA targets NS space.
         * At NS-EL2 hpfar_el2 bit 63 is RES0. At S-EL2, this bit determines if
         * the faulting Stage-1 address output is a secure or non-secure IPA.
         */
        if ((hpfar_el2_val & HPFAR_EL2_NS) != 0) {
                r.mode |= MM_MODE_NS;
        }

#endif

        /*
         * Check the FnV bit, which is only valid if dfsc/ifsc is 010000. It
         * indicates that we cannot rely on far_el2.
         */
        if (fsc == 0x10 && GET_ESR_FNV(esr)) {
                r.vaddr = va_init(0);
                r.ipaddr = ipa_init(hpfar_el2_fipa);
        } else {
                r.vaddr = va_init(read_msr(far_el2));
                r.ipaddr = ipa_init(hpfar_el2_fipa |
                                    (read_msr(far_el2) & (PAGE_SIZE - 1)));
        }

        return r;
}

struct vcpu *sync_lower_exception(uintreg_t esr, uintreg_t far)
{
        struct vcpu *vcpu = current();
        struct vcpu_fault_info info;
        struct vcpu *new_vcpu = NULL;
        uintreg_t ec = GET_ESR_EC(esr);
        bool is_el0_partition = vcpu->vm->el0_partition;
        bool resume = false;

        switch (ec) {
        case EC_WFI_WFE:
                /* Skip the instruction. */
                vcpu->regs.pc += GET_NEXT_PC_INC(esr);

                /*
                 * For EL0 partitions, treat both WFI and WFE the same way so
                 * that FFA_RUN can be called on the partition to resume it. If
                 * we treat WFI using api_wait_for_interrupt, the vCPU will be
                 * blocked waiting for an interrupt but we cannot inject
                 * interrupts into EL0 partitions.
                 */
                if (is_el0_partition) {
                        api_yield(vcpu, &new_vcpu, NULL);
                        return new_vcpu;
                }

                /* Check TI bit of ISS, 0 = WFI, 1 = WFE. */
                if (esr & 1) {
                        /* WFE */
                        /*
                         * TODO: consider giving the scheduler more context,
                         * somehow.
                         */
                        api_yield(vcpu, &new_vcpu, NULL);
                        return new_vcpu;
                }
                /* WFI */
                return api_wait_for_interrupt(vcpu);

        case EC_DATA_ABORT_LOWER_EL:
                info = fault_info_init(
                        esr, vcpu, (esr & (1U << 6)) ? MM_MODE_W : MM_MODE_R);

                resume = vcpu_handle_page_fault(vcpu, &info);
                if (is_el0_partition) {
                        dlog_warning("Data abort on EL0 partition\n");
                        /*
                         * Abort the EL0 context if we should not resume the
                         * context, or if it is an alignment fault.
                         * vcpu_handle_page_fault() only checks the mode of the
                         * page in an architecture-agnostic way, but alignment
                         * faults on aarch64 can happen on a correctly mapped
                         * page.
                         */
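                        /* DFSC 0b100001 (0x21) is an alignment fault. */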
                        if (!resume || ((esr & 0x3f) == 0x21)) {
                                return api_abort(vcpu);
                        }
                }

                if (resume) {
                        return NULL;
                }

                /* Inform the EL1 of the data abort. */
                inject_el1_data_abort_exception(vcpu, esr, far);

                /* Schedule the same VM to continue running. */
                return NULL;

        case EC_INSTRUCTION_ABORT_LOWER_EL:
                info = fault_info_init(esr, vcpu, MM_MODE_X);

                if (vcpu_handle_page_fault(vcpu, &info)) {
                        return NULL;
                }

                if (is_el0_partition) {
                        dlog_warning("Instruction abort on EL0 partition\n");
                        return api_abort(vcpu);
                }

                /* Inform the EL1 of the instruction abort. */
                inject_el1_instruction_abort_exception(vcpu, esr, far);

                /* Schedule the same VM to continue running. */
                return NULL;
        case EC_SVC:
                CHECK(is_el0_partition);
                return hvc_handler(vcpu);
        case EC_HVC:
                if (is_el0_partition) {
                        dlog_warning("Unexpected HVC Trap on EL0 partition\n");
                        return api_abort(vcpu);
                }
                return hvc_handler(vcpu);

        case EC_SMC: {
                uintreg_t smc_pc = vcpu->regs.pc;
                struct vcpu *next = smc_handler(vcpu);

                /* Skip the SMC instruction. */
                vcpu->regs.pc = smc_pc + GET_NEXT_PC_INC(esr);

                return next;
        }

        case EC_MSR:
                /*
                 * NOTE: This should never be reached because it goes through a
                 * separate path handled by handle_system_register_access().
                 */
                panic("Handled by handle_system_register_access().");

        default:
                dlog_notice(
                        "Unknown lower sync exception pc=%#lx, esr=%#lx, "
                        "ec=%#lx\n",
                        vcpu->regs.pc, esr, ec);
                break;
        }

        if (is_el0_partition) {
                return api_abort(vcpu);
        }

        /*
         * The exception wasn't handled. Inject it into the VM to give it a
         * chance to handle it as an unknown exception.
         */
        inject_el1_unknown_exception(vcpu, esr);

        /* Schedule the same VM to continue running. */
        return NULL;
}

/**
 * Handles EC = 011000, MSR, MRS instruction traps.
 * Returns non-null ONLY if the access failed and the vCPU is changing.
 */
struct vcpu *handle_system_register_access(uintreg_t esr_el2)
{
        struct vcpu *vcpu = current();
        ffa_id_t vm_id = vcpu->vm->id;
        uintreg_t ec = GET_ESR_EC(esr_el2);
        bool is_el0_partition = vcpu->vm->el0_partition;

        CHECK(ec == EC_MSR);
        /*
         * Handle accesses to debug and performance monitor registers.
         * Inject an exception for unhandled/unsupported registers.
         */
        if (debug_el1_is_register_access(esr_el2)) {
                if (!debug_el1_process_access(vcpu, vm_id, esr_el2)) {
                        inject_el1_sysreg_trap_exception(vcpu, esr_el2);
                        return NULL;
                }
        } else if (perfmon_is_register_access(esr_el2)) {
                if (!perfmon_process_access(vcpu, vm_id, esr_el2)) {
                        inject_el1_sysreg_trap_exception(vcpu, esr_el2);
                        return NULL;
                }
        } else if (feature_id_is_register_access(esr_el2)) {
                if (!feature_id_process_access(vcpu, esr_el2)) {
                        inject_el1_sysreg_trap_exception(vcpu, esr_el2);
                        return NULL;
                }
        } else if (el1_physical_timer_is_register_access(esr_el2)) {
                if (!el1_physical_timer_process_access(vcpu, esr_el2)) {
                        inject_el1_sysreg_trap_exception(vcpu, esr_el2);
                        return NULL;
                }
        } else {
                if (is_el0_partition) {
                        dlog_warning(
                                "Unexpected system register access by EL0 "
                                "partition\n");
                        return api_abort(vcpu);
                }

                inject_el1_sysreg_trap_exception(vcpu, esr_el2);
                return NULL;
        }

        /* Instruction was fulfilled. Skip it and run the next one. */
        vcpu->regs.pc += GET_NEXT_PC_INC(esr_el2);
        return NULL;
}