/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/plat/interrupts.h"

#include "hf/arch/gicv3.h"
#include "hf/arch/host_timer.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/ffa/direct_messaging.h"
#include "hf/ffa/notifications.h"
#include "hf/ffa/vm.h"
#include "hf/hf_ipi.h"
#include "hf/vm.h"

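/*
 * Overview: secure interrupt handling in the SPMC.
 * A secure interrupt that traps while the PE runs in the Normal World is
 * routed by the SPMD to the SPMC through FFA_INTERRUPT. The helpers below
 * find the target vCPU for the physical interrupt, mask/unmask physical
 * interrupts around SP execution, inject the corresponding virtual
 * interrupt, and decide which vCPU to resume next.
 */
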
/**
 * This function has been deprecated and its contents moved into
 * api_interrupt_get in order to align the bitmap and queue used for tracking
 * interrupts.
 * Returns 0 on success, or -1 otherwise.
 */
int64_t ffa_interrupts_deactivate(uint32_t pint_id, uint32_t vint_id,
				  struct vcpu *current)
{
	(void)pint_id;
	(void)vint_id;
	(void)current;
	return 0;
}

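/**
 * Search all partitions' manifest-declared interrupts for `interrupt_id` and,
 * if found, return the owning partition's execution context corresponding to
 * the current CPU. Returns NULL if no partition declares this interrupt.
 */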
static struct vcpu *ffa_interrupts_find_target_vcpu_secure_interrupt(
	struct vcpu *current, uint32_t interrupt_id)
{
	/*
	 * Find which VM/SP owns this interrupt. We then find the
	 * corresponding vCPU context for this CPU.
	 */
	for (ffa_vm_count_t index = 0; index < vm_get_count(); ++index) {
		struct vm *vm = vm_find_index(index);

		for (uint32_t j = 0; j < VM_MANIFEST_MAX_INTERRUPTS; j++) {
			struct interrupt_descriptor int_desc =
				vm->interrupt_desc[j];

			/*
			 * Interrupt descriptors are populated
			 * contiguously.
			 */
			if (!int_desc.valid) {
				break;
			}
			if (int_desc.interrupt_id == interrupt_id) {
				return api_ffa_get_vm_vcpu(vm, current);
			}
		}
	}

	return NULL;
}

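/**
 * Resolve the physical interrupt ID read from the interrupt controller to the
 * vCPU that should handle it, and report through `v_intid` the virtual
 * interrupt ID to inject (this may differ from the physical ID, e.g. the
 * S-EL2 physical timer interrupt is presented as HF_VIRTUAL_TIMER_INTID).
 * Returns NULL if there is no vCPU to resume for this interrupt.
 */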
static struct vcpu *ffa_interrupts_find_target_vcpu(struct vcpu *current,
						    uint32_t interrupt_id,
						    uint32_t *v_intid)
{
	struct vcpu *target_vcpu;

	assert(current != NULL);
	assert(v_intid != NULL);

	*v_intid = interrupt_id;

	switch (interrupt_id) {
	case SPURIOUS_INTID_OTHER_WORLD:
		/*
		 * Spurious interrupt ID indicating that there are no pending
		 * interrupts to acknowledge. For such scenarios, resume the
		 * current vCPU.
		 */
		target_vcpu = NULL;
		break;
	case HF_IPI_INTID:
		/*
		 * Get the next vCPU with a pending IPI. If all vCPUs
		 * have had their IPIs handled this will return NULL.
		 */
		target_vcpu = hf_ipi_get_pending_target_vcpu(current);
		break;
	case ARM_SEL2_TIMER_PHYS_INT:
		/* Disable the S-EL2 physical timer. */
		host_timer_disable();
		target_vcpu = timer_find_target_vcpu(current);

		if (target_vcpu != NULL) {
			*v_intid = HF_VIRTUAL_TIMER_INTID;
		}
		/*
		 * It is possible for target_vcpu to be NULL in case of a
		 * spurious timer interrupt.
		 */
		break;
	case ARM_EL1_VIRT_TIMER_PHYS_INT:
		[[fallthrough]];
	case ARM_EL1_PHYS_TIMER_PHYS_INT:
		panic("Timer interrupt not expected to fire: %u\n",
		      interrupt_id);
	default:
		target_vcpu = ffa_interrupts_find_target_vcpu_secure_interrupt(
			current, interrupt_id);

		/* The target vCPU for a secure interrupt cannot be NULL. */
		CHECK(target_vcpu != NULL);
	}

	return target_vcpu;
}

/*
 * If the current vCPU is being preempted, record this in the target vCPU
 * and set the current vCPU's state to VCPU_STATE_PREEMPTED.
 */
static void ffa_interrupts_set_preempted_vcpu(
	struct vcpu_locked target_vcpu_locked,
	struct vcpu_locked current_locked)
{
	struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
	struct vcpu *preempted_vcpu = current_locked.vcpu;

	assert(target_vcpu != NULL);
	assert(preempted_vcpu != NULL);

	target_vcpu->preempted_vcpu = preempted_vcpu;
	preempted_vcpu->state = VCPU_STATE_PREEMPTED;
}

/**
 * If the interrupts were indeed masked by the SPMC before an SP's vCPU was
 * resumed, restore the priority mask, thereby allowing the interrupts to be
 * delivered.
 */
void ffa_interrupts_unmask(struct vcpu *current)
{
	plat_interrupts_set_priority_mask(current->prev_interrupt_priority);
}

/**
 * Enforce the action of an SP in response to a non-secure or other-secure
 * interrupt by changing the priority mask. Effectively, physical interrupts
 * shall not trigger, which has the same effect as queueing interrupts.
 */
void ffa_interrupts_mask(struct vcpu_locked receiver_vcpu_locked)
{
	struct vcpu *receiver_vcpu = receiver_vcpu_locked.vcpu;
	uint8_t current_priority;

	/* Save current value of priority mask. */
	current_priority = plat_interrupts_get_priority_mask();
	receiver_vcpu->prev_interrupt_priority = current_priority;

	if (receiver_vcpu->vm->other_s_interrupts_action ==
		    OTHER_S_INT_ACTION_QUEUED ||
	    receiver_vcpu->scheduling_mode == SPMC_MODE) {
		/*
		 * If secure interrupts are not masked yet, mask them now. We
		 * could enter SPMC scheduled mode when an EL3 SPMD Logical
		 * partition sends a direct request, and we are making the
		 * IMPDEF choice to mask interrupts when such a situation
		 * occurs. This keeps the design simple.
		 */
		if (current_priority > SWD_MASK_ALL_INT) {
			plat_interrupts_set_priority_mask(SWD_MASK_ALL_INT);
		}
	} else if (receiver_vcpu->vm->ns_interrupts_action ==
		   NS_ACTION_QUEUED) {
		/* If non-secure interrupts are not masked yet, mask them. */
		if (current_priority > SWD_MASK_NS_INT) {
			plat_interrupts_set_priority_mask(SWD_MASK_NS_INT);
		}
	}
}

/**
 * Change the state of both the current vCPU and the target vCPU.
 * For S-EL0 partitions it pops the next virtual interrupt from the queue so
 * that FFA_INTERRUPT(virtual interrupt ID) can be returned to the vCPU.
 * For S-EL1 partitions it only peeks at the queue to get the next interrupt
 * ID so that it can be included in the return value; the partition should
 * still call `hf_interrupt_get()`.
 *
 * The caller reports the interrupt to the partition, either by writing
 * FFA_INTERRUPT to the partition's context or by returning it through an
 * ffa_value with the FFA_INTERRUPT ABI.
 *
 * Returns the injected virtual interrupt ID.
 */
static uint32_t interrupt_resume_waiting(struct vcpu_locked current_locked,
					 struct vcpu_locked target_vcpu_locked)
{
	struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
	/*
	 * Since S-EL0 partitions will not receive the interrupt through a vIRQ
	 * signal in addition to the FFA_INTERRUPT ERET, make the interrupt no
	 * longer pending at this point. Otherwise, keep it pending for
	 * when the S-EL1 partition calls hf_interrupt_get.
	 */
	uint32_t pending_intid =
		target_vcpu_locked.vcpu->vm->el0_partition
			? vcpu_virt_interrupt_get_pending_and_enabled(
				  target_vcpu_locked)
			: vcpu_virt_interrupt_peek_pending_and_enabled(
				  target_vcpu_locked);

	/* FF-A v1.1 EAC0 Table 8.2 case 1 and Table 12.10. */
	vcpu_enter_secure_interrupt_rtm(target_vcpu_locked);
	ffa_interrupts_mask(target_vcpu_locked);
	ffa_interrupts_set_preempted_vcpu(target_vcpu_locked, current_locked);

	if (target_vcpu->cpu != current_locked.vcpu->cpu) {
		/*
		 * The target vCPU could have migrated to a different
		 * physical CPU. The SPMC migrates it to the current
		 * physical CPU and resumes it.
		 */
		assert(target_vcpu->vm->vcpu_count == 1);
		target_vcpu->cpu = current_locked.vcpu->cpu;
	}

	return pending_intid;
}

/**
 * Handles the secure interrupt according to the target vCPU's state.
 * Returns the next vCPU to resume accordingly.
 * If it returns NULL, the current vCPU shall be resumed.
 * This might be because the target vCPU is the current vCPU, or because the
 * target vCPU is not in a state in which it can be resumed to handle
 * the secure interrupt.
 */
static struct vcpu *ffa_interrupts_signal_secure_interrupt(
	struct vcpu_locked current_locked,
	struct vcpu_locked target_vcpu_locked, uint32_t v_intid)
{
	struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
	struct vcpu *current = current_locked.vcpu;
	struct vcpu *next = NULL;

	/*
	 * If the target vCPU has migrated to a different physical CPU, it
	 * cannot be resumed on this CPU; the SPMC resumes the current vCPU.
	 */
	if (target_vcpu->cpu != current_locked.vcpu->cpu) {
		assert(target_vcpu->vm->vcpu_count == 1);
	}

	/* Secure interrupt signaling and queuing for SP. */
	switch (target_vcpu->state) {
	case VCPU_STATE_WAITING:
		if (!target_vcpu->vm->sri_policy.intr_while_waiting) {
			uint32_t inject_int_id = interrupt_resume_waiting(
				current_locked, target_vcpu_locked);
			struct ffa_value int_ret =
				api_ffa_interrupt_return(inject_int_id);

			if (inject_int_id != 0) {
				assert(v_intid == inject_int_id);
			}

			next = target_vcpu;

			vcpu_set_running(target_vcpu_locked, &int_ret);
		} else {
			dlog_verbose(
				"%s: SP is waiting, SRI delayed due to "
				"interrupt. Partition %x, vcpu %x, interrupt "
				"%x\n",
				__func__, target_vcpu->vm->id,
				vcpu_index(target_vcpu), v_intid);
			ffa_notifications_sri_set_delayed(target_vcpu->cpu);
		}
		break;
	case VCPU_STATE_BLOCKED:
		if (!target_vcpu->vm->el0_partition &&
		    target_vcpu->cpu == current_locked.vcpu->cpu &&
		    ffa_direct_msg_precedes_in_call_chain(current_locked,
							  target_vcpu_locked)) {
			struct ffa_value ret_interrupt =
				api_ffa_interrupt_return(0);

			/*
			 * If the target vCPU ran earlier in the same call
			 * chain as the current vCPU, SPMC leaves all
			 * intermediate execution contexts in blocked state and
			 * resumes the target vCPU to handle the secure
			 * interrupt.
			 * Under the current design, there is only one possible
			 * scenario in which this could happen: both the
			 * preempted (i.e. current) and target vCPUs are in the
			 * same NWd scheduled call chain, as described in
			 * Scenario 1 of Table 8.4 in the FF-A v1.1 EAC0 spec.
			 */
			assert(current_locked.vcpu->scheduling_mode ==
			       NWD_MODE);
			assert(target_vcpu->scheduling_mode == NWD_MODE);

			/*
			 * The execution preempted the call chain that involved
			 * the targeted and the current SPs.
			 * The targeted SP is set running, whilst the
			 * preempted SP is set PREEMPTED.
			 */
			vcpu_set_running(target_vcpu_locked, &ret_interrupt);

			ffa_interrupts_set_preempted_vcpu(target_vcpu_locked,
							  current_locked);
			next = target_vcpu;
			break;
		}

		/*
		 * `next` is NULL.
		 * Either:
		 * - An EL0 partition can't be resumed when in blocked state.
		 * - The target vCPU has migrated to a different
		 *   physical CPU. Hence, it cannot be resumed on this
		 *   CPU; the SPMC resumes the current vCPU.
		 * - The target vCPU cannot be resumed now because it is
		 *   in BLOCKED state (it yielded CPU cycles using
		 *   FFA_YIELD). The SPMC queues the virtual interrupt and
		 *   resumes the current vCPU, which could belong to either
		 *   a VM or an SP.
		 */
		break;
	case VCPU_STATE_PREEMPTED:
		/*
		 * We do not resume a target vCPU that has already been
		 * preempted by an interrupt. Make the vIRQ pending for the
		 * target SP (i.e., queue the interrupt) and continue to
		 * resume the current vCPU. Refer to section 8.3.2.1 bullet
		 * 3 in the FF-A v1.1 EAC0 spec.
		 */
		if (!target_vcpu->vm->el0_partition &&
		    target_vcpu->cpu == current_locked.vcpu->cpu &&
		    current->vm->id == HF_OTHER_WORLD_ID) {
			/*
			 * The target vCPU must have been preempted by a
			 * non-secure interrupt. It could not have been
			 * preempted by a secure interrupt as the current
			 * SPMC implementation does not allow secure
			 * interrupt prioritization. Moreover, the
			 * target vCPU should have been in Normal World
			 * scheduled mode as an SPMC scheduled mode call
			 * chain cannot be preempted by a non-secure
			 * interrupt.
			 */
			CHECK(target_vcpu->scheduling_mode == NWD_MODE);
		}
		break;
	case VCPU_STATE_RUNNING:
		/*
		 * The interrupt has been injected in the vCPU state.
		 */
		break;
	case VCPU_STATE_BLOCKED_INTERRUPT:
		/* WFI is a no-op for an SP. Fall through. */
	default:
		/*
		 * The vCPU of the target SP cannot be in OFF/ABORTED state if
		 * it has to handle a secure interrupt.
		 */
		panic("Secure interrupt cannot be signaled to target SP\n");
		break;
	}

	return next;
}

/**
 * Obtain the physical interrupt that triggered from the interrupt controller,
 * and inject the corresponding virtual interrupt into the target vCPU.
 * When a PE is executing in the Normal World and a secure interrupt triggers,
 * execution is trapped into EL3. The SPMD then routes the interrupt to the
 * SPMC through the FFA_INTERRUPT_32 ABI, synchronously, using the ERET
 * conduit.
 */
void ffa_interrupts_handle_secure_interrupt(struct vcpu *current,
					    struct vcpu **next)
{
	struct vcpu *target_vcpu;
	struct vcpu_locked target_vcpu_locked =
		(struct vcpu_locked){.vcpu = NULL};
	struct vcpu_locked current_locked;
	uint32_t intid;
	struct vm_locked target_vm_locked;
	uint32_t v_intid;

	/* Find the pending interrupt ID. This also activates the interrupt. */
	intid = plat_interrupts_get_pending_interrupt_id();
	v_intid = intid;

	/* Get the target vCPU and the virtual interrupt ID. */
	target_vcpu = ffa_interrupts_find_target_vcpu(current, intid, &v_intid);

	/*
	 * A spurious interrupt ID indicates there is no pending interrupt to
	 * acknowledge, so we do not need to signal end of interrupt.
	 */
	if (v_intid != SPURIOUS_INTID_OTHER_WORLD) {
		/*
		 * End the interrupt to drop the running priority. This also
		 * deactivates the physical interrupt. If not, the interrupt
		 * could trigger again after resuming the current vCPU.
		 */
		plat_interrupts_end_of_interrupt(intid);
	}

	if (target_vcpu == NULL) {
		/* No further handling required. Resume the current vCPU. */
		*next = NULL;
		return;
	}

	target_vm_locked = vm_lock(target_vcpu->vm);

	if (target_vcpu == current) {
		current_locked = vcpu_lock(current);
		target_vcpu_locked = current_locked;
	} else {
		struct two_vcpu_locked vcpus_locked;
		/* Lock both vCPUs at once to avoid deadlock. */
		vcpus_locked = vcpu_lock_both(current, target_vcpu);
		current_locked = vcpus_locked.vcpu1;
		target_vcpu_locked = vcpus_locked.vcpu2;
	}

	/*
	 * A race condition can occur with the execution contexts belonging to
	 * an MP SP. An interrupt targeting the execution context on the
	 * present core can trigger while the execution context of this SP on
	 * a different core is being aborted. In such a scenario, the physical
	 * interrupts belonging to the aborted SP are disabled and the current
	 * execution context is resumed.
	 */
	if (target_vcpu->state == VCPU_STATE_ABORTED ||
	    atomic_load_explicit(&target_vcpu->vm->aborting,
				 memory_order_relaxed)) {
		/* Clear fields corresponding to secure interrupt handling. */
		vcpu_secure_interrupt_complete(target_vcpu_locked);
		ffa_vm_disable_interrupts(target_vm_locked);

		/* Resume current vCPU. */
		*next = NULL;
	} else {
		/* Set the interrupt pending in the target vCPU. */
		vcpu_virt_interrupt_inject(target_vcpu_locked, v_intid);

		switch (intid) {
		case HF_IPI_INTID:
			if (hf_ipi_handle(target_vcpu_locked)) {
				*next = NULL;
				break;
			}
			/*
			 * Fall through in case handling has not been fully
			 * completed.
			 */
			[[fallthrough]];
		default:
			/*
			 * Invoke the handler for partitions from either
			 * S-EL0 or S-EL1.
			 */
			*next = ffa_interrupts_signal_secure_interrupt(
				current_locked, target_vcpu_locked, v_intid);
		}
	}

	if (target_vcpu_locked.vcpu != NULL) {
		vcpu_unlock(&target_vcpu_locked);
	}

	vcpu_unlock(&current_locked);
	vm_unlock(&target_vm_locked);
}

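/**
 * Inject the Notification Pending Interrupt (NPI) into the target vCPU if the
 * receiver is a partition from this world (an SP) and it has global
 * notifications pending, or per-vCPU notifications pending on the target
 * vCPU. Returns true if the NPI was injected.
 */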
bool ffa_interrupts_inject_notification_pending_interrupt(
	struct vcpu_locked target_locked, struct vm_locked receiver_locked)
{
	struct vm *next_vm = target_locked.vcpu->vm;
	bool ret = false;

	/*
	 * Inject the NPI if:
	 * - The targeted VM ID is from this world (i.e. if it is an SP).
	 * - The partition has global pending notifications, or there are
	 *   pending per-vCPU notifications in the next vCPU.
	 */
	if (vm_id_is_current_world(next_vm->id) &&
	    (vm_are_per_vcpu_notifications_pending(
		     receiver_locked, vcpu_index(target_locked.vcpu)) ||
	     vm_are_global_notifications_pending(receiver_locked))) {
		vcpu_virt_interrupt_inject(target_locked,
					   HF_NOTIFICATION_PENDING_INTID);
		ret = true;
	}

	return ret;
}

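/**
 * Unwind the NWd scheduled call chain when the current vCPU is preempted by a
 * non-secure interrupt: either hand execution back to the normal world (end
 * of the chain) or resume the previous SP vCPU in the chain via
 * FFA_INTERRUPT, leaving the current vCPU in the preempted state.
 */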
struct vcpu *ffa_interrupts_unwind_nwd_call_chain(struct vcpu *current_vcpu)
{
	struct vcpu *next;
	struct two_vcpu_locked both_vcpu_locked;

	/*
	 * The action specified by the SP in its manifest is ``Non-secure
	 * interrupt is signaled``. Refer to section 8.2.4 rules and guidelines
	 * bullet 4. Hence, the call chain starts unwinding. The current vCPU
	 * must have been part of a NWd scheduled call chain. Therefore, it is
	 * preempted and execution is either handed back to the normal world or
	 * to the previous SP vCPU in the call chain through the FFA_INTERRUPT
	 * ABI. The api_preempt() call is equivalent to calling
	 * api_switch_to_other_world for the current vCPU, passing
	 * FFA_INTERRUPT. The SP can be resumed later by FFA_RUN.
	 */
	CHECK(current_vcpu->scheduling_mode == NWD_MODE);
	assert(current_vcpu->call_chain.next_node == NULL);

	if (current_vcpu->call_chain.prev_node == NULL) {
		/* End of NWd scheduled call chain. */
		return api_preempt(current_vcpu);
	}

	next = current_vcpu->call_chain.prev_node;
	CHECK(next != NULL);

	/*
	 * Lock both vCPUs. Strictly speaking, it may not be necessary since
	 * next is guaranteed to be in BLOCKED state as it is the predecessor
	 * of the current vCPU in the present call chain.
	 */
	both_vcpu_locked = vcpu_lock_both(current_vcpu, next);

	/* Remove a node from the existing call chain. */
	current_vcpu->call_chain.prev_node = NULL;
	current_vcpu->state = VCPU_STATE_PREEMPTED;

	/*
	 * The SPMC applies the runtime model until the vCPU transitions from
	 * the running to the waiting state. Moreover, the SP continues to
	 * remain in its CPU cycle allocation mode. Hence, rt_model and
	 * scheduling_mode are not changed here.
	 */
	assert(next->state == VCPU_STATE_BLOCKED);
	assert(next->call_chain.next_node == current_vcpu);

	next->call_chain.next_node = NULL;

	vcpu_set_running(both_vcpu_locked.vcpu2,
			 &(struct ffa_value){
				 .func = FFA_INTERRUPT_32,
				 .arg1 = ffa_vm_vcpu(current_vcpu->vm->id,
						     vcpu_index(current_vcpu)),
			 });

	sl_unlock(&next->lock);
	sl_unlock(&current_vcpu->lock);

	return next;
}

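/**
 * Enable the virtual maintenance interrupts the current partition is entitled
 * to: the managed exit interrupt (as vIRQ or vFIQ, per the manifest) and the
 * notification pending interrupt if notifications are enabled.
 */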
static void ffa_interrupts_enable_virtual_maintenance_interrupts(
	struct vcpu_locked current_locked)
{
	struct vcpu *current;
	struct interrupts *interrupts;
	struct vm *vm;

	current = current_locked.vcpu;
	interrupts = &current->interrupts;
	vm = current->vm;

	if (ffa_vm_managed_exit_supported(vm)) {
		vcpu_virt_interrupt_enable(current_locked,
					   HF_MANAGED_EXIT_INTID, true);
		/*
		 * The SPMC decides the interrupt type for the managed exit
		 * signal based on the partition manifest.
		 */
		if (vm->me_signal_virq) {
			vcpu_virt_interrupt_set_type(interrupts,
						     HF_MANAGED_EXIT_INTID,
						     INTERRUPT_TYPE_IRQ);
		} else {
			vcpu_virt_interrupt_set_type(interrupts,
						     HF_MANAGED_EXIT_INTID,
						     INTERRUPT_TYPE_FIQ);
		}
	}

	if (vm->notifications.enabled) {
		vcpu_virt_interrupt_enable(current_locked,
					   HF_NOTIFICATION_PENDING_INTID, true);
	}
}

/**
 * Enable relevant virtual interrupts for Secure Partitions.
 * For all SPs, any applicable virtual maintenance interrupts are enabled.
 * Additionally, for S-EL0 partitions, all the interrupts declared in the
 * partition manifest are enabled at the virtual interrupt controller
 * interface early during the boot stage, as an S-EL0 SP need not call the
 * HF_INTERRUPT_ENABLE hypervisor ABI explicitly.
 */
void ffa_interrupts_enable_virtual_interrupts(struct vcpu_locked current_locked,
					      struct vm_locked vm_locked)
{
	struct vcpu *current;
	struct vm *vm;

	current = current_locked.vcpu;
	vm = current->vm;
	assert(vm == vm_locked.vm);

	if (vm->el0_partition) {
		for (uint32_t k = 0; k < VM_MANIFEST_MAX_INTERRUPTS; k++) {
			struct interrupt_descriptor int_desc;

			int_desc = vm_locked.vm->interrupt_desc[k];

			/* Interrupt descriptors are populated contiguously. */
			if (!int_desc.valid) {
				break;
			}
			vcpu_virt_interrupt_enable(current_locked,
						   int_desc.interrupt_id, true);
		}
	}

	ffa_interrupts_enable_virtual_maintenance_interrupts(current_locked);
}

/**
 * Reconfigure an interrupt belonging to the current partition at runtime.
 * At present, this paravirtualized interface only allows the following
 * commands, which signify what change is being requested by the current
 * partition:
 * - Change the target CPU of the interrupt.
 * - Change the security state of the interrupt.
 * - Enable or disable the physical interrupt.
 */
int64_t ffa_interrupts_reconfigure(uint32_t int_id, uint32_t command,
				   uint32_t value, struct vcpu *current)
{
	struct vm *vm = current->vm;
	struct vm_locked vm_locked;
	int64_t ret = -1;
	struct interrupt_descriptor *int_desc = NULL;

	/*
	 * Lock the VM to protect the interrupt descriptor from being modified
	 * concurrently.
	 */
	vm_locked = vm_lock(vm);

	switch (command) {
	case INT_RECONFIGURE_TARGET_PE:
		/* Here, value represents the target PE index. */
		if (value >= MAX_CPUS) {
			dlog_verbose(
				"Illegal target PE index specified while "
				"reconfiguring interrupt %x\n",
				int_id);
			goto out_unlock;
		}

		/*
		 * A UP SP cannot reconfigure an interrupt to be targeted at
		 * any physical CPU other than the one it is currently
		 * running on.
		 */
		if (vm_is_up(vm) && value != cpu_index(current->cpu)) {
			dlog_verbose(
				"Illegal target PE index specified by current "
				"UP SP\n");
			goto out_unlock;
		}

		/* Configure the interrupt to be routed to a specific CPU. */
		int_desc = vm_interrupt_set_target_mpidr(
			vm_locked, int_id, cpu_find_index(value)->id);
		break;
	case INT_RECONFIGURE_SEC_STATE:
		/* Specify the new security state of the interrupt. */
		if (value != INT_DESC_SEC_STATE_NS &&
		    value != INT_DESC_SEC_STATE_S) {
			dlog_verbose(
				"Illegal value %x specified while "
				"reconfiguring interrupt %x\n",
				value, int_id);
			goto out_unlock;
		}
		int_desc = vm_interrupt_set_sec_state(vm_locked, int_id, value);
		break;
	case INT_RECONFIGURE_ENABLE:
		/* Enable or disable the interrupt. */
		if (value != INT_DISABLE && value != INT_ENABLE) {
			dlog_verbose(
				"Illegal value %x specified while "
				"reconfiguring interrupt %x\n",
				value, int_id);
			goto out_unlock;
		} else {
			int_desc = vm_interrupt_set_enable(vm_locked, int_id,
							   value == INT_ENABLE);
		}
		break;
	default:
		dlog_verbose("Interrupt reconfigure: Unsupported command %x\n",
			     command);
		goto out_unlock;
	}

	/* Check if the interrupt belongs to the current SP. */
	if (int_desc == NULL) {
		dlog_verbose("Interrupt %x does not belong to current SP\n",
			     int_id);
		goto out_unlock;
	}

	ret = 0;
	plat_interrupts_reconfigure_interrupt(*int_desc);

out_unlock:
	vm_unlock(&vm_locked);

	return ret;
}

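/**
 * Check for pending virtual interrupts when an FF-A call is about to take the
 * calling partition to the waiting state. Depending on the partition's SRI
 * policy, either pend a delayed SRI and let the call complete, or intercept
 * the call: `interrupt_ret` is filled with FFA_INTERRUPT and the pending
 * interrupt ID so that the interrupt is handled before the partition waits.
 * Returns true if the call was intercepted.
 */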
bool ffa_interrupts_intercept_call(struct vcpu_locked current_locked,
				   struct vcpu_locked next_locked,
				   struct ffa_value *interrupt_ret)
{
	uint32_t intid;
	struct vm *current_vm = current_locked.vcpu->vm;

	/* No pending interrupts, no need to intercept or trigger an SRI. */
	if (vcpu_virt_interrupt_count_get(current_locked) == 0U) {
		return false;
	}

	/*
	 * Do not intercept the call.
	 * Let the partition go into the waiting state as planned.
	 * Pend the SRI on the next world switch to the NWd.
	 */
	if (current_vm->sri_policy.intr_pending_entry_wait) {
		dlog_verbose(
			"Partition entry to wait. Interrupts pending. Send "
			"SRI.\n");
		ffa_notifications_sri_set_delayed(current_locked.vcpu->cpu);
		return false;
	}

	/*
	 * At this point the handling of ABIs which can be intercepted by
	 * 'ffa_interrupts_intercept_call' did all the partition/vCPU state
	 * changes assuming there were no interrupts pending and the call
	 * would not be preempted.
	 * So it helps to think of the current partition/vCPU as having already
	 * changed. If the call is intercepted, the current partition is left
	 * in the preempted state, and execution is given to the target of the
	 * interrupt. In the arguments to interrupt_resume_waiting, pass
	 * "next_locked" and "current_locked" as the current and target vCPU,
	 * respectively, in accordance with the description above.
	 */
	intid = interrupt_resume_waiting(next_locked, current_locked);

	assert(interrupt_ret != NULL);

	dlog_verbose("%s: Pending interrupt %d, intercepting FF-A call.\n",
		     __func__, intid);

	*interrupt_ret = api_ffa_interrupt_return(intid);

	vcpu_set_running(current_locked, NULL);

	return true;
}