/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/vcpu.h"

#include "hf/arch/cpu.h"

#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/std.h"
#include "hf/vm.h"

/** GP register to be used to pass the current vCPU ID, at core bring up. */
#define PHYS_CORE_IDX_GP_REG 4

/**
 * Locks the given vCPU and returns a `vcpu_locked` wrapper representing the
 * newly held lock.
 */
struct vcpu_locked vcpu_lock(struct vcpu *vcpu)
{
	struct vcpu_locked locked = {
		.vcpu = vcpu,
	};

	sl_lock(&vcpu->lock);

	return locked;
}

/**
 * Locks two vCPUs, ensuring that the locking order follows the order of the
 * locks' addresses so that concurrent callers cannot deadlock.
 */
struct two_vcpu_locked vcpu_lock_both(struct vcpu *vcpu1, struct vcpu *vcpu2)
{
	struct two_vcpu_locked dual_lock;

	sl_lock_both(&vcpu1->lock, &vcpu2->lock);
	dual_lock.vcpu1.vcpu = vcpu1;
	dual_lock.vcpu2.vcpu = vcpu2;

	return dual_lock;
}

/**
 * Unlocks a vCPU previously locked with vcpu_lock, and updates `locked` to
 * reflect the fact that the vCPU is no longer locked.
 */
void vcpu_unlock(struct vcpu_locked *locked)
{
	sl_unlock(&locked->vcpu->lock);
	locked->vcpu = NULL;
}

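/*
 * Illustrative usage sketch (not part of this module; the vCPU variables
 * below are hypothetical): a caller locks a vCPU, operates on it, then
 * unlocks it. Two vCPUs are locked together via vcpu_lock_both so that the
 * address-ordered locking avoids deadlock between concurrent callers.
 *
 *	struct vcpu_locked locked = vcpu_lock(vcpu);
 *	... operate on locked.vcpu ...
 *	vcpu_unlock(&locked);
 *
 *	struct two_vcpu_locked both = vcpu_lock_both(vcpu_a, vcpu_b);
 *	... operate on both.vcpu1.vcpu and both.vcpu2.vcpu ...
 *	vcpu_unlock(&both.vcpu1);
 *	vcpu_unlock(&both.vcpu2);
 */
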
void vcpu_init(struct vcpu *vcpu, struct vm *vm)
{
	memset_s(vcpu, sizeof(*vcpu), 0, sizeof(*vcpu));
	sl_init(&vcpu->lock);
	vcpu->regs_available = true;
	vcpu->vm = vm;
	vcpu->state = VCPU_STATE_OFF;
	vcpu->direct_request_origin.is_ffa_req2 = false;
	vcpu->direct_request_origin.vm_id = HF_INVALID_VM_ID;
	vcpu->rt_model = RTM_SP_INIT;
	list_init(&vcpu->timer_node);
	list_init(&vcpu->ipi_list_node);
}

/**
 * Initialise the registers for the given vCPU and set the state to
 * VCPU_STATE_WAITING. The caller must hold the vCPU lock while calling this.
 */
void vcpu_on(struct vcpu_locked vcpu, ipaddr_t entry, uintreg_t arg)
{
	arch_regs_set_pc_arg(&vcpu.vcpu->regs, entry, arg);
	vcpu.vcpu->state = VCPU_STATE_WAITING;
}

ffa_vcpu_index_t vcpu_index(const struct vcpu *vcpu)
{
	size_t index = vcpu - vcpu->vm->vcpus;

	CHECK(index < UINT16_MAX);
	return index;
}

/**
 * Check whether the given vcpu_state is an off state, for the purpose of
 * turning vCPUs on and off. Note that Aborted still counts as ON for the
 * purposes of PSCI, because according to the PSCI specification (section
 * 5.7.1) a core is only considered to be off if it has been turned off
 * with a CPU_OFF call or hasn't yet been turned on with a CPU_ON call.
 */
bool vcpu_is_off(struct vcpu_locked vcpu)
{
	return (vcpu.vcpu->state == VCPU_STATE_OFF);
}

/**
 * Starts a vCPU of a secondary VM.
 *
 * Returns true if the secondary was reset and started, or false if it was
 * already on and so nothing was done.
 */
bool vcpu_secondary_reset_and_start(struct vcpu_locked vcpu_locked,
				    ipaddr_t entry, uintreg_t arg)
{
	struct vm *vm = vcpu_locked.vcpu->vm;
	bool vcpu_was_off;

	CHECK(vm->id != HF_PRIMARY_VM_ID);

	vcpu_was_off = vcpu_is_off(vcpu_locked);
	if (vcpu_was_off) {
		/*
		 * Set vCPU registers to a clean state ready for boot. As this
		 * is a secondary which can migrate between pCPUs, the ID of
		 * the vCPU is defined as the index and does not match the ID
		 * of the pCPU it is running on.
		 */
		arch_regs_reset(vcpu_locked.vcpu);
		vcpu_on(vcpu_locked, entry, arg);
	}

	return vcpu_was_off;
}

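/*
 * Illustrative sketch (not part of this module): a PSCI CPU_ON-style handler
 * would typically lock the target vCPU, call vcpu_secondary_reset_and_start
 * and use the return value to distinguish a fresh boot from a vCPU that is
 * already on. The variable names and the error handling below are
 * hypothetical.
 *
 *	struct vcpu_locked locked = vcpu_lock(target_vcpu);
 *
 *	if (!vcpu_secondary_reset_and_start(locked, entry, context_id)) {
 *		// Already on: report an error rather than restarting it.
 *	}
 *	vcpu_unlock(&locked);
 */
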
/**
 * Handles a page fault. It does so by determining if it's a legitimate or
 * spurious fault, and recovering from the latter.
 *
 * Returns true if the caller should resume the current vCPU, or false if its
 * VM should be aborted.
 */
bool vcpu_handle_page_fault(const struct vcpu *current,
			    struct vcpu_fault_info *f)
{
	struct vm *vm = current->vm;
	mm_mode_t mode;
	uint32_t mask = f->mode | MM_MODE_INVALID;
	bool resume;
	struct vm_locked locked_vm;

	locked_vm = vm_lock(vm);
	/*
	 * Check if this is a legitimate fault, i.e., if the page table doesn't
	 * allow the access attempted by the VM.
	 *
	 * Otherwise, this is a spurious fault, likely because another CPU is
	 * updating the page table. It is responsible for issuing global TLB
	 * invalidations while holding the VM lock, so we don't need to do
	 * anything else to recover from it. (Acquiring/releasing the lock
	 * ensured that the invalidations have completed.)
	 */
	if (!locked_vm.vm->el0_partition) {
		resume = vm_mem_get_mode(locked_vm, f->ipaddr,
					 ipa_add(f->ipaddr, 1), &mode) &&
			 (mode & mask) == f->mode;
	} else {
		/*
		 * For EL0 partitions we need to get the mode for the faulting
		 * vaddr.
		 */
		resume =
			vm_mem_get_mode(locked_vm, ipa_init(va_addr(f->vaddr)),
					ipa_add(ipa_init(va_addr(f->vaddr)), 1),
					&mode) &&
			(mode & mask) == f->mode;

		/*
		 * For EL0 partitions, if there is an instruction abort and the
		 * mode of the page is RWX, we don't resume since Hafnium does
		 * not allow writable and executable pages.
		 */
		if ((f->mode == MM_MODE_X) &&
		    ((mode & MM_MODE_W) == MM_MODE_W)) {
			resume = false;
		}
	}

	vm_unlock(&locked_vm);

	if (!resume) {
		dlog_warning(
			"Stage-%d page fault: pc=%#lx, vmid=%#x, vcpu=%u, "
			"vaddr=%#lx, ipaddr=%#lx, mode=%#x %#x\n",
			current->vm->el0_partition ? 1 : 2, va_addr(f->pc),
			vm->id, vcpu_index(current), va_addr(f->vaddr),
			ipa_addr(f->ipaddr), f->mode, mode);
	}

	return resume;
}

void vcpu_set_phys_core_idx(struct vcpu *vcpu)
{
	arch_regs_set_gp_reg(&vcpu->regs, cpu_index(vcpu->cpu),
			     PHYS_CORE_IDX_GP_REG);
}

/**
 * Sets the designated GP register through which the vCPU expects to receive
 * the boot info's address.
 */
void vcpu_set_boot_info_gp_reg(struct vcpu *vcpu)
{
	struct vm *vm = vcpu->vm;
	uint32_t gp_register_num = vm->boot_info.gp_register_num;

	if (vm->boot_info.blob_addr.ipa != 0U) {
		arch_regs_set_gp_reg(&vcpu->regs,
				     ipa_addr(vm->boot_info.blob_addr),
				     gp_register_num);
	}
}

static bool vcpu_is_virt_interrupt_enabled(struct interrupts *interrupts,
					   uint32_t intid)
{
	return interrupt_bitmap_get_value(&interrupts->interrupt_enabled,
					  intid) == 1U;
}

static void vcpu_virt_interrupt_set_enabled(struct interrupts *interrupts,
					    uint32_t intid)
{
	interrupt_bitmap_set_value(&interrupts->interrupt_enabled, intid);
}

static void vcpu_virt_interrupt_clear_enabled(struct interrupts *interrupts,
					      uint32_t intid)
{
	interrupt_bitmap_clear_value(&interrupts->interrupt_enabled, intid);
}

static void vcpu_virt_interrupt_set_pending(struct interrupts *interrupts,
					    uint32_t intid)
{
	interrupt_bitmap_set_value(&interrupts->interrupt_pending, intid);
}

static void vcpu_virt_interrupt_clear_pending(struct interrupts *interrupts,
					      uint32_t intid)
{
	interrupt_bitmap_clear_value(&interrupts->interrupt_pending, intid);
}

static void vcpu_irq_count_increment(struct vcpu_locked vcpu_locked)
{
	vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count++;
}

static void vcpu_irq_count_decrement(struct vcpu_locked vcpu_locked)
{
	vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count--;
}

static void vcpu_fiq_count_increment(struct vcpu_locked vcpu_locked)
{
	vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count++;
}

static void vcpu_fiq_count_decrement(struct vcpu_locked vcpu_locked)
{
	vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count--;
}

static void vcpu_interrupt_count_increment(struct vcpu_locked vcpu_locked,
					   uint32_t intid)
{
	struct interrupts *interrupts = &vcpu_locked.vcpu->interrupts;

	if (vcpu_virt_interrupt_get_type(interrupts, intid) ==
	    INTERRUPT_TYPE_IRQ) {
		vcpu_irq_count_increment(vcpu_locked);
	} else {
		vcpu_fiq_count_increment(vcpu_locked);
	}
}

static void vcpu_interrupt_count_decrement(struct vcpu_locked vcpu_locked,
					   uint32_t intid)
{
	struct interrupts *interrupts = &vcpu_locked.vcpu->interrupts;

	if (vcpu_virt_interrupt_get_type(interrupts, intid) ==
	    INTERRUPT_TYPE_IRQ) {
		vcpu_irq_count_decrement(vcpu_locked);
	} else {
		vcpu_fiq_count_decrement(vcpu_locked);
	}
}

uint32_t vcpu_virt_interrupt_irq_count_get(struct vcpu_locked vcpu_locked)
{
	return vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count;
}

uint32_t vcpu_virt_interrupt_fiq_count_get(struct vcpu_locked vcpu_locked)
{
	return vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count;
}

uint32_t vcpu_virt_interrupt_count_get(struct vcpu_locked vcpu_locked)
{
	return vcpu_virt_interrupt_irq_count_get(vcpu_locked) +
	       vcpu_virt_interrupt_fiq_count_get(vcpu_locked);
}

static void vcpu_interrupt_clear_decrement(struct vcpu_locked vcpu_locked,
					   uint32_t intid)
{
	struct interrupts *interrupts = &(vcpu_locked.vcpu->interrupts);

	/*
	 * Mark the virtual interrupt as no longer pending and decrement
	 * the interrupt count if it is enabled.
	 */
	vcpu_virt_interrupt_clear_pending(interrupts, intid);
	if (vcpu_is_virt_interrupt_enabled(interrupts, intid)) {
		vcpu_interrupt_count_decrement(vcpu_locked, intid);
	}
}

/**
 * Sets the vCPU state to VCPU_STATE_RUNNING and marks its registers as "not
 * available". If `args` is provided, the values are written to the vCPU's
 * context; this is only permitted while the vCPU is in the WAITING or BLOCKED
 * state, which is asserted accordingly.
 */
void vcpu_set_running(struct vcpu_locked target_locked,
		      const struct ffa_value *args)
{
	struct vcpu *target_vcpu = target_locked.vcpu;

	if (args != NULL) {
		CHECK(target_vcpu->regs_available);
		assert(target_vcpu->state == VCPU_STATE_WAITING ||
		       target_vcpu->state == VCPU_STATE_BLOCKED);

		arch_regs_set_retval(&target_vcpu->regs, *args);
	}

	/* Mark the registers as unavailable now. */
	target_vcpu->regs_available = false;

	/* We are about to resume target vCPU. */
	target_vcpu->state = VCPU_STATE_RUNNING;
}

void vcpu_enter_secure_interrupt_rtm(struct vcpu_locked vcpu_locked)
{
	struct vcpu *target_vcpu = vcpu_locked.vcpu;

	assert(target_vcpu->scheduling_mode == NONE);
	assert(target_vcpu->call_chain.prev_node == NULL);
	assert(target_vcpu->call_chain.next_node == NULL);
	assert(target_vcpu->rt_model == RTM_NONE);

	target_vcpu->scheduling_mode = SPMC_MODE;
	target_vcpu->rt_model = RTM_SEC_INTERRUPT;
}

static uint16_t queue_increment_index(uint16_t current_idx)
{
	/* Look at the next index. Wrap around if necessary. */
	if (current_idx == VINT_QUEUE_MAX - 1) {
		return 0;
	}

	return current_idx + 1;
}

/**
 * If the tail has reached the head of the queue and the count of queued
 * interrupts is 0, the queue is empty.
 */
static bool is_queue_empty(struct interrupt_queue *q)
{
	return q->head == q->tail && q->queued_vint_count == 0U;
}

/**
 * If the tail has reached the head of the queue and the count of queued
 * interrupts matches the size of the buffer, the queue is full.
 */
static bool is_queue_full(struct interrupt_queue *q)
{
	return q->head == q->tail && q->queued_vint_count == VINT_QUEUE_MAX;
}

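/*
 * Illustrative sketch (not compiled): head == tail alone cannot distinguish
 * an empty queue from a full one, so queued_vint_count breaks the tie.
 * Assuming VINT_QUEUE_MAX == 4 purely for the example:
 *
 *	head == tail, count == 0              -> empty
 *	head == tail, count == VINT_QUEUE_MAX -> full (tail has wrapped around)
 *	head == 1, tail == 3, count == 2      -> two entries queued
 */
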
/**
 * Queue the pending virtual interrupt for the target vCPU.
 *
 * Returns true if successful in pushing a new entry to the queue, or false
 * otherwise.
 */
static bool vcpu_interrupt_queue_push(struct vcpu_locked vcpu_locked,
				      uint32_t vint_id)
{
	struct interrupt_queue *q;
	uint16_t new_tail;

	assert(vint_id != HF_INVALID_INTID);

	q = &vcpu_locked.vcpu->interrupts.vint_q;

	/*
	 * A new entry is pushed at the tail of the queue. Upon successful
	 * push operation, the tail increments or wraps around.
	 */
	new_tail = queue_increment_index(q->tail);

	if (is_queue_full(q)) {
		return false;
	}

	/* Add the virtual interrupt to the queue. */
	q->vint_buffer[q->tail] = vint_id;
	q->tail = new_tail;

	assert(q->queued_vint_count < VINT_QUEUE_MAX);
	q->queued_vint_count++;

	return true;
}

/**
 * Remove the entry at the head of the specified vCPU's queue.
 * Returns the interrupt ID on success, or HF_INVALID_INTID if the queue is
 * empty.
 */
static uint32_t vcpu_interrupt_queue_pop(struct vcpu_locked vcpu_locked)
{
	struct interrupt_queue *q;
	uint16_t new_head;
	uint32_t vint_id;

	q = &vcpu_locked.vcpu->interrupts.vint_q;

	/* Check if queue is empty. */
	if (is_queue_empty(q)) {
		return HF_INVALID_INTID;
	}

	/*
	 * An entry is removed from the head of the queue. Once successful, the
	 * head is incremented or wrapped around if needed.
	 */
	new_head = queue_increment_index(q->head);
	vint_id = q->vint_buffer[q->head];
	q->head = new_head;

	assert(q->queued_vint_count > 0);
	q->queued_vint_count--;

	return vint_id;
}

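/*
 * Illustrative FIFO trace (not compiled; `locked` and the interrupt IDs 32
 * and 33 are arbitrary examples):
 *
 *	vcpu_interrupt_queue_push(locked, 32);  // queue: [32]
 *	vcpu_interrupt_queue_push(locked, 33);  // queue: [32, 33]
 *	vcpu_interrupt_queue_pop(locked);       // returns 32, queue: [33]
 *	vcpu_interrupt_queue_pop(locked);       // returns 33, queue: []
 *	vcpu_interrupt_queue_pop(locked);       // returns HF_INVALID_INTID
 */
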
/**
 * Look at the first pending virtual interrupt in the vCPU's queue. Note that
 * the entry is not removed from the queue.
 *
 * Returns the interrupt ID at the head of the queue, or HF_INVALID_INTID if
 * the queue is empty.
 */
static uint32_t vcpu_interrupt_queue_peek(struct vcpu_locked vcpu_locked)
{
	struct interrupt_queue *q;
	uint32_t queued_vint;

	q = &vcpu_locked.vcpu->interrupts.vint_q;

	/* Check if queue is empty. */
	if (is_queue_empty(q)) {
		return HF_INVALID_INTID;
	}

	queued_vint = q->vint_buffer[q->head];
	assert(queued_vint != HF_INVALID_INTID);

	return queued_vint;
}

/**
 * When interrupt handling is complete the preempted_vcpu field should go back
 * to NULL.
 */
void vcpu_secure_interrupt_complete(struct vcpu_locked vcpu_locked)
{
	struct vcpu *vcpu;

	vcpu = vcpu_locked.vcpu;
	vcpu->preempted_vcpu = NULL;
}

void vcpu_virt_interrupt_enable(struct vcpu_locked vcpu_locked,
				uint32_t vint_id, bool enable)
{
	struct interrupts *interrupts = &vcpu_locked.vcpu->interrupts;

	if (enable) {
		/*
		 * If it is pending and was not enabled before, increment the
		 * count.
		 */
		if (vcpu_is_virt_interrupt_pending(interrupts, vint_id) &&
		    !vcpu_is_virt_interrupt_enabled(interrupts, vint_id)) {
			vcpu_interrupt_count_increment(vcpu_locked, vint_id);
		}
		vcpu_virt_interrupt_set_enabled(interrupts, vint_id);
	} else {
		/*
		 * If it is pending and was enabled before, decrement the count.
		 */
		if (vcpu_is_virt_interrupt_pending(interrupts, vint_id) &&
		    vcpu_is_virt_interrupt_enabled(interrupts, vint_id)) {
			vcpu_interrupt_count_decrement(vcpu_locked, vint_id);
		}
		vcpu_virt_interrupt_clear_enabled(interrupts, vint_id);
	}
}

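/*
 * Illustrative sketch (not compiled): the enabled_and_pending counters only
 * track interrupts that are both pending and enabled, so toggling the enable
 * bit of a pending interrupt moves the count. `locked` and interrupt ID 32
 * are arbitrary examples.
 *
 *	vcpu_virt_interrupt_inject(locked, 32);         // pending, disabled
 *	vcpu_virt_interrupt_count_get(locked);          // -> 0
 *	vcpu_virt_interrupt_enable(locked, 32, true);   // pending, enabled
 *	vcpu_virt_interrupt_count_get(locked);          // -> 1
 *	vcpu_virt_interrupt_enable(locked, 32, false);  // pending, disabled
 *	vcpu_virt_interrupt_count_get(locked);          // -> 0
 */
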
/*
 * Find and return the first interrupt ID that is both pending and enabled.
 * The corresponding entry is left at the head of the queue so it can be
 * popped later.
 */
uint32_t vcpu_virt_interrupt_peek_pending_and_enabled(
	struct vcpu_locked vcpu_locked)
{
	uint32_t vint_id;
	struct interrupts *interrupts = &vcpu_locked.vcpu->interrupts;
	uint32_t pending_and_enabled_count =
		vcpu_virt_interrupt_count_get(vcpu_locked);

	/* First check there is a pending and enabled interrupt to return. */
	if (pending_and_enabled_count == 0) {
		return HF_INVALID_INTID;
	}

	/*
	 * At this point there is at least one pending and enabled interrupt
	 * in the queue, so rotate any interrupts that are not enabled to the
	 * back of the queue until the first enabled one reaches the head.
	 */
	vint_id = vcpu_interrupt_queue_peek(vcpu_locked);
	while (!vcpu_is_virt_interrupt_enabled(interrupts, vint_id)) {
		vcpu_interrupt_queue_pop(vcpu_locked);
		vcpu_interrupt_queue_push(vcpu_locked, vint_id);
		vint_id = vcpu_interrupt_queue_peek(vcpu_locked);
	}

	assert(vint_id != HF_INVALID_INTID);

	return vint_id;
}

/*
 * Get the next pending and enabled virtual interrupt ID.
 * Pops from the queue and clears the bitmap.
 */
uint32_t vcpu_virt_interrupt_get_pending_and_enabled(
	struct vcpu_locked vcpu_locked)
{
	uint32_t vint_id =
		vcpu_virt_interrupt_peek_pending_and_enabled(vcpu_locked);

	if (vint_id != HF_INVALID_INTID) {
		vcpu_interrupt_queue_pop(vcpu_locked);
		vcpu_interrupt_clear_decrement(vcpu_locked, vint_id);

		/*
		 * Reset interrupts_info_get_retrieved so that interrupts still
		 * pending on this vCPU can be included in the
		 * FFA_NOTIFICATION_INFO_GET list again. This is done here
		 * because this function clears the state of the virtual
		 * interrupt.
		 */
		vcpu_locked.vcpu->interrupts_info_get_retrieved = false;
	}

	return vint_id;
}

/*
 * Set a virtual interrupt to pending. Add it to the queue and set the bitmap.
 */
void vcpu_virt_interrupt_inject(struct vcpu_locked vcpu_locked,
				uint32_t vint_id)
{
	struct interrupts *interrupts = &vcpu_locked.vcpu->interrupts;

	/*
	 * An interrupt can only be pending once so return if it is
	 * already pending.
	 */
	if (vcpu_is_virt_interrupt_pending(interrupts, vint_id)) {
		return;
	}

	/* Push to the queue and set the bitmap. */
	if (!vcpu_interrupt_queue_push(vcpu_locked, vint_id)) {
		dlog_verbose(
			"Exhausted interrupt queue for vCPU %u of SP %#x\n",
			vcpu_index(vcpu_locked.vcpu), vcpu_locked.vcpu->vm->id);
		assert(false);
		return;
	}
	vcpu_virt_interrupt_set_pending(interrupts, vint_id);

	if (vcpu_is_virt_interrupt_enabled(interrupts, vint_id)) {
		vcpu_interrupt_count_increment(vcpu_locked, vint_id);
	}
}

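/*
 * Illustrative end-to-end sketch (not compiled): an injected and enabled
 * virtual interrupt is later retrieved in FIFO order, which clears its
 * pending state and decrements the count. `locked` and interrupt ID 32 are
 * arbitrary examples.
 *
 *	vcpu_virt_interrupt_enable(locked, 32, true);
 *	vcpu_virt_interrupt_inject(locked, 32);
 *	// ... later, when delivering interrupts to the vCPU:
 *	uint32_t id = vcpu_virt_interrupt_get_pending_and_enabled(locked);
 *	// id == 32; a second call would return HF_INVALID_INTID.
 */
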
void vcpu_virt_interrupt_clear(struct vcpu_locked vcpu_locked, uint32_t vint_id)
{
	struct interrupts *interrupts = &vcpu_locked.vcpu->interrupts;
	uint32_t queued_vint_count = interrupts->vint_q.queued_vint_count;

	/* See if interrupt is pending and therefore needs to be cleared. */
	if (!vcpu_is_virt_interrupt_pending(interrupts, vint_id)) {
		return;
	}

	for (uint32_t i = 0; i < queued_vint_count; i++) {
		uint32_t intid = vcpu_interrupt_queue_pop(vcpu_locked);

		if (intid == vint_id) {
			vcpu_interrupt_clear_decrement(vcpu_locked, intid);
		} else {
			/*
			 * If the interrupt is not the one we wish to remove,
			 * inject it again. We must pop and push all interrupts
			 * to ensure the FIFO ordering is maintained.
			 */
			vcpu_interrupt_queue_push(vcpu_locked, intid);
		}
	}
}

/**
 * Prepare the target vCPU to run after receiving a direct request ABI call.
 */
void vcpu_dir_req_set_state(struct vcpu_locked target_locked, bool is_ffa_req2,
			    ffa_id_t sender_vm_id, struct ffa_value args)
{
	struct vcpu *target_vcpu = target_locked.vcpu;

	target_vcpu->state = VCPU_STATE_RUNNING;
	target_vcpu->regs_available = false;
	target_vcpu->direct_request_origin.is_ffa_req2 = is_ffa_req2;
	target_vcpu->direct_request_origin.vm_id = sender_vm_id;
	target_vcpu->direct_request_origin.is_framework =
		ffa_is_framework_msg(args);

	arch_regs_set_retval(&target_vcpu->regs, args);
}

/**
 * Clear direct request origin vm_id and request type for the target vCPU.
 * Also, the scheduling mode and partition runtime model are reset.
 */
void vcpu_dir_req_reset_state(struct vcpu_locked vcpu_locked)
{
	struct vcpu *vcpu = vcpu_locked.vcpu;

	/* Clear direct request origin vm_id and request type. */
	vcpu->direct_request_origin.vm_id = HF_INVALID_VM_ID;
	vcpu->direct_request_origin.is_framework = false;

	/* Reset runtime model and scheduling mode. */
	vcpu->scheduling_mode = NONE;
	vcpu->rt_model = RTM_NONE;
}