/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <config.h>

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT

#include <arch/object/vcpu.h>
#include <armv/vcpu.h>
#include <arch/machine/debug.h> /* Arch_debug[A/Di]ssociateVCPUTCB() */
#include <arch/machine/debug_conf.h>
#include <drivers/timer/arm_generic.h>
#include <plat/platform_gen.h> /* Ensure correct GIC header is included */

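/* Boot-time initialisation of the VCPU support: probes the number of GIC
 * virtual interface list registers and leaves the VGIC/VCPU interface
 * disabled until a VCPU is first switched in. */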
BOOT_CODE void vcpu_boot_init(void)
{
    armv_vcpu_boot_init();
    gic_vcpu_num_list_regs = VGIC_VTR_NLISTREGS(get_gic_vcpu_ctrl_vtr());
    if (gic_vcpu_num_list_regs > GIC_VCPU_MAX_NUM_LR) {
        printf("Warning: VGIC is reporting more list registers than we support. Truncating\n");
        gic_vcpu_num_list_regs = GIC_VCPU_MAX_NUM_LR;
    }
    vcpu_disable(NULL);
    ARCH_NODE_STATE(armHSCurVCPU) = NULL;
    ARCH_NODE_STATE(armHSVCPUActive) = false;
}

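/* Save the hardware state of the currently loaded VCPU into its in-memory
 * context. State that is already written back when a VCPU is disabled (SCTLR,
 * GICH_HCR, virtual timer) is only read from hardware if the VCPU is still
 * active; the GIC virtual interface state (VMCR, APR, list registers) is
 * always saved. */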
static void vcpu_save(vcpu_t *vcpu, bool_t active)
{
    word_t i;
    unsigned int lr_num;

    assert(vcpu);
    dsb();
    /* If we aren't active then this state already got stored when
     * we were disabled */
    if (active) {
        vcpu_save_reg(vcpu, seL4_VCPUReg_SCTLR);
        vcpu->vgic.hcr = get_gic_vcpu_ctrl_hcr();
        save_virt_timer(vcpu);
    }

    /* Store GIC VCPU control state */
    vcpu->vgic.vmcr = get_gic_vcpu_ctrl_vmcr();
    vcpu->vgic.apr = get_gic_vcpu_ctrl_apr();
    lr_num = gic_vcpu_num_list_regs;
    for (i = 0; i < lr_num; i++) {
        vcpu->vgic.lr[i] = get_gic_vcpu_ctrl_lr(i);
    }
    armv_vcpu_save(vcpu, active);
}

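/* Read a VCPU register, routing the access to where the value currently
 * lives: if the VCPU is loaded on this core the value is normally in
 * hardware, except for registers that are written back to memory when the
 * VCPU is disabled and the VCPU is not active. writeVCPUReg() below applies
 * the same routing for writes. */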
static word_t readVCPUReg(vcpu_t *vcpu, word_t field)
{
    if (likely(ARCH_NODE_STATE(armHSCurVCPU) == vcpu)) {
        if (vcpu_reg_saved_when_disabled(field) && !ARCH_NODE_STATE(armHSVCPUActive)) {
            return vcpu_read_reg(vcpu, field);
        } else {
            return vcpu_hw_read_reg(field);
        }
    } else {
        return vcpu_read_reg(vcpu, field);
    }
}

static void writeVCPUReg(vcpu_t *vcpu, word_t field, word_t value)
{
    if (likely(ARCH_NODE_STATE(armHSCurVCPU) == vcpu)) {
        if (vcpu_reg_saved_when_disabled(field) && !ARCH_NODE_STATE(armHSVCPUActive)) {
            vcpu_write_reg(vcpu, field, value);
        } else {
            vcpu_hw_write_reg(field, value);
        }
    } else {
        vcpu_write_reg(vcpu, field, value);
    }
}

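/* Load a VCPU's saved context into the hardware: the VGIC is first turned
 * off, the GIC virtual interface state and the guest system registers are
 * restored, and finally the VCPU is enabled. */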
void vcpu_restore(vcpu_t *vcpu)
{
    assert(vcpu);
    word_t i;
    unsigned int lr_num;
    /* Turn off the VGIC */
    set_gic_vcpu_ctrl_hcr(0);
    isb();

    /* Restore GIC VCPU control state */
    set_gic_vcpu_ctrl_vmcr(vcpu->vgic.vmcr);
    set_gic_vcpu_ctrl_apr(vcpu->vgic.apr);
    lr_num = gic_vcpu_num_list_regs;
    for (i = 0; i < lr_num; i++) {
        set_gic_vcpu_ctrl_lr(i, vcpu->vgic.lr[i]);
    }

    /* restore registers */
#ifdef CONFIG_ARCH_AARCH64
    vcpu_restore_reg_range(vcpu, seL4_VCPUReg_TTBR0, seL4_VCPUReg_SPSR_EL1);
#else
    vcpu_restore_reg_range(vcpu, seL4_VCPUReg_ACTLR, seL4_VCPUReg_SPSRfiq);
#endif
    vcpu_enable(vcpu);
}

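/* Kernel entry point for a virtual PPI (e.g. the virtual timer interrupt):
 * the interrupt is masked and recorded in the current VCPU, and a VPPIEvent
 * fault is delivered to the current thread so that user level can forward the
 * event to the guest. */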
void VPPIEvent(irq_t irq)
{
#ifdef CONFIG_KERNEL_MCS
    /* If the current thread is currently enqueued it will not be able to
     * correctly receive a fault IPC message. This may occur due to the
     * budget check that happens early in handleInterruptEntry.
     *
     * If the current thread does *not* have budget, this interrupt is
     * ignored for now. As it is a level-triggered interrupt, it shall
     * be re-raised (and not lost).
     */
    if (thread_state_get_tcbQueued(NODE_STATE(ksCurThread)->tcbState)) {
        return;
    }
#endif

    if (ARCH_NODE_STATE(armHSVCPUActive)) {
        maskInterrupt(true, irq);
        assert(irqVPPIEventIndex(irq) != VPPIEventIRQ_invalid);
        ARCH_NODE_STATE(armHSCurVCPU)->vppi_masked[irqVPPIEventIndex(irq)] = true;
        current_fault = seL4_Fault_VPPIEvent_new(IRQT_TO_IRQ(irq));
        /* The current VCPU being active should indicate that the current thread
         * is runnable. At present, verification cannot establish this, so we
         * perform an extra check. */
        assert(isRunnable(NODE_STATE(ksCurThread)));
        if (isRunnable(NODE_STATE(ksCurThread))) {
            handleFault(NODE_STATE(ksCurThread));
        }
    }
}

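/* Kernel entry point for the GIC maintenance interrupt. On an end-of-interrupt
 * maintenance event the responsible list register is identified via EISR, its
 * EOI-maintenance bit is cleared (in hardware and in the shadow copy), and a
 * VGICMaintenance fault carrying the list-register index is delivered to the
 * current thread. */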
void VGICMaintenance(void)
{
    uint32_t eisr0, eisr1;
    uint32_t flags;

#ifdef CONFIG_KERNEL_MCS
    /* See VPPIEvent for details on this check. */
    if (thread_state_get_tcbQueued(NODE_STATE(ksCurThread)->tcbState)) {
        return;
    }
#endif

    /* We shouldn't get a VGICMaintenance interrupt while a VCPU isn't active,
     * but if one becomes pending before the VGIC is disabled we might get one
     * when returning to user level after disabling the current VCPU. In this
     * case we simply return and rely on the interrupt being raised again when
     * the VCPU is re-enabled.
     */
    if (!ARCH_NODE_STATE(armHSVCPUActive)) {
        printf("Received VGIC maintenance without active VCPU!\n");
        return;
    }

    eisr0 = get_gic_vcpu_ctrl_eisr0();
    eisr1 = get_gic_vcpu_ctrl_eisr1();
    flags = get_gic_vcpu_ctrl_misr();

    if (flags & VGIC_MISR_EOI) {
        int irq_idx;
        if (eisr0) {
            irq_idx = ctzl(eisr0);
        } else if (eisr1) {
            irq_idx = ctzl(eisr1) + 32;
        } else {
            irq_idx = -1;
        }

        /* The hardware should never give us an invalid index, but we don't
         * want to trust it that far. */
        if (irq_idx == -1 || irq_idx >= gic_vcpu_num_list_regs) {
            current_fault = seL4_Fault_VGICMaintenance_new(0, 0);
        } else {
            virq_t virq = get_gic_vcpu_ctrl_lr(irq_idx);
            switch (virq_get_virqType(virq)) {
            case virq_virq_active:
                virq = virq_virq_active_set_virqEOIIRQEN(virq, 0);
                break;
            case virq_virq_pending:
                virq = virq_virq_pending_set_virqEOIIRQEN(virq, 0);
                break;
            case virq_virq_invalid:
                virq = virq_virq_invalid_set_virqEOIIRQEN(virq, 0);
                break;
            }
            set_gic_vcpu_ctrl_lr(irq_idx, virq);
            /* decodeVCPUInjectIRQ below checks the vgic.lr register,
             * so we should sync the shadow data structure as well. */
            assert(ARCH_NODE_STATE(armHSCurVCPU) != NULL && ARCH_NODE_STATE(armHSVCPUActive));
            if (ARCH_NODE_STATE(armHSCurVCPU) != NULL && ARCH_NODE_STATE(armHSVCPUActive)) {
                ARCH_NODE_STATE(armHSCurVCPU)->vgic.lr[irq_idx] = virq;
            } else {
                /* FIXME This should not happen */
            }
            current_fault = seL4_Fault_VGICMaintenance_new(irq_idx, 1);
        }

    } else {
        /* Assume that it was an EOI for an LR that was not present. */
        current_fault = seL4_Fault_VGICMaintenance_new(0, 0);
    }

    /* The current VCPU being active should indicate that the current thread
     * is runnable. At present, verification cannot establish this, so we
     * perform an extra check. */
    assert(isRunnable(NODE_STATE(ksCurThread)));
    if (isRunnable(NODE_STATE(ksCurThread))) {
        handleFault(NODE_STATE(ksCurThread));
    }
}

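/* Initialise a freshly created VCPU object: arch-specific register defaults,
 * a shadow GICH_HCR with only the enable bit set, and (when configured) the
 * virtual timer bookkeeping. */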
void vcpu_init(vcpu_t *vcpu)
{
    armv_vcpu_init(vcpu);
    /* GICH VCPU interface control */
    vcpu->vgic.hcr = VGIC_HCR_EN;
#ifdef CONFIG_VTIMER_UPDATE_VOFFSET
    /* Virtual Timer interface */
    vcpu->virtTimer.last_pcount = 0;
#endif
}

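/* Lazily switch the hardware to a (possibly NULL) VCPU. Switching to a
 * different VCPU saves the old one (if any) and restores the new one;
 * switching to NULL leaves the current state loaded but disables it;
 * switching to the already-loaded but inactive VCPU just re-enables it. */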
void vcpu_switch(vcpu_t *new)
{
    if (likely(ARCH_NODE_STATE(armHSCurVCPU) != new)) {
        if (unlikely(new != NULL)) {
            if (unlikely(ARCH_NODE_STATE(armHSCurVCPU) != NULL)) {
                vcpu_save(ARCH_NODE_STATE(armHSCurVCPU), ARCH_NODE_STATE(armHSVCPUActive));
            }
            vcpu_restore(new);
            ARCH_NODE_STATE(armHSCurVCPU) = new;
            ARCH_NODE_STATE(armHSVCPUActive) = true;
        } else if (unlikely(ARCH_NODE_STATE(armHSVCPUActive))) {
            /* leave the current VCPU state loaded, but disable vgic and mmu */
#ifdef ARM_HYP_CP14_SAVE_AND_RESTORE_VCPU_THREADS
            saveAllBreakpointState(ARCH_NODE_STATE(armHSCurVCPU)->vcpuTCB);
#endif
            vcpu_disable(ARCH_NODE_STATE(armHSCurVCPU));
            ARCH_NODE_STATE(armHSVCPUActive) = false;
        }
    } else if (likely(!ARCH_NODE_STATE(armHSVCPUActive) && new != NULL)) {
        isb();
        vcpu_enable(new);
        ARCH_NODE_STATE(armHSVCPUActive) = true;
    }
}

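/* Drop the currently loaded VCPU without saving it: disable it if it is still
 * active and clear the current-VCPU pointer. Used when the loaded VCPU is
 * being dissociated from its TCB. */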
static void vcpu_invalidate_active(void)
{
    if (ARCH_NODE_STATE(armHSVCPUActive)) {
        vcpu_disable(NULL);
        ARCH_NODE_STATE(armHSVCPUActive) = false;
    }
    ARCH_NODE_STATE(armHSCurVCPU) = NULL;
}

void vcpu_finalise(vcpu_t *vcpu)
{
    if (vcpu->vcpuTCB) {
        dissociateVCPUTCB(vcpu, vcpu->vcpuTCB);
    }
}

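/* Bind a VCPU to a TCB. The binding is one-to-one, so any existing binding on
 * either object is broken first. */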
void associateVCPUTCB(vcpu_t *vcpu, tcb_t *tcb)
{
    if (tcb->tcbArch.tcbVCPU) {
        dissociateVCPUTCB(tcb->tcbArch.tcbVCPU, tcb);
    }
    if (vcpu->vcpuTCB) {
        dissociateVCPUTCB(vcpu, vcpu->vcpuTCB);
    }
    tcb->tcbArch.tcbVCPU = vcpu;
    vcpu->vcpuTCB = tcb;
}

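/* Break the binding between a VCPU and its TCB: invalidate the VCPU if it is
 * the one currently loaded, clear both pointers, and sanitise the thread's
 * saved CPSR/SPSR so that, without a VCPU, the thread can only run in user
 * mode. */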
void dissociateVCPUTCB(vcpu_t *vcpu, tcb_t *tcb)
{
    if (tcb->tcbArch.tcbVCPU != vcpu || vcpu->vcpuTCB != tcb) {
        fail("TCB and VCPU not associated.");
    }
    if (vcpu == ARCH_NODE_STATE(armHSCurVCPU)) {
        vcpu_invalidate_active();
    }
    tcb->tcbArch.tcbVCPU = NULL;
    vcpu->vcpuTCB = NULL;
#ifdef ARM_HYP_CP14_SAVE_AND_RESTORE_VCPU_THREADS
    Arch_debugDissociateVCPUTCB(tcb);
#endif

    /* sanitize the CPSR as without a VCPU a thread should only be in user mode */
#ifdef CONFIG_ARCH_AARCH64
    setRegister(tcb, SPSR_EL1, sanitiseRegister(SPSR_EL1, getRegister(tcb, SPSR_EL1), false));
#else
    setRegister(tcb, CPSR, sanitiseRegister(CPSR, getRegister(tcb, CPSR), false));
#endif
}

exception_t invokeVCPUWriteReg(vcpu_t *vcpu, word_t field, word_t value)
{
    writeVCPUReg(vcpu, field, value);
    return EXCEPTION_NONE;
}

exception_t decodeVCPUWriteReg(cap_t cap, unsigned int length, word_t *buffer)
{
    word_t field;
    word_t value;
    if (length < 2) {
        userError("VCPUWriteReg: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }
    field = getSyscallArg(0, buffer);
    value = getSyscallArg(1, buffer);
    if (field >= seL4_VCPUReg_Num) {
        userError("VCPUWriteReg: Invalid field 0x%lx.", (long)field);
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }
    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeVCPUWriteReg(VCPU_PTR(cap_vcpu_cap_get_capVCPUPtr(cap)), field, value);
}

exception_t invokeVCPUReadReg(vcpu_t *vcpu, word_t field, bool_t call)
{
    tcb_t *thread;
    thread = NODE_STATE(ksCurThread);
    word_t value = readVCPUReg(vcpu, field);
    if (call) {
        word_t *ipcBuffer = lookupIPCBuffer(true, thread);
        setRegister(thread, badgeRegister, 0);
        unsigned int length = setMR(thread, ipcBuffer, 0, value);
        setRegister(thread, msgInfoRegister, wordFromMessageInfo(
                        seL4_MessageInfo_new(0, 0, 0, length)));
    }
    setThreadState(NODE_STATE(ksCurThread), ThreadState_Running);
    return EXCEPTION_NONE;
}

exception_t decodeVCPUReadReg(cap_t cap, unsigned int length, bool_t call, word_t *buffer)
{
    word_t field;
    if (length < 1) {
        userError("VCPUReadReg: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    field = getSyscallArg(0, buffer);

    if (field >= seL4_VCPUReg_Num) {
        userError("VCPUReadReg: Invalid field 0x%lx.", (long)field);
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeVCPUReadReg(VCPU_PTR(cap_vcpu_cap_get_capVCPUPtr(cap)), field, call);
}

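/* Write a virtual IRQ into a list register. If the target VCPU is loaded on
 * this core the hardware list register is written directly; on SMP, if the
 * VCPU lives on another core the write is forwarded via an IPI; otherwise the
 * shadow copy in the VCPU object is updated and picked up on the next
 * restore. */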
exception_t invokeVCPUInjectIRQ(vcpu_t *vcpu, unsigned long index, virq_t virq)
{
    if (likely(ARCH_NODE_STATE(armHSCurVCPU) == vcpu)) {
        set_gic_vcpu_ctrl_lr(index, virq);
#ifdef ENABLE_SMP_SUPPORT
    } else if (vcpu->vcpuTCB->tcbAffinity != getCurrentCPUIndex()) {
        doRemoteOp3Arg(IpiRemoteCall_VCPUInjectInterrupt, (word_t)vcpu, index, virq.words[0], vcpu->vcpuTCB->tcbAffinity);
#endif /* ENABLE_SMP_SUPPORT */
    } else {
        vcpu->vgic.lr[index] = virq;
    }

    return EXCEPTION_NONE;
}

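/* Decode a VCPU_InjectIRQ invocation (typically issued through the
 * seL4_ARM_VCPU_InjectIRQ() wrapper in libsel4). As decoded below, the first
 * message register packs vid[15:0], priority[23:16] and group[31:24], with
 * the list-register index taken from bits [39:32] on AArch64 or from the
 * second message register on AArch32. All fields are range-checked before a
 * pending virq is built and injected. */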
exception_t decodeVCPUInjectIRQ(cap_t cap, unsigned int length, word_t *buffer)
{
    word_t vid, priority, group, index;
    vcpu_t *vcpu;
#ifdef CONFIG_ARCH_AARCH64
    word_t mr0;

    vcpu = VCPU_PTR(cap_vcpu_cap_get_capVCPUPtr(cap));

    if (length < 1) {
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    mr0 = getSyscallArg(0, buffer);
    vid = mr0 & 0xffff;
    priority = (mr0 >> 16) & 0xff;
    group = (mr0 >> 24) & 0xff;
    index = (mr0 >> 32) & 0xff;
#else
    uint32_t mr0, mr1;

    vcpu = VCPU_PTR(cap_vcpu_cap_get_capVCPUPtr(cap));

    if (length < 2) {
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    mr0 = getSyscallArg(0, buffer);
    mr1 = getSyscallArg(1, buffer);
    vid = mr0 & 0xffff;
    priority = (mr0 >> 16) & 0xff;
    group = (mr0 >> 24) & 0xff;
    index = mr1 & 0xff;
#endif

    /* Check IRQ parameters */
    if (vid > (1U << 10) - 1) {
        current_syscall_error.type = seL4_RangeError;
        current_syscall_error.rangeErrorMin = 0;
        current_syscall_error.rangeErrorMax = (1U << 10) - 1;
        current_syscall_error.invalidArgumentNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }
    if (priority > 31) {
        current_syscall_error.type = seL4_RangeError;
        current_syscall_error.rangeErrorMin = 0;
        current_syscall_error.rangeErrorMax = 31;
        current_syscall_error.invalidArgumentNumber = 2;
        return EXCEPTION_SYSCALL_ERROR;
    }
    if (group > 1) {
        current_syscall_error.type = seL4_RangeError;
        current_syscall_error.rangeErrorMin = 0;
        current_syscall_error.rangeErrorMax = 1;
        current_syscall_error.invalidArgumentNumber = 3;
        return EXCEPTION_SYSCALL_ERROR;
    }
    /* LR index out of range */
    if (index >= gic_vcpu_num_list_regs) {
        current_syscall_error.type = seL4_RangeError;
        current_syscall_error.rangeErrorMin = 0;
        current_syscall_error.rangeErrorMax = gic_vcpu_num_list_regs - 1;
        current_syscall_error.invalidArgumentNumber = 4;
        return EXCEPTION_SYSCALL_ERROR;
    }
    /* LR index is in use */
    if (virq_get_virqType(vcpu->vgic.lr[index]) == virq_virq_active) {
        userError("VGIC List register in use.");
        current_syscall_error.type = seL4_DeleteFirst;
        return EXCEPTION_SYSCALL_ERROR;
    }
    virq_t virq = virq_virq_pending_new(group, priority, 1, vid);

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeVCPUInjectIRQ(vcpu, index, virq);
}

exception_t decodeARMVCPUInvocation(
    word_t label,
    unsigned int length,
    cptr_t cptr,
    cte_t *slot,
    cap_t cap,
    bool_t call,
    word_t *buffer
)
{
    switch (label) {
    case ARMVCPUSetTCB:
        return decodeVCPUSetTCB(cap);
    case ARMVCPUReadReg:
        return decodeVCPUReadReg(cap, length, call, buffer);
    case ARMVCPUWriteReg:
        return decodeVCPUWriteReg(cap, length, buffer);
    case ARMVCPUInjectIRQ:
        return decodeVCPUInjectIRQ(cap, length, buffer);
    case ARMVCPUAckVPPI:
        return decodeVCPUAckVPPI(cap, length, buffer);
    default:
        userError("VCPU: Illegal operation.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }
}

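/* Decode a VCPU_AckVPPI invocation: validate the IRQ number, map it to a VPPI
 * event index and unmask the event so that the corresponding virtual PPI can
 * be delivered again. */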
exception_t decodeVCPUAckVPPI(cap_t cap, unsigned int length, word_t *buffer)
{
    vcpu_t *vcpu = VCPU_PTR(cap_vcpu_cap_get_capVCPUPtr(cap));

    if (length < 1) {
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    word_t irq_w = getSyscallArg(0, buffer);
    irq_t irq = (irq_t) CORE_IRQ_TO_IRQT(CURRENT_CPU_INDEX(), irq_w);
    exception_t status = Arch_checkIRQ(irq_w);
    if (status != EXCEPTION_NONE) {
        return status;
    }

    VPPIEventIRQ_t vppi = irqVPPIEventIndex(irq);
    if (vppi == VPPIEventIRQ_invalid) {
        userError("VCPUAckVPPI: Invalid irq number.");
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeVCPUAckVPPI(vcpu, vppi);
}

exception_t invokeVCPUAckVPPI(vcpu_t *vcpu, VPPIEventIRQ_t vppi)
{
    vcpu->vppi_masked[vppi] = false;
    return EXCEPTION_NONE;
}

exception_t decodeVCPUSetTCB(cap_t cap)
{
    cap_t tcbCap;
    if (current_extra_caps.excaprefs[0] == NULL) {
        userError("VCPU SetTCB: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }
    tcbCap = current_extra_caps.excaprefs[0]->cap;

    if (cap_get_capType(tcbCap) != cap_thread_cap) {
        userError("TCB cap is not a TCB cap.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeVCPUSetTCB(VCPU_PTR(cap_vcpu_cap_get_capVCPUPtr(cap)), TCB_PTR(cap_thread_cap_get_capTCBPtr(tcbCap)));
}

exception_t invokeVCPUSetTCB(vcpu_t *vcpu, tcb_t *tcb)
{
    associateVCPUTCB(vcpu, tcb);

    return EXCEPTION_NONE;
}

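/* Entry point for hypervisor traps (HSR/ESR based). The arch-specific handler
 * gets the first chance to deal with the trap; anything it does not handle is
 * turned into a VCPUFault delivered to the current thread. Under MCS the
 * fault is only raised if the current thread has budget. */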
void handleVCPUFault(word_t hsr)
{
    MCS_DO_IF_BUDGET({
        if (armv_handleVCPUFault(hsr))
        {
            return;
        }
        current_fault = seL4_Fault_VCPUFault_new(hsr);
        handleFault(NODE_STATE(ksCurThread));
    })
    schedule();
    activateThread();
}

#ifdef ENABLE_SMP_SUPPORT
void handleVCPUInjectInterruptIPI(vcpu_t *vcpu, unsigned long index, virq_t virq)
{
    if (likely(ARCH_NODE_STATE(armHSCurVCPU) == vcpu)) {
        set_gic_vcpu_ctrl_lr(index, virq);
    } else {
        vcpu->vgic.lr[index] = virq;
    }
}
#endif /* ENABLE_SMP_SUPPORT */

#endif