/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <xen/bitops.h>
#include <xen/errno.h>
#include <xen/grant_table.h>
#include <xen/hypercall.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/livepatch.h>
#include <xen/sched.h>
#include <xen/softirq.h>
#include <xen/wait.h>

#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/current.h>
#include <asm/event.h>
#include <asm/gic.h>
#include <asm/guest_access.h>
#include <asm/irq.h>
#include <asm/p2m.h>
#include <asm/platform.h>
#include <asm/procinfo.h>
#include <asm/regs.h>
#include <asm/vfp.h>
#include <asm/vgic.h>
#include <asm/vtimer.h>

#include "vuart.h"

DEFINE_PER_CPU(struct vcpu *, curr_vcpu);

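/*
 * Idle the current physical CPU: process any pending softirqs first, then,
 * if the CPU is still haltable, wait for an interrupt (dsb; wfi) with IRQs
 * masked (a pending interrupt still terminates the wfi).
 */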
static void do_idle(void)
{
    unsigned int cpu = smp_processor_id();

    sched_tick_suspend();
    /* sched_tick_suspend() can raise TIMER_SOFTIRQ. Process it now. */
    process_pending_softirqs();

    local_irq_disable();
    if ( cpu_is_haltable(cpu) )
    {
        dsb(sy);
        wfi();
    }
    local_irq_enable();

    sched_tick_resume();
}

void idle_loop(void)
{
    unsigned int cpu = smp_processor_id();

    for ( ; ; )
    {
        if ( cpu_is_offline(cpu) )
            stop_cpu();

        /* Are we here for running vcpu context tasklets, or for idling? */
        if ( unlikely(tasklet_work_to_do(cpu)) )
            do_tasklet();
        /*
         * Test softirqs twice --- first to see if we should even try
         * scrubbing and then, after it is done, whether softirqs became
         * pending while we were scrubbing.
         */
        else if ( !softirq_pending(cpu) && !scrub_free_pages() &&
                  !softirq_pending(cpu) )
            do_idle();

        do_softirq();
        /*
         * We MUST be last (or before dsb, wfi). Otherwise after we get the
         * softirq we would execute dsb, wfi (and sleep) and not patch.
         */
        check_for_livepatch_work();
    }
}

static void ctxt_switch_from(struct vcpu *p)
{
    /* When the idle VCPU is running, Xen will always stay in hypervisor
     * mode. Therefore we don't need to save the context of an idle VCPU.
     */
    if ( is_idle_vcpu(p) )
        return;

    p2m_save_state(p);

    /* CP 15 */
    p->arch.csselr = READ_SYSREG(CSSELR_EL1);

    /* Control Registers */
    p->arch.cpacr = READ_SYSREG(CPACR_EL1);

    p->arch.contextidr = READ_SYSREG(CONTEXTIDR_EL1);
    p->arch.tpidr_el0 = READ_SYSREG(TPIDR_EL0);
    p->arch.tpidrro_el0 = READ_SYSREG(TPIDRRO_EL0);
    p->arch.tpidr_el1 = READ_SYSREG(TPIDR_EL1);

    /* Arch timer */
    p->arch.cntkctl = READ_SYSREG32(CNTKCTL_EL1);
    virt_timer_save(p);

    if ( is_32bit_domain(p->domain) && cpu_has_thumbee )
    {
        p->arch.teecr = READ_SYSREG32(TEECR32_EL1);
        p->arch.teehbr = READ_SYSREG32(TEEHBR32_EL1);
    }

#ifdef CONFIG_ARM_32
    p->arch.joscr = READ_CP32(JOSCR);
    p->arch.jmcr = READ_CP32(JMCR);
#endif

    isb();

    /* MMU */
    p->arch.vbar = READ_SYSREG(VBAR_EL1);
    p->arch.ttbcr = READ_SYSREG(TCR_EL1);
    p->arch.ttbr0 = READ_SYSREG64(TTBR0_EL1);
    p->arch.ttbr1 = READ_SYSREG64(TTBR1_EL1);
    if ( is_32bit_domain(p->domain) )
        p->arch.dacr = READ_SYSREG(DACR32_EL2);
    p->arch.par = READ_SYSREG64(PAR_EL1);
#if defined(CONFIG_ARM_32)
    p->arch.mair0 = READ_CP32(MAIR0);
    p->arch.mair1 = READ_CP32(MAIR1);
    p->arch.amair0 = READ_CP32(AMAIR0);
    p->arch.amair1 = READ_CP32(AMAIR1);
#else
    p->arch.mair = READ_SYSREG64(MAIR_EL1);
    p->arch.amair = READ_SYSREG64(AMAIR_EL1);
#endif

    /* Fault Status */
#if defined(CONFIG_ARM_32)
    p->arch.dfar = READ_CP32(DFAR);
    p->arch.ifar = READ_CP32(IFAR);
    p->arch.dfsr = READ_CP32(DFSR);
#elif defined(CONFIG_ARM_64)
    p->arch.far = READ_SYSREG64(FAR_EL1);
    p->arch.esr = READ_SYSREG64(ESR_EL1);
#endif

    if ( is_32bit_domain(p->domain) )
        p->arch.ifsr = READ_SYSREG(IFSR32_EL2);
    p->arch.afsr0 = READ_SYSREG(AFSR0_EL1);
    p->arch.afsr1 = READ_SYSREG(AFSR1_EL1);

    /* XXX MPU */

    /* VFP */
    vfp_save_state(p);

    /* VGIC */
    gic_save_state(p);

    isb();
}

static void ctxt_switch_to(struct vcpu *n)
{
    /* When the idle VCPU is running, Xen will always stay in hypervisor
     * mode. Therefore we don't need to restore the context of an idle VCPU.
     */
    if ( is_idle_vcpu(n) )
        return;

    p2m_restore_state(n);

    WRITE_SYSREG32(n->domain->arch.vpidr, VPIDR_EL2);
    WRITE_SYSREG(n->arch.vmpidr, VMPIDR_EL2);

    /* VGIC */
    gic_restore_state(n);

    /* VFP */
    vfp_restore_state(n);

    /* XXX MPU */

    /* Fault Status */
#if defined(CONFIG_ARM_32)
    WRITE_CP32(n->arch.dfar, DFAR);
    WRITE_CP32(n->arch.ifar, IFAR);
    WRITE_CP32(n->arch.dfsr, DFSR);
#elif defined(CONFIG_ARM_64)
    WRITE_SYSREG64(n->arch.far, FAR_EL1);
    WRITE_SYSREG64(n->arch.esr, ESR_EL1);
#endif

    if ( is_32bit_domain(n->domain) )
        WRITE_SYSREG(n->arch.ifsr, IFSR32_EL2);
    WRITE_SYSREG(n->arch.afsr0, AFSR0_EL1);
    WRITE_SYSREG(n->arch.afsr1, AFSR1_EL1);

    /* MMU */
    WRITE_SYSREG(n->arch.vbar, VBAR_EL1);
    WRITE_SYSREG(n->arch.ttbcr, TCR_EL1);
    WRITE_SYSREG64(n->arch.ttbr0, TTBR0_EL1);
    WRITE_SYSREG64(n->arch.ttbr1, TTBR1_EL1);

    /*
     * Erratum #852523: DACR32_EL2 must be restored before one of the
     * following sysregs: SCTLR_EL1, TCR_EL1, TTBR0_EL1, TTBR1_EL1 or
     * CONTEXTIDR_EL1.
     */
    if ( is_32bit_domain(n->domain) )
        WRITE_SYSREG(n->arch.dacr, DACR32_EL2);
    WRITE_SYSREG64(n->arch.par, PAR_EL1);
#if defined(CONFIG_ARM_32)
    WRITE_CP32(n->arch.mair0, MAIR0);
    WRITE_CP32(n->arch.mair1, MAIR1);
    WRITE_CP32(n->arch.amair0, AMAIR0);
    WRITE_CP32(n->arch.amair1, AMAIR1);
#elif defined(CONFIG_ARM_64)
    WRITE_SYSREG64(n->arch.mair, MAIR_EL1);
    WRITE_SYSREG64(n->arch.amair, AMAIR_EL1);
#endif
    isb();

    /* Control Registers */
    WRITE_SYSREG(n->arch.cpacr, CPACR_EL1);

    /*
     * This write to sysreg CONTEXTIDR_EL1 ensures we don't hit erratum
     * #852523, i.e. DACR32_EL2 not being correctly synchronized.
     */
    WRITE_SYSREG(n->arch.contextidr, CONTEXTIDR_EL1);
    WRITE_SYSREG(n->arch.tpidr_el0, TPIDR_EL0);
    WRITE_SYSREG(n->arch.tpidrro_el0, TPIDRRO_EL0);
    WRITE_SYSREG(n->arch.tpidr_el1, TPIDR_EL1);

    if ( is_32bit_domain(n->domain) && cpu_has_thumbee )
    {
        WRITE_SYSREG32(n->arch.teecr, TEECR32_EL1);
        WRITE_SYSREG32(n->arch.teehbr, TEEHBR32_EL1);
    }

#ifdef CONFIG_ARM_32
    WRITE_CP32(n->arch.joscr, JOSCR);
    WRITE_CP32(n->arch.jmcr, JMCR);
#endif
    isb();

    /* CP 15 */
    WRITE_SYSREG(n->arch.csselr, CSSELR_EL1);

    isb();

    /* This could trigger a hardware interrupt from the virtual
     * timer. The interrupt needs to be injected into the guest. */
    WRITE_SYSREG32(n->arch.cntkctl, CNTKCTL_EL1);
    virt_timer_restore(n);
}

/* Update per-VCPU guest runstate shared memory area (if registered). */
static void update_runstate_area(struct vcpu *v)
{
    void __user *guest_handle = NULL;

    if ( guest_handle_is_null(runstate_guest(v)) )
        return;

    if ( VM_ASSIST(v->domain, runstate_update_flag) )
    {
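        /*
         * Point guest_handle at the final byte of the guest's
         * state_entry_time: XEN_RUNSTATE_UPDATE is bit 63, so on a
         * little-endian guest that byte holds the update flag and can be
         * toggled with a single one-byte copy.
         */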
        guest_handle = &v->runstate_guest.p->state_entry_time + 1;
        guest_handle--;
        v->runstate.state_entry_time |= XEN_RUNSTATE_UPDATE;
        __raw_copy_to_guest(guest_handle,
                            (void *)(&v->runstate.state_entry_time + 1) - 1, 1);
        smp_wmb();
    }

    __copy_to_guest(runstate_guest(v), &v->runstate, 1);

    if ( guest_handle )
    {
        v->runstate.state_entry_time &= ~XEN_RUNSTATE_UPDATE;
        smp_wmb();
        __raw_copy_to_guest(guest_handle,
                            (void *)(&v->runstate.state_entry_time + 1) - 1, 1);
    }
}

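/*
 * Tail of a context switch, run with the incoming vCPU already current:
 * save the outgoing vCPU's state, load the incoming one's, re-enable
 * interrupts and tell the scheduler the previous context has been saved.
 */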
static void schedule_tail(struct vcpu *prev)
{
    ctxt_switch_from(prev);

    ctxt_switch_to(current);

    local_irq_enable();

    context_saved(prev);

    if ( prev != current )
        update_runstate_area(current);

    /* Ensure that the vcpu has an up-to-date time base. */
    update_vcpu_system_time(current);
}

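/*
 * First entry point of a vCPU that has never run: vcpu_initialise() sets
 * the saved PC to this function, so the first switch to the vCPU resumes
 * here. Finish the switch, then discard the stack frame and jump to the
 * idle loop or to the appropriate guest-return path.
 */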
static void continue_new_vcpu(struct vcpu *prev)
{
    schedule_tail(prev);

    if ( is_idle_vcpu(current) )
        reset_stack_and_jump(idle_loop);
    else if ( is_32bit_domain(current->domain) )
        /* check_wakeup_from_wait(); */
        reset_stack_and_jump(return_to_new_vcpu32);
    else
        /* check_wakeup_from_wait(); */
        reset_stack_and_jump(return_to_new_vcpu64);
}

void context_switch(struct vcpu *prev, struct vcpu *next)
{
    ASSERT(local_irq_is_enabled());
    ASSERT(prev != next);
    ASSERT(cpumask_empty(next->vcpu_dirty_cpumask));

    if ( prev != next )
        update_runstate_area(prev);

    local_irq_disable();

    /*
     * If the serrors_op is "FORWARD", we have to prevent forwarding an
     * SError to the wrong vCPU. So before the context switch we use
     * SYNCHRONIZE_SERROR to guarantee that any pending SError is caught
     * by the current vCPU.
     *
     * SKIP_CTXT_SWITCH_SERROR_SYNC will be set in cpu_hwcaps when the
     * serrors_op is NOT "FORWARD".
     */
    SYNCHRONIZE_SERROR(SKIP_CTXT_SWITCH_SERROR_SYNC);

    set_current(next);

    prev = __context_switch(prev, next);

    schedule_tail(prev);
}

void continue_running(struct vcpu *same)
{
    /* Nothing to do */
}

void sync_local_execstate(void)
{
    /* Nothing to do -- no lazy switching */
}

void sync_vcpu_execstate(struct vcpu *v)
{
    /* Nothing to do -- no lazy switching */
}

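/*
 * Fetch the next continuation argument from the va_list according to the
 * format character: 'i' = unsigned int, 'l' = unsigned long,
 * 'h' = guest handle (passed as a pointer).
 */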
#define next_arg(fmt, args) ({                                              \
    unsigned long __arg;                                                    \
    switch ( *(fmt)++ )                                                     \
    {                                                                       \
    case 'i': __arg = (unsigned long)va_arg(args, unsigned int);  break;    \
    case 'l': __arg = (unsigned long)va_arg(args, unsigned long); break;    \
    case 'h': __arg = (unsigned long)va_arg(args, void *);        break;    \
    default:  __arg = 0; BUG();                                             \
    }                                                                       \
    __arg;                                                                  \
})

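/*
 * Arrange for the current hypercall to be restarted: write the (possibly
 * updated) arguments back into the guest's argument registers and the
 * hypercall number into x16/r12, or into mcs->call when in a multicall.
 * The entry path is expected to rewind the guest PC when hcall_preempted
 * is set, so the guest re-issues the hypercall on its next run.
 */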
unsigned long hypercall_create_continuation(
    unsigned int op, const char *format, ...)
{
    struct mc_state *mcs = &current->mc_state;
    struct cpu_user_regs *regs;
    const char *p = format;
    unsigned long arg, rc;
    unsigned int i;
    va_list args;

    /* All hypercalls take at least one argument */
    BUG_ON( !p || *p == '\0' );

    current->hcall_preempted = true;

    va_start(args, format);

    if ( mcs->flags & MCSF_in_multicall )
    {
        for ( i = 0; *p != '\0'; i++ )
            mcs->call.args[i] = next_arg(p, args);

        /* Return value gets written back to mcs->call.result */
        rc = mcs->call.result;
    }
    else
    {
        regs = guest_cpu_user_regs();

#ifdef CONFIG_ARM_64
        if ( !is_32bit_domain(current->domain) )
        {
            regs->x16 = op;

            for ( i = 0; *p != '\0'; i++ )
            {
                arg = next_arg(p, args);

                switch ( i )
                {
                case 0: regs->x0 = arg; break;
                case 1: regs->x1 = arg; break;
                case 2: regs->x2 = arg; break;
                case 3: regs->x3 = arg; break;
                case 4: regs->x4 = arg; break;
                case 5: regs->x5 = arg; break;
                }
            }

            /* Return value gets written back to x0 */
            rc = regs->x0;
        }
        else
#endif
        {
            regs->r12 = op;

            for ( i = 0; *p != '\0'; i++ )
            {
                arg = next_arg(p, args);

                switch ( i )
                {
                case 0: regs->r0 = arg; break;
                case 1: regs->r1 = arg; break;
                case 2: regs->r2 = arg; break;
                case 3: regs->r3 = arg; break;
                case 4: regs->r4 = arg; break;
                case 5: regs->r5 = arg; break;
                }
            }

            /* Return value gets written back to r0 */
            rc = regs->r0;
        }
    }

    va_end(args);

    return rc;
}

void startup_cpu_idle_loop(void)
{
    struct vcpu *v = current;

    ASSERT(is_idle_vcpu(v));
    /* TODO
       cpumask_set_cpu(v->processor, v->domain->domain_dirty_cpumask);
       cpumask_set_cpu(v->processor, v->vcpu_dirty_cpumask);
    */

    reset_stack_and_jump(idle_loop);
}

struct domain *alloc_domain_struct(void)
{
    struct domain *d;
    BUILD_BUG_ON(sizeof(*d) > PAGE_SIZE);
    d = alloc_xenheap_pages(0, 0);
    if ( d == NULL )
        return NULL;

    clear_page(d);
    return d;
}

void free_domain_struct(struct domain *d)
{
    free_xenheap_page(d);
}

void dump_pageframe_info(struct domain *d)
{

}

struct vcpu *alloc_vcpu_struct(void)
{
    struct vcpu *v;
    BUILD_BUG_ON(sizeof(*v) > PAGE_SIZE);
    v = alloc_xenheap_pages(0, 0);
    if ( v != NULL )
        clear_page(v);
    return v;
}

void free_vcpu_struct(struct vcpu *v)
{
    free_xenheap_page(v);
}

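/*
 * Allocate the vCPU's hypervisor stack, with struct cpu_info placed at its
 * top, and prime the saved context so the first switch to this vCPU enters
 * continue_new_vcpu(). Idle vCPUs stop there; guest vCPUs additionally get
 * their GIC and timer state initialised.
 */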
int vcpu_initialise(struct vcpu *v)
{
    int rc = 0;

    BUILD_BUG_ON( sizeof(struct cpu_info) > STACK_SIZE );

    v->arch.stack = alloc_xenheap_pages(STACK_ORDER, MEMF_node(vcpu_to_node(v)));
    if ( v->arch.stack == NULL )
        return -ENOMEM;

    v->arch.cpu_info = (struct cpu_info *)(v->arch.stack
                                           + STACK_SIZE
                                           - sizeof(struct cpu_info));

    memset(&v->arch.saved_context, 0, sizeof(v->arch.saved_context));
    v->arch.saved_context.sp = (register_t)v->arch.cpu_info;
    v->arch.saved_context.pc = (register_t)continue_new_vcpu;

    /* Idle VCPUs don't need the rest of this setup */
    if ( is_idle_vcpu(v) )
        return rc;

    v->arch.sctlr = SCTLR_GUEST_INIT;

    v->arch.vmpidr = MPIDR_SMP | vcpuid_to_vaffinity(v->vcpu_id);

    v->arch.actlr = READ_SYSREG32(ACTLR_EL1);

    v->arch.hcr_el2 = get_default_hcr_flags();

    processor_vcpu_initialise(v);

    if ( (rc = vcpu_vgic_init(v)) != 0 )
        goto fail;

    if ( (rc = vcpu_vtimer_init(v)) != 0 )
        goto fail;

    return rc;

fail:
    vcpu_destroy(v);
    return rc;
}

void vcpu_destroy(struct vcpu *v)
{
    vcpu_timer_destroy(v);
    vcpu_vgic_free(v);
    free_xenheap_pages(v->arch.stack, STACK_ORDER);
}

void vcpu_switch_to_aarch64_mode(struct vcpu *v)
{
    v->arch.hcr_el2 |= HCR_RW;
}

int arch_domain_create(struct domain *d, unsigned int domcr_flags,
                       struct xen_arch_domainconfig *config)
{
    int rc, count = 0;

    BUILD_BUG_ON(GUEST_MAX_VCPUS < MAX_VIRT_CPUS);
    d->arch.relmem = RELMEM_not_started;

    /* Idle domains do not need this setup */
    if ( is_idle_domain(d) )
        return 0;

    ASSERT(config != NULL);

    /* p2m_init relies on some value initialized by the IOMMU subsystem */
    if ( (rc = iommu_domain_init(d)) != 0 )
        goto fail;

    if ( (rc = p2m_init(d)) != 0 )
        goto fail;

    rc = -ENOMEM;
    if ( (d->shared_info = alloc_xenheap_pages(0, 0)) == NULL )
        goto fail;

    /* Default the virtual ID to match the physical */
    d->arch.vpidr = boot_cpu_data.midr.bits;

    clear_page(d->shared_info);
    share_xen_page_with_guest(
        virt_to_page(d->shared_info), d, XENSHARE_writable);

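    /*
     * Resolve the requested GIC version: NATIVE follows the host GIC,
     * an explicit v2/v3 request is honoured as-is, and anything else is
     * rejected with -EOPNOTSUPP.
     */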
    switch ( config->gic_version )
    {
    case XEN_DOMCTL_CONFIG_GIC_NATIVE:
        switch ( gic_hw_version() )
        {
        case GIC_V2:
            config->gic_version = XEN_DOMCTL_CONFIG_GIC_V2;
            d->arch.vgic.version = GIC_V2;
            break;

        case GIC_V3:
            config->gic_version = XEN_DOMCTL_CONFIG_GIC_V3;
            d->arch.vgic.version = GIC_V3;
            break;

        default:
            BUG();
        }
        break;

    case XEN_DOMCTL_CONFIG_GIC_V2:
        d->arch.vgic.version = GIC_V2;
        break;

    case XEN_DOMCTL_CONFIG_GIC_V3:
        d->arch.vgic.version = GIC_V3;
        break;

    default:
        rc = -EOPNOTSUPP;
        goto fail;
    }

    if ( (rc = domain_vgic_register(d, &count)) != 0 )
        goto fail;

    if ( (rc = domain_io_init(d, count + MAX_IO_HANDLER)) != 0 )
        goto fail;

    if ( (rc = domain_vgic_init(d, config->nr_spis)) != 0 )
        goto fail;

    if ( (rc = domain_vtimer_init(d, config)) != 0 )
        goto fail;

    update_domain_wallclock_time(d);

    /*
     * The hardware domain will get a PPI later in
     * arch/arm/domain_build.c depending on the
     * interrupt map of the hardware.
     */
    if ( !is_hardware_domain(d) )
    {
        d->arch.evtchn_irq = GUEST_EVTCHN_PPI;
        /* At this stage vgic_reserve_virq should never fail */
        if ( !vgic_reserve_virq(d, GUEST_EVTCHN_PPI) )
            BUG();
    }

    /*
     * Virtual UART is only used by linux early printk and decompress code.
     * Only use it for the hardware domain because the linux kernel may not
     * support multi-platform.
     */
    if ( is_hardware_domain(d) && (rc = domain_vuart_init(d)) )
        goto fail;

    return 0;

fail:
    d->is_dying = DOMDYING_dead;
    arch_domain_destroy(d);

    return rc;
}

void arch_domain_destroy(struct domain *d)
{
    /* IOMMU page table is shared with P2M, always call
     * iommu_domain_destroy() before p2m_teardown().
     */
    iommu_domain_destroy(d);
    p2m_teardown(d);
    domain_vgic_free(d);
    domain_vuart_free(d);
    free_xenheap_page(d->shared_info);
#ifdef CONFIG_ACPI
    free_xenheap_pages(d->arch.efi_acpi_table,
                       get_order_from_bytes(d->arch.efi_acpi_len));
#endif
    domain_io_free(d);
}

void arch_domain_shutdown(struct domain *d)
{
}

void arch_domain_pause(struct domain *d)
{
}

void arch_domain_unpause(struct domain *d)
{
}

int arch_domain_soft_reset(struct domain *d)
{
    return -ENOSYS;
}

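/*
 * Check that a PSR supplied by the toolstack or the guest selects a mode
 * the guest is allowed to run in: monitor/hyp (AArch32) and EL2/EL3
 * (AArch64, see is_guest_pv64_psr() below) are rejected.
 */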
static int is_guest_pv32_psr(uint32_t psr)
{
    switch (psr & PSR_MODE_MASK)
    {
    case PSR_MODE_USR:
    case PSR_MODE_FIQ:
    case PSR_MODE_IRQ:
    case PSR_MODE_SVC:
    case PSR_MODE_ABT:
    case PSR_MODE_UND:
    case PSR_MODE_SYS:
        return 1;
    case PSR_MODE_MON:
    case PSR_MODE_HYP:
    default:
        return 0;
    }
}


#ifdef CONFIG_ARM_64
static int is_guest_pv64_psr(uint32_t psr)
{
    if ( psr & PSR_MODE_BIT )
        return 0;

    switch (psr & PSR_MODE_MASK)
    {
    case PSR_MODE_EL1h:
    case PSR_MODE_EL1t:
    case PSR_MODE_EL0t:
        return 1;
    case PSR_MODE_EL3h:
    case PSR_MODE_EL3t:
    case PSR_MODE_EL2h:
    case PSR_MODE_EL2t:
    default:
        return 0;
    }
}
#endif

/*
 * Initialise VCPU state. The context can be supplied by either the
 * toolstack (XEN_DOMCTL_setvcpucontext) or the guest
 * (VCPUOP_initialise) and therefore must be properly validated.
 */
int arch_set_info_guest(
    struct vcpu *v, vcpu_guest_context_u c)
{
    struct vcpu_guest_context *ctxt = c.nat;
    struct vcpu_guest_core_regs *regs = &c.nat->user_regs;

    if ( is_32bit_domain(v->domain) )
    {
        if ( !is_guest_pv32_psr(regs->cpsr) )
            return -EINVAL;

        if ( regs->spsr_svc && !is_guest_pv32_psr(regs->spsr_svc) )
            return -EINVAL;
        if ( regs->spsr_abt && !is_guest_pv32_psr(regs->spsr_abt) )
            return -EINVAL;
        if ( regs->spsr_und && !is_guest_pv32_psr(regs->spsr_und) )
            return -EINVAL;
        if ( regs->spsr_irq && !is_guest_pv32_psr(regs->spsr_irq) )
            return -EINVAL;
        if ( regs->spsr_fiq && !is_guest_pv32_psr(regs->spsr_fiq) )
            return -EINVAL;
    }
#ifdef CONFIG_ARM_64
    else
    {
        if ( !is_guest_pv64_psr(regs->cpsr) )
            return -EINVAL;

        if ( regs->spsr_el1 && !is_guest_pv64_psr(regs->spsr_el1) )
            return -EINVAL;
    }
#endif

    vcpu_regs_user_to_hyp(v, regs);

    v->arch.sctlr = ctxt->sctlr;
    v->arch.ttbr0 = ctxt->ttbr0;
    v->arch.ttbr1 = ctxt->ttbr1;
    v->arch.ttbcr = ctxt->ttbcr;

    v->is_initialised = 1;

    if ( ctxt->flags & VGCF_online )
        clear_bit(_VPF_down, &v->pause_flags);
    else
        set_bit(_VPF_down, &v->pause_flags);

    return 0;
}

int arch_initialise_vcpu(struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg)
{
    return default_initialise_vcpu(v, arg);
}

int arch_vcpu_reset(struct vcpu *v)
{
    vcpu_end_shutdown_deferral(v);
    return 0;
}

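/*
 * Drop the domain's references to every page on the given list: the
 * allocation reference (_PGC_allocated) plus the one taken here, so a page
 * is freed once no other references remain. Returns -ERESTART when
 * preempted so the caller can restart later.
 */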
static int relinquish_memory(struct domain *d, struct page_list_head *list)
{
    struct page_info *page, *tmp;
    int ret = 0;

    /* Use a recursive lock, as we may enter 'free_domheap_page'. */
    spin_lock_recursive(&d->page_alloc_lock);

    page_list_for_each_safe( page, tmp, list )
    {
        /* Grab a reference to the page so it won't disappear from under us. */
        if ( unlikely(!get_page(page, d)) )
            /*
             * Couldn't get a reference -- someone is freeing this page and
             * has already committed to doing so, so no more to do here.
             *
             * Note that the page must be left on the list, a list_del
             * here will clash with the list_del done by the other
             * party in the race and corrupt the list head.
             */
            continue;

        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
            put_page(page);

        put_page(page);

        if ( hypercall_preempt_check() )
        {
            ret = -ERESTART;
            goto out;
        }
    }

  out:
    spin_unlock_recursive(&d->page_alloc_lock);
    return ret;
}

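/*
 * Teardown is a restartable state machine: d->arch.relmem records how far
 * we have got, so an -ERESTART from a preempted step resumes in the same
 * phase on the next invocation. Each case deliberately falls through.
 */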
int domain_relinquish_resources(struct domain *d)
{
    int ret = 0;

    switch ( d->arch.relmem )
    {
    case RELMEM_not_started:
        ret = iommu_release_dt_devices(d);
        if ( ret )
            return ret;

        /*
         * Release the resources allocated for vpl011 via the
         * XEN_DOMCTL_vuart_op DOMCTL call.
         */
        domain_vpl011_deinit(d);

        d->arch.relmem = RELMEM_xen;
        /* Fallthrough */

    case RELMEM_xen:
        ret = relinquish_memory(d, &d->xenpage_list);
        if ( ret )
            return ret;

        d->arch.relmem = RELMEM_page;
        /* Fallthrough */

    case RELMEM_page:
        ret = relinquish_memory(d, &d->page_list);
        if ( ret )
            return ret;

        d->arch.relmem = RELMEM_mapping;
        /* Fallthrough */

    case RELMEM_mapping:
        ret = relinquish_p2m_mapping(d);
        if ( ret )
            return ret;

        d->arch.relmem = RELMEM_done;
        /* Fallthrough */

    case RELMEM_done:
        break;

    default:
        BUG();
    }

    return 0;
}

void arch_dump_domain_info(struct domain *d)
{
    p2m_dump_info(d);
}


long do_arm_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
{
    switch ( cmd )
    {
    case VCPUOP_register_vcpu_info:
    case VCPUOP_register_runstate_memory_area:
        return do_vcpu_op(cmd, vcpuid, arg);
    default:
        return -EINVAL;
    }
}

long arch_do_vcpu_op(int cmd, struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg)
{
    return -ENOSYS;
}

void arch_dump_vcpu_info(struct vcpu *v)
{
    gic_dump_info(v);
}

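/* Set the global upcall-pending flag and, only if it was not already set,
 * inject the domain's event-channel PPI into the vCPU. */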
void vcpu_mark_events_pending(struct vcpu *v)
{
    int already_pending = test_and_set_bit(
        0, (unsigned long *)&vcpu_info(v, evtchn_upcall_pending));

    if ( already_pending )
        return;

    vgic_vcpu_inject_irq(v, v->domain->arch.evtchn_irq);
}

/* The ARM spec declares that even if local irqs are masked in
 * the CPSR register, an irq should wake up a cpu from WFI anyway.
 * For this reason we need to check for irqs that need delivery,
 * ignoring the CPSR register, *after* calling SCHEDOP_block to
 * avoid races with vgic_vcpu_inject_irq.
 */
void vcpu_block_unless_event_pending(struct vcpu *v)
{
    vcpu_block();
    if ( local_events_need_delivery_nomask() )
        vcpu_unblock(current);
}

unsigned int domain_max_vcpus(const struct domain *d)
{
    /*
     * Since evtchn_init would call domain_max_vcpus for poll_mask
     * allocation when the vgic_ops haven't been initialised yet,
     * we return MAX_VIRT_CPUS if d->arch.vgic.handler is null.
     */
    if ( !d->arch.vgic.handler )
        return MAX_VIRT_CPUS;
    else
        return min_t(unsigned int, MAX_VIRT_CPUS,
                     d->arch.vgic.handler->max_vcpus);
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */