/* SPDX-License-Identifier: GPL-2.0-only */
/******************************************************************************
 * Arch-specific domctl.c
 *
 * Copyright (c) 2012, Citrix Systems
 */

#include <xen/dt-overlay.h>
#include <xen/errno.h>
#include <xen/guest_access.h>
#include <xen/hypercall.h>
#include <xen/iocap.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/sched.h>
#include <xen/types.h>
#include <xsm/xsm.h>
#include <public/domctl.h>

arch_get_domain_info(const struct domain * d,struct xen_domctl_getdomaininfo * info)20 void arch_get_domain_info(const struct domain *d,
21 struct xen_domctl_getdomaininfo *info)
22 {
23 /* All ARM domains use hardware assisted paging. */
24 info->flags |= XEN_DOMINF_hap;
25
26 info->gpaddr_bits = p2m_ipa_bits;
27 }
28
handle_vuart_init(struct domain * d,struct xen_domctl_vuart_op * vuart_op)29 static int handle_vuart_init(struct domain *d,
30 struct xen_domctl_vuart_op *vuart_op)
31 {
32 int rc;
33 struct vpl011_init_info info;
34
35 info.console_domid = vuart_op->console_domid;
36 info.gfn = _gfn(vuart_op->gfn);
37
38 if ( d->creation_finished )
39 return -EPERM;
40
41 if ( vuart_op->type != XEN_DOMCTL_VUART_TYPE_VPL011 )
42 return -EOPNOTSUPP;
43
44 rc = domain_vpl011_init(d, &info);
45
46 if ( !rc )
47 vuart_op->evtchn = info.evtchn;
48
49 return rc;
50 }
51
/*
 * Arm implementation of the architecture-specific domctl hypercalls.
 *
 * Dispatches on domctl->cmd; unknown commands fall through to
 * subarch_do_domctl().  Returns 0 on success or a -errno value.
 */
long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
                    XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
{
    switch ( domctl->cmd )
    {
    case XEN_DOMCTL_cacheflush:
    {
        gfn_t s = _gfn(domctl->u.cacheflush.start_pfn);
        /* One past the last GFN in the range to flush. */
        gfn_t e = gfn_add(s, domctl->u.cacheflush.nr_pfns);
        int rc;

        /* Bound the amount of work a single invocation may request. */
        if ( domctl->u.cacheflush.nr_pfns > (1U<<MAX_ORDER) )
            return -EINVAL;

        /* Reject ranges whose end wrapped around the GFN space. */
        if ( gfn_x(e) < gfn_x(s) )
            return -EINVAL;

        /* XXX: Handle preemption */
        do
            rc = p2m_cache_flush_range(d, &s, e);
        while ( rc == -ERESTART );

        return rc;
    }
    case XEN_DOMCTL_bind_pt_irq:
    {
        int rc;
        struct xen_domctl_bind_pt_irq *bind = &domctl->u.bind_pt_irq;
        uint32_t irq = bind->u.spi.spi;
        uint32_t virq = bind->machine_irq;

        /* We only support PT_IRQ_TYPE_SPI */
        if ( bind->irq_type != PT_IRQ_TYPE_SPI )
            return -EOPNOTSUPP;

        /*
         * XXX: For now map the interrupt 1:1. Other support will require to
         * modify domain_pirq_to_irq macro.
         */
        if ( irq != virq )
            return -EINVAL;

        /*
         * ARM doesn't require separating IRQ assignation into 2
         * hypercalls (PHYSDEVOP_map_pirq and DOMCTL_bind_pt_irq).
         *
         * Call xsm_map_domain_irq in order to keep the same XSM checks
         * done by the 2 hypercalls for consistency with other
         * architectures.
         */
        rc = xsm_map_domain_irq(XSM_HOOK, d, irq, NULL);
        if ( rc )
            return rc;

        rc = xsm_bind_pt_irq(XSM_HOOK, d, bind);
        if ( rc )
            return rc;

        /* The caller must itself be allowed to access the physical IRQ. */
        if ( !irq_access_permitted(current->domain, irq) )
            return -EPERM;

        /* Reserve the virtual IRQ before routing; undone below on failure. */
        if ( !vgic_reserve_virq(d, virq) )
            return -EBUSY;

        rc = route_irq_to_guest(d, virq, irq, "routed IRQ");
        if ( rc )
            vgic_free_virq(d, virq);

        return rc;
    }
    case XEN_DOMCTL_unbind_pt_irq:
    {
        int rc;
        struct xen_domctl_bind_pt_irq *bind = &domctl->u.bind_pt_irq;
        uint32_t irq = bind->u.spi.spi;
        uint32_t virq = bind->machine_irq;

        /* We only support PT_IRQ_TYPE_SPI */
        if ( bind->irq_type != PT_IRQ_TYPE_SPI )
            return -EOPNOTSUPP;

        /* For now map the interrupt 1:1 */
        if ( irq != virq )
            return -EINVAL;

        rc = xsm_unbind_pt_irq(XSM_HOOK, d, bind);
        if ( rc )
            return rc;

        /* The caller must itself be allowed to access the physical IRQ. */
        if ( !irq_access_permitted(current->domain, irq) )
            return -EPERM;

        rc = release_guest_irq(d, virq);
        if ( rc )
            return rc;

        /* Only release the virtual IRQ once the route is torn down. */
        vgic_free_virq(d, virq);

        return 0;
    }

    case XEN_DOMCTL_vuart_op:
    {
        int rc;
        unsigned int i;
        struct xen_domctl_vuart_op *vuart_op = &domctl->u.vuart_op;

        /* Check that the structure padding is 0. */
        for ( i = 0; i < sizeof(vuart_op->pad); i++ )
            if ( vuart_op->pad[i] )
                return -EINVAL;

        switch( vuart_op->cmd )
        {
        case XEN_DOMCTL_VUART_OP_INIT:
            rc = handle_vuart_init(d, vuart_op);
            break;

        default:
            rc = -EINVAL;
            break;
        }

        /* Copy back to report the allocated event channel to the caller. */
        if ( !rc && copy_to_guest(u_domctl, domctl, 1) )
            rc = -EFAULT;

        return rc;
    }
    case XEN_DOMCTL_dt_overlay:
        return dt_overlay_domctl(d, &domctl->u.dt_overlay);
    default:
        return subarch_do_domctl(domctl, d, u_domctl);
    }
}

/*
 * Fill in the Arm-specific vCPU guest context for XEN_DOMCTL_getvcpucontext.
 */
void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
{
    struct vcpu_guest_context *ctxt = c.nat;

    /* Convert the hypervisor register layout into the guest-visible one. */
    vcpu_regs_hyp_to_user(v, &ctxt->user_regs);

    /* Copy out the cached MMU control/translation-table registers. */
    ctxt->sctlr = v->arch.sctlr;
    ctxt->ttbr0 = v->arch.ttbr0;
    ctxt->ttbr1 = v->arch.ttbr1;
    ctxt->ttbcr = v->arch.ttbcr;

    /* Report the vCPU as online unless it has been brought down. */
    if ( !test_bit(_VPF_down, &v->pause_flags) )
        ctxt->flags |= VGCF_online;
}


/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */