#include <xen/event.h>
#include <xen/mem_access.h>
#include <xen/multicall.h>
#include <compat/memory.h>
#include <compat/xen.h>
#include <asm/mem_paging.h>
#include <asm/mem_sharing.h>

#include <asm/pv/mm.h>

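/*
 * Compat (32-bit guest on 64-bit Xen) translation of the arch-specific
 * memory ops: arguments are converted into their native layout in the
 * per-vCPU translation area, handed to the native handlers, and results
 * are converted back for the guest.
 */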
int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
{
    struct compat_machphys_mfn_list xmml;
    l2_pgentry_t l2e;
    unsigned long v;
    compat_pfn_t mfn;
    unsigned int i;
    int rc = 0;

    switch ( cmd )
    {
    case XENMEM_set_memory_map:
    {
        struct compat_foreign_memory_map cmp;
        struct xen_foreign_memory_map *nat = COMPAT_ARG_XLAT_VIRT_BASE;

        if ( copy_from_guest(&cmp, arg, 1) )
            return -EFAULT;

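        /*
         * The generated XLAT_foreign_memory_map() copies the scalar
         * fields; the hook below additionally converts the embedded
         * 32-bit buffer handle into a native guest handle.
         */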
#define XLAT_memory_map_HNDL_buffer(_d_, _s_) \
        guest_from_compat_handle((_d_)->buffer, (_s_)->buffer)
        XLAT_foreign_memory_map(nat, &cmp);
#undef XLAT_memory_map_HNDL_buffer

        rc = arch_memory_op(cmd, guest_handle_from_ptr(nat, void));

        break;
    }

    case XENMEM_memory_map:
    case XENMEM_machine_memory_map:
    {
        struct compat_memory_map cmp;
        struct xen_memory_map *nat = COMPAT_ARG_XLAT_VIRT_BASE;

        if ( copy_from_guest(&cmp, arg, 1) )
            return -EFAULT;

#define XLAT_memory_map_HNDL_buffer(_d_, _s_) \
        guest_from_compat_handle((_d_)->buffer, (_s_)->buffer)
        XLAT_memory_map(nat, &cmp);
#undef XLAT_memory_map_HNDL_buffer

        rc = arch_memory_op(cmd, guest_handle_from_ptr(nat, void));
        if ( rc < 0 )
            break;

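        /*
         * Only the scalar fields need translating back; the buffer handle
         * in the compat structure is still the guest's own, so the handle
         * hook is a no-op on the return path.
         */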
#define XLAT_memory_map_HNDL_buffer(_d_, _s_) ((void)0)
        XLAT_memory_map(&cmp, nat);
#undef XLAT_memory_map_HNDL_buffer
        if ( __copy_to_guest(arg, &cmp, 1) )
            rc = -EFAULT;

        break;
    }

    case XENMEM_set_pod_target:
    case XENMEM_get_pod_target:
    {
        struct compat_pod_target cmp;
        struct xen_pod_target *nat = COMPAT_ARG_XLAT_VIRT_BASE;

        if ( copy_from_guest(&cmp, arg, 1) )
            return -EFAULT;

        XLAT_pod_target(nat, &cmp);

        rc = arch_memory_op(cmd, guest_handle_from_ptr(nat, void));
        if ( rc < 0 )
            break;

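        /*
         * A positive rc equal to the hypercall number indicates that
         * arch_memory_op() created a continuation; rewrite its argument
         * from the translation area back to the guest's compat argument.
         */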
        if ( rc == __HYPERVISOR_memory_op )
            hypercall_xlat_continuation(NULL, 2, 0x2, nat, arg);

        XLAT_pod_target(&cmp, nat);

        if ( __copy_to_guest(arg, &cmp, 1) )
        {
            if ( rc == __HYPERVISOR_memory_op )
                hypercall_cancel_continuation(current);
            rc = -EFAULT;
        }

        break;
    }

    case XENMEM_machphys_mapping:
    {
        struct domain *d = current->domain;
        struct compat_machphys_mapping mapping = {
            .v_start = MACH2PHYS_COMPAT_VIRT_START(d),
            .v_end   = MACH2PHYS_COMPAT_VIRT_END,
            .max_mfn = MACH2PHYS_COMPAT_NR_ENTRIES(d) - 1
        };

        if ( copy_to_guest(arg, &mapping, 1) )
            rc = -EFAULT;

        break;
    }

    case XENMEM_machphys_mfn_list:
    case XENMEM_machphys_compat_mfn_list:
    {
        unsigned long limit;
        compat_pfn_t last_mfn;

        if ( copy_from_guest(&xmml, arg, 1) )
            return -EFAULT;

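        /*
         * The compat M2P table is mapped with 2Mb (L2) superpage entries;
         * report the MFN backing each present slot, repeating the last
         * seen MFN across holes.
         */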
        limit = (unsigned long)(compat_machine_to_phys_mapping + max_page);
        if ( limit > RDWR_COMPAT_MPT_VIRT_END )
            limit = RDWR_COMPAT_MPT_VIRT_END;
        for ( i = 0, v = RDWR_COMPAT_MPT_VIRT_START, last_mfn = 0;
              (i != xmml.max_extents) && (v < limit);
              i++, v += 1 << L2_PAGETABLE_SHIFT )
        {
            l2e = compat_idle_pg_table_l2[l2_table_offset(v)];
            if ( l2e_get_flags(l2e) & _PAGE_PRESENT )
                mfn = l2e_get_pfn(l2e);
            else
                mfn = last_mfn;
            ASSERT(mfn);
            if ( copy_to_compat_offset(xmml.extent_start, i, &mfn, 1) )
                return -EFAULT;
            last_mfn = mfn;
        }

        xmml.nr_extents = i;
        if ( __copy_to_guest(arg, &xmml, 1) )
            rc = -EFAULT;

        break;
    }

    case XENMEM_get_sharing_freed_pages:
        return mem_sharing_get_nr_saved_mfns();

    case XENMEM_get_sharing_shared_pages:
        return mem_sharing_get_nr_shared_mfns();

    case XENMEM_paging_op:
        return mem_paging_memop(guest_handle_cast(arg, xen_mem_paging_op_t));

    case XENMEM_sharing_op:
        return mem_sharing_memop(guest_handle_cast(arg, xen_mem_sharing_op_t));

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}

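/*
 * 32-bit callers pass the 64-bit PTE value split into two 32-bit halves;
 * reassemble it before handing off to the native implementation.
 */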
int compat_update_va_mapping(unsigned int va, u32 lo, u32 hi,
                             unsigned int flags)
{
    return do_update_va_mapping(va, lo | ((u64)hi << 32), flags);
}

int compat_update_va_mapping_otherdomain(unsigned long va, u32 lo, u32 hi,
                                         unsigned long flags,
                                         domid_t domid)
{
    return do_update_va_mapping_otherdomain(va, lo | ((u64)hi << 32), flags,
                                            domid);
}

DEFINE_XEN_GUEST_HANDLE(mmuext_op_compat_t);

int compat_mmuext_op(XEN_GUEST_HANDLE_PARAM(void) arg,
                     unsigned int count,
                     XEN_GUEST_HANDLE_PARAM(uint) pdone,
                     unsigned int foreigndom)
{
    unsigned int i, preempt_mask;
    int rc = 0;
    XEN_GUEST_HANDLE_PARAM(mmuext_op_compat_t) cmp_uops =
        guest_handle_cast(arg, mmuext_op_compat_t);
    XEN_GUEST_HANDLE_PARAM(mmuext_op_t) nat_ops;

    if ( unlikely(count == MMU_UPDATE_PREEMPTED) &&
         likely(guest_handle_is_null(cmp_uops)) )
    {
        set_xen_guest_handle(nat_ops, NULL);
        return do_mmuext_op(nat_ops, count, pdone, foreigndom);
    }

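    /*
     * A continuation encodes MMU_UPDATE_PREEMPTED in the count; strip it
     * here and re-apply it to each batch handed to do_mmuext_op().
     */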
    preempt_mask = count & MMU_UPDATE_PREEMPTED;
    count ^= preempt_mask;

    if ( unlikely(!guest_handle_okay(cmp_uops, count)) )
        return -EFAULT;

    set_xen_guest_handle(nat_ops, COMPAT_ARG_XLAT_VIRT_BASE);

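    /* Translate and process the ops in batches that fit the xlat area. */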
    for ( ; count; count -= i )
    {
        mmuext_op_t *nat_op = nat_ops.p;
        unsigned int limit = COMPAT_ARG_XLAT_SIZE / sizeof(*nat_op);
        int err;

        for ( i = 0; i < min(limit, count); ++i )
        {
            mmuext_op_compat_t cmp_op;
            enum XLAT_mmuext_op_arg1 arg1;
            enum XLAT_mmuext_op_arg2 arg2;

            if ( unlikely(__copy_from_guest(&cmp_op, cmp_uops, 1) != 0) )
            {
                rc = -EFAULT;
                break;
            }

            switch ( cmp_op.cmd )
            {
            case MMUEXT_PIN_L1_TABLE:
            case MMUEXT_PIN_L2_TABLE:
            case MMUEXT_PIN_L3_TABLE:
            case MMUEXT_PIN_L4_TABLE:
            case MMUEXT_UNPIN_TABLE:
            case MMUEXT_NEW_BASEPTR:
            case MMUEXT_CLEAR_PAGE:
            case MMUEXT_COPY_PAGE:
                arg1 = XLAT_mmuext_op_arg1_mfn;
                break;
            default:
                arg1 = XLAT_mmuext_op_arg1_linear_addr;
                break;
            case MMUEXT_NEW_USER_BASEPTR:
                rc = -EINVAL;
                /* fallthrough */
            case MMUEXT_TLB_FLUSH_LOCAL:
            case MMUEXT_TLB_FLUSH_MULTI:
            case MMUEXT_TLB_FLUSH_ALL:
            case MMUEXT_FLUSH_CACHE:
                arg1 = -1;
                break;
            }

            if ( rc )
                break;

            switch ( cmp_op.cmd )
            {
            case MMUEXT_SET_LDT:
                arg2 = XLAT_mmuext_op_arg2_nr_ents;
                break;
            case MMUEXT_TLB_FLUSH_MULTI:
            case MMUEXT_INVLPG_MULTI:
                arg2 = XLAT_mmuext_op_arg2_vcpumask;
                break;
            case MMUEXT_COPY_PAGE:
                arg2 = XLAT_mmuext_op_arg2_src_mfn;
                break;
            default:
                arg2 = -1;
                break;
            }

#define XLAT_mmuext_op_HNDL_arg2_vcpumask(_d_, _s_) \
        guest_from_compat_handle((_d_)->arg2.vcpumask, (_s_)->arg2.vcpumask)
            XLAT_mmuext_op(nat_op, &cmp_op);
#undef XLAT_mmuext_op_HNDL_arg2_vcpumask

            if ( rc || i >= limit )
                break;

            guest_handle_add_offset(cmp_uops, 1);
            ++nat_op;
        }

        err = do_mmuext_op(nat_ops, i | preempt_mask, pdone, foreigndom);

        if ( err )
        {
            BUILD_BUG_ON(__HYPERVISOR_mmuext_op <= 0);
            if ( err == __HYPERVISOR_mmuext_op )
            {
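                /*
                 * do_mmuext_op() created a continuation for this batch.
                 * Work out how many ops of the batch remain, rewind the
                 * compat handle accordingly, and rewrite the continuation
                 * to use the compat handle and overall remaining count.
                 */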
                struct cpu_user_regs *regs = guest_cpu_user_regs();
                struct mc_state *mcs = &current->mc_state;
                unsigned int arg1 = !(mcs->flags & MCSF_in_multicall)
                                    ? regs->ecx
                                    : mcs->call.args[1];
                unsigned int left = arg1 & ~MMU_UPDATE_PREEMPTED;

                BUG_ON(left == arg1 && left != i);
                BUG_ON(left > count);
                guest_handle_add_offset(nat_ops, i - left);
                guest_handle_subtract_offset(cmp_uops, left);
                left = 1;
                if ( arg1 != MMU_UPDATE_PREEMPTED )
                {
                    BUG_ON(!hypercall_xlat_continuation(&left, 4, 0x01,
                                                        nat_ops, cmp_uops));
                    if ( !(mcs->flags & MCSF_in_multicall) )
                        regs->ecx += count - i;
                    else
                        mcs->compat_call.args[1] += count - i;
                }
                else
                    BUG_ON(hypercall_xlat_continuation(&left, 4, 0));
                BUG_ON(left != arg1);
            }
            else
                BUG_ON(err > 0);
            rc = err;
        }

        if ( rc )
            break;

        /* Force do_mmuext_op() to not start counting from zero again. */
        preempt_mask = MMU_UPDATE_PREEMPTED;
    }

    return rc;
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */