asm(".file \"" __FILE__ "\"");

#include <xen/types.h>
#include <xen/hypercall.h>
#include <xen/guest_access.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <xen/mem_access.h>
#include <asm/current.h>
#include <compat/memory.h>

#define xen_domid_t domid_t
#define compat_domid_t domid_compat_t
CHECK_TYPE(domid);
#undef compat_domid_t
#undef xen_domid_t

CHECK_vmemrange;

#ifdef CONFIG_HAS_PASSTHROUGH
struct get_reserved_device_memory {
    struct compat_reserved_device_memory_map map;
    unsigned int used_entries;
};

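/*
 * Callback for iommu_get_reserved_device_memory(): copies one reserved
 * range into the guest's compat-sized buffer, honouring XENMEM_RDM_ALL
 * vs. per-device filtering, and keeps counting entries even once the
 * buffer is full so the caller can report the required size.
 */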
static int get_reserved_device_memory(xen_pfn_t start, xen_ulong_t nr,
                                      u32 id, void *ctxt)
{
    struct get_reserved_device_memory *grdm = ctxt;
    u32 sbdf = PCI_SBDF3(grdm->map.dev.pci.seg, grdm->map.dev.pci.bus,
                         grdm->map.dev.pci.devfn);

    if ( !(grdm->map.flags & XENMEM_RDM_ALL) && (sbdf != id) )
        return 0;

    if ( grdm->used_entries < grdm->map.nr_entries )
    {
        struct compat_reserved_device_memory rdm = {
            .start_pfn = start, .nr_pages = nr
        };

        if ( rdm.start_pfn != start || rdm.nr_pages != nr )
            return -ERANGE;

        if ( __copy_to_compat_offset(grdm->map.buffer, grdm->used_entries,
                                     &rdm, 1) )
            return -EFAULT;
    }

    ++grdm->used_entries;

    return 1;
}
#endif

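/*
 * 32-bit (compat) entry point for the memory_op hypercall: copy the guest's
 * compat-layout arguments, translate them into their native equivalents in
 * the per-vCPU translation area, hand them to do_memory_op(), and then
 * translate any output and continuation state back into the guest's
 * structures.
 */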
int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
{
    int split, op = cmd & MEMOP_CMD_MASK;
    long rc;
    unsigned int start_extent = cmd >> MEMOP_EXTENT_SHIFT;

    do
    {
        unsigned int i, end_extent = 0;
        union {
            XEN_GUEST_HANDLE_PARAM(void) hnd;
            struct xen_memory_reservation *rsrv;
            struct xen_memory_exchange *xchg;
            struct xen_add_to_physmap *atp;
            struct xen_add_to_physmap_batch *atpb;
            struct xen_remove_from_physmap *xrfp;
            struct xen_vnuma_topology_info *vnuma;
            struct xen_mem_access_op *mao;
        } nat;
        union {
            struct compat_memory_reservation rsrv;
            struct compat_memory_exchange xchg;
            struct compat_add_to_physmap atp;
            struct compat_add_to_physmap_batch atpb;
            struct compat_vnuma_topology_info vnuma;
            struct compat_mem_access_op mao;
        } cmp;

        set_xen_guest_handle(nat.hnd, COMPAT_ARG_XLAT_VIRT_BASE);
        split = 0;
        switch ( op )
        {
            xen_pfn_t *space;

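        /*
         * Reservation ops: the native structure is built at the start of the
         * translation area, followed by as many widened extent PFNs as fit.
         * extent_start is biased by start_extent so that native and compat
         * arrays share the same indices across continuations.
         */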
        case XENMEM_increase_reservation:
        case XENMEM_decrease_reservation:
        case XENMEM_populate_physmap:
            if ( copy_from_guest(&cmp.rsrv, compat, 1) )
                return start_extent;

            /* Is size too large for us to encode a continuation? */
            if ( cmp.rsrv.nr_extents > (UINT_MAX >> MEMOP_EXTENT_SHIFT) )
                return start_extent;

            if ( !compat_handle_is_null(cmp.rsrv.extent_start) &&
                 !compat_handle_okay(cmp.rsrv.extent_start, cmp.rsrv.nr_extents) )
                return start_extent;

            end_extent = start_extent + (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.rsrv)) /
                                        sizeof(*space);
            if ( end_extent > cmp.rsrv.nr_extents )
                end_extent = cmp.rsrv.nr_extents;

            space = (xen_pfn_t *)(nat.rsrv + 1);
#define XLAT_memory_reservation_HNDL_extent_start(_d_, _s_) \
            do \
            { \
                if ( !compat_handle_is_null((_s_)->extent_start) ) \
                { \
                    set_xen_guest_handle((_d_)->extent_start, space - start_extent); \
                    if ( op != XENMEM_increase_reservation ) \
                    { \
                        for ( i = start_extent; i < end_extent; ++i ) \
                        { \
                            compat_pfn_t pfn; \
                            if ( __copy_from_compat_offset(&pfn, (_s_)->extent_start, i, 1) ) \
                            { \
                                end_extent = i; \
                                split = -1; \
                                break; \
                            } \
                            *space++ = pfn; \
                        } \
                    } \
                } \
                else \
                { \
                    set_xen_guest_handle((_d_)->extent_start, NULL); \
                    end_extent = cmp.rsrv.nr_extents; \
                } \
            } while (0)
            XLAT_memory_reservation(nat.rsrv, &cmp.rsrv);
#undef XLAT_memory_reservation_HNDL_extent_start

            if ( end_extent < cmp.rsrv.nr_extents )
            {
                nat.rsrv->nr_extents = end_extent;
                ++split;
            }

            break;

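        /*
         * XENMEM_exchange: both the input and the output extent arrays are
         * placed in the translation area behind the native structure, so the
         * batch size is limited by the larger of the two extent orders.
         * Progress is tracked via nr_exchanged rather than via the cmd
         * continuation bits.
         */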
        case XENMEM_exchange:
        {
            int order_delta;

            if ( copy_from_guest(&cmp.xchg, compat, 1) )
                return -EFAULT;

            order_delta = cmp.xchg.out.extent_order - cmp.xchg.in.extent_order;
            /* Various sanity checks. */
            if ( (cmp.xchg.nr_exchanged > cmp.xchg.in.nr_extents) ||
                 (order_delta > 0 && (cmp.xchg.nr_exchanged & ((1U << order_delta) - 1))) ||
                 /* Sizes of input and output lists do not overflow an int? */
                 ((~0U >> cmp.xchg.in.extent_order) < cmp.xchg.in.nr_extents) ||
                 ((~0U >> cmp.xchg.out.extent_order) < cmp.xchg.out.nr_extents) ||
                 /* Sizes of input and output lists match? */
                 ((cmp.xchg.in.nr_extents << cmp.xchg.in.extent_order) !=
                  (cmp.xchg.out.nr_extents << cmp.xchg.out.extent_order)) )
                return -EINVAL;

            if ( !compat_handle_okay(cmp.xchg.in.extent_start,
                                     cmp.xchg.in.nr_extents) ||
                 !compat_handle_okay(cmp.xchg.out.extent_start,
                                     cmp.xchg.out.nr_extents) )
                return -EFAULT;

            start_extent = cmp.xchg.nr_exchanged;
            end_extent = (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.xchg)) /
                         (((1U << ABS(order_delta)) + 1) *
                          sizeof(*space));
            if ( end_extent == 0 )
            {
                printk("Cannot translate compatibility mode XENMEM_exchange extents (%u,%u)\n",
                       cmp.xchg.in.extent_order, cmp.xchg.out.extent_order);
                return -E2BIG;
            }
            if ( order_delta > 0 )
                end_extent <<= order_delta;
            end_extent += start_extent;
            if ( end_extent > cmp.xchg.in.nr_extents )
                end_extent = cmp.xchg.in.nr_extents;

            space = (xen_pfn_t *)(nat.xchg + 1);
            /* Code below depends upon .in preceding .out. */
            BUILD_BUG_ON(offsetof(xen_memory_exchange_t, in) > offsetof(xen_memory_exchange_t, out));
#define XLAT_memory_reservation_HNDL_extent_start(_d_, _s_) \
            do \
            { \
                set_xen_guest_handle((_d_)->extent_start, space - start_extent); \
                for ( i = start_extent; i < end_extent; ++i ) \
                { \
                    compat_pfn_t pfn; \
                    if ( __copy_from_compat_offset(&pfn, (_s_)->extent_start, i, 1) ) \
                        return -EFAULT; \
                    *space++ = pfn; \
                } \
                if ( order_delta > 0 ) \
                { \
                    start_extent >>= order_delta; \
                    end_extent >>= order_delta; \
                } \
                else \
                { \
                    start_extent <<= -order_delta; \
                    end_extent <<= -order_delta; \
                } \
                order_delta = -order_delta; \
            } while (0)
            XLAT_memory_exchange(nat.xchg, &cmp.xchg);
#undef XLAT_memory_reservation_HNDL_extent_start

            if ( end_extent < cmp.xchg.in.nr_extents )
            {
                nat.xchg->in.nr_extents = end_extent;
                if ( order_delta >= 0 )
                    nat.xchg->out.nr_extents = end_extent >> order_delta;
                else
                    nat.xchg->out.nr_extents = end_extent << -order_delta;
                ++split;
            }

            break;
        }

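        /*
         * Simple queries: the domid argument (where present) has an
         * identical layout in both ABIs (see CHECK_TYPE(domid) above), so
         * the guest handle is passed through untranslated.
         */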
        case XENMEM_current_reservation:
        case XENMEM_maximum_reservation:
        case XENMEM_maximum_gpfn:
        case XENMEM_maximum_ram_page:
            nat.hnd = compat;
            break;

        case XENMEM_add_to_physmap:
            BUILD_BUG_ON((typeof(cmp.atp.size))-1 >
                         (UINT_MAX >> MEMOP_EXTENT_SHIFT));

            if ( copy_from_guest(&cmp.atp, compat, 1) )
                return -EFAULT;

            XLAT_add_to_physmap(nat.atp, &cmp.atp);

            break;

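        /*
         * The index and GPFN arrays are widened into the translation area
         * behind the native structure; the errs array needs no widening and
         * is reached through the guest handle directly.
         */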
        case XENMEM_add_to_physmap_batch:
        {
            unsigned int limit = (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.atpb))
                                 / (sizeof(nat.atpb->idxs.p) + sizeof(nat.atpb->gpfns.p));
            /* Use an intermediate variable to suppress warnings on old gcc: */
            unsigned int size;
            xen_ulong_t *idxs = (void *)(nat.atpb + 1);
            xen_pfn_t *gpfns = (void *)(idxs + limit);
            /*
             * The union will always be 16 bits wide, so it is not
             * necessary to use the exact field corresponding to the
             * space.
             */
            enum XLAT_add_to_physmap_batch_u u =
                XLAT_add_to_physmap_batch_u_res0;

            if ( copy_from_guest(&cmp.atpb, compat, 1) )
                return -EFAULT;
            size = cmp.atpb.size;
            if ( !compat_handle_okay(cmp.atpb.idxs, size) ||
                 !compat_handle_okay(cmp.atpb.gpfns, size) ||
                 !compat_handle_okay(cmp.atpb.errs, size) )
                return -EFAULT;

            end_extent = start_extent + limit;
            if ( end_extent > size )
                end_extent = size;

            idxs -= start_extent;
            gpfns -= start_extent;

            for ( i = start_extent; i < end_extent; ++i )
            {
                compat_ulong_t idx;
                compat_pfn_t gpfn;

                if ( __copy_from_compat_offset(&idx, cmp.atpb.idxs, i, 1) ||
                     __copy_from_compat_offset(&gpfn, cmp.atpb.gpfns, i, 1) )
                    return -EFAULT;
                idxs[i] = idx;
                gpfns[i] = gpfn;
            }

#define XLAT_add_to_physmap_batch_HNDL_idxs(_d_, _s_) \
            set_xen_guest_handle((_d_)->idxs, idxs)
#define XLAT_add_to_physmap_batch_HNDL_gpfns(_d_, _s_) \
            set_xen_guest_handle((_d_)->gpfns, gpfns)
#define XLAT_add_to_physmap_batch_HNDL_errs(_d_, _s_) \
            guest_from_compat_handle((_d_)->errs, (_s_)->errs)

            XLAT_add_to_physmap_batch(nat.atpb, &cmp.atpb);

#undef XLAT_add_to_physmap_batch_HNDL_errs
#undef XLAT_add_to_physmap_batch_HNDL_gpfns
#undef XLAT_add_to_physmap_batch_HNDL_idxs

            if ( end_extent < cmp.atpb.size )
            {
                nat.atpb->size = end_extent;
                ++split;
            }

            break;
        }

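        /*
         * XENMEM_remove_from_physmap contains no guest handles, so a plain
         * field-by-field translation is all that is needed.
         */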
        case XENMEM_remove_from_physmap:
        {
            struct compat_remove_from_physmap cmp;

            if ( copy_from_guest(&cmp, compat, 1) )
                return -EFAULT;

            XLAT_remove_from_physmap(nat.xrfp, &cmp);

            break;
        }

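        /*
         * XENMEM_access_op: the pfn_list and access_list handles only need
         * converting from compat to native guest handles; the entries
         * themselves are not widened here.
         */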
        case XENMEM_access_op:
            if ( copy_from_guest(&cmp.mao, compat, 1) )
                return -EFAULT;

#define XLAT_mem_access_op_HNDL_pfn_list(_d_, _s_) \
            guest_from_compat_handle((_d_)->pfn_list, (_s_)->pfn_list)
#define XLAT_mem_access_op_HNDL_access_list(_d_, _s_) \
            guest_from_compat_handle((_d_)->access_list, (_s_)->access_list)

            XLAT_mem_access_op(nat.mao, &cmp.mao);

#undef XLAT_mem_access_op_HNDL_pfn_list
#undef XLAT_mem_access_op_HNDL_access_list

            break;

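        /*
         * XENMEM_get_vnumainfo: the embedded buffers use fixed-width types,
         * so only the handles need translating; the nr_* sizes are copied
         * back to the guest on success and on -ENOBUFS (see below).
         */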
        case XENMEM_get_vnumainfo:
        {
            enum XLAT_vnuma_topology_info_vdistance vdistance =
                XLAT_vnuma_topology_info_vdistance_h;
            enum XLAT_vnuma_topology_info_vcpu_to_vnode vcpu_to_vnode =
                XLAT_vnuma_topology_info_vcpu_to_vnode_h;
            enum XLAT_vnuma_topology_info_vmemrange vmemrange =
                XLAT_vnuma_topology_info_vmemrange_h;

            if ( copy_from_guest(&cmp.vnuma, compat, 1) )
                return -EFAULT;

#define XLAT_vnuma_topology_info_HNDL_vdistance_h(_d_, _s_) \
            guest_from_compat_handle((_d_)->vdistance.h, (_s_)->vdistance.h)
#define XLAT_vnuma_topology_info_HNDL_vcpu_to_vnode_h(_d_, _s_) \
            guest_from_compat_handle((_d_)->vcpu_to_vnode.h, (_s_)->vcpu_to_vnode.h)
#define XLAT_vnuma_topology_info_HNDL_vmemrange_h(_d_, _s_) \
            guest_from_compat_handle((_d_)->vmemrange.h, (_s_)->vmemrange.h)

            XLAT_vnuma_topology_info(nat.vnuma, &cmp.vnuma);

#undef XLAT_vnuma_topology_info_HNDL_vdistance_h
#undef XLAT_vnuma_topology_info_HNDL_vcpu_to_vnode_h
#undef XLAT_vnuma_topology_info_HNDL_vmemrange_h
            break;
        }

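        /*
         * XENMEM_reserved_device_memory_map (when passthrough support is
         * built in) is handled entirely here via the callback above and
         * returns without going through do_memory_op().
         */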
#ifdef CONFIG_HAS_PASSTHROUGH
        case XENMEM_reserved_device_memory_map:
        {
            struct get_reserved_device_memory grdm;

            if ( unlikely(start_extent) )
                return -EINVAL;

            if ( copy_from_guest(&grdm.map, compat, 1) ||
                 !compat_handle_okay(grdm.map.buffer, grdm.map.nr_entries) )
                return -EFAULT;

            if ( grdm.map.flags & ~XENMEM_RDM_ALL )
                return -EINVAL;

            grdm.used_entries = 0;
            rc = iommu_get_reserved_device_memory(get_reserved_device_memory,
                                                  &grdm);

            if ( !rc && grdm.map.nr_entries < grdm.used_entries )
                rc = -ENOBUFS;
            grdm.map.nr_entries = grdm.used_entries;
            if ( __copy_to_guest(compat, &grdm.map, 1) )
                rc = -EFAULT;

            return rc;
        }
#endif

        default:
            return compat_arch_memory_op(cmd, compat);
        }

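        /*
         * Invoke the native handler on the translated arguments, then map
         * its result and any in-place modifications back into the guest's
         * compat structures below.
         */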
        rc = do_memory_op(cmd, nat.hnd);
        if ( rc < 0 )
        {
            if ( rc == -ENOBUFS && op == XENMEM_get_vnumainfo )
            {
                cmp.vnuma.nr_vnodes = nat.vnuma->nr_vnodes;
                cmp.vnuma.nr_vcpus = nat.vnuma->nr_vcpus;
                cmp.vnuma.nr_vmemranges = nat.vnuma->nr_vmemranges;
                if ( __copy_to_guest(compat, &cmp.vnuma, 1) )
                    rc = -EFAULT;
            }
            break;
        }

        cmd = 0;
        if ( hypercall_xlat_continuation(&cmd, 2, 0x02, nat.hnd, compat) )
        {
            BUG_ON(rc != __HYPERVISOR_memory_op);
            BUG_ON((cmd & MEMOP_CMD_MASK) != op);
            split = -1;
        }

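        /*
         * Translate outputs: copy widened PFNs back into the guest's 32-bit
         * arrays and work out where any continuation has to resume.
         */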
        switch ( op )
        {
        case XENMEM_increase_reservation:
        case XENMEM_decrease_reservation:
        case XENMEM_populate_physmap:
            end_extent = split >= 0 ? rc : cmd >> MEMOP_EXTENT_SHIFT;
            if ( (op != XENMEM_decrease_reservation) &&
                 !guest_handle_is_null(nat.rsrv->extent_start) )
            {
                for ( ; start_extent < end_extent; ++start_extent )
                {
                    compat_pfn_t pfn = nat.rsrv->extent_start.p[start_extent];

                    BUG_ON(pfn != nat.rsrv->extent_start.p[start_extent]);
                    if ( __copy_to_compat_offset(cmp.rsrv.extent_start,
                                                 start_extent, &pfn, 1) )
                    {
                        if ( split >= 0 )
                        {
                            rc = start_extent;
                            split = 0;
                        }
                        else
                            /*
                             * Short of being able to cancel the continuation,
                             * force it to restart here; eventually we shall
                             * get out of this state.
                             */
                            rc = (start_extent << MEMOP_EXTENT_SHIFT) | op;
                        break;
                    }
                }
            }
            else
            {
                start_extent = end_extent;
            }
            /* Bail if there was an error. */
            if ( (split >= 0) && (end_extent != nat.rsrv->nr_extents) )
                split = 0;
            break;

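        /*
         * XENMEM_exchange: copy the produced output extents back and report
         * progress through the guest-visible nr_exchanged field.
         */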
        case XENMEM_exchange:
        {
            DEFINE_XEN_GUEST_HANDLE(compat_memory_exchange_t);
            int order_delta;

            BUG_ON(split >= 0 && rc);
            BUG_ON(end_extent < nat.xchg->nr_exchanged);
            end_extent = nat.xchg->nr_exchanged;

            order_delta = cmp.xchg.out.extent_order - cmp.xchg.in.extent_order;
            if ( order_delta > 0 )
            {
                start_extent >>= order_delta;
                BUG_ON(end_extent & ((1U << order_delta) - 1));
                end_extent >>= order_delta;
            }
            else
            {
                start_extent <<= -order_delta;
                end_extent <<= -order_delta;
            }

            for ( ; start_extent < end_extent; ++start_extent )
            {
                compat_pfn_t pfn = nat.xchg->out.extent_start.p[start_extent];

                BUG_ON(pfn != nat.xchg->out.extent_start.p[start_extent]);
                if ( __copy_to_compat_offset(cmp.xchg.out.extent_start,
                                             start_extent, &pfn, 1) )
                {
                    rc = -EFAULT;
                    break;
                }
            }

            cmp.xchg.nr_exchanged = nat.xchg->nr_exchanged;
            if ( __copy_field_to_guest(guest_handle_cast(compat,
                                                         compat_memory_exchange_t),
                                       &cmp.xchg, nr_exchanged) )
                rc = -EFAULT;

            if ( rc < 0 )
            {
                if ( split < 0 )
                    /* Cannot cancel the continuation... */
                    domain_crash(current->domain);
                return rc;
            }
            break;
        }

        case XENMEM_add_to_physmap_batch:
            start_extent = end_extent;
            break;

        case XENMEM_maximum_ram_page:
        case XENMEM_current_reservation:
        case XENMEM_maximum_reservation:
        case XENMEM_maximum_gpfn:
        case XENMEM_add_to_physmap:
        case XENMEM_remove_from_physmap:
        case XENMEM_access_op:
            break;

        case XENMEM_get_vnumainfo:
            cmp.vnuma.nr_vnodes = nat.vnuma->nr_vnodes;
            cmp.vnuma.nr_vcpus = nat.vnuma->nr_vcpus;
            cmp.vnuma.nr_vmemranges = nat.vnuma->nr_vmemranges;
            if ( __copy_to_guest(compat, &cmp.vnuma, 1) )
                rc = -EFAULT;
            break;

        default:
            domain_crash(current->domain);
            split = 0;
            break;
        }

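        /*
         * Re-encode the continuation point and either loop for the next
         * batch or hand back a continuation if a preemption is pending.
         */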
        cmd = op | (start_extent << MEMOP_EXTENT_SHIFT);
        if ( split > 0 && hypercall_preempt_check() )
            return hypercall_create_continuation(
                __HYPERVISOR_memory_op, "ih", cmd, compat);
    } while ( split > 0 );

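    /*
     * The compat hypercall return value is a 32-bit int; clamp out-of-range
     * native results rather than letting them be truncated.
     */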
    if ( unlikely(rc > INT_MAX) )
        return INT_MAX;

    if ( unlikely(rc < INT_MIN) )
        return INT_MIN;

    return rc;
}