EMIT_FILE;

#include <xen/types.h>
#include <xen/hypercall.h>
#include <xen/guest_access.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <xen/mem_access.h>
#include <asm/current.h>
#include <asm/guest.h>
#include <compat/memory.h>

#define xen_domid_t domid_t
#define compat_domid_t domid_compat_t
CHECK_TYPE(domid);
#undef compat_domid_t
#undef xen_domid_t

CHECK_vmemrange;

#ifdef CONFIG_HAS_PASSTHROUGH
struct get_reserved_device_memory {
    struct compat_reserved_device_memory_map map;
    unsigned int used_entries;
};

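/*
 * Callback for iommu_get_reserved_device_memory(): translate one reserved
 * region into the compat layout and copy it into the guest's buffer.
 * Matching entries are counted even once the buffer is full, so the total
 * number needed can be reported back to the guest.
 */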
static int cf_check get_reserved_device_memory(
    xen_pfn_t start, xen_ulong_t nr, u32 id, void *ctxt)
{
    struct get_reserved_device_memory *grdm = ctxt;
    uint32_t sbdf = PCI_SBDF(grdm->map.dev.pci.seg, grdm->map.dev.pci.bus,
                             grdm->map.dev.pci.devfn).sbdf;

    if ( !(grdm->map.flags & XENMEM_RDM_ALL) && (sbdf != id) )
        return 0;

    if ( grdm->used_entries < grdm->map.nr_entries )
    {
        struct compat_reserved_device_memory rdm = {
            .start_pfn = start, .nr_pages = nr
        };

        if ( rdm.start_pfn != start || rdm.nr_pages != nr )
            return -ERANGE;

        if ( __copy_to_compat_offset(grdm->map.buffer, grdm->used_entries,
                                     &rdm, 1) )
            return -EFAULT;
    }

    ++grdm->used_entries;

    return 1;
}
#endif

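/*
 * Compat (32-bit guest) entry point for the memory_op hypercall: copy the
 * guest's arguments, translate them into their native layout in the XLAT
 * area, hand them to do_memory_op(), then translate the results back,
 * looping or creating continuations as needed for large requests.
 */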
int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
{
    struct vcpu *curr = current;
    struct domain *currd = curr->domain;
    int split, op = cmd & MEMOP_CMD_MASK;
    long rc;
    unsigned int start_extent = cmd >> MEMOP_EXTENT_SHIFT;

    do
    {
        unsigned int i, end_extent = 0;
        union {
            XEN_GUEST_HANDLE_PARAM(void) hnd;
            struct xen_memory_reservation *rsrv;
            struct xen_memory_exchange *xchg;
            struct xen_add_to_physmap *atp;
            struct xen_add_to_physmap_batch *atpb;
            struct xen_remove_from_physmap *xrfp;
            struct xen_vnuma_topology_info *vnuma;
            struct xen_mem_access_op *mao;
            struct xen_mem_acquire_resource *mar;
        } nat;
        union {
            struct compat_memory_reservation rsrv;
            struct compat_memory_exchange xchg;
            struct compat_add_to_physmap atp;
            struct compat_add_to_physmap_batch atpb;
            struct compat_remove_from_physmap xrfp;
            struct compat_vnuma_topology_info vnuma;
            struct compat_mem_access_op mao;
            struct compat_mem_acquire_resource mar;
        } cmp;

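        /*
         * nat points into the argument translation (XLAT) area and carries
         * the native-width view handed to do_memory_op(); cmp carries the
         * compat-width view as copied from guest memory.
         */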
        set_xen_guest_handle(nat.hnd, COMPAT_ARG_XLAT_VIRT_BASE);
        split = 0;
        switch ( op )
        {
            xen_pfn_t *space;

        case XENMEM_increase_reservation:
        case XENMEM_decrease_reservation:
        case XENMEM_populate_physmap:
            if ( copy_from_guest(&cmp.rsrv, arg, 1) )
                return start_extent;

            /* Is size too large for us to encode a continuation? */
            if ( cmp.rsrv.nr_extents > (UINT_MAX >> MEMOP_EXTENT_SHIFT) )
                return start_extent;

            if ( !compat_handle_is_null(cmp.rsrv.extent_start) &&
                 !compat_handle_okay(cmp.rsrv.extent_start, cmp.rsrv.nr_extents) )
                return start_extent;

            end_extent = start_extent + (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.rsrv)) /
                                        sizeof(*space);
            if ( end_extent > cmp.rsrv.nr_extents )
                end_extent = cmp.rsrv.nr_extents;

            space = (xen_pfn_t *)(nat.rsrv + 1);
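            /*
             * Override the generated extent_start translation: point it at
             * the xen_pfn_t array following the native structure and, except
             * for XENMEM_increase_reservation, widen the guest's compat PFNs
             * into that array.
             */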
#define XLAT_memory_reservation_HNDL_extent_start(_d_, _s_) \
            do \
            { \
                if ( !compat_handle_is_null((_s_)->extent_start) ) \
                { \
                    set_xen_guest_handle((_d_)->extent_start, space - start_extent); \
                    if ( op != XENMEM_increase_reservation ) \
                    { \
                        for ( i = start_extent; i < end_extent; ++i ) \
                        { \
                            compat_pfn_t pfn; \
                            if ( __copy_from_compat_offset(&pfn, (_s_)->extent_start, i, 1) ) \
                            { \
                                end_extent = i; \
                                split = -1; \
                                break; \
                            } \
                            *space++ = pfn; \
                        } \
                    } \
                } \
                else \
                { \
                    set_xen_guest_handle((_d_)->extent_start, NULL); \
                    end_extent = cmp.rsrv.nr_extents; \
                } \
            } while (0)
            XLAT_memory_reservation(nat.rsrv, &cmp.rsrv);
#undef XLAT_memory_reservation_HNDL_extent_start

            if ( end_extent < cmp.rsrv.nr_extents )
            {
                nat.rsrv->nr_extents = end_extent;
                ++split;
            }
            /* Avoid calling pv_shim_online_memory() when in a continuation. */
            if ( pv_shim && op != XENMEM_decrease_reservation && !start_extent )
                pv_shim_online_memory(cmp.rsrv.nr_extents - nat.rsrv->nr_extents,
                                      cmp.rsrv.extent_order);
            break;

        case XENMEM_exchange:
        {
            int order_delta;

            if ( copy_from_guest(&cmp.xchg, arg, 1) )
                return -EFAULT;

            /* Early coarse check, as max_order() isn't available here. */
            if ( cmp.xchg.in.extent_order >= BITS_PER_INT ||
                 cmp.xchg.out.extent_order >= BITS_PER_INT )
                return -EPERM;

            order_delta = cmp.xchg.out.extent_order - cmp.xchg.in.extent_order;
            /* Various sanity checks. */
            if ( (cmp.xchg.nr_exchanged > cmp.xchg.in.nr_extents) ||
                 (order_delta > 0 && (cmp.xchg.nr_exchanged & ((1U << order_delta) - 1))) ||
                 /* Sizes of input and output lists do not overflow an int? */
                 ((~0U >> cmp.xchg.in.extent_order) < cmp.xchg.in.nr_extents) ||
                 ((~0U >> cmp.xchg.out.extent_order) < cmp.xchg.out.nr_extents) ||
                 /* Sizes of input and output lists match? */
                 ((cmp.xchg.in.nr_extents << cmp.xchg.in.extent_order) !=
                  (cmp.xchg.out.nr_extents << cmp.xchg.out.extent_order)) )
                return -EINVAL;

            if ( !compat_handle_okay(cmp.xchg.in.extent_start,
                                     cmp.xchg.in.nr_extents) ||
                 !compat_handle_okay(cmp.xchg.out.extent_start,
                                     cmp.xchg.out.nr_extents) )
                return -EFAULT;

            start_extent = cmp.xchg.nr_exchanged;
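            /*
             * The space following the native structure must hold both the
             * input and the output PFN arrays; the finer-grained side needs
             * (1 << |order_delta|) entries for every entry on the coarser
             * side, hence the divisor below.
             */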
            end_extent = (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.xchg)) /
                         (((1U << ABS(order_delta)) + 1) *
                          sizeof(*space));
            if ( end_extent == 0 )
            {
                printk("Cannot translate compatibility mode XENMEM_exchange extents (%u,%u)\n",
                       cmp.xchg.in.extent_order, cmp.xchg.out.extent_order);
                return -E2BIG;
            }
            if ( order_delta > 0 )
                end_extent <<= order_delta;
            end_extent += start_extent;
            if ( end_extent > cmp.xchg.in.nr_extents )
                end_extent = cmp.xchg.in.nr_extents;

            space = (xen_pfn_t *)(nat.xchg + 1);
            /* Code below depends upon .in preceding .out. */
            BUILD_BUG_ON(offsetof(xen_memory_exchange_t, in) > offsetof(xen_memory_exchange_t, out));
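            /*
             * This override is expanded twice by XLAT_memory_exchange(), once
             * for .in and once for .out; the start/end rescaling and the
             * negation of order_delta at the end of each expansion keep the
             * bookkeeping in the granularity of whichever list is processed
             * next, and restore the original values once both are done.
             */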
#define XLAT_memory_reservation_HNDL_extent_start(_d_, _s_) \
            do \
            { \
                set_xen_guest_handle((_d_)->extent_start, space - start_extent); \
                for ( i = start_extent; i < end_extent; ++i ) \
                { \
                    compat_pfn_t pfn; \
                    if ( __copy_from_compat_offset(&pfn, (_s_)->extent_start, i, 1) ) \
                        return -EFAULT; \
                    *space++ = pfn; \
                } \
                if ( order_delta > 0 ) \
                { \
                    start_extent >>= order_delta; \
                    end_extent >>= order_delta; \
                } \
                else \
                { \
                    start_extent <<= -order_delta; \
                    end_extent <<= -order_delta; \
                } \
                order_delta = -order_delta; \
            } while (0)
            XLAT_memory_exchange(nat.xchg, &cmp.xchg);
#undef XLAT_memory_reservation_HNDL_extent_start

            if ( end_extent < cmp.xchg.in.nr_extents )
            {
                nat.xchg->in.nr_extents = end_extent;
                if ( order_delta >= 0 )
                    nat.xchg->out.nr_extents = end_extent >> order_delta;
                else
                    nat.xchg->out.nr_extents = end_extent << -order_delta;
                ++split;
            }

            break;
        }

        case XENMEM_current_reservation:
        case XENMEM_maximum_reservation:
        case XENMEM_maximum_gpfn:
        case XENMEM_maximum_ram_page:
            nat.hnd = arg;
            break;

        case XENMEM_add_to_physmap:
            BUILD_BUG_ON((typeof(cmp.atp.size))-1 >
                         (UINT_MAX >> MEMOP_EXTENT_SHIFT));

            if ( copy_from_guest(&cmp.atp, arg, 1) )
                return -EFAULT;

            XLAT_add_to_physmap(nat.atp, &cmp.atp);

            break;

        case XENMEM_add_to_physmap_batch:
        {
            unsigned int limit = (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.atpb))
                                 / (sizeof(nat.atpb->idxs.p) + sizeof(nat.atpb->gpfns.p));
            /* Use an intermediate variable to suppress warnings on old gcc: */
            unsigned int size;
            xen_ulong_t *idxs = (void *)(nat.atpb + 1);
            xen_pfn_t *gpfns = (void *)(idxs + limit);
            /*
             * The union is always 16 bits wide, so it is not necessary to
             * pick the exact field corresponding to the space.
             */
            enum XLAT_add_to_physmap_batch_u u =
                XLAT_add_to_physmap_batch_u_res0;

            if ( copy_from_guest(&cmp.atpb, arg, 1) )
                return -EFAULT;
            size = cmp.atpb.size;
            if ( !compat_handle_okay(cmp.atpb.idxs, size) ||
                 !compat_handle_okay(cmp.atpb.gpfns, size) ||
                 !compat_handle_okay(cmp.atpb.errs, size) )
                return -EFAULT;

            end_extent = start_extent + limit;
            if ( end_extent > size )
                end_extent = size;

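            /*
             * Bias the arrays so they can be indexed directly by the
             * absolute extent number below.
             */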
            idxs -= start_extent;
            gpfns -= start_extent;

            for ( i = start_extent; i < end_extent; ++i )
            {
                compat_ulong_t idx;
                compat_pfn_t gpfn;

                if ( __copy_from_compat_offset(&idx, cmp.atpb.idxs, i, 1) ||
                     __copy_from_compat_offset(&gpfn, cmp.atpb.gpfns, i, 1) )
                    return -EFAULT;
                idxs[i] = idx;
                gpfns[i] = gpfn;
            }

#define XLAT_add_to_physmap_batch_HNDL_idxs(_d_, _s_) \
            set_xen_guest_handle((_d_)->idxs, idxs)
#define XLAT_add_to_physmap_batch_HNDL_gpfns(_d_, _s_) \
            set_xen_guest_handle((_d_)->gpfns, gpfns)
#define XLAT_add_to_physmap_batch_HNDL_errs(_d_, _s_) \
            guest_from_compat_handle((_d_)->errs, (_s_)->errs)

            XLAT_add_to_physmap_batch(nat.atpb, &cmp.atpb);

#undef XLAT_add_to_physmap_batch_HNDL_errs
#undef XLAT_add_to_physmap_batch_HNDL_gpfns
#undef XLAT_add_to_physmap_batch_HNDL_idxs

            if ( end_extent < cmp.atpb.size )
            {
                nat.atpb->size = end_extent;
                ++split;
            }

            break;
        }

        case XENMEM_remove_from_physmap:
        {
            if ( copy_from_guest(&cmp.xrfp, arg, 1) )
                return -EFAULT;

            XLAT_remove_from_physmap(nat.xrfp, &cmp.xrfp);

            break;
        }

        case XENMEM_access_op:
            if ( copy_from_guest(&cmp.mao, arg, 1) )
                return -EFAULT;

#define XLAT_mem_access_op_HNDL_pfn_list(_d_, _s_) \
            guest_from_compat_handle((_d_)->pfn_list, (_s_)->pfn_list)
#define XLAT_mem_access_op_HNDL_access_list(_d_, _s_) \
            guest_from_compat_handle((_d_)->access_list, (_s_)->access_list)

            XLAT_mem_access_op(nat.mao, &cmp.mao);

#undef XLAT_mem_access_op_HNDL_pfn_list
#undef XLAT_mem_access_op_HNDL_access_list

            break;

        case XENMEM_get_vnumainfo:
        {
            enum XLAT_vnuma_topology_info_vdistance vdistance =
                XLAT_vnuma_topology_info_vdistance_h;
            enum XLAT_vnuma_topology_info_vcpu_to_vnode vcpu_to_vnode =
                XLAT_vnuma_topology_info_vcpu_to_vnode_h;
            enum XLAT_vnuma_topology_info_vmemrange vmemrange =
                XLAT_vnuma_topology_info_vmemrange_h;

            if ( copy_from_guest(&cmp.vnuma, arg, 1) )
                return -EFAULT;

#define XLAT_vnuma_topology_info_HNDL_vdistance_h(_d_, _s_) \
            guest_from_compat_handle((_d_)->vdistance.h, (_s_)->vdistance.h)
#define XLAT_vnuma_topology_info_HNDL_vcpu_to_vnode_h(_d_, _s_) \
            guest_from_compat_handle((_d_)->vcpu_to_vnode.h, (_s_)->vcpu_to_vnode.h)
#define XLAT_vnuma_topology_info_HNDL_vmemrange_h(_d_, _s_) \
            guest_from_compat_handle((_d_)->vmemrange.h, (_s_)->vmemrange.h)

            XLAT_vnuma_topology_info(nat.vnuma, &cmp.vnuma);

#undef XLAT_vnuma_topology_info_HNDL_vdistance_h
#undef XLAT_vnuma_topology_info_HNDL_vcpu_to_vnode_h
#undef XLAT_vnuma_topology_info_HNDL_vmemrange_h
            break;
        }

#ifdef CONFIG_HAS_PASSTHROUGH
        case XENMEM_reserved_device_memory_map:
        {
            struct get_reserved_device_memory grdm;

            if ( unlikely(start_extent) )
                return -EINVAL;

            if ( copy_from_guest(&grdm.map, arg, 1) ||
                 !compat_handle_okay(grdm.map.buffer, grdm.map.nr_entries) )
                return -EFAULT;

            if ( grdm.map.flags & ~XENMEM_RDM_ALL )
                return -EINVAL;

            grdm.used_entries = 0;
            rc = iommu_get_reserved_device_memory(get_reserved_device_memory,
                                                  &grdm);

            if ( !rc && grdm.map.nr_entries < grdm.used_entries )
                rc = -ENOBUFS;
            grdm.map.nr_entries = grdm.used_entries;
            if ( __copy_to_guest(arg, &grdm.map, 1) )
                rc = -EFAULT;

            return rc;
        }
#endif

        case XENMEM_acquire_resource:
        {
            xen_pfn_t *xen_frame_list = NULL;

            if ( copy_from_guest(&cmp.mar, arg, 1) )
                return -EFAULT;

            /* Marshal the frame list in the remainder of the xlat space. */
            if ( !compat_handle_is_null(cmp.mar.frame_list) )
                xen_frame_list = (xen_pfn_t *)(nat.mar + 1);

#define XLAT_mem_acquire_resource_HNDL_frame_list(_d_, _s_) \
            set_xen_guest_handle((_d_)->frame_list, xen_frame_list)

            XLAT_mem_acquire_resource(nat.mar, &cmp.mar);

#undef XLAT_mem_acquire_resource_HNDL_frame_list

            if ( xen_frame_list && cmp.mar.nr_frames )
            {
                unsigned int xlat_max_frames =
                    (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.mar)) /
                    sizeof(*xen_frame_list);

                if ( start_extent >= cmp.mar.nr_frames )
                    return -EINVAL;

                /*
                 * Adjust nat to account for work done on previous
                 * continuations, leaving cmp pristine.  Hide the continuation
                 * from the native code to prevent double accounting.
                 */
                nat.mar->nr_frames -= start_extent;
                nat.mar->frame += start_extent;
                cmd &= MEMOP_CMD_MASK;

                /*
                 * If there are too many frames to fit within the xlat buffer,
                 * we'll need to loop to marshal them all.
                 */
                nat.mar->nr_frames = min(nat.mar->nr_frames, xlat_max_frames);

                /*
                 * frame_list is an input for translated guests, and an output
                 * for untranslated guests. Only copy in for translated guests.
                 */
                if ( paging_mode_translate(currd) )
                {
                    compat_pfn_t *compat_frame_list = (void *)xen_frame_list;

                    if ( !compat_handle_okay(cmp.mar.frame_list,
                                             cmp.mar.nr_frames) ||
                         __copy_from_compat_offset(
                             compat_frame_list, cmp.mar.frame_list,
                             start_extent, nat.mar->nr_frames) )
                        return -EFAULT;

                    /*
                     * Iterate backwards over compat_frame_list[] expanding
                     * compat_pfn_t to xen_pfn_t in place.
                     */
                    for ( int x = nat.mar->nr_frames - 1; x >= 0; --x )
                        xen_frame_list[x] = compat_frame_list[x];
                }
            }
            break;
        }

        default:
            return compat_arch_memory_op(cmd, arg);
        }

        rc = do_memory_op(cmd, nat.hnd);
        if ( rc < 0 )
        {
            if ( rc == -ENOBUFS && op == XENMEM_get_vnumainfo )
            {
                cmp.vnuma.nr_vnodes = nat.vnuma->nr_vnodes;
                cmp.vnuma.nr_vcpus = nat.vnuma->nr_vcpus;
                cmp.vnuma.nr_vmemranges = nat.vnuma->nr_vmemranges;
                if ( __copy_to_guest(arg, &cmp.vnuma, 1) )
                    rc = -EFAULT;
            }
            break;
        }

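        /*
         * hypercall_xlat_continuation() reports whether do_memory_op() set up
         * a continuation; if it did, the continuation's handle argument is
         * switched back from the XLAT area (nat.hnd) to the guest's original
         * handle (arg), the pending cmd is retrieved, and split = -1 records
         * that a continuation is outstanding.
         */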
        cmd = 0;
        if ( hypercall_xlat_continuation(&cmd, 2, 0x02, nat.hnd, arg) )
        {
            BUG_ON(rc != __HYPERVISOR_memory_op);
            BUG_ON((cmd & MEMOP_CMD_MASK) != op);
            split = -1;
        }

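        /*
         * Translate results back into the guest's compat structures and
         * decide whether another pass around the loop, or a continuation, is
         * needed.
         */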
        switch ( op )
        {
        case XENMEM_increase_reservation:
        case XENMEM_decrease_reservation:
        case XENMEM_populate_physmap:
            end_extent = split >= 0 ? rc : cmd >> MEMOP_EXTENT_SHIFT;
            if ( (op != XENMEM_decrease_reservation) &&
                 !guest_handle_is_null(nat.rsrv->extent_start) )
            {
                for ( ; start_extent < end_extent; ++start_extent )
                {
                    compat_pfn_t pfn = nat.rsrv->extent_start.p[start_extent];

                    BUG_ON(pfn != nat.rsrv->extent_start.p[start_extent]);
                    if ( __copy_to_compat_offset(cmp.rsrv.extent_start,
                                                 start_extent, &pfn, 1) )
                    {
                        if ( split >= 0 )
                        {
                            rc = start_extent;
                            split = 0;
                        }
                        else
                            /*
                             * Short of being able to cancel the continuation,
                             * force it to restart here; eventually we shall
                             * get out of this state.
                             */
                            rc = (start_extent << MEMOP_EXTENT_SHIFT) | op;
                        break;
                    }
                }
            }
            else
            {
                start_extent = end_extent;
            }
            /* Bail if there was an error. */
            if ( (split >= 0) && (end_extent != nat.rsrv->nr_extents) )
                split = 0;
            break;

        case XENMEM_exchange:
        {
            DEFINE_XEN_GUEST_HANDLE(compat_memory_exchange_t);
            int order_delta;

            BUG_ON(split >= 0 && rc);
            BUG_ON(end_extent < nat.xchg->nr_exchanged);
            end_extent = nat.xchg->nr_exchanged;

            order_delta = cmp.xchg.out.extent_order - cmp.xchg.in.extent_order;
            if ( order_delta > 0 )
            {
                start_extent >>= order_delta;
                BUG_ON(end_extent & ((1U << order_delta) - 1));
                end_extent >>= order_delta;
            }
            else
            {
                start_extent <<= -order_delta;
                end_extent <<= -order_delta;
            }

            for ( ; start_extent < end_extent; ++start_extent )
            {
                compat_pfn_t pfn = nat.xchg->out.extent_start.p[start_extent];

                BUG_ON(pfn != nat.xchg->out.extent_start.p[start_extent]);
                if ( __copy_to_compat_offset(cmp.xchg.out.extent_start,
                                             start_extent, &pfn, 1) )
                {
                    rc = -EFAULT;
                    break;
                }
            }

            cmp.xchg.nr_exchanged = nat.xchg->nr_exchanged;
            if ( __copy_field_to_guest(guest_handle_cast(arg,
                                                         compat_memory_exchange_t),
                                       &cmp.xchg, nr_exchanged) )
                rc = -EFAULT;

            if ( rc < 0 )
            {
                if ( split < 0 )
                    /* Cannot cancel the continuation... */
                    domain_crash(current->domain);
                return rc;
            }
            break;
        }

        case XENMEM_add_to_physmap_batch:
            start_extent = end_extent;
            break;

        case XENMEM_maximum_ram_page:
        case XENMEM_current_reservation:
        case XENMEM_maximum_reservation:
        case XENMEM_maximum_gpfn:
        case XENMEM_add_to_physmap:
        case XENMEM_remove_from_physmap:
        case XENMEM_access_op:
            break;

        case XENMEM_get_vnumainfo:
            cmp.vnuma.nr_vnodes = nat.vnuma->nr_vnodes;
            cmp.vnuma.nr_vcpus = nat.vnuma->nr_vcpus;
            cmp.vnuma.nr_vmemranges = nat.vnuma->nr_vmemranges;
            if ( __copy_to_guest(arg, &cmp.vnuma, 1) )
                rc = -EFAULT;
            break;

        case XENMEM_acquire_resource:
        {
            DEFINE_XEN_GUEST_HANDLE(compat_mem_acquire_resource_t);
            unsigned int done;

            if ( compat_handle_is_null(cmp.mar.frame_list) )
            {
                ASSERT(split == 0 && rc == 0);
                if ( __copy_field_to_guest(
                         guest_handle_cast(arg,
                                           compat_mem_acquire_resource_t),
                         nat.mar, nr_frames) )
                    return -EFAULT;
                break;
            }

            if ( split < 0 )
            {
                /* Continuation occurred. */
                ASSERT(rc != XENMEM_acquire_resource);
                done = cmd >> MEMOP_EXTENT_SHIFT;
            }
            else
            {
                /* No continuation. */
                ASSERT(rc == 0);
                done = nat.mar->nr_frames;
            }

            ASSERT(done <= nat.mar->nr_frames);

            /*
             * frame_list is an input for translated guests, and an output for
             * untranslated guests. Only copy out for untranslated guests.
             */
            if ( !paging_mode_translate(currd) )
            {
                const xen_pfn_t *xen_frame_list = (xen_pfn_t *)(nat.mar + 1);
                compat_pfn_t *compat_frame_list = (compat_pfn_t *)(nat.mar + 1);

                /*
                 * NOTE: the smaller compat array overwrites the native
                 *       array.
                 */
                BUILD_BUG_ON(sizeof(compat_pfn_t) > sizeof(xen_pfn_t));

                rc = 0;
                for ( i = 0; i < done; i++ )
                {
                    compat_pfn_t frame = xen_frame_list[i];

                    if ( frame != xen_frame_list[i] )
                    {
                        rc = -ERANGE;
                        break;
                    }

                    compat_frame_list[i] = frame;
                }

                if ( !rc && __copy_to_compat_offset(
                         cmp.mar.frame_list, start_extent,
                         compat_frame_list, done) )
                    rc = -EFAULT;

                if ( rc )
                {
                    if ( split < 0 )
                    {
                        gdprintk(XENLOG_ERR,
                                 "Cannot cancel continuation: %ld\n", rc);
                        domain_crash(current->domain);
                    }
                    return rc;
                }
            }

            start_extent += done;

            /* Completely done. */
            if ( start_extent == cmp.mar.nr_frames )
                break;

            /*
             * Done a "full" batch, but we were limited by space in the xlat
             * area.  Go around the loop again without necessarily returning
             * to guest context.
             */
            if ( done == nat.mar->nr_frames )
            {
                split = 1;
                break;
            }

            /* Explicit continuation request from a higher level. */
            if ( done < nat.mar->nr_frames )
                return hypercall_create_continuation(
                    __HYPERVISOR_memory_op, "ih",
                    op | (start_extent << MEMOP_EXTENT_SHIFT), arg);

            /*
             * Well...  Something has gone wrong with the two levels of
             * chunking.  My condolences to whoever next has to debug this
             * mess.
             */
            ASSERT_UNREACHABLE();
            domain_crash(current->domain);
            split = 0;
            break;
        }

        default:
            domain_crash(current->domain);
            split = 0;
            break;
        }

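        /*
         * Re-encode progress into cmd so that, if preemption is due, the
         * hypercall can be continued from the current start_extent.
         */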
        cmd = op | (start_extent << MEMOP_EXTENT_SHIFT);
        if ( split > 0 && hypercall_preempt_check() )
            return hypercall_create_continuation(
                __HYPERVISOR_memory_op, "ih", cmd, arg);
    } while ( split > 0 );

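    /* The compat ABI returns a 32-bit int: clamp out-of-range values. */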
    if ( unlikely(rc > INT_MAX) )
        return INT_MAX;

    if ( unlikely(rc < INT_MIN) )
        return INT_MIN;

    return rc;
}