EMIT_FILE;

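/*
 * Compat (32-bit guest) translation layer for the memory_op hypercall:
 * guest arguments are copied in, rebuilt in the native layout inside the
 * per-vCPU translation (xlat) area, handed to do_memory_op(), and the
 * results translated back, re-encoding continuations where necessary.
 */
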
#include <xen/types.h>
#include <xen/hypercall.h>
#include <xen/guest_access.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <xen/mem_access.h>
#include <asm/current.h>
#include <asm/guest.h>
#include <compat/memory.h>

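/*
 * Build-time check that domid_t has an identical layout in the native and
 * compat ABIs.
 */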
#define xen_domid_t domid_t
#define compat_domid_t domid_compat_t
CHECK_TYPE(domid);
#undef compat_domid_t
#undef xen_domid_t

CHECK_vmemrange;

#ifdef CONFIG_HAS_PASSTHROUGH
struct get_reserved_device_memory {
    struct compat_reserved_device_memory_map map;
    unsigned int used_entries;
};

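/*
 * Callback for iommu_get_reserved_device_memory(): count every matching
 * reserved region, copying out those which fit in the guest-supplied
 * buffer and checking that each survives narrowing to the compat layout.
 */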
static int cf_check get_reserved_device_memory(
    xen_pfn_t start, xen_ulong_t nr, u32 id, void *ctxt)
{
    struct get_reserved_device_memory *grdm = ctxt;
    uint32_t sbdf = PCI_SBDF(grdm->map.dev.pci.seg, grdm->map.dev.pci.bus,
                             grdm->map.dev.pci.devfn).sbdf;

    if ( !(grdm->map.flags & XENMEM_RDM_ALL) && (sbdf != id) )
        return 0;

    if ( grdm->used_entries < grdm->map.nr_entries )
    {
        struct compat_reserved_device_memory rdm = {
            .start_pfn = start, .nr_pages = nr
        };

        if ( rdm.start_pfn != start || rdm.nr_pages != nr )
            return -ERANGE;

        if ( __copy_to_compat_offset(grdm->map.buffer, grdm->used_entries,
                                     &rdm, 1) )
            return -EFAULT;
    }

    ++grdm->used_entries;

    return 1;
}
#endif

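/*
 * The upper bits of cmd encode the extent at which a preempted (sub-)op is
 * to be restarted; only the low MEMOP_CMD_MASK bits name the sub-op itself.
 */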
int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
{
    struct vcpu *curr = current;
    struct domain *currd = curr->domain;
    int split, op = cmd & MEMOP_CMD_MASK;
    long rc;
    unsigned int start_extent = cmd >> MEMOP_EXTENT_SHIFT;

    do
    {
        unsigned int i, end_extent = 0;
        union {
            XEN_GUEST_HANDLE_PARAM(void) hnd;
            struct xen_memory_reservation *rsrv;
            struct xen_memory_exchange *xchg;
            struct xen_add_to_physmap *atp;
            struct xen_add_to_physmap_batch *atpb;
            struct xen_remove_from_physmap *xrfp;
            struct xen_vnuma_topology_info *vnuma;
            struct xen_mem_access_op *mao;
            struct xen_mem_acquire_resource *mar;
        } nat;
        union {
            struct compat_memory_reservation rsrv;
            struct compat_memory_exchange xchg;
            struct compat_add_to_physmap atp;
            struct compat_add_to_physmap_batch atpb;
            struct compat_remove_from_physmap xrfp;
            struct compat_vnuma_topology_info vnuma;
            struct compat_mem_access_op mao;
            struct compat_mem_acquire_resource mar;
        } cmp;

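        /*
         * nat points at the per-vCPU xlat area, where arguments get rebuilt
         * in the native layout; cmp holds the guest's 32-bit view of the
         * same arguments.
         */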
        set_xen_guest_handle(nat.hnd, COMPAT_ARG_XLAT_VIRT_BASE);
        split = 0;
        switch ( op )
        {
            xen_pfn_t *space;

        case XENMEM_increase_reservation:
        case XENMEM_decrease_reservation:
        case XENMEM_populate_physmap:
            if ( copy_from_guest(&cmp.rsrv, arg, 1) )
                return start_extent;

            /* Is size too large for us to encode a continuation? */
            if ( cmp.rsrv.nr_extents > (UINT_MAX >> MEMOP_EXTENT_SHIFT) )
                return start_extent;

            if ( !compat_handle_is_null(cmp.rsrv.extent_start) &&
                 !compat_handle_okay(cmp.rsrv.extent_start, cmp.rsrv.nr_extents) )
                return start_extent;

            end_extent = start_extent + (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.rsrv)) /
                                        sizeof(*space);
            if ( end_extent > cmp.rsrv.nr_extents )
                end_extent = cmp.rsrv.nr_extents;

            space = (xen_pfn_t *)(nat.rsrv + 1);
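            /*
             * Override the extent_start translation: point the native handle
             * at the PFN array following the header in the xlat area, biased
             * by start_extent so it can be indexed by absolute extent number.
             * For ops which consume PFNs (i.e. all but increase_reservation)
             * the guest's compat PFNs are widened and copied in here.
             */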
#define XLAT_memory_reservation_HNDL_extent_start(_d_, _s_) \
            do \
            { \
                if ( !compat_handle_is_null((_s_)->extent_start) ) \
                { \
                    set_xen_guest_handle((_d_)->extent_start, space - start_extent); \
                    if ( op != XENMEM_increase_reservation ) \
                    { \
                        for ( i = start_extent; i < end_extent; ++i ) \
                        { \
                            compat_pfn_t pfn; \
                            if ( __copy_from_compat_offset(&pfn, (_s_)->extent_start, i, 1) ) \
                            { \
                                end_extent = i; \
                                split = -1; \
                                break; \
                            } \
                            *space++ = pfn; \
                        } \
                    } \
                } \
                else \
                { \
                    set_xen_guest_handle((_d_)->extent_start, NULL); \
                    end_extent = cmp.rsrv.nr_extents; \
                } \
            } while (0)
            XLAT_memory_reservation(nat.rsrv, &cmp.rsrv);
#undef XLAT_memory_reservation_HNDL_extent_start

            if ( end_extent < cmp.rsrv.nr_extents )
            {
                nat.rsrv->nr_extents = end_extent;
                ++split;
            }
            /* Avoid calling pv_shim_online_memory() when in a continuation. */
            if ( pv_shim && op != XENMEM_decrease_reservation && !start_extent )
                pv_shim_online_memory(cmp.rsrv.nr_extents - nat.rsrv->nr_extents,
                                      cmp.rsrv.extent_order);
            break;

        case XENMEM_exchange:
        {
            int order_delta;

            if ( copy_from_guest(&cmp.xchg, arg, 1) )
                return -EFAULT;

            order_delta = cmp.xchg.out.extent_order - cmp.xchg.in.extent_order;
            /* Various sanity checks. */
            if ( (cmp.xchg.nr_exchanged > cmp.xchg.in.nr_extents) ||
                 (order_delta > 0 && (cmp.xchg.nr_exchanged & ((1U << order_delta) - 1))) ||
                 /* Sizes of input and output lists do not overflow an int? */
                 ((~0U >> cmp.xchg.in.extent_order) < cmp.xchg.in.nr_extents) ||
                 ((~0U >> cmp.xchg.out.extent_order) < cmp.xchg.out.nr_extents) ||
                 /* Sizes of input and output lists match? */
                 ((cmp.xchg.in.nr_extents << cmp.xchg.in.extent_order) !=
                  (cmp.xchg.out.nr_extents << cmp.xchg.out.extent_order)) )
                return -EINVAL;

            if ( !compat_handle_okay(cmp.xchg.in.extent_start,
                                     cmp.xchg.in.nr_extents) ||
                 !compat_handle_okay(cmp.xchg.out.extent_start,
                                     cmp.xchg.out.nr_extents) )
                return -EFAULT;

            start_extent = cmp.xchg.nr_exchanged;
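            /*
             * Size the chunk so that both PFN arrays fit in the remaining
             * xlat space: each group of exchanged pages needs one entry on
             * the coarser-grained side and 1 << |order_delta| entries on the
             * other.  end_extent is then expressed in input extents.
             */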
            end_extent = (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.xchg)) /
                         (((1U << ABS(order_delta)) + 1) *
                          sizeof(*space));
            if ( end_extent == 0 )
            {
                printk("Cannot translate compatibility mode XENMEM_exchange extents (%u,%u)\n",
                       cmp.xchg.in.extent_order, cmp.xchg.out.extent_order);
                return -E2BIG;
            }
            if ( order_delta > 0 )
                end_extent <<= order_delta;
            end_extent += start_extent;
            if ( end_extent > cmp.xchg.in.nr_extents )
                end_extent = cmp.xchg.in.nr_extents;

            space = (xen_pfn_t *)(nat.xchg + 1);
            /* Code below depends upon .in preceding .out. */
            BUILD_BUG_ON(offsetof(xen_memory_exchange_t, in) > offsetof(xen_memory_exchange_t, out));
#define XLAT_memory_reservation_HNDL_extent_start(_d_, _s_) \
            do \
            { \
                set_xen_guest_handle((_d_)->extent_start, space - start_extent); \
                for ( i = start_extent; i < end_extent; ++i ) \
                { \
                    compat_pfn_t pfn; \
                    if ( __copy_from_compat_offset(&pfn, (_s_)->extent_start, i, 1) ) \
                        return -EFAULT; \
                    *space++ = pfn; \
                } \
                if ( order_delta > 0 ) \
                { \
                    start_extent >>= order_delta; \
                    end_extent >>= order_delta; \
                } \
                else \
                { \
                    start_extent <<= -order_delta; \
                    end_extent <<= -order_delta; \
                } \
                order_delta = -order_delta; \
            } while (0)
            XLAT_memory_exchange(nat.xchg, &cmp.xchg);
#undef XLAT_memory_reservation_HNDL_extent_start

            if ( end_extent < cmp.xchg.in.nr_extents )
            {
                nat.xchg->in.nr_extents = end_extent;
                if ( order_delta >= 0 )
                    nat.xchg->out.nr_extents = end_extent >> order_delta;
                else
                    nat.xchg->out.nr_extents = end_extent << -order_delta;
                ++split;
            }

            break;
        }

        case XENMEM_current_reservation:
        case XENMEM_maximum_reservation:
        case XENMEM_maximum_gpfn:
        case XENMEM_maximum_ram_page:
            nat.hnd = arg;
            break;

        case XENMEM_add_to_physmap:
            BUILD_BUG_ON((typeof(cmp.atp.size))-1 >
                         (UINT_MAX >> MEMOP_EXTENT_SHIFT));

            if ( copy_from_guest(&cmp.atp, arg, 1) )
                return -EFAULT;

            XLAT_add_to_physmap(nat.atp, &cmp.atp);

            break;

        case XENMEM_add_to_physmap_batch:
        {
            unsigned int limit = (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.atpb))
                                 / (sizeof(nat.atpb->idxs.p) + sizeof(nat.atpb->gpfns.p));
            /* Use an intermediate variable to suppress warnings on old gcc: */
            unsigned int size;
            xen_ulong_t *idxs = (void *)(nat.atpb + 1);
            xen_pfn_t *gpfns = (void *)(idxs + limit);
            /*
             * The union is always 16 bits wide, so it is not necessary to
             * pick the exact field corresponding to the address space.
             */
            enum XLAT_add_to_physmap_batch_u u =
                XLAT_add_to_physmap_batch_u_res0;

            if ( copy_from_guest(&cmp.atpb, arg, 1) )
                return -EFAULT;
            size = cmp.atpb.size;
            if ( !compat_handle_okay(cmp.atpb.idxs, size) ||
                 !compat_handle_okay(cmp.atpb.gpfns, size) ||
                 !compat_handle_okay(cmp.atpb.errs, size) )
                return -EFAULT;

            end_extent = start_extent + limit;
            if ( end_extent > size )
                end_extent = size;

            idxs -= start_extent;
            gpfns -= start_extent;

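            /*
             * Copy this chunk's index/GPFN pairs in from the guest, widening
             * them to the native types; idxs/gpfns were biased by
             * start_extent above so they can be indexed by absolute extent
             * number.
             */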
            for ( i = start_extent; i < end_extent; ++i )
            {
                compat_ulong_t idx;
                compat_pfn_t gpfn;

                if ( __copy_from_compat_offset(&idx, cmp.atpb.idxs, i, 1) ||
                     __copy_from_compat_offset(&gpfn, cmp.atpb.gpfns, i, 1) )
                    return -EFAULT;
                idxs[i] = idx;
                gpfns[i] = gpfn;
            }

#define XLAT_add_to_physmap_batch_HNDL_idxs(_d_, _s_) \
            set_xen_guest_handle((_d_)->idxs, idxs)
#define XLAT_add_to_physmap_batch_HNDL_gpfns(_d_, _s_) \
            set_xen_guest_handle((_d_)->gpfns, gpfns)
#define XLAT_add_to_physmap_batch_HNDL_errs(_d_, _s_) \
            guest_from_compat_handle((_d_)->errs, (_s_)->errs)

            XLAT_add_to_physmap_batch(nat.atpb, &cmp.atpb);

#undef XLAT_add_to_physmap_batch_HNDL_errs
#undef XLAT_add_to_physmap_batch_HNDL_gpfns
#undef XLAT_add_to_physmap_batch_HNDL_idxs

            if ( end_extent < cmp.atpb.size )
            {
                nat.atpb->size = end_extent;
                ++split;
            }

            break;
        }

        case XENMEM_remove_from_physmap:
        {
            if ( copy_from_guest(&cmp.xrfp, arg, 1) )
                return -EFAULT;

            XLAT_remove_from_physmap(nat.xrfp, &cmp.xrfp);

            break;
        }

        case XENMEM_access_op:
            if ( copy_from_guest(&cmp.mao, arg, 1) )
                return -EFAULT;

#define XLAT_mem_access_op_HNDL_pfn_list(_d_, _s_)                      \
            guest_from_compat_handle((_d_)->pfn_list, (_s_)->pfn_list)
#define XLAT_mem_access_op_HNDL_access_list(_d_, _s_)                   \
            guest_from_compat_handle((_d_)->access_list, (_s_)->access_list)

            XLAT_mem_access_op(nat.mao, &cmp.mao);

#undef XLAT_mem_access_op_HNDL_pfn_list
#undef XLAT_mem_access_op_HNDL_access_list

            break;

        case XENMEM_get_vnumainfo:
        {
            enum XLAT_vnuma_topology_info_vdistance vdistance =
                XLAT_vnuma_topology_info_vdistance_h;
            enum XLAT_vnuma_topology_info_vcpu_to_vnode vcpu_to_vnode =
                XLAT_vnuma_topology_info_vcpu_to_vnode_h;
            enum XLAT_vnuma_topology_info_vmemrange vmemrange =
                XLAT_vnuma_topology_info_vmemrange_h;

            if ( copy_from_guest(&cmp.vnuma, arg, 1) )
                return -EFAULT;

#define XLAT_vnuma_topology_info_HNDL_vdistance_h(_d_, _s_)             \
            guest_from_compat_handle((_d_)->vdistance.h, (_s_)->vdistance.h)
#define XLAT_vnuma_topology_info_HNDL_vcpu_to_vnode_h(_d_, _s_)         \
            guest_from_compat_handle((_d_)->vcpu_to_vnode.h, (_s_)->vcpu_to_vnode.h)
#define XLAT_vnuma_topology_info_HNDL_vmemrange_h(_d_, _s_)             \
            guest_from_compat_handle((_d_)->vmemrange.h, (_s_)->vmemrange.h)

            XLAT_vnuma_topology_info(nat.vnuma, &cmp.vnuma);

#undef XLAT_vnuma_topology_info_HNDL_vdistance_h
#undef XLAT_vnuma_topology_info_HNDL_vcpu_to_vnode_h
#undef XLAT_vnuma_topology_info_HNDL_vmemrange_h
            break;
        }

#ifdef CONFIG_HAS_PASSTHROUGH
        case XENMEM_reserved_device_memory_map:
        {
            struct get_reserved_device_memory grdm;

            if ( unlikely(start_extent) )
                return -EINVAL;

            if ( copy_from_guest(&grdm.map, arg, 1) ||
                 !compat_handle_okay(grdm.map.buffer, grdm.map.nr_entries) )
                return -EFAULT;

            if ( grdm.map.flags & ~XENMEM_RDM_ALL )
                return -EINVAL;

            grdm.used_entries = 0;
            rc = iommu_get_reserved_device_memory(get_reserved_device_memory,
                                                  &grdm);

            if ( !rc && grdm.map.nr_entries < grdm.used_entries )
                rc = -ENOBUFS;
            grdm.map.nr_entries = grdm.used_entries;
            if ( __copy_to_guest(arg, &grdm.map, 1) )
                rc = -EFAULT;

            return rc;
        }
#endif

        case XENMEM_acquire_resource:
        {
            xen_pfn_t *xen_frame_list = NULL;

            if ( copy_from_guest(&cmp.mar, arg, 1) )
                return -EFAULT;

            /* Marshal the frame list in the remainder of the xlat space. */
            if ( !compat_handle_is_null(cmp.mar.frame_list) )
                xen_frame_list = (xen_pfn_t *)(nat.mar + 1);

#define XLAT_mem_acquire_resource_HNDL_frame_list(_d_, _s_) \
            set_xen_guest_handle((_d_)->frame_list, xen_frame_list)

            XLAT_mem_acquire_resource(nat.mar, &cmp.mar);

#undef XLAT_mem_acquire_resource_HNDL_frame_list

            if ( xen_frame_list && cmp.mar.nr_frames )
            {
                unsigned int xlat_max_frames =
                    (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.mar)) /
                    sizeof(*xen_frame_list);

                if ( start_extent >= cmp.mar.nr_frames )
                    return -EINVAL;

                /*
                 * Adjust nat to account for work done on previous
                 * continuations, leaving cmp pristine.  Hide the continuation
                 * from the native code to prevent double accounting.
                 */
                nat.mar->nr_frames -= start_extent;
                nat.mar->frame += start_extent;
                cmd &= MEMOP_CMD_MASK;

                /*
                 * If there are too many frames to fit within the xlat buffer,
                 * we'll need to loop to marshal them all.
                 */
                nat.mar->nr_frames = min(nat.mar->nr_frames, xlat_max_frames);

                /*
                 * frame_list is an input for translated guests, and an output
                 * for untranslated guests.  Only copy in for translated guests.
                 */
                if ( paging_mode_translate(currd) )
                {
                    compat_pfn_t *compat_frame_list = (void *)xen_frame_list;

                    if ( !compat_handle_okay(cmp.mar.frame_list,
                                             cmp.mar.nr_frames) ||
                         __copy_from_compat_offset(
                             compat_frame_list, cmp.mar.frame_list,
                             start_extent, nat.mar->nr_frames) )
                        return -EFAULT;

                    /*
                     * Iterate backwards over compat_frame_list[] expanding
                     * compat_pfn_t to xen_pfn_t in place.
                     */
                    for ( int x = nat.mar->nr_frames - 1; x >= 0; --x )
                        xen_frame_list[x] = compat_frame_list[x];
                }
            }
            break;
        }

        default:
            return compat_arch_memory_op(cmd, arg);
        }

        rc = do_memory_op(cmd, nat.hnd);
        if ( rc < 0 )
        {
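            /*
             * Even on failure, get_vnumainfo needs to report the required
             * array dimensions back to the guest.
             */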
            if ( rc == -ENOBUFS && op == XENMEM_get_vnumainfo )
            {
                cmp.vnuma.nr_vnodes = nat.vnuma->nr_vnodes;
                cmp.vnuma.nr_vcpus = nat.vnuma->nr_vcpus;
                cmp.vnuma.nr_vmemranges = nat.vnuma->nr_vmemranges;
                if ( __copy_to_guest(arg, &cmp.vnuma, 1) )
                    rc = -EFAULT;
            }
            break;
        }

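        /*
         * If do_memory_op() established a continuation, recover the updated
         * cmd (carrying the restart extent) and substitute the guest's
         * original handle for the xlat-area one in the saved arguments;
         * split = -1 then flags that only part of this chunk was processed.
         */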
        cmd = 0;
        if ( hypercall_xlat_continuation(&cmd, 2, 0x02, nat.hnd, arg) )
        {
            BUG_ON(rc != __HYPERVISOR_memory_op);
            BUG_ON((cmd & MEMOP_CMD_MASK) != op);
            split = -1;
        }

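        /* Translate results back into the compat layout. */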
        switch ( op )
        {
        case XENMEM_increase_reservation:
        case XENMEM_decrease_reservation:
        case XENMEM_populate_physmap:
            end_extent = split >= 0 ? rc : cmd >> MEMOP_EXTENT_SHIFT;
            if ( (op != XENMEM_decrease_reservation) &&
                 !guest_handle_is_null(nat.rsrv->extent_start) )
            {
                for ( ; start_extent < end_extent; ++start_extent )
                {
                    compat_pfn_t pfn = nat.rsrv->extent_start.p[start_extent];

                    BUG_ON(pfn != nat.rsrv->extent_start.p[start_extent]);
                    if ( __copy_to_compat_offset(cmp.rsrv.extent_start,
                                                 start_extent, &pfn, 1) )
                    {
                        if ( split >= 0 )
                        {
                            rc = start_extent;
                            split = 0;
                        }
                        else
                            /*
                             * Short of being able to cancel the continuation,
                             * force it to restart here; eventually we shall
                             * get out of this state.
                             */
                            rc = (start_extent << MEMOP_EXTENT_SHIFT) | op;
                        break;
                    }
                }
            }
            else
            {
                start_extent = end_extent;
            }
            /* Bail if there was an error. */
            if ( (split >= 0) && (end_extent != nat.rsrv->nr_extents) )
                split = 0;
            break;

        case XENMEM_exchange:
        {
            DEFINE_XEN_GUEST_HANDLE(compat_memory_exchange_t);
            int order_delta;

            BUG_ON(split >= 0 && rc);
            BUG_ON(end_extent < nat.xchg->nr_exchanged);
            end_extent = nat.xchg->nr_exchanged;

            order_delta = cmp.xchg.out.extent_order - cmp.xchg.in.extent_order;
            if ( order_delta > 0 )
            {
                start_extent >>= order_delta;
                BUG_ON(end_extent & ((1U << order_delta) - 1));
                end_extent >>= order_delta;
            }
            else
            {
                start_extent <<= -order_delta;
                end_extent <<= -order_delta;
            }

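            /*
             * Copy the replacement PFNs back to the guest, checking that
             * each still fits in a compat_pfn_t.
             */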
            for ( ; start_extent < end_extent; ++start_extent )
            {
                compat_pfn_t pfn = nat.xchg->out.extent_start.p[start_extent];

                BUG_ON(pfn != nat.xchg->out.extent_start.p[start_extent]);
                if ( __copy_to_compat_offset(cmp.xchg.out.extent_start,
                                             start_extent, &pfn, 1) )
                {
                    rc = -EFAULT;
                    break;
                }
            }

            cmp.xchg.nr_exchanged = nat.xchg->nr_exchanged;
            if ( __copy_field_to_guest(guest_handle_cast(arg,
                                                         compat_memory_exchange_t),
                                       &cmp.xchg, nr_exchanged) )
                rc = -EFAULT;

            if ( rc < 0 )
            {
                if ( split < 0 )
                    /* Cannot cancel the continuation... */
                    domain_crash(current->domain);
                return rc;
            }
            break;
        }

        case XENMEM_add_to_physmap_batch:
            start_extent = end_extent;
            break;

        case XENMEM_maximum_ram_page:
        case XENMEM_current_reservation:
        case XENMEM_maximum_reservation:
        case XENMEM_maximum_gpfn:
        case XENMEM_add_to_physmap:
        case XENMEM_remove_from_physmap:
        case XENMEM_access_op:
            break;

        case XENMEM_get_vnumainfo:
            cmp.vnuma.nr_vnodes = nat.vnuma->nr_vnodes;
            cmp.vnuma.nr_vcpus = nat.vnuma->nr_vcpus;
            cmp.vnuma.nr_vmemranges = nat.vnuma->nr_vmemranges;
            if ( __copy_to_guest(arg, &cmp.vnuma, 1) )
                rc = -EFAULT;
            break;

        case XENMEM_acquire_resource:
        {
            DEFINE_XEN_GUEST_HANDLE(compat_mem_acquire_resource_t);
            unsigned int done;

            if ( compat_handle_is_null(cmp.mar.frame_list) )
            {
                ASSERT(split == 0 && rc == 0);
                if ( __copy_field_to_guest(
                         guest_handle_cast(arg,
                                           compat_mem_acquire_resource_t),
                         nat.mar, nr_frames) )
                    return -EFAULT;
                break;
            }

            if ( split < 0 )
            {
                /* Continuation occurred. */
                ASSERT(rc != XENMEM_acquire_resource);
                done = cmd >> MEMOP_EXTENT_SHIFT;
            }
            else
            {
                /* No continuation. */
                ASSERT(rc == 0);
                done = nat.mar->nr_frames;
            }

            ASSERT(done <= nat.mar->nr_frames);

            /*
             * frame_list is an input for translated guests, and an output for
             * untranslated guests.  Only copy out for untranslated guests.
             */
            if ( !paging_mode_translate(currd) )
            {
                const xen_pfn_t *xen_frame_list = (xen_pfn_t *)(nat.mar + 1);
                compat_pfn_t *compat_frame_list = (compat_pfn_t *)(nat.mar + 1);

                /*
                 * NOTE: the smaller compat array overwrites the native
                 *       array.
                 */
                BUILD_BUG_ON(sizeof(compat_pfn_t) > sizeof(xen_pfn_t));

                rc = 0;
                for ( i = 0; i < done; i++ )
                {
                    compat_pfn_t frame = xen_frame_list[i];

                    if ( frame != xen_frame_list[i] )
                    {
                        rc = -ERANGE;
                        break;
                    }

                    compat_frame_list[i] = frame;
                }

                if ( !rc && __copy_to_compat_offset(
                         cmp.mar.frame_list, start_extent,
                         compat_frame_list, done) )
                    rc = -EFAULT;

                if ( rc )
                {
                    if ( split < 0 )
                    {
                        gdprintk(XENLOG_ERR,
                                 "Cannot cancel continuation: %ld\n", rc);
                        domain_crash(current->domain);
                    }
                    return rc;
                }
            }

            start_extent += done;

            /* Completely done. */
            if ( start_extent == cmp.mar.nr_frames )
                break;

            /*
             * Done a "full" batch, but we were limited by space in the xlat
             * area.  Go around the loop again without necessarily returning
             * to guest context.
             */
            if ( done == nat.mar->nr_frames )
            {
                split = 1;
                break;
            }

            /* Explicit continuation request from a higher level. */
            if ( done < nat.mar->nr_frames )
                return hypercall_create_continuation(
                    __HYPERVISOR_memory_op, "ih",
                    op | (start_extent << MEMOP_EXTENT_SHIFT), arg);

            /*
             * Well... Something's gone wrong with the two levels of chunking.
             * My condolences to whoever next has to debug this mess.
             */
            ASSERT_UNREACHABLE();
            domain_crash(current->domain);
            split = 0;
            break;
        }

        default:
            domain_crash(current->domain);
            split = 0;
            break;
        }

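        /*
         * Re-encode the restart point into cmd; if we need to yield, restart
         * the (compat) hypercall from there.
         */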
        cmd = op | (start_extent << MEMOP_EXTENT_SHIFT);
        if ( split > 0 && hypercall_preempt_check() )
            return hypercall_create_continuation(
                __HYPERVISOR_memory_op, "ih", cmd, arg);
    } while ( split > 0 );

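    /* rc is a long, but the compat ABI returns an int: clamp, don't truncate. */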
    if ( unlikely(rc > INT_MAX) )
        return INT_MAX;

    if ( unlikely(rc < INT_MIN) )
        return INT_MIN;

    return rc;
}