1 /******************************************************************************
2 * memory.c
3 *
4 * Code to handle memory-related requests.
5 *
6 * Copyright (c) 2003-2004, B Dragovic
7 * Copyright (c) 2003-2005, K A Fraser
8 */
9
10 #include <xen/types.h>
11 #include <xen/lib.h>
12 #include <xen/mm.h>
13 #include <xen/perfc.h>
14 #include <xen/sched.h>
15 #include <xen/event.h>
16 #include <xen/paging.h>
17 #include <xen/iocap.h>
18 #include <xen/guest_access.h>
19 #include <xen/hypercall.h>
20 #include <xen/errno.h>
21 #include <xen/tmem.h>
22 #include <xen/tmem_xen.h>
23 #include <xen/numa.h>
24 #include <xen/mem_access.h>
25 #include <xen/trace.h>
26 #include <asm/current.h>
27 #include <asm/hardirq.h>
28 #include <asm/p2m.h>
29 #include <public/memory.h>
30 #include <xsm/xsm.h>
31
32 #ifdef CONFIG_X86
33 #include <asm/guest.h>
34 #endif
35
36 struct memop_args {
37 /* INPUT */
38 struct domain *domain; /* Domain to be affected. */
39 XEN_GUEST_HANDLE(xen_pfn_t) extent_list; /* List of extent base addrs. */
40 unsigned int nr_extents; /* Number of extents to allocate or free. */
41 unsigned int extent_order; /* Size of each extent. */
42 unsigned int memflags; /* Allocation flags. */
43
44 /* INPUT/OUTPUT */
45 unsigned int nr_done; /* Number of extents processed so far. */
46 int preempted; /* Was the hypercall preempted? */
47 };
48
49 #ifndef CONFIG_CTLDOM_MAX_ORDER
50 #define CONFIG_CTLDOM_MAX_ORDER CONFIG_PAGEALLOC_MAX_ORDER
51 #endif
52 #ifndef CONFIG_PTDOM_MAX_ORDER
53 #define CONFIG_PTDOM_MAX_ORDER CONFIG_HWDOM_MAX_ORDER
54 #endif
55
56 static unsigned int __read_mostly domu_max_order = CONFIG_DOMU_MAX_ORDER;
57 static unsigned int __read_mostly ctldom_max_order = CONFIG_CTLDOM_MAX_ORDER;
58 static unsigned int __read_mostly hwdom_max_order = CONFIG_HWDOM_MAX_ORDER;
59 #ifdef HAS_PASSTHROUGH
60 static unsigned int __read_mostly ptdom_max_order = CONFIG_PTDOM_MAX_ORDER;
61 #endif
62
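/*
 * Accepted format (a sketch derived from the parser below):
 *   memop-max-order=<domU>[,<ctldom>[,<hwdom>[,<ptdom>]]]
 * An empty field, e.g. "memop-max-order=,,12", leaves that particular
 * default unchanged.
 */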
63 static int __init parse_max_order(const char *s)
64 {
65 if ( *s != ',' )
66 domu_max_order = simple_strtoul(s, &s, 0);
67 if ( *s == ',' && *++s != ',' )
68 ctldom_max_order = simple_strtoul(s, &s, 0);
69 if ( *s == ',' && *++s != ',' )
70 hwdom_max_order = simple_strtoul(s, &s, 0);
71 #ifdef HAS_PASSTHROUGH
72 if ( *s == ',' && *++s != ',' )
73 ptdom_max_order = simple_strtoul(s, &s, 0);
74 #endif
75
76 return *s ? -EINVAL : 0;
77 }
78 custom_param("memop-max-order", parse_max_order);
79
80 static unsigned int max_order(const struct domain *d)
81 {
82 unsigned int order = domu_max_order;
83
84 #ifdef HAS_PASSTHROUGH
85 if ( cache_flush_permitted(d) && order < ptdom_max_order )
86 order = ptdom_max_order;
87 #endif
88
89 if ( is_control_domain(d) && order < ctldom_max_order )
90 order = ctldom_max_order;
91
92 if ( is_hardware_domain(d) && order < hwdom_max_order )
93 order = hwdom_max_order;
94
95 return min(order, MAX_ORDER + 0U);
96 }
97
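/*
 * Allocate more memory for a domain. Note that, unlike populate_physmap()
 * below, this does not touch the physmap: for non-translated guests the
 * new machine addresses are merely reported back via extent_list.
 */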
98 static void increase_reservation(struct memop_args *a)
99 {
100 struct page_info *page;
101 unsigned long i;
102 xen_pfn_t mfn;
103 struct domain *d = a->domain;
104
105 if ( !guest_handle_is_null(a->extent_list) &&
106 !guest_handle_subrange_okay(a->extent_list, a->nr_done,
107 a->nr_extents-1) )
108 return;
109
110 if ( a->extent_order > max_order(current->domain) )
111 return;
112
113 for ( i = a->nr_done; i < a->nr_extents; i++ )
114 {
115 if ( i != a->nr_done && hypercall_preempt_check() )
116 {
117 a->preempted = 1;
118 goto out;
119 }
120
121 page = alloc_domheap_pages(d, a->extent_order, a->memflags);
122 if ( unlikely(page == NULL) )
123 {
124 gdprintk(XENLOG_INFO, "Could not allocate order=%u extent: "
125 "id=%d memflags=%#x (%lu of %u)\n",
126 a->extent_order, d->domain_id, a->memflags,
127 i, a->nr_extents);
128 goto out;
129 }
130
131 /* Inform the domain of the new page's machine address. */
132 if ( !paging_mode_translate(d) &&
133 !guest_handle_is_null(a->extent_list) )
134 {
135 mfn = page_to_mfn(page);
136 if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
137 goto out;
138 }
139 }
140
141 out:
142 a->nr_done = i;
143 }
144
145 static void populate_physmap(struct memop_args *a)
146 {
147 struct page_info *page;
148 unsigned int i, j;
149 xen_pfn_t gpfn, mfn;
150 struct domain *d = a->domain, *curr_d = current->domain;
151 bool need_tlbflush = false;
152 uint32_t tlbflush_timestamp = 0;
153
154 if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done,
155 a->nr_extents-1) )
156 return;
157
158 if ( a->extent_order > (a->memflags & MEMF_populate_on_demand ? MAX_ORDER :
159 max_order(curr_d)) )
160 return;
161
162 if ( unlikely(!d->creation_finished) )
163 {
164 /*
165 * With MEMF_no_tlbflush set, alloc_heap_pages() will ignore
166 * TLB-flushes. After VM creation, this is a security issue (it can
167 * make pages accessible to guest B, when guest A may still have a
168 * cached mapping to them). So we do this only during domain creation,
169 * when the domain itself has not yet been unpaused for the first
170 * time.
171 */
172 a->memflags |= MEMF_no_tlbflush;
173 /*
174 * With MEMF_no_icache_flush, alloc_heap_pages() will skip
175 * performing icache flushes. We do it only before domain
176 * creation as once the domain is running there is a danger of
177 * executing instructions from stale caches if icache flush is
178 * delayed.
179 */
180 a->memflags |= MEMF_no_icache_flush;
181 }
182
183 for ( i = a->nr_done; i < a->nr_extents; i++ )
184 {
185 if ( i != a->nr_done && hypercall_preempt_check() )
186 {
187 a->preempted = 1;
188 goto out;
189 }
190
191 if ( unlikely(__copy_from_guest_offset(&gpfn, a->extent_list, i, 1)) )
192 goto out;
193
194 if ( a->memflags & MEMF_populate_on_demand )
195 {
196 /* Disallow populating PoD pages on oneself. */
197 if ( d == curr_d )
198 goto out;
199
200 if ( guest_physmap_mark_populate_on_demand(d, gpfn,
201 a->extent_order) < 0 )
202 goto out;
203 }
204 else
205 {
206 if ( is_domain_direct_mapped(d) )
207 {
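/*
 * For direct-mapped domains gpfn == mfn; just verify that every
 * page of the extent is valid and already belongs to this domain
 * before (re)inserting it into the physmap below.
 */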
208 mfn = gpfn;
209
210 for ( j = 0; j < (1U << a->extent_order); j++, mfn++ )
211 {
212 if ( !mfn_valid(_mfn(mfn)) )
213 {
214 gdprintk(XENLOG_INFO, "Invalid mfn %#"PRI_xen_pfn"\n",
215 mfn);
216 goto out;
217 }
218
219 page = mfn_to_page(mfn);
220 if ( !get_page(page, d) )
221 {
222 gdprintk(XENLOG_INFO,
223 "mfn %#"PRI_xen_pfn" doesn't belong to d%d\n",
224 mfn, d->domain_id);
225 goto out;
226 }
227 put_page(page);
228 }
229
230 mfn = gpfn;
231 }
232 else
233 {
234 page = alloc_domheap_pages(d, a->extent_order, a->memflags);
235
236 if ( unlikely(!page) )
237 {
238 if ( !tmem_enabled() || a->extent_order )
239 gdprintk(XENLOG_INFO,
240 "Could not allocate order=%u extent: id=%d memflags=%#x (%u of %u)\n",
241 a->extent_order, d->domain_id, a->memflags,
242 i, a->nr_extents);
243 goto out;
244 }
245
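/*
 * TLB flushes were suppressed at allocation time (see the
 * MEMF_no_tlbflush logic above), so record each page's flush
 * timestamp and issue a single filtered flush at the end.
 */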
246 if ( unlikely(a->memflags & MEMF_no_tlbflush) )
247 {
248 for ( j = 0; j < (1U << a->extent_order); j++ )
249 accumulate_tlbflush(&need_tlbflush, &page[j],
250 &tlbflush_timestamp);
251 }
252
253 mfn = page_to_mfn(page);
254 }
255
256 guest_physmap_add_page(d, _gfn(gpfn), _mfn(mfn), a->extent_order);
257
258 if ( !paging_mode_translate(d) )
259 {
260 for ( j = 0; j < (1U << a->extent_order); j++ )
261 set_gpfn_from_mfn(mfn + j, gpfn + j);
262
263 /* Inform the domain of the new page's machine address. */
264 if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
265 goto out;
266 }
267 }
268 }
269
270 out:
271 if ( need_tlbflush )
272 filtered_flush_tlb_mask(tlbflush_timestamp);
273
274 if ( a->memflags & MEMF_no_icache_flush )
275 invalidate_icache();
276
277 a->nr_done = i;
278 }
279
280 int guest_remove_page(struct domain *d, unsigned long gmfn)
281 {
282 struct page_info *page;
283 #ifdef CONFIG_X86
284 p2m_type_t p2mt;
285 #endif
286 mfn_t mfn;
287 int rc;
288
289 #ifdef CONFIG_X86
290 mfn = get_gfn_query(d, gmfn, &p2mt);
291 if ( unlikely(p2m_is_paging(p2mt)) )
292 {
293 rc = guest_physmap_remove_page(d, _gfn(gmfn), mfn, 0);
294 put_gfn(d, gmfn);
295
296 if ( rc )
297 return rc;
298
299 /* If the page hasn't yet been paged out, there is an
300 * actual page that needs to be released. */
301 if ( p2mt == p2m_ram_paging_out )
302 {
303 ASSERT(mfn_valid(mfn));
304 page = mfn_to_page(mfn_x(mfn));
305 if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
306 put_page(page);
307 }
308 p2m_mem_paging_drop_page(d, gmfn, p2mt);
309
310 return 0;
311 }
312 if ( p2mt == p2m_mmio_direct )
313 {
314 rc = clear_mmio_p2m_entry(d, gmfn, mfn, PAGE_ORDER_4K);
315 put_gfn(d, gmfn);
316
317 return rc;
318 }
319 #else
320 mfn = gfn_to_mfn(d, _gfn(gmfn));
321 #endif
322 if ( unlikely(!mfn_valid(mfn)) )
323 {
324 put_gfn(d, gmfn);
325 gdprintk(XENLOG_INFO, "Domain %u page number %lx invalid\n",
326 d->domain_id, gmfn);
327
328 return -EINVAL;
329 }
330
331 #ifdef CONFIG_X86
332 if ( p2m_is_shared(p2mt) )
333 {
334 /*
335 * Unshare the page, bail out on error. We unshare because we
336 * might be the only one using this shared page, and we need to
337 * trigger proper cleanup. Once done, this is like any other page.
338 */
339 rc = mem_sharing_unshare_page(d, gmfn, 0);
340 if ( rc )
341 {
342 put_gfn(d, gmfn);
343 (void)mem_sharing_notify_enomem(d, gmfn, 0);
344
345 return rc;
346 }
347 /* Maybe the mfn changed */
348 mfn = get_gfn_query_unlocked(d, gmfn, &p2mt);
349 ASSERT(!p2m_is_shared(p2mt));
350 }
351 #endif /* CONFIG_X86 */
352
353 page = mfn_to_page(mfn_x(mfn));
354 if ( unlikely(!get_page(page, d)) )
355 {
356 put_gfn(d, gmfn);
357 gdprintk(XENLOG_INFO, "Bad page free for domain %u\n", d->domain_id);
358
359 return -ENXIO;
360 }
361
362 rc = guest_physmap_remove_page(d, _gfn(gmfn), mfn, 0);
363
364 #ifdef _PGT_pinned
365 if ( !rc && test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
366 put_page_and_type(page);
367 #endif
368
369 /*
370 * Some platforms lack an IOMMU, so a domain with a DMA-capable device
371 * must get back the same pfn the next time the populate_physmap
372 * hypercall is called for it.
373 *
374 * For this purpose (and to match populate_physmap() behavior), the page
375 * is kept allocated.
376 */
377 if ( !rc && !is_domain_direct_mapped(d) &&
378 test_and_clear_bit(_PGC_allocated, &page->count_info) )
379 put_page(page);
380
381 put_page(page);
382 put_gfn(d, gmfn);
383
384 return rc;
385 }
386
387 static void decrease_reservation(struct memop_args *a)
388 {
389 unsigned long i, j;
390 xen_pfn_t gmfn;
391
392 if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done,
393 a->nr_extents-1) ||
394 a->extent_order > max_order(current->domain) )
395 return;
396
397 for ( i = a->nr_done; i < a->nr_extents; i++ )
398 {
399 if ( i != a->nr_done && hypercall_preempt_check() )
400 {
401 a->preempted = 1;
402 goto out;
403 }
404
405 if ( unlikely(__copy_from_guest_offset(&gmfn, a->extent_list, i, 1)) )
406 goto out;
407
408 if ( tb_init_done )
409 {
410 struct {
411 u64 gfn;
412 int d:16, order:16;
413 } t;
414
415 t.gfn = gmfn;
416 t.d = a->domain->domain_id;
417 t.order = a->extent_order;
418
419 __trace_var(TRC_MEM_DECREASE_RESERVATION, 0, sizeof(t), &t);
420 }
421
422 /* See if populate-on-demand wants to handle this */
423 if ( is_hvm_domain(a->domain)
424 && p2m_pod_decrease_reservation(a->domain, _gfn(gmfn),
425 a->extent_order) )
426 continue;
427
428 for ( j = 0; j < (1 << a->extent_order); j++ )
429 if ( guest_remove_page(a->domain, gmfn + j) )
430 goto out;
431 }
432
433 out:
434 a->nr_done = i;
435 }
436
437 static bool propagate_node(unsigned int xmf, unsigned int *memflags)
438 {
439 const struct domain *currd = current->domain;
440
441 BUILD_BUG_ON(XENMEMF_get_node(0) != NUMA_NO_NODE);
442 BUILD_BUG_ON(MEMF_get_node(0) != NUMA_NO_NODE);
443
444 if ( XENMEMF_get_node(xmf) == NUMA_NO_NODE )
445 return true;
446
447 if ( is_hardware_domain(currd) || is_control_domain(currd) )
448 {
449 if ( XENMEMF_get_node(xmf) >= MAX_NUMNODES )
450 return false;
451
452 *memflags |= MEMF_node(XENMEMF_get_node(xmf));
453 if ( xmf & XENMEMF_exact_node_request )
454 *memflags |= MEMF_exact_node;
455 }
456 else if ( xmf & XENMEMF_exact_node_request )
457 return false;
458
459 return true;
460 }
461
462 static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
463 {
464 struct xen_memory_exchange exch;
465 PAGE_LIST_HEAD(in_chunk_list);
466 PAGE_LIST_HEAD(out_chunk_list);
467 unsigned long in_chunk_order, out_chunk_order;
468 xen_pfn_t gpfn, gmfn, mfn;
469 unsigned long i, j, k;
470 unsigned int memflags = 0;
471 long rc = 0;
472 struct domain *d;
473 struct page_info *page;
474
475 if ( copy_from_guest(&exch, arg, 1) )
476 return -EFAULT;
477
478 if ( max(exch.in.extent_order, exch.out.extent_order) >
479 max_order(current->domain) )
480 {
481 rc = -EPERM;
482 goto fail_early;
483 }
484
485 /* Various sanity checks. */
486 if ( (exch.nr_exchanged > exch.in.nr_extents) ||
487 /* Input and output domain identifiers match? */
488 (exch.in.domid != exch.out.domid) ||
489 /* Sizes of input and output lists do not overflow a long? */
490 ((~0UL >> exch.in.extent_order) < exch.in.nr_extents) ||
491 ((~0UL >> exch.out.extent_order) < exch.out.nr_extents) ||
492 /* Sizes of input and output lists match? */
493 ((exch.in.nr_extents << exch.in.extent_order) !=
494 (exch.out.nr_extents << exch.out.extent_order)) )
495 {
496 rc = -EINVAL;
497 goto fail_early;
498 }
499
500 if ( !guest_handle_subrange_okay(exch.in.extent_start, exch.nr_exchanged,
501 exch.in.nr_extents - 1) )
502 {
503 rc = -EFAULT;
504 goto fail_early;
505 }
506
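/*
 * A "chunk" covers the same amount of memory on both sides:
 * (1 << in_chunk_order) input extents correspond to
 * (1 << out_chunk_order) output extents, with one of the two
 * orders always being zero.
 */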
507 if ( exch.in.extent_order <= exch.out.extent_order )
508 {
509 in_chunk_order = exch.out.extent_order - exch.in.extent_order;
510 out_chunk_order = 0;
511
512 if ( !guest_handle_subrange_okay(exch.out.extent_start,
513 exch.nr_exchanged >> in_chunk_order,
514 exch.out.nr_extents - 1) )
515 {
516 rc = -EFAULT;
517 goto fail_early;
518 }
519 }
520 else
521 {
522 in_chunk_order = 0;
523 out_chunk_order = exch.in.extent_order - exch.out.extent_order;
524
525 if ( !guest_handle_subrange_okay(exch.out.extent_start,
526 exch.nr_exchanged << out_chunk_order,
527 exch.out.nr_extents - 1) )
528 {
529 rc = -EFAULT;
530 goto fail_early;
531 }
532 }
533
534 if ( unlikely(!propagate_node(exch.out.mem_flags, &memflags)) )
535 {
536 rc = -EINVAL;
537 goto fail_early;
538 }
539
540 d = rcu_lock_domain_by_any_id(exch.in.domid);
541 if ( d == NULL )
542 {
543 rc = -ESRCH;
544 goto fail_early;
545 }
546
547 rc = xsm_memory_exchange(XSM_TARGET, d);
548 if ( rc )
549 {
550 rcu_unlock_domain(d);
551 goto fail_early;
552 }
553
554 memflags |= MEMF_bits(domain_clamp_alloc_bitsize(
555 d,
556 XENMEMF_get_address_bits(exch.out.mem_flags) ? :
557 (BITS_PER_LONG+PAGE_SHIFT)));
558
559 for ( i = (exch.nr_exchanged >> in_chunk_order);
560 i < (exch.in.nr_extents >> in_chunk_order);
561 i++ )
562 {
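/* If preempted, record progress in nr_exchanged and arrange a continuation. */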
563 if ( i != (exch.nr_exchanged >> in_chunk_order) &&
564 hypercall_preempt_check() )
565 {
566 exch.nr_exchanged = i << in_chunk_order;
567 rcu_unlock_domain(d);
568 if ( __copy_field_to_guest(arg, &exch, nr_exchanged) )
569 return -EFAULT;
570 return hypercall_create_continuation(
571 __HYPERVISOR_memory_op, "lh", XENMEM_exchange, arg);
572 }
573
574 /* Steal a chunk's worth of input pages from the domain. */
575 for ( j = 0; j < (1UL << in_chunk_order); j++ )
576 {
577 if ( unlikely(__copy_from_guest_offset(
578 &gmfn, exch.in.extent_start, (i<<in_chunk_order)+j, 1)) )
579 {
580 rc = -EFAULT;
581 goto fail;
582 }
583
584 for ( k = 0; k < (1UL << exch.in.extent_order); k++ )
585 {
586 #ifdef CONFIG_X86
587 p2m_type_t p2mt;
588
589 /* Shared pages cannot be exchanged */
590 mfn = mfn_x(get_gfn_unshare(d, gmfn + k, &p2mt));
591 if ( p2m_is_shared(p2mt) )
592 {
593 put_gfn(d, gmfn + k);
594 rc = -ENOMEM;
595 goto fail;
596 }
597 #else /* !CONFIG_X86 */
598 mfn = mfn_x(gfn_to_mfn(d, _gfn(gmfn + k)));
599 #endif
600 if ( unlikely(!mfn_valid(_mfn(mfn))) )
601 {
602 put_gfn(d, gmfn + k);
603 rc = -EINVAL;
604 goto fail;
605 }
606
607 page = mfn_to_page(mfn);
608
609 rc = steal_page(d, page, MEMF_no_refcount);
610 if ( unlikely(rc) )
611 {
612 put_gfn(d, gmfn + k);
613 goto fail;
614 }
615
616 page_list_add(page, &in_chunk_list);
617 put_gfn(d, gmfn + k);
618 }
619 }
620
621 /* Allocate a chunk's worth of anonymous output pages. */
622 for ( j = 0; j < (1UL << out_chunk_order); j++ )
623 {
624 page = alloc_domheap_pages(d, exch.out.extent_order,
625 MEMF_no_owner | memflags);
626 if ( unlikely(page == NULL) )
627 {
628 rc = -ENOMEM;
629 goto fail;
630 }
631
632 page_list_add(page, &out_chunk_list);
633 }
634
635 /*
636 * Success! Beyond this point we cannot fail for this chunk.
637 */
638
639 /* Destroy final reference to each input page. */
640 while ( (page = page_list_remove_head(&in_chunk_list)) )
641 {
642 unsigned long gfn;
643
644 if ( !test_and_clear_bit(_PGC_allocated, &page->count_info) )
645 BUG();
646 mfn = page_to_mfn(page);
647 gfn = mfn_to_gmfn(d, mfn);
648 /* Pages were unshared above */
649 BUG_ON(SHARED_M2P(gfn));
650 if ( guest_physmap_remove_page(d, _gfn(gfn), _mfn(mfn), 0) )
651 domain_crash(d);
652 put_page(page);
653 }
654
655 /* Assign each output page to the domain. */
656 for ( j = 0; (page = page_list_remove_head(&out_chunk_list)); ++j )
657 {
658 if ( assign_pages(d, page, exch.out.extent_order,
659 MEMF_no_refcount) )
660 {
661 unsigned long dec_count;
662 bool_t drop_dom_ref;
663
664 /*
665 * Pages in in_chunk_list were stolen without decreasing
666 * tot_pages. If the domain is dying when we assign pages,
667 * we need to decrease the count ourselves. Pages that have
668 * already been assigned are covered by
669 * domain_relinquish_resources().
670 */
671 dec_count = (((1UL << exch.in.extent_order) *
672 (1UL << in_chunk_order)) -
673 (j * (1UL << exch.out.extent_order)));
674
675 spin_lock(&d->page_alloc_lock);
676 drop_dom_ref = (dec_count &&
677 !domain_adjust_tot_pages(d, -dec_count));
678 spin_unlock(&d->page_alloc_lock);
679
680 if ( drop_dom_ref )
681 put_domain(d);
682
683 free_domheap_pages(page, exch.out.extent_order);
684 goto dying;
685 }
686
687 if ( __copy_from_guest_offset(&gpfn, exch.out.extent_start,
688 (i << out_chunk_order) + j, 1) )
689 {
690 rc = -EFAULT;
691 continue;
692 }
693
694 mfn = page_to_mfn(page);
695 guest_physmap_add_page(d, _gfn(gpfn), _mfn(mfn),
696 exch.out.extent_order);
697
698 if ( !paging_mode_translate(d) )
699 {
700 for ( k = 0; k < (1UL << exch.out.extent_order); k++ )
701 set_gpfn_from_mfn(mfn + k, gpfn + k);
702 if ( __copy_to_guest_offset(exch.out.extent_start,
703 (i << out_chunk_order) + j,
704 &mfn, 1) )
705 rc = -EFAULT;
706 }
707 }
708 BUG_ON( !(d->is_dying) && (j != (1UL << out_chunk_order)) );
709
710 if ( rc )
711 goto fail;
712 }
713
714 exch.nr_exchanged = exch.in.nr_extents;
715 if ( __copy_field_to_guest(arg, &exch, nr_exchanged) )
716 rc = -EFAULT;
717 rcu_unlock_domain(d);
718 return rc;
719
720 /*
721 * Failed a chunk! Free any partial chunk work. Tell caller how many
722 * chunks succeeded.
723 */
724 fail:
725 /* Reassign any input pages we managed to steal. */
726 while ( (page = page_list_remove_head(&in_chunk_list)) )
727 if ( assign_pages(d, page, 0, MEMF_no_refcount) )
728 {
729 BUG_ON(!d->is_dying);
730 if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
731 put_page(page);
732 }
733
734 dying:
735 rcu_unlock_domain(d);
736 /* Free any output pages we managed to allocate. */
737 while ( (page = page_list_remove_head(&out_chunk_list)) )
738 free_domheap_pages(page, exch.out.extent_order);
739
740 exch.nr_exchanged = i << in_chunk_order;
741
742 fail_early:
743 if ( __copy_field_to_guest(arg, &exch, nr_exchanged) )
744 rc = -EFAULT;
745 return rc;
746 }
747
748 static int xenmem_add_to_physmap(struct domain *d,
749 struct xen_add_to_physmap *xatp,
750 unsigned int start)
751 {
752 unsigned int done = 0;
753 long rc = 0;
754 union xen_add_to_physmap_batch_extra extra;
755
756 if ( xatp->space != XENMAPSPACE_gmfn_foreign )
757 extra.res0 = 0;
758 else
759 extra.foreign_domid = DOMID_INVALID;
760
761 if ( xatp->space != XENMAPSPACE_gmfn_range )
762 return xenmem_add_to_physmap_one(d, xatp->space, extra,
763 xatp->idx, _gfn(xatp->gpfn));
764
765 if ( xatp->size < start )
766 return -EILSEQ;
767
768 xatp->idx += start;
769 xatp->gpfn += start;
770 xatp->size -= start;
771
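/*
 * Suppress per-page IOTLB flushes while mapping the range; the
 * flushes are batched and issued once after the loop below.
 */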
772 #ifdef CONFIG_HAS_PASSTHROUGH
773 if ( need_iommu(d) )
774 this_cpu(iommu_dont_flush_iotlb) = 1;
775 #endif
776
777 while ( xatp->size > done )
778 {
779 rc = xenmem_add_to_physmap_one(d, xatp->space, extra,
780 xatp->idx, _gfn(xatp->gpfn));
781 if ( rc < 0 )
782 break;
783
784 xatp->idx++;
785 xatp->gpfn++;
786
787 /* Check for continuation if it's not the last iteration. */
788 if ( xatp->size > ++done && hypercall_preempt_check() )
789 {
790 rc = start + done;
791 break;
792 }
793 }
794
795 #ifdef CONFIG_HAS_PASSTHROUGH
796 if ( need_iommu(d) )
797 {
798 int ret;
799
800 this_cpu(iommu_dont_flush_iotlb) = 0;
801
802 ret = iommu_iotlb_flush(d, xatp->idx - done, done);
803 if ( unlikely(ret) && rc >= 0 )
804 rc = ret;
805
806 ret = iommu_iotlb_flush(d, xatp->gpfn - done, done);
807 if ( unlikely(ret) && rc >= 0 )
808 rc = ret;
809 }
810 #endif
811
812 return rc;
813 }
814
815 static int xenmem_add_to_physmap_batch(struct domain *d,
816 struct xen_add_to_physmap_batch *xatpb,
817 unsigned int start)
818 {
819 unsigned int done = 0;
820 int rc;
821
822 if ( xatpb->size < start )
823 return -EILSEQ;
824
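/*
 * Resume from the continuation point: advance all three guest handles
 * past the entries already processed.
 */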
825 guest_handle_add_offset(xatpb->idxs, start);
826 guest_handle_add_offset(xatpb->gpfns, start);
827 guest_handle_add_offset(xatpb->errs, start);
828 xatpb->size -= start;
829
830 if ( !guest_handle_okay(xatpb->idxs, xatpb->size) ||
831 !guest_handle_okay(xatpb->gpfns, xatpb->size) ||
832 !guest_handle_okay(xatpb->errs, xatpb->size) )
833 return -EFAULT;
834
835 while ( xatpb->size > done )
836 {
837 xen_ulong_t idx;
838 xen_pfn_t gpfn;
839
840 if ( unlikely(__copy_from_guest_offset(&idx, xatpb->idxs, 0, 1)) )
841 {
842 rc = -EFAULT;
843 goto out;
844 }
845
846 if ( unlikely(__copy_from_guest_offset(&gpfn, xatpb->gpfns, 0, 1)) )
847 {
848 rc = -EFAULT;
849 goto out;
850 }
851
852 rc = xenmem_add_to_physmap_one(d, xatpb->space,
853 xatpb->u,
854 idx, _gfn(gpfn));
855
856 if ( unlikely(__copy_to_guest_offset(xatpb->errs, 0, &rc, 1)) )
857 {
858 rc = -EFAULT;
859 goto out;
860 }
861
862 guest_handle_add_offset(xatpb->idxs, 1);
863 guest_handle_add_offset(xatpb->gpfns, 1);
864 guest_handle_add_offset(xatpb->errs, 1);
865
866 /* Check for continuation if it's not the last iteration. */
867 if ( xatpb->size > ++done && hypercall_preempt_check() )
868 {
869 rc = start + done;
870 goto out;
871 }
872 }
873
874 rc = 0;
875
876 out:
877 return rc;
878 }
879
880 static int construct_memop_from_reservation(
881 const struct xen_memory_reservation *r,
882 struct memop_args *a)
883 {
884 unsigned int address_bits;
885
886 a->extent_list = r->extent_start;
887 a->nr_extents = r->nr_extents;
888 a->extent_order = r->extent_order;
889 a->memflags = 0;
890
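/*
 * Only apply an address-width restriction if it actually constrains
 * the allocation; a width that cannot even cover a single page is
 * rejected.
 */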
891 address_bits = XENMEMF_get_address_bits(r->mem_flags);
892 if ( (address_bits != 0) &&
893 (address_bits < (get_order_from_pages(max_page) + PAGE_SHIFT)) )
894 {
895 if ( address_bits <= PAGE_SHIFT )
896 return -EINVAL;
897 a->memflags = MEMF_bits(address_bits);
898 }
899
900 if ( r->mem_flags & XENMEMF_vnode )
901 {
902 nodeid_t vnode, pnode;
903 struct domain *d = a->domain;
904
905 read_lock(&d->vnuma_rwlock);
906 if ( d->vnuma )
907 {
908 vnode = XENMEMF_get_node(r->mem_flags);
909 if ( vnode >= d->vnuma->nr_vnodes )
910 {
911 read_unlock(&d->vnuma_rwlock);
912 return -EINVAL;
913 }
914
915 pnode = d->vnuma->vnode_to_pnode[vnode];
916 if ( pnode != NUMA_NO_NODE )
917 {
918 a->memflags |= MEMF_node(pnode);
919 if ( r->mem_flags & XENMEMF_exact_node_request )
920 a->memflags |= MEMF_exact_node;
921 }
922 }
923 read_unlock(&d->vnuma_rwlock);
924 }
925 else if ( unlikely(!propagate_node(r->mem_flags, &a->memflags)) )
926 return -EINVAL;
927
928 return 0;
929 }
930
931 #ifdef CONFIG_HAS_PASSTHROUGH
932 struct get_reserved_device_memory {
933 struct xen_reserved_device_memory_map map;
934 unsigned int used_entries;
935 };
936
937 static int get_reserved_device_memory(xen_pfn_t start, xen_ulong_t nr,
938 u32 id, void *ctxt)
939 {
940 struct get_reserved_device_memory *grdm = ctxt;
941 u32 sbdf = PCI_SBDF3(grdm->map.dev.pci.seg, grdm->map.dev.pci.bus,
942 grdm->map.dev.pci.devfn);
943
944 if ( !(grdm->map.flags & XENMEM_RDM_ALL) && (sbdf != id) )
945 return 0;
946
947 if ( grdm->used_entries < grdm->map.nr_entries )
948 {
949 struct xen_reserved_device_memory rdm = {
950 .start_pfn = start, .nr_pages = nr
951 };
952
953 if ( __copy_to_guest_offset(grdm->map.buffer, grdm->used_entries,
954 &rdm, 1) )
955 return -EFAULT;
956 }
957
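/*
 * Count the entry even when the guest's buffer is already full, so the
 * caller can report the required number of entries via nr_entries.
 */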
958 ++grdm->used_entries;
959
960 return 1;
961 }
962 #endif
963
964 static long xatp_permission_check(struct domain *d, unsigned int space)
965 {
966 /*
967 * XENMAPSPACE_dev_mmio mappings are only supported for the hardware
968 * domain, and only when it maps this kind of space into itself.
969 */
970 if ( (space == XENMAPSPACE_dev_mmio) &&
971 (!is_hardware_domain(current->domain) || (d != current->domain)) )
972 return -EACCES;
973
974 return xsm_add_to_physmap(XSM_TARGET, current->domain, d);
975 }
976
977 long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
978 {
979 struct domain *d, *curr_d = current->domain;
980 long rc;
981 struct xen_memory_reservation reservation;
982 struct memop_args args;
983 domid_t domid;
984 unsigned long start_extent = cmd >> MEMOP_EXTENT_SHIFT;
985 int op = cmd & MEMOP_CMD_MASK;
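/*
 * Continuations encode their restart point in the upper bits of cmd,
 * e.g. a continuation created below as
 *     op | (rc << MEMOP_EXTENT_SHIFT)
 * resumes with start_extent == rc.
 */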
986
987 switch ( op )
988 {
989 case XENMEM_increase_reservation:
990 case XENMEM_decrease_reservation:
991 case XENMEM_populate_physmap:
992 if ( copy_from_guest(&reservation, arg, 1) )
993 return start_extent;
994
995 /* Is size too large for us to encode a continuation? */
996 if ( reservation.nr_extents > (UINT_MAX >> MEMOP_EXTENT_SHIFT) )
997 return start_extent;
998
999 if ( unlikely(start_extent >= reservation.nr_extents) )
1000 return start_extent;
1001
1002 d = rcu_lock_domain_by_any_id(reservation.domid);
1003 if ( d == NULL )
1004 return start_extent;
1005 args.domain = d;
1006
1007 if ( construct_memop_from_reservation(&reservation, &args) )
1008 {
1009 rcu_unlock_domain(d);
1010 return start_extent;
1011 }
1012
1013 args.nr_done = start_extent;
1014 args.preempted = 0;
1015
1016 if ( op == XENMEM_populate_physmap
1017 && (reservation.mem_flags & XENMEMF_populate_on_demand) )
1018 args.memflags |= MEMF_populate_on_demand;
1019
1020 if ( xsm_memory_adjust_reservation(XSM_TARGET, curr_d, d) )
1021 {
1022 rcu_unlock_domain(d);
1023 return start_extent;
1024 }
1025
1026 #ifdef CONFIG_X86
1027 if ( pv_shim && op != XENMEM_decrease_reservation && !args.preempted )
1028 /* Avoid calling pv_shim_online_memory when preempted. */
1029 pv_shim_online_memory(args.nr_extents, args.extent_order);
1030 #endif
1031
1032 switch ( op )
1033 {
1034 case XENMEM_increase_reservation:
1035 increase_reservation(&args);
1036 break;
1037 case XENMEM_decrease_reservation:
1038 decrease_reservation(&args);
1039 break;
1040 default: /* XENMEM_populate_physmap */
1041 populate_physmap(&args);
1042 break;
1043 }
1044
1045 rcu_unlock_domain(d);
1046
1047 rc = args.nr_done;
1048
1049 if ( args.preempted )
1050 return hypercall_create_continuation(
1051 __HYPERVISOR_memory_op, "lh",
1052 op | (rc << MEMOP_EXTENT_SHIFT), arg);
1053
1054 #ifdef CONFIG_X86
1055 if ( pv_shim && op == XENMEM_decrease_reservation )
1056 /*
1057 * Only call pv_shim_offline_memory when the hypercall has
1058 * finished. Note that nr_done is used to cope in case the
1059 * hypercall has failed and only part of the extents were
1060 * processed.
1061 */
1062 pv_shim_offline_memory(args.nr_extents, args.nr_done);
1063 #endif
1064
1065 break;
1066
1067 case XENMEM_exchange:
1068 if ( unlikely(start_extent) )
1069 return -EINVAL;
1070
1071 rc = memory_exchange(guest_handle_cast(arg, xen_memory_exchange_t));
1072 break;
1073
1074 case XENMEM_maximum_ram_page:
1075 if ( unlikely(start_extent) )
1076 return -EINVAL;
1077
1078 rc = max_page;
1079 break;
1080
1081 case XENMEM_current_reservation:
1082 case XENMEM_maximum_reservation:
1083 case XENMEM_maximum_gpfn:
1084 if ( unlikely(start_extent) )
1085 return -EINVAL;
1086
1087 if ( copy_from_guest(&domid, arg, 1) )
1088 return -EFAULT;
1089
1090 d = rcu_lock_domain_by_any_id(domid);
1091 if ( d == NULL )
1092 return -ESRCH;
1093
1094 rc = xsm_memory_stat_reservation(XSM_TARGET, curr_d, d);
1095 if ( rc )
1096 {
1097 rcu_unlock_domain(d);
1098 return rc;
1099 }
1100
1101 switch ( op )
1102 {
1103 case XENMEM_current_reservation:
1104 rc = d->tot_pages;
1105 break;
1106 case XENMEM_maximum_reservation:
1107 rc = d->max_pages;
1108 break;
1109 default:
1110 ASSERT(op == XENMEM_maximum_gpfn);
1111 rc = domain_get_maximum_gpfn(d);
1112 break;
1113 }
1114
1115 rcu_unlock_domain(d);
1116
1117 break;
1118
1119 case XENMEM_add_to_physmap:
1120 {
1121 struct xen_add_to_physmap xatp;
1122
1123 BUILD_BUG_ON((typeof(xatp.size))-1 > (UINT_MAX >> MEMOP_EXTENT_SHIFT));
1124
1125 /* Check for malicious or buggy input. */
1126 if ( start_extent != (typeof(xatp.size))start_extent )
1127 return -EDOM;
1128
1129 if ( copy_from_guest(&xatp, arg, 1) )
1130 return -EFAULT;
1131
1132 /* Foreign mapping is only possible via add_to_physmap_batch. */
1133 if ( xatp.space == XENMAPSPACE_gmfn_foreign )
1134 return -ENOSYS;
1135
1136 d = rcu_lock_domain_by_any_id(xatp.domid);
1137 if ( d == NULL )
1138 return -ESRCH;
1139
1140 rc = xatp_permission_check(d, xatp.space);
1141 if ( rc )
1142 {
1143 rcu_unlock_domain(d);
1144 return rc;
1145 }
1146
1147 rc = xenmem_add_to_physmap(d, &xatp, start_extent);
1148
1149 rcu_unlock_domain(d);
1150
1151 if ( xatp.space == XENMAPSPACE_gmfn_range && rc > 0 )
1152 rc = hypercall_create_continuation(
1153 __HYPERVISOR_memory_op, "lh",
1154 op | (rc << MEMOP_EXTENT_SHIFT), arg);
1155
1156 return rc;
1157 }
1158
1159 case XENMEM_add_to_physmap_batch:
1160 {
1161 struct xen_add_to_physmap_batch xatpb;
1162
1163 BUILD_BUG_ON((typeof(xatpb.size))-1 >
1164 (UINT_MAX >> MEMOP_EXTENT_SHIFT));
1165
1166 /* Check for malicious or buggy input. */
1167 if ( start_extent != (typeof(xatpb.size))start_extent )
1168 return -EDOM;
1169
1170 if ( copy_from_guest(&xatpb, arg, 1) )
1171 return -EFAULT;
1172
1173 /* This mapspace is unsupported for this hypercall. */
1174 if ( xatpb.space == XENMAPSPACE_gmfn_range )
1175 return -EOPNOTSUPP;
1176
1177 d = rcu_lock_domain_by_any_id(xatpb.domid);
1178 if ( d == NULL )
1179 return -ESRCH;
1180
1181 rc = xatp_permission_check(d, xatpb.space);
1182 if ( rc )
1183 {
1184 rcu_unlock_domain(d);
1185 return rc;
1186 }
1187
1188 rc = xenmem_add_to_physmap_batch(d, &xatpb, start_extent);
1189
1190 rcu_unlock_domain(d);
1191
1192 if ( rc > 0 )
1193 rc = hypercall_create_continuation(
1194 __HYPERVISOR_memory_op, "lh",
1195 op | (rc << MEMOP_EXTENT_SHIFT), arg);
1196
1197 return rc;
1198 }
1199
1200 case XENMEM_remove_from_physmap:
1201 {
1202 struct xen_remove_from_physmap xrfp;
1203 struct page_info *page;
1204
1205 if ( unlikely(start_extent) )
1206 return -EINVAL;
1207
1208 if ( copy_from_guest(&xrfp, arg, 1) )
1209 return -EFAULT;
1210
1211 d = rcu_lock_domain_by_any_id(xrfp.domid);
1212 if ( d == NULL )
1213 return -ESRCH;
1214
1215 rc = xsm_remove_from_physmap(XSM_TARGET, curr_d, d);
1216 if ( rc )
1217 {
1218 rcu_unlock_domain(d);
1219 return rc;
1220 }
1221
1222 page = get_page_from_gfn(d, xrfp.gpfn, NULL, P2M_ALLOC);
1223 if ( page )
1224 {
1225 rc = guest_physmap_remove_page(d, _gfn(xrfp.gpfn),
1226 _mfn(page_to_mfn(page)), 0);
1227 put_page(page);
1228 }
1229 else
1230 rc = -ENOENT;
1231
1232 rcu_unlock_domain(d);
1233
1234 break;
1235 }
1236
1237 case XENMEM_access_op:
1238 rc = mem_access_memop(cmd, guest_handle_cast(arg, xen_mem_access_op_t));
1239 break;
1240
1241 case XENMEM_claim_pages:
1242 if ( unlikely(start_extent) )
1243 return -EINVAL;
1244
1245 if ( copy_from_guest(&reservation, arg, 1) )
1246 return -EFAULT;
1247
1248 if ( !guest_handle_is_null(reservation.extent_start) )
1249 return -EINVAL;
1250
1251 if ( reservation.extent_order != 0 )
1252 return -EINVAL;
1253
1254 if ( reservation.mem_flags != 0 )
1255 return -EINVAL;
1256
1257 d = rcu_lock_domain_by_id(reservation.domid);
1258 if ( d == NULL )
1259 return -EINVAL;
1260
1261 rc = xsm_claim_pages(XSM_PRIV, d);
1262
1263 if ( !rc )
1264 rc = domain_set_outstanding_pages(d, reservation.nr_extents);
1265
1266 rcu_unlock_domain(d);
1267
1268 break;
1269
1270 case XENMEM_get_vnumainfo:
1271 {
1272 struct xen_vnuma_topology_info topology;
1273 unsigned int dom_vnodes, dom_vranges, dom_vcpus;
1274 struct vnuma_info tmp;
1275
1276 if ( unlikely(start_extent) )
1277 return -EINVAL;
1278
1279 /*
1280 * The guest passes nr_vnodes, the number of memory ranges and
1281 * nr_vcpus, so we know how much buffer space it has allocated.
1282 */
1283 if ( copy_from_guest(&topology, arg, 1) )
1284 return -EFAULT;
1285
1286 if ( topology.pad != 0 )
1287 return -EINVAL;
1288
1289 if ( (d = rcu_lock_domain_by_any_id(topology.domid)) == NULL )
1290 return -ESRCH;
1291
1292 rc = xsm_get_vnumainfo(XSM_TARGET, d);
1293 if ( rc )
1294 {
1295 rcu_unlock_domain(d);
1296 return rc;
1297 }
1298
1299 read_lock(&d->vnuma_rwlock);
1300
1301 if ( d->vnuma == NULL )
1302 {
1303 read_unlock(&d->vnuma_rwlock);
1304 rcu_unlock_domain(d);
1305 return -EOPNOTSUPP;
1306 }
1307
1308 dom_vnodes = d->vnuma->nr_vnodes;
1309 dom_vranges = d->vnuma->nr_vmemranges;
1310 dom_vcpus = d->max_vcpus;
1311
1312 /*
1313 * The values copied from the guest may differ from the domain's
1314 * vnuma configuration. Check the guest's parameters here to make
1315 * sure we don't overflow the buffers it provided.
1316 */
1317 if ( topology.nr_vnodes < dom_vnodes ||
1318 topology.nr_vcpus < dom_vcpus ||
1319 topology.nr_vmemranges < dom_vranges )
1320 {
1321 read_unlock(&d->vnuma_rwlock);
1322 rcu_unlock_domain(d);
1323
1324 topology.nr_vnodes = dom_vnodes;
1325 topology.nr_vcpus = dom_vcpus;
1326 topology.nr_vmemranges = dom_vranges;
1327
1328 /* Copy back needed values. */
1329 return __copy_to_guest(arg, &topology, 1) ? -EFAULT : -ENOBUFS;
1330 }
1331
1332 read_unlock(&d->vnuma_rwlock);
1333
1334 tmp.vdistance = xmalloc_array(unsigned int, dom_vnodes * dom_vnodes);
1335 tmp.vmemrange = xmalloc_array(xen_vmemrange_t, dom_vranges);
1336 tmp.vcpu_to_vnode = xmalloc_array(unsigned int, dom_vcpus);
1337
1338 if ( tmp.vdistance == NULL ||
1339 tmp.vmemrange == NULL ||
1340 tmp.vcpu_to_vnode == NULL )
1341 {
1342 rc = -ENOMEM;
1343 goto vnumainfo_out;
1344 }
1345
1346 /*
1347 * Check whether the vnuma info changed in the meantime, in which
1348 * case the arrays just allocated may no longer be big enough.
1349 */
1350 read_lock(&d->vnuma_rwlock);
1351
1352 if ( dom_vnodes < d->vnuma->nr_vnodes ||
1353 dom_vranges < d->vnuma->nr_vmemranges ||
1354 dom_vcpus < d->max_vcpus )
1355 {
1356 read_unlock(&d->vnuma_rwlock);
1357 rc = -EAGAIN;
1358 goto vnumainfo_out;
1359 }
1360
1361 dom_vnodes = d->vnuma->nr_vnodes;
1362 dom_vranges = d->vnuma->nr_vmemranges;
1363 dom_vcpus = d->max_vcpus;
1364
1365 memcpy(tmp.vmemrange, d->vnuma->vmemrange,
1366 sizeof(*d->vnuma->vmemrange) * dom_vranges);
1367 memcpy(tmp.vdistance, d->vnuma->vdistance,
1368 sizeof(*d->vnuma->vdistance) * dom_vnodes * dom_vnodes);
1369 memcpy(tmp.vcpu_to_vnode, d->vnuma->vcpu_to_vnode,
1370 sizeof(*d->vnuma->vcpu_to_vnode) * dom_vcpus);
1371
1372 read_unlock(&d->vnuma_rwlock);
1373
1374 rc = -EFAULT;
1375
1376 if ( copy_to_guest(topology.vmemrange.h, tmp.vmemrange,
1377 dom_vranges) != 0 )
1378 goto vnumainfo_out;
1379
1380 if ( copy_to_guest(topology.vdistance.h, tmp.vdistance,
1381 dom_vnodes * dom_vnodes) != 0 )
1382 goto vnumainfo_out;
1383
1384 if ( copy_to_guest(topology.vcpu_to_vnode.h, tmp.vcpu_to_vnode,
1385 dom_vcpus) != 0 )
1386 goto vnumainfo_out;
1387
1388 topology.nr_vnodes = dom_vnodes;
1389 topology.nr_vcpus = dom_vcpus;
1390 topology.nr_vmemranges = dom_vranges;
1391
1392 rc = __copy_to_guest(arg, &topology, 1) ? -EFAULT : 0;
1393
1394 vnumainfo_out:
1395 rcu_unlock_domain(d);
1396
1397 xfree(tmp.vdistance);
1398 xfree(tmp.vmemrange);
1399 xfree(tmp.vcpu_to_vnode);
1400 break;
1401 }
1402
1403 #ifdef CONFIG_HAS_PASSTHROUGH
1404 case XENMEM_reserved_device_memory_map:
1405 {
1406 struct get_reserved_device_memory grdm;
1407
1408 if ( unlikely(start_extent) )
1409 return -EINVAL;
1410
1411 if ( copy_from_guest(&grdm.map, arg, 1) ||
1412 !guest_handle_okay(grdm.map.buffer, grdm.map.nr_entries) )
1413 return -EFAULT;
1414
1415 if ( grdm.map.flags & ~XENMEM_RDM_ALL )
1416 return -EINVAL;
1417
1418 grdm.used_entries = 0;
1419 rc = iommu_get_reserved_device_memory(get_reserved_device_memory,
1420 &grdm);
1421
1422 if ( !rc && grdm.map.nr_entries < grdm.used_entries )
1423 rc = -ENOBUFS;
1424 grdm.map.nr_entries = grdm.used_entries;
1425 if ( __copy_to_guest(arg, &grdm.map, 1) )
1426 rc = -EFAULT;
1427
1428 break;
1429 }
1430 #endif
1431
1432 default:
1433 rc = arch_memory_op(cmd, arg);
1434 break;
1435 }
1436
1437 return rc;
1438 }
1439
1440 void clear_domain_page(mfn_t mfn)
1441 {
1442 void *ptr = map_domain_page(mfn);
1443
1444 clear_page(ptr);
1445 unmap_domain_page(ptr);
1446 }
1447
1448 void copy_domain_page(mfn_t dest, mfn_t source)
1449 {
1450 const void *src = map_domain_page(source);
1451 void *dst = map_domain_page(dest);
1452
1453 copy_page(dst, src);
1454 unmap_domain_page(dst);
1455 unmap_domain_page(src);
1456 }
1457
1458 void destroy_ring_for_helper(
1459 void **_va, struct page_info *page)
1460 {
1461 void *va = *_va;
1462
1463 if ( va != NULL )
1464 {
1465 unmap_domain_page_global(va);
1466 put_page_and_type(page);
1467 *_va = NULL;
1468 }
1469 }
1470
1471 int prepare_ring_for_helper(
1472 struct domain *d, unsigned long gmfn, struct page_info **_page,
1473 void **_va)
1474 {
1475 struct page_info *page;
1476 p2m_type_t p2mt;
1477 void *va;
1478
1479 page = get_page_from_gfn(d, gmfn, &p2mt, P2M_UNSHARE);
1480
1481 #ifdef CONFIG_HAS_MEM_PAGING
1482 if ( p2m_is_paging(p2mt) )
1483 {
1484 if ( page )
1485 put_page(page);
1486 p2m_mem_paging_populate(d, gmfn);
1487 return -ENOENT;
1488 }
1489 #endif
1490 #ifdef CONFIG_HAS_MEM_SHARING
1491 if ( p2m_is_shared(p2mt) )
1492 {
1493 if ( page )
1494 put_page(page);
1495 return -ENOENT;
1496 }
1497 #endif
1498
1499 if ( !page )
1500 return -EINVAL;
1501
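/*
 * Take a writable type reference so the page cannot be turned into,
 * e.g., a page table or otherwise become read-only while the ring
 * mapping exists.
 */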
1502 if ( !get_page_type(page, PGT_writable_page) )
1503 {
1504 put_page(page);
1505 return -EINVAL;
1506 }
1507
1508 va = __map_domain_page_global(page);
1509 if ( va == NULL )
1510 {
1511 put_page_and_type(page);
1512 return -ENOMEM;
1513 }
1514
1515 *_va = va;
1516 *_page = page;
1517
1518 return 0;
1519 }
1520
1521 /*
1522 * Local variables:
1523 * mode: C
1524 * c-file-style: "BSD"
1525 * c-basic-offset: 4
1526 * tab-width: 4
1527 * indent-tabs-mode: nil
1528 * End:
1529 */
1530