/******************************************************************************
 * xc_offline_page.c
 *
 * Helper functions to offline/online one page
 *
 * Copyright (c) 2003, K A Fraser.
 * Copyright (c) 2009, Intel Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; If not, see <http://www.gnu.org/licenses/>.
 */

#include <inttypes.h>
#include <time.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/time.h>
#include <xc_core.h>

#include "xc_private.h"
#include "xc_dom.h"
#include "xg_private.h"
#include "xg_save_restore.h"

struct pte_backup_entry
{
    xen_pfn_t table_mfn;
    int offset;
};

#define DEFAULT_BACKUP_COUNT 1024
struct pte_backup
{
    struct pte_backup_entry *entries;
    int max;
    int cur;
};

static struct domain_info_context _dinfo;
static struct domain_info_context *dinfo = &_dinfo;

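/*
 * The next three functions are thin wrappers around the
 * XEN_SYSCTL_page_offline_op sysctl. Each takes an inclusive [start, end]
 * MFN range and a caller-supplied status array with one uint32_t slot per
 * page; the array is bounced in and out of hypercall memory. A usage
 * sketch, assuming an open xc_interface handle `xch`:
 *
 *     uint32_t status[4];
 *     if ( xc_mark_page_offline(xch, mfn, mfn + 3, status) )
 *         PERROR("marking pages offline");
 */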
int xc_mark_page_online(xc_interface *xch, unsigned long start,
                        unsigned long end, uint32_t *status)
{
    DECLARE_SYSCTL;
    DECLARE_HYPERCALL_BOUNCE(status, sizeof(uint32_t)*(end - start + 1), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    int ret = -1;

    if ( !status || (end < start) )
    {
        errno = EINVAL;
        return -1;
    }
    if ( xc_hypercall_bounce_pre(xch, status) )
    {
        ERROR("Could not bounce memory for xc_mark_page_online\n");
        return -1;
    }

    sysctl.cmd = XEN_SYSCTL_page_offline_op;
    sysctl.u.page_offline.start = start;
    sysctl.u.page_offline.cmd = sysctl_page_online;
    sysctl.u.page_offline.end = end;
    set_xen_guest_handle(sysctl.u.page_offline.status, status);
    ret = xc_sysctl(xch, &sysctl);

    xc_hypercall_bounce_post(xch, status);

    return ret;
}

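/* Request that Xen take the MFNs in [start, end] offline; per-page results
 * are reported back through the status array. */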
int xc_mark_page_offline(xc_interface *xch, unsigned long start,
                         unsigned long end, uint32_t *status)
{
    DECLARE_SYSCTL;
    DECLARE_HYPERCALL_BOUNCE(status, sizeof(uint32_t)*(end - start + 1), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    int ret = -1;

    if ( !status || (end < start) )
    {
        errno = EINVAL;
        return -1;
    }
    if ( xc_hypercall_bounce_pre(xch, status) )
    {
        ERROR("Could not bounce memory for xc_mark_page_offline");
        return -1;
    }

    sysctl.cmd = XEN_SYSCTL_page_offline_op;
    sysctl.u.page_offline.start = start;
    sysctl.u.page_offline.cmd = sysctl_page_offline;
    sysctl.u.page_offline.end = end;
    set_xen_guest_handle(sysctl.u.page_offline.status, status);
    ret = xc_sysctl(xch, &sysctl);

    xc_hypercall_bounce_post(xch, status);

    return ret;
}

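/* Query the offline state of [start, end]; the returned status bits are the
 * PG_OFFLINE_STATUS_* values tested elsewhere in this file. */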
int xc_query_page_offline_status(xc_interface *xch, unsigned long start,
                                 unsigned long end, uint32_t *status)
{
    DECLARE_SYSCTL;
    DECLARE_HYPERCALL_BOUNCE(status, sizeof(uint32_t)*(end - start + 1), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    int ret = -1;

    if ( !status || (end < start) )
    {
        errno = EINVAL;
        return -1;
    }
    if ( xc_hypercall_bounce_pre(xch, status) )
    {
        ERROR("Could not bounce memory for xc_query_page_offline_status\n");
        return -1;
    }

    sysctl.cmd = XEN_SYSCTL_page_offline_op;
    sysctl.u.page_offline.start = start;
    sysctl.u.page_offline.cmd = sysctl_query_page_offline;
    sysctl.u.page_offline.end = end;
    set_xen_guest_handle(sysctl.u.page_offline.status, status);
    ret = xc_sysctl(xch, &sysctl);

    xc_hypercall_bounce_post(xch, status);

    return ret;
}

/*
 * There should be no updates to the grant table while the domain is paused.
 */
static int xc_is_page_granted_v1(xc_interface *xch, xen_pfn_t gpfn,
                                 grant_entry_v1_t *gnttab, int gnt_num)
{
    int i = 0;

    if (!gnttab)
        return 0;

    for (i = 0; i < gnt_num; i++)
        if ( ((gnttab[i].flags & GTF_type_mask) != GTF_invalid) &&
             (gnttab[i].frame == gpfn) )
            break;

    return (i != gnt_num);
}

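/* Same check against a v2 grant table, where the frame lives in the
 * full_page union member. */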
static int xc_is_page_granted_v2(xc_interface *xch, xen_pfn_t gpfn,
                                 grant_entry_v2_t *gnttab, int gnt_num)
{
    int i = 0;

    if (!gnttab)
        return 0;

    for (i = 0; i < gnt_num; i++)
        if ( ((gnttab[i].hdr.flags & GTF_type_mask) != GTF_invalid) &&
             (gnttab[i].full_page.frame == gpfn) )
            break;

    return (i != gnt_num);
}

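/*
 * Record one PTE location (page-table MFN plus entry offset) in the backup
 * list so a later pass can find and rewrite exactly the entries that were
 * cleared. The list starts at DEFAULT_BACKUP_COUNT entries and doubles on
 * demand.
 */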
static int backup_ptes(xen_pfn_t table_mfn, int offset,
                       struct pte_backup *backup)
{
    if (!backup)
        return -EINVAL;

    if (backup->max == backup->cur)
    {
        struct pte_backup_entry *entries;

        /* Grow via a temporary so a failed realloc doesn't leak the list */
        entries = realloc(backup->entries,
                          backup->max * 2 * sizeof(struct pte_backup_entry));
        if (entries == NULL)
            return -1;
        backup->entries = entries;
        backup->max *= 2;
    }

    backup->entries[backup->cur].table_mfn = table_mfn;
    backup->entries[backup->cur++].offset = offset;

    return 0;
}

/*
 * return:
 *      1 when an MMU update is required
 *      0 when no changes are needed
 *      <0 on error
 */
typedef int (*pte_func)(xc_interface *xch,
                        uint64_t pte, uint64_t *new_pte,
                        unsigned long table_mfn, int table_offset,
                        struct pte_backup *backup,
                        unsigned long no_use);

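/*
 * pte_func instance: if the PTE is present and points at the target MFN,
 * produce a copy with _PAGE_PRESENT cleared and remember where the entry
 * lives so __update_pte() can restore or redirect it later.
 */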
static int __clear_pte(xc_interface *xch,
                       uint64_t pte, uint64_t *new_pte,
                       unsigned long table_mfn, int table_offset,
                       struct pte_backup *backup,
                       unsigned long mfn)
{
    /* We need both an output PTE and a backup list to record the change */
    if (!new_pte || !backup)
        return -EINVAL;

    if ( !(pte & _PAGE_PRESENT))
        return 0;

    /* XXX Check for PSE bit here */
    /* Hit one entry */
    if ( ((pte >> PAGE_SHIFT_X86) & MFN_MASK_X86) == mfn)
    {
        *new_pte = pte & ~_PAGE_PRESENT;
        if (!backup_ptes(table_mfn, table_offset, backup))
            return 1;
    }

    return 0;
}

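/*
 * pte_func instance: if this table slot was saved by __clear_pte(), point
 * it at new_mfn and set _PAGE_PRESENT again.
 */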
static int __update_pte(xc_interface *xch,
                        uint64_t pte, uint64_t *new_pte,
                        unsigned long table_mfn, int table_offset,
                        struct pte_backup *backup,
                        unsigned long new_mfn)
{
    int index;

    if (!new_pte)
        return 0;

    for (index = 0; index < backup->cur; index++)
        if ( (backup->entries[index].table_mfn == table_mfn) &&
             (backup->entries[index].offset == table_offset) )
            break;

    if (index != backup->cur)
    {
        if (pte & _PAGE_PRESENT)
            ERROR("Page present while in backup ptes\n");
        pte &= ~MFN_MASK_X86;
        pte |= (new_mfn << PAGE_SHIFT_X86) | _PAGE_PRESENT;
        *new_pte = pte;
        return 1;
    }

    return 0;
}

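/*
 * Walk every page-table page of the (suspended) guest, apply `func` to each
 * PTE, and batch the resulting writes through the xc_mmu machinery. A
 * sketch of the traversal, with details elided:
 *
 *     for each pfn i in the p2m:
 *         if pfn_type[i] says it is a page-table page:
 *             map it read-only, run func() on each of its PTEs,
 *             queue MMU_PT_UPDATE_PRESERVE_AD updates for changed entries
 *     flush the queued updates
 */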
static int change_pte(xc_interface *xch, uint32_t domid,
                      struct xc_domain_meminfo *minfo,
                      struct pte_backup *backup,
                      struct xc_mmu *mmu,
                      pte_func func,
                      unsigned long data)
{
    int pte_num, rc;
    uint64_t i;
    void *content = NULL;

    pte_num = PAGE_SIZE / ((minfo->pt_levels == 2) ? 4 : 8);

    for (i = 0; i < minfo->p2m_size; i++)
    {
        xen_pfn_t table_mfn = xc_pfn_to_mfn(i, minfo->p2m_table,
                                            minfo->guest_width);
        uint64_t pte, new_pte;
        int j;

        if ( (table_mfn == INVALID_PFN) ||
             ((minfo->pfn_type[i] & XEN_DOMCTL_PFINFO_LTAB_MASK) ==
              XEN_DOMCTL_PFINFO_XTAB) )
            continue;

        if ( minfo->pfn_type[i] & XEN_DOMCTL_PFINFO_LTABTYPE_MASK )
        {
            content = xc_map_foreign_range(xch, domid, PAGE_SIZE,
                                           PROT_READ, table_mfn);
            if (!content)
                goto failed;

            for (j = 0; j < pte_num; j++)
            {
                if ( minfo->pt_levels == 2 )
                    pte = ((const uint32_t*)content)[j];
                else
                    pte = ((const uint64_t*)content)[j];

                rc = func(xch, pte, &new_pte, table_mfn, j, backup, data);

                switch (rc)
                {
                case 1:
                    if ( xc_add_mmu_update(xch, mmu,
                                           table_mfn << PAGE_SHIFT |
                                           j * ( (minfo->pt_levels == 2) ?
                                                 sizeof(uint32_t) : sizeof(uint64_t)) |
                                           MMU_PT_UPDATE_PRESERVE_AD,
                                           new_pte) )
                        goto failed;
                    break;

                case 0:
                    break;

                default:
                    goto failed;
                }
            }

            munmap(content, PAGE_SIZE);
            content = NULL;
        }
    }

    if ( xc_flush_mmu_updates(xch, mmu) )
        goto failed;

    return 0;
failed:
    /* XXX Shall we take action if we fail to swap? */
    if (content)
        munmap(content, PAGE_SIZE);

    return -1;
}

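/* Convenience wrappers selecting the pte_func to apply. */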
static int update_pte(xc_interface *xch, uint32_t domid,
                      struct xc_domain_meminfo *minfo,
                      struct pte_backup *backup,
                      struct xc_mmu *mmu,
                      unsigned long new_mfn)
{
    return change_pte(xch, domid, minfo, backup, mmu,
                      __update_pte, new_mfn);
}

static int clear_pte(xc_interface *xch, uint32_t domid,
                     struct xc_domain_meminfo *minfo,
                     struct pte_backup *backup,
                     struct xc_mmu *mmu,
                     xen_pfn_t mfn)
{
    return change_pte(xch, domid, minfo, backup, mmu,
                      __clear_pte, mfn);
}

/*
 * Check if a page can be exchanged successfully
 */

static int is_page_exchangable(xc_interface *xch, uint32_t domid, xen_pfn_t mfn,
                               xc_dominfo_t *info)
{
    uint32_t status = 0;
    int rc;

    /* domain checking */
    if ( !domid || (domid > DOMID_FIRST_RESERVED) )
    {
        DPRINTF("Can't exchange pages owned by Dom0 or reserved domains\n");
        return 0;
    }
    if (info->hvm)
    {
        DPRINTF("Currently we can only live-exchange a PV guest's pages\n");
        return 0;
    }

    /* Check if the page is offline pending or not */
    rc = xc_query_page_offline_status(xch, mfn, mfn, &status);

    if ( rc || !(status & PG_OFFLINE_STATUS_OFFLINE_PENDING) )
    {
        ERROR("Page %lx is not offline pending %x\n",
              mfn, status);
        return 0;
    }

    return 1;
}

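/*
 * Map the machine-to-physical table read-only by asking Xen for the list of
 * M2P frame numbers and mapping them as DOMID_XEN foreign ranges. Returns
 * the mapping (or NULL) and optionally the first chunk's MFN via mfn0; the
 * caller munmap()s M2P_SIZE(max_mfn) bytes when done.
 */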
xen_pfn_t *xc_map_m2p(xc_interface *xch,
                      unsigned long max_mfn,
                      int prot,
                      unsigned long *mfn0)
{
    privcmd_mmap_entry_t *entries;
    unsigned long m2p_chunks, m2p_size;
    xen_pfn_t *m2p;
    xen_pfn_t *extent_start;
    int i;

    m2p = NULL;
    m2p_size   = M2P_SIZE(max_mfn);
    m2p_chunks = M2P_CHUNKS(max_mfn);

    extent_start = calloc(m2p_chunks, sizeof(xen_pfn_t));
    if ( !extent_start )
    {
        ERROR("failed to allocate space for m2p mfns");
        goto err0;
    }

    if ( xc_machphys_mfn_list(xch, m2p_chunks, extent_start) )
    {
        PERROR("xc_get_m2p_mfns");
        goto err1;
    }

    entries = calloc(m2p_chunks, sizeof(privcmd_mmap_entry_t));
    if (entries == NULL)
    {
        ERROR("failed to allocate space for mmap entries");
        goto err1;
    }

    for ( i = 0; i < m2p_chunks; i++ )
        entries[i].mfn = extent_start[i];

    m2p = xc_map_foreign_ranges(xch, DOMID_XEN,
                                m2p_size, prot, M2P_CHUNK_SIZE,
                                entries, m2p_chunks);
    if (m2p == NULL)
    {
        PERROR("xc_mmap_foreign_ranges failed");
        goto err2;
    }

    if (mfn0)
        *mfn0 = entries[0].mfn;

err2:
    free(entries);
err1:
    free(extent_start);

err0:
    return m2p;
}

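/*
 * Exchange one (suspended, PV) guest page that is offline-pending for a
 * fresh MFN. The sequence, roughly:
 *
 *   1. map the M2P to translate mfn -> gpfn;
 *   2. refuse pages that are currently granted out, and don't touch a
 *      PAE guest's top-level (L3) table;
 *   3. unpin the page if it is pinned, and snapshot its contents;
 *   4. clear every PTE mapping the old MFN, backing up their locations;
 *   5. exchange the page via xc_domain_memory_exchange_pages(); on
 *      failure, restore the old PTEs;
 *   6. rewrite the backed-up PTEs to the new MFN, copy the contents back,
 *      fix the p2m entry, and re-pin if we unpinned.
 *
 * Returns 0 on success, -1 on failure, or -2 if the guest may have been
 * left inconsistent.
 */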
/* The domain should be suspended when called here */
int xc_exchange_page(xc_interface *xch, uint32_t domid, xen_pfn_t mfn)
{
    xc_dominfo_t info;
    struct xc_domain_meminfo minfo;
    struct xc_mmu *mmu = NULL;
    struct pte_backup old_ptes = {NULL, 0, 0};
    grant_entry_v1_t *gnttab_v1 = NULL;
    grant_entry_v2_t *gnttab_v2 = NULL;
    struct mmuext_op mops;
    int gnt_num, unpinned = 0;
    void *old_p, *backup = NULL;
    int rc, result = -1;
    uint32_t status;
    xen_pfn_t new_mfn, gpfn;
    xen_pfn_t *m2p_table;
    unsigned long max_mfn;

    if ( xc_domain_getinfo(xch, domid, 1, &info) != 1 )
    {
        ERROR("Could not get domain info");
        return -1;
    }

    if (!info.shutdown || info.shutdown_reason != SHUTDOWN_suspend)
    {
        errno = EINVAL;
        ERROR("Can't exchange page unless domain is suspended\n");
        return -1;
    }
    if (!is_page_exchangable(xch, domid, mfn, &info))
    {
        ERROR("Could not exchange page\n");
        return -1;
    }

    /* Map M2P and obtain gpfn */
    rc = xc_maximum_ram_page(xch, &max_mfn);
    if ( rc || !(m2p_table = xc_map_m2p(xch, max_mfn, PROT_READ, NULL)) )
    {
        PERROR("Failed to map live M2P table");
        return -1;
    }
    gpfn = m2p_table[mfn];

    /* Map domain's memory information */
    memset(&minfo, 0, sizeof(minfo));
    if ( xc_map_domain_meminfo(xch, domid, &minfo) )
    {
        PERROR("Could not map domain's memory information\n");
        goto failed;
    }

    /* For translation macros */
    dinfo->guest_width = minfo.guest_width;
    dinfo->p2m_size = minfo.p2m_size;

    /* Don't exchange CR3 for a PAE guest in a PAE host environment */
    if (minfo.guest_width > sizeof(long))
    {
        if ( (minfo.pfn_type[gpfn] & XEN_DOMCTL_PFINFO_LTABTYPE_MASK) ==
             XEN_DOMCTL_PFINFO_L3TAB )
            goto failed;
    }

    gnttab_v2 = xc_gnttab_map_table_v2(xch, domid, &gnt_num);
    if (!gnttab_v2)
    {
        gnttab_v1 = xc_gnttab_map_table_v1(xch, domid, &gnt_num);
        if (!gnttab_v1)
        {
            ERROR("Failed to map grant table\n");
            goto failed;
        }
    }

    if (gnttab_v1
        ? xc_is_page_granted_v1(xch, mfn, gnttab_v1, gnt_num)
        : xc_is_page_granted_v2(xch, mfn, gnttab_v2, gnt_num))
    {
        ERROR("Page %lx is granted now\n", mfn);
        goto failed;
    }

    /* allocate required data structures */
    backup = malloc(PAGE_SIZE);
    if (!backup)
    {
        ERROR("Failed to allocate backup pages pointer\n");
        goto failed;
    }

    old_ptes.max = DEFAULT_BACKUP_COUNT;
    old_ptes.entries = malloc(sizeof(struct pte_backup_entry) *
                              DEFAULT_BACKUP_COUNT);

    if (!old_ptes.entries)
    {
        ERROR("Failed to allocate backup\n");
        goto failed;
    }
    old_ptes.cur = 0;

    /* Unpin the page if it is pinned */
    if (minfo.pfn_type[gpfn] & XEN_DOMCTL_PFINFO_LPINTAB)
    {
        mops.cmd = MMUEXT_UNPIN_TABLE;
        mops.arg1.mfn = mfn;

        if ( xc_mmuext_op(xch, &mops, 1, domid) < 0 )
        {
            ERROR("Failed to unpin page %lx", mfn);
            goto failed;
        }
        unpinned = 1;
    }

    /* backup the content */
    old_p = xc_map_foreign_range(xch, domid, PAGE_SIZE,
                                 PROT_READ, mfn);
    if (!old_p)
    {
        ERROR("Failed to map foreign page %lx\n", mfn);
        goto failed;
    }

    memcpy(backup, old_p, PAGE_SIZE);
    munmap(old_p, PAGE_SIZE);

    mmu = xc_alloc_mmu_updates(xch, domid);
    if ( mmu == NULL )
    {
        ERROR("%s: failed at %d\n", __FUNCTION__, __LINE__);
        goto failed;
    }

    /* First invalidate all PTEs mapping the page, to drop the references */
    rc = clear_pte(xch, domid, &minfo, &old_ptes, mmu, mfn);

    if (rc)
    {
        ERROR("clear pte failed\n");
        goto failed;
    }

    rc = xc_domain_memory_exchange_pages(xch, domid,
                                         1, 0, &mfn,
                                         1, 0, &new_mfn);

    if (rc)
    {
        ERROR("Exchanging the page failed\n");
        /* Exchange failure means there are still references to the page */
        rc = update_pte(xch, domid, &minfo, &old_ptes, mmu, mfn);
        if (rc)
            result = -2;
        goto failed;
    }

    rc = update_pte(xch, domid, &minfo, &old_ptes, mmu, new_mfn);

    if (rc)
    {
        ERROR("update pte failed; guest may be broken now\n");
        /* No recovery action for a failed swap yet */
        result = -2;
        goto failed;
    }

    /* Check if the page is offlined already */
    rc = xc_query_page_offline_status(xch, mfn, mfn,
                                      &status);

    if (rc)
    {
        ERROR("Failed to query offline status\n");
    }
    else if ( !(status & PG_OFFLINE_STATUS_OFFLINED) )
    {
        ERROR("page is still online or pending\n");
        goto failed;
    }
    else
    {
        void *new_p;
        IPRINTF("Now page %lx is offlined\n", mfn);
        /* Update the p2m table */
        minfo.p2m_table[gpfn] = new_mfn;

        new_p = xc_map_foreign_range(xch, domid, PAGE_SIZE,
                                     PROT_READ|PROT_WRITE, new_mfn);
        if ( new_p == NULL )
        {
            ERROR("failed to map new_p for copy, guest may be broken?");
            goto failed;
        }
        memcpy(new_p, backup, PAGE_SIZE);
        munmap(new_p, PAGE_SIZE);
        mops.arg1.mfn = new_mfn;
        result = 0;
    }

 failed:

    if (unpinned && (minfo.pfn_type[gpfn] & XEN_DOMCTL_PFINFO_LPINTAB))
    {
        switch ( minfo.pfn_type[gpfn] & XEN_DOMCTL_PFINFO_LTABTYPE_MASK )
        {
        case XEN_DOMCTL_PFINFO_L1TAB:
            mops.cmd = MMUEXT_PIN_L1_TABLE;
            break;

        case XEN_DOMCTL_PFINFO_L2TAB:
            mops.cmd = MMUEXT_PIN_L2_TABLE;
            break;

        case XEN_DOMCTL_PFINFO_L3TAB:
            mops.cmd = MMUEXT_PIN_L3_TABLE;
            break;

        case XEN_DOMCTL_PFINFO_L4TAB:
            mops.cmd = MMUEXT_PIN_L4_TABLE;
            break;

        default:
            ERROR("Unpinned a non page-table page\n");
            break;
        }

        if ( xc_mmuext_op(xch, &mops, 1, domid) < 0 )
        {
            ERROR("failed to pin the mfn again\n");
            result = -2;
        }
    }

    free(mmu);

    free(old_ptes.entries);

    free(backup);

    /* The grant table occupies gnt_num entries' worth of whole pages */
    if (gnttab_v1)
        munmap(gnttab_v1,
               gnt_num / (PAGE_SIZE / sizeof(grant_entry_v1_t)) * PAGE_SIZE);
    if (gnttab_v2)
        munmap(gnttab_v2,
               gnt_num / (PAGE_SIZE / sizeof(grant_entry_v2_t)) * PAGE_SIZE);

    xc_unmap_domain_meminfo(xch, &minfo);
    munmap(m2p_table, M2P_SIZE(max_mfn));

    return result;
}