/******************************************************************************
 * xc_offline_page.c
 *
 * Helper functions to offline/online one page
 *
 * Copyright (c) 2003, K A Fraser.
 * Copyright (c) 2009, Intel Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; If not, see <http://www.gnu.org/licenses/>.
 */

#include <inttypes.h>
#include <time.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/time.h>
#include "xg_core.h"

#include "xc_private.h"
#include "xg_private.h"
#include "xg_save_restore.h"

struct pte_backup_entry
{
    xen_pfn_t table_mfn;
    int offset;
};

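/*
 * Growable log of PTE locations (page-table MFN plus entry offset)
 * that have been invalidated and may later need to be rewritten.
 */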
#define DEFAULT_BACKUP_COUNT 1024
struct pte_backup
{
    struct pte_backup_entry *entries;
    int max;
    int cur;
};

static struct domain_info_context _dinfo;
static struct domain_info_context *dinfo = &_dinfo;

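/*
 * Ask the hypervisor to bring the pages in [start, end] back online.
 * Per-page results are written to the caller-supplied status array,
 * which must hold (end - start + 1) entries.  Returns 0 on success,
 * -1 (with errno set) on failure.
 */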
int xc_mark_page_online(xc_interface *xch, unsigned long start,
                        unsigned long end, uint32_t *status)
{
    struct xen_sysctl sysctl = {};
    DECLARE_HYPERCALL_BOUNCE(status, sizeof(uint32_t)*(end - start + 1), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    int ret = -1;

    if ( !status || (end < start) )
    {
        errno = EINVAL;
        return -1;
    }
    if ( xc_hypercall_bounce_pre(xch, status) )
    {
        ERROR("Could not bounce memory for xc_mark_page_online\n");
        return -1;
    }

    sysctl.cmd = XEN_SYSCTL_page_offline_op;
    sysctl.u.page_offline.start = start;
    sysctl.u.page_offline.cmd = sysctl_page_online;
    sysctl.u.page_offline.end = end;
    set_xen_guest_handle(sysctl.u.page_offline.status, status);
    ret = xc_sysctl(xch, &sysctl);

    xc_hypercall_bounce_post(xch, status);

    return ret;
}

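/*
 * Ask the hypervisor to mark the pages in [start, end] offline (or
 * offline-pending while they are still in use).  Per-page results are
 * written to the caller-supplied status array of (end - start + 1)
 * entries.  Returns 0 on success, -1 (with errno set) on failure.
 */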
int xc_mark_page_offline(xc_interface *xch, unsigned long start,
                         unsigned long end, uint32_t *status)
{
    struct xen_sysctl sysctl = {};
    DECLARE_HYPERCALL_BOUNCE(status, sizeof(uint32_t)*(end - start + 1), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    int ret = -1;

    if ( !status || (end < start) )
    {
        errno = EINVAL;
        return -1;
    }
    if ( xc_hypercall_bounce_pre(xch, status) )
    {
        ERROR("Could not bounce memory for xc_mark_page_offline\n");
        return -1;
    }

    sysctl.cmd = XEN_SYSCTL_page_offline_op;
    sysctl.u.page_offline.start = start;
    sysctl.u.page_offline.cmd = sysctl_page_offline;
    sysctl.u.page_offline.end = end;
    set_xen_guest_handle(sysctl.u.page_offline.status, status);
    ret = xc_sysctl(xch, &sysctl);

    xc_hypercall_bounce_post(xch, status);

    return ret;
}

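/*
 * Query the offline status of the pages in [start, end] without
 * changing it.  Per-page PG_OFFLINE_STATUS_* flags are written to the
 * caller-supplied status array of (end - start + 1) entries.
 */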
int xc_query_page_offline_status(xc_interface *xch, unsigned long start,
                                 unsigned long end, uint32_t *status)
{
    struct xen_sysctl sysctl = {};
    DECLARE_HYPERCALL_BOUNCE(status, sizeof(uint32_t)*(end - start + 1), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    int ret = -1;

    if ( !status || (end < start) )
    {
        errno = EINVAL;
        return -1;
    }
    if ( xc_hypercall_bounce_pre(xch, status) )
    {
        ERROR("Could not bounce memory for xc_query_page_offline_status\n");
        return -1;
    }

    sysctl.cmd = XEN_SYSCTL_page_offline_op;
    sysctl.u.page_offline.start = start;
    sysctl.u.page_offline.cmd = sysctl_query_page_offline;
    sysctl.u.page_offline.end = end;
    set_xen_guest_handle(sysctl.u.page_offline.status, status);
    ret = xc_sysctl(xch, &sysctl);

    xc_hypercall_bounce_post(xch, status);

    return ret;
}

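/*
 * Example usage (an illustrative sketch, not part of this library):
 * request that one page be offlined, then poll until the hypervisor
 * reports it is no longer pending:
 *
 *     uint32_t status;
 *
 *     if ( xc_mark_page_offline(xch, mfn, mfn, &status) )
 *         return -1;
 *     do {
 *         if ( xc_query_page_offline_status(xch, mfn, mfn, &status) )
 *             return -1;
 *     } while ( status & PG_OFFLINE_STATUS_OFFLINE_PENDING );
 *
 * A page still referenced by a guest stays offline-pending until the
 * reference goes away, e.g. via xc_exchange_page() below.
 */
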
/*
 * There should be no updates to the grant table while the domain
 * is paused.
 */
static int xc_is_page_granted_v1(xc_interface *xch, xen_pfn_t gpfn,
                                 grant_entry_v1_t *gnttab, int gnt_num)
{
    int i = 0;

    if (!gnttab)
        return 0;

    for (i = 0; i < gnt_num; i++)
        if ( ((gnttab[i].flags & GTF_type_mask) != GTF_invalid) &&
             (gnttab[i].frame == gpfn) )
            break;

    return (i != gnt_num);
}

static int xc_is_page_granted_v2(xc_interface *xch, xen_pfn_t gpfn,
                                 grant_entry_v2_t *gnttab, int gnt_num)
{
    int i = 0;

    if (!gnttab)
        return 0;

    for (i = 0; i < gnt_num; i++)
        if ( ((gnttab[i].hdr.flags & GTF_type_mask) != GTF_invalid) &&
             (gnttab[i].full_page.frame == gpfn) )
            break;

    return (i != gnt_num);
}

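/*
 * Record one PTE location (page-table MFN plus entry offset) so it can
 * be restored later.  The entry array is doubled on demand.  Returns 0
 * on success, a negative value on bad arguments or allocation failure.
 */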
static int backup_ptes(xen_pfn_t table_mfn, int offset,
                       struct pte_backup *backup)
{
    if (!backup)
        return -EINVAL;

    if (backup->max == backup->cur)
    {
        void *entries = realloc(backup->entries, backup->max * 2 *
                                sizeof(struct pte_backup_entry));

        if (entries == NULL)
            return -1;

        backup->entries = entries;
        backup->max *= 2;
    }

    backup->entries[backup->cur].table_mfn = table_mfn;
    backup->entries[backup->cur++].offset = offset;

    return 0;
}

/*
 * Callback applied to each PTE by change_pte().  The last argument is
 * callback-specific data.  Return values:
 *   1  - an MMU update is required (*new_pte holds the new entry)
 *   0  - no change is needed
 *  <0  - an error occurred
 */
typedef int (*pte_func)(xc_interface *xch,
                        uint64_t pte, uint64_t *new_pte,
                        unsigned long table_mfn, int table_offset,
                        struct pte_backup *backup,
                        unsigned long data);

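/*
 * pte_func: invalidate any PTE that maps 'mfn', remembering where it
 * was so __update_pte() can later point it at the replacement page.
 */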
static int __clear_pte(xc_interface *xch,
                       uint64_t pte, uint64_t *new_pte,
                       unsigned long table_mfn, int table_offset,
                       struct pte_backup *backup,
                       unsigned long mfn)
{
    /* Refuse to run without somewhere to store the new PTE and backup */
    if (!new_pte || !backup)
        return -EINVAL;

    if ( !(pte & _PAGE_PRESENT))
        return 0;

    /* XXX Check for PSE bit here */
    /* Hit one entry */
    if ( ((pte >> PAGE_SHIFT_X86) & MFN_MASK_X86) == mfn)
    {
        *new_pte = pte & ~_PAGE_PRESENT;
        if (!backup_ptes(table_mfn, table_offset, backup))
            return 1;
    }

    return 0;
}

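/*
 * pte_func: if this PTE location was saved by __clear_pte(), make it
 * present again, now pointing at 'new_mfn'.
 */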
static int __update_pte(xc_interface *xch,
                        uint64_t pte, uint64_t *new_pte,
                        unsigned long table_mfn, int table_offset,
                        struct pte_backup *backup,
                        unsigned long new_mfn)
{
    int index;

    if (!new_pte)
        return 0;

    for (index = 0; index < backup->cur; index++)
        if ( (backup->entries[index].table_mfn == table_mfn) &&
             (backup->entries[index].offset == table_offset) )
            break;

    if (index != backup->cur)
    {
        if (pte & _PAGE_PRESENT)
            ERROR("Page present while in backup ptes\n");
        pte &= ~MFN_MASK_X86;
        pte |= (new_mfn << PAGE_SHIFT_X86) | _PAGE_PRESENT;
        *new_pte = pte;
        return 1;
    }

    return 0;
}

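/*
 * Walk every page-table page of the (paused) domain and apply 'func'
 * to each PTE, batching any resulting writes through the MMU-update
 * interface and flushing them at the end.
 */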
static int change_pte(xc_interface *xch, uint32_t domid,
                      struct xc_domain_meminfo *minfo,
                      struct pte_backup *backup,
                      struct xc_mmu *mmu,
                      pte_func func,
                      unsigned long data)
{
    int pte_num, rc;
    uint64_t i;
    void *content = NULL;

    pte_num = PAGE_SIZE / ((minfo->pt_levels == 2) ? 4 : 8);

    for (i = 0; i < minfo->p2m_size; i++)
    {
        xen_pfn_t table_mfn = xc_pfn_to_mfn(i, minfo->p2m_table,
                                            minfo->guest_width);
        uint64_t pte, new_pte;
        int j;

        if ( (table_mfn == INVALID_PFN) ||
             ((minfo->pfn_type[i] & XEN_DOMCTL_PFINFO_LTAB_MASK) ==
              XEN_DOMCTL_PFINFO_XTAB) )
            continue;

        if ( minfo->pfn_type[i] & XEN_DOMCTL_PFINFO_LTABTYPE_MASK )
        {
            content = xc_map_foreign_range(xch, domid, PAGE_SIZE,
                                           PROT_READ, table_mfn);
            if (!content)
                goto failed;

            for (j = 0; j < pte_num; j++)
            {
                if ( minfo->pt_levels == 2 )
                    pte = ((const uint32_t*)content)[j];
                else
                    pte = ((const uint64_t*)content)[j];

                rc = func(xch, pte, &new_pte, table_mfn, j, backup, data);

                switch (rc)
                {
                    case 1:
                        if ( xc_add_mmu_update(xch, mmu,
                                 table_mfn << PAGE_SHIFT |
                                 j * ( (minfo->pt_levels == 2) ?
                                       sizeof(uint32_t) : sizeof(uint64_t)) |
                                 MMU_PT_UPDATE_PRESERVE_AD,
                                 new_pte) )
                            goto failed;
                        break;

                    case 0:
                        break;

                    default:
                        goto failed;
                }
            }

            munmap(content, PAGE_SIZE);
            content = NULL;
        }
    }

    if ( xc_flush_mmu_updates(xch, mmu) )
        goto failed;

    return 0;
failed:
    /* XXX Shall we take action if we fail to swap? */
    if (content)
        munmap(content, PAGE_SIZE);

    return -1;
}

static int update_pte(xc_interface *xch, uint32_t domid,
                      struct xc_domain_meminfo *minfo,
                      struct pte_backup *backup,
                      struct xc_mmu *mmu,
                      unsigned long new_mfn)
{
    return change_pte(xch, domid, minfo, backup, mmu,
                      __update_pte, new_mfn);
}

static int clear_pte(xc_interface *xch, uint32_t domid,
                     struct xc_domain_meminfo *minfo,
                     struct pte_backup *backup,
                     struct xc_mmu *mmu,
                     xen_pfn_t mfn)
{
    return change_pte(xch, domid, minfo, backup, mmu,
                      __clear_pte, mfn);
}

/*
 * Check if a page can be exchanged successfully
 */

static int is_page_exchangeable(xc_interface *xch, uint32_t domid, xen_pfn_t mfn,
                                xc_domaininfo_t *info)
{
    uint32_t status = 0;
    int rc;

    /* domain checking */
    if ( !domid || (domid > DOMID_FIRST_RESERVED) )
    {
        DPRINTF("Can't exchange a page of Dom0 or of a reserved domain\n");
        return 0;
    }
    if (info->flags & XEN_DOMINF_hvm_guest)
    {
        DPRINTF("Currently only a PV guest's pages can be exchanged live\n");
        return 0;
    }

    /* Check if the page is offline-pending */
    rc = xc_query_page_offline_status(xch, mfn, mfn, &status);

    if ( rc || !(status & PG_OFFLINE_STATUS_OFFLINE_PENDING) )
    {
        ERROR("Page %lx is not offline pending %x\n",
              mfn, status);
        return 0;
    }

    return 1;
}

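/*
 * Map the hypervisor's live machine-to-physical table into our address
 * space with the requested protection.  On success returns the mapping
 * and, if mfn0 is non-NULL, stores the MFN of the first chunk there;
 * returns NULL on failure.
 */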
xen_pfn_t *xc_map_m2p(xc_interface *xch,
                      unsigned long max_mfn,
                      int prot,
                      unsigned long *mfn0)
{
    privcmd_mmap_entry_t *entries;
    unsigned long m2p_chunks, m2p_size;
    xen_pfn_t *m2p;
    xen_pfn_t *extent_start;
    int i;

    m2p = NULL;
    m2p_size   = M2P_SIZE(max_mfn);
    m2p_chunks = M2P_CHUNKS(max_mfn);

    extent_start = calloc(m2p_chunks, sizeof(xen_pfn_t));
    if ( !extent_start )
    {
        ERROR("failed to allocate space for m2p mfns");
        goto err0;
    }

    if ( xc_machphys_mfn_list(xch, m2p_chunks, extent_start) )
    {
        PERROR("xc_machphys_mfn_list");
        goto err1;
    }

    entries = calloc(m2p_chunks, sizeof(privcmd_mmap_entry_t));
    if (entries == NULL)
    {
        ERROR("failed to allocate space for mmap entries");
        goto err1;
    }

    for ( i = 0; i < m2p_chunks; i++ )
        entries[i].mfn = extent_start[i];

    m2p = xc_map_foreign_ranges(xch, DOMID_XEN,
                                m2p_size, prot, M2P_CHUNK_SIZE,
                                entries, m2p_chunks);
    if (m2p == NULL)
    {
        PERROR("xc_map_foreign_ranges failed");
        goto err2;
    }

    if (mfn0)
        *mfn0 = entries[0].mfn;

err2:
    free(entries);
err1:
    free(extent_start);

err0:
    return m2p;
}

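/*
 * Swap an offline-pending MFN of a suspended PV domain for a fresh
 * page, so the old page can complete its offlining.  In outline:
 * verify the page is neither granted out nor a live CR3, save its
 * contents, unpin it if pinned, invalidate every PTE that maps it,
 * exchange it for a new MFN, rewrite the saved PTEs to point at the
 * new MFN, copy the contents back, and re-pin if needed.
 *
 * The domain must be suspended when this is called.  Returns 0 on
 * success, -1 on failure with the guest left intact, and -2 when the
 * recovery path also failed and the guest may be broken.
 */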
int xc_exchange_page(xc_interface *xch, uint32_t domid, xen_pfn_t mfn)
{
    xc_domaininfo_t info;
    struct xc_domain_meminfo minfo;
    struct xc_mmu *mmu = NULL;
    struct pte_backup old_ptes = {NULL, 0, 0};
    grant_entry_v1_t *gnttab_v1 = NULL;
    grant_entry_v2_t *gnttab_v2 = NULL;
    struct mmuext_op mops;
    int gnt_num, unpinned = 0;
    void *old_p, *backup = NULL;
    int rc, result = -1;
    uint32_t status;
    xen_pfn_t new_mfn, gpfn;
    xen_pfn_t *m2p_table;
    unsigned long max_mfn;

    if ( xc_domain_getinfo_single(xch, domid, &info) < 0 )
    {
        PERROR("Could not get domain info for dom%u", domid);
        return -1;
    }

    if (!dominfo_shutdown_with(&info, SHUTDOWN_suspend))
    {
        errno = EINVAL;
        ERROR("Can't exchange page unless domain is suspended\n");
        return -1;
    }
    if (!is_page_exchangeable(xch, domid, mfn, &info))
    {
        ERROR("Could not exchange page\n");
        return -1;
    }

    /* Map M2P and obtain gpfn */
    rc = xc_maximum_ram_page(xch, &max_mfn);
    if ( rc || !(m2p_table = xc_map_m2p(xch, max_mfn, PROT_READ, NULL)) )
    {
        PERROR("Failed to map live M2P table");
        return -1;
    }
    gpfn = m2p_table[mfn];

    /* Map domain's memory information */
    memset(&minfo, 0, sizeof(minfo));
    if ( xc_map_domain_meminfo(xch, domid, &minfo) )
    {
        PERROR("Could not map domain's memory information\n");
        goto failed;
    }

    /* For translation macros */
    dinfo->guest_width = minfo.guest_width;
    dinfo->p2m_size = minfo.p2m_size;

    /* Don't exchange CR3 for PAE guest in PAE host environment */
    if (minfo.guest_width > sizeof(long))
    {
        if ( (minfo.pfn_type[gpfn] & XEN_DOMCTL_PFINFO_LTABTYPE_MASK) ==
                    XEN_DOMCTL_PFINFO_L3TAB )
            goto failed;
    }

    gnttab_v2 = xc_gnttab_map_table_v2(xch, domid, &gnt_num);
    if (!gnttab_v2)
    {
        gnttab_v1 = xc_gnttab_map_table_v1(xch, domid, &gnt_num);
        if (!gnttab_v1)
        {
            ERROR("Failed to map grant table\n");
            goto failed;
        }
    }

    if (gnttab_v1
        ? xc_is_page_granted_v1(xch, mfn, gnttab_v1, gnt_num)
        : xc_is_page_granted_v2(xch, mfn, gnttab_v2, gnt_num))
    {
        ERROR("Page %lx is granted now\n", mfn);
        goto failed;
    }

    /* allocate required data structures */
    backup = malloc(PAGE_SIZE);
    if (!backup)
    {
        ERROR("Failed to allocate backup page buffer\n");
        goto failed;
    }

    old_ptes.max = DEFAULT_BACKUP_COUNT;
    old_ptes.entries = malloc(sizeof(struct pte_backup_entry) *
                              DEFAULT_BACKUP_COUNT);

    if (!old_ptes.entries)
    {
        ERROR("Failed to allocate PTE backup list\n");
        goto failed;
    }
    old_ptes.cur = 0;

    /* Unpin the page if it is pinned */
    if (minfo.pfn_type[gpfn] & XEN_DOMCTL_PFINFO_LPINTAB)
    {
        mops.cmd = MMUEXT_UNPIN_TABLE;
        mops.arg1.mfn = mfn;

        if ( xc_mmuext_op(xch, &mops, 1, domid) < 0 )
        {
            ERROR("Failed to unpin page %lx", mfn);
            goto failed;
        }
        unpinned = 1;
    }

    /* back up the original page contents */
    old_p = xc_map_foreign_range(xch, domid, PAGE_SIZE,
                                 PROT_READ, mfn);
    if (!old_p)
    {
        ERROR("Failed to map foreign page %lx\n", mfn);
        goto failed;
    }

    memcpy(backup, old_p, PAGE_SIZE);
    munmap(old_p, PAGE_SIZE);

    mmu = xc_alloc_mmu_updates(xch, domid);
    if ( mmu == NULL )
    {
        ERROR("%s: failed to allocate MMU update buffer\n", __FUNCTION__);
        goto failed;
    }

    /* First invalidate all PTEs mapping the page, to drop the references */
    rc = clear_pte(xch, domid, &minfo, &old_ptes, mmu, mfn);

    if (rc)
    {
        ERROR("clear pte failed\n");
        goto failed;
    }

    rc = xc_domain_memory_exchange_pages(xch, domid,
                                         1, 0, &mfn,
                                         1, 0, &new_mfn);

    if (rc)
    {
        ERROR("Exchanging the page failed\n");
        /* A failed exchange means the page is still referenced;
         * restore the old PTEs */
        rc = update_pte(xch, domid, &minfo, &old_ptes, mmu, mfn);
        if (rc)
            result = -2;
        goto failed;
    }

    rc = update_pte(xch, domid, &minfo, &old_ptes, mmu, new_mfn);

    if (rc)
    {
        ERROR("update pte failed; guest may be broken now\n");
        /* No recovery action now for swap failure */
        result = -2;
        goto failed;
    }

    /* Check if the page is offlined already */
    rc = xc_query_page_offline_status(xch, mfn, mfn, &status);

    if (rc)
    {
        ERROR("Failed to query offline status\n");
    }
    else if ( !(status & PG_OFFLINE_STATUS_OFFLINED) )
    {
        ERROR("page is still online or pending\n");
        goto failed;
    }
    else
    {
        void *new_p;
        IPRINTF("Page %lx is now offlined\n", mfn);
        /* Update the p2m table */
        minfo.p2m_table[gpfn] = new_mfn;

        new_p = xc_map_foreign_range(xch, domid, PAGE_SIZE,
                                     PROT_READ|PROT_WRITE, new_mfn);
        if ( new_p == NULL )
        {
            ERROR("failed to map new_p for copy, guest may be broken?");
            goto failed;
        }
        memcpy(new_p, backup, PAGE_SIZE);
        munmap(new_p, PAGE_SIZE);
        mops.arg1.mfn = new_mfn;
        result = 0;
    }

failed:

    if (unpinned && (minfo.pfn_type[gpfn] & XEN_DOMCTL_PFINFO_LPINTAB))
    {
        switch ( minfo.pfn_type[gpfn] & XEN_DOMCTL_PFINFO_LTABTYPE_MASK )
        {
            case XEN_DOMCTL_PFINFO_L1TAB:
                mops.cmd = MMUEXT_PIN_L1_TABLE;
                break;

            case XEN_DOMCTL_PFINFO_L2TAB:
                mops.cmd = MMUEXT_PIN_L2_TABLE;
                break;

            case XEN_DOMCTL_PFINFO_L3TAB:
                mops.cmd = MMUEXT_PIN_L3_TABLE;
                break;

            case XEN_DOMCTL_PFINFO_L4TAB:
                mops.cmd = MMUEXT_PIN_L4_TABLE;
                break;

            default:
                ERROR("Unpinned a non-page-table page\n");
                break;
        }

        if ( xc_mmuext_op(xch, &mops, 1, domid) < 0 )
        {
            ERROR("failed to pin the mfn again\n");
            result = -2;
        }
    }

    free(mmu);

    free(old_ptes.entries);

    free(backup);

    /* The grant table mapping covers gnt_num entries */
    if (gnttab_v1)
        munmap(gnttab_v1, gnt_num * sizeof(grant_entry_v1_t));
    if (gnttab_v2)
        munmap(gnttab_v2, gnt_num * sizeof(grant_entry_v2_t));

    xc_unmap_domain_meminfo(xch, &minfo);
    munmap(m2p_table, M2P_SIZE(max_mfn));

    return result;
}