#include <assert.h>

#include "xc_sr_common_x86_pv.h"

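/* Convert an mfn to a pfn, via the live m2p.  The mfn must be in range. */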
xen_pfn_t mfn_to_pfn(struct xc_sr_context *ctx, xen_pfn_t mfn)
{
    assert(mfn <= ctx->x86_pv.max_mfn);
    return ctx->x86_pv.m2p[mfn];
}

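/*
 * Check that an mfn looks sane: within the m2p, with an m2p entry inside the
 * guest's p2m, and with a p2m entry which maps back to the same mfn.
 */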
bool mfn_in_pseudophysmap(struct xc_sr_context *ctx, xen_pfn_t mfn)
{
    return ( (mfn <= ctx->x86_pv.max_mfn) &&
             (mfn_to_pfn(ctx, mfn) <= ctx->x86_pv.max_pfn) &&
             (xc_pfn_to_mfn(mfn_to_pfn(ctx, mfn), ctx->x86_pv.p2m,
                            ctx->x86_pv.width) == mfn) );
}

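/* Log diagnostics for an mfn which has failed the checks above. */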
void dump_bad_pseudophysmap_entry(struct xc_sr_context *ctx, xen_pfn_t mfn)
{
    xc_interface *xch = ctx->xch;
    xen_pfn_t pfn = ~0UL;

    ERROR("mfn %#lx, max %#lx", mfn, ctx->x86_pv.max_mfn);

    if ( (mfn != ~0UL) && (mfn <= ctx->x86_pv.max_mfn) )
    {
        pfn = ctx->x86_pv.m2p[mfn];
        ERROR("  m2p[%#lx] = %#lx, max_pfn %#lx",
              mfn, pfn, ctx->x86_pv.max_pfn);
    }

    if ( (pfn != ~0UL) && (pfn <= ctx->x86_pv.max_pfn) )
        ERROR("  p2m[%#lx] = %#lx",
              pfn, xc_pfn_to_mfn(pfn, ctx->x86_pv.p2m, ctx->x86_pv.width));
}

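/*
 * Extract the mfn from a guest cr3 value.  64bit guests store the mfn
 * shifted left by 12; 32bit guests use a rotated 32 bit encoding which
 * cannot represent mfns wider than 32 bits.
 */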
xen_pfn_t cr3_to_mfn(struct xc_sr_context *ctx, uint64_t cr3)
{
    if ( ctx->x86_pv.width == 8 )
        return cr3 >> 12;
    else
    {
        /* 32bit guests can't represent mfns wider than 32 bits */
        if ( cr3 & 0xffffffff00000000UL )
            return ~0UL;
        else
            return (uint32_t)((cr3 >> 12) | (cr3 << 20));
    }
}

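/* Pack an mfn into a guest cr3 value.  The inverse of cr3_to_mfn(). */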
uint64_t mfn_to_cr3(struct xc_sr_context *ctx, xen_pfn_t _mfn)
{
    uint64_t mfn = _mfn;

    if ( ctx->x86_pv.width == 8 )
        return mfn << 12;
    else
    {
        /* 32bit guests can't represent mfns wider than 32 bits */
        if ( mfn & 0xffffffff00000000UL )
            return ~0UL;
        else
            return (uint32_t)((mfn << 12) | (mfn >> 20));
    }
}

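/*
 * Query the guest's address width from Xen and derive its number of
 * pagetable levels, recording both in the context.  Returns 0 on success,
 * -1 on error.
 */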
int x86_pv_domain_info(struct xc_sr_context *ctx)
{
    xc_interface *xch = ctx->xch;
    unsigned int guest_width, guest_levels;

    /* Get the domain width */
    if ( xc_domain_get_guest_width(xch, ctx->domid, &guest_width) )
    {
        PERROR("Unable to determine dom%d's width", ctx->domid);
        return -1;
    }

    if ( guest_width == 4 )
        guest_levels = 3;
    else if ( guest_width == 8 )
        guest_levels = 4;
    else
    {
        ERROR("Invalid guest width %d.  Expected 32 or 64", guest_width * 8);
        return -1;
    }
    ctx->x86_pv.width = guest_width;
    ctx->x86_pv.levels = guest_levels;

    DPRINTF("%d bits, %d levels", guest_width * 8, guest_levels);

    return 0;
}

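/*
 * Map the hypervisor's live m2p table into local address space, and record
 * the first mfn of the compat m2p.  Returns 0 on success, -1 on error.
 */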
int x86_pv_map_m2p(struct xc_sr_context *ctx)
{
    xc_interface *xch = ctx->xch;
    xen_pfn_t m2p_chunks, m2p_size, max_page;
    privcmd_mmap_entry_t *entries = NULL;
    xen_pfn_t *extents_start = NULL;
    int rc = -1, i;

    if ( xc_maximum_ram_page(xch, &max_page) < 0 )
    {
        PERROR("Failed to get maximum ram page");
        goto err;
    }

    ctx->x86_pv.max_mfn = max_page;
    m2p_size   = M2P_SIZE(ctx->x86_pv.max_mfn);
    m2p_chunks = M2P_CHUNKS(ctx->x86_pv.max_mfn);

    extents_start = malloc(m2p_chunks * sizeof(xen_pfn_t));
    if ( !extents_start )
    {
        ERROR("Unable to allocate %lu bytes for m2p mfns",
              m2p_chunks * sizeof(xen_pfn_t));
        goto err;
    }

    if ( xc_machphys_mfn_list(xch, m2p_chunks, extents_start) )
    {
        PERROR("Failed to get m2p mfn list");
        goto err;
    }

    entries = malloc(m2p_chunks * sizeof(privcmd_mmap_entry_t));
    if ( !entries )
    {
        ERROR("Unable to allocate %lu bytes for m2p mapping mfns",
              m2p_chunks * sizeof(privcmd_mmap_entry_t));
        goto err;
    }

    for ( i = 0; i < m2p_chunks; ++i )
        entries[i].mfn = extents_start[i];

    ctx->x86_pv.m2p = xc_map_foreign_ranges(
        xch, DOMID_XEN, m2p_size, PROT_READ,
        M2P_CHUNK_SIZE, entries, m2p_chunks);

    if ( !ctx->x86_pv.m2p )
    {
        PERROR("Failed to mmap() m2p ranges");
        goto err;
    }

    ctx->x86_pv.nr_m2p_frames = (M2P_CHUNK_SIZE >> PAGE_SHIFT) * m2p_chunks;

#ifdef __i386__
    /* 32 bit toolstacks automatically get the compat m2p */
    ctx->x86_pv.compat_m2p_mfn0 = entries[0].mfn;
#else
    /* 64 bit toolstacks need to ask Xen specially for it */
    {
        struct xen_machphys_mfn_list xmml = {
            .max_extents = 1,
            .extent_start = { &ctx->x86_pv.compat_m2p_mfn0 }
        };

        rc = do_memory_op(xch, XENMEM_machphys_compat_mfn_list,
                          &xmml, sizeof(xmml));
        if ( rc || xmml.nr_extents != 1 )
        {
            PERROR("Failed to get compat mfn list from Xen");
            rc = -1;
            goto err;
        }
    }
#endif

    /* All Done */
    rc = 0;
    DPRINTF("max_mfn %#lx", ctx->x86_pv.max_mfn);

err:
    free(entries);
    free(extents_start);

    return rc;
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */