1 #include <assert.h>
2 #include <arpa/inet.h>
3
4 #include "xc_sr_common_x86.h"
5
6 /*
7 * Process an HVM_CONTEXT record from the stream.
8 */
handle_hvm_context(struct xc_sr_context * ctx,struct xc_sr_record * rec)9 static int handle_hvm_context(struct xc_sr_context *ctx,
10 struct xc_sr_record *rec)
11 {
12 xc_interface *xch = ctx->xch;
13 void *p;
14
15 p = malloc(rec->length);
16 if ( !p )
17 {
18 ERROR("Unable to allocate %u bytes for hvm context", rec->length);
19 return -1;
20 }
21
22 free(ctx->x86_hvm.restore.context);
23
24 ctx->x86_hvm.restore.context = memcpy(p, rec->data, rec->length);
25 ctx->x86_hvm.restore.contextsz = rec->length;
26
27 return 0;
28 }
29
30 /*
31 * Process an HVM_PARAMS record from the stream.
32 */
handle_hvm_params(struct xc_sr_context * ctx,struct xc_sr_record * rec)33 static int handle_hvm_params(struct xc_sr_context *ctx,
34 struct xc_sr_record *rec)
35 {
36 xc_interface *xch = ctx->xch;
37 struct xc_sr_rec_hvm_params *hdr = rec->data;
38 struct xc_sr_rec_hvm_params_entry *entry = hdr->param;
39 unsigned int i;
40 int rc;
41
42 if ( rec->length < sizeof(*hdr) )
43 {
44 ERROR("HVM_PARAMS record truncated: length %u, header size %zu",
45 rec->length, sizeof(*hdr));
46 return -1;
47 }
48
49 if ( rec->length != (sizeof(*hdr) + hdr->count * sizeof(*entry)) )
50 {
51 ERROR("HVM_PARAMS record truncated: header %zu, count %u, "
52 "expected len %zu, got %u",
53 sizeof(*hdr), hdr->count, hdr->count * sizeof(*entry),
54 rec->length);
55 return -1;
56 }
57
58 /*
59 * Tolerate empty records. Older sending sides used to accidentally
60 * generate them.
61 */
62 if ( hdr->count == 0 )
63 {
64 DBGPRINTF("Skipping empty HVM_PARAMS record\n");
65 return 0;
66 }
67
68 for ( i = 0; i < hdr->count; i++, entry++ )
69 {
70 switch ( entry->index )
71 {
72 case HVM_PARAM_CONSOLE_PFN:
73 ctx->restore.console_gfn = entry->value;
74 xc_clear_domain_page(xch, ctx->domid, entry->value);
75 break;
76 case HVM_PARAM_STORE_PFN:
77 ctx->restore.xenstore_gfn = entry->value;
78 xc_clear_domain_page(xch, ctx->domid, entry->value);
79 break;
80 case HVM_PARAM_IOREQ_PFN:
81 case HVM_PARAM_BUFIOREQ_PFN:
82 xc_clear_domain_page(xch, ctx->domid, entry->value);
83 break;
84 }
85
86 rc = xc_hvm_param_set(xch, ctx->domid, entry->index, entry->value);
87 if ( rc < 0 )
88 {
89 PERROR("set HVM param %"PRId64" = 0x%016"PRIx64,
90 entry->index, entry->value);
91 return rc;
92 }
93 }
94 return 0;
95 }
96
/*
 * restore_ops function.  Always reports the pfn as valid: HVM restore has
 * no per-pfn validity table to check against.
 */
static bool x86_hvm_pfn_is_valid(const struct xc_sr_context *ctx, xen_pfn_t pfn)
{
    return true;
}
102
/*
 * restore_ops function.  For HVM guests pfns and gfns are the same space,
 * so the mapping is the identity.
 */
static xen_pfn_t x86_hvm_pfn_to_gfn(const struct xc_sr_context *ctx,
                                    xen_pfn_t pfn)
{
    return pfn;
}
109
/*
 * restore_ops function.  Nothing to do for HVM guests: there is no
 * pfn-to-gfn table to update.
 */
static void x86_hvm_set_gfn(struct xc_sr_context *ctx, xen_pfn_t pfn,
                            xen_pfn_t gfn)
{
    /* no op */
}
116
/*
 * restore_ops function.  Page types are a PV concept; HVM restore has no
 * per-page type state to record.
 */
static void x86_hvm_set_page_type(struct xc_sr_context *ctx,
                                  xen_pfn_t pfn, xen_pfn_t type)
{
    /* no-op */
}
123
/*
 * restore_ops function.  HVM page contents need no translation on the
 * receiving side, so this always succeeds without touching the page.
 */
static int x86_hvm_localise_page(struct xc_sr_context *ctx,
                                 uint32_t type, void *page)
{
    /* no-op */
    return 0;
}
131
132 /*
133 * restore_ops function. Confirms the stream matches the domain.
134 */
x86_hvm_setup(struct xc_sr_context * ctx)135 static int x86_hvm_setup(struct xc_sr_context *ctx)
136 {
137 xc_interface *xch = ctx->xch;
138
139 if ( ctx->restore.guest_type != DHDR_TYPE_X86_HVM )
140 {
141 ERROR("Unable to restore %s domain into an x86_hvm domain",
142 dhdr_type_to_str(ctx->restore.guest_type));
143 return -1;
144 }
145 else if ( ctx->restore.guest_page_size != PAGE_SIZE )
146 {
147 ERROR("Invalid page size %u for x86_hvm domains",
148 ctx->restore.guest_page_size);
149 return -1;
150 }
151 #ifdef __i386__
152 /* Very large domains (> 1TB) will exhaust virtual address space. */
153 if ( ctx->restore.p2m_size > 0x0fffffff )
154 {
155 errno = E2BIG;
156 PERROR("Cannot restore this big a guest");
157 return -1;
158 }
159 #endif
160
161 return 0;
162 }
163
164 /*
165 * restore_ops function.
166 */
x86_hvm_process_record(struct xc_sr_context * ctx,struct xc_sr_record * rec)167 static int x86_hvm_process_record(struct xc_sr_context *ctx,
168 struct xc_sr_record *rec)
169 {
170 switch ( rec->type )
171 {
172 case REC_TYPE_TSC_INFO:
173 return handle_tsc_info(ctx, rec);
174
175 case REC_TYPE_HVM_CONTEXT:
176 return handle_hvm_context(ctx, rec);
177
178 case REC_TYPE_HVM_PARAMS:
179 return handle_hvm_params(ctx, rec);
180
181 default:
182 return RECORD_NOT_PROCESSED;
183 }
184 }
185
186 /*
187 * restore_ops function. Sets extra hvm parameters and seeds the grant table.
188 */
x86_hvm_stream_complete(struct xc_sr_context * ctx)189 static int x86_hvm_stream_complete(struct xc_sr_context *ctx)
190 {
191 xc_interface *xch = ctx->xch;
192 int rc;
193
194 rc = xc_hvm_param_set(xch, ctx->domid, HVM_PARAM_STORE_EVTCHN,
195 ctx->restore.xenstore_evtchn);
196 if ( rc )
197 {
198 PERROR("Failed to set HVM_PARAM_STORE_EVTCHN");
199 return rc;
200 }
201
202 rc = xc_hvm_param_set(xch, ctx->domid, HVM_PARAM_CONSOLE_EVTCHN,
203 ctx->restore.console_evtchn);
204 if ( rc )
205 {
206 PERROR("Failed to set HVM_PARAM_CONSOLE_EVTCHN");
207 return rc;
208 }
209
210 rc = xc_domain_hvm_setcontext(xch, ctx->domid,
211 ctx->x86_hvm.restore.context,
212 ctx->x86_hvm.restore.contextsz);
213 if ( rc < 0 )
214 {
215 PERROR("Unable to restore HVM context");
216 return rc;
217 }
218
219 rc = xc_dom_gnttab_hvm_seed(xch, ctx->domid,
220 ctx->restore.console_gfn,
221 ctx->restore.xenstore_gfn,
222 ctx->restore.console_domid,
223 ctx->restore.xenstore_domid);
224 if ( rc )
225 {
226 PERROR("Failed to seed grant table");
227 return rc;
228 }
229
230 return rc;
231 }
232
x86_hvm_cleanup(struct xc_sr_context * ctx)233 static int x86_hvm_cleanup(struct xc_sr_context *ctx)
234 {
235 free(ctx->x86_hvm.restore.context);
236
237 return 0;
238 }
239
/* The restore_ops function table used for x86 HVM domains. */
struct xc_sr_restore_ops restore_ops_x86_hvm =
{
    .pfn_is_valid    = x86_hvm_pfn_is_valid,
    .pfn_to_gfn      = x86_hvm_pfn_to_gfn,
    .set_gfn         = x86_hvm_set_gfn,
    .set_page_type   = x86_hvm_set_page_type,
    .localise_page   = x86_hvm_localise_page,
    .setup           = x86_hvm_setup,
    .process_record  = x86_hvm_process_record,
    .stream_complete = x86_hvm_stream_complete,
    .cleanup         = x86_hvm_cleanup,
};
252
253 /*
254 * Local variables:
255 * mode: C
256 * c-file-style: "BSD"
257 * c-basic-offset: 4
258 * tab-width: 4
259 * indent-tabs-mode: nil
260 * End:
261 */
262