/*
 * hvm/save.c: Save and restore HVM guest's emulated hardware state.
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2007, XenSource Inc.
 * Copyright (c) 2007, Isaku Yamahata <yamahata at valinux co jp>
 *                     VA Linux Systems Japan K.K.
 *                     split x86 specific part
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/guest_access.h>
#include <xen/version.h>

#include <asm/hvm/support.h>

#include <public/hvm/save.h>

arch_hvm_save(struct domain * d,struct hvm_save_header * hdr)30 void arch_hvm_save(struct domain *d, struct hvm_save_header *hdr)
31 {
32 uint32_t eax, ebx, ecx, edx;
33
34 /* Save some CPUID bits */
35 cpuid(1, &eax, &ebx, &ecx, &edx);
36 hdr->cpuid = eax;
37
38 /* Save guest's preferred TSC. */
39 hdr->gtsc_khz = d->arch.tsc_khz;
40
41 /* Time when saving started */
42 d->arch.hvm_domain.sync_tsc = rdtsc();
43 }
44
arch_hvm_load(struct domain * d,struct hvm_save_header * hdr)45 int arch_hvm_load(struct domain *d, struct hvm_save_header *hdr)
46 {
47 uint32_t eax, ebx, ecx, edx;
48
49 if ( hdr->magic != HVM_FILE_MAGIC )
50 {
51 printk(XENLOG_G_ERR "HVM%d restore: bad magic number %#"PRIx32"\n",
52 d->domain_id, hdr->magic);
53 return -1;
54 }
55
56 if ( hdr->version != HVM_FILE_VERSION )
57 {
58 printk(XENLOG_G_ERR "HVM%d restore: unsupported version %u\n",
59 d->domain_id, hdr->version);
60 return -1;
61 }
62
63 cpuid(1, &eax, &ebx, &ecx, &edx);
64 /* CPUs ought to match but with feature-masking they might not */
65 if ( (hdr->cpuid & ~0x0fUL) != (eax & ~0x0fUL) )
66 printk(XENLOG_G_INFO "HVM%d restore: VM saved on one CPU "
67 "(%#"PRIx32") and restored on another (%#"PRIx32").\n",
68 d->domain_id, hdr->cpuid, eax);
69
70 /* Restore guest's preferred TSC frequency. */
71 if ( hdr->gtsc_khz )
72 d->arch.tsc_khz = hdr->gtsc_khz;
73 if ( d->arch.vtsc )
74 hvm_set_rdtsc_exiting(d, 1);
75
76 /* Time when restore started */
77 d->arch.hvm_domain.sync_tsc = rdtsc();
78
79 /* VGA state is not saved/restored, so we nobble the cache. */
80 d->arch.hvm_domain.stdvga.cache = STDVGA_CACHE_DISABLED;
81
82 return 0;
83 }
84
85 /* List of handlers for various HVM save and restore types */
86 static struct {
87 hvm_save_handler save;
88 hvm_load_handler load;
89 const char *name;
90 size_t size;
91 int kind;
92 } hvm_sr_handlers[HVM_SAVE_CODE_MAX + 1] = { {NULL, NULL, "<?>"}, };
93
94 /* Init-time function to add entries to that list */
hvm_register_savevm(uint16_t typecode,const char * name,hvm_save_handler save_state,hvm_load_handler load_state,size_t size,int kind)95 void __init hvm_register_savevm(uint16_t typecode,
96 const char *name,
97 hvm_save_handler save_state,
98 hvm_load_handler load_state,
99 size_t size, int kind)
100 {
101 ASSERT(typecode <= HVM_SAVE_CODE_MAX);
102 ASSERT(hvm_sr_handlers[typecode].save == NULL);
103 ASSERT(hvm_sr_handlers[typecode].load == NULL);
104 hvm_sr_handlers[typecode].save = save_state;
105 hvm_sr_handlers[typecode].load = load_state;
106 hvm_sr_handlers[typecode].name = name;
107 hvm_sr_handlers[typecode].size = size;
108 hvm_sr_handlers[typecode].kind = kind;
109 }
110
hvm_save_size(struct domain * d)111 size_t hvm_save_size(struct domain *d)
112 {
113 struct vcpu *v;
114 size_t sz;
115 int i;
116
117 /* Basic overhead for header and footer */
118 sz = (2 * sizeof (struct hvm_save_descriptor)) + HVM_SAVE_LENGTH(HEADER);
119
120 /* Plus space for each thing we will be saving */
121 for ( i = 0; i <= HVM_SAVE_CODE_MAX; i++ )
122 if ( hvm_sr_handlers[i].kind == HVMSR_PER_VCPU )
123 for_each_vcpu(d, v)
124 sz += hvm_sr_handlers[i].size;
125 else
126 sz += hvm_sr_handlers[i].size;
127
128 return sz;
129 }
130
131 /*
132 * Extract a single instance of a save record, by marshalling all records of
133 * that type and copying out the one we need.
134 */
hvm_save_one(struct domain * d,unsigned int typecode,unsigned int instance,XEN_GUEST_HANDLE_64 (uint8)handle,uint64_t * bufsz)135 int hvm_save_one(struct domain *d, unsigned int typecode, unsigned int instance,
136 XEN_GUEST_HANDLE_64(uint8) handle, uint64_t *bufsz)
137 {
138 int rv;
139 hvm_domain_context_t ctxt = { };
140 const struct hvm_save_descriptor *desc;
141
142 if ( d->is_dying ||
143 typecode > HVM_SAVE_CODE_MAX ||
144 hvm_sr_handlers[typecode].size < sizeof(*desc) ||
145 !hvm_sr_handlers[typecode].save )
146 return -EINVAL;
147
148 ctxt.size = hvm_sr_handlers[typecode].size;
149 if ( hvm_sr_handlers[typecode].kind == HVMSR_PER_VCPU )
150 ctxt.size *= d->max_vcpus;
151 ctxt.data = xmalloc_bytes(ctxt.size);
152 if ( !ctxt.data )
153 return -ENOMEM;
154
155 if ( (rv = hvm_sr_handlers[typecode].save(d, &ctxt)) != 0 )
156 printk(XENLOG_G_ERR "HVM%d save: failed to save type %"PRIu16" (%d)\n",
157 d->domain_id, typecode, rv);
158 else if ( rv = -ENOENT, ctxt.cur >= sizeof(*desc) )
159 {
160 uint32_t off;
161
162 for ( off = 0; off <= (ctxt.cur - sizeof(*desc)); off += desc->length )
163 {
164 desc = (void *)(ctxt.data + off);
165 /* Move past header */
166 off += sizeof(*desc);
167 if ( ctxt.cur < desc->length ||
168 off > ctxt.cur - desc->length )
169 break;
170 if ( instance == desc->instance )
171 {
172 rv = 0;
173 if ( guest_handle_is_null(handle) )
174 *bufsz = desc->length;
175 else if ( *bufsz < desc->length )
176 rv = -ENOBUFS;
177 else if ( copy_to_guest(handle, ctxt.data + off, desc->length) )
178 rv = -EFAULT;
179 else
180 *bufsz = desc->length;
181 break;
182 }
183 }
184 }
185
186 xfree(ctxt.data);
187 return rv;
188 }
189
hvm_save(struct domain * d,hvm_domain_context_t * h)190 int hvm_save(struct domain *d, hvm_domain_context_t *h)
191 {
192 char *c;
193 struct hvm_save_header hdr;
194 struct hvm_save_end end;
195 hvm_save_handler handler;
196 unsigned int i;
197
198 if ( d->is_dying )
199 return -EINVAL;
200
201 hdr.magic = HVM_FILE_MAGIC;
202 hdr.version = HVM_FILE_VERSION;
203
204 /* Save xen changeset */
205 c = strrchr(xen_changeset(), ':');
206 if ( c )
207 hdr.changeset = simple_strtoll(c, NULL, 16);
208 else
209 hdr.changeset = -1ULL; /* Unknown */
210
211 arch_hvm_save(d, &hdr);
212
213 if ( hvm_save_entry(HEADER, 0, h, &hdr) != 0 )
214 {
215 printk(XENLOG_G_ERR "HVM%d save: failed to write header\n",
216 d->domain_id);
217 return -EFAULT;
218 }
219
220 /* Save all available kinds of state */
221 for ( i = 0; i <= HVM_SAVE_CODE_MAX; i++ )
222 {
223 handler = hvm_sr_handlers[i].save;
224 if ( handler != NULL )
225 {
226 printk(XENLOG_G_INFO "HVM%d save: %s\n",
227 d->domain_id, hvm_sr_handlers[i].name);
228 if ( handler(d, h) != 0 )
229 {
230 printk(XENLOG_G_ERR
231 "HVM%d save: failed to save type %"PRIu16"\n",
232 d->domain_id, i);
233 return -EFAULT;
234 }
235 }
236 }
237
238 /* Save an end-of-file marker */
239 if ( hvm_save_entry(END, 0, h, &end) != 0 )
240 {
241 /* Run out of data */
242 printk(XENLOG_G_ERR "HVM%d save: no room for end marker\n",
243 d->domain_id);
244 return -EFAULT;
245 }
246
247 /* Save macros should not have let us overrun */
248 ASSERT(h->cur <= h->size);
249 return 0;
250 }
251
hvm_load(struct domain * d,hvm_domain_context_t * h)252 int hvm_load(struct domain *d, hvm_domain_context_t *h)
253 {
254 struct hvm_save_header hdr;
255 struct hvm_save_descriptor *desc;
256 hvm_load_handler handler;
257 struct vcpu *v;
258
259 if ( d->is_dying )
260 return -EINVAL;
261
262 /* Read the save header, which must be first */
263 if ( hvm_load_entry(HEADER, h, &hdr) != 0 )
264 return -1;
265
266 if ( arch_hvm_load(d, &hdr) )
267 return -1;
268
269 /* Down all the vcpus: we only re-enable the ones that had state saved. */
270 for_each_vcpu(d, v)
271 if ( test_and_set_bit(_VPF_down, &v->pause_flags) )
272 vcpu_sleep_nosync(v);
273
274 for ( ; ; )
275 {
276 if ( h->size - h->cur < sizeof(struct hvm_save_descriptor) )
277 {
278 /* Run out of data */
279 printk(XENLOG_G_ERR
280 "HVM%d restore: save did not end with a null entry\n",
281 d->domain_id);
282 return -1;
283 }
284
285 /* Read the typecode of the next entry and check for the end-marker */
286 desc = (struct hvm_save_descriptor *)(&h->data[h->cur]);
287 if ( desc->typecode == 0 )
288 return 0;
289
290 /* Find the handler for this entry */
291 if ( (desc->typecode > HVM_SAVE_CODE_MAX) ||
292 ((handler = hvm_sr_handlers[desc->typecode].load) == NULL) )
293 {
294 printk(XENLOG_G_ERR "HVM%d restore: unknown entry typecode %u\n",
295 d->domain_id, desc->typecode);
296 return -1;
297 }
298
299 /* Load the entry */
300 printk(XENLOG_G_INFO "HVM%d restore: %s %"PRIu16"\n", d->domain_id,
301 hvm_sr_handlers[desc->typecode].name, desc->instance);
302 if ( handler(d, h) != 0 )
303 {
304 printk(XENLOG_G_ERR "HVM%d restore: failed to load entry %u/%u\n",
305 d->domain_id, desc->typecode, desc->instance);
306 return -1;
307 }
308 }
309
310 /* Not reached */
311 }
312
_hvm_init_entry(struct hvm_domain_context * h,uint16_t tc,uint16_t inst,uint32_t len)313 int _hvm_init_entry(struct hvm_domain_context *h, uint16_t tc, uint16_t inst,
314 uint32_t len)
315 {
316 struct hvm_save_descriptor *d
317 = (struct hvm_save_descriptor *)&h->data[h->cur];
318
319 if ( h->size - h->cur < len + sizeof (*d) )
320 {
321 printk(XENLOG_G_WARNING "HVM save: no room for"
322 " %"PRIu32" + %zu bytes for typecode %"PRIu16"\n",
323 len, sizeof(*d), tc);
324 return -1;
325 }
326
327 d->typecode = tc;
328 d->instance = inst;
329 d->length = len;
330 h->cur += sizeof(*d);
331
332 return 0;
333 }
334
_hvm_write_entry(struct hvm_domain_context * h,void * src,uint32_t src_len)335 void _hvm_write_entry(struct hvm_domain_context *h, void *src,
336 uint32_t src_len)
337 {
338 memcpy(&h->data[h->cur], src, src_len);
339 h->cur += src_len;
340 }
341
_hvm_check_entry(struct hvm_domain_context * h,uint16_t type,uint32_t len,bool strict_length)342 int _hvm_check_entry(struct hvm_domain_context *h, uint16_t type, uint32_t len,
343 bool strict_length)
344 {
345 struct hvm_save_descriptor *d
346 = (struct hvm_save_descriptor *)&h->data[h->cur];
347
348 if ( sizeof(*d) > h->size - h->cur)
349 {
350 printk(XENLOG_G_WARNING
351 "HVM restore: not enough data left to read %zu bytes "
352 "for type %u header\n", sizeof(*d), type);
353 return -1;
354 }
355
356 if ( (type != d->typecode) ||
357 (strict_length ? (len != d->length) : (len < d->length)) ||
358 (d->length > (h->size - h->cur - sizeof(*d))) )
359 {
360 printk(XENLOG_G_WARNING
361 "HVM restore mismatch: expected %s type %u length %u, "
362 "saw type %u length %u. %zu bytes remaining\n",
363 strict_length ? "strict" : "zeroextended", type, len,
364 d->typecode, d->length, h->size - h->cur - sizeof(*d));
365 return -1;
366 }
367
368 h->cur += sizeof(*d);
369
370 return 0;
371 }
372
_hvm_read_entry(struct hvm_domain_context * h,void * dest,uint32_t dest_len)373 void _hvm_read_entry(struct hvm_domain_context *h, void *dest,
374 uint32_t dest_len)
375 {
376 struct hvm_save_descriptor *d
377 = (struct hvm_save_descriptor *)&h->data[h->cur - sizeof(*d)];
378
379 BUG_ON(d->length > dest_len);
380
381 memcpy(dest, &h->data[h->cur], d->length);
382
383 if ( d->length < dest_len )
384 memset((char *)dest + d->length, 0, dest_len - d->length);
385
386 h->cur += d->length;
387 }

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */