/*
 * hvm/io.c: hardware virtual machine I/O emulation
 *
 * Copyright (c) 2016 Citrix Systems Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/ctype.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/trace.h>
#include <xen/sched.h>
#include <xen/irq.h>
#include <xen/softirq.h>
#include <xen/domain.h>
#include <xen/event.h>
#include <xen/paging.h>

#include <asm/hvm/hvm.h>
#include <asm/hvm/ioreq.h>
#include <asm/hvm/vmx/vmx.h>

#include <public/hvm/ioreq.h>

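/*
 * Return the per-vCPU ioreq_t slot for v in the server's shared
 * (synchronous) ioreq page.
 */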
static ioreq_t *get_ioreq(struct hvm_ioreq_server *s, struct vcpu *v)
{
    shared_iopage_t *p = s->ioreq.va;

    ASSERT((v == current) || !vcpu_runnable(v));
    ASSERT(p != NULL);

    return &p->vcpu_ioreq[v->vcpu_id];
}

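/* Does vCPU v have an I/O request outstanding with any ioreq server? */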
bool hvm_io_pending(struct vcpu *v)
{
    struct domain *d = v->domain;
    struct hvm_ioreq_server *s;

    list_for_each_entry ( s,
                          &d->arch.hvm_domain.ioreq_server.list,
                          list_entry )
    {
        struct hvm_ioreq_vcpu *sv;

        list_for_each_entry ( sv,
                              &s->ioreq_vcpu_list,
                              list_entry )
        {
            if ( sv->vcpu == v && sv->pending )
                return true;
        }
    }

    return false;
}

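/*
 * Complete the request pending for sv, latching the response data if the
 * issuing emulation still needs it.
 */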
static void hvm_io_assist(struct hvm_ioreq_vcpu *sv, uint64_t data)
{
    struct vcpu *v = sv->vcpu;
    struct hvm_vcpu_io *vio = &v->arch.hvm_vcpu.hvm_io;

    if ( hvm_vcpu_io_need_completion(vio) )
    {
        vio->io_req.state = STATE_IORESP_READY;
        vio->io_req.data = data;
    }
    else
        vio->io_req.state = STATE_IOREQ_NONE;

    msix_write_completion(v);
    vcpu_end_shutdown_deferral(v);

    sv->pending = false;
}

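/*
 * Wait for the emulator to finish processing sv's outstanding request,
 * blocking on the ioreq event channel while it is still in flight.
 */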
static bool hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
{
    while ( sv->pending )
    {
        unsigned int state = p->state;

        rmb();
        switch ( state )
        {
        case STATE_IOREQ_NONE:
            /*
             * The only reason we should see this case is when an
             * emulator is dying and it races with an I/O being
             * requested.
             */
            hvm_io_assist(sv, ~0ul);
            break;
        case STATE_IORESP_READY: /* IORESP_READY -> NONE */
            p->state = STATE_IOREQ_NONE;
            hvm_io_assist(sv, p->data);
            break;
        case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
        case STATE_IOREQ_INPROCESS:
            wait_on_xen_event_channel(sv->ioreq_evtchn, p->state != state);
            break;
        default:
            gdprintk(XENLOG_ERR, "Weird HVM iorequest state %u\n", state);
            sv->pending = false;
            domain_crash(sv->vcpu->domain);
            return false; /* bail */
        }
    }

    return true;
}

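/*
 * Called on return to guest context: wait for any outstanding ioreq to be
 * completed by its server, then run the recorded completion handler
 * (MMIO, PIO or real-mode emulation) for the finished request.
 */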
bool handle_hvm_io_completion(struct vcpu *v)
{
    struct domain *d = v->domain;
    struct hvm_vcpu_io *vio = &v->arch.hvm_vcpu.hvm_io;
    struct hvm_ioreq_server *s;
    enum hvm_io_completion io_completion;

    list_for_each_entry ( s,
                          &d->arch.hvm_domain.ioreq_server.list,
                          list_entry )
    {
        struct hvm_ioreq_vcpu *sv;

        list_for_each_entry ( sv,
                              &s->ioreq_vcpu_list,
                              list_entry )
        {
            if ( sv->vcpu == v && sv->pending )
            {
                if ( !hvm_wait_for_io(sv, get_ioreq(s, v)) )
                    return false;

                break;
            }
        }
    }

    io_completion = vio->io_completion;
    vio->io_completion = HVMIO_no_completion;

    switch ( io_completion )
    {
    case HVMIO_no_completion:
        break;

    case HVMIO_mmio_completion:
        return handle_mmio();

    case HVMIO_pio_completion:
        return handle_pio(vio->io_req.addr, vio->io_req.size,
                          vio->io_req.dir);

    case HVMIO_realmode_completion:
    {
        struct hvm_emulate_ctxt ctxt;

        hvm_emulate_init_once(&ctxt, NULL, guest_cpu_user_regs());
        vmx_realmode_emulate_one(&ctxt);
        hvm_emulate_writeback(&ctxt);

        break;
    }
    default:
        ASSERT_UNREACHABLE();
        break;
    }

    return true;
}

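/*
 * Allocate a guest frame for an ioreq page from the domain's reserved
 * range, or return it to the pool. Free slots are tracked in
 * ioreq_gfn.mask relative to ioreq_gfn.base.
 */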
static int hvm_alloc_ioreq_gfn(struct domain *d, unsigned long *gfn)
{
    unsigned int i;
    int rc;

    rc = -ENOMEM;
    for ( i = 0; i < sizeof(d->arch.hvm_domain.ioreq_gfn.mask) * 8; i++ )
    {
        if ( test_and_clear_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask) )
        {
            *gfn = d->arch.hvm_domain.ioreq_gfn.base + i;
            rc = 0;
            break;
        }
    }

    return rc;
}

static void hvm_free_ioreq_gfn(struct domain *d, unsigned long gfn)
{
    unsigned int i = gfn - d->arch.hvm_domain.ioreq_gfn.base;

    if ( gfn != gfn_x(INVALID_GFN) )
        set_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask);
}

static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, bool buf)
{
    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;

    destroy_ring_for_helper(&iorp->va, iorp->page);
}

static int hvm_map_ioreq_page(
    struct hvm_ioreq_server *s, bool buf, unsigned long gfn)
{
    struct domain *d = s->domain;
    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
    struct page_info *page;
    void *va;
    int rc;

    if ( (rc = prepare_ring_for_helper(d, gfn, &page, &va)) )
        return rc;

    if ( (iorp->va != NULL) || d->is_dying )
    {
        destroy_ring_for_helper(&va, page);
        return -EINVAL;
    }

    iorp->va = va;
    iorp->page = page;
    iorp->gfn = gfn;

    return 0;
}

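/* Is the given page in use as an ioreq page by any of d's ioreq servers? */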
bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
{
    const struct hvm_ioreq_server *s;
    bool found = false;

    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);

    list_for_each_entry ( s,
                          &d->arch.hvm_domain.ioreq_server.list,
                          list_entry )
    {
        if ( (s->ioreq.va && s->ioreq.page == page) ||
             (s->bufioreq.va && s->bufioreq.page == page) )
        {
            found = true;
            break;
        }
    }

    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);

    return found;
}

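/*
 * Hide an ioreq page from, or re-expose it to, the guest physmap: the
 * page is removed while its server is enabled and added back when the
 * server is disabled.
 */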
static void hvm_remove_ioreq_gfn(
    struct domain *d, struct hvm_ioreq_page *iorp)
{
    if ( guest_physmap_remove_page(d, _gfn(iorp->gfn),
                                   _mfn(page_to_mfn(iorp->page)), 0) )
        domain_crash(d);
    clear_page(iorp->va);
}

static int hvm_add_ioreq_gfn(
    struct domain *d, struct hvm_ioreq_page *iorp)
{
    int rc;

    clear_page(iorp->va);

    rc = guest_physmap_add_page(d, _gfn(iorp->gfn),
                                _mfn(page_to_mfn(iorp->page)), 0);
    if ( rc == 0 )
        paging_mark_dirty(d, _mfn(page_to_mfn(iorp->page)));

    return rc;
}

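/*
 * Publish the vCPU's event channel port in its ioreq slot so the emulator
 * knows which port to notify when a response is ready.
 */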
static void hvm_update_ioreq_evtchn(struct hvm_ioreq_server *s,
                                    struct hvm_ioreq_vcpu *sv)
{
    ASSERT(spin_is_locked(&s->lock));

    if ( s->ioreq.va != NULL )
    {
        ioreq_t *p = get_ioreq(s, sv->vcpu);

        p->vp_eport = sv->ioreq_evtchn;
    }
}

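/*
 * Allocate per-vCPU state for an ioreq server: a synchronous event
 * channel for the vCPU and, on vCPU 0 of a server with a buffered ioreq
 * page, the buffered ioreq event channel as well.
 */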
static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
                                     bool is_default, struct vcpu *v)
{
    struct hvm_ioreq_vcpu *sv;
    int rc;

    sv = xzalloc(struct hvm_ioreq_vcpu);

    rc = -ENOMEM;
    if ( !sv )
        goto fail1;

    spin_lock(&s->lock);

    rc = alloc_unbound_xen_event_channel(v->domain, v->vcpu_id, s->domid,
                                         NULL);
    if ( rc < 0 )
        goto fail2;

    sv->ioreq_evtchn = rc;

    if ( v->vcpu_id == 0 && s->bufioreq.va != NULL )
    {
        struct domain *d = s->domain;

        rc = alloc_unbound_xen_event_channel(v->domain, 0, s->domid, NULL);
        if ( rc < 0 )
            goto fail3;

        s->bufioreq_evtchn = rc;
        if ( is_default )
            d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_EVTCHN] =
                s->bufioreq_evtchn;
    }

    sv->vcpu = v;

    list_add(&sv->list_entry, &s->ioreq_vcpu_list);

    if ( s->enabled )
        hvm_update_ioreq_evtchn(s, sv);

    spin_unlock(&s->lock);
    return 0;

 fail3:
    free_xen_event_channel(v->domain, sv->ioreq_evtchn);

 fail2:
    spin_unlock(&s->lock);
    xfree(sv);

 fail1:
    return rc;
}

static void hvm_ioreq_server_remove_vcpu(struct hvm_ioreq_server *s,
                                         struct vcpu *v)
{
    struct hvm_ioreq_vcpu *sv;

    spin_lock(&s->lock);

    list_for_each_entry ( sv,
                          &s->ioreq_vcpu_list,
                          list_entry )
    {
        if ( sv->vcpu != v )
            continue;

        list_del(&sv->list_entry);

        if ( v->vcpu_id == 0 && s->bufioreq.va != NULL )
            free_xen_event_channel(v->domain, s->bufioreq_evtchn);

        free_xen_event_channel(v->domain, sv->ioreq_evtchn);

        xfree(sv);
        break;
    }

    spin_unlock(&s->lock);
}

static void hvm_ioreq_server_remove_all_vcpus(struct hvm_ioreq_server *s)
{
    struct hvm_ioreq_vcpu *sv, *next;

    spin_lock(&s->lock);

    list_for_each_entry_safe ( sv,
                               next,
                               &s->ioreq_vcpu_list,
                               list_entry )
    {
        struct vcpu *v = sv->vcpu;

        list_del(&sv->list_entry);

        if ( v->vcpu_id == 0 && s->bufioreq.va != NULL )
            free_xen_event_channel(v->domain, s->bufioreq_evtchn);

        free_xen_event_channel(v->domain, sv->ioreq_evtchn);

        xfree(sv);
    }

    spin_unlock(&s->lock);
}

static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s,
                                      unsigned long ioreq_gfn,
                                      unsigned long bufioreq_gfn)
{
    int rc;

    rc = hvm_map_ioreq_page(s, false, ioreq_gfn);
    if ( rc )
        return rc;

    if ( bufioreq_gfn != gfn_x(INVALID_GFN) )
        rc = hvm_map_ioreq_page(s, true, bufioreq_gfn);

    if ( rc )
        hvm_unmap_ioreq_page(s, false);

    return rc;
}

static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
                                        bool is_default,
                                        bool handle_bufioreq)
{
    struct domain *d = s->domain;
    unsigned long ioreq_gfn = gfn_x(INVALID_GFN);
    unsigned long bufioreq_gfn = gfn_x(INVALID_GFN);
    int rc;

    if ( is_default )
    {
        /*
         * The default ioreq server must handle buffered ioreqs, for
         * backwards compatibility.
         */
        ASSERT(handle_bufioreq);
        return hvm_ioreq_server_map_pages(s,
                   d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN],
                   d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN]);
    }

    rc = hvm_alloc_ioreq_gfn(d, &ioreq_gfn);

    if ( !rc && handle_bufioreq )
        rc = hvm_alloc_ioreq_gfn(d, &bufioreq_gfn);

    if ( !rc )
        rc = hvm_ioreq_server_map_pages(s, ioreq_gfn, bufioreq_gfn);

    if ( rc )
    {
        hvm_free_ioreq_gfn(d, ioreq_gfn);
        hvm_free_ioreq_gfn(d, bufioreq_gfn);
    }

    return rc;
}

static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s,
                                         bool is_default)
{
    struct domain *d = s->domain;
    bool handle_bufioreq = !!s->bufioreq.va;

    if ( handle_bufioreq )
        hvm_unmap_ioreq_page(s, true);

    hvm_unmap_ioreq_page(s, false);

    if ( !is_default )
    {
        if ( handle_bufioreq )
            hvm_free_ioreq_gfn(d, s->bufioreq.gfn);

        hvm_free_ioreq_gfn(d, s->ioreq.gfn);
    }
}

static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s,
                                            bool is_default)
{
    unsigned int i;

    if ( is_default )
        return;

    for ( i = 0; i < NR_IO_RANGE_TYPES; i++ )
        rangeset_destroy(s->range[i]);
}

static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
                                            bool is_default)
{
    unsigned int i;
    int rc;

    if ( is_default )
        goto done;

    for ( i = 0; i < NR_IO_RANGE_TYPES; i++ )
    {
        char *name;

        rc = asprintf(&name, "ioreq_server %d %s", s->id,
                      (i == XEN_DMOP_IO_RANGE_PORT) ? "port" :
                      (i == XEN_DMOP_IO_RANGE_MEMORY) ? "memory" :
                      (i == XEN_DMOP_IO_RANGE_PCI) ? "pci" :
                      "");
        if ( rc )
            goto fail;

        s->range[i] = rangeset_new(s->domain, name,
                                   RANGESETF_prettyprint_hex);

        xfree(name);

        rc = -ENOMEM;
        if ( !s->range[i] )
            goto fail;

        rangeset_limit(s->range[i], MAX_NR_IO_RANGES);
    }

 done:
    return 0;

 fail:
    hvm_ioreq_server_free_rangesets(s, false);

    return rc;
}

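/*
 * Enabling a non-default server hides its ioreq pages from the guest
 * physmap; disabling it puts them back. Event channel ports are
 * republished to the shared page when the server becomes enabled.
 */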
static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
                                    bool is_default)
{
    struct domain *d = s->domain;
    struct hvm_ioreq_vcpu *sv;
    bool handle_bufioreq = !!s->bufioreq.va;

    spin_lock(&s->lock);

    if ( s->enabled )
        goto done;

    if ( !is_default )
    {
        hvm_remove_ioreq_gfn(d, &s->ioreq);

        if ( handle_bufioreq )
            hvm_remove_ioreq_gfn(d, &s->bufioreq);
    }

    s->enabled = true;

    list_for_each_entry ( sv,
                          &s->ioreq_vcpu_list,
                          list_entry )
        hvm_update_ioreq_evtchn(s, sv);

 done:
    spin_unlock(&s->lock);
}

static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
                                     bool is_default)
{
    struct domain *d = s->domain;
    bool handle_bufioreq = !!s->bufioreq.va;

    spin_lock(&s->lock);

    if ( !s->enabled )
        goto done;

    if ( !is_default )
    {
        if ( handle_bufioreq )
            hvm_add_ioreq_gfn(d, &s->bufioreq);

        hvm_add_ioreq_gfn(d, &s->ioreq);
    }

    s->enabled = false;

 done:
    spin_unlock(&s->lock);
}

static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
                                 struct domain *d, domid_t domid,
                                 bool is_default, int bufioreq_handling,
                                 ioservid_t id)
{
    struct vcpu *v;
    int rc;

    s->id = id;
    s->domain = d;
    s->domid = domid;

    spin_lock_init(&s->lock);
    INIT_LIST_HEAD(&s->ioreq_vcpu_list);
    spin_lock_init(&s->bufioreq_lock);

    rc = hvm_ioreq_server_alloc_rangesets(s, is_default);
    if ( rc )
        return rc;

    if ( bufioreq_handling == HVM_IOREQSRV_BUFIOREQ_ATOMIC )
        s->bufioreq_atomic = true;

    rc = hvm_ioreq_server_setup_pages(
             s, is_default, bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF);
    if ( rc )
        goto fail_map;

    for_each_vcpu ( d, v )
    {
        rc = hvm_ioreq_server_add_vcpu(s, is_default, v);
        if ( rc )
            goto fail_add;
    }

    return 0;

 fail_add:
    hvm_ioreq_server_remove_all_vcpus(s);
    hvm_ioreq_server_unmap_pages(s, is_default);

 fail_map:
    hvm_ioreq_server_free_rangesets(s, is_default);

    return rc;
}

static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s,
                                    bool is_default)
{
    ASSERT(!s->enabled);
    hvm_ioreq_server_remove_all_vcpus(s);
    hvm_ioreq_server_unmap_pages(s, is_default);
    hvm_ioreq_server_free_rangesets(s, is_default);
}

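/*
 * Pick the next unused server id, scanning the existing servers to keep
 * ids unique. Called with the ioreq server lock held.
 */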
static ioservid_t next_ioservid(struct domain *d)
{
    struct hvm_ioreq_server *s;
    ioservid_t id;

    ASSERT(spin_is_locked(&d->arch.hvm_domain.ioreq_server.lock));

    id = d->arch.hvm_domain.ioreq_server.id;

 again:
    id++;

    /* Check for uniqueness */
    list_for_each_entry ( s,
                          &d->arch.hvm_domain.ioreq_server.list,
                          list_entry )
    {
        if ( id == s->id )
            goto again;
    }

    d->arch.hvm_domain.ioreq_server.id = id;

    return id;
}

int hvm_create_ioreq_server(struct domain *d, domid_t domid,
                            bool is_default, int bufioreq_handling,
                            ioservid_t *id)
{
    struct hvm_ioreq_server *s;
    int rc;

    if ( bufioreq_handling > HVM_IOREQSRV_BUFIOREQ_ATOMIC )
        return -EINVAL;

    rc = -ENOMEM;
    s = xzalloc(struct hvm_ioreq_server);
    if ( !s )
        goto fail1;

    domain_pause(d);
    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);

    rc = -EEXIST;
    if ( is_default && d->arch.hvm_domain.default_ioreq_server != NULL )
        goto fail2;

    rc = hvm_ioreq_server_init(s, d, domid, is_default, bufioreq_handling,
                               next_ioservid(d));
    if ( rc )
        goto fail3;

    list_add(&s->list_entry,
             &d->arch.hvm_domain.ioreq_server.list);

    if ( is_default )
    {
        d->arch.hvm_domain.default_ioreq_server = s;
        hvm_ioreq_server_enable(s, true);
    }

    if ( id )
        *id = s->id;

    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
    domain_unpause(d);

    return 0;

 fail3:
 fail2:
    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
    domain_unpause(d);

    xfree(s);
 fail1:
    return rc;
}

int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
{
    struct hvm_ioreq_server *s;
    int rc;

    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);

    rc = -ENOENT;
    list_for_each_entry ( s,
                          &d->arch.hvm_domain.ioreq_server.list,
                          list_entry )
    {
        if ( s == d->arch.hvm_domain.default_ioreq_server )
            continue;

        if ( s->id != id )
            continue;

        domain_pause(d);

        p2m_set_ioreq_server(d, 0, s);

        hvm_ioreq_server_disable(s, false);

        list_del(&s->list_entry);

        hvm_ioreq_server_deinit(s, false);

        domain_unpause(d);

        xfree(s);

        rc = 0;
        break;
    }

    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);

    return rc;
}

int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
                              unsigned long *ioreq_gfn,
                              unsigned long *bufioreq_gfn,
                              evtchn_port_t *bufioreq_port)
{
    struct hvm_ioreq_server *s;
    int rc;

    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);

    rc = -ENOENT;
    list_for_each_entry ( s,
                          &d->arch.hvm_domain.ioreq_server.list,
                          list_entry )
    {
        if ( s == d->arch.hvm_domain.default_ioreq_server )
            continue;

        if ( s->id != id )
            continue;

        *ioreq_gfn = s->ioreq.gfn;

        if ( s->bufioreq.va != NULL )
        {
            *bufioreq_gfn = s->bufioreq.gfn;
            *bufioreq_port = s->bufioreq_evtchn;
        }

        rc = 0;
        break;
    }

    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);

    return rc;
}

int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
                                     uint32_t type, uint64_t start,
                                     uint64_t end)
{
    struct hvm_ioreq_server *s;
    int rc;

    if ( start > end )
        return -EINVAL;

    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);

    rc = -ENOENT;
    list_for_each_entry ( s,
                          &d->arch.hvm_domain.ioreq_server.list,
                          list_entry )
    {
        if ( s == d->arch.hvm_domain.default_ioreq_server )
            continue;

        if ( s->id == id )
        {
            struct rangeset *r;

            switch ( type )
            {
            case XEN_DMOP_IO_RANGE_PORT:
            case XEN_DMOP_IO_RANGE_MEMORY:
            case XEN_DMOP_IO_RANGE_PCI:
                r = s->range[type];
                break;

            default:
                r = NULL;
                break;
            }

            rc = -EINVAL;
            if ( !r )
                break;

            rc = -EEXIST;
            if ( rangeset_overlaps_range(r, start, end) )
                break;

            rc = rangeset_add_range(r, start, end);
            break;
        }
    }

    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);

    return rc;
}

int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
                                         uint32_t type, uint64_t start,
                                         uint64_t end)
{
    struct hvm_ioreq_server *s;
    int rc;

    if ( start > end )
        return -EINVAL;

    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);

    rc = -ENOENT;
    list_for_each_entry ( s,
                          &d->arch.hvm_domain.ioreq_server.list,
                          list_entry )
    {
        if ( s == d->arch.hvm_domain.default_ioreq_server )
            continue;

        if ( s->id == id )
        {
            struct rangeset *r;

            switch ( type )
            {
            case XEN_DMOP_IO_RANGE_PORT:
            case XEN_DMOP_IO_RANGE_MEMORY:
            case XEN_DMOP_IO_RANGE_PCI:
                r = s->range[type];
                break;

            default:
                r = NULL;
                break;
            }

            rc = -EINVAL;
            if ( !r )
                break;

            rc = -ENOENT;
            if ( !rangeset_contains_range(r, start, end) )
                break;

            rc = rangeset_remove_range(r, start, end);
            break;
        }
    }

    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);

    return rc;
}

/*
 * Map or unmap an ioreq server to a specific memory type. For now, only
 * HVMMEM_ioreq_server is supported; new types can be introduced in the
 * future, e.g. HVMMEM_ioreq_serverX mapped to ioreq server X. Currently
 * only write operations are forwarded to an ioreq server. Support for the
 * emulation of read operations can be added when an ioreq server has such
 * a requirement in the future.
 */
int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
                                     uint32_t type, uint32_t flags)
{
    struct hvm_ioreq_server *s;
    int rc;

    if ( type != HVMMEM_ioreq_server )
        return -EINVAL;

    if ( flags & ~XEN_DMOP_IOREQ_MEM_ACCESS_WRITE )
        return -EINVAL;

    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);

    rc = -ENOENT;
    list_for_each_entry ( s,
                          &d->arch.hvm_domain.ioreq_server.list,
                          list_entry )
    {
        if ( s == d->arch.hvm_domain.default_ioreq_server )
            continue;

        if ( s->id == id )
        {
            rc = p2m_set_ioreq_server(d, flags, s);
            break;
        }
    }

    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);

    if ( rc == 0 && flags == 0 )
    {
        struct p2m_domain *p2m = p2m_get_hostp2m(d);

        if ( read_atomic(&p2m->ioreq.entry_count) )
            p2m_change_entry_type_global(d, p2m_ioreq_server, p2m_ram_rw);
    }

    return rc;
}

int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
                               bool enabled)
{
    struct list_head *entry;
    int rc;

    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);

    rc = -ENOENT;
    list_for_each ( entry,
                    &d->arch.hvm_domain.ioreq_server.list )
    {
        struct hvm_ioreq_server *s = list_entry(entry,
                                                struct hvm_ioreq_server,
                                                list_entry);

        if ( s == d->arch.hvm_domain.default_ioreq_server )
            continue;

        if ( s->id != id )
            continue;

        domain_pause(d);

        if ( enabled )
            hvm_ioreq_server_enable(s, false);
        else
            hvm_ioreq_server_disable(s, false);

        domain_unpause(d);

        rc = 0;
        break;
    }

    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
    return rc;
}

int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
{
    struct hvm_ioreq_server *s;
    int rc;

    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);

    list_for_each_entry ( s,
                          &d->arch.hvm_domain.ioreq_server.list,
                          list_entry )
    {
        bool is_default = (s == d->arch.hvm_domain.default_ioreq_server);

        rc = hvm_ioreq_server_add_vcpu(s, is_default, v);
        if ( rc )
            goto fail;
    }

    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);

    return 0;

 fail:
    list_for_each_entry ( s,
                          &d->arch.hvm_domain.ioreq_server.list,
                          list_entry )
        hvm_ioreq_server_remove_vcpu(s, v);

    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);

    return rc;
}

void hvm_all_ioreq_servers_remove_vcpu(struct domain *d, struct vcpu *v)
{
    struct hvm_ioreq_server *s;

    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);

    list_for_each_entry ( s,
                          &d->arch.hvm_domain.ioreq_server.list,
                          list_entry )
        hvm_ioreq_server_remove_vcpu(s, v);

    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
}

void hvm_destroy_all_ioreq_servers(struct domain *d)
{
    struct hvm_ioreq_server *s, *next;

    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);

    /* No need to domain_pause() as the domain is being torn down */

    list_for_each_entry_safe ( s,
                               next,
                               &d->arch.hvm_domain.ioreq_server.list,
                               list_entry )
    {
        bool is_default = (s == d->arch.hvm_domain.default_ioreq_server);

        hvm_ioreq_server_disable(s, is_default);

        if ( is_default )
            d->arch.hvm_domain.default_ioreq_server = NULL;

        list_del(&s->list_entry);

        hvm_ioreq_server_deinit(s, is_default);

        xfree(s);
    }

    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
}

static int hvm_replace_event_channel(struct vcpu *v, domid_t remote_domid,
                                     evtchn_port_t *p_port)
{
    int old_port, new_port;

    new_port = alloc_unbound_xen_event_channel(v->domain, v->vcpu_id,
                                               remote_domid, NULL);
    if ( new_port < 0 )
        return new_port;

    /* xchg() ensures that only we call free_xen_event_channel(). */
    old_port = xchg(p_port, new_port);
    free_xen_event_channel(v->domain, old_port);
    return 0;
}

int hvm_set_dm_domain(struct domain *d, domid_t domid)
{
    struct hvm_ioreq_server *s;
    int rc = 0;

    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);

    /*
     * Lack of ioreq server is not a failure. HVM_PARAM_DM_DOMAIN will
     * still be set and thus, when the server is created, it will have
     * the correct domid.
     */
    s = d->arch.hvm_domain.default_ioreq_server;
    if ( !s )
        goto done;

    domain_pause(d);
    spin_lock(&s->lock);

    if ( s->domid != domid )
    {
        struct hvm_ioreq_vcpu *sv;

        list_for_each_entry ( sv,
                              &s->ioreq_vcpu_list,
                              list_entry )
        {
            struct vcpu *v = sv->vcpu;

            if ( v->vcpu_id == 0 )
            {
                rc = hvm_replace_event_channel(v, domid,
                                               &s->bufioreq_evtchn);
                if ( rc )
                    break;

                d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_EVTCHN] =
                    s->bufioreq_evtchn;
            }

            rc = hvm_replace_event_channel(v, domid, &sv->ioreq_evtchn);
            if ( rc )
                break;

            hvm_update_ioreq_evtchn(s, sv);
        }

        s->domid = domid;
    }

    spin_unlock(&s->lock);
    domain_unpause(d);

 done:
    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
    return rc;
}

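/*
 * Decide which ioreq server should handle request p: PCI config cycles
 * (data accesses through 0xcfc with CF8 enabled) are matched against the
 * PCI rangesets, port I/O and MMIO against the corresponding range types,
 * falling back to the default server when nothing matches.
 */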
struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
                                                 ioreq_t *p)
{
    struct hvm_ioreq_server *s;
    uint32_t cf8;
    uint8_t type;
    uint64_t addr;

    if ( list_empty(&d->arch.hvm_domain.ioreq_server.list) )
        return NULL;

    if ( p->type != IOREQ_TYPE_COPY && p->type != IOREQ_TYPE_PIO )
        return d->arch.hvm_domain.default_ioreq_server;

    cf8 = d->arch.hvm_domain.pci_cf8;

    if ( p->type == IOREQ_TYPE_PIO &&
         (p->addr & ~3) == 0xcfc &&
         CF8_ENABLED(cf8) )
    {
        uint32_t sbdf, x86_fam;
        unsigned int bus, slot, func, reg;

        reg = hvm_pci_decode_addr(cf8, p->addr, &bus, &slot, &func);

        /* PCI config data cycle */

        sbdf = XEN_DMOP_PCI_SBDF(0, bus, slot, func);

        type = XEN_DMOP_IO_RANGE_PCI;
        addr = ((uint64_t)sbdf << 32) | reg;
        /* AMD extended configuration space access? */
        if ( CF8_ADDR_HI(cf8) &&
             d->arch.cpuid->x86_vendor == X86_VENDOR_AMD &&
             (x86_fam = get_cpu_family(
                 d->arch.cpuid->basic.raw_fms, NULL, NULL)) > 0x10 &&
             x86_fam <= 0x17 )
        {
            uint64_t msr_val;

            if ( !rdmsr_safe(MSR_AMD64_NB_CFG, msr_val) &&
                 (msr_val & (1ULL << AMD64_NB_CFG_CF8_EXT_ENABLE_BIT)) )
                addr |= CF8_ADDR_HI(cf8);
        }
    }
    else
    {
        type = (p->type == IOREQ_TYPE_PIO) ?
                XEN_DMOP_IO_RANGE_PORT : XEN_DMOP_IO_RANGE_MEMORY;
        addr = p->addr;
    }

    list_for_each_entry ( s,
                          &d->arch.hvm_domain.ioreq_server.list,
                          list_entry )
    {
        struct rangeset *r;

        if ( s == d->arch.hvm_domain.default_ioreq_server )
            continue;

        if ( !s->enabled )
            continue;

        r = s->range[type];

        switch ( type )
        {
            unsigned long end;

        case XEN_DMOP_IO_RANGE_PORT:
            end = addr + p->size - 1;
            if ( rangeset_contains_range(r, addr, end) )
                return s;

            break;
        case XEN_DMOP_IO_RANGE_MEMORY:
            end = addr + (p->size * p->count) - 1;
            if ( rangeset_contains_range(r, addr, end) )
                return s;

            break;
        case XEN_DMOP_IO_RANGE_PCI:
            if ( rangeset_contains_singleton(r, addr >> 32) )
            {
                p->type = IOREQ_TYPE_PCI_CONFIG;
                p->addr = addr;
                return s;
            }

            break;
        }
    }

    return d->arch.hvm_domain.default_ioreq_server;
}

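/*
 * Queue an ioreq on the server's buffered (asynchronous) ring and notify
 * the emulator. Requests that cannot be buffered (see the comment in the
 * body) are dropped or handed back for synchronous emulation.
 */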
static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
{
    struct domain *d = current->domain;
    struct hvm_ioreq_page *iorp;
    buffered_iopage_t *pg;
    buf_ioreq_t bp = { .data = p->data,
                       .addr = p->addr,
                       .type = p->type,
                       .dir = p->dir };
    /* Timeoffset sends 64b data, but no address. Use two consecutive slots. */
    int qw = 0;

    /* Ensure buffered_iopage fits in a page */
    BUILD_BUG_ON(sizeof(buffered_iopage_t) > PAGE_SIZE);

    iorp = &s->bufioreq;
    pg = iorp->va;

    if ( !pg )
        return X86EMUL_UNHANDLEABLE;

    /*
     * Return 0 for the cases we can't deal with:
     *  - 'addr' is only a 20-bit field, so we cannot address beyond 1MB
     *  - we cannot buffer accesses to guest memory buffers, as the guest
     *    may expect the memory buffer to be synchronously accessed
     *  - the count field is usually used with data_is_ptr and since we don't
     *    support data_is_ptr we do not waste space for the count field either
     */
    if ( (p->addr > 0xffffful) || p->data_is_ptr || (p->count != 1) )
        return 0;

    switch ( p->size )
    {
    case 1:
        bp.size = 0;
        break;
    case 2:
        bp.size = 1;
        break;
    case 4:
        bp.size = 2;
        break;
    case 8:
        bp.size = 3;
        qw = 1;
        break;
    default:
        gdprintk(XENLOG_WARNING, "unexpected ioreq size: %u\n", p->size);
        return X86EMUL_UNHANDLEABLE;
    }

    spin_lock(&s->bufioreq_lock);

    if ( (pg->ptrs.write_pointer - pg->ptrs.read_pointer) >=
         (IOREQ_BUFFER_SLOT_NUM - qw) )
    {
        /* The queue is full: send the iopacket through the normal path. */
        spin_unlock(&s->bufioreq_lock);
        return X86EMUL_UNHANDLEABLE;
    }

    pg->buf_ioreq[pg->ptrs.write_pointer % IOREQ_BUFFER_SLOT_NUM] = bp;

    if ( qw )
    {
        bp.data = p->data >> 32;
        pg->buf_ioreq[(pg->ptrs.write_pointer+1) % IOREQ_BUFFER_SLOT_NUM] = bp;
    }

    /* Make the ioreq_t visible /before/ write_pointer. */
    wmb();
    pg->ptrs.write_pointer += qw ? 2 : 1;

    /* Canonicalize read/write pointers to prevent their overflow. */
    while ( s->bufioreq_atomic && qw++ < IOREQ_BUFFER_SLOT_NUM &&
            pg->ptrs.read_pointer >= IOREQ_BUFFER_SLOT_NUM )
    {
        union bufioreq_pointers old = pg->ptrs, new;
        unsigned int n = old.read_pointer / IOREQ_BUFFER_SLOT_NUM;

        new.read_pointer = old.read_pointer - n * IOREQ_BUFFER_SLOT_NUM;
        new.write_pointer = old.write_pointer - n * IOREQ_BUFFER_SLOT_NUM;
        cmpxchg(&pg->ptrs.full, old.full, new.full);
    }

    notify_via_xen_event_channel(d, s->bufioreq_evtchn);
    spin_unlock(&s->bufioreq_lock);

    return X86EMUL_OKAY;
}

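/*
 * Dispatch an ioreq to server s: either via the buffered ring, or by
 * copying it into the current vCPU's slot in the shared page, notifying
 * the emulator and marking the request pending.
 */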
int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p,
                   bool buffered)
{
    struct vcpu *curr = current;
    struct domain *d = curr->domain;
    struct hvm_ioreq_vcpu *sv;

    ASSERT(s);

    if ( buffered )
        return hvm_send_buffered_ioreq(s, proto_p);

    if ( unlikely(!vcpu_start_shutdown_deferral(curr)) )
        return X86EMUL_RETRY;

    list_for_each_entry ( sv,
                          &s->ioreq_vcpu_list,
                          list_entry )
    {
        if ( sv->vcpu == curr )
        {
            evtchn_port_t port = sv->ioreq_evtchn;
            ioreq_t *p = get_ioreq(s, curr);

            if ( unlikely(p->state != STATE_IOREQ_NONE) )
            {
                gprintk(XENLOG_ERR, "device model set bad IO state %d\n",
                        p->state);
                break;
            }

            if ( unlikely(p->vp_eport != port) )
            {
                gprintk(XENLOG_ERR, "device model set bad event channel %d\n",
                        p->vp_eport);
                break;
            }

            proto_p->state = STATE_IOREQ_NONE;
            proto_p->vp_eport = port;
            *p = *proto_p;

            prepare_wait_on_xen_event_channel(port);

            /*
             * Following happens /after/ blocking and setting up ioreq
             * contents. prepare_wait_on_xen_event_channel() is an implicit
             * barrier.
             */
            p->state = STATE_IOREQ_READY;
            notify_via_xen_event_channel(d, port);

            sv->pending = true;
            return X86EMUL_RETRY;
        }
    }

    return X86EMUL_UNHANDLEABLE;
}

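/*
 * Send an ioreq to every registered server, returning the number of
 * servers that failed to accept it.
 */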
unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered)
{
    struct domain *d = current->domain;
    struct hvm_ioreq_server *s;
    unsigned int failed = 0;

    list_for_each_entry ( s,
                          &d->arch.hvm_domain.ioreq_server.list,
                          list_entry )
        if ( hvm_send_ioreq(s, p, buffered) == X86EMUL_UNHANDLEABLE )
            failed++;

    return failed;
}

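/*
 * Latch writes to the PCI config address port (0xcf8) so that subsequent
 * data accesses can be decoded, then let the normal emulation path run.
 */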
static int hvm_access_cf8(
    int dir, unsigned int port, unsigned int bytes, uint32_t *val)
{
    struct domain *d = current->domain;

    if ( dir == IOREQ_WRITE && bytes == 4 )
        d->arch.hvm_domain.pci_cf8 = *val;

    /* We always need to fall through to the catch all emulator */
    return X86EMUL_UNHANDLEABLE;
}

void hvm_ioreq_init(struct domain *d)
{
    spin_lock_init(&d->arch.hvm_domain.ioreq_server.lock);
    INIT_LIST_HEAD(&d->arch.hvm_domain.ioreq_server.list);

    register_portio_handler(d, 0xcf8, 4, hvm_access_cf8);
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */