Lines matching refs: s (struct hvm_ioreq_server * in Xen's HVM ioreq-server code). A hedged sketch of the fields these references imply follows the listing.

36 static ioreq_t *get_ioreq(struct hvm_ioreq_server *s, struct vcpu *v) in get_ioreq() argument
38 shared_iopage_t *p = s->ioreq.va; in get_ioreq()
49 struct hvm_ioreq_server *s; in hvm_io_pending() local
51 list_for_each_entry ( s, in hvm_io_pending()
58 &s->ioreq_vcpu_list, in hvm_io_pending()
128 struct hvm_ioreq_server *s; in handle_hvm_io_completion() local
131 list_for_each_entry ( s, in handle_hvm_io_completion()
138 &s->ioreq_vcpu_list, in handle_hvm_io_completion()
143 if ( !hvm_wait_for_io(sv, get_ioreq(s, v)) ) in handle_hvm_io_completion()
211 static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, bool buf) in hvm_unmap_ioreq_page() argument
213 struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq; in hvm_unmap_ioreq_page()
219 struct hvm_ioreq_server *s, bool buf, unsigned long gfn) in hvm_map_ioreq_page() argument
221 struct domain *d = s->domain; in hvm_map_ioreq_page()
222 struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq; in hvm_map_ioreq_page()
245 const struct hvm_ioreq_server *s; in is_ioreq_server_page() local
250 list_for_each_entry ( s, in is_ioreq_server_page()
254 if ( (s->ioreq.va && s->ioreq.page == page) || in is_ioreq_server_page()
255 (s->bufioreq.va && s->bufioreq.page == page) ) in is_ioreq_server_page()
291 static void hvm_update_ioreq_evtchn(struct hvm_ioreq_server *s, in hvm_update_ioreq_evtchn() argument
294 ASSERT(spin_is_locked(&s->lock)); in hvm_update_ioreq_evtchn()
296 if ( s->ioreq.va != NULL ) in hvm_update_ioreq_evtchn()
298 ioreq_t *p = get_ioreq(s, sv->vcpu); in hvm_update_ioreq_evtchn()
304 static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s, in hvm_ioreq_server_add_vcpu() argument
316 spin_lock(&s->lock); in hvm_ioreq_server_add_vcpu()
318 rc = alloc_unbound_xen_event_channel(v->domain, v->vcpu_id, s->domid, in hvm_ioreq_server_add_vcpu()
325 if ( v->vcpu_id == 0 && s->bufioreq.va != NULL ) in hvm_ioreq_server_add_vcpu()
327 struct domain *d = s->domain; in hvm_ioreq_server_add_vcpu()
329 rc = alloc_unbound_xen_event_channel(v->domain, 0, s->domid, NULL); in hvm_ioreq_server_add_vcpu()
333 s->bufioreq_evtchn = rc; in hvm_ioreq_server_add_vcpu()
336 s->bufioreq_evtchn; in hvm_ioreq_server_add_vcpu()
341 list_add(&sv->list_entry, &s->ioreq_vcpu_list); in hvm_ioreq_server_add_vcpu()
343 if ( s->enabled ) in hvm_ioreq_server_add_vcpu()
344 hvm_update_ioreq_evtchn(s, sv); in hvm_ioreq_server_add_vcpu()
346 spin_unlock(&s->lock); in hvm_ioreq_server_add_vcpu()
353 spin_unlock(&s->lock); in hvm_ioreq_server_add_vcpu()
360 static void hvm_ioreq_server_remove_vcpu(struct hvm_ioreq_server *s, in hvm_ioreq_server_remove_vcpu() argument
365 spin_lock(&s->lock); in hvm_ioreq_server_remove_vcpu()
368 &s->ioreq_vcpu_list, in hvm_ioreq_server_remove_vcpu()
376 if ( v->vcpu_id == 0 && s->bufioreq.va != NULL ) in hvm_ioreq_server_remove_vcpu()
377 free_xen_event_channel(v->domain, s->bufioreq_evtchn); in hvm_ioreq_server_remove_vcpu()
385 spin_unlock(&s->lock); in hvm_ioreq_server_remove_vcpu()
388 static void hvm_ioreq_server_remove_all_vcpus(struct hvm_ioreq_server *s) in hvm_ioreq_server_remove_all_vcpus() argument
392 spin_lock(&s->lock); in hvm_ioreq_server_remove_all_vcpus()
396 &s->ioreq_vcpu_list, in hvm_ioreq_server_remove_all_vcpus()
403 if ( v->vcpu_id == 0 && s->bufioreq.va != NULL ) in hvm_ioreq_server_remove_all_vcpus()
404 free_xen_event_channel(v->domain, s->bufioreq_evtchn); in hvm_ioreq_server_remove_all_vcpus()
411 spin_unlock(&s->lock); in hvm_ioreq_server_remove_all_vcpus()
414 static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s, in hvm_ioreq_server_map_pages() argument
420 rc = hvm_map_ioreq_page(s, false, ioreq_gfn); in hvm_ioreq_server_map_pages()
425 rc = hvm_map_ioreq_page(s, true, bufioreq_gfn); in hvm_ioreq_server_map_pages()
428 hvm_unmap_ioreq_page(s, false); in hvm_ioreq_server_map_pages()
433 static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s, in hvm_ioreq_server_setup_pages() argument
437 struct domain *d = s->domain; in hvm_ioreq_server_setup_pages()
449 return hvm_ioreq_server_map_pages(s, in hvm_ioreq_server_setup_pages()
460 rc = hvm_ioreq_server_map_pages(s, ioreq_gfn, bufioreq_gfn); in hvm_ioreq_server_setup_pages()
471 static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s, in hvm_ioreq_server_unmap_pages() argument
474 struct domain *d = s->domain; in hvm_ioreq_server_unmap_pages()
475 bool handle_bufioreq = !!s->bufioreq.va; in hvm_ioreq_server_unmap_pages()
478 hvm_unmap_ioreq_page(s, true); in hvm_ioreq_server_unmap_pages()
480 hvm_unmap_ioreq_page(s, false); in hvm_ioreq_server_unmap_pages()
485 hvm_free_ioreq_gfn(d, s->bufioreq.gfn); in hvm_ioreq_server_unmap_pages()
487 hvm_free_ioreq_gfn(d, s->ioreq.gfn); in hvm_ioreq_server_unmap_pages()
491 static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s, in hvm_ioreq_server_free_rangesets() argument
500 rangeset_destroy(s->range[i]); in hvm_ioreq_server_free_rangesets()
503 static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s, in hvm_ioreq_server_alloc_rangesets() argument
516 rc = asprintf(&name, "ioreq_server %d %s", s->id, in hvm_ioreq_server_alloc_rangesets()
524 s->range[i] = rangeset_new(s->domain, name, in hvm_ioreq_server_alloc_rangesets()
530 if ( !s->range[i] ) in hvm_ioreq_server_alloc_rangesets()
533 rangeset_limit(s->range[i], MAX_NR_IO_RANGES); in hvm_ioreq_server_alloc_rangesets()
540 hvm_ioreq_server_free_rangesets(s, false); in hvm_ioreq_server_alloc_rangesets()
545 static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s, in hvm_ioreq_server_enable() argument
548 struct domain *d = s->domain; in hvm_ioreq_server_enable()
550 bool handle_bufioreq = !!s->bufioreq.va; in hvm_ioreq_server_enable()
552 spin_lock(&s->lock); in hvm_ioreq_server_enable()
554 if ( s->enabled ) in hvm_ioreq_server_enable()
559 hvm_remove_ioreq_gfn(d, &s->ioreq); in hvm_ioreq_server_enable()
562 hvm_remove_ioreq_gfn(d, &s->bufioreq); in hvm_ioreq_server_enable()
565 s->enabled = true; in hvm_ioreq_server_enable()
568 &s->ioreq_vcpu_list, in hvm_ioreq_server_enable()
570 hvm_update_ioreq_evtchn(s, sv); in hvm_ioreq_server_enable()
573 spin_unlock(&s->lock); in hvm_ioreq_server_enable()
576 static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s, in hvm_ioreq_server_disable() argument
579 struct domain *d = s->domain; in hvm_ioreq_server_disable()
580 bool handle_bufioreq = !!s->bufioreq.va; in hvm_ioreq_server_disable()
582 spin_lock(&s->lock); in hvm_ioreq_server_disable()
584 if ( !s->enabled ) in hvm_ioreq_server_disable()
590 hvm_add_ioreq_gfn(d, &s->bufioreq); in hvm_ioreq_server_disable()
592 hvm_add_ioreq_gfn(d, &s->ioreq); in hvm_ioreq_server_disable()
595 s->enabled = false; in hvm_ioreq_server_disable()
598 spin_unlock(&s->lock); in hvm_ioreq_server_disable()
601 static int hvm_ioreq_server_init(struct hvm_ioreq_server *s, in hvm_ioreq_server_init() argument
609 s->id = id; in hvm_ioreq_server_init()
610 s->domain = d; in hvm_ioreq_server_init()
611 s->domid = domid; in hvm_ioreq_server_init()
613 spin_lock_init(&s->lock); in hvm_ioreq_server_init()
614 INIT_LIST_HEAD(&s->ioreq_vcpu_list); in hvm_ioreq_server_init()
615 spin_lock_init(&s->bufioreq_lock); in hvm_ioreq_server_init()
617 rc = hvm_ioreq_server_alloc_rangesets(s, is_default); in hvm_ioreq_server_init()
622 s->bufioreq_atomic = true; in hvm_ioreq_server_init()
625 s, is_default, bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF); in hvm_ioreq_server_init()
631 rc = hvm_ioreq_server_add_vcpu(s, is_default, v); in hvm_ioreq_server_init()
639 hvm_ioreq_server_remove_all_vcpus(s); in hvm_ioreq_server_init()
640 hvm_ioreq_server_unmap_pages(s, is_default); in hvm_ioreq_server_init()
643 hvm_ioreq_server_free_rangesets(s, is_default); in hvm_ioreq_server_init()
648 static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s, in hvm_ioreq_server_deinit() argument
651 ASSERT(!s->enabled); in hvm_ioreq_server_deinit()
652 hvm_ioreq_server_remove_all_vcpus(s); in hvm_ioreq_server_deinit()
653 hvm_ioreq_server_unmap_pages(s, is_default); in hvm_ioreq_server_deinit()
654 hvm_ioreq_server_free_rangesets(s, is_default); in hvm_ioreq_server_deinit()
659 struct hvm_ioreq_server *s; in next_ioservid() local
670 list_for_each_entry ( s, in next_ioservid()
674 if ( id == s->id ) in next_ioservid()
687 struct hvm_ioreq_server *s; in hvm_create_ioreq_server() local
694 s = xzalloc(struct hvm_ioreq_server); in hvm_create_ioreq_server()
695 if ( !s ) in hvm_create_ioreq_server()
705 rc = hvm_ioreq_server_init(s, d, domid, is_default, bufioreq_handling, in hvm_create_ioreq_server()
710 list_add(&s->list_entry, in hvm_create_ioreq_server()
715 d->arch.hvm_domain.default_ioreq_server = s; in hvm_create_ioreq_server()
716 hvm_ioreq_server_enable(s, true); in hvm_create_ioreq_server()
720 *id = s->id; in hvm_create_ioreq_server()
732 xfree(s); in hvm_create_ioreq_server()
739 struct hvm_ioreq_server *s; in hvm_destroy_ioreq_server() local
745 list_for_each_entry ( s, in hvm_destroy_ioreq_server()
749 if ( s == d->arch.hvm_domain.default_ioreq_server ) in hvm_destroy_ioreq_server()
752 if ( s->id != id ) in hvm_destroy_ioreq_server()
757 p2m_set_ioreq_server(d, 0, s); in hvm_destroy_ioreq_server()
759 hvm_ioreq_server_disable(s, false); in hvm_destroy_ioreq_server()
761 list_del(&s->list_entry); in hvm_destroy_ioreq_server()
763 hvm_ioreq_server_deinit(s, false); in hvm_destroy_ioreq_server()
767 xfree(s); in hvm_destroy_ioreq_server()
783 struct hvm_ioreq_server *s; in hvm_get_ioreq_server_info() local
789 list_for_each_entry ( s, in hvm_get_ioreq_server_info()
793 if ( s == d->arch.hvm_domain.default_ioreq_server ) in hvm_get_ioreq_server_info()
796 if ( s->id != id ) in hvm_get_ioreq_server_info()
799 *ioreq_gfn = s->ioreq.gfn; in hvm_get_ioreq_server_info()
801 if ( s->bufioreq.va != NULL ) in hvm_get_ioreq_server_info()
803 *bufioreq_gfn = s->bufioreq.gfn; in hvm_get_ioreq_server_info()
804 *bufioreq_port = s->bufioreq_evtchn; in hvm_get_ioreq_server_info()
820 struct hvm_ioreq_server *s; in hvm_map_io_range_to_ioreq_server() local
829 list_for_each_entry ( s, in hvm_map_io_range_to_ioreq_server()
833 if ( s == d->arch.hvm_domain.default_ioreq_server ) in hvm_map_io_range_to_ioreq_server()
836 if ( s->id == id ) in hvm_map_io_range_to_ioreq_server()
845 r = s->range[type]; in hvm_map_io_range_to_ioreq_server()
875 struct hvm_ioreq_server *s; in hvm_unmap_io_range_from_ioreq_server() local
884 list_for_each_entry ( s, in hvm_unmap_io_range_from_ioreq_server()
888 if ( s == d->arch.hvm_domain.default_ioreq_server ) in hvm_unmap_io_range_from_ioreq_server()
891 if ( s->id == id ) in hvm_unmap_io_range_from_ioreq_server()
900 r = s->range[type]; in hvm_unmap_io_range_from_ioreq_server()
937 struct hvm_ioreq_server *s; in hvm_map_mem_type_to_ioreq_server() local
949 list_for_each_entry ( s, in hvm_map_mem_type_to_ioreq_server()
953 if ( s == d->arch.hvm_domain.default_ioreq_server ) in hvm_map_mem_type_to_ioreq_server()
956 if ( s->id == id ) in hvm_map_mem_type_to_ioreq_server()
958 rc = p2m_set_ioreq_server(d, flags, s); in hvm_map_mem_type_to_ioreq_server()
988 struct hvm_ioreq_server *s = list_entry(entry, in hvm_set_ioreq_server_state() local
992 if ( s == d->arch.hvm_domain.default_ioreq_server ) in hvm_set_ioreq_server_state()
995 if ( s->id != id ) in hvm_set_ioreq_server_state()
1001 hvm_ioreq_server_enable(s, false); in hvm_set_ioreq_server_state()
1003 hvm_ioreq_server_disable(s, false); in hvm_set_ioreq_server_state()
1017 struct hvm_ioreq_server *s; in hvm_all_ioreq_servers_add_vcpu() local
1022 list_for_each_entry ( s, in hvm_all_ioreq_servers_add_vcpu()
1026 bool is_default = (s == d->arch.hvm_domain.default_ioreq_server); in hvm_all_ioreq_servers_add_vcpu()
1028 rc = hvm_ioreq_server_add_vcpu(s, is_default, v); in hvm_all_ioreq_servers_add_vcpu()
1038 list_for_each_entry ( s, in hvm_all_ioreq_servers_add_vcpu()
1041 hvm_ioreq_server_remove_vcpu(s, v); in hvm_all_ioreq_servers_add_vcpu()
1050 struct hvm_ioreq_server *s; in hvm_all_ioreq_servers_remove_vcpu() local
1054 list_for_each_entry ( s, in hvm_all_ioreq_servers_remove_vcpu()
1057 hvm_ioreq_server_remove_vcpu(s, v); in hvm_all_ioreq_servers_remove_vcpu()
1064 struct hvm_ioreq_server *s, *next; in hvm_destroy_all_ioreq_servers() local
1070 list_for_each_entry_safe ( s, in hvm_destroy_all_ioreq_servers()
1075 bool is_default = (s == d->arch.hvm_domain.default_ioreq_server); in hvm_destroy_all_ioreq_servers()
1077 hvm_ioreq_server_disable(s, is_default); in hvm_destroy_all_ioreq_servers()
1082 list_del(&s->list_entry); in hvm_destroy_all_ioreq_servers()
1084 hvm_ioreq_server_deinit(s, is_default); in hvm_destroy_all_ioreq_servers()
1086 xfree(s); in hvm_destroy_all_ioreq_servers()
1110 struct hvm_ioreq_server *s; in hvm_set_dm_domain() local
1120 s = d->arch.hvm_domain.default_ioreq_server; in hvm_set_dm_domain()
1121 if ( !s ) in hvm_set_dm_domain()
1125 spin_lock(&s->lock); in hvm_set_dm_domain()
1127 if ( s->domid != domid ) in hvm_set_dm_domain()
1132 &s->ioreq_vcpu_list, in hvm_set_dm_domain()
1140 &s->bufioreq_evtchn); in hvm_set_dm_domain()
1145 s->bufioreq_evtchn; in hvm_set_dm_domain()
1152 hvm_update_ioreq_evtchn(s, sv); in hvm_set_dm_domain()
1155 s->domid = domid; in hvm_set_dm_domain()
1158 spin_unlock(&s->lock); in hvm_set_dm_domain()
1169 struct hvm_ioreq_server *s; in hvm_select_ioreq_server() local
1218 list_for_each_entry ( s, in hvm_select_ioreq_server()
1224 if ( s == d->arch.hvm_domain.default_ioreq_server ) in hvm_select_ioreq_server()
1227 if ( !s->enabled ) in hvm_select_ioreq_server()
1230 r = s->range[type]; in hvm_select_ioreq_server()
1239 return s; in hvm_select_ioreq_server()
1245 return s; in hvm_select_ioreq_server()
1253 return s; in hvm_select_ioreq_server()
1263 static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p) in hvm_send_buffered_ioreq() argument
1278 iorp = &s->bufioreq; in hvm_send_buffered_ioreq()
1315 spin_lock(&s->bufioreq_lock); in hvm_send_buffered_ioreq()
1321 spin_unlock(&s->bufioreq_lock); in hvm_send_buffered_ioreq()
1338 while ( s->bufioreq_atomic && qw++ < IOREQ_BUFFER_SLOT_NUM && in hvm_send_buffered_ioreq()
1349 notify_via_xen_event_channel(d, s->bufioreq_evtchn); in hvm_send_buffered_ioreq()
1350 spin_unlock(&s->bufioreq_lock); in hvm_send_buffered_ioreq()
1355 int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p, in hvm_send_ioreq() argument
1362 ASSERT(s); in hvm_send_ioreq()
1365 return hvm_send_buffered_ioreq(s, proto_p); in hvm_send_ioreq()
1371 &s->ioreq_vcpu_list, in hvm_send_ioreq()
1377 ioreq_t *p = get_ioreq(s, curr); in hvm_send_ioreq()
1418 struct hvm_ioreq_server *s; in hvm_broadcast_ioreq() local
1421 list_for_each_entry ( s, in hvm_broadcast_ioreq()
1424 if ( hvm_send_ioreq(s, p, buffered) == X86EMUL_UNHANDLEABLE ) in hvm_broadcast_ioreq()
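Taken together, the hits above amount to an inventory of the struct hvm_ioreq_server fields that this code reaches through s. The sketch below is reconstructed purely from those lines: field types and ordering are inferred and are assumptions, it leans on Xen-internal types (spinlock_t, ioservid_t, struct rangeset, struct list_head, struct page_info), and the array bound NR_IO_RANGE_TYPES is an assumed name. It illustrates the layout implied by the references rather than reproducing the authoritative Xen definition.

    /*
     * Sketch of the fields implied by the references to s listed above.
     * Types are inferred from usage and are assumptions; this is not the
     * authoritative Xen definition.
     */
    struct hvm_ioreq_page {
        unsigned long gfn;           /* guest frame, released via hvm_free_ioreq_gfn() */
        struct page_info *page;      /* compared against in is_ioreq_server_page() */
        void *va;                    /* mapping address; NULL while unmapped */
    };

    struct hvm_ioreq_server {
        struct list_head list_entry;      /* link on the per-domain server list */
        struct domain *domain;            /* guest being served (s->domain) */
        domid_t domid;                    /* emulating domain (s->domid) */
        ioservid_t id;                    /* matched by the s->id == id lookups */
        spinlock_t lock;                  /* asserted held in hvm_update_ioreq_evtchn() */
        struct hvm_ioreq_page ioreq;      /* synchronous ioreq page (s->ioreq) */
        struct hvm_ioreq_page bufioreq;   /* buffered ioreq page (s->bufioreq) */
        struct list_head ioreq_vcpu_list; /* per-vcpu state (s->ioreq_vcpu_list) */
        spinlock_t bufioreq_lock;         /* serialises buffered submissions */
        evtchn_port_t bufioreq_evtchn;    /* notified in hvm_send_buffered_ioreq() */
        struct rangeset *range[NR_IO_RANGE_TYPES]; /* assumed bound; indexed as s->range[type] */
        bool enabled;                     /* toggled by hvm_ioreq_server_{enable,disable}() */
        bool bufioreq_atomic;             /* checked in the slot loop at line 1338 */
    };

The per-vcpu entries iterated over s->ioreq_vcpu_list (the sv pointers seen in handle_hvm_io_completion() and hvm_send_ioreq()) carry at least a list_entry link and a vcpu back-pointer, and presumably the per-vcpu event channel allocated at line 318, but those fields are only partly visible in this listing.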