Search hits for the identifier `currd` in xen-4.10.0-shim-comet (per hit: source line number, matching line, enclosing function; lists ending in [all …] are truncated).

/xen-4.10.0-shim-comet/xen/arch/x86/
physdev.c, in do_physdev_op():
    196  if ( eoi.irq >= currd->nr_pirqs )
    198  spin_lock(&currd->event_lock);
    204  if ( currd->arch.auto_unmask )
    206  if ( is_pv_domain(currd) || domain_pirq_to_irq(currd, eoi.irq) > 0 )
    208  if ( is_hvm_domain(currd) &&
    220  spin_unlock(&currd->event_lock);
    261  currd->arch.auto_unmask = 1;
    269  ret = pirq_guest_unmask(currd);
    283  if ( is_hvm_domain(currd) &&
    300  if ( pirq_shared(currd, irq) )
    [all …]
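All of these hits are in do_physdev_op(); as the declarations in the other files below show, currd is Xen's usual local caching of current->domain. A condensed, non-verbatim sketch of the EOI path (the -EINVAL value is an assumption):

    struct vcpu *curr = current;            /* per-CPU current vCPU */
    struct domain *currd = curr->domain;    /* cached once per hypercall */

    if ( eoi.irq >= currd->nr_pirqs )
        return -EINVAL;                     /* assumed error value */

    spin_lock(&currd->event_lock);
    /* ... EOI bookkeeping; currd->arch.auto_unmask lets the EOI also
     * perform the unmask that PHYSDEVOP_irq_eoi would otherwise need ... */
    spin_unlock(&currd->event_lock);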
domctl.c, in arch_do_domctl():
    359  struct domain *currd = curr->domain;   (local)
    419  paging_mode_external(currd) ||
    559  if ( (d == currd) || /* no domain_pause() */
    585  if ( (d == currd) || /* no domain_pause() */
    625  if ( (d == currd) || /* no domain_pause() */
    849  if ( ret && is_hardware_domain(currd) )
    963  if ( d == currd ) /* no domain_pause() */
    976  if ( d == currd ) /* no domain_pause() */
    991  if ( d == currd ) /* no domain_pause() */
   1063  currd->domain_id, v);
    [all …]
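The repeated (d == currd) checks guard sub-ops that must pause their target: a domain cannot domain_pause() itself, since the vCPU doing the pausing would never run again to unpause. A sketch of the guard (the error value is an assumption):

    if ( d == currd )            /* no domain_pause() */
        ret = -EINVAL;           /* assumed error value */
    else
    {
        domain_pause(d);         /* target is quiescent from here on */
        /* ... inspect or mutate d's state safely ... */
        domain_unpause(d);
    }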
mm.c, in do_mmuext_op():
   2987  struct domain *currd = curr->domain;   (local)
   3052  if ( is_hvm_domain(currd) )
   3138  if ( unlikely(pg_owner != currd) )
   3203  if ( unlikely(currd != pg_owner) )
   3214  if ( unlikely(currd != pg_owner) )
   3274  if ( likely(currd == pg_owner) )
   3281  if ( unlikely(currd != pg_owner) )
   3292  if ( unlikely(currd != pg_owner) )
   3310  if ( likely(currd == pg_owner) )
   3317  if ( unlikely(currd != pg_owner) )
    [all …]
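do_mmuext_op() can act on pages owned by a foreign domain, so nearly every sub-op distinguishes the caller (currd) from the page owner (pg_owner). A condensed sketch of that plumbing; the get_pg_owner()/put_pg_owner() helper names are taken from the same file, but the flow here is abridged, not verbatim:

    struct domain *pg_owner = get_pg_owner(foreigndom);

    if ( unlikely(pg_owner != currd) )
    {
        /* Cross-domain case: extra privilege checks apply, and many
         * sub-ops simply refuse to operate on foreign pages. */
    }
    /* ... per-sub-op processing, with the owner checks seen above ... */
    put_pg_owner(pg_owner);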
msi.c, in msix_capability_init():
   1001  struct domain *currd = current->domain;   (local)
   1002  struct domain *d = dev->domain ?: currd;
   1004  if ( !is_hardware_domain(currd) || d != currd )
   1006  is_hardware_domain(currd)
   1012  (!is_hardware_domain(currd) || d->tot_pages) )
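msix_capability_init() may run before the device has an assigned owner, hence the dev->domain ?: currd fallback (GCC's binary ?: extension). A sketch of the gating, condensed from the hits above; the exact reaction to the unexpected case is assumed:

    struct domain *currd = current->domain;
    struct domain *d = dev->domain ?: currd;   /* owner if set, else caller */

    if ( !is_hardware_domain(currd) || d != currd )
        /* unexpected caller/owner combination: warn (behaviour assumed) */;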
/xen-4.10.0-shim-comet/xen/arch/x86/pv/
emul-priv-op.c
  in pci_cfg_ok():
    185  if ( !is_hardware_domain(currd) )
  in guest_io_write():
    351  struct domain *currd)   (argument)
    382  currd->arch.cmos_idx = data;
    399  currd->arch.pci_cf8 = data;
  in write_cr():
    773  gfn = !is_pv_32bit_domain(currd)
  in read_msr():
    859  if ( is_pv_32bit_domain(currd) )
    865  if ( is_pv_32bit_domain(currd) )
    872  if ( is_pv_32bit_domain(currd) )
    895  if ( is_pv_32bit_domain(currd) )
  in pv_emulate_privileged_op():
   1325  struct domain *currd = curr->domain;   (local)
  [all …]
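The guest_io_write() hits show the latched-index pattern used to emulate indexed port pairs: a write to the index port is parked in per-domain state so the matching data-port access can be completed later. A condensed, non-verbatim sketch; the port numbers are the architectural ones (0x70 CMOS/RTC index, 0xcf8 PCI config address), the wrapper name is hypothetical, and the width check is assumed:

    static void sketch_guest_io_write(unsigned int port, unsigned int bytes,
                                      uint32_t data, struct domain *currd)
    {
        switch ( port )
        {
        case 0x70:                         /* CMOS/RTC index port */
            currd->arch.cmos_idx = data;   /* replayed on port 0x71 access */
            break;
        case 0xcf8:                        /* PCI config-space address port */
            if ( bytes == 4 )              /* width check assumed */
                currd->arch.pci_cf8 = data;
            break;
        }
    }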
grant_table.c
  in create_grant_pv_mapping():
     57  struct domain *currd = curr->domain;   (local)
     64  nl1e = adjust_guest_l1e(nl1e, currd);
     83  if ( !get_page_from_mfn(gl1mfn, currd) )
    127  put_page_from_l1e(ol1e, currd);
  in steal_linear_address():
    146  struct domain *currd = curr->domain;   (local)
    152  ASSERT(is_pv_domain(currd));
    162  if ( !get_page_from_mfn(gl1mfn, currd) )
  in replace_grant_pv_mapping():
    198  struct domain *currd = curr->domain;   (local)
    210  if ( !is_pv_32bit_domain(currd) )
    245  if ( is_pv_32bit_domain(currd) )
  [all …]
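create_grant_pv_mapping() writes a grant mapping straight into a PV guest's L1 page table. A condensed sketch of the sequence the hits trace, not verbatim source (validation and error handling elided):

    nl1e = adjust_guest_l1e(nl1e, currd);      /* e.g. force _PAGE_USER for PV */
    if ( !get_page_from_mfn(gl1mfn, currd) )   /* reference the L1 table frame */
        goto out;                              /* label hypothetical */
    /* ... check the frame really is an L1 table, swap the new PTE in ... */
    put_page_from_l1e(ol1e, currd);            /* drop the old entry's refs */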
mm.c, in pv_map_ldt_shadow_page():
     93  struct domain *currd = curr->domain;   (local)
    108  if ( is_pv_32bit_domain(currd) )
    115  page = get_page_from_gfn(currd, l1e_get_pfn(gl1e), NULL, P2M_ALLOC);
descriptor-tables.c, in do_update_descriptor():
    131  struct domain *currd = current->domain;   (local)
    143  page = get_page_from_gfn(currd, gmfn, NULL, P2M_ALLOC);
    146  !check_descriptor(currd, &d) )
    167  paging_mark_dirty(currd, _mfn(mfn));
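do_update_descriptor() follows the standard PV pattern for guest-supplied memory: translate the GFN, validate the payload, write it, and record the page as dirty for live migration. A condensed sketch (cleanup paths abridged):

    page = get_page_from_gfn(currd, gmfn, NULL, P2M_ALLOC);
    /* NB: in this function "d" is the descriptor being checked, not a domain. */
    if ( !page || !check_descriptor(currd, &d) )
        goto out;                          /* refused; exact cleanup elided */
    /* ... map the frame, write the 8-byte descriptor atomically ... */
    paging_mark_dirty(currd, _mfn(mfn));   /* keep log-dirty tracking honest */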
ro-page-fault.c, in pv_ro_page_fault():
    330  const struct domain *currd = current->domain;   (local)
    331  unsigned int addr_size = is_pv_32bit_domain(currd) ? 32 : BITS_PER_LONG;
    334  .vendor = currd->arch.cpuid->x86_vendor,
    349  mmio_ro = is_hardware_domain(currd) &&
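Line 331 is the usual bitness selection for the instruction emulator, and line 334 seeds the emulated CPUID vendor from the domain's policy. A sketch of the context setup these two hits imply; the .sp_size field matching .addr_size is an assumption:

    unsigned int addr_size = is_pv_32bit_domain(currd) ? 32 : BITS_PER_LONG;
    struct x86_emulate_ctxt ctxt = {
        .regs      = regs,
        .addr_size = addr_size,
        .sp_size   = addr_size,                       /* assumed to match */
        .vendor    = currd->arch.cpuid->x86_vendor,
    };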
/xen-4.10.0-shim-comet/xen/arch/x86/hvm/
hypercall.c, in hvm_hypercall():
    147  struct domain *currd = curr->domain;   (local)
    158  if ( currd->arch.monitor.guest_request_userspace_enabled &&
    173  if ( (eax & 0x80000000) && is_viridian_domain(currd) )
    284  if ( unlikely(currd->arch.hvm_domain.qemu_mapcache_invalidate) &&
    285  test_and_clear_bool(currd->arch.hvm_domain.qemu_mapcache_invalidate) )
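Line 173 is the Viridian dispatch: Hyper-V hypercall numbers have bit 31 set, so they are peeled off before the normal Xen hypercall table is consulted. The check, essentially as the hit shows it:

    if ( (eax & 0x80000000) && is_viridian_domain(currd) )
        return viridian_hypercall(regs);   /* Hyper-V ABI, not Xen's */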
vioapic.c, in vioapic_hwdom_map_gsi():
    164  struct domain *currd = current->domain;   (local)
    171  ASSERT(is_hardware_domain(currd));
    184  ret = allocate_and_map_gsi_pirq(currd, pirq, &pirq);
    193  ret = pt_irq_create_bind(currd, &pt_irq_bind);
    198  spin_lock(&currd->event_lock);
    199  unmap_domain_pirq(currd, pirq);
    200  spin_unlock(&currd->event_lock);
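Lines 184-200 trace a map-then-bind sequence with unwind: if binding the passed-through IRQ fails after the PIRQ was already allocated and mapped, the mapping is torn down again under the event_lock. A condensed sketch assembled from those hits:

    ret = allocate_and_map_gsi_pirq(currd, pirq, &pirq);
    if ( ret )
        return ret;

    ret = pt_irq_create_bind(currd, &pt_irq_bind);
    if ( ret )
    {
        spin_lock(&currd->event_lock);     /* unwind the earlier mapping */
        unmap_domain_pirq(currd, pirq);
        spin_unlock(&currd->event_lock);
    }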
hvm.c
  in hvm_hap_nested_page_fault():
   1702  struct domain *currd = curr->domain;   (local)
   1713  if ( nestedhvm_enabled(currd)
   1765  ap2m_active = altp2m_active(currd);
   1771  hostp2m = p2m_get_hostp2m(currd);
   1896  paging_mark_dirty(currd, mfn);
   1933  p2m_mem_paging_populate(currd, gfn);
  in _hvm_rdtsc_intercept():
   3374  struct domain *currd = curr->domain;   (local)
   3376  if ( currd->arch.vtsc )
   3390  currd->arch.vtsc_kerncount++;
  in hvm_descriptor_access_intercept():
   3709  struct domain *currd = curr->domain;   (local)
  [all …]
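The _hvm_rdtsc_intercept() hits show TSC-emulation bookkeeping: when currd->arch.vtsc is set, intercepted RDTSC executions are counted, with a separate counter for guest-kernel ones. A sketch; the CPL test is an assumption about how kernel mode is detected here:

    if ( currd->arch.vtsc )
    {
        if ( hvm_get_cpl(curr) == 0 )        /* assumed kernel-mode test */
            currd->arch.vtsc_kerncount++;
        /* ... supply the scaled, emulated TSC value to the guest ... */
    }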
emulate.c
  in hvmemul_do_io():
    125  struct domain *currd = curr->domain;   (local)
    167  domain_crash(currd);
    242  get_gfn_query_unlocked(currd, gmfn, &p2mt);
    248  s = p2m_get_ioreq_server(currd, &flags);
    271  s = hvm_select_ioreq_server(currd, &p);
    282  if ( rc != X86EMUL_RETRY || currd->is_shutting_down )
  in hvmemul_unmap_linear_addr():
    634  struct domain *currd = current->domain;   (local)
    650  paging_mark_dirty(currd, *mfn);
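Line 271 picks the ioreq server (external emulator) for the request, and line 282 decides whether the in-flight I/O state can be retired: completion is forced if the emulator returned anything but RETRY, or if the domain is already shutting down. A sketch; the vio state field and constant names are assumptions:

    rc = hvm_send_ioreq(s, &p, 0);                /* hand off to the emulator */
    if ( rc != X86EMUL_RETRY || currd->is_shutting_down )
        vio->io_req.state = STATE_IOREQ_NONE;     /* field/constant assumed */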
viridian.c, in viridian_hypercall():
    837  struct domain *currd = curr->domain;   (local)
    865  ASSERT(is_viridian_domain(currd));
    937  for_each_vcpu ( currd, v )
/xen-4.10.0-shim-comet/xen/common/
grant_table.c
  in steal_maptrack_handle():
    457  const struct domain *currd = curr->domain;   (local)
    461  first = i = get_random() % currd->max_vcpus;
    464  if ( currd->vcpu[i] )
    468  handle = _get_maptrack_handle(t, currd->vcpu[i]);
    477  if ( i == currd->max_vcpus )
  in put_maptrack_handle():
    489  struct domain *currd = current->domain;   (local)
    497  v = currd->vcpu[maptrack_entry(t, handle).vcpu];
  in gnttab_set_version():
   2866  struct domain *currd = current->domain;   (local)
   2867  struct grant_table *gt = currd->grant_table;
   2909  res = gnttab_populate_status_frames(currd, gt, nr_grant_frames(gt));
  [all …]
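steal_maptrack_handle() reclaims a free maptrack entry from another vCPU's free list: it starts at a random vCPU so the stealing load spreads, walks the vCPU array with wrap-around, and gives up after one full lap. A condensed sketch close to what the hits outline (not verbatim):

    unsigned int first, i;

    first = i = get_random() % currd->max_vcpus;
    do {
        if ( currd->vcpu[i] )     /* skip uninitialised vCPU slots */
        {
            int handle = _get_maptrack_handle(t, currd->vcpu[i]);

            if ( handle != -1 )
                return handle;
        }
        if ( ++i == currd->max_vcpus )
            i = 0;                /* wrap around */
    } while ( i != first );

    return -1;                    /* nothing to steal */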
memory.c, in propagate_node():
    439  const struct domain *currd = current->domain;   (local)
    447  if ( is_hardware_domain(currd) || is_control_domain(currd) )
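propagate_node() decides whether a caller-supplied NUMA node hint in a memory op may be honoured; line 447 restricts node placement to privileged callers. A minimal sketch, with the return convention assumed:

    if ( is_hardware_domain(currd) || is_control_domain(currd) )
        /* privileged: propagate the requested node into the alloc flags */;
    else
        /* unprivileged: assumed that (at least) exact-node requests fail */
        return false;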