/xen-4.10.0-shim-comet/xen/arch/x86/pv/
callback.c
     44  struct trap_info *t = &curr->arch.pv_vcpu.trap_ctxt[TRAP_nmi];  in register_guest_nmi_callback()
     85  curr->arch.pv_vcpu.event_callback_eip = reg->address;  in register_guest_callback()
     89  curr->arch.pv_vcpu.failsafe_callback_eip = reg->address;  in register_guest_callback()
     99  curr->arch.pv_vcpu.syscall_callback_eip = reg->address;  in register_guest_callback()
    110  curr->arch.pv_vcpu.syscall32_disables_events =  in register_guest_callback()
    115  curr->arch.pv_vcpu.sysenter_callback_eip = reg->address;  in register_guest_callback()
    116  curr->arch.pv_vcpu.sysenter_disables_events =  in register_guest_callback()
    249  curr->arch.pv_vcpu.syscall32_disables_events =  in compat_register_guest_callback()
    256  curr->arch.pv_vcpu.sysenter_disables_events =  in compat_register_guest_callback()
    367  struct trap_info *dst = curr->arch.pv_vcpu.trap_ctxt;  in do_set_trap_table()
    [all …]
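The trap_ctxt hit at line 44 is the guest NMI registration path: the guest hands Xen an entry point and Xen records it in the per-vCPU trap table so a later NMI can be bounced into the guest kernel. A minimal sketch in C, abridged from the Xen 4.10 source rather than quoted verbatim (helper and constant names are the ones the tree uses):

    static long register_guest_nmi_callback(unsigned long address)
    {
        struct vcpu *curr = current;
        struct trap_info *t = &curr->arch.pv_vcpu.trap_ctxt[TRAP_nmi];

        /* Reject non-canonical guest entry points. */
        if ( !is_canonical_address(address) )
            return -EINVAL;

        t->vector  = TRAP_nmi;
        t->flags   = 0;
        t->cs      = (is_pv_32bit_vcpu(curr) ? FLAT_COMPAT_KERNEL_CS
                                             : FLAT_KERNEL_CS);
        t->address = address;
        TI_SET_IF(t, 1);    /* mask event delivery while the handler runs */

        return 0;
    }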
domain.c
    121  xfree(v->arch.pv_vcpu.trap_ctxt);  in pv_vcpu_destroy()
    122  v->arch.pv_vcpu.trap_ctxt = NULL;  in pv_vcpu_destroy()
    132  spin_lock_init(&v->arch.pv_vcpu.shadow_ldt_lock);  in pv_vcpu_initialise()
    140  v->arch.pv_vcpu.trap_ctxt = xzalloc_array(struct trap_info,  in pv_vcpu_initialise()
    142  if ( !v->arch.pv_vcpu.trap_ctxt )  in pv_vcpu_initialise()
    232  v->arch.pv_vcpu.gs_base_kernel = __rdgsbase();  in toggle_guest_mode()
    234  v->arch.pv_vcpu.gs_base_user = __rdgsbase();  in toggle_guest_mode()
    254  if ( v->arch.pv_vcpu.need_update_runstate_area &&  in toggle_guest_pt()
    256  v->arch.pv_vcpu.need_update_runstate_area = 0;  in toggle_guest_pt()
    258  if ( v->arch.pv_vcpu.pending_system_time.version &&  in toggle_guest_pt()
    [all …]
mm.c
     96  unsigned long linear = curr->arch.pv_vcpu.ldt_base + offset;  in pv_map_ldt_shadow_page()
    106  ASSERT((offset >> 3) <= curr->arch.pv_vcpu.ldt_ents);  in pv_map_ldt_shadow_page()
    128  spin_lock(&curr->arch.pv_vcpu.shadow_ldt_lock);  in pv_map_ldt_shadow_page()
    130  curr->arch.pv_vcpu.shadow_ldt_mapcnt++;  in pv_map_ldt_shadow_page()
    131  spin_unlock(&curr->arch.pv_vcpu.shadow_ldt_lock);  in pv_map_ldt_shadow_page()
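The hits at 128-131 show the invariant around shadow_ldt_mapcnt: it is only ever updated under shadow_ldt_lock. The pattern, as a hypothetical helper (shadow_ldt_publish, pl1e, and the mapping flags are illustrative names, not taken from the tree):

    /* Hypothetical helper showing the pv/mm.c:128-131 pattern: publish a
     * shadow LDT mapping and bump the count under the same lock that
     * invalidate_shadow_ldt() (arch/x86/mm.c:565 below) takes to tear
     * every shadow mapping down again. */
    static void shadow_ldt_publish(struct vcpu *curr, l1_pgentry_t *pl1e,
                                   struct page_info *page)
    {
        spin_lock(&curr->arch.pv_vcpu.shadow_ldt_lock);
        l1e_write(pl1e, l1e_from_page(page, __PAGE_HYPERVISOR_RW));
        curr->arch.pv_vcpu.shadow_ldt_mapcnt++;
        spin_unlock(&curr->arch.pv_vcpu.shadow_ldt_lock);
    }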
traps.c
     72  tb = &curr->arch.pv_vcpu.trap_bounce;  in pv_inject_event()
     73  ti = &curr->arch.pv_vcpu.trap_ctxt[vector];  in pv_inject_event()
     82  curr->arch.pv_vcpu.ctrlreg[2] = event->cr2;  in pv_inject_event()
    122  struct trap_bounce *tb = &curr->arch.pv_vcpu.trap_bounce;  in set_guest_machinecheck_trapbounce()
    137  struct trap_bounce *tb = &curr->arch.pv_vcpu.trap_bounce;  in set_guest_nmi_trapbounce()
    147  struct trap_info *ti = &v->arch.pv_vcpu.trap_ctxt[0x80];  in init_int80_direct_trap()
    148  struct trap_bounce *tb = &v->arch.pv_vcpu.int80_bounce;  in init_int80_direct_trap()
misc-hypercalls.c
     44  return ((curr->arch.pv_vcpu.ctrlreg[4] & X86_CR4_DE) ?  in do_get_debugreg()
     57  v->arch.pv_vcpu.ctrlreg[0] |= X86_CR0_TS;  in do_fpu_taskswitch()
     62  v->arch.pv_vcpu.ctrlreg[0] &= ~X86_CR0_TS;  in do_fpu_taskswitch()
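Lines 57 and 62 are the two arms of the FPU task-switch hypercall: a PV guest cannot write CR0 itself, so it asks Xen to flip CR0.TS both in its shadowed ctrlreg[0] and on the real CPU. Roughly (a sketch close to, but not quoted from, the 4.10 source):

    long do_fpu_taskswitch(int set)
    {
        struct vcpu *v = current;

        if ( set )
        {
            v->arch.pv_vcpu.ctrlreg[0] |= X86_CR0_TS;  /* shadow CR0.TS */
            stts();                                    /* and the real one */
        }
        else
        {
            v->arch.pv_vcpu.ctrlreg[0] &= ~X86_CR0_TS;
            if ( v->fpu_dirtied )   /* only clear TS if FPU state is loaded */
                clts();
        }

        return 0;
    }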
iret.c
     60  v->arch.pv_vcpu.iopl = iret_saved.rflags & X86_EFLAGS_IOPL;  in do_iret()
    124  v->arch.pv_vcpu.iopl = eflags & X86_EFLAGS_IOPL;  in compat_iret()
    139  u32 x, ksp = v->arch.pv_vcpu.kernel_sp - 40;  in compat_iret()
    167  regs->ss = v->arch.pv_vcpu.kernel_ss;  in compat_iret()
    169  ti = &v->arch.pv_vcpu.trap_ctxt[TRAP_gp_fault];  in compat_iret()
emul-priv-op.c
    118  return IOPL(cpl) <= v->arch.pv_vcpu.iopl;  in iopl_ok()
    292  !(v->arch.pv_vcpu.ctrlreg[4] & X86_CR4_DE) )  in check_guest_io_breakpoint()
    713  *val = curr->arch.pv_vcpu.ctrlreg[reg];  in read_cr()
    762  curr->arch.pv_vcpu.ctrlreg[2] = val;  in write_cr()
    874  *val = curr->arch.pv_vcpu.gs_base_user;  in read_msr()
    939  *val = curr->arch.pv_vcpu.dr_mask[0];  in read_msr()
   1017  curr->arch.pv_vcpu.fs_base = val;  in write_msr()
   1024  curr->arch.pv_vcpu.gs_base_kernel = val;  in write_msr()
   1031  curr->arch.pv_vcpu.gs_base_user = val;  in write_msr()
   1137  curr->arch.pv_vcpu.dr_mask[0] = val;  in write_msr()
    [all …]
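The single line at 118 is the whole I/O-privilege test for emulated port access: pv_vcpu.iopl caches the guest's virtual IOPL already shifted into X86_EFLAGS_IOPL position (see physdev.c:418 below), so the comparison needs no shifting. A hedged sketch of iopl_ok(), with IOPL() expanding to MASK_INSR(val, X86_EFLAGS_IOPL) as in the real file:

    static bool iopl_ok(const struct vcpu *v, const struct cpu_user_regs *regs)
    {
        /* A PV guest's virtual CPL is 1 in kernel mode (0 if the
         * architectural_iopl VM assist is enabled) and 3 in user mode. */
        unsigned int cpl = guest_kernel_mode(v, regs)
            ? (VM_ASSIST(v->domain, architectural_iopl) ? 0 : 1) : 3;

        /* Port access is permitted iff CPL <= virtual IOPL. */
        return IOPL(cpl) <= v->arch.pv_vcpu.iopl;
    }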
descriptor-tables.c
     44  v->arch.pv_vcpu.gdt_ents = 0;  in pv_destroy_gdt()
     52  v->arch.pv_vcpu.gdt_frames[i] = 0;  in pv_destroy_gdt()
     86  v->arch.pv_vcpu.gdt_ents = entries;  in pv_set_gdt()
     90  v->arch.pv_vcpu.gdt_frames[i] = frames[i];  in pv_set_gdt()
emul-gate-op.c
    336  esp = v->arch.pv_vcpu.kernel_sp;  in pv_emulate_gate_op()
    337  ss = v->arch.pv_vcpu.kernel_ss;  in pv_emulate_gate_op()
dom0_build.c
    591  v->arch.pv_vcpu.failsafe_callback_cs = FLAT_COMPAT_KERNEL_CS;  in dom0_construct_pv()
    592  v->arch.pv_vcpu.event_callback_cs = FLAT_COMPAT_KERNEL_CS;  in dom0_construct_pv()
/xen-4.10.0-shim-comet/xen/arch/x86/x86_64/
asm-offsets.c
     69  arch.pv_vcpu.failsafe_callback_eip);  in __dummy__()
     71  arch.pv_vcpu.failsafe_callback_cs);  in __dummy__()
     73  arch.pv_vcpu.syscall_callback_eip);  in __dummy__()
     75  arch.pv_vcpu.syscall32_callback_eip);  in __dummy__()
     77  arch.pv_vcpu.syscall32_callback_cs);  in __dummy__()
     79  arch.pv_vcpu.syscall32_disables_events);  in __dummy__()
     81  arch.pv_vcpu.sysenter_callback_eip);  in __dummy__()
     83  arch.pv_vcpu.sysenter_callback_cs);  in __dummy__()
     85  arch.pv_vcpu.sysenter_disables_events);  in __dummy__()
     86  OFFSET(VCPU_trap_ctxt, struct vcpu, arch.pv_vcpu.trap_ctxt);  in __dummy__()
    [all …]
traps.c
    171  crs[0] = v->arch.pv_vcpu.ctrlreg[0];  in vcpu_show_registers()
    176  crs[4] = v->arch.pv_vcpu.ctrlreg[4];  in vcpu_show_registers()
    177  crs[5] = v->arch.pv_vcpu.fs_base;  in vcpu_show_registers()
    178  crs[6 + !kernel] = v->arch.pv_vcpu.gs_base_kernel;  in vcpu_show_registers()
    179  crs[7 - !kernel] = v->arch.pv_vcpu.gs_base_user;  in vcpu_show_registers()
mm.c
   1010  current->arch.pv_vcpu.kernel_ss = ss;  in do_stack_switch()
   1011  current->arch.pv_vcpu.kernel_sp = esp;  in do_stack_switch()
   1029  v->arch.pv_vcpu.fs_base = base;  in do_set_segment_base()
   1039  v->arch.pv_vcpu.gs_base_user = base;  in do_set_segment_base()
   1049  v->arch.pv_vcpu.gs_base_kernel = base;  in do_set_segment_base()
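kernel_ss/kernel_sp at 1010-1011 come from the stack-switch hypercall; this is the stack loaded when an event is bounced into the guest kernel (compare the kernel_sp/kernel_ss consumers in iret.c and emul-gate-op.c above). A sketch of do_stack_switch(), abridged from 4.10:

    long do_stack_switch(unsigned long ss, unsigned long esp)
    {
        /* Sanitise the selector (RPL etc.) before trusting it. */
        fixup_guest_stack_selector(current->domain, ss);

        /* Recorded here; consumed when an event/exception is bounced
         * into the guest kernel. */
        current->arch.pv_vcpu.kernel_ss = ss;
        current->arch.pv_vcpu.kernel_sp = esp;

        return 0;
    }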
/xen-4.10.0-shim-comet/xen/arch/x86/
domain.c
    935  v->arch.pv_vcpu.kernel_ss = c(kernel_ss);  in arch_set_info_guest()
    936  v->arch.pv_vcpu.kernel_sp = c(kernel_sp);  in arch_set_info_guest()
    956  v->arch.pv_vcpu.ctrlreg[0] &= X86_CR0_TS;  in arch_set_info_guest()
    959  cr4 = v->arch.pv_vcpu.ctrlreg[4];  in arch_set_info_guest()
   1328  wrfsbase(n->arch.pv_vcpu.fs_base);  in load_segments()
   1335  if ( n->arch.pv_vcpu.gs_base_user |  in load_segments()
   1346  struct pv_vcpu *pv = &n->arch.pv_vcpu;  in load_segments()
   1357  rflags |= n->arch.pv_vcpu.iopl;  in load_segments()
   1473  v->arch.pv_vcpu.fs_base = 0;  in save_segments()
   1475  if ( v->arch.pv_vcpu.fs_base )  in save_segments()
    [all …]
domctl.c
    889  v->arch.pv_vcpu.sysenter_callback_cs;  in arch_do_domctl()
    891  v->arch.pv_vcpu.sysenter_callback_eip;  in arch_do_domctl()
   1362  if ( v->arch.pv_vcpu.dr_mask[0] )  in arch_do_domctl()
   1639  c(ldt_base = v->arch.pv_vcpu.ldt_base);  in arch_get_info_guest()
   1640  c(ldt_ents = v->arch.pv_vcpu.ldt_ents);  in arch_get_info_guest()
   1647  c(gdt_ents = v->arch.pv_vcpu.gdt_ents);  in arch_get_info_guest()
   1648  c(kernel_ss = v->arch.pv_vcpu.kernel_ss);  in arch_get_info_guest()
   1649  c(kernel_sp = v->arch.pv_vcpu.kernel_sp);  in arch_get_info_guest()
   1651  c(ctrlreg[i] = v->arch.pv_vcpu.ctrlreg[i]);  in arch_get_info_guest()
   1657  c.nat->fs_base = v->arch.pv_vcpu.fs_base;  in arch_get_info_guest()
    [all …]
domain_page.c
     89  vcache = &v->arch.pv_vcpu.mapcache;  in map_domain_page()
    197  hashent = &v->arch.pv_vcpu.mapcache.hash[MAPHASH_HASHFN(mfn)];  in unmap_domain_page()
    297  struct vcpu_maphash_entry *hashent = &v->arch.pv_vcpu.mapcache.hash[i];  in mapcache_vcpu_init()
traps.c
   1142  curr->arch.pv_vcpu.ldt_base + offset)) )  in handle_gdt_ldt_mapping_fault()
   1151  curr->arch.pv_vcpu.ldt_base + offset);  in handle_gdt_ldt_mapping_fault()
   1510  ti = &v->arch.pv_vcpu.trap_ctxt[vector];  in do_general_protection()
   1719  if ( curr->arch.pv_vcpu.ctrlreg[0] & X86_CR0_TS )  in do_device_not_available()
   1722  curr->arch.pv_vcpu.ctrlreg[0] &= ~X86_CR0_TS;  in do_device_not_available()
   1970  wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, curr->arch.pv_vcpu.dr_mask[0]);  in activate_debugregs()
   1971  wrmsrl(MSR_AMD64_DR1_ADDRESS_MASK, curr->arch.pv_vcpu.dr_mask[1]);  in activate_debugregs()
   1972  wrmsrl(MSR_AMD64_DR2_ADDRESS_MASK, curr->arch.pv_vcpu.dr_mask[2]);  in activate_debugregs()
   1973  wrmsrl(MSR_AMD64_DR3_ADDRESS_MASK, curr->arch.pv_vcpu.dr_mask[3]);  in activate_debugregs()
   2040  if ( !(v->arch.pv_vcpu.ctrlreg[4] & X86_CR4_DE) )  in set_debugreg()
physdev.c
    418  curr->arch.pv_vcpu.iopl = MASK_INSR(set_iopl.iopl, X86_EFLAGS_IOPL);  in do_physdev_op()
    435  curr->arch.pv_vcpu.iobmp = set_iobitmap.bitmap;  in do_physdev_op()
    437  guest_from_compat_handle(curr->arch.pv_vcpu.iobmp,  in do_physdev_op()
    440  curr->arch.pv_vcpu.iobmp_limit = set_iobitmap.nr_ports;  in do_physdev_op()
cpuid.c
    773  if ( (v->arch.pv_vcpu.ctrlreg[4] & X86_CR4_OSXSAVE) ||  in guest_cpuid()
    845  ? v->arch.pv_vcpu.ctrlreg[4]  in guest_cpuid()
mm.c
    565  spin_lock(&v->arch.pv_vcpu.shadow_ldt_lock);  in invalidate_shadow_ldt()
    567  if ( v->arch.pv_vcpu.shadow_ldt_mapcnt == 0 )  in invalidate_shadow_ldt()
    570  v->arch.pv_vcpu.shadow_ldt_mapcnt = 0;  in invalidate_shadow_ldt()
    589  spin_unlock(&v->arch.pv_vcpu.shadow_ldt_lock);  in invalidate_shadow_ldt()
   3368  else if ( (curr->arch.pv_vcpu.ldt_ents != ents) ||  in do_mmuext_op()
   3369  (curr->arch.pv_vcpu.ldt_base != ptr) )  in do_mmuext_op()
   3373  curr->arch.pv_vcpu.ldt_base = ptr;  in do_mmuext_op()
   3374  curr->arch.pv_vcpu.ldt_ents = ents;  in do_mmuext_op()
/xen-4.10.0-shim-comet/xen/include/asm-x86/
ldt.h
     12  if ( (ents = v->arch.pv_vcpu.ldt_ents) == 0 )  in load_LDT()
domain.h
    445  struct pv_vcpu  (struct definition)
    530  struct pv_vcpu pv_vcpu;  (member)
    614  (((v)->arch.pv_vcpu.ctrlreg[4] \  (macro)
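Every field referenced in this listing lives in the struct defined at domain.h:445. A condensed sketch reconstructed from the call sites above; field order, exact types, and the members elided at the end are assumptions, and the authoritative definition is domain.h itself:

    struct pv_vcpu
    {
        /* Guest event/trap plumbing (pv/callback.c, pv/traps.c). */
        struct trap_info *trap_ctxt;          /* per-vector guest handlers */
        unsigned long event_callback_eip;
        unsigned long failsafe_callback_eip;
        unsigned long syscall_callback_eip;
        unsigned long syscall32_callback_eip;
        unsigned long sysenter_callback_eip;
        unsigned short event_callback_cs;     /* compat guests only */
        unsigned short failsafe_callback_cs;
        unsigned short syscall32_callback_cs;
        unsigned short sysenter_callback_cs;
        bool syscall32_disables_events;
        bool sysenter_disables_events;
        struct trap_bounce trap_bounce;       /* pending bounce to guest */
        struct trap_bounce int80_bounce;      /* fast int $0x80 path */

        /* Descriptor tables (pv/descriptor-tables.c, ldt.h, mm.c). */
        unsigned long gdt_frames[FIRST_RESERVED_GDT_PAGE];
        unsigned long ldt_base;
        unsigned int gdt_ents, ldt_ents;
        unsigned int shadow_ldt_mapcnt;       /* guarded by shadow_ldt_lock */
        spinlock_t shadow_ldt_lock;

        /* Shadowed CPU state. */
        unsigned long ctrlreg[8];             /* CR0/CR2/CR4 shadows */
        unsigned long kernel_ss, kernel_sp;   /* guest kernel stack */
        unsigned long fs_base, gs_base_kernel, gs_base_user;
        unsigned long dr_mask[4];             /* MSR_AMD64_DRn_ADDRESS_MASK */
        unsigned int iopl;                    /* X86_EFLAGS_IOPL position */
        XEN_GUEST_HANDLE(uint8) iobmp;        /* guest I/O permission bitmap */
        unsigned int iobmp_limit;

        /* Also present: mapcache, need_update_runstate_area,
         * pending_system_time (see domain_page.c and pv/domain.c above). */
    };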
/xen-4.10.0-shim-comet/xen/include/asm-x86/pv/
traps.h
     40  return v->arch.pv_vcpu.trap_ctxt[vector].address;  in pv_trap_callback_registered()
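The hit at line 40 is the entire predicate: a callback counts as registered iff its trap_ctxt entry holds a non-zero address, which works because trap_ctxt comes from xzalloc_array() (pv/domain.c:140 above) and so starts out zeroed. Roughly:

    static inline bool pv_trap_callback_registered(const struct vcpu *v,
                                                   uint8_t vector)
    {
        /* Zero-initialised by xzalloc_array(); non-zero once registered. */
        return v->arch.pv_vcpu.trap_ctxt[vector].address != 0;
    }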
/xen-4.10.0-shim-comet/xen/arch/x86/cpu/
intel.c
    137  !(next->arch.pv_vcpu.ctrlreg[4] & X86_CR4_OSXSAVE))  in intel_ctxt_switch_masking()
amd.c
    223  !(next->arch.pv_vcpu.ctrlreg[4] & X86_CR4_OSXSAVE))  in amd_ctxt_switch_masking()