/*
 * Portions are:
 *  Copyright (c) 2002 Pavel Machek <pavel@suse.cz>
 *  Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <xen/acpi.h>
#include <xen/smp.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/debugreg.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/i387.h>
#include <asm/xstate.h>
#include <xen/hypercall.h>

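/* CPU state stashed by save_rest_processor_state() across ACPI S3 sleep. */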
static unsigned long saved_lstar, saved_cstar;
static unsigned long saved_sysenter_esp, saved_sysenter_eip;
static unsigned long saved_fs_base, saved_gs_base, saved_kernel_gs_base;
static uint16_t saved_segs[4];
static uint64_t saved_xcr0;

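/*
 * Save the processor state not preserved elsewhere in the S3 path:
 * FPU/XSAVE state, the data segment selectors, segment bases, and the
 * syscall/sysenter entry MSRs.
 */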
void save_rest_processor_state(void)
{
    vcpu_save_fpu(current);

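    /* Stash the data segment selectors (%ds, %es, %fs, %gs). */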
    asm volatile (
        "movw %%ds,(%0); movw %%es,2(%0); movw %%fs,4(%0); movw %%gs,6(%0)"
        : : "r" (saved_segs) : "memory" );
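    /* Segment bases and the 64-bit syscall entry MSRs. */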
    saved_fs_base = rdfsbase();
    saved_gs_base = rdgsbase();
    rdmsrl(MSR_SHADOW_GS_BASE, saved_kernel_gs_base);
    rdmsrl(MSR_CSTAR, saved_cstar);
    rdmsrl(MSR_LSTAR, saved_lstar);
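    /* The SYSENTER MSRs are only set up on Intel/Centaur CPUs. */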
    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
         boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR )
    {
        rdmsrl(MSR_IA32_SYSENTER_ESP, saved_sysenter_esp);
        rdmsrl(MSR_IA32_SYSENTER_EIP, saved_sysenter_eip);
    }
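    /* Remember the active XSAVE feature mask. */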
    if ( cpu_has_xsave )
        saved_xcr0 = get_xcr0();
}

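/*
 * Undo save_rest_processor_state() on the resume path, and reload state
 * (TR, PAT, MTRRs) which the sleep transition may have clobbered.
 */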
void restore_rest_processor_state(void)
{
    struct vcpu *curr = current;

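    /* Reload this CPU's TSS selector into the Task Register. */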
    load_TR();

    /* Recover syscall MSRs */
    wrmsrl(MSR_LSTAR, saved_lstar);
    wrmsrl(MSR_CSTAR, saved_cstar);
    wrmsrl(MSR_STAR, XEN_MSR_STAR);
    wrmsrl(MSR_SYSCALL_MASK, XEN_SYSCALL_MASK);

    wrfsbase(saved_fs_base);
    wrgsbase(saved_gs_base);
    wrmsrl(MSR_SHADOW_GS_BASE, saved_kernel_gs_base);

    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
         boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR )
    {
        /* Recover sysenter MSRs */
        wrmsrl(MSR_IA32_SYSENTER_ESP, saved_sysenter_esp);
        wrmsrl(MSR_IA32_SYSENTER_EIP, saved_sysenter_eip);
        wrmsr(MSR_IA32_SYSENTER_CS, __HYPERVISOR_CS, 0);
    }

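    /*
     * Reload a PV guest's data selectors.  The user %gs selector goes via
     * do_set_segment_base() so the GS bases written above are preserved.
     */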
    if ( !is_idle_vcpu(curr) )
    {
        asm volatile (
            "movw (%0),%%ds; movw 2(%0),%%es; movw 4(%0),%%fs"
            : : "r" (saved_segs) : "memory" );
        do_set_segment_base(SEGBASE_GS_USER_SEL, saved_segs[3]);
    }

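    /* Re-enable the previously active XSAVE features. */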
    if ( cpu_has_xsave && !set_xcr0(saved_xcr0) )
        BUG();

    /* Maybe load the debug registers. */
    BUG_ON(!is_pv_vcpu(curr));
    if ( !is_idle_vcpu(curr) && curr->arch.debugreg[7] )
    {
        write_debugreg(0, curr->arch.debugreg[0]);
        write_debugreg(1, curr->arch.debugreg[1]);
        write_debugreg(2, curr->arch.debugreg[2]);
        write_debugreg(3, curr->arch.debugreg[3]);
        write_debugreg(6, curr->arch.debugreg[6]);
        write_debugreg(7, curr->arch.debugreg[7]);
    }

    /* Reload FPU state on next FPU use. */
    stts();

    if ( cpu_has_pat )
        wrmsrl(MSR_IA32_CR_PAT, host_pat);

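    /* Reinstate the boot processor's MTRR configuration. */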
    mtrr_bp_restore();
}