/*
 * arch/x86/hvm/vm_event.c
 *
 * HVM vm_event handling routines
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, International Business Machines Corporation.
 * Copyright (c) 2008, Citrix Systems, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; If not, see <http://www.gnu.org/licenses/>.
 */
22
23 #include <xen/sched.h>
24 #include <xen/vm_event.h>
25 #include <asm/hvm/support.h>
26 #include <asm/vm_event.h>
27
hvm_vm_event_set_registers(const struct vcpu * v)28 static void hvm_vm_event_set_registers(const struct vcpu *v)
29 {
30 ASSERT(v == current);
31
32 if ( unlikely(v->arch.vm_event->set_gprs) )
33 {
34 struct cpu_user_regs *regs = guest_cpu_user_regs();
35
36 regs->rax = v->arch.vm_event->gprs.rax;
37 regs->rbx = v->arch.vm_event->gprs.rbx;
38 regs->rcx = v->arch.vm_event->gprs.rcx;
39 regs->rdx = v->arch.vm_event->gprs.rdx;
40 regs->rsp = v->arch.vm_event->gprs.rsp;
41 regs->rbp = v->arch.vm_event->gprs.rbp;
42 regs->rsi = v->arch.vm_event->gprs.rsi;
43 regs->rdi = v->arch.vm_event->gprs.rdi;
44
45 regs->r8 = v->arch.vm_event->gprs.r8;
46 regs->r9 = v->arch.vm_event->gprs.r9;
47 regs->r10 = v->arch.vm_event->gprs.r10;
48 regs->r11 = v->arch.vm_event->gprs.r11;
49 regs->r12 = v->arch.vm_event->gprs.r12;
50 regs->r13 = v->arch.vm_event->gprs.r13;
51 regs->r14 = v->arch.vm_event->gprs.r14;
52 regs->r15 = v->arch.vm_event->gprs.r15;
53
54 regs->rflags = v->arch.vm_event->gprs.rflags;
55 regs->rip = v->arch.vm_event->gprs.rip;
56
57 v->arch.vm_event->set_gprs = false;
58 }
59 }
60
hvm_vm_event_do_resume(struct vcpu * v)61 void hvm_vm_event_do_resume(struct vcpu *v)
62 {
63 struct monitor_write_data *w;
64
65 ASSERT(v->arch.vm_event);
66
67 hvm_vm_event_set_registers(v);
68
69 w = &v->arch.vm_event->write_data;
70
71 if ( unlikely(v->arch.vm_event->emulate_flags) )
72 {
73 enum emul_kind kind = EMUL_KIND_NORMAL;
74
75 /*
76 * Please observe the order here to match the flag descriptions
77 * provided in public/vm_event.h
78 */
79 if ( v->arch.vm_event->emulate_flags &
80 VM_EVENT_FLAG_SET_EMUL_READ_DATA )
81 kind = EMUL_KIND_SET_CONTEXT_DATA;
82 else if ( v->arch.vm_event->emulate_flags &
83 VM_EVENT_FLAG_EMULATE_NOWRITE )
84 kind = EMUL_KIND_NOWRITE;
85 else if ( v->arch.vm_event->emulate_flags &
86 VM_EVENT_FLAG_SET_EMUL_INSN_DATA )
87 kind = EMUL_KIND_SET_CONTEXT_INSN;
88
89 hvm_emulate_one_vm_event(kind, TRAP_invalid_op,
90 X86_EVENT_NO_EC);
91
92 v->arch.vm_event->emulate_flags = 0;
93 }
94
95 if ( unlikely(w->do_write.cr0) )
96 {
97 if ( hvm_set_cr0(w->cr0, 0) == X86EMUL_EXCEPTION )
98 hvm_inject_hw_exception(TRAP_gp_fault, 0);
99
100 w->do_write.cr0 = 0;
101 }
102
103 if ( unlikely(w->do_write.cr4) )
104 {
105 if ( hvm_set_cr4(w->cr4, 0) == X86EMUL_EXCEPTION )
106 hvm_inject_hw_exception(TRAP_gp_fault, 0);
107
108 w->do_write.cr4 = 0;
109 }
110
111 if ( unlikely(w->do_write.cr3) )
112 {
113 if ( hvm_set_cr3(w->cr3, 0) == X86EMUL_EXCEPTION )
114 hvm_inject_hw_exception(TRAP_gp_fault, 0);
115
116 w->do_write.cr3 = 0;
117 }
118
119 if ( unlikely(w->do_write.msr) )
120 {
121 if ( hvm_msr_write_intercept(w->msr, w->value, 0) ==
122 X86EMUL_EXCEPTION )
123 hvm_inject_hw_exception(TRAP_gp_fault, 0);
124
125 w->do_write.msr = 0;
126 }
127 }
128
/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */