1 /******************************************************************************
2 * arch/x86/hvm/vmx/realmode.c
3 *
4 * Real-mode emulation for VMX.
5 *
6 * Copyright (c) 2007-2008 Citrix Systems, Inc.
7 *
8 * Authors:
9 * Keir Fraser <keir@xen.org>
10 */
11
12 #include <xen/init.h>
13 #include <xen/lib.h>
14 #include <xen/sched.h>
15 #include <xen/paging.h>
16 #include <xen/softirq.h>
17 #include <asm/event.h>
18 #include <asm/hvm/emulate.h>
19 #include <asm/hvm/hvm.h>
20 #include <asm/hvm/support.h>
21 #include <asm/hvm/vmx/vmx.h>
22 #include <asm/hvm/vmx/vmcs.h>
23
/*
 * Deliver an event to the guest using real-mode (IVT) semantics: read the
 * CS:IP far pointer from the interrupt vector table, push FLAGS/CS/IP on
 * the guest stack, and redirect execution to the handler.
 *
 * vector:   interrupt/exception vector to deliver.
 * insn_len: length of the delivering instruction. Non-zero only for
 *           software interrupts (INT n): it advances the pushed return IP
 *           past the instruction, and distinguishes software interrupts
 *           from hardware events when the IVT entry cannot be read.
 * hvmemul_ctxt: emulation context holding the register and segment state.
 */
static void realmode_deliver_exception(
    unsigned int vector,
    unsigned int insn_len,
    struct hvm_emulate_ctxt *hvmemul_ctxt)
{
    struct segment_register *idtr, *csr;
    struct cpu_user_regs *regs = hvmemul_ctxt->ctxt.regs;
    uint32_t cs_eip, pstk;
    uint16_t frame[3];
    unsigned int last_byte;

    idtr = hvmemul_get_seg_reg(x86_seg_idtr, hvmemul_ctxt);
    csr = hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt);
    /* CS is rewritten below; mark it so the writeback path flushes it. */
    __set_bit(x86_seg_cs, &hvmemul_ctxt->seg_reg_dirty);

 again:
    /*
     * Each IVT entry is a 4-byte CS:IP far pointer. The table is read with
     * guest-physical addressing: real mode runs with paging semantics that
     * make IDTR.base a physical address.
     */
    last_byte = (vector * 4) + 3;
    if ( idtr->limit < last_byte ||
         hvm_copy_from_guest_phys(&cs_eip, idtr->base + vector * 4, 4) !=
         HVMTRANS_okay )
    {
        /*
         * The IVT entry is beyond the IDT limit or unreadable: escalate
         * following the hardware fault-contribution rules.
         */
        /* Software interrupt? */
        if ( insn_len != 0 )
        {
            insn_len = 0;
            vector = TRAP_gp_fault;
            goto again;
        }

        /* Exception or hardware interrupt. */
        switch ( vector )
        {
        case TRAP_double_fault:
            /* Failure to deliver #DF escalates to a triple fault. */
            hvm_triple_fault();
            return;
        case TRAP_gp_fault:
            vector = TRAP_double_fault;
            goto again;
        default:
            vector = TRAP_gp_fault;
            goto again;
        }
    }

    /* Real-mode stack frame: return IP, CS, FLAGS (RF never pushed). */
    frame[0] = regs->ip + insn_len;
    frame[1] = csr->sel;
    frame[2] = regs->flags & ~X86_EFLAGS_RF;

    /* We can't test hvmemul_ctxt->ctxt.sp_size: it may not be initialised. */
    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].db )
        pstk = regs->esp -= 6;  /* 32-bit stack pointer (SS.D set) */
    else
        pstk = regs->sp -= 6;   /* 16-bit stack pointer */

    pstk += hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt)->base;
    /* Best-effort write: a failing stack push is deliberately ignored. */
    (void)hvm_copy_to_guest_phys(pstk, frame, sizeof(frame), current);

    /* Load the handler's CS:IP; a real-mode CS base is always sel << 4. */
    csr->sel = cs_eip >> 16;
    csr->base = (uint32_t)csr->sel << 4;
    regs->ip = (uint16_t)cs_eip;
    regs->eflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF | X86_EFLAGS_RF);

    /* Exception delivery clears STI and MOV-SS blocking. */
    if ( hvmemul_ctxt->intr_shadow &
         (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS) )
    {
        hvmemul_ctxt->intr_shadow &=
            ~(VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS);
        __vmwrite(GUEST_INTERRUPTIBILITY_INFO, hvmemul_ctxt->intr_shadow);
    }
}
95
vmx_realmode_emulate_one(struct hvm_emulate_ctxt * hvmemul_ctxt)96 void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt)
97 {
98 struct vcpu *curr = current;
99 struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
100 int rc;
101
102 perfc_incr(realmode_emulations);
103
104 rc = hvm_emulate_one(hvmemul_ctxt);
105
106 if ( hvm_vcpu_io_need_completion(vio) )
107 vio->io_completion = HVMIO_realmode_completion;
108
109 if ( rc == X86EMUL_UNHANDLEABLE )
110 {
111 gdprintk(XENLOG_ERR, "Failed to emulate insn.\n");
112 goto fail;
113 }
114
115 if ( rc == X86EMUL_UNRECOGNIZED )
116 {
117 gdprintk(XENLOG_ERR, "Unrecognized insn.\n");
118 if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
119 goto fail;
120
121 realmode_deliver_exception(TRAP_invalid_op, 0, hvmemul_ctxt);
122 }
123
124 if ( rc == X86EMUL_EXCEPTION )
125 {
126 if ( unlikely(curr->domain->debugger_attached) &&
127 ((hvmemul_ctxt->ctxt.event.vector == TRAP_debug) ||
128 (hvmemul_ctxt->ctxt.event.vector == TRAP_int3)) )
129 {
130 domain_pause_for_debugger();
131 }
132 else if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
133 {
134 gdprintk(XENLOG_ERR, "Exception %02x in protected mode.\n",
135 hvmemul_ctxt->ctxt.event.vector);
136 goto fail;
137 }
138 else
139 {
140 realmode_deliver_exception(
141 hvmemul_ctxt->ctxt.event.vector,
142 hvmemul_ctxt->ctxt.event.insn_len,
143 hvmemul_ctxt);
144 }
145 }
146
147 return;
148
149 fail:
150 hvm_dump_emulation_state(XENLOG_G_ERR, "Real-mode", hvmemul_ctxt, rc);
151 domain_crash(curr->domain);
152 }
153
/*
 * Top-level real-mode emulation loop, entered from the VMX exit path when
 * the guest cannot run natively (real mode, or protected mode with
 * real-mode-style selectors, on hardware lacking unrestricted-guest).
 * Emulates instructions until the guest state is safe for native
 * execution, a softirq is pending, an I/O request must be completed, or a
 * real-mode interrupt needs delivering.
 */
void vmx_realmode(struct cpu_user_regs *regs)
{
    struct vcpu *curr = current;
    struct hvm_emulate_ctxt hvmemul_ctxt;
    struct segment_register *sreg;
    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
    unsigned long intr_info;
    unsigned int emulations = 0;

    /* Get-and-clear VM_ENTRY_INTR_INFO. */
    __vmread(VM_ENTRY_INTR_INFO, &intr_info);
    if ( intr_info & INTR_INFO_VALID_MASK )
        __vmwrite(VM_ENTRY_INTR_INFO, 0);

    hvm_emulate_init_once(&hvmemul_ctxt, NULL, regs);

    /* Only deliver interrupts into emulated real mode. */
    if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) &&
         (intr_info & INTR_INFO_VALID_MASK) )
    {
        /* Low 8 bits of intr_info are the event vector. */
        realmode_deliver_exception((uint8_t)intr_info, 0, &hvmemul_ctxt);
        intr_info = 0;  /* Discharged: do not re-instate it below. */
    }

    curr->arch.hvm_vmx.vmx_emulate = 1;
    while ( curr->arch.hvm_vmx.vmx_emulate &&
            !softirq_pending(smp_processor_id()) )
    {
        /*
         * Check for pending interrupts only every 16 instructions, because
         * hvm_local_events_need_delivery() is moderately expensive, and only
         * in real mode, because we don't emulate protected-mode IDT vectoring.
         */
        if ( unlikely(!(++emulations & 15)) &&
             curr->arch.hvm_vmx.vmx_realmode &&
             hvm_local_events_need_delivery(curr) )
            break;

        vmx_realmode_emulate_one(&hvmemul_ctxt);

        /* An outstanding I/O request or MMIO retry must leave the loop. */
        if ( vio->io_req.state != STATE_IOREQ_NONE || vio->mmio_retry )
            break;

        /*
         * Keep emulating only while the guest state cannot run natively:
         * in real mode, while any segment is unsuitable for VM86; in
         * protected mode, while CS or SS still carries a real-mode RPL.
         */
        if ( curr->arch.hvm_vmx.vmx_realmode )
            curr->arch.hvm_vmx.vmx_emulate =
                (curr->arch.hvm_vmx.vm86_segment_mask != 0);
        else
            curr->arch.hvm_vmx.vmx_emulate =
                 ((hvmemul_ctxt.seg_reg[x86_seg_cs].sel & 3)
                  || (hvmemul_ctxt.seg_reg[x86_seg_ss].sel & 3));
    }

    /* Need to emulate next time if we've started an IO operation */
    if ( vio->io_req.state != STATE_IOREQ_NONE )
        curr->arch.hvm_vmx.vmx_emulate = 1;

    if ( !curr->arch.hvm_vmx.vmx_emulate && !curr->arch.hvm_vmx.vmx_realmode )
    {
        /*
         * Cannot enter protected mode with bogus selector RPLs and DPLs.
         * At this point CS.RPL == SS.RPL == CS.DPL == SS.DPL == 0. For
         * DS, ES, FS and GS the most uninvasive trick is to set DPL == RPL.
         */
        sreg = hvmemul_get_seg_reg(x86_seg_ds, &hvmemul_ctxt);
        sreg->dpl = sreg->sel & 3;
        sreg = hvmemul_get_seg_reg(x86_seg_es, &hvmemul_ctxt);
        sreg->dpl = sreg->sel & 3;
        sreg = hvmemul_get_seg_reg(x86_seg_fs, &hvmemul_ctxt);
        sreg->dpl = sreg->sel & 3;
        sreg = hvmemul_get_seg_reg(x86_seg_gs, &hvmemul_ctxt);
        sreg->dpl = sreg->sel & 3;
        /* Mark the patched registers so the writeback below flushes them. */
        hvmemul_ctxt.seg_reg_dirty |=
            (1ul << x86_seg_ds) | (1ul << x86_seg_es) |
            (1ul << x86_seg_fs) | (1ul << x86_seg_gs);
    }

    /* Flush dirty segment state back to the VMCS. */
    hvm_emulate_writeback(&hvmemul_ctxt);

    /* Re-instate VM_ENTRY_INTR_INFO if we did not discharge it. */
    if ( intr_info & INTR_INFO_VALID_MASK )
        __vmwrite(VM_ENTRY_INTR_INFO, intr_info);
}
237
238 /*
239 * Local variables:
240 * mode: C
241 * c-file-style: "BSD"
242 * c-basic-offset: 4
243 * tab-width: 4
244 * indent-tabs-mode: nil
245 * End:
246 */
247