/******************************************************************************
 * arch/x86/hvm/vmx/realmode.c
 *
 * Real-mode emulation for VMX.
 *
 * Copyright (c) 2007-2008 Citrix Systems, Inc.
 *
 * Authors:
 *    Keir Fraser <keir@xen.org>
 */

#include <xen/init.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <xen/paging.h>
#include <xen/softirq.h>

#include <asm/gdbsx.h>
#include <asm/event.h>
#include <asm/hvm/emulate.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vmx/vmx.h>
#include <asm/hvm/vmx/vmcs.h>

static void realmode_deliver_exception(
    unsigned int vector,
    unsigned int insn_len,
    struct hvm_emulate_ctxt *hvmemul_ctxt)
{
    struct segment_register *idtr, *csr;
    struct cpu_user_regs *regs = hvmemul_ctxt->ctxt.regs;
    uint32_t cs_eip, pstk;
    uint16_t frame[3];
    unsigned int last_byte;

    idtr = hvmemul_get_seg_reg(x86_seg_idtr, hvmemul_ctxt);
    csr = hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt);
    __set_bit(x86_seg_cs, &hvmemul_ctxt->seg_reg_dirty);
 again:
    last_byte = (vector * 4) + 3;
    if ( idtr->limit < last_byte ||
         hvm_copy_from_guest_phys(&cs_eip, idtr->base + vector * 4, 4) !=
         HVMTRANS_okay )
    {
        /* Software interrupt? */
        if ( insn_len != 0 )
        {
            insn_len = 0;
            vector = X86_EXC_GP;
            goto again;
        }

        /* Exception or hardware interrupt. */
        switch ( vector )
        {
        case X86_EXC_DF:
            hvm_triple_fault();
            return;
        case X86_EXC_GP:
            vector = X86_EXC_DF;
            goto again;
        default:
            vector = X86_EXC_GP;
            goto again;
        }
    }

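    /*
     * Build the three-word real-mode interrupt frame.  Hardware pushes
     * FLAGS, then CS, then IP, so the return IP ends up at the lowest
     * address: frame[0] = IP, frame[1] = CS, frame[2] = FLAGS (RF cleared).
     */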
    frame[0] = regs->ip + insn_len;
    frame[1] = csr->sel;
    frame[2] = regs->flags & ~X86_EFLAGS_RF;

    /* We can't test hvmemul_ctxt->ctxt.sp_size: it may not be initialised. */
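    /* SS.DB distinguishes a 32-bit stack (use %esp) from a 16-bit one (%sp). */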
    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].db )
        pstk = regs->esp -= 6;
    else
        pstk = regs->sp -= 6;

    pstk += hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt)->base;
    (void)hvm_copy_to_guest_phys(pstk, frame, sizeof(frame), current);

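    /*
     * Vector through the IVT entry: the new CS is its high word, the new
     * IP its low word, and in real mode the segment base is simply sel << 4.
     */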
    csr->sel = cs_eip >> 16;
    csr->base = (uint32_t)csr->sel << 4;
    regs->ip = (uint16_t)cs_eip;
    regs->eflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF | X86_EFLAGS_RF);

    /* Exception delivery clears STI and MOV-SS blocking. */
    if ( hvmemul_ctxt->intr_shadow &
         (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS) )
    {
        hvmemul_ctxt->intr_shadow &=
            ~(VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS);
        __vmwrite(GUEST_INTERRUPTIBILITY_INFO, hvmemul_ctxt->intr_shadow);
    }
}

void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt)
{
    struct vcpu *curr = current;
    int rc;

    perfc_incr(realmode_emulations);

    rc = hvm_emulate_one(hvmemul_ctxt, VIO_realmode_completion);

    if ( rc == X86EMUL_UNHANDLEABLE )
    {
        gdprintk(XENLOG_ERR, "Failed to emulate insn.\n");
        goto fail;
    }

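    /*
     * An opcode the emulator does not recognise: deliver #UD if the guest
     * is still in real mode, otherwise treat it as a fatal emulation failure.
     */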
    if ( rc == X86EMUL_UNRECOGNIZED )
    {
        gdprintk(XENLOG_ERR, "Unrecognized insn.\n");
        if ( curr->arch.hvm.guest_cr[0] & X86_CR0_PE )
            goto fail;

        realmode_deliver_exception(X86_EXC_UD, 0, hvmemul_ctxt);
    }

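    /*
     * The emulator raised a fault.  #DB/#BP go to an attached debugger;
     * other faults raised while in real mode are delivered by hand via the
     * IVT, and a fault in protected mode is treated as fatal.
     */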
    if ( rc == X86EMUL_EXCEPTION )
    {
        if ( unlikely(curr->domain->debugger_attached) &&
             ((hvmemul_ctxt->ctxt.event.vector == X86_EXC_DB) ||
              (hvmemul_ctxt->ctxt.event.vector == X86_EXC_BP)) )
        {
            domain_pause_for_debugger();
        }
        else if ( curr->arch.hvm.guest_cr[0] & X86_CR0_PE )
        {
            gdprintk(XENLOG_ERR, "Exception %02x in protected mode.\n",
                     hvmemul_ctxt->ctxt.event.vector);
            goto fail;
        }
        else
        {
            realmode_deliver_exception(
                hvmemul_ctxt->ctxt.event.vector,
                hvmemul_ctxt->ctxt.event.insn_len,
                hvmemul_ctxt);
        }
    }

    return;

 fail:
    hvm_dump_emulation_state(XENLOG_G_ERR, "Real-mode", hvmemul_ctxt, rc);
    domain_crash(curr->domain);
}

void vmx_realmode(struct cpu_user_regs *regs)
{
    struct vcpu *curr = current;
    struct hvm_emulate_ctxt hvmemul_ctxt;
    struct segment_register *sreg;
    struct hvm_vcpu_io *hvio = &curr->arch.hvm.hvm_io;
    unsigned long intr_info;
    unsigned int emulations = 0;

    /* Get-and-clear VM_ENTRY_INTR_INFO. */
    __vmread(VM_ENTRY_INTR_INFO, &intr_info);
    if ( intr_info & INTR_INFO_VALID_MASK )
        __vmwrite(VM_ENTRY_INTR_INFO, 0);
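
    /*
     * Any event queued for injection is either delivered through the
     * emulator below (real mode) or re-instated before we return.
     */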
    hvm_emulate_init_once(&hvmemul_ctxt, NULL, regs);

    /* Only deliver interrupts into emulated real mode. */
    if ( !(curr->arch.hvm.guest_cr[0] & X86_CR0_PE) &&
         (intr_info & INTR_INFO_VALID_MASK) )
    {
        realmode_deliver_exception((uint8_t)intr_info, 0, &hvmemul_ctxt);
        intr_info = 0;
    }

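    /*
     * Emulate instructions until the guest can be run natively again:
     * vmx_emulate stays set while the segment state is unsuitable for vm86
     * execution (or, in protected mode, while CS/SS carry non-zero RPLs).
     * Bail out early if a softirq needs servicing.
     */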
    curr->arch.hvm.vmx.vmx_emulate = 1;
    while ( curr->arch.hvm.vmx.vmx_emulate &&
            !softirq_pending(smp_processor_id()) )
    {
        /*
         * Check for pending interrupts only every 16 instructions, because
         * hvm_local_events_need_delivery() is moderately expensive, and only
         * in real mode, because we don't emulate protected-mode IDT vectoring.
         */
        if ( unlikely(!(++emulations & 15)) &&
             curr->arch.hvm.vmx.vmx_realmode &&
             hvm_local_events_need_delivery(curr) )
            break;

        vmx_realmode_emulate_one(&hvmemul_ctxt);

        if ( curr->io.req.state != STATE_IOREQ_NONE || hvio->mmio_retry )
            break;

        /* Keep emulating only while the segment state is unsafe to run natively. */
        if ( curr->arch.hvm.vmx.vmx_realmode )
            curr->arch.hvm.vmx.vmx_emulate =
                (curr->arch.hvm.vmx.vm86_segment_mask != 0);
        else
            curr->arch.hvm.vmx.vmx_emulate =
                ((hvmemul_ctxt.seg_reg[x86_seg_cs].sel & 3)
                 || (hvmemul_ctxt.seg_reg[x86_seg_ss].sel & 3));
    }

    /* Need to emulate next time if we've started an IO operation. */
    if ( curr->io.req.state != STATE_IOREQ_NONE )
        curr->arch.hvm.vmx.vmx_emulate = 1;

    if ( !curr->arch.hvm.vmx.vmx_emulate && !curr->arch.hvm.vmx.vmx_realmode )
    {
        /*
         * Cannot enter protected mode with bogus selector RPLs and DPLs.
         * At this point CS.RPL == SS.RPL == CS.DPL == SS.DPL == 0. For
         * DS, ES, FS and GS the least invasive trick is to set DPL == RPL.
         */
        sreg = hvmemul_get_seg_reg(x86_seg_ds, &hvmemul_ctxt);
        sreg->dpl = sreg->sel & 3;
        sreg = hvmemul_get_seg_reg(x86_seg_es, &hvmemul_ctxt);
        sreg->dpl = sreg->sel & 3;
        sreg = hvmemul_get_seg_reg(x86_seg_fs, &hvmemul_ctxt);
        sreg->dpl = sreg->sel & 3;
        sreg = hvmemul_get_seg_reg(x86_seg_gs, &hvmemul_ctxt);
        sreg->dpl = sreg->sel & 3;
        hvmemul_ctxt.seg_reg_dirty |=
            (1ul << x86_seg_ds) | (1ul << x86_seg_es) |
            (1ul << x86_seg_fs) | (1ul << x86_seg_gs);
    }

    hvm_emulate_writeback(&hvmemul_ctxt);

    /* Re-instate VM_ENTRY_INTR_INFO if we did not discharge it. */
    if ( intr_info & INTR_INFO_VALID_MASK )
        __vmwrite(VM_ENTRY_INTR_INFO, intr_info);
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */