/*
 * io.c: Handling I/O and interrupts.
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, International Business Machines Corporation.
 * Copyright (c) 2008, Citrix Systems, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/init.h>
#include <xen/mm.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/trace.h>
#include <xen/event.h>
#include <xen/hypercall.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/apic.h>
#include <asm/paging.h>
#include <asm/shadow.h>
#include <asm/p2m.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/ioreq.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vpt.h>
#include <asm/hvm/vpic.h>
#include <asm/hvm/vlapic.h>
#include <asm/hvm/trace.h>
#include <asm/hvm/emulate.h>
#include <public/sched.h>
#include <xen/iocap.h>
#include <public/hvm/ioreq.h>

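/*
 * Broadcast a TIMEOFFSET request to all ioreq servers, informing the
 * device model(s) of the guest's time offset from host time (updated,
 * for instance, when the guest reprograms the RTC).  A zero offset
 * carries no information, so it is deliberately not sent.
 */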
void send_timeoffset_req(unsigned long timeoff)
{
    ioreq_t p = {
        .type = IOREQ_TYPE_TIMEOFFSET,
        .size = 8,
        .count = 1,
        .dir = IOREQ_WRITE,
        .data = timeoff,
        .state = STATE_IOREQ_READY,
    };

    if ( timeoff == 0 )
        return;

    if ( hvm_broadcast_ioreq(&p, true) != 0 )
        gprintk(XENLOG_ERR, "Unsuccessful timeoffset update\n");
}

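/*
 * When the guest's physmap changes (e.g. on a memory decrease or
 * exchange), any guest-page mappings cached by the device model become
 * stale.  The request below is sent synchronously (buffered = false),
 * with ~0 as the "flush everything" token.
 */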
/* Ask ioemu mapcache to invalidate mappings. */
void send_invalidate_req(void)
{
    ioreq_t p = {
        .type = IOREQ_TYPE_INVALIDATE,
        .size = 4,
        .dir = IOREQ_WRITE,
        .data = ~0UL, /* flush all */
    };

    if ( hvm_broadcast_ioreq(&p, false) != 0 )
        gprintk(XENLOG_ERR, "Unsuccessful map-cache invalidate\n");
}

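/*
 * Emulate one instruction at the current guest instruction pointer,
 * constrained by the caller-supplied 'validate' hook.  Returns false only
 * for X86EMUL_UNHANDLEABLE (after dumping emulation state under 'descr');
 * unrecognized opcodes are reflected back to the guest as #UD, and
 * exceptions raised during emulation are injected as-is.
 */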
bool hvm_emulate_one_insn(hvm_emulate_validate_t *validate, const char *descr)
{
    struct hvm_emulate_ctxt ctxt;
    struct vcpu *curr = current;
    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
    int rc;

    hvm_emulate_init_once(&ctxt, validate, guest_cpu_user_regs());

    rc = hvm_emulate_one(&ctxt);

    if ( hvm_vcpu_io_need_completion(vio) )
        vio->io_completion = HVMIO_mmio_completion;
    else
        vio->mmio_access = (struct npfec){};

    switch ( rc )
    {
    case X86EMUL_UNHANDLEABLE:
        hvm_dump_emulation_state(XENLOG_G_WARNING, descr, &ctxt, rc);
        return false;

    case X86EMUL_UNRECOGNIZED:
        hvm_dump_emulation_state(XENLOG_G_WARNING, descr, &ctxt, rc);
        hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
        break;

    case X86EMUL_EXCEPTION:
        hvm_inject_event(&ctxt.ctxt.event);
        break;
    }

    hvm_emulate_writeback(&ctxt);

    return true;
}

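/*
 * MMIO entry point for nested-paging faults where hardware has already
 * provided a guest linear address.  The access details are cached in
 * vio->mmio_access only when the GLA is valid and of the "with gla" kind,
 * allowing the emulator to reuse the translation rather than re-walking
 * the guest page tables.
 */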
bool handle_mmio_with_translation(unsigned long gla, unsigned long gpfn,
                                  struct npfec access)
{
    struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;

    vio->mmio_access = access.gla_valid &&
                       access.kind == npfec_kind_with_gla
                       ? access : (struct npfec){};
    vio->mmio_gla = gla & PAGE_MASK;
    vio->mmio_gpfn = gpfn;
    return handle_mmio();
}

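/*
 * Emulate an intercepted IN/OUT instruction.  Only 1-, 2- and 4-byte
 * accesses are legal, hence the ASSERT below.  Writes take their payload
 * from %eax; completed reads are merged back into %rax, with 32-bit
 * operands zero-extending the full register as the architecture demands.
 */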
bool handle_pio(uint16_t port, unsigned int size, int dir)
{
    struct vcpu *curr = current;
    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
    unsigned long data;
    int rc;

    ASSERT((size - 1) < 4 && size != 3);

    if ( dir == IOREQ_WRITE )
        data = guest_cpu_user_regs()->eax;

    rc = hvmemul_do_pio_buffer(port, size, dir, &data);

    if ( hvm_vcpu_io_need_completion(vio) )
        vio->io_completion = HVMIO_pio_completion;

    switch ( rc )
    {
    case X86EMUL_OKAY:
        if ( dir == IOREQ_READ )
        {
            if ( size == 4 ) /* Needs zero extension. */
                guest_cpu_user_regs()->rax = (uint32_t)data;
            else
                memcpy(&guest_cpu_user_regs()->rax, &data, size);
        }
        break;

    case X86EMUL_RETRY:
        /* We should not advance RIP/EIP if the domain is shutting down. */
        if ( curr->domain->is_shutting_down )
            return false;
        break;

    default:
        gdprintk(XENLOG_ERR, "Weird HVM ioemulation status %d.\n", rc);
        domain_crash(curr->domain);
        return false;
    }

    return true;
}

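/*
 * Guest-to-machine (g2m) I/O port pass-through.  Each g2m_ioport entry on
 * the domain's list maps 'np' consecutive guest ports starting at 'gport'
 * onto machine ports starting at 'mport'.  The accept hook claims an
 * access only if it falls entirely within one such range, caching the
 * matching entry in vio->g2m_ioport for the read/write hooks that follow.
 */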
static bool_t g2m_portio_accept(const struct hvm_io_handler *handler,
                                const ioreq_t *p)
{
    struct vcpu *curr = current;
    const struct hvm_domain *hvm_domain = &curr->domain->arch.hvm_domain;
    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
    struct g2m_ioport *g2m_ioport;
    unsigned int start, end;

    list_for_each_entry( g2m_ioport, &hvm_domain->g2m_ioport_list, list )
    {
        start = g2m_ioport->gport;
        end = start + g2m_ioport->np;
        if ( (p->addr >= start) && (p->addr + p->size <= end) )
        {
            vio->g2m_ioport = g2m_ioport;
            return 1;
        }
    }

    return 0;
}

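/*
 * Forward a claimed guest port read to hardware, rebasing the guest port
 * into the machine range via the entry cached by the accept hook.  Sizes
 * other than 1, 2 or 4 cannot reach this point, hence BUG().
 */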
static int g2m_portio_read(const struct hvm_io_handler *handler,
                           uint64_t addr, uint32_t size, uint64_t *data)
{
    struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
    const struct g2m_ioport *g2m_ioport = vio->g2m_ioport;
    unsigned int mport = (addr - g2m_ioport->gport) + g2m_ioport->mport;

    switch ( size )
    {
    case 1:
        *data = inb(mport);
        break;
    case 2:
        *data = inw(mport);
        break;
    case 4:
        *data = inl(mport);
        break;
    default:
        BUG();
    }

    return X86EMUL_OKAY;
}

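/* Write-side counterpart: rebase the guest port and issue the machine OUT. */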
static int g2m_portio_write(const struct hvm_io_handler *handler,
                            uint64_t addr, uint32_t size, uint64_t data)
{
    struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
    const struct g2m_ioport *g2m_ioport = vio->g2m_ioport;
    unsigned int mport = (addr - g2m_ioport->gport) + g2m_ioport->mport;

    switch ( size )
    {
    case 1:
        outb(data, mport);
        break;
    case 2:
        outw(data, mport);
        break;
    case 4:
        outl(data, mport);
        break;
    default:
        BUG();
    }

    return X86EMUL_OKAY;
}

static const struct hvm_io_ops g2m_portio_ops = {
    .accept = g2m_portio_accept,
    .read = g2m_portio_read,
    .write = g2m_portio_write
};

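/*
 * Attach the g2m pass-through handler to a domain.  If no handler slot is
 * available (hvm_next_io_handler() returning NULL), registration is
 * simply skipped here; presumably hvm_next_io_handler() has already
 * flagged the failure.
 */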
void register_g2m_portio_handler(struct domain *d)
{
    struct hvm_io_handler *handler = hvm_next_io_handler(d);

    if ( handler == NULL )
        return;

    handler->type = IOREQ_TYPE_PIO;
    handler->ops = &g2m_portio_ops;
}

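/*
 * Decode a PCI config-space access made via configuration mechanism #1
 * (the 0xcf8/0xcfc port pair).  In the 0xcf8 value, bit 31 is the enable
 * bit, bits 23:16 the bus, 15:11 the slot, 10:8 the function, and 7:2 the
 * aligned register; the low two register bits come from the offset of the
 * access into the 0xcfc data window.  As an illustrative example, a cf8
 * value of 0x80001810 together with a 2-byte-offset data access (i.e. to
 * port 0xcfe) decodes to bus 0, slot 3, function 0, register 0x12.
 */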
unsigned int hvm_pci_decode_addr(unsigned int cf8, unsigned int addr,
                                 unsigned int *bus, unsigned int *slot,
                                 unsigned int *func)
{
    unsigned int bdf;

    ASSERT(CF8_ENABLED(cf8));

    bdf = CF8_BDF(cf8);
    *bus = PCI_BUS(bdf);
    *slot = PCI_SLOT(bdf);
    *func = PCI_FUNC(bdf);
    /*
     * NB: the lower 2 bits of the register address are fetched from the
     * offset into the 0xcfc register when reading/writing to it.
     */
    return CF8_ADDR_LO(cf8) | (addr & 3);
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */