/*
 * intercept.c: Handle performance critical I/O packets in hypervisor space
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2008, Citrix Systems, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/types.h>
#include <xen/sched.h>
#include <asm/regs.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/hvm/domain.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <asm/current.h>
#include <io_ports.h>
#include <xen/event.h>
#include <xen/iommu.h>

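/*
 * Check whether an MMIO handler claims the access described by @p: the
 * first byte must pass the handler's check() callback.  If a multi-byte
 * access extends beyond what the handler accepts, the domain is crashed
 * rather than the access being split.
 */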
static bool_t hvm_mmio_accept(const struct hvm_io_handler *handler,
                              const ioreq_t *p)
{
    paddr_t first = hvm_mmio_first_byte(p), last;

    BUG_ON(handler->type != IOREQ_TYPE_COPY);

    if ( !handler->mmio.ops->check(current, first) )
        return 0;

    /* Make sure the handler will accept the whole access. */
    last = hvm_mmio_last_byte(p);
    if ( last != first &&
         !handler->mmio.ops->check(current, last) )
        domain_crash(current->domain);

    return 1;
}

static int hvm_mmio_read(const struct hvm_io_handler *handler,
                         uint64_t addr, uint32_t size, uint64_t *data)
{
    BUG_ON(handler->type != IOREQ_TYPE_COPY);

    return handler->mmio.ops->read(current, addr, size, data);
}

static int hvm_mmio_write(const struct hvm_io_handler *handler,
                          uint64_t addr, uint32_t size, uint64_t data)
{
    BUG_ON(handler->type != IOREQ_TYPE_COPY);

    return handler->mmio.ops->write(current, addr, size, data);
}

static const struct hvm_io_ops mmio_ops = {
    .accept = hvm_mmio_accept,
    .read = hvm_mmio_read,
    .write = hvm_mmio_write
};

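/*
 * A port I/O handler accepts the request only if the whole access
 * [p->addr, p->addr + p->size) lies within its registered port range.
 */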
static bool_t hvm_portio_accept(const struct hvm_io_handler *handler,
                                const ioreq_t *p)
{
    unsigned int start = handler->portio.port;
    unsigned int end = start + handler->portio.size;

    BUG_ON(handler->type != IOREQ_TYPE_PIO);

    return (p->addr >= start) && ((p->addr + p->size) <= end);
}

static int hvm_portio_read(const struct hvm_io_handler *handler,
                           uint64_t addr, uint32_t size, uint64_t *data)
{
    uint32_t val = ~0u;
    int rc;

    BUG_ON(handler->type != IOREQ_TYPE_PIO);

    rc = handler->portio.action(IOREQ_READ, addr, size, &val);
    *data = val;

    return rc;
}

static int hvm_portio_write(const struct hvm_io_handler *handler,
                            uint64_t addr, uint32_t size, uint64_t data)
{
    uint32_t val = data;

    BUG_ON(handler->type != IOREQ_TYPE_PIO);

    return handler->portio.action(IOREQ_WRITE, addr, size, &val);
}

static const struct hvm_io_ops portio_ops = {
    .accept = hvm_portio_accept,
    .read = hvm_portio_read,
    .write = hvm_portio_write
};

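/*
 * Carry out the (possibly repeated) I/O request @p via @handler.  Each
 * iteration transfers p->size bytes; for MMIO the address advances by
 * +/-p->size per iteration according to the direction flag, while port I/O
 * always targets the same port.  If only some iterations complete, p->count
 * is reduced to the number actually done so the remainder can be retried.
 */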
int hvm_process_io_intercept(const struct hvm_io_handler *handler,
                             ioreq_t *p)
{
    const struct hvm_io_ops *ops = handler->ops;
    int rc = X86EMUL_OKAY, i, step = p->df ? -p->size : p->size;
    uint64_t data;
    uint64_t addr;

    if ( p->dir == IOREQ_READ )
    {
        for ( i = 0; i < p->count; i++ )
        {
            addr = (p->type == IOREQ_TYPE_COPY) ?
                   p->addr + step * i :
                   p->addr;
            data = 0;
            rc = ops->read(handler, addr, p->size, &data);
            if ( rc != X86EMUL_OKAY )
                break;

            if ( p->data_is_ptr )
            {
                switch ( hvm_copy_to_guest_phys(p->data + step * i,
                                                &data, p->size, current) )
                {
                case HVMTRANS_okay:
                    break;
                case HVMTRANS_bad_gfn_to_mfn:
                    /* Drop the write as real hardware would. */
                    continue;
                case HVMTRANS_bad_linear_to_gfn:
                case HVMTRANS_gfn_paged_out:
                case HVMTRANS_gfn_shared:
                    ASSERT_UNREACHABLE();
                    /* fall through */
                default:
                    domain_crash(current->domain);
                    return X86EMUL_UNHANDLEABLE;
                }
            }
            else
                p->data = data;
        }
    }
    else /* p->dir == IOREQ_WRITE */
    {
        for ( i = 0; i < p->count; i++ )
        {
            if ( p->data_is_ptr )
            {
                data = 0;
                switch ( hvm_copy_from_guest_phys(&data, p->data + step * i,
                                                  p->size) )
                {
                case HVMTRANS_okay:
                    break;
                case HVMTRANS_bad_gfn_to_mfn:
                    data = ~0;
                    break;
                case HVMTRANS_bad_linear_to_gfn:
                case HVMTRANS_gfn_paged_out:
                case HVMTRANS_gfn_shared:
                    ASSERT_UNREACHABLE();
                    /* fall through */
                default:
                    domain_crash(current->domain);
                    return X86EMUL_UNHANDLEABLE;
                }
            }
            else
                data = p->data;

            addr = (p->type == IOREQ_TYPE_COPY) ?
                   p->addr + step * i :
                   p->addr;
            rc = ops->write(handler, addr, p->size, data);
            if ( rc != X86EMUL_OKAY )
                break;
        }
    }

    if ( i )
    {
        p->count = i;
        rc = X86EMUL_OKAY;
    }
    else if ( rc == X86EMUL_UNHANDLEABLE )
    {
        /*
         * Don't forward entire batches to the device model: this would
         * prevent the internal handlers from seeing subsequent iterations
         * of the request.
         */
        p->count = 1;
    }

    return rc;
}

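/*
 * Look up the first registered handler of matching type whose accept()
 * callback claims the request, or return NULL if none does.
 */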
static const struct hvm_io_handler *hvm_find_io_handler(const ioreq_t *p)
{
    struct domain *curr_d = current->domain;
    unsigned int i;

    BUG_ON((p->type != IOREQ_TYPE_PIO) &&
           (p->type != IOREQ_TYPE_COPY));

    for ( i = 0; i < curr_d->arch.hvm_domain.io_handler_count; i++ )
    {
        const struct hvm_io_handler *handler =
            &curr_d->arch.hvm_domain.io_handler[i];
        const struct hvm_io_ops *ops = handler->ops;

        if ( handler->type != p->type )
            continue;

        if ( ops->accept(handler, p) )
            return handler;
    }

    return NULL;
}

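/*
 * Try to satisfy @p with one of the current domain's internal handlers.
 * Returns X86EMUL_UNHANDLEABLE if no handler accepts the request, in which
 * case the caller is expected to emulate it by other means (e.g. by
 * forwarding it to an external device model).
 */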
int hvm_io_intercept(ioreq_t *p)
{
    const struct hvm_io_handler *handler;
    const struct hvm_io_ops *ops;
    int rc;

    handler = hvm_find_io_handler(p);

    if ( handler == NULL )
        return X86EMUL_UNHANDLEABLE;

    rc = hvm_process_io_intercept(handler, p);

    ops = handler->ops;
    if ( ops->complete != NULL )
        ops->complete(handler);

    return rc;
}

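/*
 * Allocate the next free slot in the domain's fixed-size handler table,
 * crashing the domain if all NR_IO_HANDLERS slots are already in use.
 */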
struct hvm_io_handler *hvm_next_io_handler(struct domain *d)
{
    unsigned int i = d->arch.hvm_domain.io_handler_count++;

    ASSERT(d->arch.hvm_domain.io_handler);

    if ( i == NR_IO_HANDLERS )
    {
        domain_crash(d);
        return NULL;
    }

    return &d->arch.hvm_domain.io_handler[i];
}

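/* Register an MMIO handler backed by the given hvm_mmio_ops callbacks. */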
void register_mmio_handler(struct domain *d,
                           const struct hvm_mmio_ops *ops)
{
    struct hvm_io_handler *handler = hvm_next_io_handler(d);

    if ( handler == NULL )
        return;

    handler->type = IOREQ_TYPE_COPY;
    handler->ops = &mmio_ops;
    handler->mmio.ops = ops;
}

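/*
 * Register a handler for the @size ports starting at @port.  As a
 * hypothetical example, a two-port device at 0x510 could be registered as
 *     register_portio_handler(d, 0x510, 2, my_portio_action);
 * where my_portio_action is a portio_action_t callback invoked with
 * IOREQ_READ or IOREQ_WRITE for each access.
 */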
void register_portio_handler(struct domain *d, unsigned int port,
                             unsigned int size, portio_action_t action)
{
    struct hvm_io_handler *handler = hvm_next_io_handler(d);

    if ( handler == NULL )
        return;

    handler->type = IOREQ_TYPE_PIO;
    handler->ops = &portio_ops;
    handler->portio.port = port;
    handler->portio.size = size;
    handler->portio.action = action;
}

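/*
 * Move an already registered port I/O handler of the given size from
 * old_port to new_port, leaving its callback unchanged.
 */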
void relocate_portio_handler(struct domain *d, unsigned int old_port,
                             unsigned int new_port, unsigned int size)
{
    unsigned int i;

    for ( i = 0; i < d->arch.hvm_domain.io_handler_count; i++ )
    {
        struct hvm_io_handler *handler =
            &d->arch.hvm_domain.io_handler[i];

        if ( handler->type != IOREQ_TYPE_PIO )
            continue;

        if ( (handler->portio.port == old_port) &&
             (handler->portio.size == size) )
        {
            handler->portio.port = new_port;
            break;
        }
    }
}

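/*
 * Report whether the guest physical address @gpa is covered by one of the
 * current domain's internal MMIO handlers, by probing the handler list
 * with a dummy single-byte request without performing any access.
 */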
bool_t hvm_mmio_internal(paddr_t gpa)
{
    const struct hvm_io_handler *handler;
    const struct hvm_io_ops *ops;
    ioreq_t p = {
        .type = IOREQ_TYPE_COPY,
        .addr = gpa,
        .count = 1,
        .size = 1,
    };

    handler = hvm_find_io_handler(&p);

    if ( handler == NULL )
        return 0;

    ops = handler->ops;
    if ( ops->complete != NULL )
        ops->complete(handler);

    return 1;
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */