/*
 * Copyright (c) 2017 Citrix Systems Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; If not, see <http://www.gnu.org/licenses/>.
 */

#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include "private.h"

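/*
 * Callback registered with xentoolcore so that this handle can be
 * restricted to a single domain along with all other active handles.
 */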
static int all_restrict_cb(Xentoolcore__Active_Handle *ah, domid_t domid) {
    xendevicemodel_handle *dmod = CONTAINER_OF(ah, *dmod, tc_ah);

    if (dmod->fd < 0)
        /* just in case */
        return 0;

    return xendevicemodel_restrict(dmod, domid);
}

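/*
 * Allocate and initialise a device model handle: register it for
 * restriction, attach (or create) a logger, open a hypercall handle
 * and perform OS-dependent setup. Returns NULL on failure.
 */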
xendevicemodel_handle *xendevicemodel_open(xentoollog_logger *logger,
                                           unsigned open_flags)
{
    xendevicemodel_handle *dmod = calloc(1, sizeof(*dmod));
    int rc;

    if (!dmod)
        return NULL;

    dmod->fd = -1;
    dmod->tc_ah.restrict_callback = all_restrict_cb;
    xentoolcore__register_active_handle(&dmod->tc_ah);

    dmod->flags = open_flags;
    dmod->logger = logger;
    dmod->logger_tofree = NULL;

    if (!dmod->logger) {
        dmod->logger = dmod->logger_tofree =
            (xentoollog_logger*)
            xtl_createlogger_stdiostream(stderr, XTL_PROGRESS, 0);
        if (!dmod->logger)
            goto err;
    }

    dmod->xcall = xencall_open(dmod->logger, 0);
    if (!dmod->xcall)
        goto err;

    rc = osdep_xendevicemodel_open(dmod);
    if (rc)
        goto err;

    return dmod;

err:
    xtl_logger_destroy(dmod->logger_tofree);
    xentoolcore__deregister_active_handle(&dmod->tc_ah);
    xencall_close(dmod->xcall);
    free(dmod);
    return NULL;
}

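/*
 * Tear down a handle created by xendevicemodel_open(). A NULL handle
 * is tolerated and treated as success.
 */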
int xendevicemodel_close(xendevicemodel_handle *dmod)
{
    int rc;

    if (!dmod)
        return 0;

    rc = osdep_xendevicemodel_close(dmod);

    xentoolcore__deregister_active_handle(&dmod->tc_ah);
    xencall_close(dmod->xcall);
    xtl_logger_destroy(dmod->logger_tofree);
    free(dmod);
    return rc;
}

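/*
 * Issue a dm_op hypercall. Caller buffers are bounced through
 * hypercall-safe memory, an array of guest handles is built to
 * describe them, and the (possibly updated) contents are copied back
 * on success.
 */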
int xendevicemodel_xcall(xendevicemodel_handle *dmod,
                         domid_t domid, unsigned int nr_bufs,
                         struct xendevicemodel_buf bufs[])
{
    int ret = -1;
    void **xcall_bufs;
    xen_dm_op_buf_t *op_bufs = NULL;
    unsigned int i;

    xcall_bufs = calloc(nr_bufs, sizeof(*xcall_bufs));
    if (xcall_bufs == NULL)
        goto out;

    op_bufs = xencall_alloc_buffer(dmod->xcall, sizeof(xen_dm_op_buf_t) *
                                   nr_bufs);
    if (op_bufs == NULL)
        goto out;

    for (i = 0; i < nr_bufs; i++) {
        xcall_bufs[i] = xencall_alloc_buffer(dmod->xcall, bufs[i].size);
        if (xcall_bufs[i] == NULL)
            goto out;

        memcpy(xcall_bufs[i], bufs[i].ptr, bufs[i].size);
        set_xen_guest_handle_raw(op_bufs[i].h, xcall_bufs[i]);

        op_bufs[i].size = bufs[i].size;
    }

    ret = xencall3(dmod->xcall, __HYPERVISOR_dm_op,
                   domid, nr_bufs, (unsigned long)op_bufs);
    if (ret < 0)
        goto out;

    for (i = 0; i < nr_bufs; i++)
        memcpy(bufs[i].ptr, xcall_bufs[i], bufs[i].size);

out:
    if (xcall_bufs)
        for (i = 0; i < nr_bufs; i++)
            xencall_free_buffer(dmod->xcall, xcall_bufs[i]);

    xencall_free_buffer(dmod->xcall, op_bufs);
    free(xcall_bufs);

    return ret;
}

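/*
 * Variadic helper: collect (pointer, size) pairs into an array of
 * xendevicemodel_buf and pass them to the OS-dependent dm_op backend.
 */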
static int xendevicemodel_op(
    xendevicemodel_handle *dmod, domid_t domid, unsigned int nr_bufs, ...)
{
    struct xendevicemodel_buf *bufs;
    va_list args;
    unsigned int i;
    int ret;

    bufs = calloc(nr_bufs, sizeof(*bufs));
    if (!bufs)
        return -1;

    va_start(args, nr_bufs);
    for (i = 0; i < nr_bufs; i++) {
        bufs[i].ptr = va_arg(args, void *);
        bufs[i].size = va_arg(args, size_t);
    }
    va_end(args);

    ret = osdep_xendevicemodel_op(dmod, domid, nr_bufs, bufs);

    free(bufs);

    return ret;
}

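/*
 * Create an ioreq server for a domain, returning its id in *id.
 * handle_bufioreq selects how (and whether) the server handles
 * buffered ioreqs.
 */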
int xendevicemodel_create_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, int handle_bufioreq,
    ioservid_t *id)
{
    struct xen_dm_op op;
    struct xen_dm_op_create_ioreq_server *data;
    int rc;

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_create_ioreq_server;
    data = &op.u.create_ioreq_server;

    data->handle_bufioreq = handle_bufioreq;

    rc = xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
    if (rc)
        return rc;

    *id = data->id;

    return 0;
}

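/*
 * Query an ioreq server's synchronous and buffered ioreq frame numbers
 * and its buffered ioreq event channel. Output pointers may be NULL if
 * the corresponding value is not wanted.
 */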
int xendevicemodel_get_ioreq_server_info(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    xen_pfn_t *ioreq_gfn, xen_pfn_t *bufioreq_gfn,
    evtchn_port_t *bufioreq_port)
{
    struct xen_dm_op op;
    struct xen_dm_op_get_ioreq_server_info *data;
    int rc;

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_get_ioreq_server_info;
    data = &op.u.get_ioreq_server_info;

    data->id = id;

    rc = xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
    if (rc)
        return rc;

    if (ioreq_gfn)
        *ioreq_gfn = data->ioreq_gfn;

    if (bufioreq_gfn)
        *bufioreq_gfn = data->bufioreq_gfn;

    if (bufioreq_port)
        *bufioreq_port = data->bufioreq_port;

    return 0;
}

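/*
 * Ask Xen to forward accesses to the inclusive MMIO or port I/O range
 * [start, end] to the given ioreq server.
 */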
int xendevicemodel_map_io_range_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    struct xen_dm_op op;
    struct xen_dm_op_ioreq_server_range *data;

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_map_io_range_to_ioreq_server;
    data = &op.u.map_io_range_to_ioreq_server;

    data->id = id;
    data->type = is_mmio ? XEN_DMOP_IO_RANGE_MEMORY : XEN_DMOP_IO_RANGE_PORT;
    data->start = start;
    data->end = end;

    return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
}

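/* Remove an MMIO or port I/O range previously mapped to an ioreq server. */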
int xendevicemodel_unmap_io_range_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    struct xen_dm_op op;
    struct xen_dm_op_ioreq_server_range *data;

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_unmap_io_range_from_ioreq_server;
    data = &op.u.unmap_io_range_from_ioreq_server;

    data->id = id;
    data->type = is_mmio ? XEN_DMOP_IO_RANGE_MEMORY : XEN_DMOP_IO_RANGE_PORT;
    data->start = start;
    data->end = end;

    return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
}

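/*
 * Map guest memory of a given type to an ioreq server. Currently only
 * HVMMEM_ioreq_server memory is accepted, and the only valid flag is
 * XEN_DMOP_IOREQ_MEM_ACCESS_WRITE; anything else fails with EINVAL.
 */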
int xendevicemodel_map_mem_type_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, uint16_t type,
    uint32_t flags)
{
    struct xen_dm_op op;
    struct xen_dm_op_map_mem_type_to_ioreq_server *data;

    if (type != HVMMEM_ioreq_server ||
        flags & ~XEN_DMOP_IOREQ_MEM_ACCESS_WRITE) {
        errno = EINVAL;
        return -1;
    }

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_map_mem_type_to_ioreq_server;
    data = &op.u.map_mem_type_to_ioreq_server;

    data->id = id;
    data->type = type;
    data->flags = flags;

    return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
}

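/*
 * Register a PCI device (by segment/bus/device/function) with an ioreq
 * server, so that config space accesses to it are forwarded there. The
 * device and function numbers are bounded at 5 and 3 bits respectively.
 */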
int xendevicemodel_map_pcidev_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    struct xen_dm_op op;
    struct xen_dm_op_ioreq_server_range *data;

    if (device > 0x1f || function > 0x7) {
        errno = EINVAL;
        return -1;
    }

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_map_io_range_to_ioreq_server;
    data = &op.u.map_io_range_to_ioreq_server;

    data->id = id;
    data->type = XEN_DMOP_IO_RANGE_PCI;

    /*
     * The underlying hypercall will deal with ranges of PCI SBDF
     * but, for simplicity, the API only uses singletons.
     */
    data->start = data->end = XEN_DMOP_PCI_SBDF((uint64_t)segment,
                                                (uint64_t)bus,
                                                (uint64_t)device,
                                                (uint64_t)function);

    return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
}

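/* Remove a PCI device previously mapped to an ioreq server. */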
int xendevicemodel_unmap_pcidev_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    struct xen_dm_op op;
    struct xen_dm_op_ioreq_server_range *data;

    if (device > 0x1f || function > 0x7) {
        errno = EINVAL;
        return -1;
    }

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_unmap_io_range_from_ioreq_server;
    data = &op.u.unmap_io_range_from_ioreq_server;

    data->id = id;
    data->type = XEN_DMOP_IO_RANGE_PCI;

    /*
     * The underlying hypercall will deal with ranges of PCI SBDF
     * but, for simplicity, the API only uses singletons.
     */
    data->start = data->end = XEN_DMOP_PCI_SBDF((uint64_t)segment,
                                                (uint64_t)bus,
                                                (uint64_t)device,
                                                (uint64_t)function);

    return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
}

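/* Destroy an ioreq server, releasing its resources in Xen. */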
int xendevicemodel_destroy_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id)
{
    struct xen_dm_op op;
    struct xen_dm_op_destroy_ioreq_server *data;

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_destroy_ioreq_server;
    data = &op.u.destroy_ioreq_server;

    data->id = id;

    return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
}

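/* Enable or disable an ioreq server (normalising 'enabled' to 0 or 1). */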
int xendevicemodel_set_ioreq_server_state(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int enabled)
{
    struct xen_dm_op op;
    struct xen_dm_op_set_ioreq_server_state *data;

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_set_ioreq_server_state;
    data = &op.u.set_ioreq_server_state;

    data->id = id;
    data->enabled = !!enabled;

    return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
}

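/* Set the level of a virtual PCI INTx line for the given device. */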
int xendevicemodel_set_pci_intx_level(
    xendevicemodel_handle *dmod, domid_t domid, uint16_t segment,
    uint8_t bus, uint8_t device, uint8_t intx, unsigned int level)
{
    struct xen_dm_op op;
    struct xen_dm_op_set_pci_intx_level *data;

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_set_pci_intx_level;
    data = &op.u.set_pci_intx_level;

    data->domain = segment;
    data->bus = bus;
    data->device = device;
    data->intx = intx;
    data->level = level;

    return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
}

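/* Set the level of a virtual ISA IRQ line. */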
int xendevicemodel_set_isa_irq_level(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t irq,
    unsigned int level)
{
    struct xen_dm_op op;
    struct xen_dm_op_set_isa_irq_level *data;

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_set_isa_irq_level;
    data = &op.u.set_isa_irq_level;

    data->isa_irq = irq;
    data->level = level;

    return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
}

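/* Route a virtual PCI INTx link to an ISA IRQ. */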
int xendevicemodel_set_pci_link_route(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t link, uint8_t irq)
{
    struct xen_dm_op op;
    struct xen_dm_op_set_pci_link_route *data;

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_set_pci_link_route;
    data = &op.u.set_pci_link_route;

    data->link = link;
    data->isa_irq = irq;

    return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
}

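/* Inject an MSI with the given address and data into a domain. */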
int xendevicemodel_inject_msi(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t msi_addr,
    uint32_t msi_data)
{
    struct xen_dm_op op;
    struct xen_dm_op_inject_msi *data;

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_inject_msi;
    data = &op.u.inject_msi;

    data->addr = msi_addr;
    data->data = msi_data;

    return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
}

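/*
 * Track dirty VRAM: report which of the nr pages starting at first_pfn
 * have been written, as a bitmap of (nr + 7) / 8 bytes.
 */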
int xendevicemodel_track_dirty_vram(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr, unsigned long *dirty_bitmap)
{
    struct xen_dm_op op;
    struct xen_dm_op_track_dirty_vram *data;

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_track_dirty_vram;
    data = &op.u.track_dirty_vram;

    data->first_pfn = first_pfn;
    data->nr = nr;

    return xendevicemodel_op(dmod, domid, 2, &op, sizeof(op),
                             dirty_bitmap, (size_t)(nr + 7) / 8);
}

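/*
 * Notify Xen that extents of guest memory have been modified, so the
 * affected pages are marked dirty (e.g. for live migration).
 */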
int xendevicemodel_modified_memory_bulk(
    xendevicemodel_handle *dmod, domid_t domid,
    struct xen_dm_op_modified_memory_extent *extents, uint32_t nr)
{
    struct xen_dm_op op;
    struct xen_dm_op_modified_memory *header;
    size_t extents_size = nr * sizeof(struct xen_dm_op_modified_memory_extent);

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_modified_memory;
    header = &op.u.modified_memory;

    header->nr_extents = nr;
    header->opaque = 0;

    return xendevicemodel_op(dmod, domid, 2, &op, sizeof(op),
                             extents, extents_size);
}

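/* Single-extent convenience wrapper around the bulk call above. */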
int xendevicemodel_modified_memory(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr)
{
    struct xen_dm_op_modified_memory_extent extent = {
        .first_pfn = first_pfn,
        .nr = nr,
    };

    return xendevicemodel_modified_memory_bulk(dmod, domid, &extent, 1);
}

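/* Set the HVM memory type of nr pages starting at first_pfn. */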
int xendevicemodel_set_mem_type(
    xendevicemodel_handle *dmod, domid_t domid, hvmmem_type_t mem_type,
    uint64_t first_pfn, uint32_t nr)
{
    struct xen_dm_op op;
    struct xen_dm_op_set_mem_type *data;

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_set_mem_type;
    data = &op.u.set_mem_type;

    data->mem_type = mem_type;
    data->first_pfn = first_pfn;
    data->nr = nr;

    return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
}

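/*
 * Inject an exception or interrupt event into a vCPU, specifying the
 * vector, event type, optional error code, instruction length and the
 * CR2 value to report for page faults.
 */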
int xendevicemodel_inject_event(
    xendevicemodel_handle *dmod, domid_t domid, int vcpu, uint8_t vector,
    uint8_t type, uint32_t error_code, uint8_t insn_len, uint64_t cr2)
{
    struct xen_dm_op op;
    struct xen_dm_op_inject_event *data;

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_inject_event;
    data = &op.u.inject_event;

    data->vcpuid = vcpu;
    data->vector = vector;
    data->type = type;
    data->error_code = error_code;
    data->insn_len = insn_len;
    data->cr2 = cr2;

    return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
}

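/* Ask Xen to shut down a domain with the given SHUTDOWN_* reason code. */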
int xendevicemodel_shutdown(
    xendevicemodel_handle *dmod, domid_t domid, unsigned int reason)
{
    struct xen_dm_op op;
    struct xen_dm_op_remote_shutdown *data;

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_remote_shutdown;
    data = &op.u.remote_shutdown;

    data->reason = reason;

    return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
}

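/*
 * Restrict this handle so it can only be used to operate on the given
 * domain (the implementation is OS-dependent).
 */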
int xendevicemodel_restrict(xendevicemodel_handle *dmod, domid_t domid)
{
    return osdep_xendevicemodel_restrict(dmod, domid);
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */