1 /******************************************************************************
2 * xc_domain.c
3 *
4 * API for manipulating and obtaining information on domains.
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation;
9 * version 2.1 of the License.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; If not, see <http://www.gnu.org/licenses/>.
18 *
19 * Copyright (c) 2003, K A Fraser.
20 */
21
22 #include "xc_private.h"
23 #include <xen/memory.h>
24 #include <xen/hvm/hvm_op.h>
25
/*
 * Create a new domain from 'config'.  *pdomid holds the requested domain
 * id on entry; on success it is updated with the id Xen actually assigned,
 * and *config is written back with the settings Xen applied.
 * Returns 0 on success, else the do_domctl() error.
 */
int xc_domain_create(xc_interface *xch, uint32_t *pdomid,
                     struct xen_domctl_createdomain *config)
{
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_createdomain,
        .domain = *pdomid,
        .u.createdomain = *config,
    };
    int rc = do_domctl(xch, &domctl);

    if ( rc != 0 )
        return rc;

    /* Domain ids fit in 16 bits; report the id Xen chose. */
    *pdomid = (uint16_t)domctl.domain;
    *config = domctl.u.createdomain;

    return 0;
}
44
/*
 * Flush the data cache for a range of guest pfns.
 *
 * On x86 this is deliberately a no-op: the architecture's cache coherency
 * guarantees make the hypercall unnecessary, and Xen would only answer
 * -ENOSYS, so we avoid the hypercall overhead and report success.
 */
int xc_domain_cacheflush(xc_interface *xch, uint32_t domid,
                         xen_pfn_t start_pfn, xen_pfn_t nr_pfns)
{
#if defined (__i386__) || defined (__x86_64__)
    return 0;
#else
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_cacheflush,
        .domain = domid,
        .u.cacheflush = {
            .start_pfn = start_pfn,
            .nr_pfns = nr_pfns,
        },
    };

    return do_domctl(xch, &domctl);
#endif
}
65
/* Issue XEN_DOMCTL_pausedomain for @domid. */
int xc_domain_pause(xc_interface *xch,
                    uint32_t domid)
{
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_pausedomain,
        .domain = domid,
    };

    return do_domctl(xch, &domctl);
}
74
75
/* Issue XEN_DOMCTL_unpausedomain for @domid. */
int xc_domain_unpause(xc_interface *xch,
                      uint32_t domid)
{
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_unpausedomain,
        .domain = domid,
    };

    return do_domctl(xch, &domctl);
}
84
85
/* Issue XEN_DOMCTL_destroydomain for @domid. */
int xc_domain_destroy(xc_interface *xch,
                      uint32_t domid)
{
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_destroydomain,
        .domain = domid,
    };

    return do_domctl(xch, &domctl);
}
94
/*
 * Request a remote shutdown of @domid with the given SCHEDOP reason code.
 * Returns the hypercall result, or -1 if the argument buffer could not be
 * allocated.
 */
int xc_domain_shutdown(xc_interface *xch,
                       uint32_t domid,
                       int reason)
{
    int ret = -1;
    DECLARE_HYPERCALL_BUFFER(sched_remote_shutdown_t, arg);

    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
    if ( arg != NULL )
    {
        arg->domain_id = domid;
        arg->reason = reason;
        ret = xencall2(xch->xcall, __HYPERVISOR_sched_op,
                       SCHEDOP_remote_shutdown,
                       HYPERCALL_BUFFER_AS_ARG(arg));
        xc_hypercall_buffer_free(xch, arg);
    }
    else
        PERROR("Could not allocate memory for xc_domain_shutdown hypercall");

    return ret;
}
120
121
/*
 * Set the NUMA node affinity of @domid.  'nodemap' is copied into a
 * hypercall-safe buffer before issuing XEN_DOMCTL_setnodeaffinity.
 * Returns the domctl result, or -1 on local failure.
 */
int xc_domain_node_setaffinity(xc_interface *xch,
                               uint32_t domid,
                               xc_nodemap_t nodemap)
{
    int rc = -1;
    int nodesize;
    struct xen_domctl domctl = {};
    DECLARE_HYPERCALL_BUFFER(uint8_t, local);

    nodesize = xc_get_nodemap_size(xch);
    if ( nodesize <= 0 )
    {
        PERROR("Could not get number of nodes");
        goto done;
    }

    local = xc_hypercall_buffer_alloc(xch, local, nodesize);
    if ( local == NULL )
    {
        PERROR("Could not allocate memory for setnodeaffinity domctl hypercall");
        goto done;
    }

    memcpy(local, nodemap, nodesize);

    domctl.cmd = XEN_DOMCTL_setnodeaffinity;
    domctl.domain = domid;
    domctl.u.nodeaffinity.nodemap.nr_bits = nodesize * 8;
    set_xen_guest_handle(domctl.u.nodeaffinity.nodemap.bitmap, local);

    rc = do_domctl(xch, &domctl);

    xc_hypercall_buffer_free(xch, local);

 done:
    return rc;
}
159
/*
 * Read the NUMA node affinity of @domid into 'nodemap' (which must be at
 * least xc_get_nodemap_size() bytes).  Returns the domctl result, or -1
 * on local failure.
 */
int xc_domain_node_getaffinity(xc_interface *xch,
                               uint32_t domid,
                               xc_nodemap_t nodemap)
{
    int rc = -1;
    int nodesize;
    struct xen_domctl domctl = {};
    DECLARE_HYPERCALL_BUFFER(uint8_t, local);

    nodesize = xc_get_nodemap_size(xch);
    if ( nodesize <= 0 )
    {
        PERROR("Could not get number of nodes");
        goto done;
    }

    local = xc_hypercall_buffer_alloc(xch, local, nodesize);
    if ( local == NULL )
    {
        PERROR("Could not allocate memory for getnodeaffinity domctl hypercall");
        goto done;
    }

    domctl.cmd = XEN_DOMCTL_getnodeaffinity;
    domctl.domain = domid;
    domctl.u.nodeaffinity.nodemap.nr_bits = nodesize * 8;
    set_xen_guest_handle(domctl.u.nodeaffinity.nodemap.bitmap, local);

    rc = do_domctl(xch, &domctl);

    /* Copy back unconditionally, matching historical behaviour. */
    memcpy(nodemap, local, nodesize);

    xc_hypercall_buffer_free(xch, local);

 done:
    return rc;
}
198
/*
 * Set a vcpu's hard and/or soft affinity (selected by 'flags').
 * Both cpumaps are in/out: on return they hold what Xen wrote back.
 * Returns the domctl result, or -1 on local failure.
 */
int xc_vcpu_setaffinity(xc_interface *xch,
                        uint32_t domid,
                        int vcpu,
                        xc_cpumap_t cpumap_hard_inout,
                        xc_cpumap_t cpumap_soft_inout,
                        uint32_t flags)
{
    int rc = -1;
    int cpusize;
    struct xen_domctl domctl = {};
    DECLARE_HYPERCALL_BOUNCE(cpumap_hard_inout, 0,
                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    DECLARE_HYPERCALL_BOUNCE(cpumap_soft_inout, 0,
                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);

    cpusize = xc_get_cpumap_size(xch);
    if ( cpusize <= 0 )
    {
        PERROR("Could not get number of cpus");
        return -1;
    }

    HYPERCALL_BOUNCE_SET_SIZE(cpumap_hard_inout, cpusize);
    HYPERCALL_BOUNCE_SET_SIZE(cpumap_soft_inout, cpusize);

    if ( xc_hypercall_bounce_pre(xch, cpumap_hard_inout) ||
         xc_hypercall_bounce_pre(xch, cpumap_soft_inout) )
    {
        PERROR("Could not allocate hcall buffers for DOMCTL_setvcpuaffinity");
        goto done;
    }

    domctl.cmd = XEN_DOMCTL_setvcpuaffinity;
    domctl.domain = domid;
    domctl.u.vcpuaffinity.vcpu = vcpu;
    domctl.u.vcpuaffinity.flags = flags;
    domctl.u.vcpuaffinity.cpumap_hard.nr_bits = cpusize * 8;
    domctl.u.vcpuaffinity.cpumap_soft.nr_bits = cpusize * 8;
    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap_hard.bitmap,
                         cpumap_hard_inout);
    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap_soft.bitmap,
                         cpumap_soft_inout);

    rc = do_domctl(xch, &domctl);

 done:
    xc_hypercall_bounce_post(xch, cpumap_hard_inout);
    xc_hypercall_bounce_post(xch, cpumap_soft_inout);

    return rc;
}
251
252
/*
 * Read a vcpu's hard and soft affinity into the caller's cpumaps
 * (each at least xc_get_cpumap_size() bytes).  Returns the domctl
 * result, or -1 on local failure.
 */
int xc_vcpu_getaffinity(xc_interface *xch,
                        uint32_t domid,
                        int vcpu,
                        xc_cpumap_t cpumap_hard,
                        xc_cpumap_t cpumap_soft,
                        uint32_t flags)
{
    int rc = -1;
    int cpusize;
    struct xen_domctl domctl = {};
    DECLARE_HYPERCALL_BOUNCE(cpumap_hard, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    DECLARE_HYPERCALL_BOUNCE(cpumap_soft, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);

    cpusize = xc_get_cpumap_size(xch);
    if ( cpusize <= 0 )
    {
        PERROR("Could not get number of cpus");
        return -1;
    }

    HYPERCALL_BOUNCE_SET_SIZE(cpumap_hard, cpusize);
    HYPERCALL_BOUNCE_SET_SIZE(cpumap_soft, cpusize);

    if ( xc_hypercall_bounce_pre(xch, cpumap_hard) ||
         xc_hypercall_bounce_pre(xch, cpumap_soft) )
    {
        PERROR("Could not allocate hcall buffers for DOMCTL_getvcpuaffinity");
        goto done;
    }

    domctl.cmd = XEN_DOMCTL_getvcpuaffinity;
    domctl.domain = domid;
    domctl.u.vcpuaffinity.vcpu = vcpu;
    domctl.u.vcpuaffinity.flags = flags;
    domctl.u.vcpuaffinity.cpumap_hard.nr_bits = cpusize * 8;
    domctl.u.vcpuaffinity.cpumap_soft.nr_bits = cpusize * 8;
    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap_hard.bitmap,
                         cpumap_hard);
    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap_soft.bitmap,
                         cpumap_soft);

    rc = do_domctl(xch, &domctl);

 done:
    xc_hypercall_bounce_post(xch, cpumap_hard);
    xc_hypercall_bounce_post(xch, cpumap_soft);

    return rc;
}
303
/*
 * Query the guest's address size and return it in *guest_width in BYTES.
 * Returns 0 on success, 1 on domctl failure (historical interface).
 *
 * Fix: dropped the redundant memset() — the designated initializer
 * already zero-fills the whole domctl.
 */
int xc_domain_get_guest_width(xc_interface *xch, uint32_t domid,
                              unsigned int *guest_width)
{
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_get_address_size,
        .domain = domid,
    };

    if ( do_domctl(xch, &domctl) != 0 )
        return 1;

    /* Xen reports the size in bits; callers want bytes. */
    *guest_width = domctl.u.address_size.size / 8;

    return 0;
}
320
/*
 * Initialise a virtual UART of the given type for @domid, backed by page
 * @gfn and serviced by @console_domid.  On success *evtchn receives the
 * event channel Xen allocated for the vuart.
 *
 * Fix: dropped the redundant memset() — the initializer already
 * zero-fills domctl.
 */
int xc_dom_vuart_init(xc_interface *xch,
                      uint32_t type,
                      uint32_t domid,
                      uint32_t console_domid,
                      xen_pfn_t gfn,
                      evtchn_port_t *evtchn)
{
    int rc;
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_vuart_op,
        .domain = domid,
        .u.vuart_op = {
            .cmd = XEN_DOMCTL_VUART_OP_INIT,
            .type = type,
            .console_domid = console_domid,
            .gfn = gfn,
        },
    };

    rc = do_domctl(xch, &domctl);
    if ( rc < 0 )
        return rc;

    *evtchn = domctl.u.vuart_op.evtchn;

    return rc;
}
347
/*
 * Retrieve domain info for exactly @domid.  'info' may be NULL if the
 * caller only wants the success/failure indication.
 * Returns 0 on success, -1 on failure.
 */
int xc_domain_getinfo_single(xc_interface *xch,
                             uint32_t domid,
                             xc_domaininfo_t *info)
{
    struct xen_domctl domctl = {};

    domctl.cmd = XEN_DOMCTL_getdomaininfo;
    domctl.domain = domid;

    if ( do_domctl(xch, &domctl) < 0 )
        return -1;

    if ( info )
        *info = domctl.u.getdomaininfo;

    return 0;
}
365
/*
 * Fetch info for up to 'max_domains' domains starting at 'first_domain'.
 * Returns the number of entries written into 'info', or -1 on error.
 */
int xc_domain_getinfolist(xc_interface *xch,
                          uint32_t first_domain,
                          unsigned int max_domains,
                          xc_domaininfo_t *info)
{
    int num;
    struct xen_sysctl sysctl = {};
    DECLARE_HYPERCALL_BOUNCE(info, max_domains * sizeof(*info),
                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);

    if ( xc_hypercall_bounce_pre(xch, info) )
        return -1;

    sysctl.cmd = XEN_SYSCTL_getdomaininfolist;
    sysctl.u.getdomaininfolist.first_domain = first_domain;
    sysctl.u.getdomaininfolist.max_domains = max_domains;
    set_xen_guest_handle(sysctl.u.getdomaininfolist.buffer, info);

    if ( xc_sysctl(xch, &sysctl) < 0 )
        num = -1;
    else
        num = sysctl.u.getdomaininfolist.num_domains;

    xc_hypercall_bounce_post(xch, info);

    return num;
}
392
393 /* set broken page p2m */
xc_set_broken_page_p2m(xc_interface * xch,uint32_t domid,unsigned long pfn)394 int xc_set_broken_page_p2m(xc_interface *xch,
395 uint32_t domid,
396 unsigned long pfn)
397 {
398 int ret;
399 struct xen_domctl domctl = {};
400
401 domctl.cmd = XEN_DOMCTL_set_broken_page_p2m;
402 domctl.domain = domid;
403 domctl.u.set_broken_page_p2m.pfn = pfn;
404 ret = do_domctl(xch, &domctl);
405
406 return ret ? -1 : 0;
407 }
408
409 /* get info from hvm guest for save */
xc_domain_hvm_getcontext(xc_interface * xch,uint32_t domid,uint8_t * ctxt_buf,uint32_t size)410 int xc_domain_hvm_getcontext(xc_interface *xch,
411 uint32_t domid,
412 uint8_t *ctxt_buf,
413 uint32_t size)
414 {
415 int ret;
416 struct xen_domctl domctl = {};
417 DECLARE_HYPERCALL_BOUNCE(ctxt_buf, size, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
418
419 if ( xc_hypercall_bounce_pre(xch, ctxt_buf) )
420 return -1;
421
422 domctl.cmd = XEN_DOMCTL_gethvmcontext;
423 domctl.domain = domid;
424 domctl.u.hvmcontext.size = size;
425 set_xen_guest_handle(domctl.u.hvmcontext.buffer, ctxt_buf);
426
427 ret = do_domctl(xch, &domctl);
428
429 xc_hypercall_bounce_post(xch, ctxt_buf);
430
431 return (ret < 0 ? -1 : domctl.u.hvmcontext.size);
432 }
433
434 /* Get just one element of the HVM guest context.
435 * size must be >= HVM_SAVE_LENGTH(type) */
xc_domain_hvm_getcontext_partial(xc_interface * xch,uint32_t domid,uint16_t typecode,uint16_t instance,void * ctxt_buf,uint32_t size)436 int xc_domain_hvm_getcontext_partial(xc_interface *xch,
437 uint32_t domid,
438 uint16_t typecode,
439 uint16_t instance,
440 void *ctxt_buf,
441 uint32_t size)
442 {
443 int ret;
444 struct xen_domctl domctl = {};
445 DECLARE_HYPERCALL_BOUNCE(ctxt_buf, size, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
446
447 if ( !ctxt_buf || xc_hypercall_bounce_pre(xch, ctxt_buf) )
448 return -1;
449
450 domctl.cmd = XEN_DOMCTL_gethvmcontext_partial;
451 domctl.domain = domid;
452 domctl.u.hvmcontext_partial.type = typecode;
453 domctl.u.hvmcontext_partial.instance = instance;
454 domctl.u.hvmcontext_partial.bufsz = size;
455 set_xen_guest_handle(domctl.u.hvmcontext_partial.buffer, ctxt_buf);
456
457 ret = do_domctl(xch, &domctl);
458
459 xc_hypercall_bounce_post(xch, ctxt_buf);
460
461 return ret ? -1 : 0;
462 }
463
464 /* set info to hvm guest for restore */
xc_domain_hvm_setcontext(xc_interface * xch,uint32_t domid,uint8_t * ctxt_buf,uint32_t size)465 int xc_domain_hvm_setcontext(xc_interface *xch,
466 uint32_t domid,
467 uint8_t *ctxt_buf,
468 uint32_t size)
469 {
470 int ret;
471 struct xen_domctl domctl = {};
472 DECLARE_HYPERCALL_BOUNCE(ctxt_buf, size, XC_HYPERCALL_BUFFER_BOUNCE_IN);
473
474 if ( xc_hypercall_bounce_pre(xch, ctxt_buf) )
475 return -1;
476
477 domctl.cmd = XEN_DOMCTL_sethvmcontext;
478 domctl.domain = domid;
479 domctl.u.hvmcontext.size = size;
480 set_xen_guest_handle(domctl.u.hvmcontext.buffer, ctxt_buf);
481
482 ret = do_domctl(xch, &domctl);
483
484 xc_hypercall_bounce_post(xch, ctxt_buf);
485
486 return ret;
487 }
488
/* Fetch the guest context of @vcpu in @domid into *ctxt. */
int xc_vcpu_getcontext(xc_interface *xch,
                       uint32_t domid,
                       uint32_t vcpu,
                       vcpu_guest_context_any_t *ctxt)
{
    int ret;
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_getvcpucontext,
        .domain = domid,
        .u.vcpucontext.vcpu = (uint16_t)vcpu,
    };
    DECLARE_HYPERCALL_BOUNCE(ctxt, sizeof(vcpu_guest_context_any_t),
                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);

    if ( xc_hypercall_bounce_pre(xch, ctxt) )
        return -1;

    set_xen_guest_handle(domctl.u.vcpucontext.ctxt, ctxt);

    ret = do_domctl(xch, &domctl);

    xc_hypercall_bounce_post(xch, ctxt);

    return ret;
}
512
/*
 * Get a vcpu's extended (xsave) state.  x86 only; returns -ENODEV on
 * other architectures.
 *
 * Two-phase protocol: call first with extstate->size and
 * extstate->xfeature_mask both zero to have Xen report the required
 * mask and buffer size back in *extstate; then call again with
 * extstate->size set and extstate->buffer pointing at caller memory to
 * fetch the actual state.
 */
int xc_vcpu_get_extstate(xc_interface *xch,
                         uint32_t domid,
                         uint32_t vcpu,
                         xc_vcpu_extstate_t *extstate)
{
    int rc = -ENODEV;
#if defined (__i386__) || defined(__x86_64__)
    struct xen_domctl domctl = {};
    DECLARE_HYPERCALL_BUFFER(void, buffer);
    bool get_state;

    if ( !extstate )
        return -EINVAL;

    domctl.cmd = XEN_DOMCTL_getvcpuextstate;
    domctl.domain = domid;
    domctl.u.vcpuextstate.vcpu = (uint16_t)vcpu;
    domctl.u.vcpuextstate.xfeature_mask = extstate->xfeature_mask;
    domctl.u.vcpuextstate.size = extstate->size;

    /* A non-zero size means the caller wants the state, not just sizing. */
    get_state = (extstate->size != 0);

    if ( get_state )
    {
        buffer = xc_hypercall_buffer_alloc(xch, buffer, extstate->size);

        if ( !buffer )
        {
            PERROR("Unable to allocate memory for vcpu%u's xsave context",
                   vcpu);
            rc = -ENOMEM;
            goto out;
        }

        set_xen_guest_handle(domctl.u.vcpuextstate.buffer, buffer);
    }

    rc = do_domctl(xch, &domctl);

    if ( rc )
        goto out;

    /* A query for the size of buffer to use. */
    if ( !extstate->size && !extstate->xfeature_mask )
    {
        extstate->xfeature_mask = domctl.u.vcpuextstate.xfeature_mask;
        extstate->size = domctl.u.vcpuextstate.size;
        goto out;
    }

    if ( get_state )
        memcpy(extstate->buffer, buffer, extstate->size);

 out:
    if ( get_state )
        xc_hypercall_buffer_free(xch, buffer);
#endif

    return rc;
}
573
/*
 * Arm/update/disarm a Xen watchdog via SCHEDOP_watchdog.  'id' selects
 * the watchdog instance and 'timeout' its new timeout.  Returns the
 * hypercall result, or -1 if the argument buffer could not be allocated.
 */
int xc_watchdog(xc_interface *xch,
                uint32_t id,
                uint32_t timeout)
{
    int ret = -1;
    DECLARE_HYPERCALL_BUFFER(sched_watchdog_t, arg);

    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
    if ( arg != NULL )
    {
        arg->id = id;
        arg->timeout = timeout;
        ret = xencall2(xch->xcall, __HYPERVISOR_sched_op,
                       SCHEDOP_watchdog,
                       HYPERCALL_BUFFER_AS_ARG(arg));
        xc_hypercall_buffer_free(xch, arg);
    }
    else
        PERROR("Could not allocate memory for xc_watchdog hypercall");

    return ret;
}
600
601
/*
 * Issue a shadow op 'sop' for @domid.  'mb', if non-NULL, passes the
 * shadow memory size in on entry and receives the value Xen reports.
 *
 * Fix: dropped the redundant memset() — the designated initializer
 * already zero-fills domctl.
 */
int xc_shadow_control(xc_interface *xch,
                      uint32_t domid,
                      unsigned int sop,
                      unsigned int *mb,
                      unsigned int mode)
{
    int rc;
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_shadow_op,
        .domain = domid,
        .u.shadow_op = {
            .op = sop,
            .mb = mb ? *mb : 0,
            .mode = mode,
        },
    };

    rc = do_domctl(xch, &domctl);

    if ( mb )
        *mb = domctl.u.shadow_op.mb;

    return rc;
}
626
/*
 * Perform a log-dirty shadow op 'sop' for @domid.  'dirty_bitmap', if
 * non-NULL, is an already-prepared hypercall buffer that receives the
 * dirty bitmap; 'pages' is its capacity in bits.  'stats', if non-NULL,
 * receives the op's statistics.  Returns the number of pages Xen
 * reported on success, otherwise the (non-zero) domctl result.
 */
long long xc_logdirty_control(xc_interface *xch,
                              uint32_t domid,
                              unsigned int sop,
                              xc_hypercall_buffer_t *dirty_bitmap,
                              unsigned long pages,
                              unsigned int mode,
                              xc_shadow_op_stats_t *stats)
{
    int rc;
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_shadow_op,
        .domain = domid,
        .u.shadow_op = {
            .op = sop,
            .pages = pages,
            .mode = mode,
        }
    };
    DECLARE_HYPERCALL_BUFFER_ARGUMENT(dirty_bitmap);

    /* The bitmap is optional; only install a handle when one was given. */
    if ( dirty_bitmap )
        set_xen_guest_handle(domctl.u.shadow_op.dirty_bitmap,
                             dirty_bitmap);

    rc = do_domctl(xch, &domctl);

    if ( stats )
        memcpy(stats, &domctl.u.shadow_op.stats,
               sizeof(xc_shadow_op_stats_t));

    return (rc == 0) ? domctl.u.shadow_op.pages : rc;
}
659
/* Read @domid's paging mempool size into *size.  Returns 0 or an error. */
int xc_get_paging_mempool_size(xc_interface *xch, uint32_t domid, uint64_t *size)
{
    int rc;
    struct xen_domctl domctl = {};

    domctl.cmd = XEN_DOMCTL_get_paging_mempool_size;
    domctl.domain = domid;

    rc = do_domctl(xch, &domctl);
    if ( rc == 0 )
        *size = domctl.u.paging_mempool.size;

    return rc;
}
675
/* Set @domid's paging mempool size to @size bytes. */
int xc_set_paging_mempool_size(xc_interface *xch, uint32_t domid, uint64_t size)
{
    struct xen_domctl domctl = {};

    domctl.cmd = XEN_DOMCTL_set_paging_mempool_size;
    domctl.domain = domid;
    domctl.u.paging_mempool.size = size;

    return do_domctl(xch, &domctl);
}
688
/* Set @domid's maximum memory allowance to @max_memkb kilobytes. */
int xc_domain_setmaxmem(xc_interface *xch,
                        uint32_t domid,
                        uint64_t max_memkb)
{
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_max_mem,
        .domain = domid,
        .u.max_mem.max_memkb = max_memkb,
    };

    return do_domctl(xch, &domctl);
}
699
700 #if defined(__i386__) || defined(__x86_64__)
/*
 * Install an e820 memory map of 'nr_entries' entries for @domid via
 * XENMEM_set_memory_map.  Returns the memory-op result, or -1 on local
 * failure.
 */
int xc_domain_set_memory_map(xc_interface *xch,
                             uint32_t domid,
                             struct e820entry entries[],
                             uint32_t nr_entries)
{
    int rc;
    struct xen_foreign_memory_map fmap = {};
    DECLARE_HYPERCALL_BOUNCE(entries, nr_entries * sizeof(struct e820entry),
                             XC_HYPERCALL_BUFFER_BOUNCE_IN);

    if ( !entries || xc_hypercall_bounce_pre(xch, entries) )
        return -1;

    fmap.domid = domid;
    fmap.map.nr_entries = nr_entries;
    set_xen_guest_handle(fmap.map.buffer, entries);

    rc = xc_memory_op(xch, XENMEM_set_memory_map, &fmap, sizeof(fmap));

    xc_hypercall_bounce_post(xch, entries);

    return rc;
}
725
/*
 * Read the host (machine) e820 memory map into 'entries' (room for
 * 'max_entries' entries, which must be > 1).  Returns the number of
 * entries on success, a negative value on hypercall failure, or -1 on
 * local failure.
 *
 * Fix: validate 'max_entries' BEFORE bouncing 'entries' — previously a
 * successful xc_hypercall_bounce_pre() was leaked when max_entries <= 1
 * caused the early return.
 */
int xc_get_machine_memory_map(xc_interface *xch,
                              struct e820entry entries[],
                              uint32_t max_entries)
{
    int rc;
    struct xen_memory_map memmap = {
        .nr_entries = max_entries
    };
    DECLARE_HYPERCALL_BOUNCE(entries, sizeof(struct e820entry) * max_entries,
                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);

    /* Cheap argument checks first, so no bounce buffer can leak. */
    if ( !entries || max_entries <= 1 )
        return -1;

    if ( xc_hypercall_bounce_pre(xch, entries) )
        return -1;

    set_xen_guest_handle(memmap.buffer, entries);

    rc = xc_memory_op(xch, XENMEM_machine_memory_map, &memmap, sizeof(memmap));

    xc_hypercall_bounce_post(xch, entries);

    return rc ? rc : memmap.nr_entries;
}
/*
 * Install a single-entry e820 map limiting @domid's RAM to
 * @map_limitkb kilobytes starting at address 0.
 */
int xc_domain_set_memmap_limit(xc_interface *xch,
                               uint32_t domid,
                               unsigned long map_limitkb)
{
    struct e820entry e820 = {
        .addr = 0,
        .size = (uint64_t)map_limitkb << 10,  /* KiB -> bytes */
        .type = E820_RAM,
    };

    return xc_domain_set_memory_map(xch, domid, &e820, 1);
}
761 #else
/*
 * Non-x86 stub: memory-map limits are not supported here, so always
 * fail with ENOSYS.  Note errno is set AFTER the PERROR call,
 * preserving the historical logging order.
 */
int xc_domain_set_memmap_limit(xc_interface *xch,
                               uint32_t domid,
                               unsigned long map_limitkb)
{
    PERROR("Function not implemented");
    errno = ENOSYS;
    return -1;
}
770 #endif
771
/*
 * Query the reserved device memory (RMRR-style) regions for the PCI
 * device (seg, bus, devfn).  On entry *max_entries is the capacity of
 * 'entries'; on exit it holds the entry count Xen reported (even on
 * failure).  Returns the memory-op result, or -1 on local failure.
 */
int xc_reserved_device_memory_map(xc_interface *xch,
                                  uint32_t flags,
                                  uint16_t seg,
                                  uint8_t bus,
                                  uint8_t devfn,
                                  struct xen_reserved_device_memory entries[],
                                  uint32_t *max_entries)
{
    int rc;
    struct xen_reserved_device_memory_map xrdmmap = {};
    DECLARE_HYPERCALL_BOUNCE(entries,
                             sizeof(struct xen_reserved_device_memory) *
                             *max_entries, XC_HYPERCALL_BUFFER_BOUNCE_OUT);

    if ( xc_hypercall_bounce_pre(xch, entries) )
        return -1;

    xrdmmap.flags = flags;
    xrdmmap.dev.pci.seg = seg;
    xrdmmap.dev.pci.bus = bus;
    xrdmmap.dev.pci.devfn = devfn;
    xrdmmap.nr_entries = *max_entries;
    set_xen_guest_handle(xrdmmap.buffer, entries);

    rc = xc_memory_op(xch, XENMEM_reserved_device_memory_map,
                      &xrdmmap, sizeof(xrdmmap));

    xc_hypercall_bounce_post(xch, entries);

    /* Propagate the count unconditionally, matching original behaviour. */
    *max_entries = xrdmmap.nr_entries;

    return rc;
}
806
/* Set @domid's wallclock offset (seconds relative to host time). */
int xc_domain_set_time_offset(xc_interface *xch,
                              uint32_t domid,
                              int32_t time_offset_seconds)
{
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_settimeoffset,
        .domain = domid,
        .u.settimeoffset.time_offset_seconds = time_offset_seconds,
    };

    return do_domctl(xch, &domctl);
}
817
/* Program @domid's TSC parameters (mode, elapsed time, rate, incarnation). */
int xc_domain_set_tsc_info(xc_interface *xch,
                           uint32_t domid,
                           uint32_t tsc_mode,
                           uint64_t elapsed_nsec,
                           uint32_t gtsc_khz,
                           uint32_t incarnation)
{
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_settscinfo,
        .domain = domid,
        .u.tsc_info = {
            .tsc_mode = tsc_mode,
            .elapsed_nsec = elapsed_nsec,
            .gtsc_khz = gtsc_khz,
            .incarnation = incarnation,
        },
    };

    return do_domctl(xch, &domctl);
}
834
/*
 * Read back @domid's TSC parameters.  The out-parameters are written
 * only on success.  Returns the domctl result.
 */
int xc_domain_get_tsc_info(xc_interface *xch,
                           uint32_t domid,
                           uint32_t *tsc_mode,
                           uint64_t *elapsed_nsec,
                           uint32_t *gtsc_khz,
                           uint32_t *incarnation)
{
    int rc;
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_gettscinfo,
        .domain = domid,
    };

    rc = do_domctl(xch, &domctl);
    if ( rc != 0 )
        return rc;

    *tsc_mode = domctl.u.tsc_info.tsc_mode;
    *elapsed_nsec = domctl.u.tsc_info.elapsed_nsec;
    *gtsc_khz = domctl.u.tsc_info.gtsc_khz;
    *incarnation = domctl.u.tsc_info.incarnation;

    return 0;
}
857
858
/*
 * Query @domid's highest populated gpfn via XENMEM_maximum_gpfn.
 * On success writes it to *gpfns and returns 0; on failure returns the
 * negative memory-op result.
 */
int xc_domain_maximum_gpfn(xc_interface *xch, uint32_t domid, xen_pfn_t *gpfns)
{
    struct xen_memory_domain dom = { .domid = domid };
    long rc = xc_memory_op(xch, XENMEM_maximum_gpfn, &dom, sizeof(dom));

    if ( rc < 0 )
        return rc;

    *gpfns = rc;
    return 0;
}
871
/* Like xc_domain_maximum_gpfn() but reports the pfn COUNT (max + 1). */
int xc_domain_nr_gpfns(xc_interface *xch, uint32_t domid, xen_pfn_t *gpfns)
{
    int rc = xc_domain_maximum_gpfn(xch, domid, gpfns);

    if ( rc >= 0 )
        ++*gpfns;

    return rc;
}
881
/*
 * Ask Xen to allocate nr_extents extents of the given order to @domid.
 * 'extent_start' may be NULL.  Returns the number of extents allocated
 * (possibly fewer than requested) or -1 on local failure.
 */
int xc_domain_increase_reservation(xc_interface *xch,
                                   uint32_t domid,
                                   unsigned long nr_extents,
                                   unsigned int extent_order,
                                   unsigned int mem_flags,
                                   xen_pfn_t *extent_start)
{
    int rc;
    struct xen_memory_reservation reservation = {};
    DECLARE_HYPERCALL_BOUNCE(extent_start, nr_extents * sizeof(*extent_start),
                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);

    /* Bouncing a NULL pointer is permitted and yields a null handle. */
    if ( xc_hypercall_bounce_pre(xch, extent_start) )
    {
        PERROR("Could not bounce memory for XENMEM_increase_reservation hypercall");
        return -1;
    }

    reservation.nr_extents = nr_extents;
    reservation.extent_order = extent_order;
    reservation.mem_flags = mem_flags;
    reservation.domid = domid;
    set_xen_guest_handle(reservation.extent_start, extent_start);

    rc = xc_memory_op(xch, XENMEM_increase_reservation, &reservation,
                      sizeof(reservation));

    xc_hypercall_bounce_post(xch, extent_start);

    return rc;
}
913
/*
 * Like xc_domain_increase_reservation() but treat partial success as
 * failure: returns 0 only if all nr_extents extents were allocated,
 * otherwise -1 with errno = ENOMEM (or the underlying error).
 *
 * Fix: use correct printf length/sign specifiers — domid is uint32_t
 * (%u) and nr_extents is unsigned long (%lu); the old %d/%ld were
 * undefined behaviour for large values.
 */
int xc_domain_increase_reservation_exact(xc_interface *xch,
                                         uint32_t domid,
                                         unsigned long nr_extents,
                                         unsigned int extent_order,
                                         unsigned int mem_flags,
                                         xen_pfn_t *extent_start)
{
    int err;

    err = xc_domain_increase_reservation(xch, domid, nr_extents,
                                         extent_order, mem_flags, extent_start);

    if ( err == nr_extents )
        return 0;

    if ( err >= 0 )
    {
        /* Partial allocation: report and convert to a hard failure. */
        DPRINTF("Failed allocation for dom %u: "
                "%lu extents of order %u, mem_flags %x\n",
                domid, nr_extents, extent_order, mem_flags);
        errno = ENOMEM;
        err = -1;
    }

    return err;
}
940
/*
 * Ask Xen to free nr_extents extents of the given order from @domid.
 * 'extent_start' (required) names the extents to release.  Returns the
 * number of extents released or -1 on local failure.
 */
int xc_domain_decrease_reservation(xc_interface *xch,
                                   uint32_t domid,
                                   unsigned long nr_extents,
                                   unsigned int extent_order,
                                   xen_pfn_t *extent_start)
{
    int rc;
    struct xen_memory_reservation reservation = {};
    DECLARE_HYPERCALL_BOUNCE(extent_start, nr_extents * sizeof(*extent_start),
                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);

    if ( extent_start == NULL )
    {
        DPRINTF("decrease_reservation extent_start is NULL!\n");
        errno = EINVAL;
        return -1;
    }

    if ( xc_hypercall_bounce_pre(xch, extent_start) )
    {
        PERROR("Could not bounce memory for XENMEM_decrease_reservation hypercall");
        return -1;
    }

    reservation.nr_extents = nr_extents;
    reservation.extent_order = extent_order;
    reservation.mem_flags = 0;
    reservation.domid = domid;
    set_xen_guest_handle(reservation.extent_start, extent_start);

    rc = xc_memory_op(xch, XENMEM_decrease_reservation, &reservation,
                      sizeof(reservation));

    xc_hypercall_bounce_post(xch, extent_start);

    return rc;
}
976
/*
 * Like xc_domain_decrease_reservation() but treat partial success as
 * failure: returns 0 only if all nr_extents extents were released,
 * otherwise -1 with errno = EINVAL (or the underlying error).
 *
 * Fix: use correct printf specifiers — domid is uint32_t (%u) and
 * nr_extents is unsigned long (%lu); the old %d/%ld were undefined
 * behaviour for large values.
 */
int xc_domain_decrease_reservation_exact(xc_interface *xch,
                                         uint32_t domid,
                                         unsigned long nr_extents,
                                         unsigned int extent_order,
                                         xen_pfn_t *extent_start)
{
    int err;

    err = xc_domain_decrease_reservation(xch, domid, nr_extents,
                                         extent_order, extent_start);

    if ( err == nr_extents )
        return 0;

    if ( err >= 0 )
    {
        /* Partial release: report and convert to a hard failure. */
        DPRINTF("Failed deallocation for dom %u: %lu extents of order %u\n",
                domid, nr_extents, extent_order);
        errno = EINVAL;
        err = -1;
    }

    return err;
}
1001
/* Map one item (selected by 'space'/'idx') into @domid's physmap at @gpfn. */
int xc_domain_add_to_physmap(xc_interface *xch,
                             uint32_t domid,
                             unsigned int space,
                             unsigned long idx,
                             xen_pfn_t gpfn)
{
    struct xen_add_to_physmap xatp = {};

    xatp.domid = domid;
    xatp.space = space;
    xatp.idx = idx;
    xatp.gpfn = gpfn;

    return xc_memory_op(xch, XENMEM_add_to_physmap, &xatp, sizeof(xatp));
}
1016
/*
 * Batched XENMEM_add_to_physmap: map 'size' items (idxs[i] -> gpfns[i])
 * into @domid's physmap; per-item results land in errs[].  Returns the
 * memory-op result, or -1 on local failure.
 */
int xc_domain_add_to_physmap_batch(xc_interface *xch,
                                   uint32_t domid,
                                   uint32_t foreign_domid,
                                   unsigned int space,
                                   unsigned int size,
                                   xen_ulong_t *idxs,
                                   xen_pfn_t *gpfns,
                                   int *errs)
{
    int rc = -1;
    struct xen_add_to_physmap_batch xatp_batch = {
        .domid = domid,
        .space = space,
        .size = size,
        .u = { .foreign_domid = foreign_domid }
    };
    DECLARE_HYPERCALL_BOUNCE(idxs, size * sizeof(*idxs),
                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
    DECLARE_HYPERCALL_BOUNCE(gpfns, size * sizeof(*gpfns),
                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
    DECLARE_HYPERCALL_BOUNCE(errs, size * sizeof(*errs),
                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);

    if ( xc_hypercall_bounce_pre(xch, idxs) ||
         xc_hypercall_bounce_pre(xch, gpfns) ||
         xc_hypercall_bounce_pre(xch, errs) )
    {
        PERROR("Could not bounce memory for XENMEM_add_to_physmap_batch");
        goto done;
    }

    set_xen_guest_handle(xatp_batch.idxs, idxs);
    set_xen_guest_handle(xatp_batch.gpfns, gpfns);
    set_xen_guest_handle(xatp_batch.errs, errs);

    rc = xc_memory_op(xch, XENMEM_add_to_physmap_batch,
                      &xatp_batch, sizeof(xatp_batch));

 done:
    xc_hypercall_bounce_post(xch, idxs);
    xc_hypercall_bounce_post(xch, gpfns);
    xc_hypercall_bounce_post(xch, errs);

    return rc;
}
1061
/* Remove the mapping at @gpfn from @domid's physmap. */
int xc_domain_remove_from_physmap(xc_interface *xch,
                                  uint32_t domid,
                                  xen_pfn_t gpfn)
{
    struct xen_remove_from_physmap xrfp = {};

    xrfp.domid = domid;
    xrfp.gpfn = gpfn;

    return xc_memory_op(xch, XENMEM_remove_from_physmap, &xrfp, sizeof(xrfp));
}
1072
/*
 * Stake a claim of @nr_pages order-0 pages for @domid via
 * XENMEM_claim_pages.  A hypervisor without claim support (ENOSYS) is
 * deliberately treated as success.
 */
int xc_domain_claim_pages(xc_interface *xch,
                          uint32_t domid,
                          unsigned long nr_pages)
{
    int rc;
    struct xen_memory_reservation reservation;

    memset(&reservation, 0, sizeof(reservation));
    reservation.nr_extents = nr_pages;
    reservation.domid = domid;
    /* A claim carries no extent list. */
    set_xen_guest_handle(reservation.extent_start, HYPERCALL_BUFFER_NULL);

    rc = xc_memory_op(xch, XENMEM_claim_pages, &reservation, sizeof(reservation));

    /* Ignore it if the hypervisor does not support the call. */
    if ( rc == -1 && errno == ENOSYS )
        rc = errno = 0;

    return rc;
}
1093
/*
 * Allocate @nr_extents extents of order @extent_order for @domid at the
 * guest frames listed in @extent_start[]; Xen overwrites the array with
 * the frames actually populated (hence the BOUNCE_BOTH direction).
 *
 * Returns the xc_memory_op() result (number of extents populated, per the
 * XENMEM_populate_physmap convention) or -1 if bouncing fails.
 */
int xc_domain_populate_physmap(xc_interface *xch,
                               uint32_t domid,
                               unsigned long nr_extents,
                               unsigned int extent_order,
                               unsigned int mem_flags,
                               xen_pfn_t *extent_start)
{
    int err;
    DECLARE_HYPERCALL_BOUNCE(extent_start, nr_extents * sizeof(*extent_start), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    struct xen_memory_reservation reservation = {
        .nr_extents = nr_extents,
        .extent_order = extent_order,
        .mem_flags = mem_flags,
        .domid = domid
    };

    if ( xc_hypercall_bounce_pre(xch, extent_start) )
    {
        PERROR("Could not bounce memory for XENMEM_populate_physmap hypercall");
        return -1;
    }
    set_xen_guest_handle(reservation.extent_start, extent_start);

    err = xc_memory_op(xch, XENMEM_populate_physmap, &reservation, sizeof(reservation));

    xc_hypercall_bounce_post(xch, extent_start);
    return err;
}
1122
/*
 * All-or-nothing wrapper around xc_domain_populate_physmap(): succeed
 * (return 0) only if every one of the @nr_extents extents was populated.
 * A partial allocation is reported as -1 with errno = EBUSY; a hypercall
 * failure is passed through unchanged.
 */
int xc_domain_populate_physmap_exact(xc_interface *xch,
                                     uint32_t domid,
                                     unsigned long nr_extents,
                                     unsigned int extent_order,
                                     unsigned int mem_flags,
                                     xen_pfn_t *extent_start)
{
    int err;

    err = xc_domain_populate_physmap(xch, domid, nr_extents,
                                     extent_order, mem_flags, extent_start);
    if ( err == nr_extents )
        return 0;

    if ( err >= 0 )
    {
        /*
         * Format specifiers fixed to match the argument types:
         * domid is uint32_t, nr_extents unsigned long, extent_order
         * unsigned int (the old %d/%ld/%d combination was undefined
         * behaviour for the unsigned arguments).
         */
        DPRINTF("Failed allocation for dom %u: %lu extents of order %u\n",
                domid, nr_extents, extent_order);
        errno = EBUSY;
        err = -1;
    }

    return err;
}
1147
/*
 * XENMEM_exchange wrapper: trade @nr_in_extents extents of order @in_order
 * (frames in @in_extents[]) for @nr_out_extents extents of order
 * @out_order, with the replacement frames written to @out_extents[].
 *
 * Returns the xc_memory_op() result, or -1 if bouncing either array fails
 * (no PERROR is emitted on that path; errno comes from the bounce layer).
 */
int xc_domain_memory_exchange_pages(xc_interface *xch,
                                    uint32_t domid,
                                    unsigned long nr_in_extents,
                                    unsigned int in_order,
                                    xen_pfn_t *in_extents,
                                    unsigned long nr_out_extents,
                                    unsigned int out_order,
                                    xen_pfn_t *out_extents)
{
    int rc = -1;
    DECLARE_HYPERCALL_BOUNCE(in_extents, nr_in_extents*sizeof(*in_extents), XC_HYPERCALL_BUFFER_BOUNCE_IN);
    DECLARE_HYPERCALL_BOUNCE(out_extents, nr_out_extents*sizeof(*out_extents), XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    struct xen_memory_exchange exchange = {
        .in = {
            .nr_extents = nr_in_extents,
            .extent_order = in_order,
            .domid = domid
        },
        .out = {
            .nr_extents = nr_out_extents,
            .extent_order = out_order,
            .domid = domid
        }
    };

    if ( xc_hypercall_bounce_pre(xch, in_extents) ||
         xc_hypercall_bounce_pre(xch, out_extents))
        goto out;

    set_xen_guest_handle(exchange.in.extent_start, in_extents);
    set_xen_guest_handle(exchange.out.extent_start, out_extents);

    rc = xc_memory_op(xch, XENMEM_exchange, &exchange, sizeof(exchange));

out:
    /* Safe even when the corresponding bounce_pre failed or was skipped. */
    xc_hypercall_bounce_post(xch, in_extents);
    xc_hypercall_bounce_post(xch, out_extents);

    return rc;
}
1188
1189 /* Currently only implemented on x86. This cannot be handled in the
1190 * caller, e.g. by looking for errno==ENOSYS because of the broken
1191 * error reporting style. Once this is fixed then this condition can
1192 * be removed.
1193 */
1194 #if defined(__i386__)||defined(__x86_64__)
/*
 * Common implementation for get/set of the populate-on-demand target.
 * @op is XENMEM_set_pod_target or XENMEM_get_pod_target; for "get",
 * @target_pages is ignored by convention (callers pass -1).
 *
 * The three optional out-parameters are filled from the hypercall result
 * structure even on failure (they will then hold whatever Xen left in the
 * struct — callers should check the return value first).
 *
 * Returns 0 on success, the negative xc_memory_op() result on failure.
 */
static int xc_domain_pod_target(xc_interface *xch,
                                int op,
                                uint32_t domid,
                                uint64_t target_pages,
                                uint64_t *tot_pages,
                                uint64_t *pod_cache_pages,
                                uint64_t *pod_entries)
{
    int err;

    struct xen_pod_target pod_target = {
        .domid = domid,
        .target_pages = target_pages
    };

    err = xc_memory_op(xch, op, &pod_target, sizeof(pod_target));

    if ( err < 0 )
        DPRINTF("Failed %s_pod_target dom %d\n",
                (op==XENMEM_set_pod_target)?"set":"get",
                domid);
    else
        err = 0;

    /* Copy out only the fields the caller asked for. */
    if ( tot_pages )
        *tot_pages = pod_target.tot_pages;
    if ( pod_cache_pages )
        *pod_cache_pages = pod_target.pod_cache_pages;
    if ( pod_entries )
        *pod_entries = pod_target.pod_entries;

    return err;
}
1228
1229
/* Set @domid's populate-on-demand target to @target_pages (x86 only). */
int xc_domain_set_pod_target(xc_interface *xch,
                             uint32_t domid,
                             uint64_t target_pages,
                             uint64_t *tot_pages,
                             uint64_t *pod_cache_pages,
                             uint64_t *pod_entries)
{
    return xc_domain_pod_target(xch, XENMEM_set_pod_target, domid,
                                target_pages, tot_pages, pod_cache_pages,
                                pod_entries);
}
1245
/* Query @domid's populate-on-demand state (x86 only). */
int xc_domain_get_pod_target(xc_interface *xch,
                             uint32_t domid,
                             uint64_t *tot_pages,
                             uint64_t *pod_cache_pages,
                             uint64_t *pod_entries)
{
    /* target_pages is not meaningful for a "get"; pass -1 by convention. */
    return xc_domain_pod_target(xch, XENMEM_get_pod_target, domid, -1,
                                tot_pages, pod_cache_pages, pod_entries);
}
1260 #else
/*
 * Non-x86 stub: PoD does not exist here, so "set" is a harmless no-op
 * and reports success (unlike the "get" stub below, which must fail
 * because it has no data to return).
 */
int xc_domain_set_pod_target(xc_interface *xch,
                             uint32_t domid,
                             uint64_t target_pages,
                             uint64_t *tot_pages,
                             uint64_t *pod_cache_pages,
                             uint64_t *pod_entries)
{
    return 0;
}
/*
 * Non-x86 stub: there is no PoD state to report, so fail with
 * EOPNOTSUPP rather than fabricate values.
 */
int xc_domain_get_pod_target(xc_interface *xch,
                             uint32_t domid,
                             uint64_t *tot_pages,
                             uint64_t *pod_cache_pages,
                             uint64_t *pod_entries)
{
    errno = EOPNOTSUPP;
    return -1;
}
1279 #endif
1280
/* Set the maximum number of vCPUs for @domid. */
int xc_domain_max_vcpus(xc_interface *xch, uint32_t domid, unsigned int max)
{
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_max_vcpus,
        .domain = domid,
        .u.max_vcpus.max = max,
    };

    return do_domctl(xch, &domctl);
}
1289
/* Store the opaque toolstack @handle (UUID) for @domid. */
int xc_domain_sethandle(xc_interface *xch, uint32_t domid,
                        xen_domain_handle_t handle)
{
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_setdomainhandle,
        .domain = domid,
    };

    memcpy(domctl.u.setdomainhandle.handle, handle,
           sizeof(xen_domain_handle_t));

    return do_domctl(xch, &domctl);
}
1300
/*
 * Fetch run-state information for @vcpu of @domid into *@info.
 *
 * Note: *@info is overwritten from the domctl result buffer even when
 * do_domctl() fails, so callers must check the return value before
 * trusting its contents.  The vcpu number is truncated to 16 bits to
 * match the domctl field.
 */
int xc_vcpu_getinfo(xc_interface *xch,
                    uint32_t domid,
                    uint32_t vcpu,
                    xc_vcpuinfo_t *info)
{
    int rc;
    struct xen_domctl domctl = {};

    domctl.cmd = XEN_DOMCTL_getvcpuinfo;
    domctl.domain = domid;
    domctl.u.getvcpuinfo.vcpu = (uint16_t)vcpu;

    rc = do_domctl(xch, &domctl);

    memcpy(info, &domctl.u.getvcpuinfo, sizeof(*info));

    return rc;
}
1319
/*
 * Grant (@allow_access != 0) or revoke access to the I/O-port range
 * [@first_port, @first_port + @nr_ports) for @domid.
 */
int xc_domain_ioport_permission(xc_interface *xch,
                                uint32_t domid,
                                uint32_t first_port,
                                uint32_t nr_ports,
                                uint32_t allow_access)
{
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_ioport_permission,
        .domain = domid,
        .u.ioport_permission.first_port = first_port,
        .u.ioport_permission.nr_ports = nr_ports,
        .u.ioport_permission.allow_access = allow_access,
    };

    return do_domctl(xch, &domctl);
}
1336
/*
 * Query the amount of free heap memory (in bytes) available to
 * allocations of address width [@min_width, @max_width] on @node.
 * Note: *@bytes is written even when the sysctl fails, matching the
 * long-standing behaviour of this wrapper.
 */
int xc_availheap(xc_interface *xch,
                 int min_width,
                 int max_width,
                 int node,
                 uint64_t *bytes)
{
    int rc;
    struct xen_sysctl sysctl = {
        .cmd = XEN_SYSCTL_availheap,
        .u.availheap.min_bitwidth = min_width,
        .u.availheap.max_bitwidth = max_width,
        .u.availheap.node = node,
    };

    rc = xc_sysctl(xch, &sysctl);

    *bytes = sysctl.u.availheap.avail_bytes;

    return rc;
}
1357
/*
 * Install *@ctxt as the register/context state of @vcpu in @domid.
 * The context is bounced into hypercall-safe memory (input only).
 *
 * Returns the do_domctl() result, or -1 if bouncing fails.
 */
int xc_vcpu_setcontext(xc_interface *xch,
                       uint32_t domid,
                       uint32_t vcpu,
                       vcpu_guest_context_any_t *ctxt)
{
    struct xen_domctl domctl = {};
    DECLARE_HYPERCALL_BOUNCE(ctxt, sizeof(vcpu_guest_context_any_t), XC_HYPERCALL_BUFFER_BOUNCE_IN);
    int rc;

    if ( xc_hypercall_bounce_pre(xch, ctxt) )
        return -1;

    domctl.cmd = XEN_DOMCTL_setvcpucontext;
    domctl.domain = domid;
    domctl.u.vcpucontext.vcpu = vcpu;
    set_xen_guest_handle(domctl.u.vcpucontext.ctxt, ctxt);

    rc = do_domctl(xch, &domctl);

    xc_hypercall_bounce_post(xch, ctxt);

    return rc;
}
1381
/* Grant or revoke @domid's access to physical IRQ @pirq. */
int xc_domain_irq_permission(xc_interface *xch,
                             uint32_t domid,
                             uint32_t pirq,
                             bool allow_access)
{
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_irq_permission,
        .domain = domid,
        .u.irq_permission.pirq = pirq,
        .u.irq_permission.allow_access = allow_access,
    };

    return do_domctl(xch, &domctl);
}
1396
/* Adjust @domid's access to @gsi according to @flags. */
int xc_domain_gsi_permission(xc_interface *xch,
                             uint32_t domid,
                             uint32_t gsi,
                             uint32_t flags)
{
    struct xen_domctl domctl = {};

    domctl.cmd = XEN_DOMCTL_gsi_permission;
    domctl.domain = domid;
    domctl.u.gsi_permission.gsi = gsi;
    domctl.u.gsi_permission.flags = flags;

    return do_domctl(xch, &domctl);
}
1411
/*
 * Grant (@allow_access != 0) or revoke @domid's access to the machine
 * frame range [@first_mfn, @first_mfn + @nr_mfns).
 */
int xc_domain_iomem_permission(xc_interface *xch,
                               uint32_t domid,
                               unsigned long first_mfn,
                               unsigned long nr_mfns,
                               uint8_t allow_access)
{
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_iomem_permission,
        .domain = domid,
        .u.iomem_permission.first_mfn = first_mfn,
        .u.iomem_permission.nr_mfns = nr_mfns,
        .u.iomem_permission.allow_access = allow_access,
    };

    return do_domctl(xch, &domctl);
}
1428
/* Deliver trigger @trigger (XEN_DOMCTL_SENDTRIGGER_*) to @vcpu of @domid. */
int xc_domain_send_trigger(xc_interface *xch,
                           uint32_t domid,
                           uint32_t trigger,
                           uint32_t vcpu)
{
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_sendtrigger,
        .domain = domid,
        .u.sendtrigger.trigger = trigger,
        .u.sendtrigger.vcpu = vcpu,
    };

    return do_domctl(xch, &domctl);
}
1443
/*
 * Set HVM parameter @param of domain @dom to @value via
 * HVMOP_set_param.  The argument structure must live in hypercall-safe
 * memory, hence the explicit buffer allocation.
 *
 * Returns the xencall2() result, or -1 if the buffer allocation fails.
 */
int xc_hvm_param_set(xc_interface *handle, uint32_t dom, uint32_t param, uint64_t value)
{
    DECLARE_HYPERCALL_BUFFER(xen_hvm_param_t, arg);
    int rc;

    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
    if ( arg == NULL )
        return -1;

    arg->domid = dom;
    arg->index = param;
    arg->value = value;
    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op,
                  HVMOP_set_param,
                  HYPERCALL_BUFFER_AS_ARG(arg));
    xc_hypercall_buffer_free(handle, arg);
    return rc;
}
1462
/*
 * Read HVM parameter @param of domain @dom into *@value via
 * HVMOP_get_param.
 *
 * Returns the xencall2() result, or -1 if the hypercall buffer cannot
 * be allocated.  *@value is only written on success; previously it was
 * unconditionally overwritten with the (indeterminate) buffer contents
 * even when the hypercall failed.
 */
int xc_hvm_param_get(xc_interface *handle, uint32_t dom, uint32_t param, uint64_t *value)
{
    DECLARE_HYPERCALL_BUFFER(xen_hvm_param_t, arg);
    int rc;

    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
    if ( arg == NULL )
        return -1;

    arg->domid = dom;
    arg->index = param;
    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op,
                  HVMOP_get_param,
                  HYPERCALL_BUFFER_AS_ARG(arg));
    if ( rc == 0 )
        *value = arg->value;
    xc_hypercall_buffer_free(handle, arg);
    return rc;
}
1481
/* Legacy wrapper around xc_hvm_param_set() (int/unsigned long types). */
int xc_set_hvm_param(xc_interface *handle, uint32_t dom, int param, unsigned long value)
{
    return xc_hvm_param_set(handle, dom, param, value);
}
1486
/*
 * Legacy wrapper around xc_hvm_param_get().  Note the narrowing from
 * uint64_t to unsigned long, which can truncate on 32-bit builds.
 */
int xc_get_hvm_param(xc_interface *handle, uint32_t dom, int param, unsigned long *value)
{
    uint64_t wide;
    int rc = xc_hvm_param_get(handle, dom, param, &wide);

    if ( rc < 0 )
        return rc;

    *value = wide;
    return 0;
}
1498
/* Turn the hypervisor's debugging facilities for @domid on or off. */
int xc_domain_setdebugging(xc_interface *xch,
                           uint32_t domid,
                           unsigned int enable)
{
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_setdebugging,
        .domain = domid,
        .u.setdebugging.enable = enable,
    };

    return do_domctl(xch, &domctl);
}
1510
/*
 * Assign the PCI device identified by @machine_sbdf (segment/bus/dev/fn
 * encoding) to @domid, with assignment @flags.
 */
int xc_assign_device(
    xc_interface *xch,
    uint32_t domid,
    uint32_t machine_sbdf,
    uint32_t flags)
{
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_assign_device,
        .domain = domid,
        .u.assign_device.dev = XEN_DOMCTL_DEV_PCI,
        .u.assign_device.u.pci.machine_sbdf = machine_sbdf,
        .u.assign_device.flags = flags,
    };

    return do_domctl(xch, &domctl);
}
1527
/*
 * Retrieve the device group (co-assignment set) of PCI device
 * @machine_sbdf.  Up to @max_sdevs SBDFs are written to @sdev_array[]
 * and the actual count to *@num_sdevs.
 *
 * Note: *@num_sdevs is copied from the domctl result even when
 * do_domctl() fails — check the return value before using it.
 *
 * Returns the do_domctl() result, or -1 if bouncing @sdev_array fails.
 */
int xc_get_device_group(
    xc_interface *xch,
    uint32_t domid,
    uint32_t machine_sbdf,
    uint32_t max_sdevs,
    uint32_t *num_sdevs,
    uint32_t *sdev_array)
{
    int rc;
    struct xen_domctl domctl = {};
    DECLARE_HYPERCALL_BOUNCE(sdev_array, max_sdevs * sizeof(*sdev_array),
                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);

    if ( xc_hypercall_bounce_pre(xch, sdev_array) )
    {
        PERROR("Could not bounce buffer for xc_get_device_group");
        return -1;
    }

    domctl.cmd = XEN_DOMCTL_get_device_group;
    domctl.domain = domid;

    domctl.u.get_device_group.machine_sbdf = machine_sbdf;
    domctl.u.get_device_group.max_sdevs = max_sdevs;

    set_xen_guest_handle(domctl.u.get_device_group.sdev_array, sdev_array);

    rc = do_domctl(xch, &domctl);

    *num_sdevs = domctl.u.get_device_group.num_sdevs;

    xc_hypercall_bounce_post(xch, sdev_array);

    return rc;
}
1563
/*
 * Check whether PCI device @machine_sbdf could be assigned to @domid
 * without actually performing the assignment.
 */
int xc_test_assign_device(
    xc_interface *xch,
    uint32_t domid,
    uint32_t machine_sbdf)
{
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_test_assign_device,
        .domain = domid,
        .u.assign_device.dev = XEN_DOMCTL_DEV_PCI,
        .u.assign_device.u.pci.machine_sbdf = machine_sbdf,
        .u.assign_device.flags = 0,
    };

    return do_domctl(xch, &domctl);
}
1579
/* Remove PCI device @machine_sbdf from @domid. */
int xc_deassign_device(
    xc_interface *xch,
    uint32_t domid,
    uint32_t machine_sbdf)
{
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_deassign_device,
        .domain = domid,
        .u.assign_device.dev = XEN_DOMCTL_DEV_PCI,
        .u.assign_device.u.pci.machine_sbdf = machine_sbdf,
        .u.assign_device.flags = 0,
    };

    return do_domctl(xch, &domctl);
}
1595
/*
 * Assign the device-tree device named by @path to @domid.  The path is
 * passed to Xen with an explicit length (strlen, no trailing NUL).
 *
 * Returns the do_domctl() result, or -1 if bouncing @path fails.
 */
int xc_assign_dt_device(
    xc_interface *xch,
    uint32_t domid,
    char *path)
{
    int rc;
    size_t size = strlen(path);
    struct xen_domctl domctl = {};
    DECLARE_HYPERCALL_BOUNCE(path, size, XC_HYPERCALL_BUFFER_BOUNCE_IN);

    if ( xc_hypercall_bounce_pre(xch, path) )
        return -1;

    domctl.cmd = XEN_DOMCTL_assign_device;
    domctl.domain = domid;

    domctl.u.assign_device.dev = XEN_DOMCTL_DEV_DT;
    domctl.u.assign_device.u.dt.size = size;
    /*
     * DT doesn't own any RDM so actually DT has nothing to do
     * for any flag and here just fix that as 0.
     */
    domctl.u.assign_device.flags = 0;
    set_xen_guest_handle(domctl.u.assign_device.u.dt.path, path);

    rc = do_domctl(xch, &domctl);

    xc_hypercall_bounce_post(xch, path);

    return rc;
}
1627
/*
 * Check whether the device-tree device named by @path could be assigned
 * to @domid, without performing the assignment.
 *
 * Returns the do_domctl() result, or -1 if bouncing @path fails.
 */
int xc_test_assign_dt_device(
    xc_interface *xch,
    uint32_t domid,
    char *path)
{
    int rc;
    size_t size = strlen(path);  /* length excludes the NUL; Xen gets it explicitly */
    struct xen_domctl domctl = {};
    DECLARE_HYPERCALL_BOUNCE(path, size, XC_HYPERCALL_BUFFER_BOUNCE_IN);

    if ( xc_hypercall_bounce_pre(xch, path) )
        return -1;

    domctl.cmd = XEN_DOMCTL_test_assign_device;
    domctl.domain = domid;

    domctl.u.assign_device.dev = XEN_DOMCTL_DEV_DT;
    domctl.u.assign_device.u.dt.size = size;
    set_xen_guest_handle(domctl.u.assign_device.u.dt.path, path);
    domctl.u.assign_device.flags = 0;

    rc = do_domctl(xch, &domctl);

    xc_hypercall_bounce_post(xch, path);

    return rc;
}
1655
/*
 * Remove the device-tree device named by @path from @domid.
 *
 * Returns the do_domctl() result, or -1 if bouncing @path fails.
 */
int xc_deassign_dt_device(
    xc_interface *xch,
    uint32_t domid,
    char *path)
{
    int rc;
    size_t size = strlen(path);  /* length excludes the NUL; Xen gets it explicitly */
    struct xen_domctl domctl = {};
    DECLARE_HYPERCALL_BOUNCE(path, size, XC_HYPERCALL_BUFFER_BOUNCE_IN);

    if ( xc_hypercall_bounce_pre(xch, path) )
        return -1;

    domctl.cmd = XEN_DOMCTL_deassign_device;
    domctl.domain = domid;

    domctl.u.assign_device.dev = XEN_DOMCTL_DEV_DT;
    domctl.u.assign_device.u.dt.size = size;
    set_xen_guest_handle(domctl.u.assign_device.u.dt.path, path);
    domctl.u.assign_device.flags = 0;

    rc = do_domctl(xch, &domctl);

    xc_hypercall_bounce_post(xch, path);

    return rc;
}
1683
1684
1685
1686
/*
 * (Re)bind machine IRQ @pirq to @domid as an MSI with guest vector
 * @gvec, flags @gflags and interrupt-remapping table address @gtable.
 */
int xc_domain_update_msi_irq(
    xc_interface *xch,
    uint32_t domid,
    uint32_t gvec,
    uint32_t pirq,
    uint32_t gflags,
    uint64_t gtable)
{
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_bind_pt_irq,
        .domain = domid,
        .u.bind_pt_irq = {
            .irq_type = PT_IRQ_TYPE_MSI,
            .machine_irq = pirq,
            .u.msi.gvec = gvec,
            .u.msi.gflags = gflags,
            .u.msi.gtable = gtable,
        },
    };

    return do_domctl(xch, &domctl);
}
1712
/* Undo an MSI binding of machine IRQ @pirq for @domid. */
int xc_domain_unbind_msi_irq(
    xc_interface *xch,
    uint32_t domid,
    uint32_t gvec,
    uint32_t pirq,
    uint32_t gflags)
{
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_unbind_pt_irq,
        .domain = domid,
        .u.bind_pt_irq = {
            .irq_type = PT_IRQ_TYPE_MSI,
            .machine_irq = pirq,
            .u.msi.gvec = gvec,
            .u.msi.gflags = gflags,
        },
    };

    return do_domctl(xch, &domctl);
}
1736
1737 /* Pass-through: binds machine irq to guests irq */
/*
 * Common worker for binding a machine IRQ to a guest.  Only the union
 * fields relevant to @irq_type are filled:
 *   PCI / MSI_TRANSLATE: @bus/@device/@intx
 *   ISA:                 @isa_irq
 *   SPI:                 @spi
 * Other types fail with EINVAL without issuing a hypercall.
 */
static int xc_domain_bind_pt_irq_int(
    xc_interface *xch,
    uint32_t domid,
    uint32_t machine_irq,
    uint8_t irq_type,
    uint8_t bus,
    uint8_t device,
    uint8_t intx,
    uint8_t isa_irq,
    uint16_t spi)
{
    int rc;
    struct xen_domctl_bind_pt_irq *bind;
    struct xen_domctl domctl = {};

    domctl.cmd = XEN_DOMCTL_bind_pt_irq;
    domctl.domain = domid;

    bind = &(domctl.u.bind_pt_irq);
    bind->irq_type = irq_type;
    bind->machine_irq = machine_irq;
    switch ( irq_type )
    {
    case PT_IRQ_TYPE_PCI:
    case PT_IRQ_TYPE_MSI_TRANSLATE:
        bind->u.pci.bus = bus;
        bind->u.pci.device = device;
        bind->u.pci.intx = intx;
        break;
    case PT_IRQ_TYPE_ISA:
        bind->u.isa.isa_irq = isa_irq;
        break;
    case PT_IRQ_TYPE_SPI:
        bind->u.spi.spi = spi;
        break;
    default:
        errno = EINVAL;
        return -1;
    }

    rc = do_domctl(xch, &domctl);
    return rc;
}
1781
/* Public bind entry point for non-SPI IRQ types (spi forced to 0). */
int xc_domain_bind_pt_irq(
    xc_interface *xch,
    uint32_t domid,
    uint8_t machine_irq,
    uint8_t irq_type,
    uint8_t bus,
    uint8_t device,
    uint8_t intx,
    uint8_t isa_irq)
{
    return xc_domain_bind_pt_irq_int(xch, domid, machine_irq, irq_type, bus,
                                     device, intx, isa_irq, /* spi */ 0);
}
1795
/*
 * Common worker for unbinding a machine IRQ from a guest; mirrors
 * xc_domain_bind_pt_irq_int() — only the union fields relevant to
 * @irq_type are filled, and unknown types fail with EINVAL without
 * issuing a hypercall.
 */
static int xc_domain_unbind_pt_irq_int(
    xc_interface *xch,
    uint32_t domid,
    uint32_t machine_irq,
    uint8_t irq_type,
    uint8_t bus,
    uint8_t device,
    uint8_t intx,
    uint8_t isa_irq,
    uint8_t spi)
{
    int rc;
    struct xen_domctl_bind_pt_irq *bind;
    struct xen_domctl domctl = {};

    domctl.cmd = XEN_DOMCTL_unbind_pt_irq;
    domctl.domain = domid;

    bind = &(domctl.u.bind_pt_irq);
    bind->irq_type = irq_type;
    bind->machine_irq = machine_irq;
    switch ( irq_type )
    {
    case PT_IRQ_TYPE_PCI:
    case PT_IRQ_TYPE_MSI_TRANSLATE:
        bind->u.pci.bus = bus;
        bind->u.pci.device = device;
        bind->u.pci.intx = intx;
        break;
    case PT_IRQ_TYPE_ISA:
        bind->u.isa.isa_irq = isa_irq;
        break;
    case PT_IRQ_TYPE_SPI:
        bind->u.spi.spi = spi;
        break;
    default:
        errno = EINVAL;
        return -1;
    }

    rc = do_domctl(xch, &domctl);
    return rc;
}
1839
/* Public unbind entry point for non-SPI IRQ types (spi forced to 0). */
int xc_domain_unbind_pt_irq(
    xc_interface *xch,
    uint32_t domid,
    uint8_t machine_irq,
    uint8_t irq_type,
    uint8_t bus,
    uint8_t device,
    uint8_t intx,
    uint8_t isa_irq)
{
    return xc_domain_unbind_pt_irq_int(xch, domid, machine_irq, irq_type,
                                       bus, device, intx, isa_irq,
                                       /* spi */ 0);
}
1853
/* Convenience wrapper: bind @machine_irq as a PCI INTx interrupt. */
int xc_domain_bind_pt_pci_irq(
    xc_interface *xch,
    uint32_t domid,
    uint8_t machine_irq,
    uint8_t bus,
    uint8_t device,
    uint8_t intx)
{
    return xc_domain_bind_pt_irq(xch, domid, machine_irq, PT_IRQ_TYPE_PCI,
                                 bus, device, intx, /* isa_irq */ 0);
}
1866
/*
 * Convenience wrapper: bind @machine_irq as an ISA interrupt.  The same
 * number is used for both the machine IRQ and the guest ISA IRQ.
 */
int xc_domain_bind_pt_isa_irq(
    xc_interface *xch,
    uint32_t domid,
    uint8_t machine_irq)
{
    return xc_domain_bind_pt_irq(xch, domid, machine_irq, PT_IRQ_TYPE_ISA,
                                 0, 0, 0, machine_irq);
}
1876
/* Convenience wrapper: bind machine SPI @vspi to guest SPI @spi. */
int xc_domain_bind_pt_spi_irq(
    xc_interface *xch,
    uint32_t domid,
    uint16_t vspi,
    uint16_t spi)
{
    return xc_domain_bind_pt_irq_int(xch, domid, vspi, PT_IRQ_TYPE_SPI,
                                     0, 0, 0, 0, spi);
}
1886
/* Convenience wrapper: undo a SPI binding made by xc_domain_bind_pt_spi_irq(). */
int xc_domain_unbind_pt_spi_irq(xc_interface *xch,
                                uint32_t domid,
                                uint16_t vspi,
                                uint16_t spi)
{
    return xc_domain_unbind_pt_irq_int(xch, domid, vspi, PT_IRQ_TYPE_SPI,
                                       0, 0, 0, 0, spi);
}
1895
/*
 * Map (or unmap, per @add_mapping) @nr_mfns machine frames starting at
 * @first_mfn into @domid's physmap at @first_gfn, for auto-translated
 * guests only (a no-op returning 0 otherwise).
 *
 * The work is submitted in batches: if Xen reports E2BIG the batch size
 * is halved and retried; a positive return from do_domctl() indicates a
 * partial completion (continuation) and advances the cursor.  On a
 * failed add, the partially-established range is rolled back by a
 * recursive unmap whose errors are ignored.
 *
 * Returns 0 on success, the first error encountered, or -1 if E2BIG
 * prevented any progress at all or domain info could not be fetched.
 */
int xc_domain_memory_mapping(
    xc_interface *xch,
    uint32_t domid,
    unsigned long first_gfn,
    unsigned long first_mfn,
    unsigned long nr_mfns,
    uint32_t add_mapping)
{
    struct xen_domctl domctl = {};
    xc_domaininfo_t info;
    int ret = 0, rc;
    unsigned long done = 0, nr, max_batch_sz;

    if ( xc_domain_getinfo_single(xch, domid, &info) < 0 )
    {
        PERROR("Could not get info for dom%u", domid);
        return -1;
    }
    /* Non-auto-translated guests manage their own p2m; nothing to do. */
    if ( !xc_core_arch_auto_translated_physmap(&info) )
        return 0;

    if ( !nr_mfns )
        return 0;

    domctl.cmd = XEN_DOMCTL_memory_mapping;
    domctl.domain = domid;
    domctl.u.memory_mapping.add_mapping = add_mapping;
    max_batch_sz = nr_mfns;
    do
    {
        nr = min_t(unsigned long, nr_mfns - done, max_batch_sz);
        domctl.u.memory_mapping.nr_mfns = nr;
        domctl.u.memory_mapping.first_gfn = first_gfn + done;
        domctl.u.memory_mapping.first_mfn = first_mfn + done;
        rc = do_domctl(xch, &domctl);
        if ( rc < 0 && errno == E2BIG )
        {
            /* Batch too large for one hypercall: halve and retry. */
            if ( max_batch_sz <= 1 )
                break;
            max_batch_sz >>= 1;
            continue;
        }
        if ( rc > 0 )
        {
            /* Continuation: rc frames were processed; resume after them. */
            done += rc;
            continue;
        }
        /* Save the first error... */
        if ( !ret )
            ret = rc;
        /* .. and ignore the rest of them when removing. */
        if ( rc && add_mapping != DPCI_REMOVE_MAPPING )
            break;

        done += nr;
    } while ( done < nr_mfns );

    /*
     * Undo what we have done unless unmapping, by unmapping the entire region.
     * Errors here are ignored.
     */
    if ( ret && add_mapping != DPCI_REMOVE_MAPPING )
        xc_domain_memory_mapping(xch, domid, first_gfn, first_mfn, nr_mfns,
                                 DPCI_REMOVE_MAPPING);

    /* We might get E2BIG so many times that we never advance. */
    if ( !done && !ret )
        ret = -1;

    return ret;
}
1967
/*
 * Map (or unmap, per @add_mapping) @nr_ports machine I/O ports starting
 * at @first_mport to guest ports starting at @first_gport.
 */
int xc_domain_ioport_mapping(
    xc_interface *xch,
    uint32_t domid,
    uint32_t first_gport,
    uint32_t first_mport,
    uint32_t nr_ports,
    uint32_t add_mapping)
{
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_ioport_mapping,
        .domain = domid,
        .u.ioport_mapping.first_gport = first_gport,
        .u.ioport_mapping.first_mport = first_mport,
        .u.ioport_mapping.nr_ports = nr_ports,
        .u.ioport_mapping.add_mapping = add_mapping,
    };

    return do_domctl(xch, &domctl);
}
1987
/* Make @domid a device-model/stubdomain servicing domain @target. */
int xc_domain_set_target(
    xc_interface *xch,
    uint32_t domid,
    uint32_t target)
{
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_set_target,
        .domain = domid,
        .u.set_target.target = target,
    };

    return do_domctl(xch, &domctl);
}
2001
/* Register event-channel @port to be notified when @dom suspends. */
int xc_domain_subscribe_for_suspend(
    xc_interface *xch, uint32_t dom, evtchn_port_t port)
{
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_subscribe,
        .domain = dom,
        .u.subscribe.port = port,
    };

    return do_domctl(xch, &domctl);
}
2013
/*
 * Invoke debug operation @sop (XEN_DOMCTL_DEBUG_OP_*) on @vcpu of
 * @domid.  Returns the do_domctl() result.
 */
int xc_domain_debug_control(xc_interface *xc, uint32_t domid, uint32_t sop, uint32_t vcpu)
{
    /*
     * The initializer already zeroes the struct; the explicit memset()
     * that used to follow it was redundant and has been dropped.
     */
    struct xen_domctl domctl = {};

    domctl.domain = domid;
    domctl.cmd = XEN_DOMCTL_debug_op;
    domctl.u.debug_op.op = sop;
    domctl.u.debug_op.vcpu = vcpu;

    return do_domctl(xc, &domctl);
}
2026
/*
 * Run Xen's p2m/m2p consistency audit for @domid, reporting the number
 * of orphaned pages and bad m2p/p2m entries.
 *
 * Note: all three out-parameters are written from the domctl result
 * even when do_domctl() fails — callers must check the return value.
 */
int xc_domain_p2m_audit(xc_interface *xch,
                        uint32_t domid,
                        uint64_t *orphans,
                        uint64_t *m2p_bad,
                        uint64_t *p2m_bad)
{
    struct xen_domctl domctl = {};
    int rc;

    domctl.cmd = XEN_DOMCTL_audit_p2m;
    domctl.domain = domid;
    rc = do_domctl(xch, &domctl);

    *orphans = domctl.u.audit_p2m.orphans;
    *m2p_bad = domctl.u.audit_p2m.m2p_bad;
    *p2m_bad = domctl.u.audit_p2m.p2m_bad;

    return rc;
}
2046
/* Toggle whether memory-access events for @domid require a listener. */
int xc_domain_set_access_required(xc_interface *xch,
                                  uint32_t domid,
                                  unsigned int required)
{
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_set_access_required,
        .domain = domid,
        .u.access_required.access_required = required,
    };

    return do_domctl(xch, &domctl);
}
2058
/* Nominate @domid as the handler of global virtual IRQ @virq. */
int xc_domain_set_virq_handler(xc_interface *xch, uint32_t domid, int virq)
{
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_set_virq_handler,
        .domain = domid,
        .u.set_virq_handler.virq = virq,
    };

    return do_domctl(xch, &domctl);
}
2068
2069 /* Plumbing Xen with vNUMA topology */
/*
 * Push a complete vNUMA topology for @domid into Xen:
 *   @vmemrange[nr_vmemranges]            — virtual memory ranges
 *   @vdistance[nr_vnodes * nr_vnodes]    — node-distance matrix
 *   @vcpu_to_vnode[nr_vcpus]             — vCPU placement
 *   @vnode_to_pnode[nr_vnodes]           — virtual-to-physical node map
 *
 * Returns the do_domctl() result; -1 with errno = EINVAL for zero
 * counts or NULL arrays (errno is preset below so both early-return
 * paths report EINVAL), or -1 if bouncing any array fails.
 */
int xc_domain_setvnuma(xc_interface *xch,
                       uint32_t domid,
                       uint32_t nr_vnodes,
                       uint32_t nr_vmemranges,
                       uint32_t nr_vcpus,
                       xen_vmemrange_t *vmemrange,
                       unsigned int *vdistance,
                       unsigned int *vcpu_to_vnode,
                       unsigned int *vnode_to_pnode)
{
    int rc;
    struct xen_domctl domctl = {};
    DECLARE_HYPERCALL_BOUNCE(vmemrange, sizeof(*vmemrange) * nr_vmemranges,
                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    DECLARE_HYPERCALL_BOUNCE(vdistance, sizeof(*vdistance) *
                             nr_vnodes * nr_vnodes,
                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    DECLARE_HYPERCALL_BOUNCE(vcpu_to_vnode, sizeof(*vcpu_to_vnode) * nr_vcpus,
                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    DECLARE_HYPERCALL_BOUNCE(vnode_to_pnode, sizeof(*vnode_to_pnode) *
                             nr_vnodes,
                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    /* Preset so that the validation early-returns below report EINVAL. */
    errno = EINVAL;

    if ( nr_vnodes == 0 || nr_vmemranges == 0 || nr_vcpus == 0 )
        return -1;

    if ( !vdistance || !vcpu_to_vnode || !vmemrange || !vnode_to_pnode )
    {
        PERROR("%s: Cant set vnuma without initializing topology", __func__);
        return -1;
    }

    if ( xc_hypercall_bounce_pre(xch, vmemrange) ||
         xc_hypercall_bounce_pre(xch, vdistance) ||
         xc_hypercall_bounce_pre(xch, vcpu_to_vnode) ||
         xc_hypercall_bounce_pre(xch, vnode_to_pnode) )
    {
        rc = -1;
        goto vnumaset_fail;

    }

    set_xen_guest_handle(domctl.u.vnuma.vmemrange, vmemrange);
    set_xen_guest_handle(domctl.u.vnuma.vdistance, vdistance);
    set_xen_guest_handle(domctl.u.vnuma.vcpu_to_vnode, vcpu_to_vnode);
    set_xen_guest_handle(domctl.u.vnuma.vnode_to_pnode, vnode_to_pnode);

    domctl.cmd = XEN_DOMCTL_setvnumainfo;
    domctl.domain = domid;
    domctl.u.vnuma.nr_vnodes = nr_vnodes;
    domctl.u.vnuma.nr_vmemranges = nr_vmemranges;
    domctl.u.vnuma.nr_vcpus = nr_vcpus;
    domctl.u.vnuma.pad = 0;

    rc = do_domctl(xch, &domctl);

 vnumaset_fail:
    /* bounce_post on all four; safe for buffers whose bounce_pre failed. */
    xc_hypercall_bounce_post(xch, vmemrange);
    xc_hypercall_bounce_post(xch, vdistance);
    xc_hypercall_bounce_post(xch, vcpu_to_vnode);
    xc_hypercall_bounce_post(xch, vnode_to_pnode);

    return rc;
}
2135
/*
 * Retrieve a domain's virtual NUMA topology via XENMEM_get_vnumainfo.
 *
 * On entry, *nr_vnodes / *nr_vmemranges / *nr_vcpus describe the caller's
 * buffer capacities; on return they are overwritten with the values the
 * hypervisor reported (this write-back happens even when the memory op
 * fails, e.g. so a caller can retry with larger buffers — NOTE(review):
 * confirm callers rely on this on the error path).
 *
 * vmemrange, vdistance and vcpu_to_vnode are output arrays, bounced out
 * of hypercall-safe memory after the call.  Returns the xc_memory_op()
 * result, or -1 with errno = ENOMEM if bouncing failed.
 */
int xc_domain_getvnuma(xc_interface *xch,
                       uint32_t domid,
                       uint32_t *nr_vnodes,
                       uint32_t *nr_vmemranges,
                       uint32_t *nr_vcpus,
                       xen_vmemrange_t *vmemrange,
                       unsigned int *vdistance,
                       unsigned int *vcpu_to_vnode)
{
    int rc;
    /* Output-only bounce buffers sized from the caller's capacities. */
    DECLARE_HYPERCALL_BOUNCE(vmemrange, sizeof(*vmemrange) * *nr_vmemranges,
                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    DECLARE_HYPERCALL_BOUNCE(vdistance, sizeof(*vdistance) *
                             *nr_vnodes * *nr_vnodes,
                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    DECLARE_HYPERCALL_BOUNCE(vcpu_to_vnode, sizeof(*vcpu_to_vnode) * *nr_vcpus,
                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);

    struct xen_vnuma_topology_info vnuma_topo;

    if ( xc_hypercall_bounce_pre(xch, vmemrange) ||
         xc_hypercall_bounce_pre(xch, vdistance) ||
         xc_hypercall_bounce_pre(xch, vcpu_to_vnode) )
    {
        rc = -1;
        errno = ENOMEM;
        goto vnumaget_fail;
    }

    set_xen_guest_handle(vnuma_topo.vmemrange.h, vmemrange);
    set_xen_guest_handle(vnuma_topo.vdistance.h, vdistance);
    set_xen_guest_handle(vnuma_topo.vcpu_to_vnode.h, vcpu_to_vnode);

    /* Tell Xen how much room each array has. */
    vnuma_topo.nr_vnodes = *nr_vnodes;
    vnuma_topo.nr_vcpus = *nr_vcpus;
    vnuma_topo.nr_vmemranges = *nr_vmemranges;
    vnuma_topo.domid = domid;
    vnuma_topo.pad = 0;

    rc = xc_memory_op(xch, XENMEM_get_vnumainfo, &vnuma_topo,
                      sizeof(vnuma_topo));

    /* Propagate the counts Xen reported back to the caller. */
    *nr_vnodes = vnuma_topo.nr_vnodes;
    *nr_vcpus = vnuma_topo.nr_vcpus;
    *nr_vmemranges = vnuma_topo.nr_vmemranges;

 vnumaget_fail:
    xc_hypercall_bounce_post(xch, vmemrange);
    xc_hypercall_bounce_post(xch, vdistance);
    xc_hypercall_bounce_post(xch, vcpu_to_vnode);

    return rc;
}
2189
/*
 * Perform a soft reset of the domain (XEN_DOMCTL_soft_reset), preserving
 * its memory while re-initialising its execution state.  Returns the
 * do_domctl() result.
 */
int xc_domain_soft_reset(xc_interface *xch,
                         uint32_t domid)
{
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_soft_reset,
        .domain = domid,
    };

    return do_domctl(xch, &domctl);
}
2198
/*
 * Assign a set of last-level-cache colors to a domain
 * (XEN_DOMCTL_set_llc_colors).
 *
 * llc_colors points to num_llc_colors uint32_t color IDs; it may be NULL
 * when num_llc_colors is 0, in which case the guest handle is left as the
 * zero-initialised (null) handle.  The array is copied into a
 * hypercall-safe buffer for the call.  Returns the do_domctl() result,
 * or -1 if the buffer allocation failed.
 */
int xc_domain_set_llc_colors(xc_interface *xch, uint32_t domid,
                             const uint32_t *llc_colors,
                             uint32_t num_llc_colors)
{
    struct xen_domctl domctl = {};
    DECLARE_HYPERCALL_BUFFER(uint32_t, local);
    int ret = -1;

    if ( num_llc_colors )
    {
        size_t bytes = sizeof(uint32_t) * num_llc_colors;

        local = xc_hypercall_buffer_alloc(xch, local, bytes);
        if ( local == NULL )
        {
            PERROR("Could not allocate LLC colors for set_llc_colors");
            goto out;
        }
        /* Stage the caller's color list in hypercall-safe memory. */
        memcpy(local, llc_colors, bytes);
        set_xen_guest_handle(domctl.u.set_llc_colors.llc_colors, local);
    }

    domctl.cmd = XEN_DOMCTL_set_llc_colors;
    domctl.domain = domid;
    domctl.u.set_llc_colors.num_llc_colors = num_llc_colors;

    ret = do_domctl(xch, &domctl);

 out:
    /* Safe when 'local' was never allocated (free of a null buffer). */
    xc_hypercall_buffer_free(xch, local);

    return ret;
}
2232 /*
2233 * Local variables:
2234 * mode: C
2235 * c-file-style: "BSD"
2236 * c-basic-offset: 4
2237 * tab-width: 4
2238 * indent-tabs-mode: nil
2239 * End:
2240 */
2241