1 /*
2 * Copyright (C) 2006-2007 XenSource Ltd.
3 * Copyright (C) 2008 Citrix Ltd.
4 * Author Vincent Hanquez <vincent.hanquez@eu.citrix.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; version 2.1 only, with the special
9 * exception on linking described in file LICENSE.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU Lesser General Public License for more details.
15 */
16
17 #define _XOPEN_SOURCE 600
18 #include <stdlib.h>
19 #include <errno.h>
20
21 #define CAML_NAME_SPACE
22 #include <caml/alloc.h>
23 #include <caml/memory.h>
24 #include <caml/signals.h>
25 #include <caml/fail.h>
26 #include <caml/callback.h>
27
28 #include <sys/mman.h>
29 #include <stdint.h>
30 #include <string.h>
31 #include <inttypes.h>
32
33 #define XC_WANT_COMPAT_MAP_FOREIGN_API
34 #include <xenctrl.h>
35 #include <xenguest.h>
36 #include <xen-tools/common-macros.h>
37
38 #include "mmap_stubs.h"
39
40 #ifndef Val_none
41 #define Val_none (Val_int(0))
42 #endif
43
44 #ifndef Tag_some
45 #define Tag_some 0
46 #endif
47
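/*
 * The xc_interface handle is stored in an OCaml custom block.  xch_of_val()
 * recovers the raw pointer, and stub_xenctrl_finalize() closes the handle
 * when the OCaml value is garbage collected.
 */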
static inline xc_interface *xch_of_val(value v)
49 {
50 xc_interface *xch = *(xc_interface **)Data_custom_val(v);
51
52 return xch;
53 }
54
static void stub_xenctrl_finalize(value v)
56 {
57 xc_interface *xch = xch_of_val(v);
58
59 xc_interface_close(xch);
60 }
61
62 static struct custom_operations xenctrl_ops = {
63 .identifier = "xenctrl",
64 .finalize = stub_xenctrl_finalize,
65 .compare = custom_compare_default, /* Can't compare */
66 .hash = custom_hash_default, /* Can't hash */
67 .serialize = custom_serialize_default, /* Can't serialize */
68 .deserialize = custom_deserialize_default, /* Can't deserialize */
69 .compare_ext = custom_compare_ext_default, /* Can't compare */
70 };
71
72 #define string_of_option_array(array, index) \
73 ((Field(array, index) == Val_none) ? NULL : String_val(Field(Field(array, index), 0)))
74
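/*
 * Raise the OCaml exception registered under "xc.error".  The message is
 * taken from libxc's last recorded error if there is one, otherwise from
 * errno; a NULL xch means xc_interface_open() itself failed.
 */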
static void Noreturn failwith_xc(xc_interface *xch)
76 {
77 char error_str[XC_MAX_ERROR_MSG_LEN + 6];
78 if (xch) {
79 const xc_error *error = xc_get_last_error(xch);
80 if (error->code == XC_ERROR_NONE)
81 snprintf(error_str, sizeof(error_str),
82 "%d: %s", errno, strerror(errno));
83 else
84 snprintf(error_str, sizeof(error_str),
85 "%d: %s: %s", error->code,
86 xc_error_code_to_desc(error->code),
87 error->message);
88 } else {
89 snprintf(error_str, sizeof(error_str),
90 "Unable to open XC interface");
91 }
92 caml_raise_with_string(*caml_named_value("xc.error"), error_str);
93 }
94
CAMLprim value stub_xc_interface_open(value unit)
96 {
97 CAMLparam1(unit);
98 CAMLlocal1(result);
99 xc_interface *xch;
100
101 result = caml_alloc_custom(&xenctrl_ops, sizeof(xch), 0, 1);
102
103 caml_enter_blocking_section();
104 xch = xc_interface_open(NULL, NULL, 0);
105 caml_leave_blocking_section();
106
107 if ( !xch )
108 failwith_xc(xch);
109
110 *(xc_interface **)Data_custom_val(result) = xch;
111
112 CAMLreturn(result);
113 }
114
static void domain_handle_of_uuid_string(xen_domain_handle_t h,
					 const char *uuid)
117 {
118 #define X "%02"SCNx8
119 #define UUID_FMT (X X X X "-" X X "-" X X "-" X X "-" X X X X X X)
120
121 if ( sscanf(uuid, UUID_FMT, &h[0], &h[1], &h[2], &h[3], &h[4],
122 &h[5], &h[6], &h[7], &h[8], &h[9], &h[10], &h[11],
123 &h[12], &h[13], &h[14], &h[15]) != 16 )
124 {
125 char buf[128];
126
127 snprintf(buf, sizeof(buf),
128 "Xc.int_array_of_uuid_string: %s", uuid);
129
130 caml_invalid_argument(buf);
131 }
132
133 #undef X
134 }
135
136 /*
137 * Various fields which are a bitmap in the C ABI are converted to lists of
138 * integers in the Ocaml ABI for more idiomatic handling.
139 */
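/*
 * For example, a C bitmap of 0b101 becomes the OCaml list [2; 0]: set bits
 * are consed onto the front of the list in ascending order, so the result
 * ends up in descending bit order.
 */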
static value c_bitmap_to_ocaml_list
141 /* ! */
142 /*
143 * All calls to this function must be in a form suitable
144 * for xenctrl_abi_check. The parsing there is ad-hoc.
145 */
146 (unsigned int bitmap)
147 {
148 CAMLparam0();
149 CAMLlocal2(list, tmp);
150
151 #if defined(__i386__) || defined(__x86_64__)
152 /*
153 * This check file contains a mixture of stuff, because it is
154 * generated from the whole of this xenctrl_stubs.c file (without
155 * regard to arch ifdefs) and the whole of xenctrl.ml (which does not
156 * have any arch ifdeffery). Currently, there is only x86 and
157 * arch-independent stuff, and there is no facility in the abi-check
158 * script for arch conditionals. So for now we make the checks
159 * effective on x86 only; this will suffice to defend even ARM
160 * because breaking changes to common code will break the build
161 * on x86 and not make it to master. This is a bit of a bodge.
162 */
163 #include "xenctrl_abi_check.h"
164 #endif
165
166 list = tmp = Val_emptylist;
167
168 for ( unsigned int i = 0; bitmap; i++, bitmap >>= 1 )
169 {
170 if ( !(bitmap & 1) )
171 continue;
172
173 tmp = caml_alloc_small(2, Tag_cons);
174 Field(tmp, 0) = Val_int(i);
175 Field(tmp, 1) = list;
176 list = tmp;
177 }
178
179 CAMLreturn(list);
180 }
181
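/*
 * The inverse of c_bitmap_to_ocaml_list(): e.g. the OCaml list [0; 3]
 * yields the C bitmap 0x9 (bits 0 and 3 set).
 */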
static unsigned int ocaml_list_to_c_bitmap(value l)
183 /* ! */
184 /*
185 * All calls to this function must be in a form suitable
186 * for xenctrl_abi_check. The parsing there is ad-hoc.
187 */
188 {
189 unsigned int val = 0;
190
191 for ( ; l != Val_none; l = Field(l, 1) )
192 val |= 1u << Int_val(Field(l, 0));
193
194 return val;
195 }
196
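/*
 * The config argument is the OCaml domctl_create_config record; its fields
 * are accessed positionally via the VAL_* mnemonics below, so the layout
 * must stay in sync with xenctrl.ml (checked via xenctrl_abi_check on x86).
 */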
CAMLprim value stub_xc_domain_create(value xch_val, value wanted_domid, value config)
198 {
199 CAMLparam3(xch_val, wanted_domid, config);
200 CAMLlocal2(l, arch_domconfig);
201 xc_interface *xch = xch_of_val(xch_val);
202
203 /* Mnemonics for the named fields inside domctl_create_config */
204 #define VAL_SSIDREF Field(config, 0)
205 #define VAL_HANDLE Field(config, 1)
206 #define VAL_FLAGS Field(config, 2)
207 #define VAL_IOMMU_OPTS Field(config, 3)
208 #define VAL_MAX_VCPUS Field(config, 4)
209 #define VAL_MAX_EVTCHN_PORT Field(config, 5)
210 #define VAL_MAX_GRANT_FRAMES Field(config, 6)
211 #define VAL_MAX_MAPTRACK_FRAMES Field(config, 7)
212 #define VAL_MAX_GRANT_VERSION Field(config, 8)
213 #define VAL_VMTRACE_BUF_KB Field(config, 9)
214 #define VAL_CPUPOOL_ID Field(config, 10)
215 #define VAL_ARCH Field(config, 11)
216
217 uint32_t domid = Int_val(wanted_domid);
218 uint64_t vmtrace_size = Int32_val(VAL_VMTRACE_BUF_KB);
219
220 vmtrace_size = ROUNDUP(vmtrace_size << 10, XC_PAGE_SHIFT);
221 if ( vmtrace_size != (uint32_t)vmtrace_size )
222 caml_invalid_argument("vmtrace_buf_kb");
223
224 int result;
225 struct xen_domctl_createdomain cfg = {
226 .ssidref = Int32_val(VAL_SSIDREF),
227 .max_vcpus = Int_val(VAL_MAX_VCPUS),
228 .max_evtchn_port = Int_val(VAL_MAX_EVTCHN_PORT),
229 .max_grant_frames = Int_val(VAL_MAX_GRANT_FRAMES),
230 .max_maptrack_frames = Int_val(VAL_MAX_MAPTRACK_FRAMES),
231 .grant_opts =
232 XEN_DOMCTL_GRANT_version(Int_val(VAL_MAX_GRANT_VERSION)),
233 .vmtrace_size = vmtrace_size,
234 .cpupool_id = Int32_val(VAL_CPUPOOL_ID),
235 };
236
237 domain_handle_of_uuid_string(cfg.handle, String_val(VAL_HANDLE));
238
239 cfg.flags = ocaml_list_to_c_bitmap
240 /* ! domain_create_flag CDF_ lc */
241 /* ! XEN_DOMCTL_CDF_ XEN_DOMCTL_CDF_MAX max */
242 (VAL_FLAGS);
243
244 cfg.iommu_opts = ocaml_list_to_c_bitmap
245 /* ! domain_create_iommu_opts IOMMU_ lc */
246 /* ! XEN_DOMCTL_IOMMU_ XEN_DOMCTL_IOMMU_MAX max */
247 (VAL_IOMMU_OPTS);
248
249 arch_domconfig = Field(VAL_ARCH, 0);
250 switch ( Tag_val(VAL_ARCH) )
251 {
252 case 0: /* ARM - nothing to do */
253 caml_failwith("Unhandled: ARM");
254 break;
255
256 case 1: /* X86 - emulation flags in the block */
257 #if defined(__i386__) || defined(__x86_64__)
258
259 /* Quick & dirty check for ABI changes. */
260 BUILD_BUG_ON(sizeof(cfg) != 68);
261
262 /* Mnemonics for the named fields inside xen_x86_arch_domainconfig */
263 #define VAL_EMUL_FLAGS Field(arch_domconfig, 0)
264 #define VAL_MISC_FLAGS Field(arch_domconfig, 1)
265
266 cfg.arch.emulation_flags = ocaml_list_to_c_bitmap
267 /* ! x86_arch_emulation_flags X86_EMU_ none */
268 /* ! XEN_X86_EMU_ XEN_X86_EMU_ALL all */
269 (VAL_EMUL_FLAGS);
270
271 cfg.arch.misc_flags = ocaml_list_to_c_bitmap
272 /* ! x86_arch_misc_flags X86_ none */
273 /* ! XEN_X86_ XEN_X86_MSR_RELAXED all */
274 (VAL_MISC_FLAGS);
275
276 #undef VAL_MISC_FLAGS
277 #undef VAL_EMUL_FLAGS
278
279 #else
280 caml_failwith("Unhandled: x86");
281 #endif
282 break;
283
284 default:
285 caml_failwith("Unhandled domconfig type");
286 }
287
288 #undef VAL_ARCH
289 #undef VAL_CPUPOOL_ID
290 #undef VAL_VMTRACE_BUF_KB
291 #undef VAL_MAX_GRANT_VERSION
292 #undef VAL_MAX_MAPTRACK_FRAMES
293 #undef VAL_MAX_GRANT_FRAMES
294 #undef VAL_MAX_EVTCHN_PORT
295 #undef VAL_MAX_VCPUS
296 #undef VAL_IOMMU_OPTS
297 #undef VAL_FLAGS
298 #undef VAL_HANDLE
299 #undef VAL_SSIDREF
300
301 caml_enter_blocking_section();
302 result = xc_domain_create(xch, &domid, &cfg);
303 caml_leave_blocking_section();
304
305 if (result < 0)
306 failwith_xc(xch);
307
308 CAMLreturn(Val_int(domid));
309 }
310
CAMLprim value stub_xc_domain_max_vcpus(value xch_val, value domid,
					value max_vcpus)
313 {
314 CAMLparam3(xch_val, domid, max_vcpus);
315 xc_interface *xch = xch_of_val(xch_val);
316 int r;
317
318 r = xc_domain_max_vcpus(xch, Int_val(domid), Int_val(max_vcpus));
319 if (r)
320 failwith_xc(xch);
321
322 CAMLreturn(Val_unit);
323 }
324
325
value stub_xc_domain_sethandle(value xch_val, value domid, value handle)
327 {
328 CAMLparam3(xch_val, domid, handle);
329 xc_interface *xch = xch_of_val(xch_val);
330 xen_domain_handle_t h;
331 int i;
332
333 domain_handle_of_uuid_string(h, String_val(handle));
334
335 i = xc_domain_sethandle(xch, Int_val(domid), h);
336 if (i)
337 failwith_xc(xch);
338
339 CAMLreturn(Val_unit);
340 }
341
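/*
 * Generic wrapper for simple per-domain operations (pause, unpause,
 * destroy): drops the OCaml runtime lock around the libxc call and raises
 * xc.error on failure.
 */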
static value dom_op(value xch_val, value domid,
		    int (*fn)(xc_interface *, uint32_t))
344 {
345 CAMLparam2(xch_val, domid);
346 xc_interface *xch = xch_of_val(xch_val);
347 int result;
348
349 uint32_t c_domid = Int_val(domid);
350
351 caml_enter_blocking_section();
352 result = fn(xch, c_domid);
353 caml_leave_blocking_section();
354 if (result)
355 failwith_xc(xch);
356 CAMLreturn(Val_unit);
357 }
358
CAMLprim value stub_xc_domain_pause(value xch_val, value domid)
360 {
361 return dom_op(xch_val, domid, xc_domain_pause);
362 }
363
364
CAMLprim value stub_xc_domain_unpause(value xch_val, value domid)
366 {
367 return dom_op(xch_val, domid, xc_domain_unpause);
368 }
369
CAMLprim value stub_xc_domain_destroy(value xch_val, value domid)
371 {
372 return dom_op(xch_val, domid, xc_domain_destroy);
373 }
374
CAMLprim value stub_xc_domain_resume_fast(value xch_val, value domid)
376 {
377 CAMLparam2(xch_val, domid);
378 xc_interface *xch = xch_of_val(xch_val);
379 int result;
380
381 uint32_t c_domid = Int_val(domid);
382
383 caml_enter_blocking_section();
384 result = xc_domain_resume(xch, c_domid, 1);
385 caml_leave_blocking_section();
386 if (result)
387 failwith_xc(xch);
388 CAMLreturn(Val_unit);
389 }
390
CAMLprim value stub_xc_domain_shutdown(value xch_val, value domid, value reason)
392 {
393 CAMLparam3(xch_val, domid, reason);
394 xc_interface *xch = xch_of_val(xch_val);
395 int ret;
396
397 ret = xc_domain_shutdown(xch, Int_val(domid), Int_val(reason));
398 if (ret < 0)
399 failwith_xc(xch);
400
401 CAMLreturn(Val_unit);
402 }
403
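/*
 * Convert an xc_domaininfo_t into the OCaml domaininfo record (a 17-field
 * tuple); the field order must match the type declared in xenctrl.ml.
 */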
static value alloc_domaininfo(xc_domaininfo_t * info)
405 {
406 CAMLparam0();
407 CAMLlocal5(result, tmp, arch_config, x86_arch_config, emul_list);
408 int i;
409
410 result = caml_alloc_tuple(17);
411
412 Store_field(result, 0, Val_int(info->domain));
413 Store_field(result, 1, Val_bool(info->flags & XEN_DOMINF_dying));
414 Store_field(result, 2, Val_bool(info->flags & XEN_DOMINF_shutdown));
415 Store_field(result, 3, Val_bool(info->flags & XEN_DOMINF_paused));
416 Store_field(result, 4, Val_bool(info->flags & XEN_DOMINF_blocked));
417 Store_field(result, 5, Val_bool(info->flags & XEN_DOMINF_running));
418 Store_field(result, 6, Val_bool(info->flags & XEN_DOMINF_hvm_guest));
419 Store_field(result, 7, Val_int((info->flags >> XEN_DOMINF_shutdownshift)
420 & XEN_DOMINF_shutdownmask));
421 Store_field(result, 8, caml_copy_nativeint(info->tot_pages));
422 Store_field(result, 9, caml_copy_nativeint(info->max_pages));
423 Store_field(result, 10, caml_copy_int64(info->shared_info_frame));
424 Store_field(result, 11, caml_copy_int64(info->cpu_time));
425 Store_field(result, 12, Val_int(info->nr_online_vcpus));
426 Store_field(result, 13, Val_int(info->max_vcpu_id));
427 Store_field(result, 14, caml_copy_int32(info->ssidref));
428
429 tmp = caml_alloc_small(16, 0);
430 for (i = 0; i < 16; i++) {
431 Field(tmp, i) = Val_int(info->handle[i]);
432 }
433
434 Store_field(result, 15, tmp);
435
436 #if defined(__i386__) || defined(__x86_64__)
437 /*
438 * emulation_flags: x86_arch_emulation_flags list;
439 */
440 emul_list = c_bitmap_to_ocaml_list
441 /* ! x86_arch_emulation_flags */
442 (info->arch_config.emulation_flags);
443
444 /* xen_x86_arch_domainconfig */
445 x86_arch_config = caml_alloc_tuple(1);
446 Store_field(x86_arch_config, 0, emul_list);
447
448 /* arch_config: arch_domainconfig */
449 arch_config = caml_alloc_small(1, 1);
450
451 Store_field(arch_config, 0, x86_arch_config);
452
453 Store_field(result, 16, arch_config);
454 #endif
455
456 CAMLreturn(result);
457 }
458
CAMLprim value stub_xc_domain_getinfolist(value xch_val, value first_domain, value nb)
460 {
461 CAMLparam3(xch_val, first_domain, nb);
462 CAMLlocal2(result, temp);
463 xc_interface *xch = xch_of_val(xch_val);
464 xc_domaininfo_t * info;
465 int i, ret, toalloc, retval;
466 unsigned int c_max_domains;
467 uint32_t c_first_domain;
468
	/* get the minimum number of bytes we need to allocate and round it up to a page boundary */
470 toalloc = (sizeof(xc_domaininfo_t) * Int_val(nb)) | 0xfff;
471 ret = posix_memalign((void **) ((void *) &info), 4096, toalloc);
472 if (ret)
473 caml_raise_out_of_memory();
474
475 result = temp = Val_emptylist;
476
477 c_first_domain = Int_val(first_domain);
478 c_max_domains = Int_val(nb);
479 caml_enter_blocking_section();
480 retval = xc_domain_getinfolist(xch, c_first_domain,
481 c_max_domains, info);
482 caml_leave_blocking_section();
483
484 if (retval < 0) {
485 free(info);
486 failwith_xc(xch);
487 }
488 for (i = 0; i < retval; i++) {
489 result = caml_alloc_small(2, Tag_cons);
490 Field(result, 0) = Val_int(0);
491 Field(result, 1) = temp;
492 temp = result;
493
494 Store_field(result, 0, alloc_domaininfo(info + i));
495 }
496
497 free(info);
498 CAMLreturn(result);
499 }
500
CAMLprim value stub_xc_domain_getinfo(value xch_val, value domid)
502 {
503 CAMLparam2(xch_val, domid);
504 CAMLlocal1(result);
505 xc_interface *xch = xch_of_val(xch_val);
506 xc_domaininfo_t info;
507 int ret;
508
509 ret = xc_domain_getinfo_single(xch, Int_val(domid), &info);
510 if (ret < 0)
511 failwith_xc(xch);
512
513 result = alloc_domaininfo(&info);
514 CAMLreturn(result);
515 }
516
CAMLprim value stub_xc_vcpu_getinfo(value xch_val, value domid, value vcpu)
518 {
519 CAMLparam3(xch_val, domid, vcpu);
520 CAMLlocal1(result);
521 xc_interface *xch = xch_of_val(xch_val);
522 xc_vcpuinfo_t info;
523 int retval;
524
525 uint32_t c_domid = Int_val(domid);
526 uint32_t c_vcpu = Int_val(vcpu);
527 caml_enter_blocking_section();
528 retval = xc_vcpu_getinfo(xch, c_domid, c_vcpu, &info);
529 caml_leave_blocking_section();
530 if (retval < 0)
531 failwith_xc(xch);
532
533 result = caml_alloc_tuple(5);
534 Store_field(result, 0, Val_bool(info.online));
535 Store_field(result, 1, Val_bool(info.blocked));
536 Store_field(result, 2, Val_bool(info.running));
537 Store_field(result, 3, caml_copy_int64(info.cpu_time));
538 Store_field(result, 4, caml_copy_int32(info.cpu));
539
540 CAMLreturn(result);
541 }
542
CAMLprim value stub_xc_vcpu_context_get(value xch_val, value domid,
					value cpu)
545 {
546 CAMLparam3(xch_val, domid, cpu);
547 xc_interface *xch = xch_of_val(xch_val);
548 CAMLlocal1(context);
549 int ret;
550 vcpu_guest_context_any_t ctxt;
551
552 ret = xc_vcpu_getcontext(xch, Int_val(domid), Int_val(cpu), &ctxt);
553 if ( ret < 0 )
554 failwith_xc(xch);
555
556 context = caml_alloc_string(sizeof(ctxt));
557 memcpy((char *) String_val(context), &ctxt.c, sizeof(ctxt.c));
558
559 CAMLreturn(context);
560 }
561
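/*
 * Clamp the length of the OCaml bool array to the number of CPUs Xen
 * reports, so callers never index past either the array or the cpumap.
 */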
static int get_cpumap_len(xc_interface *xch, value cpumap)
563 {
564 int ml_len = Wosize_val(cpumap);
565 int xc_len = xc_get_max_cpus(xch);
566
567 if (ml_len < xc_len)
568 return ml_len;
569 else
570 return xc_len;
571 }
572
CAMLprim value stub_xc_vcpu_setaffinity(value xch_val, value domid,
					value vcpu, value cpumap)
575 {
576 CAMLparam4(xch_val, domid, vcpu, cpumap);
577 xc_interface *xch = xch_of_val(xch_val);
578 int i, len = get_cpumap_len(xch, cpumap);
579 xc_cpumap_t c_cpumap;
580 int retval;
581
582 c_cpumap = xc_cpumap_alloc(xch);
583 if (c_cpumap == NULL)
584 failwith_xc(xch);
585
586 for (i=0; i<len; i++) {
587 if (Bool_val(Field(cpumap, i)))
588 c_cpumap[i/8] |= 1 << (i&7);
589 }
590 retval = xc_vcpu_setaffinity(xch, Int_val(domid),
591 Int_val(vcpu),
592 c_cpumap, NULL,
593 XEN_VCPUAFFINITY_HARD);
594 free(c_cpumap);
595
596 if (retval < 0)
597 failwith_xc(xch);
598 CAMLreturn(Val_unit);
599 }
600
CAMLprim value stub_xc_vcpu_getaffinity(value xch_val, value domid,
					value vcpu)
603 {
604 CAMLparam3(xch_val, domid, vcpu);
605 CAMLlocal1(ret);
606 xc_interface *xch = xch_of_val(xch_val);
607 xc_cpumap_t c_cpumap;
608 int i, len = xc_get_max_cpus(xch);
609 int retval;
610
611 if (len < 1)
612 failwith_xc(xch);
613
614 c_cpumap = xc_cpumap_alloc(xch);
615 if (c_cpumap == NULL)
616 failwith_xc(xch);
617
618 retval = xc_vcpu_getaffinity(xch, Int_val(domid),
619 Int_val(vcpu),
620 c_cpumap, NULL,
621 XEN_VCPUAFFINITY_HARD);
622 if (retval < 0) {
623 free(c_cpumap);
624 failwith_xc(xch);
625 }
626
627 ret = caml_alloc(len, 0);
628
629 for (i=0; i<len; i++) {
630 if (c_cpumap[i/8] & 1 << (i&7))
631 Store_field(ret, i, Val_true);
632 else
633 Store_field(ret, i, Val_false);
634 }
635
636 free(c_cpumap);
637
638 CAMLreturn(ret);
639 }
640
CAMLprim value stub_xc_sched_id(value xch_val)
642 {
643 CAMLparam1(xch_val);
644 xc_interface *xch = xch_of_val(xch_val);
645 int sched_id;
646
647 if (xc_sched_id(xch, &sched_id))
648 failwith_xc(xch);
649
650 CAMLreturn(Val_int(sched_id));
651 }
652
CAMLprim value stub_xc_evtchn_alloc_unbound(value xch_val,
					    value local_domid,
					    value remote_domid)
656 {
657 CAMLparam3(xch_val, local_domid, remote_domid);
658 xc_interface *xch = xch_of_val(xch_val);
659 int result;
660
661 uint32_t c_local_domid = Int_val(local_domid);
662 uint32_t c_remote_domid = Int_val(remote_domid);
663
664 caml_enter_blocking_section();
665 result = xc_evtchn_alloc_unbound(xch, c_local_domid,
666 c_remote_domid);
667 caml_leave_blocking_section();
668
669 if (result < 0)
670 failwith_xc(xch);
671 CAMLreturn(Val_int(result));
672 }
673
CAMLprim value stub_xc_evtchn_reset(value xch_val, value domid)
675 {
676 CAMLparam2(xch_val, domid);
677 xc_interface *xch = xch_of_val(xch_val);
678 int r;
679
680 r = xc_evtchn_reset(xch, Int_val(domid));
681 if (r < 0)
682 failwith_xc(xch);
683 CAMLreturn(Val_unit);
684 }
685
CAMLprim value stub_xc_evtchn_status(value xch_val, value domid, value port)
687 {
688 CAMLparam3(xch_val, domid, port);
689 CAMLlocal4(result, result_status, stat, interdomain);
690 xc_interface *xch = xch_of_val(xch_val);
691 xc_evtchn_status_t status = {
692 .dom = Int_val(domid),
693 .port = Int_val(port),
694 };
695 int rc;
696
697 caml_enter_blocking_section();
698 rc = xc_evtchn_status(xch, &status);
699 caml_leave_blocking_section();
700
701 if ( rc < 0 )
702 failwith_xc(xch);
703
704 switch ( status.status )
705 {
706 case EVTCHNSTAT_closed:
707 CAMLreturn(Val_none); /* Early exit, no allocations needed */
708
709 case EVTCHNSTAT_unbound:
710 stat = caml_alloc(1, 0); /* 1st non-constant constructor */
711 Store_field(stat, 0, Val_int(status.u.unbound.dom));
712 break;
713
714 case EVTCHNSTAT_interdomain:
715 interdomain = caml_alloc_tuple(2);
716 Store_field(interdomain, 0, Val_int(status.u.interdomain.dom));
717 Store_field(interdomain, 1, Val_int(status.u.interdomain.port));
718 stat = caml_alloc(1, 1); /* 2nd non-constant constructor */
719 Store_field(stat, 0, interdomain);
720 break;
721
722 case EVTCHNSTAT_pirq:
723 stat = caml_alloc(1, 2); /* 3rd non-constant constructor */
724 Store_field(stat, 0, Val_int(status.u.pirq));
725 break;
726
727 case EVTCHNSTAT_virq:
728 stat = caml_alloc(1, 3); /* 4th non-constant constructor */
729 Store_field(stat, 0, Val_int(status.u.virq));
730 break;
731
732 case EVTCHNSTAT_ipi:
733 stat = Val_int(0); /* 1st constant constructor */
734 break;
735
736 default:
737 caml_failwith("Unknown evtchn status");
738 }
739
740 result_status = caml_alloc_tuple(2);
741 Store_field(result_status, 0, Val_int(status.vcpu));
742 Store_field(result_status, 1, stat);
743
744 result = caml_alloc_small(1, Tag_some);
745 Store_field(result, 0, result_status);
746
747 CAMLreturn(result);
748 }
749
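/*
 * Read the hypervisor console ring.  The first call uses the cached size
 * guess and a non-incremental read; if that fills the buffer, the buffer
 * is grown and further data is fetched with incremental reads continuing
 * from 'index'.
 */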
CAMLprim value stub_xc_readconsolering(value xch_val)
751 {
752 /* Safe to use outside of blocking sections because of Ocaml GC lock. */
753 static unsigned int conring_size = 16384 + 1;
754
755 unsigned int count = conring_size, size = count, index = 0;
756 char *str = NULL, *ptr;
757 int ret;
758
759 CAMLparam1(xch_val);
760 CAMLlocal1(ring);
761 xc_interface *xch = xch_of_val(xch_val);
762
763 str = malloc(size);
764 if (!str)
765 caml_raise_out_of_memory();
766
767 /* Hopefully our conring_size guess is sufficient */
768 caml_enter_blocking_section();
769 ret = xc_readconsolering(xch, str, &count, 0, 0, &index);
770 caml_leave_blocking_section();
771
772 if (ret < 0) {
773 free(str);
774 failwith_xc(xch);
775 }
776
777 while (count == size && ret >= 0) {
778 size += count - 1;
779 if (size < count)
780 break;
781
782 ptr = realloc(str, size);
783 if (!ptr)
784 break;
785
786 str = ptr + count;
787 count = size - count;
788
789 caml_enter_blocking_section();
790 ret = xc_readconsolering(xch, str, &count, 0, 1, &index);
791 caml_leave_blocking_section();
792
793 count += str - ptr;
794 str = ptr;
795 }
796
797 /*
798 * If we didn't break because of an overflow with size, and we have
799 * needed to realloc() ourself more space, update our tracking of the
800 * real console ring size.
801 */
802 if (size > conring_size)
803 conring_size = size;
804
805 ring = caml_alloc_string(count);
806 memcpy((char *) String_val(ring), str, count);
807 free(str);
808
809 CAMLreturn(ring);
810 }
811
CAMLprim value stub_xc_send_debug_keys(value xch_val, value keys)
813 {
814 CAMLparam2(xch_val, keys);
815 xc_interface *xch = xch_of_val(xch_val);
816 int r;
817
818 r = xc_send_debug_keys(xch, String_val(keys));
819 if (r)
820 failwith_xc(xch);
821 CAMLreturn(Val_unit);
822 }
823
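/*
 * Build the arch_capabilities variant of the physinfo record: on ARM this
 * carries the SVE vector length (the encoded granule count scaled by 128
 * to give a length in bits), on x86 it is currently an empty list.
 */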
CAMLprim value physinfo_arch_caps(const xc_physinfo_t *info)
825 {
826 CAMLparam0();
827 CAMLlocal2(arch_cap_flags, arch_obj);
828 int tag = -1;
829
830 #if defined(__arm__) || defined(__aarch64__)
831
832 tag = 0; /* tag ARM */
833
834 arch_obj = caml_alloc_tuple(1);
835
836 Store_field(arch_obj, 0,
837 Val_int(MASK_EXTR(info->arch_capabilities,
838 XEN_SYSCTL_PHYSCAP_ARM_SVE_MASK) * 128));
839
840 #elif defined(__i386__) || defined(__x86_64__)
841
842 tag = 1; /* tag x86 */
843
844 arch_obj = Val_emptylist;
845
846 #endif
847
848 if ( tag < 0 )
849 caml_failwith("Unhandled architecture");
850
851 arch_cap_flags = caml_alloc_small(1, tag);
852 Store_field(arch_cap_flags, 0, arch_obj);
853
854 CAMLreturn(arch_cap_flags);
855 }
856
CAMLprim value stub_xc_physinfo(value xch_val)
858 {
859 CAMLparam1(xch_val);
860 CAMLlocal2(physinfo, cap_list);
861 xc_interface *xch = xch_of_val(xch_val);
862 xc_physinfo_t c_physinfo;
863 int r;
864
865 caml_enter_blocking_section();
866 r = xc_physinfo(xch, &c_physinfo);
867 caml_leave_blocking_section();
868
869 if (r)
870 failwith_xc(xch);
871
872 /*
873 * capabilities: physinfo_cap_flag list;
874 */
875 cap_list = c_bitmap_to_ocaml_list
876 /* ! physinfo_cap_flag CAP_ lc */
877 /* ! XEN_SYSCTL_PHYSCAP_ XEN_SYSCTL_PHYSCAP_MAX max */
878 (c_physinfo.capabilities);
879
880 physinfo = caml_alloc_tuple(11);
881 Store_field(physinfo, 0, Val_int(c_physinfo.threads_per_core));
882 Store_field(physinfo, 1, Val_int(c_physinfo.cores_per_socket));
883 Store_field(physinfo, 2, Val_int(c_physinfo.nr_cpus));
884 Store_field(physinfo, 3, Val_int(c_physinfo.max_node_id));
885 Store_field(physinfo, 4, Val_int(c_physinfo.cpu_khz));
886 Store_field(physinfo, 5, caml_copy_nativeint(c_physinfo.total_pages));
887 Store_field(physinfo, 6, caml_copy_nativeint(c_physinfo.free_pages));
888 Store_field(physinfo, 7, caml_copy_nativeint(c_physinfo.scrub_pages));
889 Store_field(physinfo, 8, cap_list);
890 Store_field(physinfo, 9, Val_int(c_physinfo.max_cpu_id + 1));
891 Store_field(physinfo, 10, physinfo_arch_caps(&c_physinfo));
892
893 CAMLreturn(physinfo);
894 }
895
CAMLprim value stub_xc_pcpu_info(value xch_val, value nr_cpus)
897 {
898 CAMLparam2(xch_val, nr_cpus);
899 CAMLlocal2(pcpus, v);
900 xc_interface *xch = xch_of_val(xch_val);
901 xc_cpuinfo_t *info;
902 int r, size;
903
904 if (Int_val(nr_cpus) < 1)
905 caml_invalid_argument("nr_cpus");
906
907 info = calloc(Int_val(nr_cpus) + 1, sizeof(*info));
908 if (!info)
909 caml_raise_out_of_memory();
910
911 caml_enter_blocking_section();
912 r = xc_getcpuinfo(xch, Int_val(nr_cpus), info, &size);
913 caml_leave_blocking_section();
914
915 if (r) {
916 free(info);
917 failwith_xc(xch);
918 }
919
920 if (size > 0) {
921 int i;
922 pcpus = caml_alloc(size, 0);
923 for (i = 0; i < size; i++) {
924 v = caml_copy_int64(info[i].idletime);
925 caml_modify(&Field(pcpus, i), v);
926 }
927 } else
928 pcpus = Atom(0);
929 free(info);
930 CAMLreturn(pcpus);
931 }
932
CAMLprim value stub_xc_domain_setmaxmem(value xch_val, value domid,
					value max_memkb)
935 {
936 CAMLparam3(xch_val, domid, max_memkb);
937 xc_interface *xch = xch_of_val(xch_val);
938 int retval;
939
940 uint32_t c_domid = Int_val(domid);
941 unsigned int c_max_memkb = Int64_val(max_memkb);
942 caml_enter_blocking_section();
943 retval = xc_domain_setmaxmem(xch, c_domid, c_max_memkb);
944 caml_leave_blocking_section();
945 if (retval)
946 failwith_xc(xch);
947 CAMLreturn(Val_unit);
948 }
949
CAMLprim value stub_xc_domain_set_memmap_limit(value xch_val, value domid,
					       value map_limitkb)
952 {
953 CAMLparam3(xch_val, domid, map_limitkb);
954 xc_interface *xch = xch_of_val(xch_val);
955 unsigned long v;
956 int retval;
957
958 v = Int64_val(map_limitkb);
959 retval = xc_domain_set_memmap_limit(xch, Int_val(domid), v);
960 if (retval)
961 failwith_xc(xch);
962
963 CAMLreturn(Val_unit);
964 }
965
CAMLprim value stub_xc_domain_memory_increase_reservation(value xch_val,
							   value domid,
							   value mem_kb)
969 {
970 CAMLparam3(xch_val, domid, mem_kb);
971 xc_interface *xch = xch_of_val(xch_val);
972 int retval;
973
974 unsigned long nr_extents = ((unsigned long)(Int64_val(mem_kb))) >> (XC_PAGE_SHIFT - 10);
975
976 uint32_t c_domid = Int_val(domid);
977 caml_enter_blocking_section();
978 retval = xc_domain_increase_reservation_exact(xch, c_domid,
979 nr_extents, 0, 0, NULL);
980 caml_leave_blocking_section();
981
982 if (retval)
983 failwith_xc(xch);
984 CAMLreturn(Val_unit);
985 }
986
CAMLprim value stub_xc_version_version(value xch_val)
988 {
989 CAMLparam1(xch_val);
990 CAMLlocal1(result);
991 xc_interface *xch = xch_of_val(xch_val);
992 xen_extraversion_t extra;
993 long packed;
994 int retval;
995
996 caml_enter_blocking_section();
997 packed = xc_version(xch, XENVER_version, NULL);
998 caml_leave_blocking_section();
999
1000 if (packed < 0)
1001 failwith_xc(xch);
1002
1003 caml_enter_blocking_section();
1004 retval = xc_version(xch, XENVER_extraversion, &extra);
1005 caml_leave_blocking_section();
1006
1007 if (retval)
1008 failwith_xc(xch);
1009
1010 result = caml_alloc_tuple(3);
1011
1012 Store_field(result, 0, Val_int(packed >> 16));
1013 Store_field(result, 1, Val_int(packed & 0xffff));
1014 Store_field(result, 2, caml_copy_string(extra));
1015
1016 CAMLreturn(result);
1017 }
1018
1019
CAMLprim value stub_xc_version_compile_info(value xch_val)
1021 {
1022 CAMLparam1(xch_val);
1023 CAMLlocal1(result);
1024 xc_interface *xch = xch_of_val(xch_val);
1025 xen_compile_info_t ci;
1026 int retval;
1027
1028 caml_enter_blocking_section();
1029 retval = xc_version(xch, XENVER_compile_info, &ci);
1030 caml_leave_blocking_section();
1031
1032 if (retval)
1033 failwith_xc(xch);
1034
1035 result = caml_alloc_tuple(4);
1036
1037 Store_field(result, 0, caml_copy_string(ci.compiler));
1038 Store_field(result, 1, caml_copy_string(ci.compile_by));
1039 Store_field(result, 2, caml_copy_string(ci.compile_domain));
1040 Store_field(result, 3, caml_copy_string(ci.compile_date));
1041
1042 CAMLreturn(result);
1043 }
1044
1045
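/*
 * Shared helper for the XENVER_* queries that return a plain string; the
 * caller passes a buffer of the appropriate xen_*_info_t type.
 */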
static value xc_version_single_string(value xch_val, int code, void *info)
1047 {
1048 CAMLparam1(xch_val);
1049 xc_interface *xch = xch_of_val(xch_val);
1050 int retval;
1051
1052 caml_enter_blocking_section();
1053 retval = xc_version(xch, code, info);
1054 caml_leave_blocking_section();
1055
1056 if (retval)
1057 failwith_xc(xch);
1058
1059 CAMLreturn(caml_copy_string((char *)info));
1060 }
1061
1062
CAMLprim value stub_xc_version_changeset(value xch_val)
1064 {
1065 xen_changeset_info_t ci;
1066
1067 return xc_version_single_string(xch_val, XENVER_changeset, &ci);
1068 }
1069
1070
CAMLprim value stub_xc_version_capabilities(value xch_val)
1072 {
1073 xen_capabilities_info_t ci;
1074
1075 return xc_version_single_string(xch_val, XENVER_capabilities, &ci);
1076 }
1077
1078
CAMLprim value stub_pages_to_kib(value pages)
1080 {
1081 CAMLparam1(pages);
1082
1083 CAMLreturn(caml_copy_int64(Int64_val(pages) << (XC_PAGE_SHIFT - 10)));
1084 }
1085
1086
CAMLprim value stub_map_foreign_range(value xch_val, value dom,
				      value size, value mfn)
1089 {
1090 CAMLparam4(xch_val, dom, size, mfn);
1091 CAMLlocal1(result);
1092 xc_interface *xch = xch_of_val(xch_val);
1093 struct mmap_interface *intf;
1094 unsigned long c_mfn = Nativeint_val(mfn);
1095 int len = Int_val(size);
1096 void *ptr;
1097
1098 BUILD_BUG_ON((sizeof(struct mmap_interface) % sizeof(value)) != 0);
1099 result = caml_alloc(Wsize_bsize(sizeof(struct mmap_interface)),
1100 Abstract_tag);
1101
1102 caml_enter_blocking_section();
1103 ptr = xc_map_foreign_range(xch, Int_val(dom), len,
1104 PROT_READ|PROT_WRITE, c_mfn);
1105 caml_leave_blocking_section();
1106
1107 if (!ptr)
1108 caml_failwith("xc_map_foreign_range error");
1109
1110 intf = Data_abstract_val(result);
1111 *intf = (struct mmap_interface){ ptr, len };
1112
1113 CAMLreturn(result);
1114 }
1115
CAMLprim value stub_sched_credit_domain_get(value xch_val, value domid)
1117 {
1118 CAMLparam2(xch_val, domid);
1119 CAMLlocal1(sdom);
1120 xc_interface *xch = xch_of_val(xch_val);
1121 struct xen_domctl_sched_credit c_sdom;
1122 int ret;
1123
1124 caml_enter_blocking_section();
1125 ret = xc_sched_credit_domain_get(xch, Int_val(domid), &c_sdom);
1126 caml_leave_blocking_section();
1127 if (ret != 0)
1128 failwith_xc(xch);
1129
1130 sdom = caml_alloc_tuple(2);
1131 Store_field(sdom, 0, Val_int(c_sdom.weight));
1132 Store_field(sdom, 1, Val_int(c_sdom.cap));
1133
1134 CAMLreturn(sdom);
1135 }
1136
CAMLprim value stub_sched_credit_domain_set(value xch_val, value domid,
					    value sdom)
1139 {
1140 CAMLparam3(xch_val, domid, sdom);
1141 xc_interface *xch = xch_of_val(xch_val);
1142 struct xen_domctl_sched_credit c_sdom;
1143 int ret;
1144
1145 c_sdom.weight = Int_val(Field(sdom, 0));
1146 c_sdom.cap = Int_val(Field(sdom, 1));
1147 caml_enter_blocking_section();
1148 ret = xc_sched_credit_domain_set(xch, Int_val(domid), &c_sdom);
1149 caml_leave_blocking_section();
1150 if (ret != 0)
1151 failwith_xc(xch);
1152
1153 CAMLreturn(Val_unit);
1154 }
1155
CAMLprim value stub_shadow_allocation_get(value xch_val, value domid)
1157 {
1158 CAMLparam2(xch_val, domid);
1159 CAMLlocal1(mb);
1160 xc_interface *xch = xch_of_val(xch_val);
1161 unsigned int c_mb;
1162 int ret;
1163
1164 caml_enter_blocking_section();
1165 ret = xc_shadow_control(xch, Int_val(domid),
1166 XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION,
1167 &c_mb, 0);
1168 caml_leave_blocking_section();
1169 if (ret != 0)
1170 failwith_xc(xch);
1171
1172 mb = Val_int(c_mb);
1173 CAMLreturn(mb);
1174 }
1175
CAMLprim value stub_shadow_allocation_set(value xch_val, value domid,
					  value mb)
1178 {
1179 CAMLparam3(xch_val, domid, mb);
1180 xc_interface *xch = xch_of_val(xch_val);
1181 unsigned int c_mb;
1182 int ret;
1183
1184 c_mb = Int_val(mb);
1185 caml_enter_blocking_section();
1186 ret = xc_shadow_control(xch, Int_val(domid),
1187 XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION,
1188 &c_mb, 0);
1189 caml_leave_blocking_section();
1190 if (ret != 0)
1191 failwith_xc(xch);
1192
1193 CAMLreturn(Val_unit);
1194 }
1195
CAMLprim value stub_xc_domain_ioport_permission(value xch_val, value domid,
						value start_port, value nr_ports,
						value allow)
1199 {
1200 CAMLparam5(xch_val, domid, start_port, nr_ports, allow);
1201 xc_interface *xch = xch_of_val(xch_val);
1202 uint32_t c_start_port, c_nr_ports;
1203 uint8_t c_allow;
1204 int ret;
1205
1206 c_start_port = Int_val(start_port);
1207 c_nr_ports = Int_val(nr_ports);
1208 c_allow = Bool_val(allow);
1209
1210 ret = xc_domain_ioport_permission(xch, Int_val(domid),
1211 c_start_port, c_nr_ports, c_allow);
1212 if (ret < 0)
1213 failwith_xc(xch);
1214
1215 CAMLreturn(Val_unit);
1216 }
1217
CAMLprim value stub_xc_domain_iomem_permission(value xch_val, value domid,
					       value start_pfn, value nr_pfns,
					       value allow)
1221 {
1222 CAMLparam5(xch_val, domid, start_pfn, nr_pfns, allow);
1223 xc_interface *xch = xch_of_val(xch_val);
1224 unsigned long c_start_pfn, c_nr_pfns;
1225 uint8_t c_allow;
1226 int ret;
1227
1228 c_start_pfn = Nativeint_val(start_pfn);
1229 c_nr_pfns = Nativeint_val(nr_pfns);
1230 c_allow = Bool_val(allow);
1231
1232 ret = xc_domain_iomem_permission(xch, Int_val(domid),
1233 c_start_pfn, c_nr_pfns, c_allow);
1234 if (ret < 0)
1235 failwith_xc(xch);
1236
1237 CAMLreturn(Val_unit);
1238 }
1239
CAMLprim value stub_xc_domain_irq_permission(value xch_val, value domid,
					     value pirq, value allow)
1242 {
1243 CAMLparam4(xch_val, domid, pirq, allow);
1244 xc_interface *xch = xch_of_val(xch_val);
1245 uint32_t c_pirq;
1246 bool c_allow;
1247 int ret;
1248
1249 c_pirq = Int_val(pirq);
1250 c_allow = Bool_val(allow);
1251
1252 ret = xc_domain_irq_permission(xch, Int_val(domid),
1253 c_pirq, c_allow);
1254 if (ret < 0)
1255 failwith_xc(xch);
1256
1257 CAMLreturn(Val_unit);
1258 }
1259
CAMLprim value stub_xc_hvm_param_get(value xch_val, value domid, value param)
1261 {
1262 CAMLparam3(xch_val, domid, param);
1263 xc_interface *xch = xch_of_val(xch_val);
1264 uint64_t val;
1265 int ret;
1266
1267 caml_enter_blocking_section();
1268 ret = xc_hvm_param_get(xch, Int_val(domid), Int_val(param), &val);
1269 caml_leave_blocking_section();
1270
1271 if ( ret )
1272 failwith_xc(xch);
1273
1274 CAMLreturn(caml_copy_int64(val));
1275 }
1276
CAMLprim value stub_xc_hvm_param_set(value xch_val, value domid, value param, value val)
1278 {
1279 CAMLparam4(xch_val, domid, param, val);
1280 xc_interface *xch = xch_of_val(xch_val);
1281 uint64_t val64 = Int64_val(val);
1282 int ret;
1283
1284 caml_enter_blocking_section();
1285 ret = xc_hvm_param_set(xch, Int_val(domid), Int_val(param), val64);
1286 caml_leave_blocking_section();
1287
1288 if ( ret )
1289 failwith_xc(xch);
1290
1291 CAMLreturn(Val_unit);
1292 }
1293
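/*
 * Pack a PCI segment/bus/device/function into the 32-bit SBDF encoding
 * used by the device assignment hypercalls: segment in bits 31-16, bus in
 * 15-8, device in 7-3, function in 2-0.
 */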
static uint32_t encode_sbdf(int domain, int bus, int dev, int func)
1295 {
1296 return ((uint32_t)domain & 0xffff) << 16 |
1297 ((uint32_t)bus & 0xff) << 8 |
1298 ((uint32_t)dev & 0x1f) << 3 |
1299 ((uint32_t)func & 0x7);
1300 }
1301
CAMLprim value stub_xc_domain_test_assign_device(value xch_val, value domid, value desc)
1303 {
1304 CAMLparam3(xch_val, domid, desc);
1305 xc_interface *xch = xch_of_val(xch_val);
1306 int ret;
1307 int domain, bus, dev, func;
1308 uint32_t sbdf;
1309
1310 domain = Int_val(Field(desc, 0));
1311 bus = Int_val(Field(desc, 1));
1312 dev = Int_val(Field(desc, 2));
1313 func = Int_val(Field(desc, 3));
1314 sbdf = encode_sbdf(domain, bus, dev, func);
1315
1316 ret = xc_test_assign_device(xch, Int_val(domid), sbdf);
1317
1318 CAMLreturn(Val_bool(ret == 0));
1319 }
1320
CAMLprim value stub_xc_domain_assign_device(value xch_val, value domid, value desc)
1322 {
1323 CAMLparam3(xch_val, domid, desc);
1324 xc_interface *xch = xch_of_val(xch_val);
1325 int ret;
1326 int domain, bus, dev, func;
1327 uint32_t sbdf;
1328
1329 domain = Int_val(Field(desc, 0));
1330 bus = Int_val(Field(desc, 1));
1331 dev = Int_val(Field(desc, 2));
1332 func = Int_val(Field(desc, 3));
1333 sbdf = encode_sbdf(domain, bus, dev, func);
1334
1335 ret = xc_assign_device(xch, Int_val(domid), sbdf,
1336 XEN_DOMCTL_DEV_RDM_RELAXED);
1337
1338 if (ret < 0)
1339 failwith_xc(xch);
1340 CAMLreturn(Val_unit);
1341 }
1342
CAMLprim value stub_xc_domain_deassign_device(value xch_val, value domid, value desc)
1344 {
1345 CAMLparam3(xch_val, domid, desc);
1346 xc_interface *xch = xch_of_val(xch_val);
1347 int ret;
1348 int domain, bus, dev, func;
1349 uint32_t sbdf;
1350
1351 domain = Int_val(Field(desc, 0));
1352 bus = Int_val(Field(desc, 1));
1353 dev = Int_val(Field(desc, 2));
1354 func = Int_val(Field(desc, 3));
1355 sbdf = encode_sbdf(domain, bus, dev, func);
1356
1357 ret = xc_deassign_device(xch, Int_val(domid), sbdf);
1358
1359 if (ret < 0)
1360 failwith_xc(xch);
1361 CAMLreturn(Val_unit);
1362 }
1363
CAMLprim value stub_xc_get_cpu_featureset(value xch_val, value idx)
1365 {
1366 CAMLparam2(xch_val, idx);
1367 CAMLlocal1(bitmap_val);
1368 #if defined(__i386__) || defined(__x86_64__)
1369 xc_interface *xch = xch_of_val(xch_val);
1370
1371 /* Safe, because of the global ocaml lock. */
1372 static uint32_t fs_len;
1373
1374 if (fs_len == 0)
1375 {
1376 int ret = xc_get_cpu_featureset(xch, 0, &fs_len, NULL);
1377
1378 if (ret || (fs_len == 0))
1379 failwith_xc(xch);
1380 }
1381
1382 {
1383 /* To/from hypervisor to retrieve actual featureset */
1384 uint32_t fs[fs_len], len = fs_len;
1385 unsigned int i;
1386
1387 int ret = xc_get_cpu_featureset(xch, Int_val(idx), &len, fs);
1388
1389 if (ret)
1390 failwith_xc(xch);
1391
1392 bitmap_val = caml_alloc(len, 0);
1393
1394 for (i = 0; i < len; ++i)
1395 Store_field(bitmap_val, i, caml_copy_int64(fs[i]));
1396 }
1397 #else
1398 caml_failwith("xc_get_cpu_featureset: not implemented");
1399 #endif
1400 CAMLreturn(bitmap_val);
1401 }
1402
CAMLprim value stub_xc_watchdog(value xch_val, value domid, value timeout)
1404 {
1405 CAMLparam3(xch_val, domid, timeout);
1406 xc_interface *xch = xch_of_val(xch_val);
1407 int ret;
1408 unsigned int c_timeout = Int32_val(timeout);
1409
1410 ret = xc_watchdog(xch, Int_val(domid), c_timeout);
1411 if (ret < 0)
1412 failwith_xc(xch);
1413
1414 CAMLreturn(Val_int(ret));
1415 }
1416
1417 /*
1418 * Local variables:
1419 * indent-tabs-mode: t
1420 * c-basic-offset: 8
1421 * tab-width: 8
1422 * End:
1423 */
1424