1 /******************************************************************************
2 * xenctrl.h
3 *
4 * A library for low-level access to the Xen control interfaces.
5 *
6 * Copyright (c) 2003-2004, K A Fraser.
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation;
11 * version 2.1 of the License.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; If not, see <http://www.gnu.org/licenses/>.
20 */
21
22 #ifndef XENCTRL_H
23 #define XENCTRL_H
24
25 /* Tell the Xen public headers we are a user-space tools build. */
26 #ifndef __XEN_TOOLS__
27 #define __XEN_TOOLS__ 1
28 #endif
29
30 #include <unistd.h>
31 #include <stddef.h>
32 #include <stdint.h>
33 #include <stdio.h>
34 #include <stdbool.h>
35 #include <xen/xen.h>
36 #include <xen/domctl.h>
37 #include <xen/physdev.h>
38 #include <xen/sysctl.h>
39 #include <xen/version.h>
40 #include <xen/event_channel.h>
41 #include <xen/sched.h>
42 #include <xen/memory.h>
43 #include <xen/grant_table.h>
44 #include <xen/hvm/dm_op.h>
45 #include <xen/hvm/params.h>
46 #include <xen/xsm/flask_op.h>
47 #include <xen/kexec.h>
48 #include <xen/platform.h>
49
50 #include "xentoollog.h"
51 #include "xen-barrier.h"
52
53 #if defined(__i386__) || defined(__x86_64__)
54 #include <xen/foreign/x86_32.h>
55 #include <xen/foreign/x86_64.h>
56 #include <xen/arch-x86/xen-mca.h>
57 #endif
58
59 #define XC_PAGE_SHIFT 12
60 #define XC_PAGE_SIZE (1UL << XC_PAGE_SHIFT)
61 #define XC_PAGE_MASK (~(XC_PAGE_SIZE-1))
62
63 #define INVALID_MFN (~0UL)
64
65 #define XENCTRL_HAS_XC_INTERFACE 1
66 /* In Xen 4.0 and earlier, xc_interface_open and xc_evtchn_open would
67 * both return ints being the file descriptor. In 4.1 and later, they
68 * return an xc_interface* and xc_evtchn*, respectively - ie, a
69 * pointer to an opaque struct. This #define is provided in 4.1 and
70 * later, allowing out-of-tree callers to more easily distinguish
71 * between, and be compatible with, both versions.
72 */
73
74
75 /*
76 * GENERAL
77 *
78 * Unless otherwise specified, each function here returns zero or a
79 * non-null pointer on success; or in case of failure, sets errno and
80 * returns -1 or a null pointer.
81 *
82 * Unless otherwise specified, errors result in a call to the error
83 * handler function, which by default prints a message to the
84 * FILE* passed as the caller_data, which by default is stderr.
85 * (This is described below as "logging errors".)
86 *
87 * The error handler can safely trash errno, as libxc saves it across
88 * the callback.
89 */
90
91 typedef struct xc_interface_core xc_interface;
92
93 enum xc_error_code {
94 XC_ERROR_NONE = 0,
95 XC_INTERNAL_ERROR = 1,
96 XC_INVALID_KERNEL = 2,
97 XC_INVALID_PARAM = 3,
98 XC_OUT_OF_MEMORY = 4,
99 /* new codes need to be added to xc_error_level_to_desc too */
100 };
101
102 typedef enum xc_error_code xc_error_code;
103
104
105 /*
106 * INITIALIZATION FUNCTIONS
107 */
108
109 /**
110 * This function opens a handle to the hypervisor interface. This function can
111 * be called multiple times within a single process. Multiple processes can
112 * have an open hypervisor interface at the same time.
113 *
114 * Note:
115 * After fork a child process must not use any opened xc interface
116 * handle inherited from their parent. They must open a new handle if
117 * they want to interact with xc.
118 *
119 * Each call to this function should have a corresponding call to
120 * xc_interface_close().
121 *
122 * This function can fail if the caller does not have superuser permission or
123 * if a Xen-enabled kernel is not currently running.
124 *
125 * @return a handle to the hypervisor interface
126 */
127 xc_interface *xc_interface_open(xentoollog_logger *logger,
128 xentoollog_logger *dombuild_logger,
129 unsigned open_flags);
130 /* if logger==NULL, will log to stderr
131 * if dombuild_logger==NULL, will log to a file
132 */
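/*
 * Illustrative sketch only (not part of this header's API surface): a
 * minimal open/use/close cycle.  Passing NULL loggers requests the
 * default stderr logging; error handling is abbreviated.
 *
 *     xc_interface *xch = xc_interface_open(NULL, NULL, 0);
 *
 *     if ( !xch )
 *         return -1;               ... errno describes the failure ...
 *
 *     ... issue libxc calls against xch ...
 *
 *     xc_interface_close(xch);
 */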
133
134 /*
135 * Note: if XC_OPENFLAG_NON_REENTRANT is passed then libxc must not be
136 * called reentrantly and the calling application is responsible for
137 * providing mutual exclusion surrounding all libxc calls itself.
138 *
139 * In particular xc_{get,clear}_last_error only remain valid for the
140 * duration of the critical section containing the call which failed.
141 */
142 enum xc_open_flags {
143 XC_OPENFLAG_DUMMY = 1<<0, /* do not actually open a xenctrl interface */
144 XC_OPENFLAG_NON_REENTRANT = 1<<1, /* assume library is only ever called from a single thread */
145 };
146
147 /**
148 * This function closes an open hypervisor interface.
149 *
150 * This function can fail if the handle does not represent an open interface or
151 * if there were problems closing the interface. In the latter case
152 * the interface is still closed.
153 *
154 * @parm xch a handle to an open hypervisor interface
155 * @return 0 on success, -1 otherwise.
156 */
157 int xc_interface_close(xc_interface *xch);
158
159 /**
160 * Return the handles which xch has opened and will use for
161 * hypercalls, foreign memory accesses and device model operations.
162 * These may be used with the corresponding libraries so long as the
163 * xch itself remains open.
164 */
165 struct xencall_handle *xc_interface_xcall_handle(xc_interface *xch);
166 struct xenforeignmemory_handle *xc_interface_fmem_handle(xc_interface *xch);
167 struct xendevicemodel_handle *xc_interface_dmod_handle(xc_interface *xch);
168
169 /*
170 * HYPERCALL SAFE MEMORY BUFFER
171 *
172 * Ensure that memory which is passed to a hypercall has been
173 * specially allocated in order to be safe to access from the
174 * hypervisor.
175 *
176 * Each user data pointer is shadowed by an xc_hypercall_buffer data
177 * structure. You should never define an xc_hypercall_buffer type
178 * directly, instead use the DECLARE_HYPERCALL_BUFFER* macros below.
179 *
180 * The structure should be considered opaque and all access should be
181 * via the macros and helper functions defined below.
182 *
183 * Once the buffer is declared the user is responsible for explicitly
184 * allocating and releasing the memory using
185 * xc_hypercall_buffer_alloc(_pages) and
186 * xc_hypercall_buffer_free(_pages).
187 *
188 * Once the buffer has been allocated the user can initialise the data
189 * via the normal pointer. The xc_hypercall_buffer structure is
190 * transparently referenced by the helper macros (such as
191 * xen_set_guest_handle) in order to check at compile time that the
192 * correct type of memory is being used.
193 */
194 struct xc_hypercall_buffer {
195 /* Hypercall safe memory buffer. */
196 void *hbuf;
197
198 /*
199 * Reference to xc_hypercall_buffer passed as argument to the
200 * current function.
201 */
202 struct xc_hypercall_buffer *param_shadow;
203
204 /*
205 * Direction of copy for bounce buffering.
206 */
207 int dir;
208
209 /* Used iff dir != 0. */
210 void *ubuf;
211 size_t sz;
212 };
213 typedef struct xc_hypercall_buffer xc_hypercall_buffer_t;
214
215 /*
216 * Construct the name of the hypercall buffer for a given variable.
217 * For internal use only
218 */
219 #define XC__HYPERCALL_BUFFER_NAME(_name) xc__hypercall_buffer_##_name
220
221 /*
222 * Returns the hypercall_buffer associated with a variable.
223 */
224 #define HYPERCALL_BUFFER(_name) \
225 ({ xc_hypercall_buffer_t *_hcbuf_buf = \
226 &XC__HYPERCALL_BUFFER_NAME(_name); \
227 _hcbuf_buf->param_shadow ?: _hcbuf_buf; \
228 })
229
230 #define HYPERCALL_BUFFER_INIT_NO_BOUNCE .dir = 0, .sz = 0, .ubuf = (void *)-1
231
232 /*
233 * Defines a hypercall buffer and user pointer with _name of _type.
234 *
235 * The user accesses the data as normal via _name which will be
236 * transparently converted to the hypercall buffer as necessary.
237 */
238 #define DECLARE_HYPERCALL_BUFFER(_type, _name) \
239 _type *(_name) = NULL; \
240 xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(_name) = { \
241 .hbuf = NULL, \
242 .param_shadow = NULL, \
243 HYPERCALL_BUFFER_INIT_NO_BOUNCE \
244 }
245
246 /*
247 * Like DECLARE_HYPERCALL_BUFFER() but using an already allocated
248 * hypercall buffer, _hbuf.
249 *
250 * Useful when a hypercall buffer is passed to a function and access
251 * via the user pointer is required.
252 *
253 * See DECLARE_HYPERCALL_BUFFER_ARGUMENT() if the user pointer is not
254 * required.
255 */
256 #define DECLARE_HYPERCALL_BUFFER_SHADOW(_type, _name, _hbuf) \
257 _type *(_name) = (_hbuf)->hbuf; \
258 __attribute__((unused)) \
259 xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(_name) = { \
260 .hbuf = (void *)-1, \
261 .param_shadow = (_hbuf), \
262 HYPERCALL_BUFFER_INIT_NO_BOUNCE \
263 }
264
265 /*
266 * Declare the necessary data structure to allow a hypercall buffer
267 * passed as an argument to a function to be used in the normal way.
268 */
269 #define DECLARE_HYPERCALL_BUFFER_ARGUMENT(_name) \
270 xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(_name) = { \
271 .hbuf = (void *)-1, \
272 .param_shadow = (_name), \
273 HYPERCALL_BUFFER_INIT_NO_BOUNCE \
274 }
275
276 /*
277 * Get the hypercall buffer data pointer in a form suitable for use
278 * directly as a hypercall argument.
279 */
280 #define HYPERCALL_BUFFER_AS_ARG(_name) \
281 ({ xc_hypercall_buffer_t _hcbuf_arg1; \
282 typeof(XC__HYPERCALL_BUFFER_NAME(_name)) *_hcbuf_arg2 = \
283 HYPERCALL_BUFFER(_name); \
284 (void)(&_hcbuf_arg1 == _hcbuf_arg2); \
285 (unsigned long)(_hcbuf_arg2)->hbuf; \
286 })
287
288 /*
289 * Set a xen_guest_handle in a type safe manner, ensuring that the
290 * data pointer has been correctly allocated.
291 */
292 #define set_xen_guest_handle_impl(_hnd, _val, _byte_off) \
293 do { \
294 xc_hypercall_buffer_t _hcbuf_hnd1; \
295 typeof(XC__HYPERCALL_BUFFER_NAME(_val)) *_hcbuf_hnd2 = \
296 HYPERCALL_BUFFER(_val); \
297 (void) (&_hcbuf_hnd1 == _hcbuf_hnd2); \
298 set_xen_guest_handle_raw(_hnd, \
299 (_hcbuf_hnd2)->hbuf + (_byte_off)); \
300 } while (0)
301
302 #undef set_xen_guest_handle
303 #define set_xen_guest_handle(_hnd, _val) \
304 set_xen_guest_handle_impl(_hnd, _val, 0)
305
306 #define set_xen_guest_handle_offset(_hnd, _val, _off) \
307 set_xen_guest_handle_impl(_hnd, _val, \
308 ((sizeof(*_val)*(_off))))
309
310 /* Use with set_xen_guest_handle in place of NULL */
311 extern xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(HYPERCALL_BUFFER_NULL);
312
313 /*
314 * Allocate and free hypercall buffers with byte granularity.
315 */
316 void *xc__hypercall_buffer_alloc(xc_interface *xch, xc_hypercall_buffer_t *b, size_t size);
317 #define xc_hypercall_buffer_alloc(_xch, _name, _size) xc__hypercall_buffer_alloc(_xch, HYPERCALL_BUFFER(_name), _size)
318 void xc__hypercall_buffer_free(xc_interface *xch, xc_hypercall_buffer_t *b);
319 #define xc_hypercall_buffer_free(_xch, _name) xc__hypercall_buffer_free(_xch, HYPERCALL_BUFFER(_name))
320
321 /*
322 * Allocate and free hypercall buffers with page alignment.
323 */
324 void *xc__hypercall_buffer_alloc_pages(xc_interface *xch, xc_hypercall_buffer_t *b, int nr_pages);
325 #define xc_hypercall_buffer_alloc_pages(_xch, _name, _nr) xc__hypercall_buffer_alloc_pages(_xch, HYPERCALL_BUFFER(_name), _nr)
326 void xc__hypercall_buffer_free_pages(xc_interface *xch, xc_hypercall_buffer_t *b, int nr_pages);
327 #define xc_hypercall_buffer_free_pages(_xch, _name, _nr) \
328 do { \
329 if ( _name ) \
330 xc__hypercall_buffer_free_pages(_xch, HYPERCALL_BUFFER(_name), \
331 _nr); \
332 } while (0)
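/*
 * Illustrative sketch of the lifecycle described above, bouncing an array
 * of xen_pfn_t into a hypercall-safe buffer (the xen_memory_reservation
 * target is just an example; error handling abbreviated):
 *
 *     DECLARE_HYPERCALL_BUFFER(xen_pfn_t, pfns);
 *
 *     pfns = xc_hypercall_buffer_alloc(xch, pfns, nr * sizeof(*pfns));
 *     if ( !pfns )
 *         return -1;
 *
 *     ... fill pfns[0] .. pfns[nr - 1] through the normal pointer ...
 *
 *     set_xen_guest_handle(reservation.extent_start, pfns);
 *     ... issue the hypercall ...
 *
 *     xc_hypercall_buffer_free(xch, pfns);
 */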
333
334 /*
335 * Array of hypercall buffers.
336 *
337 * Create an array with xc_hypercall_buffer_array_create() and
338 * populate it by declaring one hypercall buffer in a loop and
339 * allocating the buffer with xc_hypercall_buffer_array_alloc().
340 *
341 * To access a previously allocated buffer, declare a new hypercall
342 * buffer and call xc_hypercall_buffer_array_get().
343 *
344 * Destroy the array with xc_hypercall_buffer_array_destroy() to free
345 * the array and all its allocated hypercall buffers.
346 */
347 struct xc_hypercall_buffer_array;
348 typedef struct xc_hypercall_buffer_array xc_hypercall_buffer_array_t;
349
350 xc_hypercall_buffer_array_t *xc_hypercall_buffer_array_create(xc_interface *xch, unsigned n);
351 void *xc__hypercall_buffer_array_alloc(xc_interface *xch, xc_hypercall_buffer_array_t *array,
352 unsigned index, xc_hypercall_buffer_t *hbuf, size_t size);
353 #define xc_hypercall_buffer_array_alloc(_xch, _array, _index, _name, _size) \
354 xc__hypercall_buffer_array_alloc(_xch, _array, _index, HYPERCALL_BUFFER(_name), _size)
355 void *xc__hypercall_buffer_array_get(xc_interface *xch, xc_hypercall_buffer_array_t *array,
356 unsigned index, xc_hypercall_buffer_t *hbuf);
357 #define xc_hypercall_buffer_array_get(_xch, _array, _index, _name, _size) \
358 xc__hypercall_buffer_array_get(_xch, _array, _index, HYPERCALL_BUFFER(_name))
359 void xc_hypercall_buffer_array_destroy(xc_interface *xc, xc_hypercall_buffer_array_t *array);
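/*
 * Illustrative sketch of the create/alloc/destroy pattern described
 * above (error handling abbreviated):
 *
 *     xc_hypercall_buffer_array_t *array;
 *     unsigned i;
 *
 *     array = xc_hypercall_buffer_array_create(xch, n);
 *     for ( i = 0; i < n; i++ )
 *     {
 *         DECLARE_HYPERCALL_BUFFER(void, buf);
 *
 *         buf = xc_hypercall_buffer_array_alloc(xch, array, i, buf,
 *                                               XC_PAGE_SIZE);
 *         ... initialise buf ...
 *     }
 *     ...
 *     xc_hypercall_buffer_array_destroy(xch, array);
 */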
360
361 /*
362 * CPUMAP handling
363 */
364 typedef uint8_t *xc_cpumap_t;
365
366 /* return maximum number of cpus the hypervisor supports */
367 int xc_get_max_cpus(xc_interface *xch);
368
369 /* return the number of online cpus */
370 int xc_get_online_cpus(xc_interface *xch);
371
372 /* return array size for cpumap */
373 int xc_get_cpumap_size(xc_interface *xch);
374
375 /* allocate a cpumap */
376 xc_cpumap_t xc_cpumap_alloc(xc_interface *xch);
377
378 /* clear a CPU from the cpumap. */
379 void xc_cpumap_clearcpu(int cpu, xc_cpumap_t map);
380
381 /* set a CPU in the cpumap. */
382 void xc_cpumap_setcpu(int cpu, xc_cpumap_t map);
383
384 /* Test whether a CPU is set in the cpumap. */
385 int xc_cpumap_testcpu(int cpu, xc_cpumap_t map);
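/*
 * Illustrative sketch: allocate a map sized for this host, mark a CPU,
 * test it, and release it (existing callers release maps with free()):
 *
 *     xc_cpumap_t map = xc_cpumap_alloc(xch);
 *
 *     if ( map )
 *     {
 *         xc_cpumap_setcpu(2, map);
 *         if ( xc_cpumap_testcpu(2, map) )
 *             ... CPU 2 is set ...
 *         free(map);
 *     }
 */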
386
387 /*
388 * NODEMAP handling
389 */
390 typedef uint8_t *xc_nodemap_t;
391
392 /* return maximum number of NUMA nodes the hypervisor supports */
393 int xc_get_max_nodes(xc_interface *xch);
394
395 /* return array size for nodemap */
396 int xc_get_nodemap_size(xc_interface *xch);
397
398 /* allocate a nodemap */
399 xc_nodemap_t xc_nodemap_alloc(xc_interface *xch);
400
401 /*
402 * DOMAIN DEBUGGING FUNCTIONS
403 */
404
405 typedef struct xc_core_header {
406 unsigned int xch_magic;
407 unsigned int xch_nr_vcpus;
408 unsigned int xch_nr_pages;
409 unsigned int xch_ctxt_offset;
410 unsigned int xch_index_offset;
411 unsigned int xch_pages_offset;
412 } xc_core_header_t;
413
414 #define XC_CORE_MAGIC 0xF00FEBED
415 #define XC_CORE_MAGIC_HVM 0xF00FEBEE
416
417 /*
418 * DOMAIN MANAGEMENT FUNCTIONS
419 */
420
421 typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
422
423 static inline unsigned int dominfo_shutdown_reason(const xc_domaininfo_t *info)
424 {
425 return (info->flags >> XEN_DOMINF_shutdownshift) & XEN_DOMINF_shutdownmask;
426 }
427
428 static inline bool dominfo_shutdown_with(const xc_domaininfo_t *info,
429 unsigned int expected_reason)
430 {
431 /* The reason doesn't make sense unless the domain is actually shutdown */
432 return (info->flags & XEN_DOMINF_shutdown) &&
433 (dominfo_shutdown_reason(info) == expected_reason);
434 }
435
436 typedef union
437 {
438 #if defined(__i386__) || defined(__x86_64__)
439 vcpu_guest_context_x86_64_t x64;
440 vcpu_guest_context_x86_32_t x32;
441 #endif
442 vcpu_guest_context_t c;
443 } vcpu_guest_context_any_t;
444
445 typedef union
446 {
447 #if defined(__i386__) || defined(__x86_64__)
448 shared_info_x86_64_t x64;
449 shared_info_x86_32_t x32;
450 #endif
451 shared_info_t s;
452 } shared_info_any_t;
453
454 #if defined(__i386__) || defined(__x86_64__)
455 typedef union
456 {
457 start_info_x86_64_t x64;
458 start_info_x86_32_t x32;
459 start_info_t s;
460 } start_info_any_t;
461 #endif
462
463 typedef struct xc_vcpu_extstate {
464 uint64_t xfeature_mask;
465 uint64_t size;
466 void *buffer;
467 } xc_vcpu_extstate_t;
468
469 int xc_domain_create(xc_interface *xch, uint32_t *pdomid,
470 struct xen_domctl_createdomain *config);
471
472
473 /* Functions to produce a dump of a given domain
474 * xc_domain_dumpcore - produces a dump to a specified file
475 * xc_domain_dumpcore_via_callback - produces a dump, using a specified
476 * callback function
477 */
478 int xc_domain_dumpcore(xc_interface *xch,
479 uint32_t domid,
480 const char *corename);
481
482 /* Define the callback function type for xc_domain_dumpcore_via_callback.
483 *
484 * This function is called by the coredump code for every "write",
485 * and passes an opaque object, created by the caller of
486 * xc_domain_dumpcore_via_callback, for the function's use.
487 */
488 typedef int (dumpcore_rtn_t)(xc_interface *xch,
489 void *arg, char *buffer, unsigned int length);
490
491 int xc_domain_dumpcore_via_callback(xc_interface *xch,
492 uint32_t domid,
493 void *arg,
494 dumpcore_rtn_t dump_rtn);
495
496 /*
497 * This function sets the maximum number of vcpus that a domain may create.
498 *
499 * @parm xch a handle to an open hypervisor interface.
500 * @parm domid the domain id in which vcpus are to be created.
501 * @parm max the maximum number of vcpus that the domain may create.
502 * @return 0 on success, -1 on failure.
503 */
504 int xc_domain_max_vcpus(xc_interface *xch,
505 uint32_t domid,
506 unsigned int max);
507
508 /**
509 * This function pauses a domain. A paused domain still exists in memory
510 * however it does not receive any timeslices from the hypervisor.
511 *
512 * @parm xch a handle to an open hypervisor interface
513 * @parm domid the domain id to pause
514 * @return 0 on success, -1 on failure.
515 */
516 int xc_domain_pause(xc_interface *xch,
517 uint32_t domid);
518 /**
519 * This function unpauses a domain. The domain should have been previously
520 * paused.
521 *
522 * @parm xch a handle to an open hypervisor interface
523 * @parm domid the domain id to unpause
524 * return 0 on success, -1 on failure
525 */
526 int xc_domain_unpause(xc_interface *xch,
527 uint32_t domid);
528
529 /**
530 * This function will destroy a domain. Destroying a domain removes the domain
531 * completely from memory. This function should be called after sending the
532 * domain a SHUTDOWN control message to free up the domain resources.
533 *
534 * @parm xch a handle to an open hypervisor interface
535 * @parm domid the domain id to destroy
536 * @return 0 on success, -1 on failure
537 */
538 int xc_domain_destroy(xc_interface *xch,
539 uint32_t domid);
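/*
 * Illustrative sketch of a create/pause/unpause/destroy cycle using the
 * functions above.  The createdomain fields shown are examples; consult
 * xen/domctl.h for the full structure.  Error handling abbreviated.
 *
 *     struct xen_domctl_createdomain config = {
 *         .max_vcpus = 1,
 *         ... other fields: ssidref, flags, arch settings ...
 *     };
 *     uint32_t domid = 0;
 *
 *     if ( xc_domain_create(xch, &domid, &config) )
 *         return -1;
 *
 *     xc_domain_pause(xch, domid);
 *     ... build the domain while it is paused ...
 *     xc_domain_unpause(xch, domid);
 *
 *     ... later, after requesting shutdown ...
 *     xc_domain_destroy(xch, domid);
 */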
540
541
542 /**
543 * This function will shutdown a domain. This is intended for use in
544 * fully-virtualized domains where this operation is analogous to the
545 * sched_op operations in a paravirtualized domain. The caller is
546 * expected to give the reason for the shutdown.
547 *
548 * @parm xch a handle to an open hypervisor interface
549 * @parm domid the domain id to shut down
550 * @parm reason is the reason (SHUTDOWN_xxx) for the shutdown
551 * @return 0 on success, -1 on failure
552 */
553 int xc_domain_shutdown(xc_interface *xch,
554 uint32_t domid,
555 int reason);
556
557 int xc_watchdog(xc_interface *xch,
558 uint32_t id,
559 uint32_t timeout);
560
561 /**
562 * This function explicitly sets the host NUMA nodes the domain will
563 * have affinity with.
564 *
565 * @parm xch a handle to an open hypervisor interface.
566 * @parm domid the domain id one wants to set the affinity of.
567 * @parm nodemap the map of the affine nodes.
568 * @return 0 on success, -1 on failure.
569 */
570 int xc_domain_node_setaffinity(xc_interface *xch,
571 uint32_t domid,
572 xc_nodemap_t nodemap);
573
574 /**
575 * This function retrieves the host NUMA nodes the domain has
576 * affinity with.
577 *
578 * @parm xch a handle to an open hypervisor interface.
579 * @parm domid the domain id one wants to get the node affinity of.
580 * @parm nodemap the map of the affine nodes.
581 * @return 0 on success, -1 on failure.
582 */
583 int xc_domain_node_getaffinity(xc_interface *xch,
584 uint32_t domid,
585 xc_nodemap_t nodemap);
586
587 /**
588 * This function specifies the CPU affinity for a vcpu.
589 *
590 * There are two kinds of affinity. Soft affinity is on what CPUs a vcpu
591 * prefers to run. Hard affinity is on what CPUs a vcpu is allowed to run.
592 * If flags contains XEN_VCPUAFFINITY_SOFT, the soft affinity is set to
593 * what cpumap_soft_inout contains. If flags contains XEN_VCPUAFFINITY_HARD,
594 * the hard affinity is set to what cpumap_hard_inout contains. Both flags
595 * can be set at the same time, in which case both soft and hard affinity are
596 * set to what the respective parameter contains.
597 *
598 * The function also returns the effective hard and/or soft affinity, still
599 * via the cpumap_soft_inout and cpumap_hard_inout parameters. Effective
600 * affinity is, in case of soft affinity, the intersection of soft affinity,
601 * hard affinity and the cpupool's online CPUs for the domain, and is returned
602 * in cpumap_soft_inout, if XEN_VCPUAFFINITY_SOFT is set in flags. In case of
603 * hard affinity, it is the intersection between hard affinity and the
604 * cpupool's online CPUs, and is returned in cpumap_hard_inout, if
605 * XEN_VCPUAFFINITY_HARD is set in flags. If both flags are set, both soft
606 * and hard affinity are returned in the respective parameter.
607 *
608 * The effective affinity is reported back because it is what the Xen
609 * scheduler will actually use, allowing the caller to check whether it
610 * matches, or is at least good enough for, their purposes.
611 *
612 * @param xch a handle to an open hypervisor interface.
613 * @param domid the id of the domain to which the vcpu belongs
614 * @param vcpu the vcpu id within the domain
615 * @param cpumap_hard_inout specifies(/returns) the (effective) hard affinity
616 * @param cpumap_soft_inout specifies(/returns) the (effective) soft affinity
617 * @param flags what we want to set
618 */
619 int xc_vcpu_setaffinity(xc_interface *xch,
620 uint32_t domid,
621 int vcpu,
622 xc_cpumap_t cpumap_hard_inout,
623 xc_cpumap_t cpumap_soft_inout,
624 uint32_t flags);
625
626 /**
627 * This function retrieves hard and soft CPU affinity of a vcpu,
628 * depending on what flags are set.
629 *
630 * Soft affinity is returned in cpumap_soft if XEN_VCPUAFFINITY_SOFT is set.
631 * Hard affinity is returned in cpumap_hard if XEN_VCPUAFFINITY_HARD is set.
632 *
633 * @param xch a handle to an open hypervisor interface.
634 * @param domid the id of the domain to which the vcpu belongs
635 * @param vcpu the vcpu id within the domain
636 * @param cpumap_hard is where hard affinity is returned
637 * @param cpumap_soft is where soft affinity is returned
638 * @param flags what we want to get
639 */
640 int xc_vcpu_getaffinity(xc_interface *xch,
641 uint32_t domid,
642 int vcpu,
643 xc_cpumap_t cpumap_hard,
644 xc_cpumap_t cpumap_soft,
645 uint32_t flags);
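/*
 * Illustrative sketch: pin vcpu 0 of a domain to a single CPU in both
 * hard and soft affinity, then inspect the effective maps written back
 * (error handling abbreviated):
 *
 *     xc_cpumap_t hard = xc_cpumap_alloc(xch);
 *     xc_cpumap_t soft = xc_cpumap_alloc(xch);
 *
 *     xc_cpumap_setcpu(cpu, hard);
 *     xc_cpumap_setcpu(cpu, soft);
 *     xc_vcpu_setaffinity(xch, domid, 0, hard, soft,
 *                         XEN_VCPUAFFINITY_HARD | XEN_VCPUAFFINITY_SOFT);
 *     ... on success, hard/soft now hold the effective affinities ...
 *     free(hard);
 *     free(soft);
 */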
646
647
648 /**
649 * This function will return the guest_width (in bytes) for the
650 * specified domain.
651 *
652 * @param xch a handle to an open hypervisor interface.
653 * @param domid the domain id one wants the guest width of.
654 * @param guest_width the guest width (in bytes).
655 */
656 int xc_domain_get_guest_width(xc_interface *xch, uint32_t domid,
657 unsigned int *guest_width);
658
659 /**
660 * This function will return information about a single domain. It looks
661 * up the domain by the provided domid and succeeds if the domain exists
662 * and is accessible by the current domain, or fails otherwise. A buffer
663 * may optionally be passed via the `info` parameter in order to retrieve
664 * information about the domain. The buffer is ignored if NULL is
665 * passed instead.
666 *
667 * @parm xch a handle to an open hypervisor interface
668 * @parm domid domid to lookup
669 * @parm info Optional domain information buffer (may be NULL)
670 * @return 0 on success, otherwise the call failed and info is undefined
671 */
672 int xc_domain_getinfo_single(xc_interface *xch,
673 uint32_t domid,
674 xc_domaininfo_t *info);
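/*
 * Illustrative sketch: look up one domain and check, via the dominfo
 * helpers defined earlier, whether it has shut down for reboot:
 *
 *     xc_domaininfo_t info;
 *
 *     if ( xc_domain_getinfo_single(xch, domid, &info) < 0 )
 *         ... no such (accessible) domain, see errno ...
 *     else if ( dominfo_shutdown_with(&info, SHUTDOWN_reboot) )
 *         ... tear down and recreate the domain ...
 */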
675
676 /**
677 * This function will set the execution context for the specified vcpu.
678 *
679 * @parm xch a handle to an open hypervisor interface
680 * @parm domid the domain to set the vcpu context for
681 * @parm vcpu the vcpu number for the context
682 * @parm ctxt pointer to the cpu context with the values to set
683 * @return 0 on success, -1 on failure
684 */
685 int xc_vcpu_setcontext(xc_interface *xch,
686 uint32_t domid,
687 uint32_t vcpu,
688 vcpu_guest_context_any_t *ctxt);
689 /**
690 * This function will return information about one or more domains, using a
691 * single hypercall. The domain information will be stored into the supplied
692 * array of xc_domaininfo_t structures.
693 *
694 * @parm xch a handle to an open hypervisor interface
695 * @parm first_domain the first domain to enumerate information from.
696 * Domains are currently enumerated in order of creation.
697 * @parm max_domains the number of elements in info
698 * @parm info an array of max_domains size that will contain the information for
699 * the enumerated domains.
700 * @return the number of domains enumerated or -1 on error
701 */
702 int xc_domain_getinfolist(xc_interface *xch,
703 uint32_t first_domain,
704 unsigned int max_domains,
705 xc_domaininfo_t *info);
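/*
 * Illustrative sketch: walk all domains in batches, resuming each query
 * after the highest domid seen so far:
 *
 *     xc_domaininfo_t info[16];
 *     uint32_t next = 0;
 *     int i, nr;
 *
 *     while ( (nr = xc_domain_getinfolist(xch, next, 16, info)) > 0 )
 *     {
 *         for ( i = 0; i < nr; i++ )
 *             ... consume info[i] ...
 *         next = info[nr - 1].domain + 1;
 *     }
 */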
706
707 /**
708 * This function sets the p2m entry for a broken page
709 * @parm xch a handle to an open hypervisor interface
710 * @parm domid the domain id to which the broken page belongs
711 * @parm pfn the pfn number of the broken page
712 * @return 0 on success, -1 on failure
713 */
714 int xc_set_broken_page_p2m(xc_interface *xch,
715 uint32_t domid,
716 unsigned long pfn);
717
718 /**
719 * This function returns information about the context of a hvm domain
720 * @parm xch a handle to an open hypervisor interface
721 * @parm domid the domain to get information from
722 * @parm ctxt_buf a pointer to a structure to store the execution context of
723 * the hvm domain
724 * @parm size the size of ctxt_buf in bytes
725 * @return 0 on success, -1 on failure
726 */
727 int xc_domain_hvm_getcontext(xc_interface *xch,
728 uint32_t domid,
729 uint8_t *ctxt_buf,
730 uint32_t size);
731
732
733 /**
734 * This function returns one element of the context of a hvm domain
735 * @parm xch a handle to an open hypervisor interface
736 * @parm domid the domain to get information from
737 * @parm typecode which type of element is required
738 * @parm instance which instance of the type
739 * @parm ctxt_buf a pointer to a structure to store the execution context of
740 * the hvm domain
741 * @parm size the size of ctxt_buf (must be >= HVM_SAVE_LENGTH(typecode))
742 * @return 0 on success, -1 on failure
743 */
744 int xc_domain_hvm_getcontext_partial(xc_interface *xch,
745 uint32_t domid,
746 uint16_t typecode,
747 uint16_t instance,
748 void *ctxt_buf,
749 uint32_t size);
750
751 /**
752 * This function will set the context for hvm domain
753 *
754 * @parm xch a handle to an open hypervisor interface
755 * @parm domid the domain to set the hvm domain context for
756 * @parm hvm_ctxt pointer to the hvm context with the values to set
757 * @parm size the size of hvm_ctxt in bytes
758 * @return 0 on success, -1 on failure
759 */
760 int xc_domain_hvm_setcontext(xc_interface *xch,
761 uint32_t domid,
762 uint8_t *hvm_ctxt,
763 uint32_t size);
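/*
 * Illustrative get/modify/set round trip.  It assumes the convention,
 * used by existing callers, that calling xc_domain_hvm_getcontext() with
 * a NULL buffer merely reports the required size:
 *
 *     int size = xc_domain_hvm_getcontext(xch, domid, NULL, 0);
 *     uint8_t *buf = size > 0 ? calloc(1, size) : NULL;
 *
 *     if ( buf && xc_domain_hvm_getcontext(xch, domid, buf, size) >= 0 )
 *     {
 *         ... inspect or edit the save records in buf ...
 *         xc_domain_hvm_setcontext(xch, domid, buf, size);
 *     }
 *     free(buf);
 */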
764
765 /**
766 * This function will return the guest IO ABI protocol
767 *
768 * @parm xch a handle to an open hypervisor interface
769 * @parm domid the domain to get IO ABI protocol for
770 * @return guest protocol on success, NULL on failure
771 */
772 const char *xc_domain_get_native_protocol(xc_interface *xch,
773 uint32_t domid);
774
775 /**
776 * This function returns information about the execution context of a
777 * particular vcpu of a domain.
778 *
779 * @parm xch a handle to an open hypervisor interface
780 * @parm domid the domain to get information from
781 * @parm vcpu the vcpu number
782 * @parm ctxt a pointer to a structure to store the execution context of the
783 * domain
784 * @return 0 on success, -1 on failure
785 */
786 int xc_vcpu_getcontext(xc_interface *xch,
787 uint32_t domid,
788 uint32_t vcpu,
789 vcpu_guest_context_any_t *ctxt);
790
791 /**
792 * This function initializes the vuart emulation and returns
793 * the event to be used by the backend for communicating with
794 * the emulation code.
795 *
796 * @parm xch a handle to an open hypervisor interface
797 * @parm type type of vuart
798 * @parm domid the domain to get information from
799 * @parm console_domid the domid of the backend console
800 * @parm gfn the guest pfn to be used as the ring buffer
801 * @parm evtchn the event channel to be used for events
802 * @return 0 on success, negative error on failure
803 */
804 int xc_dom_vuart_init(xc_interface *xch,
805 uint32_t type,
806 uint32_t domid,
807 uint32_t console_domid,
808 xen_pfn_t gfn,
809 evtchn_port_t *evtchn);
810
811 /**
812 * This function returns information about the XSAVE state of a particular
813 * vcpu of a domain. If extstate->size and extstate->xfeature_mask are 0,
814 * the call is considered a query to retrieve them and the buffer is not
815 * filled.
816 *
817 * @parm xch a handle to an open hypervisor interface
818 * @parm domid the domain to get information from
819 * @parm vcpu the vcpu number
820 * @parm extstate a pointer to a structure to store the XSAVE state of the
821 * domain
822 * @return 0 on success, negative error code on failure
823 */
824 int xc_vcpu_get_extstate(xc_interface *xch,
825 uint32_t domid,
826 uint32_t vcpu,
827 xc_vcpu_extstate_t *extstate);
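/*
 * Illustrative sketch of the query-then-fetch protocol described above:
 *
 *     xc_vcpu_extstate_t ext = { 0 };   ... size/mask zero: query only ...
 *
 *     if ( !xc_vcpu_get_extstate(xch, domid, vcpu, &ext) )
 *     {
 *         ext.buffer = malloc(ext.size);
 *         if ( ext.buffer &&
 *              !xc_vcpu_get_extstate(xch, domid, vcpu, &ext) )
 *             ... ext.buffer holds ext.size bytes of XSAVE state ...
 *         free(ext.buffer);
 *     }
 */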
828
829 typedef struct xen_domctl_getvcpuinfo xc_vcpuinfo_t;
830 int xc_vcpu_getinfo(xc_interface *xch,
831 uint32_t domid,
832 uint32_t vcpu,
833 xc_vcpuinfo_t *info);
834
835 long long xc_domain_get_cpu_usage(xc_interface *xch,
836 uint32_t domid,
837 int vcpu);
838
839 int xc_domain_sethandle(xc_interface *xch, uint32_t domid,
840 xen_domain_handle_t handle);
841
842 typedef struct xen_domctl_shadow_op_stats xc_shadow_op_stats_t;
843 int xc_shadow_control(xc_interface *xch,
844 uint32_t domid,
845 unsigned int sop,
846 unsigned int *mb,
847 unsigned int mode);
848 long long xc_logdirty_control(xc_interface *xch,
849 uint32_t domid,
850 unsigned int sop,
851 xc_hypercall_buffer_t *dirty_bitmap,
852 unsigned long pages,
853 unsigned int mode,
854 xc_shadow_op_stats_t *stats);
855
856 int xc_get_paging_mempool_size(xc_interface *xch, uint32_t domid, uint64_t *size);
857 int xc_set_paging_mempool_size(xc_interface *xch, uint32_t domid, uint64_t size);
858
859 int xc_sched_credit_domain_set(xc_interface *xch,
860 uint32_t domid,
861 struct xen_domctl_sched_credit *sdom);
862
863 int xc_sched_credit_domain_get(xc_interface *xch,
864 uint32_t domid,
865 struct xen_domctl_sched_credit *sdom);
866 int xc_sched_credit_params_set(xc_interface *xch,
867 uint32_t cpupool_id,
868 struct xen_sysctl_credit_schedule *schedule);
869 int xc_sched_credit_params_get(xc_interface *xch,
870 uint32_t cpupool_id,
871 struct xen_sysctl_credit_schedule *schedule);
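/*
 * Illustrative sketch: read-modify-write a domain's credit-scheduler
 * parameters (weight/cap fields per xen/domctl.h; error handling
 * abbreviated):
 *
 *     struct xen_domctl_sched_credit sdom;
 *
 *     if ( !xc_sched_credit_domain_get(xch, domid, &sdom) )
 *     {
 *         sdom.weight *= 2;              ... cap left untouched ...
 *         xc_sched_credit_domain_set(xch, domid, &sdom);
 *     }
 */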
872
873 int xc_sched_credit2_params_set(xc_interface *xch,
874 uint32_t cpupool_id,
875 struct xen_sysctl_credit2_schedule *schedule);
876 int xc_sched_credit2_params_get(xc_interface *xch,
877 uint32_t cpupool_id,
878 struct xen_sysctl_credit2_schedule *schedule);
879 int xc_sched_credit2_domain_set(xc_interface *xch,
880 uint32_t domid,
881 struct xen_domctl_sched_credit2 *sdom);
882 int xc_sched_credit2_domain_get(xc_interface *xch,
883 uint32_t domid,
884 struct xen_domctl_sched_credit2 *sdom);
885
886 int xc_sched_rtds_domain_set(xc_interface *xch,
887 uint32_t domid,
888 struct xen_domctl_sched_rtds *sdom);
889 int xc_sched_rtds_domain_get(xc_interface *xch,
890 uint32_t domid,
891 struct xen_domctl_sched_rtds *sdom);
892 int xc_sched_rtds_vcpu_set(xc_interface *xch,
893 uint32_t domid,
894 struct xen_domctl_schedparam_vcpu *vcpus,
895 uint32_t num_vcpus);
896 int xc_sched_rtds_vcpu_get(xc_interface *xch,
897 uint32_t domid,
898 struct xen_domctl_schedparam_vcpu *vcpus,
899 uint32_t num_vcpus);
900
901 int
902 xc_sched_arinc653_schedule_set(
903 xc_interface *xch,
904 uint32_t cpupool_id,
905 struct xen_sysctl_arinc653_schedule *schedule);
906
907 int
908 xc_sched_arinc653_schedule_get(
909 xc_interface *xch,
910 uint32_t cpupool_id,
911 struct xen_sysctl_arinc653_schedule *schedule);
912
913 /**
914 * This function sends a trigger to a domain.
915 *
916 * @parm xch a handle to an open hypervisor interface
917 * @parm domid the domain id to send trigger
918 * @parm trigger the trigger type
919 * @parm vcpu the vcpu number to send trigger
920 * return 0 on success, -1 on failure
921 */
922 int xc_domain_send_trigger(xc_interface *xch,
923 uint32_t domid,
924 uint32_t trigger,
925 uint32_t vcpu);
926
927 /**
928 * This function enables or disables debugging of a domain.
929 *
930 * @parm xch a handle to an open hypervisor interface
931 * @parm domid the domain id to enable or disable debugging for
932 * @parm enable true to enable debugging
933 * return 0 on success, -1 on failure
934 */
935 int xc_domain_setdebugging(xc_interface *xch,
936 uint32_t domid,
937 unsigned int enable);
938
939 /**
940 * This function audits the (top level) p2m of a domain
941 * and returns the different error counts, if any.
942 *
943 * @parm xch a handle to an open hypervisor interface
944 * @parm domid the domain id whose top level p2m we
945 * want to audit
946 * @parm orphans count of m2p entries for valid
947 * domain pages containing an invalid value
948 * @parm m2p_bad count of m2p entries mismatching the
949 * associated p2m entry for this domain
950 * @parm p2m_bad count of p2m entries for this domain
951 * mismatching the associated m2p entry
952 * return 0 on success, -1 on failure
953 * errno values on failure include:
954 * -ENOSYS: not implemented
955 * -EFAULT: could not copy results back to guest
956 */
957 int xc_domain_p2m_audit(xc_interface *xch,
958 uint32_t domid,
959 uint64_t *orphans,
960 uint64_t *m2p_bad,
961 uint64_t *p2m_bad);
962
963 /**
964 * This function sets or clears the requirement that a memory access
965 * event listener be present for the domain.
966 *
967 * @parm xch a handle to an open hypervisor interface
968 * @parm domid the domain id to set the requirement for
969 * @parm required true to require a listener
970 * return 0 on success, -1 on failure
971 */
972 int xc_domain_set_access_required(xc_interface *xch,
973 uint32_t domid,
974 unsigned int required);
975 /**
976 * This function sets the handler of global VIRQs sent by the hypervisor
977 *
978 * @parm xch a handle to an open hypervisor interface
979 * @parm domid the domain id which will handle the VIRQ
980 * @parm virq the virq number (VIRQ_*)
981 * return 0 on success, -1 on failure
982 */
983 int xc_domain_set_virq_handler(xc_interface *xch, uint32_t domid, int virq);
984
985 /*
986 * CPUPOOL MANAGEMENT FUNCTIONS
987 */
988
989 typedef struct xc_cpupoolinfo {
990 uint32_t cpupool_id;
991 uint32_t sched_id;
992 uint32_t n_dom;
993 xc_cpumap_t cpumap;
994 } xc_cpupoolinfo_t;
995
996 #define XC_CPUPOOL_POOLID_ANY 0xFFFFFFFF
997
998 /**
999 * Create a new cpupool.
1000 *
1001 * @parm xc_handle a handle to an open hypervisor interface
1002 * @parm ppoolid pointer to the new cpupool id (in/out)
1003 * @parm sched_id id of scheduler to use for pool
1004 * return 0 on success, -1 on failure
1005 */
1006 int xc_cpupool_create(xc_interface *xch,
1007 uint32_t *ppoolid,
1008 uint32_t sched_id);
1009
1010 /**
1011 * Destroy a cpupool. Pool must be unused and have no cpu assigned.
1012 *
1013 * @parm xc_handle a handle to an open hypervisor interface
1014 * @parm poolid id of the cpupool to destroy
1015 * return 0 on success, -1 on failure
1016 */
1017 int xc_cpupool_destroy(xc_interface *xch,
1018 uint32_t poolid);
1019
1020 /**
1021 * Get cpupool info. Returns info for the first cpupool with an id
1022 * greater than or equal to the given id.
1023 * @parm xc_handle a handle to an open hypervisor interface
1024 * @parm poolid lowest id for which info is returned
1025 * return cpupool info ptr (to be freed via xc_cpupool_infofree)
1026 */
1027 xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
1028 uint32_t poolid);
1029
1030 /**
1031 * Free cpupool info. Used to free info obtained via xc_cpupool_getinfo.
1032 * @parm xc_handle a handle to an open hypervisor interface
1033 * @parm info area to free
1034 */
1035 void xc_cpupool_infofree(xc_interface *xch,
1036 xc_cpupoolinfo_t *info);
1037
1038 /**
1039 * Add cpu to a cpupool. cpu may be -1 indicating the first unassigned.
1040 *
1041 * @parm xc_handle a handle to an open hypervisor interface
1042 * @parm poolid id of the cpupool
1043 * @parm cpu cpu number to add
1044 * return 0 on success, -1 on failure
1045 */
1046 int xc_cpupool_addcpu(xc_interface *xch,
1047 uint32_t poolid,
1048 int cpu);
1049
1050 /**
1051 * Remove cpu from cpupool. cpu may be -1 indicating the last cpu of the pool.
1052 *
1053 * @parm xc_handle a handle to an open hypervisor interface
1054 * @parm poolid id of the cpupool
1055 * @parm cpu cpu number to remove
1056 * return 0 on success, -1 on failure
1057 */
1058 int xc_cpupool_removecpu(xc_interface *xch,
1059 uint32_t poolid,
1060 int cpu);
1061
1062 /**
1063 * Move domain to another cpupool.
1064 *
1065 * @parm xc_handle a handle to an open hypervisor interface
1066 * @parm poolid id of the destination cpupool
1067 * @parm domid id of the domain to move
1068 * return 0 on success, -1 on failure
1069 */
1070 int xc_cpupool_movedomain(xc_interface *xch,
1071 uint32_t poolid,
1072 uint32_t domid);
1073
1074 /**
1075 * Return map of cpus not in any cpupool.
1076 *
1077 * @parm xc_handle a handle to an open hypervisor interface
1078 * return cpumap array on success, NULL else
1079 */
1080 xc_cpumap_t xc_cpupool_freeinfo(xc_interface *xch);
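/*
 * Illustrative sketch: create a pool under the credit scheduler, give it
 * the first unassigned CPU, and move a domain into it (scheduler ids are
 * the XEN_SCHEDULER_* constants from xen/domctl.h; error handling
 * abbreviated):
 *
 *     uint32_t poolid = XC_CPUPOOL_POOLID_ANY;
 *
 *     xc_cpupool_create(xch, &poolid, XEN_SCHEDULER_CREDIT);
 *     xc_cpupool_addcpu(xch, poolid, -1);
 *     xc_cpupool_movedomain(xch, poolid, domid);
 */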
1081
1082 /*
1083 * EVENT CHANNEL FUNCTIONS
1084 *
1085 * None of these do any logging.
1086 */
1087
1088 /* A port identifier is guaranteed to fit in 31 bits. */
1089 typedef int xc_evtchn_port_or_error_t;
1090
1091 /**
1092 * This function allocates an unbound port. Ports are named endpoints used for
1093 * interdomain communication. This function is most useful in opening a
1094 * well-known port within a domain to receive events on.
1095 *
1096 * NOTE: If you are allocating a *local* unbound port, you probably want to
1097 * use xc_evtchn_bind_unbound_port(). This function is intended for allocating
1098 * ports *only* during domain creation.
1099 *
1100 * @parm xch a handle to an open hypervisor interface
1101 * @parm dom the ID of the local domain (the 'allocatee')
1102 * @parm remote_dom the ID of the domain who will later bind
1103 * @return allocated port (in @dom) on success, -1 on failure
1104 */
1105 xc_evtchn_port_or_error_t
1106 xc_evtchn_alloc_unbound(xc_interface *xch,
1107 uint32_t dom,
1108 uint32_t remote_dom);
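/*
 * Illustrative sketch: while constructing a new domain, allocate a port
 * in it which the toolstack domain (dom0 here) will bind later:
 *
 *     xc_evtchn_port_or_error_t port =
 *         xc_evtchn_alloc_unbound(xch, domid, 0);
 *
 *     if ( port < 0 )
 *         ... allocation failed, see errno ...
 */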
1109
1110 int xc_evtchn_reset(xc_interface *xch,
1111 uint32_t dom);
1112
1113 typedef struct evtchn_status xc_evtchn_status_t;
1114 int xc_evtchn_status(xc_interface *xch, xc_evtchn_status_t *status);
1115
1116
1117
1118 int xc_physdev_pci_access_modify(xc_interface *xch,
1119 uint32_t domid,
1120 int bus,
1121 int dev,
1122 int func,
1123 int enable);
1124
1125 int xc_readconsolering(xc_interface *xch,
1126 char *buffer,
1127 unsigned int *pnr_chars,
1128 int clear, int incremental, uint32_t *pindex);
1129
1130 int xc_send_debug_keys(xc_interface *xch, const char *keys);
1131
1132 typedef struct xen_sysctl_physinfo xc_physinfo_t;
1133 typedef struct xen_sysctl_cputopo xc_cputopo_t;
1134 typedef struct xen_sysctl_numainfo xc_numainfo_t;
1135 typedef struct xen_sysctl_meminfo xc_meminfo_t;
1136 typedef struct xen_sysctl_pcitopoinfo xc_pcitopoinfo_t;
1137
1138 typedef uint32_t xc_cpu_to_node_t;
1139 typedef uint32_t xc_cpu_to_socket_t;
1140 typedef uint32_t xc_cpu_to_core_t;
1141 typedef uint64_t xc_node_to_memsize_t;
1142 typedef uint64_t xc_node_to_memfree_t;
1143 typedef uint32_t xc_node_to_node_dist_t;
1144
1145 int xc_physinfo(xc_interface *xch, xc_physinfo_t *info);
1146 int xc_cputopoinfo(xc_interface *xch, unsigned *max_cpus,
1147 xc_cputopo_t *cputopo);
1148 int xc_microcode_update(xc_interface *xch, const void *buf,
1149 size_t len, unsigned int flags);
1150 int xc_get_cpu_version(xc_interface *xch, struct xenpf_pcpu_version *cpu_ver);
1151 int xc_get_ucode_revision(xc_interface *xch,
1152 struct xenpf_ucode_revision *ucode_rev);
1153 int xc_numainfo(xc_interface *xch, unsigned *max_nodes,
1154 xc_meminfo_t *meminfo, uint32_t *distance);
1155 int xc_pcitopoinfo(xc_interface *xch, unsigned num_devs,
1156 physdev_pci_device_t *devs, uint32_t *nodes);
1157
1158 int xc_sched_id(xc_interface *xch,
1159 int *sched_id);
1160
1161 int xc_machphys_mfn_list(xc_interface *xch,
1162 unsigned long max_extents,
1163 xen_pfn_t *extent_start);
1164
1165 typedef struct xen_sysctl_cpuinfo xc_cpuinfo_t;
1166 int xc_getcpuinfo(xc_interface *xch, int max_cpus,
1167 xc_cpuinfo_t *info, int *nr_cpus);
1168
1169 int xc_domain_setmaxmem(xc_interface *xch,
1170 uint32_t domid,
1171 uint64_t max_memkb);
1172
1173 int xc_domain_set_memmap_limit(xc_interface *xch,
1174 uint32_t domid,
1175 unsigned long map_limitkb);
1176
1177 int xc_domain_setvnuma(xc_interface *xch,
1178 uint32_t domid,
1179 uint32_t nr_vnodes,
1180 uint32_t nr_regions,
1181 uint32_t nr_vcpus,
1182 xen_vmemrange_t *vmemrange,
1183 unsigned int *vdistance,
1184 unsigned int *vcpu_to_vnode,
1185 unsigned int *vnode_to_pnode);
1186 /*
1187 * Retrieve vnuma configuration
1188 * domid: IN, target domid
1189 * nr_vnodes: IN/OUT, number of vnodes, not NULL
1190 * nr_vmemranges: IN/OUT, number of vmemranges, not NULL
1191 * nr_vcpus: IN/OUT, number of vcpus, not NULL
1192 * vmemranges: OUT, an array which has length of nr_vmemranges
1193 * vdistance: OUT, an array which has length of nr_vnodes * nr_vnodes
1194 * vcpu_to_vnode: OUT, an array which has length of nr_vcpus
1195 */
1196 int xc_domain_getvnuma(xc_interface *xch,
1197 uint32_t domid,
1198 uint32_t *nr_vnodes,
1199 uint32_t *nr_vmemranges,
1200 uint32_t *nr_vcpus,
1201 xen_vmemrange_t *vmemrange,
1202 unsigned int *vdistance,
1203 unsigned int *vcpu_to_vnode);
1204
1205 int xc_domain_soft_reset(xc_interface *xch,
1206 uint32_t domid);
1207
1208 #if defined(__i386__) || defined(__x86_64__)
1209 /*
1210 * PC BIOS standard E820 types and structure.
1211 */
1212 #define E820_RAM 1
1213 #define E820_RESERVED 2
1214 #define E820_ACPI 3
1215 #define E820_NVS 4
1216 #define E820_UNUSABLE 5
1217
1218 #define E820MAX (128)
1219
1220 struct e820entry {
1221 uint64_t addr;
1222 uint64_t size;
1223 uint32_t type;
1224 } __attribute__((packed));
1225 int xc_domain_set_memory_map(xc_interface *xch,
1226 uint32_t domid,
1227 struct e820entry entries[],
1228 uint32_t nr_entries);
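/*
 * Illustrative sketch: install a minimal two-entry map (addresses and
 * sizes are made-up example values):
 *
 *     struct e820entry map[] = {
 *         { .addr = 0,       .size = 0xa0000, .type = E820_RAM      },
 *         { .addr = 0xf0000, .size = 0x10000, .type = E820_RESERVED },
 *     };
 *
 *     xc_domain_set_memory_map(xch, domid, map,
 *                              sizeof(map) / sizeof(map[0]));
 */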
1229
1230 int xc_get_machine_memory_map(xc_interface *xch,
1231 struct e820entry entries[],
1232 uint32_t max_entries);
1233 #endif
1234
1235 int xc_reserved_device_memory_map(xc_interface *xch,
1236 uint32_t flags,
1237 uint16_t seg,
1238 uint8_t bus,
1239 uint8_t devfn,
1240 struct xen_reserved_device_memory entries[],
1241 uint32_t *max_entries);
1242 int xc_domain_set_time_offset(xc_interface *xch,
1243 uint32_t domid,
1244 int32_t time_offset_seconds);
1245
1246 int xc_domain_set_tsc_info(xc_interface *xch,
1247 uint32_t domid,
1248 uint32_t tsc_mode,
1249 uint64_t elapsed_nsec,
1250 uint32_t gtsc_khz,
1251 uint32_t incarnation);
1252
1253 int xc_domain_get_tsc_info(xc_interface *xch,
1254 uint32_t domid,
1255 uint32_t *tsc_mode,
1256 uint64_t *elapsed_nsec,
1257 uint32_t *gtsc_khz,
1258 uint32_t *incarnation);
1259
1260 int xc_domain_maximum_gpfn(xc_interface *xch, uint32_t domid, xen_pfn_t *gpfns);
1261
1262 int xc_domain_nr_gpfns(xc_interface *xch, uint32_t domid, xen_pfn_t *gpfns);
1263
1264 int xc_domain_increase_reservation(xc_interface *xch,
1265 uint32_t domid,
1266 unsigned long nr_extents,
1267 unsigned int extent_order,
1268 unsigned int mem_flags,
1269 xen_pfn_t *extent_start);
1270
1271 int xc_domain_increase_reservation_exact(xc_interface *xch,
1272 uint32_t domid,
1273 unsigned long nr_extents,
1274 unsigned int extent_order,
1275 unsigned int mem_flags,
1276 xen_pfn_t *extent_start);
1277
1278 int xc_domain_decrease_reservation(xc_interface *xch,
1279 uint32_t domid,
1280 unsigned long nr_extents,
1281 unsigned int extent_order,
1282 xen_pfn_t *extent_start);
1283
1284 int xc_domain_decrease_reservation_exact(xc_interface *xch,
1285 uint32_t domid,
1286 unsigned long nr_extents,
1287 unsigned int extent_order,
1288 xen_pfn_t *extent_start);
1289
1290 int xc_domain_add_to_physmap(xc_interface *xch,
1291 uint32_t domid,
1292 unsigned int space,
1293 unsigned long idx,
1294 xen_pfn_t gpfn);
1295
1296 int xc_domain_add_to_physmap_batch(xc_interface *xch,
1297 uint32_t domid,
1298 uint32_t foreign_domid,
1299 unsigned int space,
1300 unsigned int size,
1301 xen_ulong_t *idxs,
1302 xen_pfn_t *gpfns,
1303 int *errs);
1304
1305 int xc_domain_remove_from_physmap(xc_interface *xch,
1306 uint32_t domid,
1307 xen_pfn_t gpfn);
1308
1309 int xc_domain_populate_physmap(xc_interface *xch,
1310 uint32_t domid,
1311 unsigned long nr_extents,
1312 unsigned int extent_order,
1313 unsigned int mem_flags,
1314 xen_pfn_t *extent_start);
1315
1316 int xc_domain_populate_physmap_exact(xc_interface *xch,
1317 uint32_t domid,
1318 unsigned long nr_extents,
1319 unsigned int extent_order,
1320 unsigned int mem_flags,
1321 xen_pfn_t *extent_start);
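/*
 * Illustrative sketch: populate nr contiguous 4k frames starting at
 * guest frame "base", failing unless every extent is allocated (error
 * handling abbreviated):
 *
 *     xen_pfn_t *pfns = malloc(nr * sizeof(*pfns));
 *     unsigned long i;
 *
 *     for ( i = 0; i < nr; i++ )
 *         pfns[i] = base + i;
 *
 *     xc_domain_populate_physmap_exact(xch, domid, nr, 0, 0, pfns);
 *     free(pfns);
 */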
1322
1323 int xc_domain_claim_pages(xc_interface *xch,
1324 uint32_t domid,
1325 unsigned long nr_pages);
1326
1327 int xc_domain_memory_exchange_pages(xc_interface *xch,
1328 uint32_t domid,
1329 unsigned long nr_in_extents,
1330 unsigned int in_order,
1331 xen_pfn_t *in_extents,
1332 unsigned long nr_out_extents,
1333 unsigned int out_order,
1334 xen_pfn_t *out_extents);
1335
1336 int xc_domain_set_pod_target(xc_interface *xch,
1337 uint32_t domid,
1338 uint64_t target_pages,
1339 uint64_t *tot_pages,
1340 uint64_t *pod_cache_pages,
1341 uint64_t *pod_entries);
1342
1343 int xc_domain_get_pod_target(xc_interface *xch,
1344 uint32_t domid,
1345 uint64_t *tot_pages,
1346 uint64_t *pod_cache_pages,
1347 uint64_t *pod_entries);
1348
1349 int xc_domain_ioport_permission(xc_interface *xch,
1350 uint32_t domid,
1351 uint32_t first_port,
1352 uint32_t nr_ports,
1353 uint32_t allow_access);
1354
1355 int xc_domain_irq_permission(xc_interface *xch,
1356 uint32_t domid,
1357 uint32_t pirq,
1358 bool allow_access);
1359
1360 int xc_domain_gsi_permission(xc_interface *xch,
1361 uint32_t domid,
1362 uint32_t gsi,
1363 uint32_t flags);
1364
1365 int xc_domain_iomem_permission(xc_interface *xch,
1366 uint32_t domid,
1367 unsigned long first_mfn,
1368 unsigned long nr_mfns,
1369 uint8_t allow_access);
1370
1371 unsigned long xc_make_page_below_4G(xc_interface *xch, uint32_t domid,
1372 unsigned long mfn);
1373
1374 typedef xen_sysctl_perfc_desc_t xc_perfc_desc_t;
1375 typedef xen_sysctl_perfc_val_t xc_perfc_val_t;
1376 int xc_perfc_reset(xc_interface *xch);
1377 int xc_perfc_query_number(xc_interface *xch,
1378 int *nbr_desc,
1379 int *nbr_val);
1380 int xc_perfc_query(xc_interface *xch,
1381 xc_hypercall_buffer_t *desc,
1382 xc_hypercall_buffer_t *val);
1383
1384 typedef xen_sysctl_lockprof_data_t xc_lockprof_data_t;
1385 int xc_lockprof_reset(xc_interface *xch);
1386 int xc_lockprof_query_number(xc_interface *xch,
1387 uint32_t *n_elems);
1388 int xc_lockprof_query(xc_interface *xch,
1389 uint32_t *n_elems,
1390 uint64_t *time,
1391 xc_hypercall_buffer_t *data);
1392
1393 void *xc_memalign(xc_interface *xch, size_t alignment, size_t size);
1394
1395 /**
1396 * Avoid using this function, as it does not work for all cases (such
1397 * as 4M superpages, or guests using PSE36). Only used for debugging.
1398 *
1399 * Translates a virtual address in the context of a given domain and
1400 * vcpu returning the GFN containing the address (that is, an MFN for
1401 * PV guests, a PFN for HVM guests). Returns 0 for failure.
1402 *
1403 * @parm xch a handle on an open hypervisor interface
1404 * @parm dom the domain to perform the translation in
1405 * @parm vcpu the vcpu to perform the translation on
1406 * @parm virt the virtual address to translate
1407 */
1408 unsigned long xc_translate_foreign_address(xc_interface *xch, uint32_t dom,
1409 int vcpu, unsigned long long virt);
1410
1411
1412 int xc_copy_to_domain_page(xc_interface *xch, uint32_t domid,
1413 unsigned long dst_pfn, const char *src_page);
1414
1415 int xc_clear_domain_pages(xc_interface *xch, uint32_t domid,
1416 unsigned long dst_pfn, int num);
1417
1418 static inline int xc_clear_domain_page(xc_interface *xch, uint32_t domid,
1419 unsigned long dst_pfn)
1420 {
1421 return xc_clear_domain_pages(xch, domid, dst_pfn, 1);
1422 }
1423
1424 int xc_mmuext_op(xc_interface *xch, struct mmuext_op *op, unsigned int nr_ops,
1425 uint32_t dom);
1426
1427 /* System wide memory properties */
1428 int xc_maximum_ram_page(xc_interface *xch, unsigned long *max_mfn);
1429
1430 /* Get current total pages allocated to a domain. */
1431 long xc_get_tot_pages(xc_interface *xch, uint32_t domid);
1432
1433 /**
1434 * This function retrieves the number of bytes available
1435 * in the heap in a specific range of address-widths and nodes.
1436 *
1437 * @parm xch a handle to an open hypervisor interface
1439 * @parm min_width the smallest address width to query (0 if don't care)
1440 * @parm max_width the largest address width to query (0 if don't care)
1441 * @parm node the node to query (-1 for all)
1442 * @parm *bytes caller variable to put total bytes counted
1443 * @return 0 on success, <0 on failure.
1444 */
1445 int xc_availheap(xc_interface *xch, int min_width, int max_width, int node,
1446 uint64_t *bytes);
1447
1448 /*
1449 * Trace Buffer Operations
1450 */
1451
1452 /**
1453 * xc_tbuf_enable - enable tracing buffers
1454 *
1455 * @parm xch a handle to an open hypervisor interface
1456 * @parm pages size of tracing buffers to create (in pages)
1457 * @parm mfn location to store mfn of the trace buffers to
1458 * @parm size location to store the size (in bytes) of a trace buffer to
1459 *
1460 * Gets the machine address of the trace pointer area and the size of the
1461 * per CPU buffers.
1462 */
1463 int xc_tbuf_enable(xc_interface *xch, unsigned long pages,
1464 unsigned long *mfn, unsigned long *size);
1465
1466 /*
1467 * Disable tracing buffers.
1468 */
1469 int xc_tbuf_disable(xc_interface *xch);
1470
1471 /**
1472 * This function sets the size of the trace buffers. Setting the size
1473 * is currently a one-shot operation that may be performed either at boot
1474 * time or via this interface, not both. The buffer size must be set before
1475 * enabling tracing.
1476 *
1477 * @parm xch a handle to an open hypervisor interface
1478 * @parm size the size in pages per cpu for the trace buffers
1479 * @return 0 on success, -1 on failure.
1480 */
1481 int xc_tbuf_set_size(xc_interface *xch, unsigned long size);
1482
1483 /**
1484 * This function retrieves the current size of the trace buffers.
1485 * Note that the size returned is in terms of bytes, not pages.
1486 *
1487 * @parm xch a handle to an open hypervisor interface
1488 * @parm size will contain the size in bytes for the trace buffers
1489 * @return 0 on success, -1 on failure.
1490 */
1491 int xc_tbuf_get_size(xc_interface *xch, unsigned long *size);
1492
1493 int xc_tbuf_set_cpu_mask(xc_interface *xch, xc_cpumap_t mask);
1494
1495 int xc_tbuf_set_evt_mask(xc_interface *xch, uint32_t mask);
1496
1497 /**
1498 * Enable vmtrace for given vCPU.
1499 *
1500 * @parm xch a handle to an open hypervisor interface
1501 * @parm domid domain identifier
1502 * @parm vcpu vcpu identifier
1503 * @return 0 on success, -1 on failure
1504 */
1505 int xc_vmtrace_enable(xc_interface *xch, uint32_t domid, uint32_t vcpu);
1506
1507 /**
1508 * Disable vmtrace for given vCPU.
1509 *
1510 * @parm xch a handle to an open hypervisor interface
1511 * @parm domid domain identifier
1512 * @parm vcpu vcpu identifier
1513 * @return 0 on success, -1 on failure
1514 */
1515 int xc_vmtrace_disable(xc_interface *xch, uint32_t domid, uint32_t vcpu);
1516
1517 /**
1518 * Enable vmtrace for a given vCPU, along with resetting status/offset
1519 * details.
1520 *
1521 * @parm xch a handle to an open hypervisor interface
1522 * @parm domid domain identifier
1523 * @parm vcpu vcpu identifier
1524 * @return 0 on success, -1 on failure
1525 */
1526 int xc_vmtrace_reset_and_enable(xc_interface *xch, uint32_t domid,
1527 uint32_t vcpu);
1528
1529 /**
1530 * Get current output position inside the trace buffer.
1531 *
1532 * Repeated calls will return different values if tracing is enabled. It is
1533 * platform specific what happens when the buffer fills completely.
1534 *
1535 * @parm xch a handle to an open hypervisor interface
1536 * @parm domid domain identifier
1537 * @parm vcpu vcpu identifier
1538 * @parm pos current output position in bytes
1539 * @return 0 on success, -1 on failure
1540 */
1541 int xc_vmtrace_output_position(xc_interface *xch, uint32_t domid,
1542 uint32_t vcpu, uint64_t *pos);
1543
1544 /**
1545 * Get platform specific vmtrace options.
1546 *
1547 * @parm xch a handle to an open hypervisor interface
1548 * @parm domid domain identifier
1549 * @parm vcpu vcpu identifier
1550 * @parm key platform-specific input
1551 * @parm value platform-specific output
1552 * @return 0 on success, -1 on failure
1553 */
1554 int xc_vmtrace_get_option(xc_interface *xch, uint32_t domid,
1555 uint32_t vcpu, uint64_t key, uint64_t *value);
1556
1557 /**
1558 * Set platform specific vmtrace options.
1559 *
1560 * @parm xch a handle to an open hypervisor interface
1561 * @parm domid domain identifier
1562 * @parm vcpu vcpu identifier
1563 * @parm key platform-specific input
1564 * @parm value platform-specific input
1565 * @return 0 on success, -1 on failure
1566 */
1567 int xc_vmtrace_set_option(xc_interface *xch, uint32_t domid,
1568 uint32_t vcpu, uint64_t key, uint64_t value);
1569
1570 int xc_domctl(xc_interface *xch, struct xen_domctl *domctl);
1571 int xc_sysctl(xc_interface *xch, struct xen_sysctl *sysctl);
1572 long xc_memory_op(xc_interface *xch, unsigned int cmd, void *arg, size_t len);
1573
1574 int xc_version(xc_interface *xch, int cmd, void *arg);
1575
1576 /*
1577 * Wrappers around XENVER_* subops. Callers must pass the returned pointer to
1578 * free().
1579 */
1580 char *xc_xenver_extraversion(xc_interface *xch);
1581 char *xc_xenver_capabilities(xc_interface *xch);
1582 char *xc_xenver_changeset(xc_interface *xch);
1583 char *xc_xenver_commandline(xc_interface *xch);
1584 char *xc_xenver_buildid(xc_interface *xch);
1585
1586 int xc_flask_op(xc_interface *xch, xen_flask_op_t *op);
1587
1588 /*
1589 * Subscribe to domain suspend via evtchn.
1590 * Returns -1 on failure, in which case errno will be set appropriately.
1591 * Just calls XEN_DOMCTL_subscribe - see the caveats for that domctl
1592 * (in its doc comment in domctl.h).
1593 */
1594 int xc_domain_subscribe_for_suspend(
1595 xc_interface *xch, uint32_t domid, evtchn_port_t port);
1596
1597 /**************************
1598 * GRANT TABLE OPERATIONS *
1599 **************************/
1600
1601 /*
1602 * These functions sometimes log messages as above, but not always.
1603 */
1604
1605
1606 int xc_gnttab_op(xc_interface *xch, int cmd,
1607 void * op, int op_size, int count);
1608 /* Logs iff hypercall bounce fails, otherwise doesn't. */
1609
1610 int xc_gnttab_query_size(xc_interface *xch, struct gnttab_query_size *query);
1611 int xc_gnttab_get_version(xc_interface *xch, uint32_t domid); /* Never logs */
1612 grant_entry_v1_t *xc_gnttab_map_table_v1(xc_interface *xch, uint32_t domid, int *gnt_num);
1613 grant_entry_v2_t *xc_gnttab_map_table_v2(xc_interface *xch, uint32_t domid, int *gnt_num);
1614 /* Sometimes these don't set errno [fixme], and sometimes they don't log. */
1615
1616 int xc_physdev_map_pirq(xc_interface *xch,
1617 uint32_t domid,
1618 int index,
1619 int *pirq);
1620
1621 int xc_physdev_map_pirq_msi(xc_interface *xch,
1622 uint32_t domid,
1623 int index,
1624 int *pirq,
1625 int devfn,
1626 int bus,
1627 int entry_nr,
1628 uint64_t table_base);
1629
1630 int xc_physdev_map_pirq_gsi(xc_interface *xch,
1631 uint32_t domid,
1632 int gsi,
1633 int *pirq);
1634
1635 int xc_physdev_unmap_pirq(xc_interface *xch,
1636 uint32_t domid,
1637 int pirq);
1638
1639 int xc_pcidev_get_gsi(xc_interface *xch, uint32_t sbdf);
1640
1641 /*
1642 * LOGGING AND ERROR REPORTING
1643 */
1644
1645
1646 #define XC_MAX_ERROR_MSG_LEN 1024
1647 typedef struct xc_error {
1648 enum xc_error_code code;
1649 char message[XC_MAX_ERROR_MSG_LEN];
1650 } xc_error;
1651
1652
1653 /*
1654 * Convert an error code or level into a text description. Return values
1655 * are pointers to fixed strings and do not need to be freed.
1656 * Do not fail, but return pointers to generic strings if fed bogus input.
1657 */
1658 const char *xc_error_code_to_desc(int code);
1659
1660 /*
1661 * Convert an errno value to a text description.
1662 */
1663 const char *xc_strerror(xc_interface *xch, int errcode);
1664
1665
1666 /*
1667 * Return a pointer to the last error with level XC_REPORT_ERROR. This
1668 * pointer and the data pointed to are only valid until the next call
1669 * to libxc in the same thread.
1670 */
1671 const xc_error *xc_get_last_error(xc_interface *handle);
1672
1673 /*
1674 * Clear the last error
1675 */
1676 void xc_clear_last_error(xc_interface *xch);
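/*
 * Illustrative sketch of inspecting the last error after a failed call
 * ('some_xc_call' is hypothetical):
 *
 *     if ( some_xc_call(xch) < 0 )
 *     {
 *         const xc_error *err = xc_get_last_error(xch);
 *
 *         fprintf(stderr, "%s: %s\n",
 *                 xc_error_code_to_desc(err->code), err->message);
 *         xc_clear_last_error(xch);
 *     }
 */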
1677
1678 int xc_hvm_param_set(xc_interface *handle, uint32_t dom, uint32_t param, uint64_t value);
1679 int xc_hvm_param_get(xc_interface *handle, uint32_t dom, uint32_t param, uint64_t *value);
1680
1681 /* Deprecated: use xc_hvm_param_set/get() instead. */
1682 int xc_set_hvm_param(xc_interface *handle, uint32_t dom, int param, unsigned long value);
1683 int xc_get_hvm_param(xc_interface *handle, uint32_t dom, int param, unsigned long *value);
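/*
 * Illustrative example using the current accessors (HVM_PARAM_* constants
 * come from xen/hvm/params.h):
 *
 *     uint64_t evtchn;
 *
 *     if ( xc_hvm_param_get(xch, dom, HVM_PARAM_STORE_EVTCHN, &evtchn) )
 *         perror("xc_hvm_param_get");
 */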
1684
1685 /* HVM guest pass-through */
1686 int xc_assign_device(xc_interface *xch,
1687 uint32_t domid,
1688 uint32_t machine_sbdf,
1689 uint32_t flag);
1690
1691 int xc_get_device_group(xc_interface *xch,
1692 uint32_t domid,
1693 uint32_t machine_sbdf,
1694 uint32_t max_sdevs,
1695 uint32_t *num_sdevs,
1696 uint32_t *sdev_array);
1697
1698 int xc_test_assign_device(xc_interface *xch,
1699 uint32_t domid,
1700 uint32_t machine_sbdf);
1701
1702 int xc_deassign_device(xc_interface *xch,
1703 uint32_t domid,
1704 uint32_t machine_sbdf);
1705
1706 int xc_assign_dt_device(xc_interface *xch,
1707 uint32_t domid,
1708 char *path);
1709 int xc_test_assign_dt_device(xc_interface *xch,
1710 uint32_t domid,
1711 char *path);
1712 int xc_deassign_dt_device(xc_interface *xch,
1713 uint32_t domid,
1714 char *path);
1715
1716 int xc_domain_memory_mapping(xc_interface *xch,
1717 uint32_t domid,
1718 unsigned long first_gfn,
1719 unsigned long first_mfn,
1720 unsigned long nr_mfns,
1721 uint32_t add_mapping);
1722
1723 int xc_domain_ioport_mapping(xc_interface *xch,
1724 uint32_t domid,
1725 uint32_t first_gport,
1726 uint32_t first_mport,
1727 uint32_t nr_ports,
1728 uint32_t add_mapping);
1729
1730 int xc_domain_update_msi_irq(
1731 xc_interface *xch,
1732 uint32_t domid,
1733 uint32_t gvec,
1734 uint32_t pirq,
1735 uint32_t gflags,
1736 uint64_t gtable);
1737
1738 int xc_domain_unbind_msi_irq(xc_interface *xch,
1739 uint32_t domid,
1740 uint32_t gvec,
1741 uint32_t pirq,
1742 uint32_t gflags);
1743
1744 int xc_domain_bind_pt_irq(xc_interface *xch,
1745 uint32_t domid,
1746 uint8_t machine_irq,
1747 uint8_t irq_type,
1748 uint8_t bus,
1749 uint8_t device,
1750 uint8_t intx,
1751 uint8_t isa_irq);
1752
1753 int xc_domain_unbind_pt_irq(xc_interface *xch,
1754 uint32_t domid,
1755 uint8_t machine_irq,
1756 uint8_t irq_type,
1757 uint8_t bus,
1758 uint8_t device,
1759 uint8_t intx,
1760 uint8_t isa_irq);
1761
1762 int xc_domain_bind_pt_pci_irq(xc_interface *xch,
1763 uint32_t domid,
1764 uint8_t machine_irq,
1765 uint8_t bus,
1766 uint8_t device,
1767 uint8_t intx);
1768
1769 int xc_domain_bind_pt_isa_irq(xc_interface *xch,
1770 uint32_t domid,
1771 uint8_t machine_irq);
1772
1773 int xc_domain_bind_pt_spi_irq(xc_interface *xch,
1774 uint32_t domid,
1775 uint16_t vspi,
1776 uint16_t spi);
1777
1778 int xc_domain_unbind_pt_spi_irq(xc_interface *xch,
1779 uint32_t domid,
1780 uint16_t vspi,
1781 uint16_t spi);
1782
1783 /* Set the target domain */
1784 int xc_domain_set_target(xc_interface *xch,
1785 uint32_t domid,
1786 uint32_t target);
1787
1788 /* Control the domain for debug */
1789 int xc_domain_debug_control(xc_interface *xch,
1790 uint32_t domid,
1791 uint32_t sop,
1792 uint32_t vcpu);
1793
1794 #if defined(__i386__) || defined(__x86_64__)
1795
1796 /*
1797 * CPUID policy data, expressed in the legacy XEND format.
1798 *
 * Policy is an array of four strings, each 32 characters long:
1800 * policy[0] = eax
1801 * policy[1] = ebx
1802 * policy[2] = ecx
1803 * policy[3] = edx
1804 *
1805 * The format of the string is the following:
1806 * '1' -> force to 1
1807 * '0' -> force to 0
1808 * 'x' -> we don't care (use default)
1809 * 'k' -> pass through host value
1810 * 's' -> legacy alias for 'k'
1811 */
1812 struct xc_xend_cpuid {
1813 union {
1814 struct {
1815 uint32_t leaf, subleaf;
1816 };
1817 uint32_t input[2];
1818 };
1819 char *policy[4];
1820 };
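/*
 * Illustrative sketch of one policy entry using the format described
 * above (leaf/subleaf values are hypothetical, and the bit ordering
 * within each 32-char register string follows the legacy XEND
 * convention):
 *
 *     struct xc_xend_cpuid ent = {
 *         .leaf = 1, .subleaf = ~0u,
 *         .policy = {
 *             "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",   // eax: defaults
 *             "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",   // ebx: defaults
 *             "kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk0",   // ecx: host, one bit 0
 *             "kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk",   // edx: host values
 *         },
 *     };
 */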
1821
1822 /*
1823 * MSR policy data.
1824 *
1825 * The format of the policy string is the following:
1826 * '1' -> force to 1
1827 * '0' -> force to 0
1828 * 'x' -> we don't care (use default)
1829 * 'k' -> pass through host value
1830 */
1831 struct xc_msr {
1832 uint32_t index;
1833 char policy[65];
1834 };
1835 #define XC_MSR_INPUT_UNUSED 0xffffffffu
1836
1837 /*
1838 * Make adjustments to the CPUID settings for a domain.
1839 *
1840 * This path is used in two cases. First, for fresh boots of the domain, and
1841 * secondly for migrate-in/restore of pre-4.14 guests (where CPUID data was
1842 * missing from the stream). The @restore parameter distinguishes these
 * cases, and the generated policy must be compatible with Xen 4.13.
1844 *
1845 * Either pass a full new @featureset (and @nr_features), or adjust individual
1846 * features (@pae, @itsc, @nested_virt).
1847 *
1848 * Then (optionally) apply legacy XEND CPUID overrides (@xend) or MSR (@msr)
1849 * to the result.
1850 */
1851 int xc_cpuid_apply_policy(xc_interface *xch,
1852 uint32_t domid, bool restore,
1853 const uint32_t *featureset,
1854 unsigned int nr_features, bool pae, bool itsc,
1855 bool nested_virt, const struct xc_xend_cpuid *xend,
1856 const struct xc_msr *msr);
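/*
 * Illustrative sketch for a fresh boot, with no explicit featureset and
 * no XEND/MSR overrides (all flag values hypothetical):
 *
 *     if ( xc_cpuid_apply_policy(xch, domid,
 *                                false,      // restore: fresh boot
 *                                NULL, 0,    // no explicit featureset
 *                                true,       // pae
 *                                false,      // itsc
 *                                false,      // nested_virt
 *                                NULL,       // no XEND overrides
 *                                NULL) )     // no MSR overrides
 *         perror("xc_cpuid_apply_policy");
 */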
1857 int xc_mca_op(xc_interface *xch, struct xen_mc *mc);
1858 int xc_mca_op_inject_v2(xc_interface *xch, unsigned int flags,
1859 xc_cpumap_t cpumap, unsigned int nr_cpus);
1860 #endif
1861
1862 struct xc_px_val {
1863 uint64_t freq; /* Px core frequency */
1864 uint64_t residency; /* Px residency time */
1865 uint64_t count; /* Px transition count */
1866 };
1867
1868 struct xc_px_stat {
1869 uint8_t total; /* total Px states */
1870 uint8_t usable; /* usable Px states */
1871 uint8_t last; /* last Px state */
1872 uint8_t cur; /* current Px state */
1873 uint64_t *trans_pt; /* Px transition table */
1874 struct xc_px_val *pt;
1875 };
1876
1877 int xc_pm_get_max_px(xc_interface *xch, int cpuid, int *max_px);
1878 int xc_pm_get_pxstat(xc_interface *xch, int cpuid, struct xc_px_stat *pxpt);
1879 int xc_pm_reset_pxstat(xc_interface *xch, int cpuid);
1880
1881 struct xc_cx_stat {
1882 uint32_t nr; /* entry nr in triggers[]/residencies[], incl C0 */
1883 uint32_t last; /* last Cx state */
1884 uint64_t idle_time; /* idle time from boot */
1885 uint64_t *triggers; /* Cx trigger counts */
1886 uint64_t *residencies; /* Cx residencies */
1887 uint32_t nr_pc; /* entry nr in pc[] */
1888 uint32_t nr_cc; /* entry nr in cc[] */
1889 uint64_t *pc; /* 1-biased indexing (i.e. excl C0) */
1890 uint64_t *cc; /* 1-biased indexing (i.e. excl C0) */
1891 };
1892 typedef struct xc_cx_stat xc_cx_stat_t;
1893
1894 int xc_pm_get_max_cx(xc_interface *xch, int cpuid, int *max_cx);
1895 int xc_pm_get_cxstat(xc_interface *xch, int cpuid, struct xc_cx_stat *cxpt);
1896 int xc_pm_reset_cxstat(xc_interface *xch, int cpuid);
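/*
 * Illustrative sketch: the caller is assumed to allocate the trigger and
 * residency arrays before querying (the exact sizing contract is an
 * assumption to verify against the implementation; pc[]/cc[] handling is
 * omitted):
 *
 *     int max_cx;
 *     struct xc_cx_stat cx = { 0 };
 *
 *     if ( xc_pm_get_max_cx(xch, cpuid, &max_cx) )
 *         return -1;
 *
 *     cx.nr = max_cx;
 *     cx.triggers = calloc(max_cx, sizeof(*cx.triggers));
 *     cx.residencies = calloc(max_cx, sizeof(*cx.residencies));
 *
 *     if ( cx.triggers && cx.residencies &&
 *          !xc_pm_get_cxstat(xch, cpuid, &cx) )
 *         printf("idle time since boot: %" PRIu64 "\n", cx.idle_time);
 *
 *     free(cx.triggers);
 *     free(cx.residencies);
 */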
1897
1898 int xc_cpu_online(xc_interface *xch, int cpu);
1899 int xc_cpu_offline(xc_interface *xch, int cpu);
1900 int xc_smt_enable(xc_interface *xch);
1901 int xc_smt_disable(xc_interface *xch);
1902
1903 /*
 * The cpufreq parameter fields in this structure are named after the
 * corresponding sysfs file names in native Linux.
1906 */
1907 typedef struct xen_userspace xc_userspace_t;
1908 typedef struct xen_ondemand xc_ondemand_t;
1909 typedef struct xen_cppc_para xc_cppc_para_t;
1910
1911 struct xc_get_cpufreq_para {
1912 /* IN/OUT variable */
1913 uint32_t cpu_num;
1914 uint32_t freq_num;
1915 uint32_t gov_num;
1916
1917 /* for all governors */
1918 /* OUT variable */
1919 uint32_t *affected_cpus;
1920 uint32_t *scaling_available_frequencies;
1921 char *scaling_available_governors;
1922 char scaling_driver[CPUFREQ_NAME_LEN];
1923
1924 uint32_t cpuinfo_cur_freq;
1925 uint32_t cpuinfo_max_freq;
1926 uint32_t cpuinfo_min_freq;
1927 union {
1928 struct {
1929 uint32_t scaling_cur_freq;
1930
1931 char scaling_governor[CPUFREQ_NAME_LEN];
1932 uint32_t scaling_max_freq;
1933 uint32_t scaling_min_freq;
1934
1935 /* for specific governor */
1936 union {
1937 xc_userspace_t userspace;
1938 xc_ondemand_t ondemand;
1939 } u;
1940 } s;
1941 xc_cppc_para_t cppc_para;
1942 } u;
1943
1944 int32_t turbo_enabled;
1945 };
1946
1947 typedef struct xen_set_cppc_para xc_set_cppc_para_t;
1948
1949 int xc_get_cpufreq_para(xc_interface *xch, int cpuid,
1950 struct xc_get_cpufreq_para *user_para);
1951 int xc_set_cpufreq_gov(xc_interface *xch, int cpuid, char *govname);
1952 int xc_set_cpufreq_para(xc_interface *xch, int cpuid,
1953 int ctrl_type, int ctrl_value);
1954 int xc_set_cpufreq_cppc(xc_interface *xch, int cpuid,
1955 xc_set_cppc_para_t *set_cppc);
1956 int xc_get_cpufreq_avgfreq(xc_interface *xch, int cpuid, int *avg_freq);
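/*
 * Illustrative sketch: cpu_num/freq_num/gov_num are IN/OUT, so one
 * (assumed) usage pattern is to call once with zeroed sizes, allocate the
 * arrays to the sizes reported back, and retry; error and cleanup
 * handling are omitted:
 *
 *     struct xc_get_cpufreq_para para = { 0 };
 *
 *     if ( xc_get_cpufreq_para(xch, cpuid, &para) && errno == EAGAIN )
 *     {
 *         para.affected_cpus = calloc(para.cpu_num, sizeof(uint32_t));
 *         para.scaling_available_frequencies =
 *             calloc(para.freq_num, sizeof(uint32_t));
 *         para.scaling_available_governors =
 *             calloc(para.gov_num, CPUFREQ_NAME_LEN);
 *
 *         if ( xc_get_cpufreq_para(xch, cpuid, &para) )
 *             perror("xc_get_cpufreq_para");
 *     }
 */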
1957
1958 int xc_set_sched_opt_smt(xc_interface *xch, uint32_t value);
1959
1960 int xc_get_cpuidle_max_cstate(xc_interface *xch, uint32_t *value);
1961 int xc_set_cpuidle_max_cstate(xc_interface *xch, uint32_t value);
1962
1963 int xc_get_cpuidle_max_csubstate(xc_interface *xch, uint32_t *value);
1964 int xc_set_cpuidle_max_csubstate(xc_interface *xch, uint32_t value);
1965
1966 int xc_enable_turbo(xc_interface *xch, int cpuid);
1967 int xc_disable_turbo(xc_interface *xch, int cpuid);
1968
1969 /**
1970 * altp2m operations
1971 */
1972
1973 int xc_altp2m_get_domain_state(xc_interface *handle, uint32_t dom, bool *state);
1974 int xc_altp2m_set_domain_state(xc_interface *handle, uint32_t dom, bool state);
1975 int xc_altp2m_set_vcpu_enable_notify(xc_interface *handle, uint32_t domid,
1976 uint32_t vcpuid, xen_pfn_t gfn);
1977 int xc_altp2m_set_vcpu_disable_notify(xc_interface *handle, uint32_t domid,
1978 uint32_t vcpuid);
1979 int xc_altp2m_create_view(xc_interface *handle, uint32_t domid,
1980 xenmem_access_t default_access, uint16_t *view_id);
1981 int xc_altp2m_destroy_view(xc_interface *handle, uint32_t domid,
1982 uint16_t view_id);
1983 /* Switch all vCPUs of the domain to the specified altp2m view */
1984 int xc_altp2m_switch_to_view(xc_interface *handle, uint32_t domid,
1985 uint16_t view_id);
1986 int xc_altp2m_set_suppress_ve(xc_interface *handle, uint32_t domid,
1987 uint16_t view_id, xen_pfn_t gfn, bool sve);
1988 int xc_altp2m_set_supress_ve_multi(xc_interface *handle, uint32_t domid,
1989 uint16_t view_id, xen_pfn_t first_gfn,
1990 xen_pfn_t last_gfn, bool sve,
1991 xen_pfn_t *error_gfn, int32_t *error_code);
1992 int xc_altp2m_get_suppress_ve(xc_interface *handle, uint32_t domid,
1993 uint16_t view_id, xen_pfn_t gfn, bool *sve);
1994 int xc_altp2m_set_mem_access(xc_interface *handle, uint32_t domid,
1995 uint16_t view_id, xen_pfn_t gfn,
1996 xenmem_access_t access);
1997 int xc_altp2m_set_mem_access_multi(xc_interface *handle, uint32_t domid,
1998 uint16_t view_id, uint8_t *access,
1999 uint64_t *gfns, uint32_t nr);
2000 int xc_altp2m_get_mem_access(xc_interface *handle, uint32_t domid,
2001 uint16_t view_id, xen_pfn_t gfn,
2002 xenmem_access_t *access);
2003 int xc_altp2m_change_gfn(xc_interface *handle, uint32_t domid,
2004 uint16_t view_id, xen_pfn_t old_gfn,
2005 xen_pfn_t new_gfn);
2006 int xc_altp2m_get_vcpu_p2m_idx(xc_interface *handle, uint32_t domid,
2007 uint32_t vcpuid, uint16_t *p2midx);
2008 /*
2009 * Set view visibility for xc_altp2m_switch_to_view and vmfunc.
 * Note: If altp2m mode is set to mixed, the guest is able to change the view
2011 * visibility and then call vmfunc.
2012 */
2013 int xc_altp2m_set_visibility(xc_interface *handle, uint32_t domid,
2014 uint16_t view_id, bool visible);
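/*
 * Illustrative sketch: enable altp2m, create a view with default rwx
 * access, restrict one (hypothetical) gfn, and switch all vCPUs to the
 * view; the domain must have been created with a suitable altp2m mode:
 *
 *     uint16_t view_id;
 *
 *     if ( xc_altp2m_set_domain_state(xch, domid, true) ||
 *          xc_altp2m_create_view(xch, domid, XENMEM_access_rwx, &view_id) ||
 *          xc_altp2m_set_mem_access(xch, domid, view_id, gfn,
 *                                   XENMEM_access_r) ||
 *          xc_altp2m_switch_to_view(xch, domid, view_id) )
 *         perror("altp2m setup");
 */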
2015
2016 /**
2017 * Mem paging operations.
2018 * Paging is supported only on the x86 architecture in 64 bit mode, with
2019 * Hardware-Assisted Paging (i.e. Intel EPT, AMD NPT). Moreover, AMD NPT
2020 * support is considered experimental.
2021 */
2022 int xc_mem_paging_enable(xc_interface *xch, uint32_t domain_id, uint32_t *port);
2023 int xc_mem_paging_disable(xc_interface *xch, uint32_t domain_id);
2024 int xc_mem_paging_resume(xc_interface *xch, uint32_t domain_id);
2025 int xc_mem_paging_nominate(xc_interface *xch, uint32_t domain_id,
2026 uint64_t gfn);
2027 int xc_mem_paging_evict(xc_interface *xch, uint32_t domain_id, uint64_t gfn);
2028 int xc_mem_paging_prep(xc_interface *xch, uint32_t domain_id, uint64_t gfn);
2029 int xc_mem_paging_load(xc_interface *xch, uint32_t domain_id,
2030 uint64_t gfn, void *buffer);
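/*
 * Illustrative sketch of the page-out path (domid and gfn hypothetical);
 * a real pager also services the ring and event channel associated with
 * 'port' - see xenpaging for a complete implementation:
 *
 *     uint32_t port;
 *
 *     if ( xc_mem_paging_enable(xch, domid, &port) ||
 *          xc_mem_paging_nominate(xch, domid, gfn) ||
 *          xc_mem_paging_evict(xch, domid, gfn) )
 *         perror("page-out");
 */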
2031
2032 /**
2033 * Access tracking operations.
2034 * Supported only on Intel EPT 64 bit processors.
2035 */
2036
2037 /*
2038 * Set a range of memory to a specific access.
2039 * Allowed types are XENMEM_access_default, XENMEM_access_n, any combination of
2040 * XENMEM_access_ + (rwx), and XENMEM_access_rx2rw
2041 */
2042 int xc_set_mem_access(xc_interface *xch, uint32_t domain_id,
2043 xenmem_access_t access, uint64_t first_pfn,
2044 uint32_t nr);
2045
2046 /*
2047 * Set an array of pages to their respective access in the access array.
2048 * The nr parameter specifies the size of the pages and access arrays.
2049 * The same allowed access types as for xc_set_mem_access() apply.
2050 */
2051 int xc_set_mem_access_multi(xc_interface *xch, uint32_t domain_id,
2052 uint8_t *access, uint64_t *pages,
2053 uint32_t nr);
2054
2055 /*
2056 * Gets the mem access for the given page (returned in access on success)
2057 */
2058 int xc_get_mem_access(xc_interface *xch, uint32_t domain_id,
2059 uint64_t pfn, xenmem_access_t *access);
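/*
 * Illustrative example (values hypothetical): restrict a range of pages
 * to read-only access, then read back the setting for the first one:
 *
 *     xenmem_access_t access;
 *
 *     if ( xc_set_mem_access(xch, domid, XENMEM_access_r, first_pfn, nr) ||
 *          xc_get_mem_access(xch, domid, first_pfn, &access) )
 *         perror("mem_access");
 */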
2060
2061 /*
2062 * Returns the VM_EVENT_INTERFACE version.
2063 */
2064 int xc_vm_event_get_version(xc_interface *xch);
2065
2066 /***
2067 * Monitor control operations.
2068 *
2069 * Enables the VM event monitor ring and returns the mapped ring page.
 * This ring is used to deliver mem_access events, as well as a set of additional
2071 * events that can be enabled with the xc_monitor_* functions.
2072 *
2073 * Will return NULL on error.
2074 * Caller has to unmap this page when done.
2075 */
2076 void *xc_monitor_enable(xc_interface *xch, uint32_t domain_id, uint32_t *port);
2077 int xc_monitor_disable(xc_interface *xch, uint32_t domain_id);
2078 int xc_monitor_resume(xc_interface *xch, uint32_t domain_id);
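/*
 * Illustrative sketch: enable the ring, turn on one event type, and tear
 * down afterwards; event consumption via 'port' is elided (see xen-access
 * for a complete consumer):
 *
 *     uint32_t port;
 *     void *ring = xc_monitor_enable(xch, domid, &port);
 *
 *     if ( !ring )
 *         perror("xc_monitor_enable");
 *     else if ( xc_monitor_singlestep(xch, domid, true) )
 *         perror("xc_monitor_singlestep");
 *
 *     ... consume events from the ring ...
 *
 *     munmap(ring, XC_PAGE_SIZE);    // sys/mman.h
 *     xc_monitor_disable(xch, domid);
 */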
2079 /*
2080 * Get a bitmap of supported monitor events in the form
2081 * (1 << XEN_DOMCTL_MONITOR_EVENT_*).
2082 */
2083 int xc_monitor_get_capabilities(xc_interface *xch, uint32_t domain_id,
2084 uint32_t *capabilities);
2085 int xc_monitor_write_ctrlreg(xc_interface *xch, uint32_t domain_id,
2086 uint16_t index, bool enable, bool sync,
2087 uint64_t bitmask, bool onchangeonly);
2088 /*
2089 * A list of MSR indices can usually be found in /usr/include/asm/msr-index.h.
2090 * Please consult the Intel/AMD manuals for more information on
2091 * non-architectural indices.
2092 */
2093 int xc_monitor_mov_to_msr(xc_interface *xch, uint32_t domain_id, uint32_t msr,
2094 bool enable, bool onchangeonly);
2095 int xc_monitor_singlestep(xc_interface *xch, uint32_t domain_id, bool enable);
2096 int xc_monitor_software_breakpoint(xc_interface *xch, uint32_t domain_id,
2097 bool enable);
2098 int xc_monitor_descriptor_access(xc_interface *xch, uint32_t domain_id,
2099 bool enable);
2100 int xc_monitor_guest_request(xc_interface *xch, uint32_t domain_id,
2101 bool enable, bool sync, bool allow_userspace);
2102 /*
2103 * Disables page-walk mem_access events by emulating. If the
 * emulation cannot be performed, then a VM_EVENT_REASON_EMUL_UNIMPLEMENTED
2105 * event will be issued.
2106 */
2107 int xc_monitor_inguest_pagefault(xc_interface *xch, uint32_t domain_id,
2108 bool disable);
2109 int xc_monitor_debug_exceptions(xc_interface *xch, uint32_t domain_id,
2110 bool enable, bool sync);
2111 int xc_monitor_cpuid(xc_interface *xch, uint32_t domain_id, bool enable);
2112 int xc_monitor_privileged_call(xc_interface *xch, uint32_t domain_id,
2113 bool enable);
2114 int xc_monitor_emul_unimplemented(xc_interface *xch, uint32_t domain_id,
2115 bool enable);
2116 int xc_monitor_vmexit(xc_interface *xch, uint32_t domain_id, bool enable,
2117 bool sync);
2118 int xc_monitor_io(xc_interface *xch, uint32_t domain_id, bool enable);
2119 /**
2120 * This function enables / disables emulation for each REP for a
2121 * REP-compatible instruction.
2122 *
2123 * @parm xch a handle to an open hypervisor interface.
 * @parm domain_id the domain id to operate on.
2125 * @parm enable if 0 optimize when possible, else emulate each REP.
2126 * @return 0 on success, -1 on failure.
2127 */
2128 int xc_monitor_emulate_each_rep(xc_interface *xch, uint32_t domain_id,
2129 bool enable);
2130
2131 /***
2132 * Memory sharing operations.
2133 *
 * Unless otherwise noted, these calls return 0 on success, or -1 with errno set on
2135 * failure.
2136 *
2137 * Sharing is supported only on the x86 architecture in 64 bit mode, with
2138 * Hardware-Assisted Paging (i.e. Intel EPT, AMD NPT). Moreover, AMD NPT
2139 * support is considered experimental.
 *
 * Calls below return ENOSYS on non-x86_64 architectures.
2142 * Calls below return ENODEV if the domain does not support HAP.
2143 * Calls below return ESRCH if the specified domain does not exist.
2144 * Calls below return EPERM if the caller is unprivileged for this domain.
2145 */
2146
2147 /* Turn on/off sharing for the domid, depending on the enable flag.
2148 *
2149 * Returns EXDEV if trying to enable and the domain has had a PCI device
2150 * assigned for passthrough (these two features are mutually exclusive).
2151 *
2152 * When sharing for a domain is turned off, the domain may still reference
2153 * shared pages. Unsharing happens lazily. */
2154 int xc_memshr_control(xc_interface *xch,
2155 uint32_t domid,
2156 int enable);
2157
2158 /* Create a communication ring in which the hypervisor will place ENOMEM
2159 * notifications.
2160 *
2161 * ENOMEM happens when unsharing pages: a Copy-on-Write duplicate needs to be
 * allocated, and that allocation can fail with an out-of-memory error.
2163 *
2164 * For complete examples on how to plumb a notification ring, look into
2165 * xenpaging or xen-access.
2166 *
2167 * On receipt of a notification, the helper should ensure there is memory
2168 * available to the domain before retrying.
2169 *
2170 * If a domain encounters an ENOMEM condition when sharing and this ring
2171 * has not been set up, the hypervisor will crash the domain.
2172 *
2173 * Fails with:
2174 * EINVAL if port is NULL
2175 * EINVAL if the sharing ring has already been enabled
2176 * ENOSYS if no guest gfn has been specified to host the ring via an hvm param
2177 * EINVAL if the gfn for the ring has not been populated
2178 * ENOENT if the gfn for the ring is paged out, or cannot be unshared
2179 * EINVAL if the gfn for the ring cannot be written to
2180 * EINVAL if the domain is dying
2181 * ENOSPC if an event channel cannot be allocated for the ring
2182 * ENOMEM if memory cannot be allocated for internal data structures
 * EINVAL or EACCES if the request is denied by the security policy
2184 */
2185
2186 int xc_memshr_ring_enable(xc_interface *xch,
2187 uint32_t domid,
2188 uint32_t *port);
2189 /* Disable the ring for ENOMEM communication.
2190 * May fail with EINVAL if the ring was not enabled in the first place.
2191 */
2192 int xc_memshr_ring_disable(xc_interface *xch,
2193 uint32_t domid);
2194
2195 /*
2196 * Calls below return EINVAL if sharing has not been enabled for the domain
2197 * Calls below return EINVAL if the domain is dying
2198 */
/* Once a response to an ENOMEM notification is prepared, the tool can
2200 * notify the hypervisor to re-schedule the faulting vcpu of the domain with an
2201 * event channel kick and/or this call. */
2202 int xc_memshr_domain_resume(xc_interface *xch,
2203 uint32_t domid);
2204
2205 /* Select a page for sharing.
2206 *
2207 * A 64 bit opaque handle will be stored in handle. The hypervisor ensures
2208 * that if the page is modified, the handle will be invalidated, and future
2209 * users of it will fail. If the page has already been selected and is still
2210 * associated to a valid handle, the existing handle will be returned.
2211 *
2212 * May fail with:
2213 * EINVAL if the gfn is not populated or not sharable (mmio, etc)
2214 * ENOMEM if internal data structures cannot be allocated
2215 * E2BIG if the page is being referenced by other subsystems (e.g. qemu)
2216 * ENOENT or EEXIST if there are internal hypervisor errors.
2217 */
2218 int xc_memshr_nominate_gfn(xc_interface *xch,
2219 uint32_t domid,
2220 unsigned long gfn,
2221 uint64_t *handle);
2222 /* Same as above, but instead of a guest frame number, the input is a grant
2223 * reference provided by the guest.
2224 *
2225 * May fail with EINVAL if the grant reference is invalid.
2226 */
2227 int xc_memshr_nominate_gref(xc_interface *xch,
2228 uint32_t domid,
2229 grant_ref_t gref,
2230 uint64_t *handle);
2231
2232 /* The three calls below may fail with
2233 * 10 (or -XENMEM_SHARING_OP_S_HANDLE_INVALID) if the handle passed as source
2234 * is invalid.
2235 * 9 (or -XENMEM_SHARING_OP_C_HANDLE_INVALID) if the handle passed as client is
2236 * invalid.
2237 */
2238 /* Share two nominated guest pages.
2239 *
2240 * If the call succeeds, both pages will point to the same backing frame (or
2241 * mfn). The hypervisor will verify the handles are still valid, but it will
 * not perform any sanity checking on the contents of the pages (the selection
2243 * mechanism for sharing candidates is entirely up to the user-space tool).
2244 *
2245 * After successful sharing, the client handle becomes invalid. Both <domain,
2246 * gfn> tuples point to the same mfn with the same handle, the one specified as
2247 * source. Either 3-tuple can be specified later for further re-sharing.
2248 */
2249 int xc_memshr_share_gfns(xc_interface *xch,
2250 uint32_t source_domain,
2251 unsigned long source_gfn,
2252 uint64_t source_handle,
2253 uint32_t client_domain,
2254 unsigned long client_gfn,
2255 uint64_t client_handle);
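/*
 * Illustrative sketch: deduplicate a single page between two domains by
 * nominating both gfns and then sharing them (all identifiers
 * hypothetical):
 *
 *     uint64_t src_handle, cli_handle;
 *
 *     if ( xc_memshr_nominate_gfn(xch, src_dom, src_gfn, &src_handle) ||
 *          xc_memshr_nominate_gfn(xch, cli_dom, cli_gfn, &cli_handle) ||
 *          xc_memshr_share_gfns(xch, src_dom, src_gfn, src_handle,
 *                               cli_dom, cli_gfn, cli_handle) )
 *         perror("memshr");
 */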
2256
2257 /* Same as above, but share two grant references instead.
2258 *
2259 * May fail with EINVAL if either grant reference is invalid.
2260 */
2261 int xc_memshr_share_grefs(xc_interface *xch,
2262 uint32_t source_domain,
2263 grant_ref_t source_gref,
2264 uint64_t source_handle,
2265 uint32_t client_domain,
2266 grant_ref_t client_gref,
2267 uint64_t client_handle);
2268
/* Directly add a shared frame to the guest physmap of the client domain.
2271 *
2272 * May additionally fail with
2273 * 9 (-XENMEM_SHARING_OP_C_HANDLE_INVALID) if the physmap entry for the gfn is
2274 * not suitable.
2275 * ENOMEM if internal data structures cannot be allocated.
2276 * ENOENT if there is an internal hypervisor error.
2277 */
2278 int xc_memshr_add_to_physmap(xc_interface *xch,
2279 uint32_t source_domain,
2280 unsigned long source_gfn,
2281 uint64_t source_handle,
2282 uint32_t client_domain,
2283 unsigned long client_gfn);
2284
/* Deduplicate a range of memory of a client domain. Using this function
 * is equivalent to calling xc_memshr_nominate_gfn for each gfn in the two
 * domains, followed by xc_memshr_share_gfns.
2288 *
2289 * May fail with -EINVAL if the source and client domain have different
2290 * memory size or if memory sharing is not enabled on either of the domains.
2291 * May also fail with -ENOMEM if there isn't enough memory available to store
2292 * the sharing metadata before deduplication can happen.
2293 */
2294 int xc_memshr_range_share(xc_interface *xch,
2295 uint32_t source_domain,
2296 uint32_t client_domain,
2297 uint64_t first_gfn,
2298 uint64_t last_gfn);
2299
2300 int xc_memshr_fork(xc_interface *xch,
2301 uint32_t source_domain,
2302 uint32_t client_domain,
2303 bool allow_with_iommu,
2304 bool block_interrupts);
2305
2306 /*
2307 * Note: this function is only intended to be used on short-lived forks that
 * haven't yet acquired a lot of memory. If the fork has a lot of memory, it
 * is likely more performant to create a new fork with xc_memshr_fork.
2310 *
2311 * With VMs that have a lot of memory this call may block for a long time.
2312 */
2313 int xc_memshr_fork_reset(xc_interface *xch, uint32_t forked_domain,
2314 bool reset_state, bool reset_memory);
2315
2316 /* Debug calls: return the number of pages referencing the shared frame backing
2317 * the input argument. Should be one or greater.
2318 *
2319 * May fail with EINVAL if there is no backing shared frame for the input
2320 * argument.
2321 */
2322 int xc_memshr_debug_gfn(xc_interface *xch,
2323 uint32_t domid,
2324 unsigned long gfn);
2325 /* May additionally fail with EINVAL if the grant reference is invalid. */
2326 int xc_memshr_debug_gref(xc_interface *xch,
2327 uint32_t domid,
2328 grant_ref_t gref);
2329
2330 /* Audits the share subsystem.
2331 *
2332 * Returns ENOSYS if not supported (may not be compiled into the hypervisor).
2333 *
2334 * Returns the number of errors found during auditing otherwise. May be (should
2335 * be!) zero.
2336 *
2337 * If debugtrace support has been compiled into the hypervisor and is enabled,
2338 * verbose descriptions for the errors are available in the hypervisor console.
2339 */
2340 int xc_memshr_audit(xc_interface *xch);
2341
2342 /* Stats reporting.
2343 *
2344 * At any point in time, the following equality should hold for a host:
2345 *
2346 * Let dominfo(d) be the xc_dominfo_t struct filled by a call to
2347 * xc_domain_getinfo(d)
2348 *
2349 * The summation of dominfo(d)->shr_pages for all domains in the system
2350 * should be equal to
2351 * xc_sharing_freed_pages + xc_sharing_used_frames
2352 */
2353 /*
2354 * This function returns the total number of pages freed by using sharing
2355 * on the system. For example, if two domains contain a single entry in
2356 * their p2m table that points to the same shared page (and no other pages
2357 * in the system are shared), then this function should return 1.
2358 */
2359 long xc_sharing_freed_pages(xc_interface *xch);
2360
2361 /*
2362 * This function returns the total number of frames occupied by shared
2363 * pages on the system. This is independent of the number of domains
2364 * pointing at these frames. For example, in the above scenario this
2365 * should return 1. (And dominfo(d) for each of the two domains should return 1
2366 * as well).
2367 *
2368 * Note that some of these sharing_used_frames may be referenced by
2369 * a single domain page, and thus not realize any savings. The same
2370 * applies to some of the pages counted in dominfo(d)->shr_pages.
2371 */
2372 long xc_sharing_used_frames(xc_interface *xch);
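/*
 * Illustrative example: report the system-wide sharing statistics; per
 * the invariant above, the sum of shr_pages over all domains should
 * equal freed + used:
 *
 *     long freed = xc_sharing_freed_pages(xch);
 *     long used = xc_sharing_used_frames(xch);
 *
 *     if ( freed < 0 || used < 0 )
 *         perror("sharing stats");
 *     else
 *         printf("freed %ld, used %ld\n", freed, used);
 */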
2373 /*** End sharing interface ***/
2374
2375 int xc_flask_load(xc_interface *xc_handle, char *buf, uint32_t size);
2376 int xc_flask_context_to_sid(xc_interface *xc_handle, char *buf, uint32_t size, uint32_t *sid);
2377 int xc_flask_sid_to_context(xc_interface *xc_handle, int sid, char *buf, uint32_t size);
2378 int xc_flask_getenforce(xc_interface *xc_handle);
2379 int xc_flask_setenforce(xc_interface *xc_handle, int mode);
2380 int xc_flask_getbool_byid(xc_interface *xc_handle, int id, char *name, uint32_t size, int *curr, int *pend);
2381 int xc_flask_getbool_byname(xc_interface *xc_handle, char *name, int *curr, int *pend);
2382 int xc_flask_setbool(xc_interface *xc_handle, char *name, int value, int commit);
2383 int xc_flask_add_pirq(xc_interface *xc_handle, unsigned int pirq, char *scontext);
2384 int xc_flask_add_ioport(xc_interface *xc_handle, unsigned long low, unsigned long high,
2385 char *scontext);
2386 int xc_flask_add_iomem(xc_interface *xc_handle, unsigned long low, unsigned long high,
2387 char *scontext);
2388 int xc_flask_add_device(xc_interface *xc_handle, unsigned long device, char *scontext);
2389 int xc_flask_del_pirq(xc_interface *xc_handle, unsigned int pirq);
2390 int xc_flask_del_ioport(xc_interface *xc_handle, unsigned long low, unsigned long high);
2391 int xc_flask_del_iomem(xc_interface *xc_handle, unsigned long low, unsigned long high);
2392 int xc_flask_del_device(xc_interface *xc_handle, unsigned long device);
2393 int xc_flask_access(xc_interface *xc_handle, const char *scon, const char *tcon,
2394 uint16_t tclass, uint32_t req,
2395 uint32_t *allowed, uint32_t *decided,
2396 uint32_t *auditallow, uint32_t *auditdeny,
2397 uint32_t *seqno);
2398 int xc_flask_avc_cachestats(xc_interface *xc_handle, char *buf, int size);
2399 int xc_flask_policyvers(xc_interface *xc_handle);
2400 int xc_flask_avc_hashstats(xc_interface *xc_handle, char *buf, int size);
2401 int xc_flask_getavc_threshold(xc_interface *xc_handle);
2402 int xc_flask_setavc_threshold(xc_interface *xc_handle, int threshold);
2403 int xc_flask_relabel_domain(xc_interface *xch, uint32_t domid, uint32_t sid);
2404
2405 struct elf_binary;
2406 void xc_elf_set_logfile(xc_interface *xch, struct elf_binary *elf,
2407 int verbose);
2408 /* Useful for callers who also use libelf. */
2409
2410 /*
2411 * Execute an image previously loaded with xc_kexec_load().
2412 *
2413 * Does not return on success.
2414 *
2415 * Fails with:
2416 * ENOENT if the specified image has not been loaded.
2417 */
2418 int xc_kexec_exec(xc_interface *xch, int type);
2419
2420 /*
2421 * Find the machine address and size of certain memory areas.
2422 *
2423 * KEXEC_RANGE_MA_CRASH crash area
2424 * KEXEC_RANGE_MA_XEN Xen itself
2425 * KEXEC_RANGE_MA_CPU CPU note for CPU number 'nr'
2426 * KEXEC_RANGE_MA_XENHEAP xenheap
2427 * KEXEC_RANGE_MA_EFI_MEMMAP EFI Memory Map
2428 * KEXEC_RANGE_MA_VMCOREINFO vmcoreinfo
2429 *
2430 * Fails with:
2431 * EINVAL if the range or CPU number isn't valid.
2432 */
2433 int xc_kexec_get_range(xc_interface *xch, int range, int nr,
2434 uint64_t *size, uint64_t *start);
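/*
 * Illustrative example: locate the crash area ('nr' is only meaningful
 * for CPU note ranges, so 0 is passed here):
 *
 *     uint64_t start, size;
 *
 *     if ( xc_kexec_get_range(xch, KEXEC_RANGE_MA_CRASH, 0, &size, &start) )
 *         perror("xc_kexec_get_range");
 *     else
 *         printf("crash area at %#" PRIx64 ", %" PRIu64 " bytes\n",
 *                start, size);
 */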
2435
2436 /*
2437 * Load a kexec image into memory.
2438 *
2439 * The image may be of type KEXEC_TYPE_DEFAULT (executed on request)
2440 * or KEXEC_TYPE_CRASH (executed on a crash).
2441 *
2442 * The image architecture may be a 32-bit variant of the hypervisor
 * architecture (e.g., EM_386 on an x86-64 hypervisor).
2444 *
2445 * Fails with:
2446 * ENOMEM if there is insufficient memory for the new image.
2447 * EINVAL if the image does not fit into the crash area or the entry
2448 * point isn't within one of segments.
2449 * EBUSY if another image is being executed.
2450 */
2451 int xc_kexec_load(xc_interface *xch, uint8_t type, uint16_t arch,
2452 uint64_t entry_maddr,
2453 uint32_t nr_segments, xen_kexec_segment_t *segments);
2454
2455 /*
2456 * Unload a kexec image.
2457 *
2458 * This prevents a KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH image from
2459 * being executed. The crash images are not cleared from the crash
2460 * region.
2461 */
2462 int xc_kexec_unload(xc_interface *xch, int type);
2463
2464 /*
 * Find out whether the image has been successfully loaded.
2466 *
2467 * The type can be either KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH.
2468 * If zero is returned, that means no image is loaded for the type.
2469 * If one is returned, that means an image is loaded for the type.
 * Otherwise, a negative return value indicates an error.
2471 */
2472 int xc_kexec_status(xc_interface *xch, int type);
2473
2474 typedef xenpf_resource_entry_t xc_resource_entry_t;
2475
2476 /*
2477 * Generic resource operation which contains multiple non-preemptible
 * resource access entries that are passed to xc_resource_op().
2479 */
2480 struct xc_resource_op {
2481 uint64_t result; /* on return, check this field first */
2482 uint32_t cpu; /* which cpu to run */
2483 uint32_t nr_entries; /* number of resource entries */
2484 xc_resource_entry_t *entries;
2485 };
2486
2487 typedef struct xc_resource_op xc_resource_op_t;
2488 int xc_resource_op(xc_interface *xch, uint32_t nr_ops, xc_resource_op_t *ops);
2489
2490 #if defined(__i386__) || defined(__x86_64__)
2491 enum xc_psr_cmt_type {
2492 XC_PSR_CMT_L3_OCCUPANCY,
2493 XC_PSR_CMT_TOTAL_MEM_COUNT,
2494 XC_PSR_CMT_LOCAL_MEM_COUNT,
2495 };
2496 typedef enum xc_psr_cmt_type xc_psr_cmt_type;
2497
2498 enum xc_psr_type {
2499 XC_PSR_CAT_L3_CBM = 1,
2500 XC_PSR_CAT_L3_CBM_CODE = 2,
2501 XC_PSR_CAT_L3_CBM_DATA = 3,
2502 XC_PSR_CAT_L2_CBM = 4,
2503 XC_PSR_MBA_THRTL = 5,
2504 };
2505 typedef enum xc_psr_type xc_psr_type;
2506
2507 enum xc_psr_feat_type {
2508 XC_PSR_CAT_L3,
2509 XC_PSR_CAT_L2,
2510 XC_PSR_MBA,
2511 };
2512 typedef enum xc_psr_feat_type xc_psr_feat_type;
2513
2514 union xc_psr_hw_info {
2515 struct {
2516 uint32_t cos_max;
2517 uint32_t cbm_len;
2518 bool cdp_enabled;
2519 } cat;
2520
2521 struct {
2522 uint32_t cos_max;
2523 uint32_t thrtl_max;
2524 bool linear;
2525 } mba;
2526 };
2527 typedef union xc_psr_hw_info xc_psr_hw_info;
2528
2529 int xc_psr_cmt_attach(xc_interface *xch, uint32_t domid);
2530 int xc_psr_cmt_detach(xc_interface *xch, uint32_t domid);
2531 int xc_psr_cmt_get_domain_rmid(xc_interface *xch, uint32_t domid,
2532 uint32_t *rmid);
2533 int xc_psr_cmt_get_total_rmid(xc_interface *xch, uint32_t *total_rmid);
2534 int xc_psr_cmt_get_l3_upscaling_factor(xc_interface *xch,
2535 uint32_t *upscaling_factor);
2536 int xc_psr_cmt_get_l3_event_mask(xc_interface *xch, uint32_t *event_mask);
2537 int xc_psr_cmt_get_l3_cache_size(xc_interface *xch, uint32_t cpu,
2538 uint32_t *l3_cache_size);
2539 int xc_psr_cmt_get_data(xc_interface *xch, uint32_t rmid, uint32_t cpu,
2540 xc_psr_cmt_type type, uint64_t *monitor_data,
2541 uint64_t *tsc);
2542 int xc_psr_cmt_enabled(xc_interface *xch);
2543
2544 int xc_psr_set_domain_data(xc_interface *xch, uint32_t domid,
2545 xc_psr_type type, uint32_t target,
2546 uint64_t data);
2547 int xc_psr_get_domain_data(xc_interface *xch, uint32_t domid,
2548 xc_psr_type type, uint32_t target,
2549 uint64_t *data);
2550 int xc_psr_get_hw_info(xc_interface *xch, uint32_t socket,
2551 xc_psr_feat_type type, xc_psr_hw_info *hw_info);
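/*
 * Illustrative example: query L3 CAT capabilities for socket 0:
 *
 *     xc_psr_hw_info info;
 *
 *     if ( xc_psr_get_hw_info(xch, 0, XC_PSR_CAT_L3, &info) )
 *         perror("xc_psr_get_hw_info");
 *     else
 *         printf("cos_max %u cbm_len %u cdp %s\n",
 *                info.cat.cos_max, info.cat.cbm_len,
 *                info.cat.cdp_enabled ? "enabled" : "disabled");
 */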
2552 #endif
2553
2554 int xc_livepatch_upload(xc_interface *xch,
2555 char *name, unsigned char *payload, uint32_t size,
2556 bool force);
2557
2558 int xc_livepatch_get(xc_interface *xch,
2559 char *name,
2560 xen_livepatch_status_t *status);
2561
2562 /*
 * Get the number of available payloads and the actual total sizes of
 * the payloads' name and metadata arrays.
 *
 * This function is typically called before xc_livepatch_list() in order
 * to obtain the sizes and correctly allocate all necessary data resources.
2568 *
2569 * The return value is zero if the hypercall completed successfully.
2570 *
2571 * If there was an error performing the sysctl operation, the return value
2572 * will contain the hypercall error code value.
2573 */
2574 int xc_livepatch_list_get_sizes(xc_interface *xch, unsigned int *nr,
2575 uint32_t *name_total_size,
2576 uint32_t *metadata_total_size);
2577
2578 /*
2579 * The heart of this function is to get an array of the following objects:
2580 * - xen_livepatch_status_t: states and return codes of payloads
2581 * - name: names of payloads
2582 * - len: lengths of corresponding payloads' names
2583 * - metadata: payloads' metadata
2584 * - metadata_len: lengths of corresponding payloads' metadata
2585 *
 * However, it is complex because it has to deal with the hypervisor
 * returning only some of the requested data, or with that data being
 * stale (another hypercall might alter the list).
2589 *
2590 * The parameters that the function expects to contain data from
2591 * the hypervisor are: 'info', 'name', and 'len'. The 'done' and
 * 'left' parameters are also updated with the number of entries filled
 * out and the number of entries left to fetch from the hypervisor,
 * respectively.
2594 *
2595 * It is expected that the caller of this function will first issue the
2596 * xc_livepatch_list_get_sizes() in order to obtain total sizes of names
2597 * and all metadata as well as the current number of payload entries.
2598 * The total sizes are required and supplied via the 'name_total_size' and
2599 * 'metadata_total_size' parameters.
2600 *
 * The caller provides 'max', the maximum number of entries that the
 * 'info', 'name', 'len', 'metadata' and 'metadata_len' arrays can be
 * filled up with.
2604 *
2605 * Each entry in the 'info' array is expected to be of xen_livepatch_status_t
2606 * structure size.
2607 *
2608 * Each entry in the 'name' array may have an arbitrary size.
2609 *
2610 * Each entry in the 'len' array is expected to be of uint32_t size.
2611 *
2612 * Each entry in the 'metadata' array may have an arbitrary size.
2613 *
2614 * Each entry in the 'metadata_len' array is expected to be of uint32_t size.
2615 *
2616 * The return value is zero if the hypercall completed successfully.
2617 * Note that the return value is _not_ the amount of entries filled
2618 * out - that is saved in 'done'.
2619 *
2620 * If there was an error performing the operation, the return value
 * will contain a negative -EXX value. The 'done' and 'left'
 * will contain the number of entries that had been successfully
2623 * retrieved (if any).
2624 */
2625 int xc_livepatch_list(xc_interface *xch, const unsigned int max,
2626 const unsigned int start,
2627 struct xen_livepatch_status *info,
2628 char *name, uint32_t *len,
2629 const uint32_t name_total_size,
2630 char *metadata, uint32_t *metadata_len,
2631 const uint32_t metadata_total_size,
2632 unsigned int *done, unsigned int *left);
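/*
 * Illustrative sketch of the two-call pattern described above; allocation
 * failures and partial results are not handled:
 *
 *     unsigned int nr, done, left;
 *     uint32_t name_size, meta_size;
 *     struct xen_livepatch_status *info;
 *     char *name, *metadata;
 *     uint32_t *len, *metadata_len;
 *
 *     if ( xc_livepatch_list_get_sizes(xch, &nr, &name_size, &meta_size) )
 *         return -1;
 *
 *     info = calloc(nr, sizeof(*info));
 *     name = calloc(1, name_size);
 *     len = calloc(nr, sizeof(*len));
 *     metadata = calloc(1, meta_size);
 *     metadata_len = calloc(nr, sizeof(*metadata_len));
 *
 *     if ( xc_livepatch_list(xch, nr, 0, info, name, len, name_size,
 *                            metadata, metadata_len, meta_size,
 *                            &done, &left) )
 *         return -1;   // 'done' entries may still be valid
 */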
2633
2634 /*
2635 * The operations are asynchronous and the hypervisor may take a while
2636 * to complete them. The `timeout` offers an option to expire the
2637 * operation if it could not be completed within the specified time
 * (in ns). A value of 0 means let the hypervisor decide the best timeout.
 * The `flags` parameter allows extra parameters to be passed to the actions.
2640 */
2641 int xc_livepatch_apply(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags);
2642 int xc_livepatch_revert(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags);
2643 int xc_livepatch_unload(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags);
2644 int xc_livepatch_replace(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags);
2645
2646 /*
2647 * Ensure cache coherency after memory modifications. A call to this function
2648 * is only required on ARM as the x86 architecture provides cache coherency
2649 * guarantees. Calling this function on x86 is allowed but has no effect.
2650 */
2651 int xc_domain_cacheflush(xc_interface *xch, uint32_t domid,
2652 xen_pfn_t start_pfn, xen_pfn_t nr_pfns);
2653
2654 /*
2655 * Set LLC colors for a domain.
2656 * It can only be used directly after domain creation. An attempt to use it
2657 * afterwards will result in an error.
2658 */
2659 int xc_domain_set_llc_colors(xc_interface *xch, uint32_t domid,
2660 const uint32_t *llc_colors,
2661 uint32_t num_llc_colors);
2662
2663 #if defined(__arm__) || defined(__aarch64__)
2664 int xc_dt_overlay(xc_interface *xch, void *overlay_fdt,
2665 uint32_t overlay_fdt_size, uint8_t overlay_op);
2666 int xc_dt_overlay_domain(xc_interface *xch, void *overlay_fdt,
2667 uint32_t overlay_fdt_size, uint8_t overlay_op,
2668 uint32_t domain_id);
2669 #endif
2670
2671 /* Compat shims */
2672 #include "xenctrl_compat.h"
2673
2674 #endif /* XENCTRL_H */
2675
2676 /*
2677 * Local variables:
2678 * mode: C
2679 * c-file-style: "BSD"
2680 * c-basic-offset: 4
2681 * tab-width: 4
2682 * indent-tabs-mode: nil
2683 * End:
2684 */
2685