/******************************************************************************
 * xenctrl.h
 *
 * A library for low-level access to the Xen control interfaces.
 *
 * Copyright (c) 2003-2004, K A Fraser.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef XENCTRL_H
#define XENCTRL_H

/* Tell the Xen public headers we are a user-space tools build. */
#ifndef __XEN_TOOLS__
#define __XEN_TOOLS__ 1
#endif

#include <unistd.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>
#include <xen/xen.h>
#include <xen/domctl.h>
#include <xen/physdev.h>
#include <xen/sysctl.h>
#include <xen/version.h>
#include <xen/event_channel.h>
#include <xen/sched.h>
#include <xen/memory.h>
#include <xen/grant_table.h>
#include <xen/hvm/dm_op.h>
#include <xen/hvm/params.h>
#include <xen/xsm/flask_op.h>
#include <xen/kexec.h>
#include <xen/platform.h>

#include "xentoollog.h"

#if defined(__i386__) || defined(__x86_64__)
#include <xen/foreign/x86_32.h>
#include <xen/foreign/x86_64.h>
#include <xen/arch-x86/xen-mca.h>
#endif

#define XC_PAGE_SHIFT           12
#define XC_PAGE_SIZE            (1UL << XC_PAGE_SHIFT)
#define XC_PAGE_MASK            (~(XC_PAGE_SIZE-1))

#define INVALID_MFN  (~0UL)

/*
 *  DEFINITIONS FOR CPU BARRIERS
 */

#define xen_barrier() asm volatile ( "" : : : "memory")

#if defined(__i386__)
#define xen_mb()  asm volatile ( "lock addl $0, -4(%%esp)" ::: "memory" )
#define xen_rmb() xen_barrier()
#define xen_wmb() xen_barrier()
#elif defined(__x86_64__)
#define xen_mb()  asm volatile ( "lock addl $0, -32(%%rsp)" ::: "memory" )
#define xen_rmb() xen_barrier()
#define xen_wmb() xen_barrier()
#elif defined(__arm__)
#define xen_mb()   asm volatile ("dmb" : : : "memory")
#define xen_rmb()  asm volatile ("dmb" : : : "memory")
#define xen_wmb()  asm volatile ("dmb" : : : "memory")
#elif defined(__aarch64__)
#define xen_mb()   asm volatile ("dmb sy" : : : "memory")
#define xen_rmb()  asm volatile ("dmb sy" : : : "memory")
#define xen_wmb()  asm volatile ("dmb sy" : : : "memory")
#else
#error "Define barriers"
#endif


#define XENCTRL_HAS_XC_INTERFACE 1
/* In Xen 4.0 and earlier, xc_interface_open and xc_evtchn_open would
 * both return ints being the file descriptor.  In 4.1 and later, they
 * return an xc_interface* and xc_evtchn*, respectively - ie, a
 * pointer to an opaque struct.  This #define is provided in 4.1 and
 * later, allowing out-of-tree callers to more easily distinguish
 * between, and be compatible with, both versions.
 */


/*
 *  GENERAL
 *
 * Unless otherwise specified, each function here returns zero or a
 * non-null pointer on success; or in case of failure, sets errno and
 * returns -1 or a null pointer.
 *
 * Unless otherwise specified, errors result in a call to the error
 * handler function, which by default prints a message to the
 * FILE* passed as the caller_data, which by default is stderr.
 * (This is described below as "logging errors".)
 *
 * The error handler can safely trash errno, as libxc saves it across
 * the callback.
 */

typedef struct xc_interface_core xc_interface;

enum xc_error_code {
  XC_ERROR_NONE = 0,
  XC_INTERNAL_ERROR = 1,
  XC_INVALID_KERNEL = 2,
  XC_INVALID_PARAM = 3,
  XC_OUT_OF_MEMORY = 4,
  /* new codes need to be added to xc_error_level_to_desc too */
};

typedef enum xc_error_code xc_error_code;


/*
 *  INITIALIZATION FUNCTIONS
 */

/**
 * This function opens a handle to the hypervisor interface.  This function can
 * be called multiple times within a single process.  Multiple processes can
 * have an open hypervisor interface at the same time.
 *
 * Note:
 * After fork, a child process must not use any opened xc interface
 * handle inherited from its parent; it must open a new handle if it
 * wants to interact with xc.
 *
 * Each call to this function should have a corresponding call to
 * xc_interface_close().
 *
 * This function can fail if the caller does not have superuser permission or
 * if a Xen-enabled kernel is not currently running.
 *
 * @return a handle to the hypervisor interface
 */
xc_interface *xc_interface_open(xentoollog_logger *logger,
                                xentoollog_logger *dombuild_logger,
                                unsigned open_flags);
  /* if logger==NULL, will log to stderr
   * if dombuild_logger==NULL, will log to a file
   */

/*
 * Note: if XC_OPENFLAG_NON_REENTRANT is passed then libxc must not be
 * called reentrantly and the calling application is responsible for
 * providing mutual exclusion surrounding all libxc calls itself.
 *
 * In particular xc_{get,clear}_last_error only remain valid for the
 * duration of the critical section containing the call which failed.
 */
enum xc_open_flags {
    XC_OPENFLAG_DUMMY =  1<<0, /* do not actually open a xenctrl interface */
    XC_OPENFLAG_NON_REENTRANT = 1<<1, /* assume library is only ever called from a single thread */
};
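
/*
 * Illustrative sketch (not part of the original interface): a minimal
 * open/use/close sequence.  NULL loggers select the defaults described
 * above; open_flags takes a mask of enum xc_open_flags.
 *
 *     xc_interface *xch = xc_interface_open(NULL, NULL, 0);
 *
 *     if ( xch )
 *     {
 *         // ... issue libxc calls against xch ...
 *         xc_interface_close(xch);
 *     }
 *     else
 *         perror("xc_interface_open");  // errno has been set
 */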

/**
 * This function closes an open hypervisor interface.
 *
 * This function can fail if the handle does not represent an open interface or
 * if there were problems closing the interface.  In the latter case
 * the interface is still closed.
 *
 * @parm xch a handle to an open hypervisor interface
 * @return 0 on success, -1 otherwise.
 */
int xc_interface_close(xc_interface *xch);

/**
 * Return the handles which xch has opened and will use for
 * hypercalls, foreign memory accesses and device model operations.
 * These may be used with the corresponding libraries so long as the
 * xch itself remains open.
 */
struct xencall_handle *xc_interface_xcall_handle(xc_interface *xch);
struct xenforeignmemory_handle *xc_interface_fmem_handle(xc_interface *xch);
struct xendevicemodel_handle *xc_interface_dmod_handle(xc_interface *xch);

/*
 * HYPERCALL SAFE MEMORY BUFFER
 *
 * Ensure that memory which is passed to a hypercall has been
 * specially allocated in order to be safe to access from the
 * hypervisor.
 *
 * Each user data pointer is shadowed by an xc_hypercall_buffer data
 * structure. You should never define an xc_hypercall_buffer type
 * directly, instead use the DECLARE_HYPERCALL_BUFFER* macros below.
 *
 * The structure should be considered opaque and all access should be
 * via the macros and helper functions defined below.
 *
 * Once the buffer is declared the user is responsible for explicitly
 * allocating and releasing the memory using
 * xc_hypercall_buffer_alloc(_pages) and
 * xc_hypercall_buffer_free(_pages).
 *
 * Once the buffer has been allocated the user can initialise the data
 * via the normal pointer. The xc_hypercall_buffer structure is
 * transparently referenced by the helper macros (such as
 * set_xen_guest_handle) in order to check at compile time that the
 * correct type of memory is being used.
 */
struct xc_hypercall_buffer {
    /* Hypercall safe memory buffer. */
    void *hbuf;

    /*
     * Reference to xc_hypercall_buffer passed as argument to the
     * current function.
     */
    struct xc_hypercall_buffer *param_shadow;

    /*
     * Direction of copy for bounce buffering.
     */
    int dir;

    /* Used iff dir != 0. */
    void *ubuf;
    size_t sz;
};
typedef struct xc_hypercall_buffer xc_hypercall_buffer_t;

/*
 * Construct the name of the hypercall buffer for a given variable.
 * For internal use only
 */
#define XC__HYPERCALL_BUFFER_NAME(_name) xc__hypercall_buffer_##_name

/*
 * Returns the hypercall_buffer associated with a variable.
 */
#define HYPERCALL_BUFFER(_name)                                \
    ({  xc_hypercall_buffer_t *_hcbuf_buf =                    \
                &XC__HYPERCALL_BUFFER_NAME(_name);             \
        _hcbuf_buf->param_shadow ?: _hcbuf_buf;                \
     })

#define HYPERCALL_BUFFER_INIT_NO_BOUNCE .dir = 0, .sz = 0, .ubuf = (void *)-1

/*
 * Defines a hypercall buffer and user pointer with _name of _type.
 *
 * The user accesses the data as normal via _name which will be
 * transparently converted to the hypercall buffer as necessary.
 */
#define DECLARE_HYPERCALL_BUFFER(_type, _name)                 \
    _type *(_name) = NULL;                                     \
    xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(_name) = { \
        .hbuf = NULL,                                          \
        .param_shadow = NULL,                                  \
        HYPERCALL_BUFFER_INIT_NO_BOUNCE                        \
    }

/*
 * Like DECLARE_HYPERCALL_BUFFER() but using an already allocated
 * hypercall buffer, _hbuf.
 *
 * Useful when a hypercall buffer is passed to a function and access
 * via the user pointer is required.
 *
 * See DECLARE_HYPERCALL_BUFFER_ARGUMENT() if the user pointer is not
 * required.
 */
#define DECLARE_HYPERCALL_BUFFER_SHADOW(_type, _name, _hbuf)   \
    _type *(_name) = (_hbuf)->hbuf;                            \
    __attribute__((unused))                                    \
    xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(_name) = { \
        .hbuf = (void *)-1,                                    \
        .param_shadow = (_hbuf),                               \
        HYPERCALL_BUFFER_INIT_NO_BOUNCE                        \
    }

/*
 * Declare the necessary data structure to allow a hypercall buffer
 * passed as an argument to a function to be used in the normal way.
 */
#define DECLARE_HYPERCALL_BUFFER_ARGUMENT(_name)               \
    xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(_name) = { \
        .hbuf = (void *)-1,                                    \
        .param_shadow = (_name),                               \
        HYPERCALL_BUFFER_INIT_NO_BOUNCE                        \
    }

/*
 * Get the hypercall buffer data pointer in a form suitable for use
 * directly as a hypercall argument.
 */
#define HYPERCALL_BUFFER_AS_ARG(_name)                          \
    ({  xc_hypercall_buffer_t _hcbuf_arg1;                      \
        typeof(XC__HYPERCALL_BUFFER_NAME(_name)) *_hcbuf_arg2 = \
                HYPERCALL_BUFFER(_name);                        \
        (void)(&_hcbuf_arg1 == _hcbuf_arg2);                    \
        (unsigned long)(_hcbuf_arg2)->hbuf;                     \
     })

/*
 * Set a xen_guest_handle in a type safe manner, ensuring that the
 * data pointer has been correctly allocated.
 */
#define set_xen_guest_handle_impl(_hnd, _val, _byte_off)        \
    do {                                                        \
        xc_hypercall_buffer_t _hcbuf_hnd1;                      \
        typeof(XC__HYPERCALL_BUFFER_NAME(_val)) *_hcbuf_hnd2 =  \
                HYPERCALL_BUFFER(_val);                         \
        (void) (&_hcbuf_hnd1 == _hcbuf_hnd2);                   \
        set_xen_guest_handle_raw(_hnd,                          \
                (_hcbuf_hnd2)->hbuf + (_byte_off));             \
    } while (0)

#undef set_xen_guest_handle
#define set_xen_guest_handle(_hnd, _val)                        \
    set_xen_guest_handle_impl(_hnd, _val, 0)

#define set_xen_guest_handle_offset(_hnd, _val, _off)           \
    set_xen_guest_handle_impl(_hnd, _val,                       \
            ((sizeof(*_val)*(_off))))

/* Use with set_xen_guest_handle in place of NULL */
extern xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(HYPERCALL_BUFFER_NULL);

/*
 * Allocate and free hypercall buffers with byte granularity.
 */
void *xc__hypercall_buffer_alloc(xc_interface *xch, xc_hypercall_buffer_t *b, size_t size);
#define xc_hypercall_buffer_alloc(_xch, _name, _size) xc__hypercall_buffer_alloc(_xch, HYPERCALL_BUFFER(_name), _size)
void xc__hypercall_buffer_free(xc_interface *xch, xc_hypercall_buffer_t *b);
#define xc_hypercall_buffer_free(_xch, _name) xc__hypercall_buffer_free(_xch, HYPERCALL_BUFFER(_name))
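
/*
 * Illustrative sketch (not part of the original interface) of the
 * lifecycle described above: declare a buffer, allocate it, fill it
 * through the plain pointer, hand it to a hypercall argument with
 * set_xen_guest_handle(), then free it.  The "op.u.getmemlist.buffer"
 * field is hypothetical; any guest handle follows the same pattern.
 *
 *     DECLARE_HYPERCALL_BUFFER(uint8_t, buf);
 *
 *     if ( xc_hypercall_buffer_alloc(xch, buf, size) == NULL )
 *         return -1;                       // errno has been set
 *
 *     memset(buf, 0, size);                // use the normal pointer
 *     set_xen_guest_handle(op.u.getmemlist.buffer, buf);
 *
 *     // ... issue the hypercall ...
 *
 *     xc_hypercall_buffer_free(xch, buf);
 */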

/*
 * Allocate and free hypercall buffers with page alignment.
 */
void *xc__hypercall_buffer_alloc_pages(xc_interface *xch, xc_hypercall_buffer_t *b, int nr_pages);
#define xc_hypercall_buffer_alloc_pages(_xch, _name, _nr) xc__hypercall_buffer_alloc_pages(_xch, HYPERCALL_BUFFER(_name), _nr)
void xc__hypercall_buffer_free_pages(xc_interface *xch, xc_hypercall_buffer_t *b, int nr_pages);
#define xc_hypercall_buffer_free_pages(_xch, _name, _nr)                    \
    do {                                                                    \
        if ( _name )                                                        \
            xc__hypercall_buffer_free_pages(_xch, HYPERCALL_BUFFER(_name),  \
                                            _nr);                           \
    } while (0)

/*
 * Array of hypercall buffers.
 *
 * Create an array with xc_hypercall_buffer_array_create() and
 * populate it by declaring one hypercall buffer in a loop and
 * allocating the buffer with xc_hypercall_buffer_array_alloc().
 *
 * To access a previously allocated buffer, declare a new hypercall
 * buffer and call xc_hypercall_buffer_array_get().
 *
 * Destroy the array with xc_hypercall_buffer_array_destroy() to free
 * the array and all its allocated hypercall buffers.
 */
struct xc_hypercall_buffer_array;
typedef struct xc_hypercall_buffer_array xc_hypercall_buffer_array_t;

xc_hypercall_buffer_array_t *xc_hypercall_buffer_array_create(xc_interface *xch, unsigned n);
void *xc__hypercall_buffer_array_alloc(xc_interface *xch, xc_hypercall_buffer_array_t *array,
                                       unsigned index, xc_hypercall_buffer_t *hbuf, size_t size);
#define xc_hypercall_buffer_array_alloc(_xch, _array, _index, _name, _size) \
    xc__hypercall_buffer_array_alloc(_xch, _array, _index, HYPERCALL_BUFFER(_name), _size)
void *xc__hypercall_buffer_array_get(xc_interface *xch, xc_hypercall_buffer_array_t *array,
                                     unsigned index, xc_hypercall_buffer_t *hbuf);
#define xc_hypercall_buffer_array_get(_xch, _array, _index, _name, _size) \
    xc__hypercall_buffer_array_get(_xch, _array, _index, HYPERCALL_BUFFER(_name))
void xc_hypercall_buffer_array_destroy(xc_interface *xc, xc_hypercall_buffer_array_t *array);
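
/*
 * Illustrative sketch (not part of the original interface): populating
 * and destroying a hypercall buffer array as described above.
 *
 *     xc_hypercall_buffer_array_t *array =
 *         xc_hypercall_buffer_array_create(xch, n);
 *     unsigned i;
 *
 *     for ( i = 0; array && i < n; i++ )
 *     {
 *         DECLARE_HYPERCALL_BUFFER(uint8_t, buf);
 *
 *         if ( xc_hypercall_buffer_array_alloc(xch, array, i, buf,
 *                                              XC_PAGE_SIZE) == NULL )
 *             break;
 *         // fill buf[] through the plain pointer
 *     }
 *
 *     if ( array )
 *         xc_hypercall_buffer_array_destroy(xch, array);
 */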

/*
 * CPUMAP handling
 */
typedef uint8_t *xc_cpumap_t;

/* return maximum number of cpus the hypervisor supports */
int xc_get_max_cpus(xc_interface *xch);

/* return the number of online cpus */
int xc_get_online_cpus(xc_interface *xch);

/* return array size for cpumap */
int xc_get_cpumap_size(xc_interface *xch);

/* allocate a cpumap */
xc_cpumap_t xc_cpumap_alloc(xc_interface *xch);

/* clear a CPU from the cpumap. */
void xc_cpumap_clearcpu(int cpu, xc_cpumap_t map);

/* set a CPU in the cpumap. */
void xc_cpumap_setcpu(int cpu, xc_cpumap_t map);

/* Test whether the CPU in cpumap is set. */
int xc_cpumap_testcpu(int cpu, xc_cpumap_t map);
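
/*
 * Illustrative sketch (not part of the original interface): build a
 * cpumap containing only CPU 3.  The map returned by xc_cpumap_alloc()
 * is assumed to be heap-allocated and released with free(3).
 *
 *     xc_cpumap_t map = xc_cpumap_alloc(xch);
 *
 *     if ( map )
 *     {
 *         xc_cpumap_setcpu(3, map);
 *         if ( xc_cpumap_testcpu(3, map) )
 *             ; // CPU 3 is now set in the map
 *         free(map);
 *     }
 */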

/*
 * NODEMAP handling
 */
typedef uint8_t *xc_nodemap_t;

/* return maximum number of NUMA nodes the hypervisor supports */
int xc_get_max_nodes(xc_interface *xch);

/* return array size for nodemap */
int xc_get_nodemap_size(xc_interface *xch);

/* allocate a nodemap */
xc_nodemap_t xc_nodemap_alloc(xc_interface *xch);

/*
 * DOMAIN DEBUGGING FUNCTIONS
 */

typedef struct xc_core_header {
    unsigned int xch_magic;
    unsigned int xch_nr_vcpus;
    unsigned int xch_nr_pages;
    unsigned int xch_ctxt_offset;
    unsigned int xch_index_offset;
    unsigned int xch_pages_offset;
} xc_core_header_t;

#define XC_CORE_MAGIC     0xF00FEBED
#define XC_CORE_MAGIC_HVM 0xF00FEBEE

/*
 * DOMAIN MANAGEMENT FUNCTIONS
 */

typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;

static inline unsigned int dominfo_shutdown_reason(const xc_domaininfo_t *info)
{
    return (info->flags >> XEN_DOMINF_shutdownshift) & XEN_DOMINF_shutdownmask;
}

static inline bool dominfo_shutdown_with(const xc_domaininfo_t *info,
                                         unsigned int expected_reason)
{
    /* The reason doesn't make sense unless the domain is actually shutdown */
    return (info->flags & XEN_DOMINF_shutdown) &&
           (dominfo_shutdown_reason(info) == expected_reason);
}
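
/*
 * Illustrative sketch (not part of the original interface): using the
 * helpers above with xc_domain_getinfo_single() (declared later in
 * this file) to test whether a domain has suspended;
 * handle_suspended_domain() stands in for caller logic.
 *
 *     xc_domaininfo_t info;
 *
 *     if ( xc_domain_getinfo_single(xch, domid, &info) == 0 &&
 *          dominfo_shutdown_with(&info, SHUTDOWN_suspend) )
 *         handle_suspended_domain(domid);
 */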

typedef union
{
#if defined(__i386__) || defined(__x86_64__)
    vcpu_guest_context_x86_64_t x64;
    vcpu_guest_context_x86_32_t x32;
#endif
    vcpu_guest_context_t c;
} vcpu_guest_context_any_t;

typedef union
{
#if defined(__i386__) || defined(__x86_64__)
    shared_info_x86_64_t x64;
    shared_info_x86_32_t x32;
#endif
    shared_info_t s;
} shared_info_any_t;

#if defined(__i386__) || defined(__x86_64__)
typedef union
{
    start_info_x86_64_t x64;
    start_info_x86_32_t x32;
    start_info_t s;
} start_info_any_t;
#endif

typedef struct xc_vcpu_extstate {
    uint64_t xfeature_mask;
    uint64_t size;
    void *buffer;
} xc_vcpu_extstate_t;

int xc_domain_create(xc_interface *xch, uint32_t *pdomid,
                     struct xen_domctl_createdomain *config);


/* Functions to produce a dump of a given domain
 *  xc_domain_dumpcore - produces a dump to a specified file
 *  xc_domain_dumpcore_via_callback - produces a dump, using a specified
 *                                    callback function
 */
int xc_domain_dumpcore(xc_interface *xch,
                       uint32_t domid,
                       const char *corename);

/* Define the callback function type for xc_domain_dumpcore_via_callback.
 *
 * This function is called by the coredump code for every "write", and is
 * passed an opaque object created by the caller of
 * xc_domain_dumpcore_via_callback for its own use.
 */
typedef int (dumpcore_rtn_t)(xc_interface *xch,
                             void *arg, char *buffer, unsigned int length);

int xc_domain_dumpcore_via_callback(xc_interface *xch,
                                    uint32_t domid,
                                    void *arg,
                                    dumpcore_rtn_t dump_rtn);

/*
 * This function sets the maximum number of vcpus that a domain may create.
 *
 * @parm xch a handle to an open hypervisor interface.
 * @parm domid the domain id in which vcpus are to be created.
 * @parm max the maximum number of vcpus that the domain may create.
 * @return 0 on success, -1 on failure.
 */
int xc_domain_max_vcpus(xc_interface *xch,
                        uint32_t domid,
                        unsigned int max);

/**
 * This function pauses a domain. A paused domain still exists in memory;
 * however, it does not receive any timeslices from the hypervisor.
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm domid the domain id to pause
 * @return 0 on success, -1 on failure.
 */
int xc_domain_pause(xc_interface *xch,
                    uint32_t domid);
/**
 * This function unpauses a domain.  The domain should have been previously
 * paused.
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm domid the domain id to unpause
 * return 0 on success, -1 on failure
 */
int xc_domain_unpause(xc_interface *xch,
                      uint32_t domid);

/**
 * This function will destroy a domain.  Destroying a domain removes the domain
 * completely from memory.  This function should be called after sending the
 * domain a SHUTDOWN control message to free up the domain resources.
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm domid the domain id to destroy
 * @return 0 on success, -1 on failure
 */
int xc_domain_destroy(xc_interface *xch,
                      uint32_t domid);


/**
 * This function will shutdown a domain. This is intended for use in
 * fully-virtualized domains where this operation is analogous to the
 * sched_op operations in a paravirtualized domain. The caller is
 * expected to give the reason for the shutdown.
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm domid the domain id to shutdown
 * @parm reason is the reason (SHUTDOWN_xxx) for the shutdown
 * @return 0 on success, -1 on failure
 */
int xc_domain_shutdown(xc_interface *xch,
                       uint32_t domid,
                       int reason);

int xc_watchdog(xc_interface *xch,
                uint32_t id,
                uint32_t timeout);

/**
 * This function explicitly sets the host NUMA nodes the domain will
 * have affinity with.
 *
 * @parm xch a handle to an open hypervisor interface.
 * @parm domid the domain id one wants to set the affinity of.
 * @parm nodemap the map of the affine nodes.
 * @return 0 on success, -1 on failure.
 */
int xc_domain_node_setaffinity(xc_interface *xch,
                               uint32_t domid,
                               xc_nodemap_t nodemap);

/**
 * This function retrieves the host NUMA nodes the domain has
 * affinity with.
 *
 * @parm xch a handle to an open hypervisor interface.
 * @parm domid the domain id one wants to get the node affinity of.
 * @parm nodemap the map of the affine nodes.
 * @return 0 on success, -1 on failure.
 */
int xc_domain_node_getaffinity(xc_interface *xch,
                               uint32_t domid,
                               xc_nodemap_t nodemap);

/**
 * This function specifies the CPU affinity for a vcpu.
 *
 * There are two kinds of affinity. Soft affinity is the set of CPUs a vcpu
 * prefers to run on. Hard affinity is the set of CPUs a vcpu is allowed to
 * run on. If flags contains XEN_VCPUAFFINITY_SOFT, the soft affinity is set
 * to what cpumap_soft_inout contains. If flags contains XEN_VCPUAFFINITY_HARD,
 * the hard affinity is set to what cpumap_hard_inout contains. Both flags
 * can be set at the same time, in which case both soft and hard affinity are
 * set to what the respective parameter contains.
 *
 * The function also returns the effective hard or/and soft affinity, still
 * via the cpumap_soft_inout and cpumap_hard_inout parameters. Effective
 * affinity is, in case of soft affinity, the intersection of soft affinity,
 * hard affinity and the cpupool's online CPUs for the domain, and is returned
 * in cpumap_soft_inout, if XEN_VCPUAFFINITY_SOFT is set in flags. In case of
 * hard affinity, it is the intersection between hard affinity and the
 * cpupool's online CPUs, and is returned in cpumap_hard_inout, if
 * XEN_VCPUAFFINITY_HARD is set in flags. If both flags are set, both soft
 * and hard affinity are returned in the respective parameter.
 *
 * We do report it back as effective affinity is what the Xen scheduler will
 * actually use, and we thus allow checking whether or not that matches with,
 * or at least is good enough for, the caller's purposes.
 *
 * @param xch a handle to an open hypervisor interface.
 * @param domid the id of the domain to which the vcpu belongs
 * @param vcpu the vcpu id within the domain
 * @param cpumap_hard_inout specifies(/returns) the (effective) hard affinity
 * @param cpumap_soft_inout specifies(/returns) the (effective) soft affinity
 * @param flags what we want to set
 */
int xc_vcpu_setaffinity(xc_interface *xch,
                        uint32_t domid,
                        int vcpu,
                        xc_cpumap_t cpumap_hard_inout,
                        xc_cpumap_t cpumap_soft_inout,
                        uint32_t flags);
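
/*
 * Illustrative sketch (not part of the original interface): pin vcpu 0
 * of a domain to CPU 2 via hard affinity, using the cpumap helpers
 * above.  XEN_VCPUAFFINITY_HARD comes from xen/domctl.h; passing NULL
 * for the soft map when XEN_VCPUAFFINITY_SOFT is not set is assumed to
 * be acceptable here.
 *
 *     xc_cpumap_t hard = xc_cpumap_alloc(xch);
 *
 *     if ( hard )
 *     {
 *         xc_cpumap_setcpu(2, hard);
 *         if ( xc_vcpu_setaffinity(xch, domid, 0, hard, NULL,
 *                                  XEN_VCPUAFFINITY_HARD) == 0 )
 *             ; // hard now holds the effective hard affinity
 *         free(hard);
 *     }
 */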

/**
 * This function retrieves hard and soft CPU affinity of a vcpu,
 * depending on what flags are set.
 *
 * Soft affinity is returned in cpumap_soft if XEN_VCPUAFFINITY_SOFT is set.
 * Hard affinity is returned in cpumap_hard if XEN_VCPUAFFINITY_HARD is set.
 *
 * @param xch a handle to an open hypervisor interface.
 * @param domid the id of the domain to which the vcpu belongs
 * @param vcpu the vcpu id within the domain
 * @param cpumap_hard is where hard affinity is returned
 * @param cpumap_soft is where soft affinity is returned
 * @param flags what we want to get
 */
int xc_vcpu_getaffinity(xc_interface *xch,
                        uint32_t domid,
                        int vcpu,
                        xc_cpumap_t cpumap_hard,
                        xc_cpumap_t cpumap_soft,
                        uint32_t flags);


/**
 * This function will return the guest_width (in bytes) for the
 * specified domain.
 *
 * @param xch a handle to an open hypervisor interface.
 * @param domid the domain id one wants the address size width of.
 * @param guest_width where the guest width (address size, in bytes) is
 *                    returned.
 */
int xc_domain_get_guest_width(xc_interface *xch, uint32_t domid,
                              unsigned int *guest_width);

/**
 * This function will return information about a single domain. It looks
 * up the domain by the provided domid and succeeds if the domain exists
 * and is accessible by the current domain, or fails otherwise. A buffer
 * may optionally be passed in the `info` parameter in order to retrieve
 * information about the domain. The buffer is ignored if NULL is
 * passed instead.
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm domid domid to lookup
 * @parm info Optional domain information buffer (may be NULL)
 * @return 0 on success, otherwise the call failed and info is undefined
 */
int xc_domain_getinfo_single(xc_interface *xch,
                             uint32_t domid,
                             xc_domaininfo_t *info);

/**
 * This function will set the execution context for the specified vcpu.
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm domid the domain to set the vcpu context for
 * @parm vcpu the vcpu number for the context
 * @parm ctxt pointer to the cpu context with the values to set
 * @return 0 on success, -1 on failure
 */
int xc_vcpu_setcontext(xc_interface *xch,
                       uint32_t domid,
                       uint32_t vcpu,
                       vcpu_guest_context_any_t *ctxt);
/**
 * This function will return information about one or more domains, using a
 * single hypercall.  The domain information will be stored into the supplied
 * array of xc_domaininfo_t structures.
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm first_domain the first domain to enumerate information from.
 *                    Domains are currently enumerated in order of creation.
 * @parm max_domains the number of elements in info
 * @parm info an array of max_domains size that will contain the information
 *            for the enumerated domains.
 * @return the number of domains enumerated or -1 on error
 */
int xc_domain_getinfolist(xc_interface *xch,
                          uint32_t first_domain,
                          unsigned int max_domains,
                          xc_domaininfo_t *info);
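
/*
 * Illustrative sketch (not part of the original interface): enumerating
 * all domains in batches of 16.
 *
 *     xc_domaininfo_t info[16];
 *     uint32_t next = 0;
 *     int i, nr;
 *
 *     while ( (nr = xc_domain_getinfolist(xch, next, 16, info)) > 0 )
 *     {
 *         for ( i = 0; i < nr; i++ )
 *             ; // consume info[i]
 *         next = info[nr - 1].domain + 1;
 *     }
 */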

/**
 * This function sets the p2m entry for a broken page.
 * @parm xch a handle to an open hypervisor interface
 * @parm domid the domain id to which the broken page belongs
 * @parm pfn the pfn number of the broken page
 * @return 0 on success, -1 on failure
 */
int xc_set_broken_page_p2m(xc_interface *xch,
                           uint32_t domid,
                           unsigned long pfn);

/**
 * This function returns information about the context of a hvm domain
 * @parm xch a handle to an open hypervisor interface
 * @parm domid the domain to get information from
 * @parm ctxt_buf a pointer to a structure to store the execution context of
 *            the hvm domain
 * @parm size the size of ctxt_buf in bytes
 * @return 0 on success, -1 on failure
 */
int xc_domain_hvm_getcontext(xc_interface *xch,
                             uint32_t domid,
                             uint8_t *ctxt_buf,
                             uint32_t size);


/**
 * This function returns one element of the context of a hvm domain
 * @parm xch a handle to an open hypervisor interface
 * @parm domid the domain to get information from
 * @parm typecode which type of element is required
 * @parm instance which instance of the type
 * @parm ctxt_buf a pointer to a structure to store the execution context of
 *            the hvm domain
 * @parm size the size of ctxt_buf (must be >= HVM_SAVE_LENGTH(typecode))
 * @return 0 on success, -1 on failure
 */
int xc_domain_hvm_getcontext_partial(xc_interface *xch,
                                     uint32_t domid,
                                     uint16_t typecode,
                                     uint16_t instance,
                                     void *ctxt_buf,
                                     uint32_t size);

/**
 * This function will set the context for hvm domain
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm domid the domain to set the hvm domain context for
 * @parm hvm_ctxt pointer to the hvm context with the values to set
 * @parm size the size of hvm_ctxt in bytes
 * @return 0 on success, -1 on failure
 */
int xc_domain_hvm_setcontext(xc_interface *xch,
                             uint32_t domid,
                             uint8_t *hvm_ctxt,
                             uint32_t size);

/**
 * This function will return the guest IO ABI protocol.
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm domid the domain to get the IO ABI protocol for
 * @return guest protocol on success, NULL on failure
 */
const char *xc_domain_get_native_protocol(xc_interface *xch,
                                          uint32_t domid);

/**
 * This function returns information about the execution context of a
 * particular vcpu of a domain.
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm domid the domain to get information from
 * @parm vcpu the vcpu number
 * @parm ctxt a pointer to a structure to store the execution context of the
 *            domain
 * @return 0 on success, -1 on failure
 */
int xc_vcpu_getcontext(xc_interface *xch,
                       uint32_t domid,
                       uint32_t vcpu,
                       vcpu_guest_context_any_t *ctxt);

/**
 * This function initializes the vuart emulation and returns
 * the event to be used by the backend for communicating with
 * the emulation code.
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm type type of vuart
 * @parm domid the domain to get information from
 * @parm console_domid the domid of the backend console
 * @parm gfn the guest pfn to be used as the ring buffer
 * @parm evtchn the event channel to be used for events
 * @return 0 on success, negative error on failure
 */
int xc_dom_vuart_init(xc_interface *xch,
                      uint32_t type,
                      uint32_t domid,
                      uint32_t console_domid,
                      xen_pfn_t gfn,
                      evtchn_port_t *evtchn);

/**
 * This function returns information about the XSAVE state of a particular
 * vcpu of a domain. If extstate->size and extstate->xfeature_mask are 0,
 * the call is considered a query to retrieve them and the buffer is not
 * filled.
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm domid the domain to get information from
 * @parm vcpu the vcpu number
 * @parm extstate a pointer to a structure to store the XSAVE state of the
 *                domain
 * @return 0 on success, negative error code on failure
 */
int xc_vcpu_get_extstate(xc_interface *xch,
                         uint32_t domid,
                         uint32_t vcpu,
                         xc_vcpu_extstate_t *extstate);

typedef struct xen_domctl_getvcpuinfo xc_vcpuinfo_t;
int xc_vcpu_getinfo(xc_interface *xch,
                    uint32_t domid,
                    uint32_t vcpu,
                    xc_vcpuinfo_t *info);

long long xc_domain_get_cpu_usage(xc_interface *xch,
                                  uint32_t domid,
                                  int vcpu);

int xc_domain_sethandle(xc_interface *xch, uint32_t domid,
                        xen_domain_handle_t handle);

typedef struct xen_domctl_shadow_op_stats xc_shadow_op_stats_t;
int xc_shadow_control(xc_interface *xch,
                      uint32_t domid,
                      unsigned int sop,
                      unsigned int *mb,
                      unsigned int mode);
long long xc_logdirty_control(xc_interface *xch,
                              uint32_t domid,
                              unsigned int sop,
                              xc_hypercall_buffer_t *dirty_bitmap,
                              unsigned long pages,
                              unsigned int mode,
                              xc_shadow_op_stats_t *stats);
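
/*
 * Illustrative sketch (not part of the original interface): read and
 * clear a domain's dirty log with xc_logdirty_control().  The bitmap
 * must be a hypercall-safe buffer, one bit per guest page;
 * XEN_DOMCTL_SHADOW_OP_CLEAN comes from xen/domctl.h.
 *
 *     DECLARE_HYPERCALL_BUFFER(uint8_t, dirty_bitmap);
 *     xc_shadow_op_stats_t stats;
 *     long long dirty;
 *
 *     if ( xc_hypercall_buffer_alloc(xch, dirty_bitmap,
 *                                    (pages + 7) / 8) == NULL )
 *         return -1;
 *
 *     dirty = xc_logdirty_control(xch, domid, XEN_DOMCTL_SHADOW_OP_CLEAN,
 *                                 HYPERCALL_BUFFER(dirty_bitmap), pages,
 *                                 0, &stats);
 *
 *     xc_hypercall_buffer_free(xch, dirty_bitmap);
 */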

int xc_get_paging_mempool_size(xc_interface *xch, uint32_t domid, uint64_t *size);
int xc_set_paging_mempool_size(xc_interface *xch, uint32_t domid, uint64_t size);

int xc_sched_credit_domain_set(xc_interface *xch,
                               uint32_t domid,
                               struct xen_domctl_sched_credit *sdom);

int xc_sched_credit_domain_get(xc_interface *xch,
                               uint32_t domid,
                               struct xen_domctl_sched_credit *sdom);
int xc_sched_credit_params_set(xc_interface *xch,
                               uint32_t cpupool_id,
                               struct xen_sysctl_credit_schedule *schedule);
int xc_sched_credit_params_get(xc_interface *xch,
                               uint32_t cpupool_id,
                               struct xen_sysctl_credit_schedule *schedule);

int xc_sched_credit2_params_set(xc_interface *xch,
                                uint32_t cpupool_id,
                                struct xen_sysctl_credit2_schedule *schedule);
int xc_sched_credit2_params_get(xc_interface *xch,
                                uint32_t cpupool_id,
                                struct xen_sysctl_credit2_schedule *schedule);
int xc_sched_credit2_domain_set(xc_interface *xch,
                                uint32_t domid,
                                struct xen_domctl_sched_credit2 *sdom);
int xc_sched_credit2_domain_get(xc_interface *xch,
                                uint32_t domid,
                                struct xen_domctl_sched_credit2 *sdom);

int xc_sched_rtds_domain_set(xc_interface *xch,
                             uint32_t domid,
                             struct xen_domctl_sched_rtds *sdom);
int xc_sched_rtds_domain_get(xc_interface *xch,
                             uint32_t domid,
                             struct xen_domctl_sched_rtds *sdom);
int xc_sched_rtds_vcpu_set(xc_interface *xch,
                           uint32_t domid,
                           struct xen_domctl_schedparam_vcpu *vcpus,
                           uint32_t num_vcpus);
int xc_sched_rtds_vcpu_get(xc_interface *xch,
                           uint32_t domid,
                           struct xen_domctl_schedparam_vcpu *vcpus,
                           uint32_t num_vcpus);

int
xc_sched_arinc653_schedule_set(
    xc_interface *xch,
    uint32_t cpupool_id,
    struct xen_sysctl_arinc653_schedule *schedule);

int
xc_sched_arinc653_schedule_get(
    xc_interface *xch,
    uint32_t cpupool_id,
    struct xen_sysctl_arinc653_schedule *schedule);

/**
 * This function sends a trigger to a domain.
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm domid the domain id to send the trigger to
 * @parm trigger the trigger type
 * @parm vcpu the vcpu number to send the trigger to
 * return 0 on success, -1 on failure
 */
int xc_domain_send_trigger(xc_interface *xch,
                           uint32_t domid,
                           uint32_t trigger,
                           uint32_t vcpu);

/**
 * This function enables or disables debugging of a domain.
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm domid the domain id to enable or disable debugging for
 * @parm enable true to enable debugging
 * return 0 on success, -1 on failure
 */
int xc_domain_setdebugging(xc_interface *xch,
                           uint32_t domid,
                           unsigned int enable);

/**
 * This function audits the (top level) p2m of a domain
 * and returns the different error counts, if any.
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm domid the domain id whose top level p2m we
 *       want to audit
 * @parm orphans count of m2p entries for valid
 *       domain pages containing an invalid value
 * @parm m2p_bad count of m2p entries mismatching the
 *       associated p2m entry for this domain
 * @parm p2m_bad count of p2m entries for this domain
 *       mismatching the associated m2p entry
 * return 0 on success, -1 on failure
 * errno values on failure include:
 *          -ENOSYS: not implemented
 *          -EFAULT: could not copy results back to guest
 */
int xc_domain_p2m_audit(xc_interface *xch,
                        uint32_t domid,
                        uint64_t *orphans,
                        uint64_t *m2p_bad,
                        uint64_t *p2m_bad);

/**
 * This function sets or clears the requirement that an access memory
 * event listener is required on the domain.
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm domid the domain id to set the requirement for
 * @parm required true to require a listener
 * return 0 on success, -1 on failure
 */
int xc_domain_set_access_required(xc_interface *xch,
                                  uint32_t domid,
                                  unsigned int required);
/**
 * This function sets the handler of global VIRQs sent by the hypervisor
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm domid the domain id which will handle the VIRQ
 * @parm virq the virq number (VIRQ_*)
 * return 0 on success, -1 on failure
 */
int xc_domain_set_virq_handler(xc_interface *xch, uint32_t domid, int virq);

/*
 * CPUPOOL MANAGEMENT FUNCTIONS
 */

typedef struct xc_cpupoolinfo {
    uint32_t cpupool_id;
    uint32_t sched_id;
    uint32_t n_dom;
    xc_cpumap_t cpumap;
} xc_cpupoolinfo_t;

#define XC_CPUPOOL_POOLID_ANY 0xFFFFFFFF

/**
 * Create a new cpupool.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm ppoolid pointer to the new cpupool id (in/out)
 * @parm sched_id id of scheduler to use for pool
 * return 0 on success, -1 on failure
 */
int xc_cpupool_create(xc_interface *xch,
                      uint32_t *ppoolid,
                      uint32_t sched_id);

/**
 * Destroy a cpupool. Pool must be unused and have no cpu assigned.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm poolid id of the cpupool to destroy
 * return 0 on success, -1 on failure
 */
int xc_cpupool_destroy(xc_interface *xch,
                       uint32_t poolid);

/**
 * Get cpupool info. Returns info for the first existing cpupool with an
 * id not less than the given one.
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm poolid lowest id for which info is returned
 * return cpupool info ptr (to be freed via xc_cpupool_infofree)
 */
xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
                       uint32_t poolid);

/**
 * Free cpupool info. Used to free info obtained via xc_cpupool_getinfo.
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm info area to free
 */
void xc_cpupool_infofree(xc_interface *xch,
                         xc_cpupoolinfo_t *info);

/**
 * Add cpu to a cpupool. cpu may be -1 indicating the first unassigned.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm poolid id of the cpupool
 * @parm cpu cpu number to add
 * return 0 on success, -1 on failure
 */
int xc_cpupool_addcpu(xc_interface *xch,
                      uint32_t poolid,
                      int cpu);

/**
 * Remove cpu from cpupool. cpu may be -1 indicating the last cpu of the pool.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm poolid id of the cpupool
 * @parm cpu cpu number to remove
 * return 0 on success, -1 on failure
 */
int xc_cpupool_removecpu(xc_interface *xch,
                         uint32_t poolid,
                         int cpu);

/**
 * Move domain to another cpupool.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm poolid id of the destination cpupool
 * @parm domid id of the domain to move
 * return 0 on success, -1 on failure
 */
int xc_cpupool_movedomain(xc_interface *xch,
                          uint32_t poolid,
                          uint32_t domid);

/**
 * Return map of cpus not in any cpupool.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * return cpumap array on success, NULL else
 */
xc_cpumap_t xc_cpupool_freeinfo(xc_interface *xch);
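
/*
 * Illustrative sketch (not part of the original interface): create a
 * pool, give it one free cpu, and move a domain into it.
 * XC_CPUPOOL_POOLID_ANY is assumed to ask Xen to pick the pool id.
 *
 *     uint32_t poolid = XC_CPUPOOL_POOLID_ANY;
 *
 *     if ( xc_cpupool_create(xch, &poolid, sched_id) == 0 &&
 *          xc_cpupool_addcpu(xch, poolid, -1) == 0 )
 *         xc_cpupool_movedomain(xch, poolid, domid);
 */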

/*
 * EVENT CHANNEL FUNCTIONS
 *
 * None of these do any logging.
 */

/* A port identifier is guaranteed to fit in 31 bits. */
typedef int xc_evtchn_port_or_error_t;

/**
 * This function allocates an unbound port.  Ports are named endpoints used for
 * interdomain communication.  This function is most useful in opening a
 * well-known port within a domain to receive events on.
 *
 * NOTE: If you are allocating a *local* unbound port, you probably want to
 * use xc_evtchn_bind_unbound_port(). This function is intended for allocating
 * ports *only* during domain creation.
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm dom the ID of the local domain (the 'allocatee')
 * @parm remote_dom the ID of the domain who will later bind
 * @return allocated port (in @dom) on success, -1 on failure
 */
xc_evtchn_port_or_error_t
xc_evtchn_alloc_unbound(xc_interface *xch,
                        uint32_t dom,
                        uint32_t remote_dom);

int xc_evtchn_reset(xc_interface *xch,
                    uint32_t dom);

typedef struct evtchn_status xc_evtchn_status_t;
int xc_evtchn_status(xc_interface *xch, xc_evtchn_status_t *status);



int xc_physdev_pci_access_modify(xc_interface *xch,
                                 uint32_t domid,
                                 int bus,
                                 int dev,
                                 int func,
                                 int enable);

int xc_readconsolering(xc_interface *xch,
                       char *buffer,
                       unsigned int *pnr_chars,
                       int clear, int incremental, uint32_t *pindex);

int xc_send_debug_keys(xc_interface *xch, const char *keys);

typedef struct xen_sysctl_physinfo xc_physinfo_t;
typedef struct xen_sysctl_cputopo xc_cputopo_t;
typedef struct xen_sysctl_numainfo xc_numainfo_t;
typedef struct xen_sysctl_meminfo xc_meminfo_t;
typedef struct xen_sysctl_pcitopoinfo xc_pcitopoinfo_t;

typedef uint32_t xc_cpu_to_node_t;
typedef uint32_t xc_cpu_to_socket_t;
typedef uint32_t xc_cpu_to_core_t;
typedef uint64_t xc_node_to_memsize_t;
typedef uint64_t xc_node_to_memfree_t;
typedef uint32_t xc_node_to_node_dist_t;

int xc_physinfo(xc_interface *xch, xc_physinfo_t *info);
int xc_cputopoinfo(xc_interface *xch, unsigned *max_cpus,
                   xc_cputopo_t *cputopo);
int xc_microcode_update(xc_interface *xch, const void *buf, size_t len);
int xc_get_cpu_version(xc_interface *xch, struct xenpf_pcpu_version *cpu_ver);
int xc_get_ucode_revision(xc_interface *xch,
                          struct xenpf_ucode_revision *ucode_rev);
int xc_numainfo(xc_interface *xch, unsigned *max_nodes,
                xc_meminfo_t *meminfo, uint32_t *distance);
int xc_pcitopoinfo(xc_interface *xch, unsigned num_devs,
                   physdev_pci_device_t *devs, uint32_t *nodes);

int xc_sched_id(xc_interface *xch,
                int *sched_id);

int xc_machphys_mfn_list(xc_interface *xch,
                         unsigned long max_extents,
                         xen_pfn_t *extent_start);

typedef struct xen_sysctl_cpuinfo xc_cpuinfo_t;
int xc_getcpuinfo(xc_interface *xch, int max_cpus,
                  xc_cpuinfo_t *info, int *nr_cpus);

int xc_domain_setmaxmem(xc_interface *xch,
                        uint32_t domid,
                        uint64_t max_memkb);

int xc_domain_set_memmap_limit(xc_interface *xch,
                               uint32_t domid,
                               unsigned long map_limitkb);

int xc_domain_setvnuma(xc_interface *xch,
                        uint32_t domid,
                        uint32_t nr_vnodes,
                        uint32_t nr_regions,
                        uint32_t nr_vcpus,
                        xen_vmemrange_t *vmemrange,
                        unsigned int *vdistance,
                        unsigned int *vcpu_to_vnode,
                        unsigned int *vnode_to_pnode);
/*
 * Retrieve vnuma configuration
 * domid: IN, target domid
 * nr_vnodes: IN/OUT, number of vnodes, not NULL
 * nr_vmemranges: IN/OUT, number of vmemranges, not NULL
 * nr_vcpus: IN/OUT, number of vcpus, not NULL
 * vmemranges: OUT, an array which has length of nr_vmemranges
 * vdistance: OUT, an array which has length of nr_vnodes * nr_vnodes
 * vcpu_to_vnode: OUT, an array which has length of nr_vcpus
 */
int xc_domain_getvnuma(xc_interface *xch,
                       uint32_t domid,
                       uint32_t *nr_vnodes,
                       uint32_t *nr_vmemranges,
                       uint32_t *nr_vcpus,
                       xen_vmemrange_t *vmemrange,
                       unsigned int *vdistance,
                       unsigned int *vcpu_to_vnode);

int xc_domain_soft_reset(xc_interface *xch,
                         uint32_t domid);

#if defined(__i386__) || defined(__x86_64__)
/*
 * PC BIOS standard E820 types and structure.
 */
#define E820_RAM          1
#define E820_RESERVED     2
#define E820_ACPI         3
#define E820_NVS          4
#define E820_UNUSABLE     5

#define E820MAX           (128)

struct e820entry {
    uint64_t addr;
    uint64_t size;
    uint32_t type;
} __attribute__((packed));
int xc_domain_set_memory_map(xc_interface *xch,
                               uint32_t domid,
                               struct e820entry entries[],
                               uint32_t nr_entries);

int xc_get_machine_memory_map(xc_interface *xch,
                              struct e820entry entries[],
                              uint32_t max_entries);
#endif

int xc_reserved_device_memory_map(xc_interface *xch,
                                  uint32_t flags,
                                  uint16_t seg,
                                  uint8_t bus,
                                  uint8_t devfn,
                                  struct xen_reserved_device_memory entries[],
                                  uint32_t *max_entries);
int xc_domain_set_time_offset(xc_interface *xch,
                              uint32_t domid,
                              int32_t time_offset_seconds);

int xc_domain_set_tsc_info(xc_interface *xch,
                           uint32_t domid,
                           uint32_t tsc_mode,
                           uint64_t elapsed_nsec,
                           uint32_t gtsc_khz,
                           uint32_t incarnation);

int xc_domain_get_tsc_info(xc_interface *xch,
                           uint32_t domid,
                           uint32_t *tsc_mode,
                           uint64_t *elapsed_nsec,
                           uint32_t *gtsc_khz,
                           uint32_t *incarnation);

int xc_domain_maximum_gpfn(xc_interface *xch, uint32_t domid, xen_pfn_t *gpfns);

int xc_domain_nr_gpfns(xc_interface *xch, uint32_t domid, xen_pfn_t *gpfns);

int xc_domain_increase_reservation(xc_interface *xch,
                                   uint32_t domid,
                                   unsigned long nr_extents,
                                   unsigned int extent_order,
                                   unsigned int mem_flags,
                                   xen_pfn_t *extent_start);

int xc_domain_increase_reservation_exact(xc_interface *xch,
                                         uint32_t domid,
                                         unsigned long nr_extents,
                                         unsigned int extent_order,
                                         unsigned int mem_flags,
                                         xen_pfn_t *extent_start);

int xc_domain_decrease_reservation(xc_interface *xch,
                                   uint32_t domid,
                                   unsigned long nr_extents,
                                   unsigned int extent_order,
                                   xen_pfn_t *extent_start);

int xc_domain_decrease_reservation_exact(xc_interface *xch,
                                         uint32_t domid,
                                         unsigned long nr_extents,
                                         unsigned int extent_order,
                                         xen_pfn_t *extent_start);

int xc_domain_add_to_physmap(xc_interface *xch,
                             uint32_t domid,
                             unsigned int space,
                             unsigned long idx,
                             xen_pfn_t gpfn);

int xc_domain_add_to_physmap_batch(xc_interface *xch,
                                   uint32_t domid,
                                   uint32_t foreign_domid,
                                   unsigned int space,
                                   unsigned int size,
                                   xen_ulong_t *idxs,
                                   xen_pfn_t *gpfns,
1328                                    int *errs);
1329 
1330 int xc_domain_remove_from_physmap(xc_interface *xch,
1331                                   uint32_t domid,
1332                                   xen_pfn_t gpfn);
1333 
1334 int xc_domain_populate_physmap(xc_interface *xch,
1335                                uint32_t domid,
1336                                unsigned long nr_extents,
1337                                unsigned int extent_order,
1338                                unsigned int mem_flags,
1339                                xen_pfn_t *extent_start);
1340 
1341 int xc_domain_populate_physmap_exact(xc_interface *xch,
1342                                      uint32_t domid,
1343                                      unsigned long nr_extents,
1344                                      unsigned int extent_order,
1345                                      unsigned int mem_flags,
1346                                      xen_pfn_t *extent_start);

int xc_domain_claim_pages(xc_interface *xch,
                          uint32_t domid,
                          unsigned long nr_pages);

int xc_domain_memory_exchange_pages(xc_interface *xch,
                                    uint32_t domid,
                                    unsigned long nr_in_extents,
                                    unsigned int in_order,
                                    xen_pfn_t *in_extents,
                                    unsigned long nr_out_extents,
                                    unsigned int out_order,
                                    xen_pfn_t *out_extents);

int xc_domain_set_pod_target(xc_interface *xch,
                             uint32_t domid,
                             uint64_t target_pages,
                             uint64_t *tot_pages,
                             uint64_t *pod_cache_pages,
                             uint64_t *pod_entries);

int xc_domain_get_pod_target(xc_interface *xch,
                             uint32_t domid,
                             uint64_t *tot_pages,
                             uint64_t *pod_cache_pages,
                             uint64_t *pod_entries);

int xc_domain_ioport_permission(xc_interface *xch,
                                uint32_t domid,
                                uint32_t first_port,
                                uint32_t nr_ports,
                                uint32_t allow_access);

int xc_domain_irq_permission(xc_interface *xch,
                             uint32_t domid,
                             uint32_t pirq,
                             bool allow_access);

int xc_domain_iomem_permission(xc_interface *xch,
                               uint32_t domid,
                               unsigned long first_mfn,
                               unsigned long nr_mfns,
                               uint8_t allow_access);

unsigned long xc_make_page_below_4G(xc_interface *xch, uint32_t domid,
                                    unsigned long mfn);

typedef xen_sysctl_perfc_desc_t xc_perfc_desc_t;
typedef xen_sysctl_perfc_val_t xc_perfc_val_t;
int xc_perfc_reset(xc_interface *xch);
int xc_perfc_query_number(xc_interface *xch,
                          int *nbr_desc,
                          int *nbr_val);
int xc_perfc_query(xc_interface *xch,
                   xc_hypercall_buffer_t *desc,
                   xc_hypercall_buffer_t *val);

typedef xen_sysctl_lockprof_data_t xc_lockprof_data_t;
int xc_lockprof_reset(xc_interface *xch);
int xc_lockprof_query_number(xc_interface *xch,
                             uint32_t *n_elems);
int xc_lockprof_query(xc_interface *xch,
                      uint32_t *n_elems,
                      uint64_t *time,
                      xc_hypercall_buffer_t *data);

void *xc_memalign(xc_interface *xch, size_t alignment, size_t size);

/**
 * Avoid using this function, as it does not work for all cases (such
 * as 4M superpages, or guests using PSE36). Only used for debugging.
 *
 * Translates a virtual address in the context of a given domain and
 * vcpu returning the GFN containing the address (that is, an MFN for
 * PV guests, a PFN for HVM guests).  Returns 0 for failure.
 *
 * @parm xch a handle on an open hypervisor interface
 * @parm dom the domain to perform the translation in
 * @parm vcpu the vcpu to perform the translation on
 * @parm virt the virtual address to translate
 */
unsigned long xc_translate_foreign_address(xc_interface *xch, uint32_t dom,
                                           int vcpu, unsigned long long virt);


int xc_copy_to_domain_page(xc_interface *xch, uint32_t domid,
                           unsigned long dst_pfn, const char *src_page);

int xc_clear_domain_pages(xc_interface *xch, uint32_t domid,
                          unsigned long dst_pfn, int num);

static inline int xc_clear_domain_page(xc_interface *xch, uint32_t domid,
                                       unsigned long dst_pfn)
{
    return xc_clear_domain_pages(xch, domid, dst_pfn, 1);
}

int xc_mmuext_op(xc_interface *xch, struct mmuext_op *op, unsigned int nr_ops,
                 uint32_t dom);

/* System wide memory properties */
int xc_maximum_ram_page(xc_interface *xch, unsigned long *max_mfn);

/* Get current total pages allocated to a domain. */
long xc_get_tot_pages(xc_interface *xch, uint32_t domid);

/**
 * This function retrieves the number of bytes available
 * in the heap in a specific range of address-widths and nodes.
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm min_width the smallest address width to query (0 if don't care)
 * @parm max_width the largest address width to query (0 if don't care)
 * @parm node the node to query (-1 for all)
 * @parm *bytes caller variable to put total bytes counted
 * @return 0 on success, <0 on failure.
 */
int xc_availheap(xc_interface *xch, int min_width, int max_width, int node,
                 uint64_t *bytes);
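
/*
 * Example sketch: querying how much heap is free below 4GiB on any node.
 * The width arguments are address widths in bits; 0 and -1 mean "don't
 * care", as documented above.
 *
 *   uint64_t bytes;
 *   if ( xc_availheap(xch, 0, 32, -1, &bytes) == 0 )
 *       printf("free heap below 4GiB: %" PRIu64 " bytes\n", bytes);
 */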

/*
 * Trace Buffer Operations
 */

/**
 * xc_tbuf_enable - enable tracing buffers
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm pages size of tracing buffers to create (in pages)
 * @parm mfn location in which to store the mfn of the trace buffer area
 * @parm size location in which to store the size (in bytes) of a trace buffer
 *
 * Gets the machine address of the trace pointer area and the size of the
 * per CPU buffers.
 */
int xc_tbuf_enable(xc_interface *xch, unsigned long pages,
                   unsigned long *mfn, unsigned long *size);

/*
 * Disable tracing buffers.
 */
int xc_tbuf_disable(xc_interface *xch);

/**
 * This function sets the size of the trace buffers. Setting the size
 * is currently a one-shot operation that may be performed either at boot
 * time or via this interface, not both. The buffer size must be set before
 * enabling tracing.
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm size the size in pages per cpu for the trace buffers
 * @return 0 on success, -1 on failure.
 */
int xc_tbuf_set_size(xc_interface *xch, unsigned long size);

/**
 * This function retrieves the current size of the trace buffers.
 * Note that the size returned is in terms of bytes, not pages.
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm size will contain the size in bytes for the trace buffers
 * @return 0 on success, -1 on failure.
 */
int xc_tbuf_get_size(xc_interface *xch, unsigned long *size);

int xc_tbuf_set_cpu_mask(xc_interface *xch, xc_cpumap_t mask);

int xc_tbuf_set_evt_mask(xc_interface *xch, uint32_t mask);
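
/*
 * Example sketch of the trace buffer setup described above.  The page
 * count is an assumption chosen for illustration only.
 *
 *   unsigned long mfn, size;
 *   if ( xc_tbuf_enable(xch, 32, &mfn, &size) == 0 )   // 32 pages per cpu
 *       printf("trace area at mfn %#lx, %lu bytes per buffer\n", mfn, size);
 *   ...
 *   xc_tbuf_disable(xch);
 */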

/**
 * Enable vmtrace for a given vCPU.
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm domid domain identifier
 * @parm vcpu vcpu identifier
 * @return 0 on success, -1 on failure
 */
int xc_vmtrace_enable(xc_interface *xch, uint32_t domid, uint32_t vcpu);

/**
 * Disable vmtrace for a given vCPU.
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm domid domain identifier
 * @parm vcpu vcpu identifier
 * @return 0 on success, -1 on failure
 */
int xc_vmtrace_disable(xc_interface *xch, uint32_t domid, uint32_t vcpu);

/**
 * Enable vmtrace for a given vCPU, along with resetting status/offset
 * details.
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm domid domain identifier
 * @parm vcpu vcpu identifier
 * @return 0 on success, -1 on failure
 */
int xc_vmtrace_reset_and_enable(xc_interface *xch, uint32_t domid,
                                uint32_t vcpu);

/**
 * Get current output position inside the trace buffer.
 *
 * Repeated calls will return different values if tracing is enabled.  It is
 * platform specific what happens when the buffer fills completely.
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm domid domain identifier
 * @parm vcpu vcpu identifier
 * @parm pos current output position in bytes
 * @return 0 on success, -1 on failure
 */
int xc_vmtrace_output_position(xc_interface *xch, uint32_t domid,
                               uint32_t vcpu, uint64_t *pos);

/**
 * Get platform specific vmtrace options.
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm domid domain identifier
 * @parm vcpu vcpu identifier
 * @parm key platform-specific input
 * @parm value platform-specific output
 * @return 0 on success, -1 on failure
 */
int xc_vmtrace_get_option(xc_interface *xch, uint32_t domid,
                          uint32_t vcpu, uint64_t key, uint64_t *value);

/**
 * Set platform specific vmtrace options.
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm domid domain identifier
 * @parm vcpu vcpu identifier
 * @parm key platform-specific input
 * @parm value platform-specific input
 * @return 0 on success, -1 on failure
 */
int xc_vmtrace_set_option(xc_interface *xch, uint32_t domid,
                          uint32_t vcpu, uint64_t key, uint64_t value);
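
/*
 * Example sketch tying the vmtrace calls together: enable tracing on one
 * vCPU and poll the output position.  The domid value is an assumption.
 *
 *   uint64_t pos;
 *   if ( xc_vmtrace_enable(xch, domid, 0) == 0 &&
 *        xc_vmtrace_output_position(xch, domid, 0, &pos) == 0 )
 *       printf("vcpu0 trace position: %" PRIu64 "\n", pos);
 *   xc_vmtrace_disable(xch, domid, 0);
 */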

int xc_domctl(xc_interface *xch, struct xen_domctl *domctl);
int xc_sysctl(xc_interface *xch, struct xen_sysctl *sysctl);
long xc_memory_op(xc_interface *xch, unsigned int cmd, void *arg, size_t len);

int xc_version(xc_interface *xch, int cmd, void *arg);

int xc_flask_op(xc_interface *xch, xen_flask_op_t *op);

/*
 * Subscribe to domain suspend via evtchn.
 * Returns -1 on failure, in which case errno will be set appropriately.
 * Just calls XEN_DOMCTL_subscribe - see the caveats for that domctl
 * (in its doc comment in domctl.h).
 */
int xc_domain_subscribe_for_suspend(
    xc_interface *xch, uint32_t domid, evtchn_port_t port);

/**************************
 * GRANT TABLE OPERATIONS *
 **************************/

/*
 * These functions sometimes log messages as above, but not always.
 */


int xc_gnttab_op(xc_interface *xch, int cmd,
                 void * op, int op_size, int count);
/* Logs iff hypercall bounce fails, otherwise doesn't. */

int xc_gnttab_query_size(xc_interface *xch, struct gnttab_query_size *query);
int xc_gnttab_get_version(xc_interface *xch, uint32_t domid); /* Never logs */
grant_entry_v1_t *xc_gnttab_map_table_v1(xc_interface *xch, uint32_t domid, int *gnt_num);
grant_entry_v2_t *xc_gnttab_map_table_v2(xc_interface *xch, uint32_t domid, int *gnt_num);
/* Sometimes these don't set errno [fixme], and sometimes they don't log. */

int xc_physdev_map_pirq(xc_interface *xch,
                        uint32_t domid,
                        int index,
                        int *pirq);

int xc_physdev_map_pirq_msi(xc_interface *xch,
                            uint32_t domid,
                            int index,
                            int *pirq,
                            int devfn,
                            int bus,
                            int entry_nr,
                            uint64_t table_base);

int xc_physdev_unmap_pirq(xc_interface *xch,
                          uint32_t domid,
                          int pirq);

/*
 *  LOGGING AND ERROR REPORTING
 */


#define XC_MAX_ERROR_MSG_LEN 1024
typedef struct xc_error {
  enum xc_error_code code;
  char message[XC_MAX_ERROR_MSG_LEN];
} xc_error;


/*
 * Convert an error code or level into a text description.  Return values
 * are pointers to fixed strings and do not need to be freed.
 * Do not fail, but return pointers to generic strings if fed bogus input.
 */
const char *xc_error_code_to_desc(int code);

/*
 * Convert an errno value to a text description.
 */
const char *xc_strerror(xc_interface *xch, int errcode);


/*
 * Return a pointer to the last error with level XC_REPORT_ERROR. This
 * pointer and the data pointed to are only valid until the next call
 * to libxc in the same thread.
 */
const xc_error *xc_get_last_error(xc_interface *handle);

/*
 * Clear the last error
 */
void xc_clear_last_error(xc_interface *xch);

int xc_hvm_param_set(xc_interface *handle, uint32_t dom, uint32_t param, uint64_t value);
int xc_hvm_param_get(xc_interface *handle, uint32_t dom, uint32_t param, uint64_t *value);

/* Deprecated: use xc_hvm_param_set/get() instead. */
int xc_set_hvm_param(xc_interface *handle, uint32_t dom, int param, unsigned long value);
int xc_get_hvm_param(xc_interface *handle, uint32_t dom, int param, unsigned long *value);
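
/*
 * Example sketch combining the HVM param accessors with the error reporting
 * interface above: on failure, xc_get_last_error() gives a richer
 * description than errno alone.  HVM_PARAM_STORE_PFN comes from
 * xen/hvm/params.h, which this header already includes; dom is assumed.
 *
 *   uint64_t store_pfn;
 *   if ( xc_hvm_param_get(xch, dom, HVM_PARAM_STORE_PFN, &store_pfn) )
 *   {
 *       const xc_error *err = xc_get_last_error(xch);
 *       fprintf(stderr, "get param failed: %s (%s)\n",
 *               err->message, xc_error_code_to_desc(err->code));
 *   }
 */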

/* HVM guest pass-through */
int xc_assign_device(xc_interface *xch,
                     uint32_t domid,
                     uint32_t machine_sbdf,
                     uint32_t flag);

int xc_get_device_group(xc_interface *xch,
                        uint32_t domid,
                        uint32_t machine_sbdf,
                        uint32_t max_sdevs,
                        uint32_t *num_sdevs,
                        uint32_t *sdev_array);

int xc_test_assign_device(xc_interface *xch,
                          uint32_t domid,
                          uint32_t machine_sbdf);

int xc_deassign_device(xc_interface *xch,
                       uint32_t domid,
                       uint32_t machine_sbdf);

int xc_assign_dt_device(xc_interface *xch,
                        uint32_t domid,
                        char *path);
int xc_test_assign_dt_device(xc_interface *xch,
                             uint32_t domid,
                             char *path);
int xc_deassign_dt_device(xc_interface *xch,
                          uint32_t domid,
                          char *path);

int xc_domain_memory_mapping(xc_interface *xch,
                             uint32_t domid,
                             unsigned long first_gfn,
                             unsigned long first_mfn,
                             unsigned long nr_mfns,
                             uint32_t add_mapping);

int xc_domain_ioport_mapping(xc_interface *xch,
                             uint32_t domid,
                             uint32_t first_gport,
                             uint32_t first_mport,
                             uint32_t nr_ports,
                             uint32_t add_mapping);

int xc_domain_update_msi_irq(
    xc_interface *xch,
    uint32_t domid,
    uint32_t gvec,
    uint32_t pirq,
    uint32_t gflags,
    uint64_t gtable);

int xc_domain_unbind_msi_irq(xc_interface *xch,
                             uint32_t domid,
                             uint32_t gvec,
                             uint32_t pirq,
                             uint32_t gflags);

int xc_domain_bind_pt_irq(xc_interface *xch,
                          uint32_t domid,
                          uint8_t machine_irq,
                          uint8_t irq_type,
                          uint8_t bus,
                          uint8_t device,
                          uint8_t intx,
                          uint8_t isa_irq);

int xc_domain_unbind_pt_irq(xc_interface *xch,
                            uint32_t domid,
                            uint8_t machine_irq,
                            uint8_t irq_type,
                            uint8_t bus,
                            uint8_t device,
                            uint8_t intx,
                            uint8_t isa_irq);

int xc_domain_bind_pt_pci_irq(xc_interface *xch,
                              uint32_t domid,
                              uint8_t machine_irq,
                              uint8_t bus,
                              uint8_t device,
                              uint8_t intx);

int xc_domain_bind_pt_isa_irq(xc_interface *xch,
                              uint32_t domid,
                              uint8_t machine_irq);

int xc_domain_bind_pt_spi_irq(xc_interface *xch,
                              uint32_t domid,
                              uint16_t vspi,
                              uint16_t spi);

int xc_domain_unbind_pt_spi_irq(xc_interface *xch,
                                uint32_t domid,
                                uint16_t vspi,
                                uint16_t spi);

/* Set the target domain */
int xc_domain_set_target(xc_interface *xch,
                         uint32_t domid,
                         uint32_t target);

/* Control the domain for debug */
int xc_domain_debug_control(xc_interface *xch,
                            uint32_t domid,
                            uint32_t sop,
                            uint32_t vcpu);

#if defined(__i386__) || defined(__x86_64__)

/*
 * CPUID policy data, expressed in the legacy XEND format.
 *
 * Policy is an array of strings, 32 chars long:
 *   policy[0] = eax
 *   policy[1] = ebx
 *   policy[2] = ecx
 *   policy[3] = edx
 *
 * The format of the string is the following:
 *   '1' -> force to 1
 *   '0' -> force to 0
 *   'x' -> we don't care (use default)
 *   'k' -> pass through host value
 *   's' -> legacy alias for 'k'
 */
struct xc_xend_cpuid {
    union {
        struct {
            uint32_t leaf, subleaf;
        };
        uint32_t input[2];
    };
    char *policy[4];
};

/*
 * MSR policy data.
 *
 * The format of the policy string is the following:
 *   '1' -> force to 1
 *   '0' -> force to 0
 *   'x' -> we don't care (use default)
 *   'k' -> pass through host value
 */
struct xc_msr {
    uint32_t index;
    char policy[65];
};
#define XC_MSR_INPUT_UNUSED 0xffffffffu

/*
 * Make adjustments to the CPUID settings for a domain.
 *
 * This path is used in two cases.  First, for fresh boots of the domain, and
 * secondly for migrate-in/restore of pre-4.14 guests (where CPUID data was
 * missing from the stream).  The @restore parameter distinguishes these
 * cases, and the generated policy must be compatible with Xen 4.13.
 *
 * Either pass a full new @featureset (and @nr_features), or adjust individual
 * features (@pae, @itsc, @nested_virt).
 *
 * Then (optionally) apply legacy XEND CPUID overrides (@xend) or MSR (@msr)
 * data to the result.
 */
int xc_cpuid_apply_policy(xc_interface *xch,
                          uint32_t domid, bool restore,
                          const uint32_t *featureset,
                          unsigned int nr_features, bool pae, bool itsc,
                          bool nested_virt, const struct xc_xend_cpuid *xend,
                          const struct xc_msr *msr);
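
/*
 * Illustrative sketch of a legacy XEND CPUID override as described above:
 * force bit 0 of leaf 0x7/0 EBX to 0 while leaving everything else at its
 * default, then apply it during a fresh boot.  The policy string is 32
 * characters, assumed here to run from the most significant bit down to
 * bit 0; the list terminator sentinel is also an assumption - verify both
 * against the implementation before relying on them.
 *
 *   struct xc_xend_cpuid cpuid[] = {
 *       { .leaf = 0x7, .subleaf = 0,
 *         .policy = { NULL,
 *                     "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx0",   // ebx
 *                     NULL, NULL } },
 *       { .leaf = ~0u },        // assumed end-of-list sentinel
 *   };
 *   rc = xc_cpuid_apply_policy(xch, domid, false, NULL, 0,
 *                              true, true, false, cpuid, NULL);
 */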
int xc_mca_op(xc_interface *xch, struct xen_mc *mc);
int xc_mca_op_inject_v2(xc_interface *xch, unsigned int flags,
                        xc_cpumap_t cpumap, unsigned int nr_cpus);
#endif

struct xc_px_val {
    uint64_t freq;        /* Px core frequency */
    uint64_t residency;   /* Px residency time */
    uint64_t count;       /* Px transition count */
};

struct xc_px_stat {
    uint8_t total;        /* total Px states */
    uint8_t usable;       /* usable Px states */
    uint8_t last;         /* last Px state */
    uint8_t cur;          /* current Px state */
    uint64_t *trans_pt;   /* Px transition table */
    struct xc_px_val *pt;
};

int xc_pm_get_max_px(xc_interface *xch, int cpuid, int *max_px);
int xc_pm_get_pxstat(xc_interface *xch, int cpuid, struct xc_px_stat *pxpt);
int xc_pm_reset_pxstat(xc_interface *xch, int cpuid);
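
/*
 * Example sketch of the Px statistics calls.  The caller is assumed to
 * allocate the trans_pt (max_px * max_px entries) and pt (max_px entries)
 * arrays before calling xc_pm_get_pxstat(), mirroring how xenpm uses this
 * interface; cpu is an assumed CPU number.
 *
 *   int max_px;
 *   struct xc_px_stat px = {};
 *   if ( xc_pm_get_max_px(xch, cpu, &max_px) == 0 )
 *   {
 *       px.trans_pt = calloc(max_px * max_px, sizeof(uint64_t));
 *       px.pt = calloc(max_px, sizeof(struct xc_px_val));
 *       if ( px.trans_pt && px.pt && !xc_pm_get_pxstat(xch, cpu, &px) )
 *           printf("current Px state: P%d\n", px.cur);
 *       free(px.trans_pt);
 *       free(px.pt);
 *   }
 */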

struct xc_cx_stat {
    uint32_t nr;           /* entry nr in triggers[]/residencies[], incl C0 */
    uint32_t last;         /* last Cx state */
    uint64_t idle_time;    /* idle time from boot */
    uint64_t *triggers;    /* Cx trigger counts */
    uint64_t *residencies; /* Cx residencies */
    uint32_t nr_pc;        /* entry nr in pc[] */
    uint32_t nr_cc;        /* entry nr in cc[] */
    uint64_t *pc;          /* 1-biased indexing (i.e. excl C0) */
    uint64_t *cc;          /* 1-biased indexing (i.e. excl C0) */
};
typedef struct xc_cx_stat xc_cx_stat_t;

int xc_pm_get_max_cx(xc_interface *xch, int cpuid, int *max_cx);
int xc_pm_get_cxstat(xc_interface *xch, int cpuid, struct xc_cx_stat *cxpt);
int xc_pm_reset_cxstat(xc_interface *xch, int cpuid);

int xc_cpu_online(xc_interface *xch, int cpu);
int xc_cpu_offline(xc_interface *xch, int cpu);
int xc_smt_enable(xc_interface *xch);
int xc_smt_disable(xc_interface *xch);

/*
 * The cpufreq parameter names in this structure match the
 * corresponding sysfs file names used by native Linux.
 */
typedef struct xen_userspace xc_userspace_t;
typedef struct xen_ondemand xc_ondemand_t;
typedef struct xen_cppc_para xc_cppc_para_t;

struct xc_get_cpufreq_para {
    /* IN/OUT variable */
    uint32_t cpu_num;
    uint32_t freq_num;
    uint32_t gov_num;

    /* for all governors */
    /* OUT variable */
    uint32_t *affected_cpus;
    uint32_t *scaling_available_frequencies;
    char     *scaling_available_governors;
    char scaling_driver[CPUFREQ_NAME_LEN];

    uint32_t cpuinfo_cur_freq;
    uint32_t cpuinfo_max_freq;
    uint32_t cpuinfo_min_freq;
    union {
        struct {
            uint32_t scaling_cur_freq;

            char scaling_governor[CPUFREQ_NAME_LEN];
            uint32_t scaling_max_freq;
            uint32_t scaling_min_freq;

            /* for specific governor */
            union {
                xc_userspace_t userspace;
                xc_ondemand_t ondemand;
            } u;
        } s;
        xc_cppc_para_t cppc_para;
    } u;

    int32_t turbo_enabled;
};

typedef struct xen_set_cppc_para xc_set_cppc_para_t;

int xc_get_cpufreq_para(xc_interface *xch, int cpuid,
                        struct xc_get_cpufreq_para *user_para);
int xc_set_cpufreq_gov(xc_interface *xch, int cpuid, char *govname);
int xc_set_cpufreq_para(xc_interface *xch, int cpuid,
                        int ctrl_type, int ctrl_value);
int xc_set_cpufreq_cppc(xc_interface *xch, int cpuid,
                        xc_set_cppc_para_t *set_cppc);
int xc_get_cpufreq_avgfreq(xc_interface *xch, int cpuid, int *avg_freq);

int xc_set_sched_opt_smt(xc_interface *xch, uint32_t value);

int xc_get_cpuidle_max_cstate(xc_interface *xch, uint32_t *value);
int xc_set_cpuidle_max_cstate(xc_interface *xch, uint32_t value);

int xc_get_cpuidle_max_csubstate(xc_interface *xch, uint32_t *value);
int xc_set_cpuidle_max_csubstate(xc_interface *xch, uint32_t value);

int xc_enable_turbo(xc_interface *xch, int cpuid);
int xc_disable_turbo(xc_interface *xch, int cpuid);

/**
 * altp2m operations
 */

int xc_altp2m_get_domain_state(xc_interface *handle, uint32_t dom, bool *state);
int xc_altp2m_set_domain_state(xc_interface *handle, uint32_t dom, bool state);
int xc_altp2m_set_vcpu_enable_notify(xc_interface *handle, uint32_t domid,
                                     uint32_t vcpuid, xen_pfn_t gfn);
int xc_altp2m_set_vcpu_disable_notify(xc_interface *handle, uint32_t domid,
                                      uint32_t vcpuid);
int xc_altp2m_create_view(xc_interface *handle, uint32_t domid,
                          xenmem_access_t default_access, uint16_t *view_id);
int xc_altp2m_destroy_view(xc_interface *handle, uint32_t domid,
                           uint16_t view_id);
/* Switch all vCPUs of the domain to the specified altp2m view */
int xc_altp2m_switch_to_view(xc_interface *handle, uint32_t domid,
                             uint16_t view_id);
int xc_altp2m_set_suppress_ve(xc_interface *handle, uint32_t domid,
                              uint16_t view_id, xen_pfn_t gfn, bool sve);
int xc_altp2m_set_supress_ve_multi(xc_interface *handle, uint32_t domid,
                                   uint16_t view_id, xen_pfn_t first_gfn,
                                   xen_pfn_t last_gfn, bool sve,
                                   xen_pfn_t *error_gfn, int32_t *error_code);
int xc_altp2m_get_suppress_ve(xc_interface *handle, uint32_t domid,
                              uint16_t view_id, xen_pfn_t gfn, bool *sve);
int xc_altp2m_set_mem_access(xc_interface *handle, uint32_t domid,
                             uint16_t view_id, xen_pfn_t gfn,
                             xenmem_access_t access);
int xc_altp2m_set_mem_access_multi(xc_interface *handle, uint32_t domid,
                                   uint16_t view_id, uint8_t *access,
                                   uint64_t *gfns, uint32_t nr);
int xc_altp2m_get_mem_access(xc_interface *handle, uint32_t domid,
                             uint16_t view_id, xen_pfn_t gfn,
                             xenmem_access_t *access);
int xc_altp2m_change_gfn(xc_interface *handle, uint32_t domid,
                         uint16_t view_id, xen_pfn_t old_gfn,
                         xen_pfn_t new_gfn);
int xc_altp2m_get_vcpu_p2m_idx(xc_interface *handle, uint32_t domid,
                               uint32_t vcpuid, uint16_t *p2midx);
/*
 * Set view visibility for xc_altp2m_switch_to_view and vmfunc.
 * Note: If altp2m mode is set to mixed the guest is able to change the view
 * visibility and then call vmfunc.
 */
int xc_altp2m_set_visibility(xc_interface *handle, uint32_t domid,
                             uint16_t view_id, bool visible);
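
/*
 * Example sketch of a minimal altp2m workflow using the calls above: turn
 * altp2m on for the domain, create a restricted view, and switch all vCPUs
 * to it.  The gfn and access types chosen here are illustrative only.
 *
 *   uint16_t view;
 *   xen_pfn_t gfn = 0x1000;                // hypothetical guest frame
 *   if ( xc_altp2m_set_domain_state(xch, domid, true) == 0 &&
 *        xc_altp2m_create_view(xch, domid, XENMEM_access_rwx, &view) == 0 )
 *   {
 *       xc_altp2m_set_mem_access(xch, domid, view, gfn, XENMEM_access_r);
 *       xc_altp2m_switch_to_view(xch, domid, view);
 *   }
 */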

/**
 * Mem paging operations.
 * Paging is supported only on the x86 architecture in 64 bit mode, with
 * Hardware-Assisted Paging (i.e. Intel EPT, AMD NPT). Moreover, AMD NPT
 * support is considered experimental.
 */
int xc_mem_paging_enable(xc_interface *xch, uint32_t domain_id, uint32_t *port);
int xc_mem_paging_disable(xc_interface *xch, uint32_t domain_id);
int xc_mem_paging_resume(xc_interface *xch, uint32_t domain_id);
int xc_mem_paging_nominate(xc_interface *xch, uint32_t domain_id,
                           uint64_t gfn);
int xc_mem_paging_evict(xc_interface *xch, uint32_t domain_id, uint64_t gfn);
int xc_mem_paging_prep(xc_interface *xch, uint32_t domain_id, uint64_t gfn);
int xc_mem_paging_load(xc_interface *xch, uint32_t domain_id,
                       uint64_t gfn, void *buffer);

/**
 * Access tracking operations.
 * Supported only on Intel EPT 64 bit processors.
 */

/*
 * Set a range of memory to a specific access.
 * Allowed types are XENMEM_access_default, XENMEM_access_n, any combination of
 * XENMEM_access_ + (rwx), and XENMEM_access_rx2rw
 */
int xc_set_mem_access(xc_interface *xch, uint32_t domain_id,
                      xenmem_access_t access, uint64_t first_pfn,
                      uint32_t nr);

/*
 * Set an array of pages to their respective access in the access array.
 * The nr parameter specifies the size of the pages and access arrays.
 * The same allowed access types as for xc_set_mem_access() apply.
 */
int xc_set_mem_access_multi(xc_interface *xch, uint32_t domain_id,
                            uint8_t *access, uint64_t *pages,
                            uint32_t nr);

/*
 * Gets the mem access for the given page (returned in access on success)
 */
int xc_get_mem_access(xc_interface *xch, uint32_t domain_id,
                      uint64_t pfn, xenmem_access_t *access);
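
/*
 * Example sketch: make a range of guest pages read-only and read the
 * setting back for the first page.  first_pfn and nr are assumptions.
 *
 *   xenmem_access_t access;
 *   if ( xc_set_mem_access(xch, domid, XENMEM_access_r, first_pfn, nr) == 0 &&
 *        xc_get_mem_access(xch, domid, first_pfn, &access) == 0 )
 *       printf("access is now %d\n", access);   // expect XENMEM_access_r
 */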

/*
 * Returns the VM_EVENT_INTERFACE version.
 */
int xc_vm_event_get_version(xc_interface *xch);

/***
 * Monitor control operations.
 *
 * Enables the VM event monitor ring and returns the mapped ring page.
 * This ring is used to deliver mem_access events, as well as a set of
 * additional events that can be enabled with the xc_monitor_* functions.
 *
 * Will return NULL on error.
 * Caller has to unmap this page when done.
 */
void *xc_monitor_enable(xc_interface *xch, uint32_t domain_id, uint32_t *port);
int xc_monitor_disable(xc_interface *xch, uint32_t domain_id);
int xc_monitor_resume(xc_interface *xch, uint32_t domain_id);
/*
 * Get a bitmap of supported monitor events in the form
 * (1 << XEN_DOMCTL_MONITOR_EVENT_*).
 */
int xc_monitor_get_capabilities(xc_interface *xch, uint32_t domain_id,
                                uint32_t *capabilities);
int xc_monitor_write_ctrlreg(xc_interface *xch, uint32_t domain_id,
                             uint16_t index, bool enable, bool sync,
                             uint64_t bitmask, bool onchangeonly);
/*
 * A list of MSR indices can usually be found in /usr/include/asm/msr-index.h.
 * Please consult the Intel/AMD manuals for more information on
 * non-architectural indices.
 */
int xc_monitor_mov_to_msr(xc_interface *xch, uint32_t domain_id, uint32_t msr,
                          bool enable, bool onchangeonly);
int xc_monitor_singlestep(xc_interface *xch, uint32_t domain_id, bool enable);
int xc_monitor_software_breakpoint(xc_interface *xch, uint32_t domain_id,
                                   bool enable);
int xc_monitor_descriptor_access(xc_interface *xch, uint32_t domain_id,
                                 bool enable);
int xc_monitor_guest_request(xc_interface *xch, uint32_t domain_id,
                             bool enable, bool sync, bool allow_userspace);
/*
 * Disables page-walk mem_access events by emulating the faulting
 * instruction.  If the emulation cannot be performed then a
 * VM_EVENT_REASON_EMUL_UNIMPLEMENTED event will be issued.
 */
int xc_monitor_inguest_pagefault(xc_interface *xch, uint32_t domain_id,
                                 bool disable);
int xc_monitor_debug_exceptions(xc_interface *xch, uint32_t domain_id,
                                bool enable, bool sync);
int xc_monitor_cpuid(xc_interface *xch, uint32_t domain_id, bool enable);
int xc_monitor_privileged_call(xc_interface *xch, uint32_t domain_id,
                               bool enable);
int xc_monitor_emul_unimplemented(xc_interface *xch, uint32_t domain_id,
                                  bool enable);
int xc_monitor_vmexit(xc_interface *xch, uint32_t domain_id, bool enable,
                      bool sync);
int xc_monitor_io(xc_interface *xch, uint32_t domain_id, bool enable);
/**
 * This function enables / disables emulation for each REP for a
 * REP-compatible instruction.
 *
 * @parm xch a handle to an open hypervisor interface.
 * @parm domain_id the domain id to operate on.
 * @parm enable if 0 optimize when possible, else emulate each REP.
 * @return 0 on success, -1 on failure.
 */
int xc_monitor_emulate_each_rep(xc_interface *xch, uint32_t domain_id,
                                bool enable);
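
/*
 * Example sketch of bringing up the monitor ring described above and
 * enabling one event type.  Consuming events from the returned ring page
 * requires the vm_event ring protocol and event channel handling, which is
 * omitted here; munmap() comes from <sys/mman.h>.
 *
 *   uint32_t port;
 *   void *ring = xc_monitor_enable(xch, domid, &port);
 *   if ( ring )
 *   {
 *       xc_monitor_singlestep(xch, domid, true);
 *       ...                      // bind the event channel, process events
 *       xc_monitor_singlestep(xch, domid, false);
 *       xc_monitor_disable(xch, domid);
 *       munmap(ring, XC_PAGE_SIZE);
 *   }
 */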

/***
 * Memory sharing operations.
 *
 * Unless otherwise noted, these calls return 0 on success, -1 and errno on
 * failure.
 *
 * Sharing is supported only on the x86 architecture in 64 bit mode, with
 * Hardware-Assisted Paging (i.e. Intel EPT, AMD NPT). Moreover, AMD NPT
 * support is considered experimental.
 *
 * Calls below return ENOSYS if not on the x86_64 architecture.
 * Calls below return ENODEV if the domain does not support HAP.
 * Calls below return ESRCH if the specified domain does not exist.
 * Calls below return EPERM if the caller is unprivileged for this domain.
 */

/* Turn on/off sharing for the domid, depending on the enable flag.
 *
 * Returns EXDEV if trying to enable and the domain has had a PCI device
 * assigned for passthrough (these two features are mutually exclusive).
 *
 * When sharing for a domain is turned off, the domain may still reference
 * shared pages. Unsharing happens lazily. */
int xc_memshr_control(xc_interface *xch,
                      uint32_t domid,
                      int enable);

/* Create a communication ring in which the hypervisor will place ENOMEM
 * notifications.
 *
 * ENOMEM happens when unsharing pages: a Copy-on-Write duplicate needs to be
 * allocated, and thus an out-of-memory error can occur.
 *
 * For complete examples on how to plumb a notification ring, look into
 * xenpaging or xen-access.
 *
 * On receipt of a notification, the helper should ensure there is memory
 * available to the domain before retrying.
 *
 * If a domain encounters an ENOMEM condition when sharing and this ring
 * has not been set up, the hypervisor will crash the domain.
 *
 * Fails with:
 *  EINVAL if port is NULL
 *  EINVAL if the sharing ring has already been enabled
 *  ENOSYS if no guest gfn has been specified to host the ring via an hvm param
 *  EINVAL if the gfn for the ring has not been populated
 *  ENOENT if the gfn for the ring is paged out, or cannot be unshared
 *  EINVAL if the gfn for the ring cannot be written to
 *  EINVAL if the domain is dying
 *  ENOSPC if an event channel cannot be allocated for the ring
 *  ENOMEM if memory cannot be allocated for internal data structures
 *  EINVAL or EACCESS if the request is denied by the security policy
 */

int xc_memshr_ring_enable(xc_interface *xch,
                          uint32_t domid,
                          uint32_t *port);
/* Disable the ring for ENOMEM communication.
 * May fail with EINVAL if the ring was not enabled in the first place.
 */
int xc_memshr_ring_disable(xc_interface *xch,
                           uint32_t domid);

/*
 * Calls below return EINVAL if sharing has not been enabled for the domain
 * Calls below return EINVAL if the domain is dying
 */
/* Once a response to an ENOMEM notification is prepared, the tool can
 * notify the hypervisor to re-schedule the faulting vcpu of the domain with an
 * event channel kick and/or this call. */
int xc_memshr_domain_resume(xc_interface *xch,
                            uint32_t domid);

/* Select a page for sharing.
 *
 * A 64 bit opaque handle will be stored in handle.  The hypervisor ensures
 * that if the page is modified, the handle will be invalidated, and future
 * users of it will fail. If the page has already been selected and is still
 * associated to a valid handle, the existing handle will be returned.
 *
 * May fail with:
 *  EINVAL if the gfn is not populated or not sharable (mmio, etc)
 *  ENOMEM if internal data structures cannot be allocated
 *  E2BIG if the page is being referenced by other subsystems (e.g. qemu)
 *  ENOENT or EEXIST if there are internal hypervisor errors.
 */
int xc_memshr_nominate_gfn(xc_interface *xch,
                           uint32_t domid,
                           unsigned long gfn,
                           uint64_t *handle);
/* Same as above, but instead of a guest frame number, the input is a grant
 * reference provided by the guest.
 *
 * May fail with EINVAL if the grant reference is invalid.
 */
int xc_memshr_nominate_gref(xc_interface *xch,
                            uint32_t domid,
                            grant_ref_t gref,
                            uint64_t *handle);

/* The three calls below may fail with
 * 10 (or -XENMEM_SHARING_OP_S_HANDLE_INVALID) if the handle passed as source
 * is invalid.
 * 9 (or -XENMEM_SHARING_OP_C_HANDLE_INVALID) if the handle passed as client is
 * invalid.
 */
/* Share two nominated guest pages.
 *
 * If the call succeeds, both pages will point to the same backing frame (or
 * mfn). The hypervisor will verify the handles are still valid, but it will
 * not perform any sanity checking on the contents of the pages (the selection
 * mechanism for sharing candidates is entirely up to the user-space tool).
 *
 * After successful sharing, the client handle becomes invalid. Both <domain,
 * gfn> tuples point to the same mfn with the same handle, the one specified as
 * source. Either 3-tuple can be specified later for further re-sharing.
 */
int xc_memshr_share_gfns(xc_interface *xch,
                         uint32_t source_domain,
                         unsigned long source_gfn,
                         uint64_t source_handle,
                         uint32_t client_domain,
                         unsigned long client_gfn,
                         uint64_t client_handle);
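
/*
 * Example sketch of deduplicating one page between two domains with the
 * calls above: nominate a page in each domain, then share them.  Error
 * handling is elided and the domid/gfn values are assumptions.
 *
 *   uint64_t h_src, h_cli;
 *   xc_memshr_nominate_gfn(xch, src_domid, src_gfn, &h_src);
 *   xc_memshr_nominate_gfn(xch, cli_domid, cli_gfn, &h_cli);
 *   xc_memshr_share_gfns(xch, src_domid, src_gfn, h_src,
 *                        cli_domid, cli_gfn, h_cli);
 */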

/* Same as above, but share two grant references instead.
 *
 * May fail with EINVAL if either grant reference is invalid.
 */
int xc_memshr_share_grefs(xc_interface *xch,
                          uint32_t source_domain,
                          grant_ref_t source_gref,
                          uint64_t source_handle,
                          uint32_t client_domain,
                          grant_ref_t client_gref,
                          uint64_t client_handle);

/* Allows a shared frame to be added directly to the guest physmap of the
 * client domain.
 *
 * May additionally fail with
 *  9 (-XENMEM_SHARING_OP_C_HANDLE_INVALID) if the physmap entry for the gfn is
 *  not suitable.
 *  ENOMEM if internal data structures cannot be allocated.
 *  ENOENT if there is an internal hypervisor error.
 */
int xc_memshr_add_to_physmap(xc_interface *xch,
                             uint32_t source_domain,
                             unsigned long source_gfn,
                             uint64_t source_handle,
                             uint32_t client_domain,
                             unsigned long client_gfn);

/* Allows a range of memory of a client domain to be deduplicated. Using
 * this function is equivalent to calling xc_memshr_nominate_gfn for each gfn
 * in the two domains followed by xc_memshr_share_gfns.
 *
 * May fail with -EINVAL if the source and client domain have different
 * memory size or if memory sharing is not enabled on either of the domains.
 * May also fail with -ENOMEM if there isn't enough memory available to store
 * the sharing metadata before deduplication can happen.
 */
int xc_memshr_range_share(xc_interface *xch,
                          uint32_t source_domain,
                          uint32_t client_domain,
                          uint64_t first_gfn,
                          uint64_t last_gfn);

int xc_memshr_fork(xc_interface *xch,
                   uint32_t source_domain,
                   uint32_t client_domain,
                   bool allow_with_iommu,
                   bool block_interrupts);

/*
 * Note: this function is only intended to be used on short-lived forks that
 * haven't yet acquired a lot of memory. In case the fork has a lot of memory
 * it is likely more performant to create a new fork with xc_memshr_fork.
 *
 * With VMs that have a lot of memory this call may block for a long time.
 */
int xc_memshr_fork_reset(xc_interface *xch, uint32_t forked_domain,
                         bool reset_state, bool reset_memory);

/* Debug calls: return the number of pages referencing the shared frame backing
 * the input argument. Should be one or greater.
 *
 * May fail with EINVAL if there is no backing shared frame for the input
 * argument.
 */
int xc_memshr_debug_gfn(xc_interface *xch,
                        uint32_t domid,
                        unsigned long gfn);
/* May additionally fail with EINVAL if the grant reference is invalid. */
int xc_memshr_debug_gref(xc_interface *xch,
                         uint32_t domid,
                         grant_ref_t gref);

/* Audits the share subsystem.
 *
 * Returns ENOSYS if not supported (may not be compiled into the hypervisor).
 *
 * Returns the number of errors found during auditing otherwise. May be (should
 * be!) zero.
 *
 * If debugtrace support has been compiled into the hypervisor and is enabled,
 * verbose descriptions for the errors are available in the hypervisor console.
 */
int xc_memshr_audit(xc_interface *xch);

/* Stats reporting.
 *
 * At any point in time, the following equality should hold for a host:
 *
 *  Let dominfo(d) be the xc_dominfo_t struct filled by a call to
 *  xc_domain_getinfo(d)
 *
 *  The summation of dominfo(d)->shr_pages for all domains in the system
 *      should be equal to
 *  xc_sharing_freed_pages + xc_sharing_used_frames
 */
/*
 * This function returns the total number of pages freed by using sharing
 * on the system.  For example, if two domains contain a single entry in
 * their p2m table that points to the same shared page (and no other pages
 * in the system are shared), then this function should return 1.
 */
long xc_sharing_freed_pages(xc_interface *xch);

/*
 * This function returns the total number of frames occupied by shared
 * pages on the system.  This is independent of the number of domains
 * pointing at these frames.  For example, in the above scenario this
 * should return 1. (And dominfo(d) for each of the two domains should return 1
 * as well).
 *
 * Note that some of these sharing_used_frames may be referenced by
 * a single domain page, and thus not realize any savings. The same
 * applies to some of the pages counted in dominfo(d)->shr_pages.
 */
long xc_sharing_used_frames(xc_interface *xch);
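
/*
 * Example sketch: report the system-wide sharing statistics defined above.
 *
 *   long freed = xc_sharing_freed_pages(xch);
 *   long used  = xc_sharing_used_frames(xch);
 *   if ( freed >= 0 && used >= 0 )
 *       printf("sharing: %ld pages freed, %ld frames in use\n", freed, used);
 */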
/*** End sharing interface ***/

int xc_flask_load(xc_interface *xc_handle, char *buf, uint32_t size);
int xc_flask_context_to_sid(xc_interface *xc_handle, char *buf, uint32_t size, uint32_t *sid);
int xc_flask_sid_to_context(xc_interface *xc_handle, int sid, char *buf, uint32_t size);
int xc_flask_getenforce(xc_interface *xc_handle);
int xc_flask_setenforce(xc_interface *xc_handle, int mode);
int xc_flask_getbool_byid(xc_interface *xc_handle, int id, char *name, uint32_t size, int *curr, int *pend);
int xc_flask_getbool_byname(xc_interface *xc_handle, char *name, int *curr, int *pend);
int xc_flask_setbool(xc_interface *xc_handle, char *name, int value, int commit);
int xc_flask_add_pirq(xc_interface *xc_handle, unsigned int pirq, char *scontext);
int xc_flask_add_ioport(xc_interface *xc_handle, unsigned long low, unsigned long high,
                        char *scontext);
int xc_flask_add_iomem(xc_interface *xc_handle, unsigned long low, unsigned long high,
                       char *scontext);
int xc_flask_add_device(xc_interface *xc_handle, unsigned long device, char *scontext);
int xc_flask_del_pirq(xc_interface *xc_handle, unsigned int pirq);
int xc_flask_del_ioport(xc_interface *xc_handle, unsigned long low, unsigned long high);
int xc_flask_del_iomem(xc_interface *xc_handle, unsigned long low, unsigned long high);
int xc_flask_del_device(xc_interface *xc_handle, unsigned long device);
int xc_flask_access(xc_interface *xc_handle, const char *scon, const char *tcon,
                    uint16_t tclass, uint32_t req,
                    uint32_t *allowed, uint32_t *decided,
                    uint32_t *auditallow, uint32_t *auditdeny,
                    uint32_t *seqno);
int xc_flask_avc_cachestats(xc_interface *xc_handle, char *buf, int size);
int xc_flask_policyvers(xc_interface *xc_handle);
int xc_flask_avc_hashstats(xc_interface *xc_handle, char *buf, int size);
int xc_flask_getavc_threshold(xc_interface *xc_handle);
int xc_flask_setavc_threshold(xc_interface *xc_handle, int threshold);
int xc_flask_relabel_domain(xc_interface *xch, uint32_t domid, uint32_t sid);

struct elf_binary;
void xc_elf_set_logfile(xc_interface *xch, struct elf_binary *elf,
                        int verbose);
/* Useful for callers who also use libelf. */

/*
 * Execute an image previously loaded with xc_kexec_load().
 *
 * Does not return on success.
 *
 * Fails with:
 *   ENOENT if the specified image has not been loaded.
 */
int xc_kexec_exec(xc_interface *xch, int type);

/*
 * Find the machine address and size of certain memory areas.
 *
 *   KEXEC_RANGE_MA_CRASH       crash area
 *   KEXEC_RANGE_MA_XEN         Xen itself
 *   KEXEC_RANGE_MA_CPU         CPU note for CPU number 'nr'
 *   KEXEC_RANGE_MA_XENHEAP     xenheap
 *   KEXEC_RANGE_MA_EFI_MEMMAP  EFI Memory Map
 *   KEXEC_RANGE_MA_VMCOREINFO  vmcoreinfo
 *
 * Fails with:
 *   EINVAL if the range or CPU number isn't valid.
 */
int xc_kexec_get_range(xc_interface *xch, int range, int nr,
                       uint64_t *size, uint64_t *start);

/*
 * Load a kexec image into memory.
 *
 * The image may be of type KEXEC_TYPE_DEFAULT (executed on request)
 * or KEXEC_TYPE_CRASH (executed on a crash).
 *
 * The image architecture may be a 32-bit variant of the hypervisor
 * architecture (e.g., EM_386 on an x86-64 hypervisor).
 *
 * Fails with:
 *   ENOMEM if there is insufficient memory for the new image.
 *   EINVAL if the image does not fit into the crash area or the entry
 *          point isn't within one of the segments.
 *   EBUSY  if another image is being executed.
 */
int xc_kexec_load(xc_interface *xch, uint8_t type, uint16_t arch,
                  uint64_t entry_maddr,
                  uint32_t nr_segments, xen_kexec_segment_t *segments);

/*
 * Unload a kexec image.
 *
 * This prevents a KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH image from
 * being executed.  The crash images are not cleared from the crash
 * region.
 */
int xc_kexec_unload(xc_interface *xch, int type);

/*
 * Find out whether the image has been successfully loaded.
 *
 * The type can be either KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH.
 * If zero is returned, that means no image is loaded for the type.
 * If one is returned, that means an image is loaded for the type.
 * Otherwise, negative return value indicates error.
 */
int xc_kexec_status(xc_interface *xch, int type);

typedef xenpf_resource_entry_t xc_resource_entry_t;

/*
 * Generic resource operation which contains multiple non-preemptible
 * resource access entries that are passed to xc_resource_op().
 */
struct xc_resource_op {
    uint64_t result;        /* on return, check this field first */
    uint32_t cpu;           /* which cpu to run */
    uint32_t nr_entries;    /* number of resource entries */
    xc_resource_entry_t *entries;
};

typedef struct xc_resource_op xc_resource_op_t;
int xc_resource_op(xc_interface *xch, uint32_t nr_ops, xc_resource_op_t *ops);

#if defined(__i386__) || defined(__x86_64__)
enum xc_psr_cmt_type {
    XC_PSR_CMT_L3_OCCUPANCY,
    XC_PSR_CMT_TOTAL_MEM_COUNT,
    XC_PSR_CMT_LOCAL_MEM_COUNT,
};
typedef enum xc_psr_cmt_type xc_psr_cmt_type;

enum xc_psr_type {
    XC_PSR_CAT_L3_CBM      = 1,
    XC_PSR_CAT_L3_CBM_CODE = 2,
    XC_PSR_CAT_L3_CBM_DATA = 3,
    XC_PSR_CAT_L2_CBM      = 4,
    XC_PSR_MBA_THRTL       = 5,
};
typedef enum xc_psr_type xc_psr_type;

enum xc_psr_feat_type {
    XC_PSR_CAT_L3,
    XC_PSR_CAT_L2,
    XC_PSR_MBA,
};
typedef enum xc_psr_feat_type xc_psr_feat_type;

union xc_psr_hw_info {
    struct {
        uint32_t cos_max;
        uint32_t cbm_len;
        bool     cdp_enabled;
    } cat;

    struct {
        uint32_t cos_max;
        uint32_t thrtl_max;
        bool     linear;
    } mba;
};
typedef union xc_psr_hw_info xc_psr_hw_info;

int xc_psr_cmt_attach(xc_interface *xch, uint32_t domid);
int xc_psr_cmt_detach(xc_interface *xch, uint32_t domid);
int xc_psr_cmt_get_domain_rmid(xc_interface *xch, uint32_t domid,
                               uint32_t *rmid);
int xc_psr_cmt_get_total_rmid(xc_interface *xch, uint32_t *total_rmid);
int xc_psr_cmt_get_l3_upscaling_factor(xc_interface *xch,
                                       uint32_t *upscaling_factor);
int xc_psr_cmt_get_l3_event_mask(xc_interface *xch, uint32_t *event_mask);
int xc_psr_cmt_get_l3_cache_size(xc_interface *xch, uint32_t cpu,
                                 uint32_t *l3_cache_size);
int xc_psr_cmt_get_data(xc_interface *xch, uint32_t rmid, uint32_t cpu,
                        xc_psr_cmt_type type, uint64_t *monitor_data,
                        uint64_t *tsc);
int xc_psr_cmt_enabled(xc_interface *xch);

int xc_psr_set_domain_data(xc_interface *xch, uint32_t domid,
                           xc_psr_type type, uint32_t target,
                           uint64_t data);
int xc_psr_get_domain_data(xc_interface *xch, uint32_t domid,
                           xc_psr_type type, uint32_t target,
                           uint64_t *data);
int xc_psr_get_hw_info(xc_interface *xch, uint32_t socket,
                       xc_psr_feat_type type, xc_psr_hw_info *hw_info);
#endif

int xc_livepatch_upload(xc_interface *xch,
                        char *name, unsigned char *payload, uint32_t size,
                        bool force);

int xc_livepatch_get(xc_interface *xch,
                     char *name,
                     xen_livepatch_status_t *status);

/*
 * Get the number of available payloads and the actual total size of
 * the payloads' name and metadata arrays.
 *
 * This function is typically executed first, before xc_livepatch_list(),
 * to obtain the sizes and correctly allocate all necessary data resources.
 *
 * The return value is zero if the hypercall completed successfully.
 *
 * If there was an error performing the sysctl operation, the return value
 * will contain the hypercall error code value.
 */
int xc_livepatch_list_get_sizes(xc_interface *xch, unsigned int *nr,
                                uint32_t *name_total_size,
                                uint32_t *metadata_total_size);

/*
 * The heart of this function is to get an array of the following objects:
 *   - xen_livepatch_status_t: states and return codes of payloads
 *   - name: names of payloads
 *   - len: lengths of corresponding payloads' names
 *   - metadata: payloads' metadata
 *   - metadata_len: lengths of corresponding payloads' metadata
 *
 * However it is complex because it has to deal with the hypervisor
 * returning only some of the requested data, or the data being stale
 * (another hypercall might alter the list).
 *
 * The parameters that the function expects to contain data from
 * the hypervisor are: 'info', 'name', and 'len'. The 'done' and
 * 'left' parameters are also updated with the number of entries filled out
 * and, respectively, the number of entries left to get from the hypervisor.
 *
 * It is expected that the caller of this function will first issue
 * xc_livepatch_list_get_sizes() in order to obtain the total sizes of names
 * and all metadata as well as the current number of payload entries.
 * The total sizes are required and supplied via the 'name_total_size' and
 * 'metadata_total_size' parameters.
 *
 * The 'max' is to be provided by the caller with the maximum number of
 * entries that the 'info', 'name', 'len', 'metadata' and 'metadata_len'
 * arrays can be filled up with.
 *
 * Each entry in the 'info' array is expected to be of xen_livepatch_status_t
 * structure size.
 *
 * Each entry in the 'name' array may have an arbitrary size.
 *
 * Each entry in the 'len' array is expected to be of uint32_t size.
 *
 * Each entry in the 'metadata' array may have an arbitrary size.
 *
 * Each entry in the 'metadata_len' array is expected to be of uint32_t size.
 *
 * The return value is zero if the hypercall completed successfully.
 * Note that the return value is _not_ the amount of entries filled
 * out - that is saved in 'done'.
 *
 * If there was an error performing the operation, the return value
 * will contain a negative -EXX type value. The 'done' and 'left'
 * parameters will contain the number of entries that had been successfully
 * retrieved (if any).
 */
int xc_livepatch_list(xc_interface *xch, const unsigned int max,
                      const unsigned int start,
                      struct xen_livepatch_status *info,
                      char *name, uint32_t *len,
                      const uint32_t name_total_size,
                      char *metadata, uint32_t *metadata_len,
                      const uint32_t metadata_total_size,
                      unsigned int *done, unsigned int *left);
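
/*
 * Example sketch of the two-step listing flow described above: query the
 * sizes, allocate the arrays, then fetch every entry in one call (assuming
 * the payload list does not change in between).  Cleanup is elided.
 *
 *   unsigned int nr, done, left;
 *   uint32_t name_size, meta_size;
 *   if ( xc_livepatch_list_get_sizes(xch, &nr, &name_size, &meta_size) == 0 )
 *   {
 *       xen_livepatch_status_t *info = calloc(nr, sizeof(*info));
 *       uint32_t *len = calloc(nr, sizeof(*len));
 *       uint32_t *meta_len = calloc(nr, sizeof(*meta_len));
 *       char *names = calloc(1, name_size);
 *       char *meta = calloc(1, meta_size);
 *
 *       int rc = xc_livepatch_list(xch, nr, 0, info, names, len, name_size,
 *                                  meta, meta_len, meta_size, &done, &left);
 *   }
 */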

/*
 * The operations are asynchronous and the hypervisor may take a while
 * to complete them. The `timeout` offers an option to expire the
 * operation if it could not be completed within the specified time
 * (in ns). A value of 0 lets the hypervisor decide the best timeout.
 * The `flags` argument allows extra parameters to be passed to the actions.
 */
int xc_livepatch_apply(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags);
int xc_livepatch_revert(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags);
int xc_livepatch_unload(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags);
int xc_livepatch_replace(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags);

/*
 * Ensure cache coherency after memory modifications. A call to this function
 * is only required on ARM as the x86 architecture provides cache coherency
 * guarantees. Calling this function on x86 is allowed but has no effect.
 */
int xc_domain_cacheflush(xc_interface *xch, uint32_t domid,
                         xen_pfn_t start_pfn, xen_pfn_t nr_pfns);

#if defined(__arm__) || defined(__aarch64__)
int xc_dt_overlay(xc_interface *xch, void *overlay_fdt,
                  uint32_t overlay_fdt_size, uint8_t overlay_op);
int xc_dt_overlay_domain(xc_interface *xch, void *overlay_fdt,
                         uint32_t overlay_fdt_size, uint8_t overlay_op,
                         uint32_t domain_id);
#endif

/* Compat shims */
#include "xenctrl_compat.h"

#endif /* XENCTRL_H */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */