1 /******************************************************************************
2  * xenctrl.h
3  *
4  * A library for low-level access to the Xen control interfaces.
5  *
6  * Copyright (c) 2003-2004, K A Fraser.
7  *
8  * This library is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation;
11  * version 2.1 of the License.
12  *
13  * This library is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with this library; If not, see <http://www.gnu.org/licenses/>.
20  */
21 
22 #ifndef XENCTRL_H
23 #define XENCTRL_H
24 
25 /* Tell the Xen public headers we are a user-space tools build. */
26 #ifndef __XEN_TOOLS__
27 #define __XEN_TOOLS__ 1
28 #endif
29 
30 #include <unistd.h>
31 #include <stddef.h>
32 #include <stdint.h>
33 #include <stdio.h>
34 #include <stdbool.h>
35 #include <xen/xen.h>
36 #include <xen/domctl.h>
37 #include <xen/physdev.h>
38 #include <xen/sysctl.h>
39 #include <xen/version.h>
40 #include <xen/event_channel.h>
41 #include <xen/sched.h>
42 #include <xen/memory.h>
43 #include <xen/grant_table.h>
44 #include <xen/hvm/dm_op.h>
45 #include <xen/hvm/params.h>
46 #include <xen/xsm/flask_op.h>
47 #include <xen/tmem.h>
48 #include <xen/kexec.h>
49 #include <xen/platform.h>
50 
51 #include "xentoollog.h"
52 
53 #if defined(__i386__) || defined(__x86_64__)
54 #include <xen/foreign/x86_32.h>
55 #include <xen/foreign/x86_64.h>
56 #include <xen/arch-x86/xen-mca.h>
57 #endif
58 
59 #define XC_PAGE_SHIFT           12
60 #define XC_PAGE_SIZE            (1UL << XC_PAGE_SHIFT)
61 #define XC_PAGE_MASK            (~(XC_PAGE_SIZE-1))
62 
63 #define INVALID_MFN  (~0UL)
64 
65 /*
66  *  DEFINITIONS FOR CPU BARRIERS
67  */
68 
69 #define xen_barrier() asm volatile ( "" : : : "memory")
70 
71 #if defined(__i386__)
72 #define xen_mb()  asm volatile ( "lock; addl $0,0(%%esp)" : : : "memory" )
73 #define xen_rmb() xen_barrier()
74 #define xen_wmb() xen_barrier()
75 #elif defined(__x86_64__)
76 #define xen_mb()  asm volatile ( "mfence" : : : "memory")
77 #define xen_rmb() xen_barrier()
78 #define xen_wmb() xen_barrier()
79 #elif defined(__arm__)
80 #define xen_mb()   asm volatile ("dmb" : : : "memory")
81 #define xen_rmb()  asm volatile ("dmb" : : : "memory")
82 #define xen_wmb()  asm volatile ("dmb" : : : "memory")
83 #elif defined(__aarch64__)
84 #define xen_mb()   asm volatile ("dmb sy" : : : "memory")
85 #define xen_rmb()  asm volatile ("dmb sy" : : : "memory")
86 #define xen_wmb()  asm volatile ("dmb sy" : : : "memory")
87 #else
88 #error "Define barriers"
89 #endif
90 
91 
92 #define XENCTRL_HAS_XC_INTERFACE 1
93 /* In Xen 4.0 and earlier, xc_interface_open and xc_evtchn_open would
94  * both return ints being the file descriptor.  In 4.1 and later, they
95  * return an xc_interface* and xc_evtchn*, respectively - ie, a
96  * pointer to an opaque struct.  This #define is provided in 4.1 and
97  * later, allowing out-of-tree callers to more easily distinguish
98  * between, and be compatible with, both versions.
99  */
100 
101 
102 /*
103  *  GENERAL
104  *
105  * Unless otherwise specified, each function here returns zero or a
106  * non-null pointer on success; or in case of failure, sets errno and
107  * returns -1 or a null pointer.
108  *
109  * Unless otherwise specified, errors result in a call to the error
110  * handler function, which by default prints a message to the
111  * FILE* passed as the caller_data, which by default is stderr.
112  * (This is described below as "logging errors".)
113  *
114  * The error handler can safely trash errno, as libxc saves it across
115  * the callback.
116  */
117 
118 typedef struct xc_interface_core xc_interface;
119 
120 enum xc_error_code {
121   XC_ERROR_NONE = 0,
122   XC_INTERNAL_ERROR = 1,
123   XC_INVALID_KERNEL = 2,
124   XC_INVALID_PARAM = 3,
125   XC_OUT_OF_MEMORY = 4,
126   /* new codes need to be added to xc_error_level_to_desc too */
127 };
128 
129 typedef enum xc_error_code xc_error_code;
130 
131 
132 /*
133  *  INITIALIZATION FUNCTIONS
134  */
135 
136 /**
137  * This function opens a handle to the hypervisor interface.  This function can
138  * be called multiple times within a single process.  Multiple processes can
139  * have an open hypervisor interface at the same time.
140  *
141  * Note:
 * After fork a child process must not use any opened xc interface
 * handle inherited from its parent. It must open a new handle if
 * it wants to interact with xc.
145  *
146  * Each call to this function should have a corresponding call to
147  * xc_interface_close().
148  *
149  * This function can fail if the caller does not have superuser permission or
150  * if a Xen-enabled kernel is not currently running.
151  *
152  * @return a handle to the hypervisor interface
153  */
154 xc_interface *xc_interface_open(xentoollog_logger *logger,
155                                 xentoollog_logger *dombuild_logger,
156                                 unsigned open_flags);
157   /* if logger==NULL, will log to stderr
   * if dombuild_logger==NULL, will log to a file
159    */
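
/*
 * Illustrative usage sketch (not itself part of the API): open a handle
 * with default logging to stderr, use it, and close it again.
 *
 *     int example(void)
 *     {
 *         xc_interface *xch = xc_interface_open(NULL, NULL, 0);
 *
 *         if ( xch == NULL )
 *             return -1;
 *
 *         ... issue libxc calls using xch ...
 *
 *         return xc_interface_close(xch);
 *     }
 */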
160 
161 /*
162  * Note: if XC_OPENFLAG_NON_REENTRANT is passed then libxc must not be
163  * called reentrantly and the calling application is responsible for
164  * providing mutual exclusion surrounding all libxc calls itself.
165  *
166  * In particular xc_{get,clear}_last_error only remain valid for the
167  * duration of the critical section containing the call which failed.
168  */
169 enum xc_open_flags {
170     XC_OPENFLAG_DUMMY =  1<<0, /* do not actually open a xenctrl interface */
    XC_OPENFLAG_NON_REENTRANT = 1<<1, /* assume library is only ever called from a single thread */
172 };
173 
174 /**
175  * This function closes an open hypervisor interface.
176  *
177  * This function can fail if the handle does not represent an open interface or
178  * if there were problems closing the interface.  In the latter case
179  * the interface is still closed.
180  *
181  * @parm xch a handle to an open hypervisor interface
182  * @return 0 on success, -1 otherwise.
183  */
184 int xc_interface_close(xc_interface *xch);
185 
186 /*
187  * HYPERCALL SAFE MEMORY BUFFER
188  *
189  * Ensure that memory which is passed to a hypercall has been
190  * specially allocated in order to be safe to access from the
191  * hypervisor.
192  *
193  * Each user data pointer is shadowed by an xc_hypercall_buffer data
194  * structure. You should never define an xc_hypercall_buffer type
195  * directly, instead use the DECLARE_HYPERCALL_BUFFER* macros below.
196  *
 * The structure should be considered opaque and all access should be
198  * via the macros and helper functions defined below.
199  *
200  * Once the buffer is declared the user is responsible for explicitly
201  * allocating and releasing the memory using
202  * xc_hypercall_buffer_alloc(_pages) and
203  * xc_hypercall_buffer_free(_pages).
204  *
205  * Once the buffer has been allocated the user can initialise the data
206  * via the normal pointer. The xc_hypercall_buffer structure is
207  * transparently referenced by the helper macros (such as
208  * xen_set_guest_handle) in order to check at compile time that the
209  * correct type of memory is being used.
210  */
211 struct xc_hypercall_buffer {
212     /* Hypercall safe memory buffer. */
213     void *hbuf;
214 
215     /*
216      * Reference to xc_hypercall_buffer passed as argument to the
217      * current function.
218      */
219     struct xc_hypercall_buffer *param_shadow;
220 
221     /*
222      * Direction of copy for bounce buffering.
223      */
224     int dir;
225 
226     /* Used iff dir != 0. */
227     void *ubuf;
228     size_t sz;
229 };
230 typedef struct xc_hypercall_buffer xc_hypercall_buffer_t;
231 
232 /*
233  * Construct the name of the hypercall buffer for a given variable.
234  * For internal use only
235  */
236 #define XC__HYPERCALL_BUFFER_NAME(_name) xc__hypercall_buffer_##_name
237 
238 /*
239  * Returns the hypercall_buffer associated with a variable.
240  */
241 #define HYPERCALL_BUFFER(_name)                                 \
242     ({  xc_hypercall_buffer_t _hcbuf_buf1;                      \
243         typeof(XC__HYPERCALL_BUFFER_NAME(_name)) *_hcbuf_buf2 = \
244                 &XC__HYPERCALL_BUFFER_NAME(_name);              \
245         (void)(&_hcbuf_buf1 == _hcbuf_buf2);                    \
246         (_hcbuf_buf2)->param_shadow ?                           \
247                 (_hcbuf_buf2)->param_shadow : (_hcbuf_buf2);    \
248      })
249 
250 #define HYPERCALL_BUFFER_INIT_NO_BOUNCE .dir = 0, .sz = 0, .ubuf = (void *)-1
251 
252 /*
253  * Defines a hypercall buffer and user pointer with _name of _type.
254  *
255  * The user accesses the data as normal via _name which will be
256  * transparently converted to the hypercall buffer as necessary.
257  */
258 #define DECLARE_HYPERCALL_BUFFER(_type, _name)                 \
259     _type *(_name) = NULL;                                     \
260     xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(_name) = { \
261         .hbuf = NULL,                                          \
262         .param_shadow = NULL,                                  \
263         HYPERCALL_BUFFER_INIT_NO_BOUNCE                        \
264     }
265 
266 /*
267  * Like DECLARE_HYPERCALL_BUFFER() but using an already allocated
268  * hypercall buffer, _hbuf.
269  *
270  * Useful when a hypercall buffer is passed to a function and access
271  * via the user pointer is required.
272  *
273  * See DECLARE_HYPERCALL_BUFFER_ARGUMENT() if the user pointer is not
274  * required.
275  */
276 #define DECLARE_HYPERCALL_BUFFER_SHADOW(_type, _name, _hbuf)   \
277     _type *(_name) = (_hbuf)->hbuf;                            \
278     __attribute__((unused))                                    \
279     xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(_name) = { \
280         .hbuf = (void *)-1,                                    \
281         .param_shadow = (_hbuf),                               \
282         HYPERCALL_BUFFER_INIT_NO_BOUNCE                        \
283     }
284 
285 /*
286  * Declare the necessary data structure to allow a hypercall buffer
287  * passed as an argument to a function to be used in the normal way.
288  */
289 #define DECLARE_HYPERCALL_BUFFER_ARGUMENT(_name)               \
290     xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(_name) = { \
291         .hbuf = (void *)-1,                                    \
292         .param_shadow = (_name),                               \
293         HYPERCALL_BUFFER_INIT_NO_BOUNCE                        \
294     }
295 
296 /*
297  * Get the hypercall buffer data pointer in a form suitable for use
298  * directly as a hypercall argument.
299  */
300 #define HYPERCALL_BUFFER_AS_ARG(_name)                          \
301     ({  xc_hypercall_buffer_t _hcbuf_arg1;                      \
302         typeof(XC__HYPERCALL_BUFFER_NAME(_name)) *_hcbuf_arg2 = \
303                 HYPERCALL_BUFFER(_name);                        \
304         (void)(&_hcbuf_arg1 == _hcbuf_arg2);                    \
305         (unsigned long)(_hcbuf_arg2)->hbuf;                     \
306      })
307 
308 /*
309  * Set a xen_guest_handle in a type safe manner, ensuring that the
310  * data pointer has been correctly allocated.
311  */
312 #define set_xen_guest_handle_impl(_hnd, _val, _byte_off)        \
313     do {                                                        \
314         xc_hypercall_buffer_t _hcbuf_hnd1;                      \
315         typeof(XC__HYPERCALL_BUFFER_NAME(_val)) *_hcbuf_hnd2 =  \
316                 HYPERCALL_BUFFER(_val);                         \
317         (void) (&_hcbuf_hnd1 == _hcbuf_hnd2);                   \
318         set_xen_guest_handle_raw(_hnd,                          \
319                 (_hcbuf_hnd2)->hbuf + (_byte_off));             \
320     } while (0)
321 
322 #undef set_xen_guest_handle
323 #define set_xen_guest_handle(_hnd, _val)                        \
324     set_xen_guest_handle_impl(_hnd, _val, 0)
325 
326 #define set_xen_guest_handle_offset(_hnd, _val, _off)           \
327     set_xen_guest_handle_impl(_hnd, _val,                       \
328             ((sizeof(*_val)*(_off))))
329 
330 /* Use with set_xen_guest_handle in place of NULL */
331 extern xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(HYPERCALL_BUFFER_NULL);
332 
333 /*
334  * Allocate and free hypercall buffers with byte granularity.
335  */
336 void *xc__hypercall_buffer_alloc(xc_interface *xch, xc_hypercall_buffer_t *b, size_t size);
337 #define xc_hypercall_buffer_alloc(_xch, _name, _size) xc__hypercall_buffer_alloc(_xch, HYPERCALL_BUFFER(_name), _size)
338 void xc__hypercall_buffer_free(xc_interface *xch, xc_hypercall_buffer_t *b);
339 #define xc_hypercall_buffer_free(_xch, _name) xc__hypercall_buffer_free(_xch, HYPERCALL_BUFFER(_name))
340 
341 /*
342  * Allocate and free hypercall buffers with page alignment.
343  */
344 void *xc__hypercall_buffer_alloc_pages(xc_interface *xch, xc_hypercall_buffer_t *b, int nr_pages);
345 #define xc_hypercall_buffer_alloc_pages(_xch, _name, _nr) xc__hypercall_buffer_alloc_pages(_xch, HYPERCALL_BUFFER(_name), _nr)
346 void xc__hypercall_buffer_free_pages(xc_interface *xch, xc_hypercall_buffer_t *b, int nr_pages);
347 #define xc_hypercall_buffer_free_pages(_xch, _name, _nr)                    \
348     do {                                                                    \
349         if ( _name )                                                        \
350             xc__hypercall_buffer_free_pages(_xch, HYPERCALL_BUFFER(_name),  \
351                                             _nr);                           \
352     } while (0)
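
/*
 * Illustrative sketch of the declare / allocate / use / free pattern,
 * assuming an open handle xch; "op.buffer" stands for a hypothetical
 * hypercall argument structure with a guest-handle field:
 *
 *     DECLARE_HYPERCALL_BUFFER(uint8_t, data);
 *
 *     data = xc_hypercall_buffer_alloc(xch, data, size);
 *     if ( data == NULL )
 *         return -1;
 *
 *     memset(data, 0, size);                   (fill via the normal pointer)
 *     set_xen_guest_handle(op.buffer, data);   (hand it to the hypercall)
 *
 *     xc_hypercall_buffer_free(xch, data);
 */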
353 
354 /*
355  * Array of hypercall buffers.
356  *
357  * Create an array with xc_hypercall_buffer_array_create() and
358  * populate it by declaring one hypercall buffer in a loop and
359  * allocating the buffer with xc_hypercall_buffer_array_alloc().
360  *
 * To access a previously allocated buffer, declare a new hypercall
362  * buffer and call xc_hypercall_buffer_array_get().
363  *
364  * Destroy the array with xc_hypercall_buffer_array_destroy() to free
365  * the array and all its allocated hypercall buffers.
366  */
367 struct xc_hypercall_buffer_array;
368 typedef struct xc_hypercall_buffer_array xc_hypercall_buffer_array_t;
369 
370 xc_hypercall_buffer_array_t *xc_hypercall_buffer_array_create(xc_interface *xch, unsigned n);
371 void *xc__hypercall_buffer_array_alloc(xc_interface *xch, xc_hypercall_buffer_array_t *array,
372                                        unsigned index, xc_hypercall_buffer_t *hbuf, size_t size);
373 #define xc_hypercall_buffer_array_alloc(_xch, _array, _index, _name, _size) \
374     xc__hypercall_buffer_array_alloc(_xch, _array, _index, HYPERCALL_BUFFER(_name), _size)
375 void *xc__hypercall_buffer_array_get(xc_interface *xch, xc_hypercall_buffer_array_t *array,
376                                      unsigned index, xc_hypercall_buffer_t *hbuf);
377 #define xc_hypercall_buffer_array_get(_xch, _array, _index, _name, _size) \
378     xc__hypercall_buffer_array_get(_xch, _array, _index, HYPERCALL_BUFFER(_name))
379 void xc_hypercall_buffer_array_destroy(xc_interface *xc, xc_hypercall_buffer_array_t *array);
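
/*
 * Illustrative sketch of the array pattern described above, assuming an
 * open handle xch and a count n:
 *
 *     xc_hypercall_buffer_array_t *array;
 *     unsigned i;
 *
 *     array = xc_hypercall_buffer_array_create(xch, n);
 *     if ( array == NULL )
 *         return -1;
 *
 *     for ( i = 0; i < n; i++ )
 *     {
 *         DECLARE_HYPERCALL_BUFFER(void, ptr);
 *
 *         ptr = xc_hypercall_buffer_array_alloc(xch, array, i, ptr,
 *                                               XC_PAGE_SIZE);
 *         if ( ptr == NULL )
 *             break;
 *     }
 *
 *     xc_hypercall_buffer_array_destroy(xch, array);
 */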
380 
381 /*
382  * CPUMAP handling
383  */
384 typedef uint8_t *xc_cpumap_t;
385 
386 /* return maximum number of cpus the hypervisor supports */
387 int xc_get_max_cpus(xc_interface *xch);
388 
389 /* return the number of online cpus */
390 int xc_get_online_cpus(xc_interface *xch);
391 
392 /* return array size for cpumap */
393 int xc_get_cpumap_size(xc_interface *xch);
394 
395 /* allocate a cpumap */
396 xc_cpumap_t xc_cpumap_alloc(xc_interface *xch);
397 
/* clear a CPU from the cpumap. */
399 void xc_cpumap_clearcpu(int cpu, xc_cpumap_t map);
400 
/* set a CPU in the cpumap. */
402 void xc_cpumap_setcpu(int cpu, xc_cpumap_t map);
403 
/* Test whether a CPU in the cpumap is set. */
405 int xc_cpumap_testcpu(int cpu, xc_cpumap_t map);
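
/*
 * Illustrative sketch: allocate a cpumap, mark CPU 0 in it and test the
 * bit (assuming an open handle xch; the sketch assumes the map can be
 * released with free()):
 *
 *     xc_cpumap_t map = xc_cpumap_alloc(xch);
 *
 *     if ( map == NULL )
 *         return -1;
 *
 *     xc_cpumap_setcpu(0, map);
 *     if ( xc_cpumap_testcpu(0, map) )
 *         printf("CPU 0 is set\n");
 *
 *     free(map);
 */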
406 
407 /*
408  * NODEMAP handling
409  */
410 typedef uint8_t *xc_nodemap_t;
411 
412 /* return maximum number of NUMA nodes the hypervisor supports */
413 int xc_get_max_nodes(xc_interface *xch);
414 
415 /* return array size for nodemap */
416 int xc_get_nodemap_size(xc_interface *xch);
417 
418 /* allocate a nodemap */
419 xc_nodemap_t xc_nodemap_alloc(xc_interface *xch);
420 
421 /*
422  * DOMAIN DEBUGGING FUNCTIONS
423  */
424 
425 typedef struct xc_core_header {
426     unsigned int xch_magic;
427     unsigned int xch_nr_vcpus;
428     unsigned int xch_nr_pages;
429     unsigned int xch_ctxt_offset;
430     unsigned int xch_index_offset;
431     unsigned int xch_pages_offset;
432 } xc_core_header_t;
433 
434 #define XC_CORE_MAGIC     0xF00FEBED
435 #define XC_CORE_MAGIC_HVM 0xF00FEBEE
436 
437 /*
438  * DOMAIN MANAGEMENT FUNCTIONS
439  */
440 
441 typedef struct xc_dominfo {
442     uint32_t      domid;
443     uint32_t      ssidref;
444     unsigned int  dying:1, crashed:1, shutdown:1,
445                   paused:1, blocked:1, running:1,
446                   hvm:1, debugged:1, xenstore:1, hap:1;
447     unsigned int  shutdown_reason; /* only meaningful if shutdown==1 */
448     unsigned long nr_pages; /* current number, not maximum */
449     unsigned long nr_outstanding_pages;
450     unsigned long nr_shared_pages;
451     unsigned long nr_paged_pages;
452     unsigned long shared_info_frame;
453     uint64_t      cpu_time;
454     unsigned long max_memkb;
455     unsigned int  nr_online_vcpus;
456     unsigned int  max_vcpu_id;
457     xen_domain_handle_t handle;
458     unsigned int  cpupool;
459     struct xen_arch_domainconfig arch_config;
460 } xc_dominfo_t;
461 
462 typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
463 
464 typedef union
465 {
466 #if defined(__i386__) || defined(__x86_64__)
467     vcpu_guest_context_x86_64_t x64;
468     vcpu_guest_context_x86_32_t x32;
469 #endif
470     vcpu_guest_context_t c;
471 } vcpu_guest_context_any_t;
472 
473 typedef union
474 {
475 #if defined(__i386__) || defined(__x86_64__)
476     shared_info_x86_64_t x64;
477     shared_info_x86_32_t x32;
478 #endif
479     shared_info_t s;
480 } shared_info_any_t;
481 
482 #if defined(__i386__) || defined(__x86_64__)
483 typedef union
484 {
485     start_info_x86_64_t x64;
486     start_info_x86_32_t x32;
487     start_info_t s;
488 } start_info_any_t;
489 #endif
490 
491 typedef struct xc_vcpu_extstate {
492     uint64_t xfeature_mask;
493     uint64_t size;
494     void *buffer;
495 } xc_vcpu_extstate_t;
496 
497 typedef struct xen_arch_domainconfig xc_domain_configuration_t;
498 int xc_domain_create(xc_interface *xch, uint32_t ssidref,
499                      xen_domain_handle_t handle, uint32_t flags,
500                      uint32_t *pdomid, xc_domain_configuration_t *config);
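
/*
 * Minimal creation sketch (values here are purely illustrative): a zeroed
 * handle and arch configuration are used, and a domid of 0 is passed on
 * the assumption that the hypervisor then picks an id and returns it:
 *
 *     uint32_t domid = 0;
 *     xen_domain_handle_t handle = { 0 };
 *     xc_domain_configuration_t config = { 0 };
 *
 *     if ( xc_domain_create(xch, 0, handle, 0, &domid, &config) )
 *         return -1;
 */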
501 
502 
503 /* Functions to produce a dump of a given domain
504  *  xc_domain_dumpcore - produces a dump to a specified file
505  *  xc_domain_dumpcore_via_callback - produces a dump, using a specified
506  *                                    callback function
507  */
508 int xc_domain_dumpcore(xc_interface *xch,
509                        uint32_t domid,
510                        const char *corename);
511 
/* Define the callback function type for xc_domain_dumpcore_via_callback.
 *
 * The coredump code calls this function for every "write"; the opaque
 * argument is created by the caller of xc_domain_dumpcore_via_callback
 * and passed through for the callback's own use.
 */
518 typedef int (dumpcore_rtn_t)(xc_interface *xch,
519                              void *arg, char *buffer, unsigned int length);
520 
521 int xc_domain_dumpcore_via_callback(xc_interface *xch,
522                                     uint32_t domid,
523                                     void *arg,
524                                     dumpcore_rtn_t dump_rtn);
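
/*
 * Illustrative callback sketch (names are hypothetical): write each chunk
 * to a file descriptor passed through the opaque argument.
 *
 *     static int dump_to_fd(xc_interface *xch, void *arg,
 *                           char *buffer, unsigned int length)
 *     {
 *         int fd = *(int *)arg;
 *
 *         return write(fd, buffer, length) == (ssize_t)length ? 0 : -1;
 *     }
 *
 *     ... xc_domain_dumpcore_via_callback(xch, domid, &fd, dump_to_fd) ...
 */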
525 
526 /*
527  * This function sets the maximum number of vcpus that a domain may create.
528  *
529  * @parm xch a handle to an open hypervisor interface.
530  * @parm domid the domain id in which vcpus are to be created.
531  * @parm max the maximum number of vcpus that the domain may create.
532  * @return 0 on success, -1 on failure.
533  */
534 int xc_domain_max_vcpus(xc_interface *xch,
535                         uint32_t domid,
536                         unsigned int max);
537 
538 /**
539  * This function pauses a domain. A paused domain still exists in memory
540  * however it does not receive any timeslices from the hypervisor.
541  *
542  * @parm xch a handle to an open hypervisor interface
543  * @parm domid the domain id to pause
544  * @return 0 on success, -1 on failure.
545  */
546 int xc_domain_pause(xc_interface *xch,
547                     uint32_t domid);
548 /**
549  * This function unpauses a domain.  The domain should have been previously
550  * paused.
551  *
552  * @parm xch a handle to an open hypervisor interface
553  * @parm domid the domain id to unpause
554  * return 0 on success, -1 on failure
555  */
556 int xc_domain_unpause(xc_interface *xch,
557                       uint32_t domid);
558 
559 /**
560  * This function will destroy a domain.  Destroying a domain removes the domain
561  * completely from memory.  This function should be called after sending the
562  * domain a SHUTDOWN control message to free up the domain resources.
563  *
564  * @parm xch a handle to an open hypervisor interface
565  * @parm domid the domain id to destroy
566  * @return 0 on success, -1 on failure
567  */
568 int xc_domain_destroy(xc_interface *xch,
569                       uint32_t domid);
570 
571 
572 /**
573  * This function resumes a suspended domain. The domain should have
574  * been previously suspended.
575  *
 * Note that there is no 'xc_domain_suspend' as suspending a domain
 * is quite the endeavour.
578  *
579  * For the purpose of this explanation there are three guests:
 * PV (using hypercalls for privileged operations), HVM
581  * (fully hardware virtualized guests using emulated devices for everything),
582  * and PVHVM (PV aware with hardware virtualisation).
583  *
 * HVM guests are the simplest - they suspend via S3 / S4 and resume from
585  * S3 / S4. Upon resume they have to re-negotiate with the emulated devices.
586  *
587  * PV and PVHVM communicate via hypercalls for suspend (and resume).
 * For suspend the toolstack initiates the process by writing the string
 * "suspend" to the XenBus node "control/shutdown".
590  *
 * The PV guest stashes anything it deems necessary in 'struct
 * start_info' in case of failure (PVHVM may ignore this) and calls
 * the SCHEDOP_shutdown::SHUTDOWN_suspend hypercall (for PV the MFN of
 * 'struct start_info' is passed as the argument).
595  *
596  * And then the guest is suspended.
597  *
 * Checkpointing, or notifying the guest that the suspend failed or was
 * cancelled (in the checkpoint case), is done by having the
 * SCHEDOP_shutdown::SHUTDOWN_suspend hypercall return a non-zero
 * value.
602  *
 * The PV and PVHVM resume paths are similar. For PV it would be
604  * similar to bootup - figure out where the 'struct start_info' is (or
605  * if the suspend was cancelled aka checkpointed - reuse the saved
606  * values).
607  *
 * From here on they differ depending on whether the guest is PV or PVHVM
609  * in specifics but follow overall the same path:
 *  - PV: Bring up the vCPUs,
 *  - PVHVM: Set up the vector callback,
 *  - Bring up vCPU runstates,
 *  - Remap the grant tables if checkpointing, or set them up from scratch.
614  *
615  *
 * If the resume was not a checkpoint (or if the suspend was successful) we
 * set up the PV timers and the different PV events. Lastly the PV drivers
 * re-negotiate with the backend.
619  *
 * This function returns before the guest has started resuming. That is,
 * the guest is in a non-running state and its vCPU context is in the
 * SCHEDOP_shutdown::SHUTDOWN_suspend hypercall return path (for PV and
 * PVHVM). For HVM it is in the QEMU emulated BIOS handling S3 suspend.
625  *
626  * @parm xch a handle to an open hypervisor interface
627  * @parm domid the domain id to resume
628  * @parm fast use cooperative resume (guest must support this)
629  * return 0 on success, -1 on failure
630  */
631 int xc_domain_resume(xc_interface *xch,
632 		     uint32_t domid,
633 		     int fast);
634 
635 /**
636  * This function will shutdown a domain. This is intended for use in
637  * fully-virtualized domains where this operation is analogous to the
638  * sched_op operations in a paravirtualized domain. The caller is
639  * expected to give the reason for the shutdown.
640  *
641  * @parm xch a handle to an open hypervisor interface
 * @parm domid the domain id to shut down
643  * @parm reason is the reason (SHUTDOWN_xxx) for the shutdown
644  * @return 0 on success, -1 on failure
645  */
646 int xc_domain_shutdown(xc_interface *xch,
647                        uint32_t domid,
648                        int reason);
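
/*
 * Illustrative sketch: request a clean power-off of a guest (the
 * SHUTDOWN_* reason codes come from xen/sched.h):
 *
 *     if ( xc_domain_shutdown(xch, domid, SHUTDOWN_poweroff) )
 *         return -1;
 */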
649 
650 int xc_watchdog(xc_interface *xch,
651 		uint32_t id,
652 		uint32_t timeout);
653 
654 /**
655  * This function explicitly sets the host NUMA nodes the domain will
656  * have affinity with.
657  *
658  * @parm xch a handle to an open hypervisor interface.
659  * @parm domid the domain id one wants to set the affinity of.
660  * @parm nodemap the map of the affine nodes.
661  * @return 0 on success, -1 on failure.
662  */
663 int xc_domain_node_setaffinity(xc_interface *xch,
                               uint32_t domid,
665                                xc_nodemap_t nodemap);
666 
667 /**
668  * This function retrieves the host NUMA nodes the domain has
669  * affinity with.
670  *
671  * @parm xch a handle to an open hypervisor interface.
672  * @parm domid the domain id one wants to get the node affinity of.
673  * @parm nodemap the map of the affine nodes.
674  * @return 0 on success, -1 on failure.
675  */
676 int xc_domain_node_getaffinity(xc_interface *xch,
                               uint32_t domid,
678                                xc_nodemap_t nodemap);
679 
680 /**
681  * This function specifies the CPU affinity for a vcpu.
682  *
683  * There are two kinds of affinity. Soft affinity is on what CPUs a vcpu
684  * prefers to run. Hard affinity is on what CPUs a vcpu is allowed to run.
 * If flags contains XEN_VCPUAFFINITY_SOFT, the soft affinity is set to
686  * what cpumap_soft_inout contains. If flags contains XEN_VCPUAFFINITY_HARD,
687  * the hard affinity is set to what cpumap_hard_inout contains. Both flags
688  * can be set at the same time, in which case both soft and hard affinity are
689  * set to what the respective parameter contains.
690  *
 * The function also returns the effective hard and/or soft affinity, still
692  * via the cpumap_soft_inout and cpumap_hard_inout parameters. Effective
693  * affinity is, in case of soft affinity, the intersection of soft affinity,
694  * hard affinity and the cpupool's online CPUs for the domain, and is returned
695  * in cpumap_soft_inout, if XEN_VCPUAFFINITY_SOFT is set in flags. In case of
696  * hard affinity, it is the intersection between hard affinity and the
697  * cpupool's online CPUs, and is returned in cpumap_hard_inout, if
698  * XEN_VCPUAFFINITY_HARD is set in flags. If both flags are set, both soft
699  * and hard affinity are returned in the respective parameter.
700  *
 * We report the effective affinity back because it is what the Xen
 * scheduler will actually use, allowing the caller to check whether it
 * matches, or at least is good enough for, its purposes.
704  *
705  * @param xch a handle to an open hypervisor interface.
706  * @param domid the id of the domain to which the vcpu belongs
 * @param vcpu the vcpu id within the domain
708  * @param cpumap_hard_inout specifies(/returns) the (effective) hard affinity
709  * @param cpumap_soft_inout specifies(/returns) the (effective) soft affinity
710  * @param flags what we want to set
711  */
712 int xc_vcpu_setaffinity(xc_interface *xch,
713                         uint32_t domid,
714                         int vcpu,
715                         xc_cpumap_t cpumap_hard_inout,
716                         xc_cpumap_t cpumap_soft_inout,
717                         uint32_t flags);
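
/*
 * Illustrative sketch: set both hard and soft affinity of vcpu 0 from two
 * previously filled cpumaps, and pick up the effective values the call
 * writes back (assuming an open handle xch):
 *
 *     if ( xc_vcpu_setaffinity(xch, domid, 0, hard_map, soft_map,
 *                              XEN_VCPUAFFINITY_HARD |
 *                              XEN_VCPUAFFINITY_SOFT) )
 *         return -1;
 *     (hard_map and soft_map now hold the effective affinities)
 */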
718 
719 /**
720  * This function retrieves hard and soft CPU affinity of a vcpu,
721  * depending on what flags are set.
722  *
723  * Soft affinity is returned in cpumap_soft if XEN_VCPUAFFINITY_SOFT is set.
724  * Hard affinity is returned in cpumap_hard if XEN_VCPUAFFINITY_HARD is set.
725  *
726  * @param xch a handle to an open hypervisor interface.
727  * @param domid the id of the domain to which the vcpu belongs
 * @param vcpu the vcpu id within the domain
729  * @param cpumap_hard is where hard affinity is returned
730  * @param cpumap_soft is where soft affinity is returned
 * @param flags what we want to get
732  */
733 int xc_vcpu_getaffinity(xc_interface *xch,
734                         uint32_t domid,
735                         int vcpu,
736                         xc_cpumap_t cpumap_hard,
737                         xc_cpumap_t cpumap_soft,
738                         uint32_t flags);
739 
740 
741 /**
742  * This function will return the guest_width (in bytes) for the
743  * specified domain.
744  *
745  * @param xch a handle to an open hypervisor interface.
746  * @param domid the domain id one wants the address size width of.
 * @param guest_width where the guest width, in bytes, is returned.
748  */
749 int xc_domain_get_guest_width(xc_interface *xch, uint32_t domid,
750                               unsigned int *guest_width);
751 
752 
753 /**
754  * This function will return information about one or more domains. It is
755  * designed to iterate over the list of domains. If a single domain is
756  * requested, this function will return the next domain in the list - if
757  * one exists. It is, therefore, important in this case to make sure the
758  * domain requested was the one returned.
759  *
760  * @parm xch a handle to an open hypervisor interface
761  * @parm first_domid the first domain to enumerate information from.  Domains
 *                   are currently enumerated in order of creation.
763  * @parm max_doms the number of elements in info
764  * @parm info an array of max_doms size that will contain the information for
765  *            the enumerated domains.
766  * @return the number of domains enumerated or -1 on error
767  */
768 int xc_domain_getinfo(xc_interface *xch,
769                       uint32_t first_domid,
770                       unsigned int max_doms,
771                       xc_dominfo_t *info);
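
/*
 * Illustrative iteration sketch using a small window of results at a
 * time (assuming an open handle xch):
 *
 *     xc_dominfo_t info[16];
 *     uint32_t first = 0;
 *     int i, nr;
 *
 *     while ( (nr = xc_domain_getinfo(xch, first, 16, info)) > 0 )
 *     {
 *         for ( i = 0; i < nr; i++ )
 *             printf("domain %u: %lu pages\n",
 *                    info[i].domid, info[i].nr_pages);
 *         first = info[nr - 1].domid + 1;
 *     }
 */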
772 
773 
774 /**
775  * This function will set the execution context for the specified vcpu.
776  *
777  * @parm xch a handle to an open hypervisor interface
778  * @parm domid the domain to set the vcpu context for
779  * @parm vcpu the vcpu number for the context
 * @parm ctxt pointer to the cpu context with the values to set
 * @return 0 on success, -1 on error
782  */
783 int xc_vcpu_setcontext(xc_interface *xch,
784                        uint32_t domid,
785                        uint32_t vcpu,
786                        vcpu_guest_context_any_t *ctxt);
787 /**
788  * This function will return information about one or more domains, using a
789  * single hypercall.  The domain information will be stored into the supplied
790  * array of xc_domaininfo_t structures.
791  *
792  * @parm xch a handle to an open hypervisor interface
793  * @parm first_domain the first domain to enumerate information from.
 *                    Domains are currently enumerated in order of creation.
795  * @parm max_domains the number of elements in info
 * @parm info an array of max_domains size that will contain the information for
797  *            the enumerated domains.
798  * @return the number of domains enumerated or -1 on error
799  */
800 int xc_domain_getinfolist(xc_interface *xch,
801                           uint32_t first_domain,
802                           unsigned int max_domains,
803                           xc_domaininfo_t *info);
804 
805 /**
 * This function sets the p2m entry for a broken page
 * @parm xch a handle to an open hypervisor interface
 * @parm domid the domain id to which the broken page belongs
809  * @parm pfn the pfn number of the broken page
810  * @return 0 on success, -1 on failure
811  */
812 int xc_set_broken_page_p2m(xc_interface *xch,
813                            uint32_t domid,
814                            unsigned long pfn);
815 
816 /**
817  * This function returns information about the context of a hvm domain
818  * @parm xch a handle to an open hypervisor interface
819  * @parm domid the domain to get information from
820  * @parm ctxt_buf a pointer to a structure to store the execution context of
821  *            the hvm domain
822  * @parm size the size of ctxt_buf in bytes
823  * @return 0 on success, -1 on failure
824  */
825 int xc_domain_hvm_getcontext(xc_interface *xch,
826                              uint32_t domid,
827                              uint8_t *ctxt_buf,
828                              uint32_t size);
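
/*
 * A common calling pattern (sketch; it assumes, as in-tree callers do,
 * that passing a NULL buffer makes the call report the size needed):
 *
 *     int size = xc_domain_hvm_getcontext(xch, domid, NULL, 0);
 *     uint8_t *buf;
 *
 *     if ( size < 0 || (buf = malloc(size)) == NULL )
 *         return -1;
 *     if ( xc_domain_hvm_getcontext(xch, domid, buf, size) < 0 )
 *     {
 *         free(buf);
 *         return -1;
 *     }
 */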
829 
830 
831 /**
832  * This function returns one element of the context of a hvm domain
833  * @parm xch a handle to an open hypervisor interface
834  * @parm domid the domain to get information from
 * @parm typecode which type of element is required
836  * @parm instance which instance of the type
837  * @parm ctxt_buf a pointer to a structure to store the execution context of
838  *            the hvm domain
839  * @parm size the size of ctxt_buf (must be >= HVM_SAVE_LENGTH(typecode))
840  * @return 0 on success, -1 on failure
841  */
842 int xc_domain_hvm_getcontext_partial(xc_interface *xch,
843                                      uint32_t domid,
844                                      uint16_t typecode,
845                                      uint16_t instance,
846                                      void *ctxt_buf,
847                                      uint32_t size);
848 
849 /**
850  * This function will set the context for hvm domain
851  *
852  * @parm xch a handle to an open hypervisor interface
853  * @parm domid the domain to set the hvm domain context for
 * @parm hvm_ctxt pointer to the hvm context with the values to set
855  * @parm size the size of hvm_ctxt in bytes
856  * @return 0 on success, -1 on failure
857  */
858 int xc_domain_hvm_setcontext(xc_interface *xch,
859                              uint32_t domid,
860                              uint8_t *hvm_ctxt,
861                              uint32_t size);
862 
863 /**
 * This function will return the guest IO ABI protocol
865  *
866  * @parm xch a handle to an open hypervisor interface
867  * @parm domid the domain to get IO ABI protocol for
868  * @return guest protocol on success, NULL on failure
869  */
870 const char *xc_domain_get_native_protocol(xc_interface *xch,
871                                           uint32_t domid);
872 
873 /**
874  * This function returns information about the execution context of a
875  * particular vcpu of a domain.
876  *
877  * @parm xch a handle to an open hypervisor interface
878  * @parm domid the domain to get information from
879  * @parm vcpu the vcpu number
880  * @parm ctxt a pointer to a structure to store the execution context of the
881  *            domain
882  * @return 0 on success, -1 on failure
883  */
884 int xc_vcpu_getcontext(xc_interface *xch,
885                        uint32_t domid,
886                        uint32_t vcpu,
887                        vcpu_guest_context_any_t *ctxt);
888 
889 /**
890  * This function initializes the vuart emulation and returns
891  * the event to be used by the backend for communicating with
892  * the emulation code.
893  *
894  * @parm xch a handle to an open hypervisor interface
 * @parm type type of vuart
896  * @parm domid the domain to get information from
897  * @parm console_domid the domid of the backend console
898  * @parm gfn the guest pfn to be used as the ring buffer
899  * @parm evtchn the event channel to be used for events
900  * @return 0 on success, negative error on failure
901  */
902 int xc_dom_vuart_init(xc_interface *xch,
903                       uint32_t type,
904                       uint32_t domid,
905                       uint32_t console_domid,
906                       xen_pfn_t gfn,
907                       evtchn_port_t *evtchn);
908 
909 /**
910  * This function returns information about the XSAVE state of a particular
911  * vcpu of a domain. If extstate->size and extstate->xfeature_mask are 0,
912  * the call is considered a query to retrieve them and the buffer is not
913  * filled.
914  *
915  * @parm xch a handle to an open hypervisor interface
916  * @parm domid the domain to get information from
917  * @parm vcpu the vcpu number
918  * @parm extstate a pointer to a structure to store the XSAVE state of the
919  *                domain
920  * @return 0 on success, negative error code on failure
921  */
922 int xc_vcpu_get_extstate(xc_interface *xch,
923                          uint32_t domid,
924                          uint32_t vcpu,
925                          xc_vcpu_extstate_t *extstate);
926 
927 typedef struct xen_domctl_getvcpuinfo xc_vcpuinfo_t;
928 int xc_vcpu_getinfo(xc_interface *xch,
929                     uint32_t domid,
930                     uint32_t vcpu,
931                     xc_vcpuinfo_t *info);
932 
933 long long xc_domain_get_cpu_usage(xc_interface *xch,
934                                   uint32_t domid,
935                                   int vcpu);
936 
937 int xc_domain_sethandle(xc_interface *xch, uint32_t domid,
938                         xen_domain_handle_t handle);
939 
940 typedef struct xen_domctl_shadow_op_stats xc_shadow_op_stats_t;
941 int xc_shadow_control(xc_interface *xch,
942                       uint32_t domid,
943                       unsigned int sop,
944                       xc_hypercall_buffer_t *dirty_bitmap,
945                       unsigned long pages,
946                       unsigned long *mb,
947                       uint32_t mode,
948                       xc_shadow_op_stats_t *stats);
949 
950 int xc_sched_credit_domain_set(xc_interface *xch,
951                                uint32_t domid,
952                                struct xen_domctl_sched_credit *sdom);
953 
954 int xc_sched_credit_domain_get(xc_interface *xch,
955                                uint32_t domid,
956                                struct xen_domctl_sched_credit *sdom);
957 int xc_sched_credit_params_set(xc_interface *xch,
958                                uint32_t cpupool_id,
959                                struct xen_sysctl_credit_schedule *schedule);
960 int xc_sched_credit_params_get(xc_interface *xch,
961                                uint32_t cpupool_id,
962                                struct xen_sysctl_credit_schedule *schedule);
963 
964 int xc_sched_credit2_params_set(xc_interface *xch,
965                                 uint32_t cpupool_id,
966                                 struct xen_sysctl_credit2_schedule *schedule);
967 int xc_sched_credit2_params_get(xc_interface *xch,
968                                 uint32_t cpupool_id,
969                                 struct xen_sysctl_credit2_schedule *schedule);
970 int xc_sched_credit2_domain_set(xc_interface *xch,
971                                 uint32_t domid,
972                                 struct xen_domctl_sched_credit2 *sdom);
973 int xc_sched_credit2_domain_get(xc_interface *xch,
974                                 uint32_t domid,
975                                 struct xen_domctl_sched_credit2 *sdom);
976 
977 int xc_sched_rtds_domain_set(xc_interface *xch,
978                              uint32_t domid,
979                              struct xen_domctl_sched_rtds *sdom);
980 int xc_sched_rtds_domain_get(xc_interface *xch,
981                              uint32_t domid,
982                              struct xen_domctl_sched_rtds *sdom);
983 int xc_sched_rtds_vcpu_set(xc_interface *xch,
984                            uint32_t domid,
985                            struct xen_domctl_schedparam_vcpu *vcpus,
986                            uint32_t num_vcpus);
987 int xc_sched_rtds_vcpu_get(xc_interface *xch,
988                            uint32_t domid,
989                            struct xen_domctl_schedparam_vcpu *vcpus,
990                            uint32_t num_vcpus);
991 
992 int
993 xc_sched_arinc653_schedule_set(
994     xc_interface *xch,
995     uint32_t cpupool_id,
996     struct xen_sysctl_arinc653_schedule *schedule);
997 
998 int
999 xc_sched_arinc653_schedule_get(
1000     xc_interface *xch,
1001     uint32_t cpupool_id,
1002     struct xen_sysctl_arinc653_schedule *schedule);
1003 
1004 /**
1005  * This function sends a trigger to a domain.
1006  *
1007  * @parm xch a handle to an open hypervisor interface
1008  * @parm domid the domain id to send trigger
1009  * @parm trigger the trigger type
1010  * @parm vcpu the vcpu number to send trigger
1011  * return 0 on success, -1 on failure
1012  */
1013 int xc_domain_send_trigger(xc_interface *xch,
1014                            uint32_t domid,
1015                            uint32_t trigger,
1016                            uint32_t vcpu);
1017 
1018 /**
 * This function enables or disables debugging of a domain.
1020  *
1021  * @parm xch a handle to an open hypervisor interface
 * @parm domid the domain id whose debugging state is to be changed
1023  * @parm enable true to enable debugging
1024  * return 0 on success, -1 on failure
1025  */
1026 int xc_domain_setdebugging(xc_interface *xch,
1027                            uint32_t domid,
1028                            unsigned int enable);
1029 
1030 /**
1031  * This function audits the (top level) p2m of a domain
1032  * and returns the different error counts, if any.
1033  *
1034  * @parm xch a handle to an open hypervisor interface
1035  * @parm domid the domain id whose top level p2m we
1036  *       want to audit
1037  * @parm orphans count of m2p entries for valid
1038  *       domain pages containing an invalid value
1039  * @parm m2p_bad count of m2p entries mismatching the
1040  *       associated p2m entry for this domain
1041  * @parm p2m_bad count of p2m entries for this domain
1042  *       mismatching the associated m2p entry
1043  * return 0 on success, -1 on failure
1044  * errno values on failure include:
1045  *          -ENOSYS: not implemented
1046  *          -EFAULT: could not copy results back to guest
1047  */
1048 int xc_domain_p2m_audit(xc_interface *xch,
1049                         uint32_t domid,
1050                         uint64_t *orphans,
1051                         uint64_t *m2p_bad,
1052                         uint64_t *p2m_bad);
1053 
1054 /**
1055  * This function sets or clears the requirement that an access memory
1056  * event listener is required on the domain.
1057  *
1058  * @parm xch a handle to an open hypervisor interface
 * @parm domid the domain id to set the access requirement for
1060  * @parm enable true to require a listener
1061  * return 0 on success, -1 on failure
1062  */
1063 int xc_domain_set_access_required(xc_interface *xch,
1064 				  uint32_t domid,
1065 				  unsigned int required);
1066 /**
1067  * This function sets the handler of global VIRQs sent by the hypervisor
1068  *
1069  * @parm xch a handle to an open hypervisor interface
1070  * @parm domid the domain id which will handle the VIRQ
1071  * @parm virq the virq number (VIRQ_*)
1072  * return 0 on success, -1 on failure
1073  */
1074 int xc_domain_set_virq_handler(xc_interface *xch, uint32_t domid, int virq);
1075 
1076 /**
1077  * Set the maximum event channel port a domain may bind.
1078  *
1079  * This does not affect ports that are already bound.
1080  *
1081  * @param xch a handle to an open hypervisor interface
1082  * @param domid the domain id
1083  * @param max_port maximum port number
1084  */
1085 int xc_domain_set_max_evtchn(xc_interface *xch, uint32_t domid,
1086                              uint32_t max_port);
1087 
1088 /**
1089  * Set the maximum number of grant frames and maptrack frames a domain
1090  * can have. Must be used at domain setup time and only then.
1091  *
1092  * @param xch a handle to an open hypervisor interface
1093  * @param domid the domain id
1094  * @param grant_frames max. number of grant frames
1095  * @param maptrack_frames max. number of maptrack frames
1096  */
1097 int xc_domain_set_gnttab_limits(xc_interface *xch, uint32_t domid,
1098                                 uint32_t grant_frames,
1099                                 uint32_t maptrack_frames);
1100 
1101 /*
1102  * CPUPOOL MANAGEMENT FUNCTIONS
1103  */
1104 
1105 typedef struct xc_cpupoolinfo {
1106     uint32_t cpupool_id;
1107     uint32_t sched_id;
1108     uint32_t n_dom;
1109     xc_cpumap_t cpumap;
1110 } xc_cpupoolinfo_t;
1111 
1112 #define XC_CPUPOOL_POOLID_ANY 0xFFFFFFFF
1113 
1114 /**
1115  * Create a new cpupool.
1116  *
1117  * @parm xc_handle a handle to an open hypervisor interface
1118  * @parm ppoolid pointer to the new cpupool id (in/out)
1119  * @parm sched_id id of scheduler to use for pool
1120  * return 0 on success, -1 on failure
1121  */
1122 int xc_cpupool_create(xc_interface *xch,
1123                       uint32_t *ppoolid,
1124                       uint32_t sched_id);
1125 
1126 /**
1127  * Destroy a cpupool. Pool must be unused and have no cpu assigned.
1128  *
1129  * @parm xc_handle a handle to an open hypervisor interface
1130  * @parm poolid id of the cpupool to destroy
1131  * return 0 on success, -1 on failure
1132  */
1133 int xc_cpupool_destroy(xc_interface *xch,
1134                        uint32_t poolid);
1135 
1136 /**
 * Get cpupool info. Returns info for the existing cpupool with the lowest
 * id not below the given one.
1139  * @parm xc_handle a handle to an open hypervisor interface
1140  * @parm poolid lowest id for which info is returned
1141  * return cpupool info ptr (to be freed via xc_cpupool_infofree)
1142  */
1143 xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
1144                        uint32_t poolid);
1145 
1146 /**
1147  * Free cpupool info. Used to free info obtained via xc_cpupool_getinfo.
1148  * @parm xc_handle a handle to an open hypervisor interface
1149  * @parm info area to free
1150  */
1151 void xc_cpupool_infofree(xc_interface *xch,
1152                          xc_cpupoolinfo_t *info);
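
/*
 * Illustrative sketch: walk all cpupools, printing a line for each
 * (assuming an open handle xch):
 *
 *     xc_cpupoolinfo_t *info;
 *     uint32_t poolid = 0;
 *
 *     while ( (info = xc_cpupool_getinfo(xch, poolid)) != NULL )
 *     {
 *         printf("pool %u: scheduler %u, %u domain(s)\n",
 *                info->cpupool_id, info->sched_id, info->n_dom);
 *         poolid = info->cpupool_id + 1;
 *         xc_cpupool_infofree(xch, info);
 *     }
 */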
1153 
1154 /**
1155  * Add cpu to a cpupool. cpu may be -1 indicating the first unassigned.
1156  *
1157  * @parm xc_handle a handle to an open hypervisor interface
1158  * @parm poolid id of the cpupool
1159  * @parm cpu cpu number to add
1160  * return 0 on success, -1 on failure
1161  */
1162 int xc_cpupool_addcpu(xc_interface *xch,
1163                       uint32_t poolid,
1164                       int cpu);
1165 
1166 /**
1167  * Remove cpu from cpupool. cpu may be -1 indicating the last cpu of the pool.
1168  *
1169  * @parm xc_handle a handle to an open hypervisor interface
1170  * @parm poolid id of the cpupool
1171  * @parm cpu cpu number to remove
1172  * return 0 on success, -1 on failure
1173  */
1174 int xc_cpupool_removecpu(xc_interface *xch,
1175                          uint32_t poolid,
1176                          int cpu);
1177 
1178 /**
1179  * Move domain to another cpupool.
1180  *
1181  * @parm xc_handle a handle to an open hypervisor interface
1182  * @parm poolid id of the destination cpupool
1183  * @parm domid id of the domain to move
1184  * return 0 on success, -1 on failure
1185  */
1186 int xc_cpupool_movedomain(xc_interface *xch,
1187                           uint32_t poolid,
1188                           uint32_t domid);
1189 
1190 /**
1191  * Return map of cpus not in any cpupool.
1192  *
1193  * @parm xc_handle a handle to an open hypervisor interface
1194  * return cpumap array on success, NULL else
1195  */
1196 xc_cpumap_t xc_cpupool_freeinfo(xc_interface *xch);
1197 
1198 /*
1199  * EVENT CHANNEL FUNCTIONS
1200  *
1201  * None of these do any logging.
1202  */
1203 
1204 /* A port identifier is guaranteed to fit in 31 bits. */
1205 typedef int xc_evtchn_port_or_error_t;
1206 
1207 /**
1208  * This function allocates an unbound port.  Ports are named endpoints used for
1209  * interdomain communication.  This function is most useful in opening a
1210  * well-known port within a domain to receive events on.
1211  *
1212  * NOTE: If you are allocating a *local* unbound port, you probably want to
1213  * use xc_evtchn_bind_unbound_port(). This function is intended for allocating
1214  * ports *only* during domain creation.
1215  *
1216  * @parm xch a handle to an open hypervisor interface
1217  * @parm dom the ID of the local domain (the 'allocatee')
1218  * @parm remote_dom the ID of the domain who will later bind
1219  * @return allocated port (in @dom) on success, -1 on failure
1220  */
1221 xc_evtchn_port_or_error_t
1222 xc_evtchn_alloc_unbound(xc_interface *xch,
1223                         uint32_t dom,
1224                         uint32_t remote_dom);
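
/*
 * Illustrative sketch: while constructing a new domain, allocate a port
 * in it that dom0 (domain id 0) will bind to later:
 *
 *     xc_evtchn_port_or_error_t port;
 *
 *     port = xc_evtchn_alloc_unbound(xch, domid, 0);
 *     if ( port < 0 )
 *         return -1;
 */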
1225 
1226 int xc_evtchn_reset(xc_interface *xch,
1227                     uint32_t dom);
1228 
1229 typedef struct evtchn_status xc_evtchn_status_t;
1230 int xc_evtchn_status(xc_interface *xch, xc_evtchn_status_t *status);
1231 
1232 
1233 
1234 int xc_physdev_pci_access_modify(xc_interface *xch,
1235                                  uint32_t domid,
1236                                  int bus,
1237                                  int dev,
1238                                  int func,
1239                                  int enable);
1240 
1241 int xc_readconsolering(xc_interface *xch,
1242                        char *buffer,
1243                        unsigned int *pnr_chars,
1244                        int clear, int incremental, uint32_t *pindex);
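
/*
 * Illustrative sketch: read the current console ring contents without
 * clearing it; this is a non-incremental read, which the sketch assumes
 * allows a NULL pindex:
 *
 *     char buf[16384];
 *     unsigned int nr = sizeof(buf);
 *
 *     if ( xc_readconsolering(xch, buf, &nr, 0, 0, NULL) == 0 )
 *         fwrite(buf, 1, nr, stdout);
 */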
1245 
1246 int xc_send_debug_keys(xc_interface *xch, char *keys);
1247 int xc_set_parameters(xc_interface *xch, char *params);
1248 
1249 typedef struct xen_sysctl_physinfo xc_physinfo_t;
1250 typedef struct xen_sysctl_cputopo xc_cputopo_t;
1251 typedef struct xen_sysctl_numainfo xc_numainfo_t;
1252 typedef struct xen_sysctl_meminfo xc_meminfo_t;
1253 typedef struct xen_sysctl_pcitopoinfo xc_pcitopoinfo_t;
1254 
1255 typedef uint32_t xc_cpu_to_node_t;
1256 typedef uint32_t xc_cpu_to_socket_t;
1257 typedef uint32_t xc_cpu_to_core_t;
1258 typedef uint64_t xc_node_to_memsize_t;
1259 typedef uint64_t xc_node_to_memfree_t;
1260 typedef uint32_t xc_node_to_node_dist_t;
1261 
1262 int xc_physinfo(xc_interface *xch, xc_physinfo_t *info);
1263 int xc_cputopoinfo(xc_interface *xch, unsigned *max_cpus,
1264                    xc_cputopo_t *cputopo);
1265 int xc_numainfo(xc_interface *xch, unsigned *max_nodes,
1266                 xc_meminfo_t *meminfo, uint32_t *distance);
1267 int xc_pcitopoinfo(xc_interface *xch, unsigned num_devs,
1268                    physdev_pci_device_t *devs, uint32_t *nodes);
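
/*
 * Illustrative sketch of the usual two-step pattern for the topology
 * calls: query the count first (this sketch assumes a NULL buffer is
 * accepted for that), then allocate and fetch:
 *
 *     unsigned max_cpus = 0;
 *     xc_cputopo_t *topo;
 *
 *     if ( xc_cputopoinfo(xch, &max_cpus, NULL) )
 *         return -1;
 *
 *     topo = calloc(max_cpus, sizeof(*topo));
 *     if ( topo == NULL || xc_cputopoinfo(xch, &max_cpus, topo) )
 *         return -1;
 */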
1269 
1270 int xc_sched_id(xc_interface *xch,
1271                 int *sched_id);
1272 
1273 int xc_machphys_mfn_list(xc_interface *xch,
1274                          unsigned long max_extents,
1275                          xen_pfn_t *extent_start);
1276 
1277 typedef struct xen_sysctl_cpuinfo xc_cpuinfo_t;
1278 int xc_getcpuinfo(xc_interface *xch, int max_cpus,
1279                   xc_cpuinfo_t *info, int *nr_cpus);
1280 
1281 int xc_domain_setmaxmem(xc_interface *xch,
1282                         uint32_t domid,
1283                         uint64_t max_memkb);
1284 
1285 int xc_domain_set_memmap_limit(xc_interface *xch,
1286                                uint32_t domid,
1287                                unsigned long map_limitkb);
1288 
1289 int xc_domain_setvnuma(xc_interface *xch,
1290                         uint32_t domid,
1291                         uint32_t nr_vnodes,
1292                         uint32_t nr_regions,
1293                         uint32_t nr_vcpus,
1294                         xen_vmemrange_t *vmemrange,
1295                         unsigned int *vdistance,
1296                         unsigned int *vcpu_to_vnode,
1297                         unsigned int *vnode_to_pnode);
1298 /*
1299  * Retrieve vnuma configuration
1300  * domid: IN, target domid
1301  * nr_vnodes: IN/OUT, number of vnodes, not NULL
1302  * nr_vmemranges: IN/OUT, number of vmemranges, not NULL
1303  * nr_vcpus: IN/OUT, number of vcpus, not NULL
1304  * vmemranges: OUT, an array which has length of nr_vmemranges
1305  * vdistance: OUT, an array which has length of nr_vnodes * nr_vnodes
1306  * vcpu_to_vnode: OUT, an array which has length of nr_vcpus
1307  */
1308 int xc_domain_getvnuma(xc_interface *xch,
1309                        uint32_t domid,
1310                        uint32_t *nr_vnodes,
1311                        uint32_t *nr_vmemranges,
1312                        uint32_t *nr_vcpus,
1313                        xen_vmemrange_t *vmemrange,
1314                        unsigned int *vdistance,
1315                        unsigned int *vcpu_to_vnode);
1316 
1317 int xc_domain_soft_reset(xc_interface *xch,
1318                          uint32_t domid);
1319 
1320 #if defined(__i386__) || defined(__x86_64__)
1321 /*
1322  * PC BIOS standard E820 types and structure.
1323  */
1324 #define E820_RAM          1
1325 #define E820_RESERVED     2
1326 #define E820_ACPI         3
1327 #define E820_NVS          4
1328 #define E820_UNUSABLE     5
1329 
1330 #define E820MAX           (128)
1331 
1332 struct e820entry {
1333     uint64_t addr;
1334     uint64_t size;
1335     uint32_t type;
1336 } __attribute__((packed));
1337 int xc_domain_set_memory_map(xc_interface *xch,
1338                                uint32_t domid,
1339                                struct e820entry entries[],
1340                                uint32_t nr_entries);
1341 
1342 int xc_get_machine_memory_map(xc_interface *xch,
1343                               struct e820entry entries[],
1344                               uint32_t max_entries);
1345 #endif
1346 
1347 int xc_reserved_device_memory_map(xc_interface *xch,
1348                                   uint32_t flags,
1349                                   uint16_t seg,
1350                                   uint8_t bus,
1351                                   uint8_t devfn,
1352                                   struct xen_reserved_device_memory entries[],
1353                                   uint32_t *max_entries);
1354 int xc_domain_set_time_offset(xc_interface *xch,
1355                               uint32_t domid,
1356                               int32_t time_offset_seconds);
1357 
1358 int xc_domain_set_tsc_info(xc_interface *xch,
1359                            uint32_t domid,
1360                            uint32_t tsc_mode,
1361                            uint64_t elapsed_nsec,
1362                            uint32_t gtsc_khz,
1363                            uint32_t incarnation);
1364 
1365 int xc_domain_get_tsc_info(xc_interface *xch,
1366                            uint32_t domid,
1367                            uint32_t *tsc_mode,
1368                            uint64_t *elapsed_nsec,
1369                            uint32_t *gtsc_khz,
1370                            uint32_t *incarnation);

int xc_domain_disable_migrate(xc_interface *xch, uint32_t domid);

int xc_domain_maximum_gpfn(xc_interface *xch, uint32_t domid, xen_pfn_t *gpfns);

int xc_domain_nr_gpfns(xc_interface *xch, uint32_t domid, xen_pfn_t *gpfns);

int xc_domain_increase_reservation(xc_interface *xch,
                                   uint32_t domid,
                                   unsigned long nr_extents,
                                   unsigned int extent_order,
                                   unsigned int mem_flags,
                                   xen_pfn_t *extent_start);

int xc_domain_increase_reservation_exact(xc_interface *xch,
                                         uint32_t domid,
                                         unsigned long nr_extents,
                                         unsigned int extent_order,
                                         unsigned int mem_flags,
                                         xen_pfn_t *extent_start);

int xc_domain_decrease_reservation(xc_interface *xch,
                                   uint32_t domid,
                                   unsigned long nr_extents,
                                   unsigned int extent_order,
                                   xen_pfn_t *extent_start);

int xc_domain_decrease_reservation_exact(xc_interface *xch,
                                         uint32_t domid,
                                         unsigned long nr_extents,
                                         unsigned int extent_order,
                                         xen_pfn_t *extent_start);

int xc_domain_add_to_physmap(xc_interface *xch,
                             uint32_t domid,
                             unsigned int space,
                             unsigned long idx,
                             xen_pfn_t gpfn);

int xc_domain_add_to_physmap_batch(xc_interface *xch,
                                   uint32_t domid,
                                   uint32_t foreign_domid,
                                   unsigned int space,
                                   unsigned int size,
                                   xen_ulong_t *idxs,
                                   xen_pfn_t *gpfns,
                                   int *errs);

int xc_domain_populate_physmap(xc_interface *xch,
                               uint32_t domid,
                               unsigned long nr_extents,
                               unsigned int extent_order,
                               unsigned int mem_flags,
                               xen_pfn_t *extent_start);

int xc_domain_populate_physmap_exact(xc_interface *xch,
                                     uint32_t domid,
                                     unsigned long nr_extents,
                                     unsigned int extent_order,
                                     unsigned int mem_flags,
                                     xen_pfn_t *extent_start);

int xc_domain_claim_pages(xc_interface *xch,
                          uint32_t domid,
                          unsigned long nr_pages);

int xc_domain_memory_exchange_pages(xc_interface *xch,
                                    uint32_t domid,
                                    unsigned long nr_in_extents,
                                    unsigned int in_order,
                                    xen_pfn_t *in_extents,
                                    unsigned long nr_out_extents,
                                    unsigned int out_order,
                                    xen_pfn_t *out_extents);

int xc_domain_set_pod_target(xc_interface *xch,
                             uint32_t domid,
                             uint64_t target_pages,
                             uint64_t *tot_pages,
                             uint64_t *pod_cache_pages,
                             uint64_t *pod_entries);

int xc_domain_get_pod_target(xc_interface *xch,
                             uint32_t domid,
                             uint64_t *tot_pages,
                             uint64_t *pod_cache_pages,
                             uint64_t *pod_entries);

int xc_domain_ioport_permission(xc_interface *xch,
                                uint32_t domid,
                                uint32_t first_port,
                                uint32_t nr_ports,
                                uint32_t allow_access);

int xc_domain_irq_permission(xc_interface *xch,
                             uint32_t domid,
                             uint8_t pirq,
                             uint8_t allow_access);

int xc_domain_iomem_permission(xc_interface *xch,
                               uint32_t domid,
                               unsigned long first_mfn,
                               unsigned long nr_mfns,
                               uint8_t allow_access);

int xc_domain_pin_memory_cacheattr(xc_interface *xch,
                                   uint32_t domid,
                                   uint64_t start,
                                   uint64_t end,
                                   uint32_t type);

unsigned long xc_make_page_below_4G(xc_interface *xch, uint32_t domid,
                                    unsigned long mfn);

typedef xen_sysctl_perfc_desc_t xc_perfc_desc_t;
typedef xen_sysctl_perfc_val_t xc_perfc_val_t;
int xc_perfc_reset(xc_interface *xch);
int xc_perfc_query_number(xc_interface *xch,
                          int *nbr_desc,
                          int *nbr_val);
int xc_perfc_query(xc_interface *xch,
                   xc_hypercall_buffer_t *desc,
                   xc_hypercall_buffer_t *val);

typedef xen_sysctl_lockprof_data_t xc_lockprof_data_t;
int xc_lockprof_reset(xc_interface *xch);
int xc_lockprof_query_number(xc_interface *xch,
                             uint32_t *n_elems);
int xc_lockprof_query(xc_interface *xch,
                      uint32_t *n_elems,
                      uint64_t *time,
                      xc_hypercall_buffer_t *data);

void *xc_memalign(xc_interface *xch, size_t alignment, size_t size);

/**
 * Avoid using this function, as it does not work for all cases (such
 * as 4M superpages, or guests using PSE36). Only used for debugging.
 *
 * Translates a virtual address in the context of a given domain and
 * vcpu, returning the GFN containing the address (that is, an MFN for
 * PV guests, a PFN for HVM guests).  Returns 0 on failure.
 *
 * @parm xch a handle on an open hypervisor interface
 * @parm dom the domain to perform the translation in
 * @parm vcpu the vcpu to perform the translation on
 * @parm virt the virtual address to translate
 */
unsigned long xc_translate_foreign_address(xc_interface *xch, uint32_t dom,
                                           int vcpu, unsigned long long virt);


/**
 * DEPRECATED.  Avoid using this, as it does not correctly account for PFNs
 * without a backing MFN.
 */
int xc_get_pfn_list(xc_interface *xch, uint32_t domid, uint64_t *pfn_buf,
                    unsigned long max_pfns);

int xc_copy_to_domain_page(xc_interface *xch, uint32_t domid,
                           unsigned long dst_pfn, const char *src_page);

int xc_clear_domain_pages(xc_interface *xch, uint32_t domid,
                          unsigned long dst_pfn, int num);

static inline int xc_clear_domain_page(xc_interface *xch, uint32_t domid,
                                       unsigned long dst_pfn)
{
    return xc_clear_domain_pages(xch, domid, dst_pfn, 1);
}

int xc_mmuext_op(xc_interface *xch, struct mmuext_op *op, unsigned int nr_ops,
                 uint32_t dom);

/* System wide memory properties */
int xc_maximum_ram_page(xc_interface *xch, unsigned long *max_mfn);

/* Get current total pages allocated to a domain. */
long xc_get_tot_pages(xc_interface *xch, uint32_t domid);

/**
 * This function retrieves the number of bytes available
 * in the heap in a specific range of address widths and nodes.
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm min_width the smallest address width to query (0 if don't care)
 * @parm max_width the largest address width to query (0 if don't care)
 * @parm node the node to query (-1 for all)
 * @parm *bytes caller variable to put total bytes counted
 * @return 0 on success, <0 on failure.
 */
int xc_availheap(xc_interface *xch, int min_width, int max_width, int node,
                 uint64_t *bytes);
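
/*
 * Illustrative sketch: querying the free heap across all address widths
 * and nodes, assuming "xch" is an open handle (<inttypes.h> provides
 * PRIu64):
 *
 *     uint64_t bytes;
 *
 *     if ( xc_availheap(xch, 0, 0, -1, &bytes) == 0 )
 *         printf("%" PRIu64 " bytes free\n", bytes);
 */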

/*
 * Trace Buffer Operations
 */

/**
 * xc_tbuf_enable - enable tracing buffers
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm pages size of tracing buffers to create (in pages)
 * @parm mfn location to store mfn of the trace buffers to
 * @parm size location to store the size (in bytes) of a trace buffer to
 *
 * Gets the machine address of the trace pointer area and the size of the
 * per CPU buffers.
 */
int xc_tbuf_enable(xc_interface *xch, unsigned long pages,
                   unsigned long *mfn, unsigned long *size);

/*
 * Disable tracing buffers.
 */
int xc_tbuf_disable(xc_interface *xch);

/**
 * This function sets the size of the trace buffers. Setting the size
 * is currently a one-shot operation that may be performed either at boot
 * time or via this interface, not both. The buffer size must be set before
 * enabling tracing.
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm size the size in pages per cpu for the trace buffers
 * @return 0 on success, -1 on failure.
 */
int xc_tbuf_set_size(xc_interface *xch, unsigned long size);
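
/*
 * Illustrative sketch: sizing and then enabling the trace buffers, per
 * the ordering requirement above (assumes "xch" is an open handle):
 *
 *     unsigned long mfn, size;
 *
 *     if ( xc_tbuf_set_size(xch, 32) == 0 &&
 *          xc_tbuf_enable(xch, 32, &mfn, &size) == 0 )
 *         printf("trace buffers at mfn %#lx, %lu bytes per CPU\n",
 *                mfn, size);
 */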

/**
 * This function retrieves the current size of the trace buffers.
 * Note that the size returned is in terms of bytes, not pages.
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm size will contain the size in bytes for the trace buffers
 * @return 0 on success, -1 on failure.
 */
int xc_tbuf_get_size(xc_interface *xch, unsigned long *size);

int xc_tbuf_set_cpu_mask(xc_interface *xch, xc_cpumap_t mask);

int xc_tbuf_set_evt_mask(xc_interface *xch, uint32_t mask);

int xc_domctl(xc_interface *xch, struct xen_domctl *domctl);
int xc_sysctl(xc_interface *xch, struct xen_sysctl *sysctl);

int xc_version(xc_interface *xch, int cmd, void *arg);

int xc_flask_op(xc_interface *xch, xen_flask_op_t *op);

/*
 * Subscribe to domain suspend via evtchn.
 * Returns -1 on failure, in which case errno will be set appropriately.
 * Just calls XEN_DOMCTL_subscribe - see the caveats for that domctl
 * (in its doc comment in domctl.h).
 */
int xc_domain_subscribe_for_suspend(
    xc_interface *xch, uint32_t domid, evtchn_port_t port);

/**************************
 * GRANT TABLE OPERATIONS *
 **************************/

/*
 * These functions sometimes log messages as above, but not always.
 */


int xc_gnttab_op(xc_interface *xch, int cmd,
                 void * op, int op_size, int count);
/* Logs iff hypercall bounce fails, otherwise doesn't. */

int xc_gnttab_query_size(xc_interface *xch, struct gnttab_query_size *query);
int xc_gnttab_get_version(xc_interface *xch, uint32_t domid); /* Never logs */
grant_entry_v1_t *xc_gnttab_map_table_v1(xc_interface *xch, uint32_t domid, int *gnt_num);
grant_entry_v2_t *xc_gnttab_map_table_v2(xc_interface *xch, uint32_t domid, int *gnt_num);
/* Sometimes these don't set errno [fixme], and sometimes they don't log. */

int xc_physdev_map_pirq(xc_interface *xch,
                        uint32_t domid,
                        int index,
                        int *pirq);

int xc_physdev_map_pirq_msi(xc_interface *xch,
                            uint32_t domid,
                            int index,
                            int *pirq,
                            int devfn,
                            int bus,
                            int entry_nr,
                            uint64_t table_base);

int xc_physdev_unmap_pirq(xc_interface *xch,
                          uint32_t domid,
                          int pirq);

/*
 *  LOGGING AND ERROR REPORTING
 */


#define XC_MAX_ERROR_MSG_LEN 1024
typedef struct xc_error {
  enum xc_error_code code;
  char message[XC_MAX_ERROR_MSG_LEN];
} xc_error;


/*
 * Convert an error code or level into a text description.  Return values
 * are pointers to fixed strings and do not need to be freed.
 * These functions do not fail; if fed bogus input they return pointers
 * to generic strings.
 */
const char *xc_error_code_to_desc(int code);

/*
 * Convert an errno value to a text description.
 */
const char *xc_strerror(xc_interface *xch, int errcode);


/*
 * Return a pointer to the last error with level XC_REPORT_ERROR. This
 * pointer and the data pointed to are only valid until the next call
 * to libxc in the same thread.
 */
const xc_error *xc_get_last_error(xc_interface *handle);

/*
 * Clear the last error
 */
void xc_clear_last_error(xc_interface *xch);
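
/*
 * Illustrative sketch of the error-reporting pattern, using an arbitrary
 * libxc call (assumes "xch" is an open handle):
 *
 *     if ( xc_domain_pause(xch, domid) < 0 )
 *     {
 *         const xc_error *err = xc_get_last_error(xch);
 *
 *         fprintf(stderr, "pause failed: %s (%s)\n", err->message,
 *                 xc_error_code_to_desc(err->code));
 *         xc_clear_last_error(xch);
 *     }
 */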

int xc_hvm_param_set(xc_interface *handle, uint32_t dom, uint32_t param, uint64_t value);
int xc_hvm_param_get(xc_interface *handle, uint32_t dom, uint32_t param, uint64_t *value);

/* Deprecated: use xc_hvm_param_set/get() instead. */
int xc_set_hvm_param(xc_interface *handle, uint32_t dom, int param, unsigned long value);
int xc_get_hvm_param(xc_interface *handle, uint32_t dom, int param, unsigned long *value);
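
/*
 * Illustrative sketch: reading an HVM parameter (HVM_PARAM_STORE_PFN
 * comes from xen/hvm/params.h, included above; assumes "xch" is an open
 * handle and "dom" an HVM domain):
 *
 *     uint64_t pfn;
 *
 *     if ( xc_hvm_param_get(xch, dom, HVM_PARAM_STORE_PFN, &pfn) == 0 )
 *         printf("xenstore ring at pfn %#" PRIx64 "\n", pfn);
 */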

/* HVM guest pass-through */
int xc_assign_device(xc_interface *xch,
                     uint32_t domid,
                     uint32_t machine_sbdf,
                     uint32_t flag);

int xc_get_device_group(xc_interface *xch,
                        uint32_t domid,
                        uint32_t machine_sbdf,
                        uint32_t max_sdevs,
                        uint32_t *num_sdevs,
                        uint32_t *sdev_array);

int xc_test_assign_device(xc_interface *xch,
                          uint32_t domid,
                          uint32_t machine_sbdf);

int xc_deassign_device(xc_interface *xch,
                       uint32_t domid,
                       uint32_t machine_sbdf);

int xc_assign_dt_device(xc_interface *xch,
                        uint32_t domid,
                        char *path);
int xc_test_assign_dt_device(xc_interface *xch,
                             uint32_t domid,
                             char *path);
int xc_deassign_dt_device(xc_interface *xch,
                          uint32_t domid,
                          char *path);

int xc_domain_memory_mapping(xc_interface *xch,
                             uint32_t domid,
                             unsigned long first_gfn,
                             unsigned long first_mfn,
                             unsigned long nr_mfns,
                             uint32_t add_mapping);

int xc_domain_ioport_mapping(xc_interface *xch,
                             uint32_t domid,
                             uint32_t first_gport,
                             uint32_t first_mport,
                             uint32_t nr_ports,
                             uint32_t add_mapping);

int xc_domain_update_msi_irq(
    xc_interface *xch,
    uint32_t domid,
    uint32_t gvec,
    uint32_t pirq,
    uint32_t gflags,
    uint64_t gtable);

int xc_domain_unbind_msi_irq(xc_interface *xch,
                             uint32_t domid,
                             uint32_t gvec,
                             uint32_t pirq,
                             uint32_t gflags);

int xc_domain_bind_pt_irq(xc_interface *xch,
                          uint32_t domid,
                          uint8_t machine_irq,
                          uint8_t irq_type,
                          uint8_t bus,
                          uint8_t device,
                          uint8_t intx,
                          uint8_t isa_irq);

int xc_domain_unbind_pt_irq(xc_interface *xch,
                            uint32_t domid,
                            uint8_t machine_irq,
                            uint8_t irq_type,
                            uint8_t bus,
                            uint8_t device,
                            uint8_t intx,
                            uint8_t isa_irq);

int xc_domain_bind_pt_pci_irq(xc_interface *xch,
                              uint32_t domid,
                              uint8_t machine_irq,
                              uint8_t bus,
                              uint8_t device,
                              uint8_t intx);

int xc_domain_bind_pt_isa_irq(xc_interface *xch,
                              uint32_t domid,
                              uint8_t machine_irq);

int xc_domain_bind_pt_spi_irq(xc_interface *xch,
                              uint32_t domid,
                              uint16_t vspi,
                              uint16_t spi);

int xc_domain_unbind_pt_spi_irq(xc_interface *xch,
                                uint32_t domid,
                                uint16_t vspi,
                                uint16_t spi);

int xc_domain_set_machine_address_size(xc_interface *xch,
                                       uint32_t domid,
                                       unsigned int width);
int xc_domain_get_machine_address_size(xc_interface *xch,
                                       uint32_t domid);

int xc_domain_suppress_spurious_page_faults(xc_interface *xch,
                                            uint32_t domid);

/* Set the target domain */
int xc_domain_set_target(xc_interface *xch,
                         uint32_t domid,
                         uint32_t target);

/* Control the domain for debug */
int xc_domain_debug_control(xc_interface *xch,
                            uint32_t domid,
                            uint32_t sop,
                            uint32_t vcpu);

#if defined(__i386__) || defined(__x86_64__)
int xc_cpuid_set(xc_interface *xch,
                 uint32_t domid,
                 const unsigned int *input,
                 const char **config,
                 char **config_transformed);
int xc_cpuid_apply_policy(xc_interface *xch,
                          uint32_t domid,
                          uint32_t *featureset,
                          unsigned int nr_features);
void xc_cpuid_to_str(const unsigned int *regs,
                     char **strs); /* some strs[] may be NULL if ENOMEM */
int xc_mca_op(xc_interface *xch, struct xen_mc *mc);
int xc_mca_op_inject_v2(xc_interface *xch, unsigned int flags,
                        xc_cpumap_t cpumap, unsigned int nr_cpus);
#endif

struct xc_px_val {
    uint64_t freq;        /* Px core frequency */
    uint64_t residency;   /* Px residency time */
    uint64_t count;       /* Px transition count */
};

struct xc_px_stat {
    uint8_t total;        /* total Px states */
    uint8_t usable;       /* usable Px states */
    uint8_t last;         /* last Px state */
    uint8_t cur;          /* current Px state */
    uint64_t *trans_pt;   /* Px transition table */
    struct xc_px_val *pt;
};

int xc_pm_get_max_px(xc_interface *xch, int cpuid, int *max_px);
int xc_pm_get_pxstat(xc_interface *xch, int cpuid, struct xc_px_stat *pxpt);
int xc_pm_reset_pxstat(xc_interface *xch, int cpuid);

struct xc_cx_stat {
    uint32_t nr;           /* entry nr in triggers[]/residencies[], incl C0 */
    uint32_t last;         /* last Cx state */
    uint64_t idle_time;    /* idle time from boot */
    uint64_t *triggers;    /* Cx trigger counts */
    uint64_t *residencies; /* Cx residencies */
    uint32_t nr_pc;        /* entry nr in pc[] */
    uint32_t nr_cc;        /* entry nr in cc[] */
    uint64_t *pc;          /* 1-biased indexing (i.e. excl C0) */
    uint64_t *cc;          /* 1-biased indexing (i.e. excl C0) */
};
typedef struct xc_cx_stat xc_cx_stat_t;

int xc_pm_get_max_cx(xc_interface *xch, int cpuid, int *max_cx);
int xc_pm_get_cxstat(xc_interface *xch, int cpuid, struct xc_cx_stat *cxpt);
int xc_pm_reset_cxstat(xc_interface *xch, int cpuid);

int xc_cpu_online(xc_interface *xch, int cpu);
int xc_cpu_offline(xc_interface *xch, int cpu);

/*
 * The fields of this cpufreq parameter structure are named after the
 * corresponding sysfs files in native Linux.
 */
typedef struct xen_userspace xc_userspace_t;
typedef struct xen_ondemand xc_ondemand_t;

struct xc_get_cpufreq_para {
    /* IN/OUT variable */
    uint32_t cpu_num;
    uint32_t freq_num;
    uint32_t gov_num;

    /* for all governors */
    /* OUT variable */
    uint32_t *affected_cpus;
    uint32_t *scaling_available_frequencies;
    char     *scaling_available_governors;
    char scaling_driver[CPUFREQ_NAME_LEN];

    uint32_t cpuinfo_cur_freq;
    uint32_t cpuinfo_max_freq;
    uint32_t cpuinfo_min_freq;
    uint32_t scaling_cur_freq;

    char scaling_governor[CPUFREQ_NAME_LEN];
    uint32_t scaling_max_freq;
    uint32_t scaling_min_freq;

    /* for specific governor */
    union {
        xc_userspace_t userspace;
        xc_ondemand_t ondemand;
    } u;

    int32_t turbo_enabled;
};

int xc_get_cpufreq_para(xc_interface *xch, int cpuid,
                        struct xc_get_cpufreq_para *user_para);
int xc_set_cpufreq_gov(xc_interface *xch, int cpuid, char *govname);
int xc_set_cpufreq_para(xc_interface *xch, int cpuid,
                        int ctrl_type, int ctrl_value);
int xc_get_cpufreq_avgfreq(xc_interface *xch, int cpuid, int *avg_freq);

int xc_set_sched_opt_smt(xc_interface *xch, uint32_t value);
int xc_set_vcpu_migration_delay(xc_interface *xch, uint32_t value);
int xc_get_vcpu_migration_delay(xc_interface *xch, uint32_t *value);

int xc_get_cpuidle_max_cstate(xc_interface *xch, uint32_t *value);
int xc_set_cpuidle_max_cstate(xc_interface *xch, uint32_t value);

int xc_enable_turbo(xc_interface *xch, int cpuid);
int xc_disable_turbo(xc_interface *xch, int cpuid);
/**
 * tmem operations
 */

int xc_tmem_control_oid(xc_interface *xch, int32_t pool_id, uint32_t subop,
                        uint32_t cli_id, uint32_t len, uint32_t arg,
                        struct xen_tmem_oid oid, void *buf);
int xc_tmem_control(xc_interface *xch,
                    int32_t pool_id, uint32_t subop, uint32_t cli_id,
                    uint32_t len, uint32_t arg, void *buf);
int xc_tmem_auth(xc_interface *xch, int cli_id, char *uuid_str, int enable);
int xc_tmem_save(xc_interface *xch, uint32_t domid, int live, int fd, int field_marker);
int xc_tmem_save_extra(xc_interface *xch, uint32_t domid, int fd, int field_marker);
void xc_tmem_save_done(xc_interface *xch, uint32_t domid);
int xc_tmem_restore(xc_interface *xch, uint32_t domid, int fd);
int xc_tmem_restore_extra(xc_interface *xch, uint32_t domid, int fd);

/**
 * altp2m operations
 */

int xc_altp2m_get_domain_state(xc_interface *handle, uint32_t dom, bool *state);
int xc_altp2m_set_domain_state(xc_interface *handle, uint32_t dom, bool state);
int xc_altp2m_set_vcpu_enable_notify(xc_interface *handle, uint32_t domid,
                                     uint32_t vcpuid, xen_pfn_t gfn);
int xc_altp2m_create_view(xc_interface *handle, uint32_t domid,
                          xenmem_access_t default_access, uint16_t *view_id);
int xc_altp2m_destroy_view(xc_interface *handle, uint32_t domid,
                           uint16_t view_id);
/* Switch all vCPUs of the domain to the specified altp2m view */
int xc_altp2m_switch_to_view(xc_interface *handle, uint32_t domid,
                             uint16_t view_id);
int xc_altp2m_set_mem_access(xc_interface *handle, uint32_t domid,
                             uint16_t view_id, xen_pfn_t gfn,
                             xenmem_access_t access);
int xc_altp2m_change_gfn(xc_interface *handle, uint32_t domid,
                         uint16_t view_id, xen_pfn_t old_gfn,
                         xen_pfn_t new_gfn);

/**
 * Mem paging operations.
 * Paging is supported only on the x86 architecture in 64 bit mode, with
 * Hardware-Assisted Paging (i.e. Intel EPT, AMD NPT). Moreover, AMD NPT
 * support is considered experimental.
 */
int xc_mem_paging_enable(xc_interface *xch, uint32_t domain_id, uint32_t *port);
int xc_mem_paging_disable(xc_interface *xch, uint32_t domain_id);
int xc_mem_paging_resume(xc_interface *xch, uint32_t domain_id);
int xc_mem_paging_nominate(xc_interface *xch, uint32_t domain_id,
                           uint64_t gfn);
int xc_mem_paging_evict(xc_interface *xch, uint32_t domain_id, uint64_t gfn);
int xc_mem_paging_prep(xc_interface *xch, uint32_t domain_id, uint64_t gfn);
int xc_mem_paging_load(xc_interface *xch, uint32_t domain_id,
                       uint64_t gfn, void *buffer);

/**
 * Access tracking operations.
 * Supported only on Intel EPT 64 bit processors.
 */

/*
 * Set a range of memory to a specific access.
 * Allowed types are XENMEM_access_default, XENMEM_access_n, any combination of
 * XENMEM_access_ + (rwx), and XENMEM_access_rx2rw
 */
int xc_set_mem_access(xc_interface *xch, uint32_t domain_id,
                      xenmem_access_t access, uint64_t first_pfn,
                      uint32_t nr);

/*
 * Set an array of pages to their respective access in the access array.
 * The nr parameter specifies the size of the pages and access arrays.
 * The same allowed access types as for xc_set_mem_access() apply.
 */
int xc_set_mem_access_multi(xc_interface *xch, uint32_t domain_id,
                            uint8_t *access, uint64_t *pages,
                            uint32_t nr);

/*
 * Gets the mem access for the given page (returned in access on success)
 */
int xc_get_mem_access(xc_interface *xch, uint32_t domain_id,
                      uint64_t pfn, xenmem_access_t *access);
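
/*
 * Illustrative sketch: restricting a page to read-only and reading the
 * setting back (assumes "xch" is an open handle, "pfn" a valid frame of
 * domain "domid", and <assert.h> included):
 *
 *     xenmem_access_t access;
 *
 *     if ( xc_set_mem_access(xch, domid, XENMEM_access_r, pfn, 1) == 0 &&
 *          xc_get_mem_access(xch, domid, pfn, &access) == 0 )
 *         assert(access == XENMEM_access_r);
 */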

/***
 * Monitor control operations.
 *
 * Enables the VM event monitor ring and returns the mapped ring page.
 * This ring is used to deliver mem_access events, as well as a set of
 * additional events that can be enabled with the xc_monitor_* functions.
 *
 * Will return NULL on error.
 * Caller has to unmap this page when done.
 */
void *xc_monitor_enable(xc_interface *xch, uint32_t domain_id, uint32_t *port);
int xc_monitor_disable(xc_interface *xch, uint32_t domain_id);
int xc_monitor_resume(xc_interface *xch, uint32_t domain_id);
/*
 * Get a bitmap of supported monitor events in the form
 * (1 << XEN_DOMCTL_MONITOR_EVENT_*).
 */
int xc_monitor_get_capabilities(xc_interface *xch, uint32_t domain_id,
                                uint32_t *capabilities);
int xc_monitor_write_ctrlreg(xc_interface *xch, uint32_t domain_id,
                             uint16_t index, bool enable, bool sync,
                             uint64_t bitmask, bool onchangeonly);
/*
 * A list of MSR indices can usually be found in /usr/include/asm/msr-index.h.
 * Please consult the Intel/AMD manuals for more information on
 * non-architectural indices.
 */
int xc_monitor_mov_to_msr(xc_interface *xch, uint32_t domain_id, uint32_t msr,
                          bool enable);
int xc_monitor_singlestep(xc_interface *xch, uint32_t domain_id, bool enable);
int xc_monitor_software_breakpoint(xc_interface *xch, uint32_t domain_id,
                                   bool enable);
int xc_monitor_descriptor_access(xc_interface *xch, uint32_t domain_id,
                                 bool enable);
int xc_monitor_guest_request(xc_interface *xch, uint32_t domain_id,
                             bool enable, bool sync, bool allow_userspace);
int xc_monitor_debug_exceptions(xc_interface *xch, uint32_t domain_id,
                                bool enable, bool sync);
int xc_monitor_cpuid(xc_interface *xch, uint32_t domain_id, bool enable);
int xc_monitor_privileged_call(xc_interface *xch, uint32_t domain_id,
                               bool enable);
int xc_monitor_emul_unimplemented(xc_interface *xch, uint32_t domain_id,
                                  bool enable);
/**
 * This function enables / disables emulation for each REP of a
 * REP-compatible instruction.
 *
 * @parm xch a handle to an open hypervisor interface.
 * @parm domain_id the id of the domain to control.
 * @parm enable if 0 optimize when possible, else emulate each REP.
 * @return 0 on success, -1 on failure.
 */
int xc_monitor_emulate_each_rep(xc_interface *xch, uint32_t domain_id,
                                bool enable);
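
/*
 * Illustrative monitor setup/teardown sketch (assumes "xch" is an open
 * handle; the ring page is assumed to be a single XC_PAGE_SIZE mapping,
 * released here with munmap() from <sys/mman.h>):
 *
 *     uint32_t port;
 *     void *ring = xc_monitor_enable(xch, domid, &port);
 *
 *     if ( ring != NULL )
 *     {
 *         xc_monitor_singlestep(xch, domid, true);
 *         // ... consume events delivered via the ring and event channel
 *         xc_monitor_singlestep(xch, domid, false);
 *         munmap(ring, XC_PAGE_SIZE);
 *         xc_monitor_disable(xch, domid);
 *     }
 */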

/***
 * Memory sharing operations.
 *
 * Unless otherwise noted, these calls return 0 on success, or -1 with
 * errno set on failure.
 *
 * Sharing is supported only on the x86 architecture in 64 bit mode, with
 * Hardware-Assisted Paging (i.e. Intel EPT, AMD NPT). Moreover, AMD NPT
 * support is considered experimental.
 *
 * Calls below return ENOSYS if not in the x86_64 architecture.
 * Calls below return ENODEV if the domain does not support HAP.
 * Calls below return ESRCH if the specified domain does not exist.
 * Calls below return EPERM if the caller is unprivileged for this domain.
 */

/* Turn on/off sharing for the domid, depending on the enable flag.
 *
 * Returns EXDEV if trying to enable and the domain has had a PCI device
 * assigned for passthrough (these two features are mutually exclusive).
 *
 * When sharing for a domain is turned off, the domain may still reference
 * shared pages. Unsharing happens lazily. */
int xc_memshr_control(xc_interface *xch,
                      uint32_t domid,
                      int enable);

/* Create a communication ring in which the hypervisor will place ENOMEM
 * notifications.
 *
 * ENOMEM happens when unsharing pages: a Copy-on-Write duplicate needs to be
 * allocated, and thus an out-of-memory error can occur.
 *
 * For complete examples on how to plumb a notification ring, look into
 * xenpaging or xen-access.
 *
 * On receipt of a notification, the helper should ensure there is memory
 * available to the domain before retrying.
 *
 * If a domain encounters an ENOMEM condition when sharing and this ring
 * has not been set up, the hypervisor will crash the domain.
 *
 * Fails with:
 *  EINVAL if port is NULL
 *  EINVAL if the sharing ring has already been enabled
 *  ENOSYS if no guest gfn has been specified to host the ring via an hvm param
 *  EINVAL if the gfn for the ring has not been populated
 *  ENOENT if the gfn for the ring is paged out, or cannot be unshared
 *  EINVAL if the gfn for the ring cannot be written to
 *  EINVAL if the domain is dying
 *  ENOSPC if an event channel cannot be allocated for the ring
 *  ENOMEM if memory cannot be allocated for internal data structures
 *  EINVAL or EACCES if the request is denied by the security policy
 */

int xc_memshr_ring_enable(xc_interface *xch,
                          uint32_t domid,
                          uint32_t *port);
/* Disable the ring for ENOMEM communication.
 * May fail with EINVAL if the ring was not enabled in the first place.
 */
int xc_memshr_ring_disable(xc_interface *xch,
                           uint32_t domid);

/*
 * Calls below return EINVAL if sharing has not been enabled for the domain
 * Calls below return EINVAL if the domain is dying
 */
/* Once a response to an ENOMEM notification is prepared, the tool can
 * notify the hypervisor to re-schedule the faulting vcpu of the domain with an
 * event channel kick and/or this call. */
int xc_memshr_domain_resume(xc_interface *xch,
                            uint32_t domid);

/* Select a page for sharing.
 *
 * A 64 bit opaque handle will be stored in handle.  The hypervisor ensures
 * that if the page is modified, the handle will be invalidated, and future
 * users of it will fail. If the page has already been selected and is still
 * associated to a valid handle, the existing handle will be returned.
 *
 * May fail with:
 *  EINVAL if the gfn is not populated or not sharable (mmio, etc)
 *  ENOMEM if internal data structures cannot be allocated
 *  E2BIG if the page is being referenced by other subsystems (e.g. qemu)
 *  ENOENT or EEXIST if there are internal hypervisor errors.
 */
int xc_memshr_nominate_gfn(xc_interface *xch,
                           uint32_t domid,
                           unsigned long gfn,
                           uint64_t *handle);
/* Same as above, but instead of a guest frame number, the input is a grant
 * reference provided by the guest.
 *
 * May fail with EINVAL if the grant reference is invalid.
 */
int xc_memshr_nominate_gref(xc_interface *xch,
                            uint32_t domid,
                            grant_ref_t gref,
                            uint64_t *handle);

/* The three calls below may fail with
 * 10 (or -XENMEM_SHARING_OP_S_HANDLE_INVALID) if the handle passed as source
 * is invalid.
 * 9 (or -XENMEM_SHARING_OP_C_HANDLE_INVALID) if the handle passed as client is
 * invalid.
 */
/* Share two nominated guest pages.
 *
 * If the call succeeds, both pages will point to the same backing frame (or
 * mfn). The hypervisor will verify the handles are still valid, but it will
 * not perform any sanity checking on the contents of the pages (the selection
 * mechanism for sharing candidates is entirely up to the user-space tool).
 *
 * After successful sharing, the client handle becomes invalid. Both <domain,
 * gfn> tuples point to the same mfn with the same handle, the one specified as
 * source. Either 3-tuple can be specified later for further re-sharing.
 */
int xc_memshr_share_gfns(xc_interface *xch,
                         uint32_t source_domain,
                         unsigned long source_gfn,
                         uint64_t source_handle,
                         uint32_t client_domain,
                         unsigned long client_gfn,
                         uint64_t client_handle);
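
/*
 * Illustrative sharing sketch: enable sharing on both domains, nominate
 * one page in each, then share them (assumes "xch" is an open handle and
 * that src_gfn/cli_gfn are populated, sharable frames):
 *
 *     uint64_t sh, ch;
 *
 *     if ( xc_memshr_control(xch, src_dom, 1) == 0 &&
 *          xc_memshr_control(xch, cli_dom, 1) == 0 &&
 *          xc_memshr_nominate_gfn(xch, src_dom, src_gfn, &sh) == 0 &&
 *          xc_memshr_nominate_gfn(xch, cli_dom, cli_gfn, &ch) == 0 )
 *         rc = xc_memshr_share_gfns(xch, src_dom, src_gfn, sh,
 *                                   cli_dom, cli_gfn, ch);
 */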

/* Same as above, but share two grant references instead.
 *
 * May fail with EINVAL if either grant reference is invalid.
 */
int xc_memshr_share_grefs(xc_interface *xch,
                          uint32_t source_domain,
                          grant_ref_t source_gref,
                          uint64_t source_handle,
                          uint32_t client_domain,
                          grant_ref_t client_gref,
                          uint64_t client_handle);

/* Allows a shared frame to be added directly to the guest physmap of the
 * client domain.
 *
 * May additionally fail with
 *  9 (-XENMEM_SHARING_OP_C_HANDLE_INVALID) if the physmap entry for the gfn is
 *  not suitable.
 *  ENOMEM if internal data structures cannot be allocated.
 *  ENOENT if there is an internal hypervisor error.
 */
int xc_memshr_add_to_physmap(xc_interface *xch,
                             uint32_t source_domain,
                             unsigned long source_gfn,
                             uint64_t source_handle,
                             uint32_t client_domain,
                             unsigned long client_gfn);

/* Deduplicates a range of memory of a client domain. Using this function
 * is equivalent to calling xc_memshr_nominate_gfn for each gfn in the two
 * domains, followed by xc_memshr_share_gfns.
 *
 * May fail with -EINVAL if the source and client domain have different
 * memory size or if memory sharing is not enabled on either of the domains.
 * May also fail with -ENOMEM if there isn't enough memory available to store
 * the sharing metadata before deduplication can happen.
 */
int xc_memshr_range_share(xc_interface *xch,
                          uint32_t source_domain,
                          uint32_t client_domain,
                          uint64_t first_gfn,
                          uint64_t last_gfn);

/* Debug calls: return the number of pages referencing the shared frame backing
 * the input argument. Should be one or greater.
 *
 * May fail with EINVAL if there is no backing shared frame for the input
 * argument.
 */
int xc_memshr_debug_gfn(xc_interface *xch,
                        uint32_t domid,
                        unsigned long gfn);
/* May additionally fail with EINVAL if the grant reference is invalid. */
int xc_memshr_debug_gref(xc_interface *xch,
                         uint32_t domid,
                         grant_ref_t gref);

/* Audits the share subsystem.
 *
 * Returns ENOSYS if not supported (may not be compiled into the hypervisor).
 *
 * Returns the number of errors found during auditing otherwise. May be (should
 * be!) zero.
 *
 * If debugtrace support has been compiled into the hypervisor and is enabled,
 * verbose descriptions for the errors are available in the hypervisor console.
 */
int xc_memshr_audit(xc_interface *xch);

/* Stats reporting.
 *
 * At any point in time, the following equality should hold for a host:
 *
 *  Let dominfo(d) be the xc_dominfo_t struct filled by a call to
 *  xc_domain_getinfo(d)
 *
 *  The summation of dominfo(d)->shr_pages for all domains in the system
 *      should be equal to
 *  xc_sharing_freed_pages + xc_sharing_used_frames
 */
/*
 * This function returns the total number of pages freed by using sharing
 * on the system.  For example, if two domains contain a single entry in
 * their p2m table that points to the same shared page (and no other pages
 * in the system are shared), then this function should return 1.
 */
long xc_sharing_freed_pages(xc_interface *xch);

/*
 * This function returns the total number of frames occupied by shared
 * pages on the system.  This is independent of the number of domains
 * pointing at these frames.  For example, in the above scenario this
 * should return 1. (And dominfo(d)->shr_pages for each of the two domains
 * should be 1 as well.)
 *
 * Note that some of these sharing_used_frames may be referenced by
 * a single domain page, and thus not realize any savings. The same
 * applies to some of the pages counted in dominfo(d)->shr_pages.
 */
long xc_sharing_used_frames(xc_interface *xch);
/*** End sharing interface ***/

int xc_flask_load(xc_interface *xc_handle, char *buf, uint32_t size);
int xc_flask_context_to_sid(xc_interface *xc_handle, char *buf, uint32_t size, uint32_t *sid);
int xc_flask_sid_to_context(xc_interface *xc_handle, int sid, char *buf, uint32_t size);
int xc_flask_getenforce(xc_interface *xc_handle);
int xc_flask_setenforce(xc_interface *xc_handle, int mode);
int xc_flask_getbool_byid(xc_interface *xc_handle, int id, char *name, uint32_t size, int *curr, int *pend);
int xc_flask_getbool_byname(xc_interface *xc_handle, char *name, int *curr, int *pend);
int xc_flask_setbool(xc_interface *xc_handle, char *name, int value, int commit);
int xc_flask_add_pirq(xc_interface *xc_handle, unsigned int pirq, char *scontext);
int xc_flask_add_ioport(xc_interface *xc_handle, unsigned long low, unsigned long high,
                        char *scontext);
int xc_flask_add_iomem(xc_interface *xc_handle, unsigned long low, unsigned long high,
                       char *scontext);
int xc_flask_add_device(xc_interface *xc_handle, unsigned long device, char *scontext);
int xc_flask_del_pirq(xc_interface *xc_handle, unsigned int pirq);
int xc_flask_del_ioport(xc_interface *xc_handle, unsigned long low, unsigned long high);
int xc_flask_del_iomem(xc_interface *xc_handle, unsigned long low, unsigned long high);
int xc_flask_del_device(xc_interface *xc_handle, unsigned long device);
int xc_flask_access(xc_interface *xc_handle, const char *scon, const char *tcon,
                    uint16_t tclass, uint32_t req,
                    uint32_t *allowed, uint32_t *decided,
                    uint32_t *auditallow, uint32_t *auditdeny,
                    uint32_t *seqno);
int xc_flask_avc_cachestats(xc_interface *xc_handle, char *buf, int size);
int xc_flask_policyvers(xc_interface *xc_handle);
int xc_flask_avc_hashstats(xc_interface *xc_handle, char *buf, int size);
int xc_flask_getavc_threshold(xc_interface *xc_handle);
int xc_flask_setavc_threshold(xc_interface *xc_handle, int threshold);
int xc_flask_relabel_domain(xc_interface *xch, uint32_t domid, uint32_t sid);

struct elf_binary;
void xc_elf_set_logfile(xc_interface *xch, struct elf_binary *elf,
                        int verbose);
/* Useful for callers who also use libelf. */

/**
 * Checkpoint Compression
 */
typedef struct compression_ctx comp_ctx;
comp_ctx *xc_compression_create_context(xc_interface *xch,
                                        unsigned long p2m_size);
void xc_compression_free_context(xc_interface *xch, comp_ctx *ctx);

/**
 * Add a page to compression page buffer, to be compressed later.
 *
 * returns 0 if the page was successfully added to the page buffer
 *
 * returns -1 if there is no space in buffer. In this case, the
 *  application should call xc_compression_compress_pages to compress
 *  the buffer (or at least part of it), thereby freeing some space in
 *  the page buffer.
 *
 * returns -2 if the pfn is out of bounds, where the bound is the p2m_size
 *  parameter passed during xc_compression_create_context.
 */
int xc_compression_add_page(xc_interface *xch, comp_ctx *ctx, char *page,
                            unsigned long pfn, int israw);

/**
 * Delta-compresses pages in the compression buffer and inserts the
 * compressed data into the supplied compression buffer compbuf, whose
 * size is compbuf_size.
 * After compression, the pages are copied to the internal LRU cache.
 *
 * This function compresses as many pages as possible into the
 * supplied compression buffer. It maintains an internal iterator to
 * keep track of pages in the input buffer that are yet to be compressed.
 *
 * returns -1 if the compression buffer has run out of space.
 * returns 1 on success.
 * returns 0 if no more pages are left to be compressed.
 *  When the return value is non-zero, compbuf_len indicates the actual
 *  amount of data present in compbuf (<=compbuf_size).
 */
int xc_compression_compress_pages(xc_interface *xch, comp_ctx *ctx,
                                  char *compbuf, unsigned long compbuf_size,
                                  unsigned long *compbuf_len);

/**
 * Resets the internal page buffer that holds dirty pages before compression.
 * Also resets the iterators.
 */
void xc_compression_reset_pagebuf(xc_interface *xch, comp_ctx *ctx);

/**
 * Caller must supply the compression buffer (compbuf),
 * its size (compbuf_size) and a reference to index variable (compbuf_pos)
 * that is used internally. Each call pulls out one page from the compressed
 * chunk and copies it to dest.
 */
int xc_compression_uncompress_page(xc_interface *xch, char *compbuf,
                                   unsigned long compbuf_size,
                                   unsigned long *compbuf_pos, char *dest);
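
/*
 * Illustrative compression-side loop, following the return-value
 * contract described above (a sketch only; assumes pages were already
 * queued with xc_compression_add_page() and "fd" is an output
 * descriptor):
 *
 *     char compbuf[4096 * 16];
 *     unsigned long len;
 *     int rc;
 *
 *     do {
 *         rc = xc_compression_compress_pages(xch, ctx, compbuf,
 *                                            sizeof(compbuf), &len);
 *         if ( rc != 0 )
 *             write(fd, compbuf, len);  // flush; retry while pages remain
 *     } while ( rc != 0 );
 */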

/*
 * Execute an image previously loaded with xc_kexec_load().
 *
 * Does not return on success.
 *
 * Fails with:
 *   ENOENT if the specified image has not been loaded.
 */
int xc_kexec_exec(xc_interface *xch, int type);

/*
 * Find the machine address and size of certain memory areas.
 *
 *   KEXEC_RANGE_MA_CRASH       crash area
 *   KEXEC_RANGE_MA_XEN         Xen itself
 *   KEXEC_RANGE_MA_CPU         CPU note for CPU number 'nr'
 *   KEXEC_RANGE_MA_XENHEAP     xenheap
 *   KEXEC_RANGE_MA_EFI_MEMMAP  EFI Memory Map
 *   KEXEC_RANGE_MA_VMCOREINFO  vmcoreinfo
 *
 * Fails with:
 *   EINVAL if the range or CPU number isn't valid.
 */
int xc_kexec_get_range(xc_interface *xch, int range, int nr,
                       uint64_t *size, uint64_t *start);

/*
 * Load a kexec image into memory.
 *
 * The image may be of type KEXEC_TYPE_DEFAULT (executed on request)
 * or KEXEC_TYPE_CRASH (executed on a crash).
 *
 * The image architecture may be a 32-bit variant of the hypervisor
 * architecture (e.g., EM_386 on an x86-64 hypervisor).
 *
 * Fails with:
 *   ENOMEM if there is insufficient memory for the new image.
 *   EINVAL if the image does not fit into the crash area or the entry
 *          point isn't within one of the segments.
 *   EBUSY  if another image is being executed.
 */
int xc_kexec_load(xc_interface *xch, uint8_t type, uint16_t arch,
                  uint64_t entry_maddr,
                  uint32_t nr_segments, xen_kexec_segment_t *segments);

/*
 * Unload a kexec image.
 *
 * This prevents a KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH image from
 * being executed.  The crash images are not cleared from the crash
 * region.
 */
int xc_kexec_unload(xc_interface *xch, int type);

/*
 * Find out whether the image has been successfully loaded.
 *
 * The type can be either KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH.
 * If zero is returned, that means no image is loaded for the type.
 * If one is returned, that means an image is loaded for the type.
 * Otherwise, a negative return value indicates an error.
 */
int xc_kexec_status(xc_interface *xch, int type);

typedef xenpf_resource_entry_t xc_resource_entry_t;

/*
 * Generic resource operation which contains multiple non-preemptible
 * resource access entries that are passed to xc_resource_op().
 */
struct xc_resource_op {
    uint64_t result;        /* on return, check this field first */
    uint32_t cpu;           /* which cpu to run */
    uint32_t nr_entries;    /* number of resource entries */
    xc_resource_entry_t *entries;
};

typedef struct xc_resource_op xc_resource_op_t;
int xc_resource_op(xc_interface *xch, uint32_t nr_ops, xc_resource_op_t *ops);

#if defined(__i386__) || defined(__x86_64__)
enum xc_psr_cmt_type {
    XC_PSR_CMT_L3_OCCUPANCY,
    XC_PSR_CMT_TOTAL_MEM_COUNT,
    XC_PSR_CMT_LOCAL_MEM_COUNT,
};
typedef enum xc_psr_cmt_type xc_psr_cmt_type;

enum xc_psr_cat_type {
    XC_PSR_CAT_L3_CBM      = 1,
    XC_PSR_CAT_L3_CBM_CODE = 2,
    XC_PSR_CAT_L3_CBM_DATA = 3,
    XC_PSR_CAT_L2_CBM      = 4,
};
typedef enum xc_psr_cat_type xc_psr_cat_type;

int xc_psr_cmt_attach(xc_interface *xch, uint32_t domid);
int xc_psr_cmt_detach(xc_interface *xch, uint32_t domid);
int xc_psr_cmt_get_domain_rmid(xc_interface *xch, uint32_t domid,
                               uint32_t *rmid);
int xc_psr_cmt_get_total_rmid(xc_interface *xch, uint32_t *total_rmid);
int xc_psr_cmt_get_l3_upscaling_factor(xc_interface *xch,
                                       uint32_t *upscaling_factor);
int xc_psr_cmt_get_l3_event_mask(xc_interface *xch, uint32_t *event_mask);
int xc_psr_cmt_get_l3_cache_size(xc_interface *xch, uint32_t cpu,
                                 uint32_t *l3_cache_size);
int xc_psr_cmt_get_data(xc_interface *xch, uint32_t rmid, uint32_t cpu,
                        uint32_t psr_cmt_type, uint64_t *monitor_data,
                        uint64_t *tsc);
int xc_psr_cmt_enabled(xc_interface *xch);

int xc_psr_cat_set_domain_data(xc_interface *xch, uint32_t domid,
                               xc_psr_cat_type type, uint32_t target,
                               uint64_t data);
int xc_psr_cat_get_domain_data(xc_interface *xch, uint32_t domid,
                               xc_psr_cat_type type, uint32_t target,
                               uint64_t *data);
int xc_psr_cat_get_info(xc_interface *xch, uint32_t socket, unsigned int lvl,
                        uint32_t *cos_max, uint32_t *cbm_len,
                        bool *cdp_enabled);

int xc_get_cpu_levelling_caps(xc_interface *xch, uint32_t *caps);
int xc_get_cpu_featureset(xc_interface *xch, uint32_t index,
                          uint32_t *nr_features, uint32_t *featureset);

uint32_t xc_get_cpu_featureset_size(void);

enum xc_static_cpu_featuremask {
    XC_FEATUREMASK_KNOWN,
    XC_FEATUREMASK_SPECIAL,
    XC_FEATUREMASK_PV,
    XC_FEATUREMASK_HVM_SHADOW,
    XC_FEATUREMASK_HVM_HAP,
    XC_FEATUREMASK_DEEP_FEATURES,
};
const uint32_t *xc_get_static_cpu_featuremask(enum xc_static_cpu_featuremask);
const uint32_t *xc_get_feature_deep_deps(uint32_t feature);

#endif

int xc_livepatch_upload(xc_interface *xch,
                        char *name, unsigned char *payload, uint32_t size);

int xc_livepatch_get(xc_interface *xch,
                     char *name,
                     xen_livepatch_status_t *status);

/*
 * The heart of this function is to get an array of xen_livepatch_status_t.
 *
 * However it is complex because it has to deal with the hypervisor
 * returning only some of the requested data, or the data being stale
 * (another hypercall might alter the list).
 *
 * The parameters that the function expects to contain data from
 * the hypervisor are: 'info', 'name', and 'len'. The 'done' and
 * 'left' parameters are also updated with the number of entries filled
 * out and, respectively, the number of entries left to get from the
 * hypervisor.
 *
 * It is expected that the caller of this function will use the 'left'
 * value to determine the 'start' of a subsequent call. This way we have
 * a cursor in the array. Note that 'info', 'name', and 'len' will be
 * updated on subsequent calls.
 *
 * The 'max' is to be provided by the caller with the maximum
 * number of entries that the 'info', 'name', and 'len' arrays can
 * be filled up with.
 *
 * Each entry in the 'name' array is expected to be of XEN_LIVEPATCH_NAME_SIZE
 * length.
 *
 * Each entry in the 'info' array is expected to be of xen_livepatch_status_t
 * structure size.
 *
 * Each entry in the 'len' array is expected to be of uint32_t size.
 *
 * The return value is zero if the hypercall completed successfully.
 * Note that the return value is _not_ the number of entries filled
 * out - that is saved in 'done'.
 *
 * If there was an error performing the operation, the return value
 * will contain a negative -EXX type value. The 'done' and 'left'
 * values will contain the number of entries that had been successfully
 * retrieved (if any).
 */
int xc_livepatch_list(xc_interface *xch, unsigned int max, unsigned int start,
                      xen_livepatch_status_t *info, char *name,
                      uint32_t *len, unsigned int *done,
                      unsigned int *left);
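
/*
 * One plausible pagination loop (illustrative only; CHUNK, info, names
 * and lens are hypothetical caller-provided buffers sized for CHUNK
 * entries, and the cursor is advanced by the number of entries already
 * retrieved):
 *
 *     unsigned int start = 0, done, left;
 *     int rc;
 *
 *     do {
 *         rc = xc_livepatch_list(xch, CHUNK, start, info, names, lens,
 *                                &done, &left);
 *         if ( rc )
 *             break;
 *         start += done;
 *     } while ( left );
 */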

/*
 * The operations are asynchronous and the hypervisor may take a while
 * to complete them. The `timeout` offers an option to expire the
 * operation if it could not be completed within the specified time
 * (in ns). A value of 0 means the hypervisor will pick the best timeout.
 */
int xc_livepatch_apply(xc_interface *xch, char *name, uint32_t timeout);
int xc_livepatch_revert(xc_interface *xch, char *name, uint32_t timeout);
int xc_livepatch_unload(xc_interface *xch, char *name, uint32_t timeout);
int xc_livepatch_replace(xc_interface *xch, char *name, uint32_t timeout);

/*
 * Ensure cache coherency after memory modifications. A call to this function
 * is only required on ARM as the x86 architecture provides cache coherency
 * guarantees. Calling this function on x86 is allowed but has no effect.
 */
int xc_domain_cacheflush(xc_interface *xch, uint32_t domid,
                         xen_pfn_t start_pfn, xen_pfn_t nr_pfns);

/* Compat shims */
#include "xenctrl_compat.h"

#endif /* XENCTRL_H */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */