1 /******************************************************************************
2  * sysctl.h
3  *
4  * System management operations. For use by node control stack.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22  * DEALINGS IN THE SOFTWARE.
23  *
24  * Copyright (c) 2002-2006, K Fraser
25  */
26 
27 #ifndef __XEN_PUBLIC_SYSCTL_H__
28 #define __XEN_PUBLIC_SYSCTL_H__
29 
30 #if !defined(__XEN__) && !defined(__XEN_TOOLS__)
31 #error "sysctl operations are intended for use by node control tools only"
32 #endif
33 
34 #include "xen.h"
35 #include "domctl.h"
36 #include "physdev.h"
37 #include "tmem.h"
38 
39 #define XEN_SYSCTL_INTERFACE_VERSION 0x00000010
40 
41 /*
42  * Read console content from Xen buffer ring.
43  */
/* XEN_SYSCTL_readconsole */
/* Copy (and optionally consume) data from Xen's console ring buffer. */
struct xen_sysctl_readconsole {
    /* IN: Non-zero -> clear after reading. */
    uint8_t clear;
    /* IN: Non-zero -> start index specified by @index field. */
    uint8_t incremental;
    /* Explicit padding to keep @index 32-bit aligned across ABIs. */
    uint8_t pad0, pad1;
    /*
     * IN:  Start index for consuming from ring buffer (if @incremental);
     * OUT: End index after consuming from ring buffer.
     */
    uint32_t index;
    /* IN: Virtual address to write console data. */
    XEN_GUEST_HANDLE_64(char) buffer;
    /* IN: Size of buffer; OUT: Bytes written to buffer. */
    uint32_t count;
};
61 
/* Get trace buffers machine base address */
/* XEN_SYSCTL_tbuf_op */
/* Control of the Xen trace (xentrace) buffers. */
struct xen_sysctl_tbuf_op {
    /* IN variables */
#define XEN_SYSCTL_TBUFOP_get_info     0 /* Query buffer_mfn/size. */
#define XEN_SYSCTL_TBUFOP_set_cpu_mask 1 /* Restrict tracing to cpu_mask. */
#define XEN_SYSCTL_TBUFOP_set_evt_mask 2 /* Restrict tracing to evt_mask. */
#define XEN_SYSCTL_TBUFOP_set_size     3 /* Set per-CPU buffer size. */
#define XEN_SYSCTL_TBUFOP_enable       4
#define XEN_SYSCTL_TBUFOP_disable      5
    uint32_t cmd;                /* XEN_SYSCTL_TBUFOP_??? */
    /* IN/OUT variables */
    struct xenctl_bitmap cpu_mask;
    uint32_t             evt_mask;
    /* OUT variables */
    uint64_aligned_t buffer_mfn; /* Machine frame of trace buffer metadata. */
    uint32_t size;  /* Also an IN variable! */
};
80 
81 /*
82  * Get physical information about the host machine
83  */
84 /* XEN_SYSCTL_physinfo */
85  /* (x86) The platform supports HVM guests. */
86 #define _XEN_SYSCTL_PHYSCAP_hvm          0
87 #define XEN_SYSCTL_PHYSCAP_hvm           (1u<<_XEN_SYSCTL_PHYSCAP_hvm)
88  /* (x86) The platform supports HVM-guest direct access to I/O devices. */
89 #define _XEN_SYSCTL_PHYSCAP_hvm_directio 1
90 #define XEN_SYSCTL_PHYSCAP_hvm_directio  (1u<<_XEN_SYSCTL_PHYSCAP_hvm_directio)
91 struct xen_sysctl_physinfo {
92     uint32_t threads_per_core;
93     uint32_t cores_per_socket;
94     uint32_t nr_cpus;     /* # CPUs currently online */
95     uint32_t max_cpu_id;  /* Largest possible CPU ID on this host */
96     uint32_t nr_nodes;    /* # nodes currently online */
97     uint32_t max_node_id; /* Largest possible node ID on this host */
98     uint32_t cpu_khz;
99     uint32_t capabilities;/* XEN_SYSCTL_PHYSCAP_??? */
100     uint64_aligned_t total_pages;
101     uint64_aligned_t free_pages;
102     uint64_aligned_t scrub_pages;
103     uint64_aligned_t outstanding_pages;
104     uint64_aligned_t max_mfn; /* Largest possible MFN on this host */
105     uint32_t hw_cap[8];
106 };
107 
108 /*
109  * Get the ID of the current scheduler.
110  */
/* XEN_SYSCTL_sched_id */
struct xen_sysctl_sched_id {
    /* OUT variable */
    uint32_t sched_id;  /* ID of the scheduler in use (XEN_SCHEDULER_*). */
};
116 
/* Interface for controlling Xen software performance counters. */
/* XEN_SYSCTL_perfc_op */
/* Sub-operations: */
#define XEN_SYSCTL_PERFCOP_reset 1   /* Reset all counters to zero. */
#define XEN_SYSCTL_PERFCOP_query 2   /* Get perfctr information. */
/* Description of one performance counter. */
struct xen_sysctl_perfc_desc {
    char         name[80];             /* name of perf counter */
    uint32_t     nr_vals;              /* number of values for this counter */
};
typedef struct xen_sysctl_perfc_desc xen_sysctl_perfc_desc_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_desc_t);
typedef uint32_t xen_sysctl_perfc_val_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_val_t);

struct xen_sysctl_perfc_op {
    /* IN variables. */
    uint32_t       cmd;                /*  XEN_SYSCTL_PERFCOP_??? */
    /* OUT variables. */
    uint32_t       nr_counters;       /*  number of counters description  */
    uint32_t       nr_vals;           /*  number of values  */
    /* counter information (or NULL) */
    XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc;
    /* counter values (or NULL) */
    XEN_GUEST_HANDLE_64(xen_sysctl_perfc_val_t) val;
};
142 
/* XEN_SYSCTL_getdomaininfolist */
/* Fetch info for up to @max_domains domains, starting at @first_domain. */
struct xen_sysctl_getdomaininfolist {
    /* IN variables. */
    domid_t               first_domain;
    uint32_t              max_domains;
    XEN_GUEST_HANDLE_64(xen_domctl_getdomaininfo_t) buffer;
    /* OUT variables. */
    uint32_t              num_domains; /* Entries actually written to buffer. */
};
152 
/* Inject debug keys into Xen. */
/* XEN_SYSCTL_debug_keys */
struct xen_sysctl_debug_keys {
    /* IN variables. */
    XEN_GUEST_HANDLE_64(char) keys;  /* Sequence of debug-key characters. */
    uint32_t nr_keys;                /* Number of characters in @keys. */
};
160 
/* Get physical CPU information. */
/* XEN_SYSCTL_getcpuinfo */
struct xen_sysctl_cpuinfo {
    uint64_aligned_t idletime;  /* Accumulated idle time (presumably ns -- see caller). */
};
typedef struct xen_sysctl_cpuinfo xen_sysctl_cpuinfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpuinfo_t);
struct xen_sysctl_getcpuinfo {
    /* IN variables. */
    uint32_t max_cpus;   /* Capacity of @info in entries. */
    XEN_GUEST_HANDLE_64(xen_sysctl_cpuinfo_t) info;
    /* OUT variables. */
    uint32_t nr_cpus;    /* Entries actually written to @info. */
};
175 
/* XEN_SYSCTL_availheap */
/* Query free heap space, optionally restricted by address width and node. */
struct xen_sysctl_availheap {
    /* IN variables. */
    uint32_t min_bitwidth;  /* Smallest address width (zero if don't care). */
    uint32_t max_bitwidth;  /* Largest address width (zero if don't care). */
    int32_t  node;          /* NUMA node of interest (-1 for all nodes). */
    /* OUT variables. */
    uint64_aligned_t avail_bytes;/* Bytes available in the specified region. */
};
185 
/* XEN_SYSCTL_get_pmstat */
/* Statistics for one P-state (performance state). */
struct pm_px_val {
    uint64_aligned_t freq;        /* Px core frequency */
    uint64_aligned_t residency;   /* Px residency time */
    uint64_aligned_t count;       /* Px transition count */
};
typedef struct pm_px_val pm_px_val_t;
DEFINE_XEN_GUEST_HANDLE(pm_px_val_t);

/* Aggregate P-state statistics for one CPU. */
struct pm_px_stat {
    uint8_t total;        /* total Px states */
    uint8_t usable;       /* usable Px states */
    uint8_t last;         /* last Px state */
    uint8_t cur;          /* current Px state */
    XEN_GUEST_HANDLE_64(uint64) trans_pt;   /* Px transition table */
    XEN_GUEST_HANDLE_64(pm_px_val_t) pt;
};

/* Aggregate C-state (idle state) statistics for one CPU. */
struct pm_cx_stat {
    uint32_t nr;    /* entry nr in triggers & residencies, including C0 */
    uint32_t last;  /* last Cx state */
    uint64_aligned_t idle_time;                 /* idle time from boot */
    XEN_GUEST_HANDLE_64(uint64) triggers;    /* Cx trigger counts */
    XEN_GUEST_HANDLE_64(uint64) residencies; /* Cx residencies */
    uint32_t nr_pc;                          /* entry nr in pc[] */
    uint32_t nr_cc;                          /* entry nr in cc[] */
    /*
     * These two arrays may (and generally will) have unused slots; slots not
     * having a corresponding hardware register will not be written by the
     * hypervisor. It is therefore up to the caller to put a suitable sentinel
     * into all slots before invoking the function.
     * Indexing is 1-biased (PC1/CC1 being at index 0).
     */
    XEN_GUEST_HANDLE_64(uint64) pc;
    XEN_GUEST_HANDLE_64(uint64) cc;
};
222 
/* Retrieve or reset per-CPU power-management statistics. */
struct xen_sysctl_get_pmstat {
#define PMSTAT_CATEGORY_MASK 0xf0  /* High nibble of @type selects a category. */
#define PMSTAT_PX            0x10
#define PMSTAT_CX            0x20
#define PMSTAT_get_max_px    (PMSTAT_PX | 0x1)
#define PMSTAT_get_pxstat    (PMSTAT_PX | 0x2)
#define PMSTAT_reset_pxstat  (PMSTAT_PX | 0x3)
#define PMSTAT_get_max_cx    (PMSTAT_CX | 0x1)
#define PMSTAT_get_cxstat    (PMSTAT_CX | 0x2)
#define PMSTAT_reset_cxstat  (PMSTAT_CX | 0x3)
    uint32_t type;   /* IN: PMSTAT_* sub-command. */
    uint32_t cpuid;  /* IN: Physical CPU of interest. */
    union {
        struct pm_px_stat getpx;
        struct pm_cx_stat getcx;
        /* other struct for tx, etc */
    } u;
};
241 
/* XEN_SYSCTL_cpu_hotplug */
/* Bring a physical CPU online or take it offline. */
struct xen_sysctl_cpu_hotplug {
    /* IN variables */
    uint32_t cpu;   /* Physical cpu. */
#define XEN_SYSCTL_CPU_HOTPLUG_ONLINE  0
#define XEN_SYSCTL_CPU_HOTPLUG_OFFLINE 1
    uint32_t op;    /* hotplug opcode */
};
250 
251 /*
252  * Get/set xen power management, include
253  * 1. cpufreq governors and related parameters
254  */
255 /* XEN_SYSCTL_pm_op */
256 struct xen_userspace {
257     uint32_t scaling_setspeed;
258 };
259 
260 struct xen_ondemand {
261     uint32_t sampling_rate_max;
262     uint32_t sampling_rate_min;
263 
264     uint32_t sampling_rate;
265     uint32_t up_threshold;
266 };
267 
268 /*
269  * cpufreq para name of this structure named
270  * same as sysfs file name of native linux
271  */
272 #define CPUFREQ_NAME_LEN 16
273 struct xen_get_cpufreq_para {
274     /* IN/OUT variable */
275     uint32_t cpu_num;
276     uint32_t freq_num;
277     uint32_t gov_num;
278 
279     /* for all governors */
280     /* OUT variable */
281     XEN_GUEST_HANDLE_64(uint32) affected_cpus;
282     XEN_GUEST_HANDLE_64(uint32) scaling_available_frequencies;
283     XEN_GUEST_HANDLE_64(char)   scaling_available_governors;
284     char scaling_driver[CPUFREQ_NAME_LEN];
285 
286     uint32_t cpuinfo_cur_freq;
287     uint32_t cpuinfo_max_freq;
288     uint32_t cpuinfo_min_freq;
289     uint32_t scaling_cur_freq;
290 
291     char scaling_governor[CPUFREQ_NAME_LEN];
292     uint32_t scaling_max_freq;
293     uint32_t scaling_min_freq;
294 
295     /* for specific governor */
296     union {
297         struct  xen_userspace userspace;
298         struct  xen_ondemand ondemand;
299     } u;
300 
301     int32_t turbo_enabled;
302 };
303 
/* Select a cpufreq governor by name (sysfs-style name, e.g. "ondemand"). */
struct xen_set_cpufreq_gov {
    char scaling_governor[CPUFREQ_NAME_LEN];
};

/* Set one cpufreq tunable, selected by @ctrl_type. */
struct xen_set_cpufreq_para {
    #define SCALING_MAX_FREQ           1
    #define SCALING_MIN_FREQ           2
    #define SCALING_SETSPEED           3
    #define SAMPLING_RATE              4
    #define UP_THRESHOLD               5

    uint32_t ctrl_type;   /* IN: one of the SCALING_*/SAMPLING_*/UP_* values. */
    uint32_t ctrl_value;  /* IN: new value for the selected tunable. */
};
318 
/* Top-level power-management operation; @cmd selects the union member used. */
struct xen_sysctl_pm_op {
    #define PM_PARA_CATEGORY_MASK      0xf0
    #define CPUFREQ_PARA               0x10

    /* cpufreq command type */
    #define GET_CPUFREQ_PARA           (CPUFREQ_PARA | 0x01)
    #define SET_CPUFREQ_GOV            (CPUFREQ_PARA | 0x02)
    #define SET_CPUFREQ_PARA           (CPUFREQ_PARA | 0x03)
    #define GET_CPUFREQ_AVGFREQ        (CPUFREQ_PARA | 0x04)

    /* set/reset scheduler power saving option */
    #define XEN_SYSCTL_pm_op_set_sched_opt_smt    0x21

    /* cpuidle max_cstate access command */
    #define XEN_SYSCTL_pm_op_get_max_cstate       0x22
    #define XEN_SYSCTL_pm_op_set_max_cstate       0x23

    /* set scheduler migration cost value */
    #define XEN_SYSCTL_pm_op_set_vcpu_migration_delay   0x24
    #define XEN_SYSCTL_pm_op_get_vcpu_migration_delay   0x25

    /* enable/disable turbo mode when in dbs governor */
    #define XEN_SYSCTL_pm_op_enable_turbo               0x26
    #define XEN_SYSCTL_pm_op_disable_turbo              0x27

    uint32_t cmd;    /* IN: one of the command values defined above. */
    uint32_t cpuid;  /* IN: target physical CPU. */
    union {
        struct xen_get_cpufreq_para get_para;
        struct xen_set_cpufreq_gov  set_gov;
        struct xen_set_cpufreq_para set_para;
        uint64_aligned_t get_avgfreq;
        uint32_t                    set_sched_opt_smt;
        uint32_t                    get_max_cstate;
        uint32_t                    set_max_cstate;
        uint32_t                    get_vcpu_migration_delay;
        uint32_t                    set_vcpu_migration_delay;
    } u;
};
358 
/* XEN_SYSCTL_page_offline_op */
/* Offline/online/query a range of machine pages [start, end]. */
struct xen_sysctl_page_offline_op {
    /* IN: range of page to be offlined */
#define sysctl_page_offline     1
#define sysctl_page_online      2
#define sysctl_query_page_offline  3
    uint32_t cmd;
    uint32_t start;
    uint32_t end;
    /* OUT: result of page offline request */
    /*
     * bit 0~15: result flags
     * bit 16~31: owner
     */
    XEN_GUEST_HANDLE(uint32) status;  /* One status word per page in range. */
};
375 
/* Flag values reported via xen_sysctl_page_offline_op.status entries. */
#define PG_OFFLINE_STATUS_MASK    (0xFFUL)

/* The result is invalid, i.e. HV does not handle it */
#define PG_OFFLINE_INVALID   (0x1UL << 0)

/* Outcome of an offline request. */
#define PG_OFFLINE_OFFLINED  (0x1UL << 1)
#define PG_OFFLINE_PENDING   (0x1UL << 2)
#define PG_OFFLINE_FAILED    (0x1UL << 3)
#define PG_OFFLINE_AGAIN     (0x1UL << 4)

/* Online requests reuse the offline flag bits. */
#define PG_ONLINE_FAILED     PG_OFFLINE_FAILED
#define PG_ONLINE_ONLINED    PG_OFFLINE_OFFLINED

/* Results of a query (sysctl_query_page_offline). */
#define PG_OFFLINE_STATUS_OFFLINED              (0x1UL << 1)
#define PG_OFFLINE_STATUS_ONLINE                (0x1UL << 2)
#define PG_OFFLINE_STATUS_OFFLINE_PENDING       (0x1UL << 3)
#define PG_OFFLINE_STATUS_BROKEN                (0x1UL << 4)

#define PG_OFFLINE_MISC_MASK    (0xFFUL << 4)

/* valid when PG_OFFLINE_FAILED or PG_OFFLINE_PENDING */
#define PG_OFFLINE_XENPAGE   (0x1UL << 8)
#define PG_OFFLINE_DOM0PAGE  (0x1UL << 9)
#define PG_OFFLINE_ANONYMOUS (0x1UL << 10)
#define PG_OFFLINE_NOT_CONV_RAM   (0x1UL << 11)
#define PG_OFFLINE_OWNED     (0x1UL << 12)

#define PG_OFFLINE_BROKEN    (0x1UL << 13)
#define PG_ONLINE_BROKEN     PG_OFFLINE_BROKEN

/* When PG_OFFLINE_OWNED is set, bits 16-31 hold the owning domain id. */
#define PG_OFFLINE_OWNER_SHIFT 16
407 
/* XEN_SYSCTL_lockprof_op */
/* Sub-operations: */
#define XEN_SYSCTL_LOCKPROF_reset 1   /* Reset all profile data to zero. */
#define XEN_SYSCTL_LOCKPROF_query 2   /* Get lock profile information. */
/* Record-type: */
#define LOCKPROF_TYPE_GLOBAL      0   /* global lock, idx meaningless */
#define LOCKPROF_TYPE_PERDOM      1   /* per-domain lock, idx is domid */
#define LOCKPROF_TYPE_N           2   /* number of types */
/* One lock-profiling record, as returned by a query. */
struct xen_sysctl_lockprof_data {
    char     name[40];     /* lock name (may include up to 2 %d specifiers) */
    int32_t  type;         /* LOCKPROF_TYPE_??? */
    int32_t  idx;          /* index (e.g. domain id) */
    uint64_aligned_t lock_cnt;     /* # of locking succeeded */
    uint64_aligned_t block_cnt;    /* # of wait for lock */
    uint64_aligned_t lock_time;    /* nsecs lock held */
    uint64_aligned_t block_time;   /* nsecs waited for lock */
};
typedef struct xen_sysctl_lockprof_data xen_sysctl_lockprof_data_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_lockprof_data_t);
struct xen_sysctl_lockprof_op {
    /* IN variables. */
    uint32_t       cmd;               /* XEN_SYSCTL_LOCKPROF_??? */
    uint32_t       max_elem;          /* size of output buffer */
    /* OUT variables (query only). */
    uint32_t       nr_elem;           /* number of elements available */
    uint64_aligned_t time;            /* nsecs of profile measurement */
    /* profile information (or NULL) */
    XEN_GUEST_HANDLE_64(xen_sysctl_lockprof_data_t) data;
};
437 
/* XEN_SYSCTL_cputopoinfo */
#define XEN_INVALID_CORE_ID     (~0U)
#define XEN_INVALID_SOCKET_ID   (~0U)
#define XEN_INVALID_NODE_ID     (~0U)

/* Topology of one CPU; XEN_INVALID_*_ID marks unavailable information. */
struct xen_sysctl_cputopo {
    uint32_t core;
    uint32_t socket;
    uint32_t node;
};
typedef struct xen_sysctl_cputopo xen_sysctl_cputopo_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cputopo_t);
450 
451 /*
452  * IN:
 *  - a NULL 'cputopo' handle is a request for maximum 'num_cpus'.
454  *  - otherwise it's the number of entries in 'cputopo'
455  *
456  * OUT:
 *  - If 'num_cpus' is less than the number Xen wants to write but the
 *    handle is not a NULL one, partial data gets returned and 'num_cpus' gets
 *    updated to reflect the intended number.
460  *  - Otherwise, 'num_cpus' shall indicate the number of entries written, which
461  *    may be less than the input value.
462  */
/* IN/OUT semantics of both fields are described in the comment above. */
struct xen_sysctl_cputopoinfo {
    uint32_t num_cpus;
    XEN_GUEST_HANDLE_64(xen_sysctl_cputopo_t) cputopo;
};
467 
/* XEN_SYSCTL_numainfo */
#define XEN_INVALID_MEM_SZ     (~0U)
#define XEN_INVALID_NODE_DIST  (~0U)

/* Memory totals for one NUMA node, in bytes. */
struct xen_sysctl_meminfo {
    uint64_t memsize;
    uint64_t memfree;
};
typedef struct xen_sysctl_meminfo xen_sysctl_meminfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_meminfo_t);
478 
479 /*
480  * IN:
481  *  - Both 'meminfo' and 'distance' handles being null is a request
482  *    for maximum value of 'num_nodes'.
483  *  - Otherwise it's the number of entries in 'meminfo' and square root
484  *    of number of entries in 'distance' (when corresponding handle is
485  *    non-null)
486  *
487  * OUT:
488  *  - If 'num_nodes' is less than the number Xen wants to write but either
489  *    handle is not a NULL one, partial data gets returned and 'num_nodes'
490  *    gets updated to reflect the intended number.
491  *  - Otherwise, 'num_nodes' shall indicate the number of entries written, which
492  *    may be less than the input value.
493  */
494 
/* IN/OUT semantics of 'num_nodes' are described in the comment above. */
struct xen_sysctl_numainfo {
    uint32_t num_nodes;

    XEN_GUEST_HANDLE_64(xen_sysctl_meminfo_t) meminfo;

    /*
     * Distance between nodes 'i' and 'j' is stored in index 'i*N + j',
     * where N is the number of nodes that will be returned in 'num_nodes'
     * (i.e. not 'num_nodes' provided by the caller)
     */
    XEN_GUEST_HANDLE_64(uint32) distance;
};
507 
/* XEN_SYSCTL_cpupool_op */
#define XEN_SYSCTL_CPUPOOL_OP_CREATE                1  /* C */
#define XEN_SYSCTL_CPUPOOL_OP_DESTROY               2  /* D */
#define XEN_SYSCTL_CPUPOOL_OP_INFO                  3  /* I */
#define XEN_SYSCTL_CPUPOOL_OP_ADDCPU                4  /* A */
#define XEN_SYSCTL_CPUPOOL_OP_RMCPU                 5  /* R */
#define XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN            6  /* M */
#define XEN_SYSCTL_CPUPOOL_OP_FREEINFO              7  /* F */
#define XEN_SYSCTL_CPUPOOL_PAR_ANY     0xFFFFFFFF
/*
 * Field comments below use the single-letter codes defined above to say
 * which sub-operations consume (IN) or produce (OUT) each field.
 */
struct xen_sysctl_cpupool_op {
    uint32_t op;          /* IN */
    uint32_t cpupool_id;  /* IN: CDIARM OUT: CI */
    uint32_t sched_id;    /* IN: C      OUT: I  */
    uint32_t domid;       /* IN: M              */
    uint32_t cpu;         /* IN: AR             */
    uint32_t n_dom;       /*            OUT: I  */
    struct xenctl_bitmap cpumap; /*     OUT: IF */
};
526 
527 /*
528  * Error return values of cpupool operations:
529  *
530  * -EADDRINUSE:
531  *  XEN_SYSCTL_CPUPOOL_OP_RMCPU: A vcpu is temporarily pinned to the cpu
532  *    which is to be removed from a cpupool.
533  * -EADDRNOTAVAIL:
534  *  XEN_SYSCTL_CPUPOOL_OP_ADDCPU, XEN_SYSCTL_CPUPOOL_OP_RMCPU: A previous
535  *    request to remove a cpu from a cpupool was terminated with -EAGAIN
536  *    and has not been retried using the same parameters.
537  * -EAGAIN:
538  *  XEN_SYSCTL_CPUPOOL_OP_RMCPU: The cpu can't be removed from the cpupool
539  *    as it is active in the hypervisor. A retry will succeed soon.
540  * -EBUSY:
541  *  XEN_SYSCTL_CPUPOOL_OP_DESTROY, XEN_SYSCTL_CPUPOOL_OP_RMCPU: A cpupool
542  *    can't be destroyed or the last cpu can't be removed as there is still
543  *    a running domain in that cpupool.
544  * -EEXIST:
545  *  XEN_SYSCTL_CPUPOOL_OP_CREATE: A cpupool_id was specified and is already
546  *    existing.
547  * -EINVAL:
548  *  XEN_SYSCTL_CPUPOOL_OP_ADDCPU, XEN_SYSCTL_CPUPOOL_OP_RMCPU: An illegal
549  *    cpu was specified (cpu does not exist).
550  *  XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN: An illegal domain was specified
551  *    (domain id illegal or not suitable for operation).
552  * -ENODEV:
553  *  XEN_SYSCTL_CPUPOOL_OP_ADDCPU, XEN_SYSCTL_CPUPOOL_OP_RMCPU: The specified
554  *    cpu is either not free (add) or not member of the specified cpupool
555  *    (remove).
556  * -ENOENT:
557  *  all: The cpupool with the specified cpupool_id doesn't exist.
558  *
559  * Some common error return values like -ENOMEM and -EFAULT are possible for
560  * all the operations.
561  */
562 
#define ARINC653_MAX_DOMAINS_PER_SCHEDULE   64
/*
 * This structure is used to pass a new ARINC653 schedule from a
 * privileged domain (i.e. dom0) to Xen.
 */
struct xen_sysctl_arinc653_schedule {
    /* major_frame holds the time for the new schedule's major frame
     * in nanoseconds. */
    uint64_aligned_t     major_frame;
    /* num_sched_entries holds how many of the entries in the
     * sched_entries[] array are valid. */
    uint8_t     num_sched_entries;
    /* The sched_entries array holds the actual schedule entries. */
    struct {
        /* dom_handle must match a domain's UUID */
        xen_domain_handle_t dom_handle;
        /* If a domain has multiple VCPUs, vcpu_id specifies which one
         * this schedule entry applies to. It should be set to 0 if
         * there is only one VCPU for the domain. */
        unsigned int vcpu_id;
        /* runtime specifies the amount of time that should be allocated
         * to this VCPU per major frame. It is specified in nanoseconds */
        uint64_aligned_t runtime;
    } sched_entries[ARINC653_MAX_DOMAINS_PER_SCHEDULE];
};
typedef struct xen_sysctl_arinc653_schedule xen_sysctl_arinc653_schedule_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_arinc653_schedule_t);
590 
591 /*
592  * Valid range for context switch rate limit (in microseconds).
593  * Applicable to Credit and Credit2 schedulers.
594  */
595 #define XEN_SYSCTL_SCHED_RATELIMIT_MAX 500000
596 #define XEN_SYSCTL_SCHED_RATELIMIT_MIN 100
597 
598 struct xen_sysctl_credit_schedule {
599     /* Length of timeslice in milliseconds */
600 #define XEN_SYSCTL_CSCHED_TSLICE_MAX 1000
601 #define XEN_SYSCTL_CSCHED_TSLICE_MIN 1
602     unsigned tslice_ms;
603     unsigned ratelimit_us;
604 };
605 
606 struct xen_sysctl_credit2_schedule {
607     unsigned ratelimit_us;
608 };
609 
/* XEN_SYSCTL_scheduler_op */
/* Set or get info? */
#define XEN_SYSCTL_SCHEDOP_putinfo 0
#define XEN_SYSCTL_SCHEDOP_getinfo 1
struct xen_sysctl_scheduler_op {
    uint32_t cpupool_id; /* Cpupool whose scheduler is to be targeted. */
    uint32_t sched_id;   /* XEN_SCHEDULER_* (domctl.h) */
    uint32_t cmd;        /* XEN_SYSCTL_SCHEDOP_* */
    union {              /* Member selected by @sched_id. */
        struct xen_sysctl_sched_arinc653 {
            XEN_GUEST_HANDLE_64(xen_sysctl_arinc653_schedule_t) schedule;
        } sched_arinc653;
        struct xen_sysctl_credit_schedule sched_credit;
        struct xen_sysctl_credit2_schedule sched_credit2;
    } u;
};
626 
627 /*
628  * Output format of gcov data:
629  *
630  * XEN_GCOV_FORMAT_MAGIC XEN_GCOV_RECORD ... XEN_GCOV_RECORD
631  *
 * That is, one magic number followed by zero or more records.
633  *
634  * The magic number is stored as an uint32_t field.
635  *
636  * The record is packed and variable in length. It has the form:
637  *
638  *  filename: a NULL terminated path name extracted from gcov, used to
639  *            create the name of gcda file.
640  *  size:     a uint32_t field indicating the size of the payload, the
641  *            unit is byte.
642  *  payload:  the actual payload, length is `size' bytes.
643  *
644  * Userspace tool will split the record to different files.
645  */
646 
#define XEN_GCOV_FORMAT_MAGIC    0x58434f56 /* XCOV */

#define XEN_SYSCTL_GCOV_get_size 0 /* Get total size of output data */
#define XEN_SYSCTL_GCOV_read     1 /* Read output data */
#define XEN_SYSCTL_GCOV_reset    2 /* Reset all counters */

/* Access Xen's gcov coverage data (format described in the comment above). */
struct xen_sysctl_gcov_op {
    uint32_t cmd;  /* IN: XEN_SYSCTL_GCOV_* */
    uint32_t size; /* IN/OUT: size of the buffer  */
    XEN_GUEST_HANDLE_64(char) buffer; /* OUT */
};
658 
/* Query Platform Shared Resource (PSR) cache monitoring capabilities. */
#define XEN_SYSCTL_PSR_CMT_get_total_rmid            0
#define XEN_SYSCTL_PSR_CMT_get_l3_upscaling_factor   1
/* The L3 cache size is returned in KB unit */
#define XEN_SYSCTL_PSR_CMT_get_l3_cache_size         2
#define XEN_SYSCTL_PSR_CMT_enabled                   3
#define XEN_SYSCTL_PSR_CMT_get_l3_event_mask         4
struct xen_sysctl_psr_cmt_op {
    uint32_t cmd;       /* IN: XEN_SYSCTL_PSR_CMT_* */
    uint32_t flags;     /* padding variable, may be extended for future use */
    union {
        uint64_t data;  /* OUT */
        struct {
            uint32_t cpu;   /* IN */
            uint32_t rsvd;  /* Reserved/padding; set to 0. */
        } l3_cache;
    } u;
};
676 
/* XEN_SYSCTL_pcitopoinfo */
/* Map PCI devices to their NUMA nodes. */
#define XEN_INVALID_DEV (XEN_INVALID_NODE_ID - 1)
struct xen_sysctl_pcitopoinfo {
    /*
     * IN: Number of elements in 'pcitopo' and 'nodes' arrays.
     * OUT: Number of processed elements of those arrays.
     */
    uint32_t num_devs;

    /* IN: list of devices for which node IDs are requested. */
    XEN_GUEST_HANDLE_64(physdev_pci_device_t) devs;

    /*
     * OUT: node identifier for each device.
     * If information for a particular device is not available then
     * corresponding entry will be set to XEN_INVALID_NODE_ID. If
     * device is not known to the hypervisor then XEN_INVALID_DEV
     * will be provided.
     */
    XEN_GUEST_HANDLE_64(uint32) nodes;
};
698 
/* Query Cache Allocation Technology (CAT) capabilities. */
#define XEN_SYSCTL_PSR_CAT_get_l3_info               0
#define XEN_SYSCTL_PSR_CAT_get_l2_info               1
struct xen_sysctl_psr_cat_op {
    uint32_t cmd;       /* IN: XEN_SYSCTL_PSR_CAT_* */
    uint32_t target;    /* IN */
    union {
        struct {
            uint32_t cbm_len;   /* OUT: CBM length */
            uint32_t cos_max;   /* OUT: Maximum COS */
#define XEN_SYSCTL_PSR_CAT_L3_CDP       (1u << 0)
            uint32_t flags;     /* OUT: CAT flags */
        } cat_info;
    } u;
};
713 
/* Wildcard client id matching all tmem clients. */
#define XEN_SYSCTL_TMEM_OP_ALL_CLIENTS 0xFFFFU

/* tmem sub-commands; note the command values are not contiguous. */
#define XEN_SYSCTL_TMEM_OP_THAW                   0
#define XEN_SYSCTL_TMEM_OP_FREEZE                 1
#define XEN_SYSCTL_TMEM_OP_FLUSH                  2
#define XEN_SYSCTL_TMEM_OP_DESTROY                3
#define XEN_SYSCTL_TMEM_OP_LIST                   4
#define XEN_SYSCTL_TMEM_OP_GET_CLIENT_INFO        5
#define XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO        6
#define XEN_SYSCTL_TMEM_OP_GET_POOLS              7
#define XEN_SYSCTL_TMEM_OP_QUERY_FREEABLE_MB      8
#define XEN_SYSCTL_TMEM_OP_SET_POOLS              9
#define XEN_SYSCTL_TMEM_OP_SAVE_BEGIN             10
#define XEN_SYSCTL_TMEM_OP_SET_AUTH               11
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_PAGE     19
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_INV      20
#define XEN_SYSCTL_TMEM_OP_SAVE_END               21
#define XEN_SYSCTL_TMEM_OP_RESTORE_BEGIN          30
#define XEN_SYSCTL_TMEM_OP_RESTORE_PUT_PAGE       32
#define XEN_SYSCTL_TMEM_OP_RESTORE_FLUSH_PAGE     33
734 
735 /*
 * XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_[PAGE|INV] override the 'buf' in
 * xen_sysctl_tmem_op with this structure - sometimes with an extra
 * page tacked on.
739  */
struct tmem_handle {
    uint32_t pool_id;    /* Pool the page/invalidation belongs to. */
    uint32_t index;      /* Page index within the object. */
    xen_tmem_oid_t oid;  /* Object identifier. */
};
745 
746 /*
747  * XEN_SYSCTL_TMEM_OP_[GET,SAVE]_CLIENT uses the 'client' in
748  * xen_tmem_op with this structure, which is mostly used during migration.
749  */
struct xen_tmem_client {
    uint32_t version;   /* If mismatched we will get XEN_EOPNOTSUPP. */
    uint32_t maxpools;  /* If greater than what hypervisor supports, will get
                           XEN_ERANGE. */
    uint32_t nr_pools;  /* Current amount of pools. Ignored on SET*/
    union {             /* See TMEM_CLIENT_[COMPRESS,FROZEN] */
        uint32_t raw;
        struct {
            uint8_t frozen:1,
                    compress:1,
                    migrating:1;
        } u;
    } flags;
    uint32_t weight;    /* Client's share weight. */
};
typedef struct xen_tmem_client xen_tmem_client_t;
DEFINE_XEN_GUEST_HANDLE(xen_tmem_client_t);
767 
768 /*
769  * XEN_SYSCTL_TMEM_OP_[GET|SET]_POOLS or XEN_SYSCTL_TMEM_OP_SET_AUTH
770  * uses the 'pool' array in * xen_sysctl_tmem_op with this structure.
771  * The XEN_SYSCTL_TMEM_OP_GET_POOLS hypercall will
772  * return the number of entries in 'pool' or a negative value
773  * if an error was encountered.
774  * The XEN_SYSCTL_TMEM_OP_SET_[AUTH|POOLS] will return the number of
775  * entries in 'pool' processed or a negative value if an error
776  * was encountered.
777  */
struct xen_tmem_pool_info {
    union {
        uint32_t raw;
        struct {
            uint32_t persist:1,    /* See TMEM_POOL_PERSIST. */
                     shared:1,     /* See TMEM_POOL_SHARED. */
                     auth:1,       /* See TMEM_POOL_AUTH. */
                     rsv1:1,
                     pagebits:8,   /* TMEM_POOL_PAGESIZE_[SHIFT,MASK]. */
                     rsv2:12,
                     version:8;    /* TMEM_POOL_VERSION_[SHIFT,MASK]. */
        } u;
    } flags;
    uint32_t id;                  /* Less than tmem_client.maxpools. */
    uint64_t n_pages;             /* Zero on XEN_SYSCTL_TMEM_OP_SET_[AUTH|POOLS]. */
    uint64_aligned_t uuid[2];     /* 128-bit pool UUID. */
};
typedef struct xen_tmem_pool_info xen_tmem_pool_info_t;
DEFINE_XEN_GUEST_HANDLE(xen_tmem_pool_info_t);
797 
/* Top-level tmem control operation; union member selected by @cmd. */
struct xen_sysctl_tmem_op {
    uint32_t cmd;       /* IN: XEN_SYSCTL_TMEM_OP_* . */
    int32_t pool_id;    /* IN: 0 by default unless _SAVE_*, RESTORE_* .*/
    uint32_t cli_id;    /* IN: client id, 0 for XEN_SYSCTL_TMEM_QUERY_FREEABLE_MB
                           for all others can be the domain id or
                           XEN_SYSCTL_TMEM_OP_ALL_CLIENTS for all. */
    uint32_t len;       /* IN: length of 'buf'. If not applicable to use 0. */
    uint32_t arg;       /* IN: If not applicable to command use 0. */
    uint32_t pad;       /* Padding so structure is the same under 32 and 64. */
    xen_tmem_oid_t oid; /* IN: If not applicable to command use 0s. */
    union {
        XEN_GUEST_HANDLE_64(char) buf; /* IN/OUT: Buffer to save/restore */
        XEN_GUEST_HANDLE_64(xen_tmem_client_t) client; /* IN/OUT for */
                        /*  XEN_SYSCTL_TMEM_OP_[GET,SAVE]_CLIENT. */
        XEN_GUEST_HANDLE_64(xen_tmem_pool_info_t) pool; /* OUT for */
                        /* XEN_SYSCTL_TMEM_OP_GET_POOLS. Must have 'len' */
                        /* of them. */
    } u;
};
817 
818 /*
819  * XEN_SYSCTL_get_cpu_levelling_caps (x86 specific)
820  *
821  * Return hardware capabilities concerning masking or faulting of the cpuid
822  * instruction for PV guests.
823  */
struct xen_sysctl_cpu_levelling_caps {
#define XEN_SYSCTL_CPU_LEVELCAP_faulting    (1ul <<  0) /* CPUID faulting    */
#define XEN_SYSCTL_CPU_LEVELCAP_ecx         (1ul <<  1) /* 0x00000001.ecx    */
#define XEN_SYSCTL_CPU_LEVELCAP_edx         (1ul <<  2) /* 0x00000001.edx    */
#define XEN_SYSCTL_CPU_LEVELCAP_extd_ecx    (1ul <<  3) /* 0x80000001.ecx    */
#define XEN_SYSCTL_CPU_LEVELCAP_extd_edx    (1ul <<  4) /* 0x80000001.edx    */
#define XEN_SYSCTL_CPU_LEVELCAP_xsave_eax   (1ul <<  5) /* 0x0000000D:1.eax  */
#define XEN_SYSCTL_CPU_LEVELCAP_thermal_ecx (1ul <<  6) /* 0x00000006.ecx    */
#define XEN_SYSCTL_CPU_LEVELCAP_l7s0_eax    (1ul <<  7) /* 0x00000007:0.eax  */
#define XEN_SYSCTL_CPU_LEVELCAP_l7s0_ebx    (1ul <<  8) /* 0x00000007:0.ebx  */
    uint32_t caps;      /* OUT: Mask of XEN_SYSCTL_CPU_LEVELCAP_* bits. */
};
836 
837 /*
838  * XEN_SYSCTL_get_cpu_featureset (x86 specific)
839  *
840  * Return information about featuresets available on this host.
841  *  -  Raw: The real cpuid values.
842  *  - Host: The values Xen is using, (after command line overrides, etc).
843  *  -   PV: Maximum set of features which can be given to a PV guest.
844  *  -  HVM: Maximum set of features which can be given to a HVM guest.
845  */
struct xen_sysctl_cpu_featureset {
#define XEN_SYSCTL_cpu_featureset_raw      0
#define XEN_SYSCTL_cpu_featureset_host     1
#define XEN_SYSCTL_cpu_featureset_pv       2
#define XEN_SYSCTL_cpu_featureset_hvm      3
    uint32_t index;       /* IN: Which featureset to query? */
    uint32_t nr_features; /* IN/OUT: Number of entries in/written to
                           * 'features', or the maximum number of features if
                           * the guest handle is NULL.  NB. All featuresets
                           * come from the same numberspace, so have the same
                           * maximum length. */
    XEN_GUEST_HANDLE_64(uint32) features; /* OUT: Requested featureset words. */
};
859 
860 /*
861  * XEN_SYSCTL_LIVEPATCH_op
862  *
863  * Refer to the docs/unstable/misc/livepatch.markdown
864  * for the design details of this hypercall.
865  *
866  * There are four sub-ops:
867  *  XEN_SYSCTL_LIVEPATCH_UPLOAD (0)
868  *  XEN_SYSCTL_LIVEPATCH_GET (1)
869  *  XEN_SYSCTL_LIVEPATCH_LIST (2)
870  *  XEN_SYSCTL_LIVEPATCH_ACTION (3)
871  *
872  * The normal sequence of sub-ops is to:
873  *  1) XEN_SYSCTL_LIVEPATCH_UPLOAD to upload the payload. If errors STOP.
874  *  2) XEN_SYSCTL_LIVEPATCH_GET to check the `->rc`. If -XEN_EAGAIN spin.
875  *     If zero go to next step.
876  *  3) XEN_SYSCTL_LIVEPATCH_ACTION with LIVEPATCH_ACTION_APPLY to apply the patch.
 *  4) XEN_SYSCTL_LIVEPATCH_GET to check the `->rc`. If -XEN_EAGAIN spin.
 *     If zero exit with success.
879  */
880 
881 #define LIVEPATCH_PAYLOAD_VERSION 1
882 /*
883  * .livepatch.funcs structure layout defined in the `Payload format`
884  * section in the Live Patch design document.
885  *
886  * We guard this with __XEN__ as toolstacks SHOULD not use it.
887  */
#ifdef __XEN__
/* One entry of the payload's .livepatch.funcs section; layout is fixed by
 * LIVEPATCH_PAYLOAD_VERSION (see the Live Patch design document). */
struct livepatch_func {
    const char *name;       /* Name of function to be patched. */
    void *new_addr;         /* Replacement code address (per design doc). */
    void *old_addr;         /* Address of the code being patched. */
    uint32_t new_size;      /* Size of the replacement code. */
    uint32_t old_size;      /* Size of the code being patched. */
    uint8_t version;        /* MUST be LIVEPATCH_PAYLOAD_VERSION. */
    uint8_t opaque[31];     /* Reserved for hypervisor-internal use. */
};
typedef struct livepatch_func livepatch_func_t;
#endif
900 
901 /*
902  * Structure describing an ELF payload. Uniquely identifies the
903  * payload. Should be human readable.
 * Recommended length is up to XEN_LIVEPATCH_NAME_SIZE.
905  * Includes the NUL terminator.
906  */
#define XEN_LIVEPATCH_NAME_SIZE 128
/* Identifies a payload by name; embedded in every livepatch sub-op. */
struct xen_livepatch_name {
    XEN_GUEST_HANDLE_64(char) name;         /* IN: pointer to name. */
    uint16_t size;                          /* IN: size of name. May be up to
                                               XEN_LIVEPATCH_NAME_SIZE. */
    uint16_t pad[3];                        /* IN: MUST be zero. */
};
914 
915 /*
916  * Upload a payload to the hypervisor. The payload is verified
917  * against basic checks and if there are any issues the proper return code
918  * will be returned. The payload is not applied at this time - that is
919  * controlled by XEN_SYSCTL_LIVEPATCH_ACTION.
920  *
 * The return value is zero if the payload was successfully uploaded.
 * Otherwise an EXX return value is provided. Duplicate `name`s are not
 * supported.
924  *
925  * The payload at this point is verified against basic checks.
926  *
927  * The `payload` is the ELF payload as mentioned in the `Payload format`
928  * section in the Live Patch design document.
929  */
#define XEN_SYSCTL_LIVEPATCH_UPLOAD 0
struct xen_sysctl_livepatch_upload {
    struct xen_livepatch_name name;         /* IN, name of the patch. */
    uint64_t size;                          /* IN, size of the ELF file. */
    XEN_GUEST_HANDLE_64(uint8) payload;     /* IN, the ELF file. */
};
936 
937 /*
 * Retrieve the status of a specific payload.
939  *
940  * Upon completion the `struct xen_livepatch_status` is updated.
941  *
942  * The return value is zero on success and XEN_EXX on failure. This operation
943  * is synchronous and does not require preemption.
944  */
#define XEN_SYSCTL_LIVEPATCH_GET 1

/* State and result of the most recent operation on a payload. */
struct xen_livepatch_status {
#define LIVEPATCH_STATE_CHECKED      1
#define LIVEPATCH_STATE_APPLIED      2
    uint32_t state;                /* OUT: LIVEPATCH_STATE_*. */
    int32_t rc;                    /* OUT: 0 if no error, otherwise -XEN_EXX. */
};
typedef struct xen_livepatch_status xen_livepatch_status_t;
DEFINE_XEN_GUEST_HANDLE(xen_livepatch_status_t);

struct xen_sysctl_livepatch_get {
    struct xen_livepatch_name name;         /* IN, name of the payload. */
    struct xen_livepatch_status status;     /* IN/OUT, state of it. */
};
960 
961 /*
962  * Retrieve an array of abbreviated status and names of payloads that are
963  * loaded in the hypervisor.
964  *
 * If the hypercall returns a positive number, it is the number (up to `nr`)
966  * of the payloads returned, along with `nr` updated with the number of remaining
967  * payloads, `version` updated (it may be the same across hypercalls. If it
968  * varies the data is stale and further calls could fail). The `status`,
969  * `name`, and `len`' are updated at their designed index value (`idx`) with
970  * the returned value of data.
971  *
972  * If the hypercall returns E2BIG the `nr` is too big and should be
 * lowered. The upper limit of `nr` is left to the implementation.
974  *
975  * Note that due to the asynchronous nature of hypercalls the domain might have
976  * added or removed the number of payloads making this information stale. It is
977  * the responsibility of the toolstack to use the `version` field to check
 * between each invocation. If the version differs it should discard the stale
979  * data and start from scratch. It is OK for the toolstack to use the new
980  * `version` field.
981  */
#define XEN_SYSCTL_LIVEPATCH_LIST 2
struct xen_sysctl_livepatch_list {
    uint32_t version;                       /* OUT: Hypervisor stamps value.
                                               If it varies between calls, we
                                               are getting stale data. */
    uint32_t idx;                           /* IN: Index into hypervisor list. */
    uint32_t nr;                            /* IN: How many status, name, and len
                                               should fill out. Can be zero to get
                                               amount of payloads and version.
                                               OUT: How many payloads left. */
    uint32_t pad;                           /* IN: Must be zero. */
    XEN_GUEST_HANDLE_64(xen_livepatch_status_t) status;  /* OUT. Must have enough
                                               space allocated for nr of them. */
    XEN_GUEST_HANDLE_64(char) name;         /* OUT: Array of names. Each member
                                               MUST be XEN_LIVEPATCH_NAME_SIZE in
                                               size. Must have nr of them. */
    XEN_GUEST_HANDLE_64(uint32) len;        /* OUT: Array of lengths of name's.
                                               Must have nr of them. */
};
1001 
1002 /*
1003  * Perform an operation on the payload structure referenced by the `name` field.
1004  * The operation request is asynchronous and the status should be retrieved
1005  * by using either XEN_SYSCTL_LIVEPATCH_GET or XEN_SYSCTL_LIVEPATCH_LIST hypercall.
1006  */
#define XEN_SYSCTL_LIVEPATCH_ACTION 3
struct xen_sysctl_livepatch_action {
    struct xen_livepatch_name name;         /* IN, name of the patch. */
#define LIVEPATCH_ACTION_UNLOAD       1
#define LIVEPATCH_ACTION_REVERT       2
#define LIVEPATCH_ACTION_APPLY        3
#define LIVEPATCH_ACTION_REPLACE      4
    uint32_t cmd;                           /* IN: LIVEPATCH_ACTION_*. */
    uint32_t timeout;                       /* IN: If zero then uses */
                                            /* hypervisor default. */
                                            /* Or upper bound of time (ns) */
                                            /* for operation to take. */
};
1020 
/* Top-level argument for XEN_SYSCTL_livepatch_op; @cmd selects which
 * union member is interpreted. */
struct xen_sysctl_livepatch_op {
    uint32_t cmd;                           /* IN: XEN_SYSCTL_LIVEPATCH_*. */
    uint32_t pad;                           /* IN: Always zero. */
    union {
        struct xen_sysctl_livepatch_upload upload;
        struct xen_sysctl_livepatch_list list;
        struct xen_sysctl_livepatch_get get;
        struct xen_sysctl_livepatch_action action;
    } u;
};
1031 
1032 /*
1033  * XEN_SYSCTL_set_parameter
1034  *
1035  * Change hypervisor parameters at runtime.
1036  * The input string is parsed similar to the boot parameters.
1037  * Parameters are a single string terminated by a NUL byte of max. size
1038  * characters. Multiple settings can be specified by separating them
1039  * with blanks.
1040  */
1041 
struct xen_sysctl_set_parameter {
    XEN_GUEST_HANDLE_64(char) params;       /* IN: pointer to parameters. */
    uint16_t size;                          /* IN: size of parameters. */
    uint16_t pad[3];                        /* IN: MUST be zero. */
};
1047 
/* Top-level sysctl hypercall argument; @cmd selects the sub-op and which
 * union member is interpreted. */
struct xen_sysctl {
    uint32_t cmd;
#define XEN_SYSCTL_readconsole                    1
#define XEN_SYSCTL_tbuf_op                        2
#define XEN_SYSCTL_physinfo                       3
#define XEN_SYSCTL_sched_id                       4
#define XEN_SYSCTL_perfc_op                       5
#define XEN_SYSCTL_getdomaininfolist              6
#define XEN_SYSCTL_debug_keys                     7
#define XEN_SYSCTL_getcpuinfo                     8
#define XEN_SYSCTL_availheap                      9
#define XEN_SYSCTL_get_pmstat                    10
#define XEN_SYSCTL_cpu_hotplug                   11
#define XEN_SYSCTL_pm_op                         12
/* 13 is unassigned (not reused, to preserve the numbering). */
#define XEN_SYSCTL_page_offline_op               14
#define XEN_SYSCTL_lockprof_op                   15
#define XEN_SYSCTL_cputopoinfo                   16
#define XEN_SYSCTL_numainfo                      17
#define XEN_SYSCTL_cpupool_op                    18
#define XEN_SYSCTL_scheduler_op                  19
#define XEN_SYSCTL_gcov_op                       20
#define XEN_SYSCTL_psr_cmt_op                    21
#define XEN_SYSCTL_pcitopoinfo                   22
#define XEN_SYSCTL_psr_cat_op                    23
#define XEN_SYSCTL_tmem_op                       24
#define XEN_SYSCTL_get_cpu_levelling_caps        25
#define XEN_SYSCTL_get_cpu_featureset            26
#define XEN_SYSCTL_livepatch_op                  27
#define XEN_SYSCTL_set_parameter                 28
    uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
    union {
        struct xen_sysctl_readconsole       readconsole;
        struct xen_sysctl_tbuf_op           tbuf_op;
        struct xen_sysctl_physinfo          physinfo;
        struct xen_sysctl_cputopoinfo       cputopoinfo;
        struct xen_sysctl_pcitopoinfo       pcitopoinfo;
        struct xen_sysctl_numainfo          numainfo;
        struct xen_sysctl_sched_id          sched_id;
        struct xen_sysctl_perfc_op          perfc_op;
        struct xen_sysctl_getdomaininfolist getdomaininfolist;
        struct xen_sysctl_debug_keys        debug_keys;
        struct xen_sysctl_getcpuinfo        getcpuinfo;
        struct xen_sysctl_availheap         availheap;
        struct xen_sysctl_get_pmstat        get_pmstat;
        struct xen_sysctl_cpu_hotplug       cpu_hotplug;
        struct xen_sysctl_pm_op             pm_op;
        struct xen_sysctl_page_offline_op   page_offline;
        struct xen_sysctl_lockprof_op       lockprof_op;
        struct xen_sysctl_cpupool_op        cpupool_op;
        struct xen_sysctl_scheduler_op      scheduler_op;
        struct xen_sysctl_gcov_op           gcov_op;
        struct xen_sysctl_psr_cmt_op        psr_cmt_op;
        struct xen_sysctl_psr_cat_op        psr_cat_op;
        struct xen_sysctl_tmem_op           tmem_op;
        struct xen_sysctl_cpu_levelling_caps cpu_levelling_caps;
        struct xen_sysctl_cpu_featureset    cpu_featureset;
        struct xen_sysctl_livepatch_op      livepatch;
        struct xen_sysctl_set_parameter     set_parameter;
        uint8_t                             pad[128];   /* Fixes the union size
                                                           across sub-ops. */
    } u;
};
typedef struct xen_sysctl xen_sysctl_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_t);
1111 
1112 #endif /* __XEN_PUBLIC_SYSCTL_H__ */
1113 
1114 /*
1115  * Local variables:
1116  * mode: C
1117  * c-file-style: "BSD"
1118  * c-basic-offset: 4
1119  * tab-width: 4
1120  * indent-tabs-mode: nil
1121  * End:
1122  */
1123