/* SPDX-License-Identifier: MIT */
/******************************************************************************
 * vcpu.h
 *
 * VCPU initialisation, query, and hotplug.
 *
 * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
 */

#ifndef __XEN_PUBLIC_VCPU_H__
#define __XEN_PUBLIC_VCPU_H__

#include "xen.h"

/*
 * Prototype for this hypercall is:
 *  long vcpu_op(int cmd, unsigned int vcpuid, void *extra_args)
 * @cmd        == VCPUOP_??? (VCPU operation).
 * @vcpuid     == VCPU to operate on.
 * @extra_args == Operation-specific extra arguments (NULL if none).
 */

/*
 * Initialise a VCPU. Each VCPU can be initialised only once. A
 * newly-initialised VCPU will not run until it is brought up by VCPUOP_up.
 *
 * @extra_arg == For PV or ARM guests this is a pointer to a vcpu_guest_context
 *               structure containing the initial state for the VCPU. For x86
 *               HVM based guests this is a pointer to a vcpu_hvm_context
 *               structure.
 */
#define VCPUOP_initialise            0

/*
 * Bring up a VCPU. This makes the VCPU runnable. This operation will fail
 * if the VCPU has not been initialised (VCPUOP_initialise).
 */
#define VCPUOP_up                    1

/*
 * Bring down a VCPU (i.e., make it non-runnable).
 * There are a few caveats that callers should observe:
 * 1. This operation may return, and VCPUOP_is_up may return false, before the
 *    VCPU stops running (i.e., the command is asynchronous). It is a good
 *    idea to ensure that the VCPU has entered a non-critical loop before
 *    bringing it down. Alternatively, this operation is guaranteed
 *    synchronous if invoked by the VCPU itself.
 * 2. After a VCPU is initialised, there is currently no way to drop all its
 *    references to domain memory. Even a VCPU that is down still holds
 *    memory references via its pagetable base pointer and GDT. It is good
 *    practice to move a VCPU onto an 'idle' or default page table, LDT and
 *    GDT before bringing it down.
 */
#define VCPUOP_down                  2

/* Returns 1 if the given VCPU is up. */
#define VCPUOP_is_up                 3

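/*
 * Illustrative sketch only (not part of this interface): bringing a secondary
 * VCPU online. This assumes a guest-side HYPERVISOR_vcpu_op() wrapper that
 * matches the hypercall prototype above, and a vcpu_guest_context "ctxt"
 * already populated with the new VCPU's initial state; both names are
 * examples, not defined by this header.
 *
 *     long rc = HYPERVISOR_vcpu_op(VCPUOP_initialise, vcpuid, &ctxt);
 *     if ( rc == 0 )
 *         rc = HYPERVISOR_vcpu_op(VCPUOP_up, vcpuid, NULL);
 *
 * Once VCPUOP_up has succeeded, VCPUOP_is_up (with @extra_args == NULL)
 * returns 1 for the same @vcpuid.
 */
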
/*
 * Return information about the state and running time of a VCPU.
 * @extra_arg == pointer to vcpu_runstate_info structure.
 */
#define VCPUOP_get_runstate_info     4
struct vcpu_runstate_info {
    /* VCPU's current state (RUNSTATE_*). */
    int      state;
    /* When was current state entered (system time, ns)? */
    uint64_t state_entry_time;
    /*
     * Update indicator set in state_entry_time:
     * When activated via VMASST_TYPE_runstate_update_flag, set during
     * updates in guest memory mapped copy of vcpu_runstate_info.
     */
#define XEN_RUNSTATE_UPDATE          (xen_mk_ullong(1) << 63)
    /*
     * Time spent in each RUNSTATE_* (ns). The sum of these times is
     * guaranteed not to drift from system time.
     */
    uint64_t time[4];
};
typedef struct vcpu_runstate_info vcpu_runstate_info_t;
DEFINE_XEN_GUEST_HANDLE(vcpu_runstate_info_t);

/* VCPU is currently running on a physical CPU. */
#define RUNSTATE_running  0

/* VCPU is runnable, but not currently scheduled on any physical CPU. */
#define RUNSTATE_runnable 1

/* VCPU is blocked (a.k.a. idle). It is therefore not runnable. */
#define RUNSTATE_blocked  2

/*
 * VCPU is not runnable, but it is not blocked.
 * This is a 'catch all' state for things like hotplug and pauses by the
 * system administrator (or for critical sections in the hypervisor).
 * RUNSTATE_blocked dominates this state (it is the preferred state).
 */
#define RUNSTATE_offline  3

/*
 * Register a shared memory area from which the guest may obtain its own
 * runstate information without needing to execute a hypercall.
 * Notes:
 * 1. The registered address may be a virtual address, a physical address, or
 *    a guest handle, depending on the platform. On x86 systems a virtual
 *    address or guest handle should be registered.
 * 2. Only one shared area may be registered per VCPU. The shared area is
 *    updated by the hypervisor each time the VCPU is scheduled. Thus
 *    runstate.state will always be RUNSTATE_running and
 *    runstate.state_entry_time will indicate the system time at which the
 *    VCPU was last scheduled to run.
 * 3. New code should prefer VCPUOP_register_runstate_phys_area, and only
 *    fall back to the operation here for backwards compatibility.
 * @extra_arg == pointer to vcpu_register_runstate_memory_area structure.
 */
#define VCPUOP_register_runstate_memory_area 5
struct vcpu_register_runstate_memory_area {
    union {
        XEN_GUEST_HANDLE(vcpu_runstate_info_t) h;
        struct vcpu_runstate_info *v;
        uint64_t p;
    } addr;
};
typedef struct vcpu_register_runstate_memory_area vcpu_register_runstate_memory_area_t;
DEFINE_XEN_GUEST_HANDLE(vcpu_register_runstate_memory_area_t);

/*
 * Set or stop a VCPU's periodic timer. Every VCPU has one periodic timer
 * which can be set via these commands. Periods smaller than one millisecond
 * may not be supported.
 */
#define VCPUOP_set_periodic_timer    6 /* arg == vcpu_set_periodic_timer_t */
#define VCPUOP_stop_periodic_timer   7 /* arg == NULL */
struct vcpu_set_periodic_timer {
    uint64_t period_ns;
};
typedef struct vcpu_set_periodic_timer vcpu_set_periodic_timer_t;
DEFINE_XEN_GUEST_HANDLE(vcpu_set_periodic_timer_t);

/*
 * Set or stop a VCPU's single-shot timer. Every VCPU has one single-shot
 * timer which can be set via these commands.
 */
#define VCPUOP_set_singleshot_timer  8 /* arg == vcpu_set_singleshot_timer_t */
#define VCPUOP_stop_singleshot_timer 9 /* arg == NULL */
struct vcpu_set_singleshot_timer {
    uint64_t timeout_abs_ns; /* Absolute system time value in nanoseconds. */
    uint32_t flags;          /* VCPU_SSHOTTMR_??? */
};
typedef struct vcpu_set_singleshot_timer vcpu_set_singleshot_timer_t;
DEFINE_XEN_GUEST_HANDLE(vcpu_set_singleshot_timer_t);

/* Flags to VCPUOP_set_singleshot_timer. */
/*
 * Request that the timeout be in the future (return -ETIME if it has already
 * passed); the flag may be ignored by the hypervisor.
 */
#define _VCPU_SSHOTTMR_future (0)
#define VCPU_SSHOTTMR_future  (1U << _VCPU_SSHOTTMR_future)

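/*
 * Illustrative sketch only (not part of this interface): arming a VCPU's
 * single-shot timer one millisecond into the future. "HYPERVISOR_vcpu_op" is
 * an assumed guest-side wrapper for the hypercall prototype at the top of
 * this header, and "now_ns" stands for the current system time in
 * nanoseconds, obtained by the guest elsewhere; neither is defined here.
 *
 *     struct vcpu_set_singleshot_timer single = {
 *         .timeout_abs_ns = now_ns + 1000000ULL,
 *         .flags          = VCPU_SSHOTTMR_future,
 *     };
 *     long rc = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, vcpuid, &single);
 *
 * With VCPU_SSHOTTMR_future set, a timeout that already lies in the past may
 * be rejected with -ETIME rather than firing immediately.
 */
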
/*
 * Register a memory location in the guest address space for the
 * vcpu_info structure. This allows the guest to place the vcpu_info
 * structure in a convenient place, such as in a per-cpu data area.
 * The pointer need not be page aligned, but the structure must not
 * cross a page boundary.
 *
 * This may be called only once per vcpu.
 */
#define VCPUOP_register_vcpu_info    10 /* arg == vcpu_register_vcpu_info_t */
struct vcpu_register_vcpu_info {
    uint64_t mfn;    /* mfn of page to place vcpu_info */
    uint32_t offset; /* offset within page */
    uint32_t rsvd;   /* unused */
};
typedef struct vcpu_register_vcpu_info vcpu_register_vcpu_info_t;
DEFINE_XEN_GUEST_HANDLE(vcpu_register_vcpu_info_t);

/* Send an NMI to the specified VCPU. @extra_arg == NULL. */
#define VCPUOP_send_nmi              11

/*
 * Get the physical ID information for a pinned vcpu's underlying physical
 * processor. The physical ID information is architecture-specific.
 * On x86: id[31:0]=apic_id, id[63:32]=acpi_id.
 * This command returns -EINVAL if it is not a valid operation for this VCPU.
 */
#define VCPUOP_get_physid            12 /* arg == vcpu_get_physid_t */
struct vcpu_get_physid {
    uint64_t phys_id;
};
typedef struct vcpu_get_physid vcpu_get_physid_t;
DEFINE_XEN_GUEST_HANDLE(vcpu_get_physid_t);
#define xen_vcpu_physid_to_x86_apicid(physid) ((uint32_t)(physid))
#define xen_vcpu_physid_to_x86_acpiid(physid) ((uint32_t)((physid) >> 32))

/*
 * Register a memory location to get a secondary copy of the vcpu time
 * parameters. The master copy still exists as part of the vcpu shared
 * memory area, and this secondary copy is updated whenever the master copy
 * is updated (and using the same versioning scheme for synchronisation).
 *
 * The intent is that this copy may be mapped (RO) into userspace so
 * that usermode can compute system time using the time info and the
 * tsc. Usermode will see an array of vcpu_time_info structures, one
 * for each vcpu, and choose the right one by an existing mechanism
 * which allows it to get the current vcpu number (such as via a
 * segment limit). It can then apply the normal algorithm to compute
 * system time from the tsc.
 *
 * New code should prefer VCPUOP_register_vcpu_time_phys_area, and only
 * fall back to the operation here for backwards compatibility.
 *
 * @extra_arg == pointer to vcpu_register_time_memory_area structure.
 */
#define VCPUOP_register_vcpu_time_memory_area 13
DEFINE_XEN_GUEST_HANDLE(vcpu_time_info_t);
struct vcpu_register_time_memory_area {
    union {
        XEN_GUEST_HANDLE(vcpu_time_info_t) h;
        struct vcpu_time_info *v;
        uint64_t p;
    } addr;
};
typedef struct vcpu_register_time_memory_area vcpu_register_time_memory_area_t;
DEFINE_XEN_GUEST_HANDLE(vcpu_register_time_memory_area_t);

/*
 * Like the respective VCPUOP_register_*_memory_area, just using the "addr.p"
 * field of the supplied struct as a guest physical address (i.e. in GFN space).
 * The respective area may not cross a page boundary. Pass ~0 to unregister an
 * area. Note that as long as an area is registered by physical address, the
 * linear address based area will not be serviced (updated) by the hypervisor.
 *
 * Note that the area registered via VCPUOP_register_runstate_phys_area will
 * be updated in the same manner as the one registered via virtual address PLUS
 * VMASST_TYPE_runstate_update_flag engaged by the domain.
 *
 * XENFEAT_{runstate,vcpu_time}_phys_area feature bits signal the availability
 * of these ops.
 */
#define VCPUOP_register_runstate_phys_area   14
#define VCPUOP_register_vcpu_time_phys_area  15

#endif /* __XEN_PUBLIC_VCPU_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */