1 /* SPDX-License-Identifier: MIT */
2 /******************************************************************************
3  * vm_event.h
4  *
5  * Memory event common structures.
6  *
7  * Copyright (c) 2009 by Citrix Systems, Inc. (Patrick Colp)
8  */
9 
10 #ifndef _XEN_PUBLIC_VM_EVENT_H
11 #define _XEN_PUBLIC_VM_EVENT_H
12 
13 #include "xen.h"
14 
15 #define VM_EVENT_INTERFACE_VERSION 0x00000007
16 
17 #if defined(__XEN__) || defined(__XEN_TOOLS__)
18 
19 #include "io/ring.h"
20 
/*
 * Memory event flags
 */

/*
 * VCPU_PAUSED in a request signals that the vCPU triggering the event has been
 *  paused
 * VCPU_PAUSED in a response signals to unpause the vCPU
 */
#define VM_EVENT_FLAG_VCPU_PAUSED        (1 << 0)
/* Flags to aid debugging vm_event */
#define VM_EVENT_FLAG_FOREIGN            (1 << 1)
/*
 * The following flags can be set in response to a mem_access event.
 *
 * Emulate the fault-causing instruction (if set in the event response flags).
 * This will allow the guest to continue execution without lifting the page
 * access restrictions.
 */
#define VM_EVENT_FLAG_EMULATE            (1 << 2)
/*
 * Same as VM_EVENT_FLAG_EMULATE, but with write operations or operations
 * potentially having side effects (like memory mapped or port I/O) disabled.
 */
#define VM_EVENT_FLAG_EMULATE_NOWRITE    (1 << 3)
/*
 * Toggle singlestepping on vm_event response.
 * Requires the vCPU to be paused already (synchronous events only).
 */
#define VM_EVENT_FLAG_TOGGLE_SINGLESTEP  (1 << 4)
/*
 * Data is being sent back to the hypervisor in the event response, to be
 * returned by the read function when emulating an instruction.
 * This flag is only useful when combined with VM_EVENT_FLAG_EMULATE
 * and takes precedence if combined with VM_EVENT_FLAG_EMULATE_NOWRITE
 * (i.e. if both VM_EVENT_FLAG_EMULATE_NOWRITE and
 * VM_EVENT_FLAG_SET_EMUL_READ_DATA are set, only the latter will be honored).
 */
#define VM_EVENT_FLAG_SET_EMUL_READ_DATA (1 << 5)
/*
 * Deny completion of the operation that triggered the event.
 * Currently only useful for MSR and control-register write events.
 * Requires the vCPU to be paused already (synchronous events only).
 */
#define VM_EVENT_FLAG_DENY               (1 << 6)
/*
 * This flag can be set in a request or a response
 *
 * On a request, indicates that the event occurred in the alternate p2m
 * specified by the altp2m_idx request field.
 *
 * On a response, indicates that the VCPU should resume in the alternate p2m
 * specified by the altp2m_idx response field if possible.
 */
#define VM_EVENT_FLAG_ALTERNATE_P2M      (1 << 7)
/*
 * Set the vCPU registers to the values in the vm_event response.
 * At the moment x86-only, applies to EAX-EDX, ESP, EBP, ESI, EDI, R8-R15,
 * EFLAGS, and EIP.
 * Requires the vCPU to be paused already (synchronous events only).
 */
#define VM_EVENT_FLAG_SET_REGISTERS      (1 << 8)
/*
 * Instruction cache is being sent back to the hypervisor in the event response
 * to be used by the emulator. This flag is only useful when combined with
 * VM_EVENT_FLAG_EMULATE and does not take precedence if combined with
 * VM_EVENT_FLAG_EMULATE_NOWRITE or VM_EVENT_FLAG_SET_EMUL_READ_DATA (i.e.
 * if any of those flags are set, only those will be honored).
 */
#define VM_EVENT_FLAG_SET_EMUL_INSN_DATA (1 << 9)
/*
 * Have a one-shot VM_EVENT_REASON_INTERRUPT event sent for the first
 * interrupt pending after resuming the VCPU.
 */
#define VM_EVENT_FLAG_GET_NEXT_INTERRUPT (1 << 10)
/*
 * Execute fast singlestepping on vm_event response.
 * Requires the vCPU to be paused already (synchronous events only).
 *
 * On a response requires setting the p2midx field of fast_singlestep: the
 * altp2m view Xen will switch the vCPU to on the occurrence of the first
 * singlestep, after which singlestep gets automatically disabled.
 */
#define VM_EVENT_FLAG_FAST_SINGLESTEP    (1 << 11)
/*
 * Set if the event comes from a nested VM and thus npt_base is valid.
 */
#define VM_EVENT_FLAG_NESTED_P2M         (1 << 12)
/*
 * Reset the vmtrace buffer (if vmtrace is enabled)
 */
#define VM_EVENT_FLAG_RESET_VMTRACE      (1 << 13)
/*
 * Reset the VM state (if VM is fork)
 */
#define VM_EVENT_FLAG_RESET_FORK_STATE   (1 << 14)
/*
 * Remove unshared entries from physmap (if VM is fork)
 */
#define VM_EVENT_FLAG_RESET_FORK_MEMORY  (1 << 15)
121 
/*
 * Reasons for the vm_event request
 */

/* Default case */
#define VM_EVENT_REASON_UNKNOWN                 0
/* Memory access violation */
#define VM_EVENT_REASON_MEM_ACCESS              1
/* Memory sharing event */
#define VM_EVENT_REASON_MEM_SHARING             2
/* Memory paging event */
#define VM_EVENT_REASON_MEM_PAGING              3
/* A control register was updated */
#define VM_EVENT_REASON_WRITE_CTRLREG           4
/* An MSR was updated. */
#define VM_EVENT_REASON_MOV_TO_MSR              5
/* Debug operation executed (e.g. int3) */
#define VM_EVENT_REASON_SOFTWARE_BREAKPOINT     6
/* Single-step (e.g. MTF) */
#define VM_EVENT_REASON_SINGLESTEP              7
/* An event has been requested via HVMOP_guest_request_vm_event. */
#define VM_EVENT_REASON_GUEST_REQUEST           8
/* A debug exception was caught */
#define VM_EVENT_REASON_DEBUG_EXCEPTION         9
/* CPUID executed */
#define VM_EVENT_REASON_CPUID                   10
/*
 * Privileged call executed (e.g. SMC).
 * Note: event may be generated even if SMC condition check fails on some CPUs.
 *       As this behavior is CPU-specific, users are advised to not rely on it.
 *       These kinds of events will be filtered out in future versions.
 */
#define VM_EVENT_REASON_PRIVILEGED_CALL         11
/* An interrupt has been delivered. */
#define VM_EVENT_REASON_INTERRUPT               12
/* A descriptor table register was accessed. */
#define VM_EVENT_REASON_DESCRIPTOR_ACCESS       13
/* Current instruction is not implemented by the emulator */
#define VM_EVENT_REASON_EMUL_UNIMPLEMENTED      14
/* VMEXIT */
#define VM_EVENT_REASON_VMEXIT                  15
/* IN/OUT Instruction executed */
#define VM_EVENT_REASON_IO_INSTRUCTION          16

/* Supported values for the vm_event_write_ctrlreg index. */
#define VM_EVENT_X86_CR0    0
#define VM_EVENT_X86_CR3    1
#define VM_EVENT_X86_CR4    2
#define VM_EVENT_X86_XCR0   3
171 
/* The limit field is right-shifted by 12 bits if .ar.g is set. */
struct vm_event_x86_selector_reg {
    uint32_t limit  :    20;    /* segment limit */
    uint32_t ar     :    12;    /* access-rights / attribute bits */
};
177 
/*
 * Using custom vCPU structs (i.e. not hvm_hw_cpu) for both x86 and ARM
 * so as to not fill the vm_event ring buffer too quickly.
 */
struct vm_event_regs_x86 {
    /* General-purpose registers */
    uint64_t rax;
    uint64_t rcx;
    uint64_t rdx;
    uint64_t rbx;
    uint64_t rsp;
    uint64_t rbp;
    uint64_t rsi;
    uint64_t rdi;
    uint64_t r8;
    uint64_t r9;
    uint64_t r10;
    uint64_t r11;
    uint64_t r12;
    uint64_t r13;
    uint64_t r14;
    uint64_t r15;
    uint64_t rflags;
    /* Debug registers */
    uint64_t dr6;
    uint64_t dr7;
    uint64_t rip;
    /* Control registers */
    uint64_t cr0;
    uint64_t cr2;
    uint64_t cr3;
    uint64_t cr4;
    /* MSR state */
    uint64_t sysenter_cs;
    uint64_t sysenter_esp;
    uint64_t sysenter_eip;
    uint64_t msr_efer;
    uint64_t msr_star;
    uint64_t msr_lstar;
    uint64_t gdtr_base;

    /*
     * When VM_EVENT_FLAG_NESTED_P2M is set, this event comes from a nested
     * VM.  npt_base is the guest physical address of the L1 hypervisor's
     * EPT/NPT tables for the nested guest.
     *
     * All bits outside of architectural address ranges are reserved for
     * future metadata.
     */
    uint64_t npt_base;

    /*
     * Current position in the vmtrace buffer, or ~0 if vmtrace is not active.
     *
     * For Intel Processor Trace, it is the upper half of MSR_RTIT_OUTPUT_MASK.
     */
    uint64_t vmtrace_pos;

    /* Segment bases, attributes and selectors */
    uint32_t cs_base;
    uint32_t ss_base;
    uint32_t ds_base;
    uint32_t es_base;
    uint64_t fs_base;
    uint64_t gs_base;
    struct vm_event_x86_selector_reg cs;
    struct vm_event_x86_selector_reg ss;
    struct vm_event_x86_selector_reg ds;
    struct vm_event_x86_selector_reg es;
    struct vm_event_x86_selector_reg fs;
    struct vm_event_x86_selector_reg gs;
    uint64_t shadow_gs;
    uint16_t gdtr_limit;
    uint16_t cs_sel;
    uint16_t ss_sel;
    uint16_t ds_sel;
    uint16_t es_sel;
    uint16_t fs_sel;
    uint16_t gs_sel;
    uint16_t _pad;
};
254 
/*
 * Only the register 'pc' can be set on a vm_event response using the
 * VM_EVENT_FLAG_SET_REGISTERS flag.
 */
struct vm_event_regs_arm {
    uint64_t ttbr0;     /* Translation Table Base Register 0 */
    uint64_t ttbr1;     /* Translation Table Base Register 1 */
    uint64_t ttbcr;     /* Translation Table Base Control Register */
    uint64_t pc;        /* program counter */
    uint64_t cpsr;      /* Current Program Status Register */
};
266 
/*
 * mem_access flag definitions
 *
 * These flags are set only as part of a mem_event request.
 *
 * R/W/X: Defines the type of violation that has triggered the event
 *        Multiple types can be set in a single violation!
 * GLA_VALID: If the gla field holds a guest VA associated with the event
 * FAULT_WITH_GLA: If the violation was triggered by accessing gla
 * FAULT_IN_GPT: If the violation was triggered during translating gla
 */
#define MEM_ACCESS_R                (1 << 0)
#define MEM_ACCESS_W                (1 << 1)
#define MEM_ACCESS_X                (1 << 2)
#define MEM_ACCESS_RWX              (MEM_ACCESS_R | MEM_ACCESS_W | MEM_ACCESS_X)
#define MEM_ACCESS_RW               (MEM_ACCESS_R | MEM_ACCESS_W)
#define MEM_ACCESS_RX               (MEM_ACCESS_R | MEM_ACCESS_X)
#define MEM_ACCESS_WX               (MEM_ACCESS_W | MEM_ACCESS_X)
#define MEM_ACCESS_GLA_VALID        (1 << 3)
#define MEM_ACCESS_FAULT_WITH_GLA   (1 << 4)
#define MEM_ACCESS_FAULT_IN_GPT     (1 << 5)

/* Payload for VM_EVENT_REASON_MEM_ACCESS requests. */
struct vm_event_mem_access {
    uint64_t gfn;
    uint64_t offset;
    uint64_t gla;   /* if flags has MEM_ACCESS_GLA_VALID set */
    uint32_t flags; /* MEM_ACCESS_* */
    uint32_t _pad;
};
296 
/* Payload for VM_EVENT_REASON_WRITE_CTRLREG. */
struct vm_event_write_ctrlreg {
    uint32_t index;     /* VM_EVENT_X86_* control register index */
    uint32_t _pad;
    uint64_t new_value;
    uint64_t old_value;
};
303 
/* Payload for VM_EVENT_REASON_SINGLESTEP. */
struct vm_event_singlestep {
    uint64_t gfn;   /* guest frame number associated with the event */
};
307 
/* Response payload for VM_EVENT_FLAG_FAST_SINGLESTEP (see flag above). */
struct vm_event_fast_singlestep {
    uint16_t p2midx;    /* altp2m index to switch to on the first singlestep */
};
311 
/*
 * Payload for VM_EVENT_REASON_SOFTWARE_BREAKPOINT and
 * VM_EVENT_REASON_DEBUG_EXCEPTION.
 */
struct vm_event_debug {
    uint64_t gfn;
    uint64_t pending_dbg; /* Behaves like the VT-x PENDING_DBG field. */
    uint32_t insn_length;
    uint8_t type;        /* HVMOP_TRAP_* */
    uint8_t _pad[3];
};
319 
/* Payload for VM_EVENT_REASON_MOV_TO_MSR. */
struct vm_event_mov_to_msr {
    uint64_t msr;       /* MSR index */
    uint64_t new_value;
    uint64_t old_value;
};
325 
/* Values for vm_event_desc_access.descriptor. */
#define VM_EVENT_DESC_IDTR           1
#define VM_EVENT_DESC_GDTR           2
#define VM_EVENT_DESC_LDTR           3
#define VM_EVENT_DESC_TR             4

/* Payload for VM_EVENT_REASON_DESCRIPTOR_ACCESS. */
struct vm_event_desc_access {
    union {
        struct {
            uint32_t instr_info;         /* VMX: VMCS Instruction-Information */
            uint32_t _pad1;
            uint64_t exit_qualification; /* VMX: VMCS Exit Qualification */
        } vmx;
    } arch;
    uint8_t descriptor;                  /* VM_EVENT_DESC_* */
    uint8_t is_write;
    uint8_t _pad[6];
};
343 
/* Payload for VM_EVENT_REASON_CPUID. */
struct vm_event_cpuid {
    uint32_t insn_length;   /* length of the CPUID instruction */
    uint32_t leaf;          /* CPUID input leaf (EAX) */
    uint32_t subleaf;       /* CPUID input subleaf (ECX) */
    uint32_t _pad;
};
350 
/* Payload for VM_EVENT_REASON_INTERRUPT on x86. */
struct vm_event_interrupt_x86 {
    uint32_t vector;
    uint32_t type;
    uint32_t error_code;
    uint32_t _pad;
    uint64_t cr2;       /* CR2 at the time of the event */
};
358 
/* Flags for vm_event_paging.flags. */
#define MEM_PAGING_DROP_PAGE       (1 << 0)
#define MEM_PAGING_EVICT_FAIL      (1 << 1)

/* Payload for VM_EVENT_REASON_MEM_PAGING. */
struct vm_event_paging {
    uint64_t gfn;
    uint32_t p2mt;      /* p2m type of the page */
    uint32_t flags;     /* MEM_PAGING_* */
};
367 
/* Payload for VM_EVENT_REASON_MEM_SHARING. */
struct vm_event_sharing {
    uint64_t gfn;
    uint32_t p2mt;      /* p2m type of the page */
    uint32_t _pad;
};
373 
/* Response payload for VM_EVENT_FLAG_SET_EMUL_READ_DATA. */
struct vm_event_emul_read_data {
    uint32_t size;
    /* The struct is used in a union with vm_event_regs_x86. */
    uint8_t  data[sizeof(struct vm_event_regs_x86) - sizeof(uint32_t)];
};
379 
/* Response payload for VM_EVENT_FLAG_SET_EMUL_INSN_DATA. */
struct vm_event_emul_insn_data {
    uint8_t data[16]; /* Has to be completely filled */
};
383 
/* Payload for VM_EVENT_REASON_VMEXIT. */
struct vm_event_vmexit {
    struct {
        struct {
            uint64_t reason;          /* VMX: VMCS exit reason */
            uint64_t qualification;   /* VMX: VMCS exit qualification */
        } vmx;
    } arch;
};
392 
/* Payload for VM_EVENT_REASON_IO_INSTRUCTION. */
struct vm_event_io {
    uint32_t bytes; /* size of access */
    uint16_t port;  /* port number */
    uint8_t  in;    /* direction (0 = OUT, 1 = IN) */
    uint8_t  str;   /* string instruction (0 = not string, 1 = string) */
};
399 
typedef struct vm_event_st {
    uint32_t version;   /* VM_EVENT_INTERFACE_VERSION */
    uint32_t flags;     /* VM_EVENT_FLAG_* */
    uint32_t reason;    /* VM_EVENT_REASON_* */
    uint32_t vcpu_id;
    uint16_t altp2m_idx; /* may be used during request and response */
    uint16_t _pad[3];

    /* Event-specific payload; the valid member is selected by 'reason'. */
    union {
        struct vm_event_paging                mem_paging;
        struct vm_event_sharing               mem_sharing;
        struct vm_event_mem_access            mem_access;
        struct vm_event_write_ctrlreg         write_ctrlreg;
        struct vm_event_mov_to_msr            mov_to_msr;
        struct vm_event_desc_access           desc_access;
        struct vm_event_singlestep            singlestep;
        struct vm_event_fast_singlestep       fast_singlestep;
        struct vm_event_debug                 software_breakpoint;
        struct vm_event_debug                 debug_exception;
        struct vm_event_cpuid                 cpuid;
        struct vm_event_vmexit                vmexit;
        struct vm_event_io                    io;
        union {
            struct vm_event_interrupt_x86     x86;
        } interrupt;
    } u;

    union {
        /* vCPU register state (writable via VM_EVENT_FLAG_SET_REGISTERS). */
        union {
            struct vm_event_regs_x86 x86;
            struct vm_event_regs_arm arm;
        } regs;

        /*
         * Emulation data carried in a response (see
         * VM_EVENT_FLAG_SET_EMUL_READ_DATA / VM_EVENT_FLAG_SET_EMUL_INSN_DATA).
         */
        union {
            struct vm_event_emul_read_data read;
            struct vm_event_emul_insn_data insn;
        } emul;
    } data;
} vm_event_request_t, vm_event_response_t;

/* Shared-ring definitions for the vm_event request/response channel. */
DEFINE_RING_TYPES(vm_event, vm_event_request_t, vm_event_response_t);
441 
442 #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
443 #endif /* _XEN_PUBLIC_VM_EVENT_H */
444 
445 /*
446  * Local variables:
447  * mode: C
448  * c-file-style: "BSD"
449  * c-basic-offset: 4
450  * tab-width: 4
451  * indent-tabs-mode: nil
452  * End:
453  */
454