/* SPDX-License-Identifier: MIT */
/*
 * Copyright (c) 2015 Oracle and/or its affiliates. All rights reserved.
 */

#ifndef __XEN_PUBLIC_ARCH_X86_PMU_H__
#define __XEN_PUBLIC_ARCH_X86_PMU_H__

/* x86-specific PMU definitions */

/* AMD PMU registers and structures */
struct xen_pmu_amd_ctxt {
    /*
     * Offsets to counter and control MSRs (relative to xen_pmu_arch.c.amd).
     * For PV(H) guests these fields are RO.
     */
    uint32_t counters;
    uint32_t ctrls;

    /* Counter MSRs */
    uint64_t regs[XEN_FLEX_ARRAY_DIM];
};
typedef struct xen_pmu_amd_ctxt xen_pmu_amd_ctxt_t;
DEFINE_XEN_GUEST_HANDLE(xen_pmu_amd_ctxt_t);

/* Intel PMU registers and structures */
struct xen_pmu_cntr_pair {
    uint64_t counter;
    uint64_t control;
};
typedef struct xen_pmu_cntr_pair xen_pmu_cntr_pair_t;
DEFINE_XEN_GUEST_HANDLE(xen_pmu_cntr_pair_t);

struct xen_pmu_intel_ctxt {
    /*
     * Offsets to fixed and architectural counter MSRs (relative to
     * xen_pmu_arch.c.intel).
     * For PV(H) guests these fields are RO.
     */
    uint32_t fixed_counters;
    uint32_t arch_counters;

    /* PMU registers */
    uint64_t global_ctrl;
    uint64_t global_ovf_ctrl;
    uint64_t global_status;
    uint64_t fixed_ctrl;
    uint64_t ds_area;
    uint64_t pebs_enable;
    uint64_t debugctl;

    /* Fixed and architectural counter MSRs */
    uint64_t regs[XEN_FLEX_ARRAY_DIM];
};
typedef struct xen_pmu_intel_ctxt xen_pmu_intel_ctxt_t;
DEFINE_XEN_GUEST_HANDLE(xen_pmu_intel_ctxt_t);

/* Sampled domain's registers */
struct xen_pmu_regs {
    uint64_t ip;
    uint64_t sp;
    uint64_t flags;
    uint16_t cs;
    uint16_t ss;
    uint8_t cpl;
    uint8_t pad[3];
};
typedef struct xen_pmu_regs xen_pmu_regs_t;
DEFINE_XEN_GUEST_HANDLE(xen_pmu_regs_t);

/* PMU flags */
#define PMU_CACHED         (1<<0) /* PMU MSRs are cached in the context */
#define PMU_SAMPLE_USER    (1<<1) /* Sample taken in user mode (clear: kernel) */
#define PMU_SAMPLE_REAL    (1<<2) /* Sample is from real mode */
#define PMU_SAMPLE_PV      (1<<3) /* Sample is from a PV guest */

/*
 * Architecture-specific information describing the state of the processor at
 * the time of the PMU interrupt.
 * Fields of this structure marked as RW for the guest should only be written
 * by the guest when the PMU_CACHED bit in pmu_flags is set (which is done by
 * the hypervisor during the PMU interrupt). The hypervisor reads the updated
 * data during the XENPMU_flush hypercall and then clears the PMU_CACHED bit.
 */
struct xen_pmu_arch {
    union {
        /*
         * Processor's registers at the time of the interrupt.
         * WO for hypervisor, RO for guests.
         */
        xen_pmu_regs_t regs;
        /* Padding for adding new registers to xen_pmu_regs in the future */
#define XENPMU_REGS_PAD_SZ  64
        uint8_t pad[XENPMU_REGS_PAD_SZ];
    } r;

    /* WO for hypervisor, RO for guest */
    uint64_t pmu_flags;

    /*
     * APIC LVTPC register.
     * RW for both hypervisor and guest.
     * Only the APIC_LVT_MASKED bit is loaded by the hypervisor into hardware
     * during XENPMU_flush or XENPMU_lvtpc_set.
     */
    union {
        uint32_t lapic_lvtpc;
        uint64_t pad;
    } l;

    /*
     * Vendor-specific PMU registers.
     * RW for both hypervisor and guest (see exceptions above).
     * The guest's updates to this field are verified and then loaded by the
     * hypervisor into hardware during XENPMU_flush.
     */
    union {
        xen_pmu_amd_ctxt_t amd;
        xen_pmu_intel_ctxt_t intel;

        /*
         * Padding for contexts (fixed parts only, does not include the MSR
         * banks that are specified by offsets)
         */
#define XENPMU_CTXT_PAD_SZ  128
        uint8_t pad[XENPMU_CTXT_PAD_SZ];
    } c;
};
typedef struct xen_pmu_arch xen_pmu_arch_t;
DEFINE_XEN_GUEST_HANDLE(xen_pmu_arch_t);

#endif /* __XEN_PUBLIC_ARCH_X86_PMU_H__ */
/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */
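
#if 0 /* Illustrative sketch only -- not part of this interface, never compiled. */
/*
 * A minimal, non-normative example of the update protocol described above,
 * using the AMD layout: 'counters' and 'ctrls' hold byte offsets relative to
 * xen_pmu_arch.c.amd, and RW fields may only be written while PMU_CACHED is
 * set.  The shared per-vCPU mapping "xenpmu_data" and the wrapper
 * "xenpmu_flush()" (issuing the XENPMU_flush hypercall) are hypothetical
 * placeholders a guest kernel would provide; they are not defined here.
 */
static void example_program_amd_counter(struct xen_pmu_arch *xenpmu_data,
                                        unsigned int idx, uint64_t evtsel)
{
    struct xen_pmu_amd_ctxt *amd = &xenpmu_data->c.amd;
    uint64_t *ctr  = (uint64_t *)((uint8_t *)amd + amd->counters);
    uint64_t *ctrl = (uint64_t *)((uint8_t *)amd + amd->ctrls);

    /* Guests may only modify RW fields while the MSRs are cached. */
    if ( !(xenpmu_data->pmu_flags & PMU_CACHED) )
        return;

    ctr[idx]  = 0;       /* reset the counter MSR image                */
    ctrl[idx] = evtsel;  /* event-select/enable bits (vendor-defined)  */

    /*
     * The hypervisor verifies the updates, loads them into hardware and
     * clears PMU_CACHED during XENPMU_flush.
     */
    xenpmu_flush();
}
#endif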