/*
 * vpmu.h: PMU virtualization for HVM domain.
 *
 * Copyright (c) 2007, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Haitao Shan <haitao.shan@intel.com>
 */

#ifndef __ASM_X86_HVM_VPMU_H_
#define __ASM_X86_HVM_VPMU_H_

#include <public/pmu.h>

#define vcpu_vpmu(vcpu)      (&(vcpu)->arch.vpmu)
#define vpmu_vcpu(vpmu)      container_of((vpmu), struct vcpu, arch.vpmu)
#define vpmu_available(vcpu) vpmu_is_set(vcpu_vpmu(vcpu), VPMU_AVAILABLE)
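/*
 * Illustrative usage (not part of this header): callers typically fetch the
 * per-vCPU PMU state with
 *     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 * and, when only the vpmu pointer is at hand, map back with vpmu_vcpu(vpmu).
 */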

#define MSR_TYPE_COUNTER        0
#define MSR_TYPE_CTRL           1
#define MSR_TYPE_GLOBAL         2
#define MSR_TYPE_ARCH_COUNTER   3
#define MSR_TYPE_ARCH_CTRL      4

/* Start of PMU register bank */
#define vpmu_reg_pointer(ctxt, offset) ((void *)((uintptr_t)ctxt + \
                                                 (uintptr_t)ctxt->offset))
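/*
 * Illustrative usage (the context type and field name below are examples,
 * not defined in this header): a vendor context stores the byte offset of
 * each of its register banks, so something like
 *     struct xen_pmu_amd_ctxt *ctxt = vpmu->context;
 *     uint64_t *counters = vpmu_reg_pointer(ctxt, counters);
 * yields a pointer 'ctxt->counters' bytes past the start of the context.
 */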

/* Arch specific operations shared by all vpmus */
struct arch_vpmu_ops {
    int (*do_wrmsr)(unsigned int msr, uint64_t msr_content,
                    uint64_t supported);
    int (*do_rdmsr)(unsigned int msr, uint64_t *msr_content);
    int (*do_interrupt)(struct cpu_user_regs *regs);
    void (*arch_vpmu_destroy)(struct vcpu *v);
    int (*arch_vpmu_save)(struct vcpu *v, bool_t to_guest);
    int (*arch_vpmu_load)(struct vcpu *v, bool_t from_guest);
    void (*arch_vpmu_dump)(const struct vcpu *);
};
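/*
 * Illustrative sketch (hypothetical names): a vendor back-end provides a
 * static instance of these hooks, e.g.
 *     static const struct arch_vpmu_ops core2_vpmu_ops = {
 *         .do_wrmsr          = core2_vpmu_do_wrmsr,
 *         .do_rdmsr          = core2_vpmu_do_rdmsr,
 *         .do_interrupt      = core2_vpmu_do_interrupt,
 *         .arch_vpmu_destroy = core2_vpmu_destroy,
 *         .arch_vpmu_save    = core2_vpmu_save,
 *         .arch_vpmu_load    = core2_vpmu_load,
 *         .arch_vpmu_dump    = core2_vpmu_dump,
 *     };
 * and the per-vCPU vpmu_struct below keeps a pointer to it in arch_vpmu_ops.
 */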

int core2_vpmu_init(void);
int vmx_vpmu_initialise(struct vcpu *);
int amd_vpmu_init(void);
int svm_vpmu_initialise(struct vcpu *);

struct vpmu_struct {
    u32 flags;                  /* VPMU_* state and feature bits below */
    u32 last_pcpu;              /* Physical CPU this context was last loaded on */
    u32 hw_lapic_lvtpc;         /* Saved local APIC LVTPC value */
    void *context;              /* May be shared with PV guest */
    void *priv_context;         /* hypervisor-only */
    const struct arch_vpmu_ops *arch_vpmu_ops;
    struct xen_pmu_data *xenpmu_data;
    spinlock_t vpmu_lock;
};

/* VPMU states */
#define VPMU_CONTEXT_ALLOCATED              0x1
#define VPMU_CONTEXT_LOADED                 0x2
#define VPMU_RUNNING                        0x4
#define VPMU_CONTEXT_SAVE                   0x8   /* Force context save */
#define VPMU_FROZEN                         0x10  /* Stop counters while VCPU is not running */
#define VPMU_PASSIVE_DOMAIN_ALLOCATED       0x20
/* PV(H) guests: VPMU registers are accessed by guest from shared page */
#define VPMU_CACHED                         0x40
#define VPMU_AVAILABLE                      0x80

/* Intel-specific VPMU features */
#define VPMU_CPU_HAS_DS                     0x100 /* Has Debug Store */
#define VPMU_CPU_HAS_BTS                    0x200 /* Has Branch Trace Store */
static inline void vpmu_set(struct vpmu_struct *vpmu, const u32 mask)
{
    vpmu->flags |= mask;
}
static inline void vpmu_reset(struct vpmu_struct *vpmu, const u32 mask)
{
    vpmu->flags &= ~mask;
}
static inline void vpmu_clear(struct vpmu_struct *vpmu)
{
    /* VPMU_AVAILABLE should be altered by get/put_vpmu(). */
    vpmu->flags &= VPMU_AVAILABLE;
}
static inline bool_t vpmu_is_set(const struct vpmu_struct *vpmu, const u32 mask)
{
    return !!(vpmu->flags & mask);
}
static inline bool_t vpmu_are_all_set(const struct vpmu_struct *vpmu,
                                      const u32 mask)
{
    return !!((vpmu->flags & mask) == mask);
}
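/*
 * Illustrative usage: vendor code tracks progress with these helpers, e.g.
 * after restoring counters on a context load:
 *     vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
 *     if ( vpmu_is_set(vpmu, VPMU_RUNNING) )
 *         ... re-enable the hardware counters ...
 * Note that vpmu_clear() deliberately preserves VPMU_AVAILABLE, which is
 * owned by get/put_vpmu().
 */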

void vpmu_lvtpc_update(uint32_t val);
int vpmu_do_msr(unsigned int msr, uint64_t *msr_content,
                uint64_t supported, bool_t is_write);
void vpmu_do_interrupt(struct cpu_user_regs *regs);
void vpmu_initialise(struct vcpu *v);
void vpmu_destroy(struct vcpu *v);
void vpmu_save(struct vcpu *v);
int vpmu_load(struct vcpu *v, bool_t from_guest);
void vpmu_dump(struct vcpu *v);

static inline int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
                                uint64_t supported)
{
    return vpmu_do_msr(msr, &msr_content, supported, 1);
}
static inline int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
{
    return vpmu_do_msr(msr, msr_content, 0, 0);
}

extern unsigned int vpmu_mode;
extern unsigned int vpmu_features;

/* Context switch */
static inline void vpmu_switch_from(struct vcpu *prev)
{
    if ( vpmu_mode & (XENPMU_MODE_SELF | XENPMU_MODE_HV) )
        vpmu_save(prev);
}

static inline void vpmu_switch_to(struct vcpu *next)
{
    if ( vpmu_mode & (XENPMU_MODE_SELF | XENPMU_MODE_HV) )
        vpmu_load(next, 0);
}
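/*
 * The two helpers above are meant to be called from the context-switch
 * path: vpmu_switch_from(prev) before switching away from a vCPU to save
 * its counters, vpmu_switch_to(next) afterwards to reload them, and only
 * when vpmu_mode grants PMU access (XENPMU_MODE_SELF or XENPMU_MODE_HV).
 */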

#endif /* __ASM_X86_HVM_VPMU_H_ */