/*
 * Copyright (C) 2018-2022 Intel Corporation.
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <types.h>
#include <asm/guest/vmtrr.h>
#include <asm/msr.h>
#include <asm/pgtable.h>
#include <asm/guest/ept.h>
#include <asm/guest/vcpu.h>
#include <asm/guest/vm.h>
#include <logmsg.h>

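/*
 * Each fixed-range MTRR packs eight 8-bit memory-type fields, one per
 * sub-range; this value sets all eight fields to write-back (WB).
 */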
#define MTRR_FIXED_RANGE_ALL_WB (MTRR_MEM_TYPE_WB \
					| (MTRR_MEM_TYPE_WB << 8U) \
					| (MTRR_MEM_TYPE_WB << 16U) \
					| (MTRR_MEM_TYPE_WB << 24U) \
					| (MTRR_MEM_TYPE_WB << 32U) \
					| (MTRR_MEM_TYPE_WB << 40U) \
					| (MTRR_MEM_TYPE_WB << 48U) \
					| (MTRR_MEM_TYPE_WB << 56U))

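/*
 * Describes one fixed-range MTRR: its MSR address, the guest-physical
 * base address it covers and the size of each of its eight sub-ranges.
 */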
struct fixed_range_mtrr_maps {
	uint32_t msr;
	uint32_t start;
	uint32_t sub_range_size;
};

#define MAX_FIXED_RANGE_ADDR	0x100000UL
#define FIXED_MTRR_INVALID_INDEX	~0U
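/*
 * The fixed-range MTRRs cover the first 1MB of physical address space:
 * one MSR with 64KB sub-ranges for 0x0-0x7FFFF, two with 16KB sub-ranges
 * for 0x80000-0xBFFFF, and eight with 4KB sub-ranges for 0xC0000-0xFFFFF.
 */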
static struct fixed_range_mtrr_maps fixed_mtrr_map[FIXED_RANGE_MTRR_NUM] = {
	{ MSR_IA32_MTRR_FIX64K_00000, 0x0U, 0x10000U },
	{ MSR_IA32_MTRR_FIX16K_80000, 0x80000U, 0x4000U },
	{ MSR_IA32_MTRR_FIX16K_A0000, 0xA0000U, 0x4000U },
	{ MSR_IA32_MTRR_FIX4K_C0000, 0xC0000U, 0x1000U },
	{ MSR_IA32_MTRR_FIX4K_C8000, 0xC8000U, 0x1000U },
	{ MSR_IA32_MTRR_FIX4K_D0000, 0xD0000U, 0x1000U },
	{ MSR_IA32_MTRR_FIX4K_D8000, 0xD8000U, 0x1000U },
	{ MSR_IA32_MTRR_FIX4K_E0000, 0xE0000U, 0x1000U },
	{ MSR_IA32_MTRR_FIX4K_E8000, 0xE8000U, 0x1000U },
	{ MSR_IA32_MTRR_FIX4K_F0000, 0xF0000U, 0x1000U },
	{ MSR_IA32_MTRR_FIX4K_F8000, 0xF8000U, 0x1000U },
};

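/* Recover the owning vCPU from a pointer to its embedded acrn_vmtrr */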
static inline struct acrn_vcpu *vmtrr2vcpu(const struct acrn_vmtrr *vmtrr)
{
	return container_of(container_of(vmtrr, struct acrn_vcpu_arch, vmtrr), struct acrn_vcpu, arch);
}

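/*
 * Return the fixed_mtrr_map index of the given MSR, or
 * FIXED_MTRR_INVALID_INDEX if the MSR is not a fixed-range MTRR.
 */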
static uint32_t get_index_of_fixed_mtrr(uint32_t msr)
{
	uint32_t i;

	for (i = 0U; i < FIXED_RANGE_MTRR_NUM; i++) {
		if (fixed_mtrr_map[i].msr == msr) {
			break;
		}
	}

	return (i < FIXED_RANGE_MTRR_NUM) ? i : FIXED_MTRR_INVALID_INDEX;
}

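/* Size of each sub-range of the fixed-range MTRR at the given map index */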
static uint32_t
get_subrange_size_of_fixed_mtrr(uint32_t subrange_id)
{
	return fixed_mtrr_map[subrange_id].sub_range_size;
}

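/* Guest-physical start address of one sub-range of a fixed-range MTRR */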
static uint32_t
get_subrange_start_of_fixed_mtrr(uint32_t index, uint32_t subrange_id)
{
	return (fixed_mtrr_map[index].start + subrange_id *
		get_subrange_size_of_fixed_mtrr(index));
}

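/* MTRRdefType.E: true when the MTRRs are enabled globally */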
static inline bool is_mtrr_enabled(const struct acrn_vmtrr *vmtrr)
{
	return (vmtrr->def_type.bits.enable != 0U);
}

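/* True when fixed-range MTRRs are both supported and enabled (MTRRdefType.FE) */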
static inline bool is_fixed_range_mtrr_enabled(const struct acrn_vmtrr *vmtrr)
{
	return ((vmtrr->cap.bits.fix != 0U) &&
		(vmtrr->def_type.bits.fixed_enable != 0U));
}

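/* MTRRdefType.type: the default memory type */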
static inline uint8_t get_default_memory_type(const struct acrn_vmtrr *vmtrr)
{
	return (uint8_t)(vmtrr->def_type.bits.type);
}

/* Initialize the virtual MTRRs for a particular vCPU */
void init_vmtrr(struct acrn_vcpu *vcpu)
{
	struct acrn_vmtrr *vmtrr = &vcpu->arch.vmtrr;
	union mtrr_cap_reg cap = {0};
	uint32_t i;

	/*
	 * Only fixed-range MTRRs are emulated. Guests are not expected to
	 * write the variable-range MTRRs, since MTRRCap.vcnt is reported
	 * as 0.
	 */
	vmtrr->cap.bits.vcnt = 0U;
	vmtrr->cap.bits.fix = 1U;
	vmtrr->def_type.bits.enable = 1U;
	vmtrr->def_type.bits.fixed_enable = 1U;
	vmtrr->def_type.bits.type = MTRR_MEM_TYPE_UC;

	if (is_service_vm(vcpu->vm)) {
		cap.value = msr_read(MSR_IA32_MTRR_CAP);
	}

	for (i = 0U; i < FIXED_RANGE_MTRR_NUM; i++) {
		if (cap.bits.fix != 0U) {
			/*
			 * On the Service VM, the system firmware runs in VMX
			 * non-root mode. In some cases the firmware needs a
			 * particular memory type at certain memory locations
			 * (e.g. UC for some hardware registers), so the EPT
			 * must be configured according to the contents of
			 * the physical MTRRs.
			 */
			vmtrr->fixed_range[i].value = msr_read(fixed_mtrr_map[i].msr);
		} else {
			/*
			 * For a non-Service VM, all memory is set up with the
			 * WB type in the EPT, so the fixed-range MTRRs are
			 * set up accordingly.
			 */
			vmtrr->fixed_range[i].value = MTRR_FIXED_RANGE_ALL_WB;
		}

		pr_dbg("vm%d vcpu%hu fixed-range MTRR[%u]: %16lx",
			vcpu->vm->vm_id, vcpu->vcpu_id, i,
			vmtrr->fixed_range[i].value);
	}
}

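/*
 * Translate an MTRR memory type into the matching EPT memory-type
 * attribute and apply it to the guest-physical range [start, start + size).
 */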
static void update_ept(struct acrn_vm *vm, uint64_t start,
	uint64_t size, uint8_t type)
{
	uint64_t attr;

	switch ((uint64_t)type) {
	case MTRR_MEM_TYPE_WC:
		attr = EPT_WC;
		break;
	case MTRR_MEM_TYPE_WT:
		attr = EPT_WT;
		break;
	case MTRR_MEM_TYPE_WP:
		attr = EPT_WP;
		break;
	case MTRR_MEM_TYPE_WB:
		attr = EPT_WB;
		break;
	case MTRR_MEM_TYPE_UC:
	default:
		attr = EPT_UNCACHED;
		break;
	}

	ept_modify_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, start, size, attr, EPT_MT_MASK);
}

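/*
 * Re-derive the EPT memory types for the fixed range (the low 1MB) from
 * the virtual MTRRs, merging adjacent sub-ranges of the same type so each
 * contiguous run needs only one EPT update.
 */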
static void update_ept_mem_type(const struct acrn_vmtrr *vmtrr)
{
	uint8_t type;
	uint64_t start, size;
	uint32_t i, j;
	struct acrn_vm *vm = vmtrr2vcpu(vmtrr)->vm;

	/*
	 * Intel SDM, Vol 3, Section 11.11.2.1 "IA32_MTRR_DEF_TYPE MSR":
	 * - when def_type.E is clear, the UC memory type is applied
	 * - when def_type.FE is clear, MTRRdefType.type is applied
	 */
	if (!is_mtrr_enabled(vmtrr) || !is_fixed_range_mtrr_enabled(vmtrr)) {
		update_ept(vm, 0U, MAX_FIXED_RANGE_ADDR, get_default_memory_type(vmtrr));
	} else {
		/* Deal with fixed-range MTRRs only */
		for (i = 0U; i < FIXED_RANGE_MTRR_NUM; i++) {
			type = vmtrr->fixed_range[i].type[0];
			start = get_subrange_start_of_fixed_mtrr(i, 0U);
			size = get_subrange_size_of_fixed_mtrr(i);

			for (j = 1U; j < MTRR_SUB_RANGE_NUM; j++) {
				/* Merge adjacent sub-ranges of the same type */
				if (type == vmtrr->fixed_range[i].type[j]) {
					size += get_subrange_size_of_fixed_mtrr(i);
				} else {
					update_ept(vm, start, size, type);
					type = vmtrr->fixed_range[i].type[j];
					start = get_subrange_start_of_fixed_mtrr(i, j);
					size = get_subrange_size_of_fixed_mtrr(i);
				}
			}

			update_ept(vm, start, size, type);
		}
	}
}

/* Virtual MTRR MSR write API */
void write_vmtrr(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t value)
{
	struct acrn_vmtrr *vmtrr = &vcpu->arch.vmtrr;
	uint32_t index;

	if (msr == MSR_IA32_MTRR_DEF_TYPE) {
		if (vmtrr->def_type.value != value) {
			vmtrr->def_type.value = value;

			/*
			 * Guests are expected to follow the MTRR update
			 * guideline in Intel SDM, Volume 3, Section 11.11.8
			 * "MTRR Considerations in MP Systems":
			 * 1. Broadcast to all processors
			 * 2. Disable interrupts
			 * 3. Wait for all processors to do so
			 * 4. Enter the no-fill cache mode (CR0.CD=1, CR0.NW=0)
			 * 5. Flush caches
			 * 6. Clear the CR4.PGE bit
			 * 7. Flush all TLBs
			 * 8. Disable all range registers via MTRRdefType.E
			 * 9. Update the MTRRs
			 * 10. Enable all range registers via MTRRdefType.E
			 * 11. Flush all TLBs and caches again
			 * 12. Enter the normal cache mode to re-enable caching
			 * 13. Set CR4.PGE
			 * 14. Wait for all processors to reach this point
			 * 15. Enable interrupts
			 *
			 * The EPT therefore only needs to be updated at steps
			 * 8 and 10 (writes to MTRRdefType), not at step 9.
			 */
			update_ept_mem_type(vmtrr);
		}
	} else {
		index = get_index_of_fixed_mtrr(msr);
		if (index != FIXED_MTRR_INVALID_INDEX) {
			vmtrr->fixed_range[index].value = value;
		} else {
			pr_err("Write to unexpected MSR: 0x%x", msr);
		}
	}
}

/* Virtual MTRR MSR read API */
uint64_t read_vmtrr(const struct acrn_vcpu *vcpu, uint32_t msr)
{
	const struct acrn_vmtrr *vmtrr = &vcpu->arch.vmtrr;
	uint64_t ret = 0UL;
	uint32_t index;

	if (msr == MSR_IA32_MTRR_CAP) {
		ret = vmtrr->cap.value;
	} else if (msr == MSR_IA32_MTRR_DEF_TYPE) {
		ret = vmtrr->def_type.value;
	} else {
		index = get_index_of_fixed_mtrr(msr);
		if (index != FIXED_MTRR_INVALID_INDEX) {
			ret = vmtrr->fixed_range[index].value;
		} else {
			pr_err("Read from unexpected MSR: 0x%x", msr);
		}
	}

	return ret;
}