/*
 * Copyright (C) 2021-2022 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <types.h>
#include <errno.h>
#include <logmsg.h>
#include <asm/cpufeatures.h>
#include <asm/cpuid.h>
#include <asm/rdt.h>
#include <asm/lib/bits.h>
#include <asm/board.h>
#include <asm/vm_config.h>
#include <asm/msr.h>
#include <asm/guest/vcpu.h>
#include <asm/guest/vm.h>
#include <asm/guest/vcat.h>
#include <asm/per_cpu.h>

/**
 * @pre vm != NULL
 */
bool is_l2_vcat_configured(const struct acrn_vm *vm)
{
	uint16_t pcpu = ffs64(vm->hw.cpu_affinity);
	const struct rdt_ins *ins = get_rdt_res_ins(RDT_RESOURCE_L2, pcpu);

	return is_vcat_configured(vm) && ins != NULL && (ins->num_closids > 0U);
}

/**
 * @pre vm != NULL
 */
bool is_l3_vcat_configured(const struct acrn_vm *vm)
{
	uint16_t pcpu = ffs64(vm->hw.cpu_affinity);
	const struct rdt_ins *ins = get_rdt_res_ins(RDT_RESOURCE_L3, pcpu);

	return is_vcat_configured(vm) && ins != NULL && (ins->num_closids > 0U);
}

/**
 * @brief Return number of vCLOSIDs of vm
 *
 * @pre vm != NULL && vm->vm_id < CONFIG_MAX_VM_NUM
 */
uint16_t vcat_get_num_vclosids(const struct acrn_vm *vm)
{
	uint16_t num_vclosids = 0U;

	if (is_vcat_configured(vm)) {
		/*
		 * For performance and simplicity, the number of vCLOSIDs (num_vclosids) is set
		 * equal to the number of pCLOSIDs assigned to this VM (get_vm_config(vm->vm_id)->num_pclosids).
		 * Technically, we do not have to make such an assumption: the hypervisor could,
		 * for example, implement CLOSID context switching, in which case the number of
		 * vCLOSIDs could be greater than the number of assigned pCLOSIDs.
		 */
		num_vclosids = get_vm_config(vm->vm_id)->num_pclosids;
	}

	return num_vclosids;
}

/**
 * @brief Map vCLOSID to pCLOSID
 *
 * @pre vm != NULL && vm->vm_id < CONFIG_MAX_VM_NUM
 * @pre (get_vm_config(vm->vm_id)->pclosids != NULL) && (vclosid < get_vm_config(vm->vm_id)->num_pclosids)
 */
static uint16_t vclosid_to_pclosid(const struct acrn_vm *vm, uint16_t vclosid)
{
	ASSERT(vclosid < vcat_get_num_vclosids(vm), "vclosid is out of range!");

	/*
	 * pclosids points to an array of assigned pCLOSIDs.
	 * Use vCLOSID as the index into the pclosids array, returning the corresponding pCLOSID.
	 *
	 * Note that write_vcbm() calls vclosid_to_pclosid(); in write_vcbm(), the
	 * is_l2_vcbm_msr()/is_l3_vcbm_msr() checks ensure that vclosid is always less than
	 * get_vm_config(vm->vm_id)->num_pclosids, so vclosid is always an array index within bounds here.
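	 *
	 * For illustration only (hypothetical values): if pclosids points to {4, 5},
	 * then vclosid 0 maps to pclosid 4 and vclosid 1 maps to pclosid 5.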
	 */
	return get_vm_config(vm->vm_id)->pclosids[vclosid];
}

/**
 * @brief Return max_pcbm of vm
 * @pre vm != NULL && vm->vm_id < CONFIG_MAX_VM_NUM
 * @pre res == RDT_RESOURCE_L2 || res == RDT_RESOURCE_L3
 */
static uint64_t get_max_pcbm(const struct acrn_vm *vm, int res)
{
	/*
	 * max_pcbm/CLOS_MASK is defined in the scenario file and is a contiguous bitmask starting
	 * at bit position low (the lowest assigned physical cache way) and ending at position
	 * high (the highest assigned physical cache way, inclusive). As CBM only allows
	 * contiguous '1' combinations, max_pcbm is essentially a bitmask that selects/covers
	 * all the physical cache ways assigned to the VM.
	 *
	 * For illustrative purposes, assume the two functions GENMASK() and BIT()
	 * are defined as follows:
	 * GENMASK(high, low): create a contiguous bitmask starting at bit position low and
	 * ending at position high, inclusive.
	 * BIT(n): create a bitmask with bit n set.
	 *
	 * max_pcbm, min_pcbm, max_vcbm, min_vcbm and the relationship between them
	 * can be expressed as:
	 * max_pcbm = GENMASK(high, low)
	 * min_pcbm = BIT(low)
	 *
	 * max_vcbm = GENMASK(high - low, 0)
	 * min_vcbm = BIT(0)
	 * vcbm_len = bitmap_weight(max_pcbm) = high - low + 1
	 *
	 * pcbm to vcbm (mask off the unwanted bits to prevent erroneous mask values):
	 * vcbm = (pcbm & max_pcbm) >> low
	 *
	 * vcbm to pcbm:
	 * pcbm = (vcbm & max_vcbm) << low
	 *
	 * max_pcbm will be mapped to max_vcbm
	 * min_pcbm will be mapped to min_vcbm
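	 *
	 * Worked example (hypothetical values): with low = 4 and high = 11,
	 * max_pcbm = GENMASK(11, 4) = 0x0FF0 and max_vcbm = GENMASK(7, 0) = 0x00FF;
	 * a pcbm of 0x0030 then maps to a vcbm of 0x0003, and vice versa.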
	 */
	uint64_t max_pcbm = 0UL;

	if (is_l2_vcat_configured(vm) && (res == RDT_RESOURCE_L2)) {
		max_pcbm = get_vm_config(vm->vm_id)->max_l2_pcbm;
	} else if (is_l3_vcat_configured(vm) && (res == RDT_RESOURCE_L3)) {
		max_pcbm = get_vm_config(vm->vm_id)->max_l3_pcbm;
	}

	return max_pcbm;
}

/**
 * @brief Return vcbm_len of vm
 * @pre vm != NULL
 */
uint16_t vcat_get_vcbm_len(const struct acrn_vm *vm, int res)
{
	return bitmap_weight(get_max_pcbm(vm, res));
}

/**
 * @brief Return max_vcbm of vm
 * @pre vm != NULL
 */
static uint64_t vcat_get_max_vcbm(const struct acrn_vm *vm, int res)
{
	uint64_t max_pcbm = get_max_pcbm(vm, res);
	/* Find the position low (the first bit set) in max_pcbm */
	uint16_t low = ffs64(max_pcbm);

	/* Right shift max_pcbm by low to get max_vcbm */
	return max_pcbm >> low;
}

/**
 * @brief Map pCBM to vCBM
 *
 * @pre vm != NULL
 */
uint64_t vcat_pcbm_to_vcbm(const struct acrn_vm *vm, uint64_t pcbm, int res)
{
	uint64_t max_pcbm = get_max_pcbm(vm, res);

	/* Find the position low (the first bit set) in max_pcbm */
	uint16_t low = ffs64(max_pcbm);

	/* pcbm set bits should only be in the range of [low, high] */
	return (pcbm & max_pcbm) >> low;
}

/**
 * @pre vm != NULL
 */
static bool is_l2_vcbm_msr(const struct acrn_vm *vm, uint32_t vmsr)
{
	/* num_vcbm_msrs = num_vclosids */
	uint16_t num_vcbm_msrs = vcat_get_num_vclosids(vm);

	return (is_l2_vcat_configured(vm)
		&& (vmsr >= MSR_IA32_L2_MASK_BASE) && (vmsr < (MSR_IA32_L2_MASK_BASE + num_vcbm_msrs)));
}

/**
 * @pre vm != NULL
 */
static bool is_l3_vcbm_msr(const struct acrn_vm *vm, uint32_t vmsr)
{
	/* num_vcbm_msrs = num_vclosids */
	uint16_t num_vcbm_msrs = vcat_get_num_vclosids(vm);

	return (is_l3_vcat_configured(vm)
		&& (vmsr >= MSR_IA32_L3_MASK_BASE) && (vmsr < (MSR_IA32_L3_MASK_BASE + num_vcbm_msrs)));
}

/**
 * @brief vCBM MSR read handler
 *
 * @pre vcpu != NULL && vcpu->vm != NULL && rval != NULL
 */
int32_t read_vcbm(const struct acrn_vcpu *vcpu, uint32_t vmsr, uint64_t *rval)
{
	int ret = -EACCES;
	struct acrn_vm *vm = vcpu->vm;

	if (is_vcat_configured(vm) && (is_l2_vcbm_msr(vm, vmsr) || is_l3_vcbm_msr(vm, vmsr))) {
		*rval = vcpu_get_guest_msr(vcpu, vmsr);
		ret = 0;
	}

	return ret;
}

/**
 * @brief Map vCBM to pCBM
 *
 * @pre vm != NULL && ((vcbm & vcat_get_max_vcbm(vm, res)) == vcbm)
 */
static uint64_t vcbm_to_pcbm(const struct acrn_vm *vm, uint64_t vcbm, int res)
{
	uint64_t max_pcbm = get_max_pcbm(vm, res);

	/* Find the position low (the first bit set) in max_pcbm */
	uint16_t low = ffs64(max_pcbm);

	return vcbm << low;
}

void get_cache_shift(uint32_t *l2_shift, uint32_t *l3_shift);
/**
 * @pre vcpu != NULL && l2_id != NULL && l3_id != NULL
 */
static void get_cache_id(struct acrn_vcpu *vcpu, uint32_t *l2_id, uint32_t *l3_id)
{
	uint32_t l2_shift, l3_shift;
	uint32_t apicid = vlapic_get_apicid(vcpu_vlapic(vcpu));

	get_cache_shift(&l2_shift, &l3_shift);

	/*
	 * Relationship between APIC ID and cache ID:
	 * Intel SDM Vol 2, CPUID 04H:
	 * EAX: bits 25 - 14: Maximum number of addressable IDs for logical processors sharing this cache.
	 * The nearest power-of-2 integer that is not smaller than (1 + EAX[25:14]) is the number of unique
	 * initial APIC IDs reserved for addressing different logical processors sharing this cache
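	 *
	 * For illustration only (hypothetical value): with l2_shift = 1, APIC IDs 2 and 3
	 * both yield L2 cache id 1, i.e. those two logical processors share an L2 cache.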
	 */
	*l2_id = apicid >> l2_shift;
	*l3_id = apicid >> l3_shift;
}

/**
 * @brief Propagate vCBM to other vCPUs that share cache with vcpu
 * @pre vcpu != NULL && vcpu->vm != NULL
 */
static void propagate_vcbm(struct acrn_vcpu *vcpu, uint32_t vmsr, uint64_t val)
{
	uint16_t i;
	struct acrn_vcpu *tmp_vcpu;
	uint32_t l2_id, l3_id;
	struct acrn_vm *vm = vcpu->vm;

	get_cache_id(vcpu, &l2_id, &l3_id);

	/*
	 * Determine which logical processors share an MSR (for instance local
	 * to a core, or shared across multiple cores) by checking if they have the same
	 * L2/L3 cache id
	 */
	foreach_vcpu(i, vm, tmp_vcpu) {
		uint32_t tmp_l2_id, tmp_l3_id;

		get_cache_id(tmp_vcpu, &tmp_l2_id, &tmp_l3_id);

		if ((is_l2_vcbm_msr(vm, vmsr) && (l2_id == tmp_l2_id))
			|| (is_l3_vcbm_msr(vm, vmsr) && (l3_id == tmp_l3_id))) {
			vcpu_set_guest_msr(tmp_vcpu, vmsr, val);
		}
	}
}

/*
 * Check if bitmask is contiguous:
 * All (and only) contiguous '1' combinations are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.)
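 *
 * A contiguous mask satisfies ((2 << high) - (1 << low)) == bitmask; e.g. for 003CH,
 * low = 2 and high = 5, and (2 << 5) - (1 << 2) = 0x40 - 0x04 = 0x3C.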
 */
static bool is_contiguous(uint64_t bitmask)
{
	bool ret = false;

	if (bitmask != 0UL) {
		uint16_t low = ffs64(bitmask);
		uint16_t high = fls64(bitmask);

		if (((2UL << high) - (1UL << low)) == bitmask) {
			ret = true;
		}
	}

	return ret;
}

/**
 * @brief vCBM MSR write handler
 *
 * @pre vcpu != NULL && vcpu->vm != NULL
 */
int32_t write_vcbm(struct acrn_vcpu *vcpu, uint32_t vmsr, uint64_t val)
{
	int ret = -EACCES;
	struct acrn_vm *vm = vcpu->vm;
	int res = -1;
	uint32_t msr_base;

	if (is_vcat_configured(vm)) {
		if (is_l2_vcbm_msr(vm, vmsr)) {
			res = RDT_RESOURCE_L2;
			msr_base = MSR_IA32_L2_MASK_BASE;
		} else if (is_l3_vcbm_msr(vm, vmsr)) {
			res = RDT_RESOURCE_L3;
			msr_base = MSR_IA32_L3_MASK_BASE;
		}
	}

	if (res >= 0) {
		/*
		 * vcbm set bits should only be in the range of [0, vcbm_len) (vcat_get_max_vcbm),
		 * so mask with vcat_get_max_vcbm to prevent an erroneous vCBM value
		 */
		uint64_t masked_vcbm = val & vcat_get_max_vcbm(vm, res);

		/*
		 * Validity check on val:
		 * Bits 63:32 of val are reserved and must be written with zeros
		 * (satisfied by the masked_vcbm == val condition)
		 * vCBM must be contiguous
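		 *
		 * For illustration only (hypothetical values): with vcbm_len = 8, a write of 0x3C
		 * passes both checks, while 0x105 (non-contiguous, and with a bit set above bit 7)
		 * is rejected.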
		 */
		if ((masked_vcbm == val) && is_contiguous(val)) {
			uint32_t pmsr;
			uint16_t vclosid;
			uint64_t pcbm, pvalue;

			/*
			 * Write vCBM first:
			 * The L2 mask MSRs are scoped at the same level as the L2 cache (similarly,
			 * the L3 mask MSRs are scoped at the same level as the L3 cache).
			 *
			 * For example, the MSR_IA32_L3_MASK_n MSRs are scoped at socket level, which means
			 * that if we program MSR_IA32_L3_MASK_n on one cpu, the same MSR_IA32_L3_MASK_n on
			 * all other cpus of the same socket also picks up the change!
			 * Set vcbm on all the vCPUs that share cache with vcpu to mimic this hardware behavior.
			 */
			propagate_vcbm(vcpu, vmsr, val);

			/* Write pCBM: */
			vclosid = (uint16_t)(vmsr - msr_base);
			pmsr = msr_base + (uint32_t)vclosid_to_pclosid(vm, vclosid);
			pcbm = vcbm_to_pcbm(vm, val, res);
			/* Preserve reserved bits, and only set the pCBM bits */
			pvalue = (msr_read(pmsr) & ~get_max_pcbm(vm, res)) | pcbm;
			msr_write(pmsr, pvalue);

			ret = 0;
		}
	}

	return ret;
}

/**
 * @brief vCLOSID MSR read handler
 *
 * @pre vcpu != NULL && vcpu->vm != NULL
 */
int32_t read_vclosid(const struct acrn_vcpu *vcpu, uint64_t *rval)
{
	int ret = -EACCES;

	if (is_vcat_configured(vcpu->vm)) {
		*rval = vcpu_get_guest_msr(vcpu, MSR_IA32_PQR_ASSOC);
		ret = 0;
	}

	return ret;
}

/**
 * @brief vCLOSID MSR write handler
 *
 * @pre vcpu != NULL && vcpu->vm != NULL
 */
int32_t write_vclosid(struct acrn_vcpu *vcpu, uint64_t val)
{
	int32_t ret = -EACCES;

	if (is_vcat_configured(vcpu->vm)) {
		uint32_t vclosid = (uint32_t)((val >> 32U) & 0xFFFFFFFFUL);

		/*
		 * Validity check on val:
		 * Bits 9:0: RMID (always 0 for now)
		 * Bits 31:10: reserved and must be written with zeros
		 * Bits 63:32: vclosid (must be within permitted range)
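		 *
		 * For illustration only (hypothetical value): a write of 0x0000000200000000 selects
		 * vCLOSID 2 with RMID 0 (assuming vCLOSID 2 is within the permitted range), whereas
		 * any value with a non-zero low 32 bits is rejected.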
		 */
		if (((val & 0xFFFFFFFFUL) == 0UL) && (vclosid < (uint32_t)vcat_get_num_vclosids(vcpu->vm))) {
			uint16_t pclosid;

			/* Write the new vCLOSID value */
			vcpu_set_guest_msr(vcpu, MSR_IA32_PQR_ASSOC, val);

			pclosid = vclosid_to_pclosid(vcpu->vm, (uint16_t)vclosid);
			/*
			 * Write the new pCLOSID value to the guest msr area
			 *
			 * The prepare_auto_msr_area() function has already initialized vcpu->arch.msr_area.
			 * Here we only need to update the vcpu->arch.msr_area.guest[].value field for IA32_PQR_ASSOC;
			 * all other vcpu->arch.msr_area fields remain unchanged at runtime.
			 */
			vcpu->arch.msr_area.guest[vcpu->arch.msr_area.index_of_pqr_assoc].value = clos2pqr_msr(pclosid);

			ret = 0;
		}
	}

	return ret;
}

/**
 * @brief Initialize vCBM MSRs
 *
 * @pre vcpu != NULL && vcpu->vm != NULL
 */
static void init_vcbms(struct acrn_vcpu *vcpu, int res, uint32_t msr_base)
{
	uint64_t max_vcbm = vcat_get_max_vcbm(vcpu->vm, res);

	if (max_vcbm != 0UL) {
		uint32_t vmsr;
		/* num_vcbm_msrs = num_vclosids */
		uint16_t num_vcbm_msrs = vcat_get_num_vclosids(vcpu->vm);

		/*
		 * For each vCBM MSR, its initial vCBM is set to max_vcbm,
		 * a bitmask with vcbm_len bits (from 0 to vcbm_len - 1, inclusive)
		 * set to 1 and all other bits set to 0.
		 *
		 * As CBM only allows contiguous '1' combinations, max_vcbm is essentially
		 * a bitmask that selects all the virtual cache ways assigned to the VM.
		 * It covers all the virtual cache ways the guest VM may access, i.e. the
		 * superset bitmask.
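		 *
		 * For illustration only (hypothetical value): with vcbm_len = 8, every vCBM MSR
		 * starts out as max_vcbm = 0xFF.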
		 */
		for (vmsr = msr_base; vmsr < (msr_base + num_vcbm_msrs); vmsr++) {
			/* Write vCBM MSR */
			(void)write_vcbm(vcpu, vmsr, max_vcbm);
		}
	}
}

/**
 * @brief Initialize vCAT MSRs
 *
 * @pre vcpu != NULL && vcpu->vm != NULL
 */
void init_vcat_msrs(struct acrn_vcpu *vcpu)
{
	if (is_vcat_configured(vcpu->vm)) {
		init_vcbms(vcpu, RDT_RESOURCE_L2, MSR_IA32_L2_MASK_BASE);

		init_vcbms(vcpu, RDT_RESOURCE_L3, MSR_IA32_L3_MASK_BASE);

		(void)write_vclosid(vcpu, 0U);
	}
}