/*
 * Copyright (C) 2018-2022 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <types.h>
#include <asm/msr.h>
#include <asm/cpufeatures.h>
#include <asm/cpu.h>
#include <asm/per_cpu.h>
#include <asm/cpu_caps.h>
#include <asm/security.h>
#include <logmsg.h>

static bool skip_l1dfl_vmentry;	/* platform is not affected by L1TF, skip L1D flush on VM entry */
static bool cpu_md_clear;	/* use VERW to overwrite CPU internal buffers (MDS mitigation) */
static int32_t ibrs_type;	/* IBRS_NONE/IBRS_RAW/IBRS_OPT, selected by detect_ibrs() */

static void detect_ibrs(void)
{
	/* For speculation defence.
	 * The default way is to set IBRS at vmexit and then do IBPB at vcpu
	 * context switch (ibrs_type == IBRS_RAW).
	 * Now provide an optimized way (ibrs_type == IBRS_OPT) which sets
	 * STIBP and does IBPB at vmexit, since having STIBP always set has less
	 * impact than having IBRS always set. Also, since IBPB is already done
	 * at vmexit, it is not necessary to do so at vcpu context switch.
	 */
	ibrs_type = IBRS_NONE;

	/* Currently for APL, if retpoline is enabled, then IBRS should not
	 * take effect.
	 * TODO: add IA32_ARCH_CAPABILITIES[1] check; if this bit is set, IBRS
	 * should be set all the time instead of relying on retpoline.
	 */
#ifndef CONFIG_RETPOLINE
	if (pcpu_has_cap(X86_FEATURE_IBRS_IBPB)) {
		ibrs_type = IBRS_RAW;
		if (pcpu_has_cap(X86_FEATURE_STIBP)) {
			ibrs_type = IBRS_OPT;
		}
	}
#endif
}

#ifdef CONFIG_RETPOLINE
/* For platforms that support RRSBA (Restricted Return Stack Buffer Alternate),
 * using retpoline may not be sufficient to guard against branch history injection (BHI)
 * or intra-mode branch target injection (IMBTI). RRSBA must be disabled to
 * prevent CPUs from using alternate predictors for RETs.
 *
 * Quoting Intel CVE-2022-0001/CVE-2022-0002 documentation:
 *
 * Where software is using retpoline as a mitigation for BHI or intra-mode BTI,
 * and the processor both enumerates RRSBA and enumerates RRSBA_DIS controls,
 * it should disable this behavior.
 * ...
 * Software using retpoline as a mitigation for BHI or intra-mode BTI should use
 * these new indirect predictor controls to disable alternate predictors for RETs.
 *
 * See: https://www.intel.com/content/www/us/en/developer/articles/technical/
 * software-security-guidance/technical-documentation/branch-history-injection.html
 */
void disable_rrsba(void)
{
	uint64_t v, x86_arch_caps;
	bool rrsba_behavior = false;

	if (pcpu_has_cap(X86_FEATURE_ARCH_CAP)) {
		x86_arch_caps = msr_read(MSR_IA32_ARCH_CAPABILITIES);
		rrsba_behavior = ((x86_arch_caps & IA32_ARCH_CAP_RESTRICTED_RSBA) != 0UL);
	}

	if (rrsba_behavior && pcpu_has_cap(X86_FEATURE_RRSBA_CTRL)) {
		v = msr_read(MSR_IA32_SPEC_CTRL);
		/* Setting SPEC_RRSBA_DIS_S disables RRSBA behavior for CPL0/1/2 */
		v |= SPEC_RRSBA_DIS_S;
		msr_write(MSR_IA32_SPEC_CTRL, v);
	}
}
#endif

int32_t get_ibrs_type(void)
{
	return ibrs_type;
}

bool check_cpu_security_cap(void)
{
	bool ret = true;
	bool mds_no = false;
	bool ssb_no = false;
	uint64_t x86_arch_capabilities;

	detect_ibrs();

	if (pcpu_has_cap(X86_FEATURE_ARCH_CAP)) {
		x86_arch_capabilities = msr_read(MSR_IA32_ARCH_CAPABILITIES);
		skip_l1dfl_vmentry = ((x86_arch_capabilities
			& IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) != 0UL);

		mds_no = ((x86_arch_capabilities & IA32_ARCH_CAP_MDS_NO) != 0UL);

		/* SSB_NO: Processor is not susceptible to Speculative Store Bypass (SSB) */
		ssb_no = ((x86_arch_capabilities & IA32_ARCH_CAP_SSB_NO) != 0UL);
	}

	if ((!pcpu_has_cap(X86_FEATURE_L1D_FLUSH)) && (!skip_l1dfl_vmentry)) {
		/* Processor is affected by the L1TF CPU vulnerability,
		 * but the L1D_FLUSH command is not supported.
		 */
		ret = false;
	}

	if ((!pcpu_has_cap(X86_FEATURE_SSBD)) && (!ssb_no)) {
		/* Processor is susceptible to Speculative Store Bypass (SSB),
		 * but Speculative Store Bypass Disable (SSBD) is not supported.
		 */
		ret = false;
	}

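	/* Neither IBRS/IBPB nor STIBP is enumerated: no hardware support for
	 * restricting indirect branch speculation.
	 */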
	if ((!pcpu_has_cap(X86_FEATURE_IBRS_IBPB)) && (!pcpu_has_cap(X86_FEATURE_STIBP))) {
		ret = false;
	}

	if (!mds_no) { /* Processor is affected by the MDS vulnerability. */
		if (pcpu_has_cap(X86_FEATURE_MDS_CLEAR)) {
			cpu_md_clear = true;
#ifdef CONFIG_L1D_FLUSH_VMENTRY_ENABLED
			if (!skip_l1dfl_vmentry) {
				/* L1D cache flush will also overwrite CPU internal buffers,
				 * an additional MDS buffer clear operation is not required.
				 */
				cpu_md_clear = false;
			}
#endif
		} else {
			/* Processor is affected by MDS but no mitigation software
			 * interface is enumerated; the CPU microcode needs to be updated.
			 */
			ret = false;
		}
	}

	return ret;
}

void cpu_l1d_flush(void)
{
	/*
	 * 'skip_l1dfl_vmentry' will be true on platforms that
	 * are not affected by L1TF.
	 */
	if (!skip_l1dfl_vmentry) {
		if (pcpu_has_cap(X86_FEATURE_L1D_FLUSH)) {
			msr_write(MSR_IA32_FLUSH_CMD, IA32_L1D_FLUSH);
		}
	}
}

/*
 * VERW instruction (with microcode update) will overwrite
 * CPU internal buffers.
 */
static inline void verw_buffer_overwriting(void)
{
	uint16_t ds = HOST_GDT_RING0_DATA_SEL;

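	/* VERW requires a valid, writable data segment selector as its operand;
	 * on processors enumerating MD_CLEAR, executing it also overwrites the
	 * affected internal buffers as a side effect.
	 */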
	asm volatile ("verw %[ds]" : : [ds] "m" (ds) : "cc");
}

/*
 * On processors that enumerate MD_CLEAR: CPUID.(EAX=7H,ECX=0):EDX[MD_CLEAR=10],
 * the VERW instruction or L1D_FLUSH command should be used to cause the
 * processor to overwrite buffer values that are affected by MDS
 * (Microarchitectural Data Sampling) vulnerabilities.
 *
 * The VERW instruction and L1D_FLUSH command will overwrite the following buffer values:
 *  - Store buffer value for the current logical processor on processors affected
 *    by MSBDS (Microarchitectural Store Buffer Data Sampling).
 *  - Fill buffer for all logical processors on the physical core for processors
 *    affected by MFBDS (Microarchitectural Fill Buffer Data Sampling).
 *  - Load port for all logical processors on the physical core for processors
 *    affected by MLPDS (Microarchitectural Load Port Data Sampling).
 *
 * If the processor is affected by the L1TF vulnerability and the mitigation is
 * enabled, L1D_FLUSH will overwrite the internal buffers on processors affected
 * by MDS, and no additional buffer overwriting is required before VM entry. For
 * other cases, the VERW instruction is used to overwrite buffer values for
 * processors affected by MDS.
 */
void cpu_internal_buffers_clear(void)
{
	if (cpu_md_clear) {
		verw_buffer_overwriting();
	}
}

uint64_t get_random_value(void)
{
	uint64_t random;

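	/* RDRAND clears CF when no random number is available; retry until it
	 * succeeds (CF set).
	 */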
	asm volatile ("1: rdrand %%rax\n"
			"jnc 1b\n"
			"mov %%rax, %0\n"
			: "=r"(random)
			:
			:"%rax");
	return random;
}

#ifdef STACK_PROTECTOR
void set_fs_base(void)
{
	int retry;
	struct stack_canary *psc = &get_cpu_var(stk_canary);

	/*
	 *  1) Leave an initialized canary untouched when this function
	 *     is called again, such as on resuming from S3.
	 *  2) Do some retries in case 'get_random_value()' returns 0.
	 */
	for (retry = 0; (retry < 5) && (psc->canary == 0UL); retry++) {
		psc->canary = get_random_value();
	}

	if (psc->canary == 0UL) {
		panic("Failed to setup stack protector!");
	}

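	/* Point the FS base at the per-cpu canary structure so that the
	 * compiler-generated stack protector checks can locate the canary
	 * through the FS segment.
	 */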
	msr_write(MSR_IA32_FS_BASE, (uint64_t)psc);
}
#endif

#ifndef CONFIG_MCE_ON_PSC_WORKAROUND_ENABLED
bool is_ept_force_4k_ipage(void)
{
	return false;
}
#else
bool is_ept_force_4k_ipage(void)
{
	bool force_4k_ipage = true;
	const struct cpuinfo_x86 *info = get_pcpu_info();
	uint64_t x86_arch_capabilities;

	if (info->displayfamily == 0x6U) {
		switch (info->displaymodel) {
		case 0x26U:
		case 0x27U:
		case 0x35U:
		case 0x36U:
		case 0x37U:
		case 0x86U:
		case 0x1CU:
		case 0x4AU:
		case 0x4CU:
		case 0x4DU:
		case 0x5AU:
		case 0x5CU:
		case 0x5DU:
		case 0x5FU:
		case 0x6EU:
		case 0x7AU:
			/* These Atom processors are not affected by the issue
			 * "Machine Check Error on Page Size Change".
			 */
			force_4k_ipage = false;
			break;
		default:
			force_4k_ipage = true;
			break;
		}
	}

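	/* IF_PSCHANGE_MC_NO in IA32_ARCH_CAPABILITIES indicates the processor is
	 * not affected by machine check on instruction fetch page-size change,
	 * so forcing 4K instruction pages is unnecessary.
	 */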
	if (pcpu_has_cap(X86_FEATURE_ARCH_CAP)) {
		x86_arch_capabilities = msr_read(MSR_IA32_ARCH_CAPABILITIES);
		if ((x86_arch_capabilities & IA32_ARCH_CAP_IF_PSCHANGE_MC_NO) != 0UL) {
			force_4k_ipage = false;
		}
	}

	return force_4k_ipage;
}
#endif