// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2019 SUSE
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt) "SEV: " fmt

#include <linux/percpu-defs.h>
#include <linux/cc_platform.h>
#include <linux/printk.h>
#include <linux/mm_types.h>
#include <linux/set_memory.h>
#include <linux/memblock.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/efi.h>
#include <linux/io.h>
#include <linux/psp-sev.h>
#include <uapi/linux/sev-guest.h>

#include <asm/init.h>
#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
#include <asm/sev.h>
#include <asm/sev-internal.h>
#include <asm/insn-eval.h>
#include <asm/fpu/xcr.h>
#include <asm/processor.h>
#include <asm/realmode.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/svm.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/cpuid/api.h>
#include <asm/cmdline.h>

/* For early boot hypervisor communication in SEV-ES enabled guests */
struct ghcb boot_ghcb_page __bss_decrypted __aligned(PAGE_SIZE);

/*
 * Needs to be in the .data section because we need it NULL before bss is
 * cleared
 */
struct ghcb *boot_ghcb __section(".data");

/* Bitmap of SEV features supported by the hypervisor */
u64 sev_hv_features __ro_after_init;

/* Secrets page physical address from the CC blob */
u64 sev_secrets_pa __ro_after_init;

/* For early boot SVSM communication */
struct svsm_ca boot_svsm_ca_page __aligned(PAGE_SIZE);

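/* Per-CPU SVSM Calling Area pointer and its physical address */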
DEFINE_PER_CPU(struct svsm_ca *, svsm_caa);
DEFINE_PER_CPU(u64, svsm_caa_pa);

/*
 * Nothing shall interrupt this code path while holding the per-CPU
 * GHCB. The backup GHCB is only for NMIs interrupting this path.
 *
 * Callers must disable local interrupts around it.
 */
noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
{
	struct sev_es_runtime_data *data;
	struct ghcb *ghcb;

	WARN_ON(!irqs_disabled());

	data = this_cpu_read(runtime_data);
	ghcb = &data->ghcb_page;

	if (unlikely(data->ghcb_active)) {
		/* GHCB is already in use - save its contents */

		if (unlikely(data->backup_ghcb_active)) {
			/*
			 * Backup-GHCB is also already in use. There is no way
			 * to continue here so just kill the machine. To make
			 * panic() work, mark GHCBs inactive so that messages
			 * can be printed out.
			 */
			data->ghcb_active = false;
			data->backup_ghcb_active = false;

			instrumentation_begin();
			panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
			instrumentation_end();
		}

		/* Mark backup_ghcb active before writing to it */
		data->backup_ghcb_active = true;

		state->ghcb = &data->backup_ghcb;

		/* Backup GHCB content */
		*state->ghcb = *ghcb;
	} else {
		state->ghcb = NULL;
		data->ghcb_active = true;
	}

	return ghcb;
}

/* Include code shared with pre-decompression boot stage */
#include "sev-shared.c"

noinstr void __sev_put_ghcb(struct ghcb_state *state)
{
	struct sev_es_runtime_data *data;
	struct ghcb *ghcb;

	WARN_ON(!irqs_disabled());

	data = this_cpu_read(runtime_data);
	ghcb = &data->ghcb_page;

	if (state->ghcb) {
		/* Restore GHCB from Backup */
		*ghcb = *state->ghcb;
		data->backup_ghcb_active = false;
		state->ghcb = NULL;
	} else {
		/*
		 * Invalidate the GHCB so a VMGEXIT instruction issued
		 * from userspace won't appear to be valid.
		 */
		vc_ghcb_invalidate(ghcb);
		data->ghcb_active = false;
	}
}
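
/*
 * Minimal usage sketch for the two helpers above (illustrative only, not
 * an actual call site in this file): callers must disable interrupts
 * around the get/put pair so that only an NMI can interrupt it, which is
 * what the backup GHCB exists for.
 *
 *	struct ghcb_state state;
 *	unsigned long flags;
 *	struct ghcb *ghcb;
 *
 *	local_irq_save(flags);
 *	ghcb = __sev_get_ghcb(&state);
 *	... fill GHCB fields and exit to the hypervisor via VMGEXIT ...
 *	__sev_put_ghcb(&state);
 *	local_irq_restore(flags);
 */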

int svsm_perform_call_protocol(struct svsm_call *call)
{
	struct ghcb_state state;
	unsigned long flags;
	struct ghcb *ghcb;
	int ret;

	/*
	 * This can be called very early in boot, so use native functions in
	 * order to avoid paravirt issues.
	 */
	flags = native_local_irq_save();

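	/*
	 * Pick the best channel available at this point in boot: the per-CPU
	 * GHCB once fully initialized, the early boot GHCB before that, and
	 * the GHCB MSR protocol as the last resort.
	 */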
	if (sev_cfg.ghcbs_initialized)
		ghcb = __sev_get_ghcb(&state);
	else if (boot_ghcb)
		ghcb = boot_ghcb;
	else
		ghcb = NULL;

	do {
		ret = ghcb ? svsm_perform_ghcb_protocol(ghcb, call)
			   : svsm_perform_msr_protocol(call);
	} while (ret == -EAGAIN);

	if (sev_cfg.ghcbs_initialized)
		__sev_put_ghcb(&state);

	native_local_irq_restore(flags);

	return ret;
}

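/*
 * Transition a range of pages to the given page state using the GHCB MSR
 * protocol (usable before a GHCB is established).  Page validation is
 * rescinded before a private->shared conversion and performed after a
 * shared->private conversion; any failure terminates the guest.
 */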
void __head
early_set_pages_state(unsigned long vaddr, unsigned long paddr,
		      unsigned long npages, enum psc_op op)
{
	unsigned long paddr_end;
	u64 val;

	vaddr = vaddr & PAGE_MASK;

	paddr = paddr & PAGE_MASK;
	paddr_end = paddr + (npages << PAGE_SHIFT);

	while (paddr < paddr_end) {
		/* Page validation must be rescinded before changing to shared */
		if (op == SNP_PAGE_STATE_SHARED)
			pvalidate_4k_page(vaddr, paddr, false);

		/*
		 * Use the MSR protocol because this function can be called before
		 * the GHCB is established.
		 */
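		/*
		 * Roughly (see the GHCB specification for the exact MSR
		 * protocol encoding): the request below packs the GFN and
		 * the desired page state into the GHCB MSR; the response
		 * must carry the PSC response code and a zero error value.
		 */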
		sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
		VMGEXIT();

		val = sev_es_rd_ghcb_msr();

		if (GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP)
			goto e_term;

		if (GHCB_MSR_PSC_RESP_VAL(val))
			goto e_term;

		/* Page validation must be performed after changing to private */
		if (op == SNP_PAGE_STATE_PRIVATE)
			pvalidate_4k_page(vaddr, paddr, true);

		vaddr += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}

	return;

e_term:
	sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
}

void __head early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
					 unsigned long npages)
{
	/*
	 * This can be invoked in early boot while running identity mapped, so
	 * use an open coded check for SNP instead of using cc_platform_has().
	 * This eliminates worries about jump tables or checking boot_cpu_data
	 * in the cc_platform_has() function.
	 */
	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
		return;

	/*
	 * Ask the hypervisor to mark the memory pages as private in the RMP
	 * table.
	 */
	early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_PRIVATE);
}

void __head early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
					unsigned long npages)
{
	/*
	 * This can be invoked in early boot while running identity mapped, so
	 * use an open coded check for SNP instead of using cc_platform_has().
	 * This eliminates worries about jump tables or checking boot_cpu_data
	 * in the cc_platform_has() function.
	 */
	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
		return;

	/* Ask hypervisor to mark the memory pages shared in the RMP table. */
	early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_SHARED);
}
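
/*
 * Illustrative call (a sketch, not a call site in this file): while the
 * kernel is still running identity mapped, vaddr == paddr, so flipping a
 * single page such as the boot GHCB page to shared would look roughly
 * like:
 *
 *	unsigned long pa = (unsigned long)rip_rel_ptr(&boot_ghcb_page);
 *
 *	early_snp_set_memory_shared(pa, pa, 1);
 */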

/*
 * Initial setup of SNP relies on information provided by the
 * Confidential Computing blob, which can be passed to the kernel
 * in the following ways, depending on how it is booted:
 *
 * - when booted via the boot/decompress kernel:
 *   - via boot_params
 *
 * - when booted directly by firmware/bootloader (e.g. CONFIG_PVH):
 *   - via a setup_data entry, as defined by the Linux Boot Protocol
 *
 * Scan for the blob in that order.
 */
static __head struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
{
	struct cc_blob_sev_info *cc_info;

	/* Boot kernel would have passed the CC blob via boot_params. */
	if (bp->cc_blob_address) {
		cc_info = (struct cc_blob_sev_info *)(unsigned long)bp->cc_blob_address;
		goto found_cc_info;
	}

	/*
	 * If kernel was booted directly, without the use of the
	 * boot/decompression kernel, the CC blob may have been passed via
	 * setup_data instead.
	 */
	cc_info = find_cc_blob_setup_data(bp);
	if (!cc_info)
		return NULL;

found_cc_info:
	if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC)
		snp_abort();

	return cc_info;
}

static __head void svsm_setup(struct cc_blob_sev_info *cc_info)
{
	struct svsm_call call = {};
	int ret;
	u64 pa;

	/*
	 * Record the SVSM Calling Area address (CAA) if the guest is not
	 * running at VMPL0. The CA will be used to communicate with the
	 * SVSM to perform the SVSM services.
	 */
	if (!svsm_setup_ca(cc_info))
		return;

	/*
	 * It is very early in the boot and the kernel is running identity
	 * mapped but without having adjusted the pagetables to where the
	 * kernel was loaded (physbase), so get the CA address using
	 * RIP-relative addressing.
	 */
	pa = (u64)rip_rel_ptr(&boot_svsm_ca_page);

	/*
	 * Switch over to the boot SVSM CA while the current CA is still
	 * addressable. There is no GHCB at this point so use the MSR protocol.
	 *
	 * SVSM_CORE_REMAP_CA call:
	 *   RAX = 0 (Protocol=0, CallID=0)
	 *   RCX = New CA GPA
	 */
	call.caa = svsm_get_caa();
	call.rax = SVSM_CORE_CALL(SVSM_CORE_REMAP_CA);
	call.rcx = pa;
	ret = svsm_perform_call_protocol(&call);
	if (ret)
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SVSM_CA_REMAP_FAIL);

	boot_svsm_caa = (struct svsm_ca *)pa;
	boot_svsm_caa_pa = pa;
}

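/*
 * Scan for the CC blob and, if one is found, record the secrets page
 * address, set up the early CPUID table and the SVSM calling area, and
 * cache the blob address in boot_params.  Returns true only if valid
 * SNP setup information was gathered.
 */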
bool __head snp_init(struct boot_params *bp)
{
	struct cc_blob_sev_info *cc_info;

	if (!bp)
		return false;

	cc_info = find_cc_blob(bp);
	if (!cc_info)
		return false;

	if (cc_info->secrets_phys && cc_info->secrets_len == PAGE_SIZE)
		sev_secrets_pa = cc_info->secrets_phys;
	else
		return false;

	setup_cpuid_table(cc_info);

	svsm_setup(cc_info);

	/*
	 * The CC blob will be used later to access the secrets page. Cache
	 * it here like the boot kernel does.
	 */
	bp->cc_blob_address = (u32)(unsigned long)cc_info;

	return true;
}

void __head __noreturn snp_abort(void)
{
	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
}