/*
 * Copyright (C) 2018-2022 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <types.h>
#include <asm/lib/bits.h>
#include <crypto_api.h>
#include <asm/guest/trusty.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/guest/ept.h>
#include <asm/guest/vm.h>
#include <asm/vmx.h>
#include <asm/security.h>
#include <logmsg.h>
#include <asm/seed.h>
#include <asm/tsc.h>

#define TRUSTY_VERSION   1U
#define TRUSTY_VERSION_2 2U

struct trusty_mem {
	/* The first page of trusty memory is reserved for key_info and trusty_startup_param. */
	struct {
		struct trusty_key_info key_info;
		struct trusty_startup_param startup_param;
	} first_page;

	/* The remaining memory is used for trusty's code/data/heap/stack. */
} __aligned(PAGE_SIZE);

/**
 * @defgroup trusty_apis Trusty APIs
 *
 * This is a special group that includes all APIs
 * related to Trusty
 *
 * @{
 */

/**
 * @brief Create Secure World EPT hierarchy
 *
 * Create the Secure World EPT hierarchy: construct a new PML4/PDPT and reuse
 * the PD/PT pages parsed from vm->arch_vm.ept
 *
 * @param vm pointer to a VM with 2 Worlds
 * @param gpa_orig original gpa allocated from vSBL
 * @param size LK size (16M by default)
 * @param gpa_rebased gpa rebased to offset xxx (511G_OFFSET)
 *
 */
static void create_secure_world_ept(struct acrn_vm *vm, uint64_t gpa_orig,
		uint64_t size, uint64_t gpa_rebased)
{
	/* The HPA of parameter gpa_orig was checked when invoking check_continuos_hpa */
	uint64_t hpa;

	hpa = gpa2hpa(vm, gpa_orig);

	/* Unmap [gpa_orig, gpa_orig + size) from the guest Normal World EPT mapping */
	ept_del_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, gpa_orig, size);

	vm->arch_vm.sworld_eptp = pgtable_create_trusty_root(&vm->arch_vm.ept_pgtable,
			vm->arch_vm.nworld_eptp, EPT_RWX, EPT_EXE);

	/* Map [gpa_rebased, gpa_rebased + size) into the Secure World EPT mapping */
	ept_add_mr(vm, (uint64_t *)vm->arch_vm.sworld_eptp, hpa, gpa_rebased, size, EPT_RWX | EPT_WB);

	/* Back up the Secure World info; used when destroying the Secure World and suspending the User VM */
	vm->sworld_control.sworld_memory.base_gpa_in_user_vm = gpa_orig;
	vm->sworld_control.sworld_memory.base_hpa = hpa;
	vm->sworld_control.sworld_memory.length = size;
}

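/**
 * @brief Destroy Secure World EPT hierarchy
 *
 * Tear down the Secure World EPT, optionally scrub the Trusty memory, and
 * restore the memory region to the guest's Normal World EPT mapping.
 *
 * @param vm pointer to a VM with 2 Worlds
 * @param need_clr_mem true if the Trusty memory should be zeroed before reuse
 */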
void destroy_secure_world(struct acrn_vm *vm, bool need_clr_mem)
{
	uint64_t hpa = vm->sworld_control.sworld_memory.base_hpa;
	uint64_t gpa_user_vm = vm->sworld_control.sworld_memory.base_gpa_in_user_vm;
	uint64_t size = vm->sworld_control.sworld_memory.length;

	if (vm->arch_vm.sworld_eptp != NULL) {
		if (need_clr_mem) {
			/* clear trusty memory space */
			stac();
			(void)memset(hpa2hva(hpa), 0U, (size_t)size);
			clac();
		}

		ept_del_mr(vm, vm->arch_vm.sworld_eptp, gpa_user_vm, size);
		vm->arch_vm.sworld_eptp = NULL;

		/* Restore memory to guest normal world */
		ept_add_mr(vm, vm->arch_vm.nworld_eptp, hpa, gpa_user_vm, size, EPT_RWX | EPT_WB);
	} else {
		pr_err("sworld eptp is NULL, it's not created");
	}
}

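/* Save the world-specific extended context: VMCS guest-state fields, MSRs
 * that live outside the VMCS, the XSAVE area and the per-world MSRs.
 */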
static void save_world_ctx(struct acrn_vcpu *vcpu, struct ext_context *ext_ctx)
{
	uint32_t i;

	/* cache on-demand run_context for efer/rflags/rsp/rip/cr0/cr4 */
	(void)vcpu_get_efer(vcpu);
	(void)vcpu_get_rflags(vcpu);
	(void)vcpu_get_rsp(vcpu);
	(void)vcpu_get_rip(vcpu);
	(void)vcpu_get_cr0(vcpu);
	(void)vcpu_get_cr4(vcpu);

	/* VMCS GUEST field */
	ext_ctx->tsc_offset = exec_vmread(VMX_TSC_OFFSET_FULL);
	ext_ctx->cr3 = exec_vmread(VMX_GUEST_CR3);
	ext_ctx->dr7 = exec_vmread(VMX_GUEST_DR7);
	ext_ctx->ia32_debugctl = exec_vmread64(VMX_GUEST_IA32_DEBUGCTL_FULL);

	/*
	 * Similar to CR0 and CR4, the actual value of the guest's IA32_PAT MSR
	 * (represented by ext_ctx->ia32_pat) could be different from the
	 * value that the guest reads (guest_msrs[IA32_PAT]).
	 *
	 * The wrmsr handler keeps track of 'guest_msrs'; we only
	 * need to save/load 'ext_ctx->ia32_pat' on a world switch.
	 */
	ext_ctx->ia32_pat = exec_vmread64(VMX_GUEST_IA32_PAT_FULL);
	ext_ctx->ia32_sysenter_esp = exec_vmread(VMX_GUEST_IA32_SYSENTER_ESP);
	ext_ctx->ia32_sysenter_eip = exec_vmread(VMX_GUEST_IA32_SYSENTER_EIP);
	ext_ctx->ia32_sysenter_cs = exec_vmread32(VMX_GUEST_IA32_SYSENTER_CS);
	save_segment(ext_ctx->cs, VMX_GUEST_CS);
	save_segment(ext_ctx->ss, VMX_GUEST_SS);
	save_segment(ext_ctx->ds, VMX_GUEST_DS);
	save_segment(ext_ctx->es, VMX_GUEST_ES);
	save_segment(ext_ctx->fs, VMX_GUEST_FS);
	save_segment(ext_ctx->gs, VMX_GUEST_GS);
	save_segment(ext_ctx->tr, VMX_GUEST_TR);
	save_segment(ext_ctx->ldtr, VMX_GUEST_LDTR);
	/* Only base and limit for IDTR and GDTR */
	ext_ctx->idtr.base = exec_vmread(VMX_GUEST_IDTR_BASE);
	ext_ctx->gdtr.base = exec_vmread(VMX_GUEST_GDTR_BASE);
	ext_ctx->idtr.limit = exec_vmread32(VMX_GUEST_IDTR_LIMIT);
	ext_ctx->gdtr.limit = exec_vmread32(VMX_GUEST_GDTR_LIMIT);

	/* MSRs that are not in the VMCS */
	ext_ctx->ia32_star = msr_read(MSR_IA32_STAR);
	ext_ctx->ia32_lstar = msr_read(MSR_IA32_LSTAR);
	ext_ctx->ia32_fmask = msr_read(MSR_IA32_FMASK);
	ext_ctx->ia32_kernel_gs_base = msr_read(MSR_IA32_KERNEL_GS_BASE);
	ext_ctx->tsc_aux = msr_read(MSR_IA32_TSC_AUX);

	/* XSAVE area */
	save_xsave_area(vcpu, ext_ctx);

	/* MSRs that need isolation between worlds */
	for (i = 0U; i < NUM_WORLD_MSRS; i++) {
		vcpu->arch.contexts[vcpu->arch.cur_context].world_msrs[i] = vcpu->arch.guest_msrs[i];
	}
}

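/* Load the world-specific extended context saved by save_world_ctx(), and
 * mark the on-demand run_context registers for lazy reload.
 */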
static void load_world_ctx(struct acrn_vcpu *vcpu, const struct ext_context *ext_ctx)
{
	uint32_t i;

	/* mark to update on-demand run_context for efer/rflags/rsp/rip/cr0/cr4 */
	bitmap_set_nolock(CPU_REG_EFER, &vcpu->reg_updated);
	bitmap_set_nolock(CPU_REG_RFLAGS, &vcpu->reg_updated);
	bitmap_set_nolock(CPU_REG_RSP, &vcpu->reg_updated);
	bitmap_set_nolock(CPU_REG_RIP, &vcpu->reg_updated);
	bitmap_set_nolock(CPU_REG_CR0, &vcpu->reg_updated);
	bitmap_set_nolock(CPU_REG_CR4, &vcpu->reg_updated);

	/* VMCS Execution field */
	exec_vmwrite64(VMX_TSC_OFFSET_FULL, ext_ctx->tsc_offset);

	/* VMCS GUEST field */
	exec_vmwrite(VMX_GUEST_CR3, ext_ctx->cr3);
	exec_vmwrite(VMX_GUEST_DR7, ext_ctx->dr7);
	exec_vmwrite64(VMX_GUEST_IA32_DEBUGCTL_FULL, ext_ctx->ia32_debugctl);
	exec_vmwrite64(VMX_GUEST_IA32_PAT_FULL, ext_ctx->ia32_pat);
	exec_vmwrite32(VMX_GUEST_IA32_SYSENTER_CS, ext_ctx->ia32_sysenter_cs);
	exec_vmwrite(VMX_GUEST_IA32_SYSENTER_ESP, ext_ctx->ia32_sysenter_esp);
	exec_vmwrite(VMX_GUEST_IA32_SYSENTER_EIP, ext_ctx->ia32_sysenter_eip);
	load_segment(ext_ctx->cs, VMX_GUEST_CS);
	load_segment(ext_ctx->ss, VMX_GUEST_SS);
	load_segment(ext_ctx->ds, VMX_GUEST_DS);
	load_segment(ext_ctx->es, VMX_GUEST_ES);
	load_segment(ext_ctx->fs, VMX_GUEST_FS);
	load_segment(ext_ctx->gs, VMX_GUEST_GS);
	load_segment(ext_ctx->tr, VMX_GUEST_TR);
	load_segment(ext_ctx->ldtr, VMX_GUEST_LDTR);
	/* Only base and limit for IDTR and GDTR */
	exec_vmwrite(VMX_GUEST_IDTR_BASE, ext_ctx->idtr.base);
	exec_vmwrite(VMX_GUEST_GDTR_BASE, ext_ctx->gdtr.base);
	exec_vmwrite32(VMX_GUEST_IDTR_LIMIT, ext_ctx->idtr.limit);
	exec_vmwrite32(VMX_GUEST_GDTR_LIMIT, ext_ctx->gdtr.limit);

	/* MSRs that are not in the VMCS */
	msr_write(MSR_IA32_STAR, ext_ctx->ia32_star);
	msr_write(MSR_IA32_LSTAR, ext_ctx->ia32_lstar);
	msr_write(MSR_IA32_FMASK, ext_ctx->ia32_fmask);
	msr_write(MSR_IA32_KERNEL_GS_BASE, ext_ctx->ia32_kernel_gs_base);
	msr_write(MSR_IA32_TSC_AUX, ext_ctx->tsc_aux);

	/* XSAVE area */
	rstore_xsave_area(vcpu, ext_ctx);

	/* MSRs that need isolation between worlds */
	for (i = 0U; i < NUM_WORLD_MSRS; i++) {
		vcpu->arch.guest_msrs[i] = vcpu->arch.contexts[!vcpu->arch.cur_context].world_msrs[i];
	}
}

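/* Copy the SMC call parameters (RDI, RSI, RDX, RBX) from the previous
 * world's run context into the next world's run context.
 */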
static void copy_smc_param(const struct run_context *prev_ctx,
		struct run_context *next_ctx)
{
	next_ctx->cpu_regs.regs.rdi = prev_ctx->cpu_regs.regs.rdi;
	next_ctx->cpu_regs.regs.rsi = prev_ctx->cpu_regs.regs.rsi;
	next_ctx->cpu_regs.regs.rdx = prev_ctx->cpu_regs.regs.rdx;
	next_ctx->cpu_regs.regs.rbx = prev_ctx->cpu_regs.regs.rbx;
}

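/**
 * @brief Switch a vCPU between the Normal World and the Secure World
 *
 * Save the previous world's context, load the next world's context, copy the
 * SMC parameters, and point the EPTP at the next world's EPT hierarchy.
 *
 * @param vcpu pointer to the vCPU being switched
 * @param next_world the world to switch to (NORMAL_WORLD or SECURE_WORLD)
 */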
void switch_world(struct acrn_vcpu *vcpu, int32_t next_world)
{
	struct acrn_vcpu_arch *arch = &vcpu->arch;

	/* save previous world context */
	save_world_ctx(vcpu, &arch->contexts[!next_world].ext_ctx);

	/* load next world context */
	load_world_ctx(vcpu, &arch->contexts[next_world].ext_ctx);

	/* Copy SMC parameters: RDI, RSI, RDX, RBX */
	copy_smc_param(&arch->contexts[!next_world].run_ctx,
			&arch->contexts[next_world].run_ctx);

	if (next_world == NORMAL_WORLD) {
		/* load EPTP for next world */
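		/* EPTP encoding: bits 2:0 = 6 (write-back memory type),
		 * bits 5:3 = 3 (EPT page-walk length minus one, i.e. a 4-level walk).
		 */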
		exec_vmwrite64(VMX_EPT_POINTER_FULL,
				hva2hpa(vcpu->vm->arch_vm.nworld_eptp) |
				(3UL << 3U) | 0x6UL);

#ifndef CONFIG_L1D_FLUSH_VMENTRY_ENABLED
		cpu_l1d_flush();
#endif
	} else {
		exec_vmwrite64(VMX_EPT_POINTER_FULL,
				hva2hpa(vcpu->vm->arch_vm.sworld_eptp) |
				(3UL << 3U) | 0x6UL);
	}

	/* Update world index */
	arch->cur_context = next_world;
}

/* Put key_info and trusty_startup_param in the first page of Trusty
 * runtime memory
 */
static bool setup_trusty_info(struct acrn_vcpu *vcpu, uint32_t mem_size, uint64_t mem_base_hpa, uint8_t *rkey)
{
	bool success = false;
	struct trusty_mem *mem;
	struct trusty_key_info key_info;
	struct trusty_startup_param startup_param;

	(void)memset(&key_info, 0U, sizeof(key_info));

	key_info.size_of_this_struct = sizeof(struct trusty_key_info);
	key_info.version = 0U;
	key_info.platform = 3U;

	if (rkey != NULL) {
		(void)memcpy_s(key_info.rpmb_key, 64U, rkey, 64U);
		(void)memset(rkey, 0U, 64U);
	}

	/* Derive dvseed from dseed for Trusty */
	if (derive_virtual_seed(&key_info.dseed_list[0U], &key_info.num_seeds,
			NULL, 0U,
			(uint8_t *)vcpu->vm->name, strnlen_s(vcpu->vm->name, MAX_VM_NAME_LEN))) {
		/* Derive encryption key of attestation keybox from dseed */
		if (derive_attkb_enc_key(key_info.attkb_enc_key)) {
			/* Prepare trusty startup param */
			startup_param.size_of_this_struct = sizeof(struct trusty_startup_param);
			startup_param.mem_size = mem_size;
			startup_param.tsc_per_ms = TSC_PER_MS;
			startup_param.trusty_mem_base = TRUSTY_EPT_REBASE_GPA;

			/* According to the Trusty boot protocol, Trusty uses RDI as the
			 * address (GPA) of startup_param at boot. Currently, startup_param
			 * is placed in the first page of Trusty memory, immediately after
			 * key_info.
			 */
			vcpu->arch.contexts[SECURE_WORLD].run_ctx.cpu_regs.regs.rdi
				= (uint64_t)TRUSTY_EPT_REBASE_GPA + sizeof(struct trusty_key_info);

			stac();
			mem = (struct trusty_mem *)(hpa2hva(mem_base_hpa));
			(void)memcpy_s((void *)&mem->first_page.key_info, sizeof(struct trusty_key_info),
					&key_info, sizeof(key_info));
			(void)memcpy_s((void *)&mem->first_page.startup_param, sizeof(struct trusty_startup_param),
					&startup_param, sizeof(startup_param));
			clac();
			success = true;
		}
	}

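	/* Scrub the local copy of the key material from the stack */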
	(void)memset(&key_info, 0U, sizeof(key_info));

	return success;
}

/* Secure World reuses the environment of the User VM loader, since both
 * boot from and run in 64-bit mode, except for the GP registers.
 * RIP, RSP and RDI are specified below; the other GP registers are left
 * as 0.
 */
static bool init_secure_world_env(struct acrn_vcpu *vcpu,
		uint64_t entry_gpa,
		uint64_t base_hpa,
		uint32_t size,
		uint8_t *rpmb_key)
{
	uint32_t i;

	vcpu->arch.inst_len = 0U;
	vcpu->arch.contexts[SECURE_WORLD].run_ctx.rip = entry_gpa;
	vcpu->arch.contexts[SECURE_WORLD].run_ctx.cpu_regs.regs.rsp =
		TRUSTY_EPT_REBASE_GPA + size;

	vcpu->arch.contexts[SECURE_WORLD].ext_ctx.tsc_offset = 0UL;

	/* Init per world MSRs */
	for (i = 0U; i < NUM_WORLD_MSRS; i++) {
		vcpu->arch.contexts[NORMAL_WORLD].world_msrs[i] = vcpu->arch.guest_msrs[i];
		vcpu->arch.contexts[SECURE_WORLD].world_msrs[i] = vcpu->arch.guest_msrs[i];
	}

	return setup_trusty_info(vcpu, size, base_hpa, rpmb_key);
}

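/**
 * @brief Initialize Trusty (Secure World) for a User VM
 *
 * Parse the trusty_boot_param, create the Secure World EPT hierarchy, save
 * the Normal World context and set up the Secure World environment, then
 * mark the Secure World as the current context.
 *
 * @param vcpu pointer to the vCPU that requested Trusty initialization
 * @param boot_param Trusty boot parameters provided by the guest
 *
 * @return true on success, false on an unsupported version or setup failure
 */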
bool initialize_trusty(struct acrn_vcpu *vcpu, struct trusty_boot_param *boot_param)
{
	bool success = true;
	uint64_t trusty_entry_gpa, trusty_base_gpa, trusty_base_hpa;
	uint32_t trusty_mem_size;
	struct acrn_vm *vm = vcpu->vm;
	uint8_t *rpmb_key = NULL;

	switch (boot_param->version) {
	case TRUSTY_VERSION_2:
		trusty_entry_gpa = ((uint64_t)boot_param->entry_point) |
			(((uint64_t)boot_param->entry_point_high) << 32U);
		trusty_base_gpa = ((uint64_t)boot_param->base_addr) |
			(((uint64_t)boot_param->base_addr_high) << 32U);
		rpmb_key = boot_param->rpmb_key;
		break;
	case TRUSTY_VERSION:
		trusty_entry_gpa = (uint64_t)boot_param->entry_point;
		trusty_base_gpa = (uint64_t)boot_param->base_addr;
		break;
	default:
		pr_err("%s: Version(%u) not supported!\n", __func__, boot_param->version);
		success = false;
		break;
	}

	if (success) {
		if ((vm->sworld_control.flag.supported == 0UL)
				|| (vm->arch_vm.sworld_eptp != NULL)) {
			pr_err("Sworld is not supported or Sworld eptp is not NULL");
			success = false;
		} else {
			trusty_mem_size = boot_param->mem_size;
			create_secure_world_ept(vm, trusty_base_gpa, trusty_mem_size,
					TRUSTY_EPT_REBASE_GPA);
			trusty_base_hpa = vm->sworld_control.sworld_memory.base_hpa;

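			/* Activate the Secure World EPT (write-back, 4-level walk; see switch_world()) */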
			exec_vmwrite64(VMX_EPT_POINTER_FULL,
					hva2hpa(vm->arch_vm.sworld_eptp) | (3UL << 3U) | 0x6UL);

			/* save Normal World context */
			save_world_ctx(vcpu, &vcpu->arch.contexts[NORMAL_WORLD].ext_ctx);

			/* init secure world environment */
			if (init_secure_world_env(vcpu,
					(trusty_entry_gpa - trusty_base_gpa) + TRUSTY_EPT_REBASE_GPA,
					trusty_base_hpa, trusty_mem_size, rpmb_key)) {

				/* switch to Secure World */
				vcpu->arch.cur_context = SECURE_WORLD;
			} else {
				success = false;
			}
		}
	}

	return success;
}

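/**
 * @brief Save a snapshot of the Secure World CPU context
 *
 * Copies the Secure World context into the VM's sworld_snapshot, to be
 * restored by restore_sworld_context() when the User VM resumes.
 */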
void save_sworld_context(struct acrn_vcpu *vcpu)
{
	(void)memcpy_s((void *)&vcpu->vm->sworld_snapshot, sizeof(struct guest_cpu_context),
			(void *)&vcpu->arch.contexts[SECURE_WORLD], sizeof(struct guest_cpu_context));
}

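/**
 * @brief Restore the Secure World CPU context from the snapshot
 *
 * Re-creates the Secure World EPT hierarchy and copies the snapshot taken by
 * save_sworld_context() back into the Secure World context.
 */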
void restore_sworld_context(struct acrn_vcpu *vcpu)
{
	struct secure_world_control *sworld_ctl =
		&vcpu->vm->sworld_control;

	create_secure_world_ept(vcpu->vm,
			sworld_ctl->sworld_memory.base_gpa_in_user_vm,
			sworld_ctl->sworld_memory.length,
			TRUSTY_EPT_REBASE_GPA);

	(void)memcpy_s((void *)&vcpu->arch.contexts[SECURE_WORLD], sizeof(struct guest_cpu_context),
			(void *)&vcpu->vm->sworld_snapshot, sizeof(struct guest_cpu_context));
}

/**
 * @}
 */
/* End of trusty_apis */