1 /*
2 * Copyright (C) 2019-2022 Intel Corporation.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7 #include <types.h>
8 #include <errno.h>
9 #include <asm/cpufeatures.h>
10 #include <asm/cpu_caps.h>
11 #include <asm/sgx.h>
12 #include <asm/cpuid.h>
13 #include <asm/guest/vm.h>
14 #include <logmsg.h>
15
16 #define SGX_OPTED_IN (MSR_IA32_FEATURE_CONTROL_SGX_GE | MSR_IA32_FEATURE_CONTROL_LOCK)
17
/* Static variables that are not explicitly initialized are zero-initialized */
19 static int32_t init_sgx_ret = 0;
20 static struct epc_section pepc_sections[MAX_EPC_SECTIONS]; /* physcial epc sections */
21 static struct epc_map vm_epc_maps[MAX_EPC_SECTIONS][CONFIG_MAX_VM_NUM]; /* epc resource mapping for VMs */
22
get_epc_section(uint32_t sec_id,uint64_t * base,uint64_t * size)23 static int32_t get_epc_section(uint32_t sec_id, uint64_t* base, uint64_t* size)
24 {
25 uint32_t eax = 0U, ebx = 0U, ecx = 0U, edx = 0U, type;
26 int32_t ret = 0;
27
28 cpuid_subleaf(CPUID_SGX_LEAF, sec_id + CPUID_SGX_EPC_SUBLEAF_BASE, &eax, &ebx, &ecx, &edx);
29 type = eax & CPUID_SGX_EPC_TYPE_MASK;
30 if (type == CPUID_SGX_EPC_TYPE_VALID) {
31 *base = (((uint64_t)ebx & CPUID_SGX_EPC_HIGH_MASK) << 32U) |
32 ((uint64_t)eax & CPUID_SGX_EPC_LOW_MASK);
33 *size = (((uint64_t)edx & CPUID_SGX_EPC_HIGH_MASK) << 32U) |
34 ((uint64_t)ecx & CPUID_SGX_EPC_LOW_MASK);
35 if (*size != 0UL) {
36 pepc_sections[sec_id].base = *base;
37 pepc_sections[sec_id].size = *size;
38 } else {
39 ret = -EINVAL;
40 }
41 } else if (type == CPUID_SGX_EPC_TYPE_INVALID) {
42 /* indicate the end of epc enumeration */
43 } else {
44 pr_err("%s: unsupport EPC type %u", __func__, type);
45 ret = -EINVAL;
46 }
47
48 return ret;
49 }
50
/* Enumerate physical EPC resource and partition it according to VM configurations.
 * Build the mappings between HPA and GPA for EPT mapping later.
 * EPC resource partition and mapping relationship will stay unchanged after sgx init.
 */
static int32_t partition_epc(void)
{
	uint16_t vm_id = 0U;
	uint32_t psec_id = 0U, mid = 0U;     /* mid: per-VM map index into vm_epc_maps[][] */
	uint64_t psec_addr = 0UL, psec_size = 0UL;
	uint64_t free_size = 0UL, alloc_size;
	struct acrn_vm_config *vm_config = get_vm_config(vm_id);
	uint64_t vm_request_size = vm_config->epc.size;
	int32_t ret = 0;

	/* Greedy first-fit: walk VMs in id order, carving each VM's requested EPC
	 * out of the physical sections in enumeration order. A VM may span several
	 * sections; a section may serve several VMs. */
	while (psec_id < MAX_EPC_SECTIONS) {
		if (vm_request_size == 0UL) {
			/* current VM fully satisfied (or requested none): advance to next VM */
			vm_id++;
			if (vm_id == CONFIG_MAX_VM_NUM) {
				break;
			}
			mid = 0U;    /* each VM's map list restarts at index 0 */
			vm_config = get_vm_config(vm_id);
			vm_request_size = vm_config->epc.size;
		} else {
			if (free_size == 0UL) {
				/* current section exhausted: enumerate the next one */
				ret = get_epc_section(psec_id, &psec_addr, &psec_size);
				free_size = psec_size;
				if ((ret != 0) || (free_size == 0UL)) {
					/* enumeration error or end of EPC sections */
					break;
				}
				psec_id++;
			}
			alloc_size = min(vm_request_size, free_size);
			vm_epc_maps[mid][vm_id].size = alloc_size;
			/* HPA resumes where the previous carve-out of this section ended */
			vm_epc_maps[mid][vm_id].hpa = psec_addr + psec_size - free_size;
			/* GPA advances through the VM's configured EPC window as pieces are filled */
			vm_epc_maps[mid][vm_id].gpa = vm_config->epc.base + vm_config->epc.size - vm_request_size;
			vm_request_size -= alloc_size;
			free_size -= alloc_size;
			mid++;
		}
	}
	/* leftover request means physical EPC was insufficient for the VM configs */
	if (vm_request_size != 0UL) {
		ret = -ENOMEM;
	}

	return ret;
}
98
get_phys_epc(void)99 struct epc_section* get_phys_epc(void)
100 {
101 return pepc_sections;
102 }
103
/**
 * Get the EPC map list built for a VM during partition_epc().
 *
 * @param vm_id identifier of the VM whose mapping is requested
 *
 * @return pointer to the VM's first map entry, i.e. &vm_epc_maps[0][vm_id]
 */
struct epc_map* get_epc_mapping(uint16_t vm_id)
{
	return vm_epc_maps[0] + vm_id;
}
108
init_sgx(void)109 int32_t init_sgx(void)
110 {
111 if (pcpu_has_cap(X86_FEATURE_SGX)) {
112 if ((msr_read(MSR_IA32_FEATURE_CONTROL) & SGX_OPTED_IN) == SGX_OPTED_IN){
113 init_sgx_ret = partition_epc();
114 if (init_sgx_ret != 0) {
115 pr_err("Please change SGX/PRM setting in BIOS or EPC setting in VM config");
116 }
117 }
118 }
119
120 return init_sgx_ret;
121 }
122
is_vsgx_supported(uint16_t vm_id)123 bool is_vsgx_supported(uint16_t vm_id)
124 {
125 return ((init_sgx_ret == 0) && (vm_epc_maps[0][vm_id].size != 0U));
126 }
127