/*
 * Copyright (C) 2021-2022 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <asm/guest/vm.h>
#include <asm/guest/ept.h>
#include <asm/vm_config.h>
#include <asm/mmu.h>
#include <asm/guest/optee.h>
#include <asm/trampoline.h>
#include <asm/guest/vlapic.h>
#include <asm/guest/virq.h>
#include <asm/lapic.h>
#include <reloc.h>
#include <hypercall.h>
#include <logmsg.h>

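/* Return non-zero if @vm is the TEE (Trusted Execution Environment) VM. */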
int is_tee_vm(struct acrn_vm *vm)
{
	return (get_vm_config(vm->vm_id)->guest_flags & GUEST_FLAG_TEE) != 0;
}

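/* Return non-zero if @vm is the REE (Rich Execution Environment) VM. */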
int is_ree_vm(struct acrn_vm *vm)
{
	return (get_vm_config(vm->vm_id)->guest_flags & GUEST_FLAG_REE) != 0;
}

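/*
 * Set up the TEE VM's identity-mapped EPT: map all physical RAM reported
 * by the host e820, then unmap the hypervisor image itself so the TEE
 * cannot access hypervisor memory.
 */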
void prepare_tee_vm_memmap(struct acrn_vm *vm, const struct acrn_vm_config *vm_config)
{
	uint64_t hv_hpa;

	/*
	 * Only the following mappings need to be set up here; init_vpci will
	 * map the secure devices, if any:
	 *
	 * 1. Go through the physical e820 table and add all system memory
	 *    entries to the EPT.
	 * 2. Remove hypervisor-owned memory.
	 */
	if ((vm_config->guest_flags & GUEST_FLAG_TEE) != 0U) {
		vm->e820_entry_num = get_e820_entries_count();
		vm->e820_entries = (struct e820_entry *)get_e820_entry();

		prepare_vm_identical_memmap(vm, E820_TYPE_RAM, EPT_WB | EPT_RWX);

		hv_hpa = hva2hpa((void *)(get_hv_image_base()));
		ept_del_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, hv_hpa, get_hv_image_size());
	}
}

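/*
 * A TEE VM and its REE VM are paired via companion_vm_id in their
 * respective vm_configs, so this lookup works in both directions.
 */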
static struct acrn_vm *get_companion_vm(struct acrn_vm *vm)
{
	return get_vm_from_vmid(get_vm_config(vm->vm_id)->companion_vm_id);
}

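/*
 * Switch from the TEE to its companion REE on the current pCPU: forward
 * RDI/RSI/RDX/RBX to the REE vCPU (unless this is an FIQ return), handle
 * pending secure/non-secure interrupts, then put the TEE vCPU to sleep
 * so the REE vCPU can run.
 */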
static int32_t tee_switch_to_ree(struct acrn_vcpu *vcpu)
{
	uint64_t rdi, rsi, rdx, rbx;
	struct acrn_vm *ree_vm;
	struct acrn_vcpu *ree_vcpu;
	uint32_t pending_intr;
	int32_t ret = -EINVAL;

	rdi = vcpu_get_gpreg(vcpu, CPU_REG_RDI);
	rsi = vcpu_get_gpreg(vcpu, CPU_REG_RSI);
	rdx = vcpu_get_gpreg(vcpu, CPU_REG_RDX);
	rbx = vcpu_get_gpreg(vcpu, CPU_REG_RBX);

	ree_vm = get_companion_vm(vcpu->vm);
	ree_vcpu = vcpu_from_pid(ree_vm, get_pcpu_id());

	if (ree_vcpu != NULL) {
		/*
		 * Avoid copying any values to the REE's registers
		 * if this is an FIQ return.
		 */
		if (rdi != OPTEE_RETURN_FIQ_DONE) {
			vcpu_set_gpreg(ree_vcpu, CPU_REG_RDI, rdi);
			vcpu_set_gpreg(ree_vcpu, CPU_REG_RSI, rsi);
			vcpu_set_gpreg(ree_vcpu, CPU_REG_RDX, rdx);
			vcpu_set_gpreg(ree_vcpu, CPU_REG_RBX, rbx);
		}

		pending_intr = vlapic_get_next_pending_intr(vcpu);
		if (prio(pending_intr) > prio(TEE_FIXED_NONSECURE_VECTOR)) {
			/* For the TEE, all non-secure interrupts are represented as
			 * TEE_FIXED_NONSECURE_VECTOR, which has a lower priority than
			 * all secure interrupts.
			 *
			 * If there are secure interrupts pending, we inject the TEE's
			 * posted-interrupt notification vector (ANV) and schedule the
			 * REE. This way the REE gets trapped immediately after VM entry
			 * and goes through the secure interrupt handling flow in
			 * handle_x86_tee_int.
			 */
			send_single_ipi(pcpuid_from_vcpu(ree_vcpu),
				(uint32_t)(vcpu->arch.pid.control.bits.nv));
		} else if (prio(pending_intr) == prio(TEE_FIXED_NONSECURE_VECTOR)) {
			/* TEE_FIXED_NONSECURE_VECTOR needs to be cleared here because
			 * the pending non-secure interrupts will be handled immediately
			 * after resuming to the REE. On ARM this is done automatically
			 * by hardware, and ACRN emulates that behavior.
			 */
			vlapic_clear_pending_intr(vcpu, TEE_FIXED_NONSECURE_VECTOR);
		}

		sleep_thread(&vcpu->thread_obj);
		ret = 0;
	} else {
		pr_fatal("No REE vCPU running on pCPU%u\n", get_pcpu_id());
	}

	return ret;
}

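/*
 * Switch from the REE to its companion TEE on the current pCPU: forward
 * the call arguments held in RAX/RDI/RSI/RDX/RBX/RCX to the TEE vCPU
 * and wake it.
 */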
static int32_t ree_switch_to_tee(struct acrn_vcpu *vcpu)
{
	uint64_t rax, rdi, rsi, rdx, rbx, rcx;
	struct acrn_vm *tee_vm;
	struct acrn_vcpu *tee_vcpu;
	int32_t ret = -EINVAL;

	rax = vcpu_get_gpreg(vcpu, CPU_REG_RAX);
	rdi = vcpu_get_gpreg(vcpu, CPU_REG_RDI);
	rsi = vcpu_get_gpreg(vcpu, CPU_REG_RSI);
	rdx = vcpu_get_gpreg(vcpu, CPU_REG_RDX);
	rbx = vcpu_get_gpreg(vcpu, CPU_REG_RBX);
	rcx = vcpu_get_gpreg(vcpu, CPU_REG_RCX);

	tee_vm = get_companion_vm(vcpu->vm);
	tee_vcpu = vcpu_from_pid(tee_vm, get_pcpu_id());
	if (tee_vcpu != NULL) {
		vcpu_set_gpreg(tee_vcpu, CPU_REG_RAX, rax);
		vcpu_set_gpreg(tee_vcpu, CPU_REG_RDI, rdi);
		vcpu_set_gpreg(tee_vcpu, CPU_REG_RSI, rsi);
		vcpu_set_gpreg(tee_vcpu, CPU_REG_RDX, rdx);
		vcpu_set_gpreg(tee_vcpu, CPU_REG_RBX, rbx);
		vcpu_set_gpreg(tee_vcpu, CPU_REG_RCX, rcx);

		wake_thread(&tee_vcpu->thread_obj);

		ret = 0;
	} else {
		pr_fatal("No TEE vCPU running on pCPU%u\n", get_pcpu_id());
	}

	return ret;
}

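/*
 * Hypercall handler invoked by the TEE when one of its vCPUs finishes
 * booting; the calling TEE vCPU is always put to sleep afterwards.
 */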
int32_t hcall_handle_tee_vcpu_boot_done(struct acrn_vcpu *vcpu, __unused struct acrn_vm *target_vm,
		__unused uint64_t param1, __unused uint64_t param2)
{
	struct acrn_vm *ree_vm;
	uint64_t rdi;

	/*
	 * RDI == 1 indicates that the REE VM should be started; otherwise
	 * only the corresponding TEE vCPU needs to be put to sleep.
	 */
	rdi = vcpu_get_gpreg(vcpu, CPU_REG_RDI);
	if (rdi == 1UL) {
		ree_vm = get_companion_vm(vcpu->vm);
		start_vm(ree_vm);
	}

	sleep_thread(&vcpu->thread_obj);

	return 0;
}

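/*
 * Hypercall handler for the world switch: dispatch on whether the caller
 * is the TEE (switch to REE) or the REE (switch to TEE).
 */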
int32_t hcall_switch_ee(struct acrn_vcpu *vcpu, __unused struct acrn_vm *target_vm,
		__unused uint64_t param1, __unused uint64_t param2)
{
	int32_t ret = 0;

	if (is_tee_vm(vcpu->vm)) {
		ret = tee_switch_to_ree(vcpu);
	} else if (is_ree_vm(vcpu->vm)) {
		ret = ree_switch_to_tee(vcpu);
	}

	return ret;
}

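/*
 * Handle a passthrough interrupt that belongs to the TEE or REE while the
 * other world is running on this pCPU; see the per-case comments below.
 */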
void handle_x86_tee_int(struct ptirq_remapping_info *entry, uint16_t pcpu_id)
{
	struct acrn_vcpu *tee_vcpu;
	struct acrn_vcpu *curr_vcpu = get_running_vcpu(pcpu_id);

	if (is_ree_vm(entry->vm) && is_tee_vm(curr_vcpu->vm)) {
		/*
		 * A non-secure interrupt (one belonging to the REE) that arrives
		 * while the REE vCPU is running is injected into the REE directly.
		 * But when the TEE vCPU is running at that time, we inject a
		 * predefined vector into the TEE as a notification and let the
		 * TEE continue running.
		 */
		tee_vcpu = vcpu_from_pid(get_companion_vm(entry->vm), pcpu_id);
		vlapic_set_intr(tee_vcpu, TEE_FIXED_NONSECURE_VECTOR, LAPIC_TRIG_EDGE);
	} else if (is_tee_vm(entry->vm) && is_ree_vm(curr_vcpu->vm)) {
		/*
		 * A secure interrupt (one belonging to the TEE) that arrives
		 * while the TEE vCPU is running is injected into the TEE directly.
		 * But when the REE vCPU is running at that time, we need to
		 * switch to the TEE for handling and notify OP-TEE via RDI.
		 */
		tee_vcpu = vcpu_from_pid(entry->vm, pcpu_id);
		/*
		 * Write OPTEE_FIQ_ENTRY (0xB20000FF) to RDI to indicate that the
		 * switch was triggered by a secure interrupt; this is the
		 * contract with OP-TEE.
		 */
		vcpu_set_gpreg(tee_vcpu, CPU_REG_RDI, OPTEE_FIQ_ENTRY);

		wake_thread(&tee_vcpu->thread_obj);
	} else {
		/* Nothing needs to be done in other cases */
	}
}