/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa/setup_and_discovery.h"

#include "hf/arch/other_world.h"
#include "hf/arch/std.h"

#include "hf/check.h"
#include "hf/ffa/init.h"
#include "hf/ffa/vm.h"
#include "hf/manifest.h"
#include "hf/vm.h"

#include "smc.h"

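/**
 * Queries the ID of the SPMC. When a TEE/SPMC is present, the query is
 * relayed to the SPMD via FFA_SPM_ID_GET; otherwise an
 * FFA_ERROR(FFA_NOT_SUPPORTED) value is returned.
 */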
struct ffa_value ffa_setup_spmc_id_get(void)
{
	if (ffa_init_is_tee_enabled()) {
		/*
		 * Fetch the SPMC ID from the SPMD using FFA_SPM_ID_GET.
		 * DEN0077A FF-A v1.1 Beta0 section 13.9.2:
		 * "FFA_SPM_ID_GET invocation at a non-secure physical FF-A
		 * instance returns the ID of the SPMC."
		 */
		return smc_ffa_call(
			(struct ffa_value){.func = FFA_SPM_ID_GET_32});
	}

	return (struct ffa_value){.func = FFA_ERROR_32,
				  .arg2 = FFA_NOT_SUPPORTED};
}

/**
 * FFA_SECONDARY_EP_REGISTER is not supported at the non-secure FF-A
 * instance, so this always returns false and the caller returns
 * FFA_ERROR.
 */
bool ffa_setup_is_secondary_ep_register_supported(void)
{
	return false;
}

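/**
 * Maps the given RX/TX buffer pair at the SPMC via FFA_RXTX_MAP and
 * panics if the SPMC does not report success. Also called with zeroed
 * arguments when forwarding a VM's buffers that have already been
 * described in the SPMC's RX buffer (see ffa_setup_rxtx_map_forward).
 */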
void ffa_setup_rxtx_map_spmc(paddr_t recv, paddr_t send, uint64_t page_count)
{
	struct ffa_value ret;

	ret = arch_other_world_call((struct ffa_value){.func = FFA_RXTX_MAP_64,
						       .arg1 = pa_addr(recv),
						       .arg2 = pa_addr(send),
						       .arg3 = page_count});
	CHECK(ret.func == FFA_SUCCESS_32);
}

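/**
 * Forwards a VM's FFA_RXTX_MAP to the SPMC for VMs that support
 * indirect messaging, describing the VM's buffers in the SPMC's RX
 * buffer. On success the VM's mailbox is marked as owned by the other
 * world.
 */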
void ffa_setup_rxtx_map_forward(struct vm_locked vm_locked)
{
	struct vm *vm = vm_locked.vm;
	struct vm *other_world;

	if (!ffa_init_is_tee_enabled()) {
		vm_locked.vm->mailbox.state = MAILBOX_STATE_EMPTY;
		return;
	}

	if (!ffa_vm_supports_indirect_messages(vm)) {
		return;
	}

	/* The hypervisor always forwards the call to the SPMC. */

	other_world = vm_find(HF_OTHER_WORLD_ID);

	/* Fill the buffers descriptor in the SPMC's RX buffer. */
	ffa_endpoint_rx_tx_descriptor_init(
		(struct ffa_endpoint_rx_tx_descriptor *)
			other_world->mailbox.recv,
		vm->id, (uintptr_t)vm->mailbox.recv,
		(uintptr_t)vm->mailbox.send);

	ffa_setup_rxtx_map_spmc(pa_init(0), pa_init(0), 0);

	vm_locked.vm->mailbox.state = MAILBOX_STATE_OTHER_WORLD_OWNED;

	dlog_verbose("Mailbox of %#x owned by SPMC.\n", vm_locked.vm->id);
}

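/**
 * Forwards a VM's FFA_RXTX_UNMAP to the SPMC for VMs that support
 * indirect messaging, so the SPMC unmaps the buffers it was given via
 * ffa_setup_rxtx_map_forward. Panics if the SPMC rejects the request.
 */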
void ffa_setup_rxtx_unmap_forward(struct vm_locked vm_locked)
{
	struct ffa_value ret;
	uint64_t func;
	ffa_id_t id;

	assert(vm_locked.vm != NULL);

	id = vm_locked.vm->id;

	if (!ffa_init_is_tee_enabled()) {
		return;
	}

	if (!ffa_vm_supports_indirect_messages(vm_locked.vm)) {
		return;
	}

	/* The hypervisor always forwards RXTX_UNMAP to the SPMC. */
	ret = arch_other_world_call(
		(struct ffa_value){.func = FFA_RXTX_UNMAP_32,
				   .arg1 = id << FFA_RXTX_ALLOCATOR_SHIFT});
	func = ret.func & ~SMCCC_CONVENTION_MASK;
	if (ret.func == (uint64_t)SMCCC_ERROR_UNKNOWN) {
		panic("Unknown error forwarding RXTX_UNMAP.\n");
	} else if (func == FFA_ERROR_32) {
		panic("Error %d forwarding RX/TX buffers.\n", ret.arg2);
	} else if (func != FFA_SUCCESS_32) {
		panic("Unexpected function %#x returned forwarding RX/TX "
		      "buffers.",
		      ret.func);
	}
}

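/**
 * Returns true if partition information queries may be forwarded to the
 * SPMC, i.e. if a TEE/SPMC is declared in the hypervisor manifest.
 */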
bool ffa_setup_partition_info_get_regs_forward_allowed(void)
{
	/*
	 * Allow forwarding from the hypervisor if a TEE or SPMC exists
	 * and is declared as such in the hypervisor manifest.
	 */
	return ffa_init_is_tee_enabled();
}

/**
 * Forwarding helper for FFA_PARTITION_INFO_GET.
 * Emits FFA_PARTITION_INFO_GET from the hypervisor to the SPMC if
 * allowed, appending the SPMC's entries to `partitions`.
 */
size_t ffa_setup_partition_info_get_forward(
	const struct ffa_uuid *uuid, uint32_t flags,
	struct ffa_partition_info *partitions, const size_t partitions_max_len,
	size_t entries_count)
{
	const struct vm *tee = vm_find(HF_TEE_VM_ID);
	struct ffa_partition_info *tee_partitions;
	size_t tee_partitions_count;
	struct ffa_value ret;
	size_t res;

	CHECK(tee != NULL);
	CHECK(entries_count < MAX_VMS);

	/*
	 * Allow forwarding from the hypervisor if a TEE or SPMC exists
	 * and is declared as such in the hypervisor manifest.
	 */
	if (!ffa_init_is_tee_enabled()) {
		return entries_count;
	}

	ret = arch_other_world_call(
		(struct ffa_value){.func = FFA_PARTITION_INFO_GET_32,
				   .arg1 = uuid->uuid[0],
				   .arg2 = uuid->uuid[1],
				   .arg3 = uuid->uuid[2],
				   .arg4 = uuid->uuid[3],
				   .arg5 = flags});
	if (ffa_func_id(ret) != FFA_SUCCESS_32) {
		dlog_verbose(
			"Failed forwarding FFA_PARTITION_INFO_GET to "
			"the SPMC.\n");
		return entries_count;
	}

	tee_partitions_count = ffa_partition_info_get_count(ret);

	/*
	 * Check that the limit of the buffer cannot be exceeded by the
	 * copy below.
	 */
	if (tee_partitions_count == 0 ||
	    add_overflow(tee_partitions_count, entries_count, &res) ||
	    res > partitions_max_len) {
		dlog_verbose("Invalid number of SPs returned by the SPMC.\n");
		return entries_count;
	}

	if ((flags & FFA_PARTITION_COUNT_FLAG_MASK) ==
	    FFA_PARTITION_COUNT_FLAG) {
		entries_count = res;
	} else {
		tee_partitions = (struct ffa_partition_info *)tee->mailbox.send;
		for (size_t index = 0; index < tee_partitions_count; index++) {
			partitions[entries_count] = tee_partitions[index];
			++entries_count;
		}

		/* Release the RX buffer. */
		ret = arch_other_world_call(
			(struct ffa_value){.func = FFA_RX_RELEASE_32});
		CHECK(ret.func == FFA_SUCCESS_32);
	}

	return entries_count;
}

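/**
 * Parses the FF-A manifest of a hypervisor-loaded partition: the FDT at
 * `fdt_addr` is temporarily mapped, parsed into `manifest_vm`, and
 * unmapped again. Any failure along the way is fatal.
 */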
void ffa_setup_parse_partition_manifest(struct mm_stage1_locked stage1_locked,
					paddr_t fdt_addr,
					size_t fdt_allocated_size,
					const struct manifest_vm *manifest_vm,
					const struct boot_params *boot_params,
					struct mpool *ppool)
{
	struct fdt partition_fdt;

	/*
	 * If the partition is an FF-A partition and is not hypervisor
	 * loaded, the manifest is passed in the partition package, is
	 * parsed during manifest_init(), and the secondary FDT should
	 * be empty.
	 */
	CHECK(manifest_vm->is_hyp_loaded);
	CHECK(mm_identity_map(stage1_locked, fdt_addr,
			      pa_add(fdt_addr, fdt_allocated_size), MM_MODE_R,
			      ppool) != NULL);
	// NOLINTNEXTLINE(performance-no-int-to-ptr)
	CHECK(fdt_init_from_ptr(&partition_fdt, (void *)pa_addr(fdt_addr),
				fdt_allocated_size) == true);
	CHECK(parse_ffa_manifest(&partition_fdt,
				 (struct manifest_vm *)manifest_vm, NULL,
				 boot_params) == MANIFEST_SUCCESS);
	CHECK(mm_unmap(stage1_locked, fdt_addr,
		       pa_add(fdt_addr, fdt_allocated_size), ppool) == true);
}

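/**
 * Returns the partition properties to advertise for `target` in a
 * partition information descriptor, masked according to what
 * `caller_id` may actually use.
 */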
ffa_partition_properties_t ffa_setup_partition_properties(
	ffa_id_t caller_id, const struct vm *target)
{
	ffa_partition_properties_t result = target->messaging_method;
	/*
	 * VMs support indirect messaging only in the Normal World.
	 * The primary VM cannot receive direct requests.
	 * Secondary VMs cannot send direct requests.
	 */
	if (!vm_id_is_current_world(caller_id)) {
		result &= ~FFA_PARTITION_INDIRECT_MSG;
	}
	if (vm_is_primary(target)) {
		result &= ~FFA_PARTITION_DIRECT_REQ_RECV;
	} else {
		result &= ~FFA_PARTITION_DIRECT_REQ_SEND;
	}
	return result;
}

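/**
 * Forwards a VM's FFA_RX_RELEASE to the SPMC when the SPMC manages the
 * VM's mailbox. Returns true if the call was forwarded (with the SPMC's
 * answer in `ret`), false if the hypervisor must handle it locally.
 */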
bool ffa_setup_rx_release_forward(struct vm_locked vm_locked,
				  struct ffa_value *ret)
{
	struct vm *vm = vm_locked.vm;
	ffa_id_t vm_id = vm->id;

	if (!ffa_init_is_tee_enabled() ||
	    !ffa_vm_supports_indirect_messages(vm)) {
		return false;
	}

	CHECK(vm_id_is_current_world(vm_id));

	/* The hypervisor always forwards a VM's RX_RELEASE to the SPMC. */
	*ret = arch_other_world_call(
		(struct ffa_value){.func = FFA_RX_RELEASE_32, .arg1 = vm_id});

	if (ret->func == FFA_SUCCESS_32) {
		/*
		 * The SPMC owns the VM's RX buffer after a successful
		 * FFA_RX_RELEASE call.
		 */
		vm->mailbox.state = MAILBOX_STATE_OTHER_WORLD_OWNED;
	} else {
		dlog_verbose(
			"Forwarded FFA_RX_RELEASE failed for VM ID %#x.\n",
			vm_locked.vm->id);
	}

	return true;
}

/**
 * Acquires the RX buffer of a VM from the SPM.
 *
 * The VM's RX/TX buffers must have been mapped in the SPM beforehand,
 * either by forwarding the VM's RXTX_MAP call or by other means if the
 * buffers were declared in the manifest.
 *
 * Returns true if ownership of the RX buffer belongs to the hypervisor.
 */
bool ffa_setup_acquire_receiver_rx(struct vm_locked to_locked,
				   struct ffa_value *ret)
{
	struct ffa_value other_world_ret;

	/*
	 * Do not forward the call if either:
	 * - The TEE is not present.
	 * - The VM does not support indirect messaging (FF-A v1.1 onwards).
	 * - The mailbox ownership hasn't been transferred to the SPMC.
	 */
	if (!ffa_init_is_tee_enabled() ||
	    !ffa_vm_supports_indirect_messages(to_locked.vm) ||
	    to_locked.vm->mailbox.state != MAILBOX_STATE_OTHER_WORLD_OWNED) {
		return true;
	}

	other_world_ret = arch_other_world_call((struct ffa_value){
		.func = FFA_RX_ACQUIRE_32, .arg1 = to_locked.vm->id});

	if (ret != NULL) {
		*ret = other_world_ret;
	}

	if (other_world_ret.func == FFA_SUCCESS_32) {
		to_locked.vm->mailbox.state = MAILBOX_STATE_EMPTY;
	}

	return other_world_ret.func == FFA_SUCCESS_32;
}