1 /*
2  * Copyright 2024 The Hafnium Authors.
3  *
4  * Use of this source code is governed by a BSD-style
5  * license that can be found in the LICENSE file or at
6  * https://opensource.org/licenses/BSD-3-Clause.
7  */
8 
9 #include "hf/ffa/indirect_messaging.h"
10 
11 #include "hf/arch/other_world.h"
12 
13 #include "hf/api.h"
14 #include "hf/ffa/init.h"
15 #include "hf/ffa_internal.h"
16 #include "hf/vm.h"
17 
bool ffa_indirect_msg_is_supported(struct vm_locked sender_locked,
				   struct vm_locked receiver_locked)
{
	/*
	 * The Hypervisor build exists purely for testing, so indirect
	 * messaging between any pair of VMs is unconditionally allowed.
	 */
	(void)receiver_locked;
	(void)sender_locked;

	return true;
}
30 
bool ffa_indirect_msg_send2_forward(ffa_id_t receiver_vm_id,
				    ffa_id_t sender_vm_id,
				    struct ffa_value *ret)
{
	/* FFA_MSG_SEND2 is forwarded to SPMC when the receiver is an SP. */
	if (vm_id_is_current_world(receiver_vm_id)) {
		return false;
	}

	/*
	 * Set the sender in arg1 to allow the SPMC to retrieve
	 * VM's TX buffer to copy in SP's RX buffer.
	 * Cast before shifting: ffa_id_t is 16-bit and would otherwise be
	 * promoted to signed int, making `id << 16` undefined behaviour for
	 * ids with the top bit set.
	 */
	*ret = arch_other_world_call((struct ffa_value){
		.func = FFA_MSG_SEND2_32,
		.arg1 = (uint32_t)sender_vm_id << 16,
	});

	if (ffa_func_id(*ret) != FFA_SUCCESS_32) {
		dlog_verbose(
			"Failed forwarding FFA_MSG_SEND2_32 to the "
			"SPMC, got error %s (%d).\n",
			ffa_error_name(ffa_error_code(*ret)),
			ffa_error_code(*ret));
	}

	return true;
}
59 
60 /**
61  * Checks whether the vCPU's attempt to wait for a message has already been
62  * interrupted or whether it is allowed to block.
63  */
ffa_indirect_msg_recv_block_interrupted(struct vcpu_locked current_locked)64 static bool ffa_indirect_msg_recv_block_interrupted(
65 	struct vcpu_locked current_locked)
66 {
67 	bool interrupted;
68 
69 	/*
70 	 * Don't block if there are enabled and pending interrupts, to match
71 	 * behaviour of wait_for_interrupt.
72 	 */
73 	interrupted = (vcpu_virt_interrupt_count_get(current_locked) > 0);
74 
75 	return interrupted;
76 }
77 
78 /**
79  * Returns true if there is something in the return code, either a v1.0
80  * FFA_MSG_SEND, or an FFA_ERROR.
81  */
ffa_indirect_msg_return_pending_messages(struct vm_locked vm_locked,struct ffa_value * ret)82 static bool ffa_indirect_msg_return_pending_messages(struct vm_locked vm_locked,
83 						     struct ffa_value *ret)
84 {
85 	/* Return pending messages without blocking. */
86 	if (vm_locked.vm->mailbox.state == MAILBOX_STATE_FULL) {
87 		*ret = ffa_msg_recv_return(vm_locked.vm);
88 		if (ret->func == FFA_MSG_SEND_32) {
89 			vm_locked.vm->mailbox.state = MAILBOX_STATE_EMPTY;
90 		}
91 		return true;
92 	}
93 
94 	return false;
95 }
96 
/**
 * Receives a message from the mailbox. If one isn't available, this function
 * can optionally block the caller until one becomes available.
 *
 * No new messages can be received until the mailbox has been cleared.
 *
 * @param block	 If true and no message is pending, the vCPU is put into the
 *		 waiting state and control returns to the primary VM.
 * @param current_locked Locked current vCPU; the lock is dropped and
 *		 reacquired internally to respect VM-before-vCPU lock order.
 * @param next	 Output: set to the primary vCPU to run when blocking.
 * @return FFA_SUCCESS/FFA_MSG_SEND on delivery, otherwise an FFA_ERROR
 *	   (NOT_SUPPORTED, DENIED, RETRY or INTERRUPTED).
 */
struct ffa_value ffa_indirect_msg_recv(bool block,
				       struct vcpu_locked current_locked,
				       struct vcpu **next)
{
	struct vm *vm = current_locked.vcpu->vm;
	struct vcpu *current = current_locked.vcpu;
	struct vm_locked vm_locked;
	struct ffa_value return_code;

	/*
	 * The primary VM will receive messages as a status code from running
	 * vCPUs and must not call this function.
	 */
	if (vm_is_primary(vm)) {
		return ffa_error(FFA_NOT_SUPPORTED);
	}

	/*
	 * Deny if vCPU is executing in context of an FFA_MSG_SEND_DIRECT_REQ
	 * invocation.
	 */
	if (is_ffa_direct_msg_request_ongoing(current_locked)) {
		return ffa_error(FFA_DENIED);
	}

	/*
	 * A VM's lock must be acquired before any of its vCPUs' locks, so
	 * drop the vCPU lock, take the VM lock, then reacquire the vCPU lock.
	 */
	vcpu_unlock(&current_locked);
	vm_locked = vm_lock(vm);
	current_locked = vcpu_lock(current);

	if (ffa_indirect_msg_return_pending_messages(vm_locked, &return_code)) {
		goto out;
	}

	/* No pending message so fail if not allowed to block. */
	if (!block) {
		return_code = ffa_error(FFA_RETRY);
		goto out;
	}

	/*
	 * From this point onward this call can only be interrupted or a message
	 * received. If a message is received the return value will be set at
	 * that time to FFA_SUCCESS.
	 */
	return_code = ffa_error(FFA_INTERRUPTED);
	if (ffa_indirect_msg_recv_block_interrupted(current_locked)) {
		goto out;
	}

	{
		/* Switch back to primary VM to block. */
		struct ffa_value run_return = {
			.func = FFA_MSG_WAIT_32,
			.arg1 = ffa_vm_vcpu(vm->id,
					    vcpu_index(current_locked.vcpu)),
		};

		*next = api_switch_to_primary(current_locked, run_return,
					      VCPU_STATE_WAITING);
	}
out:
	vm_unlock(&vm_locked);

	return return_code;
}
168 
/**
 * Notifies the `to` VM about the message currently in its mailbox, possibly
 * with the help of the primary VM.
 *
 * @param to	 Locked recipient VM whose mailbox already holds the message.
 * @param from_id Sender VM id, packed into the FFA_MSG_SEND notification.
 * @param current_locked Locked current vCPU, used to switch to the primary.
 * @param next	 Output: set when execution must move to the primary vCPU.
 * @return FFA_SUCCESS, or for the TEE path the value returned by the
 *	   other-world call.
 */
static struct ffa_value deliver_msg(struct vm_locked to, ffa_id_t from_id,
				    struct vcpu_locked current_locked,
				    struct vcpu **next)
{
	struct ffa_value ret = (struct ffa_value){.func = FFA_SUCCESS_32};
	/* Default notification: sender id in the top half, receiver below. */
	struct ffa_value primary_ret = {
		.func = FFA_MSG_SEND_32,
		.arg1 = ((uint32_t)from_id << 16) | to.vm->id,
	};

	/* Messages for the primary VM are delivered directly. */
	if (vm_is_primary(to.vm)) {
		/*
		 * Only tell the primary VM the size and other details if the
		 * message is for it, to avoid leaking data about messages for
		 * other VMs.
		 */
		primary_ret = ffa_msg_recv_return(to.vm);

		*next = api_switch_to_primary(current_locked, primary_ret,
					      VCPU_STATE_BLOCKED);
		return ret;
	}

	/* Mark the mailbox full before anyone can observe the message. */
	to.vm->mailbox.state = MAILBOX_STATE_FULL;

	/* Messages for the TEE are sent on via the dispatcher. */
	if (to.vm->id == HF_TEE_VM_ID) {
		struct ffa_value call = ffa_msg_recv_return(to.vm);

		ret = arch_other_world_call(call);
		/*
		 * After the call to the TEE completes it must have finished
		 * reading its RX buffer, so it is ready for another message.
		 */
		to.vm->mailbox.state = MAILBOX_STATE_EMPTY;
		/*
		 * Don't return to the primary VM in this case, as the TEE is
		 * not (yet) scheduled via FF-A.
		 */
		return ret;
	}

	/* Return to the primary VM directly or with a switch. */
	if (from_id != HF_PRIMARY_VM_ID) {
		*next = api_switch_to_primary(current_locked, primary_ret,
					      VCPU_STATE_BLOCKED);
	}

	return ret;
}
224 
/*
 * Copies data from the sender's send buffer to the recipient's receive buffer
 * and notifies the recipient.
 *
 * If the recipient's receive buffer is busy, it can optionally register the
 * caller to be notified when the recipient's receive buffer becomes available.
 *
 * Returns FFA_SUCCESS on delivery, otherwise an FFA_ERROR:
 * INVALID_PARAMETERS (bad ids, oversized payload, unconfigured TX buffer),
 * DENIED (inside a direct-request handler) or BUSY (receiver mailbox full).
 */
struct ffa_value ffa_indirect_msg_send(ffa_id_t sender_vm_id,
				       ffa_id_t receiver_vm_id, uint32_t size,
				       struct vcpu *current, struct vcpu **next)
{
	struct vm *from = current->vm;
	struct vm *to;
	struct vm_locked to_locked;
	const void *from_msg;
	struct ffa_value ret;
	struct vcpu_locked current_locked;
	bool is_direct_request_ongoing;

	/* Ensure sender VM ID corresponds to the current VM. */
	if (sender_vm_id != from->id) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/* Disallow reflexive requests as this suggests an error in the VM. */
	if (receiver_vm_id == from->id) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/* Limit the size of transfer. */
	if (size > FFA_MSG_PAYLOAD_MAX) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/* Ensure the receiver VM exists. */
	to = vm_find(receiver_vm_id);
	if (to == NULL) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/*
	 * Deny if vCPU is executing in context of an FFA_MSG_SEND_DIRECT_REQ
	 * invocation.
	 */
	current_locked = vcpu_lock(current);
	is_direct_request_ongoing =
		is_ffa_direct_msg_request_ongoing(current_locked);

	if (is_direct_request_ongoing) {
		ret = ffa_error(FFA_DENIED);
		goto out_current;
	}

	/*
	 * Check that the sender has configured its send buffer. If the tx
	 * mailbox at from_msg is configured (i.e. from_msg != NULL) then it can
	 * be safely accessed after releasing the lock since the tx mailbox
	 * address can only be configured once.
	 * A VM's lock must be acquired before any of its vCPU's lock. Hence,
	 * unlock current vCPU and acquire it immediately after its VM's lock.
	 */
	vcpu_unlock(&current_locked);
	sl_lock(&from->lock);
	current_locked = vcpu_lock(current);
	from_msg = from->mailbox.send;
	sl_unlock(&from->lock);

	if (from_msg == NULL) {
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out_current;
	}

	to_locked = vm_lock(to);

	if (vm_is_mailbox_busy(to_locked)) {
		ret = ffa_error(FFA_BUSY);
		goto out;
	}

	/* Copy data. */
	memcpy_s(to->mailbox.recv, FFA_MSG_PAYLOAD_MAX, from_msg, size);
	to->mailbox.recv_size = size;
	to->mailbox.recv_sender = sender_vm_id;
	to->mailbox.recv_func = FFA_MSG_SEND_32;
	to->mailbox.state = MAILBOX_STATE_FULL;
	/* deliver_msg may set *next to switch execution to the primary VM. */
	ret = deliver_msg(to_locked, sender_vm_id, current_locked, next);

out:
	vm_unlock(&to_locked);

out_current:
	vcpu_unlock(&current_locked);

	return ret;
}
320