/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#pragma once

#include "hf/abi.h"
#include "hf/ffa.h"
#include "hf/types.h"

/**
 * This function must be implemented to trigger the architecture-specific
 * mechanism to call into the hypervisor.
 */
int64_t hf_call(uint64_t arg0, uint64_t arg1, uint64_t arg2, uint64_t arg3);
struct ffa_value ffa_call(struct ffa_value args);
void memcpy_s(void *dest, size_t destsz, const void *src, size_t count);

/**
 * Returns the VM's own ID.
 */
static inline struct ffa_value ffa_id_get(void)
{
	return ffa_call((struct ffa_value){.func = FFA_ID_GET_32});
}

/**
 * Returns the SPMC FF-A ID at NS virtual/physical and secure virtual
 * FF-A instances.
 * DEN0077A FF-A v1.1 Beta0 section 13.9 FFA_SPM_ID_GET.
 */
static inline struct ffa_value ffa_spm_id_get(void)
{
	return ffa_call((struct ffa_value){.func = FFA_SPM_ID_GET_32});
}

/**
 * Requests information for partitions instantiated in the system. If the
 * FFA_PARTITION_COUNT_FLAG is not set, the information is returned
 * in the RX buffer of the caller as an array of partition information
 * descriptors (struct ffa_partition_info).
 *
 * A Null UUID (a UUID that is all zeros) returns information for all
 * partitions, whereas a non-Null UUID returns information only for partitions
 * that match.
 *
 * Flags may include:
 * - FFA_PARTITION_COUNT_FLAG, which specifies whether the partition info
 *   descriptors are returned in the RX buffer or just the count in arg2:
 *   1 returns just the count.
 *   0 returns the count along with the partition info descriptors.
 *
 * Returns:
 * - FFA_SUCCESS on success. The count of partition information descriptors
 *   populated in the RX buffer is returned in arg2 (register w2).
 * - FFA_BUSY if the caller's RX buffer is not free.
 * - FFA_NO_MEMORY if the results do not fit in the caller's RX buffer.
 * - FFA_INVALID_PARAMETERS for an unrecognized UUID.
 */
static inline struct ffa_value ffa_partition_info_get(
	const struct ffa_uuid *uuid, const uint32_t flags)
{
	return ffa_call((struct ffa_value){.func = FFA_PARTITION_INFO_GET_32,
					   .arg1 = uuid->uuid[0],
					   .arg2 = uuid->uuid[1],
					   .arg3 = uuid->uuid[2],
					   .arg4 = uuid->uuid[3],
					   .arg5 = flags});
}
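
/*
 * Example (illustrative sketch, not part of this API): query only the number
 * of partitions by passing the Null UUID with FFA_PARTITION_COUNT_FLAG set,
 * so the caller's RX buffer is not consumed. Assumes FFA_SUCCESS_32 from
 * "hf/ffa.h" is the success function ID reported in .func.
 *
 *	struct ffa_uuid uuid = {{0, 0, 0, 0}};
 *	struct ffa_value ret =
 *		ffa_partition_info_get(&uuid, FFA_PARTITION_COUNT_FLAG);
 *	uint32_t vm_count = (ret.func == FFA_SUCCESS_32) ? ret.arg2 : 0;
 */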
/**
 * DEN0077A FF-A v1.1 Beta0 section 18.3.2.1
 * Registers vCPU secondary entry point for the caller VM.
 * Called from secure virtual FF-A instance.
 */
static inline struct ffa_value ffa_secondary_ep_register(uintptr_t address)
{
	return ffa_call((struct ffa_value){.func = FFA_SECONDARY_EP_REGISTER_64,
					   .arg1 = address});
}

/**
 * Returns the VM's own ID.
 */
static inline ffa_vm_id_t hf_vm_get_id(void)
{
	return ffa_id_get().arg2;
}

/**
 * Runs the given vCPU of the given VM.
 */
static inline struct ffa_value ffa_run(ffa_vm_id_t vm_id,
				       ffa_vcpu_index_t vcpu_idx)
{
	return ffa_call((struct ffa_value){
		.func = FFA_RUN_32, .arg1 = ffa_vm_vcpu(vm_id, vcpu_idx)});
}
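
/*
 * Example (illustrative sketch): one scheduling step of a primary VM driving
 * a secondary vCPU. `vm_id` and `vcpu_idx` are placeholders; FFA_MSG_WAIT_32
 * and FFA_INTERRUPT_32 are function IDs from "hf/ffa.h", and the handling
 * shown is deliberately simplified.
 *
 *	struct ffa_value ret = ffa_run(vm_id, vcpu_idx);
 *	if (ret.func == FFA_MSG_WAIT_32) {
 *		// vCPU blocked waiting for a message; reschedule it later.
 *	} else if (ret.func == FFA_INTERRUPT_32) {
 *		// vCPU was preempted; run it again soon.
 *	}
 */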

/**
 * Hints that the vCPU is willing to yield its current use of the physical CPU.
 * This call always returns FFA_SUCCESS.
 */
static inline struct ffa_value ffa_yield(void)
{
	return ffa_call((struct ffa_value){.func = FFA_YIELD_32});
}

/**
 * Configures the pages to send/receive data through. The pages must not be
 * shared.
 *
 * Returns:
 * - FFA_ERROR FFA_INVALID_PARAMETERS if the given addresses are not properly
 *   aligned or are the same.
 * - FFA_ERROR FFA_NO_MEMORY if the hypervisor was unable to map the buffers
 *   due to insufficient page table memory.
 * - FFA_ERROR FFA_DENIED if the pages are already mapped or are not owned by
 *   the caller.
 * - FFA_SUCCESS on success if no further action is needed.
 * - FFA_RX_RELEASE if it was called by the primary VM and the primary VM now
 *   needs to wake up or kick waiters.
 */
static inline struct ffa_value ffa_rxtx_map(hf_ipaddr_t send, hf_ipaddr_t recv)
{
	return ffa_call(
		(struct ffa_value){.func = FFA_RXTX_MAP_64,
				   .arg1 = send,
				   .arg2 = recv,
				   .arg3 = HF_MAILBOX_SIZE / FFA_PAGE_SIZE});
}
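
/*
 * Example (illustrative sketch): registering a statically allocated, page
 * aligned RX/TX buffer pair. The buffer names are hypothetical, and the
 * alignment attribute assumes a GCC/Clang toolchain.
 *
 *	static uint8_t send_page[HF_MAILBOX_SIZE]
 *		__attribute__((aligned(FFA_PAGE_SIZE)));
 *	static uint8_t recv_page[HF_MAILBOX_SIZE]
 *		__attribute__((aligned(FFA_PAGE_SIZE)));
 *
 *	struct ffa_value ret = ffa_rxtx_map((hf_ipaddr_t)send_page,
 *					    (hf_ipaddr_t)recv_page);
 *	// On FFA_ERROR_32, the error code is reported in ret.arg2.
 */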

/**
 * Unmaps the RX/TX buffer pair of an endpoint or Hypervisor from the
 * translation regime of the callee.
 *
 * Returns:
 * - FFA_ERROR FFA_INVALID_PARAMETERS if there is no buffer pair registered on
 *   behalf of the caller.
 * - FFA_SUCCESS on success if no further action is needed.
 */
static inline struct ffa_value ffa_rxtx_unmap(void)
{
	/* Note that allocator ID MBZ at virtual instance. */
	return ffa_call((struct ffa_value){.func = FFA_RXTX_UNMAP_32});
}

/**
 * Copies data from the sender's send buffer to the recipient's receive buffer.
 *
 * If the recipient's receive buffer is busy, it can optionally register the
 * caller to be notified when the recipient's receive buffer becomes available.
 *
 * Attributes may include:
 * - FFA_MSG_SEND_NOTIFY, to notify the caller when it should try again.
 * - FFA_MSG_SEND_LEGACY_MEMORY_*, to send a legacy architected memory sharing
 *   message.
 *
 * Returns FFA_SUCCESS if the message is sent, or an error code otherwise:
 * - INVALID_PARAMETERS: one or more of the parameters do not conform.
 * - BUSY: the message could not be delivered either because the mailbox
 *   was full or the target VM is not yet set up.
 */
static inline struct ffa_value ffa_msg_send(ffa_vm_id_t sender_vm_id,
					    ffa_vm_id_t target_vm_id,
					    uint32_t size, uint32_t attributes)
{
	return ffa_call((struct ffa_value){
		.func = FFA_MSG_SEND_32,
		.arg1 = ((uint64_t)sender_vm_id << 16) | target_vm_id,
		.arg3 = size,
		.arg4 = attributes});
}
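
/*
 * Example (illustrative sketch): sending a payload that was already copied
 * into the caller's TX buffer, retrying while the receiver's mailbox is busy.
 * `target_id` and `payload_size` are placeholders; FFA_ERROR_32 and FFA_BUSY
 * are from "hf/ffa.h". The busy-wait retry is a simplification; a real caller
 * would yield or wait to be notified instead.
 *
 *	struct ffa_value ret;
 *	do {
 *		ret = ffa_msg_send(hf_vm_get_id(), target_id, payload_size, 0);
 *	} while (ret.func == FFA_ERROR_32 && (int32_t)ret.arg2 == FFA_BUSY);
 */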

/**
 * Copies data from the sender's send buffer to the recipient's receive buffer
 * and notifies the receiver.
 *
 * `flags` may include a 'Delay Schedule Receiver interrupt'.
 *
 * Returns FFA_SUCCESS if the message is sent, or an error code otherwise:
 * - INVALID_PARAMETERS: one or more of the parameters do not conform.
 * - BUSY: receiver's mailbox was full.
 * - DENIED: receiver is not in a state to handle the request or doesn't
 *   support indirect messages.
 */
static inline struct ffa_value ffa_msg_send2(uint32_t flags)
{
	return ffa_call((struct ffa_value){
		.func = FFA_MSG_SEND2_32, .arg1 = 0, .arg2 = flags});
}

static inline struct ffa_value ffa_mem_donate(uint32_t length,
					      uint32_t fragment_length)
{
	return ffa_call((struct ffa_value){.func = FFA_MEM_DONATE_32,
					   .arg1 = length,
					   .arg2 = fragment_length});
}

static inline struct ffa_value ffa_mem_lend(uint32_t length,
					    uint32_t fragment_length)
{
	return ffa_call((struct ffa_value){.func = FFA_MEM_LEND_32,
					   .arg1 = length,
					   .arg2 = fragment_length});
}

static inline struct ffa_value ffa_mem_share(uint32_t length,
					     uint32_t fragment_length)
{
	return ffa_call((struct ffa_value){.func = FFA_MEM_SHARE_32,
					   .arg1 = length,
					   .arg2 = fragment_length});
}

static inline struct ffa_value ffa_mem_retrieve_req(uint32_t length,
						    uint32_t fragment_length)
{
	return ffa_call((struct ffa_value){.func = FFA_MEM_RETRIEVE_REQ_32,
					   .arg1 = length,
					   .arg2 = fragment_length});
}

static inline struct ffa_value ffa_mem_relinquish(void)
{
	return ffa_call((struct ffa_value){.func = FFA_MEM_RELINQUISH_32});
}

static inline struct ffa_value ffa_mem_reclaim(ffa_memory_handle_t handle,
					       ffa_memory_region_flags_t flags)
{
	return ffa_call((struct ffa_value){.func = FFA_MEM_RECLAIM_32,
					   .arg1 = (uint32_t)handle,
					   .arg2 = (uint32_t)(handle >> 32),
					   .arg3 = flags});
}
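
/*
 * Example (illustrative sketch): the sender side of a non-fragmented
 * FFA_MEM_SHARE, where the total length and the fragment length are equal.
 * It assumes a memory region descriptor of `msg_size` bytes (a placeholder)
 * has already been written to the caller's TX buffer, and that on success
 * the 64-bit handle comes back split across w2/w3, mirroring the packing
 * that ffa_mem_reclaim() above sends.
 *
 *	struct ffa_value ret = ffa_mem_share(msg_size, msg_size);
 *	if (ret.func == FFA_SUCCESS_32) {
 *		ffa_memory_handle_t handle =
 *			(ffa_memory_handle_t)ret.arg2 |
 *			((ffa_memory_handle_t)ret.arg3 << 32);
 *		// ... later, once all borrowers have relinquished:
 *		ffa_mem_reclaim(handle, 0);
 *	}
 */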

static inline struct ffa_value ffa_mem_frag_rx(ffa_memory_handle_t handle,
					       uint32_t fragment_offset)
{
	/* Note that sender MBZ at virtual instance. */
	return ffa_call((struct ffa_value){.func = FFA_MEM_FRAG_RX_32,
					   .arg1 = (uint32_t)handle,
					   .arg2 = (uint32_t)(handle >> 32),
					   .arg3 = fragment_offset});
}

static inline struct ffa_value ffa_mem_frag_tx(ffa_memory_handle_t handle,
					       uint32_t fragment_length)
{
	/* Note that sender MBZ at virtual instance. */
	return ffa_call((struct ffa_value){.func = FFA_MEM_FRAG_TX_32,
					   .arg1 = (uint32_t)handle,
					   .arg2 = (uint32_t)(handle >> 32),
					   .arg3 = fragment_length});
}

/**
 * Called by secondary VMs to receive a message. This will block until a message
 * is received.
 *
 * The mailbox must be cleared before a new message can be received.
 *
 * If no message is immediately available and there are no enabled and pending
 * interrupts (irrespective of whether interrupts are enabled globally), then
 * this will block until a message is available or an enabled interrupt becomes
 * pending. This matches the behaviour of the WFI instruction on AArch64, except
 * that a message becoming available is also treated like a wake-up event.
 *
 * Returns:
 * - FFA_MSG_SEND if a message is successfully received.
 * - FFA_ERROR FFA_NOT_SUPPORTED if called from the primary VM.
 * - FFA_ERROR FFA_INTERRUPTED if an interrupt happened during the call.
 */
static inline struct ffa_value ffa_msg_wait(void)
{
	return ffa_call((struct ffa_value){.func = FFA_MSG_WAIT_32});
}

/**
 * Called by secondary VMs to receive a message. The call will return whether or
 * not a message is available.
 *
 * The mailbox must be cleared before a new message can be received.
 *
 * Returns:
 * - FFA_MSG_SEND if a message is successfully received.
 * - FFA_ERROR FFA_NOT_SUPPORTED if called from the primary VM.
 * - FFA_ERROR FFA_INTERRUPTED if an interrupt happened during the call.
 * - FFA_ERROR FFA_RETRY if there was no pending message.
 */
static inline struct ffa_value ffa_msg_poll(void)
{
	return ffa_call((struct ffa_value){.func = FFA_MSG_POLL_32});
}
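
/*
 * Example (illustrative sketch): a secondary VM's receive loop. The payload
 * is read from the RX buffer registered via ffa_rxtx_map() (named `recv_page`
 * here, which is hypothetical); the message size is reported in arg3, as
 * passed to ffa_msg_send(). The mailbox is released afterwards so the next
 * message can be delivered.
 *
 *	for (;;) {
 *		struct ffa_value ret = ffa_msg_wait();
 *		if (ret.func != FFA_MSG_SEND_32) {
 *			continue; // e.g. FFA_ERROR FFA_INTERRUPTED
 *		}
 *		uint32_t size = ret.arg3;
 *		// ... handle `size` bytes from recv_page ...
 *		ffa_rx_release();
 *	}
 */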

/**
 * Releases the caller's mailbox so that a new message can be received. The
 * caller must have copied out all data they wish to preserve as new messages
 * will overwrite the old and will arrive asynchronously.
 *
 * Returns:
 * - FFA_ERROR FFA_DENIED on failure, if the mailbox hasn't been read.
 * - FFA_SUCCESS on success if no further action is needed.
 * - FFA_RX_RELEASE if it was called by the primary VM and the primary VM now
 *   needs to wake up or kick waiters. Waiters should be retrieved by calling
 *   hf_mailbox_waiter_get.
 */
static inline struct ffa_value ffa_rx_release(void)
{
	return ffa_call((struct ffa_value){.func = FFA_RX_RELEASE_32});
}

/**
 * Retrieves the next VM whose mailbox became writable. For a VM to be notified
 * by this function, the caller must have called api_mailbox_send before with
 * the notify argument set to true, and this call must have failed because the
 * mailbox was not available.
 *
 * It should be called repeatedly to retrieve a list of VMs.
 *
 * Returns -1 if no VM became writable, or the id of the VM whose mailbox
 * became writable.
 */
static inline int64_t hf_mailbox_writable_get(void)
{
	return hf_call(HF_MAILBOX_WRITABLE_GET, 0, 0, 0);
}

/**
 * Retrieves the next VM waiting to be notified that the mailbox of the
 * specified VM became writable. Only primary VMs are allowed to call this.
 *
 * Returns -1 on failure or if there are no waiters; the VM id of the next
 * waiter otherwise.
 */
static inline int64_t hf_mailbox_waiter_get(ffa_vm_id_t vm_id)
{
	return hf_call(HF_MAILBOX_WAITER_GET, vm_id, 0, 0);
}

/**
 * Enables or disables a given interrupt ID.
 *
 * Returns 0 on success, or -1 if the intid is invalid.
 */
static inline int64_t hf_interrupt_enable(uint32_t intid, bool enable,
					  uint32_t type)
{
	return hf_call(HF_INTERRUPT_ENABLE, intid, enable, type);
}

/**
 * Gets the ID of the pending interrupt (if any) and acknowledges it.
 *
 * Returns HF_INVALID_INTID if there are no pending interrupts.
 */
static inline uint32_t hf_interrupt_get(void)
{
	return hf_call(HF_INTERRUPT_GET, 0, 0, 0);
}
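
/*
 * Example (illustrative sketch): draining all pending virtual interrupts from
 * an interrupt handler. HF_INVALID_INTID is the sentinel documented above;
 * `handle_interrupt` is a hypothetical per-VM handler.
 *
 *	uint32_t intid;
 *	while ((intid = hf_interrupt_get()) != HF_INVALID_INTID) {
 *		handle_interrupt(intid);
 *	}
 */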

/**
 * Injects a virtual interrupt of the given ID into the given target vCPU.
 * This doesn't cause the vCPU to actually be run immediately; it will be taken
 * when the vCPU is next run, which is up to the scheduler.
 *
 * Returns:
 * - -1 on failure because the target VM or vCPU doesn't exist, the interrupt
 *   ID is invalid, or the current VM is not allowed to inject interrupts to
 *   the target VM.
 * - 0 on success if no further action is needed.
 * - 1 if it was called by the primary VM and the primary VM now needs to wake
 *   up or kick the target vCPU.
 */
static inline int64_t hf_interrupt_inject(ffa_vm_id_t target_vm_id,
					  ffa_vcpu_index_t target_vcpu_idx,
					  uint32_t intid)
{
	return hf_call(HF_INTERRUPT_INJECT, target_vm_id, target_vcpu_idx,
		       intid);
}

/**
 * Sends a character to the debug log for the VM.
 *
 * Returns 0 on success, or -1 if it failed for some reason.
 */
static inline int64_t hf_debug_log(char c)
{
	return hf_call(HF_DEBUG_LOG, c, 0, 0);
}

/** Obtains Hafnium's version of the implemented FF-A specification. */
static inline int32_t ffa_version(uint32_t requested_version)
{
	return ffa_call((struct ffa_value){.func = FFA_VERSION_32,
					   .arg1 = requested_version})
		.func;
}

/**
 * Discovery function returning information about the implementation of
 * optional FF-A interfaces.
 *
 * Returns:
 * - FFA_SUCCESS in .func if the optional interface with function_id is
 *   implemented.
 * - FFA_ERROR in .func if the optional interface with function_id is not
 *   implemented.
 */
static inline struct ffa_value ffa_features(uint32_t function_id)
{
	return ffa_call((struct ffa_value){.func = FFA_FEATURES_32,
					   .arg1 = function_id});
}
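
/*
 * Example (illustrative sketch): discovery at start of day. The feature probe
 * uses function IDs already defined in "hf/ffa.h"; the version encoding
 * (major in bits [30:16], minor in bits [15:0]) follows the FF-A
 * specification.
 *
 *	if (ffa_features(FFA_MSG_SEND2_32).func == FFA_SUCCESS_32) {
 *		// FFA_MSG_SEND2 is available.
 *	}
 *
 *	int32_t ver = ffa_version((1 << 16) | 1); // request v1.1
 *	if (ver != FFA_NOT_SUPPORTED) {
 *		// `ver` holds the version supported by Hafnium.
 *	}
 */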

static inline struct ffa_value ffa_msg_send_direct_req(
	ffa_vm_id_t sender_vm_id, ffa_vm_id_t target_vm_id, uint32_t arg3,
	uint32_t arg4, uint32_t arg5, uint32_t arg6, uint32_t arg7)
{
	return ffa_call((struct ffa_value){
		.func = FFA_MSG_SEND_DIRECT_REQ_32,
		.arg1 = ((uint64_t)sender_vm_id << 16) | target_vm_id,
		.arg3 = arg3,
		.arg4 = arg4,
		.arg5 = arg5,
		.arg6 = arg6,
		.arg7 = arg7,
	});
}

static inline struct ffa_value ffa_msg_send_direct_resp(
	ffa_vm_id_t sender_vm_id, ffa_vm_id_t target_vm_id, uint32_t arg3,
	uint32_t arg4, uint32_t arg5, uint32_t arg6, uint32_t arg7)
{
	return ffa_call((struct ffa_value){
		.func = FFA_MSG_SEND_DIRECT_RESP_32,
		.arg1 = ((uint64_t)sender_vm_id << 16) | target_vm_id,
		.arg3 = arg3,
		.arg4 = arg4,
		.arg5 = arg5,
		.arg6 = arg6,
		.arg7 = arg7,
	});
}
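
/*
 * Example (illustrative sketch): a direct request round trip. `target_id`,
 * `requester_id` and `req` are placeholders; the payload in w3-w7 is
 * application-defined.
 *
 *	// Requester side: blocks until the target responds (or errors).
 *	struct ffa_value ret = ffa_msg_send_direct_req(
 *		hf_vm_get_id(), target_id, 1, 2, 3, 4, 5);
 *	if (ret.func == FFA_MSG_SEND_DIRECT_RESP_32) {
 *		// ret.arg3..arg7 hold the response payload.
 *	}
 *
 *	// Target side: echo the request payload back to the requester.
 *	ffa_msg_send_direct_resp(hf_vm_get_id(), requester_id,
 *				 req.arg3, req.arg4, req.arg5,
 *				 req.arg6, req.arg7);
 */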

static inline struct ffa_value ffa_notification_bind(
	ffa_vm_id_t sender_vm_id, ffa_vm_id_t receiver_vm_id, uint32_t flags,
	ffa_notifications_bitmap_t bitmap)
{
	return ffa_call((struct ffa_value){
		.func = FFA_NOTIFICATION_BIND_32,
		.arg1 = (sender_vm_id << 16) | (receiver_vm_id),
		.arg2 = flags,
		.arg3 = (uint32_t)(bitmap),
		.arg4 = (uint32_t)(bitmap >> 32),
	});
}

static inline struct ffa_value ffa_notification_unbind(
	ffa_vm_id_t sender_vm_id, ffa_vm_id_t receiver_vm_id,
	ffa_notifications_bitmap_t bitmap)
{
	return ffa_call((struct ffa_value){
		.func = FFA_NOTIFICATION_UNBIND_32,
		.arg1 = (sender_vm_id << 16) | (receiver_vm_id),
		.arg3 = (uint32_t)(bitmap),
		.arg4 = (uint32_t)(bitmap >> 32),
	});
}

static inline struct ffa_value ffa_notification_set(
	ffa_vm_id_t sender_vm_id, ffa_vm_id_t receiver_vm_id, uint32_t flags,
	ffa_notifications_bitmap_t bitmap)
{
	return ffa_call((struct ffa_value){
		.func = FFA_NOTIFICATION_SET_32,
		.arg1 = (sender_vm_id << 16) | (receiver_vm_id),
		.arg2 = flags,
		.arg3 = (uint32_t)(bitmap),
		.arg4 = (uint32_t)(bitmap >> 32),
	});
}

static inline struct ffa_value ffa_notification_get(ffa_vm_id_t receiver_vm_id,
						    ffa_vcpu_index_t vcpu_id,
						    uint32_t flags)
{
	return ffa_call((struct ffa_value){
		.func = FFA_NOTIFICATION_GET_32,
		.arg1 = (vcpu_id << 16) | (receiver_vm_id),
		.arg2 = flags,
	});
}

static inline struct ffa_value ffa_notification_info_get(void)
{
	return ffa_call((struct ffa_value){
		.func = FFA_NOTIFICATION_INFO_GET_64,
	});
}
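
/*
 * Example (illustrative sketch): a receiver binds notification 0 from a
 * sender, the sender signals it, and the receiver retrieves what is pending.
 * `sender_id`, `receiver_id` and `get_flags` are placeholders; the zero flag
 * arguments are simplifications, as real callers use the flag encodings from
 * the FF-A specification.
 *
 *	// Receiver: bind notification 0 to `sender_id`.
 *	ffa_notification_bind(sender_id, hf_vm_get_id(), 0, 1ULL << 0);
 *
 *	// Sender: signal notification 0 for `receiver_id`.
 *	ffa_notification_set(hf_vm_get_id(), receiver_id, 0, 1ULL << 0);
 *
 *	// Receiver: fetch notifications pending for vCPU 0; the bitmap comes
 *	// back split across the return registers.
 *	struct ffa_value ret =
 *		ffa_notification_get(hf_vm_get_id(), 0, get_flags);
 */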

static inline struct ffa_value ffa_mem_perm_get(uint64_t base_va)
{
	return ffa_call((struct ffa_value){.func = FFA_MEM_PERM_GET_32,
					   .arg1 = base_va});
}

static inline struct ffa_value ffa_mem_perm_set(uint64_t base_va,
						uint32_t page_count,
						uint32_t mem_perm)
{
	return ffa_call((struct ffa_value){.func = FFA_MEM_PERM_SET_32,
					   .arg1 = base_va,
					   .arg2 = page_count,
					   .arg3 = mem_perm});
}

static inline struct ffa_value ffa_console_log_32(const char *src, size_t size)
{
	struct ffa_value req = {
		.func = FFA_CONSOLE_LOG_32,
		.arg1 = size,
	};
	/* Pack the characters into the argument registers arg2-arg7. */
	memcpy_s(&req.arg2, sizeof(uint32_t) * 6, src, size);

	return ffa_call(req);
}

static inline struct ffa_value ffa_console_log_64(const char *src, size_t size)
{
	struct ffa_value req = {
		.func = FFA_CONSOLE_LOG_64,
		.arg1 = size,
	};
	/* Pack the characters into the argument registers arg2-arg7. */
	memcpy_s(&req.arg2, sizeof(uint64_t) * 6, src, size);

	return ffa_call(req);
}
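
/*
 * Example (illustrative sketch): logging a short string through the 64-bit
 * console interface, which carries up to 48 bytes per call (6 x 8-byte
 * registers, per the destination size passed to memcpy_s above).
 *
 *	static const char msg[] = "hello from VM";
 *	ffa_console_log_64(msg, sizeof(msg) - 1);
 */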