/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa_memory.h"

#include "hf/arch/mmu.h"

#include "hf/ffa/ffa_memory.h"
#include "hf/ffa_internal.h"
#include "hf/mm.h"
#include "hf/vm.h"

#include "sysregs.h"

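/**
 * Memory region handles are allocated by the SPMC itself in this build, so
 * report the SPMC as the handle allocator.
 */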
enum ffa_memory_handle_allocator ffa_memory_get_handle_allocator(void)
{
	return FFA_MEMORY_HANDLE_ALLOCATOR_SPMC;
}

/** Check validity of the FF-A memory send function attempt. */
bool ffa_memory_is_send_valid(ffa_id_t receiver, ffa_id_t sender,
			      uint32_t share_func, bool multiple_borrower)
{
	const bool is_receiver_sp = vm_id_is_current_world(receiver);
	const bool is_sender_sp = vm_id_is_current_world(sender);

	/*
	 * SPs can only share/lend/donate to another SP.
	 * VMs can send memory to SPs.
	 * In a multiple borrower operation, VMs might provide descriptors
	 * of other VMs.
	 * Refer to section 1.4 of the FF-A v1.2 Memory Management
	 * supplement ALP0 specification.
	 */
	switch (share_func) {
	case FFA_MEM_DONATE_64:
	case FFA_MEM_DONATE_32:
	case FFA_MEM_LEND_64:
	case FFA_MEM_LEND_32:
		return is_receiver_sp;
	case FFA_MEM_SHARE_64:
	case FFA_MEM_SHARE_32: {
		bool result = (is_sender_sp && is_receiver_sp) ||
			      (!is_sender_sp && !multiple_borrower &&
			       is_receiver_sp) ||
			      (!is_sender_sp && multiple_borrower);

		if (!result) {
			dlog_verbose(
				"SPMC only supports memory operations to a "
				"single SP, or multiple borrowers with mixed "
				"world borrowers.\n");
		}
		return result;
	}
	default:
		return false;
	}
}

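/**
 * From the SPMC's perspective the other world is the normal world, so memory
 * belonging to it is mapped with the non-secure mode bit set.
 */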
mm_mode_t ffa_memory_get_other_world_mode(void)
{
	return MM_MODE_NS;
}

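/**
 * FFA_MEM_PERM_GET is only valid for EL0 partitions, requires VHE support,
 * and may only be used while the calling SP is still initialising
 * (RTM_SP_INIT).
 */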
bool ffa_memory_is_mem_perm_get_valid(const struct vcpu *current)
{
	if (!current->vm->el0_partition) {
		dlog_error("FFA_MEM_PERM_GET: VM %#x is not an EL0 partition\n",
			   current->vm->id);
		return false;
	}

	/* FFA_MEM_PERM_SET/GET is only valid before SPs are initialized */
	return has_vhe_support() && (current->rt_model == RTM_SP_INIT);
}

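/**
 * FFA_MEM_PERM_SET requires VHE support and may only be used while the
 * calling SP is still initialising (RTM_SP_INIT).
 */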
bool ffa_memory_is_mem_perm_set_valid(const struct vcpu *current)
{
	/* FFA_MEM_PERM_SET/GET is only valid before SPs are initialized */
	return has_vhe_support() && (current->rt_model == RTM_SP_INIT);
}

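/**
 * The SPMC handles memory send requests itself: the transaction is processed
 * by the common ffa_memory_send() code with the sender locked, rather than
 * being forwarded to another component.
 */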
struct ffa_value ffa_memory_other_world_mem_send(
	struct vm *from, uint32_t share_func,
	struct ffa_memory_region **memory_region, uint32_t length,
	uint32_t fragment_length, struct mpool *page_pool)
{
	struct ffa_value ret;
	struct vm_locked from_locked = vm_lock(from);

	ret = ffa_memory_send(from_locked, *memory_region, length,
			      fragment_length, share_func, page_pool);
	/*
	 * ffa_memory_send takes ownership of the memory_region, so
	 * make sure we don't free it.
	 */
	*memory_region = NULL;

	vm_unlock(&from_locked);

	return ret;
}

/*
 * SPMC handles its memory share requests internally, so no forwarding of the
 * request is required.
 */
struct ffa_value ffa_memory_other_world_mem_reclaim(
	struct vm *to, ffa_memory_handle_t handle,
	ffa_memory_region_flags_t flags, struct mpool *page_pool)
{
	(void)handle;
	(void)flags;
	(void)page_pool;
	(void)to;

	dlog_verbose("Invalid handle %#lx for FFA_MEM_RECLAIM.\n", handle);
	return ffa_error(FFA_INVALID_PARAMETERS);
}

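/*
 * As with FFA_MEM_RECLAIM above, the SPMC does not forward memory management
 * requests elsewhere, so there is no other-world transaction for this
 * fragment to continue.
 */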
struct ffa_value ffa_memory_other_world_mem_send_continue(
	struct vm *from, void *fragment, uint32_t fragment_length,
	ffa_memory_handle_t handle, struct mpool *page_pool)
{
	(void)from;
	(void)fragment;
	(void)fragment_length;
	(void)handle;
	(void)page_pool;

	return ffa_error(FFA_INVALID_PARAMETERS);
}

/**
 * Update the memory region attributes with the security state bit based on the
 * supplied mode.
 */
ffa_memory_attributes_t ffa_memory_add_security_bit_from_mode(
	ffa_memory_attributes_t attributes, mm_mode_t mode)
{
	ffa_memory_attributes_t ret = attributes;

	if ((mode & MM_MODE_NS) != 0) {
		ret.security = FFA_MEMORY_SECURITY_NON_SECURE;
	}

	return ret;
}