/*
 * Copyright 2020 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/arch/other_world.h"

#include "hf/arch/mmu.h"

#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/ffa_internal.h"
#include "hf/mm.h"
#include "hf/vcpu.h"
#include "hf/vm.h"

#include "smc.h"

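/**
 * Initialise the "other world VM": configure which FF-A messaging methods it
 * may use and map the NS memory and device memory ranges into its stage-2
 * page tables.
 */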
bool arch_other_world_vm_init(struct vm *other_world_vm,
			      const struct boot_params *params,
			      struct mpool *ppool)
{
	const char *err_msg =
		"Unable to initialise address space for Other world VM.\n";
	struct vm_locked other_world_vm_locked;
	bool ret = false;
	uint32_t i;

	other_world_vm_locked = vm_lock(other_world_vm);

	/* Enable all communication methods for the other world. */
	other_world_vm->messaging_method =
		FFA_PARTITION_DIRECT_REQ_SEND | FFA_PARTITION_DIRECT_REQ2_SEND;

	/*
	 * If Hafnium is the NWd hypervisor, allow the other world VM (the
	 * SPMC) to receive direct requests.
	 * When Hafnium is the SPMC, the other world VM is not allowed to
	 * receive direct requests from SPs.
	 */
#if SECURE_WORLD == 0
	other_world_vm->messaging_method |= FFA_PARTITION_DIRECT_REQ2_RECV;
	other_world_vm->messaging_method |= FFA_PARTITION_DIRECT_REQ_RECV;
#endif

	/* Map NS mem ranges to the "Other world VM" stage-2 PTs. */
	for (i = 0; i < params->ns_mem_ranges_count; i++) {
		if (!vm_identity_map(
			    other_world_vm_locked,
			    params->ns_mem_ranges[i].begin,
			    params->ns_mem_ranges[i].end,
			    MM_MODE_R | MM_MODE_W | MM_MODE_X | MM_MODE_NS,
			    ppool, NULL)) {
			dlog_error("Normal Memory: %s", err_msg);
			goto out;
		}
	}

	/*
	 * Map NS device mem ranges to the "Other world VM" stage-2 PTs to
	 * allow for memory sharing operations from NWd to SWd.
	 */
	for (i = 0; i < params->ns_device_mem_ranges_count; i++) {
		if (!vm_identity_map(
			    other_world_vm_locked,
			    params->ns_device_mem_ranges[i].begin,
			    params->ns_device_mem_ranges[i].end,
			    MM_MODE_R | MM_MODE_W | MM_MODE_D | MM_MODE_NS,
			    ppool, NULL)) {
			dlog_error("Device Memory: %s", err_msg);
			goto out;
		}
	}

	/*
	 * Force the hypervisor's FF-A version to be the same as ours: the
	 * version set at the hypervisor's initialisation does not reach the
	 * SPMC.
	 * TODO: fix the issue described above and delete this.
	 */
	other_world_vm->ffa_version = FFA_VERSION_COMPILED;

	ret = true;

out:
	vm_unlock(&other_world_vm_locked);

	return ret;
}

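/** Forward an FF-A call to the other world via SMC. */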
struct ffa_value arch_other_world_call(struct ffa_value args)
{
	return smc_ffa_call(args);
}

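/**
 * Forward an FF-A call to the other world via SMC, using the extended
 * register set for arguments and results.
 */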
struct ffa_value arch_other_world_call_ext(struct ffa_value args)
{
	return smc_ffa_call_ext(args);
}

/**
 * Obtain a lock on the other world VM, making sure it is locked in the
 * correct order relative to the owner VM in order to avoid deadlock.
 */
static struct vm_locked lock_other_world(struct vm_locked owner_vm_locked)
{
	struct vm *other_world_vm;
	struct two_vm_locked both;

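	/* The owner is the other world VM itself and is already locked. */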
	if (owner_vm_locked.vm->id == HF_OTHER_WORLD_ID) {
		return owner_vm_locked;
	}

	other_world_vm = vm_find(HF_OTHER_WORLD_ID);
	both = vm_lock_both_in_order(owner_vm_locked, other_world_vm);

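	/* `vm2` holds the lock on the other world VM, taken second. */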
	return both.vm2;
}

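/**
 * Release the lock taken by `lock_other_world`, unless the owner VM is itself
 * the other world VM, in which case the caller still holds its original lock.
 */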
static void unlock_other_world(struct vm_locked owner_vm_locked,
			       struct vm_locked other_world_locked)
{
	if (owner_vm_locked.vm->id != other_world_locked.vm->id) {
		vm_unlock(&other_world_locked);
	}
}

/**
 * Unmap the RX/TX buffers from the other world's page tables so that they
 * cannot be used for memory sharing operations from the NWd, or for a
 * subsequent FFA_RXTX_MAP call.
 *
 * Fails if the given addresses are not already mapped in the other world's
 * page tables.
 *
 * Returns `FFA_DENIED` if the send/recv pages are not mapped in the normal
 * world's page tables, or are mapped with incorrect permissions.
 *
 * Returns `FFA_ABORTED` if unmapping the send/recv pages from the normal
 * world's page tables fails.
 */
struct ffa_value arch_other_world_vm_configure_rxtx_map(
	struct vm_locked vm_locked, struct mpool *local_page_pool,
	paddr_t pa_send_begin, paddr_t pa_send_end, paddr_t pa_recv_begin,
	paddr_t pa_recv_end)
{
	struct ffa_value ret;
	mm_mode_t send_mode;
	mm_mode_t recv_mode;
	struct vm_locked other_world_locked;
	const mm_mode_t expected_mode =
		MM_MODE_R | MM_MODE_W | MM_MODE_X | MM_MODE_NS;

	other_world_locked = lock_other_world(vm_locked);
	assert(other_world_locked.vm != NULL);

	/*
	 * Check that the memory is mapped in the NWd set of page tables.
	 */
	if (!vm_mem_get_mode(other_world_locked, ipa_from_pa(pa_send_begin),
			     ipa_from_pa(pa_send_end), &send_mode)) {
		ret = ffa_error(FFA_DENIED);
		dlog_error("%s: send page not mapped in NWd VM\n", __func__);
		goto out_unlock;
	}
	if (!vm_mem_get_mode(other_world_locked, ipa_from_pa(pa_recv_begin),
			     ipa_from_pa(pa_recv_end), &recv_mode)) {
		ret = ffa_error(FFA_DENIED);
		dlog_error("%s: recv page not mapped in NWd VM\n", __func__);
		goto out_unlock;
	}

	if ((send_mode & expected_mode) != expected_mode) {
		ret = ffa_error(FFA_DENIED);
		dlog_error("%s: send page is invalid (expected %#x, got %#x)\n",
			   __func__, expected_mode, send_mode);
		goto out_unlock;
	}
	if ((recv_mode & expected_mode) != expected_mode) {
		ret = ffa_error(FFA_DENIED);
		dlog_error("%s: recv page is invalid (expected %#x, got %#x)\n",
			   __func__, expected_mode, recv_mode);
		goto out_unlock;
	}

	/*
	 * Unmap the memory from the NWd page tables, to prevent that memory
	 * being used in memory sharing operations from the NWd, or in further
	 * `FFA_RXTX_MAP` calls.
	 */
	if (!vm_unmap(other_world_locked, pa_send_begin, pa_send_end,
		      local_page_pool)) {
		dlog_error("%s: cannot unmap send page from NWd VM\n",
			   __func__);
		ret = ffa_error(FFA_ABORTED);
		goto out_unlock;
	}
	if (!vm_unmap(other_world_locked, pa_recv_begin, pa_recv_end,
		      local_page_pool)) {
		ret = ffa_error(FFA_ABORTED);
		dlog_error("%s: cannot unmap recv page from NWd VM\n",
			   __func__);
		goto out_unlock;
	}

	ret = (struct ffa_value){.func = FFA_SUCCESS_32};

out_unlock:
	unlock_other_world(vm_locked, other_world_locked);
	return ret;
}

/**
 * Remap the RX/TX buffers into the other world's page tables so that they can
 * again be used for memory sharing operations from the NWd, or for a
 * subsequent FFA_RXTX_MAP call.
 *
 * Returns `FFA_ABORTED` if mapping the send/recv pages in the normal world's
 * page tables fails.
 */
struct ffa_value arch_other_world_vm_configure_rxtx_unmap(
	struct vm_locked vm_locked, struct mpool *local_page_pool,
	paddr_t pa_send_begin, paddr_t pa_send_end, paddr_t pa_recv_begin,
	paddr_t pa_recv_end)
{
	struct vm_locked other_world_locked = lock_other_world(vm_locked);

	if (other_world_locked.vm == NULL) {
		return ffa_error(FFA_ABORTED);
	}

	/* Remap to other world page tables. */
	if (!vm_identity_map(other_world_locked, pa_send_begin, pa_send_end,
			     MM_MODE_R | MM_MODE_W | MM_MODE_X | MM_MODE_NS,
			     local_page_pool, NULL)) {
		dlog_error(
			"%s: unable to remap send page to other world page "
			"tables\n",
			__func__);
		unlock_other_world(vm_locked, other_world_locked);
		return ffa_error(FFA_ABORTED);
	}

	if (!vm_identity_map(other_world_locked, pa_recv_begin, pa_recv_end,
			     MM_MODE_R | MM_MODE_W | MM_MODE_X | MM_MODE_NS,
			     local_page_pool, NULL)) {
		dlog_error(
			"%s: unable to remap recv page to other world page "
			"tables\n",
			__func__);
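		/* Roll back the send page mapping to avoid a partial map. */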
		CHECK(vm_unmap(other_world_locked, pa_send_begin, pa_send_end,
			       local_page_pool));
		unlock_other_world(vm_locked, other_world_locked);
		return ffa_error(FFA_ABORTED);
	}

	unlock_other_world(vm_locked, other_world_locked);
	return (struct ffa_value){.func = FFA_SUCCESS_32};
}