// © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
//
// SPDX-License-Identifier: BSD-3-Clause

#if defined(HYPERCALLS)
#include <hyptypes.h>

#include <hypcall_def.h>
#include <hyprights.h>

#include <atomic.h>
#include <compiler.h>
#include <cspace.h>
#include <cspace_lookup.h>
#include <memextent.h>
#include <object.h>
#include <pgtable.h>
#include <rcu.h>
#include <spinlock.h>

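// Modify an existing memory extent.
//
// Reserved flag bits must be zero, and the cap must grant map rights on
// the extent. The range operations (zero, cache clean, cache flush) are
// only accepted when the no-sync flag is set. When the no-sync flag is
// clear, a successful unmap-all or sync-all operation finishes with an
// rcu_sync() to wait for concurrent EL2 operations that use manual
// lookups.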
error_t
hypercall_memextent_modify(cap_id_t memextent_cap,
                           memextent_modify_flags_t flags, size_t offset,
                           size_t size)
{
        error_t err = OK;
        cspace_t *cspace = cspace_get_self();

        // FIXME:
        if (memextent_modify_flags_get_res_0(&flags) != 0U) {
                err = ERROR_ARGUMENT_INVALID;
                goto out;
        }

        memextent_ptr_result_t m = cspace_lookup_memextent(
                cspace, memextent_cap, CAP_RIGHTS_MEMEXTENT_MAP);
        if (compiler_unexpected(m.e != OK)) {
                err = m.e;
                goto out;
        }

        memextent_t *memextent = m.r;
        bool need_sync = !memextent_modify_flags_get_no_sync(&flags);

        memextent_modify_op_t op = memextent_modify_flags_get_op(&flags);
        if (op == MEMEXTENT_MODIFY_OP_UNMAP_ALL) {
                memextent_unmap_all(memextent);
        } else if ((op == MEMEXTENT_MODIFY_OP_ZERO_RANGE) && !need_sync) {
                err = memextent_zero_range(memextent, offset, size);
        } else if ((op == MEMEXTENT_MODIFY_OP_CACHE_CLEAN_RANGE) &&
                   !need_sync) {
                err = memextent_cache_clean_range(memextent, offset, size);
        } else if ((op == MEMEXTENT_MODIFY_OP_CACHE_FLUSH_RANGE) &&
                   !need_sync) {
                err = memextent_cache_flush_range(memextent, offset, size);
        } else if (op == MEMEXTENT_MODIFY_OP_SYNC_ALL) {
                err = need_sync ? OK : ERROR_ARGUMENT_INVALID;
        } else {
                err = ERROR_ARGUMENT_INVALID;
        }

        if ((err == OK) && need_sync) {
                // Wait for completion of EL2 operations using manual lookups
                rcu_sync();
        }

        object_put_memextent(memextent);
out:
        return err;
}

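// Configure a not-yet-activated memory extent.
//
// The cap must grant activate rights and must refer to a memextent
// object. The configuration is only applied while the object is still
// in the init state; otherwise ERROR_OBJECT_STATE is returned.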
error_t
hypercall_memextent_configure(cap_id_t memextent_cap, paddr_t phys_base,
                              size_t size, memextent_attrs_t attributes)
{
        error_t err;
        cspace_t *cspace = cspace_get_self();
        object_type_t type;

        object_ptr_result_t o = cspace_lookup_object_any(
                cspace, memextent_cap, CAP_RIGHTS_GENERIC_OBJECT_ACTIVATE,
                &type);
        if (compiler_unexpected(o.e != OK)) {
                err = o.e;
                goto out;
        }
        if (type != OBJECT_TYPE_MEMEXTENT) {
                err = ERROR_CSPACE_WRONG_OBJECT_TYPE;
                goto out_memextent_release;
        }

        memextent_t *target_me = o.r.memextent;

        spinlock_acquire(&target_me->header.lock);

        if (atomic_load_relaxed(&target_me->header.state) ==
            OBJECT_STATE_INIT) {
                err = memextent_configure(target_me, phys_base, size,
                                          attributes);
        } else {
                err = ERROR_OBJECT_STATE;
        }

        spinlock_release(&target_me->header.lock);
out_memextent_release:
        object_put(type, o.r);
out:
        return err;
}

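// Configure a not-yet-activated memory extent as a derivation of an
// existing parent extent.
//
// The parent cap must grant derive rights, and the target cap must grant
// activate rights and refer to a memextent object that is still in the
// init state. The offset, size and attributes are passed through to
// memextent_configure_derive().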
error_t
hypercall_memextent_configure_derive(cap_id_t memextent_cap,
                                     cap_id_t parent_memextent_cap,
                                     size_t offset, size_t size,
                                     memextent_attrs_t attributes)
{
        error_t err;
        cspace_t *cspace = cspace_get_self();
        object_type_t type;

        memextent_ptr_result_t m = cspace_lookup_memextent(
                cspace, parent_memextent_cap, CAP_RIGHTS_MEMEXTENT_DERIVE);
        if (compiler_unexpected(m.e != OK)) {
                err = m.e;
                goto out;
        }

        memextent_t *parent = m.r;

        object_ptr_result_t o = cspace_lookup_object_any(
                cspace, memextent_cap, CAP_RIGHTS_GENERIC_OBJECT_ACTIVATE,
                &type);
        if (compiler_unexpected(o.e != OK)) {
                err = o.e;
                goto out_parent_release;
        }
        if (type != OBJECT_TYPE_MEMEXTENT) {
                err = ERROR_CSPACE_WRONG_OBJECT_TYPE;
                goto out_memextent_release;
        }

        memextent_t *target_me = o.r.memextent;

        spinlock_acquire(&target_me->header.lock);

        if (atomic_load_relaxed(&target_me->header.state) ==
            OBJECT_STATE_INIT) {
                err = memextent_configure_derive(target_me, parent, offset,
                                                 size, attributes);
        } else {
                err = ERROR_OBJECT_STATE;
        }

        spinlock_release(&target_me->header.lock);
out_memextent_release:
        object_put(type, o.r);
out_parent_release:
        object_put_memextent(parent);
out:
        return err;
}

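// Helper for donation between a memory extent and its parent.
//
// The child cap must grant donate rights. If the child extent has a
// parent extent, the parent cap must refer to that parent (also with
// donate rights); for a root extent with no parent, it must instead be a
// partition cap with donate rights that matches the child's owning
// partition. The parent or partition reference is only needed for this
// sanity check and is dropped immediately, since the child already holds
// a reference. The reverse flag selects donation from the child back to
// the parent.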
static error_t
hypercall_memextent_donate_child(cap_id_t parent_cap, cap_id_t child_cap,
                                 size_t offset, size_t size, bool reverse)
{
        error_t err = OK;
        cspace_t *cspace = cspace_get_self();

        memextent_ptr_result_t child = cspace_lookup_memextent(
                cspace, child_cap, CAP_RIGHTS_MEMEXTENT_DONATE);
        if (compiler_unexpected(child.e != OK)) {
                err = child.e;
                goto out;
        }

        // We don't actually need a reference to the parent for the donate;
        // the child already has a reference. So after sanity checking the
        // provided parent cap we can immediately drop the reference.
        if (child.r->parent != NULL) {
                memextent_ptr_result_t m = cspace_lookup_memextent(
                        cspace, parent_cap, CAP_RIGHTS_MEMEXTENT_DONATE);
                if (compiler_unexpected(m.e != OK)) {
                        err = m.e;
                        goto out_child_release;
                }

                if (child.r->parent != m.r) {
                        err = ERROR_ARGUMENT_INVALID;
                }

                object_put_memextent(m.r);
        } else {
                partition_ptr_result_t p = cspace_lookup_partition(
                        cspace, parent_cap, CAP_RIGHTS_PARTITION_DONATE);
                if (compiler_unexpected(p.e != OK)) {
                        err = p.e;
                        goto out_child_release;
                }

                if (child.r->header.partition != p.r) {
                        err = ERROR_ARGUMENT_INVALID;
                }

                object_put_partition(p.r);
        }

        if (err == OK) {
                err = memextent_donate_child(child.r, offset, size, reverse);
        }

out_child_release:
        object_put_memextent(child.r);
out:
        return err;
}

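// Helper for donation between two sibling memory extents.
//
// Both caps must grant donate rights on their extents; the remaining
// validation is left to memextent_donate_sibling().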
static error_t
hypercall_memextent_donate_sibling(cap_id_t from, cap_id_t to, size_t offset,
                                   size_t size)
{
        error_t err;
        cspace_t *cspace = cspace_get_self();

        memextent_ptr_result_t m1 = cspace_lookup_memextent(
                cspace, from, CAP_RIGHTS_MEMEXTENT_DONATE);
        if (compiler_unexpected(m1.e != OK)) {
                err = m1.e;
                goto out;
        }

        memextent_ptr_result_t m2 = cspace_lookup_memextent(
                cspace, to, CAP_RIGHTS_MEMEXTENT_DONATE);
        if (compiler_unexpected(m2.e != OK)) {
                err = m2.e;
                goto out_m1_release;
        }

        err = memextent_donate_sibling(m1.r, m2.r, offset, size);

        object_put_memextent(m2.r);
out_m1_release:
        object_put_memextent(m1.r);
out:
        return err;
}

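// Donate memory between related memory extents, or between a root extent
// and its owning partition.
//
// Reserved option bits must be zero. The donation type selects the
// direction: to a child, back to the parent, or to a sibling extent.
// Unless the no-sync option is set, a successful donation finishes with
// an rcu_sync(), since the donation may have changed addrspace mappings
// and concurrent EL2 operations using manual lookups must be allowed to
// complete.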
error_t
hypercall_memextent_donate(memextent_donate_options_t options, cap_id_t from,
                           cap_id_t to, size_t offset, size_t size)
{
        error_t err;

        if (memextent_donate_options_get_res_0(&options) != 0U) {
                err = ERROR_ARGUMENT_INVALID;
                goto out;
        }

        memextent_donate_type_t type =
                memextent_donate_options_get_type(&options);
        if (type == MEMEXTENT_DONATE_TYPE_TO_CHILD) {
                err = hypercall_memextent_donate_child(from, to, offset, size,
                                                       false);
        } else if (type == MEMEXTENT_DONATE_TYPE_TO_PARENT) {
                err = hypercall_memextent_donate_child(to, from, offset, size,
                                                       true);
        } else if (type == MEMEXTENT_DONATE_TYPE_TO_SIBLING) {
                err = hypercall_memextent_donate_sibling(from, to, offset,
                                                         size);
        } else {
                err = ERROR_ARGUMENT_INVALID;
        }

        if ((err == OK) && !memextent_donate_options_get_no_sync(&options)) {
                // The donation may have caused addrspace mappings to change.
                // Wait for completion of EL2 operations using manual lookups.
                rcu_sync();
        }

out:
        return err;
}

#else
extern int unused;
#endif