1 /*
2 * Copyright 2019 The Hafnium Authors.
3 *
4 * Use of this source code is governed by a BSD-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/BSD-3-Clause.
7 */
8
9 #include "hf/ffa.h"
10
11 #include <stddef.h>
12
13 #include "hf/ffa_v1_0.h"
14 #include "hf/types.h"
15
16 #if defined(__linux__) && defined(__KERNEL__)
17 #include <linux/kernel.h>
18 #include <linux/string.h>
19 #else
20 #include "hf/assert.h"
21 #include "hf/std.h"
22 #endif
23
/*
 * hf/assert.h is not available for Linux kernel builds, nor are the
 * ENABLE_ASSERTIONS and LOG_LEVEL macros it uses, so we define the
 * assert macro here for this case.
 */
29 #if defined(__linux__) && defined(__KERNEL__)
30 #define assert(e) \
31 ((e) ? ((void)0) : panic("ASSERT: %s:%d\n", __FILE__, __LINE__, #e))
32 #endif
33
34 static_assert(sizeof(struct ffa_endpoint_rx_tx_descriptor) % 16 == 0,
35 "struct ffa_endpoint_rx_tx_descriptor must be a multiple of 16 "
36 "bytes long.");
37
ffa_copy_memory_region_constituents(struct ffa_memory_region_constituent * dest,const struct ffa_memory_region_constituent * src)38 void ffa_copy_memory_region_constituents(
39 struct ffa_memory_region_constituent *dest,
40 const struct ffa_memory_region_constituent *src)
41 {
42 dest->address = src->address;
43 dest->page_count = src->page_count;
44 dest->reserved = 0;
45 }
46
47 /**
48 * Initializes receiver permissions, in a memory transaction descriptor
49 * and zero out the other fields to be set later if required.
50 */
void ffa_memory_access_init(struct ffa_memory_access *receiver,
			    ffa_id_t receiver_id,
			    enum ffa_data_access data_access,
			    enum ffa_instruction_access instruction_access,
			    ffa_memory_receiver_flags_t flags,
			    struct ffa_memory_access_impdef *impdef_val)
{
	ffa_memory_access_permissions_t permissions = {
		.data_access = data_access,
		.instruction_access = instruction_access,
	};

	/* A NULL `impdef_val` means the impdef field is zeroed. */
	*receiver = (struct ffa_memory_access){
		.receiver_permissions =
			{
				.receiver = receiver_id,
				.permissions = permissions,
				.flags = flags,
			},
		.composite_memory_region_offset = 0ULL,
		.impdef = impdef_val != NULL
				  ? *impdef_val
				  : ffa_memory_access_impdef_init(0, 0),
		/*
		 * Fix: use a designated initializer here. The previous
		 * `receiver->reserved_0 = 0ULL` was an assignment expression
		 * through the destination pointer in the middle of the
		 * compound-literal initialization, not an initializer for
		 * the `reserved_0` member.
		 */
		.reserved_0 = 0ULL,
	};
}
77
78 /**
79 * Initialises the header of the given `ffa_memory_region`, not
80 * including the composite memory region offset.
81 */
void ffa_memory_region_init_header(struct ffa_memory_region *memory_region,
				   ffa_id_t sender,
				   ffa_memory_attributes_t attributes,
				   ffa_memory_region_flags_t flags,
				   ffa_memory_handle_t handle, uint32_t tag,
				   uint32_t receiver_count,
				   uint32_t receiver_desc_size)
{
	memory_region->sender = sender;
	memory_region->attributes = attributes;
	memory_region->flags = flags;
	memory_region->handle = handle;
	memory_region->tag = tag;
	memory_region->memory_access_desc_size = receiver_desc_size;
	memory_region->receiver_count = receiver_count;
	/* Receiver access descriptors follow directly after this header. */
	memory_region->receivers_offset = sizeof(struct ffa_memory_region);
#if defined(__linux__) && defined(__KERNEL__)
	/* Linux kernel builds do not provide the bounds-checked memset_s. */
	memset(memory_region->reserved, 0, sizeof(memory_region->reserved));
#else
	memset_s(memory_region->reserved, sizeof(memory_region->reserved), 0,
		 sizeof(memory_region->reserved));
#endif
}
105
106 /**
107 * Copies as many as possible of the given constituents to the respective
108 * memory region and sets the respective offset.
109 *
110 * Returns the number of constituents remaining which wouldn't fit, and (via
111 * return parameters) the size in bytes of the first fragment of data copied to
112 * `memory_region` (attributes, constituents and memory region header size), and
113 * the total size of the memory sharing message including all constituents.
114 */
static uint32_t ffa_memory_region_init_constituents(
	struct ffa_memory_region *memory_region, size_t memory_region_max_size,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, uint32_t *total_length,
	uint32_t *fragment_length)
{
	uint32_t composite_memory_region_offset;
	struct ffa_composite_memory_region *composite_memory_region;
	uint32_t fragment_max_constituents;
	uint32_t constituents_offset;
	uint32_t count_to_copy;
	uint32_t i;

	/*
	 * Note that `sizeof(struct ffa_memory_region)` and `sizeof(struct
	 * ffa_memory_access)` must both be multiples of 16 (as verified by the
	 * asserts in `ffa_memory.c`), so it is guaranteed that the offset we
	 * calculate here is aligned to a 64-bit boundary and so 64-bit values
	 * can be copied without alignment faults.
	 * If there are multiple receiver endpoints, their respective access
	 * structure should point to the same offset value.
	 */
	composite_memory_region_offset =
		memory_region->receivers_offset +
		memory_region->receiver_count *
			memory_region->memory_access_desc_size;
	for (i = 0U; i < memory_region->receiver_count; i++) {
		struct ffa_memory_access *receiver =
			ffa_memory_region_get_receiver(memory_region, i);
		assert(receiver != NULL);
		receiver->composite_memory_region_offset =
			composite_memory_region_offset;
	}

	/* The composite descriptor lives directly after the receivers. */
	composite_memory_region =
		ffa_memory_region_get_composite(memory_region, 0);
	composite_memory_region->page_count = 0;
	composite_memory_region->constituent_count = constituent_count;
	composite_memory_region->reserved_0 = 0;

	/*
	 * NOTE(review): assumes `memory_region_max_size` is at least
	 * `constituents_offset`; otherwise the unsigned subtraction below
	 * wraps — confirm callers always pass a large enough buffer.
	 */
	constituents_offset = composite_memory_region_offset +
			      sizeof(struct ffa_composite_memory_region);
	fragment_max_constituents =
		(memory_region_max_size - constituents_offset) /
		sizeof(struct ffa_memory_region_constituent);

	/* Copy at most as many constituents as fit in the first fragment. */
	count_to_copy = constituent_count;
	if (count_to_copy > fragment_max_constituents) {
		count_to_copy = fragment_max_constituents;
	}

	/*
	 * The composite's page count accumulates over ALL constituents,
	 * including those that do not fit in this first fragment.
	 */
	for (i = 0U; i < constituent_count; i++) {
		if (i < count_to_copy) {
			ffa_copy_memory_region_constituents(
				&composite_memory_region->constituents[i],
				&constituents[i]);
		}
		composite_memory_region->page_count +=
			constituents[i].page_count;
	}

	if (total_length != NULL) {
		*total_length =
			constituents_offset +
			composite_memory_region->constituent_count *
				sizeof(struct ffa_memory_region_constituent);
	}
	if (fragment_length != NULL) {
		*fragment_length =
			constituents_offset +
			count_to_copy *
				sizeof(struct ffa_memory_region_constituent);
	}

	/* Constituents that did not fit must be sent in later fragments. */
	return composite_memory_region->constituent_count - count_to_copy;
}
191
192 /**
193 * Initialises the given `ffa_memory_region` and copies as many as possible of
194 * the given constituents to it.
195 *
196 * Returns the number of constituents remaining which wouldn't fit, and (via
197 * return parameters) the size in bytes of the first fragment of data copied to
198 * `memory_region` (attributes, constituents and memory region header size), and
199 * the total size of the memory sharing message including all constituents.
200 */
uint32_t ffa_memory_region_init_single_receiver(
	struct ffa_memory_region *memory_region, size_t memory_region_max_size,
	ffa_id_t sender, ffa_id_t receiver,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, uint32_t tag,
	ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
	enum ffa_instruction_access instruction_access,
	enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
	enum ffa_memory_shareability shareability,
	struct ffa_memory_access_impdef *impdef_val, uint32_t *total_length,
	uint32_t *fragment_length)
{
	struct ffa_memory_access access;

	/*
	 * Build the lone receiver's access descriptor, then defer to the
	 * generic multi-receiver initialiser with a receiver count of one.
	 */
	ffa_memory_access_init(&access, receiver, data_access,
			       instruction_access, 0, impdef_val);

	return ffa_memory_region_init(memory_region, memory_region_max_size,
				      sender, &access, 1,
				      sizeof(struct ffa_memory_access),
				      constituents, constituent_count, tag,
				      flags, type, cacheability, shareability,
				      total_length, fragment_length);
}
224
/**
 * Initialises the given `ffa_memory_region` header, copies the receiver
 * access descriptors into it, then copies as many as possible of the given
 * constituents into it.
 *
 * Returns the number of constituents remaining which wouldn't fit, and (via
 * return parameters, each of which may be NULL) the size in bytes of the
 * first fragment and the total size of the memory sharing message.
 */
uint32_t ffa_memory_region_init(
	struct ffa_memory_region *memory_region, size_t memory_region_max_size,
	ffa_id_t sender, struct ffa_memory_access receivers[],
	uint32_t receiver_count, uint32_t receiver_desc_size,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, uint32_t tag,
	ffa_memory_region_flags_t flags, enum ffa_memory_type type,
	enum ffa_memory_cacheability cacheability,
	enum ffa_memory_shareability shareability, uint32_t *total_length,
	uint32_t *fragment_length)
{
	ffa_memory_attributes_t attributes = {
		.type = type,
		.cacheability = cacheability,
		.shareability = shareability,
	};

	/* Handle is 0: it is allocated by the relayer on FFA_MEM_SHARE etc. */
	ffa_memory_region_init_header(memory_region, sender, attributes, flags,
				      0, tag, receiver_count,
				      receiver_desc_size);

	/*
	 * Copy the receiver descriptors directly after the header.
	 * NOTE(review): assumes receiver_count <= MAX_MEM_SHARE_RECIPIENTS,
	 * matching the destination bound given to memcpy_s — confirm callers.
	 */
#if defined(__linux__) && defined(__KERNEL__)
	memcpy(ffa_memory_region_get_receiver(memory_region, 0), receivers,
	       receiver_count * memory_region->memory_access_desc_size);
#else
	memcpy_s(ffa_memory_region_get_receiver(memory_region, 0),
		 MAX_MEM_SHARE_RECIPIENTS *
			 memory_region->memory_access_desc_size,
		 receivers,
		 receiver_count * memory_region->memory_access_desc_size);
#endif

	return ffa_memory_region_init_constituents(
		memory_region, memory_region_max_size, constituents,
		constituent_count, total_length, fragment_length);
}
261
262 /**
263 * Initialises the given `ffa_memory_region` to be used for an
264 * `FFA_MEM_RETRIEVE_REQ` by the receiver of a memory transaction.
265 *
266 * Returns the size of the message written.
267 */
uint32_t ffa_memory_retrieve_request_init_single_receiver(
	struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
	ffa_id_t sender, ffa_id_t receiver, uint32_t tag,
	ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
	enum ffa_instruction_access instruction_access,
	enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
	enum ffa_memory_shareability shareability,
	struct ffa_memory_access_impdef *impdef_val)
{
	struct ffa_memory_access access;

	/*
	 * Build the single receiver's access descriptor and hand off to the
	 * generic retrieve-request initialiser with a receiver count of one.
	 */
	ffa_memory_access_init(&access, receiver, data_access,
			       instruction_access, 0, impdef_val);

	return ffa_memory_retrieve_request_init(memory_region, handle, sender,
						&access, 1,
						sizeof(struct ffa_memory_access),
						tag, flags, type, cacheability,
						shareability);
}
287
/**
 * Initialises the given `ffa_memory_region` as an `FFA_MEM_RETRIEVE_REQ`
 * for the given handle, copying the receiver access descriptors into it and
 * zeroing every receiver's composite offset (to be filled by the relayer).
 *
 * Returns the size of the message written.
 */
uint32_t ffa_memory_retrieve_request_init(
	struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
	ffa_id_t sender, struct ffa_memory_access receivers[],
	uint32_t receiver_count, uint32_t receiver_desc_size, uint32_t tag,
	ffa_memory_region_flags_t flags, enum ffa_memory_type type,
	enum ffa_memory_cacheability cacheability,
	enum ffa_memory_shareability shareability)
{
	uint32_t i;
	ffa_memory_attributes_t attributes = {
		.type = type,
		.cacheability = cacheability,
		.shareability = shareability,
	};

	ffa_memory_region_init_header(memory_region, sender, attributes, flags,
				      handle, tag, receiver_count,
				      receiver_desc_size);

	/*
	 * Copy the receiver descriptors directly after the header.
	 * NOTE(review): assumes receiver_count <= MAX_MEM_SHARE_RECIPIENTS,
	 * matching the destination bound given to memcpy_s — confirm callers.
	 */
#if defined(__linux__) && defined(__KERNEL__)
	memcpy(ffa_memory_region_get_receiver(memory_region, 0), receivers,
	       receiver_count * memory_region->memory_access_desc_size);
#else
	memcpy_s(ffa_memory_region_get_receiver(memory_region, 0),
		 MAX_MEM_SHARE_RECIPIENTS *
			 memory_region->memory_access_desc_size,
		 receivers,
		 receiver_count * memory_region->memory_access_desc_size);
#endif

	/* Zero the composite offset for all receivers */
	for (i = 0U; i < receiver_count; i++) {
		struct ffa_memory_access *receiver =
			ffa_memory_region_get_receiver(memory_region, i);
		assert(receiver != NULL);
		receiver->composite_memory_region_offset = 0U;
	}

	/* Message size: fixed header plus all receiver descriptors. */
	return memory_region->receivers_offset +
	       memory_region->receiver_count *
		       memory_region->memory_access_desc_size;
}
330
331 /**
332 * Initialises the given `ffa_memory_region` to be used for an
333 * `FFA_MEM_RETRIEVE_REQ` from the hypervisor to the TEE.
334 *
335 * Returns the size of the message written.
336 */
uint32_t ffa_memory_lender_retrieve_request_init(
	struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
	ffa_id_t sender)
{
	memory_region->sender = sender;
	memory_region->attributes = (ffa_memory_attributes_t){0};
	memory_region->flags = 0;
	memory_region->handle = handle;
	memory_region->tag = 0;
	/*
	 * No receiver descriptors: the relayer already knows them from the
	 * original transaction identified by `handle`.
	 * NOTE(review): `memory_access_desc_size` and `receivers_offset` are
	 * left unmodified — presumably ignored when receiver_count is 0;
	 * confirm against the consumer of this request.
	 */
	memory_region->receiver_count = 0;

#if defined(__linux__) && defined(__KERNEL__)
	/* Linux kernel builds do not provide the bounds-checked memset_s. */
	memset(memory_region->reserved, 0, sizeof(memory_region->reserved));
#else
	memset_s(memory_region->reserved, sizeof(memory_region->reserved), 0,
		 sizeof(memory_region->reserved));
#endif
	/* Only the fixed-size header is sent. */
	return sizeof(struct ffa_memory_region);
}
356
ffa_memory_fragment_init(struct ffa_memory_region_constituent * fragment,size_t fragment_max_size,const struct ffa_memory_region_constituent constituents[],uint32_t constituent_count,uint32_t * fragment_length)357 uint32_t ffa_memory_fragment_init(
358 struct ffa_memory_region_constituent *fragment,
359 size_t fragment_max_size,
360 const struct ffa_memory_region_constituent constituents[],
361 uint32_t constituent_count, uint32_t *fragment_length)
362 {
363 uint32_t fragment_max_constituents =
364 fragment_max_size /
365 sizeof(struct ffa_memory_region_constituent);
366 uint32_t count_to_copy = constituent_count;
367 uint32_t i;
368
369 if (count_to_copy > fragment_max_constituents) {
370 count_to_copy = fragment_max_constituents;
371 }
372
373 for (i = 0; i < count_to_copy; ++i) {
374 ffa_copy_memory_region_constituents(&fragment[i],
375 &constituents[i]);
376 }
377
378 if (fragment_length != NULL) {
379 *fragment_length = count_to_copy *
380 sizeof(struct ffa_memory_region_constituent);
381 }
382
383 return constituent_count - count_to_copy;
384 }
385
ffa_composite_memory_region_init(struct ffa_composite_memory_region * composite,uint64_t address,uint32_t page_count)386 static void ffa_composite_memory_region_init(
387 struct ffa_composite_memory_region *composite, uint64_t address,
388 uint32_t page_count)
389 {
390 composite->page_count = page_count;
391 composite->constituent_count = 1;
392 composite->reserved_0 = 0;
393
394 composite->constituents[0].page_count = page_count;
395 composite->constituents[0].address = address;
396 composite->constituents[0].reserved = 0;
397 }
398
399 /**
400 * Initialises the given `ffa_endpoint_rx_tx_descriptor` to be used for an
401 * `FFA_RXTX_MAP` forwarding.
402 * Each buffer is described by an `ffa_composite_memory_region` containing
403 * one `ffa_memory_region_constituent`.
404 */
void ffa_endpoint_rx_tx_descriptor_init(
	struct ffa_endpoint_rx_tx_descriptor *desc, ffa_id_t endpoint_id,
	uint64_t rx_address, uint64_t tx_address)
{
	desc->endpoint_id = endpoint_id;
	desc->reserved = 0;
	desc->pad = 0;

	/*
	 * RX's composite descriptor is allocated after the endpoint
	 * descriptor. `sizeof(struct ffa_endpoint_rx_tx_descriptor)` is
	 * guaranteed to be 16-byte aligned (static_assert at the top of
	 * this file).
	 */
	desc->rx_offset = sizeof(struct ffa_endpoint_rx_tx_descriptor);

	/* Each mailbox spans HF_MAILBOX_SIZE bytes, expressed in FF-A pages. */
	ffa_composite_memory_region_init(
		(struct ffa_composite_memory_region *)((uintptr_t)desc +
						       desc->rx_offset),
		rx_address, HF_MAILBOX_SIZE / FFA_PAGE_SIZE);

	/*
	 * TX's composite descriptor is allocated after the RX descriptor,
	 * which holds exactly one constituent.
	 * `sizeof(struct ffa_composite_memory_region)` and
	 * `sizeof(struct ffa_memory_region_constituent)` are guaranteed to be
	 * 16-byte aligned in ffa_memory.c.
	 */
	desc->tx_offset = desc->rx_offset +
			  sizeof(struct ffa_composite_memory_region) +
			  sizeof(struct ffa_memory_region_constituent);

	ffa_composite_memory_region_init(
		(struct ffa_composite_memory_region *)((uintptr_t)desc +
						       desc->tx_offset),
		tx_address, HF_MAILBOX_SIZE / FFA_PAGE_SIZE);
}
440