1 /*
2  * Copyright 2019 The Hafnium Authors.
3  *
4  * Use of this source code is governed by a BSD-style
5  * license that can be found in the LICENSE file or at
6  * https://opensource.org/licenses/BSD-3-Clause.
7  */
8 
9 #include "hf/manifest.h"
10 
11 #include <stddef.h>
12 #include <stdint.h>
13 
14 #include "hf/arch/types.h"
15 #include "hf/arch/vmid_base.h"
16 
17 #include "hf/addr.h"
18 #include "hf/assert.h"
19 #include "hf/boot_info.h"
20 #include "hf/boot_params.h"
21 #include "hf/check.h"
22 #include "hf/dlog.h"
23 #include "hf/fdt.h"
24 #include "hf/ffa.h"
25 #include "hf/ffa_partition_manifest.h"
26 #include "hf/layout.h"
27 #include "hf/mem_range.h"
28 #include "hf/mm.h"
29 #include "hf/mpool.h"
30 #include "hf/partition_pkg.h"
31 #include "hf/std.h"
32 
33 #define TRY(expr)                                            \
34 	do {                                                 \
35 		enum manifest_return_code ret_code = (expr); \
36 		if (ret_code != MANIFEST_SUCCESS) {          \
37 			return ret_code;                     \
38 		}                                            \
39 	} while (0)
40 
41 #define VM_ID_MAX (HF_VM_ID_OFFSET + MAX_VMS - 1)
42 #define VM_ID_MAX_DIGITS (5)
43 #define VM_NAME_EXTRA_CHARS (3) /* "vm" + number + '\0' */
44 #define VM_NAME_MAX_SIZE (VM_ID_MAX_DIGITS + VM_NAME_EXTRA_CHARS)
45 static_assert(VM_NAME_MAX_SIZE <= STRING_MAX_SIZE,
46 	      "VM name does not fit into a struct string.");
47 static_assert(VM_ID_MAX <= 99999, "Insufficient VM_NAME_BUF_SIZE");
48 static_assert((HF_OTHER_WORLD_ID > VM_ID_MAX) ||
49 		      (HF_OTHER_WORLD_ID < HF_VM_ID_BASE),
50 	      "TrustZone VM ID clashes with normal VM range.");
51 
52 /* Bitmap to track boot order values in use. */
53 #define BOOT_ORDER_ENTRY_BITS (sizeof(uint64_t) * 8)
54 #define BOOT_ORDER_MAP_ENTRIES                                \
55 	((DEFAULT_BOOT_ORDER + (BOOT_ORDER_ENTRY_BITS - 1)) / \
56 	 BOOT_ORDER_ENTRY_BITS)
57 
58 /**
59  * A struct to keep track of the partitions properties during early boot
60  * manifest parsing:
61  * - Interrupts ID.
62  * - Physical memory ranges.
63  */
struct manifest_data {
	/* The parsed manifest being built up. */
	struct manifest manifest;
	/* Interrupt IDs claimed so far; used to reject duplicate IDs. */
	struct interrupt_bitmap intids;
	/*
	 * Allocate enough for the maximum amount of memory regions defined via
	 * the partitions manifest, and regions for each partition
	 * address-space.
	 */
	struct mem_range mem_regions[PARTITION_MAX_MEMORY_REGIONS * MAX_VMS +
				     PARTITION_MAX_DEVICE_REGIONS * MAX_VMS +
				     MAX_VMS];
	/* Number of valid entries in mem_regions. */
	size_t mem_regions_index;
	/* Bitmap of boot-order values already claimed by partitions. */
	uint64_t boot_order_values[BOOT_ORDER_MAP_ENTRIES];
};
78 
79 /**
80  * Calculate the number of entries in the ppool that are required to
81  * store the manifest_data struct.
82  */
static const size_t manifest_data_ppool_entries =
	(align_up(sizeof(struct manifest_data), MM_PPOOL_ENTRY_SIZE) /
	 MM_PPOOL_ENTRY_SIZE);

/* Singleton parsing state; allocated by manifest_data_init(). */
static struct manifest_data *manifest_data;
88 
check_boot_order(uint16_t boot_order)89 static bool check_boot_order(uint16_t boot_order)
90 {
91 	uint16_t i;
92 	uint64_t boot_order_mask;
93 
94 	if (boot_order == DEFAULT_BOOT_ORDER) {
95 		return true;
96 	}
97 	if (boot_order > DEFAULT_BOOT_ORDER) {
98 		dlog_error("Boot order should not exceed %x",
99 			   DEFAULT_BOOT_ORDER);
100 		return false;
101 	}
102 
103 	i = boot_order / BOOT_ORDER_ENTRY_BITS;
104 	boot_order_mask = UINT64_C(1) << (boot_order % BOOT_ORDER_ENTRY_BITS);
105 
106 	if ((boot_order_mask & manifest_data->boot_order_values[i]) != 0U) {
107 		dlog_error("Boot order must be a unique value.");
108 		return false;
109 	}
110 
111 	manifest_data->boot_order_values[i] |= boot_order_mask;
112 
113 	return true;
114 }
115 
116 /**
117  * Allocates and clear memory for the manifest data in the given memory pool.
118  * Returns true if the memory is successfully allocated.
119  */
manifest_data_init(struct mpool * ppool)120 static bool manifest_data_init(struct mpool *ppool)
121 {
122 	manifest_data = (struct manifest_data *)mpool_alloc_contiguous(
123 		ppool, manifest_data_ppool_entries, 1);
124 
125 	assert(manifest_data != NULL);
126 
127 	memset_s(manifest_data, sizeof(struct manifest_data), 0,
128 		 sizeof(struct manifest_data));
129 
130 	return manifest_data != NULL;
131 }
132 
133 /**
134  * Frees the memory used for the manifest data in the given memory pool.
135  */
manifest_data_deinit(struct mpool * ppool)136 static void manifest_data_deinit(struct mpool *ppool)
137 {
138 	/**
139 	 * Clear and return the memory used for the manifest_data struct to the
140 	 * memory pool.
141 	 */
142 	memset_s(manifest_data, sizeof(struct manifest_data), 0,
143 		 sizeof(struct manifest_data));
144 	mpool_add_chunk(ppool, manifest_data, manifest_data_ppool_entries);
145 }
146 
/** Returns the number of decimal digits needed to print `vm_id` (>= 1). */
static inline size_t count_digits(ffa_id_t vm_id)
{
	size_t num_digits = 1;

	while (vm_id >= 10) {
		vm_id /= 10;
		num_digits++;
	}

	return num_digits;
}
157 
158 /**
159  * Generates a string with the two letters "vm" followed by an integer.
160  * Assumes `buf` is of size VM_NAME_BUF_SIZE.
161  */
/**
 * Writes "vm" followed by the decimal value of `vm_id` (NUL-terminated) into
 * `str`. The buffer must be at least VM_NAME_MAX_SIZE bytes.
 */
static void generate_vm_node_name(struct string *str, ffa_id_t vm_id)
{
	static const char *decimal = "0123456789";
	size_t num_digits = count_digits(vm_id);
	char *buf = str->data;
	size_t pos = VM_NAME_EXTRA_CHARS + num_digits;

	assert(num_digits <= VM_ID_MAX_DIGITS);

	/* Fill backwards: terminator, digits (least-significant first). */
	buf[--pos] = '\0';
	do {
		buf[--pos] = decimal[vm_id % 10];
		vm_id /= 10;
	} while (vm_id != 0);
	buf[--pos] = 'm';
	buf[--pos] = 'v';
	assert(pos == 0);
}
179 
180 /**
181  * Read a boolean property: true if present; false if not. If present, the value
182  * of the property must be empty else it is considered malformed.
183  */
read_bool(const struct fdt_node * node,const char * property,bool * out)184 static enum manifest_return_code read_bool(const struct fdt_node *node,
185 					   const char *property, bool *out)
186 {
187 	struct memiter data;
188 	bool present = fdt_read_property(node, property, &data);
189 
190 	if (present && memiter_size(&data) != 0) {
191 		return MANIFEST_ERROR_MALFORMED_BOOLEAN;
192 	}
193 
194 	*out = present;
195 	return MANIFEST_SUCCESS;
196 }
197 
read_string(const struct fdt_node * node,const char * property,struct string * out)198 static enum manifest_return_code read_string(const struct fdt_node *node,
199 					     const char *property,
200 					     struct string *out)
201 {
202 	struct memiter data;
203 
204 	if (!fdt_read_property(node, property, &data)) {
205 		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
206 	}
207 
208 	switch (string_init(out, &data)) {
209 	case STRING_SUCCESS:
210 		return MANIFEST_SUCCESS;
211 	case STRING_ERROR_INVALID_INPUT:
212 		return MANIFEST_ERROR_MALFORMED_STRING;
213 	case STRING_ERROR_TOO_LONG:
214 		return MANIFEST_ERROR_STRING_TOO_LONG;
215 	}
216 }
217 
read_optional_string(const struct fdt_node * node,const char * property,struct string * out)218 static enum manifest_return_code read_optional_string(
219 	const struct fdt_node *node, const char *property, struct string *out)
220 {
221 	enum manifest_return_code ret;
222 
223 	ret = read_string(node, property, out);
224 	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
225 		string_init_empty(out);
226 		ret = MANIFEST_SUCCESS;
227 	}
228 	return ret;
229 }
230 
read_uint64(const struct fdt_node * node,const char * property,uint64_t * out)231 static enum manifest_return_code read_uint64(const struct fdt_node *node,
232 					     const char *property,
233 					     uint64_t *out)
234 {
235 	struct memiter data;
236 
237 	if (!fdt_read_property(node, property, &data)) {
238 		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
239 	}
240 
241 	if (!fdt_parse_number(&data, memiter_size(&data), out)) {
242 		return MANIFEST_ERROR_MALFORMED_INTEGER;
243 	}
244 
245 	return MANIFEST_SUCCESS;
246 }
247 
read_optional_uint64(const struct fdt_node * node,const char * property,uint64_t default_value,uint64_t * out)248 static enum manifest_return_code read_optional_uint64(
249 	const struct fdt_node *node, const char *property,
250 	uint64_t default_value, uint64_t *out)
251 {
252 	enum manifest_return_code ret;
253 
254 	ret = read_uint64(node, property, out);
255 	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
256 		*out = default_value;
257 		return MANIFEST_SUCCESS;
258 	}
259 	return ret;
260 }
261 
read_uint32(const struct fdt_node * node,const char * property,uint32_t * out)262 static enum manifest_return_code read_uint32(const struct fdt_node *node,
263 					     const char *property,
264 					     uint32_t *out)
265 {
266 	uint64_t value;
267 
268 	TRY(read_uint64(node, property, &value));
269 
270 	if (value > UINT32_MAX) {
271 		return MANIFEST_ERROR_INTEGER_OVERFLOW;
272 	}
273 
274 	*out = (uint32_t)value;
275 	return MANIFEST_SUCCESS;
276 }
277 
read_optional_uint32(const struct fdt_node * node,const char * property,uint32_t default_value,uint32_t * out)278 static enum manifest_return_code read_optional_uint32(
279 	const struct fdt_node *node, const char *property,
280 	uint32_t default_value, uint32_t *out)
281 {
282 	enum manifest_return_code ret;
283 
284 	ret = read_uint32(node, property, out);
285 	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
286 		*out = default_value;
287 		return MANIFEST_SUCCESS;
288 	}
289 	return ret;
290 }
291 
read_uint16(const struct fdt_node * node,const char * property,uint16_t * out)292 static enum manifest_return_code read_uint16(const struct fdt_node *node,
293 					     const char *property,
294 					     uint16_t *out)
295 {
296 	uint64_t value;
297 
298 	TRY(read_uint64(node, property, &value));
299 	if (value > UINT16_MAX) {
300 		return MANIFEST_ERROR_INTEGER_OVERFLOW;
301 	}
302 
303 	*out = (uint16_t)value;
304 	return MANIFEST_SUCCESS;
305 }
306 
read_optional_uint16(const struct fdt_node * node,const char * property,uint16_t default_value,uint16_t * out)307 static enum manifest_return_code read_optional_uint16(
308 	const struct fdt_node *node, const char *property,
309 	uint16_t default_value, uint16_t *out)
310 {
311 	enum manifest_return_code ret;
312 
313 	ret = read_uint16(node, property, out);
314 	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
315 		*out = default_value;
316 		return MANIFEST_SUCCESS;
317 	}
318 
319 	return ret;
320 }
321 
read_uint8(const struct fdt_node * node,const char * property,uint8_t * out)322 static enum manifest_return_code read_uint8(const struct fdt_node *node,
323 					    const char *property, uint8_t *out)
324 {
325 	uint64_t value;
326 
327 	TRY(read_uint64(node, property, &value));
328 
329 	if (value > UINT8_MAX) {
330 		return MANIFEST_ERROR_INTEGER_OVERFLOW;
331 	}
332 
333 	*out = (uint8_t)value;
334 	return MANIFEST_SUCCESS;
335 }
336 
read_optional_uint8(const struct fdt_node * node,const char * property,uint8_t default_value,uint8_t * out)337 static enum manifest_return_code read_optional_uint8(
338 	const struct fdt_node *node, const char *property,
339 	uint8_t default_value, uint8_t *out)
340 {
341 	enum manifest_return_code ret;
342 
343 	ret = read_uint8(node, property, out);
344 	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
345 		*out = default_value;
346 		return MANIFEST_SUCCESS;
347 	}
348 
349 	return MANIFEST_SUCCESS;
350 }
351 
/** Iterator over an FDT property holding a list of 32-bit cells. */
struct uint32list_iter {
	/* Remaining raw property bytes; consumed 4 bytes at a time. */
	struct memiter mem_it;
};
355 
read_uint32list(const struct fdt_node * node,const char * property,struct uint32list_iter * out)356 static enum manifest_return_code read_uint32list(const struct fdt_node *node,
357 						 const char *property,
358 						 struct uint32list_iter *out)
359 {
360 	struct memiter data;
361 
362 	if (!fdt_read_property(node, property, &data)) {
363 		memiter_init(&out->mem_it, NULL, 0);
364 		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
365 	}
366 
367 	if ((memiter_size(&data) % sizeof(uint32_t)) != 0) {
368 		return MANIFEST_ERROR_MALFORMED_INTEGER_LIST;
369 	}
370 
371 	out->mem_it = data;
372 	return MANIFEST_SUCCESS;
373 }
374 
read_optional_uint32list(const struct fdt_node * node,const char * property,struct uint32list_iter * out)375 static enum manifest_return_code read_optional_uint32list(
376 	const struct fdt_node *node, const char *property,
377 	struct uint32list_iter *out)
378 {
379 	enum manifest_return_code ret = read_uint32list(node, property, out);
380 
381 	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
382 		return MANIFEST_SUCCESS;
383 	}
384 	return ret;
385 }
386 
uint32list_has_next(const struct uint32list_iter * list)387 static bool uint32list_has_next(const struct uint32list_iter *list)
388 {
389 	return memiter_size(&list->mem_it) > 0;
390 }
391 
uint32list_get_next(struct uint32list_iter * list,uint32_t * out)392 static enum manifest_return_code uint32list_get_next(
393 	struct uint32list_iter *list, uint32_t *out)
394 {
395 	uint64_t num;
396 
397 	CHECK(uint32list_has_next(list));
398 	if (!fdt_parse_number(&list->mem_it, sizeof(uint32_t), &num)) {
399 		return MANIFEST_ERROR_MALFORMED_INTEGER;
400 	}
401 
402 	*out = (uint32_t)num;
403 	return MANIFEST_SUCCESS;
404 }
405 
406 /**
407  * Parse a UUID from `uuid` into `out`.
408  * Returns `MANIFEST_SUCCESS` if parsing succeeded.
409  */
parse_uuid(struct uint32list_iter * uuid,struct ffa_uuid * out)410 static enum manifest_return_code parse_uuid(struct uint32list_iter *uuid,
411 					    struct ffa_uuid *out)
412 {
413 	for (size_t i = 0; i < 4 && uint32list_has_next(uuid); i++) {
414 		TRY(uint32list_get_next(uuid, &out->uuid[i]));
415 	}
416 
417 	return MANIFEST_SUCCESS;
418 }
419 
420 /**
421  * Parse a list of UUIDs from `uuid` into `out`.
422  * Writes the number of UUIDs parsed to `len`.
423  * Returns `MANIFEST_SUCCESS` if parsing succeeded.
424  * Returns `MANIFEST_ERROR_UUID_ALL_ZEROS` if any of the UUIDs are all zeros.
425  * Returns `MANIFEEST_ERROR_TOO_MANY_UUIDS` if there are more than
426  * `PARTITION_MAX_UUIDS`
427  */
parse_uuid_list(struct uint32list_iter * uuid,struct ffa_uuid * out,uint16_t * len)428 static enum manifest_return_code parse_uuid_list(struct uint32list_iter *uuid,
429 						 struct ffa_uuid *out,
430 						 uint16_t *len)
431 {
432 	uint16_t j;
433 
434 	for (j = 0; uint32list_has_next(uuid); j++) {
435 		TRY(parse_uuid(uuid, &out[j]));
436 
437 		if (ffa_uuid_is_null(&out[j])) {
438 			return MANIFEST_ERROR_UUID_ALL_ZEROS;
439 		}
440 		dlog_verbose("  UUID %#x-%x-%x-%x\n", out[j].uuid[0],
441 			     out[j].uuid[1], out[j].uuid[2], out[j].uuid[3]);
442 
443 		if (j >= PARTITION_MAX_UUIDS) {
444 			return MANIFEST_ERROR_TOO_MANY_UUIDS;
445 		}
446 	}
447 
448 	*len = j;
449 	return MANIFEST_SUCCESS;
450 }
451 
/**
 * Parse the manifest properties common to all VMs: the FF-A partition flags,
 * debug name, SMC whitelist, and — for non-primary VMs — memory size, vCPU
 * count and optional FDT filename.
 * Returns MANIFEST_SUCCESS or the first error encountered.
 */
static enum manifest_return_code parse_vm_common(const struct fdt_node *node,
						 struct manifest_vm *vm,
						 ffa_id_t vm_id)
{
	struct uint32list_iter smcs;
	size_t idx;

	TRY(read_bool(node, "is_ffa_partition", &vm->is_ffa_partition));

	TRY(read_bool(node, "hyp_loaded", &vm->is_hyp_loaded));

	TRY(read_string(node, "debug_name", &vm->debug_name));

	/* Copy at most MAX_SMCS entries from the optional whitelist. */
	TRY(read_optional_uint32list(node, "smc_whitelist", &smcs));
	while (uint32list_has_next(&smcs) &&
	       vm->smc_whitelist.smc_count < MAX_SMCS) {
		idx = vm->smc_whitelist.smc_count++;
		TRY(uint32list_get_next(&smcs, &vm->smc_whitelist.smcs[idx]));
	}

	/* Entries beyond MAX_SMCS are dropped with only a warning. */
	if (uint32list_has_next(&smcs)) {
		dlog_warning("%s SMC whitelist too long.\n",
			     vm->debug_name.data);
	}

	TRY(read_bool(node, "smc_whitelist_permissive",
		      &vm->smc_whitelist.permissive));

	/* Only secondary VMs carry explicit memory/vCPU configuration. */
	if (vm_id != HF_PRIMARY_VM_ID) {
		TRY(read_uint64(node, "mem_size", &vm->secondary.mem_size));
		TRY(read_uint16(node, "vcpu_count", &vm->secondary.vcpu_count));
		TRY(read_optional_string(node, "fdt_filename",
					 &vm->secondary.fdt_filename));
	}

	return MANIFEST_SUCCESS;
}
489 
/**
 * Parse the non-FF-A VM manifest node: kernel filename, primary-only boot
 * properties, and the exception level the VM runs at.
 */
static enum manifest_return_code parse_vm(struct fdt_node *node,
					  struct manifest_vm *vm,
					  ffa_id_t vm_id)
{
	bool is_primary = (vm_id == HF_PRIMARY_VM_ID);

	TRY(read_optional_string(node, "kernel_filename",
				 &vm->kernel_filename));

	/* Ramdisk and boot address only apply to the primary VM. */
	if (is_primary) {
		TRY(read_optional_string(node, "ramdisk_filename",
					 &vm->primary.ramdisk_filename));
		TRY(read_optional_uint64(node, "boot_address",
					 MANIFEST_INVALID_ADDRESS,
					 &vm->primary.boot_address));
	}

	TRY(read_optional_uint8(node, "exception-level", (uint8_t)EL1,
				(uint8_t *)&vm->partition.run_time_el));

	return MANIFEST_SUCCESS;
}
509 
510 /**
511  * Return true if the region described by `region_start` and `page_count`
512  * overlaps with any of `ranges`.
513  */
is_memory_region_within_ranges(uintptr_t region_start,uint32_t page_count,const struct mem_range ranges[],size_t ranges_size)514 static bool is_memory_region_within_ranges(uintptr_t region_start,
515 					   uint32_t page_count,
516 					   const struct mem_range ranges[],
517 					   size_t ranges_size)
518 {
519 	struct mem_range region = make_mem_range(region_start, page_count);
520 
521 	for (size_t i = 0; i < ranges_size; i++) {
522 		if (mem_range_overlaps(ranges[i], region)) {
523 			return true;
524 		}
525 	}
526 
527 	return false;
528 }
529 
dump_memory_ranges(const struct mem_range * ranges,const size_t ranges_size,bool ns)530 void dump_memory_ranges(const struct mem_range *ranges,
531 			const size_t ranges_size, bool ns)
532 {
533 	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
534 		return;
535 	}
536 
537 	dlog("%s Memory ranges:\n", ns ? "NS" : "S");
538 
539 	for (size_t i = 0; i < ranges_size; i++) {
540 		uintptr_t begin = pa_addr(ranges[i].begin);
541 		uintptr_t end = pa_addr(ranges[i].end);
542 		size_t page_count =
543 			align_up(pa_difference(ranges[i].begin, ranges[i].end),
544 				 PAGE_SIZE) /
545 			PAGE_SIZE;
546 
547 		dlog("  [%lx - %lx (%zu pages)]\n", begin, end, page_count);
548 	}
549 }
550 
551 /**
552  * Check the partition's assigned memory is contained in the memory ranges
553  * configured for the SWd, in the SPMC's manifest.
554  */
check_partition_memory_is_valid(uintptr_t base_address,uint32_t page_count,uint32_t attributes,const struct boot_params * params,bool is_device_region)555 static enum manifest_return_code check_partition_memory_is_valid(
556 	uintptr_t base_address, uint32_t page_count, uint32_t attributes,
557 	const struct boot_params *params, bool is_device_region)
558 {
559 	bool is_secure_region =
560 		(attributes & MANIFEST_REGION_ATTR_SECURITY) == 0U;
561 	const struct mem_range *ranges_from_manifest;
562 	size_t ranges_count;
563 	bool within_ranges;
564 	enum manifest_return_code error_return;
565 
566 	if (!is_device_region) {
567 		ranges_from_manifest = is_secure_region ? params->mem_ranges
568 							: params->ns_mem_ranges;
569 		ranges_count = is_secure_region ? params->mem_ranges_count
570 						: params->ns_mem_ranges_count;
571 		error_return = MANIFEST_ERROR_MEM_REGION_INVALID;
572 	} else {
573 		ranges_from_manifest = is_secure_region
574 					       ? params->device_mem_ranges
575 					       : params->ns_device_mem_ranges;
576 		ranges_count = is_secure_region
577 				       ? params->device_mem_ranges_count
578 				       : params->ns_device_mem_ranges_count;
579 		error_return = MANIFEST_ERROR_DEVICE_MEM_REGION_INVALID;
580 	}
581 
582 	within_ranges = is_memory_region_within_ranges(
583 		base_address, page_count, ranges_from_manifest, ranges_count);
584 
585 	return within_ranges ? MANIFEST_SUCCESS : error_return;
586 }
587 
588 /*
589  * Keep track of the memory allocated by partitions. This includes memory region
590  * nodes and device region nodes defined in their respective partition
591  * manifests, as well address space defined from their load address.
592  */
check_and_record_memory_used(uintptr_t base_address,uint32_t page_count,struct mem_range * mem_ranges,size_t * mem_regions_index)593 static enum manifest_return_code check_and_record_memory_used(
594 	uintptr_t base_address, uint32_t page_count,
595 	struct mem_range *mem_ranges, size_t *mem_regions_index)
596 {
597 	paddr_t begin;
598 
599 	if (!is_aligned(base_address, PAGE_SIZE)) {
600 		dlog_error("base_address (%#lx) is not aligned to page size.\n",
601 			   base_address);
602 		return MANIFEST_ERROR_MEM_REGION_UNALIGNED;
603 	}
604 
605 	if (is_memory_region_within_ranges(base_address, page_count, mem_ranges,
606 					   *mem_regions_index)) {
607 		return MANIFEST_ERROR_MEM_REGION_OVERLAP;
608 	}
609 
610 	begin = pa_init(base_address);
611 
612 	mem_ranges[*mem_regions_index].begin = begin;
613 	mem_ranges[*mem_regions_index].end =
614 		pa_add(begin, page_count * PAGE_SIZE - 1);
615 	(*mem_regions_index)++;
616 
617 	return MANIFEST_SUCCESS;
618 }
619 
parse_common_fields_mem_dev_region_node(struct fdt_node * ffa_node,struct dma_device_properties * dma_prop)620 static enum manifest_return_code parse_common_fields_mem_dev_region_node(
621 	struct fdt_node *ffa_node, struct dma_device_properties *dma_prop)
622 {
623 	uint32_t j = 0;
624 	struct uint32list_iter list;
625 
626 	TRY(read_optional_uint32(ffa_node, "smmu-id", MANIFEST_INVALID_ID,
627 				 &dma_prop->smmu_id));
628 	if (dma_prop->smmu_id != MANIFEST_INVALID_ID) {
629 		dlog_verbose("      smmu-id:  %u\n", dma_prop->smmu_id);
630 	}
631 
632 	TRY(read_optional_uint32list(ffa_node, "stream-ids", &list));
633 	dlog_verbose("      Stream IDs assigned:\n");
634 
635 	j = 0;
636 	while (uint32list_has_next(&list)) {
637 		if (j == PARTITION_MAX_STREAMS_PER_DEVICE) {
638 			return MANIFEST_ERROR_STREAM_IDS_OVERFLOW;
639 		}
640 
641 		TRY(uint32list_get_next(&list, &dma_prop->stream_ids[j]));
642 		dlog_verbose("        %u\n", dma_prop->stream_ids[j]);
643 		j++;
644 	}
645 	if (j == 0) {
646 		dlog_verbose("        None\n");
647 	} else if (dma_prop->smmu_id == MANIFEST_INVALID_ID) {
648 		/*
649 		 * SMMU ID must be specified if the partition specifies
650 		 * Stream IDs for any device upstream of SMMU.
651 		 */
652 		return MANIFEST_ERROR_MISSING_SMMU_ID;
653 	}
654 	dma_prop->stream_count = j;
655 
656 	return MANIFEST_SUCCESS;
657 }
658 
659 /**
660  * Parse and validate a memory regions's base address.
661  *
662  * The base address can be specified either as an absolute address (with
663  * `base-address`) or as an offset from `load_address` (with
664  * `load-address-relative-offset`).
665 
666  * Returns an error if:
667  * - Neither `base-address` or `load-address-relative-offset` are specified.
668  * - Both `base-address` and `load-address-relative-offset` are specified.
669  * - The effective address (`load-address-relative-offset` + `load_address`)
670  *   would overflow.
671  */
parse_base_address(struct fdt_node * mem_node,uintptr_t load_address,struct memory_region * mem_region)672 static enum manifest_return_code parse_base_address(
673 	struct fdt_node *mem_node, uintptr_t load_address,
674 	struct memory_region *mem_region)
675 {
676 	uintptr_t relative_offset;
677 	uintptr_t absolute_address;
678 
679 	bool is_relative;
680 	bool is_absolute;
681 
682 	TRY(read_optional_uint64(mem_node, "base-address",
683 				 MANIFEST_INVALID_ADDRESS, &absolute_address));
684 
685 	TRY(read_optional_uint64(mem_node, "load-address-relative-offset",
686 				 MANIFEST_INVALID_ADDRESS, &relative_offset));
687 
688 	is_absolute = (absolute_address != MANIFEST_INVALID_ADDRESS);
689 	is_relative = (relative_offset != MANIFEST_INVALID_ADDRESS);
690 
691 	if (!is_absolute && !is_relative) {
692 		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
693 	}
694 
695 	if (is_absolute && is_relative) {
696 		return MANIFEST_ERROR_BASE_ADDRESS_AND_RELATIVE_ADDRESS;
697 	}
698 
699 	if (is_relative && relative_offset > UINT64_MAX - load_address) {
700 		return MANIFEST_ERROR_INTEGER_OVERFLOW;
701 	}
702 
703 	mem_region->base_address =
704 		is_absolute ? absolute_address : load_address + relative_offset;
705 	mem_region->is_relative = is_relative;
706 
707 	return MANIFEST_SUCCESS;
708 }
709 
710 /**
711  * Parse and validate a memory region/device region's attributes.
712  * Returns an error if:
713  * - Memory region attributes are not `R` or `RW` or `RX`.
714  * - Device region attributes are not `R` or `RW`.
715  * NOTE: Security attribute is not checked by this function, it is checked in
716  * the load phase.
717  */
parse_ffa_region_attributes(struct fdt_node * node,uint32_t * out_attributes,bool is_device)718 static enum manifest_return_code parse_ffa_region_attributes(
719 	struct fdt_node *node, uint32_t *out_attributes, bool is_device)
720 {
721 	uint32_t attributes;
722 
723 	TRY(read_uint32(node, "attributes", out_attributes));
724 
725 	attributes = *out_attributes &
726 		     (MANIFEST_REGION_ATTR_READ | MANIFEST_REGION_ATTR_WRITE |
727 		      MANIFEST_REGION_ATTR_EXEC);
728 
729 	if (is_device) {
730 		switch (attributes) {
731 		case MANIFEST_REGION_ATTR_READ:
732 		case MANIFEST_REGION_ATTR_READ | MANIFEST_REGION_ATTR_WRITE:
733 			break;
734 		default:
735 			return MANIFEST_ERROR_INVALID_MEM_PERM;
736 		}
737 	} else {
738 		switch (attributes) {
739 		case MANIFEST_REGION_ATTR_READ:
740 		case MANIFEST_REGION_ATTR_READ | MANIFEST_REGION_ATTR_WRITE:
741 		case MANIFEST_REGION_ATTR_READ | MANIFEST_REGION_ATTR_EXEC:
742 			break;
743 		default:
744 			return MANIFEST_ERROR_INVALID_MEM_PERM;
745 		}
746 	}
747 
748 	/* Filter region attributes. */
749 	*out_attributes &= MANIFEST_REGION_ALL_ATTR_MASK;
750 
751 	return MANIFEST_SUCCESS;
752 }
753 
parse_page_count(struct fdt_node * node,uint32_t * page_count)754 static enum manifest_return_code parse_page_count(struct fdt_node *node,
755 						  uint32_t *page_count)
756 {
757 	TRY(read_uint32(node, "pages-count", page_count));
758 
759 	if (*page_count == 0) {
760 		return MANIFEST_ERROR_MEM_REGION_EMPTY;
761 	}
762 
763 	return MANIFEST_SUCCESS;
764 }
765 
/**
 * Parse the partition's "memory-regions" node (`mem_node`) into
 * `mem_regions`, writing the number of parsed regions to `count`.
 *
 * For each child node this reads the description, base address (absolute or
 * load-address-relative), page count, attributes and DMA properties; checks
 * the region against the boot-params ranges; records it in the global
 * overlap tracker; and, if `rxtx->available`, matches the node's phandle
 * against the RX/TX buffer phandles.
 *
 * Returns MANIFEST_SUCCESS or the first error encountered.
 */
static enum manifest_return_code parse_ffa_memory_region_node(
	struct fdt_node *mem_node, uintptr_t load_address,
	struct memory_region *mem_regions, uint16_t *count, struct rx_tx *rxtx,
	const struct boot_params *boot_params)
{
	uint32_t phandle;
	uint16_t i = 0;
	uint32_t j = 0;
	struct uint32list_iter list;

	dlog_verbose("  Partition memory regions\n");

	if (!fdt_is_compatible(mem_node, "arm,ffa-manifest-memory-regions")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (!fdt_first_child(mem_node)) {
		return MANIFEST_ERROR_MEMORY_REGION_NODE_EMPTY;
	}

	do {
		dlog_verbose("    Memory Region[%u]\n", i);

		TRY(read_optional_string(mem_node, "description",
					 &mem_regions[i].description));
		dlog_verbose("      Description: %s\n",
			     string_data(&mem_regions[i].description));

		TRY(parse_base_address(mem_node, load_address,
				       &mem_regions[i]));

		TRY(parse_page_count(mem_node, &mem_regions[i].page_count));
		dlog_verbose("      Pages_count: %u\n",
			     mem_regions[i].page_count);

		TRY(parse_ffa_region_attributes(
			mem_node, &mem_regions[i].attributes, false));
		dlog_verbose("      Attributes: %#x\n",
			     mem_regions[i].attributes);

		/* Region must lie within the boot-params memory ranges. */
		TRY(check_partition_memory_is_valid(
			mem_regions[i].base_address, mem_regions[i].page_count,
			mem_regions[i].attributes, boot_params, false));

		/*
		 * Memory regions are not allowed to overlap with
		 * `load_address`, unless the memory region is relative.
		 */
		if (!mem_regions[i].is_relative) {
			struct mem_range range =
				make_mem_range(mem_regions[i].base_address,
					       mem_regions[i].page_count);

			if (mem_range_contains_address(range, load_address)) {
				return MANIFEST_ERROR_MEM_REGION_OVERLAP;
			}
		}

		/* Record the region in the global overlap tracker. */
		TRY(check_and_record_memory_used(
			mem_regions[i].base_address, mem_regions[i].page_count,
			manifest_data->mem_regions,
			&manifest_data->mem_regions_index));

		TRY(parse_common_fields_mem_dev_region_node(
			mem_node, &mem_regions[i].dma_prop));

		TRY(read_optional_uint32list(
			mem_node, "stream-ids-access-permissions", &list));
		dlog_verbose("      Access permissions of Stream IDs:\n");

		j = 0;
		while (uint32list_has_next(&list)) {
			uint32_t permissions;

			if (j == PARTITION_MAX_STREAMS_PER_DEVICE) {
				return MANIFEST_ERROR_DMA_ACCESS_PERMISSIONS_OVERFLOW;
			}

			TRY(uint32list_get_next(&list, &permissions));
			dlog_verbose("        %u\n", permissions);

			/* The first entry sets the expected value. */
			if (j == 0) {
				mem_regions[i].dma_access_permissions =
					permissions;
			}

			/*
			 * All stream ids belonging to a dma device must specify
			 * the same access permissions.
			 */
			if (permissions !=
			    mem_regions[i].dma_access_permissions) {
				return MANIFEST_ERROR_MISMATCH_DMA_ACCESS_PERMISSIONS;
			}

			j++;
		}

		/* Permission count must match the stream-id count exactly. */
		if (j == 0) {
			dlog_verbose("        None\n");
		} else if (j != mem_regions[i].dma_prop.stream_count) {
			return MANIFEST_ERROR_MISMATCH_DMA_ACCESS_PERMISSIONS;
		}

		if (j > 0) {
			/* Filter the dma access permissions. */
			mem_regions[i].dma_access_permissions &=
				MANIFEST_REGION_ALL_ATTR_MASK;
		}

		if (rxtx->available) {
			TRY(read_optional_uint32(
				mem_node, "phandle",
				(uint32_t)MANIFEST_INVALID_ADDRESS, &phandle));
			if (phandle == rxtx->rx_phandle) {
				dlog_verbose("      Assigned as RX buffer\n");
				rxtx->rx_buffer = &mem_regions[i];
			} else if (phandle == rxtx->tx_phandle) {
				dlog_verbose("      Assigned as TX buffer\n");
				rxtx->tx_buffer = &mem_regions[i];
			}
		}

		i++;
	} while (fdt_next_sibling(mem_node) &&
		 (i < PARTITION_MAX_MEMORY_REGIONS));

	/*
	 * NOTE(review): if neither phandle matched any region above,
	 * rx_buffer/tx_buffer are not assigned here and this dereference
	 * relies on them being valid — confirm callers guarantee both
	 * phandles resolve when rxtx->available is set.
	 */
	if (rxtx->available &&
	    (rxtx->rx_buffer->page_count != rxtx->tx_buffer->page_count)) {
		return MANIFEST_ERROR_RXTX_SIZE_MISMATCH;
	}

	*count = i;

	return MANIFEST_SUCCESS;
}
902 
device_region_get_interrupt_info(struct device_region * dev_regions,uint32_t intid)903 static struct interrupt_info *device_region_get_interrupt_info(
904 	struct device_region *dev_regions, uint32_t intid)
905 {
906 	for (uint32_t i = 0; i < ARRAY_SIZE(dev_regions->interrupts); i++) {
907 		if (dev_regions->interrupts[i].id == intid) {
908 			return &(dev_regions->interrupts[i]);
909 		}
910 	}
911 	return NULL;
912 }
913 
/**
 * Parses the "device-regions" node of a partition manifest into
 * `dev_regions`, writing the number of regions parsed to `count` and the
 * number of DMA-capable devices to `dma_device_count`.
 *
 * For each region this reads the base address, page count and attributes,
 * records the range as used, and parses the per-device interrupt list
 * ("interrupts" is a list of <id, attributes> pairs; "interrupts-target" is
 * a list of <id, mpidr-upper-32, mpidr-lower-32> triples).
 *
 * Returns MANIFEST_SUCCESS, or an error code if the node is malformed,
 * incompatible, or exceeds a static limit.
 */
static enum manifest_return_code parse_ffa_device_region_node(
	struct fdt_node *dev_node, struct device_region *dev_regions,
	uint16_t *count, uint8_t *dma_device_count,
	const struct boot_params *boot_params)
{
	struct uint32list_iter list;
	uint16_t i = 0;
	uint32_t j = 0;
	/*
	 * Working copy of the globally allocated interrupt ids, used to
	 * detect duplicates within this node.
	 * NOTE(review): bits set below are never written back to
	 * manifest_data->intids in this function — confirm whether
	 * cross-partition duplicate detection is handled elsewhere.
	 */
	struct interrupt_bitmap allocated_intids = manifest_data->intids;
	uint8_t dma_device_id = 0;

	dlog_verbose("  Partition Device Regions\n");

	if (!fdt_is_compatible(dev_node, "arm,ffa-manifest-device-regions")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (!fdt_first_child(dev_node)) {
		return MANIFEST_ERROR_DEVICE_REGION_NODE_EMPTY;
	}

	*dma_device_count = 0;

	/* Iterate over the child nodes, one per device region. */
	do {
		dlog_verbose("    Device Region[%u]\n", i);

		TRY(read_optional_string(dev_node, "description",
					 &dev_regions[i].name));
		dlog_verbose("      Name: %s\n",
			     string_data(&dev_regions[i].name));

		TRY(read_uint64(dev_node, "base-address",
				&dev_regions[i].base_address));
		dlog_verbose("      Base address: %#lx\n",
			     dev_regions[i].base_address);

		TRY(parse_page_count(dev_node, &dev_regions[i].page_count));
		dlog_verbose("      Pages_count: %u\n",
			     dev_regions[i].page_count);

		/* Fail if the range overlaps already-recorded memory. */
		TRY(check_and_record_memory_used(
			dev_regions[i].base_address, dev_regions[i].page_count,
			manifest_data->mem_regions,
			&manifest_data->mem_regions_index));

		TRY(parse_ffa_region_attributes(
			dev_node, &dev_regions[i].attributes, true));
		dlog_verbose("      Attributes: %#x\n",
			     dev_regions[i].attributes);

		TRY(check_partition_memory_is_valid(
			dev_regions[i].base_address, dev_regions[i].page_count,
			dev_regions[i].attributes, boot_params, true));

		/* "interrupts" holds <id, attributes> pairs. */
		TRY(read_optional_uint32list(dev_node, "interrupts", &list));
		dlog_verbose("      Interrupt List:\n");
		j = 0;
		while (uint32list_has_next(&list) &&
		       j < PARTITION_MAX_INTERRUPTS_PER_DEVICE) {
			uint32_t intid;

			TRY(uint32list_get_next(
				&list, &dev_regions[i].interrupts[j].id));
			intid = dev_regions[i].interrupts[j].id;

			dlog_verbose("        ID = %u\n", intid);

			/* Each interrupt id may only be claimed once. */
			if (interrupt_bitmap_get_value(&allocated_intids,
						       intid) == 1U) {
				return MANIFEST_ERROR_INTERRUPT_ID_REPEATED;
			}

			interrupt_bitmap_set_value(&allocated_intids, intid);

			/* An id without a following attributes word is
			 * malformed. */
			if (uint32list_has_next(&list)) {
				TRY(uint32list_get_next(&list,
							&dev_regions[i]
								 .interrupts[j]
								 .attributes));
			} else {
				return MANIFEST_ERROR_MALFORMED_INTEGER_LIST;
			}

			/* Target MPIDR is filled in later, if specified. */
			dev_regions[i].interrupts[j].mpidr_valid = false;
			dev_regions[i].interrupts[j].mpidr = 0;

			dlog_verbose("        attributes = %u\n",
				     dev_regions[i].interrupts[j].attributes);
			j++;
		}

		dev_regions[i].interrupt_count = j;
		if (j == 0) {
			dlog_verbose("        Empty\n");
		} else {
			/*
			 * "interrupts-target" holds <id, mpidr-upper,
			 * mpidr-lower> triples for interrupts declared above.
			 */
			TRY(read_optional_uint32list(
				dev_node, "interrupts-target", &list));
			dlog_verbose("      Interrupt Target List:\n");

			while (uint32list_has_next(&list)) {
				uint32_t intid;
				uint64_t mpidr = 0;
				uint32_t mpidr_lower = 0;
				uint32_t mpidr_upper = 0;
				struct interrupt_info *info = NULL;

				TRY(uint32list_get_next(&list, &intid));

				dlog_verbose("        ID = %u\n", intid);

				/* The id must have appeared in "interrupts". */
				if (interrupt_bitmap_get_value(
					    &allocated_intids, intid) != 1U) {
					return MANIFEST_ERROR_INTERRUPT_ID_NOT_IN_LIST;
				}

				TRY(uint32list_get_next(&list, &mpidr_upper));
				TRY(uint32list_get_next(&list, &mpidr_lower));
				mpidr = mpidr_upper;
				mpidr <<= 32;
				mpidr |= mpidr_lower;

				info = device_region_get_interrupt_info(
					&dev_regions[i], intid);
				/*
				 * We should find info since
				 * interrupt_bitmap_get_value already ensures
				 * that we saw the interrupt and allocated ids
				 * for it.
				 */
				assert(info != NULL);
				info->mpidr = mpidr;
				info->mpidr_valid = true;
				dlog_verbose("        MPIDR = %#lx\n", mpidr);
			}
		}

		TRY(parse_common_fields_mem_dev_region_node(
			dev_node, &dev_regions[i].dma_prop));

		/* A valid SMMU id marks the region as a DMA device. */
		if (dev_regions[i].dma_prop.smmu_id != MANIFEST_INVALID_ID) {
			dev_regions[i].dma_prop.dma_device_id = dma_device_id++;
			*dma_device_count = dma_device_id;

			if (*dma_device_count > PARTITION_MAX_DMA_DEVICES) {
				return MANIFEST_ERROR_DMA_DEVICE_OVERFLOW;
			}

			dlog_verbose("      dma peripheral device id:  %u\n",
				     dev_regions[i].dma_prop.dma_device_id);
		}

		TRY(read_bool(dev_node, "exclusive-access",
			      &dev_regions[i].exclusive_access));
		dlog_verbose("      Exclusive_access: %u\n",
			     dev_regions[i].exclusive_access);

		i++;
	} while (fdt_next_sibling(dev_node) &&
		 (i < PARTITION_MAX_DEVICE_REGIONS));

	*count = i;

	return MANIFEST_SUCCESS;
}
1078 
/**
 * Validates the fully-parsed FF-A manifest fields of `vm` against what this
 * build of the SPMC supports: FF-A version compatibility, 4KB translation
 * granule, AArch64 execution state, supported exception levels, legal
 * messaging-method bits, per-device and per-VM interrupt limits, boot GP
 * register range, and SRI policy restrictions for S-EL0 partitions.
 *
 * All checks are run (it does not stop at the first failure) so every
 * problem is logged; returns the last error code recorded, or
 * MANIFEST_SUCCESS.
 */
static enum manifest_return_code sanity_check_ffa_manifest(
	struct manifest_vm *vm)
{
	enum ffa_version ffa_version;
	enum manifest_return_code ret_code = MANIFEST_SUCCESS;
	const char *error_string = "specified in manifest is unsupported";
	uint32_t k = 0;
	/* Whether the partition uses FFA_MSG_SEND_DIRECT_REQ2 messaging. */
	bool using_req2 = (vm->partition.messaging_method &
			   (FFA_PARTITION_DIRECT_REQ2_RECV |
			    FFA_PARTITION_DIRECT_REQ2_SEND)) != 0;

	/* ensure that the SPM version is compatible */
	ffa_version = vm->partition.ffa_version;
	if (!ffa_versions_are_compatible(ffa_version, FFA_VERSION_COMPILED)) {
		dlog_error(
			"FF-A partition manifest version v%u.%u is not "
			"compatible with compiled version v%u.%u\n",
			ffa_version_get_major(ffa_version),
			ffa_version_get_minor(ffa_version),
			ffa_version_get_major(FFA_VERSION_COMPILED),
			ffa_version_get_minor(FFA_VERSION_COMPILED));
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	/* Only a 4KB translation granule is supported. */
	if (vm->partition.xlat_granule != PAGE_4KB) {
		dlog_error("Translation granule %s: %u\n", error_string,
			   vm->partition.xlat_granule);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (vm->partition.execution_state != AARCH64) {
		dlog_error("Execution state %s: %u\n", error_string,
			   vm->partition.execution_state);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (vm->partition.run_time_el != EL1 &&
	    vm->partition.run_time_el != S_EL1 &&
	    vm->partition.run_time_el != S_EL0 &&
	    vm->partition.run_time_el != EL0) {
		dlog_error("Exception level %s: %d\n", error_string,
			   vm->partition.run_time_el);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	/* Direct request2 messaging requires FF-A v1.2 or later. */
	if (vm->partition.ffa_version < FFA_VERSION_1_2 && using_req2) {
		dlog_error("Messaging method %s: %x\n", error_string,
			   vm->partition.messaging_method);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	/* Reject any messaging-method bits outside the known set. */
	if ((vm->partition.messaging_method &
	     ~(FFA_PARTITION_DIRECT_REQ_RECV | FFA_PARTITION_DIRECT_REQ_SEND |
	       FFA_PARTITION_INDIRECT_MSG | FFA_PARTITION_DIRECT_REQ2_RECV |
	       FFA_PARTITION_DIRECT_REQ2_SEND)) != 0U) {
		dlog_error("Messaging method %s: %x\n", error_string,
			   vm->partition.messaging_method);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	/* EL0/S-EL0 partitions must be single execution context. */
	if ((vm->partition.run_time_el == S_EL0 ||
	     vm->partition.run_time_el == EL0) &&
	    vm->partition.execution_ctx_count != 1) {
		dlog_error(
			"Exception level and execution context count %s: %d "
			"%d\n",
			error_string, vm->partition.run_time_el,
			vm->partition.execution_ctx_count);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	/*
	 * Enforce per-device and aggregate (per-VM) interrupt count limits.
	 * `k` accumulates the total across all device regions.
	 */
	for (uint16_t i = 0; i < vm->partition.dev_region_count; i++) {
		struct device_region dev_region;

		dev_region = vm->partition.dev_regions[i];

		if (dev_region.interrupt_count >
		    PARTITION_MAX_INTERRUPTS_PER_DEVICE) {
			dlog_error(
				"Interrupt count for device region exceeds "
				"limit.\n");
			ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
			continue;
		}

		for (uint8_t j = 0; j < dev_region.interrupt_count; j++) {
			k++;
			/*
			 * NOTE(review): this `continue` applies to the inner
			 * loop, so the error is logged once per interrupt
			 * beyond the limit — confirm whether a `break` out of
			 * the outer loop was intended.
			 */
			if (k > VM_MANIFEST_MAX_INTERRUPTS) {
				dlog_error(
					"Interrupt count for VM exceeds "
					"limit.\n");
				ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
				continue;
			}
		}
	}

	/* GP register is restricted to one of x0 - x3. */
	if (vm->partition.gp_register_num != DEFAULT_BOOT_GP_REGISTER &&
	    vm->partition.gp_register_num > 3) {
		dlog_error("GP register number %s: %u\n", error_string,
			   vm->partition.gp_register_num);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	/* S-EL0 partitions may not request the SRI trigger policy. */
	if (vm->partition.run_time_el == S_EL0 &&
	    (vm->partition.sri_policy.intr_while_waiting ||
	     vm->partition.sri_policy.intr_pending_entry_wait)) {
		ret_code = MANIFEST_ERROR_SRI_POLICY_NOT_SUPPORTED;
	}

	return ret_code;
}
1192 
1193 /**
1194  * Find the device id allocated to the device region node corresponding to the
1195  * specified stream id.
1196  */
find_dma_device_id_from_dev_region_nodes(const struct manifest_vm * manifest_vm,uint32_t sid,uint8_t * device_id)1197 static bool find_dma_device_id_from_dev_region_nodes(
1198 	const struct manifest_vm *manifest_vm, uint32_t sid, uint8_t *device_id)
1199 {
1200 	for (uint16_t i = 0; i < manifest_vm->partition.dev_region_count; i++) {
1201 		struct device_region dev_region =
1202 			manifest_vm->partition.dev_regions[i];
1203 
1204 		for (uint8_t j = 0; j < dev_region.dma_prop.stream_count; j++) {
1205 			if (sid == dev_region.dma_prop.stream_ids[j]) {
1206 				*device_id = dev_region.dma_prop.dma_device_id;
1207 				return true;
1208 			}
1209 		}
1210 	}
1211 	return false;
1212 }
1213 
1214 /**
1215  * Identify the device id of a DMA device node corresponding to a stream id
1216  * specified in the memory region node.
1217  */
map_dma_device_id_to_stream_ids(struct manifest_vm * vm)1218 static bool map_dma_device_id_to_stream_ids(struct manifest_vm *vm)
1219 {
1220 	for (uint16_t i = 0; i < vm->partition.mem_region_count; i++) {
1221 		struct memory_region mem_region = vm->partition.mem_regions[i];
1222 
1223 		for (uint8_t j = 0; j < mem_region.dma_prop.stream_count; j++) {
1224 			uint32_t sid = mem_region.dma_prop.stream_ids[j];
1225 			uint8_t device_id = 0;
1226 
1227 			/*
1228 			 * Every stream id must have been declared in the
1229 			 * device node as well.
1230 			 */
1231 			if (!find_dma_device_id_from_dev_region_nodes(
1232 				    vm, sid, &device_id)) {
1233 				dlog_verbose(
1234 					"Stream ID %d not found in any device "
1235 					"region node of partition manifest\n",
1236 					sid);
1237 				return false;
1238 			}
1239 
1240 			mem_region.dma_prop.dma_device_id = device_id;
1241 		}
1242 	}
1243 
1244 	return true;
1245 }
1246 
/**
 * Parses a partition's FF-A manifest from its device tree (`fdt`) into `vm`.
 *
 * Reads the root-node properties (uuid, ffa-version, execution context
 * count, exception level, execution state, load address, entrypoint offset,
 * boot GP register, boot order, translation granule, messaging method,
 * interrupt actions, SRI policy, notification support, power management and
 * VM availability messages), then the optional rx_tx-info, boot-info,
 * memory-regions and device-regions child nodes.
 *
 * If `boot_info_node` is non-NULL and the manifest has a "boot-info" child,
 * that node is written to `boot_info_node` for later processing.
 *
 * Returns MANIFEST_SUCCESS or an error code; finishes by running
 * sanity_check_ffa_manifest() on the populated fields.
 */
enum manifest_return_code parse_ffa_manifest(
	struct fdt *fdt, struct manifest_vm *vm,
	struct fdt_node *boot_info_node, const struct boot_params *boot_params)
{
	struct uint32list_iter uuid;
	uintpaddr_t load_address;
	struct fdt_node root;
	struct fdt_node ffa_node;
	struct string rxtx_node_name = STRING_INIT("rx_tx-info");
	struct string mem_region_node_name = STRING_INIT("memory-regions");
	struct string dev_region_node_name = STRING_INIT("device-regions");
	struct string boot_info_node_name = STRING_INIT("boot-info");
	bool managed_exit_field_present = false;

	if (!fdt_find_node(fdt, "/", &root)) {
		return MANIFEST_ERROR_NO_ROOT_NODE;
	}

	/* Check "compatible" property. */
	if (!fdt_is_compatible(&root, "arm,ffa-manifest-1.0")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	TRY(read_uint32list(&root, "uuid", &uuid));

	TRY(parse_uuid_list(&uuid, vm->partition.uuids,
			    &vm->partition.uuid_count));
	dlog_verbose("  Number of UUIDs %u\n", vm->partition.uuid_count);

	TRY(read_uint32(&root, "ffa-version", &vm->partition.ffa_version));
	dlog_verbose("  Expected FF-A version %u.%u\n",
		     ffa_version_get_major(vm->partition.ffa_version),
		     ffa_version_get_minor(vm->partition.ffa_version));

	TRY(read_uint16(&root, "execution-ctx-count",
			&vm->partition.execution_ctx_count));
	dlog_verbose("  Number of execution context %u\n",
		     vm->partition.execution_ctx_count);

	TRY(read_uint8(&root, "exception-level",
		       (uint8_t *)&vm->partition.run_time_el));
	dlog_verbose("  Run-time EL %u\n", vm->partition.run_time_el);

	TRY(read_uint8(&root, "execution-state",
		       (uint8_t *)&vm->partition.execution_state));
	dlog_verbose("  Execution state %u\n", vm->partition.execution_state);

	/*
	 * `vm->partition.load_addr` was set by the caller from the partition
	 * package; a differing manifest value is reported but the package
	 * value is kept.
	 */
	TRY(read_optional_uint64(&root, "load-address", 0, &load_address));
	if (vm->partition.load_addr != load_address) {
		dlog_warning(
			"Partition's `load_address` (%#lx) in its manifest "
			"differs from `load-address` (%#lx) in its package\n",
			vm->partition.load_addr, load_address);
	}
	dlog_verbose("  Load address %#lx\n", vm->partition.load_addr);

	TRY(read_optional_uint64(&root, "entrypoint-offset", 0,
				 &vm->partition.ep_offset));
	dlog_verbose("  Entry point offset %#zx\n", vm->partition.ep_offset);

	TRY(read_optional_uint32(&root, "gp-register-num",
				 DEFAULT_BOOT_GP_REGISTER,
				 &vm->partition.gp_register_num));

	if (vm->partition.gp_register_num != DEFAULT_BOOT_GP_REGISTER) {
		dlog_verbose("  Boot GP register: x%u\n",
			     vm->partition.gp_register_num);
	}

	TRY(read_optional_uint16(&root, "boot-order", DEFAULT_BOOT_ORDER,
				 &vm->partition.boot_order));
	if (vm->partition.boot_order != DEFAULT_BOOT_ORDER) {
		dlog_verbose("  Boot order %u\n", vm->partition.boot_order);
	}

	/* Boot order values must be unique across partitions. */
	if (!check_boot_order(vm->partition.boot_order)) {
		return MANIFEST_ERROR_INVALID_BOOT_ORDER;
	}

	TRY(read_optional_uint8(&root, "xlat-granule", 0,
				(uint8_t *)&vm->partition.xlat_granule));
	dlog_verbose("  Translation granule %u\n", vm->partition.xlat_granule);

	ffa_node = root;
	if (fdt_find_child(&ffa_node, &rxtx_node_name)) {
		if (!fdt_is_compatible(&ffa_node,
				       "arm,ffa-manifest-rx_tx-buffer")) {
			return MANIFEST_ERROR_NOT_COMPATIBLE;
		}

		/*
		 * Read only phandles for now, it will be used to update buffers
		 * while parsing memory regions.
		 */
		TRY(read_uint32(&ffa_node, "rx-buffer",
				&vm->partition.rxtx.rx_phandle));

		TRY(read_uint32(&ffa_node, "tx-buffer",
				&vm->partition.rxtx.tx_phandle));

		vm->partition.rxtx.available = true;
	}

	TRY(read_uint16(&root, "messaging-method",
			(uint16_t *)&vm->partition.messaging_method));
	dlog_verbose("  Messaging method %u\n", vm->partition.messaging_method);

	TRY(read_bool(&root, "managed-exit", &managed_exit_field_present));

	TRY(read_optional_uint8(
		&root, "ns-interrupts-action", NS_ACTION_SIGNALED,
		(uint8_t *)&vm->partition.ns_interrupts_action));

	/*
	 * An SP manifest can specify one of the fields listed below:
	 * `managed-exit`: Introduced in FF-A v1.0 spec.
	 * `ns-interrupts-action`: Introduced in FF-A v1.1 EAC0 spec.
	 * If both are missing from the manifest, the default response is
	 * NS_ACTION_SIGNALED.
	 */
	if (managed_exit_field_present) {
		vm->partition.ns_interrupts_action = NS_ACTION_ME;
	}
	if (vm->partition.ns_interrupts_action != NS_ACTION_QUEUED &&
	    vm->partition.ns_interrupts_action != NS_ACTION_ME &&
	    vm->partition.ns_interrupts_action != NS_ACTION_SIGNALED) {
		return MANIFEST_ERROR_ILLEGAL_NS_INT_ACTION;
	}

	dlog_verbose(
		"  NS Interrupts %s\n",
		(vm->partition.ns_interrupts_action == NS_ACTION_QUEUED)
			? "Queued"
		: (vm->partition.ns_interrupts_action == NS_ACTION_SIGNALED)
			? "Signaled"
			: "Managed exit");

	if (vm->partition.ns_interrupts_action == NS_ACTION_ME) {
		/* Managed exit only supported by S_EL1 partitions. */
		if (vm->partition.run_time_el != S_EL1) {
			dlog_error(
				"Managed exit cannot be supported by this "
				"partition\n");
			return MANIFEST_ERROR_ILLEGAL_NS_INT_ACTION;
		}

		TRY(read_bool(&root, "managed-exit-virq",
			      &vm->partition.me_signal_virq));
		if (vm->partition.me_signal_virq) {
			dlog_verbose("  Managed Exit signaled through vIRQ\n");
		}
	}

	TRY(read_optional_uint8(&root, "sri-interrupts-policy", 0,
				(uint8_t *)&vm->partition.sri_policy));

	/* Reserved bits of the SRI policy must be zero. */
	if (vm->partition.sri_policy.mbz != 0U) {
		return MANIFEST_ERROR_ILLEGAL_SRI_POLICY;
	}

	dlog_verbose("  SRI Trigger Policy.\n");
	if (!vm->partition.sri_policy.intr_while_waiting &&
	    !vm->partition.sri_policy.intr_pending_entry_wait) {
		dlog_verbose("    Not trigged in interrupt handling.\n");
	} else {
		if (vm->partition.sri_policy.intr_while_waiting) {
			dlog_verbose("    On interrupts while waiting.\n");
		}
		if (vm->partition.sri_policy.intr_pending_entry_wait) {
			dlog_verbose(
				"    On entry to wait while interrupts "
				"pending.\n");
		}
	}

	TRY(read_bool(&root, "notification-support",
		      &vm->partition.notification_support));
	if (vm->partition.notification_support) {
		dlog_verbose("  Notifications Receipt Supported\n");
	}

	TRY(read_optional_uint8(
		&root, "other-s-interrupts-action", OTHER_S_INT_ACTION_SIGNALED,
		(uint8_t *)&vm->partition.other_s_interrupts_action));

	/*
	 * Queuing Other-S interrupts is only consistent with also queuing
	 * NS interrupts.
	 */
	if (vm->partition.other_s_interrupts_action ==
	    OTHER_S_INT_ACTION_QUEUED) {
		if (vm->partition.ns_interrupts_action != NS_ACTION_QUEUED) {
			dlog_error(
				"Choice of the fields 'ns-interrupts-action' "
				"and 'other-s-interrupts-action' not "
				"compatible\n");
			return MANIFEST_ERROR_NOT_COMPATIBLE;
		}
	} else if (vm->partition.other_s_interrupts_action >
		   OTHER_S_INT_ACTION_SIGNALED) {
		dlog_error(
			"Illegal value specified for the field "
			"'other-s-interrupts-action': %u\n",
			vm->partition.other_s_interrupts_action);
		return MANIFEST_ERROR_ILLEGAL_OTHER_S_INT_ACTION;
	}

	/* Parse boot info node. */
	if (boot_info_node != NULL) {
		ffa_node = root;
		vm->partition.boot_info =
			fdt_find_child(&ffa_node, &boot_info_node_name);
		if (vm->partition.boot_info) {
			*boot_info_node = ffa_node;
		}
	} else {
		vm->partition.boot_info = false;
	}

	TRY(read_optional_uint32(
		&root, "vm-availability-messages", 0,
		(uint32_t *)&vm->partition.vm_availability_messages));
	dlog_verbose("vm-availability-messages=%#x\n",
		     *(uint32_t *)&vm->partition.vm_availability_messages);

	/* Reserved bits of vm-availability-messages must be zero. */
	if (vm->partition.vm_availability_messages.mbz != 0) {
		return MANIFEST_ERROR_VM_AVAILABILITY_MESSAGE_INVALID;
	}

	/*
	 * Power management messages are only meaningful for multi-context
	 * EL1/S-EL1 partitions; otherwise force "none".
	 */
	TRY(read_optional_uint32(&root, "power-management-messages",
				 MANIFEST_POWER_MANAGEMENT_NONE_MASK,
				 &vm->partition.power_management));
	vm->partition.power_management &= MANIFEST_POWER_MANAGEMENT_ALL_MASK;
	if (vm->partition.execution_ctx_count == 1 ||
	    vm->partition.run_time_el == S_EL0 ||
	    vm->partition.run_time_el == EL0) {
		vm->partition.power_management =
			MANIFEST_POWER_MANAGEMENT_NONE_MASK;
	}

	dlog_verbose("  Power management messages %#x\n",
		     vm->partition.power_management);

	/* Parse memory-regions */
	ffa_node = root;
	if (fdt_find_child(&ffa_node, &mem_region_node_name)) {
		TRY(parse_ffa_memory_region_node(
			&ffa_node, vm->partition.load_addr,
			vm->partition.mem_regions,
			&vm->partition.mem_region_count, &vm->partition.rxtx,
			boot_params));
	}
	dlog_verbose("  Total %u memory regions found\n",
		     vm->partition.mem_region_count);

	/* Parse Device-regions */
	ffa_node = root;
	if (fdt_find_child(&ffa_node, &dev_region_node_name)) {
		TRY(parse_ffa_device_region_node(
			&ffa_node, vm->partition.dev_regions,
			&vm->partition.dev_region_count,
			&vm->partition.dma_device_count, boot_params));
	}
	dlog_verbose("  Total %u device regions found\n",
		     vm->partition.dev_region_count);

	/* Every memory-region stream id must map to a DMA device region. */
	if (!map_dma_device_id_to_stream_ids(vm)) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	return sanity_check_ffa_manifest(vm);
}
1515 
/**
 * Loads and parses an FF-A partition package for `vm`.
 *
 * Reads the page-aligned "load_address" from the SPMC manifest node, maps
 * the package via partition_pkg_init(), validates its size against the
 * memory reserved for the partition, parses the embedded partition manifest
 * FDT, and, if the partition subscribed to boot information, processes the
 * boot info region of the package.
 *
 * The package mapping is always released (goto-cleanup) before returning.
 * Returns MANIFEST_SUCCESS or an error code.
 */
static enum manifest_return_code parse_ffa_partition_package(
	struct mm_stage1_locked stage1_locked, struct fdt_node *node,
	struct manifest_vm *vm, ffa_id_t vm_id,
	const struct boot_params *boot_params, struct mpool *ppool)
{
	enum manifest_return_code ret = MANIFEST_ERROR_NOT_COMPATIBLE;
	uintpaddr_t load_address;
	struct partition_pkg pkg;
	struct fdt sp_fdt;
	void *pm_ptr;
	size_t pm_size;
	struct fdt_node boot_info_node;
	size_t total_mem_size;

	/*
	 * This must have been hinted as being an FF-A partition,
	 * return straight with failure if this is not the case.
	 */
	if (!vm->is_ffa_partition) {
		return ret;
	}

	TRY(read_uint64(node, "load_address", &load_address));
	if (!is_aligned(load_address, PAGE_SIZE)) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	assert(load_address != 0U);

	/* Map the package headers; must be unmapped via the out path. */
	if (!partition_pkg_init(stage1_locked, pa_init(load_address), &pkg,
				ppool)) {
		return ret;
	}

	total_mem_size = pa_difference(pkg.total.begin, pkg.total.end);

	/* The package must fit in the memory reserved for a secondary VM. */
	if (vm_id != HF_PRIMARY_VM_ID &&
	    total_mem_size > (size_t)vm->secondary.mem_size) {
		dlog_error("Partition pkg size %zx bigger than expected: %x\n",
			   total_mem_size, (uint32_t)vm->secondary.mem_size);
		goto out;
	}

	/* Locate and validate the partition manifest blob inside the pkg. */
	pm_ptr = ptr_from_va(va_from_pa(pkg.pm.begin));

	pm_size = pa_difference(pkg.pm.begin, pkg.pm.end);
	if (!fdt_init_from_ptr(&sp_fdt, pm_ptr, pm_size)) {
		dlog_error("%s: FDT failed validation.\n", __func__);
		goto out;
	}

	vm->partition.load_addr = load_address;

	ret = parse_ffa_manifest(&sp_fdt, vm, &boot_info_node, boot_params);
	if (ret != MANIFEST_SUCCESS) {
		dlog_error("Error parsing partition manifest.\n");
		goto out;
	}

	/* Partition subscribed to boot information. */
	if (vm->partition.gp_register_num != DEFAULT_BOOT_GP_REGISTER &&
	    vm->partition.boot_info) {
		/* Its package should have available space for it. */
		if (pa_addr(pkg.boot_info.begin) == 0U) {
			dlog_warning(
				"Partition Package %s doesn't have boot info "
				"space.\n",
				vm->debug_name.data);
		} else {
			/* Boot info failure is logged but not fatal. */
			if (!ffa_boot_info_node(&boot_info_node, &pkg,
						vm->partition.ffa_version)) {
				dlog_error(
					"Failed to process boot "
					"information.\n");
			}
		}
	}

out:
	partition_pkg_deinit(stage1_locked, &pkg, ppool);

	return ret;
}
1599 
1600 /**
1601  * Parse manifest from FDT.
1602  */
/**
 * Parse manifest from FDT.
 *
 * Allocates the global manifest data, records the SPMC's own image range so
 * partitions cannot overlap it, validates the "/hypervisor" node of
 * `manifest_fdt`, then parses each "vmN" child node in turn: FF-A partition
 * packages via parse_ffa_partition_package(), legacy VMs via parse_vm().
 *
 * On success `*manifest_ret` points to the populated manifest. Returns
 * MANIFEST_SUCCESS or an error code.
 */
enum manifest_return_code manifest_init(struct mm_stage1_locked stage1_locked,
					struct manifest **manifest_ret,
					struct memiter *manifest_fdt,
					struct boot_params *boot_params,
					struct mpool *ppool)
{
	struct manifest *manifest;
	struct string vm_name;
	struct fdt fdt;
	struct fdt_node hyp_node;
	size_t i = 0;
	bool found_primary_vm = false;
	/* Size of the SPMC image itself, rounded up to whole pages. */
	const size_t spmc_size =
		align_up(pa_difference(layout_text_begin(), layout_image_end()),
			 PAGE_SIZE);
	const size_t spmc_page_count = spmc_size / PAGE_SIZE;

	/* At least one memory range ('memory' or 'ns-memory') is required. */
	if (boot_params->mem_ranges_count == 0 &&
	    boot_params->ns_mem_ranges_count == 0) {
		return MANIFEST_ERROR_MEMORY_MISSING;
	}

	dump_memory_ranges(boot_params->mem_ranges,
			   boot_params->mem_ranges_count, false);
	dump_memory_ranges(boot_params->ns_mem_ranges,
			   boot_params->ns_mem_ranges_count, true);

	/* Allocate space in the ppool for the manifest data. */
	if (!manifest_data_init(ppool)) {
		panic("Unable to allocate manifest data.\n");
	}

	/*
	 * Add SPMC load address range to memory ranges to track to ensure
	 * no partitions overlap with this memory.
	 * The system integrator should have prevented this by defining the
	 * secure memory region ranges so as not to overlap the SPMC load
	 * address range. Therefore, this code is intended to catch any
	 * potential misconfigurations there.
	 */
	if (is_aligned(pa_addr(layout_text_begin()), PAGE_SIZE) &&
	    spmc_page_count != 0) {
		TRY(check_and_record_memory_used(
			pa_addr(layout_text_begin()), spmc_page_count,
			manifest_data->mem_regions,
			&manifest_data->mem_regions_index));
	}

	manifest = &manifest_data->manifest;
	*manifest_ret = manifest;

	if (!fdt_init_from_memiter(&fdt, manifest_fdt)) {
		return MANIFEST_ERROR_FILE_SIZE; /* TODO */
	}

	/* Find hypervisor node. */
	if (!fdt_find_node(&fdt, "/hypervisor", &hyp_node)) {
		return MANIFEST_ERROR_NO_HYPERVISOR_FDT_NODE;
	}

	/* Check "compatible" property. */
	if (!fdt_is_compatible(&hyp_node, "hafnium,hafnium")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	TRY(read_bool(&hyp_node, "ffa_tee_enabled",
		      &manifest->ffa_tee_enabled));

	/* Iterate over reserved VM IDs and check no such nodes exist. */
	for (i = HF_VM_ID_BASE; i < HF_VM_ID_OFFSET; i++) {
		ffa_id_t vm_id = (ffa_id_t)i - HF_VM_ID_BASE;
		struct fdt_node vm_node = hyp_node;

		generate_vm_node_name(&vm_name, vm_id);
		if (fdt_find_child(&vm_node, &vm_name)) {
			return MANIFEST_ERROR_RESERVED_VM_ID;
		}
	}

	/*
	 * Iterate over VM nodes until we find one that does not exist.
	 * The loop bound is deliberately `<= MAX_VMS` so that the existence
	 * of a (MAX_VMS+1)-th node is detected and reported as an error.
	 */
	for (i = 0; i <= MAX_VMS; ++i) {
		ffa_id_t vm_id = HF_VM_ID_OFFSET + i;
		struct fdt_node vm_node = hyp_node;

		generate_vm_node_name(&vm_name, vm_id - HF_VM_ID_BASE);
		if (!fdt_find_child(&vm_node, &vm_name)) {
			break;
		}

		if (i == MAX_VMS) {
			return MANIFEST_ERROR_TOO_MANY_VMS;
		}

		if (vm_id == HF_PRIMARY_VM_ID) {
			CHECK(found_primary_vm == false); /* sanity check */
			found_primary_vm = true;
		}

		manifest->vm_count = i + 1;

		TRY(parse_vm_common(&vm_node, &manifest->vm[i], vm_id));

		/* A hypervisor-loaded VM must also be an FF-A partition. */
		CHECK(!manifest->vm[i].is_hyp_loaded ||
		      manifest->vm[i].is_ffa_partition);

		if (manifest->vm[i].is_ffa_partition &&
		    !manifest->vm[i].is_hyp_loaded) {
			TRY(parse_ffa_partition_package(stage1_locked, &vm_node,
							&manifest->vm[i], vm_id,
							boot_params, ppool));
			size_t page_count =
				align_up(manifest->vm[i].secondary.mem_size,
					 PAGE_SIZE) /
				PAGE_SIZE;

			/* Range checks below apply to secondary VMs only. */
			if (vm_id == HF_PRIMARY_VM_ID) {
				continue;
			}

			TRY(check_partition_memory_is_valid(
				manifest->vm[i].partition.load_addr, page_count,
				0, boot_params, false));

			/*
			 * Check if memory from load-address until (load-address
			 * + memory size) has been used by other partition.
			 */
			TRY(check_and_record_memory_used(
				manifest->vm[i].partition.load_addr, page_count,
				manifest_data->mem_regions,
				&manifest_data->mem_regions_index));
		} else {
			TRY(parse_vm(&vm_node, &manifest->vm[i], vm_id));
		}
	}

	/* A primary VM is mandatory only when its ID is in this world. */
	if (!found_primary_vm && vm_id_is_current_world(HF_PRIMARY_VM_ID)) {
		return MANIFEST_ERROR_NO_PRIMARY_VM;
	}

	return MANIFEST_SUCCESS;
}
1745 
1746 /**
1747  * Free manifest data resources, called once manifest parsing has
1748  * completed and VMs are loaded.
1749  */
void manifest_deinit(struct mpool *ppool)
{
	/* Release the manifest data allocated in manifest_init(). */
	manifest_data_deinit(ppool);
}
1754 
manifest_strerror(enum manifest_return_code ret_code)1755 const char *manifest_strerror(enum manifest_return_code ret_code)
1756 {
1757 	switch (ret_code) {
1758 	case MANIFEST_SUCCESS:
1759 		return "Success";
1760 	case MANIFEST_ERROR_FILE_SIZE:
1761 		return "Total size in header does not match file size";
1762 	case MANIFEST_ERROR_NO_ROOT_NODE:
1763 		return "Could not find root node in manifest";
1764 	case MANIFEST_ERROR_NO_HYPERVISOR_FDT_NODE:
1765 		return "Could not find \"hypervisor\" node in manifest";
1766 	case MANIFEST_ERROR_NOT_COMPATIBLE:
1767 		return "Hypervisor manifest entry not compatible with Hafnium";
1768 	case MANIFEST_ERROR_RESERVED_VM_ID:
1769 		return "Manifest defines a VM with a reserved ID";
1770 	case MANIFEST_ERROR_NO_PRIMARY_VM:
1771 		return "Manifest does not contain a primary VM entry";
1772 	case MANIFEST_ERROR_TOO_MANY_VMS:
1773 		return "Manifest specifies more VMs than Hafnium has "
1774 		       "statically allocated space for";
1775 	case MANIFEST_ERROR_PROPERTY_NOT_FOUND:
1776 		return "Property not found";
1777 	case MANIFEST_ERROR_MALFORMED_STRING:
1778 		return "Malformed string property";
1779 	case MANIFEST_ERROR_STRING_TOO_LONG:
1780 		return "String too long";
1781 	case MANIFEST_ERROR_MALFORMED_INTEGER:
1782 		return "Malformed integer property";
1783 	case MANIFEST_ERROR_INTEGER_OVERFLOW:
1784 		return "Integer overflow";
1785 	case MANIFEST_ERROR_MALFORMED_INTEGER_LIST:
1786 		return "Malformed integer list property";
1787 	case MANIFEST_ERROR_MALFORMED_BOOLEAN:
1788 		return "Malformed boolean property";
1789 	case MANIFEST_ERROR_MEMORY_REGION_NODE_EMPTY:
1790 		return "Memory-region node should have at least one entry";
1791 	case MANIFEST_ERROR_DEVICE_REGION_NODE_EMPTY:
1792 		return "Device-region node should have at least one entry";
1793 	case MANIFEST_ERROR_RXTX_SIZE_MISMATCH:
1794 		return "RX and TX buffers should be of same size";
1795 	case MANIFEST_ERROR_MEM_REGION_EMPTY:
1796 		return "Memory region should have at least one page";
1797 	case MANIFEST_ERROR_BASE_ADDRESS_AND_RELATIVE_ADDRESS:
1798 		return "Base and relative addresses are mutually exclusive";
1799 	case MANIFEST_ERROR_MEM_REGION_OVERLAP:
1800 		return "Memory region overlaps with one already allocated";
1801 	case MANIFEST_ERROR_MEM_REGION_UNALIGNED:
1802 		return "Memory region is not aligned to a page boundary";
1803 	case MANIFEST_ERROR_INVALID_MEM_PERM:
1804 		return "Memory permission should be RO, RW or RX";
1805 	case MANIFEST_ERROR_INTERRUPT_ID_REPEATED:
1806 		return "Interrupt ID already assigned to another endpoint";
1807 	case MANIFEST_ERROR_ILLEGAL_NS_INT_ACTION:
1808 		return "Illegal value specified for the field: Action in "
1809 		       "response to NS Interrupt";
1810 	case MANIFEST_ERROR_ILLEGAL_SRI_POLICY:
1811 		return "Illegal value specified for the field: SRI Policy";
1812 	case MANIFEST_ERROR_SRI_POLICY_NOT_SUPPORTED:
1813 		return "S-EL0 Partitions do not support the SRI trigger policy";
1814 	case MANIFEST_ERROR_INTERRUPT_ID_NOT_IN_LIST:
1815 		return "Interrupt ID is not in the list of interrupts";
1816 	case MANIFEST_ERROR_ILLEGAL_OTHER_S_INT_ACTION:
1817 		return "Illegal value specified for the field: Action in "
1818 		       "response to Other-S Interrupt";
1819 	case MANIFEST_ERROR_MEMORY_MISSING:
1820 		return "Memory nodes must be defined in the SPMC manifest "
1821 		       "('memory' and 'ns-memory')";
1822 	case MANIFEST_ERROR_MEM_REGION_INVALID:
1823 		return "Invalid memory region range";
1824 	case MANIFEST_ERROR_DEVICE_MEM_REGION_INVALID:
1825 		return "Invalid device memory region range";
1826 	case MANIFEST_ERROR_INVALID_BOOT_ORDER:
1827 		return "Boot order should be a unique value less than "
1828 		       "default largest value";
1829 	case MANIFEST_ERROR_UUID_ALL_ZEROS:
1830 		return "UUID should not be NIL";
1831 	case MANIFEST_ERROR_TOO_MANY_UUIDS:
1832 		return "Manifest specifies more UUIDs than Hafnium has "
1833 		       "statically allocated space for";
1834 	case MANIFEST_ERROR_MISSING_SMMU_ID:
1835 		return "SMMU ID must be specified for the given Stream IDs";
1836 	case MANIFEST_ERROR_MISMATCH_DMA_ACCESS_PERMISSIONS:
1837 		return "DMA device access permissions must match memory region "
1838 		       "attributes";
1839 	case MANIFEST_ERROR_STREAM_IDS_OVERFLOW:
1840 		return "DMA device stream ID count exceeds predefined limit";
1841 	case MANIFEST_ERROR_DMA_ACCESS_PERMISSIONS_OVERFLOW:
1842 		return "DMA access permissions count exceeds predefined limit";
1843 	case MANIFEST_ERROR_DMA_DEVICE_OVERFLOW:
1844 		return "Number of device regions with DMA peripheral exceeds "
1845 		       "limit.";
1846 	case MANIFEST_ERROR_VM_AVAILABILITY_MESSAGE_INVALID:
1847 		return "VM availability messages invalid (bits [31:2] must be "
1848 		       "zero)";
1849 	}
1850 
1851 	panic("Unexpected manifest return code.");
1852 }
1853