1 /*
2 * Copyright 2021 The Hafnium Authors.
3 *
4 * Use of this source code is governed by a BSD-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/BSD-3-Clause.
7 */
8
9 #include "hf/check.h"
10 #include "hf/fdt.h"
11 #include "hf/fdt_handler.h"
12 #include "hf/ffa.h"
13 #include "hf/memiter.h"
14 #include "hf/mm.h"
15 #include "hf/std.h"
16 #include "hf/stdout.h"
17 #include "hf/string.h"
18
19 #include "vmapi/hf/call.h"
20
21 #include "../msr.h"
22 #include "test/hftest.h"
23 #include "test/hftest_impl.h"
24 #include "test/vmapi/arch/exception_handler.h"
25 #include "test/vmapi/ffa.h"
26
27 extern struct hftest_test hftest_begin[];
28 extern struct hftest_test hftest_end[];
29
30 static struct hftest_context global_context;
31
alignas(PAGE_SIZE)32 static alignas(PAGE_SIZE) uint8_t secondary_ec_stack[MAX_CPUS][PAGE_SIZE];
33
34 uint8_t *hftest_get_secondary_ec_stack(size_t id)
35 {
36 assert(id < MAX_CPUS);
37 return secondary_ec_stack[id];
38 }
39
hftest_get_context(void)40 struct hftest_context *hftest_get_context(void)
41 {
42 return &global_context;
43 }
44
/** True while the iterator still has unconsumed bytes (i.e. more cells). */
static bool uint32list_has_next(const struct memiter *list)
{
	return memiter_size(list) != 0;
}
49
/**
 * Consumes the next 32-bit cell from the iterator and stores it in *out.
 * Aborts (via CHECK) if the list is already empty; if the cell cannot be
 * parsed, *out is left untouched.
 */
static void uint32list_get_next(struct memiter *list, uint32_t *out)
{
	uint64_t value;

	CHECK(uint32list_has_next(list));

	if (fdt_parse_number(list, sizeof(uint32_t), &value)) {
		*out = (uint32_t)value;
	}
}
61
/**
 * Terminates the service after a test failure.
 *
 * A secondary VM / SP has no way to power the machine down, so instead a
 * data abort is forced by writing through an (almost certainly) unmapped
 * address, which the exception handler / hypervisor will catch.
 */
[[noreturn]] void abort(void)
{
	HFTEST_LOG("Service contained failures.");
	/* Cause a fault, as a secondary/SP can't power down the machine. */
	*((volatile uint8_t *)1) = 1;

	/* This should never be reached, but to make the compiler happy... */
	for (;;) {
	}
}
72
73 /** Find the service with the name passed in the arguments. */
find_service(struct memiter * args)74 static hftest_test_fn find_service(struct memiter *args)
75 {
76 struct memiter service_name;
77 struct hftest_test *test;
78
79 if (!memiter_parse_str(args, &service_name)) {
80 return NULL;
81 }
82
83 for (test = hftest_begin; test < hftest_end; ++test) {
84 if (test->kind == HFTEST_KIND_SERVICE &&
85 memiter_iseq(&service_name, test->name)) {
86 return test->fn;
87 }
88 }
89
90 return NULL;
91 }
92
hftest_context_init(struct hftest_context * ctx,void * send,void * recv)93 void hftest_context_init(struct hftest_context *ctx, void *send, void *recv)
94 {
95 memset_s(ctx, sizeof(*ctx), 0, sizeof(*ctx));
96 ctx->abort = abort;
97 ctx->send = send;
98 ctx->recv = recv;
99 }
100
101 /*
102 * Parse the FF-A partition's manifest.
103 * This function assumes the 'fdt' field of the passed 'ctx' has been
104 * initialized.
105 * TODO: Parse other fields as needed.
106 */
hftest_parse_ffa_manifest(struct hftest_context * ctx,struct fdt * fdt)107 void hftest_parse_ffa_manifest(struct hftest_context *ctx, struct fdt *fdt)
108 {
109 struct fdt_node root;
110 struct fdt_node ffa_node;
111 struct string mem_region_node_name = STRING_INIT("memory-regions");
112 struct string dev_region_node_name = STRING_INIT("device-regions");
113 struct memiter uuid;
114 struct memiter description;
115 uint32_t uuid_word = 0;
116 uint16_t j = 0;
117 uint16_t i = 0;
118 uint64_t number;
119
120 CHECK(ctx != NULL);
121 CHECK(fdt != NULL);
122
123 ASSERT_TRUE(fdt_find_node(fdt, "/", &root));
124 EXPECT_TRUE(fdt_is_compatible(&root, "arm,ffa-manifest-1.0"));
125 ASSERT_TRUE(fdt_read_number(&root, "load-address",
126 &ctx->partition_manifest.load_addr));
127 EXPECT_TRUE(fdt_read_number(&root, "ffa-version", &number));
128 ctx->partition_manifest.ffa_version = number;
129
130 EXPECT_TRUE(fdt_read_number(&root, "execution-ctx-count", &number));
131 ctx->partition_manifest.execution_ctx_count = (uint16_t)number;
132
133 EXPECT_TRUE(fdt_read_number(&root, "exception-level", &number));
134 ctx->partition_manifest.run_time_el = (uint16_t)number;
135
136 EXPECT_TRUE(fdt_read_property(&root, "uuid", &uuid));
137
138 /* Parse UUIDs and populate uuid count.*/
139 while (uint32list_has_next(&uuid) && j < PARTITION_MAX_UUIDS) {
140 while (uint32list_has_next(&uuid) && i < 4) {
141 uint32list_get_next(&uuid, &uuid_word);
142 ctx->partition_manifest.uuids[j].uuid[i] = uuid_word;
143 i++;
144 }
145
146 EXPECT_FALSE(
147 ffa_uuid_is_null(&ctx->partition_manifest.uuids[j]));
148
149 dlog_verbose(" UUID %#x-%x-%x-%x\n",
150 ctx->partition_manifest.uuids[j].uuid[0],
151 ctx->partition_manifest.uuids[j].uuid[1],
152 ctx->partition_manifest.uuids[j].uuid[2],
153 ctx->partition_manifest.uuids[j].uuid[3]);
154 j++;
155 i = 0;
156 }
157
158 ctx->partition_manifest.uuid_count = j;
159
160 ffa_node = root;
161
162 /* Look for the memory region node. */
163 if (fdt_find_child(&ffa_node, &mem_region_node_name) &&
164 fdt_first_child(&ffa_node)) {
165 uint32_t mem_count = 0;
166
167 do {
168 struct memory_region *cur_region =
169 &ctx->partition_manifest.mem_regions[mem_count];
170 EXPECT_TRUE(fdt_read_number(&ffa_node, "pages-count",
171 &number));
172 cur_region->page_count = (uint32_t)number;
173
174 if (!fdt_read_number(&ffa_node, "base-address",
175 &cur_region->base_address)) {
176 EXPECT_TRUE(fdt_read_number(
177 &ffa_node,
178 "load-address-relative-offset",
179 &number));
180 cur_region->base_address =
181 ctx->partition_manifest.load_addr +
182 number;
183 cur_region->is_relative = true;
184 }
185
186 if (fdt_read_property(&ffa_node, "description",
187 &description)) {
188 EXPECT_EQ(string_init(&cur_region->description,
189 &description),
190 STRING_SUCCESS);
191 }
192
193 EXPECT_TRUE(fdt_read_number(&ffa_node, "attributes",
194 &number));
195 cur_region->attributes = (uint32_t)number;
196 mem_count++;
197 } while (fdt_next_sibling(&ffa_node));
198
199 assert(mem_count < PARTITION_MAX_MEMORY_REGIONS);
200
201 ctx->partition_manifest.mem_region_count = mem_count;
202 }
203
204 ffa_node = root;
205
206 /* Look for the device region node. */
207 if (fdt_find_child(&ffa_node, &dev_region_node_name) &&
208 fdt_first_child(&ffa_node)) {
209 uint32_t dev_region_count = 0;
210
211 do {
212 struct device_region *cur_region =
213 &ctx->partition_manifest
214 .dev_regions[dev_region_count];
215 EXPECT_TRUE(fdt_read_number(&ffa_node, "pages-count",
216 &number));
217 cur_region->page_count = (uint32_t)number;
218
219 if (!fdt_read_number(&ffa_node, "base-address",
220 &cur_region->base_address)) {
221 EXPECT_TRUE(fdt_read_number(
222 &ffa_node,
223 "load-address-relative-offset",
224 &number));
225 cur_region->base_address =
226 ctx->partition_manifest.load_addr +
227 number;
228 }
229
230 EXPECT_TRUE(fdt_read_number(&ffa_node, "attributes",
231 &number));
232 cur_region->attributes = (uint32_t)number;
233 dev_region_count++;
234 } while (fdt_next_sibling(&ffa_node));
235
236 assert(dev_region_count < PARTITION_MAX_DEVICE_REGIONS);
237
238 ctx->partition_manifest.dev_region_count = dev_region_count;
239 }
240
241 ctx->is_ffa_manifest_parsed = true;
242 }
243
hftest_service_set_up(struct hftest_context * ctx,struct fdt * fdt)244 void hftest_service_set_up(struct hftest_context *ctx, struct fdt *fdt)
245 {
246 struct fdt_node node;
247 struct hftest_test *hftest_info;
248
249 ASSERT_TRUE(fdt_find_node(fdt, "/", &node));
250
251 if (!fdt_find_child(&node, &(STRING_INIT("hftest-service-setup")))) {
252 return;
253 }
254
255 EXPECT_TRUE(fdt_is_compatible(&node, "arm,hftest"));
256
257 for (hftest_info = hftest_begin; hftest_info < hftest_end;
258 ++hftest_info) {
259 struct memiter data;
260 if (hftest_info->kind != HFTEST_KIND_SERVICE_SET_UP) {
261 continue;
262 }
263 if (fdt_read_property(&node, hftest_info->name, &data)) {
264 HFTEST_LOG("Running service_setup: %s\n",
265 hftest_info->name);
266 hftest_info->fn();
267 if (ctx->failures) {
268 HFTEST_LOG_FAILURE();
269 HFTEST_LOG(HFTEST_LOG_INDENT
270 "%s service_setup failed\n",
271 hftest_info->name);
272 abort();
273 }
274 } else {
275 HFTEST_LOG("Skipping service_setup: %s\n",
276 hftest_info->name);
277 }
278 }
279 }
280
hftest_service_main(const void * fdt_ptr)281 [[noreturn]] void hftest_service_main(const void *fdt_ptr)
282 {
283 struct hftest_context *ctx;
284 struct memiter args;
285 hftest_test_fn service;
286 struct ffa_value ret;
287 struct fdt fdt;
288 const ffa_id_t own_id = hf_vm_get_id();
289 ffa_notifications_bitmap_t bitmap;
290 const struct ffa_partition_msg *message;
291 uint32_t vcpu = get_current_vcpu_index();
292
293 ctx = hftest_get_context();
294
295 /* If boot vcpu, set up mailbox and intialize context abort function. */
296 if (vcpu == 0) {
297 struct mailbox_buffers mb;
298 mb = set_up_mailbox();
299 hftest_context_init(ctx, mb.send, mb.recv);
300 }
301
302 if (!fdt_struct_from_ptr(fdt_ptr, &fdt)) {
303 HFTEST_LOG(HFTEST_LOG_INDENT "Unable to access the FDT");
304 abort();
305 }
306
307 /*
308 * The memory size argument is to be used only by VMs. It is part of
309 * the dt provided by the Hypervisor. SPs expect to receive their
310 * FF-A manifest which doesn't have a memory size field.
311 */
312 if (ffa_is_vm_id(own_id) &&
313 !fdt_get_memory_size(&fdt, &ctx->memory_size)) {
314 HFTEST_LOG_FAILURE();
315 HFTEST_LOG(HFTEST_LOG_INDENT
316 "No entry in the FDT on memory size details");
317 abort();
318 } else if (!ffa_is_vm_id(own_id)) {
319 /*
320 * It is secure partition. We are currently using the partition
321 * manifest for the SP.
322 */
323 hftest_parse_ffa_manifest(ctx, &fdt);
324 stdout_init(ctx->partition_manifest.ffa_version);
325
326 /* TODO: Determine memory size referring to the SP Pkg. */
327 ctx->memory_size = 1048576;
328 }
329
330 /* If boot vcpu, it means it is running in RTM_INIT. */
331 if (vcpu == 0) {
332 run_service_set_up(ctx, &fdt);
333 }
334
335 /* Receive the name of the service to run. */
336 ret = ffa_msg_wait();
337 EXPECT_EQ(ret.func, FFA_RUN_32);
338
339 message = (struct ffa_partition_msg *)SERVICE_RECV_BUFFER();
340
341 /*
342 * Expect to wake up with indirect message related to the next service
343 * to be executed.
344 */
345 ret = ffa_notification_get(own_id, vcpu,
346 FFA_NOTIFICATION_FLAG_BITMAP_SPM |
347 FFA_NOTIFICATION_FLAG_BITMAP_HYP);
348 ASSERT_EQ(ret.func, FFA_SUCCESS_32);
349 bitmap = ffa_notification_get_from_framework(ret);
350 ASSERT_TRUE(is_ffa_spm_buffer_full_notification(bitmap) ||
351 is_ffa_hyp_buffer_full_notification(bitmap));
352 ASSERT_EQ(own_id, message->header.receiver);
353
354 if (ctx->is_ffa_manifest_parsed &&
355 ctx->partition_manifest.run_time_el == S_EL1) {
356 ASSERT_EQ(hf_interrupt_get(), HF_NOTIFICATION_PENDING_INTID);
357 }
358
359 memiter_init(&args, ffa_partition_msg_payload_const(message),
360 message->header.size);
361
362 /* Find service handler. */
363 service = find_service(&args);
364 EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
365
366 /* Check the service was found. */
367 if (service == NULL) {
368 HFTEST_LOG_FAILURE();
369 HFTEST_LOG(HFTEST_LOG_INDENT
370 "Unable to find requested service");
371 abort();
372 }
373
374 /* Pause so the next time cycles are given the service will be run. */
375 ffa_yield();
376
377 /* Let the service run. */
378 service();
379
380 /* Cleanly handle it if the service returns. */
381 if (ctx->failures) {
382 abort();
383 }
384
385 for (;;) {
386 /* Hang if the service returns. */
387 }
388 }
389
hftest_get_dir_req_source_id(void)390 ffa_id_t hftest_get_dir_req_source_id(void)
391 {
392 struct hftest_context *ctx = hftest_get_context();
393 return ctx->dir_req_source_id;
394 }
395
/** Records the sender ID of the current direct request. */
void hftest_set_dir_req_source_id(ffa_id_t id)
{
	hftest_get_context()->dir_req_source_id = id;
}
401
hftest_map_device_regions(struct hftest_context * ctx)402 void hftest_map_device_regions(struct hftest_context *ctx)
403 {
404 struct device_region *dev_region;
405 uint32_t dev_region_count;
406
407 /*
408 * The running partition must have received and parsed its own
409 * partition manifest by now.
410 */
411 if (!ctx || !ctx->is_ffa_manifest_parsed) {
412 panic("Partition manifest not parsed.\n");
413 }
414
415 dev_region_count = ctx->partition_manifest.dev_region_count;
416
417 /* Map the MMIO address space of the devices. */
418 for (uint32_t i = 0; i < dev_region_count; i++) {
419 dev_region = &ctx->partition_manifest.dev_regions[i];
420
421 hftest_mm_identity_map(
422 // NOLINTNEXTLINE(performance-no-int-to-ptr)
423 (const void *)dev_region->base_address,
424 dev_region->page_count * PAGE_SIZE,
425 dev_region->attributes);
426 }
427 }
428