1 /*
2 * Copyright 2021 The Hafnium Authors.
3 *
4 * Use of this source code is governed by a BSD-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/BSD-3-Clause.
7 */
8
9 #include "hf/check.h"
10 #include "hf/fdt_handler.h"
11 #include "hf/ffa.h"
12 #include "hf/memiter.h"
13 #include "hf/mm.h"
14 #include "hf/std.h"
15 #include "hf/stdout.h"
16
17 #include "vmapi/hf/call.h"
18
19 #include "../msr.h"
20 #include "test/hftest.h"
21 #include "test/hftest_impl.h"
22 #include "test/vmapi/arch/exception_handler.h"
23 #include "test/vmapi/ffa.h"
24
25 extern struct hftest_test hftest_begin[];
26 extern struct hftest_test hftest_end[];
27
28 static struct hftest_context global_context;
29
alignas(PAGE_SIZE)30 static alignas(PAGE_SIZE) uint8_t secondary_ec_stack[MAX_CPUS][PAGE_SIZE];
31
32 uint8_t *hftest_get_secondary_ec_stack(size_t id)
33 {
34 assert(id < MAX_CPUS);
35 return secondary_ec_stack[id];
36 }
37
hftest_get_context(void)38 struct hftest_context *hftest_get_context(void)
39 {
40 return &global_context;
41 }
42
uint32list_has_next(const struct memiter * list)43 static bool uint32list_has_next(const struct memiter *list)
44 {
45 return memiter_size(list) > 0;
46 }
47
/**
 * Consume the next 32-bit cell from `list` into `*out`.
 *
 * Both an exhausted list and a malformed cell are treated as fatal, so on
 * return `*out` has always been freshly written.
 */
static void uint32list_get_next(struct memiter *list, uint32_t *out)
{
	uint64_t num;

	CHECK(uint32list_has_next(list));

	/*
	 * A parse failure used to return silently, leaving *out stale and
	 * letting the caller consume a leftover value; treat it as fatal
	 * instead, consistent with the emptiness check above.
	 */
	CHECK(fdt_parse_number(list, sizeof(uint32_t), &num));

	*out = (uint32_t)num;
}
59
/**
 * Abort execution of this service image.
 *
 * A secondary VM / SP cannot power the machine down, so deliberately
 * trigger a fault by storing through an unmapped address; the trailing
 * loop only exists to satisfy the noreturn contract.
 */
noreturn void abort(void)
{
	HFTEST_LOG("Service contained failures.");

	/* Address 1 is never mapped: this store faults. */
	*((volatile uint8_t *)1) = 1;

	/* Unreachable in practice; keeps the compiler happy. */
	while (true) {
	}
}
70
71 /** Find the service with the name passed in the arguments. */
find_service(struct memiter * args)72 static hftest_test_fn find_service(struct memiter *args)
73 {
74 struct memiter service_name;
75 struct hftest_test *test;
76
77 if (!memiter_parse_str(args, &service_name)) {
78 return NULL;
79 }
80
81 for (test = hftest_begin; test < hftest_end; ++test) {
82 if (test->kind == HFTEST_KIND_SERVICE &&
83 memiter_iseq(&service_name, test->name)) {
84 return test->fn;
85 }
86 }
87
88 return NULL;
89 }
90
hftest_context_init(struct hftest_context * ctx,void * send,void * recv)91 void hftest_context_init(struct hftest_context *ctx, void *send, void *recv)
92 {
93 memset_s(ctx, sizeof(*ctx), 0, sizeof(*ctx));
94 ctx->abort = abort;
95 ctx->send = send;
96 ctx->recv = recv;
97 }
98
99 /*
100 * Parse the FF-A partition's manifest.
101 * This function assumes the 'fdt' field of the passed 'ctx' has been
102 * initialized.
103 * TODO: Parse other fields as needed.
104 */
hftest_parse_ffa_manifest(struct hftest_context * ctx,struct fdt * fdt)105 static void hftest_parse_ffa_manifest(struct hftest_context *ctx,
106 struct fdt *fdt)
107 {
108 struct fdt_node root;
109 struct fdt_node ffa_node;
110 struct string mem_region_node_name = STRING_INIT("memory-regions");
111 struct string dev_region_node_name = STRING_INIT("device-regions");
112 struct memiter uuid;
113 uint32_t uuid_word = 0;
114 uint16_t j = 0;
115 uint16_t i = 0;
116 uint64_t number;
117
118 CHECK(ctx != NULL);
119 CHECK(fdt != NULL);
120
121 ASSERT_TRUE(fdt_find_node(fdt, "/", &root));
122 EXPECT_TRUE(fdt_is_compatible(&root, "arm,ffa-manifest-1.0"));
123 ASSERT_TRUE(fdt_read_number(&root, "load-address",
124 &ctx->partition_manifest.load_addr));
125 EXPECT_TRUE(fdt_read_number(&root, "ffa-version", &number));
126 ctx->partition_manifest.ffa_version = number;
127
128 EXPECT_TRUE(fdt_read_property(&root, "uuid", &uuid));
129
130 /* Parse UUIDs and populate uuid count.*/
131 while (uint32list_has_next(&uuid) && j < PARTITION_MAX_UUIDS) {
132 while (uint32list_has_next(&uuid) && i < 4) {
133 uint32list_get_next(&uuid, &uuid_word);
134 ctx->partition_manifest.uuids[j].uuid[i] = uuid_word;
135 i++;
136 }
137
138 EXPECT_FALSE(
139 ffa_uuid_is_null(&ctx->partition_manifest.uuids[j]));
140
141 dlog_verbose(" UUID %#x-%x-%x-%x\n",
142 ctx->partition_manifest.uuids[j].uuid[0],
143 ctx->partition_manifest.uuids[j].uuid[1],
144 ctx->partition_manifest.uuids[j].uuid[2],
145 ctx->partition_manifest.uuids[j].uuid[3]);
146 j++;
147 i = 0;
148 }
149
150 ctx->partition_manifest.uuid_count = j;
151
152 ffa_node = root;
153
154 /* Look for the memory region node. */
155 if (fdt_find_child(&ffa_node, &mem_region_node_name) &&
156 fdt_first_child(&ffa_node)) {
157 uint32_t mem_count = 0;
158
159 do {
160 struct memory_region *cur_region =
161 &ctx->partition_manifest.mem_regions[mem_count];
162 EXPECT_TRUE(fdt_read_number(&ffa_node, "pages-count",
163 &number));
164 cur_region->page_count = (uint32_t)number;
165
166 if (!fdt_read_number(&ffa_node, "base-address",
167 &cur_region->base_address)) {
168 EXPECT_TRUE(fdt_read_number(&ffa_node,
169 "relative-address",
170 &number));
171 cur_region->base_address =
172 ctx->partition_manifest.load_addr +
173 number;
174 }
175
176 EXPECT_TRUE(fdt_read_number(&ffa_node, "attributes",
177 &number));
178 cur_region->attributes = (uint32_t)number;
179 mem_count++;
180 } while (fdt_next_sibling(&ffa_node));
181
182 assert(mem_count < PARTITION_MAX_MEMORY_REGIONS);
183
184 ctx->partition_manifest.mem_region_count = mem_count;
185 }
186
187 ffa_node = root;
188
189 /* Look for the device region node. */
190 if (fdt_find_child(&ffa_node, &dev_region_node_name) &&
191 fdt_first_child(&ffa_node)) {
192 uint32_t dev_region_count = 0;
193
194 do {
195 struct device_region *cur_region =
196 &ctx->partition_manifest
197 .dev_regions[dev_region_count];
198 EXPECT_TRUE(fdt_read_number(&ffa_node, "pages-count",
199 &number));
200 cur_region->page_count = (uint32_t)number;
201
202 if (!fdt_read_number(&ffa_node, "base-address",
203 &cur_region->base_address)) {
204 EXPECT_TRUE(fdt_read_number(&ffa_node,
205 "relative-address",
206 &number));
207 cur_region->base_address =
208 ctx->partition_manifest.load_addr +
209 number;
210 }
211
212 EXPECT_TRUE(fdt_read_number(&ffa_node, "attributes",
213 &number));
214 cur_region->attributes = (uint32_t)number;
215 dev_region_count++;
216 } while (fdt_next_sibling(&ffa_node));
217
218 assert(dev_region_count < PARTITION_MAX_DEVICE_REGIONS);
219
220 ctx->partition_manifest.dev_region_count = dev_region_count;
221 }
222
223 ctx->is_ffa_manifest_parsed = true;
224 }
225
run_service_set_up(struct hftest_context * ctx,struct fdt * fdt)226 static void run_service_set_up(struct hftest_context *ctx, struct fdt *fdt)
227 {
228 struct fdt_node node;
229 struct hftest_test *hftest_info;
230
231 ASSERT_TRUE(fdt_find_node(fdt, "/", &node));
232
233 if (!fdt_find_child(&node, &(STRING_INIT("hftest-service-setup")))) {
234 return;
235 }
236
237 EXPECT_TRUE(fdt_is_compatible(&node, "arm,hftest"));
238
239 for (hftest_info = hftest_begin; hftest_info < hftest_end;
240 ++hftest_info) {
241 struct memiter data;
242 if (hftest_info->kind != HFTEST_KIND_SERVICE_SET_UP) {
243 continue;
244 }
245 if (fdt_read_property(&node, hftest_info->name, &data)) {
246 HFTEST_LOG("Running service_setup: %s\n",
247 hftest_info->name);
248 hftest_info->fn();
249 if (ctx->failures) {
250 HFTEST_LOG_FAILURE();
251 HFTEST_LOG(HFTEST_LOG_INDENT
252 "%s service_setup failed\n",
253 hftest_info->name);
254 abort();
255 }
256 } else {
257 HFTEST_LOG("Skipping service_setup: %s\n",
258 hftest_info->name);
259 }
260 }
261 }
262
/**
 * Entry point of an hftest service partition (VM or SP).
 *
 * On the boot vCPU, sets up the RX/TX mailbox and initializes the global
 * test context. Parses the provided FDT (hypervisor DT for VMs, FF-A
 * manifest for SPs), runs any manifest-selected service set-up routines,
 * then waits to be told — via an indirect message — which service to run,
 * and runs it. Never returns.
 */
noreturn void hftest_service_main(const void *fdt_ptr)
{
	struct hftest_context *ctx;
	struct memiter args;
	hftest_test_fn service;
	struct ffa_value ret;
	struct fdt fdt;
	const ffa_id_t own_id = hf_vm_get_id();
	ffa_notifications_bitmap_t bitmap;
	struct ffa_partition_msg *message;
	uint32_t vcpu = get_current_vcpu_index();

	ctx = hftest_get_context();

	/* If boot vcpu, set up mailbox and initialize context abort function. */
	if (vcpu == 0) {
		struct mailbox_buffers mb;
		mb = set_up_mailbox();
		hftest_context_init(ctx, mb.send, mb.recv);
	}

	if (!fdt_struct_from_ptr(fdt_ptr, &fdt)) {
		HFTEST_LOG(HFTEST_LOG_INDENT "Unable to access the FDT");
		abort();
	}

	/*
	 * The memory size argument is to be used only by VMs. It is part of
	 * the dt provided by the Hypervisor. SPs expect to receive their
	 * FF-A manifest which doesn't have a memory size field.
	 */
	if (ffa_is_vm_id(own_id) &&
	    !fdt_get_memory_size(&fdt, &ctx->memory_size)) {
		HFTEST_LOG_FAILURE();
		HFTEST_LOG(HFTEST_LOG_INDENT
			   "No entry in the FDT on memory size details");
		abort();
	} else if (!ffa_is_vm_id(own_id)) {
		/*
		 * It is secure partition. We are currently using the partition
		 * manifest for the SP.
		 */
		hftest_parse_ffa_manifest(ctx, &fdt);
		stdout_init(ctx->partition_manifest.ffa_version);

		/* TODO: Determine memory size referring to the SP Pkg. */
		ctx->memory_size = 1048576;
	}

	run_service_set_up(ctx, &fdt);

	/* Receive the name of the service to run. */
	ret = ffa_msg_wait();
	EXPECT_EQ(ret.func, FFA_RUN_32);

	message = (struct ffa_partition_msg *)SERVICE_RECV_BUFFER();

	/*
	 * Expect to wake up with indirect message related to the next service
	 * to be executed. The RX-buffer-full notification may come from either
	 * the SPM or the hypervisor, so query both bitmaps.
	 */
	ret = ffa_notification_get(own_id, vcpu,
				   FFA_NOTIFICATION_FLAG_BITMAP_SPM |
					   FFA_NOTIFICATION_FLAG_BITMAP_HYP);
	ASSERT_EQ(ret.func, FFA_SUCCESS_32);
	bitmap = ffa_notification_get_from_framework(ret);
	ASSERT_TRUE(is_ffa_spm_buffer_full_notification(bitmap) ||
		    is_ffa_hyp_buffer_full_notification(bitmap));
	ASSERT_EQ(own_id, ffa_rxtx_header_receiver(&message->header));
	memiter_init(&args, message->payload, message->header.size);

	/* Find service handler. */
	service = find_service(&args);
	/* Release the RX buffer regardless: the message has been consumed. */
	EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);

	/* Check the service was found. */
	if (service == NULL) {
		HFTEST_LOG_FAILURE();
		HFTEST_LOG(HFTEST_LOG_INDENT
			   "Unable to find requested service");
		abort();
	}

	/* Pause so the next time cycles are given the service will be run. */
	ffa_yield();

	/* Let the service run. */
	service();

	/* Cleanly handle it if the service returns. */
	if (ctx->failures) {
		abort();
	}

	for (;;) {
		/* Hang if the service returns. */
	}
}
361
hftest_get_dir_req_source_id(void)362 ffa_id_t hftest_get_dir_req_source_id(void)
363 {
364 struct hftest_context *ctx = hftest_get_context();
365 return ctx->dir_req_source_id;
366 }
367
/** Store `id` into the dir_req_source_id field of the global test context. */
void hftest_set_dir_req_source_id(ffa_id_t id)
{
	hftest_get_context()->dir_req_source_id = id;
}
373