/*
 * Copyright 2023 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/arch/irq.h"
#include "hf/arch/vm/interrupts.h"

#include "hf/check.h"
#include "hf/ffa.h"

#include "vmapi/hf/call.h"
#include "vmapi/hf/ffa.h"

#include "primary_with_secondary.h"
#include "test/hftest.h"
#include "test/vmapi/arch/exception_handler.h"
#include "test/vmapi/ffa.h"

#define MAX_RESP_REGS (MAX_MSG_SIZE / sizeof(uint64_t))

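/**
 * Returns the number of UUIDs declared in the partition's FF-A manifest, or
 * 0 if the manifest has not been parsed.
 */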
static uint16_t get_uuid_count(struct hftest_context *ctx)
{
	if (ctx->is_ffa_manifest_parsed) {
		return ctx->partition_manifest.uuid_count;
	}

	return 0;
}

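/**
 * Returns the list of UUIDs declared in the partition's FF-A manifest, or
 * NULL if the manifest has not been parsed.
 */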
static struct ffa_uuid *get_uuids(struct hftest_context *ctx)
{
	if (ctx->is_ffa_manifest_parsed) {
		return (struct ffa_uuid *)&ctx->partition_manifest.uuids;
	}

	return NULL;
}

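/**
 * Returns true if `target_uuid` is the nil UUID or matches one of the first
 * `uuid_count` entries of `uuid_list`.
 */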
static bool is_uuid_in_list(uint16_t uuid_count, struct ffa_uuid target_uuid,
		struct ffa_uuid *uuid_list)
{
	uint16_t i;

	/* Allow for nil uuid usage. */
	if (ffa_uuid_is_null(&target_uuid)) {
		return true;
	}

	for (i = 0; i < uuid_count; i++) {
		/* Stop at the first empty slot in the list. */
		if (ffa_uuid_is_null(&uuid_list[i])) {
			break;
		}
		if (ffa_uuid_equal(&uuid_list[i], &target_uuid)) {
			return true;
		}
	}

	return false;
}

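/**
 * Waits for a direct message request and echoes the payload back to the
 * sender through a direct message response.
 */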
TEST_SERVICE(ffa_direct_message_resp_echo)
{
	struct ffa_value args = ffa_msg_wait();

	EXPECT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ_32);

	ffa_msg_send_direct_resp(ffa_receiver(args), ffa_sender(args),
			args.arg3, args.arg4, args.arg5, args.arg6,
			args.arg7);

	FAIL("Direct response not expected to return");
}

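/**
 * Waits for an FFA_MSG_SEND_DIRECT_REQ2 and echoes the extended register
 * payload back to the sender through FFA_MSG_SEND_DIRECT_RESP2.
 */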
TEST_SERVICE(ffa_direct_message_req2_resp_echo)
{
	uint64_t msg[MAX_RESP_REGS];
	struct ffa_value res = ffa_msg_wait();
	EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_REQ2_64);

	memcpy_s(&msg, sizeof(uint64_t) * MAX_RESP_REGS, &res.arg4,
			MAX_RESP_REGS * sizeof(uint64_t));

	ffa_msg_send_direct_resp2(ffa_receiver(res), ffa_sender(res),
			(const uint64_t *)msg, MAX_RESP_REGS);

	FAIL("Direct response not expected to return");
}

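/**
 * Waits for a direct message request, yields back to the sender with a
 * timeout, and echoes the payload once it is resumed.
 */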
TEST_SERVICE(ffa_yield_direct_message_resp_echo)
{
	struct ffa_value args = ffa_msg_wait();

	EXPECT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ_32);

	/*
	 * Give back control to the VM/SP that sent the direct request
	 * message, through the FFA_YIELD ABI, and specify a timeout of
	 * 0x123456789 (high word 0x1, low word 0x23456789).
	 */
	ffa_yield_timeout(0x1, 0x23456789);

	/* Send the echo through direct message response. */
	ffa_msg_send_direct_resp(ffa_receiver(args), ffa_sender(args),
			args.arg3, args.arg4, args.arg5, args.arg6,
			args.arg7);

	FAIL("Direct response not expected to return");
}

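/**
 * Waits for an FFA_MSG_SEND_DIRECT_REQ2, yields back to the sender with a
 * timeout, and echoes the extended register payload once it is resumed.
 */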
TEST_SERVICE(ffa_yield_direct_message_resp2_echo)
{
	struct ffa_value res = ffa_msg_wait();
	uint64_t msg[MAX_RESP_REGS] = {0};

	EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_REQ2_64);

	/*
	 * Give back control to the VM/SP that sent the direct request
	 * message, through the FFA_YIELD ABI, and specify a timeout of
	 * 0x123456789 (high word 0x1, low word 0x23456789).
	 */
	ffa_yield_timeout(0x1, 0x23456789);

	HFTEST_LOG("after yield");

	/* Send the echo through direct message response. */
	memcpy_s(&msg, sizeof(uint64_t) * MAX_RESP_REGS, &res.arg4,
			MAX_RESP_REGS * sizeof(uint64_t));

	ffa_msg_send_direct_resp2(ffa_receiver(res), ffa_sender(res),
			(const uint64_t *)msg, MAX_RESP_REGS);

	FAIL("Direct response not expected to return");
}

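/**
 * Sends a direct message request to the endpoint whose FF-A ID is received
 * via indirect message, and checks that the response echoes the payload.
 */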
TEST_SERVICE(ffa_direct_message_echo_services)
{
	const uint32_t msg[] = {0x00001111, 0x22223333, 0x44445555, 0x66667777,
			0x88889999};
	void *recv_buf = SERVICE_RECV_BUFFER();
	struct ffa_value res;
	ffa_id_t target_id;

	/* Retrieve FF-A ID of the target endpoint. */
	receive_indirect_message((void *)&target_id, sizeof(target_id),
			recv_buf);

	HFTEST_LOG("Echo test with: %x", target_id);

	res = ffa_msg_send_direct_req(hf_vm_get_id(), target_id, msg[0], msg[1],
			msg[2], msg[3], msg[4]);

	EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_RESP_32);

	EXPECT_EQ(res.arg3, msg[0]);
	EXPECT_EQ(res.arg4, msg[1]);
	EXPECT_EQ(res.arg5, msg[2]);
	EXPECT_EQ(res.arg6, msg[3]);
	EXPECT_EQ(res.arg7, msg[4]);

	ffa_yield();
}

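/**
 * Sends an FFA_MSG_SEND_DIRECT_REQ2 to the endpoint whose UUID is received
 * via indirect message, and checks that the response echoes the extended
 * register payload.
 */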
TEST_SERVICE(ffa_direct_message_req2_echo_services)
{
	const uint64_t msg[] = {0x00001111, 0x22223333, 0x44445555, 0x66667777,
			0x88889999, 0x01010101, 0x23232323, 0x45454545,
			0x67676767, 0x89898989, 0x11001100, 0x22332233,
			0x44554455, 0x66776677};
	void *recv_buf = SERVICE_RECV_BUFFER();
	struct ffa_value res;
	struct ffa_partition_info target_info;
	struct ffa_uuid target_uuid;

	/* Retrieve the UUID of the target endpoint. */
	receive_indirect_message((void *)&target_uuid, sizeof(target_uuid),
			recv_buf);

	HFTEST_LOG("Target UUID: %X-%X-%X-%X", target_uuid.uuid[0],
			target_uuid.uuid[1], target_uuid.uuid[2],
			target_uuid.uuid[3]);

	/* Map the UUID to the respective partition info. */
	ASSERT_EQ(get_ffa_partition_info(target_uuid, &target_info,
			sizeof(target_info), recv_buf),
			1);

	HFTEST_LOG("Echo test with: %x", target_info.vm_id);

	res = ffa_msg_send_direct_req2(hf_vm_get_id(), target_info.vm_id,
			&target_uuid, (const uint64_t *)&msg,
			ARRAY_SIZE(msg));

	EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_RESP2_64);

	EXPECT_EQ(res.arg4, msg[0]);
	EXPECT_EQ(res.arg5, msg[1]);
	EXPECT_EQ(res.arg6, msg[2]);
	EXPECT_EQ(res.arg7, msg[3]);
	EXPECT_EQ(res.extended_val.arg8, msg[4]);
	EXPECT_EQ(res.extended_val.arg9, msg[5]);
	EXPECT_EQ(res.extended_val.arg10, msg[6]);
	EXPECT_EQ(res.extended_val.arg11, msg[7]);
	EXPECT_EQ(res.extended_val.arg12, msg[8]);
	EXPECT_EQ(res.extended_val.arg13, msg[9]);
	EXPECT_EQ(res.extended_val.arg14, msg[10]);
	EXPECT_EQ(res.extended_val.arg15, msg[11]);
	EXPECT_EQ(res.extended_val.arg16, msg[12]);
	EXPECT_EQ(res.extended_val.arg17, msg[13]);

	ffa_yield();
}

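/**
 * Sends a direct message request to the endpoint whose FF-A ID is received
 * via indirect message, allocating CPU cycles with FFA_RUN while the target
 * yields, and checks that the response echoes the payload.
 */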
TEST_SERVICE(ffa_yield_direct_message_echo_services)
{
	const uint32_t msg[] = {0x00001111, 0x22223333, 0x44445555, 0x66667777,
			0x88889999};
	void *recv_buf = SERVICE_RECV_BUFFER();
	struct ffa_value res;
	ffa_id_t target_id;

	/* Retrieve FF-A ID of the target endpoint. */
	receive_indirect_message((void *)&target_id, sizeof(target_id),
			recv_buf);

	HFTEST_LOG("Echo test with: %x", target_id);

	res = ffa_msg_send_direct_req(hf_vm_get_id(), target_id, msg[0], msg[1],
			msg[2], msg[3], msg[4]);

	/*
	 * Be prepared to allocate CPU cycles to the target vCPU if it yields
	 * while processing the direct message.
	 */
	while (res.func == FFA_YIELD_32) {
		/* VM ID and vCPU index are passed through arg1. */
		EXPECT_EQ(res.arg1, ffa_vm_vcpu(target_id, 0));

		/* Allocate CPU cycles to resume the SP. */
		res = ffa_run(target_id, 0);
	}
	EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_RESP_32);

	EXPECT_EQ(res.arg3, msg[0]);
	EXPECT_EQ(res.arg4, msg[1]);
	EXPECT_EQ(res.arg5, msg[2]);
	EXPECT_EQ(res.arg6, msg[3]);
	EXPECT_EQ(res.arg7, msg[4]);

	ffa_yield();
}

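/**
 * While processing a direct message request, checks that invoking
 * FFA_MSG_WAIT and sending a new direct message request are both rejected,
 * then sends the expected response.
 */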
TEST_SERVICE(ffa_direct_msg_req_disallowed_smc)
{
	struct ffa_value args = ffa_msg_wait();
	struct ffa_value ret;
	void *recv_buf = SERVICE_RECV_BUFFER();
	struct ffa_partition_info *service1_info = service1(recv_buf);

	EXPECT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ_32);

	ret = ffa_msg_wait();
	EXPECT_FFA_ERROR(ret, FFA_DENIED);

	ret = ffa_msg_send_direct_req(service1_info->vm_id, ffa_sender(args), 0,
			0, 0, 0, 0);
	EXPECT_FFA_ERROR(ret, FFA_INVALID_PARAMETERS);

	ffa_msg_send_direct_resp(ffa_receiver(args), ffa_sender(args),
			args.arg3, args.arg4, args.arg5, args.arg6,
			args.arg7);

	FAIL("Direct response not expected to return");
}

/**
 * Verify that services can't send direct message requests
 * when invoked by FFA_RUN.
 */
TEST_SERVICE(ffa_disallowed_direct_msg_req)
{
	struct ffa_value args;
	struct ffa_value ret;
	void *recv_buf = SERVICE_RECV_BUFFER();
	struct ffa_partition_info *service1_info = service1(recv_buf);

	ret = ffa_msg_send_direct_req(service1_info->vm_id, HF_PRIMARY_VM_ID, 0,
			0, 0, 0, 0);
	EXPECT_FFA_ERROR(ret, FFA_INVALID_PARAMETERS);

	ret = ffa_msg_send_direct_req(service1_info->vm_id, HF_VM_ID_BASE + 10,
			0, 0, 0, 0, 0);
	EXPECT_FFA_ERROR(ret, FFA_INVALID_PARAMETERS);

	args = ffa_msg_wait();
	EXPECT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ_32);

	ffa_msg_send_direct_resp(ffa_receiver(args), ffa_sender(args),
			args.arg3, args.arg4, args.arg5, args.arg6,
			args.arg7);

	FAIL("Direct response not expected to return");
}

/**
 * Verify a service can't send a direct message response when it hasn't
 * first been sent a request.
 */
TEST_SERVICE(ffa_disallowed_direct_msg_resp)
{
	struct ffa_value args;
	struct ffa_value ret;
	void *recv_buf = SERVICE_RECV_BUFFER();
	struct ffa_partition_info *service1_info = service1(recv_buf);

	ret = ffa_msg_send_direct_resp(service1_info->vm_id, HF_PRIMARY_VM_ID,
			0, 0, 0, 0, 0);
	EXPECT_FFA_ERROR(ret, FFA_DENIED);

	args = ffa_msg_wait();
	EXPECT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ_32);

	ffa_msg_send_direct_resp(ffa_receiver(args), ffa_sender(args),
			args.arg3, args.arg4, args.arg5, args.arg6,
			args.arg7);

	FAIL("Direct response not expected to return");
}

/**
 * Verify a service can't send a response to a different VM than the one
 * that sent the request, and can't send a response with a sender ID
 * different from its own service ID.
 */
TEST_SERVICE(ffa_direct_msg_resp_invalid_sender_receiver)
{
	struct ffa_value res;
	void *recv_buf = SERVICE_RECV_BUFFER();
	struct ffa_partition_info *service2_info = service2(recv_buf);
	ffa_id_t invalid_receiver;
	struct ffa_value args = ffa_msg_wait();
	ffa_id_t own_id = hf_vm_get_id();
	EXPECT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ_32);

	ffa_id_t sender = ffa_sender(args);
	ASSERT_EQ(own_id, ffa_receiver(args));

	/* Other receiver ID. */
	invalid_receiver = ffa_is_vm_id(own_id) ? service2_info->vm_id : own_id;
	res = ffa_msg_send_direct_resp(own_id, invalid_receiver, 0, 0, 0, 0, 0);
	EXPECT_FFA_ERROR(res, FFA_INVALID_PARAMETERS);

	/* Spoof sender ID. */
	res = ffa_msg_send_direct_resp(service2_info->vm_id, sender, 0, 0, 0, 0,
			0);
	EXPECT_FFA_ERROR(res, FFA_INVALID_PARAMETERS);

	ffa_msg_send_direct_resp(own_id, sender, 0, 0, 0, 0, 0);

	FAIL("Direct response not expected to return");
}

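/**
 * While processing a direct message request, verify that sending a direct
 * message request back to the sender is denied, then respond normally.
 */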
TEST_SERVICE(ffa_direct_message_cycle_denied)
{
	struct ffa_value res;
	struct ffa_value args = ffa_msg_wait();
	ffa_id_t sender;
	ffa_id_t receiver;
	ffa_id_t own_id = hf_vm_get_id();

	ASSERT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ_32);
	receiver = ffa_receiver(args);
	sender = ffa_sender(args);

	EXPECT_EQ(receiver, hf_vm_get_id());

	res = ffa_msg_send_direct_req(own_id, sender, 1, 2, 3, 4, 5);
	EXPECT_FFA_ERROR(res, FFA_DENIED);

	ffa_msg_send_direct_resp(ffa_receiver(args), ffa_sender(args),
			args.arg3, args.arg4, args.arg5, args.arg6,
			args.arg7);

	FAIL("Direct response not expected to return");
}

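/**
 * While processing an FFA_MSG_SEND_DIRECT_REQ2, verify that sending an
 * FFA_MSG_SEND_DIRECT_REQ2 back to the sender is denied, then echo the
 * payload through FFA_MSG_SEND_DIRECT_RESP2.
 */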
TEST_SERVICE(ffa_direct_message_v_1_2_cycle_denied)
{
	struct ffa_value res;
	struct ffa_value args;
	ffa_id_t sender;
	ffa_id_t receiver;
	ffa_id_t own_id = hf_vm_get_id();
	const uint64_t invalid_msg[] = {1, 2, 3, 4, 5};
	uint64_t msg[MAX_RESP_REGS];

	void *recv_buf = SERVICE_RECV_BUFFER();
	struct ffa_uuid target_uuid;

	/* Set up NPI handling, to handle the RX buffer full notification. */
	exception_setup(check_npi, NULL);
	arch_irq_enable();

	/* Retrieve the UUID of the target endpoint. */
	receive_indirect_message((void *)&target_uuid, sizeof(target_uuid),
			recv_buf);

	/* Wait for a direct request. */
	args = ffa_msg_wait();

	ASSERT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ2_64);
	receiver = ffa_receiver(args);
	sender = ffa_sender(args);

	EXPECT_EQ(receiver, hf_vm_get_id());

	/* Try to send a request back instead of a response. */
	res = ffa_msg_send_direct_req2(own_id, sender, &target_uuid,
			(const uint64_t *)&invalid_msg,
			ARRAY_SIZE(invalid_msg));

	EXPECT_FFA_ERROR(res, FFA_DENIED);

	/* Send the echo through direct message response. */
	memcpy_s(&msg, sizeof(uint64_t) * MAX_RESP_REGS, &args.arg4,
			MAX_RESP_REGS * sizeof(uint64_t));

	ffa_msg_send_direct_resp2(receiver, sender, (const uint64_t *)msg,
			MAX_RESP_REGS);

	FAIL("Direct response not expected to return");
}

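/**
 * While processing an FFA_MSG_SEND_DIRECT_REQ, verify that sending an
 * FFA_MSG_SEND_DIRECT_REQ2 back to the sender is denied, then respond
 * normally.
 */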
TEST_SERVICE(ffa_direct_message_cycle_req_req2_denied)
{
	struct ffa_value res;
	struct ffa_value args;
	ffa_id_t sender;
	ffa_id_t receiver;
	ffa_id_t own_id = hf_vm_get_id();
	const uint64_t invalid_msg[] = {1, 2, 3, 4, 5};
	void *recv_buf = SERVICE_RECV_BUFFER();
	struct ffa_uuid target_uuid;

	/* Set up NPI handling, to handle the RX buffer full notification. */
	exception_setup(check_npi, NULL);
	arch_irq_enable();

	/* Retrieve the UUID of the target endpoint. */
	receive_indirect_message((void *)&target_uuid, sizeof(target_uuid),
			recv_buf);

	/* Wait for a direct request. */
	args = ffa_msg_wait();
	ASSERT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ_32);
	receiver = ffa_receiver(args);
	sender = ffa_sender(args);

	EXPECT_EQ(receiver, hf_vm_get_id());

	/* Try to send a request back instead of a response. */
	res = ffa_msg_send_direct_req2(own_id, sender, &target_uuid,
			(const uint64_t *)&invalid_msg,
			ARRAY_SIZE(invalid_msg));
	EXPECT_FFA_ERROR(res, FFA_DENIED);

	ffa_msg_send_direct_resp(ffa_receiver(args), ffa_sender(args),
			args.arg3, args.arg4, args.arg5, args.arg6,
			args.arg7);

	FAIL("Direct response not expected to return");
}

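/**
 * Sends an FFA_MSG_SEND_DIRECT_REQ2 to the endpoint whose UUID is received
 * via indirect message, allocating CPU cycles with FFA_RUN while the target
 * yields, and checks that the response echoes the extended register payload.
 */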
TEST_SERVICE(ffa_yield_direct_message_v_1_2_echo_services)
{
	const uint64_t msg[] = {0x00001111, 0x22223333, 0x44445555, 0x66667777,
			0x88889999, 0x01010101, 0x23232323, 0x45454545,
			0x67676767, 0x89898989, 0x11001100, 0x22332233,
			0x44554455, 0x66776677};
	void *recv_buf = SERVICE_RECV_BUFFER();
	struct ffa_value res;
	struct ffa_uuid target_uuid;
	struct ffa_partition_info target_info;

	/* Retrieve the UUID of the target endpoint. */
	receive_indirect_message((void *)&target_uuid, sizeof(target_uuid),
			recv_buf);

	/* Map the UUID to the respective partition info. */
	ASSERT_EQ(get_ffa_partition_info(target_uuid, &target_info,
			sizeof(target_info), recv_buf),
			1);

	HFTEST_LOG("Echo test with: %x", target_info.vm_id);

	res = ffa_msg_send_direct_req2(hf_vm_get_id(), target_info.vm_id,
			&target_uuid, (const uint64_t *)&msg,
			ARRAY_SIZE(msg));
	/*
	 * Be prepared to allocate CPU cycles to the target vCPU if it yields
	 * while processing the direct message.
	 */
	while (res.func == FFA_YIELD_32) {
		/* VM ID and vCPU index are passed through arg1. */
		EXPECT_EQ(res.arg1, ffa_vm_vcpu(target_info.vm_id, 0));

		/* Allocate CPU cycles to resume the SP. */
		res = ffa_run(target_info.vm_id, 0);
	}
	EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_RESP2_64);

	EXPECT_EQ(res.arg4, msg[0]);
	EXPECT_EQ(res.arg5, msg[1]);
	EXPECT_EQ(res.arg6, msg[2]);
	EXPECT_EQ(res.arg7, msg[3]);
	EXPECT_EQ(res.extended_val.arg8, msg[4]);
	EXPECT_EQ(res.extended_val.arg9, msg[5]);
	EXPECT_EQ(res.extended_val.arg10, msg[6]);
	EXPECT_EQ(res.extended_val.arg11, msg[7]);
	EXPECT_EQ(res.extended_val.arg12, msg[8]);
	EXPECT_EQ(res.extended_val.arg13, msg[9]);
	EXPECT_EQ(res.extended_val.arg14, msg[10]);
	EXPECT_EQ(res.extended_val.arg15, msg[11]);
	EXPECT_EQ(res.extended_val.arg16, msg[12]);
	EXPECT_EQ(res.extended_val.arg17, msg[13]);

	ffa_yield();
}

/**
 * Verify a service can't send a direct message response when it hasn't
 * first been sent a request.
 */
TEST_SERVICE(ffa_disallowed_direct_msg_resp2)
{
	struct ffa_value args;
	struct ffa_value ret;
	void *recv_buf = SERVICE_RECV_BUFFER();
	struct ffa_partition_info *service1_info = service1(recv_buf);
	uint64_t msg[MAX_RESP_REGS];

	ret = ffa_msg_send_direct_resp2(service1_info->vm_id, HF_PRIMARY_VM_ID,
			(uint64_t *)msg, ARRAY_SIZE(msg));
	EXPECT_FFA_ERROR(ret, FFA_DENIED);

	args = ffa_msg_wait();
	EXPECT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ2_64);

	ffa_msg_send_direct_resp2(ffa_receiver(args), ffa_sender(args),
			(uint64_t *)msg, ARRAY_SIZE(msg));

	FAIL("Direct response not expected to return");
}

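/**
 * While processing an FFA_MSG_SEND_DIRECT_REQ2, checks that invoking
 * FFA_MSG_WAIT and sending a new FFA_MSG_SEND_DIRECT_REQ2 are both rejected,
 * then sends the expected response.
 */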
TEST_SERVICE(ffa_direct_msg_req2_disallowed_smc)
{
	struct ffa_value args = ffa_msg_wait();
	struct ffa_value ret;
	void *recv_buf = SERVICE_RECV_BUFFER();
	struct ffa_partition_info *service1_info = service1(recv_buf);
	uint64_t msg[MAX_RESP_REGS] = {0};
	struct ffa_uuid sender_uuid;
	ffa_uuid_init(0, 0, 0, 0, &sender_uuid);

	EXPECT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ2_64);

	ret = ffa_msg_wait();
	EXPECT_FFA_ERROR(ret, FFA_DENIED);

	ret = ffa_msg_send_direct_req2(service1_info->vm_id, ffa_sender(args),
			&sender_uuid, (uint64_t *)msg,
			ARRAY_SIZE(msg));
	EXPECT_FFA_ERROR(ret, FFA_INVALID_PARAMETERS);

	ffa_msg_send_direct_resp2(ffa_receiver(args), ffa_sender(args),
			(uint64_t *)msg, ARRAY_SIZE(msg));

	FAIL("Direct response not expected to return");
}

/**
 * Verify that services can't send direct message requests
 * via FFA_MSG_SEND_DIRECT_REQ2 after being invoked by FFA_RUN.
 */
TEST_SERVICE(ffa_disallowed_direct_msg_req2)
{
	struct ffa_value args;
	struct ffa_value ret;
	void *recv_buf = SERVICE_RECV_BUFFER();
	struct ffa_partition_info *service1_info;
	uint64_t msg[MAX_RESP_REGS] = {0};
	struct ffa_uuid target_uuid;

	/* Set up NPI handling, to handle the RX buffer full notification. */
	exception_setup(check_npi, NULL);
	arch_irq_enable();

	/* Retrieve the UUID of the NWd PVM. */
	receive_indirect_message((void *)&target_uuid, sizeof(target_uuid),
			recv_buf);

	service1_info = service1(recv_buf);

	/* Attempt a request to the NWd VM. */
	ret = ffa_msg_send_direct_req2(service1_info->vm_id, HF_PRIMARY_VM_ID,
			&target_uuid, (uint64_t *)msg,
			ARRAY_SIZE(msg));
	EXPECT_FFA_ERROR(ret, FFA_INVALID_PARAMETERS);

	ret = ffa_msg_send_direct_req2(service1_info->vm_id, HF_VM_ID_BASE + 10,
			&target_uuid, (uint64_t *)msg,
			ARRAY_SIZE(msg));
	EXPECT_FFA_ERROR(ret, FFA_INVALID_PARAMETERS);

	args = ffa_msg_wait();
	EXPECT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ2_64);

	ffa_msg_send_direct_resp2(ffa_receiver(args), ffa_sender(args),
			(uint64_t *)msg, ARRAY_SIZE(msg));

	FAIL("Direct response not expected to return");
}

/**
 * Verify a service can't send a response to a different VM than the one
 * that sent the request, and can't send a response with a sender ID
 * different from its own service ID.
 */
TEST_SERVICE(ffa_direct_msg_resp2_invalid_sender_receiver)
{
	struct ffa_value res;
	void *recv_buf = SERVICE_RECV_BUFFER();
	struct ffa_partition_info *service2_info = service2(recv_buf);
	ffa_id_t invalid_receiver;
	uint64_t msg[MAX_RESP_REGS] = {0};
	ffa_id_t own_id;
	ffa_id_t sender;
	struct ffa_value args = ffa_msg_wait();

	EXPECT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ2_64);

	sender = ffa_sender(args);
	own_id = hf_vm_get_id();
	ASSERT_EQ(own_id, ffa_receiver(args));

	/* Other receiver ID. */
	invalid_receiver = ffa_is_vm_id(own_id) ? service2_info->vm_id : own_id;
	res = ffa_msg_send_direct_resp2(own_id, invalid_receiver,
			(uint64_t *)msg, ARRAY_SIZE(msg));
	EXPECT_FFA_ERROR(res, FFA_INVALID_PARAMETERS);

	/* Spoof sender ID. */
	res = ffa_msg_send_direct_resp2(service2_info->vm_id, sender,
			(uint64_t *)msg, ARRAY_SIZE(msg));
	EXPECT_FFA_ERROR(res, FFA_INVALID_PARAMETERS);

	ffa_msg_send_direct_resp2(own_id, sender, (uint64_t *)msg,
			ARRAY_SIZE(msg));

	FAIL("Direct response not expected to return");
}

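/**
 * Verify that answering an FFA_MSG_SEND_DIRECT_REQ2 with
 * FFA_MSG_SEND_DIRECT_RESP is denied, then respond with
 * FFA_MSG_SEND_DIRECT_RESP2.
 */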
TEST_SERVICE(ffa_direct_msg_req2_resp_failure)
{
	struct ffa_value res;
	struct ffa_value args = ffa_msg_wait();
	uint64_t msg[MAX_RESP_REGS] = {0};

	EXPECT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ2_64);

	/* Respond to FFA_MSG_SEND_DIRECT_REQ2 with FFA_MSG_SEND_DIRECT_RESP. */
	res = ffa_msg_send_direct_resp(ffa_receiver(args), ffa_sender(args),
			args.arg3, args.arg4, args.arg5,
			args.arg6, args.arg7);

	EXPECT_FFA_ERROR(res, FFA_DENIED);

	memcpy_s(&msg, sizeof(uint64_t) * MAX_RESP_REGS, &args.arg4,
			MAX_RESP_REGS * sizeof(uint64_t));
	ffa_msg_send_direct_resp2(ffa_receiver(args), ffa_sender(args),
			(uint64_t *)msg, ARRAY_SIZE(msg));

	FAIL("Direct response not expected to return");
}

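/**
 * Verify that answering an FFA_MSG_SEND_DIRECT_REQ with
 * FFA_MSG_SEND_DIRECT_RESP2 is denied, then respond with
 * FFA_MSG_SEND_DIRECT_RESP.
 */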
TEST_SERVICE(ffa_direct_msg_req_resp2_failure)
{
	struct ffa_value res;
	struct ffa_value args = ffa_msg_wait();
	uint64_t msg[MAX_RESP_REGS] = {0};

	EXPECT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ_32);

	memcpy_s(&msg, sizeof(uint64_t) * MAX_RESP_REGS, &args.arg4,
			MAX_RESP_REGS * sizeof(uint64_t));
	/* Respond to FFA_MSG_SEND_DIRECT_REQ with FFA_MSG_SEND_DIRECT_RESP2. */
	res = ffa_msg_send_direct_resp2(ffa_receiver(args), ffa_sender(args),
			(uint64_t *)msg, ARRAY_SIZE(msg));

	EXPECT_FFA_ERROR(res, FFA_DENIED);

	ffa_msg_send_direct_resp(ffa_receiver(args), ffa_sender(args),
			args.arg3, args.arg4, args.arg5, args.arg6,
			args.arg7);

	FAIL("Direct response not expected to return");
}

/**
 * Put the SP in the waiting state via FFA_MSG_SEND_DIRECT_RESP.
 * Make sure the extended registers are preserved when the SP is brought
 * back into the running state by receipt of FFA_MSG_SEND_DIRECT_REQ2.
 *
 * Runs twice to cover the reverse scenario (the SP waits with
 * FFA_MSG_SEND_DIRECT_RESP2 and wakes up on receipt of
 * FFA_MSG_SEND_DIRECT_REQ).
 */
TEST_SERVICE(ffa_direct_msg_resp_ext_registers_preserved)
{
	struct ffa_value args = ffa_msg_wait();
	uint64_t msg[MAX_RESP_REGS];

	for (uint32_t i = 0; i < 2; i++) {
		EXPECT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ_32);

		args = ffa_msg_send_direct_resp(
			ffa_receiver(args), ffa_sender(args), args.arg3,
			args.arg4, args.arg5, args.arg6, args.arg7);

		/*
		 * Wake up from the waiting state on receipt of
		 * FFA_MSG_SEND_DIRECT_REQ2.
		 */
		EXPECT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ2_64);
		memcpy_s(&msg, sizeof(uint64_t) * MAX_RESP_REGS, &args.arg4,
				MAX_RESP_REGS * sizeof(uint64_t));

		args = ffa_msg_send_direct_resp2(
			ffa_receiver(args), ffa_sender(args),
			(const uint64_t *)msg, MAX_RESP_REGS);
	}

	FAIL("Direct response not expected to return");
}

/**
 * An FF-A v1.1 endpoint attempts to use FFA_MSG_SEND_DIRECT_REQ2 and fails,
 * as the ABI is only available from FF-A v1.2 onwards.
 */
TEST_SERVICE(version_does_not_support_req2)
{
	const uint32_t msg[] = {0x00001111, 0x22223333, 0x44445555, 0x66667777,
			0x88889999};
	void *recv_buf = SERVICE_RECV_BUFFER();
	struct ffa_value res;
	struct ffa_partition_info target_info;
	struct ffa_uuid target_uuid;

	/* Retrieve the UUID of the target endpoint. */
	receive_indirect_message((void *)&target_uuid, sizeof(target_uuid),
			recv_buf);

	HFTEST_LOG("Target UUID: %X-%X-%X-%X", target_uuid.uuid[0],
			target_uuid.uuid[1], target_uuid.uuid[2],
			target_uuid.uuid[3]);

	/* Map the UUID to the respective partition info. */
	ASSERT_EQ(get_ffa_partition_info(target_uuid, &target_info,
			sizeof(target_info), recv_buf),
			1);

	/* Attempt to send FFA_MSG_SEND_DIRECT_REQ2 and fail. */
	res = ffa_msg_send_direct_req2(hf_vm_get_id(), target_info.vm_id,
			&target_uuid, (const uint64_t *)&msg,
			ARRAY_SIZE(msg));
	EXPECT_FFA_ERROR(res, FFA_DENIED);
	ffa_yield();
}

/**
 * Service loops forever, expecting to always wake up with an
 * FFA_MSG_SEND_DIRECT_REQ2. Verifies that the target UUID is specified in the
 * target partition's manifest before echoing the message back to the sender.
 */
TEST_SERVICE(ffa_direct_message_req2_resp_loop)
{
	struct hftest_context *ctx = hftest_get_context();
	struct ffa_uuid *uuids = get_uuids(ctx);
	uint16_t uuid_count = get_uuid_count(ctx);
	struct ffa_value res;

	if (!ctx->is_ffa_manifest_parsed) {
		FAIL("Manifest not parsed");
	}

	res = ffa_msg_wait();

	while (true) {
		uint64_t msg[MAX_RESP_REGS];
		struct ffa_uuid target_uuid;

		EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_REQ2_64);

		ffa_uuid_from_u64x2(res.arg2, res.arg3, &target_uuid);

		HFTEST_LOG("Target UUID: %X-%X-%X-%X", target_uuid.uuid[0],
				target_uuid.uuid[1], target_uuid.uuid[2],
				target_uuid.uuid[3]);

		EXPECT_TRUE(is_uuid_in_list(uuid_count, target_uuid, uuids));

		memcpy_s(&msg, sizeof(uint64_t) * MAX_RESP_REGS, &res.arg4,
				MAX_RESP_REGS * sizeof(uint64_t));

		res = ffa_msg_send_direct_resp2(
			ffa_receiver(res), ffa_sender(res),
			(const uint64_t *)msg, MAX_RESP_REGS);
	}
}

/**
 * Identifies the VM availability message.
 * See section 18.3 of the FF-A v1.2 specification.
 */
enum ffa_vm_availability_message {
	VM_MSG_CREATED,
	VM_MSG_DESTROYED,
};

/**
 * The state of a VM.
 * See section 18.3.2.3 of the FF-A v1.2 specification.
 */
enum ffa_vm_availability_state {
	VM_STATE_UNAVAILABLE,
	VM_STATE_AVAILABLE,
	VM_STATE_ERROR,
};

/**
 * In the scope of the VM availability messages, the test SP maintains the
 * state of 4 VMs.
 */
static enum ffa_vm_availability_state vm_states[4] = {0};

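/**
 * State machine for VM availability messages: returns the next state of a VM
 * given its current state and the received message.
 */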
static enum ffa_vm_availability_state next_vm_availability_state(
	enum ffa_vm_availability_state current_state,
	enum ffa_vm_availability_message msg)
{
	/* clang-format off */
	static const enum ffa_vm_availability_state table[3][2] = {
		/* State              VM created          VM destroyed */
		/* VM unavailable */ { VM_STATE_AVAILABLE, VM_STATE_ERROR },
		/* VM available   */ { VM_STATE_ERROR,     VM_STATE_UNAVAILABLE },
		/* Error          */ { VM_STATE_ERROR,     VM_STATE_ERROR },
	};
	/* clang-format on */

	return table[current_state][msg];
}

/**
 * Receive VM availability messages and update the VM's state accordingly.
 */
TEST_SERVICE(vm_availability_messaging)
{
	struct ffa_value args = ffa_msg_wait();

	for (;;) {
		enum ffa_framework_msg_func func =
			ffa_framework_msg_get_func(args);
		ffa_id_t sender_id = ffa_sender(args);
		ffa_id_t receiver_id = ffa_receiver(args);
		ffa_id_t vm_id = ffa_vm_availability_message_vm_id(args);
		enum ffa_vm_availability_state current_state;
		enum ffa_vm_availability_state new_state;
		enum ffa_vm_availability_message msg;

		switch (func) {
		case FFA_FRAMEWORK_MSG_VM_CREATION_REQ:
			msg = VM_MSG_CREATED;
			break;
		case FFA_FRAMEWORK_MSG_VM_DESTRUCTION_REQ:
			msg = VM_MSG_DESTROYED;
			break;
		default:
			dlog_verbose("Unknown framework message func: %#x\n",
					func);
			return;
		}

		/* Only a fixed number of VMs is tracked by this test. */
		ASSERT_TRUE(vm_id < ARRAY_SIZE(vm_states));

		current_state = vm_states[vm_id];
		new_state = next_vm_availability_state(current_state, msg);
		vm_states[vm_id] = new_state;

		args = ffa_framework_message_send_direct_resp(
			receiver_id, sender_id,
			((msg == VM_MSG_CREATED)
				? FFA_FRAMEWORK_MSG_VM_CREATION_RESP
				: FFA_FRAMEWORK_MSG_VM_DESTRUCTION_RESP),
			(new_state != VM_STATE_ERROR) ? 0
				: FFA_INVALID_PARAMETERS);
	}
}

/**
 * Forward a framework message from the primary VM, to demonstrate that an SP
 * cannot send framework messages.
 */
TEST_SERVICE(vm_availability_messaging_send_from_sp)
{
	struct ffa_value args = ffa_msg_wait();

	for (;;) {
		enum ffa_framework_msg_func func =
			ffa_framework_msg_get_func(args);
		ffa_id_t sp_id = ffa_receiver(args);
		ffa_id_t pvm_id = ffa_sender(args);
		ffa_id_t vm_id = ffa_vm_availability_message_vm_id(args);

		struct ffa_value ret = ffa_framework_msg_send_direct_req(
			sp_id, pvm_id, func, vm_id);

		/* Verify that the framework message request failed. */
		ASSERT_EQ(ret.func, FFA_ERROR_32);
		ASSERT_EQ(ret.arg3, 0);

		/*
		 * Send a valid response, so that the PVM is not blocked
		 * forever.
		 */
		args = ffa_msg_send_direct_resp(sp_id, pvm_id, ret.func,
				ret.arg1, ret.arg2, ret.arg3,
				ret.arg4);
	}
}

/**
 * Forward a framework message from the primary VM, to demonstrate that an SP
 * cannot send non-framework messages in response to framework messages.
 */
TEST_SERVICE(vm_availability_messaging_send_non_framework_from_sp)
{
	struct ffa_value args = ffa_msg_wait();
	struct ffa_value ret;
	enum ffa_framework_msg_func func = ffa_framework_msg_get_func(args);
	enum ffa_framework_msg_func func_resp = FFA_FRAMEWORK_MSG_INVALID;
	ffa_id_t sp_id = ffa_receiver(args);
	ffa_id_t pvm_id = ffa_sender(args);

	if (func == FFA_FRAMEWORK_MSG_VM_CREATION_REQ) {
		func_resp = FFA_FRAMEWORK_MSG_VM_CREATION_RESP;
	} else if (func == FFA_FRAMEWORK_MSG_VM_DESTRUCTION_REQ) {
		func_resp = FFA_FRAMEWORK_MSG_VM_DESTRUCTION_RESP;
	} else {
		FAIL("Unsupported framework message function %#x received",
				func);
	}

	/* Attempt to send a standard direct response message. */
	ret = ffa_msg_send_direct_resp(sp_id, pvm_id, args.arg3, args.arg4,
			args.arg5, args.arg6, args.arg7);

	/* Verify that the direct message response failed. */
	ASSERT_EQ(ret.func, FFA_ERROR_32);
	ASSERT_EQ((enum ffa_error)ret.arg2, FFA_DENIED);

	/*
	 * Send a valid framework direct response message, so that the VM is
	 * not blocked forever.
	 */
	ffa_framework_message_send_direct_resp(sp_id, pvm_id, func_resp, 0);
	FAIL("Direct response not expected to return");
}
