/*
 * Copyright 2023 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa.h"
#include "hf/std.h"

#include "vmapi/hf/call.h"

#include "primary_with_secondary.h"
#include "test/hftest.h"
#include "test/vmapi/ffa.h"

#define MAX_RESP_REGS (MAX_MSG_SIZE / sizeof(uint64_t))

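/**
 * Returns the number of UUIDs declared in the partition's FF-A manifest, or
 * zero if the manifest has not been parsed.
 */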
static uint16_t get_uuid_count(struct hftest_context *ctx)
{
	if (ctx->is_ffa_manifest_parsed) {
		return ctx->partition_manifest.uuid_count;
	}

	return 0;
}

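/**
 * Returns a pointer to the UUID list from the partition's FF-A manifest, or
 * NULL if the manifest has not been parsed.
 */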
static struct ffa_uuid *get_uuids(struct hftest_context *ctx)
{
	if (ctx->is_ffa_manifest_parsed) {
		return (struct ffa_uuid *)&ctx->partition_manifest.uuids;
	}

	return NULL;
}

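/**
 * Checks whether `target_uuid` appears in `uuid_list`. The nil UUID is
 * treated as matching any list.
 */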
static bool is_uuid_in_list(uint16_t uuid_count, struct ffa_uuid target_uuid,
			    struct ffa_uuid *uuid_list)
{
	uint16_t i;

	/* Allow for nil UUID usage. */
	if (ffa_uuid_is_null(&target_uuid)) {
		return true;
	}

	for (i = 0; i < uuid_count; i++) {
		if (ffa_uuid_is_null(&uuid_list[i])) {
			break;
		}
		if (ffa_uuid_equal(&uuid_list[i], &target_uuid)) {
			return true;
		}
	}

	return false;
}

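/**
 * Waits for a direct message request and echoes its payload back to the
 * sender via FFA_MSG_SEND_DIRECT_RESP.
 */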
TEST_SERVICE(ffa_direct_message_resp_echo)
{
	struct ffa_value args = ffa_msg_wait();

	EXPECT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ_32);

	ffa_msg_send_direct_resp(ffa_receiver(args), ffa_sender(args),
				 args.arg3, args.arg4, args.arg5, args.arg6,
				 args.arg7);

	FAIL("Direct response not expected to return");
}

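/**
 * Waits for an FFA_MSG_SEND_DIRECT_REQ2 and echoes the full register payload
 * back to the sender via FFA_MSG_SEND_DIRECT_RESP2.
 */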
TEST_SERVICE(ffa_direct_message_req2_resp_echo)
{
	uint64_t msg[MAX_RESP_REGS];
	struct ffa_value res = ffa_msg_wait();
	EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_REQ2_64);

	memcpy_s(&msg, sizeof(uint64_t) * MAX_RESP_REGS, &res.arg4,
		 MAX_RESP_REGS * sizeof(uint64_t));

	ffa_msg_send_direct_resp2(ffa_receiver(res), ffa_sender(res),
				  (const uint64_t *)msg, MAX_RESP_REGS);

	FAIL("Direct response not expected to return");
}

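/**
 * Waits for a direct message request, yields back to the sender with a
 * timeout, and then echoes the payload via FFA_MSG_SEND_DIRECT_RESP.
 */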
TEST_SERVICE(ffa_yield_direct_message_resp_echo)
{
	struct ffa_value args = ffa_msg_wait();

	EXPECT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ_32);

	/*
	 * Give control back to the VM/SP that sent the direct request,
	 * through the FFA_YIELD ABI, specifying a timeout of 0x123456789.
	 */
	ffa_yield_timeout(0x1, 0x23456789);

	/* Send the echo through direct message response. */
	ffa_msg_send_direct_resp(ffa_receiver(args), ffa_sender(args),
				 args.arg3, args.arg4, args.arg5, args.arg6,
				 args.arg7);

	FAIL("Direct response not expected to return");
}

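/**
 * Waits for an FFA_MSG_SEND_DIRECT_REQ2, yields back to the sender with a
 * timeout, and then echoes the payload via FFA_MSG_SEND_DIRECT_RESP2.
 */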
TEST_SERVICE(ffa_yield_direct_message_resp2_echo)
{
	struct ffa_value res = ffa_msg_wait();
	uint64_t msg[MAX_RESP_REGS] = {0};

	EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_REQ2_64);

	/*
	 * Give control back to the VM/SP that sent the direct request,
	 * through the FFA_YIELD ABI, specifying a timeout of 0x123456789.
	 */
	ffa_yield_timeout(0x1, 0x23456789);

	HFTEST_LOG("after yield");

	/* Send the echo through direct message response. */
	memcpy_s(&msg, sizeof(uint64_t) * MAX_RESP_REGS, &res.arg4,
		 MAX_RESP_REGS * sizeof(uint64_t));

	ffa_msg_send_direct_resp2(ffa_receiver(res), ffa_sender(res),
				  (const uint64_t *)msg, MAX_RESP_REGS);

	FAIL("Direct response not expected to return");
}

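/**
 * Sends a direct message request to the endpoint whose FF-A ID is received
 * via indirect message, and checks the response echoes the request payload.
 */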
TEST_SERVICE(ffa_direct_message_echo_services)
{
	const uint32_t msg[] = {0x00001111, 0x22223333, 0x44445555, 0x66667777,
				0x88889999};
	void *recv_buf = SERVICE_RECV_BUFFER();
	struct ffa_value res;
	ffa_id_t target_id;

	/* Retrieve FF-A ID of the target endpoint. */
	receive_indirect_message((void *)&target_id, sizeof(target_id),
				 recv_buf, NULL);

	HFTEST_LOG("Echo test with: %x", target_id);

	res = ffa_msg_send_direct_req(hf_vm_get_id(), target_id, msg[0], msg[1],
				      msg[2], msg[3], msg[4]);

	EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_RESP_32);

	EXPECT_EQ(res.arg3, msg[0]);
	EXPECT_EQ(res.arg4, msg[1]);
	EXPECT_EQ(res.arg5, msg[2]);
	EXPECT_EQ(res.arg6, msg[3]);
	EXPECT_EQ(res.arg7, msg[4]);

	ffa_yield();
}

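/**
 * Sends an FFA_MSG_SEND_DIRECT_REQ2 to the endpoint whose UUID is received
 * via indirect message, and checks the response echoes all message registers.
 */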
TEST_SERVICE(ffa_direct_message_req2_echo_services)
{
	const uint64_t msg[] = {0x00001111, 0x22223333, 0x44445555, 0x66667777,
				0x88889999, 0x01010101, 0x23232323, 0x45454545,
				0x67676767, 0x89898989, 0x11001100, 0x22332233,
				0x44554455, 0x66776677};
	void *recv_buf = SERVICE_RECV_BUFFER();
	struct ffa_value res;
	struct ffa_partition_info target_info;
	struct ffa_uuid target_uuid;

	/* Retrieve the UUID of the target endpoint. */
	receive_indirect_message((void *)&target_uuid, sizeof(target_uuid),
				 recv_buf, NULL);

	HFTEST_LOG("Target UUID: %X-%X-%X-%X", target_uuid.uuid[0],
		   target_uuid.uuid[1], target_uuid.uuid[2],
		   target_uuid.uuid[3]);

	/* Map the UUID to the respective partition info. */
	ASSERT_EQ(get_ffa_partition_info(&target_uuid, &target_info,
					 sizeof(target_info), recv_buf),
		  1);

	HFTEST_LOG("Echo test with: %x", target_info.vm_id);

	res = ffa_msg_send_direct_req2(hf_vm_get_id(), target_info.vm_id,
				       &target_uuid, (const uint64_t *)&msg,
				       ARRAY_SIZE(msg));

	EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_RESP2_64);

	EXPECT_EQ(res.arg4, msg[0]);
	EXPECT_EQ(res.arg5, msg[1]);
	EXPECT_EQ(res.arg6, msg[2]);
	EXPECT_EQ(res.arg7, msg[3]);
	EXPECT_EQ(res.extended_val.arg8, msg[4]);
	EXPECT_EQ(res.extended_val.arg9, msg[5]);
	EXPECT_EQ(res.extended_val.arg10, msg[6]);
	EXPECT_EQ(res.extended_val.arg11, msg[7]);
	EXPECT_EQ(res.extended_val.arg12, msg[8]);
	EXPECT_EQ(res.extended_val.arg13, msg[9]);
	EXPECT_EQ(res.extended_val.arg14, msg[10]);
	EXPECT_EQ(res.extended_val.arg15, msg[11]);
	EXPECT_EQ(res.extended_val.arg16, msg[12]);
	EXPECT_EQ(res.extended_val.arg17, msg[13]);

	ffa_yield();
}

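/**
 * Sends a direct message request to the target endpoint and keeps allocating
 * CPU cycles to it while it yields, until the echoed response is received.
 */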
TEST_SERVICE(ffa_yield_direct_message_echo_services)
{
	const uint32_t msg[] = {0x00001111, 0x22223333, 0x44445555, 0x66667777,
				0x88889999};
	void *recv_buf = SERVICE_RECV_BUFFER();
	struct ffa_value res;
	ffa_id_t target_id;

	/* Retrieve FF-A ID of the target endpoint. */
	receive_indirect_message((void *)&target_id, sizeof(target_id),
				 recv_buf, NULL);

	HFTEST_LOG("Echo test with: %x", target_id);

	res = ffa_msg_send_direct_req(hf_vm_get_id(), target_id, msg[0], msg[1],
				      msg[2], msg[3], msg[4]);

	/*
	 * Be prepared to allocate CPU cycles to the target vCPU if it yields
	 * while processing the direct message.
	 */
	while (res.func == FFA_YIELD_32) {
		/* VM ID/vCPU index are passed through arg1. */
		EXPECT_EQ(res.arg1, ffa_vm_vcpu(target_id, 0));

		/* Allocate CPU cycles to resume the SP. */
		res = ffa_run(target_id, 0);
	}
	EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_RESP_32);

	EXPECT_EQ(res.arg3, msg[0]);
	EXPECT_EQ(res.arg4, msg[1]);
	EXPECT_EQ(res.arg5, msg[2]);
	EXPECT_EQ(res.arg6, msg[3]);
	EXPECT_EQ(res.arg7, msg[4]);

	ffa_yield();
}

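/**
 * While handling a direct message request, verify that FFA_MSG_WAIT is denied
 * and that sending a new direct message request fails with
 * FFA_INVALID_PARAMETERS.
 */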
TEST_SERVICE(ffa_direct_msg_req_disallowed_smc)
{
	struct ffa_value args = ffa_msg_wait();
	struct ffa_value ret;
	void *recv_buf = SERVICE_RECV_BUFFER();
	struct ffa_partition_info *service1_info = service1(recv_buf);

	EXPECT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ_32);

	ret = ffa_msg_wait();
	EXPECT_FFA_ERROR(ret, FFA_DENIED);

	ret = ffa_msg_send_direct_req(service1_info->vm_id, ffa_sender(args), 0,
				      0, 0, 0, 0);
	EXPECT_FFA_ERROR(ret, FFA_INVALID_PARAMETERS);

	ffa_msg_send_direct_resp(ffa_receiver(args), ffa_sender(args),
				 args.arg3, args.arg4, args.arg5, args.arg6,
				 args.arg7);

	FAIL("Direct response not expected to return");
}

/**
 * Verify that services can't send direct message requests
 * when invoked by FFA_RUN.
 */
TEST_SERVICE(ffa_disallowed_direct_msg_req)
{
	struct ffa_value args;
	struct ffa_value ret;
	void *recv_buf = SERVICE_RECV_BUFFER();
	struct ffa_partition_info *service1_info = service1(recv_buf);

	ret = ffa_msg_send_direct_req(service1_info->vm_id, HF_PRIMARY_VM_ID, 0,
				      0, 0, 0, 0);
	EXPECT_FFA_ERROR(ret, FFA_INVALID_PARAMETERS);

	ret = ffa_msg_send_direct_req(service1_info->vm_id, HF_VM_ID_BASE + 10,
				      0, 0, 0, 0, 0);
	EXPECT_FFA_ERROR(ret, FFA_INVALID_PARAMETERS);

	args = ffa_msg_wait();
	EXPECT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ_32);

	ffa_msg_send_direct_resp(ffa_receiver(args), ffa_sender(args),
				 args.arg3, args.arg4, args.arg5, args.arg6,
				 args.arg7);

	FAIL("Direct response not expected to return");
}

/**
 * Verify a service can't send a direct message response when it hasn't
 * first been sent a request.
 */
TEST_SERVICE(ffa_disallowed_direct_msg_resp)
{
	struct ffa_value args;
	struct ffa_value ret;
	void *recv_buf = SERVICE_RECV_BUFFER();
	struct ffa_partition_info *service1_info = service1(recv_buf);

	ret = ffa_msg_send_direct_resp(service1_info->vm_id, HF_PRIMARY_VM_ID,
				       0, 0, 0, 0, 0);
	EXPECT_FFA_ERROR(ret, FFA_DENIED);

	args = ffa_msg_wait();
	EXPECT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ_32);

	ffa_msg_send_direct_resp(ffa_receiver(args), ffa_sender(args),
				 args.arg3, args.arg4, args.arg5, args.arg6,
				 args.arg7);

	FAIL("Direct response not expected to return");
}

/**
 * Verify a service can't send a response to a different VM than the one
 * that sent the request.
 * Verify a service cannot send a response with a sender ID different from
 * its own service ID.
 */
TEST_SERVICE(ffa_direct_msg_resp_invalid_sender_receiver)
{
	struct ffa_value res;
	void *recv_buf = SERVICE_RECV_BUFFER();
	struct ffa_partition_info *service2_info = service2(recv_buf);
	ffa_id_t invalid_receiver;
	struct ffa_value args = ffa_msg_wait();
	ffa_id_t own_id = hf_vm_get_id();
	EXPECT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ_32);

	ffa_id_t sender = ffa_sender(args);
	ASSERT_EQ(own_id, ffa_receiver(args));

	/* Other receiver ID. */
	invalid_receiver = ffa_is_vm_id(own_id) ? service2_info->vm_id : own_id;
	res = ffa_msg_send_direct_resp(own_id, invalid_receiver, 0, 0, 0, 0, 0);
	EXPECT_FFA_ERROR(res, FFA_INVALID_PARAMETERS);

	/* Spoof sender ID. */
	res = ffa_msg_send_direct_resp(service2_info->vm_id, sender, 0, 0, 0, 0,
				       0);
	EXPECT_FFA_ERROR(res, FFA_INVALID_PARAMETERS);

	ffa_msg_send_direct_resp(own_id, sender, 0, 0, 0, 0, 0);

	FAIL("Direct response not expected to return");
}

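/**
 * Verify a service handling a direct message request cannot send a direct
 * message request back to the original sender (a messaging cycle is denied).
 */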
TEST_SERVICE(ffa_direct_message_cycle_denied)
{
	struct ffa_value res;
	struct ffa_value args = ffa_msg_wait();
	ffa_id_t sender;
	ffa_id_t receiver;
	ffa_id_t own_id = hf_vm_get_id();

	ASSERT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ_32);
	receiver = ffa_receiver(args);
	sender = ffa_sender(args);

	EXPECT_EQ(receiver, hf_vm_get_id());

	res = ffa_msg_send_direct_req(own_id, sender, 1, 2, 3, 4, 5);
	EXPECT_FFA_ERROR(res, FFA_DENIED);

	ffa_msg_send_direct_resp(ffa_receiver(args), ffa_sender(args),
				 args.arg3, args.arg4, args.arg5, args.arg6,
				 args.arg7);

	FAIL("Direct response not expected to return");
}

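/**
 * Same cycle check for FF-A v1.2: while handling an FFA_MSG_SEND_DIRECT_REQ2,
 * sending an FFA_MSG_SEND_DIRECT_REQ2 back to the original sender is denied.
 */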
TEST_SERVICE(ffa_direct_message_v_1_2_cycle_denied)
{
	struct ffa_value res;
	struct ffa_value args;
	ffa_id_t sender;
	ffa_id_t receiver;
	ffa_id_t own_id = hf_vm_get_id();
	const uint64_t invalid_msg[] = {1, 2, 3, 4, 5};
	uint64_t msg[MAX_RESP_REGS];

	void *recv_buf = SERVICE_RECV_BUFFER();
	struct ffa_uuid target_uuid;

	/* Retrieve the UUID of the target endpoint. */
	receive_indirect_message((void *)&target_uuid, sizeof(target_uuid),
				 recv_buf, NULL);

	/* Wait for the direct request. */
	args = ffa_msg_wait();

	ASSERT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ2_64);
	receiver = ffa_receiver(args);
	sender = ffa_sender(args);

	EXPECT_EQ(receiver, hf_vm_get_id());

	/* Try to send a request back instead of a response. */
	res = ffa_msg_send_direct_req2(own_id, sender, &target_uuid,
				       (const uint64_t *)&invalid_msg,
				       ARRAY_SIZE(invalid_msg));

	EXPECT_FFA_ERROR(res, FFA_DENIED);

	/* Send the echo through direct message response. */
	memcpy_s(&msg, sizeof(uint64_t) * MAX_RESP_REGS, &args.arg4,
		 MAX_RESP_REGS * sizeof(uint64_t));

	ffa_msg_send_direct_resp2(receiver, sender, (const uint64_t *)msg,
				  MAX_RESP_REGS);

	FAIL("Direct response not expected to return");
}

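/**
 * Verify that, while handling an FFA_MSG_SEND_DIRECT_REQ, sending an
 * FFA_MSG_SEND_DIRECT_REQ2 back to the original sender is denied.
 */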
TEST_SERVICE(ffa_direct_message_cycle_req_req2_denied)
{
	struct ffa_value res;
	struct ffa_value args;
	ffa_id_t sender;
	ffa_id_t receiver;
	ffa_id_t own_id = hf_vm_get_id();
	const uint64_t invalid_msg[] = {1, 2, 3, 4, 5};
	void *recv_buf = SERVICE_RECV_BUFFER();
	struct ffa_uuid target_uuid;

	/* Retrieve the UUID of the target endpoint. */
	receive_indirect_message((void *)&target_uuid, sizeof(target_uuid),
				 recv_buf, NULL);

	/* Wait for the direct request. */
	args = ffa_msg_wait();
	ASSERT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ_32);
	receiver = ffa_receiver(args);
	sender = ffa_sender(args);

	EXPECT_EQ(receiver, hf_vm_get_id());

	/* Try to send a request back instead of a response. */
	res = ffa_msg_send_direct_req2(own_id, sender, &target_uuid,
				       (const uint64_t *)&invalid_msg,
				       ARRAY_SIZE(invalid_msg));
	EXPECT_FFA_ERROR(res, FFA_DENIED);

	ffa_msg_send_direct_resp(ffa_receiver(args), ffa_sender(args),
				 args.arg3, args.arg4, args.arg5, args.arg6,
				 args.arg7);

	FAIL("Direct response not expected to return");
}

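/**
 * Sends an FFA_MSG_SEND_DIRECT_REQ2 to the target endpoint and keeps
 * allocating CPU cycles to it while it yields, until the echoed response
 * arrives in the extended register set.
 */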
TEST_SERVICE(ffa_yield_direct_message_v_1_2_echo_services)
{
	const uint64_t msg[] = {0x00001111, 0x22223333, 0x44445555, 0x66667777,
				0x88889999, 0x01010101, 0x23232323, 0x45454545,
				0x67676767, 0x89898989, 0x11001100, 0x22332233,
				0x44554455, 0x66776677};
	void *recv_buf = SERVICE_RECV_BUFFER();
	struct ffa_value res;
	struct ffa_uuid target_uuid;
	struct ffa_partition_info target_info;

	/* Retrieve the UUID of the target endpoint. */
	receive_indirect_message((void *)&target_uuid, sizeof(target_uuid),
				 recv_buf, NULL);

	/* Map the UUID to the respective partition info. */
	ASSERT_EQ(get_ffa_partition_info(&target_uuid, &target_info,
					 sizeof(target_info), recv_buf),
		  1);

	HFTEST_LOG("Echo test with: %x", target_info.vm_id);

	res = ffa_msg_send_direct_req2(hf_vm_get_id(), target_info.vm_id,
				       &target_uuid, (const uint64_t *)&msg,
				       ARRAY_SIZE(msg));
	/*
	 * Be prepared to allocate CPU cycles to the target vCPU if it yields
	 * while processing the direct message.
	 */
	while (res.func == FFA_YIELD_32) {
		/* VM ID/vCPU index are passed through arg1. */
		EXPECT_EQ(res.arg1, ffa_vm_vcpu(target_info.vm_id, 0));

		/* Allocate CPU cycles to resume the SP. */
		res = ffa_run(target_info.vm_id, 0);
	}
	EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_RESP2_64);

	EXPECT_EQ(res.arg4, msg[0]);
	EXPECT_EQ(res.arg5, msg[1]);
	EXPECT_EQ(res.arg6, msg[2]);
	EXPECT_EQ(res.arg7, msg[3]);
	EXPECT_EQ(res.extended_val.arg8, msg[4]);
	EXPECT_EQ(res.extended_val.arg9, msg[5]);
	EXPECT_EQ(res.extended_val.arg10, msg[6]);
	EXPECT_EQ(res.extended_val.arg11, msg[7]);
	EXPECT_EQ(res.extended_val.arg12, msg[8]);
	EXPECT_EQ(res.extended_val.arg13, msg[9]);
	EXPECT_EQ(res.extended_val.arg14, msg[10]);
	EXPECT_EQ(res.extended_val.arg15, msg[11]);
	EXPECT_EQ(res.extended_val.arg16, msg[12]);
	EXPECT_EQ(res.extended_val.arg17, msg[13]);

	ffa_yield();
}

/**
 * Verify a service can't send a direct message response via
 * FFA_MSG_SEND_DIRECT_RESP2 when it hasn't first been sent a request.
 */
TEST_SERVICE(ffa_disallowed_direct_msg_resp2)
{
	struct ffa_value args;
	struct ffa_value ret;
	void *recv_buf = SERVICE_RECV_BUFFER();
	struct ffa_partition_info *service1_info = service1(recv_buf);
	uint64_t msg[MAX_RESP_REGS];

	ret = ffa_msg_send_direct_resp2(service1_info->vm_id, HF_PRIMARY_VM_ID,
					(uint64_t *)msg, ARRAY_SIZE(msg));
	EXPECT_FFA_ERROR(ret, FFA_DENIED);

	args = ffa_msg_wait();
	EXPECT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ2_64);

	ffa_msg_send_direct_resp2(ffa_receiver(args), ffa_sender(args),
				  (uint64_t *)msg, ARRAY_SIZE(msg));

	FAIL("Direct response not expected to return");
}

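/**
 * While handling an FFA_MSG_SEND_DIRECT_REQ2, verify that FFA_MSG_WAIT is
 * denied and that sending a new FFA_MSG_SEND_DIRECT_REQ2 fails with
 * FFA_INVALID_PARAMETERS.
 */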
TEST_SERVICE(ffa_direct_msg_req2_disallowed_smc)
{
	struct ffa_value args = ffa_msg_wait();
	struct ffa_value ret;
	void *recv_buf = SERVICE_RECV_BUFFER();
	struct ffa_partition_info *service1_info = service1(recv_buf);
	uint64_t msg[MAX_RESP_REGS] = {0};
	struct ffa_uuid sender_uuid;
	ffa_uuid_init(0, 0, 0, 0, &sender_uuid);

	EXPECT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ2_64);

	ret = ffa_msg_wait();
	EXPECT_FFA_ERROR(ret, FFA_DENIED);

	ret = ffa_msg_send_direct_req2(service1_info->vm_id, ffa_sender(args),
				       &sender_uuid, (uint64_t *)msg,
				       ARRAY_SIZE(msg));
	EXPECT_FFA_ERROR(ret, FFA_INVALID_PARAMETERS);

	ffa_msg_send_direct_resp2(ffa_receiver(args), ffa_sender(args),
				  (uint64_t *)msg, ARRAY_SIZE(msg));

	FAIL("Direct response not expected to return");
}

/**
 * Verify that services can't send direct message requests
 * via FFA_MSG_SEND_DIRECT_REQ2 after being invoked by FFA_RUN.
 */
TEST_SERVICE(ffa_disallowed_direct_msg_req2)
{
	struct ffa_value args;
	struct ffa_value ret;
	void *recv_buf = SERVICE_RECV_BUFFER();
	struct ffa_partition_info *service1_info;
	uint64_t msg[MAX_RESP_REGS] = {0};
	struct ffa_uuid target_uuid;

	/* Retrieve the UUID of the NWd PVM. */
	receive_indirect_message((void *)&target_uuid, sizeof(target_uuid),
				 recv_buf, NULL);

	service1_info = service1(recv_buf);

	/* Attempt a request to the NWd VM. */
	ret = ffa_msg_send_direct_req2(service1_info->vm_id, HF_PRIMARY_VM_ID,
				       &target_uuid, (uint64_t *)msg,
				       ARRAY_SIZE(msg));
	EXPECT_FFA_ERROR(ret, FFA_INVALID_PARAMETERS);

	ret = ffa_msg_send_direct_req2(service1_info->vm_id, HF_VM_ID_BASE + 10,
				       &target_uuid, (uint64_t *)msg,
				       ARRAY_SIZE(msg));
	EXPECT_FFA_ERROR(ret, FFA_INVALID_PARAMETERS);

	args = ffa_msg_wait();
	EXPECT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ2_64);

	ffa_msg_send_direct_resp2(ffa_receiver(args), ffa_sender(args),
				  (uint64_t *)msg, ARRAY_SIZE(msg));

	FAIL("Direct response not expected to return");
}

/**
 * Verify a service can't send a response to a different VM than the one
 * that sent the request.
 * Verify a service cannot send a response with a sender ID different from
 * its own service ID.
 */
TEST_SERVICE(ffa_direct_msg_resp2_invalid_sender_receiver)
{
	struct ffa_value res;
	void *recv_buf = SERVICE_RECV_BUFFER();
	struct ffa_partition_info *service2_info = service2(recv_buf);
	ffa_id_t invalid_receiver;
	uint64_t msg[MAX_RESP_REGS] = {0};
	ffa_id_t own_id;
	ffa_id_t sender;
	struct ffa_value args = ffa_msg_wait();

	EXPECT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ2_64);

	sender = ffa_sender(args);
	own_id = hf_vm_get_id();
	ASSERT_EQ(own_id, ffa_receiver(args));

	/* Other receiver ID. */
	invalid_receiver = ffa_is_vm_id(own_id) ? service2_info->vm_id : own_id;
	res = ffa_msg_send_direct_resp2(own_id, invalid_receiver,
					(uint64_t *)msg, ARRAY_SIZE(msg));
	EXPECT_FFA_ERROR(res, FFA_INVALID_PARAMETERS);

	/* Spoof sender ID. */
	res = ffa_msg_send_direct_resp2(service2_info->vm_id, sender,
					(uint64_t *)msg, ARRAY_SIZE(msg));
	EXPECT_FFA_ERROR(res, FFA_INVALID_PARAMETERS);

	ffa_msg_send_direct_resp2(own_id, sender, (uint64_t *)msg,
				  ARRAY_SIZE(msg));

	FAIL("Direct response not expected to return");
}

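/**
 * Verify that a request received via FFA_MSG_SEND_DIRECT_REQ2 cannot be
 * answered with FFA_MSG_SEND_DIRECT_RESP.
 */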
TEST_SERVICE(ffa_direct_msg_req2_resp_failure)
{
	struct ffa_value res;
	struct ffa_value args = ffa_msg_wait();
	uint64_t msg[MAX_RESP_REGS] = {0};

	EXPECT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ2_64);

	/* Respond to FFA_MSG_SEND_DIRECT_REQ2 with FFA_MSG_SEND_DIRECT_RESP. */
	res = ffa_msg_send_direct_resp(ffa_receiver(args), ffa_sender(args),
				       args.arg3, args.arg4, args.arg5,
				       args.arg6, args.arg7);

	EXPECT_FFA_ERROR(res, FFA_DENIED);

	memcpy_s(&msg, sizeof(uint64_t) * MAX_RESP_REGS, &args.arg4,
		 MAX_RESP_REGS * sizeof(uint64_t));
	ffa_msg_send_direct_resp2(ffa_receiver(args), ffa_sender(args),
				  (uint64_t *)msg, ARRAY_SIZE(msg));

	FAIL("Direct response not expected to return");
}

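/**
 * Verify that a request received via FFA_MSG_SEND_DIRECT_REQ cannot be
 * answered with FFA_MSG_SEND_DIRECT_RESP2.
 */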
TEST_SERVICE(ffa_direct_msg_req_resp2_failure)
{
	struct ffa_value res;
	struct ffa_value args = ffa_msg_wait();
	uint64_t msg[MAX_RESP_REGS] = {0};

	EXPECT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ_32);

	memcpy_s(&msg, sizeof(uint64_t) * MAX_RESP_REGS, &args.arg4,
		 MAX_RESP_REGS * sizeof(uint64_t));
	/* Respond to FFA_MSG_SEND_DIRECT_REQ with FFA_MSG_SEND_DIRECT_RESP2. */
	res = ffa_msg_send_direct_resp2(ffa_receiver(args), ffa_sender(args),
					(uint64_t *)msg, ARRAY_SIZE(msg));

	EXPECT_FFA_ERROR(res, FFA_DENIED);

	ffa_msg_send_direct_resp(ffa_receiver(args), ffa_sender(args),
				 args.arg3, args.arg4, args.arg5, args.arg6,
				 args.arg7);

	FAIL("Direct response not expected to return");
}

/**
 * Put the SP in the waiting state via FFA_MSG_SEND_DIRECT_RESP.
 * Make sure the extended registers are preserved when the SP is brought
 * back into the running state on receipt of FFA_MSG_SEND_DIRECT_REQ2.
 *
 * Run twice to cover the reverse scenario (the SP waits with
 * FFA_MSG_SEND_DIRECT_RESP2 and wakes up on receipt of
 * FFA_MSG_SEND_DIRECT_REQ).
 */
TEST_SERVICE(ffa_direct_msg_resp_ext_registers_preserved)
{
	struct ffa_value args = ffa_msg_wait();
	uint64_t msg[MAX_RESP_REGS];

	for (uint32_t i = 0; i < 2; i++) {
		EXPECT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ_32);

		args = ffa_msg_send_direct_resp(
			ffa_receiver(args), ffa_sender(args), args.arg3,
			args.arg4, args.arg5, args.arg6, args.arg7);

		/*
		 * Wake up from the waiting state on receipt of
		 * FFA_MSG_SEND_DIRECT_REQ2.
		 */
		EXPECT_EQ(args.func, FFA_MSG_SEND_DIRECT_REQ2_64);
		memcpy_s(&msg, sizeof(uint64_t) * MAX_RESP_REGS, &args.arg4,
			 MAX_RESP_REGS * sizeof(uint64_t));

		args = ffa_msg_send_direct_resp2(
			ffa_receiver(args), ffa_sender(args),
			(const uint64_t *)msg, MAX_RESP_REGS);
	}

	FAIL("Direct response not expected to return");
}

/**
 * An FF-A v1.1 endpoint attempts to use FFA_MSG_SEND_DIRECT_REQ2
 * and fails.
 */
TEST_SERVICE(version_does_not_support_req2)
{
	const uint32_t msg[] = {0x00001111, 0x22223333, 0x44445555, 0x66667777,
				0x88889999};
	void *recv_buf = SERVICE_RECV_BUFFER();
	struct ffa_value res;
	struct ffa_partition_info target_info;
	struct ffa_uuid target_uuid;

	/* Retrieve the UUID of the target endpoint. */
	receive_indirect_message((void *)&target_uuid, sizeof(target_uuid),
				 recv_buf, NULL);

	HFTEST_LOG("Target UUID: %X-%X-%X-%X", target_uuid.uuid[0],
		   target_uuid.uuid[1], target_uuid.uuid[2],
		   target_uuid.uuid[3]);

	/* Map the UUID to the respective partition info. */
	ASSERT_EQ(get_ffa_partition_info(&target_uuid, &target_info,
					 sizeof(target_info), recv_buf),
		  1);

	/* Attempt to send FFA_MSG_SEND_DIRECT_REQ2 and fail. */
	res = ffa_msg_send_direct_req2(hf_vm_get_id(), target_info.vm_id,
				       &target_uuid, (const uint64_t *)&msg,
				       ARRAY_SIZE(msg));
	EXPECT_FFA_ERROR(res, FFA_DENIED);
	ffa_yield();
}

/**
 * Service loops forever, expecting to always be woken by an
 * FFA_MSG_SEND_DIRECT_REQ2. It verifies that the targeted UUID is listed in
 * its own partition manifest before echoing the message back to the sender.
 */
TEST_SERVICE(ffa_direct_message_req2_resp_loop)
{
	struct hftest_context *ctx = hftest_get_context();
	struct ffa_uuid *uuids = get_uuids(ctx);
	uint16_t uuid_count = get_uuid_count(ctx);
	struct ffa_value res;

	if (!ctx->is_ffa_manifest_parsed) {
		FAIL("Manifest not parsed");
	}

	res = ffa_msg_wait();

	while (true) {
		uint64_t msg[MAX_RESP_REGS];
		struct ffa_uuid target_uuid;

		EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_REQ2_64);

		ffa_uuid_unpack_from_uint64(res.arg2, res.arg3, &target_uuid);

		HFTEST_LOG("Target UUID: %X-%X-%X-%X", target_uuid.uuid[0],
			   target_uuid.uuid[1], target_uuid.uuid[2],
			   target_uuid.uuid[3]);

		EXPECT_TRUE(is_uuid_in_list(uuid_count, target_uuid, uuids));

		memcpy_s(&msg, sizeof(uint64_t) * MAX_RESP_REGS, &res.arg4,
			 MAX_RESP_REGS * sizeof(uint64_t));

		res = ffa_msg_send_direct_resp2(
			ffa_receiver(res), ffa_sender(res),
			(const uint64_t *)msg, MAX_RESP_REGS);
	}
}