/*
 * Copyright 2023 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include <stdint.h>

#include "hf/arch/vm/power_mgmt.h"
#include "hf/arch/vmid_base.h"

#include "hf/ffa.h"

#include "vmapi/hf/call.h"

#include "primary_with_secondary.h"
#include "test/hftest.h"
#include "test/hftest_impl.h"
#include "test/semaphore.h"
#include "test/vmapi/ffa.h"

#define MAX_RESP_REGS (MAX_MSG_SIZE / sizeof(uint64_t))

/**
 * Structure defined for usage in tests with multiple cores.
 * Used to pass arguments from primary to secondary core.
 */
struct echo_test_secondary_cpu_entry_args {
	uint32_t req_func;
	ffa_id_t receiver_id;
	struct ffa_uuid receiver_uuid;
	ffa_vcpu_count_t receiver_vcpu_count;
	ffa_vcpu_index_t vcpu_id;
	struct mailbox_buffers mb;
	struct semaphore sync;
};

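/**
 * Send a direct message request carrying five 32-bit values to the given
 * target and check that the response echoes the payload back.
 */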
static void echo_test(ffa_id_t target_id)
{
	const uint32_t msg[] = {0x00001111, 0x22223333, 0x44445555, 0x66667777,
				0x88889999};
	struct ffa_value res;

	res = ffa_msg_send_direct_req(hf_vm_get_id(), target_id, msg[0], msg[1],
				      msg[2], msg[3], msg[4]);

	EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_RESP_32);

	EXPECT_EQ(res.arg3, msg[0]);
	EXPECT_EQ(res.arg4, msg[1]);
	EXPECT_EQ(res.arg5, msg[2]);
	EXPECT_EQ(res.arg6, msg[3]);
	EXPECT_EQ(res.arg7, msg[4]);
}

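/**
 * Send a direct message request via FFA_MSG_SEND_DIRECT_REQ2, carrying
 * fourteen 64-bit values, to the given target and UUID, and check that the
 * response echoes the payload back through the regular and extended argument
 * registers.
 */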
static void echo_test_req2(ffa_id_t target_id, struct ffa_uuid target_uuid)
{
	const uint64_t msg[] = {0x00001111, 0x22223333, 0x44445555, 0x66667777,
				0x88889999, 0x01010101, 0x23232323, 0x45454545,
				0x67676767, 0x89898989, 0x11001100, 0x22332233,
				0x44554455, 0x66776677};

	struct ffa_value res;
	res = ffa_msg_send_direct_req2(hf_vm_get_id(), target_id, &target_uuid,
				       (const uint64_t *)&msg, ARRAY_SIZE(msg));

	EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_RESP2_64);

	EXPECT_EQ(res.arg4, msg[0]);
	EXPECT_EQ(res.arg5, msg[1]);
	EXPECT_EQ(res.arg6, msg[2]);
	EXPECT_EQ(res.arg7, msg[3]);
	EXPECT_EQ(res.extended_val.arg8, msg[4]);
	EXPECT_EQ(res.extended_val.arg9, msg[5]);
	EXPECT_EQ(res.extended_val.arg10, msg[6]);
	EXPECT_EQ(res.extended_val.arg11, msg[7]);
	EXPECT_EQ(res.extended_val.arg12, msg[8]);
	EXPECT_EQ(res.extended_val.arg13, msg[9]);
	EXPECT_EQ(res.extended_val.arg14, msg[10]);
	EXPECT_EQ(res.extended_val.arg15, msg[11]);
	EXPECT_EQ(res.extended_val.arg16, msg[12]);
	EXPECT_EQ(res.extended_val.arg17, msg[13]);
}

/**
 * Send a direct message to a VM/SP. Expect it to yield its CPU cycles.
 * Allocate cycles through FFA_RUN and verify that the sent info is echoed
 * back.
 */
TEST(direct_message, ffa_send_direct_message_req_yield_echo)
{
	const uint32_t msg[] = {0x00001111, 0x22223333, 0x44445555, 0x66667777,
				0x88889999};
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_value res;
	struct ffa_partition_info *service1_info = service1(mb.recv);

	SERVICE_SELECT(service1_info->vm_id,
		       "ffa_yield_direct_message_resp_echo", mb.send);
	ffa_run(service1_info->vm_id, 0);

	res = ffa_msg_send_direct_req(hf_vm_get_id(), service1_info->vm_id,
				      msg[0], msg[1], msg[2], msg[3], msg[4]);

	/*
	 * Consider the scenario where VM1 allocated CPU cycles to SP1 through
	 * a direct request message but SP1 yields execution back to VM1
	 * instead of busy waiting for an IO operation.
	 */
	EXPECT_EQ(res.func, FFA_YIELD_32);

	/* SP1 id/vCPU index are passed through arg1. */
	EXPECT_EQ(res.arg1, ffa_vm_vcpu(service1_info->vm_id, 0));

	/*
	 * Additionally, SP1 can also specify timeout while yielding cycles
	 * back to VM1. This is a hint to VM1 that it can be resumed upon
	 * expiration of the timeout.
	 * Check for 64-bit timeout specified by SP1 through arg2 and arg3. The
	 * purpose of these checks is just to validate the timeout value but
	 * not to leverage it upon expiration.
	 */
	EXPECT_EQ(res.arg2, 0x1);
	EXPECT_EQ(res.arg3, 0x23456789);

	/* Allocate CPU cycles to resume SP. */
	res = ffa_run(service1_info->vm_id, 0);

	EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_RESP_32);

	EXPECT_EQ(res.arg3, msg[0]);
	EXPECT_EQ(res.arg4, msg[1]);
	EXPECT_EQ(res.arg5, msg[2]);
	EXPECT_EQ(res.arg6, msg[3]);
	EXPECT_EQ(res.arg7, msg[4]);
}

/*
 * Send direct message, verify that sent info is echoed back.
 */
TEST(direct_message, ffa_send_direct_message_req_echo)
{
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_partition_info *service1_info = service1(mb.recv);

	SERVICE_SELECT(service1_info->vm_id, "ffa_direct_message_resp_echo",
		       mb.send);

	ffa_run(service1_info->vm_id, 0);

	echo_test(service1_info->vm_id);
}

/**
 * Initiate direct message request between test SPs.
 * If test services are VMs, test should be skipped.
 */
TEST_PRECONDITION(direct_message, ffa_direct_message_services_echo,
		  service1_and_service2_are_secure)
{
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_partition_info *service1_info = service1(mb.recv);
	struct ffa_partition_info *service2_info = service2(mb.recv);
	ffa_id_t own_id = hf_vm_get_id();
	struct ffa_value ret;

	/* Run service2 for it to wait for a request from service1. */
	SERVICE_SELECT(service2_info->vm_id, "ffa_direct_message_resp_echo",
		       mb.send);
	ffa_run(service2_info->vm_id, 0);

	/* Service1 requests echo from service2. */
	SERVICE_SELECT(service1_info->vm_id, "ffa_direct_message_echo_services",
		       mb.send);

	/* Send to service1 the FF-A ID of the target for its message. */
	ret = send_indirect_message(own_id, service1_info->vm_id, mb.send,
				    &service2_info->vm_id,
				    sizeof(service2_info->vm_id), 0);
	ASSERT_EQ(ret.func, FFA_SUCCESS_32);
	ffa_run(service1_info->vm_id, 0);
}

/**
 * Initiate direct message request between two Secure Partitions. Configure
 * the second SP in the call chain to yield cycles received from first SP
 * through direct message request. The first SP is equipped to reallocate
 * CPU cycles to resume the direct message processing.
 */
TEST_PRECONDITION(direct_message, ffa_direct_message_services_yield_echo,
		  service1_and_service2_are_secure)
{
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_partition_info *service1_info = service1(mb.recv);
	struct ffa_partition_info *service2_info = service2(mb.recv);
	ffa_id_t own_id = hf_vm_get_id();
	struct ffa_value ret;

	/* Run service2 for it to wait for a request from service1. */
	SERVICE_SELECT(service2_info->vm_id,
		       "ffa_yield_direct_message_resp_echo", mb.send);
	ffa_run(service2_info->vm_id, 0);

	/* Service1 requests echo from service2. */
	SERVICE_SELECT(service1_info->vm_id,
		       "ffa_yield_direct_message_echo_services", mb.send);

	/* Send to service1 the FF-A ID of the target for its message. */
	ret = send_indirect_message(own_id, service1_info->vm_id, mb.send,
				    &service2_info->vm_id,
				    sizeof(service2_info->vm_id), 0);
	ASSERT_EQ(ret.func, FFA_SUCCESS_32);

	ret = ffa_run(service1_info->vm_id, 0);
	EXPECT_EQ(ret.func, FFA_YIELD_32);
}

/**
 * If Hafnium is the hypervisor, and service1 is a VM:
 * - Service verifies disallowed SMC invocations while ffa_msg_send_direct_req
 * is being serviced.
 * If Hafnium is deployed as the SPMC and service1 is an SP:
 * - Validate the state transitions permitted under the RTM_FFA_DIR_REQ
 * partition runtime model.
 */
TEST(direct_message, ffa_send_direct_message_req_disallowed_smc)
{
	const uint32_t msg[] = {0x00001111, 0x22223333, 0x44445555, 0x66667777,
				0x88889999};
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_value res;
	struct ffa_partition_info *service1_info = service1(mb.recv);

	SERVICE_SELECT(service1_info->vm_id,
		       "ffa_direct_msg_req_disallowed_smc", mb.send);
	ffa_run(service1_info->vm_id, 0);

	res = ffa_msg_send_direct_req(hf_vm_get_id(), service1_info->vm_id,
				      msg[0], msg[1], msg[2], msg[3], msg[4]);

	EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_RESP_32);
}

/**
 * Send direct message to invalid destination.
 */
TEST(direct_message, ffa_send_direct_message_req_invalid_dst)
{
	const uint32_t msg[] = {0x00001111, 0x22223333, 0x44445555, 0x66667777,
				0x88889999};
	struct ffa_value res;

	res = ffa_msg_send_direct_req(hf_vm_get_id(), HF_PRIMARY_VM_ID, msg[0],
				      msg[1], msg[2], msg[3], msg[4]);

	EXPECT_FFA_ERROR(res, FFA_INVALID_PARAMETERS);
}

/**
 * Verify that the primary VM can't send direct message responses.
 */
TEST(direct_message, ffa_send_direct_message_resp_invalid)
{
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_value res;
	struct ffa_partition_info *service1_info = service1(mb.recv);

	SERVICE_SELECT(service1_info->vm_id, "ffa_direct_message_resp_echo",
		       mb.send);
	ffa_run(service1_info->vm_id, 0);

	res = ffa_msg_send_direct_resp(hf_vm_get_id(), service1_info->vm_id, 0,
				       0, 0, 0, 0);
	EXPECT_FFA_ERROR(res, FFA_INVALID_PARAMETERS);
}

/**
 * This test has two purposes. It runs the test service via ffa_run, and
 * validates that:
 * - If the service is an SP, it can't send a direct message request to a VM in
 * the NWd.
 * - If the service is a secondary VM, it can't invoke a direct message request
 * to the PVM (legacy behavior, for Hafnium as a hypervisor).
 */
TEST(direct_message, ffa_secondary_direct_msg_req_invalid)
{
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_value res;
	struct ffa_partition_info *service1_info = service1(mb.recv);

	SERVICE_SELECT(service1_info->vm_id, "ffa_disallowed_direct_msg_req",
		       mb.send);
	ffa_run(service1_info->vm_id, 0);

	res = ffa_msg_send_direct_req(hf_vm_get_id(), service1_info->vm_id, 0,
				      0, 0, 0, 0);
	EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_RESP_32);
}

/**
 * Run secondary VM without sending a direct message request beforehand.
 * Secondary VM must fail sending a direct message response.
 */
TEST(direct_message, ffa_secondary_direct_msg_resp_invalid)
{
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_value res;
	struct ffa_partition_info *service1_info = service1(mb.recv);

	SERVICE_SELECT(service1_info->vm_id, "ffa_disallowed_direct_msg_resp",
		       mb.send);
	ffa_run(service1_info->vm_id, 0);

	res = ffa_msg_send_direct_req(hf_vm_get_id(), service1_info->vm_id, 0,
				      0, 0, 0, 0);
	EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_RESP_32);
}

/**
 * Run secondary VM and send a direct message request. Secondary VM attempts
 * altering the sender and receiver in its direct message responses, and must
 * fail to do so.
 */
TEST(direct_message, ffa_secondary_spoofed_response)
{
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_value res;
	struct ffa_partition_info *service1_info = service1(mb.recv);

	SERVICE_SELECT(service1_info->vm_id,
		       "ffa_direct_msg_resp_invalid_sender_receiver", mb.send);
	ffa_run(service1_info->vm_id, 0);

	res = ffa_msg_send_direct_req(hf_vm_get_id(), service1_info->vm_id, 0,
				      0, 0, 0, 0);
	EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_RESP_32);
}

/*
 * Validate that the creation of a cyclic dependency via direct messaging
 * is not possible.
 * The test only makes sense in the scope of validating the SPMC, as the
 * hypervisor limits direct message requests to be invoked only from the
 * primary VM. Thus, use a precondition that checks that both test services
 * involved are SPs.
 */
TEST_PRECONDITION(direct_message, fail_if_cyclic_dependency,
		  service1_and_service2_are_secure)
{
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_partition_info *service1_info = service1(mb.recv);
	struct ffa_partition_info *service2_info = service2(mb.recv);
	ffa_id_t own_id = hf_vm_get_id();
	struct ffa_value ret;

	/* Run service2 for it to wait for a request from service1. */
	SERVICE_SELECT(service2_info->vm_id, "ffa_direct_message_cycle_denied",
		       mb.send);
	ffa_run(service2_info->vm_id, 0);

	/* Service1 requests echo from service2. */
	SERVICE_SELECT(service1_info->vm_id, "ffa_direct_message_echo_services",
		       mb.send);

	/* Send to service1 the FF-A ID of the target for its message. */
	ret = send_indirect_message(own_id, service1_info->vm_id, mb.send,
				    &service2_info->vm_id,
				    sizeof(service2_info->vm_id), 0);

	ASSERT_EQ(ret.func, FFA_SUCCESS_32);
	EXPECT_EQ(ffa_run(service1_info->vm_id, 0).func, FFA_YIELD_32);
}

/**
 * Send direct message via FFA_MSG_SEND_DIRECT_REQ2, verify that sent info is
 * echoed back.
 */
TEST(direct_message, ffa_send_direct_message_req2_echo)
{
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_partition_info *service1_info = service1(mb.recv);
	struct ffa_uuid target_uuid = SERVICE1;

	SERVICE_SELECT(service1_info->vm_id,
		       "ffa_direct_message_req2_resp_echo", mb.send);
	ffa_run(service1_info->vm_id, 0);

	echo_test_req2(service1_info->vm_id, target_uuid);
}

/**
 * Send a direct message to a VM/SP. Expect it to yield its CPU cycles.
 * Allocate cycles through FFA_RUN and verify that the sent info is echoed
 * back.
 */
TEST(direct_message, ffa_send_direct_message_req2_yield_echo)
{
	const uint64_t msg[] = {0x00001111, 0x22223333, 0x44445555, 0x66667777,
				0x88889999, 0x01010101, 0x23232323, 0x45454545,
				0x67676767, 0x89898989, 0x11001100, 0x22332233,
				0x44554455, 0x66776677};
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_value res;
	struct ffa_partition_info *service1_info = service1(mb.recv);
	struct ffa_uuid uuid = SERVICE1;

	SERVICE_SELECT(service1_info->vm_id,
		       "ffa_yield_direct_message_resp2_echo", mb.send);
	ffa_run(service1_info->vm_id, 0);

	res = ffa_msg_send_direct_req2(hf_vm_get_id(), service1_info->vm_id,
				       &uuid, (const uint64_t *)&msg,
				       ARRAY_SIZE(msg));

	/*
	 * Consider the scenario where VM1 allocated CPU cycles to service1
	 * through a direct request message but service1 yields execution back
	 * to VM1 instead of busy waiting for an IO operation.
	 */
	EXPECT_EQ(res.func, FFA_YIELD_32);

	/* Service1 id/vCPU index are passed through arg1. */
	EXPECT_EQ(res.arg1, ffa_vm_vcpu(service1_info->vm_id, 0));

	/*
	 * Additionally, service1 can also specify timeout while yielding cycles
	 * back to VM1. This is a hint to VM1 that it can be resumed upon
	 * expiration of the timeout.
	 * Check for 64-bit timeout specified by service1 through arg2 and arg3.
	 * The purpose of these checks is just to validate the timeout value but
	 * not to leverage it upon expiration.
	 */
	EXPECT_EQ(res.arg2, 0x1);
	EXPECT_EQ(res.arg3, 0x23456789);

	/* Allocate CPU cycles to resume service. */
	res = ffa_run(service1_info->vm_id, 0);

	EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_RESP2_64);

	EXPECT_EQ(res.arg4, msg[0]);
	EXPECT_EQ(res.arg5, msg[1]);
	EXPECT_EQ(res.arg6, msg[2]);
	EXPECT_EQ(res.arg7, msg[3]);
	EXPECT_EQ(res.extended_val.arg8, msg[4]);
	EXPECT_EQ(res.extended_val.arg9, msg[5]);
	EXPECT_EQ(res.extended_val.arg10, msg[6]);
	EXPECT_EQ(res.extended_val.arg11, msg[7]);
	EXPECT_EQ(res.extended_val.arg12, msg[8]);
	EXPECT_EQ(res.extended_val.arg13, msg[9]);
	EXPECT_EQ(res.extended_val.arg14, msg[10]);
	EXPECT_EQ(res.extended_val.arg15, msg[11]);
	EXPECT_EQ(res.extended_val.arg16, msg[12]);
	EXPECT_EQ(res.extended_val.arg17, msg[13]);
}

/**
 * Initiate direct message request between test SPs.
 * If test services are VMs, test should be skipped.
 */
TEST_PRECONDITION(direct_message, ffa_direct_message_req2_services_echo,
		  service1_and_service2_are_secure)
{
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_partition_info *service1_info = service1(mb.recv);
	struct ffa_partition_info *service2_info = service2(mb.recv);
	ffa_id_t own_id = hf_vm_get_id();
	struct ffa_value ret;
	const struct ffa_uuid service2_uuid = SERVICE2;

	/* Run service2 for it to wait for a request from service1. */
	SERVICE_SELECT(service2_info->vm_id,
		       "ffa_direct_message_req2_resp_echo", mb.send);
	ffa_run(service2_info->vm_id, 0);

	/* Service1 requests echo from service2. */
	SERVICE_SELECT(service1_info->vm_id,
		       "ffa_direct_message_req2_echo_services", mb.send);

	/* Send to service1 the uuid of the target for its message. */
	ret = send_indirect_message(own_id, service1_info->vm_id, mb.send,
				    &service2_uuid, sizeof(service2_uuid), 0);
	ASSERT_EQ(ret.func, FFA_SUCCESS_32);
	ffa_run(service1_info->vm_id, 0);
}

/**
 * Initiate direct message request between two Secure Partitions. Configure
 * the second SP in the call chain to yield cycles received from first SP
 * through direct message request. The first SP is equipped to reallocate
 * CPU cycles to resume the direct message processing.
 */
TEST_PRECONDITION(direct_message, ffa_direct_message_req2_services_yield_echo,
		  service1_and_service2_are_secure)
{
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_partition_info *service1_info = service1(mb.recv);
	struct ffa_partition_info *service2_info = service2(mb.recv);
	ffa_id_t own_id = hf_vm_get_id();
	struct ffa_value ret;
	const struct ffa_uuid service2_uuid = SERVICE2;

	/* Run service2 for it to wait for a request from service1. */
	SERVICE_SELECT(service2_info->vm_id,
		       "ffa_yield_direct_message_resp2_echo", mb.send);
	ffa_run(service2_info->vm_id, 0);

	/* Service1 requests echo from service2. */
	SERVICE_SELECT(service1_info->vm_id,
		       "ffa_yield_direct_message_v_1_2_echo_services", mb.send);

	/* Send to service1 the UUID of the target for its message. */
	ret = send_indirect_message(own_id, service1_info->vm_id, mb.send,
				    &service2_uuid, sizeof(service2_uuid), 0);
	ASSERT_EQ(ret.func, FFA_SUCCESS_32);

	ret = ffa_run(service1_info->vm_id, 0);
	EXPECT_EQ(ret.func, FFA_YIELD_32);
}

/**
 * If Hafnium is the hypervisor, and service1 is a VM:
 * - Service verifies disallowed SMC invocations while ffa_msg_send_direct_req
 * is being serviced.
 *
 * If Hafnium is deployed as the SPMC and service1 is an SP:
 * - Validate the state transitions permitted under the RTM_FFA_DIR_REQ
 * partition runtime model.
 */
TEST(direct_message, ffa_send_direct_message_req2_disallowed_smc)
{
	const uint32_t msg[] = {0x00001111, 0x22223333, 0x44445555, 0x66667777,
				0x88889999};
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_value res;
	struct ffa_partition_info *service1_info = service1(mb.recv);
	const struct ffa_uuid service1_uuid = SERVICE1;

	SERVICE_SELECT(service1_info->vm_id,
		       "ffa_direct_msg_req2_disallowed_smc", mb.send);
	ffa_run(service1_info->vm_id, 0);

	res = ffa_msg_send_direct_req2(hf_vm_get_id(), service1_info->vm_id,
				       &service1_uuid, (const uint64_t *)&msg,
				       ARRAY_SIZE(msg));

	EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_RESP2_64);
}

/**
 * Send direct message via FFA_MSG_SEND_DIRECT_REQ2 targeting an invalid UUID.
 */
TEST(direct_message, ffa_send_direct_message_req2_invalid_uuid)
{
	const uint64_t msg[] = {0x00001111, 0x22223333, 0x44445555, 0x66667777,
				0x88889999, 0x01010101, 0x23232323, 0x45454545,
				0x67676767, 0x89898989, 0x11001100, 0x22332233,
				0x44554455, 0x66776677};
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_value res;
	struct ffa_partition_info *service1_info = service1(mb.recv);
	struct ffa_uuid uuid;

	/* Non-existent UUID. */
	ffa_uuid_init(1, 1, 1, 1, &uuid);

	res = ffa_msg_send_direct_req2(hf_vm_get_id(), service1_info->vm_id,
				       &uuid, (const uint64_t *)&msg,
				       ARRAY_SIZE(msg));
	EXPECT_FFA_ERROR(res, FFA_INVALID_PARAMETERS);

	/* UUID for a different partition than the given FF-A ID. */
	uuid = SERVICE2;
	res = ffa_msg_send_direct_req2(hf_vm_get_id(), service1_info->vm_id,
				       &uuid, (const uint64_t *)&msg,
				       ARRAY_SIZE(msg));
	EXPECT_FFA_ERROR(res, FFA_INVALID_PARAMETERS);
}

/**
 * Verify that the primary VM can't send direct message responses
 * via FFA_MSG_SEND_DIRECT_RESP2_64.
 */
TEST(direct_message, ffa_send_direct_message_resp2_invalid)
{
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_value res;
	struct ffa_partition_info *service1_info = service1(mb.recv);
	const uint64_t msg[] = {1, 2, 3, 4, 5};

	SERVICE_SELECT(service1_info->vm_id,
		       "ffa_direct_message_req2_resp_echo", mb.send);
	ffa_run(service1_info->vm_id, 0);

	res = ffa_msg_send_direct_resp2(hf_vm_get_id(), service1_info->vm_id,
					(const uint64_t *)&msg,
					ARRAY_SIZE(msg));
	EXPECT_FFA_ERROR(res, FFA_INVALID_PARAMETERS);
}

/**
 * The test runs the test service via ffa_run, and validates that:
 * - If the service is an SP, it can't send a direct message request to a VM in
 * the NWd.
 *
 * Legacy case for a secondary VM:
 * - If the service is a secondary VM, it can't invoke a direct message request
 * to the PVM (legacy behavior, for Hafnium as a hypervisor).
 */
TEST(direct_message, ffa_secondary_direct_msg_req2_invalid)
{
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_value res;
	struct ffa_partition_info *service1_info = service1(mb.recv);
	const struct ffa_uuid service1_uuid = SERVICE1;
	uint64_t msg[MAX_RESP_REGS] = {0};
	struct ffa_uuid own_uuid = PVM;

	SERVICE_SELECT(service1_info->vm_id, "ffa_disallowed_direct_msg_req2",
		       mb.send);
	res = send_indirect_message(hf_vm_get_id(), service1_info->vm_id,
				    mb.send, &own_uuid, sizeof(own_uuid), 0);
	ASSERT_EQ(res.func, FFA_SUCCESS_32);
	ffa_run(service1_info->vm_id, 0);

	res = ffa_msg_send_direct_req2(hf_vm_get_id(), service1_info->vm_id,
				       &service1_uuid, (const uint64_t *)&msg,
				       ARRAY_SIZE(msg));
	EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_RESP2_64);
}

/**
 * Run secondary VM without sending a direct message request beforehand.
 * Secondary VM must fail sending a direct message response.
 */
TEST(direct_message, ffa_secondary_direct_msg_resp2_invalid)
{
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_value res;
	struct ffa_partition_info *service1_info = service1(mb.recv);
	struct ffa_uuid service1_uuid = SERVICE1;
	uint64_t msg[MAX_RESP_REGS] = {0};

	SERVICE_SELECT(service1_info->vm_id, "ffa_disallowed_direct_msg_resp2",
		       mb.send);
	ffa_run(service1_info->vm_id, 0);

	res = ffa_msg_send_direct_req2(hf_vm_get_id(), service1_info->vm_id,
				       &service1_uuid, (uint64_t *)msg,
				       ARRAY_SIZE(msg));

	EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_RESP2_64);
}

/**
 * Run secondary VM and send a direct message request. Secondary VM attempts
 * altering the sender and receiver in its direct message responses, and must
 * fail to do so.
 */
TEST(direct_message, ffa_secondary_spoofed_response2)
{
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_value res;
	struct ffa_partition_info *service1_info = service1(mb.recv);
	struct ffa_uuid service1_uuid = SERVICE1;
	uint64_t msg[MAX_RESP_REGS] = {0};

	SERVICE_SELECT(service1_info->vm_id,
		       "ffa_direct_msg_resp2_invalid_sender_receiver", mb.send);
	ffa_run(service1_info->vm_id, 0);

	res = ffa_msg_send_direct_req2(hf_vm_get_id(), service1_info->vm_id,
				       &service1_uuid, (uint64_t *)msg,
				       ARRAY_SIZE(msg));
	EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_RESP2_64);
}

/**
 * Validate that the creation of a cyclic dependency via the direct messaging
 * interfaces introduced in FF-A v1.2 is not possible. The test only makes sense
 * in the scope of validating the SPMC, as the hypervisor limits direct message
 * requests to be invoked only from the primary VM. Thus, use a precondition
 * that checks that both test services involved are SPs.
 */
TEST_PRECONDITION(direct_message, fail_if_cyclic_dependency_v1_2,
		  service1_and_service2_are_secure)
{
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_partition_info *service1_info = service1(mb.recv);
	struct ffa_partition_info *service2_info = service2(mb.recv);
	ffa_id_t own_id = hf_vm_get_id();
	struct ffa_uuid service1_uuid = SERVICE1;
	struct ffa_uuid service2_uuid = SERVICE2;
	struct ffa_value ret;

	/*
	 * Run service2 for it to wait for a request from service1 after
	 * receiving indirect message containing uuid.
	 */
	SERVICE_SELECT(service2_info->vm_id,
		       "ffa_direct_message_v_1_2_cycle_denied", mb.send);

	/* Send to service2 the uuid of service1 for its attempted message. */
	ret = send_indirect_message(own_id, service2_info->vm_id, mb.send,
				    &service1_uuid, sizeof(service1_uuid), 0);
	ASSERT_EQ(ret.func, FFA_SUCCESS_32);
	ffa_run(service2_info->vm_id, 0);

	/* Service1 requests echo from service2. */
	SERVICE_SELECT(service1_info->vm_id,
		       "ffa_direct_message_req2_echo_services", mb.send);

	/* Send to service1 the uuid of the target for its message. */
	ret = send_indirect_message(own_id, service1_info->vm_id, mb.send,
				    &service2_uuid, sizeof(service2_uuid), 0);

	ASSERT_EQ(ret.func, FFA_SUCCESS_32);
	EXPECT_EQ(ffa_run(service1_info->vm_id, 0).func, FFA_YIELD_32);
}

/**
 * Send a direct message request via FFA_MSG_SEND_DIRECT_REQ2 to each of the
 * target partition's UUIDs and verify that the sent info is echoed back.
 */
// NOLINTNEXTLINE(readability-function-size)
TEST_PRECONDITION(direct_message, ffa_send_direct_message_req2_multiple_uuids,
		  service1_and_service2_are_secure)
{
	const uint64_t msg[] = {0x00001111, 0x22223333, 0x44445555, 0x66667777,
				0x88889999, 0x01010101, 0x23232323, 0x45454545,
				0x67676767, 0x89898989, 0x11001100, 0x22332233,
				0x44554455, 0x66776677};
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_value res;
	struct ffa_partition_info *service2_info = service2(mb.recv);
	struct ffa_uuid uuid = SERVICE2_UUID2;

	SERVICE_SELECT(service2_info->vm_id,
		       "ffa_direct_message_req2_resp_loop", mb.send);
	ffa_run(service2_info->vm_id, 0);

	res = ffa_msg_send_direct_req2(hf_vm_get_id(), service2_info->vm_id,
				       &uuid, (const uint64_t *)&msg,
				       ARRAY_SIZE(msg));

	EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_RESP2_64);

	EXPECT_EQ(res.arg4, msg[0]);
	EXPECT_EQ(res.arg5, msg[1]);
	EXPECT_EQ(res.arg6, msg[2]);
	EXPECT_EQ(res.arg7, msg[3]);
	EXPECT_EQ(res.extended_val.arg8, msg[4]);
	EXPECT_EQ(res.extended_val.arg9, msg[5]);
	EXPECT_EQ(res.extended_val.arg10, msg[6]);
	EXPECT_EQ(res.extended_val.arg11, msg[7]);
	EXPECT_EQ(res.extended_val.arg12, msg[8]);
	EXPECT_EQ(res.extended_val.arg13, msg[9]);
	EXPECT_EQ(res.extended_val.arg14, msg[10]);
	EXPECT_EQ(res.extended_val.arg15, msg[11]);
	EXPECT_EQ(res.extended_val.arg16, msg[12]);
	EXPECT_EQ(res.extended_val.arg17, msg[13]);

	uuid = SERVICE2;

	res = ffa_msg_send_direct_req2(hf_vm_get_id(), service2_info->vm_id,
				       &uuid, (const uint64_t *)&msg,
				       ARRAY_SIZE(msg));

	EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_RESP2_64);

	EXPECT_EQ(res.arg4, msg[0]);
	EXPECT_EQ(res.arg5, msg[1]);
	EXPECT_EQ(res.arg6, msg[2]);
	EXPECT_EQ(res.arg7, msg[3]);
	EXPECT_EQ(res.extended_val.arg8, msg[4]);
	EXPECT_EQ(res.extended_val.arg9, msg[5]);
	EXPECT_EQ(res.extended_val.arg10, msg[6]);
	EXPECT_EQ(res.extended_val.arg11, msg[7]);
	EXPECT_EQ(res.extended_val.arg12, msg[8]);
	EXPECT_EQ(res.extended_val.arg13, msg[9]);
	EXPECT_EQ(res.extended_val.arg14, msg[10]);
	EXPECT_EQ(res.extended_val.arg15, msg[11]);
	EXPECT_EQ(res.extended_val.arg16, msg[12]);
	EXPECT_EQ(res.extended_val.arg17, msg[13]);
}

/**
 * Test that a request sent via:
 * - FFA_MSG_SEND_DIRECT_REQ2 cannot be completed by FFA_MSG_SEND_DIRECT_RESP
 * - FFA_MSG_SEND_DIRECT_REQ cannot be completed by FFA_MSG_SEND_DIRECT_RESP2
 */
TEST(direct_message, ffa_direct_msg_check_abi_pairs_nwd_to_sp)
{
	const uint64_t msg[] = {0x00001111, 0x22223333, 0x44445555, 0x66667777,
				0x88889999};
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_value res;
	struct ffa_partition_info *service1_info = service1(mb.recv);
	struct ffa_partition_info *service2_info = service2(mb.recv);
	struct ffa_uuid uuid1 = SERVICE1;

	/* Setup Service1 to respond with FFA_MSG_SEND_DIRECT_RESP ABI. */
	SERVICE_SELECT(service1_info->vm_id, "ffa_direct_msg_req2_resp_failure",
		       mb.send);
	ffa_run(service1_info->vm_id, 0);

	/* Send a direct request with FFA_MSG_SEND_DIRECT_REQ2. */
	res = ffa_msg_send_direct_req2(hf_vm_get_id(), service1_info->vm_id,
				       &uuid1, (const uint64_t *)&msg,
				       ARRAY_SIZE(msg));

	EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_RESP2_64);

	/* Set up Service2 to respond with FFA_MSG_SEND_DIRECT_RESP2 ABI. */
	SERVICE_SELECT(service2_info->vm_id, "ffa_direct_msg_req_resp2_failure",
		       mb.send);
	ffa_run(service2_info->vm_id, 0);

	/*
	 * Send a direct request with FFA_MSG_SEND_DIRECT_REQ and expect
	 * failure.
	 */
	res = ffa_msg_send_direct_req(hf_vm_get_id(), service2_info->vm_id,
				      msg[0], msg[1], msg[2], msg[3], msg[4]);

	EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_RESP_32);
}

/**
 * Ensure that an SP that enters the waiting state with FFA_MSG_SEND_DIRECT_RESP
 * can preserve extended registers when resumed by FFA_MSG_SEND_DIRECT_REQ2.
 *
 * Run twice to cover the reverse scenario - SP enters waiting state with
 * FFA_MSG_SEND_DIRECT_RESP2 and is resumed by FFA_MSG_SEND_DIRECT_REQ.
 */
// NOLINTNEXTLINE(readability-function-size)
TEST(direct_message, ffa_direct_message_req2_after_req)
{
	const uint64_t msg[] = {0x00001111, 0x22223333, 0x44445555, 0x66667777,
				0x88889999, 0x01010101, 0x23232323, 0x45454545,
				0x67676767, 0x89898989, 0x11001100, 0x22332233,
				0x44554455, 0x66776677};
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_value res;
	struct ffa_partition_info *service1_info = service1(mb.recv);
	struct ffa_uuid uuid1 = SERVICE1;

	SERVICE_SELECT(service1_info->vm_id,
		       "ffa_direct_msg_resp_ext_registers_preserved", mb.send);
	ffa_run(service1_info->vm_id, 0);

	for (uint32_t i = 0; i < 2; i++) {
		/* Send a direct request with FFA_MSG_SEND_DIRECT_REQ. */
		res = ffa_msg_send_direct_req(hf_vm_get_id(),
					      service1_info->vm_id, msg[0],
					      msg[1], msg[2], msg[3], msg[4]);

		EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_RESP_32);

		EXPECT_EQ(res.arg3, msg[0]);
		EXPECT_EQ(res.arg4, msg[1]);
		EXPECT_EQ(res.arg5, msg[2]);
		EXPECT_EQ(res.arg6, msg[3]);
		EXPECT_EQ(res.arg7, msg[4]);
		EXPECT_EQ(res.extended_val.arg8, 0);
		EXPECT_EQ(res.extended_val.arg9, 0);
		EXPECT_EQ(res.extended_val.arg10, 0);
		EXPECT_EQ(res.extended_val.arg11, 0);
		EXPECT_EQ(res.extended_val.arg12, 0);
		EXPECT_EQ(res.extended_val.arg13, 0);
		EXPECT_EQ(res.extended_val.arg14, 0);
		EXPECT_EQ(res.extended_val.arg15, 0);
		EXPECT_EQ(res.extended_val.arg16, 0);
		EXPECT_EQ(res.extended_val.arg17, 0);

		/* Send a direct request with FFA_MSG_SEND_DIRECT_REQ2. */
		res = ffa_msg_send_direct_req2(
			hf_vm_get_id(), service1_info->vm_id, &uuid1,
			(const uint64_t *)&msg, ARRAY_SIZE(msg));

		EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_RESP2_64);
		EXPECT_EQ(res.arg4, msg[0]);
		EXPECT_EQ(res.arg5, msg[1]);
		EXPECT_EQ(res.arg6, msg[2]);
		EXPECT_EQ(res.arg7, msg[3]);
		EXPECT_EQ(res.extended_val.arg8, msg[4]);
		EXPECT_EQ(res.extended_val.arg9, msg[5]);
		EXPECT_EQ(res.extended_val.arg10, msg[6]);
		EXPECT_EQ(res.extended_val.arg11, msg[7]);
		EXPECT_EQ(res.extended_val.arg12, msg[8]);
		EXPECT_EQ(res.extended_val.arg13, msg[9]);
		EXPECT_EQ(res.extended_val.arg14, msg[10]);
		EXPECT_EQ(res.extended_val.arg15, msg[11]);
		EXPECT_EQ(res.extended_val.arg16, msg[12]);
		EXPECT_EQ(res.extended_val.arg17, msg[13]);
	}
}

/**
 * Test showing that an FF-A v1.1 endpoint (service4) cannot send a direct
 * request via FFA_MSG_SEND_DIRECT_REQ2.
 */
TEST_PRECONDITION(direct_message,
		  ffa_msg_send_direct_req2_send_v1_1_not_supported,
		  service1_is_not_vm)
{
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_partition_info *service4_info = service4(mb.recv);
	ffa_id_t own_id = hf_vm_get_id();
	struct ffa_value ret;
	const struct ffa_uuid service1_uuid = SERVICE1;

	/*
	 * Service4 requests echo from service1.
	 * Request sent via FFA_MSG_SEND_DIRECT_REQ2 should fail as Service4
	 * FF-A version is < FF-A v1.2.
	 */
	SERVICE_SELECT(service4_info->vm_id, "version_does_not_support_req2",
		       mb.send);

	/* Send to service4 the uuid of the target for its message. */
	ret = send_indirect_message(own_id, service4_info->vm_id, mb.send,
				    &service1_uuid, sizeof(service1_uuid), 0);
	ASSERT_EQ(ret.func, FFA_SUCCESS_32);
	ffa_run(service4_info->vm_id, 0);
}

/**
 * Test showing that an FF-A v1.1 endpoint (service3) cannot receive a direct
 * request via FFA_MSG_SEND_DIRECT_REQ2.
 *
 * Also show that an FF-A v1.2 endpoint (service4) that does not specify receipt
 * of direct requests via FFA_MSG_SEND_DIRECT_REQ2 in its manifest cannot
 * receive a direct request via this function ID.
 */
TEST_PRECONDITION(direct_message, ffa_msg_send_direct_req2_recv_not_supported,
		  service1_and_service2_are_secure)
{
	const uint64_t msg[] = {0x00001111, 0x22223333, 0x44445555, 0x66667777,
				0x88889999, 0x01010101, 0x23232323, 0x45454545,
				0x67676767, 0x89898989, 0x11001100, 0x22332233,
				0x44554455, 0x66776677};
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_partition_info *service3_info = service3(mb.recv);
	struct ffa_partition_info *service4_info = service4(mb.recv);
	struct ffa_value res;
	const struct ffa_uuid service3_uuid = SERVICE3;
	const struct ffa_uuid service4_uuid = SERVICE4;

	/* Send a direct request with FFA_MSG_SEND_DIRECT_REQ2. */
	res = ffa_msg_send_direct_req2(hf_vm_get_id(), service3_info->vm_id,
				       &service3_uuid, (const uint64_t *)&msg,
				       ARRAY_SIZE(msg));
	EXPECT_FFA_ERROR(res, FFA_DENIED);

	SERVICE_SELECT(service4_info->vm_id,
		       "ffa_direct_message_req2_resp_echo", mb.send);
	ffa_run(service4_info->vm_id, 0);

	/* Send a direct request with FFA_MSG_SEND_DIRECT_REQ2. */
	res = ffa_msg_send_direct_req2(hf_vm_get_id(), service4_info->vm_id,
				       &service4_uuid, (const uint64_t *)&msg,
				       ARRAY_SIZE(msg));
	EXPECT_FFA_ERROR(res, FFA_DENIED);
}

/**
 * Validate that the creation of a cyclic dependency via combined usage of
 * FFA_MSG_SEND_DIRECT_REQ and FFA_MSG_SEND_DIRECT_REQ2 is not possible. The
 * test only makes sense in the scope of validating the SPMC, as the hypervisor
 * limits direct message requests to be invoked only from the primary VM.
 * Thus, use a precondition that checks that both test services involved are
 * SPs.
 */
TEST_PRECONDITION(direct_message, fail_if_cyclic_dependency_req_req2,
		  service1_and_service2_are_secure)
{
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_partition_info *service1_info = service1(mb.recv);
	struct ffa_partition_info *service2_info = service2(mb.recv);
	ffa_id_t own_id = hf_vm_get_id();
	struct ffa_uuid service1_uuid = SERVICE1;
	struct ffa_value ret;

	/*
	 * Run service2 for it to wait for a request from service1 after
	 * receiving indirect message containing uuid.
	 */
	SERVICE_SELECT(service2_info->vm_id,
		       "ffa_direct_message_cycle_req_req2_denied", mb.send);

	/* Send to service2 the uuid of service1 for its attempted message. */
	ret = send_indirect_message(own_id, service2_info->vm_id, mb.send,
				    &service1_uuid, sizeof(service1_uuid), 0);
	ASSERT_EQ(ret.func, FFA_SUCCESS_32);
	ffa_run(service2_info->vm_id, 0);

	/* Service1 requests echo from service2. */
	SERVICE_SELECT(service1_info->vm_id, "ffa_direct_message_echo_services",
		       mb.send);

	/* Send to service1 the FF-A ID of the target for its message. */
	ret = send_indirect_message(own_id, service1_info->vm_id, mb.send,
				    &service2_info->vm_id,
				    sizeof(service2_info->vm_id), 0);

	ASSERT_EQ(ret.func, FFA_SUCCESS_32);
	EXPECT_EQ(ffa_run(service1_info->vm_id, 0).func, FFA_YIELD_32);
}

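/**
 * Entry point for the secondary core used by the multi-core echo tests:
 * selects the echo service variant matching the requested ABI, allocates it
 * CPU cycles, performs the echo and signals completion to the primary core.
 */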
static void cpu_entry_echo_mp(uintptr_t arg)
{
	struct echo_test_secondary_cpu_entry_args *args =
		// NOLINTNEXTLINE(performance-no-int-to-ptr)
		(struct echo_test_secondary_cpu_entry_args *)arg;
	ffa_vcpu_index_t service_vcpu_id;

	ASSERT_TRUE(args != NULL);

	HFTEST_LOG("Within secondary core... %u", args->vcpu_id);

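	/* Target vCPU 0 if the receiver is UP, otherwise the matching vCPU. */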
	service_vcpu_id = (args->receiver_vcpu_count > 1) ? args->vcpu_id : 0;

	if (args->req_func == FFA_MSG_SEND_DIRECT_REQ_32) {
		SERVICE_SELECT_MP(args->receiver_id,
				  "ffa_direct_message_resp_echo", args->mb.send,
				  service_vcpu_id);
		ffa_run(args->receiver_id, service_vcpu_id);
		echo_test(args->receiver_id);
	} else {
		SERVICE_SELECT_MP(args->receiver_id,
				  "ffa_direct_message_req2_resp_echo",
				  args->mb.send, service_vcpu_id);
		ffa_run(args->receiver_id, service_vcpu_id);
		echo_test_req2(args->receiver_id, args->receiver_uuid);
	}

	HFTEST_LOG("Done with secondary core...");

	/* Signal to primary core that test is complete. */
	semaphore_signal(&args->sync);

	arch_cpu_stop();
}

/**
 * Test validating direct messaging via FFA_MSG_SEND_DIRECT_REQ/RESP
 * between secondary cores.
 */
TEST_PRECONDITION(direct_message, echo_mp, service1_is_not_vm)
{
	struct mailbox_buffers mb_mp = set_up_mailbox();
	struct ffa_partition_info *service1_info = service1(mb_mp.recv);
	const ffa_vcpu_index_t vcpu_id = 1;
	struct echo_test_secondary_cpu_entry_args args = {
		.req_func = FFA_MSG_SEND_DIRECT_REQ_32,
		.receiver_id = service1_info->vm_id,
		.receiver_uuid = SERVICE1,
		.receiver_vcpu_count = service1_info->vcpu_count,
		.vcpu_id = vcpu_id,
		.mb = mb_mp};

	/*
	 * Initialize semaphore for synchronization purposes between primary and
	 * secondary core.
	 */
	semaphore_init(&args.sync);

	HFTEST_LOG("Starting secondary core...");

	ASSERT_TRUE(hftest_cpu_start(hftest_get_cpu_id(vcpu_id),
				     hftest_get_secondary_ec_stack(vcpu_id),
				     cpu_entry_echo_mp, (uintptr_t)&args));

	/* Wait for secondary core to return before finishing the test. */
	semaphore_wait(&args.sync);

	HFTEST_LOG("Finished the test...");
}

/**
 * Test validating direct messaging via FFA_MSG_SEND_DIRECT_REQ2/RESP2
 * between secondary cores.
 */
TEST_PRECONDITION(direct_message, echo_mp_req2, service1_is_not_vm)
{
	struct mailbox_buffers mb_mp = set_up_mailbox();
	struct ffa_partition_info *service1_info = service1(mb_mp.recv);
	const ffa_vcpu_index_t vcpu_id = 1;
	struct echo_test_secondary_cpu_entry_args args = {
		.req_func = FFA_MSG_SEND_DIRECT_REQ2_64,
		.receiver_id = service1_info->vm_id,
		.receiver_uuid = SERVICE1,
		.receiver_vcpu_count = service1_info->vcpu_count,
		.vcpu_id = vcpu_id,
		.mb = mb_mp};

	/*
	 * Initialize semaphore for synchronization purposes between primary and
	 * secondary core.
	 */
	semaphore_init(&args.sync);

	HFTEST_LOG("Starting secondary core...");

	ASSERT_TRUE(hftest_cpu_start(hftest_get_cpu_id(vcpu_id),
				     hftest_get_secondary_ec_stack(vcpu_id - 1),
				     cpu_entry_echo_mp, (uintptr_t)&args));

	/* Wait for secondary core to return before finishing the test. */
	semaphore_wait(&args.sync);

	HFTEST_LOG("Finished the test...");
}

/**
 * Helper for sending a VM availability message and asserting on the response.
 *
 * NOTE: This is intended for hypervisor messages according to the spec, but we
 * are using it from the primary VM because it is more convenient and we care
 * about testing the SPMC component. Hypervisor implementation was changed to
 * forward these requests from the PVM.
 */
void assert_vm_availability_message(
	ffa_id_t sender_id, ffa_id_t receiver_id, ffa_id_t vm_id,
	enum ffa_framework_msg_func framework_func, uint32_t response_ffa_func,
	ffa_id_t response_sender_id, ffa_id_t response_receiver_id,
	enum ffa_framework_msg_func response_framework_func,
	enum ffa_error response_status)
{
	struct ffa_value res;

	res = ffa_framework_msg_send_direct_req(sender_id, receiver_id,
						framework_func, vm_id);

	EXPECT_EQ(res.func, response_ffa_func);

	/* Sender and receiver endpoint IDs. */
	EXPECT_EQ(ffa_sender(res), response_receiver_id);
	EXPECT_EQ(ffa_receiver(res), response_sender_id);

	/* Message flags. */
	EXPECT_EQ(res.arg2, FFA_FRAMEWORK_MSG_BIT | response_framework_func);

	/* Status code. */
	EXPECT_EQ((enum ffa_error)res.arg3, response_status);
}

/** Assert that a VM availability message is successful. */
void assert_vm_availability_message_success(
	ffa_id_t sender_id, ffa_id_t receiver_id, ffa_id_t vm_id,
	enum ffa_framework_msg_func framework_func)
{
	assert_vm_availability_message(sender_id, receiver_id, vm_id,
				       framework_func,
				       FFA_MSG_SEND_DIRECT_RESP_32, sender_id,
				       receiver_id, framework_func + 1, 0);
}

/**
 * Assert that a VM availability message is successfully delivered to the SP,
 * but the SP responds with `FFA_INVALID_PARAMETERS` because of an invalid state
 * transition.
 */
void assert_vm_availability_message_invalid_transition(
	ffa_id_t sender_id, ffa_id_t receiver_id, ffa_id_t vm_id,
	enum ffa_framework_msg_func framework_func)
{
	assert_vm_availability_message(
		sender_id, receiver_id, vm_id, framework_func,
		FFA_MSG_SEND_DIRECT_RESP_32, sender_id, receiver_id,
		framework_func + 1, FFA_INVALID_PARAMETERS);
}

/**
 * Assert that a VM availability message is not delivered to the SP.
 */
void assert_vm_availability_message_not_delivered(
	ffa_id_t sender_id, ffa_id_t receiver_id, ffa_id_t vm_id,
	enum ffa_framework_msg_func framework_func)
{
	assert_vm_availability_message(
		sender_id, receiver_id, vm_id, framework_func, FFA_ERROR_32, 0,
		0, (enum ffa_framework_msg_func)FFA_INVALID_PARAMETERS, 0);
}

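/*
 * The tests below exercise the VM availability state machine as seen by a
 * subscribed SP: a VM starts out Unavailable, becomes Available on a
 * "VM created" message and Unavailable again on "VM destroyed", while any
 * out-of-order message moves it to an Error state that it does not leave.
 */
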
/**
 * VM state: Unavailable
 * Message: VM created
 * New state: Available
 */
TEST_PRECONDITION(vm_availability_messaging, vm_unavailable_created,
		  service1_is_secure)
{
	struct mailbox_buffers mb = set_up_mailbox();
	ffa_id_t sender_id = hf_vm_get_id();
	ffa_id_t receiver_id = service1(mb.recv)->vm_id;
	ffa_id_t vm_id = VM_ID(1);

	SERVICE_SELECT(receiver_id, "vm_availability_messaging", mb.send);
	ffa_run(receiver_id, 0);

	assert_vm_availability_message_success(
		sender_id, receiver_id, vm_id,
		FFA_FRAMEWORK_MSG_VM_CREATION_REQ);
}

/**
 * VM state: Available
 * Message: VM created
 * New state: Error
 */
TEST_PRECONDITION(vm_availability_messaging, vm_available_created,
		  service1_is_secure)
{
	struct mailbox_buffers mb = set_up_mailbox();
	ffa_id_t sender_id = hf_vm_get_id();
	ffa_id_t receiver_id = service1(mb.recv)->vm_id;
	ffa_id_t vm_id = VM_ID(1);

	SERVICE_SELECT(receiver_id, "vm_availability_messaging", mb.send);
	ffa_run(receiver_id, 0);

	assert_vm_availability_message_success(
		sender_id, receiver_id, vm_id,
		FFA_FRAMEWORK_MSG_VM_CREATION_REQ);

	assert_vm_availability_message_invalid_transition(
		sender_id, receiver_id, vm_id,
		FFA_FRAMEWORK_MSG_VM_CREATION_REQ);
}

/**
 * VM state: Unavailable
 * Message: VM destroyed
 * New state: Error
 */
TEST_PRECONDITION(vm_availability_messaging, vm_unavailable_destroyed,
		  service1_is_secure)
{
	struct mailbox_buffers mb = set_up_mailbox();
	ffa_id_t sender_id = hf_vm_get_id();
	ffa_id_t receiver_id = service1(mb.recv)->vm_id;
	ffa_id_t vm_id = VM_ID(1);

	SERVICE_SELECT(receiver_id, "vm_availability_messaging", mb.send);
	ffa_run(receiver_id, 0);

	assert_vm_availability_message_invalid_transition(
		sender_id, receiver_id, vm_id,
		FFA_FRAMEWORK_MSG_VM_DESTRUCTION_REQ);
}

/**
 * VM state: Available
 * Message: VM destroyed
 * New state: Unavailable
 */
TEST_PRECONDITION(vm_availability_messaging, vm_available_destroyed,
		  service1_is_secure)
{
	struct mailbox_buffers mb = set_up_mailbox();
	ffa_id_t sender_id = hf_vm_get_id();
	ffa_id_t receiver_id = service1(mb.recv)->vm_id;
	ffa_id_t vm_id = VM_ID(1);

	SERVICE_SELECT(receiver_id, "vm_availability_messaging", mb.send);
	ffa_run(receiver_id, 0);

	assert_vm_availability_message_success(
		sender_id, receiver_id, vm_id,
		FFA_FRAMEWORK_MSG_VM_CREATION_REQ);

	assert_vm_availability_message_success(
		sender_id, receiver_id, vm_id,
		FFA_FRAMEWORK_MSG_VM_DESTRUCTION_REQ);
}

/**
 * VM state: Error
 * Message: VM created
 * New state: Error
 */
TEST_PRECONDITION(vm_availability_messaging, vm_error_created,
		  service1_is_secure)
{
	struct mailbox_buffers mb = set_up_mailbox();
	ffa_id_t sender_id = hf_vm_get_id();
	ffa_id_t receiver_id = service1(mb.recv)->vm_id;
	ffa_id_t vm_id = VM_ID(1);

	SERVICE_SELECT(receiver_id, "vm_availability_messaging", mb.send);
	ffa_run(receiver_id, 0);

	assert_vm_availability_message_invalid_transition(
		sender_id, receiver_id, vm_id,
		FFA_FRAMEWORK_MSG_VM_DESTRUCTION_REQ);

	assert_vm_availability_message_invalid_transition(
		sender_id, receiver_id, vm_id,
		FFA_FRAMEWORK_MSG_VM_CREATION_REQ);
}

/**
 * VM state: Error
 * Message: VM destroyed
 * New state: Error
 */
TEST_PRECONDITION(vm_availability_messaging, vm_error_destroyed,
		  service1_is_secure)
{
	struct mailbox_buffers mb = set_up_mailbox();
	ffa_id_t sender_id = hf_vm_get_id();
	ffa_id_t receiver_id = service1(mb.recv)->vm_id;
	ffa_id_t vm_id = VM_ID(1);

	SERVICE_SELECT(receiver_id, "vm_availability_messaging", mb.send);
	ffa_run(receiver_id, 0);

	assert_vm_availability_message_invalid_transition(
		sender_id, receiver_id, vm_id,
		FFA_FRAMEWORK_MSG_VM_DESTRUCTION_REQ);

	assert_vm_availability_message_invalid_transition(
		sender_id, receiver_id, vm_id,
		FFA_FRAMEWORK_MSG_VM_DESTRUCTION_REQ);
}

/**
 * Multiple SPs can receive VM availability messages, and each SP has its own
 * set of VM states.
 */
TEST_PRECONDITION(vm_availability_messaging, multiple_sps,
		  service1_and_service2_are_secure)
{
	struct mailbox_buffers mb = set_up_mailbox();
	ffa_id_t sender_id = hf_vm_get_id();
	ffa_id_t sp1 = service1(mb.recv)->vm_id;
	ffa_id_t sp2 = service2(mb.recv)->vm_id;
	ffa_id_t vm_id = VM_ID(1);

	SERVICE_SELECT(sp1, "vm_availability_messaging", mb.send);
	ffa_run(sp1, 0);

	SERVICE_SELECT(sp2, "vm_availability_messaging", mb.send);
	ffa_run(sp2, 0);

	assert_vm_availability_message_success(
		sender_id, sp1, vm_id, FFA_FRAMEWORK_MSG_VM_CREATION_REQ);

	assert_vm_availability_message_success(
		sender_id, sp2, vm_id, FFA_FRAMEWORK_MSG_VM_CREATION_REQ);
}

/**
 * If the SP is not subscribed, any VM availability message should not be
 * delivered.
 */
TEST_PRECONDITION(vm_availability_messaging, sp_not_subscribed,
		  service1_and_service2_are_secure)
{
	struct mailbox_buffers mb = set_up_mailbox();
	ffa_id_t sender_id = hf_vm_get_id();
	struct ffa_partition_info *sp3_info = service3(mb.recv);
	ffa_id_t receiver_id = sp3_info->vm_id;
	ffa_id_t vm_id = VM_ID(1);

	EXPECT_EQ(sp3_info->properties & FFA_PARTITION_VM_CREATED, 0);
	EXPECT_EQ(sp3_info->properties & FFA_PARTITION_VM_DESTROYED, 0);

	SERVICE_SELECT(receiver_id, "vm_availability_messaging", mb.send);
	ffa_run(receiver_id, 0);

	assert_vm_availability_message_not_delivered(
		sender_id, receiver_id, vm_id,
		FFA_FRAMEWORK_MSG_VM_CREATION_REQ);

	assert_vm_availability_message_not_delivered(
		sender_id, receiver_id, vm_id,
		FFA_FRAMEWORK_MSG_VM_DESTRUCTION_REQ);
}

/*
 * Check that SPs cannot send VM availability messages.
 */
TEST_PRECONDITION(vm_availability_messaging, sp_cannot_send_messages,
		  service1_is_secure)
{
	struct mailbox_buffers mb = set_up_mailbox();
	ffa_id_t sender_id = hf_vm_get_id();
	ffa_id_t receiver_id = service1(mb.recv)->vm_id;
	ffa_id_t vm_id = VM_ID(1);

	struct ffa_value res;

	SERVICE_SELECT(receiver_id, "vm_availability_messaging_send_from_sp",
		       mb.send);
	ffa_run(receiver_id, 0);

	res = ffa_msg_send_direct_req(sender_id, receiver_id,
				      FFA_FRAMEWORK_MSG_VM_CREATION_REQ, vm_id,
				      0, 0, 0);
	EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_RESP_32);
	EXPECT_EQ(res.arg3, FFA_ERROR_32);
	EXPECT_EQ((enum ffa_error)res.arg5, FFA_INVALID_PARAMETERS);

	res = ffa_msg_send_direct_req(sender_id, receiver_id,
				      FFA_FRAMEWORK_MSG_VM_DESTRUCTION_REQ,
				      vm_id, 0, 0, 0);
	EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_RESP_32);
	EXPECT_EQ(res.arg3, FFA_ERROR_32);
	EXPECT_EQ((enum ffa_error)res.arg5, FFA_INVALID_PARAMETERS);
}

/*
 * Check that SPs cannot send non-framework messages in response to a VM
 * availability message.
 */
TEST_PRECONDITION(vm_availability_messaging,
		  sp_cannot_send_non_framework_messages, service1_is_secure)
{
	struct mailbox_buffers mb = set_up_mailbox();
	ffa_id_t sender_id = hf_vm_get_id();
	ffa_id_t receiver_id = service1(mb.recv)->vm_id;
	ffa_id_t vm_id = VM_ID(1);

	struct ffa_value res;

	SERVICE_SELECT(receiver_id,
		       "vm_availability_messaging_send_non_framework_from_sp",
		       mb.send);
	ffa_run(receiver_id, 0);

	res = ffa_framework_msg_send_direct_req(
		sender_id, receiver_id, FFA_FRAMEWORK_MSG_VM_CREATION_REQ,
		vm_id);

	EXPECT_EQ(res.func, FFA_MSG_SEND_DIRECT_RESP_32);
	EXPECT_EQ(res.arg2,
		  FFA_FRAMEWORK_MSG_BIT | FFA_FRAMEWORK_MSG_VM_CREATION_RESP);
	EXPECT_EQ(res.arg3, 0);
}