/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include <stdalign.h>
#include <stdint.h>

#include "hf/ffa_v1_0.h"
#include "hf/mm.h"
#include "hf/static_assert.h"
#include "hf/std.h"

#include "vmapi/hf/call.h"

#include "primary_with_secondary.h"
#include "test/hftest.h"
#include "test/vmapi/ffa.h"

static alignas(PAGE_SIZE) uint8_t send_page[PAGE_SIZE];
static alignas(PAGE_SIZE) uint8_t recv_page[PAGE_SIZE];
static_assert(sizeof(send_page) == PAGE_SIZE, "Send page is not a page.");
static_assert(sizeof(recv_page) == PAGE_SIZE, "Recv page is not a page.");

static hf_ipaddr_t send_page_addr = (hf_ipaddr_t)send_page;
static hf_ipaddr_t recv_page_addr = (hf_ipaddr_t)recv_page;

/**
 * Confirms the primary VM has the primary ID.
 */
TEST(hf_vm_get_id, primary_has_primary_id)
{
	EXPECT_EQ(hf_vm_get_id(), HF_PRIMARY_VM_ID);
}

TEAR_DOWN(ffa_partition_info_get)
{
	EXPECT_FFA_ERROR(ffa_rx_release(), FFA_DENIED);
}

/**
 * Confirm there are 3 secondary VMs as well as this primary VM, and that they
 * have the expected number of vCPUs.
 */
TEST(ffa_partition_info_get, three_secondary_vms)
{
	struct mailbox_buffers mb;
	struct ffa_value ret;
	const struct ffa_partition_info *partitions;
	struct ffa_uuid uuid;

	/* A null UUID requests information for all partitions. */
	ffa_uuid_init(0, 0, 0, 0, &uuid);

	/* Try to get partition information before the RX buffer is set up. */
	ret = ffa_partition_info_get(&uuid, 0);
	EXPECT_FFA_ERROR(ret, FFA_BUSY);

	/* Requesting only the partition count, however, should succeed. */
	ret = ffa_partition_info_get(&uuid, FFA_PARTITION_COUNT_FLAG);
	EXPECT_EQ(ret.func, FFA_SUCCESS_32);
	EXPECT_EQ(ret.arg2, 4);

	/* Set up the mailbox (which holds the RX buffer). */
	mb = set_up_mailbox();
	partitions = mb.recv;

	/* Check that the expected partition information is returned. */
	ret = ffa_partition_info_get(&uuid, 0);
	EXPECT_EQ(ret.func, FFA_SUCCESS_32);
	/* Confirm there are 3 FF-A partitions, one with 2 UUIDs. */
	EXPECT_EQ(ret.arg2, 4);

	/* Each of the first three entries is expected to report 8 vCPUs. */
	EXPECT_EQ(partitions[0].vcpu_count, 8);
	EXPECT_EQ(partitions[1].vcpu_count, 8);
	EXPECT_EQ(partitions[2].vcpu_count, 8);

	EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
}
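
/*
 * Illustrative sketch, not part of the upstream test suite: a generic walk
 * over the partition info entries that FFA_PARTITION_INFO_GET writes to the
 * RX buffer. The helper name is hypothetical, and `count` is assumed to be
 * the entry count reported in ret.arg2 above.
 */
static inline uint32_t total_vcpu_count(
	const struct ffa_partition_info *partitions, uint32_t count)
{
	uint32_t total = 0;

	for (uint32_t i = 0; i < count; ++i) {
		total += partitions[i].vcpu_count;
	}

	return total;
}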

/**
 * Confirm that it is an error to get partition info for a nonexistent VM.
 */
TEST(ffa_partition_info_get, invalid_vm_uuid)
{
	struct ffa_value ret;
	struct ffa_uuid uuid;

	/* Try to get partition information for an unrecognized UUID. */
	ffa_uuid_init(0, 0, 0, 1, &uuid);

	ret = ffa_partition_info_get(&uuid, 0);
	EXPECT_FFA_ERROR(ret, FFA_INVALID_PARAMETERS);
}

/**
 * Confirm that a caller which has negotiated FF-A v1.0 receives partition
 * information in the v1.0 descriptor format.
 */
TEST(ffa_partition_info_get, get_v1_0_descriptor)
{
	struct mailbox_buffers mb;
	struct ffa_value ret;
	const struct ffa_partition_info_v1_0 *partitions;
	struct ffa_uuid uuid;

	/* Set the FF-A version to v1.0. */
	EXPECT_EQ(ffa_version(FFA_VERSION_1_0), FFA_VERSION_COMPILED);

	/* A null UUID requests information for all partitions. */
	ffa_uuid_init(0, 0, 0, 0, &uuid);

	/* Try to get partition information before the RX buffer is set up. */
	ret = ffa_partition_info_get(&uuid, 0);
	EXPECT_FFA_ERROR(ret, FFA_BUSY);

	/* Requesting only the partition count, however, should succeed. */
	ret = ffa_partition_info_get(&uuid, FFA_PARTITION_COUNT_FLAG);
	EXPECT_EQ(ret.func, FFA_SUCCESS_32);

	/* Set up the mailbox (which holds the RX buffer). */
	mb = set_up_mailbox();
	partitions = mb.recv;

	/* Check that the expected partition information is returned. */
	ret = ffa_partition_info_get(&uuid, 0);
	EXPECT_EQ(ret.func, FFA_SUCCESS_32);

	/* Confirm there are 3 secondary VMs, one with 2 UUIDs. */
	EXPECT_EQ(ret.arg2, 4);

	EXPECT_EQ(partitions[0].vcpu_count, 8);
	EXPECT_EQ(partitions[1].vcpu_count, 8);
	EXPECT_EQ(partitions[2].vcpu_count, 8);

	EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
}
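
/*
 * Illustrative compile-time check (an assumption, not taken from the
 * upstream suite): the v1.0 descriptor used above omits the UUID field that
 * later versions carry, so it is expected to be strictly smaller than the
 * current ffa_partition_info descriptor. Drop this assert if the layouts
 * differ from this assumption.
 */
static_assert(sizeof(struct ffa_partition_info_v1_0) <
		      sizeof(struct ffa_partition_info),
	      "v1.0 partition info descriptor should be smaller than v1.1");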

/**
 * The primary can't be run by the hypervisor.
 */
TEST(ffa_run, cannot_run_primary)
{
	struct ffa_value res = ffa_run(HF_PRIMARY_VM_ID, 0);
	EXPECT_FFA_ERROR(res, FFA_INVALID_PARAMETERS);
}

/**
 * Can only run a VM that exists.
 */
TEST(ffa_run, cannot_run_absent_secondary)
{
	struct ffa_value res = ffa_run(1234, 0);
	EXPECT_FFA_ERROR(res, FFA_INVALID_PARAMETERS);
}

/**
 * Can only run a vCPU that exists.
 */
TEST(ffa_run, cannot_run_absent_vcpu)
{
	struct ffa_value res = ffa_run(SERVICE_VM1, 1234);
	EXPECT_FFA_ERROR(res, FFA_INVALID_PARAMETERS);
}

TEAR_DOWN(ffa_rxtx_map)
{
	EXPECT_FFA_ERROR(ffa_rx_release(), FFA_DENIED);
}

/**
 * The configured send/receive addresses can't be device memory.
 */
TEST(ffa_rxtx_map, fails_with_device_memory)
{
	EXPECT_FFA_ERROR(ffa_rxtx_map(PAGE_SIZE, PAGE_SIZE * 2),
			 FFA_INVALID_PARAMETERS);
}

/**
 * The configured send/receive addresses can't be unaligned.
 */
TEST(ffa_rxtx_map, fails_with_unaligned_pointer)
{
	uint8_t maybe_aligned[2];
	hf_ipaddr_t unaligned_addr = (hf_ipaddr_t)&maybe_aligned[1];
	hf_ipaddr_t aligned_addr = (hf_ipaddr_t)send_page;

	/* Check that the address is unaligned. */
	ASSERT_EQ(unaligned_addr & 1, 1);

	EXPECT_FFA_ERROR(ffa_rxtx_map(aligned_addr, unaligned_addr),
			 FFA_INVALID_PARAMETERS);
	EXPECT_FFA_ERROR(ffa_rxtx_map(unaligned_addr, aligned_addr),
			 FFA_INVALID_PARAMETERS);
	EXPECT_FFA_ERROR(ffa_rxtx_map(unaligned_addr, unaligned_addr),
			 FFA_INVALID_PARAMETERS);
}

/**
 * The configured send/receive addresses can't be the same page.
 */
TEST(ffa_rxtx_map, fails_with_same_page)
{
	EXPECT_FFA_ERROR(ffa_rxtx_map(send_page_addr, send_page_addr),
			 FFA_INVALID_PARAMETERS);
	EXPECT_FFA_ERROR(ffa_rxtx_map(recv_page_addr, recv_page_addr),
			 FFA_INVALID_PARAMETERS);
}

/**
 * The configuration of the send/receive addresses can only happen once.
 */
TEST(ffa_rxtx_map, fails_if_already_succeeded)
{
	EXPECT_EQ(ffa_rxtx_map(send_page_addr, recv_page_addr).func,
		  FFA_SUCCESS_32);
	EXPECT_FFA_ERROR(ffa_rxtx_map(send_page_addr, recv_page_addr),
			 FFA_DENIED);
}

/**
 * The configuration of the send/receive addresses is successful with valid
 * arguments.
 */
TEST(ffa_rxtx_map, succeeds)
{
	EXPECT_EQ(ffa_rxtx_map(send_page_addr, recv_page_addr).func,
		  FFA_SUCCESS_32);
}

/**
 * The primary receives messages via the return value of ffa_run(), so it
 * cannot block waiting for one with ffa_msg_wait().
 */
TEST(hf_mailbox_receive, cannot_receive_from_primary_blocking)
{
	struct ffa_value res = ffa_msg_wait();
	EXPECT_NE(res.func, FFA_SUCCESS_32);
}

/**
 * The primary receives messages via the return value of ffa_run(), so it
 * cannot poll for one with ffa_msg_poll().
 */
TEST(hf_mailbox_receive, cannot_receive_from_primary_non_blocking)
{
	struct ffa_value res = ffa_msg_poll();
	EXPECT_NE(res.func, FFA_SUCCESS_32);
}

/**
 * The buffer pair can be successfully unmapped from a VM that has
 * just created the mapping.
 */
TEST(ffa_rxtx_unmap, succeeds)
{
	EXPECT_EQ(ffa_rxtx_map(send_page_addr, recv_page_addr).func,
		  FFA_SUCCESS_32);
	EXPECT_EQ(ffa_rxtx_unmap().func, FFA_SUCCESS_32);
}

/**
 * Unmap will fail if no mapping exists for the VM.
 */
TEST(ffa_rxtx_unmap, fails_if_no_mapping)
{
	EXPECT_FFA_ERROR(ffa_rxtx_unmap(), FFA_INVALID_PARAMETERS);
}

/**
 * A buffer pair cannot be unmapped multiple times.
 */
TEST(ffa_rxtx_unmap, fails_if_already_unmapped)
{
	EXPECT_EQ(ffa_rxtx_map(send_page_addr, recv_page_addr).func,
		  FFA_SUCCESS_32);
	EXPECT_EQ(ffa_rxtx_unmap().func, FFA_SUCCESS_32);
	EXPECT_FFA_ERROR(ffa_rxtx_unmap(), FFA_INVALID_PARAMETERS);
}

/**
 * Test that a region can be remapped after it has been unmapped.
 */
TEST(ffa_rxtx_unmap, succeeds_in_remapping_region)
{
	EXPECT_EQ(ffa_rxtx_map(send_page_addr, recv_page_addr).func,
		  FFA_SUCCESS_32);
	EXPECT_EQ(ffa_rxtx_unmap().func, FFA_SUCCESS_32);
	EXPECT_EQ(ffa_rxtx_map(send_page_addr, recv_page_addr).func,
		  FFA_SUCCESS_32);
}

/**
 * The `allocator_id` must be 0 at virtual instances.
 */
TEST(ffa_rxtx_unmap, validate_allocator_id)
{
	struct ffa_value ret;

	EXPECT_EQ(ffa_rxtx_map(send_page_addr, recv_page_addr).func,
		  FFA_SUCCESS_32);

	/* Set the `allocator_id`, which MBZ at virtual instances. */
	ret = ffa_call(
		(struct ffa_value){.func = FFA_RXTX_UNMAP_32,
				   .arg1 = 1ULL << FFA_RXTX_ALLOCATOR_SHIFT});
	EXPECT_FFA_ERROR(ret, FFA_INVALID_PARAMETERS);

	EXPECT_EQ(ffa_rxtx_unmap().func, FFA_SUCCESS_32);
}
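
/*
 * Illustrative sketch, not part of the upstream suite: shows how an
 * allocator ID would be packed into arg1 of FFA_RXTX_UNMAP. At a virtual
 * FF-A instance the field must be zero, which is what the test above relies
 * on when it deliberately passes a non-zero value. The helper name is
 * hypothetical.
 */
static inline struct ffa_value ffa_rxtx_unmap_with_allocator(
	uint64_t allocator_id)
{
	return ffa_call((struct ffa_value){
		.func = FFA_RXTX_UNMAP_32,
		.arg1 = allocator_id << FFA_RXTX_ALLOCATOR_SHIFT});
}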