/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa.h"
#include "hf/ffa_v1_0.h"

#include "ffa_secure_partitions.h"
#include "partition_services.h"
#include "test/hftest.h"
#include "test/vmapi/ffa.h"

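/*
 * Statically allocated buffer shared with the SPs. alignas(PAGE_SIZE)
 * keeps the constituent base addresses below page-aligned, as FF-A
 * memory region descriptors require.
 */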
alignas(PAGE_SIZE) static uint8_t
	pages[FRAGMENTED_SHARE_PAGE_COUNT * PAGE_SIZE];

SET_UP(memory_sharing_v1_2)
{
	EXPECT_EQ(ffa_version(FFA_VERSION_1_2), FFA_VERSION_COMPILED);
}

SET_UP(memory_sharing_v1_0)
{
	EXPECT_EQ(ffa_version(FFA_VERSION_1_0), FFA_VERSION_COMPILED);
}

/** Test sharing memory from a v1.2 VM to a v1.0 SP. */
TEST(memory_sharing_v1_2, share_ffa_v1_2_to_v1_0)
{
	struct ffa_value ret;
	struct mailbox_buffers mb = set_up_mailbox();

	struct ffa_partition_info *service1_info = service1(mb.recv);
	const ffa_id_t receiver_id = service1_info->vm_id;
	const ffa_id_t sender_id = hf_vm_get_id();

	struct ffa_memory_region_constituent constituents[] = {
		{.address = (uint64_t)pages, .page_count = 1},
	};

	struct ffa_memory_access receiver_v1_2;
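	/*
	 * FF-A v1.1+ access descriptors carry an implementation-defined
	 * field; the two values used here are arbitrary test values that
	 * only exercise the encoding.
	 */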
	struct ffa_memory_access_impdef impdef =
		ffa_memory_access_impdef_init(receiver_id, receiver_id + 1);

	uint32_t total_length;
	uint32_t fragment_length;
	uint32_t remaining_constituent_count;
	ffa_memory_handle_t handle;

	/* Initialise the memory before sharing it. */
	for (uint32_t i = 0; i < PAGE_SIZE; i++) {
		pages[i] = i;
	}

	ffa_memory_access_init(&receiver_v1_2, receiver_id, FFA_DATA_ACCESS_RW,
			       FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, 0,
			       &impdef);

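	/*
	 * The full descriptor fits in the TX mailbox, so expect a single
	 * fragment covering the whole transaction and no constituents
	 * left over.
	 */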
	remaining_constituent_count = ffa_memory_region_init(
		mb.send, HF_MAILBOX_SIZE, sender_id, &receiver_v1_2, 1,
		sizeof(struct ffa_memory_access), constituents,
		ARRAY_SIZE(constituents), 0, 0, FFA_MEMORY_NORMAL_MEM,
		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_INNER_SHAREABLE,
		&total_length, &fragment_length);
	EXPECT_EQ(remaining_constituent_count, 0);
	EXPECT_EQ(fragment_length, total_length);

	ret = ffa_mem_share(total_length, fragment_length);
	EXPECT_EQ(ret.func, FFA_SUCCESS_32);
	handle = ffa_mem_success_handle(ret);
	EXPECT_NE(handle, FFA_MEMORY_HANDLE_INVALID);

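	/*
	 * Ask the SP to retrieve the region using the v1.0 descriptor
	 * format, so the SPMC must convert the v1.2 descriptor for it.
	 */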
	ret = sp_ffa_mem_retrieve_cmd_send(sender_id, receiver_id, handle,
					   FFA_VERSION_1_0);
	EXPECT_EQ(ret.func, FFA_MSG_SEND_DIRECT_RESP_32);
	EXPECT_EQ(sp_resp(ret), SP_SUCCESS);

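	/* The SP service is expected to increment every byte it retrieves. */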
	for (uint32_t i = 0; i < PAGE_SIZE; i++) {
		uint8_t val = i + 1;
		ASSERT_EQ(pages[i], val);
	}
}

/** Test sharing memory from a v1.0 VM to a v1.2 SP. */
TEST(memory_sharing_v1_0, share_ffa_v1_0_to_v1_2)
{
	struct ffa_value ret;
	struct mailbox_buffers mb = set_up_mailbox();

	struct ffa_partition_info *service2_info = service2(mb.recv);
	const ffa_id_t receiver_id = service2_info->vm_id;
	const ffa_id_t sender_id = hf_vm_get_id();

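	/*
	 * Two non-contiguous ranges (pages 0-1 and page 3) give the
	 * descriptor more than one constituent.
	 */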
	struct ffa_memory_region_constituent constituents[] = {
		{.address = (uint64_t)pages, .page_count = 2},
		{.address = (uint64_t)pages + PAGE_SIZE * 3, .page_count = 1},
	};

	struct ffa_memory_access_v1_0 receiver_v1_0;

	uint32_t total_length;
	uint32_t fragment_length;
	uint32_t remaining_constituent_count;
	ffa_memory_handle_t handle;

	/* Initialise the memory before sharing it. */
	for (uint32_t i = 0; i < PAGE_SIZE; i++) {
		pages[i] = i;
	}

	ffa_memory_access_init_v1_0(&receiver_v1_0, receiver_id,
				    FFA_DATA_ACCESS_RW,
				    FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, 0);

	/* Construct the memory region descriptor in the v1.0 format. */
	remaining_constituent_count = ffa_memory_region_init_v1_0(
		mb.send, HF_MAILBOX_SIZE, sender_id, &receiver_v1_0, 1,
		constituents, ARRAY_SIZE(constituents), 0, 0,
		FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
		FFA_MEMORY_INNER_SHAREABLE, &total_length, &fragment_length);

	EXPECT_EQ(remaining_constituent_count, 0);
	EXPECT_EQ(total_length, fragment_length);

	ret = ffa_mem_share(total_length, fragment_length);
	EXPECT_EQ(ret.func, FFA_SUCCESS_32);
	handle = ffa_mem_success_handle(ret);
	EXPECT_NE(handle, FFA_MEMORY_HANDLE_INVALID);

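	/*
	 * The SP retrieves using the suite's compiled FF-A version, so
	 * the SPMC converts the v1.0 transaction descriptor to the newer
	 * format on retrieval.
	 */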
	ret = sp_ffa_mem_retrieve_cmd_send(sender_id, receiver_id, handle,
					   FFA_VERSION_COMPILED);
	EXPECT_EQ(ret.func, FFA_MSG_SEND_DIRECT_RESP_32);
	EXPECT_EQ(sp_resp(ret), SP_SUCCESS);

	for (uint32_t i = 0; i < PAGE_SIZE; i++) {
		uint8_t val = i + 1;
		ASSERT_EQ(pages[i], val);
	}
}

/** Test fragmented memory sharing from a v1.0 VM to a v1.0 SP. */
TEST(memory_sharing_v1_0, force_fragmented_ffa_v1_0)
{
	struct ffa_value ret;
	struct mailbox_buffers mb = set_up_mailbox();

	struct ffa_partition_info *service1_info = service1(mb.recv);
	const ffa_id_t receiver_id = service1_info->vm_id;
	const ffa_id_t sender_id = hf_vm_get_id();

	struct ffa_memory_region_constituent constituents[] = {
		{.address = (uint64_t)pages, .page_count = 2},
		{.address = (uint64_t)pages + PAGE_SIZE * 3, .page_count = 1},
	};

	struct ffa_memory_access_v1_0 receiver_v1_0;

	uint32_t total_length;
	uint32_t fragment_length;
	uint32_t remaining_constituent_count;
	ffa_memory_handle_t handle;
	uint64_t allocator_mask;

	/* Initialise the memory before sharing it. */
	for (uint32_t i = 0; i < PAGE_SIZE; i++) {
		pages[i] = i;
	}

	ffa_memory_access_init_v1_0(&receiver_v1_0, receiver_id,
				    FFA_DATA_ACCESS_RW,
				    FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, 0);

	/* Construct the memory region descriptor in the v1.0 format. */
	remaining_constituent_count = ffa_memory_region_init_v1_0(
		mb.send, HF_MAILBOX_SIZE, sender_id, &receiver_v1_0, 1,
		constituents, ARRAY_SIZE(constituents), 0, 0,
		FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
		FFA_MEMORY_INNER_SHAREABLE, &total_length, &fragment_length);

	EXPECT_EQ(remaining_constituent_count, 0);
	EXPECT_EQ(total_length, fragment_length);

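	/*
	 * Force fragmentation: when the first fragment is shorter than
	 * the total length, the SPMC answers with FFA_MEM_FRAG_RX and
	 * the remainder must follow in further fragments.
	 */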
	/* Don't include the last constituent in the first fragment. */
	fragment_length -= sizeof(struct ffa_memory_region_constituent);
	remaining_constituent_count = 1;

	ret = ffa_mem_share(total_length, fragment_length);
	EXPECT_EQ(ret.func, FFA_MEM_FRAG_RX_32);
	handle = ffa_frag_handle(ret);
	EXPECT_NE(handle, FFA_MEMORY_HANDLE_INVALID);

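	/*
	 * The handle encodes which component allocated it. Since at
	 * least one endpoint is an SP, it must come from the SPMC's
	 * allocator rather than the hypervisor's.
	 */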
	ASSERT_TRUE(!ffa_is_vm_id(sender_id) ||
		    !ffa_is_vm_id(service1_info->vm_id));
	allocator_mask = FFA_MEMORY_HANDLE_ALLOCATOR_SPMC;

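	/* Transmit the remaining constituent in a follow-up fragment
	 * (FFA_MEM_FRAG_TX) and obtain the final handle. */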
	send_fragmented_memory_region(
		&ret, mb.send, constituents, ARRAY_SIZE(constituents),
		remaining_constituent_count, fragment_length, total_length,
		&handle, allocator_mask);

	ret = sp_ffa_mem_retrieve_cmd_send(sender_id, receiver_id, handle,
					   FFA_VERSION_1_0);
	EXPECT_EQ(ret.func, FFA_MSG_SEND_DIRECT_RESP_32);
	EXPECT_EQ(sp_resp(ret), SP_SUCCESS);

	for (uint32_t i = 0; i < PAGE_SIZE; i++) {
		uint8_t val = i + 1;
		ASSERT_EQ(pages[i], val);
	}
}