/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa/notifications.h"

#include "hf/arch/other_world.h"

#include "hf/ffa/init.h"
#include "hf/ffa_internal.h"
#include "hf/std.h"
#include "hf/vm.h"

/**
 * Check validity of the calls:
 * FFA_NOTIFICATION_BITMAP_CREATE/FFA_NOTIFICATION_BITMAP_DESTROY.
 */
struct ffa_value ffa_notifications_is_bitmap_access_valid(struct vcpu *current,
							   ffa_id_t vm_id)
{
	/*
	 * The call should only be used by the Hypervisor, so any attempt
	 * to invoke it from NWd FF-A endpoints should fail.
	 */
	(void)current;
	(void)vm_id;

	return ffa_error(FFA_NOT_SUPPORTED);
}

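/**
 * Check validity of an FFA_NOTIFICATION_BIND/FFA_NOTIFICATION_UNBIND request:
 * sender and receiver must differ, and, as Hafnium is the hypervisor, the
 * receiver must be the current VM.
 */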
bool ffa_notifications_is_bind_valid(struct vcpu *current, ffa_id_t sender_id,
				     ffa_id_t receiver_id)
{
	ffa_id_t current_vm_id = current->vm->id;
	/* If Hafnium is hypervisor, receiver needs to be current vm. */
	return sender_id != receiver_id && current_vm_id == receiver_id;
}

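/**
 * Forward an FFA_NOTIFICATION_BIND/FFA_NOTIFICATION_UNBIND request to the
 * SPMC when the receiver is a VM and the sender is an SP. Returns true if the
 * call was forwarded, with the SPMC's response written to `ret`.
 */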
bool ffa_notifications_update_bindings_forward(
	ffa_id_t receiver_id, ffa_id_t sender_id,
	ffa_notification_flags_t flags, ffa_notifications_bitmap_t bitmap,
	bool is_bind, struct ffa_value *ret)
{
	CHECK(ret != NULL);

	if (vm_id_is_current_world(receiver_id) &&
	    !vm_id_is_current_world(sender_id)) {
		dlog_verbose(
			"Forward notifications bind/unbind to other world.\n");
		*ret = arch_other_world_call((struct ffa_value){
			.func = is_bind ? FFA_NOTIFICATION_BIND_32
					: FFA_NOTIFICATION_UNBIND_32,
			.arg1 = (sender_id << 16) | (receiver_id),
			.arg2 = is_bind ? flags : 0U,
			.arg3 = (uint32_t)(bitmap),
			.arg4 = (uint32_t)(bitmap >> 32),
		});
		return true;
	}
	return false;
}

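/**
 * Check validity of an FFA_NOTIFICATION_SET request: as Hafnium is the
 * hypervisor, the sender must be the current VM and must differ from the
 * receiver.
 */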
bool ffa_notifications_is_set_valid(struct vcpu *current, ffa_id_t sender_id,
				    ffa_id_t receiver_id)
{
	ffa_id_t current_vm_id = current->vm->id;

	/* If Hafnium is hypervisor, sender needs to be current vm. */
	return sender_id == current_vm_id && sender_id != receiver_id;
}

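/**
 * Forward an FFA_NOTIFICATION_SET request to the SPMC when the receiver is an
 * SP. Returns true if the call was forwarded, with the SPMC's response
 * written to `ret`.
 */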
bool ffa_notifications_set_forward(ffa_id_t sender_vm_id,
				   ffa_id_t receiver_vm_id, uint32_t flags,
				   ffa_notifications_bitmap_t bitmap,
				   struct ffa_value *ret)
{
	/* Forward only if receiver is an SP. */
	if (vm_id_is_current_world(receiver_vm_id)) {
		return false;
	}

	dlog_verbose("Forwarding notification set to SPMC.\n");

	*ret = arch_other_world_call((struct ffa_value){
		.func = FFA_NOTIFICATION_SET_32,
		.arg1 = (sender_vm_id << 16) | receiver_vm_id,
		.arg2 = flags & ~FFA_NOTIFICATIONS_FLAG_DELAY_SRI,
		.arg3 = (uint32_t)(bitmap),
		.arg4 = (uint32_t)(bitmap >> 32),
	});

	if (ret->func == FFA_ERROR_32) {
		dlog_verbose("Failed to set notifications at SPMC.\n");
	}

	return true;
}

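/**
 * Check validity of an FFA_NOTIFICATION_GET request: as Hafnium is the
 * hypervisor, the receiver must be the current VM.
 */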
bool ffa_notifications_is_get_valid(struct vcpu *current, ffa_id_t receiver_id,
				    ffa_notification_flags_t flags)
{
	ffa_id_t current_vm_id = current->vm->id;

	(void)flags;

	/* If Hafnium is hypervisor, receiver needs to be current vm. */
	return (current_vm_id == receiver_id);
}

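/**
 * FFA_NOTIFICATION_BITMAP_CREATE is not handled here: return
 * FFA_NOT_SUPPORTED.
 */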
struct ffa_value ffa_notifications_bitmap_create(ffa_id_t vm_id,
						 ffa_vcpu_count_t vcpu_count)
{
	(void)vm_id;
	(void)vcpu_count;

	return ffa_error(FFA_NOT_SUPPORTED);
}

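/**
 * FFA_NOTIFICATION_BITMAP_DESTROY is not handled here: return
 * FFA_NOT_SUPPORTED.
 */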
struct ffa_value ffa_notifications_bitmap_destroy(ffa_id_t vm_id)
{
	(void)vm_id;

	return ffa_error(FFA_NOT_SUPPORTED);
}

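/**
 * Request the SPMC to create a notifications bitmap for the given VM, if the
 * TEE is enabled. Returns false if the SPMC reports an error.
 */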
bool ffa_notifications_bitmap_create_call(ffa_id_t vm_id,
					  ffa_vcpu_count_t vcpu_count)
{
	struct ffa_value ret;

	if (ffa_init_is_tee_enabled()) {
		ret = arch_other_world_call((struct ffa_value){
			.func = FFA_NOTIFICATION_BITMAP_CREATE_32,
			.arg1 = vm_id,
			.arg2 = vcpu_count,
		});

		if (ret.func == FFA_ERROR_32) {
			dlog_error(
				"Failed to create notifications bitmap "
				"for VM %#x; error: %#x.\n",
				vm_id, ffa_error_code(ret));
			return false;
		}
	}

	return true;
}

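/**
 * Forward FFA_NOTIFICATION_INFO_GET to the SPMC and unpack the returned lists
 * of endpoint and vCPU IDs into the caller's arrays. If the SPMC reports no
 * pending notifications, or the returned counts fail validation, the IDs are
 * not copied.
 */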
void ffa_notifications_info_get_forward(uint16_t *ids, uint32_t *ids_count,
					uint32_t *lists_sizes,
					uint32_t *lists_count,
					const uint32_t ids_count_max)
{
	CHECK(ids != NULL);
	CHECK(ids_count != NULL);
	CHECK(lists_sizes != NULL);
	CHECK(lists_count != NULL);
	CHECK(ids_count_max == FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);

	uint32_t local_lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS];
	struct ffa_value ret;

	dlog_verbose("Forwarding notification info get to SPMC.\n");

	ret = arch_other_world_call((struct ffa_value){
		.func = FFA_NOTIFICATION_INFO_GET_64,
	});

	if (ret.func == FFA_ERROR_32) {
		dlog_verbose("No notifications returned by SPMC.\n");
		return;
	}

	*lists_count = ffa_notification_info_get_lists_count(ret);

	if (*lists_count > ids_count_max) {
		*lists_count = 0;
		return;
	}

	/*
	 * The count of IDs must be at least the number of lists, to
	 * account for the IDs of the FF-A endpoints themselves. Each
	 * list size is between 0 and 3, and counts the vCPUs of the
	 * endpoint that have pending notifications.
	 * If `lists_count` is already `ids_count_max`, each list size
	 * must be 0.
	 */
	*ids_count = *lists_count;

	for (uint32_t i = 0; i < *lists_count; i++) {
		local_lists_sizes[i] =
			ffa_notification_info_get_list_size(ret, i + 1);

		/*
		 * ... then add each list's size, to account for the
		 * vCPU IDs it carries.
		 */
		*ids_count += local_lists_sizes[i];
	}

	/*
	 * Sanity check the returned `lists_count` and the computed
	 * `ids_count`. If something is wrong, reset the arguments to 0 so
	 * that the hypervisor's handling of FFA_NOTIFICATION_INFO_GET can
	 * proceed without the SPMC's values.
	 */
	if (*ids_count > ids_count_max) {
		*ids_count = 0;
		return;
	}

	/*
	 * Now copy the lists sizes, as the returned sizes have been
	 * validated.
	 */
	memcpy_s(lists_sizes, sizeof(lists_sizes[0]) * ids_count_max,
		 local_lists_sizes, sizeof(local_lists_sizes));

	/* Unpack the notifications info from the return. */
	memcpy_s(ids, sizeof(ids[0]) * ids_count_max, &ret.arg3,
		 sizeof(ret.arg3) * FFA_NOTIFICATIONS_INFO_GET_REGS_RET);
}

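/**
 * Get the notifications pending from SPs for the given receiver VM and vCPU,
 * by forwarding FFA_NOTIFICATION_GET to the SPMC with the SP bitmap flag.
 */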
struct ffa_value ffa_notifications_get_from_sp(
	struct vm_locked receiver_locked, ffa_vcpu_index_t vcpu_id,
	ffa_notifications_bitmap_t *from_sp)
{
	struct ffa_value ret = {.func = FFA_SUCCESS_32};
	ffa_id_t receiver_id = receiver_locked.vm->id;

	assert(from_sp != NULL);

	ret = arch_other_world_call((struct ffa_value){
		.func = FFA_NOTIFICATION_GET_32,
		.arg1 = (vcpu_id << 16) | receiver_id,
		.arg2 = FFA_NOTIFICATION_FLAG_BITMAP_SP,
	});

	if (ret.func == FFA_ERROR_32) {
		return ret;
	}

	*from_sp = ffa_notification_get_from_sp(ret);

	return ret;
}

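/**
 * Get the pending framework notifications for the given receiver VM and vCPU:
 * those signaled by the SPMC (if the TEE is enabled) merged with those
 * pending at the hypervisor.
 */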
struct ffa_value ffa_notifications_get_framework_notifications(
	struct vm_locked receiver_locked, ffa_notifications_bitmap_t *from_fwk,
	ffa_notification_flags_t flags, ffa_vcpu_index_t vcpu_id)
{
	struct ffa_value ret = {.func = FFA_SUCCESS_32};
	ffa_id_t receiver_id = receiver_locked.vm->id;
	ffa_notifications_bitmap_t spm_notifications = 0;

	(void)flags;

	assert(from_fwk != NULL);

	/* Get SPMC notifications. */
	if (ffa_init_is_tee_enabled()) {
		ret = arch_other_world_call((struct ffa_value){
			.func = FFA_NOTIFICATION_GET_32,
			.arg1 = (vcpu_id << 16) | receiver_id,
			.arg2 = FFA_NOTIFICATION_FLAG_BITMAP_SPM,
		});

		if (ffa_func_id(ret) == FFA_ERROR_32) {
			return ret;
		}

		spm_notifications = ffa_notification_get_from_framework(ret);
	}

	/* Merge notifications from SPMC and Hypervisor. */
	*from_fwk = spm_notifications |
		    vm_notifications_framework_get_pending(receiver_locked);

	return ret;
}

/**
 * A hypervisor should send the SRI to the Primary Endpoint. This is not
 * implemented here, as the hypervisor is only of interest to us as a test
 * infrastructure that encompasses the NWd, and we are not interested in
 * testing the flow of notifications between VMs only.
 */
void ffa_notifications_sri_trigger_if_delayed(struct cpu *cpu)
{
	(void)cpu;
}

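/** Not implemented at the hypervisor; see the comment above. */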
void ffa_notifications_sri_trigger_not_delayed(struct cpu *cpu)
{
	(void)cpu;
}

/**
 * Track that a notification was set on the current CPU with the delay SRI
 * flag.
 */
void ffa_notifications_sri_set_delayed(struct cpu *cpu)
{
	(void)cpu;
}