/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa/notifications.h"

#include <stdint.h>

#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/ffa.h"
#include "hf/ffa/direct_messaging.h"
#include "hf/ffa/vm.h"
#include "hf/ffa_internal.h"
#include "hf/plat/interrupts.h"
#include "hf/types.h"
#include "hf/vm.h"

#include "./vm.h"

/** Interrupt priority for the Schedule Receiver Interrupt. */
#define SRI_PRIORITY UINT32_C(0xf0)

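/**
 * Checks that the caller may use the notification bitmap create/destroy
 * interfaces: only the hypervisor may invoke them on the SPMC, and the
 * provided ID must be a valid VM ID.
 */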
struct ffa_value ffa_notifications_is_bitmap_access_valid(struct vcpu *current,
							   ffa_id_t vm_id)
{
	/*
	 * Create/Destroy interfaces to be called by the hypervisor, into the
	 * SPMC.
	 */
	if (current->vm->id != HF_HYPERVISOR_VM_ID) {
		return ffa_error(FFA_NOT_SUPPORTED);
	}

	/* ID provided must be a valid VM ID. */
	if (!ffa_is_vm_id(vm_id)) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	return (struct ffa_value){
		.func = FFA_SUCCESS_32,
	};
}

/**
 * - A bind call cannot be from an SPMD logical partition or target an
 * SPMD logical partition.
 * - If bind call from SP, receiver's ID must be same as current VM ID.
 * - If bind call from NWd, current VM ID must be same as Hypervisor ID,
 * receiver's ID must be from NWd, and sender's ID from SWd.
 */
bool ffa_notifications_is_bind_valid(struct vcpu *current, ffa_id_t sender_id,
				     ffa_id_t receiver_id)
{
	ffa_id_t current_vm_id = current->vm->id;

	if (ffa_direct_msg_is_spmd_lp_id(sender_id) ||
	    ffa_direct_msg_is_spmd_lp_id(receiver_id)) {
		dlog_verbose(
			"Notification bind: not permitted for logical SPs (%x "
			"%x).\n",
			sender_id, receiver_id);
		return false;
	}

	if (sender_id == receiver_id) {
		dlog_verbose(
			"Notification bind: sender can't target itself. (%x "
			"== %x)\n",
			sender_id, receiver_id);
		return false;
	}

	/* Caller is an SP. */
	if (vm_id_is_current_world(current_vm_id)) {
		if (receiver_id != current_vm_id) {
			dlog_verbose(
				"Notification bind: caller (%x) must be the "
				"receiver (%x).\n",
				current_vm_id, receiver_id);
			return false;
		}
	} else {
		assert(current_vm_id == HF_HYPERVISOR_VM_ID);

		if (!vm_id_is_current_world(sender_id) ||
		    vm_id_is_current_world(receiver_id)) {
			dlog_verbose(
				"Notification bind: VM must specify itself as "
				"receiver (%x), and SP as sender (%x).\n",
				receiver_id, sender_id);
			return false;
		}
	}

	return true;
}

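/**
 * Notification binding updates are not forwarded from here: this stub does
 * nothing and returns false to indicate nothing was forwarded.
 */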
bool ffa_notifications_update_bindings_forward(
	ffa_id_t receiver_id, ffa_id_t sender_id,
	ffa_notification_flags_t flags, ffa_notifications_bitmap_t bitmap,
	bool is_bind, struct ffa_value *ret)
{
	(void)receiver_id;
	(void)sender_id;
	(void)flags;
	(void)bitmap;
	(void)is_bind;
	(void)ret;

	return false;
}

/**
 * - A set call cannot be from an SPMD logical partition or target an
 * SPMD logical partition.
 * - If set call from SP, sender's ID must be the same as the current VM ID.
 * - If set call from NWd, current VM ID must be same as Hypervisor ID,
 * and receiver must be an SP.
 */
bool ffa_notifications_is_set_valid(struct vcpu *current, ffa_id_t sender_id,
				    ffa_id_t receiver_id)
{
	ffa_id_t current_vm_id = current->vm->id;

	if (ffa_direct_msg_is_spmd_lp_id(sender_id) ||
	    ffa_direct_msg_is_spmd_lp_id(receiver_id)) {
		dlog_verbose(
			"Notification set: not permitted for logical SPs (%x "
			"%x).\n",
			sender_id, receiver_id);
		return false;
	}

	if (sender_id == receiver_id) {
		dlog_verbose(
			"Notification set: sender can't target itself. (%x == "
			"%x)\n",
			sender_id, receiver_id);
		return false;
	}

	if (vm_id_is_current_world(current_vm_id)) {
		if (sender_id != current_vm_id) {
			dlog_verbose(
				"Notification set: caller (%x) must be the "
				"sender (%x).\n",
				current_vm_id, sender_id);
			return false;
		}
	} else {
		assert(current_vm_id == HF_HYPERVISOR_VM_ID);

		if (vm_id_is_current_world(sender_id) ||
		    !vm_id_is_current_world(receiver_id)) {
			dlog_verbose(
				"Notification set: sender (%x) must be a VM "
				"and receiver (%x) an SP.\n",
				sender_id, receiver_id);
			return false;
		}
	}

	return true;
}

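/**
 * Notification set requests are not forwarded from here: this stub does
 * nothing and returns false to indicate nothing was forwarded.
 */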
bool ffa_notifications_set_forward(ffa_id_t sender_vm_id,
				   ffa_id_t receiver_vm_id,
				   ffa_notification_flags_t flags,
				   ffa_notifications_bitmap_t bitmap,
				   struct ffa_value *ret)
{
	(void)sender_vm_id;
	(void)receiver_vm_id;
	(void)flags;
	(void)bitmap;
	(void)ret;

	return false;
}

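/**
 * Validates the receiver and flags of a notification get request, from the
 * SPMC's point of view.
 */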
bool ffa_notifications_is_get_valid(struct vcpu *current, ffa_id_t receiver_id,
				    ffa_notification_flags_t flags)
{
	ffa_id_t current_vm_id = current->vm->id;
	/*
	 * SPMC:
	 * - A get call cannot be targeted to an SPMD logical partition.
	 * - An SP can ask for its own notifications, or the hypervisor can get
	 * notifications targeted to a VM.
	 */
	bool caller_and_receiver_valid =
		(!ffa_direct_msg_is_spmd_lp_id(receiver_id) &&
		 (current_vm_id == receiver_id)) ||
		(current_vm_id == HF_HYPERVISOR_VM_ID &&
		 !vm_id_is_current_world(receiver_id));

	/*
	 * The flags field is not valid if an NWd endpoint requests
	 * notifications from VMs or the hypervisor. Those are managed by the
	 * hypervisor, if present.
	 */
	bool flags_valid =
		!(ffa_is_vm_id(receiver_id) &&
		  ((flags & FFA_NOTIFICATION_FLAG_BITMAP_VM) != 0U ||
		   (flags & FFA_NOTIFICATION_FLAG_BITMAP_HYP) != 0U));

	return caller_and_receiver_valid && flags_valid;
}

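/**
 * Nothing is forwarded for FFA_NOTIFICATION_INFO_GET here: this is a no-op
 * stub that leaves the output parameters untouched.
 */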
void ffa_notifications_info_get_forward( // NOLINTNEXTLINE
	uint16_t *ids, uint32_t *ids_count, // NOLINTNEXTLINE
	uint32_t *lists_sizes, uint32_t *lists_count,
	const uint32_t ids_count_max)
{
	(void)ids;
	(void)ids_count;
	(void)lists_sizes;
	(void)lists_count;
	(void)ids_count_max;
}

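/**
 * Creates the notifications bitmap for the given VM ID. For HF_OTHER_WORLD_ID
 * this enables notifications for the other world VM; for any other ID it
 * allocates the SPMC's representation of the NWd VM and records its vCPU
 * count.
 */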
struct ffa_value ffa_notifications_bitmap_create(ffa_id_t vm_id,
						 ffa_vcpu_count_t vcpu_count)
{
	struct ffa_value ret = (struct ffa_value){.func = FFA_SUCCESS_32};
	struct vm_locked vm_locked;

	if (vm_id == HF_OTHER_WORLD_ID) {
		/*
		 * If the provided VM ID refers to the Hypervisor, represented
		 * by the other world VM with ID HF_OTHER_WORLD_ID, check
		 * whether the notifications have already been enabled.
		 */

		vm_locked = vm_find_locked(vm_id);

		CHECK(vm_locked.vm != NULL);

		/* The call has already been used for the other world VM. */
		if (vm_locked.vm->notifications.enabled) {
			dlog_verbose("Notification bitmap already created.\n");
			ret = ffa_error(FFA_DENIED);
			goto out;
		}

		/* Enable notifications for `other_world_vm`. */
		vm_locked.vm->notifications.enabled = true;
	} else {
		/* Otherwise the ID must belong to an NWd VM. */
		vm_locked = ffa_vm_nwd_create(vm_id);

		/* If NULL is returned, there are no slots for VM creation. */
		if (vm_locked.vm == NULL) {
			dlog_verbose("No memory to create VM ID %#x.\n", vm_id);
			return ffa_error(FFA_NO_MEMORY);
		}

		/* Ensure the bitmap has not already been created. */
		if (vm_locked.vm->notifications.enabled) {
			dlog_verbose("Notification bitmap already created.\n");
			ret = ffa_error(FFA_DENIED);
			goto out;
		}

		vm_locked.vm->notifications.enabled = true;
		vm_locked.vm->vcpu_count = vcpu_count;
	}

out:
	vm_unlock(&vm_locked);

	return ret;
}

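/**
 * Stub for the bitmap create call: nothing to do here, so simply report
 * success.
 */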
bool ffa_notifications_bitmap_create_call(ffa_id_t vm_id,
					  ffa_vcpu_count_t vcpu_count)
{
	(void)vm_id;
	(void)vcpu_count;

	return true;
}

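/**
 * Destroys the notifications bitmap of the given VM ID: fails if the bitmap
 * was never created, if notifications are disabled, or if any notification
 * is still pending.
 */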
struct ffa_value ffa_notifications_bitmap_destroy(ffa_id_t vm_id)
{
	struct ffa_value ret = {.func = FFA_SUCCESS_32};
	struct vm_locked to_destroy_locked = ffa_vm_find_locked(vm_id);

	if (to_destroy_locked.vm == NULL) {
		dlog_verbose("Bitmap not created for VM: %u\n", vm_id);
		return ffa_error(FFA_DENIED);
	}

	if (!to_destroy_locked.vm->notifications.enabled) {
		dlog_verbose("Notifications disabled for VM: %u\n", vm_id);
		ret = ffa_error(FFA_DENIED);
		goto out;
	}

	/* Check if there is any notification pending. */
	if (vm_are_notifications_pending(to_destroy_locked, false, ~0x0U)) {
		dlog_verbose("VM has notifications pending.\n");
		ret = ffa_error(FFA_DENIED);
		goto out;
	}

	to_destroy_locked.vm->notifications.enabled = false;
	vm_notifications_init(to_destroy_locked.vm,
			      to_destroy_locked.vm->vcpu_count, NULL);
	if (vm_id != HF_OTHER_WORLD_ID) {
		ffa_vm_destroy(to_destroy_locked);
	}

out:
	vm_unlock(&to_destroy_locked);

	return ret;
}

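/**
 * Gets the notifications pending from SPs for the given receiver and vCPU.
 */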
struct ffa_value ffa_notifications_get_from_sp(
	struct vm_locked receiver_locked, ffa_vcpu_index_t vcpu_id,
	ffa_notifications_bitmap_t *from_sp)
{
	*from_sp = vm_notifications_partition_get_pending(receiver_locked,
							  false, vcpu_id);

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

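/**
 * Gets the pending framework notifications of the receiver. The hypervisor
 * framework bitmap flag must be zero when the receiver is an NWd endpoint.
 */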
struct ffa_value ffa_notifications_get_framework_notifications(
	struct vm_locked receiver_locked, ffa_notifications_bitmap_t *from_fwk,
	ffa_notification_flags_t flags, ffa_vcpu_index_t vcpu_id)
{
	assert(from_fwk != NULL);

	(void)vcpu_id;

	if (!vm_id_is_current_world(receiver_locked.vm->id) &&
	    (flags & FFA_NOTIFICATION_FLAG_BITMAP_HYP) != 0U) {
		dlog_error(
			"Notification get flag from hypervisor in call to SPMC "
			"MBZ.\n");
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	*from_fwk = vm_notifications_framework_get_pending(receiver_locked);

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

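/** Sends the Schedule Receiver Interrupt (SRI) SGI to the given CPU. */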
static void ffa_notifications_send_schedule_receiver_interrupt(struct cpu *cpu)
{
	dlog_verbose("Setting Schedule Receiver SGI %u on core: %zu\n",
		     HF_SCHEDULE_RECEIVER_INTID, cpu_index(cpu));

	plat_interrupts_send_sgi(HF_SCHEDULE_RECEIVER_INTID, cpu, false);
}

static void ffa_notifications_sri_set_delayed_internal(struct cpu *cpu,
							bool delayed)
{
	assert(cpu != NULL);
	cpu->is_sri_delayed = delayed;
}

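/**
 * Marks the SRI as delayed on the given CPU; it will be triggered later by
 * ffa_notifications_sri_trigger_if_delayed().
 */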
void ffa_notifications_sri_set_delayed(struct cpu *cpu)
{
	ffa_notifications_sri_set_delayed_internal(cpu, true);
}

static bool ffa_notifications_is_sri_delayed(struct cpu *cpu)
{
	assert(cpu != NULL);
	return cpu->is_sri_delayed;
}

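/**
 * Triggers the SRI on the given CPU if it has been marked as delayed, and
 * clears the delayed flag.
 */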
void ffa_notifications_sri_trigger_if_delayed(struct cpu *cpu)
{
	assert(cpu != NULL);

	if (ffa_notifications_is_sri_delayed(cpu)) {
		ffa_notifications_send_schedule_receiver_interrupt(cpu);
		ffa_notifications_sri_set_delayed_internal(cpu, false);
	}
}

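/** Triggers the SRI immediately and clears any pending delayed state. */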
void ffa_notifications_sri_trigger_not_delayed(struct cpu *cpu)
{
	/*
	 * If the flag to delay the SRI isn't set, trigger the SRI so that the
	 * receiver scheduler is aware there are pending notifications.
	 */
	ffa_notifications_send_schedule_receiver_interrupt(cpu);
	ffa_notifications_sri_set_delayed_internal(cpu, false);
}

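/**
 * Initializes the Schedule Receiver Interrupt: configures
 * HF_SCHEDULE_RECEIVER_INTID as a non-secure SGI with priority SRI_PRIORITY.
 */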
void ffa_notifications_sri_init(struct cpu *cpu)
{
	/* Configure as Non Secure SGI. */
	struct interrupt_descriptor sri_desc = {
		.interrupt_id = HF_SCHEDULE_RECEIVER_INTID,
		.type = INT_DESC_TYPE_SGI,
		.sec_state = INT_DESC_SEC_STATE_NS,
		.priority = SRI_PRIORITY,
		.valid = true,
	};

	/* TODO: when supported, make the interrupt driver use cpu structure. */
	(void)cpu;

	plat_interrupts_configure_interrupt(sri_desc);
}