/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/vm.h"

#include "hf/arch/std.h"

#include "hf/check.h"
#include "hf/ffa/vm.h"
#include "hf/plat/interrupts.h"

/**
 * The SPMC needs to keep track of some information about NWd VMs; for the
 * time being, only their notifications state.
 * A slot in 'nwd_vms' is allocated to a given VM upon a call to
 * FFA_NOTIFICATION_BITMAP_CREATE and released upon a call to
 * FFA_NOTIFICATION_BITMAP_DESTROY.
 */
static struct vm nwd_vms[MAX_VMS];

/**
 * All accesses to `nwd_vms` need to be guarded by this lock.
 */
static struct spinlock nwd_vms_lock_instance = SPINLOCK_INIT;

/**
 * Encapsulates `nwd_vms` while the `nwd_vms_lock_instance` lock is held.
 */
struct nwd_vms_locked {
	struct vm *nwd_vms;
};

const uint32_t nwd_vms_size = ARRAY_SIZE(nwd_vms);

/** Locks the normal world VMs guarding lock. */
static struct nwd_vms_locked nwd_vms_lock(void)
{
	sl_lock(&nwd_vms_lock_instance);

	return (struct nwd_vms_locked){.nwd_vms = nwd_vms};
}

/** Unlocks the normal world VMs guarding lock. */
static void nwd_vms_unlock(struct nwd_vms_locked *vms)
{
	CHECK(vms->nwd_vms == nwd_vms);
	vms->nwd_vms = NULL;
	sl_unlock(&nwd_vms_lock_instance);
}

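/**
 * Searches `nwd_vms` for the VM with the given ID and locks it.
 * Returns a vm_locked with a NULL vm if no entry matches.
 */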
static struct vm_locked ffa_vm_nwd_find_locked(
	struct nwd_vms_locked nwd_vms_locked, ffa_id_t vm_id)
{
	assert(nwd_vms_locked.nwd_vms != NULL);

	for (uint32_t i = 0U; i < nwd_vms_size; i++) {
		if (nwd_vms[i].id == vm_id) {
			return vm_lock(&nwd_vms[i]);
		}
	}

	return (struct vm_locked){.vm = NULL};
}

/**
 * Allocates an NWd VM structure for the VM of the given ID.
 * If a VM with that ID already exists, returns it.
 * Returns a vm_locked with a NULL vm if no free slot is available.
 */
struct vm_locked ffa_vm_nwd_create(ffa_id_t vm_id)
{
	struct vm_locked vm_locked;
	struct nwd_vms_locked nwd_vms_locked = nwd_vms_lock();

	CHECK(!vm_id_is_current_world(vm_id));

	/* Check if a VM with `vm_id` already exists and return it. */
	vm_locked = ffa_vm_nwd_find_locked(nwd_vms_locked, vm_id);
	if (vm_locked.vm != NULL) {
		goto out;
	}

	/* Get the first empty slot in `nwd_vms` to create the VM. */
	vm_locked = ffa_vm_nwd_find_locked(nwd_vms_locked, HF_INVALID_VM_ID);
	if (vm_locked.vm == NULL) {
		/* NULL means there are no free slots in `nwd_vms`. */
		goto out;
	}

	/*
	 * Note: the VM struct for NWd VMs is only partially initialized, to
	 * the extent of what is currently used by the SPMC (VM ID, waiter
	 * list).
	 */
	vm_locked.vm->id = vm_id;

out:
	nwd_vms_unlock(&nwd_vms_locked);

	return vm_locked;
}

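/**
 * Releases the `nwd_vms` slot of an NWd VM once its notifications are
 * disabled and its mailbox is no longer mapped.
 */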
void ffa_vm_destroy(struct vm_locked to_destroy_locked)
{
	struct vm *vm = to_destroy_locked.vm;
	/*
	 * Free the VM slot if notifications are disabled and mailbox is not
	 * mapped.
	 */
	if (!vm_id_is_current_world(vm->id) && vm->id != HF_HYPERVISOR_VM_ID &&
	    !vm->notifications.enabled && vm->mailbox.send == NULL &&
	    vm->mailbox.recv == NULL) {
		to_destroy_locked.vm->id = HF_INVALID_VM_ID;
		to_destroy_locked.vm->vcpu_count = 0U;
	}
}

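/**
 * Initializes the NWd VM structures for use of the notifications interfaces.
 */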
void ffa_vm_init(struct mpool *ppool)
{
	struct vm *other_world = vm_find(HF_OTHER_WORLD_ID);

	/* Init NWd VM structures for use of the notifications interfaces. */
	for (uint32_t i = 0; i < nwd_vms_size; i++) {
		/*
		 * Note that vm_init() is not called on nwd_vms. This means
		 * that dynamically allocated structures, such as vcpus, are
		 * left as NULL in the nwd_vms structures. This is okay, since
		 * as of today the vcpu structures are not used. This also
		 * helps reduce the memory footprint. A slot in 'nwd_vms' is
		 * considered available if its id is HF_INVALID_VM_ID.
		 */
		nwd_vms[i].id = HF_INVALID_VM_ID;
		nwd_vms[i].vcpu_count = MAX_CPUS;
		vm_notifications_init(&nwd_vms[i], MAX_CPUS, ppool);

		/* Give them the same FF-A version as the Hypervisor. */
		nwd_vms[i].ffa_version = other_world->ffa_version;
	}
}

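/**
 * Returns true if the SP's configured action for NS interrupts is managed
 * exit.
 */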
bool ffa_vm_managed_exit_supported(struct vm *vm)
{
	return (vm->ns_interrupts_action == NS_ACTION_ME);
}

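/**
 * Finds and locks the VM with the given ID. IDs belonging to the current
 * world, and the other world VM ID, are resolved through vm_find_locked();
 * other NWd VM IDs are searched for in `nwd_vms`.
 */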
struct vm_locked ffa_vm_find_locked(ffa_id_t vm_id)
{
	struct vm_locked to_ret_locked;

	if (vm_id_is_current_world(vm_id) || vm_id == HF_OTHER_WORLD_ID) {
		return vm_find_locked(vm_id);
	}

	struct nwd_vms_locked nwd_vms_locked = nwd_vms_lock();

	to_ret_locked = ffa_vm_nwd_find_locked(nwd_vms_locked, vm_id);

	nwd_vms_unlock(&nwd_vms_locked);

	return to_ret_locked;
}

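/**
 * Same as ffa_vm_find_locked(), except that an NWd VM without a `nwd_vms`
 * slot gets one allocated.
 */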
struct vm_locked ffa_vm_find_locked_create(ffa_id_t vm_id)
{
	if (vm_id_is_current_world(vm_id) || vm_id == HF_OTHER_WORLD_ID) {
		return vm_find_locked(vm_id);
	}

	return ffa_vm_nwd_create(vm_id);
}

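/**
 * Gathers info about pending notifications for the other world VM and all
 * allocated NWd VMs. Returns true if the provided lists are full and there
 * are still pending notifications left to report.
 */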
bool ffa_vm_notifications_info_get(uint16_t *ids, uint32_t *ids_count,
				   uint32_t *lists_sizes, uint32_t *lists_count,
				   const uint32_t ids_count_max)
{
	struct nwd_vms_locked nwd_vms_locked = nwd_vms_lock();
	struct vm_locked other_world_locked = vm_find_locked(HF_OTHER_WORLD_ID);
	/*
	 * Saves the return value of 'vm_notifications_info_get'. It is also
	 * returned by this function, as an indicator that the scheduler
	 * should make further calls to retrieve info about pending
	 * notifications.
	 */
	bool list_full_and_more_pending = false;

	CHECK(other_world_locked.vm != NULL);

	list_full_and_more_pending = vm_notifications_info_get(
		other_world_locked, ids, ids_count, lists_sizes, lists_count,
		ids_count_max);

	vm_unlock(&other_world_locked);

	for (ffa_vm_count_t i = 0;
	     i < nwd_vms_size && !list_full_and_more_pending; i++) {
		if (nwd_vms[i].id != HF_INVALID_VM_ID) {
			struct vm_locked vm_locked = vm_lock(&nwd_vms[i]);

			list_full_and_more_pending = vm_notifications_info_get(
				vm_locked, ids, ids_count, lists_sizes,
				lists_count, ids_count_max);

			vm_unlock(&vm_locked);
		}
	}

	nwd_vms_unlock(&nwd_vms_locked);

	return list_full_and_more_pending;
}

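/**
 * Disables all the physical interrupts specified in the given SP's manifest.
 */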
void ffa_vm_disable_interrupts(struct vm_locked vm_locked)
{
	uint32_t core_pos = arch_find_core_pos();

	/* Gracefully disable interrupts. */
	dlog_verbose("Interrupts belonging to SP %x disabled\n",
		     vm_locked.vm->id);

	for (uint32_t i = 0; i < VM_MANIFEST_MAX_INTERRUPTS; i++) {
		struct interrupt_descriptor int_desc;

		int_desc = vm_locked.vm->interrupt_desc[i];
		if (!int_desc.valid) {
			break;
		}
		plat_interrupts_disable(int_desc.interrupt_id, core_pos);
	}
}

/**
 * Reclaims all resources belonging to a VM in the aborted state.
 */
void ffa_vm_free_resources(struct vm_locked vm_locked)
{
	/*
	 * Gracefully disable all interrupts belonging to the SP.
	 */
	ffa_vm_disable_interrupts(vm_locked);
}