// © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
//
// SPDX-License-Identifier: BSD-3-Clause

#if defined(HYPERCALLS)
#include <assert.h>
#include <hyptypes.h>

#include <hypcall_def.h>
#include <hyprights.h>

#include <atomic.h>
#include <compiler.h>
#include <cpulocal.h>
#include <cspace.h>
#include <cspace_lookup.h>
#include <object.h>
#include <platform_cpu.h>
#include <scheduler.h>
#include <spinlock.h>
#include <thread.h>
#include <util.h>
#include <vcpu.h>

#include "event_handlers.h"
#include "reg_access.h"

// This hypercall must be called before the vCPU is activated. It copies the
// provided flags into the vcpu_options field of the thread structure.
// Modules that define configuration flags (such as the debug module) extend
// the vcpu_option_flags bitfield, and their thread_activate handlers read the
// thread's vcpu_options field and act on the flags that are set.
error_t
hypercall_vcpu_configure(cap_id_t cap_id, vcpu_option_flags_t vcpu_options)
{
	error_t ret = OK;

	// Check for unknown option flags
	vcpu_option_flags_t clean = vcpu_option_flags_clean(vcpu_options);
	if (vcpu_option_flags_raw(vcpu_options) !=
	    vcpu_option_flags_raw(clean)) {
		ret = ERROR_ARGUMENT_INVALID;
		goto out;
	}

	cspace_t *cspace = cspace_get_self();
	object_type_t type;
	object_ptr_result_t result = cspace_lookup_object_any(
		cspace, cap_id, CAP_RIGHTS_GENERIC_OBJECT_ACTIVATE, &type);
	if (compiler_unexpected(result.e != OK)) {
		ret = result.e;
		goto out;
	}

	if (compiler_unexpected(type != OBJECT_TYPE_THREAD)) {
		ret = ERROR_CSPACE_WRONG_OBJECT_TYPE;
		object_put(type, result.r);
		goto out;
	}

	thread_t *vcpu = result.r.thread;

	if (compiler_expected(vcpu->kind == THREAD_KIND_VCPU)) {
		spinlock_acquire(&vcpu->header.lock);
		object_state_t state = atomic_load_relaxed(&vcpu->header.state);
		if (state == OBJECT_STATE_INIT) {
			ret = vcpu_configure(vcpu, vcpu_options);
		} else {
			ret = ERROR_OBJECT_STATE;
		}
		spinlock_release(&vcpu->header.lock);
	} else {
		ret = ERROR_ARGUMENT_INVALID;
	}

	object_put_thread(vcpu);
out:
	return ret;
}

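// Write a value into one of the vCPU's registers, selected by a register set
// and an index within that set. The lookup requires the
// CAP_RIGHTS_THREAD_WRITE_CONTEXT right; validation of the register set and
// index is left to vcpu_register_write().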
error_t
hypercall_vcpu_register_write(cap_id_t vcpu_cap,
			      vcpu_register_set_t register_set,
			      index_t register_index, register_t value)
{
	error_t ret;
	cspace_t *cspace = cspace_get_self();

	thread_ptr_result_t result = cspace_lookup_thread_any(
		cspace, vcpu_cap, CAP_RIGHTS_THREAD_WRITE_CONTEXT);
	if (compiler_unexpected(result.e != OK)) {
		ret = result.e;
		goto out;
	}

	thread_t *vcpu = result.r;

	ret = vcpu_register_write(vcpu, register_set, register_index, value);

	object_put_thread(vcpu);
out:
	return ret;
}

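// Bind one of the vCPU's virtual IRQ sources, selected by virq_type, to the
// given virtual interrupt controller and vIRQ number. Requires the
// CAP_RIGHTS_THREAD_BIND_VIRQ right on the vCPU and CAP_RIGHTS_VIC_BIND_SOURCE
// on the VIC.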
error_t
hypercall_vcpu_bind_virq(cap_id_t vcpu_cap, cap_id_t vic_cap, virq_t virq,
			 vcpu_virq_type_t virq_type)
{
	error_t err;
	cspace_t *cspace = cspace_get_self();

	thread_ptr_result_t result = cspace_lookup_thread(
		cspace, vcpu_cap, CAP_RIGHTS_THREAD_BIND_VIRQ);
	if (compiler_unexpected(result.e != OK)) {
		err = result.e;
		goto out;
	}
	thread_t *vcpu = result.r;

	vic_ptr_result_t v =
		cspace_lookup_vic(cspace, vic_cap, CAP_RIGHTS_VIC_BIND_SOURCE);
	if (compiler_unexpected(v.e != OK)) {
		err = v.e;
		goto out_release_vcpu;
	}
	vic_t *vic = v.r;

	err = vcpu_bind_virq(vcpu, vic, virq, virq_type);

	object_put_vic(vic);
out_release_vcpu:
	object_put_thread(vcpu);
out:
	return err;
}

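// Unbind the vCPU's virtual IRQ source of the given type from its virtual
// interrupt controller. Requires the CAP_RIGHTS_THREAD_BIND_VIRQ right.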
error_t
hypercall_vcpu_unbind_virq(cap_id_t vcpu_cap, vcpu_virq_type_t virq_type)
{
	error_t err = OK;
	cspace_t *cspace = cspace_get_self();

	thread_ptr_result_t result = cspace_lookup_thread(
		cspace, vcpu_cap, CAP_RIGHTS_THREAD_BIND_VIRQ);
	if (compiler_unexpected(result.e != OK)) {
		err = result.e;
		goto out;
	}
	thread_t *vcpu = result.r;

	err = vcpu_unbind_virq(vcpu, virq_type);

	object_put_thread(vcpu);
out:
	return err;
}

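// Set the physical CPU affinity of a vCPU. The vCPU must be in the INIT state,
// unless the scheduler supports migration, in which case an active vCPU may
// also be moved. Passing CPU_INDEX_INVALID makes the vCPU non-runnable (only
// supported when the scheduler can migrate threads) and additionally requires
// the CAP_RIGHTS_THREAD_DISABLE right.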
error_t
hypercall_vcpu_set_affinity(cap_id_t cap_id, cpu_index_t affinity)
{
	error_t ret;
	cspace_t *cspace = cspace_get_self();
	cap_rights_thread_t required_rights;

	if (affinity == CPU_INDEX_INVALID) {
#if SCHEDULER_CAN_MIGRATE
		// Thread will become non-runnable
		required_rights = cap_rights_thread_union(
			CAP_RIGHTS_THREAD_AFFINITY, CAP_RIGHTS_THREAD_DISABLE);
#else
		ret = ERROR_UNIMPLEMENTED;
		goto out;
#endif
	} else if (!platform_cpu_exists(affinity)) {
		ret = ERROR_ARGUMENT_INVALID;
		goto out;
	} else {
		// Affinity is valid
		required_rights = CAP_RIGHTS_THREAD_AFFINITY;
	}

	thread_ptr_result_t result =
		cspace_lookup_thread_any(cspace, cap_id, required_rights);
	if (compiler_unexpected(result.e != OK)) {
		ret = result.e;
		goto out;
	}

	thread_t *vcpu = result.r;

	if (compiler_unexpected(vcpu->kind != THREAD_KIND_VCPU)) {
		ret = ERROR_ARGUMENT_INVALID;
		object_put_thread(vcpu);
		goto out;
	}

	spinlock_acquire(&vcpu->header.lock);
	object_state_t state = atomic_load_relaxed(&vcpu->header.state);
#if SCHEDULER_CAN_MIGRATE
	if ((state == OBJECT_STATE_INIT) || (state == OBJECT_STATE_ACTIVE)) {
#else
	if (state == OBJECT_STATE_INIT) {
#endif
		scheduler_lock_nopreempt(vcpu);
		ret = scheduler_set_affinity(vcpu, affinity);
		scheduler_unlock_nopreempt(vcpu);
	} else {
		ret = ERROR_OBJECT_STATE;
	}
	spinlock_release(&vcpu->header.lock);

	object_put_thread(vcpu);
out:
	return ret;
}

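// Power on a vCPU that is currently blocked as powered-off. The entry point
// and context arguments are ignored when the corresponding preserve flags are
// set. Returns ERROR_BUSY if the vCPU is not powered off; on success a
// reschedule may be triggered before returning.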
error_t
hypercall_vcpu_poweron(cap_id_t cap_id, uint64_t entry_point, uint64_t context,
		       vcpu_poweron_flags_t flags)
{
	error_t ret = OK;
	cspace_t *cspace = cspace_get_self();

	if (!vcpu_poweron_flags_is_clean(flags)) {
		ret = ERROR_ARGUMENT_INVALID;
		goto out;
	}

	thread_ptr_result_t result =
		cspace_lookup_thread(cspace, cap_id, CAP_RIGHTS_THREAD_POWER);
	if (compiler_unexpected(result.e != OK)) {
		ret = result.e;
		goto out;
	}

	thread_t *vcpu = result.r;

	if (compiler_expected(vcpu->kind == THREAD_KIND_VCPU)) {
		bool reschedule = false;

		scheduler_lock(vcpu);
		if (scheduler_is_blocked(vcpu, SCHEDULER_BLOCK_VCPU_OFF)) {
			// If a preserve flag is set, pass an error result
			// instead of a new value; otherwise pass the
			// caller-supplied entry point / context.
			bool_result_t poweron_result = vcpu_poweron(
				vcpu,
				vcpu_poweron_flags_get_preserve_entry_point(
					&flags)
					? vmaddr_result_error(
						  ERROR_ARGUMENT_INVALID)
					: vmaddr_result_ok(entry_point),
				vcpu_poweron_flags_get_preserve_context(&flags)
					? register_result_error(
						  ERROR_ARGUMENT_INVALID)
					: register_result_ok(context));
			reschedule = poweron_result.r;
			ret = poweron_result.e;
		} else {
			ret = ERROR_BUSY;
		}
		scheduler_unlock(vcpu);
		object_put_thread(vcpu);

		if (reschedule) {
			(void)scheduler_schedule();
		}
	} else {
		ret = ERROR_ARGUMENT_INVALID;
		object_put_thread(vcpu);
	}
out:
	return ret;
}

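// Power off the calling vCPU. The capability must refer to the current thread;
// on success this call does not return. The last_vcpu flag is passed through
// to vcpu_poweroff().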
error_t
hypercall_vcpu_poweroff(cap_id_t cap_id, vcpu_poweroff_flags_t flags)
{
	error_t ret = OK;
	cspace_t *cspace = cspace_get_self();

	if (!vcpu_poweroff_flags_is_clean(flags)) {
		ret = ERROR_ARGUMENT_INVALID;
		goto out;
	}

	thread_ptr_result_t result =
		cspace_lookup_thread(cspace, cap_id, CAP_RIGHTS_THREAD_POWER);
	if (compiler_unexpected(result.e != OK)) {
		ret = result.e;
		goto out;
	}

	thread_t *vcpu = result.r;

	if (compiler_expected(vcpu->kind == THREAD_KIND_VCPU) &&
	    (vcpu == thread_get_self())) {
		// We can (and must) safely release our reference to the VCPU
		// here, because we know it's the current thread so the
		// scheduler will keep a reference to it. Since vcpu_poweroff()
		// does not return, failing to release this reference would
		// leave the thread as a zombie after it halts.
		object_put_thread(vcpu);

		ret = vcpu_poweroff(vcpu_poweroff_flags_get_last_vcpu(&flags),
				    false);
		// Not reached if vcpu_poweroff() succeeded
	} else {
		ret = ERROR_ARGUMENT_INVALID;
		object_put_thread(vcpu);
	}

out:
	return ret;
}

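// Set the scheduling priority of a vCPU that has not yet been activated.
// Priorities above VCPU_MAX_PRIORITY are rejected with ERROR_DENIED.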
error_t
hypercall_vcpu_set_priority(cap_id_t cap_id, priority_t priority)
{
	error_t ret;
	cspace_t *cspace = cspace_get_self();

	thread_ptr_result_t result = cspace_lookup_thread_any(
		cspace, cap_id, CAP_RIGHTS_THREAD_PRIORITY);
	if (compiler_unexpected(result.e != OK)) {
		ret = result.e;
		goto out;
	}

	thread_t *vcpu = result.r;

	if (compiler_unexpected(vcpu->kind != THREAD_KIND_VCPU)) {
		ret = ERROR_ARGUMENT_INVALID;
		object_put_thread(vcpu);
		goto out;
	}

	if (priority > VCPU_MAX_PRIORITY) {
		ret = ERROR_DENIED;
		object_put_thread(vcpu);
		goto out;
	}

	spinlock_acquire(&vcpu->header.lock);
	object_state_t state = atomic_load_relaxed(&vcpu->header.state);
	if (state == OBJECT_STATE_INIT) {
		scheduler_lock_nopreempt(vcpu);
		ret = scheduler_set_priority(vcpu, priority);
		scheduler_unlock_nopreempt(vcpu);
	} else {
		ret = ERROR_OBJECT_STATE;
	}
	spinlock_release(&vcpu->header.lock);

	object_put_thread(vcpu);
out:
	return ret;
}

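// Set the scheduling timeslice (in nanoseconds) of a vCPU that has not yet
// been activated.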
error_t
hypercall_vcpu_set_timeslice(cap_id_t cap_id, nanoseconds_t timeslice)
{
	error_t ret;
	cspace_t *cspace = cspace_get_self();

	thread_ptr_result_t result = cspace_lookup_thread_any(
		cspace, cap_id, CAP_RIGHTS_THREAD_TIMESLICE);
	if (compiler_unexpected(result.e != OK)) {
		ret = result.e;
		goto out;
	}

	thread_t *vcpu = result.r;

	if (compiler_unexpected(vcpu->kind != THREAD_KIND_VCPU)) {
		ret = ERROR_ARGUMENT_INVALID;
		object_put_thread(vcpu);
		goto out;
	}

	spinlock_acquire(&vcpu->header.lock);
	object_state_t state = atomic_load_relaxed(&vcpu->header.state);
	if (state == OBJECT_STATE_INIT) {
		scheduler_lock_nopreempt(vcpu);
		ret = scheduler_set_timeslice(vcpu, timeslice);
		scheduler_unlock_nopreempt(vcpu);
	} else {
		ret = ERROR_OBJECT_STATE;
	}
	spinlock_release(&vcpu->header.lock);

	object_put_thread(vcpu);
out:
	return ret;
}

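// Kill a vCPU thread. Requires the CAP_RIGHTS_THREAD_LIFECYCLE right; the kill
// itself is performed by thread_kill().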
error_t
hypercall_vcpu_kill(cap_id_t cap_id)
{
	error_t ret;
	cspace_t *cspace = cspace_get_self();

	thread_ptr_result_t result = cspace_lookup_thread(
		cspace, cap_id, CAP_RIGHTS_THREAD_LIFECYCLE);
	if (compiler_unexpected(result.e != OK)) {
		ret = result.e;
		goto out;
	}

	thread_t *vcpu = result.r;

	if (compiler_expected(vcpu->kind == THREAD_KIND_VCPU)) {
		ret = thread_kill(vcpu);
	} else {
		ret = ERROR_ARGUMENT_INVALID;
	}

	object_put_thread(vcpu);
out:
	return ret;
}

#else
extern int unused;
#endif