// © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
//
// SPDX-License-Identifier: BSD-3-Clause

#include <hyptypes.h>

#include <hypcall_def.h>
#include <hyprights.h>

#include <atomic.h>
#include <compiler.h>
#include <cspace.h>
#include <cspace_lookup.h>
#include <object.h>
#include <partition.h>
#include <spinlock.h>
#include <vpm.h>

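// Configure a VPM group object that has not yet been activated.
//
// The caller must hold a capability to the VPM group with the generic
// object-activate right. Option flags with unsupported bits set are
// rejected with ERROR_UNIMPLEMENTED, and the configuration is only applied
// while the object is still in the INIT state; otherwise ERROR_OBJECT_STATE
// is returned.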
error_t
hypercall_vpm_group_configure(cap_id_t		       vpm_group_cap,
			      vpm_group_option_flags_t flags)
{
	error_t	  err;
	cspace_t *cspace = cspace_get_self();

	if (!vpm_group_option_flags_is_clean(flags)) {
		err = ERROR_UNIMPLEMENTED;
		goto out;
	}

	object_type_t	    type;
	object_ptr_result_t o = cspace_lookup_object_any(
		cspace, vpm_group_cap, CAP_RIGHTS_GENERIC_OBJECT_ACTIVATE,
		&type);
	if (compiler_unexpected(o.e != OK)) {
		err = o.e;
		goto out;
	}
	if (type != OBJECT_TYPE_VPM_GROUP) {
		err = ERROR_CSPACE_WRONG_OBJECT_TYPE;
		goto out_release_vpm_group;
	}
	vpm_group_t *vpm_group = o.r.vpm_group;

	spinlock_acquire(&vpm_group->header.lock);

	if (atomic_load_relaxed(&vpm_group->header.state) ==
	    OBJECT_STATE_INIT) {
		err = vpm_group_configure(vpm_group, flags);
	} else {
		err = ERROR_OBJECT_STATE;
	}

	spinlock_release(&vpm_group->header.lock);

out_release_vpm_group:
	object_put(type, o.r);
out:
	return err;
}

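// Attach a not-yet-activated VCPU (thread object) to a VPM group at the
// given index.
//
// The caller must hold the attach-VCPU right on the VPM group capability
// and the generic object-activate right on the VCPU capability. The attach
// is only permitted while the thread is still in the INIT state.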
error_t
hypercall_vpm_group_attach_vcpu(cap_id_t vpm_group_cap, cap_id_t vcpu_cap,
				index_t index)
{
	error_t	  err;
	cspace_t *cspace = cspace_get_self();

	vpm_group_ptr_result_t vpm_group_r = cspace_lookup_vpm_group(
		cspace, vpm_group_cap, CAP_RIGHTS_VPM_GROUP_ATTACH_VCPU);
	if (compiler_unexpected(vpm_group_r.e != OK)) {
		err = vpm_group_r.e;
		goto out;
	}

	object_type_t	    type;
	object_ptr_result_t o = cspace_lookup_object_any(
		cspace, vcpu_cap, CAP_RIGHTS_GENERIC_OBJECT_ACTIVATE, &type);
	if (compiler_unexpected(o.e != OK)) {
		err = o.e;
		goto out_release_vpm_group;
	}
	if (type != OBJECT_TYPE_THREAD) {
		err = ERROR_CSPACE_WRONG_OBJECT_TYPE;
		goto out_release_vcpu;
	}
	thread_t *thread = o.r.thread;

	spinlock_acquire(&thread->header.lock);
	if (atomic_load_relaxed(&thread->header.state) == OBJECT_STATE_INIT) {
		err = vpm_attach(vpm_group_r.r, thread, index);
	} else {
		err = ERROR_OBJECT_STATE;
	}
	spinlock_release(&thread->header.lock);

out_release_vcpu:
	object_put(type, o.r);
out_release_vpm_group:
	object_put_vpm_group(vpm_group_r.r);
out:
	return err;
}

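// Bind a VPM group's VIRQ source to a virtual interrupt controller at the
// given VIRQ number.
//
// The caller must hold the bind-VIRQ right on the VPM group capability and
// the bind-source right on the VIC capability.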
error_t
hypercall_vpm_group_bind_virq(cap_id_t vpm_group_cap, cap_id_t vic_cap,
			      virq_t virq)
{
	error_t	  err	 = OK;
	cspace_t *cspace = cspace_get_self();

	vpm_group_ptr_result_t p = cspace_lookup_vpm_group(
		cspace, vpm_group_cap, CAP_RIGHTS_VPM_GROUP_BIND_VIRQ);
	if (compiler_unexpected(p.e != OK)) {
		err = p.e;
		goto out;
	}
	vpm_group_t *vpm_group = p.r;

	vic_ptr_result_t v =
		cspace_lookup_vic(cspace, vic_cap, CAP_RIGHTS_VIC_BIND_SOURCE);
	if (compiler_unexpected(v.e != OK)) {
		err = v.e;
		goto out_vpm_group_release;
	}
	vic_t *vic = v.r;

	err = vpm_bind_virq(vpm_group, vic, virq);

	object_put_vic(vic);
out_vpm_group_release:
	object_put_vpm_group(vpm_group);
out:
	return err;
}

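// Unbind a VPM group's VIRQ source from the virtual interrupt controller it
// is bound to.
//
// This reuses the bind-VIRQ right on the VPM group capability; there is no
// separate unbind right.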
error_t
hypercall_vpm_group_unbind_virq(cap_id_t vpm_group_cap)
{
	error_t	  err	 = OK;
	cspace_t *cspace = cspace_get_self();

	vpm_group_ptr_result_t p = cspace_lookup_vpm_group(
		cspace, vpm_group_cap, CAP_RIGHTS_VPM_GROUP_BIND_VIRQ);
	if (compiler_unexpected(p.e != OK)) {
		err = p.e;
		goto out;
	}
	vpm_group_t *vpm_group = p.r;

	vpm_unbind_virq(vpm_group);

	object_put_vpm_group(vpm_group);
out:
	return err;
}

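// Query the current state of a VPM group.
//
// The caller must hold the query right on the VPM group capability. On
// success, the state reported by vpm_get_state() is returned in the result
// structure.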
hypercall_vpm_group_get_state_result_t
hypercall_vpm_group_get_state(cap_id_t vpm_group_cap)
{
	hypercall_vpm_group_get_state_result_t ret    = { 0 };
	cspace_t			      *cspace = cspace_get_self();

	vpm_group_ptr_result_t p = cspace_lookup_vpm_group(
		cspace, vpm_group_cap, CAP_RIGHTS_VPM_GROUP_QUERY);
	if (compiler_unexpected(p.e != OK)) {
		ret.error = p.e;
		goto out;
	}
	vpm_group_t *vpm_group = p.r;

	vpm_state_t state = vpm_get_state(vpm_group);

	ret.error     = OK;
	ret.vpm_state = (uint64_t)state;

	object_put_vpm_group(vpm_group);
out:
	return ret;
}