// © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
//
// SPDX-License-Identifier: BSD-3-Clause

#include <assert.h>
#include <hyptypes.h>

#include <atomic.h>
#include <preempt.h>
#include <spinlock.h>
#include <thread.h>
#include <util.h>

#include "internal.h"

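// Classify a VIRQ number by the range it falls into. With the standard
// GICv3 numbering, SGIs occupy VIRQs 0-15, PPIs 16-31 and SPIs 32 up to
// the configured SPI limit; the extended PPI/SPI ranges and LPIs are
// recognised only when the corresponding features are compiled in. Any
// other VIRQ number is reserved.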
vgic_irq_type_t
vgic_get_irq_type(virq_t irq)
{
	vgic_irq_type_t type;

	if (irq < (virq_t)(GIC_SGI_BASE + GIC_SGI_NUM)) {
		type = VGIC_IRQ_TYPE_SGI;
	} else if ((irq >= (virq_t)GIC_PPI_BASE) &&
		   (irq < (virq_t)(GIC_PPI_BASE + GIC_PPI_NUM))) {
		type = VGIC_IRQ_TYPE_PPI;
	} else if ((irq >= (virq_t)GIC_SPI_BASE) &&
		   (irq < (virq_t)(GIC_SPI_BASE + GIC_SPI_NUM))) {
		type = VGIC_IRQ_TYPE_SPI;
	}
#if VGIC_HAS_EXT_IRQS
	else if ((irq >= (virq_t)GIC_PPI_EXT_BASE) &&
		 (irq < (virq_t)(GIC_PPI_EXT_BASE + GIC_PPI_EXT_NUM))) {
		type = VGIC_IRQ_TYPE_PPI_EXT;
	} else if ((irq >= (virq_t)GIC_SPI_EXT_BASE) &&
		   (irq < (virq_t)(GIC_SPI_EXT_BASE + GIC_SPI_EXT_NUM))) {
		type = VGIC_IRQ_TYPE_SPI_EXT;
	}
#endif
#if VGIC_HAS_LPI
	else if (irq >= (virq_t)GIC_LPI_BASE) {
		type = VGIC_IRQ_TYPE_LPI;
	}
#endif
	else {
		type = VGIC_IRQ_TYPE_RESERVED;
	}

	return type;
}

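// Return true if the VIRQ is private to a VCPU, i.e. an SGI or a PPI.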
bool
vgic_irq_is_private(virq_t virq)
{
	bool result;
	switch (vgic_get_irq_type(virq)) {
	case VGIC_IRQ_TYPE_SGI:
	case VGIC_IRQ_TYPE_PPI:
		// If adding any classes here (e.g. PPI_EXT) you _must_ audit
		// all callers of this function and fix up their array indexing
		result = true;
		break;
	case VGIC_IRQ_TYPE_SPI:
	case VGIC_IRQ_TYPE_RESERVED:
#if VGIC_HAS_LPI && GICV3_HAS_VLPI_V4_1
	case VGIC_IRQ_TYPE_LPI:
#endif
	default:
		result = false;
		break;
	}
	return result;
}

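// Return true if the VIRQ is a shared peripheral interrupt (SPI).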
bool
vgic_irq_is_spi(virq_t virq)
{
	bool result;
	switch (vgic_get_irq_type(virq)) {
	case VGIC_IRQ_TYPE_SPI:
		// If adding any classes here (e.g. SPI_EXT) you _must_ audit
		// all callers of this function and fix up their array indexing
		result = true;
		break;
	case VGIC_IRQ_TYPE_SGI:
	case VGIC_IRQ_TYPE_PPI:
	case VGIC_IRQ_TYPE_RESERVED:
#if VGIC_HAS_LPI && GICV3_HAS_VLPI_V4_1
	case VGIC_IRQ_TYPE_LPI:
#endif
	default:
		result = false;
		break;
	}
	return result;
}

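// Return true if the VIRQ is a private peripheral interrupt (PPI).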
bool
vgic_irq_is_ppi(virq_t virq)
{
	bool result;
	switch (vgic_get_irq_type(virq)) {
	case VGIC_IRQ_TYPE_PPI:
		// If adding any classes here (e.g. PPI_EXT) you _must_ audit
		// all callers of this function and fix up their array indexing
		result = true;
		break;
	case VGIC_IRQ_TYPE_SGI:
	case VGIC_IRQ_TYPE_SPI:
	case VGIC_IRQ_TYPE_RESERVED:
#if VGIC_HAS_LPI && GICV3_HAS_VLPI_V4_1
	case VGIC_IRQ_TYPE_LPI:
#endif
	default:
		result = false;
		break;
	}
	return result;
}

// Find the target of a given VIRQ source, if it is directly routed or private.
//
// No routing decisions are made by this function; it returns NULL for 1-of-N
// SPIs.
thread_t *
vgic_find_target(vic_t *vic, virq_source_t *source)
{
	thread_t *ret;

	if (source->is_private) {
		if (source->vgic_gicr_index < vic->gicr_count) {
			ret = atomic_load_consume(
				&vic->gicr_vcpus[source->vgic_gicr_index]);
		} else {
			ret = NULL;
		}
	} else {
		_Atomic vgic_delivery_state_t *dstate =
			vgic_find_dstate(vic, NULL, source->virq);
		vgic_delivery_state_t current_dstate =
			atomic_load_relaxed(dstate);

#if VGIC_HAS_1N
		if (vgic_delivery_state_get_route_1n(&current_dstate)) {
			ret = NULL;
			goto out;
		}
#endif

		index_t route_index =
			vgic_delivery_state_get_route(&current_dstate);
		if (route_index < vic->gicr_count) {
			ret = atomic_load_consume(
				&vic->gicr_vcpus[route_index]);
		} else {
			ret = NULL;
		}
	}

#if VGIC_HAS_1N
out:
#endif
	return ret;
}

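// Find the registered source object for a VIRQ, if any.
//
// For an SPI the source is looked up in the VIC's shared source array; for
// a PPI it is looked up in the given VCPU's private sources. SGIs, LPIs and
// reserved VIRQ numbers never have a source object.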
virq_source_t *
vgic_find_source(vic_t *vic, thread_t *vcpu, virq_t virq)
{
	virq_source_t *source;

	// Load the source object pointer for a VIRQ. This must be a load
	// acquire to ensure that this is accessed prior to reading the virq
	// delivery state's level_src bit, because that bit being set should
	// guarantee that this pointer is non-NULL (see vic_unbind()).

	switch (vgic_get_irq_type(virq)) {
	case VGIC_IRQ_TYPE_SPI:
		assert(vic != NULL);
		if ((virq - GIC_SPI_BASE) < vic->sources_count) {
			source = atomic_load_acquire(
				&vic->sources[virq - GIC_SPI_BASE]);
		} else {
			source = NULL;
		}
		break;
	case VGIC_IRQ_TYPE_PPI:
		assert(vcpu != NULL);
		source = atomic_load_acquire(
			&vcpu->vgic_sources[virq - GIC_PPI_BASE]);
		break;
	case VGIC_IRQ_TYPE_SGI:
	case VGIC_IRQ_TYPE_RESERVED:
#if VGIC_HAS_LPI && GICV3_HAS_VLPI_V4_1
	case VGIC_IRQ_TYPE_LPI:
#endif
	default:
		source = NULL;
		break;
	}
	return source;
}

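// Find the delivery state object for a VIRQ.
//
// Private VIRQs (SGIs and PPIs) are tracked per VCPU; SPIs are tracked in
// the VIC. Returns NULL for VIRQ numbers that have no delivery state.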
_Atomic(vgic_delivery_state_t) *
vgic_find_dstate(vic_t *vic, thread_t *vcpu, virq_t virq)
{
	_Atomic vgic_delivery_state_t *dstate;
	switch (vgic_get_irq_type(virq)) {
	case VGIC_IRQ_TYPE_SGI:
	case VGIC_IRQ_TYPE_PPI:
		assert(vcpu != NULL);
		dstate = &vcpu->vgic_private_states[virq];
		break;
	case VGIC_IRQ_TYPE_SPI:
		assert(vic != NULL);
		dstate = &vic->spi_states[virq - GIC_SPI_BASE];
		break;
	case VGIC_IRQ_TYPE_RESERVED:
#if VGIC_HAS_LPI && GICV3_HAS_VLPI_V4_1
	case VGIC_IRQ_TYPE_LPI:
#endif
	default:
		// Invalid IRQ number
		dstate = NULL;
		break;
	}
	return dstate;
}

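// Return true if any of the delivery state's level assertion bits
// (level_sw, level_msg or level_src) is currently set.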
bool
vgic_delivery_state_is_level_asserted(const vgic_delivery_state_t *x)
{
	return vgic_delivery_state_get_level_sw(x) ||
	       vgic_delivery_state_get_level_msg(x) ||
	       vgic_delivery_state_get_level_src(x);
}

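// Return true if the VIRQ is pending: the edge bit for edge-triggered
// VIRQs, or any asserted level bit for level-triggered VIRQs.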
bool
vgic_delivery_state_is_pending(const vgic_delivery_state_t *x)
{
	return vgic_delivery_state_get_cfg_is_edge(x)
		       ? vgic_delivery_state_get_edge(x)
		       : vgic_delivery_state_is_level_asserted(x);
}

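// Lock a VCPU's virtual GIC list registers against concurrent access.
//
// Returns the index of the physical CPU that currently owns the VCPU's
// list registers, or CPU_INDEX_INVALID if there is no remote owner. No
// lock is taken when the VCPU is NULL or is the calling thread. The
// _nopreempt variants assume the caller has already disabled preemption.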
cpu_index_t
vgic_lr_owner_lock(thread_t *vcpu)
{
	preempt_disable();
	return vgic_lr_owner_lock_nopreempt(vcpu);
}

cpu_index_t
vgic_lr_owner_lock_nopreempt(thread_t *vcpu) LOCK_IMPL
{
	cpu_index_t remote_cpu;
	if ((vcpu != NULL) && (vcpu != thread_get_self())) {
		spinlock_acquire_nopreempt(&vcpu->vgic_lr_owner_lock.lock);
		remote_cpu =
			atomic_load_relaxed(&vcpu->vgic_lr_owner_lock.owner);
	} else {
		remote_cpu = CPU_INDEX_INVALID;
	}
	return remote_cpu;
}

void
vgic_lr_owner_unlock(thread_t *vcpu)
{
	vgic_lr_owner_unlock_nopreempt(vcpu);
	preempt_enable();
}

void
vgic_lr_owner_unlock_nopreempt(thread_t *vcpu) LOCK_IMPL
{
	if ((vcpu != NULL) && (vcpu != thread_get_self())) {
		spinlock_release_nopreempt(&vcpu->vgic_lr_owner_lock.lock);
	}
}
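
// Minimal usage sketch for the LR owner lock (illustrative only;
// vgic_update_lrs_locked() below is a hypothetical placeholder, not a
// function defined by this module):
//
//	cpu_index_t remote = vgic_lr_owner_lock(vcpu);
//	if (remote == CPU_INDEX_INVALID) {
//		// No remote physical CPU owns this VCPU's LRs, so they
//		// may be updated directly while the lock is held.
//		vgic_update_lrs_locked(vcpu);
//	} else {
//		// The LRs are loaded on physical CPU 'remote'; updates
//		// must be coordinated with that CPU (e.g. via IPI).
//	}
//	vgic_lr_owner_unlock(vcpu);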