// © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
//
// SPDX-License-Identifier: BSD-3-Clause

#include <assert.h>
#include <hyptypes.h>
#include <string.h>

#include <atomic.h>
#include <bitmap.h>
#include <compiler.h>
#include <globals.h>
#include <ipi.h>
#include <irq.h>
#include <object.h>
#include <panic.h>
#include <partition.h>
#include <partition_alloc.h>
#include <platform_irq.h>
#include <preempt.h>
#include <rcu.h>
#include <trace.h>

#include <events/irq.h>

#include "event_handlers.h"

#if IRQ_SPARSE_IDS
// Dynamically allocated two-level table of RCU-protected pointers to hwirq
// objects. No lock is needed to protect writes; they are done with
// compare-exchange, at both levels. Empty levels are never freed, on the
// assumption that IRQ numbers are set by hardware and therefore are likely to
// be reused.
#define IRQ_TABLE_L2_SIZE PGTABLE_HYP_PAGE_SIZE
#define IRQ_TABLE_L2_ENTRIES \
        (count_t)((IRQ_TABLE_L2_SIZE / sizeof(hwirq_t *_Atomic)))
static hwirq_t *_Atomic *_Atomic *irq_table_l1;
#else
// Dynamically allocated array of RCU-protected pointers to hwirq objects.
// No lock is needed to protect writes; they are done with compare-exchange.
static hwirq_t *_Atomic *irq_table;
#endif
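// Cached maximum IRQ number, read from the platform at cold boot.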
static count_t irq_max_cache;

#if IRQ_HAS_MSI
static index_t irq_msi_bitmap_size;
static _Atomic register_t *irq_msi_bitmap;
#endif

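// Boot-time (cold boot) initialization: size the IRQ table from the
// platform's maximum IRQ number and allocate it from the private partition,
// along with the MSI allocation bitmap when MSIs are supported.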
void
irq_handle_boot_cold_init(void)
{
        irq_max_cache = (count_t)platform_irq_max();

#if IRQ_SPARSE_IDS
        count_t irq_table_entries =
                (irq_max_cache + IRQ_TABLE_L2_ENTRIES) / IRQ_TABLE_L2_ENTRIES;
#else
        count_t irq_table_entries = irq_max_cache + 1U;
#endif
        assert(irq_table_entries != 0U);

        size_t alloc_size = irq_table_entries * sizeof(void *);
        size_t alloc_align = alignof(void *);

        void_ptr_result_t ptr_r = partition_alloc(partition_get_private(),
                                                  alloc_size, alloc_align);

        if (ptr_r.e != OK) {
                panic("Unable to allocate IRQ table");
        }

#if IRQ_SPARSE_IDS
        irq_table_l1 = ptr_r.r;
        (void)memset_s(irq_table_l1, alloc_size, 0, alloc_size);
#else
        irq_table = ptr_r.r;
        (void)memset_s(irq_table, alloc_size, 0, alloc_size);
#endif

#if IRQ_HAS_MSI
        irq_msi_bitmap_size =
                platform_irq_msi_max() - platform_irq_msi_base + 1U;
        count_t msi_bitmap_words = BITMAP_NUM_WORDS(irq_msi_bitmap_size);
        alloc_size = msi_bitmap_words * sizeof(register_t);

        ptr_r = partition_alloc(partition_get_private(), alloc_size,
                                alignof(register_t));
        if (ptr_r.e != OK) {
                panic("Unable to allocate MSI allocator bitmap");
        }

        irq_msi_bitmap = ptr_r.r;
        (void)memset_s(irq_msi_bitmap, alloc_size, 0, alloc_size);
#endif
}

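// Find the IRQ table entry for the given IRQ number, returning NULL if it
// does not exist. With sparse IRQ IDs, a missing second-level table is
// allocated on demand when the allocate flag is set; if a concurrent
// allocation wins the compare-exchange race, the freshly allocated level is
// freed and the winner's level is used instead.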
static hwirq_t *_Atomic *
irq_find_entry(irq_t irq, bool allocate)
{
        assert((count_t)irq <= irq_max_cache);

#if IRQ_SPARSE_IDS
        count_t irq_l1_index = irq / IRQ_TABLE_L2_ENTRIES;
        count_t irq_l2_index = irq % IRQ_TABLE_L2_ENTRIES;
        hwirq_t *_Atomic *irq_table_l2 =
                atomic_load_consume(&irq_table_l1[irq_l1_index]);

        if ((irq_table_l2 == NULL) && allocate) {
                size_t alloc_size = IRQ_TABLE_L2_SIZE;
                size_t alloc_align = alignof(void *);
                void_ptr_result_t ptr_r = partition_alloc(
                        partition_get_private(), alloc_size, alloc_align);

                if (ptr_r.e == OK) {
                        (void)memset_s(ptr_r.r, alloc_size, 0, alloc_size);

                        if (atomic_compare_exchange_strong_explicit(
                                    &irq_table_l1[irq_l1_index], &irq_table_l2,
                                    (hwirq_t *_Atomic *)ptr_r.r,
                                    memory_order_release,
                                    memory_order_consume)) {
                                irq_table_l2 = (hwirq_t *_Atomic *)ptr_r.r;
                        } else {
                                // Another CPU installed a level-2 table first;
                                // free ours and use theirs.
                                assert(irq_table_l2 != NULL);
                                (void)partition_free(partition_get_private(),
                                                     ptr_r.r, alloc_size);
                        }
                }
        }

        return (irq_table_l2 == NULL) ? NULL : &irq_table_l2[irq_l2_index];
#else
        (void)allocate;
        return &irq_table[irq];
#endif
}

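// Look up the hwirq object registered for an IRQ number, if any. The returned
// pointer is RCU-protected, so callers are expected to hold an RCU read-side
// critical section while using it.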
hwirq_t *
irq_lookup_hwirq(irq_t irq)
{
        hwirq_t *_Atomic *entry = irq_find_entry(irq, false);
        return (entry == NULL) ? NULL : atomic_load_consume(entry);
}

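// Object creation handler: record the IRQ number and action from the create
// parameters in the new hwirq object.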
error_t
irq_handle_object_create_hwirq(hwirq_create_t params)
{
        hwirq_t *hwirq = params.hwirq;
        assert(hwirq != NULL);

        hwirq->irq = params.irq;
        hwirq->action = params.action;

        return OK;
}

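// Object activation handler: check the IRQ number with the platform code and
// publish the hwirq object in the global IRQ table.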
error_t
irq_handle_object_activate_hwirq(hwirq_t *hwirq)
{
        error_t err = platform_irq_check(hwirq->irq);
        if (err != OK) {
                goto out;
        }

        // Locate the IRQ's entry in the global IRQ table, allocating table
        // levels if necessary.
        hwirq_t *_Atomic *entry = irq_find_entry(hwirq->irq, true);
        if (entry == NULL) {
                err = ERROR_NOMEM;
                goto out;
        }

        // Insert the pointer in the global table if the current entry in the
        // table is NULL. We do not keep a reference; this is an RCU-protected
        // pointer which is automatically set to NULL on object deletion. The
        // release ordering here matches the consume ordering in
        // irq_lookup_hwirq().
        hwirq_t *prev = NULL;
        if (!atomic_compare_exchange_strong_explicit(entry, &prev, hwirq,
                                                     memory_order_release,
                                                     memory_order_relaxed)) {
                // This IRQ is already registered.
                err = ERROR_BUSY;
                goto out;
        }

        // The IRQ is fully registered; give the handler an opportunity to
        // enable it if desired.
        (void)trigger_irq_registered_event(hwirq->action, hwirq->irq, hwirq);

out:
        return err;
}

irq_t
irq_max(void)
{
        return irq_max_cache;
}

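// Thin wrappers around the platform IRQ driver for enabling and disabling a
// registered hwirq.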
void
irq_enable_shared(hwirq_t *hwirq)
{
        platform_irq_enable_shared(hwirq->irq);
}

void
irq_enable_local(hwirq_t *hwirq)
{
        platform_irq_enable_local(hwirq->irq);
}

void
irq_disable_shared_nosync(hwirq_t *hwirq)
{
        platform_irq_disable_shared(hwirq->irq);
}

void
irq_disable_local(hwirq_t *hwirq)
{
        platform_irq_disable_local(hwirq->irq);
}

void
irq_disable_local_nowait(hwirq_t *hwirq)
{
        platform_irq_disable_local_nowait(hwirq->irq);
}

void
irq_disable_shared_sync(hwirq_t *hwirq)
{
        irq_disable_shared_nosync(hwirq);

        // Wait for any in-progress IRQ deliveries on other CPUs to complete.
        //
        // This works regardless of the RCU implementation because IRQ delivery
        // itself is in an RCU critical section, and the
        // irq_disable_shared_nosync() call above is enough to guarantee that
        // any delivery that hasn't started its critical section yet will not
        // receive the IRQ.
        rcu_sync();
}

void
irq_deactivate(hwirq_t *hwirq)
{
        platform_irq_deactivate(hwirq->irq);
}

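// Object deactivation handler: disable the physical IRQ where possible and
// unpublish the hwirq object from the global IRQ table. In-flight deliveries
// on other CPUs may still observe the old pointer until their RCU read-side
// critical sections end.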
void
irq_handle_object_deactivate_hwirq(hwirq_t *hwirq)
{
        assert(hwirq != NULL);
        assert(hwirq->irq <= irq_max_cache);

        // This object was activated successfully, so it must already be in the
        // global table.
        hwirq_t *_Atomic *entry = irq_find_entry(hwirq->irq, false);
        assert(entry != NULL);
        assert(atomic_load_relaxed(entry) == hwirq);

        // Disable the physical IRQ if possible.
        if (platform_irq_is_percpu(hwirq->irq)) {
                // To make this take effect immediately across all CPUs we
                // would need to perform an IPI. That is a waste of effort
                // since irq_interrupt_dispatch() will disable IRQs with no
                // handler anyway, so we just disable it locally.
                preempt_disable();
                platform_irq_disable_local(hwirq->irq);
                preempt_enable();
        } else {
                platform_irq_disable_shared(hwirq->irq);
        }

        // Remove this HWIRQ from the dispatch table.
        atomic_store_relaxed(entry, NULL);
}

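// Disable, drop the priority of, and deactivate an acknowledged IRQ that has
// no registered hwirq object.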
static void
disable_unhandled_irq(irq_result_t irq_r) REQUIRE_PREEMPT_DISABLED
{
        TRACE(ERROR, WARN, "disabling unhandled HW IRQ {:d}", irq_r.r);
        if (platform_irq_is_percpu(irq_r.r)) {
                platform_irq_disable_local(irq_r.r);
        } else {
                platform_irq_disable_shared(irq_r.r);
        }
        platform_irq_priority_drop(irq_r.r);
        platform_irq_deactivate(irq_r.r);
}

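// Acknowledge and dispatch a single pending hardware IRQ. Returns false if no
// IRQ was pending, and true otherwise, including for IRQs that were consumed
// by the platform layer (such as IPIs).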
static bool
irq_interrupt_dispatch_one(void) REQUIRE_PREEMPT_DISABLED
{
        irq_result_t irq_r = platform_irq_acknowledge();
        bool ret = true;

        if (irq_r.e == ERROR_RETRY) {
                // IRQ handled by the platform, probably an IPI
                goto out;
        } else if (compiler_unexpected(irq_r.e == ERROR_IDLE)) {
                // No IRQs are pending; exit
                ret = false;
                goto out;
        } else {
                assert(irq_r.e == OK);
                TRACE(INFO, INFO, "acknowledged HW IRQ {:d}", irq_r.r);

                // The entire IRQ delivery is an RCU critical section.
                //
                // Note that this is naturally true anyway if we don't allow
                // interrupt nesting.
                //
                // Also, the alternative is to take a reference to the hwirq,
                // which might force us to tear down the hwirq (and potentially
                // the whole partition) in the interrupt handler.
                rcu_read_start();
                hwirq_t *hwirq = irq_lookup_hwirq(irq_r.r);

                if (compiler_unexpected(hwirq == NULL)) {
                        disable_unhandled_irq(irq_r);
                        rcu_read_finish();
                        goto out;
                }

                assert(hwirq->irq == irq_r.r);

                bool handled = trigger_irq_received_event(hwirq->action,
                                                          irq_r.r, hwirq);
                platform_irq_priority_drop(irq_r.r);
                if (handled) {
                        platform_irq_deactivate(irq_r.r);
                }
                rcu_read_finish();
        }

out:
        return ret;
}

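// EL2 IRQ dispatch loop: handle pending IRQs until none remain, then hand off
// to ipi_handle_relaxed() and return its result.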
bool
irq_interrupt_dispatch(void)
{
        bool spurious = true;

        while (irq_interrupt_dispatch_one()) {
                spurious = false;
        }

        if (spurious) {
                TRACE(INFO, INFO, "spurious EL2 IRQ");
        }

        return ipi_handle_relaxed();
}

#if IRQ_HAS_MSI

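// Allocate a free MSI number from the bitmap, then create and activate a
// hwirq object for it on behalf of the given partition. The bitmap bit is
// released again if creation fails, or by the cleanup handler if activation
// fails.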
hwirq_ptr_result_t
irq_allocate_msi(partition_t *partition, hwirq_action_t action)
{
        hwirq_ptr_result_t ret;
        index_t msi;

        assert(irq_msi_bitmap != NULL);

        do {
                if (!bitmap_atomic_ffc(irq_msi_bitmap, irq_msi_bitmap_size,
                                       &msi)) {
                        ret = hwirq_ptr_result_error(ERROR_BUSY);
                        goto out;
                }
        } while (bitmap_atomic_test_and_set(irq_msi_bitmap, msi,
                                            memory_order_relaxed));

        irq_t irq = msi + platform_irq_msi_base;
        hwirq_create_t hwirq_params = { .action = action, .irq = irq };
        ret = partition_allocate_hwirq(partition, hwirq_params);
        if (ret.e != OK) {
                bitmap_atomic_clear(irq_msi_bitmap, msi, memory_order_relaxed);
                goto out;
        }

        ret.e = object_activate_hwirq(ret.r);
        if (ret.e != OK) {
                // IRQ number will be freed by cleanup handler
                object_put_hwirq(ret.r);
                goto out;
        }

out:
        return ret;
}

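// Object cleanup handler: if the hwirq's IRQ number falls within the MSI
// range, return it to the MSI allocation bitmap.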
void
irq_handle_object_cleanup_hwirq(hwirq_t *hwirq)
{
        if (hwirq->irq >= platform_irq_msi_base) {
                index_t msi = hwirq->irq - platform_irq_msi_base;
                if (msi < irq_msi_bitmap_size) {
                        assert(irq_msi_bitmap != NULL);

                        // Free the IRQ number from the MSI allocator
                        bitmap_atomic_clear(irq_msi_bitmap, msi,
                                            memory_order_release);
                }
        }
}
#endif // IRQ_HAS_MSI
