// Copyright 2016 The Fuchsia Authors
// Copyright (c) 2009 Corey Tabaka
// Copyright (c) 2015 Intel Corporation
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include "platform_p.h"
#include <arch/x86.h>
#include <arch/x86/apic.h>
#include <arch/x86/interrupts.h>
#include <assert.h>
#include <debug.h>
#include <dev/interrupt.h>
#include <err.h>
#include <fbl/algorithm.h>
#include <kernel/auto_lock.h>
#include <kernel/spinlock.h>
#include <kernel/thread.h>
#include <lib/pow2_range_allocator.h>
#include <lk/init.h>
#include <platform/pc.h>
#include <platform/pc/acpi.h>
#include <platform/pic.h>
#include <pow2.h>
#include <reg.h>
#include <sys/types.h>
#include <trace.h>
#include <zircon/types.h>

#define MAX_IRQ_BLOCK_SIZE MAX_MSI_IRQS

struct int_handler_struct {
    SpinLock lock;
    int_handler handler;
    void* arg;
};

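// Global state: |lock| serializes IO APIC reconfiguration, |int_handler_table|
// is indexed by x86 vector (each entry carries its own spinlock so dispatch does
// not contend across unrelated vectors), and |x86_irq_vector_allocator| hands
// out power-of-2 blocks of vectors in [X86_INT_PLATFORM_BASE,
// X86_INT_PLATFORM_MAX] for both wired IRQs and MSI.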
static SpinLock lock;
static struct int_handler_struct int_handler_table[X86_INT_COUNT];
static p2ra_state_t x86_irq_vector_allocator;

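// One-time APIC bring-up: remap and disable the legacy PICs, enumerate the IO
// APICs and ISA interrupt source overrides reported by the platform (ACPI),
// initialize the local and IO APICs, route the ISA IRQs (masked) to the BSP,
// and seed the x86 vector allocator.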
static void platform_init_apic(uint level) {
    pic_map(PIC1_BASE, PIC2_BASE);
    pic_disable();

    // Enumerate the IO APICs
    uint32_t num_io_apics;
    zx_status_t status = platform_enumerate_io_apics(NULL, 0, &num_io_apics);
    // TODO: If we want to support x86 without IO APICs, we should do something
    // better here.
    ASSERT(status == ZX_OK);
    io_apic_descriptor* io_apics =
        static_cast<io_apic_descriptor*>(calloc(num_io_apics, sizeof(*io_apics)));
    ASSERT(io_apics != NULL);
    uint32_t num_found = 0;
    status = platform_enumerate_io_apics(io_apics, num_io_apics, &num_found);
    ASSERT(status == ZX_OK);
    ASSERT(num_io_apics == num_found);

    // Enumerate the interrupt source overrides
    uint32_t num_isos;
    status = platform_enumerate_interrupt_source_overrides(NULL, 0, &num_isos);
    ASSERT(status == ZX_OK);
    io_apic_isa_override* isos = NULL;
    if (num_isos > 0) {
        isos = static_cast<io_apic_isa_override*>(calloc(num_isos, sizeof(*isos)));
        ASSERT(isos != NULL);
        status = platform_enumerate_interrupt_source_overrides(
            isos,
            num_isos,
            &num_found);
        ASSERT(status == ZX_OK);
        ASSERT(num_isos == num_found);
    }

    apic_vm_init();
    apic_local_init();
    apic_io_init(io_apics, num_io_apics, isos, num_isos);

    free(io_apics);
    free(isos);

    ASSERT(arch_ints_disabled());

    // Initialize the delivery modes/targets for the ISA interrupts.  Each loop
    // iteration configures ISA IRQ |irq| and |irq + 8|, covering IRQs 0-15; all
    // are left masked and targeted at the BSP.
    uint8_t bsp_apic_id = apic_bsp_id();
    for (uint8_t irq = 0; irq < 8; ++irq) {
        // Explicitly skip mapping the PIC2 interrupt, since it is actually
        // just used internally on the PICs for daisy chaining.  QEMU remaps
        // ISA IRQ 0 to global IRQ 2, but does not remap ISA IRQ 2 off of
        // global IRQ 2, so skipping this mapping also prevents a collision
        // with the PIT IRQ.
        if (irq != ISA_IRQ_PIC2) {
            apic_io_configure_isa_irq(
                irq,
                DELIVERY_MODE_FIXED,
                IO_APIC_IRQ_MASK,
                DST_MODE_PHYSICAL,
                bsp_apic_id,
                0);
        }
        apic_io_configure_isa_irq(
            static_cast<uint8_t>(irq + 8),
            DELIVERY_MODE_FIXED,
            IO_APIC_IRQ_MASK,
            DST_MODE_PHYSICAL,
            bsp_apic_id,
            0);
    }

    // Initialize the x86 IRQ vector allocator and add the range of vectors to manage.
    status = p2ra_init(&x86_irq_vector_allocator, MAX_IRQ_BLOCK_SIZE);
    ASSERT(status == ZX_OK);

    status = p2ra_add_range(&x86_irq_vector_allocator,
                            X86_INT_PLATFORM_BASE,
                            X86_INT_PLATFORM_MAX - X86_INT_PLATFORM_BASE + 1);
    ASSERT(status == ZX_OK);
}
LK_INIT_HOOK(apic, &platform_init_apic, LK_INIT_LEVEL_VM + 2);

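// The mask/unmask/configure entry points below take global IRQ numbers (IO APIC
// inputs) and apply the change at the IO APIC while holding the shared
// reconfiguration lock.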
zx_status_t mask_interrupt(unsigned int vector) {
    AutoSpinLock guard(&lock);
    apic_io_mask_irq(vector, IO_APIC_IRQ_MASK);
    return ZX_OK;
}

zx_status_t unmask_interrupt(unsigned int vector) {
    AutoSpinLock guard(&lock);
    apic_io_mask_irq(vector, IO_APIC_IRQ_UNMASK);
    return ZX_OK;
}

zx_status_t configure_interrupt(unsigned int vector,
                                enum interrupt_trigger_mode tm,
                                enum interrupt_polarity pol) {
    AutoSpinLock guard(&lock);
    apic_io_configure_irq(
        vector,
        tm,
        pol,
        DELIVERY_MODE_FIXED,
        IO_APIC_IRQ_MASK,
        DST_MODE_PHYSICAL,
        apic_bsp_id(),
        0);
    return ZX_OK;
}

zx_status_t get_interrupt_config(unsigned int vector,
                                 enum interrupt_trigger_mode* tm,
                                 enum interrupt_polarity* pol) {
    AutoSpinLock guard(&lock);
    return apic_io_fetch_irq_config(vector, tm, pol);
}

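// Top-level dispatch for external interrupts: frame->vector selects the handler
// table entry, the registered handler (if any) runs under that entry's spinlock,
// and an EOI is issued unconditionally afterwards.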
void platform_irq(x86_iframe_t* frame) {
    // get the current vector
    uint64_t x86_vector = frame->vector;
    DEBUG_ASSERT(x86_vector >= X86_INT_PLATFORM_BASE &&
                 x86_vector <= X86_INT_PLATFORM_MAX);

    // deliver the interrupt
    struct int_handler_struct* handler = &int_handler_table[x86_vector];

    {
        AutoSpinLockNoIrqSave guard(&handler->lock);
        if (handler->handler) {
            handler->handler(handler->arg);
        }
    }

    // NOTE: On x86, we always deactivate the interrupt.
    apic_issue_eoi();
}

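// Binds (or, when |handler| is NULL, unbinds) a handler for a global IRQ.  The
// x86 vector backing the IRQ is allocated from the vector pool on first
// registration and returned to the pool on unregistration.
//
// Hypothetical usage from a driver (the IRQ number, handler, and context names
// below are illustrative only):
//
//   zx_status_t st = register_int_handler(kbd_global_irq, kbd_isr, kbd_ctx);
//   if (st == ZX_OK) {
//       unmask_interrupt(kbd_global_irq);
//   }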
zx_status_t register_int_handler(unsigned int vector, int_handler handler, void* arg) {
    if (!is_valid_interrupt(vector, 0)) {
        return ZX_ERR_INVALID_ARGS;
    }

    AutoSpinLock guard(&lock);
    zx_status_t result = ZX_OK;

    /* Fetch the x86 vector currently configured for this global irq.  Force
     * its value to zero if it is currently invalid. */
    uint8_t x86_vector = apic_io_fetch_irq_vector(vector);
    if ((x86_vector < X86_INT_PLATFORM_BASE) ||
        (x86_vector > X86_INT_PLATFORM_MAX))
        x86_vector = 0;

    if (x86_vector && !handler) {
        /* If the x86 vector is valid, and we are unregistering the handler,
         * return the x86 vector to the pool. */
        p2ra_free_range(&x86_irq_vector_allocator, x86_vector, 1);
        x86_vector = 0;
    } else if (!x86_vector && handler) {
        /* If the x86 vector is invalid, and we are registering a handler,
         * attempt to get a new x86 vector from the pool. */
        uint range_start;

        /* Right now, there is not much we can do if the allocation fails.  In
         * debug builds, we ASSERT that everything went well.  In release
         * builds, we log a message and then silently ignore the request to
         * register a new handler. */
        result = p2ra_allocate_range(&x86_irq_vector_allocator, 1, &range_start);
        DEBUG_ASSERT(result == ZX_OK);

        if (result != ZX_OK) {
            TRACEF("Failed to allocate x86 IRQ vector for global IRQ (%u) when "
                   "registering new handler (%p, %p)\n",
                   vector, handler, arg);
            return result;
        }

        DEBUG_ASSERT((range_start >= X86_INT_PLATFORM_BASE) &&
                     (range_start <= X86_INT_PLATFORM_MAX));
        x86_vector = (uint8_t)range_start;
    }

    // Update the handler table and register the x86 vector with the io_apic.
    DEBUG_ASSERT(!!x86_vector == !!handler);

    {
        // No need to irq_save; we already did that when we grabbed the outer lock.
        AutoSpinLockNoIrqSave handler_guard(&int_handler_table[x86_vector].lock);

        if (handler && int_handler_table[x86_vector].handler) {
            p2ra_free_range(&x86_irq_vector_allocator, x86_vector, 1);
            return ZX_ERR_ALREADY_BOUND;
        }

        int_handler_table[x86_vector].handler = handler;
        int_handler_table[x86_vector].arg = handler ? arg : NULL;
    }

    apic_io_configure_irq_vector(vector, x86_vector);

    return ZX_OK;
}

uint32_t interrupt_get_base_vector(void) {
    // Intel Software Developer's Manual v3 chapter 6.2
    // 0-31 are reserved for architecture defined interrupts & exceptions
    return 32;
}

uint32_t interrupt_get_max_vector(void) {
    // x64 APIC supports 256 total vectors
    return 255;
}

bool is_valid_interrupt(unsigned int vector, uint32_t flags) {
    return apic_io_is_valid_irq(vector);
}

unsigned int remap_interrupt(unsigned int vector) {
    // Only legacy ISA IRQs (0 through NUM_ISA_IRQS - 1) are subject to the
    // interrupt source overrides; anything else is already a global IRQ.
    if (vector >= NUM_ISA_IRQS) {
        return vector;
    }
    return apic_io_isa_to_global(static_cast<uint8_t>(vector));
}

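// Fully disables the legacy PICs on the shutdown path; IO APIC and local APIC
// state is left untouched.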
void shutdown_interrupts(void) {
    pic_disable();
}

void shutdown_interrupts_curr_cpu(void) {
    // TODO(maniscalco): Walk interrupt redirection entries and make sure nothing targets this CPU.
}

// Intel 64 SoCs support the IO APIC and Local APIC, which support MSI by default.
// See sections 10.1, 10.4, and 10.11 of the Intel 64 and IA-32 Architectures
// Software Developer's Manual, Volume 3A.
bool msi_is_supported(void) {
    return true;
}

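// Allocates a block of MSI vectors for a device.  The block is rounded up to a
// power of two and is naturally aligned, since an MSI (as opposed to MSI-X)
// device encodes the message number in the low-order bits of the data payload
// and therefore needs a contiguous, aligned vector range.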
zx_status_t msi_alloc_block(uint requested_irqs,
                            bool can_target_64bit,
                            bool is_msix,
                            msi_block_t* out_block) {
    if (!out_block) {
        return ZX_ERR_INVALID_ARGS;
    }

    if (out_block->allocated) {
        return ZX_ERR_BAD_STATE;
    }

    if (!requested_irqs || (requested_irqs > MAX_MSI_IRQS)) {
        return ZX_ERR_INVALID_ARGS;
    }

    zx_status_t res;
    uint alloc_start;
    uint alloc_size = 1u << log2_uint_ceil(requested_irqs);

    res = p2ra_allocate_range(&x86_irq_vector_allocator, alloc_size, &alloc_start);
    if (res == ZX_OK) {
        // Compute the target address.
        // See section 10.11.1 of the Intel 64 and IA-32 Architectures Software
        // Developer's Manual Volume 3A.
        //
        // TODO(johngro) : don't just bind this block to the Local APIC of the
        // processor which is active when calling msi_alloc_block.  Instead,
        // there should either be a system policy (like, always send to any
        // processor, or just processor 0, or something), or the decision of
        // which CPUs to bind to should be left to the caller.
        uint32_t tgt_addr = 0xFEE00000;              // base addr
        tgt_addr |= ((uint32_t)apic_bsp_id()) << 12; // Dest ID == the BSP APIC ID
        tgt_addr |= 0x08;                            // Redir hint == 1
        tgt_addr &= ~0x04;                           // Dest Mode == Physical
        // For example, with a BSP APIC ID of 0 this works out to 0xFEE00008.

        // Compute the target data.
        // See section 10.11.2 of the Intel 64 and IA-32 Architectures Software
        // Developer's Manual Volume 3A.
        //
        // delivery mode == 0 (fixed)
        // trigger mode  == 0 (edge)
        // vector == start of block range
        DEBUG_ASSERT(!(alloc_start & ~0xFF));
        DEBUG_ASSERT(!(alloc_start & (alloc_size - 1)));
        uint32_t tgt_data = alloc_start;

        /* Success!  Fill out the bookkeeping and we are done. */
        out_block->platform_ctx = NULL;
        out_block->base_irq_id = alloc_start;
        out_block->num_irq = alloc_size;
        out_block->tgt_addr = tgt_addr;
        out_block->tgt_data = tgt_data;
        out_block->allocated = true;
    }

    return res;
}

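// Returns a previously allocated MSI block's vectors to the pool and clears the
// caller's bookkeeping structure.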
void msi_free_block(msi_block_t* block) {
    DEBUG_ASSERT(block);
    DEBUG_ASSERT(block->allocated);
    p2ra_free_range(&x86_irq_vector_allocator, block->base_irq_id, block->num_irq);
    memset(block, 0, sizeof(*block));
}

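// Points the handler table entry for |block->base_irq_id + msi_id| at |handler|.
// MSI deliveries bypass the IO APIC entirely: the device writes tgt_data (whose
// low bits encode the vector) to tgt_addr, so no additional routing work is
// needed here.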
void msi_register_handler(const msi_block_t* block, uint msi_id, int_handler handler, void* ctx) {
    DEBUG_ASSERT(block && block->allocated);
    DEBUG_ASSERT(msi_id < block->num_irq);

    uint x86_vector = msi_id + block->base_irq_id;
    DEBUG_ASSERT((x86_vector >= X86_INT_PLATFORM_BASE) &&
                 (x86_vector <= X86_INT_PLATFORM_MAX));

    AutoSpinLock guard(&int_handler_table[x86_vector].lock);
    int_handler_table[x86_vector].handler = handler;
    int_handler_table[x86_vector].arg = handler ? ctx : NULL;
}