1 /*
2 * Intel IO-APIC support for multi-Pentium hosts.
3 *
4 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
5 *
6 * Many thanks to Stig Venaas for trying out countless experimental
7 * patches and reporting/debugging problems patiently!
8 *
9 * (c) 1999, Multiple IO-APIC support, developed by
10 * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
11 * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
12 * further tested and cleaned up by Zach Brown <zab@redhat.com>
13 * and Ingo Molnar <mingo@redhat.com>
14 *
15 * Fixes
16 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
17 * thanks to Eric Gilmore
18 * and Rolf G. Tews
19 * for testing these extensively
20 * Paul Diefenbaugh : Added full ACPI support
21 */
22
23 #include <xen/lib.h>
24 #include <xen/init.h>
25 #include <xen/irq.h>
26 #include <xen/delay.h>
27 #include <xen/sched.h>
28 #include <xen/acpi.h>
29 #include <xen/keyhandler.h>
30 #include <xen/softirq.h>
31 #include <asm/mc146818rtc.h>
32 #include <asm/smp.h>
33 #include <asm/desc.h>
34 #include <asm/msi.h>
35 #include <asm/setup.h>
36 #include <mach_apic.h>
37 #include <io_ports.h>
38 #include <public/physdev.h>
39 #include <xen/trace.h>
40
/* Where, if anywhere, the i8259 is connected in ExtINT mode. */
42 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
43
44 static DEFINE_SPINLOCK(ioapic_lock);
45
46 bool __read_mostly skip_ioapic_setup;
47 bool __initdata ioapic_ack_new = true;
48 bool __initdata ioapic_ack_forced;
49
50 /*
51 * # of IRQ routing registers
52 */
53 int __read_mostly nr_ioapic_entries[MAX_IO_APICS];
54 int __read_mostly nr_ioapics;
55
56 /*
57 * Rough estimation of how many shared IRQs there are, can
58 * be changed anytime.
59 */
60 #define MAX_PLUS_SHARED_IRQS nr_irqs_gsi
61 #define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + nr_irqs_gsi)
62
63
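/*
 * A descriptive note (derived from the macro below and its use in
 * __io_apic_eoi()): IO-APICs reporting version 0x20 or newer provide a
 * dedicated EOI register; older parts need the mask/edge/level dance to
 * clear a stuck Remote IRR bit.
 */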
64 #define ioapic_has_eoi_reg(apic) (mp_ioapics[(apic)].mpc_apicver >= 0x20)
65
66 static int apic_pin_2_gsi_irq(int apic, int pin);
67
68 static vmask_t *__read_mostly vector_map[MAX_IO_APICS];
69
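/*
 * Merge the used-vector tracking of two IO-APICs: after this, both APICs
 * (and any IRQ descriptor that pointed at the old map) share a single
 * vector bitmap, so a vector in use via either APIC is treated as used
 * for both.
 */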
static void share_vector_maps(unsigned int src, unsigned int dst)
71 {
72 unsigned int pin;
73
74 if (vector_map[src] == vector_map[dst])
75 return;
76
77 bitmap_or(vector_map[src]->_bits, vector_map[src]->_bits,
78 vector_map[dst]->_bits, NR_VECTORS);
79
80 for (pin = 0; pin < nr_ioapic_entries[dst]; ++pin) {
81 int irq = apic_pin_2_gsi_irq(dst, pin);
82 struct irq_desc *desc;
83
84 if (irq < 0)
85 continue;
86 desc = irq_to_desc(irq);
87 if (desc->arch.used_vectors == vector_map[dst])
88 desc->arch.used_vectors = vector_map[src];
89 }
90
91 vector_map[dst] = vector_map[src];
92 }
93
94 /*
95 * This is performance-critical, we want to do it O(1)
96 *
97 * the indexing order of this array favors 1:1 mappings
98 * between pins and IRQs.
99 */
100
101 static struct irq_pin_list {
102 int apic, pin;
103 unsigned int next;
104 } *__read_mostly irq_2_pin;
105
106 static unsigned int irq_2_pin_free_entry;
107
108 /*
109 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
110 * shared ISA-space IRQs, so we have to support them. We are super
111 * fast in the common case, and fast for shared ISA-space IRQs.
112 */
static void add_pin_to_irq(unsigned int irq, int apic, int pin)
114 {
115 struct irq_pin_list *entry = irq_2_pin + irq;
116
117 while (entry->next) {
118 BUG_ON((entry->apic == apic) && (entry->pin == pin));
119 entry = irq_2_pin + entry->next;
120 }
121
122 BUG_ON((entry->apic == apic) && (entry->pin == pin));
123
124 if (entry->pin != -1) {
125 if (irq_2_pin_free_entry >= PIN_MAP_SIZE)
126 panic("io_apic.c: whoops");
127 entry->next = irq_2_pin_free_entry;
128 entry = irq_2_pin + entry->next;
129 irq_2_pin_free_entry = entry->next;
130 entry->next = 0;
131 }
132 entry->apic = apic;
133 entry->pin = pin;
134 share_vector_maps(irq_2_pin[irq].apic, apic);
135 }
136
static void remove_pin_from_irq(unsigned int irq, int apic, int pin)
138 {
139 struct irq_pin_list *entry, *prev;
140
141 for (entry = &irq_2_pin[irq]; ; entry = &irq_2_pin[entry->next]) {
142 if ((entry->apic == apic) && (entry->pin == pin))
143 break;
144 BUG_ON(!entry->next);
145 }
146
147 entry->pin = entry->apic = -1;
148
149 if (entry != &irq_2_pin[irq]) {
150 /* Removed entry is not at head of list. */
151 prev = &irq_2_pin[irq];
152 while (&irq_2_pin[prev->next] != entry)
153 prev = &irq_2_pin[prev->next];
154 prev->next = entry->next;
155 } else if (entry->next) {
156 /* Removed entry is at head of multi-item list. */
157 prev = entry;
158 entry = &irq_2_pin[entry->next];
159 *prev = *entry;
160 entry->pin = entry->apic = -1;
161 } else
162 return;
163
164 entry->next = irq_2_pin_free_entry;
165 irq_2_pin_free_entry = entry - irq_2_pin;
166 }
167
168 /*
169 * Reroute an IRQ to a different pin.
170 */
static void __init replace_pin_at_irq(unsigned int irq,
                                      int oldapic, int oldpin,
                                      int newapic, int newpin)
174 {
175 struct irq_pin_list *entry = irq_2_pin + irq;
176
177 while (1) {
178 if (entry->apic == oldapic && entry->pin == oldpin) {
179 entry->apic = newapic;
180 entry->pin = newpin;
181 share_vector_maps(oldapic, newapic);
182 }
183 if (!entry->next)
184 break;
185 entry = irq_2_pin + entry->next;
186 }
187 }
188
vmask_t *io_apic_get_used_vector_map(unsigned int irq)
190 {
191 struct irq_pin_list *entry = irq_2_pin + irq;
192
193 if (entry->pin == -1)
194 return NULL;
195
196 return vector_map[entry->apic];
197 }
198
struct IO_APIC_route_entry **alloc_ioapic_entries(void)
200 {
201 int apic;
202 struct IO_APIC_route_entry **ioapic_entries;
203
204 ioapic_entries = xmalloc_array(struct IO_APIC_route_entry *, nr_ioapics);
205 if (!ioapic_entries)
206 return 0;
207
208 for (apic = 0; apic < nr_ioapics; apic++) {
209 ioapic_entries[apic] =
210 xmalloc_array(struct IO_APIC_route_entry,
211 nr_ioapic_entries[apic]);
212 if (!ioapic_entries[apic] && nr_ioapic_entries[apic])
213 goto nomem;
214 }
215
216 return ioapic_entries;
217
218 nomem:
219 while (--apic >= 0)
220 xfree(ioapic_entries[apic]);
221 xfree(ioapic_entries);
222
223 return 0;
224 }
225
226 union entry_union {
227 struct { u32 w1, w2; };
228 struct IO_APIC_route_entry entry;
229 };
230
struct IO_APIC_route_entry __ioapic_read_entry(
    unsigned int apic, unsigned int pin, bool raw)
233 {
234 unsigned int (*read)(unsigned int, unsigned int)
235 = raw ? __io_apic_read : io_apic_read;
236 union entry_union eu;
237 eu.w1 = (*read)(apic, 0x10 + 2 * pin);
238 eu.w2 = (*read)(apic, 0x11 + 2 * pin);
239 return eu.entry;
240 }
241
static struct IO_APIC_route_entry ioapic_read_entry(
    unsigned int apic, unsigned int pin, bool raw)
244 {
245 struct IO_APIC_route_entry entry;
246 unsigned long flags;
247
248 spin_lock_irqsave(&ioapic_lock, flags);
249 entry = __ioapic_read_entry(apic, pin, raw);
250 spin_unlock_irqrestore(&ioapic_lock, flags);
251 return entry;
252 }
253
void __ioapic_write_entry(
    unsigned int apic, unsigned int pin, bool raw,
    struct IO_APIC_route_entry e)
257 {
258 void (*write)(unsigned int, unsigned int, unsigned int)
259 = raw ? __io_apic_write : io_apic_write;
260 union entry_union eu = { .entry = e };
261
262 (*write)(apic, 0x11 + 2*pin, eu.w2);
263 (*write)(apic, 0x10 + 2*pin, eu.w1);
264 }
265
static void ioapic_write_entry(
    unsigned int apic, unsigned int pin, bool raw,
    struct IO_APIC_route_entry e)
269 {
270 unsigned long flags;
271 spin_lock_irqsave(&ioapic_lock, flags);
272 __ioapic_write_entry(apic, pin, raw, e);
273 spin_unlock_irqrestore(&ioapic_lock, flags);
274 }
275
276 /* EOI an IO-APIC entry. Vector may be -1, indicating that it should be
277 * worked out using the pin. This function expects that the ioapic_lock is
278 * being held, and interrupts are disabled (or there is a good reason not
279 * to), and that if both pin and vector are passed, that they refer to the
280 * same redirection entry in the IO-APIC. */
static void __io_apic_eoi(unsigned int apic, unsigned int vector, unsigned int pin)
282 {
283 /* Prefer the use of the EOI register if available */
284 if ( ioapic_has_eoi_reg(apic) )
285 {
286 /* If vector is unknown, read it from the IO-APIC */
287 if ( vector == IRQ_VECTOR_UNASSIGNED )
288 vector = __ioapic_read_entry(apic, pin, TRUE).vector;
289
290 *(IO_APIC_BASE(apic)+16) = vector;
291 }
292 else
293 {
294 /* Else fake an EOI by switching to edge triggered mode
295 * and back */
296 struct IO_APIC_route_entry entry;
297 bool need_to_unmask = false;
298
299 entry = __ioapic_read_entry(apic, pin, TRUE);
300
301 if ( ! entry.mask )
302 {
303 /* If entry is not currently masked, mask it and make
304 * a note to unmask it later */
305 entry.mask = 1;
306 __ioapic_write_entry(apic, pin, TRUE, entry);
307 need_to_unmask = true;
308 }
309
310 /* Flip the trigger mode to edge and back */
311 entry.trigger = 0;
312 __ioapic_write_entry(apic, pin, TRUE, entry);
313 entry.trigger = 1;
314 __ioapic_write_entry(apic, pin, TRUE, entry);
315
316 if ( need_to_unmask )
317 {
/* Unmask if necessary */
319 entry.mask = 0;
320 __ioapic_write_entry(apic, pin, TRUE, entry);
321 }
322 }
323 }
324
325 /*
326 * Saves all the IO-APIC RTE's
327 */
int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
329 {
330 int apic, pin;
331
332 if (!ioapic_entries)
333 return -ENOMEM;
334
335 for (apic = 0; apic < nr_ioapics; apic++) {
336 if (!nr_ioapic_entries[apic])
337 continue;
338
339 if (!ioapic_entries[apic])
340 return -ENOMEM;
341
342 for (pin = 0; pin < nr_ioapic_entries[apic]; pin++)
343 ioapic_entries[apic][pin] = __ioapic_read_entry(apic, pin, 1);
344 }
345
346 return 0;
347 }
348
349 /*
350 * Mask all IO APIC entries.
351 */
void mask_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
353 {
354 int apic, pin;
355
356 if (!ioapic_entries)
357 return;
358
359 for (apic = 0; apic < nr_ioapics; apic++) {
360 if (!nr_ioapic_entries[apic])
361 continue;
362
363 if (!ioapic_entries[apic])
364 break;
365
366 for (pin = 0; pin < nr_ioapic_entries[apic]; pin++) {
367 struct IO_APIC_route_entry entry;
368
369 entry = ioapic_entries[apic][pin];
370 if (!entry.mask) {
371 entry.mask = 1;
372
373 ioapic_write_entry(apic, pin, 1, entry);
374 }
375 }
376 }
377 }
378
379 /*
 * Restore IO APIC entries which were saved in ioapic_entries.
381 */
int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
383 {
384 int apic, pin;
385
386 if (!ioapic_entries)
387 return -ENOMEM;
388
389 for (apic = 0; apic < nr_ioapics; apic++) {
390 if (!nr_ioapic_entries[apic])
391 continue;
392
393 if (!ioapic_entries[apic])
394 return -ENOMEM;
395
396 for (pin = 0; pin < nr_ioapic_entries[apic]; pin++)
397 ioapic_write_entry(apic, pin, 1, ioapic_entries[apic][pin]);
398 }
399
400 return 0;
401 }
402
void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries)
404 {
405 int apic;
406
407 for (apic = 0; apic < nr_ioapics; apic++)
408 xfree(ioapic_entries[apic]);
409
410 xfree(ioapic_entries);
411 }
412
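/*
 * Apply a read-modify-write to the low dword of the RTE of every pin
 * associated with 'irq': bits in 'enable' are set, bits in 'disable' are
 * cleared.  Callers are expected to hold ioapic_lock.
 */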
static void __modify_IO_APIC_irq(unsigned int irq, unsigned long enable, unsigned long disable)
414 {
415 struct irq_pin_list *entry = irq_2_pin + irq;
416 unsigned int pin, reg;
417
418 for (;;) {
419 pin = entry->pin;
420 if (pin == -1)
421 break;
422 reg = io_apic_read(entry->apic, 0x10 + pin*2);
423 reg &= ~disable;
424 reg |= enable;
425 io_apic_modify(entry->apic, 0x10 + pin*2, reg);
426 if (!entry->next)
427 break;
428 entry = irq_2_pin + entry->next;
429 }
430 }
431
432 /* mask = 1 */
static void __mask_IO_APIC_irq(unsigned int irq)
434 {
435 __modify_IO_APIC_irq(irq, 0x00010000, 0);
436 }
437
438 /* mask = 0 */
static void __unmask_IO_APIC_irq(unsigned int irq)
440 {
441 __modify_IO_APIC_irq(irq, 0, 0x00010000);
442 }
443
444 /* trigger = 0 */
static void __edge_IO_APIC_irq(unsigned int irq)
446 {
447 __modify_IO_APIC_irq(irq, 0, 0x00008000);
448 }
449
450 /* trigger = 1 */
static void __level_IO_APIC_irq(unsigned int irq)
452 {
453 __modify_IO_APIC_irq(irq, 0x00008000, 0);
454 }
455
static void mask_IO_APIC_irq(struct irq_desc *desc)
457 {
458 unsigned long flags;
459
460 spin_lock_irqsave(&ioapic_lock, flags);
461 __mask_IO_APIC_irq(desc->irq);
462 spin_unlock_irqrestore(&ioapic_lock, flags);
463 }
464
static void unmask_IO_APIC_irq(struct irq_desc *desc)
466 {
467 unsigned long flags;
468
469 spin_lock_irqsave(&ioapic_lock, flags);
470 __unmask_IO_APIC_irq(desc->irq);
471 spin_unlock_irqrestore(&ioapic_lock, flags);
472 }
473
static void __eoi_IO_APIC_irq(struct irq_desc *desc)
475 {
476 struct irq_pin_list *entry = irq_2_pin + desc->irq;
477 unsigned int pin, vector = desc->arch.vector;
478
479 for (;;) {
480 pin = entry->pin;
481 if (pin == -1)
482 break;
483 __io_apic_eoi(entry->apic, vector, pin);
484 if (!entry->next)
485 break;
486 entry = irq_2_pin + entry->next;
487 }
488 }
489
static void eoi_IO_APIC_irq(struct irq_desc *desc)
491 {
492 unsigned long flags;
493 spin_lock_irqsave(&ioapic_lock, flags);
494 __eoi_IO_APIC_irq(desc);
495 spin_unlock_irqrestore(&ioapic_lock, flags);
496 }
497
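/*
 * Quiesce a single RTE: mask it, EOI a stale Remote IRR if one is pending
 * (flipping the entry to level trigger first if needed), then write back a
 * zeroed, masked entry.  SMI pins are left untouched.
 */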
static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
499 {
500 struct IO_APIC_route_entry entry;
501
502 /* Check delivery_mode to be sure we're not clearing an SMI pin */
503 entry = __ioapic_read_entry(apic, pin, FALSE);
504 if (entry.delivery_mode == dest_SMI)
505 return;
506
507 /*
508 * Make sure the entry is masked and re-read the contents to check
509 * if it is a level triggered pin and if the remoteIRR is set.
510 */
511 if (!entry.mask) {
512 entry.mask = 1;
513 __ioapic_write_entry(apic, pin, FALSE, entry);
514 }
515 entry = __ioapic_read_entry(apic, pin, TRUE);
516
517 if (entry.irr) {
518 /* Make sure the trigger mode is set to level. */
519 if (!entry.trigger) {
520 entry.trigger = 1;
521 __ioapic_write_entry(apic, pin, TRUE, entry);
522 }
523 __io_apic_eoi(apic, entry.vector, pin);
524 }
525
526 /*
527 * Disable it in the IO-APIC irq-routing table:
528 */
529 memset(&entry, 0, sizeof(entry));
530 entry.mask = 1;
531 __ioapic_write_entry(apic, pin, TRUE, entry);
532
533 entry = __ioapic_read_entry(apic, pin, TRUE);
534 if (entry.irr)
535 printk(KERN_ERR "IO-APIC%02x-%u: Unable to reset IRR\n",
536 IO_APIC_ID(apic), pin);
537 }
538
static void clear_IO_APIC(void)
540 {
541 int apic, pin;
542
543 for (apic = 0; apic < nr_ioapics; apic++) {
544 for (pin = 0; pin < nr_ioapic_entries[apic]; pin++)
545 clear_IO_APIC_pin(apic, pin);
546 }
547 }
548
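/*
 * Reprogram the destination and vector fields of every RTE belonging to
 * this IRQ so that it targets the CPUs selected by 'mask'.
 */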
static void
set_ioapic_affinity_irq(struct irq_desc *desc, const cpumask_t *mask)
551 {
552 unsigned long flags;
553 unsigned int dest;
554 int pin, irq;
555 struct irq_pin_list *entry;
556
557 irq = desc->irq;
558
559 spin_lock_irqsave(&ioapic_lock, flags);
560 dest = set_desc_affinity(desc, mask);
561 if (dest != BAD_APICID) {
562 if ( !x2apic_enabled )
563 dest = SET_APIC_LOGICAL_ID(dest);
564 entry = irq_2_pin + irq;
565 for (;;) {
566 unsigned int data;
567 pin = entry->pin;
568 if (pin == -1)
569 break;
570
571 io_apic_write(entry->apic, 0x10 + 1 + pin*2, dest);
572 data = io_apic_read(entry->apic, 0x10 + pin*2);
573 data &= ~IO_APIC_REDIR_VECTOR_MASK;
574 data |= desc->arch.vector & 0xFF;
575 io_apic_modify(entry->apic, 0x10 + pin*2, data);
576
577 if (!entry->next)
578 break;
579 entry = irq_2_pin + entry->next;
580 }
581 }
582 spin_unlock_irqrestore(&ioapic_lock, flags);
583
584 }
585
586 /*
587 * Find the IRQ entry number of a certain pin.
588 */
static int find_irq_entry(int apic, int pin, int type)
590 {
591 int i;
592
593 for (i = 0; i < mp_irq_entries; i++)
594 if (mp_irqs[i].mpc_irqtype == type &&
595 (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
596 mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
597 mp_irqs[i].mpc_dstirq == pin)
598 return i;
599
600 return -1;
601 }
602
603 /*
604 * Find the pin to which IRQ[irq] (ISA) is connected
605 */
static int __init find_isa_irq_pin(int irq, int type)
607 {
608 int i;
609
610 for (i = 0; i < mp_irq_entries; i++) {
611 int lbus = mp_irqs[i].mpc_srcbus;
612
613 if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
614 mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
615 mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
616 mp_bus_id_to_type[lbus] == MP_BUS_NEC98
617 ) &&
618 (mp_irqs[i].mpc_irqtype == type) &&
619 (mp_irqs[i].mpc_srcbusirq == irq))
620
621 return mp_irqs[i].mpc_dstirq;
622 }
623 return -1;
624 }
625
static int __init find_isa_irq_apic(int irq, int type)
627 {
628 int i;
629
630 for (i = 0; i < mp_irq_entries; i++) {
631 int lbus = mp_irqs[i].mpc_srcbus;
632
633 if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
634 mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
635 mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
636 mp_bus_id_to_type[lbus] == MP_BUS_NEC98
637 ) &&
638 (mp_irqs[i].mpc_irqtype == type) &&
639 (mp_irqs[i].mpc_srcbusirq == irq))
640 break;
641 }
642 if (i < mp_irq_entries) {
643 int apic;
644 for(apic = 0; apic < nr_ioapics; apic++) {
645 if (!nr_ioapic_entries[apic])
646 continue;
647 if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
648 return apic;
649 }
650 }
651
652 return -1;
653 }
654
655 /*
656 * Find a specific PCI IRQ entry.
657 * Not an __init, possibly needed by modules
658 */
659 static int pin_2_irq(int idx, int apic, int pin);
660
661 /*
 * This function is currently only a helper for the i386 SMP boot process, where
 * we need to reprogram the ioredtbls to cater for the CPUs which have come online,
 * so the mask in all cases should simply be TARGET_CPUS
665 */
void /*__init*/ setup_ioapic_dest(void)
667 {
668 int pin, ioapic, irq, irq_entry;
669
670 if (skip_ioapic_setup)
671 return;
672
673 for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
674 for (pin = 0; pin < nr_ioapic_entries[ioapic]; pin++) {
675 struct irq_desc *desc;
676
677 irq_entry = find_irq_entry(ioapic, pin, mp_INT);
678 if (irq_entry == -1)
679 continue;
680 irq = pin_2_irq(irq_entry, ioapic, pin);
681 desc = irq_to_desc(irq);
682 BUG_ON(cpumask_empty(desc->arch.cpu_mask));
683 set_ioapic_affinity_irq(desc, desc->arch.cpu_mask);
684 }
685
686 }
687 }
688
689 /*
690 * EISA Edge/Level control register, ELCR
691 */
static int EISA_ELCR(unsigned int irq)
693 {
694 if (platform_legacy_irq(irq)) {
695 unsigned int port = 0x4d0 + (irq >> 3);
696 return (inb(port) >> (irq & 7)) & 1;
697 }
698 apic_printk(APIC_VERBOSE, KERN_INFO
699 "Broken MPtable reports ISA irq %d\n", irq);
700 return 0;
701 }
702
703 /* EISA interrupts are always polarity zero and can be edge or level
704 * trigger depending on the ELCR value. If an interrupt is listed as
705 * EISA conforming in the MP table, that means its trigger type must
706 * be read in from the ELCR */
707
708 #define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
709 #define default_EISA_polarity(idx) (0)
710
711 /* ISA interrupts are always polarity zero edge triggered,
712 * when listed as conforming in the MP table. */
713
714 #define default_ISA_trigger(idx) (0)
715 #define default_ISA_polarity(idx) (0)
716
717 /* PCI interrupts are always polarity one level triggered,
718 * when listed as conforming in the MP table. */
719
720 #define default_PCI_trigger(idx) (1)
721 #define default_PCI_polarity(idx) (1)
722
723 /* MCA interrupts are always polarity zero level triggered,
724 * when listed as conforming in the MP table. */
725
726 #define default_MCA_trigger(idx) (1)
727 #define default_MCA_polarity(idx) (0)
728
729 /* NEC98 interrupts are always polarity zero edge triggered,
730 * when listed as conforming in the MP table. */
731
732 #define default_NEC98_trigger(idx) (0)
733 #define default_NEC98_polarity(idx) (0)
734
static int __init MPBIOS_polarity(int idx)
736 {
737 int bus = mp_irqs[idx].mpc_srcbus;
738 int polarity;
739
740 /*
741 * Determine IRQ line polarity (high active or low active):
742 */
743 switch (mp_irqs[idx].mpc_irqflag & 3)
744 {
745 case 0: /* conforms, ie. bus-type dependent polarity */
746 {
747 switch (mp_bus_id_to_type[bus])
748 {
749 case MP_BUS_ISA: /* ISA pin */
750 {
751 polarity = default_ISA_polarity(idx);
752 break;
753 }
754 case MP_BUS_EISA: /* EISA pin */
755 {
756 polarity = default_EISA_polarity(idx);
757 break;
758 }
759 case MP_BUS_PCI: /* PCI pin */
760 {
761 polarity = default_PCI_polarity(idx);
762 break;
763 }
764 case MP_BUS_MCA: /* MCA pin */
765 {
766 polarity = default_MCA_polarity(idx);
767 break;
768 }
769 case MP_BUS_NEC98: /* NEC 98 pin */
770 {
771 polarity = default_NEC98_polarity(idx);
772 break;
773 }
774 default:
775 {
776 printk(KERN_WARNING "broken BIOS!!\n");
777 polarity = 1;
778 break;
779 }
780 }
781 break;
782 }
783 case 1: /* high active */
784 {
785 polarity = 0;
786 break;
787 }
788 case 2: /* reserved */
789 {
790 printk(KERN_WARNING "broken BIOS!!\n");
791 polarity = 1;
792 break;
793 }
794 case 3: /* low active */
795 {
796 polarity = 1;
797 break;
798 }
799 default: /* invalid */
800 {
801 printk(KERN_WARNING "broken BIOS!!\n");
802 polarity = 1;
803 break;
804 }
805 }
806 return polarity;
807 }
808
static int MPBIOS_trigger(int idx)
810 {
811 int bus = mp_irqs[idx].mpc_srcbus;
812 int trigger;
813
814 /*
815 * Determine IRQ trigger mode (edge or level sensitive):
816 */
817 switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
818 {
819 case 0: /* conforms, ie. bus-type dependent */
820 {
821 switch (mp_bus_id_to_type[bus])
822 {
823 case MP_BUS_ISA: /* ISA pin */
824 {
825 trigger = default_ISA_trigger(idx);
826 break;
827 }
828 case MP_BUS_EISA: /* EISA pin */
829 {
830 trigger = default_EISA_trigger(idx);
831 break;
832 }
833 case MP_BUS_PCI: /* PCI pin */
834 {
835 trigger = default_PCI_trigger(idx);
836 break;
837 }
838 case MP_BUS_MCA: /* MCA pin */
839 {
840 trigger = default_MCA_trigger(idx);
841 break;
842 }
843 case MP_BUS_NEC98: /* NEC 98 pin */
844 {
845 trigger = default_NEC98_trigger(idx);
846 break;
847 }
848 default:
849 {
850 printk(KERN_WARNING "broken BIOS!!\n");
851 trigger = 1;
852 break;
853 }
854 }
855 break;
856 }
857 case 1: /* edge */
858 {
859 trigger = 0;
860 break;
861 }
862 case 2: /* reserved */
863 {
864 printk(KERN_WARNING "broken BIOS!!\n");
865 trigger = 1;
866 break;
867 }
868 case 3: /* level */
869 {
870 trigger = 1;
871 break;
872 }
873 default: /* invalid */
874 {
875 printk(KERN_WARNING "broken BIOS!!\n");
876 trigger = 0;
877 break;
878 }
879 }
880 return trigger;
881 }
882
static inline int irq_polarity(int idx)
884 {
885 return MPBIOS_polarity(idx);
886 }
887
static inline int irq_trigger(int idx)
889 {
890 return MPBIOS_trigger(idx);
891 }
892
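/*
 * Map an MP-table interrupt entry to a GSI: ISA-like buses (ISA, EISA,
 * MCA, NEC98) use the source bus IRQ directly, while PCI pins are numbered
 * sequentially across IO-APICs (the pin number plus the pin counts of all
 * preceding IO-APICs).
 */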
static int pin_2_irq(int idx, int apic, int pin)
894 {
895 int irq, i;
896 int bus = mp_irqs[idx].mpc_srcbus;
897
898 /*
899 * Debugging check, we are in big trouble if this message pops up!
900 */
901 if (mp_irqs[idx].mpc_dstirq != pin)
902 printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
903
904 switch (mp_bus_id_to_type[bus])
905 {
906 case MP_BUS_ISA: /* ISA pin */
907 case MP_BUS_EISA:
908 case MP_BUS_MCA:
909 case MP_BUS_NEC98:
910 {
911 irq = mp_irqs[idx].mpc_srcbusirq;
912 break;
913 }
914 case MP_BUS_PCI: /* PCI pin */
915 {
916 /*
917 * PCI IRQs are mapped in order
918 */
919 i = irq = 0;
920 while (i < apic)
921 irq += nr_ioapic_entries[i++];
922 irq += pin;
923 break;
924 }
925 default:
926 {
927 printk(KERN_ERR "unknown bus type %d.\n",bus);
928 irq = 0;
929 break;
930 }
931 }
932
933 return irq;
934 }
935
static inline int IO_APIC_irq_trigger(int irq)
937 {
938 int apic, idx, pin;
939
940 for (apic = 0; apic < nr_ioapics; apic++) {
941 for (pin = 0; pin < nr_ioapic_entries[apic]; pin++) {
942 idx = find_irq_entry(apic,pin,mp_INT);
943 if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
944 return irq_trigger(idx);
945 }
946 }
947 /*
948 * nonexistent IRQs are edge default
949 */
950 return 0;
951 }
952
953 static struct hw_interrupt_type ioapic_level_type;
954 static hw_irq_controller ioapic_edge_type;
955
956 #define IOAPIC_AUTO -1
957 #define IOAPIC_EDGE 0
958 #define IOAPIC_LEVEL 1
959
960 #define SET_DEST(ent, mode, val) do { \
961 if (x2apic_enabled) \
962 (ent).dest.dest32 = (val); \
963 else \
964 (ent).dest.mode.mode##_dest = (val); \
965 } while (0)
966
static inline void ioapic_register_intr(int irq, unsigned long trigger)
968 {
969 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
970 trigger == IOAPIC_LEVEL)
971 irq_desc[irq].handler = &ioapic_level_type;
972 else
973 irq_desc[irq].handler = &ioapic_edge_type;
974 }
975
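/*
 * Program an RTE for every pin that has an MP-table interrupt entry:
 * assign a vector, pick edge/level handling, and route the interrupt to
 * TARGET_CPUS.  Level-triggered entries start out masked.
 */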
static void __init setup_IO_APIC_irqs(void)
977 {
978 struct IO_APIC_route_entry entry;
979 int apic, pin, idx, irq, first_notcon = 1, vector;
980 unsigned long flags;
981
982 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
983
984 for (apic = 0; apic < nr_ioapics; apic++) {
985 for (pin = 0; pin < nr_ioapic_entries[apic]; pin++) {
986 struct irq_desc *desc;
987
988 /*
989 * add it to the IO-APIC irq-routing table:
990 */
991 memset(&entry,0,sizeof(entry));
992
993 entry.delivery_mode = INT_DELIVERY_MODE;
994 entry.dest_mode = INT_DEST_MODE;
995 entry.mask = 0; /* enable IRQ */
996
997 idx = find_irq_entry(apic,pin,mp_INT);
998 if (idx == -1) {
999 if (first_notcon) {
1000 apic_printk(APIC_VERBOSE, KERN_DEBUG
1001 " IO-APIC (apicid-pin) %d-%d",
1002 mp_ioapics[apic].mpc_apicid,
1003 pin);
1004 first_notcon = 0;
1005 } else
1006 apic_printk(APIC_VERBOSE, ", %d-%d",
1007 mp_ioapics[apic].mpc_apicid, pin);
1008 continue;
1009 }
1010
1011 entry.trigger = irq_trigger(idx);
1012 entry.polarity = irq_polarity(idx);
1013
1014 if (irq_trigger(idx)) {
1015 entry.trigger = 1;
1016 entry.mask = 1;
1017 }
1018
1019 irq = pin_2_irq(idx, apic, pin);
1020 /*
1021 * skip adding the timer int on secondary nodes, which causes
1022 * a small but painful rift in the time-space continuum
1023 */
1024 if (multi_timer_check(apic, irq))
1025 continue;
1026 else
1027 add_pin_to_irq(irq, apic, pin);
1028
1029 if (!IO_APIC_IRQ(irq))
1030 continue;
1031
1032 vector = assign_irq_vector(irq, NULL);
1033 BUG_ON(vector < 0);
1034 entry.vector = vector;
1035 ioapic_register_intr(irq, IOAPIC_AUTO);
1036
1037 if (platform_legacy_irq(irq))
1038 disable_8259A_irq(irq_to_desc(irq));
1039
1040 desc = irq_to_desc(irq);
1041 SET_DEST(entry, logical, cpu_mask_to_apicid(TARGET_CPUS));
1042 spin_lock_irqsave(&ioapic_lock, flags);
1043 __ioapic_write_entry(apic, pin, 0, entry);
1044 set_native_irq_info(irq, TARGET_CPUS);
1045 spin_unlock_irqrestore(&ioapic_lock, flags);
1046 }
1047 }
1048
1049 if (!first_notcon)
1050 apic_printk(APIC_VERBOSE, " not connected.\n");
1051 }
1052
1053 /*
1054 * Set up the 8259A-master output pin:
1055 */
static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
1057 {
1058 struct IO_APIC_route_entry entry;
1059
1060 memset(&entry,0,sizeof(entry));
1061
1062 disable_8259A_irq(irq_to_desc(0));
1063
1064 /* mask LVT0 */
1065 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
1066
1067 /*
1068 * We use logical delivery to get the timer IRQ
1069 * to the first CPU.
1070 */
1071 entry.dest_mode = INT_DEST_MODE;
1072 entry.mask = 0; /* unmask IRQ now */
1073 SET_DEST(entry, logical, cpu_mask_to_apicid(TARGET_CPUS));
1074 entry.delivery_mode = INT_DELIVERY_MODE;
1075 entry.polarity = 0;
1076 entry.trigger = 0;
1077 entry.vector = vector;
1078
1079 /*
1080 * The timer IRQ doesn't have to know that behind the
1081 * scene we have a 8259A-master in AEOI mode ...
1082 */
1083 irq_desc[0].handler = &ioapic_edge_type;
1084
1085 /*
1086 * Add it to the IO-APIC irq-routing table:
1087 */
1088 ioapic_write_entry(apic, pin, 0, entry);
1089
1090 enable_8259A_irq(irq_to_desc(0));
1091 }
1092
static inline void UNEXPECTED_IO_APIC(void)
1094 {
1095 }
1096
static void /*__init*/ __print_IO_APIC(bool boot)
1098 {
1099 int apic, i;
1100 union IO_APIC_reg_00 reg_00;
1101 union IO_APIC_reg_01 reg_01;
1102 union IO_APIC_reg_02 reg_02;
1103 union IO_APIC_reg_03 reg_03;
1104 unsigned long flags;
1105
1106 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
1107 for (i = 0; i < nr_ioapics; i++)
1108 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
1109 mp_ioapics[i].mpc_apicid, nr_ioapic_entries[i]);
1110
1111 /*
1112 * We are a bit conservative about what we expect. We have to
1113 * know about every hardware change ASAP.
1114 */
1115 printk(KERN_INFO "testing the IO APIC.......................\n");
1116
1117 for (apic = 0; apic < nr_ioapics; apic++) {
1118 if ( !boot )
1119 process_pending_softirqs();
1120
1121 if (!nr_ioapic_entries[apic])
1122 continue;
1123
1124 spin_lock_irqsave(&ioapic_lock, flags);
1125 reg_00.raw = io_apic_read(apic, 0);
1126 reg_01.raw = io_apic_read(apic, 1);
1127 if (reg_01.bits.version >= 0x10)
1128 reg_02.raw = io_apic_read(apic, 2);
1129 if (reg_01.bits.version >= 0x20)
1130 reg_03.raw = io_apic_read(apic, 3);
1131 spin_unlock_irqrestore(&ioapic_lock, flags);
1132
1133 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
1134 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
1135 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
1136 printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
1137 printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
1138 if (reg_00.bits.ID >= get_physical_broadcast())
1139 UNEXPECTED_IO_APIC();
1140 if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
1141 UNEXPECTED_IO_APIC();
1142
1143 printk(KERN_DEBUG ".... register #01: %08X\n", reg_01.raw);
1144 printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
1145 if ( (reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
1146 (reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
1147 (reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
1148 (reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
1149 (reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
1150 (reg_01.bits.entries != 0x2E) &&
1151 (reg_01.bits.entries != 0x3F)
1152 )
1153 UNEXPECTED_IO_APIC();
1154
1155 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
1156 printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
1157 if ( (reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
1158 (reg_01.bits.version != 0x10) && /* oldest IO-APICs */
1159 (reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
1160 (reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
1161 (reg_01.bits.version != 0x20) /* Intel P64H (82806 AA) */
1162 )
1163 UNEXPECTED_IO_APIC();
1164 if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
1165 UNEXPECTED_IO_APIC();
1166
1167 /*
1168 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
1169 * but the value of reg_02 is read as the previous read register
1170 * value, so ignore it if reg_02 == reg_01.
1171 */
1172 if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
1173 printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
1174 printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
1175 if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
1176 UNEXPECTED_IO_APIC();
1177 }
1178
1179 /*
1180 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
1181 * or reg_03, but the value of reg_0[23] is read as the previous read
1182 * register value, so ignore it if reg_03 == reg_0[12].
1183 */
1184 if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
1185 reg_03.raw != reg_01.raw) {
1186 printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
1187 printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
1188 if (reg_03.bits.__reserved_1)
1189 UNEXPECTED_IO_APIC();
1190 }
1191
1192 printk(KERN_DEBUG ".... IRQ redirection table:\n");
1193
1194 printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
1195 " Stat Dest Deli Vect: \n");
1196
1197 for (i = 0; i <= reg_01.bits.entries; i++) {
1198 struct IO_APIC_route_entry entry;
1199
1200 entry = ioapic_read_entry(apic, i, 0);
1201
1202 printk(KERN_DEBUG " %02x %03X %02X ",
1203 i,
1204 entry.dest.logical.logical_dest,
1205 entry.dest.physical.physical_dest
1206 );
1207
1208 printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
1209 entry.mask,
1210 entry.trigger,
1211 entry.irr,
1212 entry.polarity,
1213 entry.delivery_status,
1214 entry.dest_mode,
1215 entry.delivery_mode,
1216 entry.vector
1217 );
1218 }
1219 }
1220 printk(KERN_INFO "Using vector-based indexing\n");
1221 printk(KERN_DEBUG "IRQ to pin mappings:\n");
1222 for (i = 0; i < nr_irqs_gsi; i++) {
1223 struct irq_pin_list *entry = irq_2_pin + i;
1224
1225 if ( !boot && !(i & 0x1f) )
1226 process_pending_softirqs();
1227
1228 if (entry->pin < 0)
1229 continue;
1230 printk(KERN_DEBUG "IRQ%d ", irq_to_desc(i)->arch.vector);
1231 for (;;) {
1232 printk("-> %d:%d", entry->apic, entry->pin);
1233 if (!entry->next)
1234 break;
1235 entry = irq_2_pin + entry->next;
1236 }
1237 printk("\n");
1238 }
1239
1240 printk(KERN_INFO ".................................... done.\n");
1241
1242 return;
1243 }
1244
static void __init print_IO_APIC(void)
1246 {
1247 if (apic_verbosity != APIC_QUIET)
1248 __print_IO_APIC(1);
1249 }
1250
static void _print_IO_APIC_keyhandler(unsigned char key)
1252 {
1253 __print_IO_APIC(0);
1254 }
1255
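/*
 * One-time bring-up: allocate the irq_2_pin free list and the per-IO-APIC
 * vector maps, locate any pin wired to the i8259 in ExtINT mode, and clear
 * every RTE so we start from a clean slate.
 */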
static void __init enable_IO_APIC(void)
1257 {
1258 int i8259_apic, i8259_pin;
1259 int i, apic;
1260
1261 /* Initialise dynamic irq_2_pin free list. */
1262 irq_2_pin = xzalloc_array(struct irq_pin_list, PIN_MAP_SIZE);
1263
1264 for (i = 0; i < PIN_MAP_SIZE; i++)
1265 irq_2_pin[i].pin = -1;
1266 for (i = irq_2_pin_free_entry = nr_irqs_gsi; i < PIN_MAP_SIZE; i++)
1267 irq_2_pin[i].next = i + 1;
1268
1269 if (directed_eoi_enabled) {
1270 for (apic = 0; apic < nr_ioapics; apic++) {
1271 if (!nr_ioapic_entries[apic])
1272 continue;
1273 vector_map[apic] = xzalloc(vmask_t);
1274 BUG_ON(!vector_map[apic]);
1275 }
1276 } else {
1277 vector_map[0] = xzalloc(vmask_t);
1278 BUG_ON(!vector_map[0]);
1279 for (apic = 1; apic < nr_ioapics; apic++)
1280 vector_map[apic] = vector_map[0];
1281 }
1282
1283 for(apic = 0; apic < nr_ioapics; apic++) {
1284 int pin;
1285 /* See if any of the pins is in ExtINT mode */
1286 for (pin = 0; pin < nr_ioapic_entries[apic]; pin++) {
1287 struct IO_APIC_route_entry entry = ioapic_read_entry(apic, pin, 0);
1288
1289 /* If the interrupt line is enabled and in ExtInt mode
1290 * I have found the pin where the i8259 is connected.
1291 */
1292 if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
1293 ioapic_i8259.apic = apic;
1294 ioapic_i8259.pin = pin;
1295 goto found_i8259;
1296 }
1297 }
1298 }
1299 found_i8259:
/* Look to see if the MP table has reported the ExtINT */
/* If we could not find the appropriate pin by looking at the ioapic,
 * the i8259 probably is not connected to the ioapic, but give the
 * mptable a chance anyway.
 */
1305 i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
1306 i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
1307 /* Trust the MP table if nothing is setup in the hardware */
1308 if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
1309 printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
1310 ioapic_i8259.pin = i8259_pin;
1311 ioapic_i8259.apic = i8259_apic;
1312 }
1313 /* Complain if the MP table and the hardware disagree */
1314 if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
1315 (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
1316 {
1317 printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
1318 }
1319
1320 /*
1321 * Do not trust the IO-APIC being empty at bootup
1322 */
1323 clear_IO_APIC();
1324 }
1325
1326 /*
1327 * Not an __init, needed by the reboot code
1328 */
void disable_IO_APIC(void)
1330 {
1331 /*
1332 * Clear the IO-APIC before rebooting:
1333 */
1334 clear_IO_APIC();
1335
1336 /*
1337 * If the i8259 is routed through an IOAPIC
1338 * Put that IOAPIC in virtual wire mode
1339 * so legacy interrupts can be delivered.
1340 */
1341 if (ioapic_i8259.pin != -1) {
1342 struct IO_APIC_route_entry entry;
1343
1344 memset(&entry, 0, sizeof(entry));
1345 entry.mask = 0; /* Enabled */
1346 entry.trigger = 0; /* Edge */
1347 entry.irr = 0;
1348 entry.polarity = 0; /* High */
1349 entry.delivery_status = 0;
1350 entry.dest_mode = 0; /* Physical */
1351 entry.delivery_mode = dest_ExtINT; /* ExtInt */
1352 entry.vector = 0;
1353 SET_DEST(entry, physical, get_apic_id());
1354
1355 /*
1356 * Add it to the IO-APIC irq-routing table:
1357 */
1358 ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, 0, entry);
1359 }
1360 disconnect_bsp_APIC(ioapic_i8259.pin != -1);
1361 }
1362
1363 /*
1364 * function to set the IO-APIC physical IDs based on the
1365 * values stored in the MPC table.
1366 *
1367 * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
1368 */
1369
static void __init setup_ioapic_ids_from_mpc(void)
1371 {
1372 union IO_APIC_reg_00 reg_00;
1373 static physid_mask_t __initdata phys_id_present_map;
1374 int apic;
1375 int i;
1376 unsigned char old_id;
1377 unsigned long flags;
1378
1379 /*
1380 * Don't check I/O APIC IDs for xAPIC systems. They have
1381 * no meaning without the serial APIC bus.
1382 */
1383 if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
1384 || APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
1385 return;
1386
1387 /*
1388 * This is broken; anything with a real cpu count has to
1389 * circumvent this idiocy regardless.
1390 */
1391 ioapic_phys_id_map(&phys_id_present_map);
1392
1393 /*
1394 * Set the IOAPIC ID to the value stored in the MPC table.
1395 */
1396 for (apic = 0; apic < nr_ioapics; apic++) {
1397 if (!nr_ioapic_entries[apic])
1398 continue;
1399
1400 /* Read the register 0 value */
1401 spin_lock_irqsave(&ioapic_lock, flags);
1402 reg_00.raw = io_apic_read(apic, 0);
1403 spin_unlock_irqrestore(&ioapic_lock, flags);
1404
1405 old_id = mp_ioapics[apic].mpc_apicid;
1406
1407 if (mp_ioapics[apic].mpc_apicid >= get_physical_broadcast()) {
1408 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
1409 apic, mp_ioapics[apic].mpc_apicid);
1410 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
1411 reg_00.bits.ID);
1412 mp_ioapics[apic].mpc_apicid = reg_00.bits.ID;
1413 }
1414
1415 /*
1416 * Sanity check, is the ID really free? Every APIC in a
1417 * system must have a unique ID or we get lots of nice
1418 * 'stuck on smp_invalidate_needed IPI wait' messages.
1419 */
1420 if (check_apicid_used(&phys_id_present_map,
1421 mp_ioapics[apic].mpc_apicid)) {
1422 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
1423 apic, mp_ioapics[apic].mpc_apicid);
1424 for (i = 0; i < get_physical_broadcast(); i++)
1425 if (!physid_isset(i, phys_id_present_map))
1426 break;
1427 if (i >= get_physical_broadcast())
1428 panic("Max APIC ID exceeded");
1429 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
1430 i);
1431 mp_ioapics[apic].mpc_apicid = i;
1432 } else {
1433 apic_printk(APIC_VERBOSE, "Setting %d in the "
1434 "phys_id_present_map\n",
1435 mp_ioapics[apic].mpc_apicid);
1436 }
1437 set_apicid(mp_ioapics[apic].mpc_apicid, &phys_id_present_map);
1438
1439 /*
1440 * We need to adjust the IRQ routing table
1441 * if the ID changed.
1442 */
1443 if (old_id != mp_ioapics[apic].mpc_apicid)
1444 for (i = 0; i < mp_irq_entries; i++)
1445 if (mp_irqs[i].mpc_dstapic == old_id)
1446 mp_irqs[i].mpc_dstapic
1447 = mp_ioapics[apic].mpc_apicid;
1448
1449 /*
1450 * Read the right value from the MPC table and
1451 * write it into the ID register.
1452 */
1453 apic_printk(APIC_VERBOSE, KERN_INFO
1454 "...changing IO-APIC physical APIC ID to %d ...",
1455 mp_ioapics[apic].mpc_apicid);
1456
1457 reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
1458 spin_lock_irqsave(&ioapic_lock, flags);
1459 io_apic_write(apic, 0, reg_00.raw);
1460 spin_unlock_irqrestore(&ioapic_lock, flags);
1461
1462 /*
1463 * Sanity check
1464 */
1465 spin_lock_irqsave(&ioapic_lock, flags);
1466 reg_00.raw = io_apic_read(apic, 0);
1467 spin_unlock_irqrestore(&ioapic_lock, flags);
1468 if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
1469 printk("could not set ID!\n");
1470 else
1471 apic_printk(APIC_VERBOSE, " ok.\n");
1472 }
1473 }
1474
1475 /*
1476 * There is a nasty bug in some older SMP boards, their mptable lies
1477 * about the timer IRQ. We do the following to work around the situation:
1478 *
1479 * - timer IRQ defaults to IO-APIC IRQ
1480 * - if this function detects that timer IRQs are defunct, then we fall
1481 * back to ISA timer IRQs
1482 */
static int __init timer_irq_works(void)
1484 {
1485 unsigned long t1, flags;
1486
1487 t1 = ACCESS_ONCE(pit0_ticks);
1488
1489 local_save_flags(flags);
1490 local_irq_enable();
1491 /* Let ten ticks pass... */
1492 mdelay((10 * 1000) / HZ);
1493 local_irq_restore(flags);
1494
1495 /*
1496 * Expect a few ticks at least, to be sure some possible
1497 * glue logic does not lock up after one or two first
1498 * ticks in a non-ExtINT mode. Also the local APIC
1499 * might have cached one ExtINT interrupt. Finally, at
1500 * least one tick may be lost due to delays.
1501 */
1502 if ( (ACCESS_ONCE(pit0_ticks) - t1) > 4 )
1503 return 1;
1504
1505 return 0;
1506 }
1507
1508 /*
1509 * In the SMP+IOAPIC case it might happen that there are an unspecified
1510 * number of pending IRQ events unhandled. These cases are very rare,
1511 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
1512 * better to do it this way as thus we do not have to be aware of
1513 * 'pending' interrupts in the IRQ path, except at this point.
1514 */
1515 /*
1516 * Edge triggered needs to resend any interrupt
1517 * that was delayed but this is now handled in the device
1518 * independent code.
1519 */
1520
1521 /*
 * Starting up an edge-triggered IO-APIC interrupt is
 * nasty - we need to make sure that we get the edge.
 * If it is already asserted for some reason, we need to
 * return 1 to indicate that it was pending.
1526 *
1527 * This is not complete - we should be able to fake
1528 * an edge even if it isn't on the 8259A...
1529 */
static unsigned int startup_edge_ioapic_irq(struct irq_desc *desc)
1531 {
1532 int was_pending = 0;
1533 unsigned long flags;
1534
1535 spin_lock_irqsave(&ioapic_lock, flags);
1536 if (platform_legacy_irq(desc->irq)) {
1537 disable_8259A_irq(desc);
1538 if (i8259A_irq_pending(desc->irq))
1539 was_pending = 1;
1540 }
1541 __unmask_IO_APIC_irq(desc->irq);
1542 spin_unlock_irqrestore(&ioapic_lock, flags);
1543
1544 return was_pending;
1545 }
1546
1547 /*
1548 * Once we have recorded IRQ_PENDING already, we can mask the
1549 * interrupt for real. This prevents IRQ storms from unhandled
1550 * devices.
1551 */
static void ack_edge_ioapic_irq(struct irq_desc *desc)
1553 {
1554 irq_complete_move(desc);
1555 move_native_irq(desc);
1556
1557 if ((desc->status & (IRQ_PENDING | IRQ_DISABLED))
1558 == (IRQ_PENDING | IRQ_DISABLED))
1559 mask_IO_APIC_irq(desc);
1560 ack_APIC_irq();
1561 }
1562
1563 /*
1564 * Level triggered interrupts can just be masked,
1565 * and shutting down and starting up the interrupt
1566 * is the same as enabling and disabling them -- except
1567 * with a startup need to return a "was pending" value.
1568 *
1569 * Level triggered interrupts are special because we
1570 * do not touch any IO-APIC register while handling
1571 * them. We ack the APIC in the end-IRQ handler, not
1572 * in the start-IRQ-handler. Protection against reentrance
1573 * from the same interrupt is still provided, both by the
1574 * generic IRQ layer and by the fact that an unacked local
1575 * APIC does not accept IRQs.
1576 */
static unsigned int startup_level_ioapic_irq(struct irq_desc *desc)
1578 {
1579 unmask_IO_APIC_irq(desc);
1580
1581 return 0; /* don't check for pending */
1582 }
1583
static int __init setup_ioapic_ack(const char *s)
1585 {
1586 if ( !strcmp(s, "old") )
1587 {
1588 ioapic_ack_new = false;
1589 ioapic_ack_forced = true;
1590 }
1591 else if ( !strcmp(s, "new") )
1592 {
1593 ioapic_ack_new = true;
1594 ioapic_ack_forced = true;
1595 }
1596 else
1597 return -EINVAL;
1598
1599 return 0;
1600 }
1601 custom_param("ioapic_ack", setup_ioapic_ack);
1602
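/*
 * Returns true if any pin belonging to this IRQ still has its Remote IRR
 * bit set, i.e. the level-triggered interrupt has not yet been EOIed at
 * the IO-APIC.
 */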
static bool io_apic_level_ack_pending(unsigned int irq)
1604 {
1605 struct irq_pin_list *entry;
1606 unsigned long flags;
1607
1608 spin_lock_irqsave(&ioapic_lock, flags);
1609 entry = &irq_2_pin[irq];
1610 for (;;) {
1611 unsigned int reg;
1612 int pin;
1613
1614 if (!entry)
1615 break;
1616
1617 pin = entry->pin;
1618 if (pin == -1)
1619 continue;
1620 reg = io_apic_read(entry->apic, 0x10 + pin*2);
1621 /* Is the remote IRR bit set? */
1622 if (reg & IO_APIC_REDIR_REMOTE_IRR) {
1623 spin_unlock_irqrestore(&ioapic_lock, flags);
1624 return 1;
1625 }
1626 if (!entry->next)
1627 break;
1628 entry = irq_2_pin + entry->next;
1629 }
1630 spin_unlock_irqrestore(&ioapic_lock, flags);
1631
1632 return 0;
1633 }
1634
static void mask_and_ack_level_ioapic_irq(struct irq_desc *desc)
1636 {
1637 unsigned long v;
1638 int i;
1639
1640 irq_complete_move(desc);
1641
1642 if ( !directed_eoi_enabled )
1643 mask_IO_APIC_irq(desc);
1644
1645 /*
1646 * It appears there is an erratum which affects at least version 0x11
1647 * of I/O APIC (that's the 82093AA and cores integrated into various
1648 * chipsets). Under certain conditions a level-triggered interrupt is
1649 * erroneously delivered as edge-triggered one but the respective IRR
1650 * bit gets set nevertheless. As a result the I/O unit expects an EOI
1651 * message but it will never arrive and further interrupts are blocked
1652 * from the source. The exact reason is so far unknown, but the
1653 * phenomenon was observed when two consecutive interrupt requests
1654 * from a given source get delivered to the same CPU and the source is
1655 * temporarily disabled in between.
1656 *
1657 * A workaround is to simulate an EOI message manually. We achieve it
1658 * by setting the trigger mode to edge and then to level when the edge
1659 * trigger mode gets detected in the TMR of a local APIC for a
1660 * level-triggered interrupt. We mask the source for the time of the
1661 * operation to prevent an edge-triggered interrupt escaping meanwhile.
1662 * The idea is from Manfred Spraul. --macro
1663 */
1664 i = desc->arch.vector;
1665
1666 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
1667
1668 ack_APIC_irq();
1669
1670 if ( directed_eoi_enabled )
1671 return;
1672
1673 if ((desc->status & IRQ_MOVE_PENDING) &&
1674 !io_apic_level_ack_pending(desc->irq))
1675 move_masked_irq(desc);
1676
1677 if ( !(v & (1 << (i & 0x1f))) ) {
1678 spin_lock(&ioapic_lock);
1679 __edge_IO_APIC_irq(desc->irq);
1680 __level_IO_APIC_irq(desc->irq);
1681 spin_unlock(&ioapic_lock);
1682 }
1683 }
1684
static void end_level_ioapic_irq_old(struct irq_desc *desc, u8 vector)
1686 {
1687 if ( directed_eoi_enabled )
1688 {
1689 if ( !(desc->status & (IRQ_DISABLED|IRQ_MOVE_PENDING)) )
1690 {
1691 eoi_IO_APIC_irq(desc);
1692 return;
1693 }
1694
1695 mask_IO_APIC_irq(desc);
1696 eoi_IO_APIC_irq(desc);
1697 if ( (desc->status & IRQ_MOVE_PENDING) &&
1698 !io_apic_level_ack_pending(desc->irq) )
1699 move_masked_irq(desc);
1700 }
1701
1702 if ( !(desc->status & IRQ_DISABLED) )
1703 unmask_IO_APIC_irq(desc);
1704 }
1705
static void end_level_ioapic_irq_new(struct irq_desc *desc, u8 vector)
1707 {
1708 /*
1709 * It appears there is an erratum which affects at least version 0x11
1710 * of I/O APIC (that's the 82093AA and cores integrated into various
1711 * chipsets). Under certain conditions a level-triggered interrupt is
1712 * erroneously delivered as edge-triggered one but the respective IRR
1713 * bit gets set nevertheless. As a result the I/O unit expects an EOI
1714 * message but it will never arrive and further interrupts are blocked
1715 * from the source. The exact reason is so far unknown, but the
1716 * phenomenon was observed when two consecutive interrupt requests
1717 * from a given source get delivered to the same CPU and the source is
1718 * temporarily disabled in between.
1719 *
1720 * A workaround is to simulate an EOI message manually. We achieve it
1721 * by setting the trigger mode to edge and then to level when the edge
1722 * trigger mode gets detected in the TMR of a local APIC for a
1723 * level-triggered interrupt. We mask the source for the time of the
1724 * operation to prevent an edge-triggered interrupt escaping meanwhile.
1725 * The idea is from Manfred Spraul. --macro
1726 */
1727 unsigned int v, i = desc->arch.vector;
1728
1729 /* Manually EOI the old vector if we are moving to the new */
1730 if ( vector && i != vector )
1731 eoi_IO_APIC_irq(desc);
1732
1733 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
1734
1735 ack_APIC_irq();
1736
1737 if ( (desc->status & IRQ_MOVE_PENDING) &&
1738 !io_apic_level_ack_pending(desc->irq) )
1739 move_native_irq(desc);
1740
1741 if (!(v & (1 << (i & 0x1f)))) {
1742 spin_lock(&ioapic_lock);
1743 __mask_IO_APIC_irq(desc->irq);
1744 __edge_IO_APIC_irq(desc->irq);
1745 __level_IO_APIC_irq(desc->irq);
1746 if ( !(desc->status & IRQ_DISABLED) )
1747 __unmask_IO_APIC_irq(desc->irq);
1748 spin_unlock(&ioapic_lock);
1749 }
1750 }
1751
1752 /*
1753 * Level and edge triggered IO-APIC interrupts need different handling,
1754 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
1755 * handled with the level-triggered descriptor, but that one has slightly
1756 * more overhead. Level-triggered interrupts cannot be handled with the
1757 * edge-triggered handler, without risking IRQ storms and other ugly
1758 * races.
1759 */
1760 static hw_irq_controller ioapic_edge_type = {
1761 .typename = "IO-APIC-edge",
1762 .startup = startup_edge_ioapic_irq,
1763 .shutdown = irq_shutdown_none,
1764 .enable = unmask_IO_APIC_irq,
1765 .disable = irq_disable_none,
1766 .ack = ack_edge_ioapic_irq,
1767 .set_affinity = set_ioapic_affinity_irq,
1768 };
1769
1770 static struct hw_interrupt_type __read_mostly ioapic_level_type = {
1771 .typename = "IO-APIC-level",
1772 .startup = startup_level_ioapic_irq,
1773 .shutdown = mask_IO_APIC_irq,
1774 .enable = unmask_IO_APIC_irq,
1775 .disable = mask_IO_APIC_irq,
1776 .ack = mask_and_ack_level_ioapic_irq,
1777 .end = end_level_ioapic_irq_old,
1778 .set_affinity = set_ioapic_affinity_irq,
1779 };
1780
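/*
 * Legacy IRQs that did not get an IO-APIC vector fall back to being driven
 * by the 8259A.
 */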
static inline void init_IO_APIC_traps(void)
1782 {
1783 int irq;
1784 /* Xen: This is way simpler than the Linux implementation. */
1785 for (irq = 0; platform_legacy_irq(irq); irq++)
1786 if (IO_APIC_IRQ(irq) && !irq_to_vector(irq))
1787 make_8259A_irq(irq);
1788 }
1789
static void enable_lapic_irq(struct irq_desc *desc)
1791 {
1792 unsigned long v;
1793
1794 v = apic_read(APIC_LVT0);
1795 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
1796 }
1797
static void disable_lapic_irq(struct irq_desc *desc)
1799 {
1800 unsigned long v;
1801
1802 v = apic_read(APIC_LVT0);
1803 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
1804 }
1805
static void ack_lapic_irq(struct irq_desc *desc)
1807 {
1808 ack_APIC_irq();
1809 }
1810
1811 static hw_irq_controller lapic_irq_type = {
1812 .typename = "local-APIC-edge",
1813 .startup = NULL, /* startup_irq() not used for IRQ0 */
1814 .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
1815 .enable = enable_lapic_irq,
1816 .disable = disable_lapic_irq,
1817 .ack = ack_lapic_irq,
1818 };
1819
1820 /*
1821 * This looks a bit hackish but it's about the only one way of sending
1822 * a few INTA cycles to 8259As and any associated glue logic. ICR does
1823 * not support the ExtINT mode, unfortunately. We need to send these
1824 * cycles as some i82489DX-based boards have glue logic that keeps the
1825 * 8259A interrupt line asserted until INTA. --macro
1826 */
static void __init unlock_ExtINT_logic(void)
1828 {
1829 int apic, pin, i;
1830 struct IO_APIC_route_entry entry0, entry1;
1831 unsigned char save_control, save_freq_select;
1832
1833 pin = find_isa_irq_pin(8, mp_INT);
1834 apic = find_isa_irq_apic(8, mp_INT);
1835 if ( pin == -1 || apic == -1 )
1836 return;
1837
1838 entry0 = ioapic_read_entry(apic, pin, 0);
1839 clear_IO_APIC_pin(apic, pin);
1840
1841 memset(&entry1, 0, sizeof(entry1));
1842
1843 entry1.dest_mode = 0; /* physical delivery */
1844 entry1.mask = 0; /* unmask IRQ now */
1845 SET_DEST(entry1, physical, get_apic_id());
1846 entry1.delivery_mode = dest_ExtINT;
1847 entry1.polarity = entry0.polarity;
1848 entry1.trigger = 0;
1849 entry1.vector = 0;
1850
1851 ioapic_write_entry(apic, pin, 0, entry1);
1852
1853 save_control = CMOS_READ(RTC_CONTROL);
1854 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
1855 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
1856 RTC_FREQ_SELECT);
1857 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
1858
1859 i = 100;
1860 while (i-- > 0) {
1861 mdelay(10);
1862 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
1863 i -= 10;
1864 }
1865
1866 CMOS_WRITE(save_control, RTC_CONTROL);
1867 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
1868 clear_IO_APIC_pin(apic, pin);
1869
1870 ioapic_write_entry(apic, pin, 0, entry0);
1871 }
1872
1873 /*
1874 * This code may look a bit paranoid, but it's supposed to cooperate with
1875 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
1876 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
1877 * fanatically on his truly buggy board.
1878 */
static void __init check_timer(void)
1880 {
1881 int apic1, pin1, apic2, pin2;
1882 int vector, ret;
1883 unsigned long flags;
1884 cpumask_t mask_all;
1885
1886 local_irq_save(flags);
1887
1888 /*
1889 * get/set the timer IRQ vector:
1890 */
1891 disable_8259A_irq(irq_to_desc(0));
1892 vector = IRQ0_VECTOR;
1893 clear_irq_vector(0);
1894
1895 cpumask_setall(&mask_all);
1896 if ((ret = bind_irq_vector(0, vector, &mask_all)))
1897 printk(KERN_ERR"..IRQ0 is not set correctly with ioapic!!!, err:%d\n", ret);
1898
1899 irq_desc[0].status &= ~IRQ_DISABLED;
1900
1901 /*
1902 * Subtle, code in do_timer_interrupt() expects an AEOI
1903 * mode for the 8259A whenever interrupts are routed
1904 * through I/O APICs. Also IRQ0 has to be enabled in
1905 * the 8259A which implies the virtual wire has to be
1906 * disabled in the local APIC.
1907 */
1908 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
1909 init_8259A(1);
1910 /* XEN: Ripped out the legacy missed-tick logic, so below is not needed. */
1911 /*timer_ack = 1;*/
1912 /*enable_8259A_irq(irq_to_desc(0));*/
1913
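    /*
     * In outline (as implemented below): first see whether IRQ0 arrives
     * through the IO-APIC pin the MP/ACPI tables claim it is wired to; if
     * not, try delivering it through the 8259A via the IO-APIC pin its
     * output is reported to be wired to; failing that, fall back to the
     * local APIC's LINT0 as a "virtual wire" in fixed mode, and finally
     * as a plain ExtINT.  Only if all four arrangements fail do we give
     * up and panic.
     */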
    pin1  = find_isa_irq_pin(0, mp_INT);
    apic1 = find_isa_irq_apic(0, mp_INT);
    pin2  = ioapic_i8259.pin;
    apic2 = ioapic_i8259.apic;

    printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
           vector, apic1, pin1, apic2, pin2);

    if (pin1 != -1) {
        /*
         * Ok, does IRQ0 through the IOAPIC work?
         */
        unmask_IO_APIC_irq(irq_to_desc(0));
        if (timer_irq_works()) {
            local_irq_restore(flags);
            return;
        }
        clear_IO_APIC_pin(apic1, pin1);
        printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to "
               "IO-APIC\n");
    }

    printk(KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... ");
    if (pin2 != -1) {
        printk("\n..... (found pin %d) ...", pin2);
        /*
         * legacy devices should be connected to IO APIC #0
         */
        setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
        if (timer_irq_works()) {
            local_irq_restore(flags);
            printk("works.\n");
            if (pin1 != -1)
                replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
            else
                add_pin_to_irq(0, apic2, pin2);
            return;
        }
        /*
         * Cleanup, just in case ...
         */
        clear_IO_APIC_pin(apic2, pin2);
    }
    printk(" failed.\n");

    if (nmi_watchdog == NMI_IO_APIC) {
        printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
        nmi_watchdog = 0;
    }

    printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");

    disable_8259A_irq(irq_to_desc(0));
    irq_desc[0].handler = &lapic_irq_type;
    apic_write(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
    enable_8259A_irq(irq_to_desc(0));

    if (timer_irq_works()) {
        local_irq_restore(flags);
        printk(" works.\n");
        return;
    }
    apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
    printk(" failed.\n");

    printk(KERN_INFO "...trying to set up timer as ExtINT IRQ...");

    /*timer_ack = 0;*/
    init_8259A(0);
    make_8259A_irq(0);
    apic_write(APIC_LVT0, APIC_DM_EXTINT);

    unlock_ExtINT_logic();

    local_irq_restore(flags);

    if (timer_irq_works()) {
        printk(" works.\n");
        return;
    }
    printk(" failed :(.\n");
    panic("IO-APIC + timer doesn't work! Boot with apic_verbosity=debug "
          "and send a report. Then try booting with the 'noapic' option");
}

/*
 * IRQs that are handled by the PIC in the MPS IOAPIC case.
 * - IRQ2 is the cascade IRQ, and cannot be an io-apic IRQ.
 *   Linux doesn't really care, as it's not actually used
 *   for any interrupt handling anyway.
 */
#define PIC_IRQS (1 << PIC_CASCADE_IR)

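/*
 * Snapshot of every IO-APIC redirection entry, sized and allocated at boot
 * by ioapic_pm_state_alloc() below and then saved/replayed across
 * suspend/resume by ioapic_suspend()/ioapic_resume().
 */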
static struct IO_APIC_route_entry *ioapic_pm_state;

static void __init ioapic_pm_state_alloc(void)
{
    int i, nr_entry = 0;

    for (i = 0; i < nr_ioapics; i++)
        nr_entry += nr_ioapic_entries[i];

    ioapic_pm_state = _xmalloc(sizeof(struct IO_APIC_route_entry)*nr_entry,
                               sizeof(struct IO_APIC_route_entry));
    BUG_ON(ioapic_pm_state == NULL);
}

void __init setup_IO_APIC(void)
{
    enable_IO_APIC();

    if (acpi_ioapic)
        io_apic_irqs = ~0; /* all IRQs go through IOAPIC */
    else
        io_apic_irqs = ~PIC_IRQS;

    printk("ENABLING IO-APIC IRQs\n");
    printk(" -> Using %s ACK method\n", ioapic_ack_new ? "new" : "old");

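    /*
     * A rough sketch of the difference, as far as the handlers wired up
     * here show it: with the "new" ACK method the level-triggered handler
     * only completes any pending vector move in .ack and defers the actual
     * EOI to .end, i.e. until after the action has run, whereas the "old"
     * method keeps ioapic_level_type's original ack-at-entry behaviour.
     */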
    if (ioapic_ack_new) {
        ioapic_level_type.ack = irq_complete_move;
        ioapic_level_type.end = end_level_ioapic_irq_new;
    }

    /*
     * Set up IO-APIC IRQ routing.
     */
    if (!acpi_ioapic)
        setup_ioapic_ids_from_mpc();
    sync_Arb_IDs();
    setup_IO_APIC_irqs();
    init_IO_APIC_traps();
    check_timer();
    print_IO_APIC();
    ioapic_pm_state_alloc();

    register_keyhandler('z', _print_IO_APIC_keyhandler, "dump IOAPIC info", 1);
}

void ioapic_suspend(void)
{
    struct IO_APIC_route_entry *entry = ioapic_pm_state;
    unsigned long flags;
    int apic, i;

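    /*
     * Each redirection entry is 64 bits wide, held in two consecutive
     * 32-bit IO-APIC registers: 0x10 + 2*i for the low half (vector,
     * delivery/trigger/mask bits) and 0x11 + 2*i for the high half
     * (destination).  The casts below simply save those two raw words
     * into the corresponding struct IO_APIC_route_entry slot.
     */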
    spin_lock_irqsave(&ioapic_lock, flags);
    for (apic = 0; apic < nr_ioapics; apic++) {
        for (i = 0; i < nr_ioapic_entries[apic]; i++, entry++) {
            *(((int *)entry) + 1) = __io_apic_read(apic, 0x11 + 2 * i);
            *(((int *)entry) + 0) = __io_apic_read(apic, 0x10 + 2 * i);
        }
    }
    spin_unlock_irqrestore(&ioapic_lock, flags);
}

void ioapic_resume(void)
{
    struct IO_APIC_route_entry *entry = ioapic_pm_state;
    unsigned long flags;
    union IO_APIC_reg_00 reg_00;
    int i, apic;

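    /*
     * Restore each IO-APIC's ID register first (it may have been reset
     * across the suspend), then replay the saved redirection entries.
     * Presumably the high (destination) half is written before the low
     * half so that the vector/mask word only takes effect once the
     * destination is already correct, mirroring __ioapic_write_entry().
     */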
    spin_lock_irqsave(&ioapic_lock, flags);
    for (apic = 0; apic < nr_ioapics; apic++) {
        if (!nr_ioapic_entries[apic])
            continue;
        reg_00.raw = __io_apic_read(apic, 0);
        if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid) {
            reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
            __io_apic_write(apic, 0, reg_00.raw);
        }
        for (i = 0; i < nr_ioapic_entries[apic]; i++, entry++) {
            __io_apic_write(apic, 0x11 + 2 * i, *(((int *)entry) + 1));
            __io_apic_write(apic, 0x10 + 2 * i, *(((int *)entry) + 0));
        }
    }
    spin_unlock_irqrestore(&ioapic_lock, flags);
}

/* --------------------------------------------------------------------------
   ACPI-based IOAPIC Configuration
   -------------------------------------------------------------------------- */

int __init io_apic_get_unique_id (int ioapic, int apic_id)
{
    union IO_APIC_reg_00 reg_00;
    static physid_mask_t __initdata apic_id_map = PHYSID_MASK_NONE;
    unsigned long flags;
    int i = 0;

    /*
     * The P4 platform supports up to 256 APIC IDs on two separate APIC
     * buses (one for LAPICs, one for IOAPICs), whereas its predecessors
     * only support up to 16 on one shared APIC bus.
     *
     * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
     * advantage of new APIC bus architecture.
     */

    if (physids_empty(apic_id_map))
        ioapic_phys_id_map(&apic_id_map);

    spin_lock_irqsave(&ioapic_lock, flags);
    reg_00.raw = io_apic_read(ioapic, 0);
    spin_unlock_irqrestore(&ioapic_lock, flags);

    if (apic_id >= get_physical_broadcast()) {
        printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
               "%d\n", ioapic, apic_id, reg_00.bits.ID);
        apic_id = reg_00.bits.ID;
    }

    /*
     * Every APIC in a system must have a unique ID or we get lots of nice
     * 'stuck on smp_invalidate_needed IPI wait' messages.
     */
    if (check_apicid_used(&apic_id_map, apic_id)) {

        for (i = 0; i < get_physical_broadcast(); i++) {
            if (!check_apicid_used(&apic_id_map, i))
                break;
        }

        if (i == get_physical_broadcast())
            panic("Max apic_id exceeded");

        printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
               "trying %d\n", ioapic, apic_id, i);

        apic_id = i;
    }

    set_apicid(apic_id, &apic_id_map);

    if (reg_00.bits.ID != apic_id) {
        reg_00.bits.ID = apic_id;

        spin_lock_irqsave(&ioapic_lock, flags);
        io_apic_write(ioapic, 0, reg_00.raw);
        reg_00.raw = io_apic_read(ioapic, 0);
        spin_unlock_irqrestore(&ioapic_lock, flags);

        /* Sanity check */
        if (reg_00.bits.ID != apic_id) {
            printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
            return -1;
        }
    }

    apic_printk(APIC_VERBOSE, KERN_INFO
                "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);

    return apic_id;
}


int __init io_apic_get_version (int ioapic)
{
    union IO_APIC_reg_01 reg_01;
    unsigned long flags;

    spin_lock_irqsave(&ioapic_lock, flags);
    reg_01.raw = io_apic_read(ioapic, 1);
    spin_unlock_irqrestore(&ioapic_lock, flags);

    return reg_01.bits.version;
}


int __init io_apic_get_redir_entries (int ioapic)
{
    union IO_APIC_reg_01 reg_01;
    unsigned long flags;

    spin_lock_irqsave(&ioapic_lock, flags);
    reg_01.raw = io_apic_read(ioapic, 1);
    spin_unlock_irqrestore(&ioapic_lock, flags);

    return reg_01.bits.entries;
}

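/*
 * Program one IO-APIC pin for a (typically ACPI-enumerated) device
 * interrupt.  As a rough usage sketch (the caller and argument sources are
 * assumptions, not taken from this file), the ACPI GSI registration path
 * would invoke it roughly as
 *
 *     io_apic_set_pci_routing(ioapic, pin, irq, 1, 1);
 *
 * i.e. level-triggered and active-low for a PCI INTx line, while edge/high
 * (0, 0) is the usual choice for ISA-style interrupts.
 */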
int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
{
    struct irq_desc *desc = irq_to_desc(irq);
    struct IO_APIC_route_entry entry;
    cpumask_t mask;
    unsigned long flags;
    int vector;

    if (!IO_APIC_IRQ(irq)) {
        printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ %d\n",
               ioapic, irq);
        return -EINVAL;
    }

    /*
     * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
     * Note that we mask (disable) IRQs now -- these get enabled when the
     * corresponding device driver registers for this IRQ.
     */

    memset(&entry, 0, sizeof(entry));

    entry.delivery_mode = INT_DELIVERY_MODE;
    entry.dest_mode = INT_DEST_MODE;
    entry.trigger = edge_level;
    entry.polarity = active_high_low;
    entry.mask = 1;

    /*
     * IRQs < 16 are already in the irq_2_pin[] map
     */
    if (!platform_legacy_irq(irq))
        add_pin_to_irq(irq, ioapic, pin);

    vector = assign_irq_vector(irq, NULL);
    if (vector < 0)
        return vector;
    entry.vector = vector;

    cpumask_copy(&mask, TARGET_CPUS);
    /* Don't chance ending up with an empty mask. */
    if (cpumask_intersects(&mask, desc->arch.cpu_mask))
        cpumask_and(&mask, &mask, desc->arch.cpu_mask);
    SET_DEST(entry, logical, cpu_mask_to_apicid(&mask));

    apic_printk(APIC_DEBUG, KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry "
                "(%d-%d -> %#x -> IRQ %d Mode:%i Active:%i)\n", ioapic,
                mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
                edge_level, active_high_low);

    ioapic_register_intr(irq, edge_level);

    if (!ioapic && platform_legacy_irq(irq))
        disable_8259A_irq(desc);

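    /*
     * Note the lock dance below: interrupts are disabled by the irqsave
     * above and stay disabled until the final irqrestore; ioapic_lock is
     * dropped without re-enabling them so that desc->lock can be taken
     * and the handler started under the same interrupts-off window.
     */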
    spin_lock_irqsave(&ioapic_lock, flags);
    __ioapic_write_entry(ioapic, pin, 0, entry);
    set_native_irq_info(irq, TARGET_CPUS);
    spin_unlock(&ioapic_lock);

    spin_lock(&desc->lock);
    if (!(desc->status & (IRQ_DISABLED | IRQ_GUEST)))
        desc->handler->startup(desc);
    spin_unlock_irqrestore(&desc->lock, flags);

    return 0;
}

static int ioapic_physbase_to_id(unsigned long physbase)
{
    int apic;
    for ( apic = 0; apic < nr_ioapics; apic++ )
    {
        if ( !nr_ioapic_entries[apic] )
            continue;
        if ( mp_ioapics[apic].mpc_apicaddr == physbase )
            return apic;
    }
    return -EINVAL;
}

static int apic_pin_2_gsi_irq(int apic, int pin)
{
    int idx;

    if (apic < 0)
        return -EINVAL;

    idx = find_irq_entry(apic, pin, mp_INT);

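    /*
     * Pins that have an MP-table/ACPI interrupt source entry go through
     * pin_2_irq() (which is where e.g. ISA IRQ remapping gets applied);
     * anything else is assumed to be identity-mapped at the IO-APIC's GSI
     * base plus the pin number.
     */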
    return idx >= 0 ? pin_2_irq(idx, apic, pin)
                    : io_apic_gsi_base(apic) + pin;
}

int ioapic_guest_read(unsigned long physbase, unsigned int reg, u32 *pval)
{
    int apic;
    unsigned long flags;

    if ( (apic = ioapic_physbase_to_id(physbase)) < 0 )
        return apic;

    spin_lock_irqsave(&ioapic_lock, flags);
    *pval = io_apic_read(apic, reg);
    spin_unlock_irqrestore(&ioapic_lock, flags);

    return 0;
}

#define WARN_BOGUS_WRITE(f, a...)                                   \
    dprintk(XENLOG_INFO, "IO-APIC: apic=%d, pin=%d, irq=%d\n"       \
            XENLOG_INFO "IO-APIC: new_entry=%08x\n"                 \
            XENLOG_INFO "IO-APIC: " f "\n",                         \
            apic, pin, irq, *(u32 *)&rte, ##a )

int ioapic_guest_write(unsigned long physbase, unsigned int reg, u32 val)
{
    int apic, pin, irq, ret, pirq;
    struct IO_APIC_route_entry rte = { 0 };
    unsigned long flags;
    struct irq_desc *desc;

    if ( (apic = ioapic_physbase_to_id(physbase)) < 0 )
        return apic;

    /* Only write to the first half of a route entry. */
    if ( (reg < 0x10) || (reg & 1) )
        return 0;

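    /*
     * Redirection entry i lives in registers 0x10 + 2*i (low half) and
     * 0x11 + 2*i (high half), so e.g. a guest write to register 0x14 is a
     * write to the low half of pin 2, while 0x15 (its high half) was
     * already filtered out above -- the destination is Xen's business.
     */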
    pin = (reg - 0x10) >> 1;

    /* Write first half from guest; second half is target info. */
    *(u32 *)&rte = val;

    /*
     * What about weird destination types?
     *  SMI:    Ignore? Ought to be set up by the BIOS.
     *  NMI:    Ignore? Watchdog functionality is Xen's concern.
     *  INIT:   Definitely ignore: probably a guest OS bug.
     *  ExtINT: Ignore? Linux only asserts this at start of day.
     * For now, print a message and return an error. We can fix up on demand.
     */
    if ( rte.delivery_mode > dest_LowestPrio )
    {
        printk("ERROR: Attempt to write weird IOAPIC destination mode!\n");
        printk("       APIC=%d/%d, lo-reg=%x\n", apic, pin, val);
        return -EINVAL;
    }

    /*
     * The guest does not know physical APIC arrangement (flat vs. cluster).
     * Apply genapic conventions for this platform.
     */
    rte.delivery_mode = INT_DELIVERY_MODE;
    rte.dest_mode = INT_DEST_MODE;

    irq = apic_pin_2_gsi_irq(apic, pin);
    if ( irq < 0 )
        return irq;

    desc = irq_to_desc(irq);

    /*
     * Since PHYSDEVOP_alloc_irq_vector is dummy, rte.vector is the pirq
     * which corresponds to this IO-APIC pin; retrieve it to build the
     * pirq and irq mapping. Where the GSI is greater than 256, we assume
     * that dom0 pirq == irq.
     */
    if ( !rte.mask )
    {
        pirq = (irq >= 256) ? irq : rte.vector;
        if ( pirq >= hardware_domain->nr_pirqs )
            return -EINVAL;
    }
    else
        pirq = -1;

    if ( desc->action )
    {
        spin_lock_irqsave(&ioapic_lock, flags);
        ret = io_apic_read(apic, 0x10 + 2 * pin);
        spin_unlock_irqrestore(&ioapic_lock, flags);
        rte.vector = desc->arch.vector;
        if ( *(u32 *)&rte != ret )
            WARN_BOGUS_WRITE("old_entry=%08x pirq=%d\n" XENLOG_INFO
                             "IO-APIC: Attempt to modify IO-APIC pin for in-use IRQ!",
                             ret, pirq);
        return 0;
    }

    if ( desc->arch.vector <= 0 || desc->arch.vector > LAST_DYNAMIC_VECTOR )
    {
        int vector = desc->arch.vector;

        if ( vector < FIRST_HIPRIORITY_VECTOR )
            add_pin_to_irq(irq, apic, pin);
        else
            desc->arch.vector = IRQ_VECTOR_UNASSIGNED;
        ret = assign_irq_vector(irq, NULL);
        if ( ret < 0 )
        {
            if ( vector < FIRST_HIPRIORITY_VECTOR )
                remove_pin_from_irq(irq, apic, pin);
            else
                desc->arch.vector = vector;
            return ret;
        }

        printk(XENLOG_INFO "allocated vector %02x for irq %d\n", ret, irq);
    }
    if ( pirq >= 0 )
    {
        spin_lock(&hardware_domain->event_lock);
        ret = map_domain_pirq(hardware_domain, pirq, irq,
                              MAP_PIRQ_TYPE_GSI, NULL);
        spin_unlock(&hardware_domain->event_lock);
        if ( ret < 0 )
            return ret;
    }

    spin_lock_irqsave(&ioapic_lock, flags);
    /* Set the correct irq-handling type. */
    desc->handler = rte.trigger ?
        &ioapic_level_type : &ioapic_edge_type;

    /* Mask iff level triggered. */
    rte.mask = rte.trigger;
    /* Set the vector field to the real vector! */
    rte.vector = desc->arch.vector;

    SET_DEST(rte, logical, cpu_mask_to_apicid(desc->arch.cpu_mask));

    __ioapic_write_entry(apic, pin, 0, rte);

    spin_unlock_irqrestore(&ioapic_lock, flags);

    return 0;
}

static const char *delivery_mode_2_str(
    const enum ioapic_irq_destination_types mode)
{
    switch ( mode )
    {
    case dest_Fixed: return "Fixed";
    case dest_LowestPrio: return "LoPri";
    case dest_SMI: return "SMI";
    case dest_NMI: return "NMI";
    case dest_INIT: return "INIT";
    case dest_ExtINT: return "ExINT";
    case dest__reserved_1:
    case dest__reserved_2: return "Resvd";
    default: return "INVAL";
    }
}

void dump_ioapic_irq_info(void)
{
    struct irq_pin_list *entry;
    struct IO_APIC_route_entry rte;
    unsigned int irq, pin, printed = 0;

    if ( !irq_2_pin )
        return;

    for ( irq = 0; irq < nr_irqs_gsi; irq++ )
    {
        if ( !(irq & 0x1f) )
            process_pending_softirqs();

        entry = &irq_2_pin[irq];
        if ( entry->pin == -1 )
            continue;

        if ( !printed++ )
            printk("IO-APIC interrupt information:\n");

        printk(" IRQ%3d Vec%3d:\n", irq, irq_to_vector(irq));

        for ( ; ; )
        {
            pin = entry->pin;

            printk(" Apic 0x%02x, Pin %2d: ", entry->apic, pin);

            rte = ioapic_read_entry(entry->apic, pin, 0);

            printk("vec=%02x delivery=%-5s dest=%c status=%d "
                   "polarity=%d irr=%d trig=%c mask=%d dest_id:%d\n",
                   rte.vector, delivery_mode_2_str(rte.delivery_mode),
                   rte.dest_mode ? 'L' : 'P',
                   rte.delivery_status, rte.polarity, rte.irr,
                   rte.trigger ? 'L' : 'E', rte.mask,
                   rte.dest.logical.logical_dest);

            if ( entry->next == 0 )
                break;
            entry = &irq_2_pin[entry->next];
        }
    }
}

static unsigned int __initdata max_gsi_irqs;
integer_param("max_gsi_irqs", max_gsi_irqs);

static __init bool bad_ioapic_register(unsigned int idx)
{
    union IO_APIC_reg_00 reg_00 = { .raw = io_apic_read(idx, 0) };
    union IO_APIC_reg_01 reg_01 = { .raw = io_apic_read(idx, 1) };
    union IO_APIC_reg_02 reg_02 = { .raw = io_apic_read(idx, 2) };

    if ( reg_00.raw == -1 && reg_01.raw == -1 && reg_02.raw == -1 )
    {
        printk(KERN_WARNING "I/O APIC %#x registers return all ones, skipping!\n",
               mp_ioapics[idx].mpc_apicaddr);
        return true;
    }

    return false;
}

void __init init_ioapic_mappings(void)
{
    unsigned long ioapic_phys;
    unsigned int i, idx = FIX_IO_APIC_BASE_0;
    union IO_APIC_reg_01 reg_01;

    if ( smp_found_config )
        nr_irqs_gsi = 0;
    for ( i = 0; i < nr_ioapics; i++ )
    {
        if ( smp_found_config )
        {
            ioapic_phys = mp_ioapics[i].mpc_apicaddr;
            if ( !ioapic_phys )
            {
                printk(KERN_ERR "WARNING: bogus zero IO-APIC address "
                       "found in MPTABLE, disabling IO/APIC support!\n");
                smp_found_config = false;
                skip_ioapic_setup = true;
                goto fake_ioapic_page;
            }
        }
        else
        {
    fake_ioapic_page:
            ioapic_phys = __pa(alloc_xenheap_page());
            clear_page(__va(ioapic_phys));
        }
        set_fixmap_nocache(idx, ioapic_phys);
        apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08Lx (%08lx)\n",
                    __fix_to_virt(idx), ioapic_phys);
        idx++;

        if ( bad_ioapic_register(i) )
        {
            clear_fixmap(idx);
            continue;
        }

        if ( smp_found_config )
        {
            /* The number of IO-APIC IRQ registers (== #pins): */
            reg_01.raw = io_apic_read(i, 1);
            nr_ioapic_entries[i] = reg_01.bits.entries + 1;
            nr_irqs_gsi += nr_ioapic_entries[i];

            if ( rangeset_add_singleton(mmio_ro_ranges,
                                        ioapic_phys >> PAGE_SHIFT) )
                printk(KERN_ERR "Failed to mark IO-APIC page %lx read-only\n",
                       ioapic_phys);
        }
    }

    nr_irqs_gsi = max(nr_irqs_gsi, highest_gsi() + 1);

    if ( max_gsi_irqs == 0 )
        max_gsi_irqs = nr_irqs ? nr_irqs / 8 : PAGE_SIZE;
    else if ( nr_irqs != 0 && max_gsi_irqs > nr_irqs )
    {
        printk(XENLOG_WARNING "\"max_gsi_irqs=\" cannot be specified larger"
               " than \"nr_irqs=\"\n");
        max_gsi_irqs = nr_irqs;
    }
    if ( max_gsi_irqs < 16 )
        max_gsi_irqs = 16;

    /* for PHYSDEVOP_pirq_eoi_gmfn guest assumptions */
    if ( max_gsi_irqs > PAGE_SIZE * 8 )
        max_gsi_irqs = PAGE_SIZE * 8;

    if ( !smp_found_config || skip_ioapic_setup || nr_irqs_gsi < 16 )
        nr_irqs_gsi = 16;
    else if ( nr_irqs_gsi > max_gsi_irqs )
    {
        printk(XENLOG_WARNING "Limiting to %u GSI IRQs (found %u)\n",
               max_gsi_irqs, nr_irqs_gsi);
        nr_irqs_gsi = max_gsi_irqs;
    }

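    /*
     * Default the total IRQ count if "nr_irqs=" was not given: 16 legacy
     * IRQs plus one dynamic-vector block per present CPU, but never less
     * than 8 times the number of GSIs, leaving the rest of the IRQ space
     * (reported below as MSI/MSI-X) beyond the GSI range.  Without a
     * local APIC it simply falls back to the GSI count.
     */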
    if ( nr_irqs == 0 )
        nr_irqs = cpu_has_apic ?
                  max(16U + num_present_cpus() * NR_DYNAMIC_VECTORS,
                      8 * nr_irqs_gsi) :
                  nr_irqs_gsi;
    else if ( nr_irqs < 16 )
        nr_irqs = 16;
    printk(XENLOG_INFO "IRQ limits: %u GSI, %u MSI/MSI-X\n",
           nr_irqs_gsi, nr_irqs - nr_irqs_gsi);
}

unsigned int arch_hwdom_irqs(domid_t domid)
{
    unsigned int n = fls(num_present_cpus());

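    /*
     * On top of all GSIs, grant roughly one dynamic-vector block per
     * power-of-two step of present CPUs (capped at dom0's vCPU count for
     * dom0 itself), bounded by the global nr_irqs and by what the
     * one-page pirq EOI bitmap can track.
     */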
    if ( !domid )
        n = min(n, dom0_max_vcpus());
    n = min(nr_irqs_gsi + n * NR_DYNAMIC_VECTORS, nr_irqs);

    /* Bounded by the domain pirq eoi bitmap gfn. */
    n = min_t(unsigned int, n, PAGE_SIZE * BITS_PER_BYTE);

    printk("Dom%d has maximum %u PIRQs\n", domid, n);

    return n;
}