/*
 * Intel Multiprocessor Specification 1.1 and 1.4
 * compliant MP-table parsing routines.
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *
 * Fixes
 *	Erich Boleyn	: MP v1.4 and additional changes.
 *	Alan Cox	: Added EBDA scanning
 *	Ingo Molnar	: various cleanups and rewrites
 *	Maciej W. Rozycki: Bits for default MP configurations
 *	Paul Diefenbaugh: Added full ACPI support
 */

#include <xen/types.h>
#include <xen/irq.h>
#include <xen/init.h>
#include <xen/acpi.h>
#include <xen/delay.h>
#include <xen/efi.h>
#include <xen/sched.h>

#include <xen/bitops.h>
#include <asm/smp.h>
#include <asm/acpi.h>
#include <asm/mtrr.h>
#include <asm/mpspec.h>
#include <asm/io_apic.h>
#include <asm/setup.h>

#include <mach_apic.h>
#include <mach_mpparse.h>
#include <bios_ebda.h>

/* Have we found an MP table */
bool __initdata smp_found_config;

/*
 * Various Linux-internal data structures created from the
 * MP-table.
 */
unsigned char __read_mostly apic_version[MAX_APICS];
unsigned char __read_mostly mp_bus_id_to_type[MAX_MP_BUSSES];

/* I/O APIC entries */
struct mpc_config_ioapic __read_mostly mp_ioapics[MAX_IO_APICS];

/* MP IRQ source entries */
struct mpc_config_intsrc __read_mostly mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int __read_mostly mp_irq_entries;

bool __read_mostly pic_mode;
bool __read_mostly def_to_bigsmp;
unsigned long __read_mostly mp_lapic_addr;

/* Processor that is doing the boot up */
unsigned int __read_mostly boot_cpu_physical_apicid = BAD_APICID;

/* Internal processor count */
static unsigned int num_processors;
static unsigned int __initdata disabled_cpus;

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;

void __init set_nr_cpu_ids(unsigned int max_cpus)
{
	if (!max_cpus)
		max_cpus = num_processors + disabled_cpus;
	if (max_cpus > NR_CPUS)
		max_cpus = NR_CPUS;
	else if (!max_cpus)
		max_cpus = 1;
	printk(XENLOG_INFO "SMP: Allowing %u CPUs (%d hotplug CPUs)\n",
	       max_cpus, max_t(int, max_cpus - num_processors, 0));
	nr_cpu_ids = max_cpus;

#ifndef nr_cpumask_bits
	nr_cpumask_bits = (max_cpus + (BITS_PER_LONG - 1)) &
			  ~(BITS_PER_LONG - 1);
	printk(XENLOG_DEBUG "NR_CPUS:%u nr_cpumask_bits:%u\n",
	       NR_CPUS, nr_cpumask_bits);
#endif
}

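/*
 * Rough sketch of what set_nr_sockets() below computes (illustrative
 * numbers, not taken from any particular table): the socket count is
 * estimated as the highest APIC ID divided by threads per socket
 * (cores * siblings) plus one, e.g. a last APIC ID of 15 on a 4-core,
 * 2-thread part gives 15 / 4 / 2 + 1 = 2 sockets.  Disabled (hotpluggable)
 * CPUs are accounted for the same way.
 */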
void __init set_nr_sockets(void)
{
	nr_sockets = last_physid(phys_cpu_present_map)
		     / boot_cpu_data.x86_max_cores
		     / boot_cpu_data.x86_num_siblings + 1;
	if (disabled_cpus)
		nr_sockets += (disabled_cpus - 1)
			      / boot_cpu_data.x86_max_cores
			      / boot_cpu_data.x86_num_siblings + 1;
	printk(XENLOG_DEBUG "nr_sockets: %u\n", nr_sockets);
}

/*
 * Intel MP BIOS table parsing routines:
 */


/*
 * Checksum an MP configuration block.
 */

static int __init mpf_checksum(unsigned char *mp, int len)
{
	int sum = 0;

	while (len--)
		sum += *mp++;

	return sum & 0xFF;
}

/* Return Xen's logical cpu_id of the newly added CPU, or <0 on error */
static int MP_processor_info_x(struct mpc_config_processor *m,
			       u32 apicid, bool hotplug)
{
	int ver, cpu = 0;

	if (!(m->mpc_cpuflag & CPU_ENABLED)) {
		if (!hotplug)
			++disabled_cpus;
		return -EINVAL;
	}

	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
		Dprintk(" Bootup CPU\n");
		boot_cpu_physical_apicid = apicid;
	}

	ver = m->mpc_apicver;

	/*
	 * Validate version
	 */
	if (ver == 0x0) {
		printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! "
		       "fixing up to 0x10. (tell your hw vendor)\n",
		       apicid);
		ver = 0x10;
	}
	apic_version[apicid] = ver;

	set_apicid(apicid, &phys_cpu_present_map);

	if (num_processors >= nr_cpu_ids) {
		printk(KERN_WARNING "WARNING: NR_CPUS limit of %u reached."
		       " Processor ignored.\n", nr_cpu_ids);
		return -ENOSPC;
	}

	if (num_processors >= 8 && hotplug && genapic == &apic_default) {
		printk(KERN_WARNING "WARNING: CPUs limit of 8 reached."
		       " Processor ignored.\n");
		return -ENOSPC;
	}

	/* Boot cpu has been marked present in smp_prepare_boot_cpu */
	if (!(m->mpc_cpuflag & CPU_BOOTPROCESSOR)) {
		cpu = alloc_cpu_id();
		if (cpu < 0) {
			printk(KERN_WARNING "WARNING: Can't alloc cpu_id."
			       " Processor with apicid %i ignored\n", apicid);
			return cpu;
		}
		x86_cpu_to_apicid[cpu] = apicid;
		cpumask_set_cpu(cpu, &cpu_present_map);
	}

	if (++num_processors > 8) {
		/*
		 * No need for processor or APIC checks: physical delivery
		 * (bigsmp) mode should always work.
		 */
		def_to_bigsmp = true;
	}

	return cpu;
}


static int MP_processor_info(struct mpc_config_processor *m)
{
	return MP_processor_info_x(m, m->mpc_apicid, 0);
}

static void __init MP_bus_info (struct mpc_config_bus *m)
{
	char str[7];

	memcpy(str, m->mpc_bustype, 6);
	str[6] = 0;

#if 0 /* size of mpc_busid (8 bits) makes this check unnecessary */
	if (m->mpc_busid >= MAX_MP_BUSSES) {
		printk(KERN_WARNING "MP table busid value (%d) for bustype %s "
		       " is too large, max. supported is %d\n",
		       m->mpc_busid, str, MAX_MP_BUSSES - 1);
		return;
	}
#endif

	if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) {
		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
	} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) {
		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
	} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) {
		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
	} else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) {
		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
	} else if (strncmp(str, BUSTYPE_NEC98, sizeof(BUSTYPE_NEC98)-1) == 0) {
		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_NEC98;
	} else {
		printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
	}
}

static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
{
	if (!(m->mpc_flags & MPC_APIC_USABLE))
		return;

	printk(KERN_INFO "I/O APIC #%d Version %d at %#x.\n",
	       m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
	if (nr_ioapics >= MAX_IO_APICS) {
		printk(KERN_CRIT "Max # of I/O APICs (%d) exceeded (found %d).\n",
		       MAX_IO_APICS, nr_ioapics);
		panic("Recompile kernel with bigger MAX_IO_APICS");
	}
	if (!m->mpc_apicaddr) {
		printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
		       " found in MP table, skipping!\n");
		return;
	}
	mp_ioapics[nr_ioapics] = *m;
	nr_ioapics++;
}

static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
{
	mp_irqs[mp_irq_entries] = *m;
	Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
		m->mpc_irqtype, m->mpc_irqflag & 3,
		(m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
		m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
	if (++mp_irq_entries == MAX_IRQ_SOURCES)
		panic("Max # of irq sources exceeded");
}

static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
{
	Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
		" IRQ %02x, APIC ID %x, APIC LINT %02x\n",
		m->mpc_irqtype, m->mpc_irqflag & 3,
		(m->mpc_irqflag >> 2) & 3, m->mpc_srcbusid,
		m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
	/*
	 * Well it seems all SMP boards in existence
	 * use ExtINT/LVT1 == LINT0 and
	 * NMI/LVT2 == LINT1 - the following check
	 * will show us if this assumption is false.
	 * Until then we do not have to add baggage.
	 */
	if ((m->mpc_irqtype == mp_ExtINT) &&
	    (m->mpc_destapiclint != 0))
		BUG();
	if ((m->mpc_irqtype == mp_NMI) &&
	    (m->mpc_destapiclint != 1))
		BUG();
}

/*
 * Read/parse the MPC
 */

static int __init smp_read_mpc(struct mp_config_table *mpc)
{
	char str[16];
	char oem[10];
	int count=sizeof(*mpc);
	unsigned char *mpt=((unsigned char *)mpc)+count;

	if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
		printk(KERN_ERR "SMP mptable: bad signature [%#x]!\n",
		       *(u32 *)mpc->mpc_signature);
		return 0;
	}
	if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
		printk(KERN_ERR "SMP mptable: checksum error!\n");
		return 0;
	}
	if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
		printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
		       mpc->mpc_spec);
		return 0;
	}
	if (!mpc->mpc_lapic) {
		printk(KERN_ERR "SMP mptable: null local APIC address!\n");
		return 0;
	}
	memcpy(oem,mpc->mpc_oem,8);
	oem[8]=0;
	printk(KERN_INFO "OEM ID: %s ",oem);

	memcpy(str,mpc->mpc_productid,12);
	str[12]=0;
	printk("Product ID: %s ",str);

	mps_oem_check(mpc, oem, str);

	printk("APIC at: %#x\n", mpc->mpc_lapic);

	/*
	 * Save the local APIC address (it might be non-default) -- but only
	 * if we're not using ACPI.
	 */
	if (!acpi_lapic)
		mp_lapic_addr = mpc->mpc_lapic;

	/*
	 * Now process the configuration blocks.
	 */
	while (count < mpc->mpc_length) {
		switch(*mpt) {
			case MP_PROCESSOR:
			{
				struct mpc_config_processor *m=
					(struct mpc_config_processor *)mpt;

				mpt += sizeof(*m);
				count += sizeof(*m);

				/* ACPI may have already provided this data. */
				if (acpi_lapic)
					break;

				printk("Processor #%02x %u:%u APIC version %u%s\n",
				       m->mpc_apicid,
				       MASK_EXTR(m->mpc_cpufeature,
						 CPU_FAMILY_MASK),
				       MASK_EXTR(m->mpc_cpufeature,
						 CPU_MODEL_MASK),
				       m->mpc_apicver,
				       m->mpc_cpuflag & CPU_ENABLED
				       ? "" : " [disabled]");
				MP_processor_info(m);
				break;
			}
			case MP_BUS:
			{
				struct mpc_config_bus *m=
					(struct mpc_config_bus *)mpt;
				MP_bus_info(m);
				mpt += sizeof(*m);
				count += sizeof(*m);
				break;
			}
			case MP_IOAPIC:
			{
				struct mpc_config_ioapic *m=
					(struct mpc_config_ioapic *)mpt;
				MP_ioapic_info(m);
				mpt+=sizeof(*m);
				count+=sizeof(*m);
				break;
			}
			case MP_INTSRC:
			{
				struct mpc_config_intsrc *m=
					(struct mpc_config_intsrc *)mpt;

				MP_intsrc_info(m);
				mpt+=sizeof(*m);
				count+=sizeof(*m);
				break;
			}
			case MP_LINTSRC:
			{
				struct mpc_config_lintsrc *m=
					(struct mpc_config_lintsrc *)mpt;
				MP_lintsrc_info(m);
				mpt+=sizeof(*m);
				count+=sizeof(*m);
				break;
			}
			default:
			{
				count = mpc->mpc_length;
				break;
			}
		}
	}
	clustered_apic_check();
	if (!num_processors)
		printk(KERN_ERR "SMP mptable: no processors registered!\n");
	return num_processors;
}

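/*
 * The EISA ELCR lives at I/O ports 0x4d0 (IRQ0-7) and 0x4d1 (IRQ8-15),
 * one bit per legacy IRQ; a set bit means the line is level triggered.
 * For example, IRQ10 maps to port 0x4d1, bit 2.
 */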
static int __init ELCR_trigger(unsigned int irq)
{
	unsigned int port;

	port = 0x4d0 + (irq >> 3);
	return (inb(port) >> (irq & 7)) & 1;
}

static void __init construct_default_ioirq_mptable(int mpc_default_type)
{
	struct mpc_config_intsrc intsrc;
	int i;
	int ELCR_fallback = 0;

	intsrc.mpc_type = MP_INTSRC;
	intsrc.mpc_irqflag = 0;			/* conforming */
	intsrc.mpc_srcbus = 0;
	intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;

	intsrc.mpc_irqtype = mp_INT;

	/*
	 * If true, we have an ISA/PCI system with no IRQ entries
	 * in the MP table. To prevent the PCI interrupts from being set up
	 * incorrectly, we try to use the ELCR. The sanity check to see if
	 * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
	 * never be level sensitive, so we simply see if the ELCR agrees.
	 * If it does, we assume it's valid.
	 */
	if (mpc_default_type == 5) {
		printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");

		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
			printk(KERN_WARNING "ELCR contains invalid data... not using ELCR\n");
		else {
			printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
			ELCR_fallback = 1;
		}
	}

	for (i = 0; platform_legacy_irq(i); i++) {
		switch (mpc_default_type) {
		case 2:
			if (i == 0 || i == 13)
				continue;	/* IRQ0 & IRQ13 not connected */
			/* fall through */
		default:
			if (i == 2)
				continue;	/* IRQ2 is never connected */
		}

		if (ELCR_fallback) {
			/*
			 * If the ELCR indicates a level-sensitive interrupt, we
			 * copy that information over to the MP table in the
			 * irqflag field (level sensitive, active high polarity).
			 */
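			/*
			 * MP spec irqflag encoding: bits 1:0 = polarity,
			 * bits 3:2 = trigger.  13 == 0b1101, i.e. level
			 * trigger (3 << 2) with active-high polarity (1).
			 */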
			if (ELCR_trigger(i))
				intsrc.mpc_irqflag = 13;
			else
				intsrc.mpc_irqflag = 0;
		}

		intsrc.mpc_srcbusirq = i;
		intsrc.mpc_dstirq = i ? i : 2;	/* IRQ0 to INTIN2 */
		MP_intsrc_info(&intsrc);
	}

	intsrc.mpc_irqtype = mp_ExtINT;
	intsrc.mpc_srcbusirq = 0;
	intsrc.mpc_dstirq = 0;			/* 8259A to INTIN0 */
	MP_intsrc_info(&intsrc);
}

static inline void __init construct_default_ISA_mptable(int mpc_default_type)
{
	struct mpc_config_processor processor;
	struct mpc_config_bus bus;
	struct mpc_config_ioapic ioapic;
	struct mpc_config_lintsrc lintsrc;
	int linttypes[2] = { mp_ExtINT, mp_NMI };
	int i;

	/*
	 * local APIC has default address
	 */
	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;

	/*
	 * 2 CPUs, numbered 0 & 1.
	 */
	processor.mpc_type = MP_PROCESSOR;
	/* Either an integrated APIC or a discrete 82489DX. */
	processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
	processor.mpc_cpuflag = CPU_ENABLED;
	processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
				   (boot_cpu_data.x86_model << 4) |
				   boot_cpu_data.x86_mask;
	processor.mpc_featureflag =
		boot_cpu_data.x86_capability[cpufeat_word(X86_FEATURE_FPU)];
	processor.mpc_reserved[0] = 0;
	processor.mpc_reserved[1] = 0;
	for (i = 0; i < 2; i++) {
		processor.mpc_apicid = i;
		MP_processor_info(&processor);
	}

	bus.mpc_type = MP_BUS;
	bus.mpc_busid = 0;
	switch (mpc_default_type) {
	default:
		printk("???\n");
		printk(KERN_ERR "Unknown standard configuration %d\n",
		       mpc_default_type);
		/* fall through */
	case 1:
	case 5:
		memcpy(bus.mpc_bustype, "ISA   ", 6);
		break;
	case 2:
	case 6:
	case 3:
		memcpy(bus.mpc_bustype, "EISA  ", 6);
		break;
	case 4:
	case 7:
		memcpy(bus.mpc_bustype, "MCA   ", 6);
	}
	MP_bus_info(&bus);
	if (mpc_default_type > 4) {
		bus.mpc_busid = 1;
		memcpy(bus.mpc_bustype, "PCI   ", 6);
		MP_bus_info(&bus);
	}

	ioapic.mpc_type = MP_IOAPIC;
	ioapic.mpc_apicid = 2;
	ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
	ioapic.mpc_flags = MPC_APIC_USABLE;
	ioapic.mpc_apicaddr = 0xFEC00000;
	MP_ioapic_info(&ioapic);

	/*
	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
	 */
	construct_default_ioirq_mptable(mpc_default_type);

	lintsrc.mpc_type = MP_LINTSRC;
	lintsrc.mpc_irqflag = 0;		/* conforming */
	lintsrc.mpc_srcbusid = 0;
	lintsrc.mpc_srcbusirq = 0;
	lintsrc.mpc_destapic = MP_APIC_ALL;
	for (i = 0; i < 2; i++) {
		lintsrc.mpc_irqtype = linttypes[i];
		lintsrc.mpc_destapiclint = i;
		MP_lintsrc_info(&lintsrc);
	}
}

static __init void efi_unmap_mpf(void)
{
	if (efi_enabled(EFI_BOOT))
		clear_fixmap(FIX_EFI_MPF);
}

static struct intel_mp_floating *__initdata mpf_found;

/*
 * Scan the memory blocks for an SMP configuration block.
 */
void __init get_smp_config (void)
{
	struct intel_mp_floating *mpf = mpf_found;

	/*
	 * ACPI supports both logical (e.g. Hyper-Threading) and physical
	 * processors, where MPS only supports physical.
	 */
	if (acpi_lapic && acpi_ioapic) {
		efi_unmap_mpf();
		printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
		return;
	}
	else if (acpi_lapic)
		printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");

	printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
	if (mpf->mpf_feature2 & (1<<7)) {
		printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
		pic_mode = true;
	} else {
		printk(KERN_INFO " Virtual Wire compatibility mode.\n");
		pic_mode = false;
	}

	/*
	 * Now see if we need to read further.
	 */
	if (mpf->mpf_feature1 != 0) {

		printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
		construct_default_ISA_mptable(mpf->mpf_feature1);

	} else if (mpf->mpf_physptr) {

		/*
		 * Read the physical hardware table. Anything here will
		 * override the defaults.
		 */
		if (!smp_read_mpc((void *)(unsigned long)mpf->mpf_physptr)) {
			efi_unmap_mpf();
			smp_found_config = false;
			printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
			printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
			return;
		}
		/*
		 * If there are no explicit MP IRQ entries, then we are
		 * broken. We set up most of the low 16 IO-APIC pins to
		 * ISA defaults and hope it will work.
		 */
		if (!mp_irq_entries) {
			struct mpc_config_bus bus;

			printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");

			bus.mpc_type = MP_BUS;
			bus.mpc_busid = 0;
			memcpy(bus.mpc_bustype, "ISA   ", 6);
			MP_bus_info(&bus);

			construct_default_ioirq_mptable(0);
		}

	} else
		BUG();

	efi_unmap_mpf();

	printk(KERN_INFO "Processors: %d\n", num_processors);
	/*
	 * Only use the first configuration found.
	 */
}

static int __init smp_scan_config (unsigned long base, unsigned long length)
{
	unsigned int *bp = maddr_to_virt(base);
	struct intel_mp_floating *mpf;

	Dprintk("Scan SMP from %p for %ld bytes.\n", bp, length);
	if (sizeof(*mpf) != 16)
		printk("Error: MPF size\n");

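	/*
	 * Per the MP spec the floating pointer structure is 16 bytes long
	 * and lies on a 16-byte boundary, so scan in 16-byte steps (bp is
	 * a u32 pointer, hence the "bp += 4" below) looking for the "_MP_"
	 * signature.
	 */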
	while (length > 0) {
		mpf = (struct intel_mp_floating *)bp;
		if ((*bp == SMP_MAGIC_IDENT) &&
		    (mpf->mpf_length == 1) &&
		    !mpf_checksum((unsigned char *)bp, 16) &&
		    ((mpf->mpf_specification == 1)
		     || (mpf->mpf_specification == 4)) ) {

			smp_found_config = true;
			printk(KERN_INFO "found SMP MP-table at %08lx\n",
			       virt_to_maddr(mpf));
#if 0
			reserve_bootmem(virt_to_maddr(mpf), PAGE_SIZE);
			if (mpf->mpf_physptr) {
				/*
				 * We cannot access the MPC table to compute
				 * its size yet, as only a few megabytes from
				 * the bottom are mapped now.
				 * The PC-9800's MPC table is placed at the
				 * very end of physical memory, so simply
				 * reserving PAGE_SIZE from mpf->mpf_physptr
				 * would trigger the BUG() in reserve_bootmem.
				 */
				unsigned long size = PAGE_SIZE;
				unsigned long end = max_low_pfn * PAGE_SIZE;
				if (mpf->mpf_physptr + size > end)
					size = end - mpf->mpf_physptr;
				reserve_bootmem(mpf->mpf_physptr, size);
			}
#endif
			mpf_found = mpf;
			return 1;
		}
		bp += 4;
		length -= 16;
	}
	return 0;
}


static void __init efi_check_config(void)
{
	struct intel_mp_floating *mpf;

	if (efi.mps == EFI_INVALID_TABLE_ADDR)
		return;

	__set_fixmap(FIX_EFI_MPF, PFN_DOWN(efi.mps), __PAGE_HYPERVISOR);
	mpf = fix_to_virt(FIX_EFI_MPF) + ((long)efi.mps & (PAGE_SIZE-1));

	if (memcmp(mpf->mpf_signature, "_MP_", 4) == 0 &&
	    mpf->mpf_length == 1 &&
	    !mpf_checksum((void *)mpf, 16) &&
	    (mpf->mpf_specification == 1 || mpf->mpf_specification == 4)) {
		smp_found_config = true;
		printk(KERN_INFO "SMP MP-table at %08lx\n", efi.mps);
		mpf_found = mpf;
	}
	else
		efi_unmap_mpf();
}

void __init find_smp_config (void)
{
	unsigned int address;

	if (efi_enabled(EFI_BOOT)) {
		efi_check_config();
		return;
	}

	/*
	 * FIXME: Linux assumes you have 640K of base RAM..
	 * this continues the error...
	 *
	 * 1) Scan the bottom 1K for a signature
	 * 2) Scan the top 1K of base RAM
	 * 3) Scan the 64K of bios
	 */
	if (smp_scan_config(0x0,0x400) ||
	    smp_scan_config(639*0x400,0x400) ||
	    smp_scan_config(0xF0000,0x10000))
		return;
	/*
	 * If it is an SMP machine we should know now, unless the
	 * configuration is in an EISA/MCA bus machine with an
	 * extended bios data area.
	 *
	 * There is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E; calculate and scan it here.
	 *
	 * NOTE! There are Linux loaders that will corrupt the EBDA
	 * area, and as such this kind of SMP config may be less
	 * trustworthy, simply because the SMP table may have been
	 * stomped on during early boot. These loaders are buggy and
	 * should be fixed.
	 *
	 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
	 */

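	/*
	 * get_bios_ebda() typically reads the 16-bit real-mode segment
	 * stored at BDA offset 0x40E and shifts it left by 4 to form the
	 * physical EBDA base address (0 if the BDA word is empty).
	 */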
	address = get_bios_ebda();
	if (address)
		smp_scan_config(address, 0x400);
}

/* --------------------------------------------------------------------------
                            ACPI-based MP Configuration
   -------------------------------------------------------------------------- */

#ifdef CONFIG_ACPI

void __init mp_register_lapic_address (
	u64			address)
{
	if (!x2apic_enabled) {
		mp_lapic_addr = (unsigned long) address;
		set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
	}

	if (boot_cpu_physical_apicid == -1U)
		boot_cpu_physical_apicid = get_apic_id();

	Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
}


int mp_register_lapic(u32 id, bool enabled, bool hotplug)
{
	struct mpc_config_processor processor = {
		.mpc_type = MP_PROCESSOR,
		/* Note: We don't fill in fields not consumed anywhere. */
		.mpc_apicid = id,
		.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR)),
		.mpc_cpuflag = (enabled ? CPU_ENABLED : 0) |
			       (id == boot_cpu_physical_apicid ?
				CPU_BOOTPROCESSOR : 0),
	};

	if (MAX_APICS <= id) {
		printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
		       id, MAX_APICS);
		return -EINVAL;
	}

	return MP_processor_info_x(&processor, id, hotplug);
}

void mp_unregister_lapic(uint32_t apic_id, uint32_t cpu)
{
	if (!cpu || (apic_id == boot_cpu_physical_apicid))
		return;

	if (x86_cpu_to_apicid[cpu] != apic_id)
		return;

	physid_clear(apic_id, phys_cpu_present_map);

	x86_cpu_to_apicid[cpu] = BAD_APICID;
	cpumask_clear_cpu(cpu, &cpu_present_map);
}

#define MP_ISA_BUS		0
#define MP_MAX_IOAPIC_PIN	127

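/*
 * Per-IOAPIC GSI routing information: the GSI range each IOAPIC serves,
 * plus a bitmap (one bit per pin, up to MP_MAX_IOAPIC_PIN + 1 pins)
 * recording which pins have already been programmed, so that redundant
 * PRT entries do not reprogram them.
 */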
static struct mp_ioapic_routing {
	int			gsi_base;
	int			gsi_end;
	unsigned long		pin_programmed[BITS_TO_LONGS(MP_MAX_IOAPIC_PIN + 1)];
} mp_ioapic_routing[MAX_IO_APICS];


static int mp_find_ioapic (
	int			gsi)
{
	unsigned int i;

	/* Find the IOAPIC that manages this GSI. */
	for (i = 0; i < nr_ioapics; i++) {
		if ((gsi >= mp_ioapic_routing[i].gsi_base)
		    && (gsi <= mp_ioapic_routing[i].gsi_end))
			return i;
	}

	printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);

	return -1;
}


void __init mp_register_ioapic (
	u8			id,
	u32			address,
	u32			gsi_base)
{
	int idx = 0;
	int tmpid;

	if (nr_ioapics >= MAX_IO_APICS) {
		printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
		       "(found %d)\n", MAX_IO_APICS, nr_ioapics);
		panic("Recompile kernel with bigger MAX_IO_APICS");
	}
	if (!address) {
		printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
		       " found in MADT table, skipping!\n");
		return;
	}

	idx = nr_ioapics++;

	mp_ioapics[idx].mpc_type = MP_IOAPIC;
	mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
	mp_ioapics[idx].mpc_apicaddr = address;

	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
	if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
	    && !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
		tmpid = io_apic_get_unique_id(idx, id);
	else
		tmpid = id;
	if (tmpid == -1) {
		nr_ioapics--;
		return;
	}
	mp_ioapics[idx].mpc_apicid = tmpid;
	mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);

	/*
	 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
	 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
	 */
	mp_ioapic_routing[idx].gsi_base = gsi_base;
	mp_ioapic_routing[idx].gsi_end = gsi_base +
		io_apic_get_redir_entries(idx);

	printk("IOAPIC[%d]: apic_id %d, version %d, address %#x, "
	       "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
	       mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
	       mp_ioapic_routing[idx].gsi_base,
	       mp_ioapic_routing[idx].gsi_end);

	return;
}

unsigned __init highest_gsi(void)
{
	unsigned x, res = 0;
	for (x = 0; x < nr_ioapics; x++)
		if (res < mp_ioapic_routing[x].gsi_end)
			res = mp_ioapic_routing[x].gsi_end;
	return res;
}

unsigned int io_apic_gsi_base(unsigned int apic)
{
	return mp_ioapic_routing[apic].gsi_base;
}

void __init mp_override_legacy_irq (
	u8			bus_irq,
	u8			polarity,
	u8			trigger,
	u32			gsi)
{
	struct mpc_config_intsrc intsrc;
	int ioapic = -1;
	int pin = -1;

	/*
	 * Convert 'gsi' to 'ioapic.pin'.
	 */
	ioapic = mp_find_ioapic(gsi);
	if (ioapic < 0)
		return;
	pin = gsi - mp_ioapic_routing[ioapic].gsi_base;

	/*
	 * TBD: This check is for faulty timer entries, where the override
	 *      erroneously sets the trigger to level, resulting in a HUGE
	 *      increase of timer interrupts!
	 */
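	/*
	 * ACPI MADT trigger-mode values as passed in here: 1 == edge,
	 * 3 == level (0 means "conforms to bus"), so this forces a level
	 * override on the timer IRQ back to edge.
	 */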
	if ((bus_irq == 0) && (trigger == 3))
		trigger = 1;

	intsrc.mpc_type = MP_INTSRC;
	intsrc.mpc_irqtype = mp_INT;
	intsrc.mpc_irqflag = (trigger << 2) | polarity;
	intsrc.mpc_srcbus = MP_ISA_BUS;
	intsrc.mpc_srcbusirq = bus_irq;				/* IRQ */
	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;	/* APIC ID */
	intsrc.mpc_dstirq = pin;				/* INTIN# */

	Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
		intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
		(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
		intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);

	mp_irqs[mp_irq_entries] = intsrc;
	if (++mp_irq_entries == MAX_IRQ_SOURCES)
		panic("Max # of irq sources exceeded");

	return;
}


void __init mp_config_acpi_legacy_irqs (void)
{
	struct mpc_config_intsrc intsrc;
	int i = 0;
	int ioapic = -1;

	/*
	 * Fabricate the legacy ISA bus (bus MP_ISA_BUS).
	 */
	mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
	Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);

	/*
	 * Locate the IOAPIC that manages the ISA IRQs (0-15).
	 */
	ioapic = mp_find_ioapic(0);
	if (ioapic < 0)
		return;

	intsrc.mpc_type = MP_INTSRC;
	intsrc.mpc_irqflag = 0;				/* Conforming */
	intsrc.mpc_srcbus = MP_ISA_BUS;
	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;

	/*
	 * Use the default configuration for IRQs 0-15, unless overridden
	 * by (MADT) interrupt source override entries.
	 */
	for (i = 0; platform_legacy_irq(i); i++) {
		int idx;

		for (idx = 0; idx < mp_irq_entries; idx++) {
			struct mpc_config_intsrc *irq = mp_irqs + idx;

			/* Do we already have a mapping for this ISA IRQ? */
			if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
				break;

			/* Do we already have a mapping for this IOAPIC pin? */
			if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
			    (irq->mpc_dstirq == i))
				break;
		}

		if (idx != mp_irq_entries) {
			printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
			continue;			/* IRQ already used */
		}

		intsrc.mpc_irqtype = mp_INT;
		intsrc.mpc_srcbusirq = i;		/* Identity mapped */
		intsrc.mpc_dstirq = i;

		Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
			"%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
			(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
			intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
			intsrc.mpc_dstirq);

		mp_irqs[mp_irq_entries] = intsrc;
		if (++mp_irq_entries == MAX_IRQ_SOURCES)
			panic("Max # of irq sources exceeded");
	}
}


int mp_register_gsi (u32 gsi, int triggering, int polarity)
{
	int ioapic;
	int ioapic_pin;
	struct irq_desc *desc;
	unsigned long flags;

	/*
	 * Mapping between Global System Interrupts, which
	 * represent all possible interrupts, and IRQs
	 * assigned to actual devices.
	 */

#ifdef CONFIG_ACPI_BUS
	/* Don't set up the ACPI SCI because it's already set up */
	if (acpi_fadt.sci_int == gsi)
		return gsi;
#endif

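	/*
	 * With no IOAPIC present, all we can do for a legacy IRQ is record
	 * its trigger mode directly in the 8259 ELCR (the same registers
	 * ELCR_trigger() reads above): set the bit for level, clear it for
	 * edge.
	 */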
	if (!nr_ioapics) {
		unsigned int port = 0x4d0 + (gsi >> 3);
		u8 val;

		if (!platform_legacy_irq(gsi))
			return -EINVAL;
		val = inb(port);
		if (triggering)
			val |= 1 << (gsi & 7);
		else
			val &= ~(1 << (gsi & 7));
		outb(val, port);
		return 0;
	}

	ioapic = mp_find_ioapic(gsi);
	if (ioapic < 0) {
		printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
		return -EINVAL;
	}

	ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;

	desc = irq_to_desc(gsi);
	spin_lock_irqsave(&desc->lock, flags);
	if (!(desc->status & IRQ_DISABLED) && desc->handler != &no_irq_type) {
		spin_unlock_irqrestore(&desc->lock, flags);
		return -EEXIST;
	}
	spin_unlock_irqrestore(&desc->lock, flags);

	/*
	 * Avoid pin reprogramming. PRTs typically include entries
	 * with redundant pin->gsi mappings (but unique PCI devices);
	 * we only program the IOAPIC on the first.
	 */
	if (ioapic_pin > MP_MAX_IOAPIC_PIN) {
		printk(KERN_ERR "Invalid reference to IOAPIC pin "
		       "%d-%d\n", mp_ioapics[ioapic].mpc_apicid,
		       ioapic_pin);
		return -EINVAL;
	}
	if (test_and_set_bit(ioapic_pin,
			     mp_ioapic_routing[ioapic].pin_programmed)) {
		Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
			mp_ioapics[ioapic].mpc_apicid, ioapic_pin);
		return -EEXIST;
	}

	return io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
				       triggering, polarity);
}

#endif /* CONFIG_ACPI */