1 /*
2 * boot.c - Architecture-Specific Low-Level ACPI Boot Support
3 *
4 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
5 * Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
6 *
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; If not, see <http://www.gnu.org/licenses/>.
21 *
22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23 */
24
25 #include <xen/errno.h>
26 #include <xen/init.h>
27 #include <xen/acpi.h>
28 #include <xen/irq.h>
29 #include <xen/mm.h>
30 #include <xen/dmi.h>
31 #include <asm/fixmap.h>
32 #include <asm/page.h>
33 #include <asm/apic.h>
34 #include <asm/io_apic.h>
35 #include <asm/apic.h>
36 #include <asm/io.h>
37 #include <asm/mpspec.h>
38 #include <asm/processor.h>
39 #ifdef CONFIG_HPET_TIMER
40 #include <asm/hpet.h> /* for hpet_address */
41 #endif
42 #include <mach_apic.h>
43 #include <mach_mpparse.h>
44
/* Prefix for all ACPI-related log messages emitted by this file. */
#define PREFIX "ACPI: "

bool __initdata acpi_noirq; /* skip ACPI IRQ initialization */
bool __initdata acpi_ht = true; /* enable HT */

/* Set once the MADT yields usable LAPIC / IO-APIC configuration. */
bool __initdata acpi_lapic;
bool __initdata acpi_ioapic;

/* acpi_skip_timer_override: Skip IRQ0 overrides (boot parameter below). */
static bool __initdata acpi_skip_timer_override;
boolean_param("acpi_skip_timer_override", acpi_skip_timer_override);

/* LAPIC physical base: architectural default until the MADT (and,
 * optionally, a LAPIC address override entry) supplies the real one. */
static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
58
59 /* --------------------------------------------------------------------------
60 Boot-time Configuration
61 -------------------------------------------------------------------------- */
62
acpi_parse_madt(struct acpi_table_header * table)63 static int __init acpi_parse_madt(struct acpi_table_header *table)
64 {
65 struct acpi_table_madt *madt;
66
67 madt = (struct acpi_table_madt *)table;
68
69 if (madt->address) {
70 acpi_lapic_addr = (u64) madt->address;
71
72 printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
73 madt->address);
74 }
75
76 acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
77
78 return 0;
79 }
80
/*
 * MADT x2APIC subtable handler: validate the entry, map the ACPI
 * processor UID to its (32-bit) APIC ID, and register the CPU with
 * the MP-table layer.
 */
static int __init
acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
{
	struct acpi_madt_local_x2apic *processor =
		container_of(header, struct acpi_madt_local_x2apic, header);
	bool enabled = false;

	if (BAD_MADT_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	/* Record local apic id only when enabled and fitting. */
	if (processor->local_apic_id >= MAX_APICS ||
	    processor->uid >= MAX_MADT_ENTRIES) {
		/* Only warn loudly when the firmware marked the CPU enabled. */
		printk("%sAPIC ID %#x and/or ACPI ID %#x beyond limit"
		       " - processor ignored\n",
		       processor->lapic_flags & ACPI_MADT_ENABLED ?
				KERN_WARNING "WARNING: " : KERN_INFO,
		       processor->local_apic_id, processor->uid);
		/*
		 * Must not return an error here, to prevent
		 * acpi_table_parse_entries() from terminating early.
		 */
		return 0 /* -ENOSPC */;
	}
	if (processor->lapic_flags & ACPI_MADT_ENABLED) {
		x86_acpiid_to_apicid[processor->uid] =
			processor->local_apic_id;
		enabled = true;
	}

	/*
	 * We need to register disabled CPU as well to permit
	 * counting disabled CPUs. This allows us to size
	 * cpus_possible_map more accurately, to permit
	 * to not preallocating memory for all NR_CPUS
	 * when we use CPU hotplug.
	 */
	mp_register_lapic(processor->local_apic_id, enabled, 0);

	return 0;
}
124
125 static int __init
acpi_parse_lapic(struct acpi_subtable_header * header,const unsigned long end)126 acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
127 {
128 struct acpi_madt_local_apic *processor =
129 container_of(header, struct acpi_madt_local_apic, header);
130 bool enabled = false;
131
132 if (BAD_MADT_ENTRY(processor, end))
133 return -EINVAL;
134
135 acpi_table_print_madt_entry(header);
136
137 /* Record local apic id only when enabled */
138 if (processor->lapic_flags & ACPI_MADT_ENABLED) {
139 x86_acpiid_to_apicid[processor->processor_id] = processor->id;
140 enabled = true;
141 }
142
143 /*
144 * We need to register disabled CPU as well to permit
145 * counting disabled CPUs. This allows us to size
146 * cpus_possible_map more accurately, to permit
147 * to not preallocating memory for all NR_CPUS
148 * when we use CPU hotplug.
149 */
150 mp_register_lapic(processor->id, enabled, 0);
151
152 return 0;
153 }
154
155 static int __init
acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,const unsigned long end)156 acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
157 const unsigned long end)
158 {
159 struct acpi_madt_local_apic_override *lapic_addr_ovr =
160 container_of(header, struct acpi_madt_local_apic_override,
161 header);
162
163 if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
164 return -EINVAL;
165
166 acpi_lapic_addr = lapic_addr_ovr->address;
167
168 return 0;
169 }
170
171 static int __init
acpi_parse_x2apic_nmi(struct acpi_subtable_header * header,const unsigned long end)172 acpi_parse_x2apic_nmi(struct acpi_subtable_header *header,
173 const unsigned long end)
174 {
175 struct acpi_madt_local_x2apic_nmi *x2apic_nmi =
176 container_of(header, struct acpi_madt_local_x2apic_nmi,
177 header);
178
179 if (BAD_MADT_ENTRY(x2apic_nmi, end))
180 return -EINVAL;
181
182 acpi_table_print_madt_entry(header);
183
184 if (x2apic_nmi->lint != 1)
185 printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
186
187 return 0;
188 }
189
190 static int __init
acpi_parse_lapic_nmi(struct acpi_subtable_header * header,const unsigned long end)191 acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
192 {
193 struct acpi_madt_local_apic_nmi *lapic_nmi =
194 container_of(header, struct acpi_madt_local_apic_nmi, header);
195
196 if (BAD_MADT_ENTRY(lapic_nmi, end))
197 return -EINVAL;
198
199 acpi_table_print_madt_entry(header);
200
201 if (lapic_nmi->lint != 1)
202 printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
203
204 return 0;
205 }
206
207 static int __init
acpi_parse_ioapic(struct acpi_subtable_header * header,const unsigned long end)208 acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end)
209 {
210 struct acpi_madt_io_apic *ioapic =
211 container_of(header, struct acpi_madt_io_apic, header);
212
213 if (BAD_MADT_ENTRY(ioapic, end))
214 return -EINVAL;
215
216 acpi_table_print_madt_entry(header);
217
218 mp_register_ioapic(ioapic->id,
219 ioapic->address, ioapic->global_irq_base);
220
221 return 0;
222 }
223
224 static int __init
acpi_parse_int_src_ovr(struct acpi_subtable_header * header,const unsigned long end)225 acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
226 const unsigned long end)
227 {
228 struct acpi_madt_interrupt_override *intsrc =
229 container_of(header, struct acpi_madt_interrupt_override,
230 header);
231
232 if (BAD_MADT_ENTRY(intsrc, end))
233 return -EINVAL;
234
235 acpi_table_print_madt_entry(header);
236
237 if (acpi_skip_timer_override &&
238 intsrc->source_irq == 0 && intsrc->global_irq == 2) {
239 printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
240 return 0;
241 }
242
243 mp_override_legacy_irq(intsrc->source_irq,
244 ACPI_MADT_GET_POLARITY(intsrc->inti_flags),
245 ACPI_MADT_GET_TRIGGER(intsrc->inti_flags),
246 intsrc->global_irq);
247
248 return 0;
249 }
250
251 static int __init
acpi_parse_nmi_src(struct acpi_subtable_header * header,const unsigned long end)252 acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end)
253 {
254 struct acpi_madt_nmi_source *nmi_src =
255 container_of(header, struct acpi_madt_nmi_source, header);
256
257 if (BAD_MADT_ENTRY(nmi_src, end))
258 return -EINVAL;
259
260 acpi_table_print_madt_entry(header);
261
262 /* TBD: Support nimsrc entries? */
263
264 return 0;
265 }
266
267 #ifdef CONFIG_HPET_TIMER
268
acpi_parse_hpet(struct acpi_table_header * table)269 static int __init acpi_parse_hpet(struct acpi_table_header *table)
270 {
271 struct acpi_table_hpet *hpet_tbl = (struct acpi_table_hpet *)table;
272
273 if (hpet_tbl->address.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) {
274 printk(KERN_WARNING PREFIX "HPET timers must be located in "
275 "memory.\n");
276 return -1;
277 }
278
279 /*
280 * Some BIOSes provide multiple HPET tables. Sometimes this is a BIOS
281 * bug; the intended way of supporting more than 1 HPET is to use AML
282 * entries.
283 *
284 * If someone finds a real system with two genuine HPET tables, perhaps
285 * they will be kind and implement support. Until then however, warn
286 * that we will ignore subsequent tables.
287 */
288 if (hpet_address)
289 {
290 printk(KERN_WARNING PREFIX
291 "Found multiple HPET tables. Only using first\n");
292 return -1;
293 }
294
295 hpet_address = hpet_tbl->address.address;
296 hpet_blockid = hpet_tbl->sequence;
297 hpet_flags = hpet_tbl->flags;
298 printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
299 hpet_tbl->id, hpet_address);
300
301 return 0;
302 }
303 #else
304 #define acpi_parse_hpet NULL
305 #endif
306
acpi_invalidate_bgrt(struct acpi_table_header * table)307 static int __init acpi_invalidate_bgrt(struct acpi_table_header *table)
308 {
309 struct acpi_table_bgrt *bgrt_tbl =
310 container_of(table, struct acpi_table_bgrt, header);
311
312 if (table->length < sizeof(*bgrt_tbl))
313 return -1;
314
315 if (bgrt_tbl->version == 1 && bgrt_tbl->image_address
316 && !page_is_ram_type(PFN_DOWN(bgrt_tbl->image_address),
317 RAM_TYPE_CONVENTIONAL))
318 return 0;
319
320 printk(KERN_INFO PREFIX "BGRT: invalidating v%d image at %#"PRIx64"\n",
321 bgrt_tbl->version, bgrt_tbl->image_address);
322 bgrt_tbl->image_address = 0;
323 bgrt_tbl->status &= ~1;
324
325 return 0;
326 }
327
328 #ifdef CONFIG_ACPI_SLEEP
/*
 * Copy one FADT register block into acpi_sinfo.  Prefer the extended
 * ("X", 64-bit GAS) field when the FADT revision/length shows it is
 * present; otherwise synthesise a system-I/O GAS from the legacy
 * 32-bit address and byte length ("<< 3" converts bytes to bits).
 * Expects a local "fadt" pointer in the caller's scope.
 */
#define acpi_fadt_copy_address(dst, src, len) do { \
	if (fadt->header.revision >= FADT2_REVISION_ID && \
	    fadt->header.length >= ACPI_FADT_V2_SIZE) \
		acpi_sinfo.dst##_blk = fadt->x##src##_block; \
	if (!acpi_sinfo.dst##_blk.address) { \
		acpi_sinfo.dst##_blk.address = fadt->src##_block; \
		acpi_sinfo.dst##_blk.space_id = ACPI_ADR_SPACE_SYSTEM_IO; \
		acpi_sinfo.dst##_blk.bit_width = fadt->len##_length << 3; \
		acpi_sinfo.dst##_blk.bit_offset = 0; \
		acpi_sinfo.dst##_blk.access_width = fadt->len##_length; \
	} \
} while (0)
341
342 /* Get pm1x_cnt and pm1x_evt information for ACPI sleep */
343 static void __init
acpi_fadt_parse_sleep_info(struct acpi_table_fadt * fadt)344 acpi_fadt_parse_sleep_info(struct acpi_table_fadt *fadt)
345 {
346 struct acpi_table_facs *facs = NULL;
347 uint64_t facs_pa;
348
349 if (fadt->header.revision >= 5 &&
350 fadt->header.length >= ACPI_FADT_V5_SIZE) {
351 acpi_sinfo.sleep_control = fadt->sleep_control;
352 acpi_sinfo.sleep_status = fadt->sleep_status;
353
354 printk(KERN_INFO PREFIX
355 "v5 SLEEP INFO: control[%d:%"PRIx64"],"
356 " status[%d:%"PRIx64"]\n",
357 acpi_sinfo.sleep_control.space_id,
358 acpi_sinfo.sleep_control.address,
359 acpi_sinfo.sleep_status.space_id,
360 acpi_sinfo.sleep_status.address);
361
362 if ((fadt->sleep_control.address &&
363 (fadt->sleep_control.bit_offset ||
364 fadt->sleep_control.bit_width !=
365 fadt->sleep_control.access_width * 8)) ||
366 (fadt->sleep_status.address &&
367 (fadt->sleep_status.bit_offset ||
368 fadt->sleep_status.bit_width !=
369 fadt->sleep_status.access_width * 8))) {
370 printk(KERN_WARNING PREFIX
371 "Invalid sleep control/status register data:"
372 " %#x:%#x:%#x %#x:%#x:%#x\n",
373 fadt->sleep_control.bit_offset,
374 fadt->sleep_control.bit_width,
375 fadt->sleep_control.access_width,
376 fadt->sleep_status.bit_offset,
377 fadt->sleep_status.bit_width,
378 fadt->sleep_status.access_width);
379 fadt->sleep_control.address = 0;
380 fadt->sleep_status.address = 0;
381 }
382 }
383
384 if (fadt->flags & ACPI_FADT_HW_REDUCED)
385 goto bad;
386
387 acpi_fadt_copy_address(pm1a_cnt, pm1a_control, pm1_control);
388 acpi_fadt_copy_address(pm1b_cnt, pm1b_control, pm1_control);
389 acpi_fadt_copy_address(pm1a_evt, pm1a_event, pm1_event);
390 acpi_fadt_copy_address(pm1b_evt, pm1b_event, pm1_event);
391
392 printk(KERN_INFO PREFIX
393 "SLEEP INFO: pm1x_cnt[%d:%"PRIx64",%d:%"PRIx64"], "
394 "pm1x_evt[%d:%"PRIx64",%d:%"PRIx64"]\n",
395 acpi_sinfo.pm1a_cnt_blk.space_id,
396 acpi_sinfo.pm1a_cnt_blk.address,
397 acpi_sinfo.pm1b_cnt_blk.space_id,
398 acpi_sinfo.pm1b_cnt_blk.address,
399 acpi_sinfo.pm1a_evt_blk.space_id,
400 acpi_sinfo.pm1a_evt_blk.address,
401 acpi_sinfo.pm1b_evt_blk.space_id,
402 acpi_sinfo.pm1b_evt_blk.address);
403
404 /* Now FACS... */
405 facs_pa = ((fadt->header.revision >= FADT2_REVISION_ID)
406 ? fadt->Xfacs : (uint64_t)fadt->facs);
407 if (fadt->facs && ((uint64_t)fadt->facs != facs_pa)) {
408 printk(KERN_WARNING PREFIX
409 "32/64X FACS address mismatch in FADT - "
410 "%08x/%016"PRIx64", using 32\n",
411 fadt->facs, facs_pa);
412 facs_pa = (uint64_t)fadt->facs;
413 }
414 if (!facs_pa)
415 goto bad;
416
417 facs = (struct acpi_table_facs *)
418 __acpi_map_table(facs_pa, sizeof(struct acpi_table_facs));
419 if (!facs)
420 goto bad;
421
422 if (strncmp(facs->signature, "FACS", 4)) {
423 printk(KERN_ERR PREFIX "Invalid FACS signature %.4s\n",
424 facs->signature);
425 goto bad;
426 }
427
428 if (facs->length < 24) {
429 printk(KERN_ERR PREFIX "Invalid FACS table length: %#x",
430 facs->length);
431 goto bad;
432 }
433
434 if (facs->length < 64)
435 printk(KERN_WARNING PREFIX
436 "FACS is shorter than ACPI spec allow: %#x",
437 facs->length);
438
439 acpi_sinfo.wakeup_vector = facs_pa +
440 offsetof(struct acpi_table_facs, firmware_waking_vector);
441 acpi_sinfo.vector_width = 32;
442
443 printk(KERN_INFO PREFIX
444 " wakeup_vec[%"PRIx64"], vec_size[%x]\n",
445 acpi_sinfo.wakeup_vector, acpi_sinfo.vector_width);
446 return;
447 bad:
448 memset(&acpi_sinfo, 0,
449 offsetof(struct acpi_sleep_info, sleep_control));
450 memset(&acpi_sinfo.sleep_status + 1, 0,
451 (long)(&acpi_sinfo + 1) - (long)(&acpi_sinfo.sleep_status + 1));
452 }
453 #endif
454
/*
 * FADT table handler: capture SCI interrupt, PM timer location,
 * SMI command port and ACPI enable/disable values, then (optionally)
 * gather ACPI sleep information.
 */
static int __init acpi_parse_fadt(struct acpi_table_header *table)
{
	struct acpi_table_fadt *fadt = (struct acpi_table_fadt *)table;

#ifdef CONFIG_ACPI_INTERPRETER
	/* initialize sci_int early for INT_SRC_OVR MADT parsing */
	acpi_fadt.sci_int = fadt->sci_int;

	/* initialize rev and apic_phys_dest_mode for x86_64 genapic */
	acpi_fadt.revision = fadt->revision;
	acpi_fadt.force_apic_physical_destination_mode =
		fadt->force_apic_physical_destination_mode;
#endif

#ifdef CONFIG_X86_PM_TIMER
	/* detect the location of the ACPI PM Timer */
	if (fadt->header.revision >= FADT2_REVISION_ID) {
		/* FADT rev. 2: the extended GAS field is authoritative,
		 * but only an I/O-space timer is supported here. */
		if (fadt->xpm_timer_block.space_id ==
		    ACPI_ADR_SPACE_SYSTEM_IO) {
			pmtmr_ioport = fadt->xpm_timer_block.address;
			pmtmr_width = fadt->xpm_timer_block.bit_width;
		}
	}
	/*
	 * "X" fields are optional extensions to the original V1.0
	 * fields, so we must selectively expand V1.0 fields if the
	 * corresponding X field is zero.
	 */
	if (!pmtmr_ioport) {
		pmtmr_ioport = fadt->pm_timer_block;
		/* ACPI spec: a 4-byte PM timer block implies 24 valid bits. */
		pmtmr_width = fadt->pm_timer_length == 4 ? 24 : 0;
	}
	if (pmtmr_ioport)
		printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x (%u bits)\n",
		       pmtmr_ioport, pmtmr_width);
#endif

	/* Values needed to hand ACPI control between firmware and OS. */
	acpi_smi_cmd = fadt->smi_command;
	acpi_enable_value = fadt->acpi_enable;
	acpi_disable_value = fadt->acpi_disable;

#ifdef CONFIG_ACPI_SLEEP
	acpi_fadt_parse_sleep_info(fadt);
#endif

	return 0;
}
503
/*
 * Parse LAPIC entries in MADT
 * returns 0 on success, < 0 on error
 */
static int __init acpi_parse_madt_lapic_entries(void)
{
	int count, x2count;

	if (!cpu_has_apic)
		return -ENODEV;

	/*
	 * Note that the LAPIC address is obtained from the MADT (32-bit value)
	 * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
	 */

	count =
	    acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
				  acpi_parse_lapic_addr_ovr, 0);
	if (count < 0) {
		printk(KERN_ERR PREFIX
		       "Error parsing LAPIC address override entry\n");
		return count;
	}

	/* Commit the (possibly overridden) LAPIC base before CPU parsing. */
	mp_register_lapic_address(acpi_lapic_addr);

	/* Both xAPIC and x2APIC entries feed the same MAX_APICS-bounded map. */
	BUILD_BUG_ON(MAX_APICS != MAX_LOCAL_APIC);
	count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC,
				      acpi_parse_lapic, MAX_APICS);
	x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC,
					acpi_parse_x2apic, MAX_APICS);
	if (!count && !x2count) {
		printk(KERN_ERR PREFIX "No LAPIC entries present\n");
		/* TBD: Cleanup to allow fallback to MPS */
		return -ENODEV;
	} else if (count < 0 || x2count < 0) {
		printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
		/* TBD: Cleanup to allow fallback to MPS */
		return count < 0 ? count : x2count;
	}

	count =
	    acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI,
				  acpi_parse_lapic_nmi, 0);
	x2count =
	    acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC_NMI,
				  acpi_parse_x2apic_nmi, 0);
	if (count < 0 || x2count < 0) {
		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
		/* TBD: Cleanup to allow fallback to MPS */
		return count < 0 ? count : x2count;
	}
	return 0;
}
559
/*
 * Parse IOAPIC related entries in MADT
 * returns 0 on success, < 0 on error
 */
static int __init acpi_parse_madt_ioapic_entries(void)
{
	int count;

	/*
	 * ACPI interpreter is required to complete interrupt setup,
	 * so if it is off, don't enumerate the io-apics with ACPI.
	 * If MPS is present, it will handle them,
	 * otherwise the system will stay in PIC mode
	 */
	if (acpi_disabled || acpi_noirq) {
		return -ENODEV;
	}

	if (!cpu_has_apic)
		return -ENODEV;

	/*
	 * if "noapic" boot option, don't look for IO-APICs
	 */
	if (skip_ioapic_setup) {
		printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
		       "due to 'noapic' option.\n");
		return -ENODEV;
	}

	count =
	    acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic,
				  MAX_IO_APICS);
	if (!count) {
		printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
		return -ENODEV;
	} else if (count < 0) {
		printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
		return count;
	}

	/* Overrides must be applied before the legacy identity mapping. */
	count =
	    acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE,
				  acpi_parse_int_src_ovr, MAX_IRQ_SOURCES);
	if (count < 0) {
		printk(KERN_ERR PREFIX
		       "Error parsing interrupt source overrides entry\n");
		/* TBD: Cleanup to allow fallback to MPS */
		return count;
	}

	/* Fill in identity legacy mappings where no override */
	mp_config_acpi_legacy_irqs();

	count =
	    acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE,
				  acpi_parse_nmi_src, MAX_IRQ_SOURCES);
	if (count < 0) {
		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
		/* TBD: Cleanup to allow fallback to MPS */
		return count;
	}

	return 0;
}
625
acpi_process_madt(void)626 static void __init acpi_process_madt(void)
627 {
628 int error;
629
630 if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
631
632 /*
633 * Parse MADT LAPIC entries
634 */
635 error = acpi_parse_madt_lapic_entries();
636 if (!error) {
637 acpi_lapic = true;
638 generic_bigsmp_probe();
639
640 /*
641 * Parse MADT IO-APIC entries
642 */
643 error = acpi_parse_madt_ioapic_entries();
644 if (!error) {
645 acpi_ioapic = true;
646
647 smp_found_config = true;
648 clustered_apic_check();
649 }
650 }
651 if (error == -EINVAL) {
652 /*
653 * Dell Precision Workstation 410, 610 come here.
654 */
655 printk(KERN_ERR PREFIX
656 "Invalid BIOS MADT, disabling ACPI\n");
657 disable_acpi();
658 }
659 }
660 }
661
662 /*
663 * acpi_boot_table_init() and acpi_boot_init()
664 * called from setup_arch(), always.
665 * 1. checksums all tables
666 * 2. enumerates lapics
667 * 3. enumerates io-apics
668 *
669 * acpi_table_init() is separate to allow reading SRAT without
670 * other side effects.
671 *
672 * side effects of acpi_boot_init:
673 * acpi_lapic = true if LAPIC found
674 * acpi_ioapic = true if IOAPIC found
675 * if (acpi_lapic && acpi_ioapic) smp_found_config = true;
676 * ...
677 *
678 * return value: (currently ignored)
679 * 0: success
680 * !0: failure
681 */
682
acpi_boot_table_init(void)683 int __init acpi_boot_table_init(void)
684 {
685 int error;
686
687 /*
688 * If acpi_disabled, bail out
689 * One exception: acpi=ht continues far enough to enumerate LAPICs
690 */
691 if (acpi_disabled && !acpi_ht)
692 return 1;
693
694 /*
695 * Initialize the ACPI boot-time table parser.
696 */
697 error = acpi_table_init();
698 if (error) {
699 disable_acpi();
700 return error;
701 }
702
703 return 0;
704 }
705
/*
 * Second-stage ACPI setup: parse the FADT (SCI, PM timer, sleep info),
 * process the MADT (LAPICs/IO-APICs), then handle HPET, DMAR, ERST,
 * HEST and BGRT tables.  Return value is currently ignored by callers.
 */
int __init acpi_boot_init(void)
{
	/*
	 * If acpi_disabled, bail out
	 * One exception: acpi=ht continues far enough to enumerate LAPICs
	 */
	if (acpi_disabled && !acpi_ht)
		return 1;

	/*
	 * set sci_int and PM timer address
	 */
	acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt);

	/*
	 * Process the Multiple APIC Description Table (MADT), if present
	 */
	acpi_process_madt();

	/* NULL when CONFIG_HPET_TIMER is off, making this a no-op. */
	acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet);

	acpi_dmar_init();

	erst_init();

	acpi_hest_init();

	/* Invalidate the boot graphic if its backing memory may be reused. */
	acpi_table_parse(ACPI_SIG_BGRT, acpi_invalidate_bgrt);

	return 0;
}
737