// SPDX-License-Identifier: GPL-2.0
/*
 *	Intel Multiprocessor Specification 1.1 and 1.4
 *	compliant MP-table parsing routines.
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 *	(c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 *	(c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
 */

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/smp.h>
#include <linux/pci.h>

#include <asm/io_apic.h>
#include <asm/acpi.h>
#include <asm/irqdomain.h>
#include <asm/mtrr.h>
#include <asm/mpspec.h>
#include <asm/proto.h>
#include <asm/bios_ebda.h>
#include <asm/e820/api.h>
#include <asm/setup.h>
#include <asm/smp.h>

#include <asm/apic.h>
/*
 * Checksum an MP configuration block.
 */

static int __init mpf_checksum(unsigned char *mp, int len)
{
	int sum = 0;

	while (len--)
		sum += *mp++;

	return sum & 0xFF;
}

static void __init MP_processor_info(struct mpc_cpu *m)
{
	int apicid;
	char *bootup_cpu = "";

	if (!(m->cpuflag & CPU_ENABLED)) {
		disabled_cpus++;
		return;
	}

	apicid = m->apicid;

	if (m->cpuflag & CPU_BOOTPROCESSOR) {
		bootup_cpu = " (Bootup-CPU)";
		boot_cpu_physical_apicid = m->apicid;
	}

	pr_info("Processor #%d%s\n", m->apicid, bootup_cpu);
	generic_processor_info(apicid, m->apicver);
}

#ifdef CONFIG_X86_IO_APIC
static void __init mpc_oem_bus_info(struct mpc_bus *m, char *str)
{
	memcpy(str, m->bustype, 6);
	str[6] = 0;
	apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->busid, str);
}

static void __init MP_bus_info(struct mpc_bus *m)
{
	char str[7];

	mpc_oem_bus_info(m, str);

#if MAX_MP_BUSSES < 256
	if (m->busid >= MAX_MP_BUSSES) {
		pr_warn("MP table busid value (%d) for bustype %s is too large, max. supported is %d\n",
			m->busid, str, MAX_MP_BUSSES - 1);
		return;
	}
#endif

	set_bit(m->busid, mp_bus_not_pci);
	if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
#ifdef CONFIG_EISA
		mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
#endif
	} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
		clear_bit(m->busid, mp_bus_not_pci);
#ifdef CONFIG_EISA
		mp_bus_id_to_type[m->busid] = MP_BUS_PCI;
	} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) {
		mp_bus_id_to_type[m->busid] = MP_BUS_EISA;
#endif
	} else
		pr_warn("Unknown bustype %s - ignoring\n", str);
}

static void __init MP_ioapic_info(struct mpc_ioapic *m)
{
	struct ioapic_domain_cfg cfg = {
		.type = IOAPIC_DOMAIN_LEGACY,
		.ops = &mp_ioapic_irqdomain_ops,
	};

	if (m->flags & MPC_APIC_USABLE)
		mp_register_ioapic(m->apicid, m->apicaddr, gsi_top, &cfg);
}

static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
{
	apic_printk(APIC_VERBOSE,
		"Int: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC INT %02x\n",
		mp_irq->irqtype, mp_irq->irqflag & 3,
		(mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
		mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
}

#else /* CONFIG_X86_IO_APIC */
static inline void __init MP_bus_info(struct mpc_bus *m) {}
static inline void __init MP_ioapic_info(struct mpc_ioapic *m) {}
#endif /* CONFIG_X86_IO_APIC */

static void __init MP_lintsrc_info(struct mpc_lintsrc *m)
{
	apic_printk(APIC_VERBOSE,
		"Lint: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC LINT %02x\n",
		m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbusid,
		m->srcbusirq, m->destapic, m->destapiclint);
}

/*
 * Read/parse the MPC
 */
static int __init smp_check_mpc(struct mpc_table *mpc, char *oem, char *str)
{

	if (memcmp(mpc->signature, MPC_SIGNATURE, 4)) {
		pr_err("MPTABLE: bad signature [%c%c%c%c]!\n",
		       mpc->signature[0], mpc->signature[1],
		       mpc->signature[2], mpc->signature[3]);
		return 0;
	}
	if (mpf_checksum((unsigned char *)mpc, mpc->length)) {
		pr_err("MPTABLE: checksum error!\n");
		return 0;
	}
	if (mpc->spec != 0x01 && mpc->spec != 0x04) {
		pr_err("MPTABLE: bad table version (%d)!!\n", mpc->spec);
		return 0;
	}
	if (!mpc->lapic) {
		pr_err("MPTABLE: null local APIC address!\n");
		return 0;
	}
	memcpy(oem, mpc->oem, 8);
	oem[8] = 0;
	pr_info("MPTABLE: OEM ID: %s\n", oem);

	memcpy(str, mpc->productid, 12);
	str[12] = 0;

	pr_info("MPTABLE: Product ID: %s\n", str);

	pr_info("MPTABLE: APIC at: 0x%X\n", mpc->lapic);

	return 1;
}

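/* Advance the entry pointer and the parsed byte count past one MP-table entry. */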
static void skip_entry(unsigned char **ptr, int *count, int size)
{
	*ptr += size;
	*count += size;
}

static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt)
{
	pr_err("Your mptable is wrong, contact your HW vendor!\n");
	pr_cont("type %x\n", *mpt);
	print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_ADDRESS, 16,
			1, mpc, mpc->length, 1);
}

static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
{
	char str[16];
	char oem[10];

	int count = sizeof(*mpc);
	unsigned char *mpt = ((unsigned char *)mpc) + count;

	if (!smp_check_mpc(mpc, oem, str))
		return 0;

	/* Initialize the lapic mapping */
	if (!acpi_lapic)
		register_lapic_address(mpc->lapic);

	if (early)
		return 1;

	/* Now process the configuration blocks. */
	while (count < mpc->length) {
		switch (*mpt) {
		case MP_PROCESSOR:
			/* ACPI may have already provided this data */
			if (!acpi_lapic)
				MP_processor_info((struct mpc_cpu *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
			break;
		case MP_BUS:
			MP_bus_info((struct mpc_bus *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
			break;
		case MP_IOAPIC:
			MP_ioapic_info((struct mpc_ioapic *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
			break;
		case MP_INTSRC:
			mp_save_irq((struct mpc_intsrc *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
			break;
		case MP_LINTSRC:
			MP_lintsrc_info((struct mpc_lintsrc *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
			break;
		default:
			/* wrong mptable */
			smp_dump_mptable(mpc, mpt);
			count = mpc->length;
			break;
		}
	}

	if (!num_processors)
		pr_err("MPTABLE: no processors registered!\n");
	return num_processors;
}

#ifdef CONFIG_X86_IO_APIC

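/*
 * Read the trigger mode of an ISA IRQ from the ELCR (Edge/Level Control
 * Registers) at I/O ports 0x4d0/0x4d1: a set bit means level triggered.
 */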
static int __init ELCR_trigger(unsigned int irq)
{
	unsigned int port;

	port = 0x4d0 + (irq >> 3);
	return (inb(port) >> (irq & 7)) & 1;
}

static void __init construct_default_ioirq_mptable(int mpc_default_type)
{
	struct mpc_intsrc intsrc;
	int i;
	int ELCR_fallback = 0;

	intsrc.type = MP_INTSRC;
	intsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
	intsrc.srcbus = 0;
	intsrc.dstapic = mpc_ioapic_id(0);

	intsrc.irqtype = mp_INT;

	/*
	 *  If true, we have an ISA/PCI system with no IRQ entries
	 *  in the MP table. To prevent the PCI interrupts from being set up
	 *  incorrectly, we try to use the ELCR. The sanity check to see if
	 *  there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
	 *  never be level sensitive, so we simply see if the ELCR agrees.
	 *  If it does, we assume it's valid.
	 */
	if (mpc_default_type == 5) {
		pr_info("ISA/PCI bus type with no IRQ information... falling back to ELCR\n");

		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) ||
		    ELCR_trigger(13))
			pr_err("ELCR contains invalid data... not using ELCR\n");
		else {
			pr_info("Using ELCR to identify PCI interrupts\n");
			ELCR_fallback = 1;
		}
	}

	for (i = 0; i < 16; i++) {
		switch (mpc_default_type) {
		case 2:
			if (i == 0 || i == 13)
				continue;	/* IRQ0 & IRQ13 not connected */
			fallthrough;
		default:
			if (i == 2)
				continue;	/* IRQ2 is never connected */
		}

		if (ELCR_fallback) {
			/*
			 *  If the ELCR indicates a level-sensitive interrupt, we
			 *  copy that information over to the MP table in the
			 *  irqflag field (level sensitive, active high polarity).
			 */
			if (ELCR_trigger(i)) {
				intsrc.irqflag = MP_IRQTRIG_LEVEL |
						 MP_IRQPOL_ACTIVE_HIGH;
			} else {
				intsrc.irqflag = MP_IRQTRIG_DEFAULT |
						 MP_IRQPOL_DEFAULT;
			}
		}

		intsrc.srcbusirq = i;
		intsrc.dstirq = i ? i : 2;	/* IRQ0 to INTIN2 */
		mp_save_irq(&intsrc);
	}

	intsrc.irqtype = mp_ExtINT;
	intsrc.srcbusirq = 0;
	intsrc.dstirq = 0;	/* 8259A to INTIN0 */
	mp_save_irq(&intsrc);
}


static void __init construct_ioapic_table(int mpc_default_type)
{
	struct mpc_ioapic ioapic;
	struct mpc_bus bus;

	bus.type = MP_BUS;
	bus.busid = 0;
	switch (mpc_default_type) {
	default:
		pr_err("???\nUnknown standard configuration %d\n",
		       mpc_default_type);
		fallthrough;
	case 1:
	case 5:
		memcpy(bus.bustype, "ISA   ", 6);
		break;
	case 2:
	case 6:
	case 3:
		memcpy(bus.bustype, "EISA  ", 6);
		break;
	}
	MP_bus_info(&bus);
	if (mpc_default_type > 4) {
		bus.busid = 1;
		memcpy(bus.bustype, "PCI   ", 6);
		MP_bus_info(&bus);
	}

	ioapic.type	= MP_IOAPIC;
	ioapic.apicid	= 2;
	ioapic.apicver	= mpc_default_type > 4 ? 0x10 : 0x01;
	ioapic.flags	= MPC_APIC_USABLE;
	ioapic.apicaddr	= IO_APIC_DEFAULT_PHYS_BASE;
	MP_ioapic_info(&ioapic);

	/*
	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
	 */
	construct_default_ioirq_mptable(mpc_default_type);
}
#else
static inline void __init construct_ioapic_table(int mpc_default_type) { }
#endif

static inline void __init construct_default_ISA_mptable(int mpc_default_type)
{
	struct mpc_cpu processor;
	struct mpc_lintsrc lintsrc;
	int linttypes[2] = { mp_ExtINT, mp_NMI };
	int i;

	/*
	 * local APIC has default address
	 */
	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;

	/*
	 * 2 CPUs, numbered 0 & 1.
	 */
	processor.type = MP_PROCESSOR;
	/* Either an integrated APIC or a discrete 82489DX. */
	processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
	processor.cpuflag = CPU_ENABLED;
	processor.cpufeature = (boot_cpu_data.x86 << 8) |
	    (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping;
	processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
	processor.reserved[0] = 0;
	processor.reserved[1] = 0;
	for (i = 0; i < 2; i++) {
		processor.apicid = i;
		MP_processor_info(&processor);
	}

	construct_ioapic_table(mpc_default_type);

	lintsrc.type = MP_LINTSRC;
	lintsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
	lintsrc.srcbusid = 0;
	lintsrc.srcbusirq = 0;
	lintsrc.destapic = MP_APIC_ALL;
	for (i = 0; i < 2; i++) {
		lintsrc.irqtype = linttypes[i];
		lintsrc.destapiclint = i;
		MP_lintsrc_info(&lintsrc);
	}
}

static unsigned long mpf_base;
static bool mpf_found;

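/* Temporarily map the MP config table header just to read its total length. */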
static unsigned long __init get_mpc_size(unsigned long physptr)
{
	struct mpc_table *mpc;
	unsigned long size;

	mpc = early_memremap(physptr, PAGE_SIZE);
	size = mpc->length;
	early_memunmap(mpc, PAGE_SIZE);
	apic_printk(APIC_VERBOSE, " mpc: %lx-%lx\n", physptr, physptr + size);

	return size;
}

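/*
 * Map and parse the MP config table pointed to by the floating pointer
 * structure. Returns 0 on success, -1 on a bad table or on the early pass.
 */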
static int __init check_physptr(struct mpf_intel *mpf, unsigned int early)
{
	struct mpc_table *mpc;
	unsigned long size;

	size = get_mpc_size(mpf->physptr);
	mpc = early_memremap(mpf->physptr, size);

	/*
	 * Read the physical hardware table. Anything here will
	 * override the defaults.
	 */
	if (!smp_read_mpc(mpc, early)) {
#ifdef CONFIG_X86_LOCAL_APIC
		smp_found_config = 0;
#endif
		pr_err("BIOS bug, MP table errors detected!...\n");
		pr_cont("... disabling SMP support. (tell your hw vendor)\n");
		early_memunmap(mpc, size);
		return -1;
	}
	early_memunmap(mpc, size);

	if (early)
		return -1;

#ifdef CONFIG_X86_IO_APIC
	/*
	 * If there are no explicit MP IRQ entries, then we are
	 * broken. We set up most of the low 16 IO-APIC pins to
	 * ISA defaults and hope it will work.
	 */
	if (!mp_irq_entries) {
		struct mpc_bus bus;

		pr_err("BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");

		bus.type = MP_BUS;
		bus.busid = 0;
		memcpy(bus.bustype, "ISA   ", 6);
		MP_bus_info(&bus);

		construct_default_ioirq_mptable(0);
	}
#endif

	return 0;
}

/*
 * Scan the memory blocks for an SMP configuration block.
 */
void __init default_get_smp_config(unsigned int early)
{
	struct mpf_intel *mpf;

	if (!smp_found_config)
		return;

	if (!mpf_found)
		return;

	if (acpi_lapic && early)
		return;

	/*
	 * MPS doesn't support hyperthreading, aka only have
	 * thread 0 apic id in MPS table
	 */
	if (acpi_lapic && acpi_ioapic)
		return;

	mpf = early_memremap(mpf_base, sizeof(*mpf));
	if (!mpf) {
		pr_err("MPTABLE: error mapping MP table\n");
		return;
	}

	pr_info("Intel MultiProcessor Specification v1.%d\n",
		mpf->specification);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
	if (mpf->feature2 & (1 << 7)) {
		pr_info(" IMCR and PIC compatibility mode.\n");
		pic_mode = 1;
	} else {
		pr_info(" Virtual Wire compatibility mode.\n");
		pic_mode = 0;
	}
#endif
	/*
	 * Now see if we need to read further.
	 */
	if (mpf->feature1) {
		if (early) {
			/*
			 * local APIC has default address
			 */
			mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
			goto out;
		}

		pr_info("Default MP configuration #%d\n", mpf->feature1);
		construct_default_ISA_mptable(mpf->feature1);

	} else if (mpf->physptr) {
		if (check_physptr(mpf, early))
			goto out;
	} else
		BUG();

	if (!early)
		pr_info("Processors: %d\n", num_processors);
	/*
	 * Only use the first configuration found.
	 */
out:
	early_memunmap(mpf, sizeof(*mpf));
}

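/* Reserve the MP config table so the memblock allocator won't hand it out. */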
static void __init smp_reserve_memory(struct mpf_intel *mpf)
{
	memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr));
}

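/*
 * Scan a physical memory range in 16-byte steps for the MP floating pointer
 * structure (SMP_MAGIC_IDENT) and reserve it plus the config table it
 * points to.
 */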
static int __init smp_scan_config(unsigned long base, unsigned long length)
{
	unsigned int *bp;
	struct mpf_intel *mpf;
	int ret = 0;

	apic_printk(APIC_VERBOSE, "Scan for SMP in [mem %#010lx-%#010lx]\n",
		    base, base + length - 1);
	BUILD_BUG_ON(sizeof(*mpf) != 16);

	while (length > 0) {
		bp = early_memremap(base, length);
		mpf = (struct mpf_intel *)bp;
		if ((*bp == SMP_MAGIC_IDENT) &&
		    (mpf->length == 1) &&
		    !mpf_checksum((unsigned char *)bp, 16) &&
		    ((mpf->specification == 1)
		     || (mpf->specification == 4))) {
#ifdef CONFIG_X86_LOCAL_APIC
			smp_found_config = 1;
#endif
			mpf_base = base;
			mpf_found = true;

			pr_info("found SMP MP-table at [mem %#010lx-%#010lx]\n",
				base, base + sizeof(*mpf) - 1);

			memblock_reserve(base, sizeof(*mpf));
			if (mpf->physptr)
				smp_reserve_memory(mpf);

			ret = 1;
		}
		early_memunmap(bp, length);

		if (ret)
			break;

		base += 16;
		length -= 16;
	}
	return ret;
}

void __init default_find_smp_config(void)
{
	unsigned int address;

	/*
	 * FIXME: Linux assumes you have 640K of base ram..
	 * this continues the error...
	 *
	 * 1) Scan the bottom 1K for a signature
	 * 2) Scan the top 1K of base RAM
	 * 3) Scan the 64K of bios
	 */
	if (smp_scan_config(0x0, 0x400) ||
	    smp_scan_config(639 * 0x400, 0x400) ||
	    smp_scan_config(0xF0000, 0x10000))
		return;
	/*
	 * If it is an SMP machine we should know now, unless the
	 * configuration is in an EISA bus machine with an
	 * extended bios data area.
	 *
	 * there is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E, calculate and scan it here.
	 *
	 * NOTE! There are Linux loaders that will corrupt the EBDA
	 * area, and as such this kind of SMP config may be less
	 * trustworthy, simply because the SMP table may have been
	 * stomped on during early boot. These loaders are buggy and
	 * should be fixed.
	 *
	 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
	 */

	address = get_bios_ebda();
	if (address)
		smp_scan_config(address, 0x400);
}

#ifdef CONFIG_X86_IO_APIC
static u8 __initdata irq_used[MAX_IRQ_SOURCES];

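/*
 * Find the mp_irqs[] entry matching a level-triggered, active-low MP
 * interrupt source. Returns its index, 0 for entries left untouched,
 * -1 if not found, -2 if the slot was already claimed.
 */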
static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
{
	int i;

	if (m->irqtype != mp_INT)
		return 0;

	if (m->irqflag != (MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW))
		return 0;

	/* not legacy */

	for (i = 0; i < mp_irq_entries; i++) {
		if (mp_irqs[i].irqtype != mp_INT)
			continue;

		if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
					   MP_IRQPOL_ACTIVE_LOW))
			continue;

		if (mp_irqs[i].srcbus != m->srcbus)
			continue;
		if (mp_irqs[i].srcbusirq != m->srcbusirq)
			continue;
		if (irq_used[i]) {
			/* already claimed */
			return -2;
		}
		irq_used[i] = 1;
		return i;
	}

	/* not found */
	return -1;
}

#define SPARE_SLOT_NUM 20

static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];

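/*
 * Overwrite an MP-table interrupt source with its matching mp_irqs[] entry,
 * or remember the slot in m_spare[] for later reuse if there is no match.
 */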
static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
{
	int i;

	apic_printk(APIC_VERBOSE, "OLD ");
	print_mp_irq_info(m);

	i = get_MP_intsrc_index(m);
	if (i > 0) {
		memcpy(m, &mp_irqs[i], sizeof(*m));
		apic_printk(APIC_VERBOSE, "NEW ");
		print_mp_irq_info(&mp_irqs[i]);
		return;
	}
	if (!i) {
		/* legacy, do nothing */
		return;
	}
	if (*nr_m_spare < SPARE_SLOT_NUM) {
		/*
		 * not found (-1), or duplicated (-2) are invalid entries,
		 * we need to use the slot later
		 */
		m_spare[*nr_m_spare] = m;
		*nr_m_spare += 1;
	}
}

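/* Check that the relocated table still has room for another intsrc entry. */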
static int __init
check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, int count)
{
	if (!mpc_new_phys || count <= mpc_new_length) {
		WARN(1, "update_mptable: No spare slots (length: %x)\n", count);
		return -1;
	}

	return 0;
}
#else /* CONFIG_X86_IO_APIC */
static
inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
#endif /* CONFIG_X86_IO_APIC */

static int __init replace_intsrc_all(struct mpc_table *mpc,
				     unsigned long mpc_new_phys,
				     unsigned long mpc_new_length)
{
#ifdef CONFIG_X86_IO_APIC
	int i;
#endif
	int count = sizeof(*mpc);
	int nr_m_spare = 0;
	unsigned char *mpt = ((unsigned char *)mpc) + count;

	pr_info("mpc_length %x\n", mpc->length);
	while (count < mpc->length) {
		switch (*mpt) {
		case MP_PROCESSOR:
			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
			break;
		case MP_BUS:
			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
			break;
		case MP_IOAPIC:
			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
			break;
		case MP_INTSRC:
			check_irq_src((struct mpc_intsrc *)mpt, &nr_m_spare);
			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
			break;
		case MP_LINTSRC:
			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
			break;
		default:
			/* wrong mptable */
			smp_dump_mptable(mpc, mpt);
			goto out;
		}
	}

#ifdef CONFIG_X86_IO_APIC
	for (i = 0; i < mp_irq_entries; i++) {
		if (irq_used[i])
			continue;

		if (mp_irqs[i].irqtype != mp_INT)
			continue;

		if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
					   MP_IRQPOL_ACTIVE_LOW))
			continue;

		if (nr_m_spare > 0) {
			apic_printk(APIC_VERBOSE, "*NEW* found\n");
			nr_m_spare--;
			memcpy(m_spare[nr_m_spare], &mp_irqs[i], sizeof(mp_irqs[i]));
			m_spare[nr_m_spare] = NULL;
		} else {
			struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
			count += sizeof(struct mpc_intsrc);
			if (check_slot(mpc_new_phys, mpc_new_length, count) < 0)
				goto out;
			memcpy(m, &mp_irqs[i], sizeof(*m));
			mpc->length = count;
			mpt += sizeof(struct mpc_intsrc);
		}
		print_mp_irq_info(&mp_irqs[i]);
	}
#endif
out:
	/* update checksum */
	mpc->checksum = 0;
	mpc->checksum -= mpf_checksum((unsigned char *)mpc, mpc->length);

	return 0;
}

int enable_update_mptable;

static int __init update_mptable_setup(char *str)
{
	enable_update_mptable = 1;
#ifdef CONFIG_PCI
	pci_routeirq = 1;
#endif
	return 0;
}
early_param("update_mptable", update_mptable_setup);

static unsigned long __initdata mpc_new_phys;
static unsigned long mpc_new_length __initdata = 4096;

/* alloc_mptable or alloc_mptable=4k */
static int __initdata alloc_mptable;
static int __init parse_alloc_mptable_opt(char *p)
{
	enable_update_mptable = 1;
#ifdef CONFIG_PCI
	pci_routeirq = 1;
#endif
	alloc_mptable = 1;
	if (!p)
		return 0;
	mpc_new_length = memparse(p, &p);
	return 0;
}
early_param("alloc_mptable", parse_alloc_mptable_opt);

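/* Reserve room for a relocated MP config table if "alloc_mptable" was given. */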
void __init e820__memblock_alloc_reserved_mpc_new(void)
{
	if (enable_update_mptable && alloc_mptable)
		mpc_new_phys = e820__memblock_alloc_reserved(mpc_new_length, 4);
}

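/*
 * Late initcall: rewrite the MP config table (in place, or relocated to
 * mpc_new_phys) so that its interrupt source entries match mp_irqs[].
 */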
static int __init update_mp_table(void)
{
	char str[16];
	char oem[10];
	struct mpf_intel *mpf;
	struct mpc_table *mpc, *mpc_new;
	unsigned long size;

	if (!enable_update_mptable)
		return 0;

	if (!mpf_found)
		return 0;

	mpf = early_memremap(mpf_base, sizeof(*mpf));
	if (!mpf) {
		pr_err("MPTABLE: mpf early_memremap() failed\n");
		return 0;
	}

	/*
	 * Now see if we need to go further.
	 */
	if (mpf->feature1)
		goto do_unmap_mpf;

	if (!mpf->physptr)
		goto do_unmap_mpf;

	size = get_mpc_size(mpf->physptr);
	mpc = early_memremap(mpf->physptr, size);
	if (!mpc) {
		pr_err("MPTABLE: mpc early_memremap() failed\n");
		goto do_unmap_mpf;
	}

	if (!smp_check_mpc(mpc, oem, str))
		goto do_unmap_mpc;

	pr_info("mpf: %llx\n", (u64)mpf_base);
	pr_info("physptr: %x\n", mpf->physptr);

	if (mpc_new_phys && mpc->length > mpc_new_length) {
		mpc_new_phys = 0;
		pr_info("mpc_new_length is %ld, please use alloc_mptable=8k\n",
			mpc_new_length);
	}

	if (!mpc_new_phys) {
		unsigned char old, new;
		/* check if we can change the position */
		mpc->checksum = 0;
		old = mpf_checksum((unsigned char *)mpc, mpc->length);
		mpc->checksum = 0xff;
		new = mpf_checksum((unsigned char *)mpc, mpc->length);
		if (old == new) {
			pr_info("mpc is readonly, please try alloc_mptable instead\n");
			goto do_unmap_mpc;
		}
		pr_info("use in-position replacing\n");
	} else {
		mpc_new = early_memremap(mpc_new_phys, mpc_new_length);
		if (!mpc_new) {
			pr_err("MPTABLE: new mpc early_memremap() failed\n");
			goto do_unmap_mpc;
		}
		mpf->physptr = mpc_new_phys;
		memcpy(mpc_new, mpc, mpc->length);
		early_memunmap(mpc, size);
		mpc = mpc_new;
		size = mpc_new_length;
		/* check if we can modify that */
		if (mpc_new_phys - mpf->physptr) {
			struct mpf_intel *mpf_new;
			/* steal 16 bytes from [0, 1k) */
			mpf_new = early_memremap(0x400 - 16, sizeof(*mpf_new));
			if (!mpf_new) {
				pr_err("MPTABLE: new mpf early_memremap() failed\n");
				goto do_unmap_mpc;
			}
			pr_info("mpf new: %x\n", 0x400 - 16);
			memcpy(mpf_new, mpf, 16);
			early_memunmap(mpf, sizeof(*mpf));
			mpf = mpf_new;
			mpf->physptr = mpc_new_phys;
		}
		mpf->checksum = 0;
		mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
		pr_info("physptr new: %x\n", mpf->physptr);
	}

	/*
	 * only replace the one with mp_INT and
	 * MP_IRQ_TRIGGER_LEVEL|MP_IRQ_POLARITY_LOW,
	 * already in mp_irqs , stored by ... and mp_config_acpi_gsi,
	 * may need pci=routeirq for all coverage
	 */
	replace_intsrc_all(mpc, mpc_new_phys, mpc_new_length);

do_unmap_mpc:
	early_memunmap(mpc, size);

do_unmap_mpf:
	early_memunmap(mpf, sizeof(*mpf));

	return 0;
}

late_initcall(update_mp_table);