// SPDX-License-Identifier: GPL-2.0
/*
 * acpi.c - Architecture-Specific Low-Level ACPI Boot Support
 *
 * Author: Jianmin Lv <lvjianmin@loongson.cn>
 * Author: Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020 Loongson Technology Corporation Limited
 */

#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/memblock.h>
#include <linux/serial_core.h>
#include <asm/io.h>
#include <asm/numa.h>
#include <loongson.h>

int acpi_disabled;
EXPORT_SYMBOL(acpi_disabled);
int acpi_noirq;
int acpi_pci_disabled;
EXPORT_SYMBOL(acpi_pci_disabled);
int acpi_strict = 1; /* We have no workarounds on LoongArch */
int num_processors;
int disabled_cpus;
enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_LPIC;

u64 acpi_saved_sp;

#define PREFIX			"ACPI: "

struct acpi_madt_core_pic acpi_core_pic[MAX_CORE_PIC];

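/*
 * Map a Global System Interrupt (GSI) to a Linux IRQ number, registering
 * the GSI with default trigger/polarity if no mapping exists yet.
 */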
int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
{
	int irq = acpi_register_gsi(NULL, gsi, -1, -1);

	/* Don't dereference irqp when the caller passes NULL or mapping failed */
	if (irqp != NULL && irq >= 0)
		*irqp = irq;
	return (irq >= 0) ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);

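/*
 * ISA IRQs are identity-mapped to GSIs on LoongArch.
 */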
int acpi_isa_irq_to_gsi(unsigned int isa_irq, u32 *gsi)
{
	if (gsi)
		*gsi = isa_irq;
	return 0;
}

/*
 * success: return IRQ number (>= 0)
 * failure: return < 0
 */
int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
{
	int id;
	struct irq_fwspec fwspec;

	switch (gsi) {
	case GSI_MIN_CPU_IRQ ... GSI_MAX_CPU_IRQ:
		fwspec.fwnode = liointc_domain->fwnode;
		fwspec.param[0] = gsi - GSI_MIN_CPU_IRQ;
		fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity);
		fwspec.param_count = 2;

		return irq_create_fwspec_mapping(&fwspec);

	case GSI_MIN_LPC_IRQ ... GSI_MAX_LPC_IRQ:
		if (!pch_lpc_domain)
			return -EINVAL;

		fwspec.fwnode = pch_lpc_domain->fwnode;
		fwspec.param[0] = gsi - GSI_MIN_LPC_IRQ;
		fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity);
		fwspec.param_count = 2;

		return irq_create_fwspec_mapping(&fwspec);

	case GSI_MIN_PCH_IRQ ... GSI_MAX_PCH_IRQ:
		id = find_pch_pic(gsi);
		if (id < 0)
			return -EINVAL;

		fwspec.fwnode = pch_pic_domain[id]->fwnode;
		fwspec.param[0] = gsi - acpi_pchpic[id]->gsi_base;
		fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity);
		fwspec.param_count = 2;

		return irq_create_fwspec_mapping(&fwspec);
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(acpi_register_gsi);

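/*
 * Mappings created by acpi_register_gsi() are never torn down on
 * LoongArch, so unregistering a GSI is a no-op.
 */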
void acpi_unregister_gsi(u32 gsi)
{
}
EXPORT_SYMBOL_GPL(acpi_unregister_gsi);

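/*
 * Early mapping helpers used by the ACPI table parser before the normal
 * ioremap() machinery is available.
 */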
void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size)
{
	if (!phys || !size)
		return NULL;

	return early_memremap(phys, size);
}

void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
{
	if (!map || !size)
		return;

	early_memunmap(map, size);
}

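/*
 * RAM described by memblock is mapped cacheable; anything else (e.g. MMIO
 * or reserved firmware regions) gets an uncached mapping.
 */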
void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
	if (!memblock_is_memory(phys))
		return ioremap(phys, size);
	else
		return ioremap_cache(phys, size);
}

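/*
 * Allocate a logical CPU number for a physical CPU id reported by the
 * MADT and record the mapping in both directions. The boot CPU is always
 * logical CPU 0.
 */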
static int set_processor_mask(u32 id, u32 flags)
{
	int cpu, cpuid = id;

	if (num_processors >= nr_cpu_ids) {
		pr_warn(PREFIX "nr_cpus/possible_cpus limit of %i reached. processor 0x%x ignored.\n",
			nr_cpu_ids, cpuid);
		return -ENODEV;
	}

	if (cpuid == loongson_sysconf.boot_cpu_id)
		cpu = 0;
	else
		cpu = cpumask_next_zero(-1, cpu_present_mask);

	if (flags & ACPI_MADT_ENABLED) {
		num_processors++;
		set_cpu_possible(cpu, true);
		set_cpu_present(cpu, true);
		__cpu_number_map[cpuid] = cpu;
		__cpu_logical_map[cpu] = cpuid;
	} else
		disabled_cpus++;

	return cpu;
}

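/*
 * MADT subtable parse callbacks: each one validates its entry and caches
 * a pointer (or a copy) for the interrupt controller drivers to consume.
 */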
static int __init
acpi_parse_cpuintc(union acpi_subtable_headers *header, const unsigned long end)
{
	struct acpi_madt_core_pic *processor = NULL;

	processor = (struct acpi_madt_core_pic *)header;
	if (BAD_MADT_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(&header->common);
	acpi_core_pic[processor->core_id] = *processor;
	set_processor_mask(processor->core_id, processor->flags);

	return 0;
}

static int __init
acpi_parse_liointc(union acpi_subtable_headers *header, const unsigned long end)
{
	struct acpi_madt_lio_pic *liointc = NULL;

	liointc = (struct acpi_madt_lio_pic *)header;

	if (BAD_MADT_ENTRY(liointc, end))
		return -EINVAL;

	acpi_liointc = liointc;

	return 0;
}

static int __init
acpi_parse_eiointc(union acpi_subtable_headers *header, const unsigned long end)
{
	static int id = 0;
	struct acpi_madt_eio_pic *eiointc = NULL;

	eiointc = (struct acpi_madt_eio_pic *)header;

	if (BAD_MADT_ENTRY(eiointc, end))
		return -EINVAL;

	acpi_eiointc[id++] = eiointc;
	loongson_sysconf.nr_io_pics = id;

	return 0;
}

static int __init
acpi_parse_htintc(union acpi_subtable_headers *header, const unsigned long end)
{
	struct acpi_madt_ht_pic *htintc = NULL;

	htintc = (struct acpi_madt_ht_pic *)header;

	if (BAD_MADT_ENTRY(htintc, end))
		return -EINVAL;

	acpi_htintc = htintc;
	loongson_sysconf.nr_io_pics = 1;

	return 0;
}

static int __init
acpi_parse_pch_pic(union acpi_subtable_headers *header, const unsigned long end)
{
	static int id = 0;
	struct acpi_madt_bio_pic *pchpic = NULL;

	pchpic = (struct acpi_madt_bio_pic *)header;

	if (BAD_MADT_ENTRY(pchpic, end))
		return -EINVAL;

	acpi_pchpic[id++] = pchpic;

	return 0;
}

static int __init
acpi_parse_pch_msi(union acpi_subtable_headers *header, const unsigned long end)
{
	static int id = 0;
	struct acpi_madt_msi_pic *pchmsi = NULL;

	pchmsi = (struct acpi_madt_msi_pic *)header;

	if (BAD_MADT_ENTRY(pchmsi, end))
		return -EINVAL;

	acpi_pchmsi[id++] = pchmsi;

	return 0;
}

static int __init
acpi_parse_pch_lpc(union acpi_subtable_headers *header, const unsigned long end)
{
	struct acpi_madt_lpc_pic *pchlpc = NULL;

	pchlpc = (struct acpi_madt_lpc_pic *)header;

	if (BAD_MADT_ENTRY(pchlpc, end))
		return -EINVAL;

	acpi_pchlpc = pchlpc;

	return 0;
}

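/*
 * Walk every interrupt-controller entry type in the MADT. Any malformed
 * entry disables ACPI entirely, since interrupt routing cannot be set up
 * from a broken table.
 */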
static void __init acpi_process_madt(void)
{
	int i, error;

	for (i = 0; i < NR_CPUS; i++) {
		__cpu_number_map[i] = -1;
		__cpu_logical_map[i] = -1;
	}

	/* Parse MADT CPUINTC entries */
	error = acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC, acpi_parse_cpuintc, MAX_CORE_PIC);
	if (error < 0) {
		disable_acpi();
		pr_err(PREFIX "Invalid BIOS MADT (CPUINTC entries), ACPI disabled\n");
		return;
	}

	loongson_sysconf.nr_cpus = num_processors;

	/* Parse MADT LIOINTC entries */
	error = acpi_table_parse_madt(ACPI_MADT_TYPE_LIO_PIC, acpi_parse_liointc, 1);
	if (error < 0) {
		disable_acpi();
		pr_err(PREFIX "Invalid BIOS MADT (LIOINTC entries), ACPI disabled\n");
		return;
	}

	/* Parse MADT EIOINTC entries */
	error = acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC, acpi_parse_eiointc, MAX_IO_PICS);
	if (error < 0) {
		disable_acpi();
		pr_err(PREFIX "Invalid BIOS MADT (EIOINTC entries), ACPI disabled\n");
		return;
	}

	/* Parse MADT HTVEC entries */
	error = acpi_table_parse_madt(ACPI_MADT_TYPE_HT_PIC, acpi_parse_htintc, 1);
	if (error < 0) {
		disable_acpi();
		pr_err(PREFIX "Invalid BIOS MADT (HTVEC entries), ACPI disabled\n");
		return;
	}

	/* Parse MADT PCHPIC entries */
	error = acpi_table_parse_madt(ACPI_MADT_TYPE_BIO_PIC, acpi_parse_pch_pic, MAX_IO_PICS);
	if (error < 0) {
		disable_acpi();
		pr_err(PREFIX "Invalid BIOS MADT (PCHPIC entries), ACPI disabled\n");
		return;
	}

	/* Parse MADT PCHMSI entries */
	error = acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, acpi_parse_pch_msi, MAX_IO_PICS);
	if (error < 0) {
		disable_acpi();
		pr_err(PREFIX "Invalid BIOS MADT (PCHMSI entries), ACPI disabled\n");
		return;
	}

	/* Parse MADT PCHLPC entries */
	error = acpi_table_parse_madt(ACPI_MADT_TYPE_LPC_PIC, acpi_parse_pch_lpc, 1);
	if (error < 0) {
		disable_acpi();
		pr_err(PREFIX "Invalid BIOS MADT (PCHLPC entries), ACPI disabled\n");
		return;
	}
}

int pptt_enabled;

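/*
 * Derive the CPU core topology from the ACPI PPTT. For SMT CPUs the core
 * id is taken one level up from the thread node.
 */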
int __init parse_acpi_topology(void)
{
	int cpu, topology_id;

	for_each_possible_cpu(cpu) {
		topology_id = find_acpi_cpu_topology(cpu, 0);
		if (topology_id < 0) {
			pr_warn("Invalid BIOS PPTT\n");
			return -ENOENT;
		}

		if (acpi_pptt_cpu_is_thread(cpu) <= 0)
			cpu_data[cpu].core = topology_id;
		else {
			topology_id = find_acpi_cpu_topology(cpu, 1);
			if (topology_id < 0)
				return -ENOENT;

			cpu_data[cpu].core = topology_id;
		}
	}

	pptt_enabled = 1;

	return 0;
}

#ifndef CONFIG_SUSPEND
int (*acpi_suspend_lowlevel)(void);
#else
int (*acpi_suspend_lowlevel)(void) = loongarch_acpi_suspend;
#endif

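/*
 * Boot-time ACPI setup: bring up the table parser, record the boot CPU,
 * and process the MADT.
 */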
void __init acpi_boot_table_init(void)
{
	/* If acpi_disabled, bail out */
	if (acpi_disabled)
		return;

	/* Initialize the ACPI boot-time table parser. */
	if (acpi_table_init()) {
		disable_acpi();
		return;
	}

	loongson_sysconf.boot_cpu_id = read_csr_cpuid();

	/* Process the Multiple APIC Description Table (MADT), if present. */
	acpi_process_madt();

	/* Do not enable ACPI SPCR console by default */
	acpi_parse_spcr(earlycon_acpi_spcr_enable, false);
}

#ifdef CONFIG_ACPI_NUMA

static __init int setup_node(int pxm)
{
	return acpi_map_pxm_to_node(pxm);
}

/*
 * Callback for SLIT parsing.  pxm_to_node() returns NUMA_NO_NODE for
 * I/O localities since SRAT does not list them.  I/O localities are
 * not supported at this point.
 */
unsigned int numa_distance_cnt;

static inline unsigned int get_numa_distances_cnt(struct acpi_table_slit *slit)
{
	return slit->locality_count;
}

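/*
 * Record one entry of the SLIT distance matrix. A distance must fit in a
 * u8, and the distance from a node to itself must be LOCAL_DISTANCE.
 */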
void __init numa_set_distance(int from, int to, int distance)
{
	if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) {
		pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
				from, to, distance);
		return;
	}

	node_distances[from][to] = distance;
}

/* Callback for Proximity Domain -> CPUID mapping */
void __init
acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
{
	int pxm, node;

	if (srat_disabled())
		return;
	if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
		bad_srat();
		return;
	}
	if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
		return;
	pxm = pa->proximity_domain_lo;
	if (acpi_srat_revision >= 2) {
		pxm |= (pa->proximity_domain_hi[0] << 8);
		pxm |= (pa->proximity_domain_hi[1] << 16);
		pxm |= (pa->proximity_domain_hi[2] << 24);
	}
	node = setup_node(pxm);
	if (node < 0) {
		pr_err("SRAT: Too many proximity domains %x\n", pxm);
		bad_srat();
		return;
	}

	if (pa->apic_id >= CONFIG_NR_CPUS) {
		pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u skipped apicid that is too big\n",
				pxm, pa->apic_id, node);
		return;
	}

	early_numa_add_cpu(pa->apic_id, node);

	set_cpuid_to_node(pa->apic_id, node);
	node_set(node, numa_nodes_parsed);
	pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n",
		pxm, pa->apic_id, node);
}

#endif

void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
{
	memblock_reserve(addr, size);
}

#ifdef CONFIG_ACPI_HOTPLUG_CPU

#include <acpi/processor.h>

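/*
 * Bind a hot-added CPU to the NUMA node reported by its _PXM, if any.
 */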
static int __ref acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
{
#ifdef CONFIG_ACPI_NUMA
	int nid;

	nid = acpi_get_node(handle);
	if (nid != NUMA_NO_NODE) {
		set_cpuid_to_node(physid, nid);
		node_set(nid, numa_nodes_parsed);
		set_cpu_numa_node(cpu, nid);
		cpumask_set_cpu(cpu, cpumask_of_node(nid));
	}
#endif
	return 0;
}

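/*
 * Hotplug entry point: allocate a logical CPU for the new physical id,
 * then attach it to its NUMA node.
 */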
int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu)
{
	int cpu;

	cpu = set_processor_mask(physid, ACPI_MADT_ENABLED);
	if (cpu < 0) {
		pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
		return cpu;
	}

	acpi_map_cpu2node(handle, cpu, physid);

	*pcpu = cpu;

	return 0;
}
EXPORT_SYMBOL(acpi_map_cpu);

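/*
 * Hot-remove: drop the CPU from the present mask and clear its NUMA
 * binding.
 */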
int acpi_unmap_cpu(int cpu)
{
#ifdef CONFIG_ACPI_NUMA
	set_cpuid_to_node(cpu_logical_map(cpu), NUMA_NO_NODE);
#endif
	set_cpu_present(cpu, false);
	num_processors--;

	pr_info("cpu%d hot remove!\n", cpu);

	return 0;
}
EXPORT_SYMBOL(acpi_unmap_cpu);

#endif /* CONFIG_ACPI_HOTPLUG_CPU */