// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  acpi_numa.c - ACPI NUMA support
 *
 *  Copyright (C) 2002 Takayoshi Kochi <t-kochi@bq.jp.nec.com>
 */

#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/acpi.h>
#include <linux/memblock.h>
#include <linux/numa.h>
#include <linux/nodemask.h>
#include <linux/topology.h>

static nodemask_t nodes_found_map = NODE_MASK_NONE;

/* maps to convert between proximity domain and logical node ID */
static int pxm_to_node_map[MAX_PXM_DOMAINS]
			= { [0 ... MAX_PXM_DOMAINS - 1] = NUMA_NO_NODE };
static int node_to_pxm_map[MAX_NUMNODES]
			= { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };

unsigned char acpi_srat_revision __initdata;
static int acpi_numa __initdata;

void __init disable_srat(void)
{
	acpi_numa = -1;
}

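/*
 * pxm_to_node - return the logical node ID mapped to proximity domain @pxm,
 * or NUMA_NO_NODE if @pxm is out of range or NUMA is disabled.
 */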
int pxm_to_node(int pxm)
{
	if (pxm < 0 || pxm >= MAX_PXM_DOMAINS || numa_off)
		return NUMA_NO_NODE;
	return pxm_to_node_map[pxm];
}
EXPORT_SYMBOL(pxm_to_node);

int node_to_pxm(int node)
{
	if (node < 0)
		return PXM_INVAL;
	return node_to_pxm_map[node];
}

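/*
 * Record the pxm <-> node mapping in both lookup tables.  If a mapping
 * already exists, keep the smaller value on each side.
 */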
static void __acpi_map_pxm_to_node(int pxm, int node)
{
	if (pxm_to_node_map[pxm] == NUMA_NO_NODE || node < pxm_to_node_map[pxm])
		pxm_to_node_map[pxm] = node;
	if (node_to_pxm_map[node] == PXM_INVAL || pxm < node_to_pxm_map[node])
		node_to_pxm_map[node] = pxm;
}

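/*
 * acpi_map_pxm_to_node - return the node for proximity domain @pxm,
 * allocating the first unused node ID if the domain has not been seen
 * before.  Returns NUMA_NO_NODE if @pxm is out of range, NUMA is
 * disabled, or all MAX_NUMNODES node IDs are already in use.
 */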
int acpi_map_pxm_to_node(int pxm)
{
	int node;

	if (pxm < 0 || pxm >= MAX_PXM_DOMAINS || numa_off)
		return NUMA_NO_NODE;

	node = pxm_to_node_map[pxm];

	if (node == NUMA_NO_NODE) {
		if (nodes_weight(nodes_found_map) >= MAX_NUMNODES)
			return NUMA_NO_NODE;
		node = first_unset_node(nodes_found_map);
		__acpi_map_pxm_to_node(pxm, node);
		node_set(node, nodes_found_map);
	}

	return node;
}
EXPORT_SYMBOL(acpi_map_pxm_to_node);

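/* Log (at debug level) each supported SRAT entry as it is parsed. */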
static void __init
acpi_table_print_srat_entry(struct acpi_subtable_header *header)
{
	switch (header->type) {
	case ACPI_SRAT_TYPE_CPU_AFFINITY:
		{
			struct acpi_srat_cpu_affinity *p =
			    (struct acpi_srat_cpu_affinity *)header;
			pr_debug("SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n",
				 p->apic_id, p->local_sapic_eid,
				 p->proximity_domain_lo,
				 (p->flags & ACPI_SRAT_CPU_ENABLED) ?
				 "enabled" : "disabled");
		}
		break;

	case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
		{
			struct acpi_srat_mem_affinity *p =
			    (struct acpi_srat_mem_affinity *)header;
			pr_debug("SRAT Memory (0x%llx length 0x%llx) in proximity domain %d %s%s%s\n",
				 (unsigned long long)p->base_address,
				 (unsigned long long)p->length,
				 p->proximity_domain,
				 (p->flags & ACPI_SRAT_MEM_ENABLED) ?
				 "enabled" : "disabled",
				 (p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) ?
				 " hot-pluggable" : "",
				 (p->flags & ACPI_SRAT_MEM_NON_VOLATILE) ?
				 " non-volatile" : "");
		}
		break;

	case ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY:
		{
			struct acpi_srat_x2apic_cpu_affinity *p =
			    (struct acpi_srat_x2apic_cpu_affinity *)header;
			pr_debug("SRAT Processor (x2apicid[0x%08x]) in proximity domain %d %s\n",
				 p->apic_id,
				 p->proximity_domain,
				 (p->flags & ACPI_SRAT_CPU_ENABLED) ?
				 "enabled" : "disabled");
		}
		break;

	case ACPI_SRAT_TYPE_GICC_AFFINITY:
		{
			struct acpi_srat_gicc_affinity *p =
			    (struct acpi_srat_gicc_affinity *)header;
			pr_debug("SRAT Processor (acpi id[0x%04x]) in proximity domain %d %s\n",
				 p->acpi_processor_uid,
				 p->proximity_domain,
				 (p->flags & ACPI_SRAT_GICC_ENABLED) ?
				 "enabled" : "disabled");
		}
		break;

	case ACPI_SRAT_TYPE_GENERIC_AFFINITY:
	{
		struct acpi_srat_generic_affinity *p =
			(struct acpi_srat_generic_affinity *)header;

		if (p->device_handle_type == 0) {
			/*
			 * For PCI devices this may be the only place they
			 * are assigned a proximity domain.
			 */
			pr_debug("SRAT Generic Initiator(Seg:%u BDF:%u) in proximity domain %d %s\n",
				 *(u16 *)(&p->device_handle[0]),
				 *(u16 *)(&p->device_handle[2]),
				 p->proximity_domain,
				 (p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED) ?
				"enabled" : "disabled");
		} else {
			/*
			 * In this case we can rely on the device having a
			 * proximity domain reference.
			 */
			pr_debug("SRAT Generic Initiator(HID=%.8s UID=%.4s) in proximity domain %d %s\n",
				(char *)(&p->device_handle[0]),
				(char *)(&p->device_handle[8]),
				p->proximity_domain,
				(p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED) ?
				"enabled" : "disabled");
		}
	}
	break;
	default:
		pr_warn("Found unsupported SRAT entry (type = 0x%x)\n",
			header->type);
		break;
	}
}

/*
 * Many BIOSes fill in 10 (= no distance) everywhere.  This messes up
 * the NUMA heuristics, which want the local node to have a smaller
 * distance than the others.
 * Do some quick checks here and only use the SLIT if it passes.
 */
static int __init slit_valid(struct acpi_table_slit *slit)
{
	int i, j;
	int d = slit->locality_count;
	for (i = 0; i < d; i++) {
		for (j = 0; j < d; j++) {
			u8 val = slit->entry[d*i + j];
			if (i == j) {
				if (val != LOCAL_DISTANCE)
					return 0;
			} else if (val <= LOCAL_DISTANCE)
				return 0;
		}
	}
	return 1;
}

void __init bad_srat(void)
{
	pr_err("SRAT: SRAT not used.\n");
	disable_srat();
}

int __init srat_disabled(void)
{
	return acpi_numa < 0;
}

#if defined(CONFIG_X86) || defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH)
/*
 * Callback for SLIT parsing.  pxm_to_node() returns NUMA_NO_NODE for
 * I/O localities since SRAT does not list them.  I/O localities are
 * not supported at this point.
 */
void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
{
	int i, j;

	for (i = 0; i < slit->locality_count; i++) {
		const int from_node = pxm_to_node(i);

		if (from_node == NUMA_NO_NODE)
			continue;

		for (j = 0; j < slit->locality_count; j++) {
			const int to_node = pxm_to_node(j);

			if (to_node == NUMA_NO_NODE)
				continue;

			numa_set_distance(from_node, to_node,
				slit->entry[slit->locality_count * i + j]);
		}
	}
}

/*
 * Default callback for parsing of the Proximity Domain <-> Memory
 * Area mappings
 */
int __init
acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
{
	u64 start, end;
	u32 hotpluggable;
	int node, pxm;

	if (srat_disabled())
		goto out_err;
	if (ma->header.length < sizeof(struct acpi_srat_mem_affinity)) {
		pr_err("SRAT: Unexpected header length: %d\n",
		       ma->header.length);
		goto out_err_bad_srat;
	}
	if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
		goto out_err;
	hotpluggable = IS_ENABLED(CONFIG_MEMORY_HOTPLUG) &&
		(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE);

	start = ma->base_address;
	end = start + ma->length;
	pxm = ma->proximity_domain;
	if (acpi_srat_revision <= 1)
		pxm &= 0xff;

	node = acpi_map_pxm_to_node(pxm);
	if (node == NUMA_NO_NODE) {
		pr_err("SRAT: Too many proximity domains.\n");
		goto out_err_bad_srat;
	}

	if (numa_add_memblk(node, start, end) < 0) {
		pr_err("SRAT: Failed to add memblk to node %u [mem %#010Lx-%#010Lx]\n",
		       node, (unsigned long long) start,
		       (unsigned long long) end - 1);
		goto out_err_bad_srat;
	}

	node_set(node, numa_nodes_parsed);

	pr_info("SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]%s%s\n",
		node, pxm,
		(unsigned long long) start, (unsigned long long) end - 1,
		hotpluggable ? " hotplug" : "",
		ma->flags & ACPI_SRAT_MEM_NON_VOLATILE ? " non-volatile" : "");

	/* Mark hotplug range in memblock. */
	if (hotpluggable && memblock_mark_hotplug(start, ma->length))
		pr_warn("SRAT: Failed to mark hotplug range [mem %#010Lx-%#010Lx] in memblock\n",
			(unsigned long long)start, (unsigned long long)end - 1);

	max_possible_pfn = max(max_possible_pfn, PFN_UP(end - 1));

	return 0;
out_err_bad_srat:
	bad_srat();
out_err:
	return -EINVAL;
}

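/*
 * CEDT CFMWS callback: if the SRAT already described any part of this
 * CXL window, extend those memblks to cover the whole window; otherwise
 * create a new node from the next unused (fake) proximity domain.
 */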
static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
				   void *arg, const unsigned long table_end)
{
	struct acpi_cedt_cfmws *cfmws;
	int *fake_pxm = arg;
	u64 start, end;
	int node;

	cfmws = (struct acpi_cedt_cfmws *)header;
	start = cfmws->base_hpa;
	end = cfmws->base_hpa + cfmws->window_size;

	/*
	 * The SRAT may have already described NUMA details for all,
	 * or a portion of, this CFMWS HPA range. Extend the memblks
	 * found for any portion of the window to cover the entire
	 * window.
	 */
	if (!numa_fill_memblks(start, end))
		return 0;

	/* No SRAT description. Create a new node. */
	node = acpi_map_pxm_to_node(*fake_pxm);

	if (node == NUMA_NO_NODE) {
		pr_err("ACPI NUMA: Too many proximity domains while processing CFMWS.\n");
		return -EINVAL;
	}

	if (numa_add_memblk(node, start, end) < 0) {
		/* CXL driver must handle the NUMA_NO_NODE case */
		pr_warn("ACPI NUMA: Failed to add memblk for CFMWS node %d [mem %#llx-%#llx]\n",
			node, start, end);
	}
	node_set(node, numa_nodes_parsed);

	/* Set the next available fake_pxm value */
	(*fake_pxm)++;
	return 0;
}
#else
static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
				   void *arg, const unsigned long table_end)
{
	return 0;
}
#endif /* defined(CONFIG_X86) || defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH) */

static int __init acpi_parse_slit(struct acpi_table_header *table)
{
	struct acpi_table_slit *slit = (struct acpi_table_slit *)table;

	if (!slit_valid(slit)) {
		pr_info("SLIT table looks invalid. Not used.\n");
		return -EINVAL;
	}
	acpi_numa_slit_init(slit);

	return 0;
}

void __init __weak
acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
{
	pr_warn("Found unsupported x2apic [0x%08x] SRAT entry\n", pa->apic_id);
}

static int __init
acpi_parse_x2apic_affinity(union acpi_subtable_headers *header,
			   const unsigned long end)
{
	struct acpi_srat_x2apic_cpu_affinity *processor_affinity;

	processor_affinity = (struct acpi_srat_x2apic_cpu_affinity *)header;

	acpi_table_print_srat_entry(&header->common);

	/* let the architecture-dependent part do it */
	acpi_numa_x2apic_affinity_init(processor_affinity);

	return 0;
}

static int __init
acpi_parse_processor_affinity(union acpi_subtable_headers *header,
			      const unsigned long end)
{
	struct acpi_srat_cpu_affinity *processor_affinity;

	processor_affinity = (struct acpi_srat_cpu_affinity *)header;

	acpi_table_print_srat_entry(&header->common);

	/* let the architecture-dependent part do it */
	acpi_numa_processor_affinity_init(processor_affinity);

	return 0;
}

static int __init
acpi_parse_gicc_affinity(union acpi_subtable_headers *header,
			 const unsigned long end)
{
	struct acpi_srat_gicc_affinity *processor_affinity;

	processor_affinity = (struct acpi_srat_gicc_affinity *)header;

	acpi_table_print_srat_entry(&header->common);

	/* let the architecture-dependent part do it */
	acpi_numa_gicc_affinity_init(processor_affinity);

	return 0;
}

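/*
 * Generic Initiator affinity: map the proximity domain to a node and
 * mark that node as containing a generic initiator.
 */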
#if defined(CONFIG_X86) || defined(CONFIG_ARM64)
static int __init
acpi_parse_gi_affinity(union acpi_subtable_headers *header,
		       const unsigned long end)
{
	struct acpi_srat_generic_affinity *gi_affinity;
	int node;

	gi_affinity = (struct acpi_srat_generic_affinity *)header;
	if (!gi_affinity)
		return -EINVAL;
	acpi_table_print_srat_entry(&header->common);

	if (!(gi_affinity->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED))
		return -EINVAL;

	node = acpi_map_pxm_to_node(gi_affinity->proximity_domain);
	if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
		pr_err("SRAT: Too many proximity domains.\n");
		return -EINVAL;
	}
	node_set(node, numa_nodes_parsed);
	node_set_state(node, N_GENERIC_INITIATOR);

	return 0;
}
#else
static int __init
acpi_parse_gi_affinity(union acpi_subtable_headers *header,
		       const unsigned long end)
{
	return 0;
}
#endif /* defined(CONFIG_X86) || defined(CONFIG_ARM64) */

static int __initdata parsed_numa_memblks;

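/*
 * SRAT memory affinity callback: print the entry, hand it to the
 * architecture code, and count successfully parsed memory ranges.
 */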
static int __init
acpi_parse_memory_affinity(union acpi_subtable_headers *header,
			   const unsigned long end)
{
	struct acpi_srat_mem_affinity *memory_affinity;

	memory_affinity = (struct acpi_srat_mem_affinity *)header;

	acpi_table_print_srat_entry(&header->common);

	/* let the architecture-dependent part do it */
	if (!acpi_numa_memory_affinity_init(memory_affinity))
		parsed_numa_memblks++;
	return 0;
}

static int __init acpi_parse_srat(struct acpi_table_header *table)
{
	struct acpi_table_srat *srat = (struct acpi_table_srat *)table;

	acpi_srat_revision = srat->header.revision;

	/* Real work done in acpi_table_parse_srat below. */

	return 0;
}

static int __init
acpi_table_parse_srat(enum acpi_srat_type id,
		      acpi_tbl_entry_handler handler, unsigned int max_entries)
{
	return acpi_table_parse_entries(ACPI_SIG_SRAT,
					sizeof(struct acpi_table_srat), id,
					handler, max_entries);
}

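/*
 * acpi_numa_init - parse the ACPI NUMA tables: SRAT (CPU, x2APIC, GICC,
 * Generic Initiator and memory affinity entries), then SLIT distances,
 * then CEDT CFMWS windows for CXL memory not described in the SRAT.
 */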
int __init acpi_numa_init(void)
{
	int i, fake_pxm, cnt = 0;

	if (acpi_disabled)
		return -EINVAL;

	/*
	 * Do not limit the number of entries to the CPU count derived from
	 * NR_CPUS or nr_cpus=; SRAT CPU entries may be ordered differently
	 * from those in the MADT, so walk all SRAT CPU entries to build the
	 * APIC ID to node mapping.
	 */

	/* SRAT: System Resource Affinity Table */
	if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
		struct acpi_subtable_proc srat_proc[4];

		memset(srat_proc, 0, sizeof(srat_proc));
		srat_proc[0].id = ACPI_SRAT_TYPE_CPU_AFFINITY;
		srat_proc[0].handler = acpi_parse_processor_affinity;
		srat_proc[1].id = ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY;
		srat_proc[1].handler = acpi_parse_x2apic_affinity;
		srat_proc[2].id = ACPI_SRAT_TYPE_GICC_AFFINITY;
		srat_proc[2].handler = acpi_parse_gicc_affinity;
		srat_proc[3].id = ACPI_SRAT_TYPE_GENERIC_AFFINITY;
		srat_proc[3].handler = acpi_parse_gi_affinity;

		acpi_table_parse_entries_array(ACPI_SIG_SRAT,
					sizeof(struct acpi_table_srat),
					srat_proc, ARRAY_SIZE(srat_proc), 0);

		cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
					    acpi_parse_memory_affinity, 0);
	}

	/* SLIT: System Locality Information Table */
	acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);

	/*
	 * CXL Fixed Memory Window Structures (CFMWS) must be parsed
	 * after the SRAT. Create NUMA Nodes for CXL memory ranges that
	 * are defined in the CFMWS and not already defined in the SRAT.
	 * Initialize a fake_pxm as the first available PXM to emulate.
	 */

	/* fake_pxm is the next unused PXM value after SRAT parsing */
	for (i = 0, fake_pxm = -1; i < MAX_NUMNODES; i++) {
		if (node_to_pxm_map[i] > fake_pxm)
			fake_pxm = node_to_pxm_map[i];
	}
	fake_pxm++;
	acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, acpi_parse_cfmws,
			      &fake_pxm);

	if (cnt < 0)
		return cnt;
	else if (!parsed_numa_memblks)
		return -ENOENT;
	return 0;
}

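/*
 * Evaluate _PXM for @h, walking up the ACPI namespace to the nearest
 * ancestor that defines one.  Returns the proximity domain, or -1 if
 * no _PXM is found.
 */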
static int acpi_get_pxm(acpi_handle h)
{
	unsigned long long pxm;
	acpi_status status;
	acpi_handle handle;
	acpi_handle phandle = h;

	do {
		handle = phandle;
		status = acpi_evaluate_integer(handle, "_PXM", NULL, &pxm);
		if (ACPI_SUCCESS(status))
			return pxm;
		status = acpi_get_parent(handle, &phandle);
	} while (ACPI_SUCCESS(status));
	return -1;
}

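/*
 * acpi_get_node - return the NUMA node ID for the ACPI device @handle,
 * based on its (possibly inherited) _PXM proximity domain.
 */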
int acpi_get_node(acpi_handle handle)
{
	int pxm;

	pxm = acpi_get_pxm(handle);

	return pxm_to_node(pxm);
}
EXPORT_SYMBOL(acpi_get_node);