// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019, Intel Corporation.
 *
 * Heterogeneous Memory Attributes Table (HMAT) representation
 *
 * This program parses and reports the platform's HMAT tables, and registers
 * the applicable attributes with the node's interfaces.
 */

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/memory.h>
#include <linux/mutex.h>
#include <linux/node.h>
#include <linux/sysfs.h>

static u8 hmat_revision;

static LIST_HEAD(targets);
static LIST_HEAD(initiators);
static LIST_HEAD(localities);

static DEFINE_MUTEX(target_lock);

/*
 * The defined enum order is used to prioritize attributes to break ties when
 * selecting the best performing node.
 */
enum locality_types {
	WRITE_LATENCY,
	READ_LATENCY,
	WRITE_BANDWIDTH,
	READ_BANDWIDTH,
};

static struct memory_locality *localities_types[4];

struct target_cache {
	struct list_head node;
	struct node_cache_attrs cache_attrs;
};

struct memory_target {
	struct list_head node;
	unsigned int memory_pxm;
	unsigned int processor_pxm;
	struct node_hmem_attrs hmem_attrs;
	struct list_head caches;
	struct node_cache_attrs cache_attrs;
	bool registered;
};

struct memory_initiator {
	struct list_head node;
	unsigned int processor_pxm;
};

struct memory_locality {
	struct list_head node;
	struct acpi_hmat_locality *hmat_loc;
};

static struct memory_initiator *find_mem_initiator(unsigned int cpu_pxm)
{
	struct memory_initiator *initiator;

	list_for_each_entry(initiator, &initiators, node)
		if (initiator->processor_pxm == cpu_pxm)
			return initiator;
	return NULL;
}

static struct memory_target *find_mem_target(unsigned int mem_pxm)
{
	struct memory_target *target;

	list_for_each_entry(target, &targets, node)
		if (target->memory_pxm == mem_pxm)
			return target;
	return NULL;
}

static __init void alloc_memory_initiator(unsigned int cpu_pxm)
{
	struct memory_initiator *initiator;

	if (pxm_to_node(cpu_pxm) == NUMA_NO_NODE)
		return;

	initiator = find_mem_initiator(cpu_pxm);
	if (initiator)
		return;

	initiator = kzalloc(sizeof(*initiator), GFP_KERNEL);
	if (!initiator)
		return;

	initiator->processor_pxm = cpu_pxm;
	list_add_tail(&initiator->node, &initiators);
}

static __init void alloc_memory_target(unsigned int mem_pxm)
{
	struct memory_target *target;

	target = find_mem_target(mem_pxm);
	if (target)
		return;

	target = kzalloc(sizeof(*target), GFP_KERNEL);
	if (!target)
		return;

	target->memory_pxm = mem_pxm;
	target->processor_pxm = PXM_INVAL;
	list_add_tail(&target->node, &targets);
	INIT_LIST_HEAD(&target->caches);
}

static __init const char *hmat_data_type(u8 type)
{
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
		return "Access Latency";
	case ACPI_HMAT_READ_LATENCY:
		return "Read Latency";
	case ACPI_HMAT_WRITE_LATENCY:
		return "Write Latency";
	case ACPI_HMAT_ACCESS_BANDWIDTH:
		return "Access Bandwidth";
	case ACPI_HMAT_READ_BANDWIDTH:
		return "Read Bandwidth";
	case ACPI_HMAT_WRITE_BANDWIDTH:
		return "Write Bandwidth";
	default:
		return "Reserved";
	}
}

static __init const char *hmat_data_type_suffix(u8 type)
{
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
	case ACPI_HMAT_READ_LATENCY:
	case ACPI_HMAT_WRITE_LATENCY:
		return " nsec";
	case ACPI_HMAT_ACCESS_BANDWIDTH:
	case ACPI_HMAT_READ_BANDWIDTH:
	case ACPI_HMAT_WRITE_BANDWIDTH:
		return " MB/s";
	default:
		return "";
	}
}

static u32 hmat_normalize(u16 entry, u64 base, u8 type)
{
	u32 value;

	/*
	 * Check for invalid and overflow values
	 */
	if (entry == 0xffff || !entry)
		return 0;
	else if (base > (UINT_MAX / (entry)))
		return 0;

	/*
	 * Divide by the base unit for revision 1, or convert latency from
	 * picoseconds to nanoseconds for revision 2.
	 */
	value = entry * base;
	if (hmat_revision == 1) {
		if (value < 10)
			return 0;
		value = DIV_ROUND_UP(value, 10);
	} else if (hmat_revision == 2) {
		switch (type) {
		case ACPI_HMAT_ACCESS_LATENCY:
		case ACPI_HMAT_READ_LATENCY:
		case ACPI_HMAT_WRITE_LATENCY:
			value = DIV_ROUND_UP(value, 1000);
			break;
		default:
			break;
		}
	}
	return value;
}

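/*
 * Store a normalized value in the target's node_hmem_attrs. An "access"
 * type updates both the read and write attribute for the given direction
 * (latency or bandwidth), while the read- or write-specific types update
 * only one of them.
 */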
static void hmat_update_target_access(struct memory_target *target,
				      u8 type, u32 value)
{
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
		target->hmem_attrs.read_latency = value;
		target->hmem_attrs.write_latency = value;
		break;
	case ACPI_HMAT_READ_LATENCY:
		target->hmem_attrs.read_latency = value;
		break;
	case ACPI_HMAT_WRITE_LATENCY:
		target->hmem_attrs.write_latency = value;
		break;
	case ACPI_HMAT_ACCESS_BANDWIDTH:
		target->hmem_attrs.read_bandwidth = value;
		target->hmem_attrs.write_bandwidth = value;
		break;
	case ACPI_HMAT_READ_BANDWIDTH:
		target->hmem_attrs.read_bandwidth = value;
		break;
	case ACPI_HMAT_WRITE_BANDWIDTH:
		target->hmem_attrs.write_bandwidth = value;
		break;
	default:
		break;
	}
}

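/*
 * Remember the locality structure for later initiator selection and note
 * which performance attribute(s) it describes; an "access" type fills both
 * the read and write slots of localities_types[].
 */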
static __init void hmat_add_locality(struct acpi_hmat_locality *hmat_loc)
{
	struct memory_locality *loc;

	loc = kzalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		pr_notice_once("Failed to allocate HMAT locality\n");
		return;
	}

	loc->hmat_loc = hmat_loc;
	list_add_tail(&loc->node, &localities);

	switch (hmat_loc->data_type) {
	case ACPI_HMAT_ACCESS_LATENCY:
		localities_types[READ_LATENCY] = loc;
		localities_types[WRITE_LATENCY] = loc;
		break;
	case ACPI_HMAT_READ_LATENCY:
		localities_types[READ_LATENCY] = loc;
		break;
	case ACPI_HMAT_WRITE_LATENCY:
		localities_types[WRITE_LATENCY] = loc;
		break;
	case ACPI_HMAT_ACCESS_BANDWIDTH:
		localities_types[READ_BANDWIDTH] = loc;
		localities_types[WRITE_BANDWIDTH] = loc;
		break;
	case ACPI_HMAT_READ_BANDWIDTH:
		localities_types[READ_BANDWIDTH] = loc;
		break;
	case ACPI_HMAT_WRITE_BANDWIDTH:
		localities_types[WRITE_BANDWIDTH] = loc;
		break;
	default:
		break;
	}
}

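/*
 * A System Locality Latency and Bandwidth Information Structure is the
 * fixed header followed by an array of initiator proximity domains, an
 * array of target proximity domains, and an ipds * tpds matrix of u16
 * entries, one per initiator/target pair.
 */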
static __init int hmat_parse_locality(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	struct acpi_hmat_locality *hmat_loc = (void *)header;
	struct memory_target *target;
	unsigned int init, targ, total_size, ipds, tpds;
	u32 *inits, *targs, value;
	u16 *entries;
	u8 type, mem_hier;

	if (hmat_loc->header.length < sizeof(*hmat_loc)) {
		pr_notice("HMAT: Unexpected locality header length: %d\n",
			 hmat_loc->header.length);
		return -EINVAL;
	}

	type = hmat_loc->data_type;
	mem_hier = hmat_loc->flags & ACPI_HMAT_MEMORY_HIERARCHY;
	ipds = hmat_loc->number_of_initiator_Pds;
	tpds = hmat_loc->number_of_target_Pds;
	total_size = sizeof(*hmat_loc) + sizeof(*entries) * ipds * tpds +
		     sizeof(*inits) * ipds + sizeof(*targs) * tpds;
	if (hmat_loc->header.length < total_size) {
		pr_notice("HMAT: Unexpected locality header length:%d, minimum required:%d\n",
			 hmat_loc->header.length, total_size);
		return -EINVAL;
	}

	pr_info("HMAT: Locality: Flags:%02x Type:%s Initiator Domains:%d Target Domains:%d Base:%lld\n",
		hmat_loc->flags, hmat_data_type(type), ipds, tpds,
		hmat_loc->entry_base_unit);

	inits = (u32 *)(hmat_loc + 1);
	targs = inits + ipds;
	entries = (u16 *)(targs + tpds);
	for (init = 0; init < ipds; init++) {
		alloc_memory_initiator(inits[init]);
		for (targ = 0; targ < tpds; targ++) {
			value = hmat_normalize(entries[init * tpds + targ],
					       hmat_loc->entry_base_unit,
					       type);
			pr_info("  Initiator-Target[%d-%d]:%d%s\n",
				inits[init], targs[targ], value,
				hmat_data_type_suffix(type));

			if (mem_hier == ACPI_HMAT_MEMORY) {
				target = find_mem_target(targs[targ]);
				if (target && target->processor_pxm == inits[init])
					hmat_update_target_access(target, type, value);
			}
		}
	}

	if (mem_hier == ACPI_HMAT_MEMORY)
		hmat_add_locality(hmat_loc);

	return 0;
}

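/*
 * The cache_attributes field packs several values: cache level in bits
 * [7:4], associativity in bits [11:8], write policy in bits [15:12], and
 * cache line size in bits [31:16], matching the masks and shifts used
 * below.
 */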
static __init int hmat_parse_cache(union acpi_subtable_headers *header,
				   const unsigned long end)
{
	struct acpi_hmat_cache *cache = (void *)header;
	struct memory_target *target;
	struct target_cache *tcache;
	u32 attrs;

	if (cache->header.length < sizeof(*cache)) {
		pr_notice("HMAT: Unexpected cache header length: %d\n",
			 cache->header.length);
		return -EINVAL;
	}

	attrs = cache->cache_attributes;
	pr_info("HMAT: Cache: Domain:%d Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
		cache->memory_PD, cache->cache_size, attrs,
		cache->number_of_SMBIOShandles);

	target = find_mem_target(cache->memory_PD);
	if (!target)
		return 0;

	tcache = kzalloc(sizeof(*tcache), GFP_KERNEL);
	if (!tcache) {
		pr_notice_once("Failed to allocate HMAT cache info\n");
		return 0;
	}

	tcache->cache_attrs.size = cache->cache_size;
	tcache->cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
	tcache->cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;

	switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) {
	case ACPI_HMAT_CA_DIRECT_MAPPED:
		tcache->cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
		break;
	case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING:
		tcache->cache_attrs.indexing = NODE_CACHE_INDEXED;
		break;
	case ACPI_HMAT_CA_NONE:
	default:
		tcache->cache_attrs.indexing = NODE_CACHE_OTHER;
		break;
	}

	switch ((attrs & ACPI_HMAT_WRITE_POLICY) >> 12) {
	case ACPI_HMAT_CP_WB:
		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
		break;
	case ACPI_HMAT_CP_WT:
		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
		break;
	case ACPI_HMAT_CP_NONE:
	default:
		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
		break;
	}
	list_add_tail(&tcache->node, &target->caches);

	return 0;
}

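/*
 * Memory Proximity Domain Attributes Structure. Revision 1 tables reported
 * a physical address range here, which the revision 1 branch below prints
 * from fields that later revisions mark reserved.
 */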
static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *header,
					      const unsigned long end)
{
	struct acpi_hmat_proximity_domain *p = (void *)header;
	struct memory_target *target = NULL;

	if (p->header.length != sizeof(*p)) {
		pr_notice("HMAT: Unexpected address range header length: %d\n",
			 p->header.length);
		return -EINVAL;
	}

	if (hmat_revision == 1)
		pr_info("HMAT: Memory (%#llx length %#llx) Flags:%04x Processor Domain:%d Memory Domain:%d\n",
			p->reserved3, p->reserved4, p->flags, p->processor_PD,
			p->memory_PD);
	else
		pr_info("HMAT: Memory Flags:%04x Processor Domain:%d Memory Domain:%d\n",
			p->flags, p->processor_PD, p->memory_PD);

	if ((hmat_revision == 1 && p->flags & ACPI_HMAT_MEMORY_PD_VALID) ||
	    hmat_revision > 1) {
		target = find_mem_target(p->memory_PD);
		if (!target) {
			pr_debug("HMAT: Memory Domain missing from SRAT\n");
			return -EINVAL;
		}
	}
	if (target && p->flags & ACPI_HMAT_PROCESSOR_PD_VALID) {
		int p_node = pxm_to_node(p->processor_PD);

		if (p_node == NUMA_NO_NODE) {
			pr_debug("HMAT: Invalid Processor Domain\n");
			return -EINVAL;
		}
		target->processor_pxm = p_node;
	}

	return 0;
}

static int __init hmat_parse_subtable(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	struct acpi_hmat_structure *hdr = (void *)header;

	if (!hdr)
		return -EINVAL;

	switch (hdr->type) {
	case ACPI_HMAT_TYPE_PROXIMITY:
		return hmat_parse_proximity_domain(header, end);
	case ACPI_HMAT_TYPE_LOCALITY:
		return hmat_parse_locality(header, end);
	case ACPI_HMAT_TYPE_CACHE:
		return hmat_parse_cache(header, end);
	default:
		return -EINVAL;
	}
}

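/*
 * Seed the target list from SRAT memory affinity entries so that the HMAT
 * subtables parsed later can look up their memory proximity domains.
 */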
static __init int srat_parse_mem_affinity(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	struct acpi_srat_mem_affinity *ma = (void *)header;

	if (!ma)
		return -EINVAL;
	if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
		return 0;
	alloc_memory_target(ma->proximity_domain);
	return 0;
}

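/*
 * Look up the normalized performance value for a particular
 * initiator/target pair in a locality structure, or 0 if the pair is not
 * described by that structure.
 */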
static u32 hmat_initiator_perf(struct memory_target *target,
			       struct memory_initiator *initiator,
			       struct acpi_hmat_locality *hmat_loc)
{
	unsigned int ipds, tpds, i, idx = 0, tdx = 0;
	u32 *inits, *targs;
	u16 *entries;

	ipds = hmat_loc->number_of_initiator_Pds;
	tpds = hmat_loc->number_of_target_Pds;
	inits = (u32 *)(hmat_loc + 1);
	targs = inits + ipds;
	entries = (u16 *)(targs + tpds);

	for (i = 0; i < ipds; i++) {
		if (inits[i] == initiator->processor_pxm) {
			idx = i;
			break;
		}
	}

	if (i == ipds)
		return 0;

	for (i = 0; i < tpds; i++) {
		if (targs[i] == target->memory_pxm) {
			tdx = i;
			break;
		}
	}
	if (i == tpds)
		return 0;

	return hmat_normalize(entries[idx * tpds + tdx],
			      hmat_loc->entry_base_unit,
			      hmat_loc->data_type);
}

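/*
 * Track the best value seen so far for an attribute: lower is better for
 * latencies, higher is better for bandwidths. Returns true when *best was
 * updated.
 */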
static bool hmat_update_best(u8 type, u32 value, u32 *best)
{
	bool updated = false;

	if (!value)
		return false;

	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
	case ACPI_HMAT_READ_LATENCY:
	case ACPI_HMAT_WRITE_LATENCY:
		if (!*best || *best > value) {
			*best = value;
			updated = true;
		}
		break;
	case ACPI_HMAT_ACCESS_BANDWIDTH:
	case ACPI_HMAT_READ_BANDWIDTH:
	case ACPI_HMAT_WRITE_BANDWIDTH:
		if (!*best || *best < value) {
			*best = value;
			updated = true;
		}
		break;
	}

	return updated;
}

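/*
 * list_sort() compare callback: orders initiators by proximity domain and,
 * as a side effect, marks every known initiator in the candidate bitmap
 * passed in via @priv.
 */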
static int initiator_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct memory_initiator *ia;
	struct memory_initiator *ib;
	unsigned long *p_nodes = priv;

	ia = list_entry(a, struct memory_initiator, node);
	ib = list_entry(b, struct memory_initiator, node);

	set_bit(ia->processor_pxm, p_nodes);
	set_bit(ib->processor_pxm, p_nodes);

	return ia->processor_pxm - ib->processor_pxm;
}

static void hmat_register_target_initiators(struct memory_target *target)
{
	static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
	struct memory_initiator *initiator;
	unsigned int mem_nid, cpu_nid;
	struct memory_locality *loc = NULL;
	u32 best = 0;
	int i;

	mem_nid = pxm_to_node(target->memory_pxm);
	/*
	 * If the Address Range Structure provides a local processor pxm, link
	 * only that one. Otherwise, find the best performance attributes and
	 * register all initiators that match.
	 */
	if (target->processor_pxm != PXM_INVAL) {
		cpu_nid = pxm_to_node(target->processor_pxm);
		register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
		return;
	}

	if (list_empty(&localities))
		return;

	/*
	 * We need the initiator list sorted so we can use bitmap_clear for
	 * previously set initiators when we find a better memory accessor.
	 * We'll also use the sorting to prime the candidate nodes with known
	 * initiators.
	 */
	bitmap_zero(p_nodes, MAX_NUMNODES);
	list_sort(p_nodes, &initiators, initiator_cmp);
	for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
		loc = localities_types[i];
		if (!loc)
			continue;

		best = 0;
		list_for_each_entry(initiator, &initiators, node) {
			u32 value;

			if (!test_bit(initiator->processor_pxm, p_nodes))
				continue;

			value = hmat_initiator_perf(target, initiator, loc->hmat_loc);
			if (hmat_update_best(loc->hmat_loc->data_type, value, &best))
				bitmap_clear(p_nodes, 0, initiator->processor_pxm);
			if (value != best)
				clear_bit(initiator->processor_pxm, p_nodes);
		}
		if (best)
			hmat_update_target_access(target, loc->hmat_loc->data_type, best);
	}

	for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
		cpu_nid = pxm_to_node(i);
		register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
	}
}

static void hmat_register_target_cache(struct memory_target *target)
{
	unsigned mem_nid = pxm_to_node(target->memory_pxm);
	struct target_cache *tcache;

	list_for_each_entry(tcache, &target->caches, node)
		node_add_cache(mem_nid, &tcache->cache_attrs);
}

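/*
 * Publish the target's read/write latency and bandwidth with the node
 * core; these become the node's "access" class attributes in sysfs (see
 * Documentation/admin-guide/mm/numaperf.rst).
 */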
static void hmat_register_target_perf(struct memory_target *target)
{
	unsigned mem_nid = pxm_to_node(target->memory_pxm);
	node_set_perf_attrs(mem_nid, &target->hmem_attrs, 0);
}

static void hmat_register_target(struct memory_target *target)
{
	int nid = pxm_to_node(target->memory_pxm);

	/*
	 * Skip offline nodes. This can happen when memory
	 * marked EFI_MEMORY_SP, "specific purpose", is applied
	 * to all the memory in a proximity domain leading to
	 * the node being marked offline / unplugged, or if a
	 * memory-only "hotplug" node is offline.
	 */
	if (nid == NUMA_NO_NODE || !node_online(nid))
		return;

	mutex_lock(&target_lock);
	if (!target->registered) {
		hmat_register_target_initiators(target);
		hmat_register_target_cache(target);
		hmat_register_target_perf(target);
		target->registered = true;
	}
	mutex_unlock(&target_lock);
}

static void hmat_register_targets(void)
{
	struct memory_target *target;

	list_for_each_entry(target, &targets, node)
		hmat_register_target(target);
}

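/*
 * Memory hotplug notifier: when a node comes online, register the
 * corresponding HMAT target (if any) that could not be registered at boot
 * because its node was offline.
 */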
static int hmat_callback(struct notifier_block *self,
			 unsigned long action, void *arg)
{
	struct memory_target *target;
	struct memory_notify *mnb = arg;
	int pxm, nid = mnb->status_change_nid;

	if (nid == NUMA_NO_NODE || action != MEM_ONLINE)
		return NOTIFY_OK;

	pxm = node_to_pxm(nid);
	target = find_mem_target(pxm);
	if (!target)
		return NOTIFY_OK;

	hmat_register_target(target);
	return NOTIFY_OK;
}

static struct notifier_block hmat_callback_nb = {
	.notifier_call = hmat_callback,
	.priority = 2,
};

static __init void hmat_free_structures(void)
{
	struct memory_target *target, *tnext;
	struct memory_locality *loc, *lnext;
	struct memory_initiator *initiator, *inext;
	struct target_cache *tcache, *cnext;

	list_for_each_entry_safe(target, tnext, &targets, node) {
		list_for_each_entry_safe(tcache, cnext, &target->caches, node) {
			list_del(&tcache->node);
			kfree(tcache);
		}
		list_del(&target->node);
		kfree(target);
	}

	list_for_each_entry_safe(initiator, inext, &initiators, node) {
		list_del(&initiator->node);
		kfree(initiator);
	}

	list_for_each_entry_safe(loc, lnext, &localities, node) {
		list_del(&loc->node);
		kfree(loc);
	}
}

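/*
 * Parse SRAT memory affinity entries to discover memory proximity domains,
 * then walk every HMAT subtable of a supported revision (1 or 2) and
 * register the resulting targets. The parsed structures and the mapped
 * table are kept only if the memory hotplug notifier may still need them.
 */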
static __init int hmat_init(void)
{
	struct acpi_table_header *tbl;
	enum acpi_hmat_type i;
	acpi_status status;

	if (srat_disabled())
		return 0;

	status = acpi_get_table(ACPI_SIG_SRAT, 0, &tbl);
	if (ACPI_FAILURE(status))
		return 0;

	if (acpi_table_parse_entries(ACPI_SIG_SRAT,
				sizeof(struct acpi_table_srat),
				ACPI_SRAT_TYPE_MEMORY_AFFINITY,
				srat_parse_mem_affinity, 0) < 0)
		goto out_put;
	acpi_put_table(tbl);

	status = acpi_get_table(ACPI_SIG_HMAT, 0, &tbl);
	if (ACPI_FAILURE(status))
		goto out_put;

	hmat_revision = tbl->revision;
	switch (hmat_revision) {
	case 1:
	case 2:
		break;
	default:
		pr_notice("Ignoring HMAT: Unknown revision:%d\n", hmat_revision);
		goto out_put;
	}

	for (i = ACPI_HMAT_TYPE_PROXIMITY; i < ACPI_HMAT_TYPE_RESERVED; i++) {
		if (acpi_table_parse_entries(ACPI_SIG_HMAT,
					     sizeof(struct acpi_table_hmat), i,
					     hmat_parse_subtable, 0) < 0) {
			pr_notice("Ignoring HMAT: Invalid table");
			goto out_put;
		}
	}
	hmat_register_targets();

	/* Keep the table and structures if the notifier may use them */
	if (!register_hotmemory_notifier(&hmat_callback_nb))
		return 0;
out_put:
	hmat_free_structures();
	acpi_put_table(tbl);
	return 0;
}
subsys_initcall(hmat_init);