// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pseries Memory Hotplug infrastructure.
 *
 * Copyright (C) 2008 Badari Pulavarty, IBM Corporation
 */

#define pr_fmt(fmt)	"pseries-hotplug-mem: " fmt

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/slab.h>

#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/sparsemem.h>
#include <asm/fadump.h>
#include <asm/drmem.h>
#include "pseries.h"

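/*
 * Return the size of a hotpluggable memory block (LMB).  The size is read
 * from the "ibm,lmb-size" property of /ibm,dynamic-reconfiguration-memory;
 * if that node is absent, fall back to probing the /memory@... nodes.
 * Defaults to MIN_MEMORY_BLOCK_SIZE.
 */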
unsigned long pseries_memory_block_size(void)
{
	struct device_node *np;
	u64 memblock_size = MIN_MEMORY_BLOCK_SIZE;
	struct resource r;

	np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (np) {
		int len;
		int size_cells;
		const __be32 *prop;

		size_cells = of_n_size_cells(np);

		prop = of_get_property(np, "ibm,lmb-size", &len);
		if (prop && len >= size_cells * sizeof(__be32))
			memblock_size = of_read_number(prop, size_cells);
		of_node_put(np);

	} else if (machine_is(pseries)) {
		/* This fallback really only applies to pseries */
		unsigned int memzero_size = 0;

		np = of_find_node_by_path("/memory@0");
		if (np) {
			if (!of_address_to_resource(np, 0, &r))
				memzero_size = resource_size(&r);
			of_node_put(np);
		}

		if (memzero_size) {
			/* We now know the size of memory@0, use this to find
			 * the first memory block and get its size.
			 */
			char buf[64];

			sprintf(buf, "/memory@%x", memzero_size);
			np = of_find_node_by_path(buf);
			if (np) {
				if (!of_address_to_resource(np, 0, &r))
					memblock_size = resource_size(&r);
				of_node_put(np);
			}
		}
	}
	return memblock_size;
}

static void dlpar_free_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

static struct property *dlpar_clone_property(struct property *prop,
					     u32 prop_size)
{
	struct property *new_prop;

	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
	if (!new_prop)
		return NULL;

	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
	new_prop->value = kzalloc(prop_size, GFP_KERNEL);
	if (!new_prop->name || !new_prop->value) {
		dlpar_free_property(new_prop);
		return NULL;
	}

	memcpy(new_prop->value, prop->value, prop->length);
	new_prop->length = prop_size;

	of_property_set_flag(new_prop, OF_DYNAMIC);
	return new_prop;
}

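/*
 * Look up the index of the associativity array matching this LMB's
 * associativity in the "ibm,associativity-lookup-arrays" property.  If no
 * matching array exists, grow the property by one array and return the
 * index of the newly added entry.
 */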
static bool find_aa_index(struct device_node *dr_node,
			 struct property *ala_prop,
			 const u32 *lmb_assoc, u32 *aa_index)
{
	u32 *assoc_arrays, new_prop_size;
	struct property *new_prop;
	int aa_arrays, aa_array_entries, aa_array_sz;
	int i, index;

	/*
	 * The ibm,associativity-lookup-arrays property is defined to be
	 * a 32-bit value specifying the number of associativity arrays
	 * followed by a 32-bit value specifying the number of entries per
	 * array, followed by the associativity arrays.
	 */
	assoc_arrays = ala_prop->value;

	aa_arrays = be32_to_cpu(assoc_arrays[0]);
	aa_array_entries = be32_to_cpu(assoc_arrays[1]);
	aa_array_sz = aa_array_entries * sizeof(u32);

	for (i = 0; i < aa_arrays; i++) {
		index = (i * aa_array_entries) + 2;

		if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
			continue;

		*aa_index = i;
		return true;
	}

	new_prop_size = ala_prop->length + aa_array_sz;
	new_prop = dlpar_clone_property(ala_prop, new_prop_size);
	if (!new_prop)
		return false;

	assoc_arrays = new_prop->value;

	/* increment the number of entries in the lookup array */
	assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);

	/* copy the new associativity into the lookup array */
	index = aa_arrays * aa_array_entries + 2;
	memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);

	of_update_property(dr_node, new_prop);

	/*
	 * The associativity lookup array index for this lmb is
	 * number of entries - 1 since we added its associativity
	 * to the end of the lookup array.
	 */
	*aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
	return true;
}

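/*
 * Use the configure-connector sequence to fetch the device tree node for
 * this LMB, read its "ibm,associativity" property, and record the matching
 * lookup-array index in lmb->aa_index.
 */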
static int update_lmb_associativity_index(struct drmem_lmb *lmb)
{
	struct device_node *parent, *lmb_node, *dr_node;
	struct property *ala_prop;
	const u32 *lmb_assoc;
	u32 aa_index;
	bool found;

	parent = of_find_node_by_path("/");
	if (!parent)
		return -ENODEV;

	lmb_node = dlpar_configure_connector(cpu_to_be32(lmb->drc_index),
					     parent);
	of_node_put(parent);
	if (!lmb_node)
		return -EINVAL;

	lmb_assoc = of_get_property(lmb_node, "ibm,associativity", NULL);
	if (!lmb_assoc) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	update_numa_distance(lmb_node);

	dr_node = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dr_node) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	ala_prop = of_find_property(dr_node, "ibm,associativity-lookup-arrays",
				    NULL);
	if (!ala_prop) {
		of_node_put(dr_node);
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	found = find_aa_index(dr_node, ala_prop, lmb_assoc, &aa_index);

	of_node_put(dr_node);
	dlpar_free_cc_nodes(lmb_node);

	if (!found) {
		pr_err("Could not find LMB associativity\n");
		return -1;
	}

	lmb->aa_index = aa_index;
	return 0;
}

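/*
 * Map an LMB to the memory_block device backing its first memory section.
 * Callers drop the returned reference with put_device().
 */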
static struct memory_block *lmb_to_memblock(struct drmem_lmb *lmb)
{
	unsigned long section_nr;
	struct mem_section *mem_sect;
	struct memory_block *mem_block;

	section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
	mem_sect = __nr_to_section(section_nr);

	mem_block = find_memory_block(mem_sect);
	return mem_block;
}

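/*
 * Find the drmem LMB with the given DRC index and return the range
 * [start_lmb, end_lmb) covering n_lmbs consecutive LMBs, validating that
 * the range does not run past the end of the drmem LMB array.
 */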
static int get_lmb_range(u32 drc_index, int n_lmbs,
			 struct drmem_lmb **start_lmb,
			 struct drmem_lmb **end_lmb)
{
	struct drmem_lmb *lmb, *start, *end;
	struct drmem_lmb *limit;

	start = NULL;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			start = lmb;
			break;
		}
	}

	if (!start)
		return -EINVAL;

	end = &start[n_lmbs];

	limit = &drmem_info->lmbs[drmem_info->n_lmbs];
	if (end > limit)
		return -EINVAL;

	*start_lmb = start;
	*end_lmb = end;
	return 0;
}

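/* Online or offline the memory block backing an LMB via the driver core. */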
static int dlpar_change_lmb_state(struct drmem_lmb *lmb, bool online)
{
	struct memory_block *mem_block;
	int rc;

	mem_block = lmb_to_memblock(lmb);
	if (!mem_block)
		return -EINVAL;

	if (online && mem_block->dev.offline)
		rc = device_online(&mem_block->dev);
	else if (!online && !mem_block->dev.offline)
		rc = device_offline(&mem_block->dev);
	else
		rc = 0;

	put_device(&mem_block->dev);

	return rc;
}

static int dlpar_online_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, true);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static int dlpar_offline_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, false);
}

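/*
 * Remove the memory sections backing the memory block starting at 'base',
 * then update the memblock regions for the removal.
 */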
static int pseries_remove_memblock(unsigned long base, unsigned long memblock_size)
{
	unsigned long block_sz, start_pfn;
	int sections_per_block;
	int i, nid;

	start_pfn = base >> PAGE_SHIFT;

	lock_device_hotplug();

	if (!pfn_valid(start_pfn))
		goto out;

	block_sz = pseries_memory_block_size();
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
	nid = memory_add_physaddr_to_nid(base);

	for (i = 0; i < sections_per_block; i++) {
		__remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
		base += MIN_MEMORY_BLOCK_SIZE;
	}

out:
	/* Update memory regions for memory remove */
	memblock_remove(base, memblock_size);
	unlock_device_hotplug();
	return 0;
}

static int pseries_remove_mem_node(struct device_node *np)
{
	const __be32 *prop;
	unsigned long base;
	unsigned long lmb_size;
	int ret = -EINVAL;
	int addr_cells, size_cells;

	/*
	 * Check to see if we are actually removing memory
	 */
	if (!of_node_is_type(np, "memory"))
		return 0;

	/*
	 * Find the base address and size of the memblock
	 */
	prop = of_get_property(np, "reg", NULL);
	if (!prop)
		return ret;

	addr_cells = of_n_addr_cells(np);
	size_cells = of_n_size_cells(np);

	/*
	 * "reg" property represents (addr,size) tuple.
	 */
	base = of_read_number(prop, addr_cells);
	prop += addr_cells;
	lmb_size = of_read_number(prop, size_cells);

	pseries_remove_memblock(base, lmb_size);
	return 0;
}

static bool lmb_is_removable(struct drmem_lmb *lmb)
{
	if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
		return false;

#ifdef CONFIG_FA_DUMP
	/*
	 * Don't hot-remove memory that falls in fadump boot memory area
	 * and memory that is reserved for capturing old kernel memory.
	 */
	if (is_fadump_memory_area(lmb->base_addr, memory_block_size_bytes()))
		return false;
#endif
	/* device_offline() will determine if we can actually remove this lmb */
	return true;
}

static int dlpar_add_lmb(struct drmem_lmb *);

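/*
 * Offline and remove the memory backing an LMB, then invalidate its
 * associativity index and clear DRCONF_MEM_ASSIGNED.
 */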
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
	struct memory_block *mem_block;
	unsigned long block_sz;
	int rc;

	if (!lmb_is_removable(lmb))
		return -EINVAL;

	mem_block = lmb_to_memblock(lmb);
	if (mem_block == NULL)
		return -EINVAL;

	rc = dlpar_offline_lmb(lmb);
	if (rc) {
		put_device(&mem_block->dev);
		return rc;
	}

	block_sz = pseries_memory_block_size();

	__remove_memory(mem_block->nid, lmb->base_addr, block_sz);
	put_device(&mem_block->dev);

	/* Update memory regions for memory remove */
	memblock_remove(lmb->base_addr, block_sz);

	invalidate_lmb_associativity_index(lmb);
	lmb->flags &= ~DRCONF_MEM_ASSIGNED;

	return 0;
}

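/*
 * Remove lmbs_to_remove LMBs chosen from anywhere in the drmem array.  If
 * the full count cannot be removed, any LMBs already removed are added
 * back and the request fails.
 */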
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
	struct drmem_lmb *lmb;
	int lmbs_removed = 0;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb(lmb) {
		if (lmb_is_removable(lmb))
			lmbs_available++;

		if (lmbs_available == lmbs_to_remove)
			break;
	}

	if (lmbs_available < lmbs_to_remove) {
		pr_info("Not enough LMBs available (%d of %d) to satisfy request\n",
			lmbs_available, lmbs_to_remove);
		return -EINVAL;
	}

	for_each_drmem_lmb(lmb) {
		rc = dlpar_remove_lmb(lmb);
		if (rc)
			continue;

		/* Mark this lmb so we can add it later if all of the
		 * requested LMBs cannot be removed.
		 */
		drmem_mark_lmb_reserved(lmb);

		lmbs_removed++;
		if (lmbs_removed == lmbs_to_remove)
			break;
	}

	if (lmbs_removed != lmbs_to_remove) {
		pr_err("Memory hot-remove failed, adding LMBs back\n");

		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_add_lmb(lmb);
			if (rc)
				pr_err("Failed to add LMB back, drc index %x\n",
				       lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}

		rc = -EINVAL;
	} else {
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			dlpar_release_drc(lmb->drc_index);
			pr_info("Memory at %llx was hot-removed\n",
				lmb->base_addr);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = 0;
	}

	return rc;
}

static int dlpar_memory_remove_by_index(u32 drc_index)
{
	struct drmem_lmb *lmb;
	int lmb_found;
	int rc;

	pr_debug("Attempting to hot-remove LMB, drc index %x\n", drc_index);

	lmb_found = 0;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_remove_lmb(lmb);
			if (!rc)
				dlpar_release_drc(lmb->drc_index);

			break;
		}
	}

	if (!lmb_found) {
		pr_debug("Failed to look up LMB for drc index %x\n", drc_index);
		rc = -EINVAL;
	} else if (rc) {
		pr_debug("Failed to hot-remove memory at %llx\n",
			 lmb->base_addr);
	} else {
		pr_debug("Memory at %llx was hot-removed\n", lmb->base_addr);
	}

	return rc;
}

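/*
 * Indexed-count remove: hot-remove lmbs_to_remove contiguous LMBs starting
 * at the LMB with the given DRC index, rolling back on failure.
 */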
static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
	struct drmem_lmb *lmb, *start_lmb, *end_lmb;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-remove %u LMB(s) at %x\n",
		lmbs_to_remove, drc_index);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	rc = get_lmb_range(drc_index, lmbs_to_remove, &start_lmb, &end_lmb);
	if (rc)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_RESERVED)
			break;

		lmbs_available++;
	}

	if (lmbs_available < lmbs_to_remove)
		return -EINVAL;

	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
			continue;

		rc = dlpar_remove_lmb(lmb);
		if (rc)
			break;

		drmem_mark_lmb_reserved(lmb);
	}

	if (rc) {
		pr_err("Memory indexed-count-remove failed, adding any removed LMBs\n");

		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_add_lmb(lmb);
			if (rc)
				pr_err("Failed to add LMB, drc index %x\n",
				       lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			dlpar_release_drc(lmb->drc_index);
			pr_info("Memory at %llx (drc index %x) was hot-removed\n",
				lmb->base_addr, lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
	}

	return rc;
}

#else
static inline int pseries_remove_memblock(unsigned long base,
					  unsigned long memblock_size)
{
	return -EOPNOTSUPP;
}
static inline int pseries_remove_mem_node(struct device_node *np)
{
	return 0;
}
static inline int dlpar_memory_remove(struct pseries_hp_errorlog *hp_elog)
{
	return -EOPNOTSUPP;
}
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_index(u32 drc_index)
{
	return -EOPNOTSUPP;
}

static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

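/*
 * Add and online the memory backing an LMB, updating its associativity
 * index first.  Sets DRCONF_MEM_ASSIGNED on success.
 */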
static int dlpar_add_lmb(struct drmem_lmb *lmb)
{
	unsigned long block_sz;
	int nid, rc;

	if (lmb->flags & DRCONF_MEM_ASSIGNED)
		return -EINVAL;

	rc = update_lmb_associativity_index(lmb);
	if (rc) {
		dlpar_release_drc(lmb->drc_index);
		return rc;
	}

	block_sz = memory_block_size_bytes();

	/* Find the node id for this LMB.  Fake one if necessary. */
	nid = of_drconf_to_nid_single(lmb);
	if (nid < 0 || !node_possible(nid))
		nid = first_online_node;

	/* Add the memory */
	rc = __add_memory(nid, lmb->base_addr, block_sz, MHP_NONE);
	if (rc) {
		invalidate_lmb_associativity_index(lmb);
		return rc;
	}

	rc = dlpar_online_lmb(lmb);
	if (rc) {
		__remove_memory(nid, lmb->base_addr, block_sz);
		invalidate_lmb_associativity_index(lmb);
	} else {
		lmb->flags |= DRCONF_MEM_ASSIGNED;
	}

	return rc;
}

static int dlpar_memory_add_by_count(u32 lmbs_to_add)
{
	struct drmem_lmb *lmb;
	int lmbs_available = 0;
	int lmbs_added = 0;
	int rc;

	pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);

	if (lmbs_to_add == 0)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb(lmb) {
		if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
			lmbs_available++;

		if (lmbs_available == lmbs_to_add)
			break;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for_each_drmem_lmb(lmb) {
		if (lmb->flags & DRCONF_MEM_ASSIGNED)
			continue;

		rc = dlpar_acquire_drc(lmb->drc_index);
		if (rc)
			continue;

		rc = dlpar_add_lmb(lmb);
		if (rc) {
			dlpar_release_drc(lmb->drc_index);
			continue;
		}

		/* Mark this lmb so we can remove it later if all of the
		 * requested LMBs cannot be added.
		 */
		drmem_mark_lmb_reserved(lmb);

		lmbs_added++;
		if (lmbs_added == lmbs_to_add)
			break;
	}

	if (lmbs_added != lmbs_to_add) {
		pr_err("Memory hot-add failed, removing any added LMBs\n");

		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_remove_lmb(lmb);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       lmb->drc_index);
			else
				dlpar_release_drc(lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			pr_debug("Memory at %llx (drc index %x) was hot-added\n",
				 lmb->base_addr, lmb->drc_index);
			drmem_remove_lmb_reservation(lmb);
		}
		rc = 0;
	}

	return rc;
}

static int dlpar_memory_add_by_index(u32 drc_index)
{
	struct drmem_lmb *lmb;
	int rc, lmb_found;

	pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);

	lmb_found = 0;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_acquire_drc(lmb->drc_index);
			if (!rc) {
				rc = dlpar_add_lmb(lmb);
				if (rc)
					dlpar_release_drc(lmb->drc_index);
			}

			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
	else
		pr_info("Memory at %llx (drc index %x) was hot-added\n",
			lmb->base_addr, drc_index);

	return rc;
}

static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index)
{
	struct drmem_lmb *lmb, *start_lmb, *end_lmb;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-add %u LMB(s) at index %x\n",
		lmbs_to_add, drc_index);

	if (lmbs_to_add == 0)
		return -EINVAL;

	rc = get_lmb_range(drc_index, lmbs_to_add, &start_lmb, &end_lmb);
	if (rc)
		return -EINVAL;

	/* Validate that the LMBs in this range are not reserved */
	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_RESERVED)
			break;

		lmbs_available++;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_ASSIGNED)
			continue;

		rc = dlpar_acquire_drc(lmb->drc_index);
		if (rc)
			break;

		rc = dlpar_add_lmb(lmb);
		if (rc) {
			dlpar_release_drc(lmb->drc_index);
			break;
		}

		drmem_mark_lmb_reserved(lmb);
	}

	if (rc) {
		pr_err("Memory indexed-count-add failed, removing any added LMBs\n");

		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_remove_lmb(lmb);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       lmb->drc_index);
			else
				dlpar_release_drc(lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmb->base_addr, lmb->drc_index);
			drmem_remove_lmb_reservation(lmb);
		}
	}

	return rc;
}

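/*
 * Top-level handler for memory DLPAR requests.  Dispatches add and remove
 * actions by DRC count, DRC index, or indexed-count, and updates the drmem
 * device tree representation on success.
 */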
int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
{
	u32 count, drc_index;
	int rc;

	lock_device_hotplug();

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_ADD:
		switch (hp_elog->id_type) {
		case PSERIES_HP_ELOG_ID_DRC_COUNT:
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_add_by_count(count);
			break;
		case PSERIES_HP_ELOG_ID_DRC_INDEX:
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_add_by_index(drc_index);
			break;
		case PSERIES_HP_ELOG_ID_DRC_IC:
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_add_by_ic(count, drc_index);
			break;
		default:
			rc = -EINVAL;
			break;
		}

		break;
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		switch (hp_elog->id_type) {
		case PSERIES_HP_ELOG_ID_DRC_COUNT:
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_remove_by_count(count);
			break;
		case PSERIES_HP_ELOG_ID_DRC_INDEX:
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_remove_by_index(drc_index);
			break;
		case PSERIES_HP_ELOG_ID_DRC_IC:
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_remove_by_ic(count, drc_index);
			break;
		default:
			rc = -EINVAL;
			break;
		}

		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	if (!rc)
		rc = drmem_update_dt();

	unlock_device_hotplug();
	return rc;
}

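/*
 * Register the memory range described by a newly attached memory node
 * with memblock.
 */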
static int pseries_add_mem_node(struct device_node *np)
{
	const __be32 *prop;
	unsigned long base;
	unsigned long lmb_size;
	int ret = -EINVAL;
	int addr_cells, size_cells;

	/*
	 * Check to see if we are actually adding memory
	 */
	if (!of_node_is_type(np, "memory"))
		return 0;

	/*
	 * Find the base and size of the memblock
	 */
	prop = of_get_property(np, "reg", NULL);
	if (!prop)
		return ret;

	addr_cells = of_n_addr_cells(np);
	size_cells = of_n_size_cells(np);
	/*
	 * "reg" property represents (addr,size) tuple.
	 */
	base = of_read_number(prop, addr_cells);
	prop += addr_cells;
	lmb_size = of_read_number(prop, size_cells);

	/*
	 * Update memory region to represent the memory add
	 */
	ret = memblock_add(base, lmb_size);
	return (ret < 0) ? -EINVAL : 0;
}

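/*
 * OF reconfiguration notifier: handle memory nodes being attached to or
 * detached from the device tree.
 */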
static int pseries_memory_notifier(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	int err = 0;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		err = pseries_add_mem_node(rd->dn);
		break;
	case OF_RECONFIG_DETACH_NODE:
		err = pseries_remove_mem_node(rd->dn);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block pseries_mem_nb = {
	.notifier_call = pseries_memory_notifier,
};

static int __init pseries_memory_hotplug_init(void)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		of_reconfig_notifier_register(&pseries_mem_nb);

	return 0;
}
machine_device_initcall(pseries, pseries_memory_hotplug_init);