/*
 * pseries Memory Hotplug infrastructure.
 *
 * Copyright (C) 2008 Badari Pulavarty, IBM Corporation
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt)	"pseries-hotplug-mem: " fmt

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/slab.h>

#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/sparsemem.h>
#include <asm/fadump.h>
#include <asm/drmem.h>
#include "pseries.h"

static bool rtas_hp_event;

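/*
 * Determine the memory block size used for hotplug operations. Prefer
 * the "ibm,lmb-size" property of the dynamic-reconfiguration-memory
 * node; on pseries without that node, derive it from the memory node
 * that follows memory@0. Defaults to MIN_MEMORY_BLOCK_SIZE.
 */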
unsigned long pseries_memory_block_size(void)
{
	struct device_node *np;
	u64 memblock_size = MIN_MEMORY_BLOCK_SIZE;
	struct resource r;

	np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (np) {
		const __be64 *size;

		size = of_get_property(np, "ibm,lmb-size", NULL);
		if (size)
			memblock_size = be64_to_cpup(size);
		of_node_put(np);
	} else if (machine_is(pseries)) {
		/* This fallback really only applies to pseries */
		unsigned int memzero_size = 0;

		np = of_find_node_by_path("/memory@0");
		if (np) {
			if (!of_address_to_resource(np, 0, &r))
				memzero_size = resource_size(&r);
			of_node_put(np);
		}

		if (memzero_size) {
			/* We now know the size of memory@0, use this to find
			 * the first memoryblock and get its size.
			 */
			char buf[64];

			sprintf(buf, "/memory@%x", memzero_size);
			np = of_find_node_by_path(buf);
			if (np) {
				if (!of_address_to_resource(np, 0, &r))
					memblock_size = resource_size(&r);
				of_node_put(np);
			}
		}
	}
	return memblock_size;
}

static void dlpar_free_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

static struct property *dlpar_clone_property(struct property *prop,
					     u32 prop_size)
{
	struct property *new_prop;

	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
	if (!new_prop)
		return NULL;

	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
	new_prop->value = kzalloc(prop_size, GFP_KERNEL);
	if (!new_prop->name || !new_prop->value) {
		dlpar_free_property(new_prop);
		return NULL;
	}

	memcpy(new_prop->value, prop->value, prop->length);
	new_prop->length = prop_size;

	of_property_set_flag(new_prop, OF_DYNAMIC);
	return new_prop;
}

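/*
 * Look up the aa_index for an LMB's associativity in the
 * ibm,associativity-lookup-arrays property. If no matching array is
 * found, grow the property and append the new associativity, returning
 * the index of the appended entry. Returns false on allocation failure.
 */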
static bool find_aa_index(struct device_node *dr_node,
			  struct property *ala_prop,
			  const u32 *lmb_assoc, u32 *aa_index)
{
	u32 *assoc_arrays, new_prop_size;
	struct property *new_prop;
	int aa_arrays, aa_array_entries, aa_array_sz;
	int i, index;

	/*
	 * The ibm,associativity-lookup-arrays property is defined to be
	 * a 32-bit value specifying the number of associativity arrays
	 * followed by a 32-bit value specifying the number of entries per
	 * array, followed by the associativity arrays.
	 */
	assoc_arrays = ala_prop->value;

	aa_arrays = be32_to_cpu(assoc_arrays[0]);
	aa_array_entries = be32_to_cpu(assoc_arrays[1]);
	aa_array_sz = aa_array_entries * sizeof(u32);

	for (i = 0; i < aa_arrays; i++) {
		index = (i * aa_array_entries) + 2;

		if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
			continue;

		*aa_index = i;
		return true;
	}

	new_prop_size = ala_prop->length + aa_array_sz;
	new_prop = dlpar_clone_property(ala_prop, new_prop_size);
	if (!new_prop)
		return false;

	assoc_arrays = new_prop->value;

	/* increment the number of entries in the lookup array */
	assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);

	/* copy the new associativity into the lookup array */
	index = aa_arrays * aa_array_entries + 2;
	memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);

	of_update_property(dr_node, new_prop);

	/*
	 * The associativity lookup array index for this lmb is
	 * number of entries - 1 since we added its associativity
	 * to the end of the lookup array.
	 */
	*aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
	return true;
}

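/*
 * Retrieve the ibm,associativity property for this LMB via the DLPAR
 * configure-connector call and record the matching (or newly added)
 * lookup-array index in lmb->aa_index.
 */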
static int update_lmb_associativity_index(struct drmem_lmb *lmb)
{
	struct device_node *parent, *lmb_node, *dr_node;
	struct property *ala_prop;
	const u32 *lmb_assoc;
	u32 aa_index;
	bool found;

	parent = of_find_node_by_path("/");
	if (!parent)
		return -ENODEV;

	lmb_node = dlpar_configure_connector(cpu_to_be32(lmb->drc_index),
					     parent);
	of_node_put(parent);
	if (!lmb_node)
		return -EINVAL;

	lmb_assoc = of_get_property(lmb_node, "ibm,associativity", NULL);
	if (!lmb_assoc) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	dr_node = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dr_node) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	ala_prop = of_find_property(dr_node, "ibm,associativity-lookup-arrays",
				    NULL);
	if (!ala_prop) {
		of_node_put(dr_node);
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	found = find_aa_index(dr_node, ala_prop, lmb_assoc, &aa_index);

	of_node_put(dr_node);
	dlpar_free_cc_nodes(lmb_node);

	if (!found) {
		pr_err("Could not find LMB associativity\n");
		return -1;
	}

	lmb->aa_index = aa_index;
	return 0;
}

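/* Find the memory_block device backing the first section of this LMB. */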
static struct memory_block *lmb_to_memblock(struct drmem_lmb *lmb)
{
	unsigned long section_nr;
	struct mem_section *mem_sect;
	struct memory_block *mem_block;

	section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
	mem_sect = __nr_to_section(section_nr);

	mem_block = find_memory_block(mem_sect);
	return mem_block;
}

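/*
 * Locate the LMB with the given drc_index and return the range
 * [start_lmb, end_lmb) covering n_lmbs consecutive LMBs, verifying the
 * range does not run past the end of the drmem LMB array.
 */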
static int get_lmb_range(u32 drc_index, int n_lmbs,
			 struct drmem_lmb **start_lmb,
			 struct drmem_lmb **end_lmb)
{
	struct drmem_lmb *lmb, *start, *end;
	struct drmem_lmb *limit;

	start = NULL;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			start = lmb;
			break;
		}
	}

	if (!start)
		return -EINVAL;

	end = &start[n_lmbs];

	limit = &drmem_info->lmbs[drmem_info->n_lmbs];
	if (end > limit)
		return -EINVAL;

	*start_lmb = start;
	*end_lmb = end;
	return 0;
}

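/*
 * Online or offline the memory_block device backing an LMB. A request
 * that matches the block's current state is treated as a no-op.
 */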
static int dlpar_change_lmb_state(struct drmem_lmb *lmb, bool online)
{
	struct memory_block *mem_block;
	int rc;

	mem_block = lmb_to_memblock(lmb);
	if (!mem_block)
		return -EINVAL;

	if (online && mem_block->dev.offline)
		rc = device_online(&mem_block->dev);
	else if (!online && !mem_block->dev.offline)
		rc = device_offline(&mem_block->dev);
	else
		rc = 0;

	put_device(&mem_block->dev);

	return rc;
}

static int dlpar_online_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, true);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static int dlpar_offline_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, false);
}

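/*
 * Tear down the memory sections covering a memory@ device tree node and
 * remove the corresponding range from the memblock allocator.
 */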
static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
	unsigned long block_sz, start_pfn;
	int sections_per_block;
	int i, nid;

	start_pfn = base >> PAGE_SHIFT;

	lock_device_hotplug();

	if (!pfn_valid(start_pfn))
		goto out;

	block_sz = pseries_memory_block_size();
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
	nid = memory_add_physaddr_to_nid(base);

	for (i = 0; i < sections_per_block; i++) {
		__remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
		base += MIN_MEMORY_BLOCK_SIZE;
	}

out:
	/* Update memory regions for memory remove */
	memblock_remove(base, memblock_size);
	unlock_device_hotplug();
	return 0;
}

static int pseries_remove_mem_node(struct device_node *np)
{
	const char *type;
	const __be32 *regs;
	unsigned long base;
	unsigned int lmb_size;
	int ret = -EINVAL;

	/*
	 * Check to see if we are actually removing memory
	 */
	type = of_get_property(np, "device_type", NULL);
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;

	/*
	 * Find the base address and size of the memblock
	 */
	regs = of_get_property(np, "reg", NULL);
	if (!regs)
		return ret;

	base = be64_to_cpu(*(unsigned long *)regs);
	lmb_size = be32_to_cpu(regs[3]);

	pseries_remove_memblock(base, lmb_size);
	return 0;
}

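/*
 * An LMB can be hot-removed only if it is currently assigned, it is not
 * part of the fadump reserved area, and every present memory section it
 * spans is removable.
 */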
static bool lmb_is_removable(struct drmem_lmb *lmb)
{
	int i, scns_per_block;
	int rc = 1;
	unsigned long pfn, block_sz;
	u64 phys_addr;

	if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
		return false;

	block_sz = memory_block_size_bytes();
	scns_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
	phys_addr = lmb->base_addr;

#ifdef CONFIG_FA_DUMP
	/*
	 * Don't hot-remove memory that falls in fadump boot memory area
	 * and memory that is reserved for capturing old kernel memory.
	 */
	if (is_fadump_memory_area(phys_addr, block_sz))
		return false;
#endif

	for (i = 0; i < scns_per_block; i++) {
		pfn = PFN_DOWN(phys_addr);
		if (!pfn_present(pfn)) {
			phys_addr += MIN_MEMORY_BLOCK_SIZE;
			continue;
		}

		rc &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
		phys_addr += MIN_MEMORY_BLOCK_SIZE;
	}

	return rc ? true : false;
}

static int dlpar_add_lmb(struct drmem_lmb *);

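/*
 * Offline and remove a single LMB, invalidate its associativity index
 * and clear DRCONF_MEM_ASSIGNED. Callers release the DRC separately.
 */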
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
	unsigned long block_sz;
	int nid, rc;

	if (!lmb_is_removable(lmb))
		return -EINVAL;

	rc = dlpar_offline_lmb(lmb);
	if (rc)
		return rc;

	block_sz = pseries_memory_block_size();
	nid = memory_add_physaddr_to_nid(lmb->base_addr);

	__remove_memory(nid, lmb->base_addr, block_sz);

	/* Update memory regions for memory remove */
	memblock_remove(lmb->base_addr, block_sz);

	invalidate_lmb_associativity_index(lmb);
	lmb->flags &= ~DRCONF_MEM_ASSIGNED;

	return 0;
}

static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
	struct drmem_lmb *lmb;
	int lmbs_removed = 0;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb(lmb) {
		if (lmb_is_removable(lmb))
			lmbs_available++;

		if (lmbs_available == lmbs_to_remove)
			break;
	}

	if (lmbs_available < lmbs_to_remove) {
		pr_info("Not enough LMBs available (%d of %d) to satisfy request\n",
			lmbs_available, lmbs_to_remove);
		return -EINVAL;
	}

	for_each_drmem_lmb(lmb) {
		rc = dlpar_remove_lmb(lmb);
		if (rc)
			continue;

		/* Mark this lmb so we can add it later if all of the
		 * requested LMBs cannot be removed.
		 */
		drmem_mark_lmb_reserved(lmb);

		lmbs_removed++;
		if (lmbs_removed == lmbs_to_remove)
			break;
	}

	if (lmbs_removed != lmbs_to_remove) {
		pr_err("Memory hot-remove failed, adding LMB's back\n");

		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_add_lmb(lmb);
			if (rc)
				pr_err("Failed to add LMB back, drc index %x\n",
				       lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}

		rc = -EINVAL;
	} else {
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			dlpar_release_drc(lmb->drc_index);
			pr_info("Memory at %llx was hot-removed\n",
				lmb->base_addr);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = 0;
	}

	return rc;
}

static int dlpar_memory_remove_by_index(u32 drc_index)
{
	struct drmem_lmb *lmb;
	int lmb_found;
	int rc;

	pr_info("Attempting to hot-remove LMB, drc index %x\n", drc_index);

	lmb_found = 0;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_remove_lmb(lmb);
			if (!rc)
				dlpar_release_drc(lmb->drc_index);

			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to hot-remove memory at %llx\n",
			lmb->base_addr);
	else
		pr_info("Memory at %llx was hot-removed\n", lmb->base_addr);

	return rc;
}

static int dlpar_memory_readd_by_index(u32 drc_index)
{
	struct drmem_lmb *lmb;
	int lmb_found;
	int rc;

	pr_info("Attempting to update LMB, drc index %x\n", drc_index);

	lmb_found = 0;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_remove_lmb(lmb);
			if (!rc) {
				rc = dlpar_add_lmb(lmb);
				if (rc)
					dlpar_release_drc(lmb->drc_index);
			}
			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to update memory at %llx\n",
			lmb->base_addr);
	else
		pr_info("Memory at %llx was updated\n", lmb->base_addr);

	return rc;
}

static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
	struct drmem_lmb *lmb, *start_lmb, *end_lmb;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-remove %u LMB(s) at %x\n",
		lmbs_to_remove, drc_index);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	rc = get_lmb_range(drc_index, lmbs_to_remove, &start_lmb, &end_lmb);
	if (rc)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_RESERVED)
			break;

		lmbs_available++;
	}

	if (lmbs_available < lmbs_to_remove)
		return -EINVAL;

	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
			continue;

		rc = dlpar_remove_lmb(lmb);
		if (rc)
			break;

		drmem_mark_lmb_reserved(lmb);
	}

	if (rc) {
		pr_err("Memory indexed-count-remove failed, adding any removed LMBs\n");

		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_add_lmb(lmb);
			if (rc)
				pr_err("Failed to add LMB, drc index %x\n",
				       lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			dlpar_release_drc(lmb->drc_index);
			pr_info("Memory at %llx (drc index %x) was hot-removed\n",
				lmb->base_addr, lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
	}

	return rc;
}

#else
static inline int pseries_remove_memblock(unsigned long base,
					  unsigned int memblock_size)
{
	return -EOPNOTSUPP;
}
static inline int pseries_remove_mem_node(struct device_node *np)
{
	return 0;
}
static inline int dlpar_memory_remove(struct pseries_hp_errorlog *hp_elog)
{
	return -EOPNOTSUPP;
}
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_index(u32 drc_index)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_readd_by_index(u32 drc_index)
{
	return -EOPNOTSUPP;
}

static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

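/*
 * Add a single LMB: set up its associativity, add and online the
 * backing memory, and mark the LMB as assigned. The caller must already
 * hold the LMB's DRC.
 */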
static int dlpar_add_lmb(struct drmem_lmb *lmb)
{
	unsigned long block_sz;
	int nid, rc;

	if (lmb->flags & DRCONF_MEM_ASSIGNED)
		return -EINVAL;

	rc = update_lmb_associativity_index(lmb);
	if (rc) {
		dlpar_release_drc(lmb->drc_index);
		return rc;
	}

	block_sz = memory_block_size_bytes();

	/* Find the node id for this address */
	nid = memory_add_physaddr_to_nid(lmb->base_addr);

	/* Add the memory */
	rc = __add_memory(nid, lmb->base_addr, block_sz);
	if (rc) {
		invalidate_lmb_associativity_index(lmb);
		return rc;
	}

	rc = dlpar_online_lmb(lmb);
	if (rc) {
		__remove_memory(nid, lmb->base_addr, block_sz);
		invalidate_lmb_associativity_index(lmb);
	} else {
		lmb->flags |= DRCONF_MEM_ASSIGNED;
	}

	return rc;
}

static int dlpar_memory_add_by_count(u32 lmbs_to_add)
{
	struct drmem_lmb *lmb;
	int lmbs_available = 0;
	int lmbs_added = 0;
	int rc;

	pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);

	if (lmbs_to_add == 0)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb(lmb) {
		if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
			lmbs_available++;

		if (lmbs_available == lmbs_to_add)
			break;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for_each_drmem_lmb(lmb) {
		if (lmb->flags & DRCONF_MEM_ASSIGNED)
			continue;

		rc = dlpar_acquire_drc(lmb->drc_index);
		if (rc)
			continue;

		rc = dlpar_add_lmb(lmb);
		if (rc) {
			dlpar_release_drc(lmb->drc_index);
			continue;
		}

		/* Mark this lmb so we can remove it later if all of the
		 * requested LMBs cannot be added.
		 */
		drmem_mark_lmb_reserved(lmb);

		lmbs_added++;
		if (lmbs_added == lmbs_to_add)
			break;
	}

	if (lmbs_added != lmbs_to_add) {
		pr_err("Memory hot-add failed, removing any added LMBs\n");

		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_remove_lmb(lmb);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       lmb->drc_index);
			else
				dlpar_release_drc(lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmb->base_addr, lmb->drc_index);
			drmem_remove_lmb_reservation(lmb);
		}
		rc = 0;
	}

	return rc;
}

static int dlpar_memory_add_by_index(u32 drc_index)
{
	struct drmem_lmb *lmb;
	int rc, lmb_found;

	pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);

	lmb_found = 0;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_acquire_drc(lmb->drc_index);
			if (!rc) {
				rc = dlpar_add_lmb(lmb);
				if (rc)
					dlpar_release_drc(lmb->drc_index);
			}

			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
	else
		pr_info("Memory at %llx (drc index %x) was hot-added\n",
			lmb->base_addr, drc_index);

	return rc;
}

static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index)
{
	struct drmem_lmb *lmb, *start_lmb, *end_lmb;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-add %u LMB(s) at index %x\n",
		lmbs_to_add, drc_index);

	if (lmbs_to_add == 0)
		return -EINVAL;

	rc = get_lmb_range(drc_index, lmbs_to_add, &start_lmb, &end_lmb);
	if (rc)
		return -EINVAL;

	/* Validate that the LMBs in this range are not reserved */
	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_RESERVED)
			break;

		lmbs_available++;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_ASSIGNED)
			continue;

		rc = dlpar_acquire_drc(lmb->drc_index);
		if (rc)
			break;

		rc = dlpar_add_lmb(lmb);
		if (rc) {
			dlpar_release_drc(lmb->drc_index);
			break;
		}

		drmem_mark_lmb_reserved(lmb);
	}

	if (rc) {
		pr_err("Memory indexed-count-add failed, removing any added LMBs\n");

		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_remove_lmb(lmb);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       lmb->drc_index);
			else
				dlpar_release_drc(lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmb->base_addr, lmb->drc_index);
			drmem_remove_lmb_reservation(lmb);
		}
	}

	return rc;
}

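/*
 * Entry point for RTAS memory hotplug events: dispatch the requested
 * add/remove/readd action by count, drc index, or indexed count, then
 * rewrite the device tree via drmem_update_dt() on success.
 */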
int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
{
	u32 count, drc_index;
	int rc;

	lock_device_hotplug();

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_ADD:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) {
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_add_by_count(count);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) {
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_add_by_index(drc_index);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_IC) {
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_add_by_ic(count, drc_index);
		} else {
			rc = -EINVAL;
		}

		break;
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) {
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_remove_by_count(count);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) {
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_remove_by_index(drc_index);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_IC) {
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_remove_by_ic(count, drc_index);
		} else {
			rc = -EINVAL;
		}

		break;
	case PSERIES_HP_ELOG_ACTION_READD:
		drc_index = hp_elog->_drc_u.drc_index;
		rc = dlpar_memory_readd_by_index(drc_index);
		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	if (!rc) {
		rtas_hp_event = true;
		rc = drmem_update_dt();
		rtas_hp_event = false;
	}

	unlock_device_hotplug();
	return rc;
}

static int pseries_add_mem_node(struct device_node *np)
{
	const char *type;
	const __be32 *regs;
	unsigned long base;
	unsigned int lmb_size;
	int ret = -EINVAL;

	/*
	 * Check to see if we are actually adding memory
	 */
	type = of_get_property(np, "device_type", NULL);
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;

	/*
	 * Find the base and size of the memblock
	 */
	regs = of_get_property(np, "reg", NULL);
	if (!regs)
		return ret;

	base = be64_to_cpu(*(unsigned long *)regs);
	lmb_size = be32_to_cpu(regs[3]);

	/*
	 * Update memory region to represent the memory add
	 */
	ret = memblock_add(base, lmb_size);
	return (ret < 0) ? -EINVAL : 0;
}

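/*
 * Handle an update to the ibm,dynamic-memory property that did not
 * originate from an RTAS hotplug event, adding or removing memblock
 * ranges whose DRCONF_MEM_ASSIGNED flag changed.
 */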
static int pseries_update_drconf_memory(struct of_reconfig_data *pr)
{
	struct of_drconf_cell_v1 *new_drmem, *old_drmem;
	unsigned long memblock_size;
	u32 entries;
	__be32 *p;
	int i, rc = -EINVAL;

	if (rtas_hp_event)
		return 0;

	memblock_size = pseries_memory_block_size();
	if (!memblock_size)
		return -EINVAL;

	if (!pr->old_prop)
		return 0;

	p = (__be32 *) pr->old_prop->value;
	if (!p)
		return -EINVAL;

	/* The first int of the property is the number of lmb's described
	 * by the property. This is followed by an array of of_drconf_cell
	 * entries. Get the number of entries and skip to the array of
	 * of_drconf_cell's.
	 */
	entries = be32_to_cpu(*p++);
	old_drmem = (struct of_drconf_cell_v1 *)p;

	p = (__be32 *)pr->prop->value;
	p++;
	new_drmem = (struct of_drconf_cell_v1 *)p;

	for (i = 0; i < entries; i++) {
		if ((be32_to_cpu(old_drmem[i].flags) & DRCONF_MEM_ASSIGNED) &&
		    (!(be32_to_cpu(new_drmem[i].flags) & DRCONF_MEM_ASSIGNED))) {
			rc = pseries_remove_memblock(
				be64_to_cpu(old_drmem[i].base_addr),
						     memblock_size);
			break;
		} else if ((!(be32_to_cpu(old_drmem[i].flags) &
			    DRCONF_MEM_ASSIGNED)) &&
			    (be32_to_cpu(new_drmem[i].flags) &
			    DRCONF_MEM_ASSIGNED)) {
			rc = memblock_add(be64_to_cpu(old_drmem[i].base_addr),
					  memblock_size);
			rc = (rc < 0) ? -EINVAL : 0;
			break;
		}
	}
	return rc;
}

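/*
 * OF reconfiguration notifier: keep the memblock view of memory in sync
 * when memory nodes are attached or detached, or when the
 * ibm,dynamic-memory property is updated.
 */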
static int pseries_memory_notifier(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	int err = 0;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		err = pseries_add_mem_node(rd->dn);
		break;
	case OF_RECONFIG_DETACH_NODE:
		err = pseries_remove_mem_node(rd->dn);
		break;
	case OF_RECONFIG_UPDATE_PROPERTY:
		if (!strcmp(rd->prop->name, "ibm,dynamic-memory"))
			err = pseries_update_drconf_memory(rd);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block pseries_mem_nb = {
	.notifier_call = pseries_memory_notifier,
};

static int __init pseries_memory_hotplug_init(void)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		of_reconfig_notifier_register(&pseries_mem_nb);

	return 0;
}
machine_device_initcall(pseries, pseries_memory_hotplug_init);
1062