/*
 * Support for dynamic reconfiguration for PCI, Memory, and CPU
 * Hotplug and Dynamic Logical Partitioning on RPA platforms.
 *
 * Copyright (C) 2009 Nathan Fontenot
 * Copyright (C) 2009 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/of.h>
#include "offline_states.h"
#include "pseries.h"

#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/rtas.h>

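/*
 * Layout of the work area passed to the ibm,configure-connector RTAS
 * call in rtas_data_buf.  The name_offset and prop_offset values
 * returned by firmware are relative to the start of this buffer, and
 * all fields are big-endian as provided by RTAS (the call itself is
 * described in PAPR).
 */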
struct cc_workarea {
	__be32	drc_index;
	__be32	zero;
	__be32	name_offset;
	__be32	prop_length;
	__be32	prop_offset;
};

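/*
 * Free a property built by dlpar_parse_cc_property().  The name and
 * value are separate allocations (kstrdup/kmemdup), so both are freed
 * along with the property itself.
 */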
void dlpar_free_cc_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

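/*
 * Build a struct property from the current contents of the
 * configure-connector work area.  The name and value are copied out of
 * the work area so the property stays valid after the buffer is reused
 * for the next RTAS call.  Returns NULL on allocation failure.
 */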
static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
{
	struct property *prop;
	char *name;
	char *value;

	prop = kzalloc(sizeof(*prop), GFP_KERNEL);
	if (!prop)
		return NULL;

	name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
	prop->name = kstrdup(name, GFP_KERNEL);

	prop->length = be32_to_cpu(ccwa->prop_length);
	value = (char *)ccwa + be32_to_cpu(ccwa->prop_offset);
	prop->value = kmemdup(value, prop->length, GFP_KERNEL);
	if (!prop->value) {
		dlpar_free_cc_property(prop);
		return NULL;
	}

	return prop;
}

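/*
 * Allocate a bare device_node for the name reported in the work area,
 * with full_name built as "<parent path>/<name>".  Properties and the
 * parent/child/sibling links are filled in later by
 * dlpar_configure_connector().
 */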
static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
					       const char *path)
{
	struct device_node *dn;
	char *name;

	/* If parent node path is "/" advance path to NULL terminator to
	 * prevent double leading slashes in full_name.
	 */
	if (!path[1])
		path++;

	dn = kzalloc(sizeof(*dn), GFP_KERNEL);
	if (!dn)
		return NULL;

	name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
	dn->full_name = kasprintf(GFP_KERNEL, "%s/%s", path, name);
	if (!dn->full_name) {
		kfree(dn);
		return NULL;
	}

	of_node_set_flag(dn, OF_DYNAMIC);
	of_node_init(dn);

	return dn;
}

static void dlpar_free_one_cc_node(struct device_node *dn)
{
	struct property *prop;

	while (dn->properties) {
		prop = dn->properties;
		dn->properties = prop->next;
		dlpar_free_cc_property(prop);
	}

	kfree(dn->full_name);
	kfree(dn);
}

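/*
 * Free a subtree built by dlpar_configure_connector(), walking children
 * and siblings recursively.  This is intended only for nodes created by
 * the configure-connector path: it frees full_name and the property
 * list directly rather than going through of_node refcounting.
 */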
void dlpar_free_cc_nodes(struct device_node *dn)
{
	if (dn->child)
		dlpar_free_cc_nodes(dn->child);

	if (dn->sibling)
		dlpar_free_cc_nodes(dn->sibling);

	dlpar_free_one_cc_node(dn);
}

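/*
 * Return statuses from the ibm,configure-connector RTAS call.  The
 * switch in dlpar_configure_connector() treats NEXT_SIBLING, NEXT_CHILD
 * and NEXT_PROPERTY as "the work area now describes the next item",
 * PREV_PARENT as "move back up one level", CALL_AGAIN as a request to
 * simply repeat the call, and MORE_MEMORY/ERR_CFG_USE as errors.
 */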
#define COMPLETE	0
#define NEXT_SIBLING    1
#define NEXT_CHILD      2
#define NEXT_PROPERTY   3
#define PREV_PARENT     4
#define MORE_MEMORY     5
#define CALL_AGAIN	-2
#define ERR_CFG_USE     -9003

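/*
 * Repeatedly call ibm,configure-connector for @drc_index and assemble
 * the nodes and properties returned by firmware into a detached
 * device_node subtree rooted under @parent.  rtas_data_buf is shared,
 * so its contents are copied to and from a private buffer around each
 * call while holding rtas_data_buf_lock.
 *
 * The result is not attached to the live device tree; callers typically
 * follow up roughly as in this sketch (error handling omitted, see
 * dlpar_cpu_probe() below for the real sequence):
 *
 *	dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
 *	rc = dlpar_acquire_drc(drc_index);
 *	rc = dlpar_attach_node(dn);
 */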
struct device_node *dlpar_configure_connector(__be32 drc_index,
					      struct device_node *parent)
{
	struct device_node *dn;
	struct device_node *first_dn = NULL;
	struct device_node *last_dn = NULL;
	struct property *property;
	struct property *last_property = NULL;
	struct cc_workarea *ccwa;
	char *data_buf;
	const char *parent_path = parent->full_name;
	int cc_token;
	int rc = -1;

	cc_token = rtas_token("ibm,configure-connector");
	if (cc_token == RTAS_UNKNOWN_SERVICE)
		return NULL;

	data_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!data_buf)
		return NULL;

	ccwa = (struct cc_workarea *)&data_buf[0];
	ccwa->drc_index = drc_index;
	ccwa->zero = 0;

	do {
		/* Since we release the rtas_data_buf lock between configure
		 * connector calls we want to re-populate the rtas_data_buffer
		 * with the contents of the previous call.
		 */
		spin_lock(&rtas_data_buf_lock);

		memcpy(rtas_data_buf, data_buf, RTAS_DATA_BUF_SIZE);
		rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
		memcpy(data_buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);

		spin_unlock(&rtas_data_buf_lock);

		switch (rc) {
		case COMPLETE:
			break;

		case NEXT_SIBLING:
			dn = dlpar_parse_cc_node(ccwa, parent_path);
			if (!dn)
				goto cc_error;

			dn->parent = last_dn->parent;
			last_dn->sibling = dn;
			last_dn = dn;
			break;

		case NEXT_CHILD:
			if (first_dn)
				parent_path = last_dn->full_name;

			dn = dlpar_parse_cc_node(ccwa, parent_path);
			if (!dn)
				goto cc_error;

			if (!first_dn) {
				dn->parent = parent;
				first_dn = dn;
			} else {
				dn->parent = last_dn;
				if (last_dn)
					last_dn->child = dn;
			}

			last_dn = dn;
			break;

		case NEXT_PROPERTY:
			property = dlpar_parse_cc_property(ccwa);
			if (!property)
				goto cc_error;

			if (!last_dn->properties)
				last_dn->properties = property;
			else
				last_property->next = property;

			last_property = property;
			break;

		case PREV_PARENT:
			last_dn = last_dn->parent;
			parent_path = last_dn->parent->full_name;
			break;

		case CALL_AGAIN:
			break;

		case MORE_MEMORY:
		case ERR_CFG_USE:
		default:
			printk(KERN_ERR "Unexpected Error (%d) "
			       "returned from configure-connector\n", rc);
			goto cc_error;
		}
	} while (rc);

cc_error:
	kfree(data_buf);

	if (rc) {
		if (first_dn)
			dlpar_free_cc_nodes(first_dn);

		return NULL;
	}

	return first_dn;
}

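/*
 * Find the live device_node that should parent @path by looking up
 * everything before the last '/'.  The returned node carries a
 * reference from of_find_node_by_path() that the caller must drop.
 */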
static struct device_node *derive_parent(const char *path)
{
	struct device_node *parent;
	char *last_slash;

	last_slash = strrchr(path, '/');
	if (last_slash == path) {
		parent = of_find_node_by_path("/");
	} else {
		char *parent_path;
		int parent_path_len = last_slash - path + 1;
		parent_path = kmalloc(parent_path_len, GFP_KERNEL);
		if (!parent_path)
			return NULL;

		strlcpy(parent_path, path, parent_path_len);
		parent = of_find_node_by_path(parent_path);
		kfree(parent_path);
	}

	return parent;
}

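/*
 * Attach a node built by dlpar_configure_connector() to the live device
 * tree.  The parent is derived from the node's full_name, and the
 * reference taken by derive_parent() is dropped once the node has been
 * attached.
 */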
int dlpar_attach_node(struct device_node *dn)
{
	int rc;

	dn->parent = derive_parent(dn->full_name);
	if (!dn->parent)
		return -ENOMEM;

	rc = of_attach_node(dn);
	if (rc) {
		printk(KERN_ERR "Failed to add device node %s\n",
		       dn->full_name);
		return rc;
	}

	of_node_put(dn->parent);
	return 0;
}

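/*
 * Detach a node and, recursively, all of its children from the live
 * device tree.  The node structures themselves are released later via
 * the normal of_node refcounting once all users have dropped their
 * references.
 */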
int dlpar_detach_node(struct device_node *dn)
{
	struct device_node *child;
	int rc;

	child = of_get_next_child(dn, NULL);
	while (child) {
		dlpar_detach_node(child);
		child = of_get_next_child(dn, child);
	}

	rc = of_detach_node(dn);
	if (rc)
		return rc;

	return 0;
}

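/*
 * RTAS sensor and indicator tokens used for dynamic reconfiguration.
 * The dr-entity-sense sensor reports whether a connector is present or
 * unusable; the allocation-state and isolation-state indicators move a
 * connector between the platform and the partition.  (The token values
 * themselves come from PAPR.)
 */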
#define DR_ENTITY_SENSE		9003
#define DR_ENTITY_PRESENT	1
#define DR_ENTITY_UNUSABLE	2
#define ALLOCATION_STATE	9003
#define ALLOC_UNUSABLE		0
#define ALLOC_USABLE		1
#define ISOLATION_STATE		9001
#define ISOLATE			0
#define UNISOLATE		1

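/*
 * Claim a DRC for this partition: check that the connector is currently
 * unusable, mark its allocation state usable, then unisolate it.  If
 * the unisolate fails, the allocation state change is rolled back.
 */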
int dlpar_acquire_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
		       DR_ENTITY_SENSE, drc_index);
	if (rc || dr_status != DR_ENTITY_UNUSABLE)
		return -1;

	rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_USABLE);
	if (rc)
		return rc;

	rc = rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
	if (rc) {
		rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
		return rc;
	}

	return 0;
}

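/*
 * Return a DRC to the platform: the reverse of dlpar_acquire_drc().
 * The connector must currently be present; it is isolated and then its
 * allocation state is marked unusable, with a rollback if that fails.
 */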
int dlpar_release_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
		       DR_ENTITY_SENSE, drc_index);
	if (rc || dr_status != DR_ENTITY_PRESENT)
		return -1;

	rc = rtas_set_indicator(ISOLATION_STATE, drc_index, ISOLATE);
	if (rc)
		return rc;

	rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
	if (rc) {
		rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
		return rc;
	}

	return 0;
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE

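/*
 * Bring every hardware thread of the CPU node online.  Each thread in
 * ibm,ppc-interrupt-server#s is matched against the present CPUs by
 * hard SMP id; matching CPUs are expected to be CPU_STATE_OFFLINE and
 * are onlined through device_online().  The cpu_maps_update lock is
 * held for the scan but dropped around the online operation itself.
 */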
static int dlpar_online_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;
			BUG_ON(get_cpu_current_state(cpu)
					!= CPU_STATE_OFFLINE);
			cpu_maps_update_done();
			rc = device_online(get_cpu_device(cpu));
			if (rc)
				goto out;
			cpu_maps_update_begin();

			break;
		}
		if (cpu == num_possible_cpus())
			printk(KERN_WARNING "Could not find cpu to online "
			       "with physical id 0x%x\n", thread);
	}
	cpu_maps_update_done();

out:
	return rc;

}

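/*
 * Handler for the ppc_md.cpu_probe hook (reached through the generic
 * CPU probe sysfs interface).  @buf is expected to contain the
 * drc-index of the CPU to add; the node is built with
 * configure-connector, the DRC is acquired, the node is attached and
 * its threads are brought online.
 */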
static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
{
	struct device_node *dn, *parent;
	u32 drc_index;
	int rc;

	rc = kstrtou32(buf, 0, &drc_index);
	if (rc)
		return -EINVAL;

	parent = of_find_node_by_path("/cpus");
	if (!parent)
		return -ENODEV;

	dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
	if (!dn)
		return -EINVAL;

	of_node_put(parent);

	rc = dlpar_acquire_drc(drc_index);
	if (rc) {
		dlpar_free_cc_nodes(dn);
		return -EINVAL;
	}

	rc = dlpar_attach_node(dn);
	if (rc) {
		dlpar_release_drc(drc_index);
		dlpar_free_cc_nodes(dn);
		return rc;
	}

	rc = dlpar_online_cpu(dn);
	if (rc)
		return rc;

	return count;
}

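/*
 * Take every hardware thread of the CPU node offline.  Threads that are
 * already CPU_STATE_OFFLINE are skipped, online threads are offlined
 * through device_offline(), and threads left in CPU_STATE_INACTIVE are
 * prodded with H_PROD and released via __cpu_die() so they end up fully
 * offline.
 */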
static int dlpar_offline_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;

			if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE)
				break;

			if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
				set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
				cpu_maps_update_done();
				rc = device_offline(get_cpu_device(cpu));
				if (rc)
					goto out;
				cpu_maps_update_begin();
				break;

			}

			/*
			 * The cpu is in CPU_STATE_INACTIVE.
			 * Upgrade its state to CPU_STATE_OFFLINE.
			 */
			set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
			BUG_ON(plpar_hcall_norets(H_PROD, thread)
								!= H_SUCCESS);
			__cpu_die(cpu);
			break;
		}
		if (cpu == num_possible_cpus())
			printk(KERN_WARNING "Could not find cpu to offline "
			       "with physical id 0x%x\n", thread);
	}
	cpu_maps_update_done();

out:
	return rc;

}

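/*
 * Handler for the ppc_md.cpu_release hook.  @buf names the device tree
 * path of the CPU node to remove; its threads are taken offline, the
 * DRC is returned to the platform and the node is detached from the
 * device tree.
 */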
static ssize_t dlpar_cpu_release(const char *buf, size_t count)
{
	struct device_node *dn;
	u32 drc_index;
	int rc;

	dn = of_find_node_by_path(buf);
	if (!dn)
		return -EINVAL;

	rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
	if (rc) {
		of_node_put(dn);
		return -EINVAL;
	}

	rc = dlpar_offline_cpu(dn);
	if (rc) {
		of_node_put(dn);
		return -EINVAL;
	}

	rc = dlpar_release_drc(drc_index);
	if (rc) {
		of_node_put(dn);
		return rc;
	}

	rc = dlpar_detach_node(dn);
	if (rc) {
		dlpar_acquire_drc(drc_index);
		return rc;
	}

	of_node_put(dn);

	return count;
}

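/*
 * Wire the DLPAR handlers into the machine description so the generic
 * CPU probe/release interface reaches them on pseries.
 */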
static int __init pseries_dlpar_init(void)
{
	ppc_md.cpu_probe = dlpar_cpu_probe;
	ppc_md.cpu_release = dlpar_cpu_release;

	return 0;
}
machine_device_initcall(pseries, pseries_dlpar_init);

#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */