/*
 * Support for dynamic reconfiguration for PCI, Memory, and CPU
 * Hotplug and Dynamic Logical Partitioning on RPA platforms.
 *
 * Copyright (C) 2009 Nathan Fontenot
 * Copyright (C) 2009 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 */

#define pr_fmt(fmt) "dlpar: " fmt

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/of.h>

#include "of_helpers.h"
#include "pseries.h"

#include <asm/prom.h>
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/rtas.h>

static struct workqueue_struct *pseries_hp_wq;

struct pseries_hp_work {
        struct work_struct work;
        struct pseries_hp_errorlog *errlog;
        struct completion *hp_completion;
        int *rc;
};

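/*
 * Layout of the work area handed to the ibm,configure-connector RTAS call;
 * the offsets locate the node/property name and property value it returns.
 */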
struct cc_workarea {
        __be32 drc_index;
        __be32 zero;
        __be32 name_offset;
        __be32 prop_length;
        __be32 prop_offset;
};

void dlpar_free_cc_property(struct property *prop)
{
        kfree(prop->name);
        kfree(prop->value);
        kfree(prop);
}

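/*
 * Build a struct property from the name and value returned in the
 * configure-connector work area.
 */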
static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
{
        struct property *prop;
        char *name;
        char *value;

        prop = kzalloc(sizeof(*prop), GFP_KERNEL);
        if (!prop)
                return NULL;

        name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
        prop->name = kstrdup(name, GFP_KERNEL);
        if (!prop->name) {
                dlpar_free_cc_property(prop);
                return NULL;
        }

        prop->length = be32_to_cpu(ccwa->prop_length);
        value = (char *)ccwa + be32_to_cpu(ccwa->prop_offset);
        prop->value = kmemdup(value, prop->length, GFP_KERNEL);
        if (!prop->value) {
                dlpar_free_cc_property(prop);
                return NULL;
        }

        return prop;
}

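/*
 * Allocate a new device node whose full_name is built from the parent
 * path and the node name returned in the configure-connector work area.
 */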
static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
                                               const char *path)
{
        struct device_node *dn;
        char *name;

        /*
         * If the parent node path is "/", advance path to the NULL
         * terminator to prevent double leading slashes in full_name.
         */
        if (!path[1])
                path++;

        dn = kzalloc(sizeof(*dn), GFP_KERNEL);
        if (!dn)
                return NULL;

        name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
        dn->full_name = kasprintf(GFP_KERNEL, "%s/%s", path, name);
        if (!dn->full_name) {
                kfree(dn);
                return NULL;
        }

        of_node_set_flag(dn, OF_DYNAMIC);
        of_node_init(dn);

        return dn;
}

static void dlpar_free_one_cc_node(struct device_node *dn)
{
        struct property *prop;

        while (dn->properties) {
                prop = dn->properties;
                dn->properties = prop->next;
                dlpar_free_cc_property(prop);
        }

        kfree(dn->full_name);
        kfree(dn);
}

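/* Recursively free a device sub-tree built by dlpar_configure_connector(). */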
void dlpar_free_cc_nodes(struct device_node *dn)
{
        if (dn->child)
                dlpar_free_cc_nodes(dn->child);

        if (dn->sibling)
                dlpar_free_cc_nodes(dn->sibling);

        dlpar_free_one_cc_node(dn);
}

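/* Return codes from the ibm,configure-connector RTAS call. */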
#define COMPLETE        0
#define NEXT_SIBLING    1
#define NEXT_CHILD      2
#define NEXT_PROPERTY   3
#define PREV_PARENT     4
#define MORE_MEMORY     5
#define CALL_AGAIN      -2
#define ERR_CFG_USE     -9003

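/*
 * Repeatedly invoke the ibm,configure-connector RTAS call for @drc_index,
 * assembling the nodes and properties it reports into a device sub-tree
 * under @parent.  Returns the first new node, or NULL on failure.
 */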
struct device_node *dlpar_configure_connector(__be32 drc_index,
                                              struct device_node *parent)
{
        struct device_node *dn;
        struct device_node *first_dn = NULL;
        struct device_node *last_dn = NULL;
        struct property *property;
        struct property *last_property = NULL;
        struct cc_workarea *ccwa;
        char *data_buf;
        const char *parent_path = parent->full_name;
        int cc_token;
        int rc = -1;

        cc_token = rtas_token("ibm,configure-connector");
        if (cc_token == RTAS_UNKNOWN_SERVICE)
                return NULL;

        data_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
        if (!data_buf)
                return NULL;

        ccwa = (struct cc_workarea *)&data_buf[0];
        ccwa->drc_index = drc_index;
        ccwa->zero = 0;

        do {
                /* Since we release the rtas_data_buf lock between configure
                 * connector calls we want to re-populate the rtas_data_buffer
                 * with the contents of the previous call.
                 */
                spin_lock(&rtas_data_buf_lock);

                memcpy(rtas_data_buf, data_buf, RTAS_DATA_BUF_SIZE);
                rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
                memcpy(data_buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);

                spin_unlock(&rtas_data_buf_lock);

                switch (rc) {
                case COMPLETE:
                        break;

                case NEXT_SIBLING:
                        dn = dlpar_parse_cc_node(ccwa, parent_path);
                        if (!dn)
                                goto cc_error;

                        dn->parent = last_dn->parent;
                        last_dn->sibling = dn;
                        last_dn = dn;
                        break;

                case NEXT_CHILD:
                        if (first_dn)
                                parent_path = last_dn->full_name;

                        dn = dlpar_parse_cc_node(ccwa, parent_path);
                        if (!dn)
                                goto cc_error;

                        if (!first_dn) {
                                dn->parent = parent;
                                first_dn = dn;
                        } else {
                                dn->parent = last_dn;
                                if (last_dn)
                                        last_dn->child = dn;
                        }

                        last_dn = dn;
                        break;

                case NEXT_PROPERTY:
                        property = dlpar_parse_cc_property(ccwa);
                        if (!property)
                                goto cc_error;

                        if (!last_dn->properties)
                                last_dn->properties = property;
                        else
                                last_property->next = property;

                        last_property = property;
                        break;

                case PREV_PARENT:
                        last_dn = last_dn->parent;
                        parent_path = last_dn->parent->full_name;
                        break;

                case CALL_AGAIN:
                        break;

                case MORE_MEMORY:
                case ERR_CFG_USE:
                default:
                        pr_err("Unexpected error (%d) returned from configure-connector\n",
                               rc);
                        goto cc_error;
                }
        } while (rc);

cc_error:
        kfree(data_buf);

        if (rc) {
                if (first_dn)
                        dlpar_free_cc_nodes(first_dn);

                return NULL;
        }

        return first_dn;
}

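/* Add a dynamically configured device node to the live device tree. */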
int dlpar_attach_node(struct device_node *dn, struct device_node *parent)
{
        int rc;

        dn->parent = parent;

        rc = of_attach_node(dn);
        if (rc) {
                printk(KERN_ERR "Failed to add device node %pOF\n", dn);
                return rc;
        }

        return 0;
}

int dlpar_detach_node(struct device_node *dn)
{
        struct device_node *child;
        int rc;

        child = of_get_next_child(dn, NULL);
        while (child) {
                dlpar_detach_node(child);
                child = of_get_next_child(dn, child);
        }

        rc = of_detach_node(dn);
        if (rc)
                return rc;

        of_node_put(dn);

        return 0;
}

#define DR_ENTITY_SENSE         9003
#define DR_ENTITY_PRESENT       1
#define DR_ENTITY_UNUSABLE      2
#define ALLOCATION_STATE        9003
#define ALLOC_UNUSABLE          0
#define ALLOC_USABLE            1
#define ISOLATION_STATE         9001
#define ISOLATE                 0
#define UNISOLATE               1

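/*
 * Acquire a dynamic reconfiguration connector: confirm it is currently
 * unusable, then mark it usable and un-isolate it.
 */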
int dlpar_acquire_drc(u32 drc_index)
{
        int dr_status, rc;

        rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
                       DR_ENTITY_SENSE, drc_index);
        if (rc || dr_status != DR_ENTITY_UNUSABLE)
                return -1;

        rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_USABLE);
        if (rc)
                return rc;

        rc = rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
        if (rc) {
                rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
                return rc;
        }

        return 0;
}

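/*
 * Release a dynamic reconfiguration connector: confirm it is present,
 * then isolate it and mark it unusable again.
 */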
int dlpar_release_drc(u32 drc_index)
{
        int dr_status, rc;

        rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
                       DR_ENTITY_SENSE, drc_index);
        if (rc || dr_status != DR_ENTITY_PRESENT)
                return -1;

        rc = rtas_set_indicator(ISOLATION_STATE, drc_index, ISOLATE);
        if (rc)
                return rc;

        rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
        if (rc) {
                rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
                return rc;
        }

        return 0;
}

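/* Dispatch a hotplug request to the memory or CPU DLPAR handler. */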
static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
{
        int rc;

        /* pseries error logs are in BE format, convert to cpu type */
        switch (hp_elog->id_type) {
        case PSERIES_HP_ELOG_ID_DRC_COUNT:
                hp_elog->_drc_u.drc_count =
                                be32_to_cpu(hp_elog->_drc_u.drc_count);
                break;
        case PSERIES_HP_ELOG_ID_DRC_INDEX:
                hp_elog->_drc_u.drc_index =
                                be32_to_cpu(hp_elog->_drc_u.drc_index);
                break;
        case PSERIES_HP_ELOG_ID_DRC_IC:
                hp_elog->_drc_u.ic.count =
                                be32_to_cpu(hp_elog->_drc_u.ic.count);
                hp_elog->_drc_u.ic.index =
                                be32_to_cpu(hp_elog->_drc_u.ic.index);
        }

        switch (hp_elog->resource) {
        case PSERIES_HP_ELOG_RESOURCE_MEM:
                rc = dlpar_memory(hp_elog);
                break;
        case PSERIES_HP_ELOG_RESOURCE_CPU:
                rc = dlpar_cpu(hp_elog);
                break;
        default:
                pr_warn_ratelimited("Invalid resource (%d) specified\n",
                                    hp_elog->resource);
                rc = -EINVAL;
        }

        return rc;
}

static void pseries_hp_work_fn(struct work_struct *work)
{
        struct pseries_hp_work *hp_work =
                container_of(work, struct pseries_hp_work, work);

        if (hp_work->rc)
                *(hp_work->rc) = handle_dlpar_errorlog(hp_work->errlog);
        else
                handle_dlpar_errorlog(hp_work->errlog);

        if (hp_work->hp_completion)
                complete(hp_work->hp_completion);

        kfree(hp_work->errlog);
        kfree(hp_work);
}

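/*
 * Queue a hotplug request on the pseries hotplug workqueue.  The error log
 * is copied so the caller's buffer need not outlive the request; the
 * optional @hotplug_done and @rc report completion and status to the caller.
 */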
void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog,
                         struct completion *hotplug_done, int *rc)
{
        struct pseries_hp_work *work;
        struct pseries_hp_errorlog *hp_errlog_copy;

        hp_errlog_copy = kmemdup(hp_errlog, sizeof(*hp_errlog), GFP_KERNEL);
        if (!hp_errlog_copy)
                goto err;

        work = kmalloc(sizeof(*work), GFP_KERNEL);
        if (!work) {
                kfree(hp_errlog_copy);
                goto err;
        }

        INIT_WORK(&work->work, pseries_hp_work_fn);
        work->errlog = hp_errlog_copy;
        work->hp_completion = hotplug_done;
        work->rc = rc;
        queue_work(pseries_hp_wq, &work->work);
        return;

err:
        /* Allocation failed; report -ENOMEM to the caller if requested. */
        if (rc)
                *rc = -ENOMEM;
        if (hotplug_done)
                complete(hotplug_done);
}

static int dlpar_parse_resource(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
        char *arg;

        arg = strsep(cmd, " ");
        if (!arg)
                return -EINVAL;

        if (sysfs_streq(arg, "memory")) {
                hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
        } else if (sysfs_streq(arg, "cpu")) {
                hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_CPU;
        } else {
                pr_err("Invalid resource specified.\n");
                return -EINVAL;
        }

        return 0;
}

static int dlpar_parse_action(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
        char *arg;

        arg = strsep(cmd, " ");
        if (!arg)
                return -EINVAL;

        if (sysfs_streq(arg, "add")) {
                hp_elog->action = PSERIES_HP_ELOG_ACTION_ADD;
        } else if (sysfs_streq(arg, "remove")) {
                hp_elog->action = PSERIES_HP_ELOG_ACTION_REMOVE;
        } else {
                pr_err("Invalid action specified.\n");
                return -EINVAL;
        }

        return 0;
}

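/*
 * Parse the id_type portion of a sysfs DLPAR request.  Accepted forms are
 * "index <drc_index>", "count <drc_count>" and
 * "indexed-count <drc_count> <drc_index>".
 */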
static int dlpar_parse_id_type(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
        char *arg;
        u32 count, index;

        arg = strsep(cmd, " ");
        if (!arg)
                return -EINVAL;

        if (sysfs_streq(arg, "indexed-count")) {
                hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_IC;
                arg = strsep(cmd, " ");
                if (!arg) {
                        pr_err("No DRC count specified.\n");
                        return -EINVAL;
                }

                if (kstrtou32(arg, 0, &count)) {
                        pr_err("Invalid DRC count specified.\n");
                        return -EINVAL;
                }

                arg = strsep(cmd, " ");
                if (!arg) {
                        pr_err("No DRC Index specified.\n");
                        return -EINVAL;
                }

                if (kstrtou32(arg, 0, &index)) {
                        pr_err("Invalid DRC Index specified.\n");
                        return -EINVAL;
                }

                hp_elog->_drc_u.ic.count = cpu_to_be32(count);
                hp_elog->_drc_u.ic.index = cpu_to_be32(index);
        } else if (sysfs_streq(arg, "index")) {
                hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
                arg = strsep(cmd, " ");
                if (!arg) {
                        pr_err("No DRC Index specified.\n");
                        return -EINVAL;
                }

                if (kstrtou32(arg, 0, &index)) {
                        pr_err("Invalid DRC Index specified.\n");
                        return -EINVAL;
                }

                hp_elog->_drc_u.drc_index = cpu_to_be32(index);
        } else if (sysfs_streq(arg, "count")) {
                hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_COUNT;
                arg = strsep(cmd, " ");
                if (!arg) {
                        pr_err("No DRC count specified.\n");
                        return -EINVAL;
                }

                if (kstrtou32(arg, 0, &count)) {
                        pr_err("Invalid DRC count specified.\n");
                        return -EINVAL;
                }

                hp_elog->_drc_u.drc_count = cpu_to_be32(count);
        } else {
                pr_err("Invalid id_type specified.\n");
                return -EINVAL;
        }

        return 0;
}

static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
                           const char *buf, size_t count)
{
        struct pseries_hp_errorlog *hp_elog;
        struct completion hotplug_done;
        char *argbuf;
        char *args;
        int rc;

        args = argbuf = kstrdup(buf, GFP_KERNEL);
        hp_elog = kzalloc(sizeof(*hp_elog), GFP_KERNEL);
        if (!hp_elog || !argbuf) {
                pr_info("Could not allocate resources for DLPAR operation\n");
                kfree(argbuf);
                kfree(hp_elog);
                return -ENOMEM;
        }

        /*
         * Parse out the request from the user, this will be in the form:
         * <resource> <action> <id_type> <id>
         */
        rc = dlpar_parse_resource(&args, hp_elog);
        if (rc)
                goto dlpar_store_out;

        rc = dlpar_parse_action(&args, hp_elog);
        if (rc)
                goto dlpar_store_out;

        rc = dlpar_parse_id_type(&args, hp_elog);
        if (rc)
                goto dlpar_store_out;

        init_completion(&hotplug_done);
        queue_hotplug_event(hp_elog, &hotplug_done, &rc);
        wait_for_completion(&hotplug_done);

dlpar_store_out:
        kfree(argbuf);
        kfree(hp_elog);

        if (rc)
                pr_err("Could not handle DLPAR request \"%s\"\n", buf);

        return rc ? rc : count;
}

static ssize_t dlpar_show(struct class *class, struct class_attribute *attr,
                          char *buf)
{
        return sprintf(buf, "%s\n", "memory,cpu");
}

static CLASS_ATTR_RW(dlpar);

int __init dlpar_workqueue_init(void)
{
        if (pseries_hp_wq)
                return 0;

        pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue",
                                        WQ_UNBOUND, 1);

        return pseries_hp_wq ? 0 : -ENOMEM;
}

static int __init dlpar_sysfs_init(void)
{
        int rc;

        rc = dlpar_workqueue_init();
        if (rc)
                return rc;

        return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
}
machine_device_initcall(pseries, dlpar_sysfs_init);