/*
 *
 * Procedures for interfacing to the RTAS on CHRP machines.
 *
 * Peter Bergner, IBM	March 2001.
 * Copyright (C) 2001 IBM.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <stdarg.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/capability.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/reboot.h>

#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/page.h>
#include <asm/param.h>
#include <asm/delay.h>
#include <linux/uaccess.h>
#include <asm/udbg.h>
#include <asm/syscalls.h>
#include <asm/smp.h>
#include <linux/atomic.h>
#include <asm/time.h>
#include <asm/mmu.h>
#include <asm/topology.h>

/* This is here deliberately so it's only used in this file */
void enter_rtas(unsigned long);

struct rtas_t rtas = {
	.lock = __ARCH_SPIN_LOCK_UNLOCKED
};
EXPORT_SYMBOL(rtas);

DEFINE_SPINLOCK(rtas_data_buf_lock);
EXPORT_SYMBOL(rtas_data_buf_lock);

char rtas_data_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned;
EXPORT_SYMBOL(rtas_data_buf);

unsigned long rtas_rmo_buf;

/*
 * If non-NULL, this gets called when the kernel terminates.
 * This is done like this so rtas_flash can be a module.
 */
void (*rtas_flash_term_hook)(int);
EXPORT_SYMBOL(rtas_flash_term_hook);

/*
 * RTAS uses a home-made raw lock instead of spin_lock_irqsave() because
 * it can be called from really nasty contexts, such as with the timebase
 * stopped, which would lock up with normal locks and spinlock debugging
 * enabled.
 */
static unsigned long lock_rtas(void)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	arch_spin_lock_flags(&rtas.lock, flags);
	return flags;
}

static void unlock_rtas(unsigned long flags)
{
	arch_spin_unlock(&rtas.lock);
	local_irq_restore(flags);
	preempt_enable();
}

/*
 * call_rtas_display_status and call_rtas_display_status_delay
 * are designed only for very early low-level debugging, which
 * is why the token is hard-coded to 10.
 */
static void call_rtas_display_status(unsigned char c)
{
	unsigned long s;

	if (!rtas.base)
		return;

	s = lock_rtas();
	rtas_call_unlocked(&rtas.args, 10, 1, 1, NULL, c);
	unlock_rtas(s);
}

static void call_rtas_display_status_delay(char c)
{
	static int pending_newline = 0;  /* did last write end with unprinted newline? */
	static int width = 16;

	if (c == '\n') {
		while (width-- > 0)
			call_rtas_display_status(' ');
		width = 16;
		mdelay(500);
		pending_newline = 1;
	} else {
		if (pending_newline) {
			call_rtas_display_status('\r');
			call_rtas_display_status('\n');
		}
		pending_newline = 0;
		if (width--) {
			call_rtas_display_status(c);
			udelay(10000);
		}
	}
}

void __init udbg_init_rtas_panel(void)
{
	udbg_putc = call_rtas_display_status_delay;
}

#ifdef CONFIG_UDBG_RTAS_CONSOLE

/* If you think you're dying before early_init_dt_scan_rtas() does its
 * work, you can hard code the token values for your firmware here and
 * hardcode rtas.base/entry etc.
 */
static unsigned int rtas_putchar_token = RTAS_UNKNOWN_SERVICE;
static unsigned int rtas_getchar_token = RTAS_UNKNOWN_SERVICE;

static void udbg_rtascon_putc(char c)
{
	int tries;

	if (!rtas.base)
		return;

	/* Add CRs before LFs */
	if (c == '\n')
		udbg_rtascon_putc('\r');

	/* if there is more than one character to be displayed, wait a bit */
	for (tries = 0; tries < 16; tries++) {
		if (rtas_call(rtas_putchar_token, 1, 1, NULL, c) == 0)
			break;
		udelay(1000);
	}
}

static int udbg_rtascon_getc_poll(void)
{
	int c;

	if (!rtas.base)
		return -1;

	if (rtas_call(rtas_getchar_token, 0, 2, &c))
		return -1;

	return c;
}

static int udbg_rtascon_getc(void)
{
	int c;

	while ((c = udbg_rtascon_getc_poll()) == -1)
		;

	return c;
}


void __init udbg_init_rtas_console(void)
{
	udbg_putc = udbg_rtascon_putc;
	udbg_getc = udbg_rtascon_getc;
	udbg_getc_poll = udbg_rtascon_getc_poll;
}
#endif /* CONFIG_UDBG_RTAS_CONSOLE */

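/*
 * Write a progress message and/or hex status code to the display device
 * advertised by the firmware, using the display-character RTAS call, or
 * fall back to set-indicator (hex display) when display-character is not
 * available.
 */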
void rtas_progress(char *s, unsigned short hex)
{
	struct device_node *root;
	int width;
	const __be32 *p;
	char *os;
	static int display_character, set_indicator;
	static int display_width, display_lines, form_feed;
	static const int *row_width;
	static DEFINE_SPINLOCK(progress_lock);
	static int current_line;
	static int pending_newline = 0;	/* did last write end with unprinted newline? */

	if (!rtas.base)
		return;

	if (display_width == 0) {
		display_width = 0x10;
		if ((root = of_find_node_by_path("/rtas"))) {
			if ((p = of_get_property(root,
					"ibm,display-line-length", NULL)))
				display_width = be32_to_cpu(*p);
			if ((p = of_get_property(root,
					"ibm,form-feed", NULL)))
				form_feed = be32_to_cpu(*p);
			if ((p = of_get_property(root,
					"ibm,display-number-of-lines", NULL)))
				display_lines = be32_to_cpu(*p);
			row_width = of_get_property(root,
					"ibm,display-truncation-length", NULL);
			of_node_put(root);
		}
		display_character = rtas_token("display-character");
		set_indicator = rtas_token("set-indicator");
	}

	if (display_character == RTAS_UNKNOWN_SERVICE) {
		/* use hex display if available */
		if (set_indicator != RTAS_UNKNOWN_SERVICE)
			rtas_call(set_indicator, 3, 1, NULL, 6, 0, hex);
		return;
	}

	spin_lock(&progress_lock);

	/*
	 * Last write ended with newline, but we didn't print it since
	 * it would just clear the bottom line of output. Print it now
	 * instead.
	 *
	 * If no newline is pending and form feed is supported, clear the
	 * display with a form feed; otherwise, print a CR to start output
	 * at the beginning of the line.
	 */
	if (pending_newline) {
		rtas_call(display_character, 1, 1, NULL, '\r');
		rtas_call(display_character, 1, 1, NULL, '\n');
		pending_newline = 0;
	} else {
		current_line = 0;
		if (form_feed)
			rtas_call(display_character, 1, 1, NULL,
				  (char)form_feed);
		else
			rtas_call(display_character, 1, 1, NULL, '\r');
	}

	if (row_width)
		width = row_width[current_line];
	else
		width = display_width;
	os = s;
	while (*os) {
		if (*os == '\n' || *os == '\r') {
			/* If newline is the last character, save it
			 * until next call to avoid bumping up the
			 * display output.
			 */
			if (*os == '\n' && !os[1]) {
				pending_newline = 1;
				current_line++;
				if (current_line > display_lines-1)
					current_line = display_lines-1;
				spin_unlock(&progress_lock);
				return;
			}

			/* RTAS wants CR-LF, not just LF */

			if (*os == '\n') {
				rtas_call(display_character, 1, 1, NULL, '\r');
				rtas_call(display_character, 1, 1, NULL, '\n');
			} else {
				/* CR might be used to re-draw a line, so we'll
				 * leave it alone and not add LF.
				 */
				rtas_call(display_character, 1, 1, NULL, *os);
			}

			if (row_width)
				width = row_width[current_line];
			else
				width = display_width;
		} else {
			width--;
			rtas_call(display_character, 1, 1, NULL, *os);
		}

		os++;

		/* if we overwrite the screen length */
		if (width <= 0)
			while ((*os != 0) && (*os != '\n') && (*os != '\r'))
				os++;
	}

	spin_unlock(&progress_lock);
}
EXPORT_SYMBOL(rtas_progress);		/* needed by rtas_flash module */

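/*
 * Look up the RTAS token for a named service in the /rtas device-tree
 * node. Returns RTAS_UNKNOWN_SERVICE if the firmware does not provide
 * the call.
 */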
int rtas_token(const char *service)
{
	const __be32 *tokp;
	if (rtas.dev == NULL)
		return RTAS_UNKNOWN_SERVICE;
	tokp = of_get_property(rtas.dev, service, NULL);
	return tokp ? be32_to_cpu(*tokp) : RTAS_UNKNOWN_SERVICE;
}
EXPORT_SYMBOL(rtas_token);

int rtas_service_present(const char *service)
{
	return rtas_token(service) != RTAS_UNKNOWN_SERVICE;
}
EXPORT_SYMBOL(rtas_service_present);

#ifdef CONFIG_RTAS_ERROR_LOGGING
/*
 * Return the firmware-specified size of the error log buffer
 * for all rtas calls that require an error buffer argument.
 * This includes 'check-exception' and 'rtas-last-error'.
 */
int rtas_get_error_log_max(void)
{
	static int rtas_error_log_max;
	if (rtas_error_log_max)
		return rtas_error_log_max;

	rtas_error_log_max = rtas_token("rtas-error-log-max");
	if ((rtas_error_log_max == RTAS_UNKNOWN_SERVICE) ||
	    (rtas_error_log_max > RTAS_ERROR_LOG_MAX)) {
		printk(KERN_WARNING "RTAS: bad log buffer size %d\n",
		       rtas_error_log_max);
		rtas_error_log_max = RTAS_ERROR_LOG_MAX;
	}
	return rtas_error_log_max;
}
EXPORT_SYMBOL(rtas_get_error_log_max);


static char rtas_err_buf[RTAS_ERROR_LOG_MAX];
static int rtas_last_error_token;

/** Return a copy of the detailed error text associated with the
 *  most recent failed call to rtas.  Because the error text
 *  might go stale if there are any other intervening rtas calls,
 *  this routine must be called atomically with whatever produced
 *  the error (i.e. with rtas.lock still held from the previous call).
 */
static char *__fetch_rtas_last_error(char *altbuf)
{
	struct rtas_args err_args, save_args;
	u32 bufsz;
	char *buf = NULL;

	if (rtas_last_error_token == -1)
		return NULL;

	bufsz = rtas_get_error_log_max();

	err_args.token = cpu_to_be32(rtas_last_error_token);
	err_args.nargs = cpu_to_be32(2);
	err_args.nret = cpu_to_be32(1);
	err_args.args[0] = cpu_to_be32(__pa(rtas_err_buf));
	err_args.args[1] = cpu_to_be32(bufsz);
	err_args.args[2] = 0;

	save_args = rtas.args;
	rtas.args = err_args;

	enter_rtas(__pa(&rtas.args));

	err_args = rtas.args;
	rtas.args = save_args;

	/* Log the error in the unlikely case that there was one. */
	if (unlikely(err_args.args[2] == 0)) {
		if (altbuf) {
			buf = altbuf;
		} else {
			buf = rtas_err_buf;
			if (slab_is_available())
				buf = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC);
		}
		if (buf)
			memcpy(buf, rtas_err_buf, RTAS_ERROR_LOG_MAX);
	}

	return buf;
}

#define get_errorlog_buffer()	kmalloc(RTAS_ERROR_LOG_MAX, GFP_KERNEL)

#else /* CONFIG_RTAS_ERROR_LOGGING */
#define __fetch_rtas_last_error(x)	NULL
#define get_errorlog_buffer()		NULL
#endif


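/*
 * Marshal the token and arguments into an rtas_args block (big-endian,
 * as RTAS expects) and enter firmware. The caller provides the buffer
 * and is responsible for any locking; rtas_call() below wraps this with
 * the global rtas.args buffer and the RTAS lock.
 */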
static void
va_rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret,
		      va_list list)
{
	int i;

	args->token = cpu_to_be32(token);
	args->nargs = cpu_to_be32(nargs);
	args->nret = cpu_to_be32(nret);
	args->rets = &(args->args[nargs]);

	for (i = 0; i < nargs; ++i)
		args->args[i] = cpu_to_be32(va_arg(list, __u32));

	for (i = 0; i < nret; ++i)
		args->rets[i] = 0;

	enter_rtas(__pa(args));
}

void rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret, ...)
{
	va_list list;

	va_start(list, nret);
	va_rtas_call_unlocked(args, token, nargs, nret, list);
	va_end(list);
}

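/*
 * Make an RTAS call using the global rtas.args buffer, serialized by the
 * RTAS lock. The @nargs inputs follow @outputs in the variadic list; on
 * return, rets[1..nret-1] are copied into @outputs (if non-NULL) and
 * rets[0] is returned as the status (or -1 if the token is unknown).
 * A -1 status also triggers a fetch of the detailed firmware error log.
 */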
int rtas_call(int token, int nargs, int nret, int *outputs, ...)
{
	va_list list;
	int i;
	unsigned long s;
	struct rtas_args *rtas_args;
	char *buff_copy = NULL;
	int ret;

	if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE)
		return -1;

	s = lock_rtas();

	/* We use the global rtas args buffer */
	rtas_args = &rtas.args;

	va_start(list, outputs);
	va_rtas_call_unlocked(rtas_args, token, nargs, nret, list);
	va_end(list);

	/* A -1 return code indicates that the last command couldn't
	   be completed due to a hardware error. */
	if (be32_to_cpu(rtas_args->rets[0]) == -1)
		buff_copy = __fetch_rtas_last_error(NULL);

	if (nret > 1 && outputs != NULL)
		for (i = 0; i < nret-1; ++i)
			outputs[i] = be32_to_cpu(rtas_args->rets[i+1]);
	ret = (nret > 0) ? be32_to_cpu(rtas_args->rets[0]) : 0;

	unlock_rtas(s);

	if (buff_copy) {
		log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0);
		if (slab_is_available())
			kfree(buff_copy);
	}
	return ret;
}
EXPORT_SYMBOL(rtas_call);

/* For RTAS_BUSY (-2), delay for 1 millisecond.  For an extended busy status
 * code of 990n, perform the hinted delay of 10^n (last digit) milliseconds.
 */
unsigned int rtas_busy_delay_time(int status)
{
	int order;
	unsigned int ms = 0;

	if (status == RTAS_BUSY) {
		ms = 1;
	} else if (status >= RTAS_EXTENDED_DELAY_MIN &&
		   status <= RTAS_EXTENDED_DELAY_MAX) {
		order = status - RTAS_EXTENDED_DELAY_MIN;
		for (ms = 1; order > 0; order--)
			ms *= 10;
	}

	return ms;
}
EXPORT_SYMBOL(rtas_busy_delay_time);

/* For an RTAS busy status code, perform the hinted delay. */
unsigned int rtas_busy_delay(int status)
{
	unsigned int ms;

	might_sleep();
	ms = rtas_busy_delay_time(status);
	if (ms && need_resched())
		msleep(ms);

	return ms;
}
EXPORT_SYMBOL(rtas_busy_delay);
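
/*
 * Callers typically retry while the firmware reports a busy status, e.g.
 * (see rtas_set_power_level() below):
 *
 *	do {
 *		rc = rtas_call(token, 2, 2, setlevel, powerdomain, level);
 *	} while (rtas_busy_delay(rc));
 */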

static int rtas_error_rc(int rtas_rc)
{
	int rc;

	switch (rtas_rc) {
	case -1:		/* Hardware Error */
		rc = -EIO;
		break;
	case -3:		/* Bad indicator/domain/etc */
		rc = -EINVAL;
		break;
	case -9000:		/* Isolation error */
		rc = -EFAULT;
		break;
	case -9001:		/* Outstanding TCE/PTE */
		rc = -EEXIST;
		break;
	case -9002:		/* No usable slot */
		rc = -ENODEV;
		break;
	default:
		printk(KERN_ERR "%s: unexpected RTAS error %d\n",
		       __func__, rtas_rc);
		rc = -ERANGE;
		break;
	}
	return rc;
}

int rtas_get_power_level(int powerdomain, int *level)
{
	int token = rtas_token("get-power-level");
	int rc;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	while ((rc = rtas_call(token, 1, 2, level, powerdomain)) == RTAS_BUSY)
		udelay(1);

	if (rc < 0)
		return rtas_error_rc(rc);
	return rc;
}
EXPORT_SYMBOL(rtas_get_power_level);

int rtas_set_power_level(int powerdomain, int level, int *setlevel)
{
	int token = rtas_token("set-power-level");
	int rc;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	do {
		rc = rtas_call(token, 2, 2, setlevel, powerdomain, level);
	} while (rtas_busy_delay(rc));

	if (rc < 0)
		return rtas_error_rc(rc);
	return rc;
}
EXPORT_SYMBOL(rtas_set_power_level);

int rtas_get_sensor(int sensor, int index, int *state)
{
	int token = rtas_token("get-sensor-state");
	int rc;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	do {
		rc = rtas_call(token, 2, 2, state, sensor, index);
	} while (rtas_busy_delay(rc));

	if (rc < 0)
		return rtas_error_rc(rc);
	return rc;
}
EXPORT_SYMBOL(rtas_get_sensor);

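/*
 * Non-sleeping variant of rtas_get_sensor() for callers that cannot
 * block; it does not retry, and warns if the firmware reports a busy
 * or extended-delay status.
 */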
int rtas_get_sensor_fast(int sensor, int index, int *state)
{
	int token = rtas_token("get-sensor-state");
	int rc;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	rc = rtas_call(token, 2, 2, state, sensor, index);
	WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
				    rc <= RTAS_EXTENDED_DELAY_MAX));

	if (rc < 0)
		return rtas_error_rc(rc);
	return rc;
}

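/*
 * Check the "rtas-indicators" device-tree property for the given token;
 * if found, optionally return the maximum index supported for that
 * indicator.
 */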
bool rtas_indicator_present(int token, int *maxindex)
{
	int proplen, count, i;
	const struct indicator_elem {
		__be32 token;
		__be32 maxindex;
	} *indicators;

	indicators = of_get_property(rtas.dev, "rtas-indicators", &proplen);
	if (!indicators)
		return false;

	count = proplen / sizeof(struct indicator_elem);

	for (i = 0; i < count; i++) {
		if (__be32_to_cpu(indicators[i].token) != token)
			continue;
		if (maxindex)
			*maxindex = __be32_to_cpu(indicators[i].maxindex);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(rtas_indicator_present);

int rtas_set_indicator(int indicator, int index, int new_value)
{
	int token = rtas_token("set-indicator");
	int rc;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	do {
		rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);
	} while (rtas_busy_delay(rc));

	if (rc < 0)
		return rtas_error_rc(rc);
	return rc;
}
EXPORT_SYMBOL(rtas_set_indicator);

/*
 * Like rtas_set_indicator(), but never sleeps: RTAS busy and extended
 * delay statuses are not retried.
 */
int rtas_set_indicator_fast(int indicator, int index, int new_value)
{
	int rc;
	int token = rtas_token("set-indicator");

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);

	WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
				    rc <= RTAS_EXTENDED_DELAY_MAX));

	if (rc < 0)
		return rtas_error_rc(rc);

	return rc;
}

void __noreturn rtas_restart(char *cmd)
{
	if (rtas_flash_term_hook)
		rtas_flash_term_hook(SYS_RESTART);
	printk("RTAS system-reboot returned %d\n",
	       rtas_call(rtas_token("system-reboot"), 0, 1, NULL));
	for (;;);
}

void rtas_power_off(void)
{
	if (rtas_flash_term_hook)
		rtas_flash_term_hook(SYS_POWER_OFF);
	/* allow power on only with power button press */
	printk("RTAS power-off returned %d\n",
	       rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1));
	for (;;);
}

void __noreturn rtas_halt(void)
{
	if (rtas_flash_term_hook)
		rtas_flash_term_hook(SYS_HALT);
	/* allow power on only with power button press */
	printk("RTAS power-off returned %d\n",
	       rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1));
	for (;;);
}

/* Must be in the RMO region, so we place it here */
static char rtas_os_term_buf[2048];

void rtas_os_term(char *str)
{
	int status;

	/*
	 * Firmware with the ibm,extended-os-term property is guaranteed
	 * to always return from an ibm,os-term call. Earlier versions without
	 * this property may terminate the partition which we want to avoid
	 * since it interferes with panic_timeout.
	 */
	if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term") ||
	    RTAS_UNKNOWN_SERVICE == rtas_token("ibm,extended-os-term"))
		return;

	snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str);

	do {
		status = rtas_call(rtas_token("ibm,os-term"), 1, 1, NULL,
				   __pa(rtas_os_term_buf));
	} while (rtas_busy_delay(status));

	if (status != 0)
		printk(KERN_EMERG "ibm,os-term call failed %d\n", status);
}

static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE;
#ifdef CONFIG_PPC_PSERIES
static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_when_done)
{
	u16 slb_size = mmu_slb_size;
	int rc = H_MULTI_THREADS_ACTIVE;
	int cpu;

	slb_set_size(SLB_MIN_SIZE);
	printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id());

	while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) &&
	       !atomic_read(&data->error))
		rc = rtas_call(data->token, 0, 1, NULL);

	if (rc || atomic_read(&data->error)) {
		printk(KERN_DEBUG "ibm,suspend-me returned %d\n", rc);
		slb_set_size(slb_size);
	}

	if (atomic_read(&data->error))
		rc = atomic_read(&data->error);

	atomic_set(&data->error, rc);
	pSeries_coalesce_init();

	if (wake_when_done) {
		atomic_set(&data->done, 1);

		for_each_online_cpu(cpu)
			plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
	}

	if (atomic_dec_return(&data->working) == 0)
		complete(data->complete);

	return rc;
}

int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data)
{
	atomic_inc(&data->working);
	return __rtas_suspend_last_cpu(data, 0);
}

static int __rtas_suspend_cpu(struct rtas_suspend_me_data *data, int wake_when_done)
{
	long rc = H_SUCCESS;
	unsigned long msr_save;
	int cpu;

	atomic_inc(&data->working);

	/* really need to ensure MSR.EE is off for H_JOIN */
	msr_save = mfmsr();
	mtmsr(msr_save & ~(MSR_EE));

	while (rc == H_SUCCESS && !atomic_read(&data->done) && !atomic_read(&data->error))
		rc = plpar_hcall_norets(H_JOIN);

	mtmsr(msr_save);

	if (rc == H_SUCCESS) {
		/* This cpu was prodded and the suspend is complete. */
		goto out;
	} else if (rc == H_CONTINUE) {
		/* All other cpus are in H_JOIN, this cpu does
		 * the suspend.
		 */
		return __rtas_suspend_last_cpu(data, wake_when_done);
	} else {
		printk(KERN_ERR "H_JOIN on cpu %i failed with rc = %ld\n",
		       smp_processor_id(), rc);
		atomic_set(&data->error, rc);
	}

	if (wake_when_done) {
		atomic_set(&data->done, 1);

		/* This cpu did the suspend or got an error; in either case,
		 * we need to prod all other cpus out of join state.
		 * Extra prods are harmless.
		 */
		for_each_online_cpu(cpu)
			plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
	}
out:
	if (atomic_dec_return(&data->working) == 0)
		complete(data->complete);
	return rc;
}

int rtas_suspend_cpu(struct rtas_suspend_me_data *data)
{
	return __rtas_suspend_cpu(data, 0);
}

static void rtas_percpu_suspend_me(void *info)
{
	__rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1);
}

enum rtas_cpu_state {
	DOWN,
	UP,
};

#ifndef CONFIG_SMP
static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
				cpumask_var_t cpus)
{
	if (!cpumask_empty(cpus)) {
		cpumask_clear(cpus);
		return -EINVAL;
	} else
		return 0;
}
#else
/* On return cpumask will be altered to indicate CPUs changed.
 * CPUs with states changed will be set in the mask,
 * CPUs with status unchanged will be unset in the mask. */
static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
				cpumask_var_t cpus)
{
	int cpu;
	int cpuret = 0;
	int ret = 0;

	if (cpumask_empty(cpus))
		return 0;

	for_each_cpu(cpu, cpus) {
		struct device *dev = get_cpu_device(cpu);

		switch (state) {
		case DOWN:
			cpuret = device_offline(dev);
			break;
		case UP:
			cpuret = device_online(dev);
			break;
		}
		if (cpuret < 0) {
			pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
				 __func__,
				 ((state == UP) ? "up" : "down"),
				 cpu, cpuret);
			if (!ret)
				ret = cpuret;
			if (state == UP) {
				/* clear bits for unchanged cpus, return */
				cpumask_shift_right(cpus, cpus, cpu);
				cpumask_shift_left(cpus, cpus, cpu);
				break;
			} else {
				/* clear bit for unchanged cpu, continue */
				cpumask_clear_cpu(cpu, cpus);
			}
		}
	}

	return ret;
}
#endif

int rtas_online_cpus_mask(cpumask_var_t cpus)
{
	int ret;

	ret = rtas_cpu_state_change_mask(UP, cpus);

	if (ret) {
		cpumask_var_t tmp_mask;

		if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL))
			return ret;

		/* Use tmp_mask to preserve cpus mask from first failure */
		cpumask_copy(tmp_mask, cpus);
		rtas_offline_cpus_mask(tmp_mask);
		free_cpumask_var(tmp_mask);
	}

	return ret;
}
EXPORT_SYMBOL(rtas_online_cpus_mask);

int rtas_offline_cpus_mask(cpumask_var_t cpus)
{
	return rtas_cpu_state_change_mask(DOWN, cpus);
}
EXPORT_SYMBOL(rtas_offline_cpus_mask);

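/*
 * Suspend the partition for the VASI stream identified by @handle (used
 * for partition migration/hibernation): validate the H_VASI_STATE, bring
 * all present CPUs online, have every CPU H_JOIN while one of them makes
 * the ibm,suspend-me RTAS call, then restore the previous CPU
 * online/offline state.
 */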
int rtas_ibm_suspend_me(u64 handle)
{
	long state;
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	struct rtas_suspend_me_data data;
	DECLARE_COMPLETION_ONSTACK(done);
	cpumask_var_t offline_mask;
	int cpuret;

	if (!rtas_service_present("ibm,suspend-me"))
		return -ENOSYS;

	/* Make sure the state is valid */
	rc = plpar_hcall(H_VASI_STATE, retbuf, handle);

	state = retbuf[0];

	if (rc) {
		printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned %ld\n", rc);
		return rc;
	} else if (state == H_VASI_ENABLED) {
		return -EAGAIN;
	} else if (state != H_VASI_SUSPENDING) {
		printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned state %ld\n",
		       state);
		return -EIO;
	}

	if (!alloc_cpumask_var(&offline_mask, GFP_KERNEL))
		return -ENOMEM;

	atomic_set(&data.working, 0);
	atomic_set(&data.done, 0);
	atomic_set(&data.error, 0);
	data.token = rtas_token("ibm,suspend-me");
	data.complete = &done;

	lock_device_hotplug();

	/* All present CPUs must be online */
	cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
	cpuret = rtas_online_cpus_mask(offline_mask);
	if (cpuret) {
		pr_err("%s: Could not bring present CPUs online.\n", __func__);
		atomic_set(&data.error, cpuret);
		goto out;
	}

	cpu_hotplug_disable();
	stop_topology_update();

	/* Call function on all CPUs.  One of us will make the
	 * rtas call
	 */
	if (on_each_cpu(rtas_percpu_suspend_me, &data, 0))
		atomic_set(&data.error, -EINVAL);

	wait_for_completion(&done);

	if (atomic_read(&data.error) != 0)
		printk(KERN_ERR "Error doing global join\n");

	start_topology_update();
	cpu_hotplug_enable();

	/* Take down CPUs not online prior to suspend */
	cpuret = rtas_offline_cpus_mask(offline_mask);
	if (cpuret)
		pr_warn("%s: Could not restore CPUs to offline state.\n",
			__func__);

out:
	unlock_device_hotplug();
	free_cpumask_var(offline_mask);
	return atomic_read(&data.error);
}
#else /* CONFIG_PPC_PSERIES */
int rtas_ibm_suspend_me(u64 handle)
{
	return -ENOSYS;
}
#endif

/**
 * get_pseries_errorlog() - Find a specific pseries error log in an RTAS
 *                          extended event log.
 * @log: RTAS error/event log
 * @section_id: two character section identifier
 *
 * Returns a pointer to the specified errorlog or NULL if not found.
 */
struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
					      uint16_t section_id)
{
	struct rtas_ext_event_log_v6 *ext_log =
		(struct rtas_ext_event_log_v6 *)log->buffer;
	struct pseries_errorlog *sect;
	unsigned char *p, *log_end;
	uint32_t ext_log_length = rtas_error_extended_log_length(log);
	uint8_t log_format = rtas_ext_event_log_format(ext_log);
	uint32_t company_id = rtas_ext_event_company_id(ext_log);

	/* Check that we understand the format */
	if (ext_log_length < sizeof(struct rtas_ext_event_log_v6) ||
	    log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG ||
	    company_id != RTAS_V6EXT_COMPANY_ID_IBM)
		return NULL;

	log_end = log->buffer + ext_log_length;
	p = ext_log->vendor_log;

	while (p < log_end) {
		sect = (struct pseries_errorlog *)p;
		if (pseries_errorlog_id(sect) == section_id)
			return sect;
		p += pseries_errorlog_length(sect);
	}

	return NULL;
}

/* We assume to be passed big endian arguments */
asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
{
	struct rtas_args args;
	unsigned long flags;
	char *buff_copy, *errbuf = NULL;
	int nargs, nret, token;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!rtas.entry)
		return -EINVAL;

	if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
		return -EFAULT;

	nargs = be32_to_cpu(args.nargs);
	nret = be32_to_cpu(args.nret);
	token = be32_to_cpu(args.token);

	if (nargs >= ARRAY_SIZE(args.args)
	    || nret > ARRAY_SIZE(args.args)
	    || nargs + nret > ARRAY_SIZE(args.args))
		return -EINVAL;

	/* Copy in args. */
	if (copy_from_user(args.args, uargs->args,
			   nargs * sizeof(rtas_arg_t)) != 0)
		return -EFAULT;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -EINVAL;

	args.rets = &args.args[nargs];
	memset(args.rets, 0, nret * sizeof(rtas_arg_t));

	/* Need to handle ibm,suspend_me call specially */
	if (token == ibm_suspend_me_token) {

		/*
		 * rtas_ibm_suspend_me assumes the streamid handle is in cpu
		 * endian, or at least the hcall within it requires it.
		 */
		int rc = 0;
		u64 handle = ((u64)be32_to_cpu(args.args[0]) << 32)
			      | be32_to_cpu(args.args[1]);
		rc = rtas_ibm_suspend_me(handle);
		if (rc == -EAGAIN)
			args.rets[0] = cpu_to_be32(RTAS_NOT_SUSPENDABLE);
		else if (rc == -EIO)
			args.rets[0] = cpu_to_be32(-1);
		else if (rc)
			return rc;
		goto copy_return;
	}

	buff_copy = get_errorlog_buffer();

	flags = lock_rtas();

	rtas.args = args;
	enter_rtas(__pa(&rtas.args));
	args = rtas.args;

	/* A -1 return code indicates that the last command couldn't
	   be completed due to a hardware error. */
	if (be32_to_cpu(args.rets[0]) == -1)
		errbuf = __fetch_rtas_last_error(buff_copy);

	unlock_rtas(flags);

	if (buff_copy) {
		if (errbuf)
			log_error(errbuf, ERR_TYPE_RTAS_LOG, 0);
		kfree(buff_copy);
	}

copy_return:
	/* Copy out args. */
	if (copy_to_user(uargs->args + nargs,
			 args.args + nargs,
			 nret * sizeof(rtas_arg_t)) != 0)
		return -EFAULT;

	return 0;
}

/*
 * Call early during boot, before mem init, to retrieve the RTAS
 * information from the device-tree and allocate the RMO buffer for userland
 * accesses.
 */
void __init rtas_initialize(void)
{
	unsigned long rtas_region = RTAS_INSTANTIATE_MAX;
	u32 base, size, entry;
	int no_base, no_size, no_entry;

	/* Get the RTAS device-tree node and fill in our "rtas" structure
	 * with info about it.
	 */
	rtas.dev = of_find_node_by_name(NULL, "rtas");
	if (!rtas.dev)
		return;

	no_base = of_property_read_u32(rtas.dev, "linux,rtas-base", &base);
	no_size = of_property_read_u32(rtas.dev, "rtas-size", &size);
	if (no_base || no_size) {
		of_node_put(rtas.dev);
		rtas.dev = NULL;
		return;
	}

	rtas.base = base;
	rtas.size = size;
	no_entry = of_property_read_u32(rtas.dev, "linux,rtas-entry", &entry);
	rtas.entry = no_entry ? rtas.base : entry;

	/* If RTAS was found, allocate the RMO buffer for it and look for
	 * the ibm,suspend-me token if any
	 */
#ifdef CONFIG_PPC64
	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		rtas_region = min(ppc64_rma_size, RTAS_INSTANTIATE_MAX);
		ibm_suspend_me_token = rtas_token("ibm,suspend-me");
	}
#endif
	rtas_rmo_buf = memblock_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);

#ifdef CONFIG_RTAS_ERROR_LOGGING
	rtas_last_error_token = rtas_token("rtas-last-error");
#endif
}

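/*
 * Flat device-tree scan hook: pick up the RTAS base/entry/size (and the
 * udbg console tokens, if configured) from the /rtas node very early in
 * boot, before the unflattened device tree is available.
 */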
int __init early_init_dt_scan_rtas(unsigned long node,
				   const char *uname, int depth, void *data)
{
	const u32 *basep, *entryp, *sizep;

	if (depth != 1 || strcmp(uname, "rtas") != 0)
		return 0;

	basep = of_get_flat_dt_prop(node, "linux,rtas-base", NULL);
	entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
	sizep = of_get_flat_dt_prop(node, "rtas-size", NULL);

	if (basep && entryp && sizep) {
		rtas.base = *basep;
		rtas.entry = *entryp;
		rtas.size = *sizep;
	}

#ifdef CONFIG_UDBG_RTAS_CONSOLE
	basep = of_get_flat_dt_prop(node, "put-term-char", NULL);
	if (basep)
		rtas_putchar_token = *basep;

	basep = of_get_flat_dt_prop(node, "get-term-char", NULL);
	if (basep)
		rtas_getchar_token = *basep;

	if (rtas_putchar_token != RTAS_UNKNOWN_SERVICE &&
	    rtas_getchar_token != RTAS_UNKNOWN_SERVICE)
		udbg_init_rtas_console();

#endif

	/* break now */
	return 1;
}

static arch_spinlock_t timebase_lock;
static u64 timebase = 0;

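/*
 * Timebase handoff for CPU bring-up on platforms that provide
 * freeze-time-base/thaw-time-base: the giving CPU freezes the timebase
 * and publishes its value through the 'timebase' variable, the taking
 * CPU copies it into its own timebase register and clears the variable,
 * and the giving CPU then thaws the timebase.
 */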
void rtas_give_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);
	hard_irq_disable();
	arch_spin_lock(&timebase_lock);
	rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
	timebase = get_tb();
	arch_spin_unlock(&timebase_lock);

	while (timebase)
		barrier();
	rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
	local_irq_restore(flags);
}

void rtas_take_timebase(void)
{
	while (!timebase)
		barrier();
	arch_spin_lock(&timebase_lock);
	set_tb(timebase >> 32, timebase & 0xffffffff);
	timebase = 0;
	arch_spin_unlock(&timebase_lock);
}