1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Procedures for interfacing to Open Firmware.
4 *
5 * Paul Mackerras August 1996.
6 * Copyright (C) 1996-2005 Paul Mackerras.
7 *
8 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
9 * {engebret|bergner}@us.ibm.com
10 */
11
12 #undef DEBUG_PROM
13
14 /* we cannot use FORTIFY as it brings in new symbols */
15 #define __NO_FORTIFY
16
17 #include <linux/stdarg.h>
18 #include <linux/kernel.h>
19 #include <linux/string.h>
20 #include <linux/init.h>
21 #include <linux/threads.h>
22 #include <linux/spinlock.h>
23 #include <linux/types.h>
24 #include <linux/pci.h>
25 #include <linux/proc_fs.h>
26 #include <linux/delay.h>
27 #include <linux/initrd.h>
28 #include <linux/bitops.h>
29 #include <linux/pgtable.h>
30 #include <linux/printk.h>
31 #include <asm/prom.h>
32 #include <asm/rtas.h>
33 #include <asm/page.h>
34 #include <asm/processor.h>
35 #include <asm/interrupt.h>
36 #include <asm/irq.h>
37 #include <asm/io.h>
38 #include <asm/smp.h>
39 #include <asm/mmu.h>
40 #include <asm/iommu.h>
41 #include <asm/btext.h>
42 #include <asm/sections.h>
43 #include <asm/machdep.h>
44 #include <asm/asm-prototypes.h>
45 #include <asm/ultravisor-api.h>
46
47 #include <linux/linux_logo.h>
48
49 /* All of prom_init bss lives here */
50 #define __prombss __section(".bss.prominit")
51
52 /*
53 * Eventually bump that one up
54 */
55 #define DEVTREE_CHUNK_SIZE 0x100000
56
57 /*
58 * This is the size of the local memory reserve map that gets copied
59 * into the boot params passed to the kernel. That size is totally
60 * flexible as the kernel just reads the list until it encounters an
61 * entry with size 0, so it can be changed without breaking binary
62 * compatibility
63 */
64 #define MEM_RESERVE_MAP_SIZE 8
65
66 /*
67 * prom_init() is called very early on, before the kernel text
68 * and data have been mapped to KERNELBASE. At this point the code
69 * is running at whatever address it has been loaded at.
70 * On ppc32 we compile with -mrelocatable, which means that references
71 * to extern and static variables get relocated automatically.
72 * ppc64 objects are always relocatable, we just need to relocate the
73 * TOC.
74 *
75 * Because OF may have mapped I/O devices into the area starting at
76 * KERNELBASE, particularly on CHRP machines, we can't safely call
77 * OF once the kernel has been mapped to KERNELBASE. Therefore all
78 * OF calls must be done within prom_init().
79 *
80 * ADDR is used in calls to call_prom. The 4th and following
81 * arguments to call_prom should be 32-bit values.
82 * On ppc64, 64 bit values are truncated to 32 bits (and
83 * fortunately don't get interpreted as two arguments).
84 */
85 #define ADDR(x) (u32)(unsigned long)(x)
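/*
 * Purely illustrative example (not part of the original source): pointers are
 * passed to call_prom() through ADDR() so they fit the 32-bit argument cells,
 * e.g. the getprop wrapper further down boils down to
 *
 *	call_prom("getprop", 4, 1, node, ADDR("device_type"),
 *		  ADDR(type), (u32) sizeof(type));
 */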
86
87 #ifdef CONFIG_PPC64
88 #define OF_WORKAROUNDS 0
89 #else
90 #define OF_WORKAROUNDS of_workarounds
91 static int of_workarounds __prombss;
92 #endif
93
94 #define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */
95 #define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */
96
97 #define PROM_BUG() do { \
98 prom_printf("kernel BUG at %s line 0x%x!\n", \
99 __FILE__, __LINE__); \
100 __builtin_trap(); \
101 } while (0)
102
103 #ifdef DEBUG_PROM
104 #define prom_debug(x...) prom_printf(x)
105 #else
106 #define prom_debug(x...) do { } while (0)
107 #endif
108
109
110 typedef u32 prom_arg_t;
111
112 struct prom_args {
113 __be32 service;
114 __be32 nargs;
115 __be32 nret;
116 __be32 args[10];
117 };
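/*
 * Descriptive note (added for clarity): args[] holds the nargs input cells
 * followed by the nret return cells, so the first value returned by OF ends
 * up in args[nargs]. All cells are big-endian 32-bit values.
 */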
118
119 struct prom_t {
120 ihandle root;
121 phandle chosen;
122 int cpu;
123 ihandle stdout;
124 ihandle mmumap;
125 ihandle memory;
126 };
127
128 struct mem_map_entry {
129 __be64 base;
130 __be64 size;
131 };
132
133 typedef __be32 cell_t;
134
135 extern void __start(unsigned long r3, unsigned long r4, unsigned long r5,
136 unsigned long r6, unsigned long r7, unsigned long r8,
137 unsigned long r9);
138
139 #ifdef CONFIG_PPC64
140 extern int enter_prom(struct prom_args *args, unsigned long entry);
141 #else
142 static inline int enter_prom(struct prom_args *args, unsigned long entry)
143 {
144 return ((int (*)(struct prom_args *))entry)(args);
145 }
146 #endif
147
148 extern void copy_and_flush(unsigned long dest, unsigned long src,
149 unsigned long size, unsigned long offset);
150
151 /* prom structure */
152 static struct prom_t __prombss prom;
153
154 static unsigned long __prombss prom_entry;
155
156 static char __prombss of_stdout_device[256];
157 static char __prombss prom_scratch[256];
158
159 static unsigned long __prombss dt_header_start;
160 static unsigned long __prombss dt_struct_start, dt_struct_end;
161 static unsigned long __prombss dt_string_start, dt_string_end;
162
163 static unsigned long __prombss prom_initrd_start, prom_initrd_end;
164
165 #ifdef CONFIG_PPC64
166 static int __prombss prom_iommu_force_on;
167 static int __prombss prom_iommu_off;
168 static unsigned long __prombss prom_tce_alloc_start;
169 static unsigned long __prombss prom_tce_alloc_end;
170 #endif
171
172 #ifdef CONFIG_PPC_PSERIES
173 static bool __prombss prom_radix_disable;
174 static bool __prombss prom_radix_gtse_disable;
175 static bool __prombss prom_xive_disable;
176 #endif
177
178 #ifdef CONFIG_PPC_SVM
179 static bool __prombss prom_svm_enable;
180 #endif
181
182 struct platform_support {
183 bool hash_mmu;
184 bool radix_mmu;
185 bool radix_gtse;
186 bool xive;
187 };
188
189 /* Platform codes are now obsolete in the kernel. They are now only used within
190 * this file and will ultimately be gone too. Feel free to change them if you need
191 * to; they are not shared with anything outside of this file anymore.
192 */
193 #define PLATFORM_PSERIES 0x0100
194 #define PLATFORM_PSERIES_LPAR 0x0101
195 #define PLATFORM_LPAR 0x0001
196 #define PLATFORM_POWERMAC 0x0400
197 #define PLATFORM_GENERIC 0x0500
198
199 static int __prombss of_platform;
200
201 static char __prombss prom_cmd_line[COMMAND_LINE_SIZE];
202
203 static unsigned long __prombss prom_memory_limit;
204
205 static unsigned long __prombss alloc_top;
206 static unsigned long __prombss alloc_top_high;
207 static unsigned long __prombss alloc_bottom;
208 static unsigned long __prombss rmo_top;
209 static unsigned long __prombss ram_top;
210
211 static struct mem_map_entry __prombss mem_reserve_map[MEM_RESERVE_MAP_SIZE];
212 static int __prombss mem_reserve_cnt;
213
214 static cell_t __prombss regbuf[1024];
215
216 static bool __prombss rtas_has_query_cpu_stopped;
217
218
219 /*
220 * Error results ... some OF calls will return "-1" on error, some
221 * will return 0, some will return either. To simplify, here are
222 * macros to use with any ihandle or phandle return value to check if
223 * it is valid
224 */
225
226 #define PROM_ERROR (-1u)
227 #define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR)
228 #define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR)
229
230 /* Copied from lib/string.c and lib/kstrtox.c */
231
232 static int __init prom_strcmp(const char *cs, const char *ct)
233 {
234 unsigned char c1, c2;
235
236 while (1) {
237 c1 = *cs++;
238 c2 = *ct++;
239 if (c1 != c2)
240 return c1 < c2 ? -1 : 1;
241 if (!c1)
242 break;
243 }
244 return 0;
245 }
246
247 static ssize_t __init prom_strscpy_pad(char *dest, const char *src, size_t n)
248 {
249 ssize_t rc;
250 size_t i;
251
252 if (n == 0 || n > INT_MAX)
253 return -E2BIG;
254
255 // Copy up to n bytes
256 for (i = 0; i < n && src[i] != '\0'; i++)
257 dest[i] = src[i];
258
259 rc = i;
260
261 // If we copied all n then we have run out of space for the nul
262 if (rc == n) {
263 // Rewind by one character to ensure nul termination
264 i--;
265 rc = -E2BIG;
266 }
267
268 for (; i < n; i++)
269 dest[i] = '\0';
270
271 return rc;
272 }
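/*
 * Behaviour sketch, assuming an 8-byte destination buffer (illustrative only):
 *
 *	prom_strscpy_pad(buf, "linux", 8)   returns 5, buf = "linux\0\0\0"
 *	prom_strscpy_pad(buf, "too long for buf", 8)
 *	                                    returns -E2BIG, buf still NUL-terminated
 */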
273
274 static int __init prom_strncmp(const char *cs, const char *ct, size_t count)
275 {
276 unsigned char c1, c2;
277
278 while (count) {
279 c1 = *cs++;
280 c2 = *ct++;
281 if (c1 != c2)
282 return c1 < c2 ? -1 : 1;
283 if (!c1)
284 break;
285 count--;
286 }
287 return 0;
288 }
289
290 static size_t __init prom_strlen(const char *s)
291 {
292 const char *sc;
293
294 for (sc = s; *sc != '\0'; ++sc)
295 /* nothing */;
296 return sc - s;
297 }
298
299 static int __init prom_memcmp(const void *cs, const void *ct, size_t count)
300 {
301 const unsigned char *su1, *su2;
302 int res = 0;
303
304 for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
305 if ((res = *su1 - *su2) != 0)
306 break;
307 return res;
308 }
309
310 static char __init *prom_strstr(const char *s1, const char *s2)
311 {
312 size_t l1, l2;
313
314 l2 = prom_strlen(s2);
315 if (!l2)
316 return (char *)s1;
317 l1 = prom_strlen(s1);
318 while (l1 >= l2) {
319 l1--;
320 if (!prom_memcmp(s1, s2, l2))
321 return (char *)s1;
322 s1++;
323 }
324 return NULL;
325 }
326
327 static size_t __init prom_strlcat(char *dest, const char *src, size_t count)
328 {
329 size_t dsize = prom_strlen(dest);
330 size_t len = prom_strlen(src);
331 size_t res = dsize + len;
332
333 /* This would be a bug */
334 if (dsize >= count)
335 return count;
336
337 dest += dsize;
338 count -= dsize;
339 if (len >= count)
340 len = count-1;
341 memcpy(dest, src, len);
342 dest[len] = 0;
343 return res;
344
345 }
346
347 #ifdef CONFIG_PPC_PSERIES
348 static int __init prom_strtobool(const char *s, bool *res)
349 {
350 if (!s)
351 return -EINVAL;
352
353 switch (s[0]) {
354 case 'y':
355 case 'Y':
356 case '1':
357 *res = true;
358 return 0;
359 case 'n':
360 case 'N':
361 case '0':
362 *res = false;
363 return 0;
364 case 'o':
365 case 'O':
366 switch (s[1]) {
367 case 'n':
368 case 'N':
369 *res = true;
370 return 0;
371 case 'f':
372 case 'F':
373 *res = false;
374 return 0;
375 default:
376 break;
377 }
378 break;
379 default:
380 break;
381 }
382
383 return -EINVAL;
384 }
385 #endif
386
387 /* This is the one and *ONLY* place where we actually call open
388 * firmware.
389 */
390
391 static int __init call_prom(const char *service, int nargs, int nret, ...)
392 {
393 int i;
394 struct prom_args args;
395 va_list list;
396
397 args.service = cpu_to_be32(ADDR(service));
398 args.nargs = cpu_to_be32(nargs);
399 args.nret = cpu_to_be32(nret);
400
401 va_start(list, nret);
402 for (i = 0; i < nargs; i++)
403 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
404 va_end(list);
405
406 for (i = 0; i < nret; i++)
407 args.args[nargs+i] = 0;
408
409 if (enter_prom(&args, prom_entry) < 0)
410 return PROM_ERROR;
411
412 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
413 }
414
415 static int __init call_prom_ret(const char *service, int nargs, int nret,
416 prom_arg_t *rets, ...)
417 {
418 int i;
419 struct prom_args args;
420 va_list list;
421
422 args.service = cpu_to_be32(ADDR(service));
423 args.nargs = cpu_to_be32(nargs);
424 args.nret = cpu_to_be32(nret);
425
426 va_start(list, rets);
427 for (i = 0; i < nargs; i++)
428 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
429 va_end(list);
430
431 for (i = 0; i < nret; i++)
432 args.args[nargs+i] = 0;
433
434 if (enter_prom(&args, prom_entry) < 0)
435 return PROM_ERROR;
436
437 if (rets != NULL)
438 for (i = 1; i < nret; ++i)
439 rets[i-1] = be32_to_cpu(args.args[nargs+i]);
440
441 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
442 }
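/*
 * Descriptive note: the value returned by call_prom_ret() is the first return
 * cell (args[nargs]); when rets is non-NULL the remaining nret - 1 return
 * cells are copied into rets[0..nret-2], already converted to CPU endianness.
 */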
443
444
445 static void __init prom_print(const char *msg)
446 {
447 const char *p, *q;
448
449 if (prom.stdout == 0)
450 return;
451
452 for (p = msg; *p != 0; p = q) {
453 for (q = p; *q != 0 && *q != '\n'; ++q)
454 ;
455 if (q > p)
456 call_prom("write", 3, 1, prom.stdout, p, q - p);
457 if (*q == 0)
458 break;
459 ++q;
460 call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
461 }
462 }
463
464
465 /*
466 * Both prom_print_hex & prom_print_dec take an unsigned long as input so that
467 * we do not need __udivdi3 or __umoddi3 on 32-bit builds.
468 */
469 static void __init prom_print_hex(unsigned long val)
470 {
471 int i, nibbles = sizeof(val)*2;
472 char buf[sizeof(val)*2+1];
473
474 for (i = nibbles-1; i >= 0; i--) {
475 buf[i] = (val & 0xf) + '0';
476 if (buf[i] > '9')
477 buf[i] += ('a'-'0'-10);
478 val >>= 4;
479 }
480 buf[nibbles] = '\0';
481 call_prom("write", 3, 1, prom.stdout, buf, nibbles);
482 }
483
484 /* max number of decimal digits in an unsigned long */
485 #define UL_DIGITS 21
486 static void __init prom_print_dec(unsigned long val)
487 {
488 int i, size;
489 char buf[UL_DIGITS+1];
490
491 for (i = UL_DIGITS-1; i >= 0; i--) {
492 buf[i] = (val % 10) + '0';
493 val = val/10;
494 if (val == 0)
495 break;
496 }
497 /* shift stuff down */
498 size = UL_DIGITS - i;
499 call_prom("write", 3, 1, prom.stdout, buf+i, size);
500 }
501
502 __printf(1, 2)
503 static void __init prom_printf(const char *format, ...)
504 {
505 const char *p, *q, *s;
506 va_list args;
507 unsigned long v;
508 long vs;
509 int n = 0;
510
511 va_start(args, format);
512 for (p = format; *p != 0; p = q) {
513 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
514 ;
515 if (q > p)
516 call_prom("write", 3, 1, prom.stdout, p, q - p);
517 if (*q == 0)
518 break;
519 if (*q == '\n') {
520 ++q;
521 call_prom("write", 3, 1, prom.stdout,
522 ADDR("\r\n"), 2);
523 continue;
524 }
525 ++q;
526 if (*q == 0)
527 break;
528 while (*q == 'l') {
529 ++q;
530 ++n;
531 }
532 switch (*q) {
533 case 's':
534 ++q;
535 s = va_arg(args, const char *);
536 prom_print(s);
537 break;
538 case 'x':
539 ++q;
540 switch (n) {
541 case 0:
542 v = va_arg(args, unsigned int);
543 break;
544 case 1:
545 v = va_arg(args, unsigned long);
546 break;
547 case 2:
548 default:
549 v = va_arg(args, unsigned long long);
550 break;
551 }
552 prom_print_hex(v);
553 break;
554 case 'u':
555 ++q;
556 switch (n) {
557 case 0:
558 v = va_arg(args, unsigned int);
559 break;
560 case 1:
561 v = va_arg(args, unsigned long);
562 break;
563 case 2:
564 default:
565 v = va_arg(args, unsigned long long);
566 break;
567 }
568 prom_print_dec(v);
569 break;
570 case 'd':
571 ++q;
572 switch (n) {
573 case 0:
574 vs = va_arg(args, int);
575 break;
576 case 1:
577 vs = va_arg(args, long);
578 break;
579 case 2:
580 default:
581 vs = va_arg(args, long long);
582 break;
583 }
584 if (vs < 0) {
585 prom_print("-");
586 vs = -vs;
587 }
588 prom_print_dec(vs);
589 break;
590 }
591 }
592 va_end(args);
593 }
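/*
 * Illustrative usage (the only conversions handled above are %s, %x, %u and
 * %d, with optional 'l'/'ll' length modifiers on the numeric ones):
 *
 *	prom_printf("alloc_bottom : %lx\n", alloc_bottom);
 *	prom_printf("Max cores: %u (NR_CPUS = %d)\n", cores, NR_CPUS);
 */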
594
595
596 static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
597 unsigned long align)
598 {
599
600 if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
601 /*
602 * Old OF requires we claim physical and virtual separately
603 * and then map explicitly (assuming virtual mode)
604 */
605 int ret;
606 prom_arg_t result;
607
608 ret = call_prom_ret("call-method", 5, 2, &result,
609 ADDR("claim"), prom.memory,
610 align, size, virt);
611 if (ret != 0 || result == -1)
612 return -1;
613 ret = call_prom_ret("call-method", 5, 2, &result,
614 ADDR("claim"), prom.mmumap,
615 align, size, virt);
616 if (ret != 0) {
617 call_prom("call-method", 4, 1, ADDR("release"),
618 prom.memory, size, virt);
619 return -1;
620 }
621 /* the 0x12 is M (coherence) + PP == read/write */
622 call_prom("call-method", 6, 1,
623 ADDR("map"), prom.mmumap, 0x12, size, virt, virt);
624 return virt;
625 }
626 return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
627 (prom_arg_t)align);
628 }
629
630 static void __init __attribute__((noreturn)) prom_panic(const char *reason)
631 {
632 prom_print(reason);
633 /* Do not call exit because it clears the screen on pmac;
634 * it also causes some sort of double-fault on early pmacs */
635 if (of_platform == PLATFORM_POWERMAC)
636 asm("trap\n");
637
638 /* ToDo: should put up an SRC here on pSeries */
639 call_prom("exit", 0, 0);
640
641 for (;;) /* should never get here */
642 ;
643 }
644
645
646 static int __init prom_next_node(phandle *nodep)
647 {
648 phandle node;
649
650 if ((node = *nodep) != 0
651 && (*nodep = call_prom("child", 1, 1, node)) != 0)
652 return 1;
653 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
654 return 1;
655 for (;;) {
656 if ((node = call_prom("parent", 1, 1, node)) == 0)
657 return 0;
658 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
659 return 1;
660 }
661 }
662
663 static inline int __init prom_getprop(phandle node, const char *pname,
664 void *value, size_t valuelen)
665 {
666 return call_prom("getprop", 4, 1, node, ADDR(pname),
667 (u32)(unsigned long) value, (u32) valuelen);
668 }
669
670 static inline int __init prom_getproplen(phandle node, const char *pname)
671 {
672 return call_prom("getproplen", 2, 1, node, ADDR(pname));
673 }
674
675 static void add_string(char **str, const char *q)
676 {
677 char *p = *str;
678
679 while (*q)
680 *p++ = *q++;
681 *p++ = ' ';
682 *str = p;
683 }
684
685 static char *tohex(unsigned int x)
686 {
687 static const char digits[] __initconst = "0123456789abcdef";
688 static char result[9] __prombss;
689 int i;
690
691 result[8] = 0;
692 i = 8;
693 do {
694 --i;
695 result[i] = digits[x & 0xf];
696 x >>= 4;
697 } while (x != 0 && i > 0);
698 return &result[i];
699 }
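/*
 * Note (added for clarity): tohex() formats into a single static __prombss
 * buffer, so each call overwrites the previous result. That is fine for the
 * prom_setprop() sequence below, which copies the string via add_string()
 * before calling tohex() again.
 */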
700
701 static int __init prom_setprop(phandle node, const char *nodename,
702 const char *pname, void *value, size_t valuelen)
703 {
704 char cmd[256], *p;
705
706 if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
707 return call_prom("setprop", 4, 1, node, ADDR(pname),
708 (u32)(unsigned long) value, (u32) valuelen);
709
710 /* gah... setprop doesn't work on longtrail, have to use interpret */
711 p = cmd;
712 add_string(&p, "dev");
713 add_string(&p, nodename);
714 add_string(&p, tohex((u32)(unsigned long) value));
715 add_string(&p, tohex(valuelen));
716 add_string(&p, tohex(ADDR(pname)));
717 add_string(&p, tohex(prom_strlen(pname)));
718 add_string(&p, "property");
719 *p = 0;
720 return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
721 }
722
723 /* We can't use the standard versions because of relocation headaches. */
724 #define prom_isxdigit(c) \
725 (('0' <= (c) && (c) <= '9') || ('a' <= (c) && (c) <= 'f') || ('A' <= (c) && (c) <= 'F'))
726
727 #define prom_isdigit(c) ('0' <= (c) && (c) <= '9')
728 #define prom_islower(c) ('a' <= (c) && (c) <= 'z')
729 #define prom_toupper(c) (prom_islower(c) ? ((c) - 'a' + 'A') : (c))
730
731 static unsigned long prom_strtoul(const char *cp, const char **endp)
732 {
733 unsigned long result = 0, base = 10, value;
734
735 if (*cp == '0') {
736 base = 8;
737 cp++;
738 if (prom_toupper(*cp) == 'X') {
739 cp++;
740 base = 16;
741 }
742 }
743
744 while (prom_isxdigit(*cp) &&
745 (value = prom_isdigit(*cp) ? *cp - '0' : prom_toupper(*cp) - 'A' + 10) < base) {
746 result = result * base + value;
747 cp++;
748 }
749
750 if (endp)
751 *endp = cp;
752
753 return result;
754 }
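/*
 * Illustrative examples of the usual C prefixes handled above:
 *
 *	prom_strtoul("256", NULL)   == 256	(decimal)
 *	prom_strtoul("0x100", NULL) == 256	(hexadecimal)
 *	prom_strtoul("0400", NULL)  == 256	(octal)
 */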
755
756 static unsigned long prom_memparse(const char *ptr, const char **retptr)
757 {
758 unsigned long ret = prom_strtoul(ptr, retptr);
759 int shift = 0;
760
761 /*
762 * We can't use a switch here because GCC *may* generate a
763 * jump table which won't work, because we're not running at
764 * the address we're linked at.
765 */
766 if ('G' == **retptr || 'g' == **retptr)
767 shift = 30;
768
769 if ('M' == **retptr || 'm' == **retptr)
770 shift = 20;
771
772 if ('K' == **retptr || 'k' == **retptr)
773 shift = 10;
774
775 if (shift) {
776 ret <<= shift;
777 (*retptr)++;
778 }
779
780 return ret;
781 }
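/*
 * Illustrative examples for the "mem=" parsing below:
 *
 *	prom_memparse("512M", &p) == 512UL << 20   (p advanced past the 'M')
 *	prom_memparse("2G",   &p) == 2UL << 30
 *	prom_memparse("4096", &p) == 4096          (no suffix, plain bytes)
 */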
782
783 /*
784 * Early parsing of the command line passed to the kernel, used for
785 * "mem=x" and the options that affect the iommu
786 */
787 static void __init early_cmdline_parse(void)
788 {
789 const char *opt;
790
791 char *p;
792 int l = 0;
793
794 prom_cmd_line[0] = 0;
795 p = prom_cmd_line;
796
797 if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && (long)prom.chosen > 0)
798 l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
799
800 if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) || l <= 0 || p[0] == '\0')
801 prom_strlcat(prom_cmd_line, " " CONFIG_CMDLINE,
802 sizeof(prom_cmd_line));
803
804 prom_printf("command line: %s\n", prom_cmd_line);
805
806 #ifdef CONFIG_PPC64
807 opt = prom_strstr(prom_cmd_line, "iommu=");
808 if (opt) {
809 prom_printf("iommu opt is: %s\n", opt);
810 opt += 6;
811 while (*opt && *opt == ' ')
812 opt++;
813 if (!prom_strncmp(opt, "off", 3))
814 prom_iommu_off = 1;
815 else if (!prom_strncmp(opt, "force", 5))
816 prom_iommu_force_on = 1;
817 }
818 #endif
819 opt = prom_strstr(prom_cmd_line, "mem=");
820 if (opt) {
821 opt += 4;
822 prom_memory_limit = prom_memparse(opt, (const char **)&opt);
823 #ifdef CONFIG_PPC64
824 /* Align to 16 MB == size of ppc64 large page */
825 prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
826 #endif
827 }
828
829 #ifdef CONFIG_PPC_PSERIES
830 prom_radix_disable = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);
831 opt = prom_strstr(prom_cmd_line, "disable_radix");
832 if (opt) {
833 opt += 13;
834 if (*opt && *opt == '=') {
835 bool val;
836
837 if (prom_strtobool(++opt, &val))
838 prom_radix_disable = false;
839 else
840 prom_radix_disable = val;
841 } else
842 prom_radix_disable = true;
843 }
844 if (prom_radix_disable)
845 prom_debug("Radix disabled from cmdline\n");
846
847 opt = prom_strstr(prom_cmd_line, "radix_hcall_invalidate=on");
848 if (opt) {
849 prom_radix_gtse_disable = true;
850 prom_debug("Radix GTSE disabled from cmdline\n");
851 }
852
853 opt = prom_strstr(prom_cmd_line, "xive=off");
854 if (opt) {
855 prom_xive_disable = true;
856 prom_debug("XIVE disabled from cmdline\n");
857 }
858 #endif /* CONFIG_PPC_PSERIES */
859
860 #ifdef CONFIG_PPC_SVM
861 opt = prom_strstr(prom_cmd_line, "svm=");
862 if (opt) {
863 bool val;
864
865 opt += sizeof("svm=") - 1;
866 if (!prom_strtobool(opt, &val))
867 prom_svm_enable = val;
868 }
869 #endif /* CONFIG_PPC_SVM */
870 }
871
872 #ifdef CONFIG_PPC_PSERIES
873 /*
874 * The architecture vector has an array of PVR mask/value pairs,
875 * followed by # option vectors - 1, followed by the option vectors.
876 *
877 * See prom.h for the definition of the bits specified in the
878 * architecture vector.
879 */
880
881 /* Firmware expects the value to be n - 1, where n is the # of vectors */
882 #define NUM_VECTORS(n) ((n) - 1)
883
884 /*
885 * Firmware expects 1 + n - 2, where n is the length of the option vector in
886 * bytes. The 1 accounts for the length byte itself, the - 2 .. ?
887 */
888 #define VECTOR_LENGTH(n) (1 + (n) - 2)
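/*
 * Example (added for clarity): option vector 3 carries two data bytes, so its
 * length byte is VECTOR_LENGTH(sizeof(struct option_vector3)) == 1.
 */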
889
890 struct option_vector1 {
891 u8 byte1;
892 u8 arch_versions;
893 u8 arch_versions3;
894 } __packed;
895
896 struct option_vector2 {
897 u8 byte1;
898 __be16 reserved;
899 __be32 real_base;
900 __be32 real_size;
901 __be32 virt_base;
902 __be32 virt_size;
903 __be32 load_base;
904 __be32 min_rma;
905 __be32 min_load;
906 u8 min_rma_percent;
907 u8 max_pft_size;
908 } __packed;
909
910 struct option_vector3 {
911 u8 byte1;
912 u8 byte2;
913 } __packed;
914
915 struct option_vector4 {
916 u8 byte1;
917 u8 min_vp_cap;
918 } __packed;
919
920 struct option_vector5 {
921 u8 byte1;
922 u8 byte2;
923 u8 byte3;
924 u8 cmo;
925 u8 associativity;
926 u8 bin_opts;
927 u8 micro_checkpoint;
928 u8 reserved0;
929 __be32 max_cpus;
930 __be16 papr_level;
931 __be16 reserved1;
932 u8 platform_facilities;
933 u8 reserved2;
934 __be16 reserved3;
935 u8 subprocessors;
936 u8 byte22;
937 u8 intarch;
938 u8 mmu;
939 u8 hash_ext;
940 u8 radix_ext;
941 } __packed;
942
943 struct option_vector6 {
944 u8 reserved;
945 u8 secondary_pteg;
946 u8 os_name;
947 } __packed;
948
949 struct option_vector7 {
950 u8 os_id[256];
951 } __packed;
952
953 struct ibm_arch_vec {
954 struct { u32 mask, val; } pvrs[14];
955
956 u8 num_vectors;
957
958 u8 vec1_len;
959 struct option_vector1 vec1;
960
961 u8 vec2_len;
962 struct option_vector2 vec2;
963
964 u8 vec3_len;
965 struct option_vector3 vec3;
966
967 u8 vec4_len;
968 struct option_vector4 vec4;
969
970 u8 vec5_len;
971 struct option_vector5 vec5;
972
973 u8 vec6_len;
974 struct option_vector6 vec6;
975
976 u8 vec7_len;
977 struct option_vector7 vec7;
978 } __packed;
979
980 static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
981 .pvrs = {
982 {
983 .mask = cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */
984 .val = cpu_to_be32(0x003a0000),
985 },
986 {
987 .mask = cpu_to_be32(0xffff0000), /* POWER6 */
988 .val = cpu_to_be32(0x003e0000),
989 },
990 {
991 .mask = cpu_to_be32(0xffff0000), /* POWER7 */
992 .val = cpu_to_be32(0x003f0000),
993 },
994 {
995 .mask = cpu_to_be32(0xffff0000), /* POWER8E */
996 .val = cpu_to_be32(0x004b0000),
997 },
998 {
999 .mask = cpu_to_be32(0xffff0000), /* POWER8NVL */
1000 .val = cpu_to_be32(0x004c0000),
1001 },
1002 {
1003 .mask = cpu_to_be32(0xffff0000), /* POWER8 */
1004 .val = cpu_to_be32(0x004d0000),
1005 },
1006 {
1007 .mask = cpu_to_be32(0xffff0000), /* POWER9 */
1008 .val = cpu_to_be32(0x004e0000),
1009 },
1010 {
1011 .mask = cpu_to_be32(0xffff0000), /* POWER10 */
1012 .val = cpu_to_be32(0x00800000),
1013 },
1014 {
1015 .mask = cpu_to_be32(0xffffffff), /* all 3.1-compliant */
1016 .val = cpu_to_be32(0x0f000006),
1017 },
1018 {
1019 .mask = cpu_to_be32(0xffffffff), /* all 3.00-compliant */
1020 .val = cpu_to_be32(0x0f000005),
1021 },
1022 {
1023 .mask = cpu_to_be32(0xffffffff), /* all 2.07-compliant */
1024 .val = cpu_to_be32(0x0f000004),
1025 },
1026 {
1027 .mask = cpu_to_be32(0xffffffff), /* all 2.06-compliant */
1028 .val = cpu_to_be32(0x0f000003),
1029 },
1030 {
1031 .mask = cpu_to_be32(0xffffffff), /* all 2.05-compliant */
1032 .val = cpu_to_be32(0x0f000002),
1033 },
1034 {
1035 .mask = cpu_to_be32(0xfffffffe), /* all 2.04-compliant and earlier */
1036 .val = cpu_to_be32(0x0f000001),
1037 },
1038 },
1039
1040 .num_vectors = NUM_VECTORS(6),
1041
1042 .vec1_len = VECTOR_LENGTH(sizeof(struct option_vector1)),
1043 .vec1 = {
1044 .byte1 = 0,
1045 .arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
1046 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
1047 .arch_versions3 = OV1_PPC_3_00 | OV1_PPC_3_1,
1048 },
1049
1050 .vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)),
1051 /* option vector 2: Open Firmware options supported */
1052 .vec2 = {
1053 .byte1 = OV2_REAL_MODE,
1054 .reserved = 0,
1055 .real_base = cpu_to_be32(0xffffffff),
1056 .real_size = cpu_to_be32(0xffffffff),
1057 .virt_base = cpu_to_be32(0xffffffff),
1058 .virt_size = cpu_to_be32(0xffffffff),
1059 .load_base = cpu_to_be32(0xffffffff),
1060 .min_rma = cpu_to_be32(512), /* 512MB min RMA */
1061 .min_load = cpu_to_be32(0xffffffff), /* full client load */
1062 .min_rma_percent = 0, /* min RMA percentage of total RAM */
1063 .max_pft_size = 48, /* max log_2(hash table size) */
1064 },
1065
1066 .vec3_len = VECTOR_LENGTH(sizeof(struct option_vector3)),
1067 /* option vector 3: processor options supported */
1068 .vec3 = {
1069 .byte1 = 0, /* don't ignore, don't halt */
1070 .byte2 = OV3_FP | OV3_VMX | OV3_DFP,
1071 },
1072
1073 .vec4_len = VECTOR_LENGTH(sizeof(struct option_vector4)),
1074 /* option vector 4: IBM PAPR implementation */
1075 .vec4 = {
1076 .byte1 = 0, /* don't halt */
1077 .min_vp_cap = OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */
1078 },
1079
1080 .vec5_len = VECTOR_LENGTH(sizeof(struct option_vector5)),
1081 /* option vector 5: PAPR/OF options */
1082 .vec5 = {
1083 .byte1 = 0, /* don't ignore, don't halt */
1084 .byte2 = OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
1085 OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
1086 #ifdef CONFIG_PCI_MSI
1087 /* PCIe/MSI support. Without MSI full PCIe is not supported */
1088 OV5_FEAT(OV5_MSI),
1089 #else
1090 0,
1091 #endif
1092 .byte3 = 0,
1093 .cmo =
1094 #ifdef CONFIG_PPC_SMLPAR
1095 OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
1096 #else
1097 0,
1098 #endif
1099 .associativity = OV5_FEAT(OV5_FORM1_AFFINITY) | OV5_FEAT(OV5_PRRN) |
1100 OV5_FEAT(OV5_FORM2_AFFINITY),
1101 .bin_opts = OV5_FEAT(OV5_RESIZE_HPT) | OV5_FEAT(OV5_HP_EVT),
1102 .micro_checkpoint = 0,
1103 .reserved0 = 0,
1104 .max_cpus = cpu_to_be32(NR_CPUS), /* number of cores supported */
1105 .papr_level = 0,
1106 .reserved1 = 0,
1107 .platform_facilities = OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | OV5_FEAT(OV5_PFO_HW_842),
1108 .reserved2 = 0,
1109 .reserved3 = 0,
1110 .subprocessors = 1,
1111 .byte22 = OV5_FEAT(OV5_DRMEM_V2) | OV5_FEAT(OV5_DRC_INFO),
1112 .intarch = 0,
1113 .mmu = 0,
1114 .hash_ext = 0,
1115 .radix_ext = 0,
1116 },
1117
1118 /* option vector 6: IBM PAPR hints */
1119 .vec6_len = VECTOR_LENGTH(sizeof(struct option_vector6)),
1120 .vec6 = {
1121 .reserved = 0,
1122 .secondary_pteg = 0,
1123 .os_name = OV6_LINUX,
1124 },
1125
1126 /* option vector 7: OS Identification */
1127 .vec7_len = VECTOR_LENGTH(sizeof(struct option_vector7)),
1128 };
1129
1130 static struct ibm_arch_vec __prombss ibm_architecture_vec ____cacheline_aligned;
1131
1132 /* Old method - ELF header with PT_NOTE sections only works on BE */
1133 #ifdef __BIG_ENDIAN__
1134 static const struct fake_elf {
1135 Elf32_Ehdr elfhdr;
1136 Elf32_Phdr phdr[2];
1137 struct chrpnote {
1138 u32 namesz;
1139 u32 descsz;
1140 u32 type;
1141 char name[8]; /* "PowerPC" */
1142 struct chrpdesc {
1143 u32 real_mode;
1144 u32 real_base;
1145 u32 real_size;
1146 u32 virt_base;
1147 u32 virt_size;
1148 u32 load_base;
1149 } chrpdesc;
1150 } chrpnote;
1151 struct rpanote {
1152 u32 namesz;
1153 u32 descsz;
1154 u32 type;
1155 char name[24]; /* "IBM,RPA-Client-Config" */
1156 struct rpadesc {
1157 u32 lpar_affinity;
1158 u32 min_rmo_size;
1159 u32 min_rmo_percent;
1160 u32 max_pft_size;
1161 u32 splpar;
1162 u32 min_load;
1163 u32 new_mem_def;
1164 u32 ignore_me;
1165 } rpadesc;
1166 } rpanote;
1167 } fake_elf __initconst = {
1168 .elfhdr = {
1169 .e_ident = { 0x7f, 'E', 'L', 'F',
1170 ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
1171 .e_type = ET_EXEC, /* yeah right */
1172 .e_machine = EM_PPC,
1173 .e_version = EV_CURRENT,
1174 .e_phoff = offsetof(struct fake_elf, phdr),
1175 .e_phentsize = sizeof(Elf32_Phdr),
1176 .e_phnum = 2
1177 },
1178 .phdr = {
1179 [0] = {
1180 .p_type = PT_NOTE,
1181 .p_offset = offsetof(struct fake_elf, chrpnote),
1182 .p_filesz = sizeof(struct chrpnote)
1183 }, [1] = {
1184 .p_type = PT_NOTE,
1185 .p_offset = offsetof(struct fake_elf, rpanote),
1186 .p_filesz = sizeof(struct rpanote)
1187 }
1188 },
1189 .chrpnote = {
1190 .namesz = sizeof("PowerPC"),
1191 .descsz = sizeof(struct chrpdesc),
1192 .type = 0x1275,
1193 .name = "PowerPC",
1194 .chrpdesc = {
1195 .real_mode = ~0U, /* ~0 means "don't care" */
1196 .real_base = ~0U,
1197 .real_size = ~0U,
1198 .virt_base = ~0U,
1199 .virt_size = ~0U,
1200 .load_base = ~0U
1201 },
1202 },
1203 .rpanote = {
1204 .namesz = sizeof("IBM,RPA-Client-Config"),
1205 .descsz = sizeof(struct rpadesc),
1206 .type = 0x12759999,
1207 .name = "IBM,RPA-Client-Config",
1208 .rpadesc = {
1209 .lpar_affinity = 0,
1210 .min_rmo_size = 64, /* in megabytes */
1211 .min_rmo_percent = 0,
1212 .max_pft_size = 48, /* 2^48 bytes max PFT size */
1213 .splpar = 1,
1214 .min_load = ~0U,
1215 .new_mem_def = 0
1216 }
1217 }
1218 };
1219 #endif /* __BIG_ENDIAN__ */
1220
1221 static int __init prom_count_smt_threads(void)
1222 {
1223 phandle node;
1224 char type[64];
1225 unsigned int plen;
1226
1227 /* Pick up the first CPU node we can find */
1228 for (node = 0; prom_next_node(&node); ) {
1229 type[0] = 0;
1230 prom_getprop(node, "device_type", type, sizeof(type));
1231
1232 if (prom_strcmp(type, "cpu"))
1233 continue;
1234 /*
1235 * There is an entry for each smt thread, each entry being
1236 * 4 bytes long. All cpus should have the same number of
1237 * smt threads, so return after finding the first.
1238 */
1239 plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s");
1240 if (plen == PROM_ERROR)
1241 break;
1242 plen >>= 2;
1243 prom_debug("Found %lu smt threads per core\n", (unsigned long)plen);
1244
1245 /* Sanity check */
1246 if (plen < 1 || plen > 64) {
1247 prom_printf("Threads per core %lu out of bounds, assuming 1\n",
1248 (unsigned long)plen);
1249 return 1;
1250 }
1251 return plen;
1252 }
1253 prom_debug("No threads found, assuming 1 per core\n");
1254
1255 return 1;
1256
1257 }
1258
1259 static void __init prom_parse_mmu_model(u8 val,
1260 struct platform_support *support)
1261 {
1262 switch (val) {
1263 case OV5_FEAT(OV5_MMU_DYNAMIC):
1264 case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */
1265 prom_debug("MMU - either supported\n");
1266 support->radix_mmu = !prom_radix_disable;
1267 support->hash_mmu = true;
1268 break;
1269 case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */
1270 prom_debug("MMU - radix only\n");
1271 if (prom_radix_disable) {
1272 /*
1273 * If we __have__ to do radix, we're better off ignoring
1274 * the command line rather than not booting.
1275 */
1276 prom_printf("WARNING: Ignoring cmdline option disable_radix\n");
1277 }
1278 support->radix_mmu = true;
1279 break;
1280 case OV5_FEAT(OV5_MMU_HASH):
1281 prom_debug("MMU - hash only\n");
1282 support->hash_mmu = true;
1283 break;
1284 default:
1285 prom_debug("Unknown mmu support option: 0x%x\n", val);
1286 break;
1287 }
1288 }
1289
1290 static void __init prom_parse_xive_model(u8 val,
1291 struct platform_support *support)
1292 {
1293 switch (val) {
1294 case OV5_FEAT(OV5_XIVE_EITHER): /* Either Available */
1295 prom_debug("XIVE - either mode supported\n");
1296 support->xive = !prom_xive_disable;
1297 break;
1298 case OV5_FEAT(OV5_XIVE_EXPLOIT): /* Only Exploitation mode */
1299 prom_debug("XIVE - exploitation mode supported\n");
1300 if (prom_xive_disable) {
1301 /*
1302 * If we __have__ to do XIVE, we're better off ignoring
1303 * the command line rather than not booting.
1304 */
1305 prom_printf("WARNING: Ignoring cmdline option xive=off\n");
1306 }
1307 support->xive = true;
1308 break;
1309 case OV5_FEAT(OV5_XIVE_LEGACY): /* Only Legacy mode */
1310 prom_debug("XIVE - legacy mode supported\n");
1311 break;
1312 default:
1313 prom_debug("Unknown xive support option: 0x%x\n", val);
1314 break;
1315 }
1316 }
1317
1318 static void __init prom_parse_platform_support(u8 index, u8 val,
1319 struct platform_support *support)
1320 {
1321 switch (index) {
1322 case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */
1323 prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support);
1324 break;
1325 case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */
1326 if (val & OV5_FEAT(OV5_RADIX_GTSE))
1327 support->radix_gtse = !prom_radix_gtse_disable;
1328 break;
1329 case OV5_INDX(OV5_XIVE_SUPPORT): /* Interrupt mode */
1330 prom_parse_xive_model(val & OV5_FEAT(OV5_XIVE_SUPPORT),
1331 support);
1332 break;
1333 }
1334 }
1335
1336 static void __init prom_check_platform_support(void)
1337 {
1338 struct platform_support supported = {
1339 .hash_mmu = false,
1340 .radix_mmu = false,
1341 .radix_gtse = false,
1342 .xive = false
1343 };
1344 int prop_len = prom_getproplen(prom.chosen,
1345 "ibm,arch-vec-5-platform-support");
1346
1347 /*
1348 * First copy the architecture vec template
1349 *
1350 * use memcpy() instead of *vec = *vec_template so that GCC replaces it
1351 * by __memcpy() when KASAN is active
1352 */
1353 memcpy(&ibm_architecture_vec, &ibm_architecture_vec_template,
1354 sizeof(ibm_architecture_vec));
1355
1356 prom_strscpy_pad(ibm_architecture_vec.vec7.os_id, linux_banner, 256);
1357
1358 if (prop_len > 1) {
1359 int i;
1360 u8 vec[8];
1361 prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n",
1362 prop_len);
1363 if (prop_len > sizeof(vec))
1364 prom_printf("WARNING: ibm,arch-vec-5-platform-support longer than expected (len: %d)\n",
1365 prop_len);
1366 prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", &vec, sizeof(vec));
1367 for (i = 0; i < prop_len; i += 2) {
1368 prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2, vec[i], vec[i + 1]);
1369 prom_parse_platform_support(vec[i], vec[i + 1], &supported);
1370 }
1371 }
1372
1373 if (supported.radix_mmu && IS_ENABLED(CONFIG_PPC_RADIX_MMU)) {
1374 /* Radix preferred - Check if GTSE is also supported */
1375 prom_debug("Asking for radix\n");
1376 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX);
1377 if (supported.radix_gtse)
1378 ibm_architecture_vec.vec5.radix_ext =
1379 OV5_FEAT(OV5_RADIX_GTSE);
1380 else
1381 prom_debug("Radix GTSE isn't supported\n");
1382 } else if (supported.hash_mmu) {
1383 /* Default to hash mmu (if we can) */
1384 prom_debug("Asking for hash\n");
1385 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH);
1386 } else {
1387 /* We're probably on a legacy hypervisor */
1388 prom_debug("Assuming legacy hash support\n");
1389 }
1390
1391 if (supported.xive) {
1392 prom_debug("Asking for XIVE\n");
1393 ibm_architecture_vec.vec5.intarch = OV5_FEAT(OV5_XIVE_EXPLOIT);
1394 }
1395 }
1396
1397 static void __init prom_send_capabilities(void)
1398 {
1399 ihandle root;
1400 prom_arg_t ret;
1401 u32 cores;
1402
1403 /* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */
1404 prom_check_platform_support();
1405
1406 root = call_prom("open", 1, 1, ADDR("/"));
1407 if (root != 0) {
1408 /* We need to tell the FW about the number of cores we support.
1409 *
1410 * To do that, we count the number of threads on the first core
1411 * (we assume this is the same for all cores) and use it to
1412 * divide NR_CPUS.
1413 */
1414
1415 cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
1416 prom_printf("Max number of cores passed to firmware: %u (NR_CPUS = %d)\n",
1417 cores, NR_CPUS);
1418
1419 ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores);
1420
1421 /* try calling the ibm,client-architecture-support method */
1422 prom_printf("Calling ibm,client-architecture-support...");
1423 if (call_prom_ret("call-method", 3, 2, &ret,
1424 ADDR("ibm,client-architecture-support"),
1425 root,
1426 ADDR(&ibm_architecture_vec)) == 0) {
1427 /* the call exists... */
1428 if (ret)
1429 prom_printf("\nWARNING: ibm,client-architecture"
1430 "-support call FAILED!\n");
1431 call_prom("close", 1, 0, root);
1432 prom_printf(" done\n");
1433 return;
1434 }
1435 call_prom("close", 1, 0, root);
1436 prom_printf(" not implemented\n");
1437 }
1438
1439 #ifdef __BIG_ENDIAN__
1440 {
1441 ihandle elfloader;
1442
1443 /* no ibm,client-architecture-support call, try the old way */
1444 elfloader = call_prom("open", 1, 1,
1445 ADDR("/packages/elf-loader"));
1446 if (elfloader == 0) {
1447 prom_printf("couldn't open /packages/elf-loader\n");
1448 return;
1449 }
1450 call_prom("call-method", 3, 1, ADDR("process-elf-header"),
1451 elfloader, ADDR(&fake_elf));
1452 call_prom("close", 1, 0, elfloader);
1453 }
1454 #endif /* __BIG_ENDIAN__ */
1455 }
1456 #endif /* CONFIG_PPC_PSERIES */
1457
1458 /*
1459 * Memory allocation strategy... our layout is normally:
1460 *
1461 * at 14Mb or more we have vmlinux, then a gap and initrd. In some
1462 * rare cases, initrd might end up being before the kernel though.
1463 * We assume this won't overwrite the final kernel at 0; we have no
1464 * provision to handle that in this version, but it should hopefully
1465 * never happen.
1466 *
1467 * alloc_top is set to the top of RMO, eventually shrink down if the
1468 * TCEs overlap
1469 *
1470 * alloc_bottom is set to the top of kernel/initrd
1471 *
1472 * from there, allocations are done this way : rtas is allocated
1473 * topmost, and the device-tree is allocated from the bottom. We try
1474 * to grow the device-tree allocation as we progress. If we can't,
1475 * then we fail, we don't currently have a facility to restart
1476 * elsewhere, but that shouldn't be necessary.
1477 *
1478 * Note that calls to reserve_mem have to be done explicitly, memory
1479 * allocated with either alloc_up or alloc_down isn't automatically
1480 * reserved.
1481 */
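/*
 * Rough picture of the layout described above (addresses illustrative only):
 *
 *	0 .... vmlinux @ ~14MB .... initrd .... alloc_bottom
 *	                            device-tree grows up from alloc_bottom
 *	RTAS is carved downward from alloc_top (top of RMO)
 *	TCE tables, if any, are carved downward from alloc_top_high (top of RAM)
 */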
1482
1483
1484 /*
1485 * Allocates memory in the RMO upward from the kernel/initrd
1486 *
1487 * When align is 0, this is a special case, it means to allocate in place
1488 * at the current location of alloc_bottom or fail (that is basically
1489 * extending the previous allocation). Used for the device-tree flattening
1490 */
1491 static unsigned long __init alloc_up(unsigned long size, unsigned long align)
1492 {
1493 unsigned long base = alloc_bottom;
1494 unsigned long addr = 0;
1495
1496 if (align)
1497 base = ALIGN(base, align);
1498 prom_debug("%s(%lx, %lx)\n", __func__, size, align);
1499 if (ram_top == 0)
1500 prom_panic("alloc_up() called with mem not initialized\n");
1501
1502 if (align)
1503 base = ALIGN(alloc_bottom, align);
1504 else
1505 base = alloc_bottom;
1506
1507 for(; (base + size) <= alloc_top;
1508 base = ALIGN(base + 0x100000, align)) {
1509 prom_debug(" trying: 0x%lx\n\r", base);
1510 addr = (unsigned long)prom_claim(base, size, 0);
1511 if (addr != PROM_ERROR && addr != 0)
1512 break;
1513 addr = 0;
1514 if (align == 0)
1515 break;
1516 }
1517 if (addr == 0)
1518 return 0;
1519 alloc_bottom = addr + size;
1520
1521 prom_debug(" -> %lx\n", addr);
1522 prom_debug(" alloc_bottom : %lx\n", alloc_bottom);
1523 prom_debug(" alloc_top : %lx\n", alloc_top);
1524 prom_debug(" alloc_top_hi : %lx\n", alloc_top_high);
1525 prom_debug(" rmo_top : %lx\n", rmo_top);
1526 prom_debug(" ram_top : %lx\n", ram_top);
1527
1528 return addr;
1529 }
1530
1531 /*
1532 * Allocates memory downward, either from top of RMO, or if highmem
1533 * is set, from the top of RAM. Note that this one doesn't handle
1534 * failures. It does claim memory if highmem is not set.
1535 */
1536 static unsigned long __init alloc_down(unsigned long size, unsigned long align,
1537 int highmem)
1538 {
1539 unsigned long base, addr = 0;
1540
1541 prom_debug("%s(%lx, %lx, %s)\n", __func__, size, align,
1542 highmem ? "(high)" : "(low)");
1543 if (ram_top == 0)
1544 prom_panic("alloc_down() called with mem not initialized\n");
1545
1546 if (highmem) {
1547 /* Carve out storage for the TCE table. */
1548 addr = ALIGN_DOWN(alloc_top_high - size, align);
1549 if (addr <= alloc_bottom)
1550 return 0;
1551 /* Will we bump into the RMO ? If yes, check out that we
1552 * didn't overlap existing allocations there, if we did,
1553 * we are dead, we must be the first in town !
1554 */
1555 if (addr < rmo_top) {
1556 /* Good, we are first */
1557 if (alloc_top == rmo_top)
1558 alloc_top = rmo_top = addr;
1559 else
1560 return 0;
1561 }
1562 alloc_top_high = addr;
1563 goto bail;
1564 }
1565
1566 base = ALIGN_DOWN(alloc_top - size, align);
1567 for (; base > alloc_bottom;
1568 base = ALIGN_DOWN(base - 0x100000, align)) {
1569 prom_debug(" trying: 0x%lx\n\r", base);
1570 addr = (unsigned long)prom_claim(base, size, 0);
1571 if (addr != PROM_ERROR && addr != 0)
1572 break;
1573 addr = 0;
1574 }
1575 if (addr == 0)
1576 return 0;
1577 alloc_top = addr;
1578
1579 bail:
1580 prom_debug(" -> %lx\n", addr);
1581 prom_debug(" alloc_bottom : %lx\n", alloc_bottom);
1582 prom_debug(" alloc_top : %lx\n", alloc_top);
1583 prom_debug(" alloc_top_hi : %lx\n", alloc_top_high);
1584 prom_debug(" rmo_top : %lx\n", rmo_top);
1585 prom_debug(" ram_top : %lx\n", ram_top);
1586
1587 return addr;
1588 }
1589
1590 /*
1591 * Parse a "reg" cell
1592 */
1593 static unsigned long __init prom_next_cell(int s, cell_t **cellp)
1594 {
1595 cell_t *p = *cellp;
1596 unsigned long r = 0;
1597
1598 /* Ignore more than 2 cells */
1599 while (s > sizeof(unsigned long) / 4) {
1600 p++;
1601 s--;
1602 }
1603 r = be32_to_cpu(*p++);
1604 #ifdef CONFIG_PPC64
1605 if (s > 1) {
1606 r <<= 32;
1607 r |= be32_to_cpu(*(p++));
1608 }
1609 #endif
1610 *cellp = p;
1611 return r;
1612 }
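/*
 * Illustrative example: with two address cells { 0x00000001, 0x00000000 } on
 * ppc64, prom_next_cell(2, &p) returns 0x100000000 and advances p by two
 * cells; on ppc32 the extra high-order cells are simply skipped.
 */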
1613
1614 /*
1615 * Very dumb function for adding to the memory reserve list, but
1616 * we don't need anything smarter at this point
1617 *
1618 * XXX Eventually check for collisions. They should NEVER happen.
1619 * If problems seem to show up, it would be a good start to track
1620 * them down.
1621 */
1622 static void __init reserve_mem(u64 base, u64 size)
1623 {
1624 u64 top = base + size;
1625 unsigned long cnt = mem_reserve_cnt;
1626
1627 if (size == 0)
1628 return;
1629
1630 /* We need to always keep one empty entry so that we
1631 * have our terminator with "size" set to 0 since we are
1632 * dumb and just copy this entire array to the boot params
1633 */
1634 base = ALIGN_DOWN(base, PAGE_SIZE);
1635 top = ALIGN(top, PAGE_SIZE);
1636 size = top - base;
1637
1638 if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
1639 prom_panic("Memory reserve map exhausted !\n");
1640 mem_reserve_map[cnt].base = cpu_to_be64(base);
1641 mem_reserve_map[cnt].size = cpu_to_be64(size);
1642 mem_reserve_cnt = cnt + 1;
1643 }
1644
1645 /*
1646 * Initialize the memory allocation mechanism, parse the "memory" nodes and
1647 * obtain from them the top of memory and of the RMO to set up our local allocator.
1648 */
1649 static void __init prom_init_mem(void)
1650 {
1651 phandle node;
1652 char type[64];
1653 unsigned int plen;
1654 cell_t *p, *endp;
1655 __be32 val;
1656 u32 rac, rsc;
1657
1658 /*
1659 * We iterate the memory nodes to find
1660 * 1) top of RMO (first node)
1661 * 2) top of memory
1662 */
1663 val = cpu_to_be32(2);
1664 prom_getprop(prom.root, "#address-cells", &val, sizeof(val));
1665 rac = be32_to_cpu(val);
1666 val = cpu_to_be32(1);
1667 prom_getprop(prom.root, "#size-cells", &val, sizeof(rsc));
1668 rsc = be32_to_cpu(val);
1669 prom_debug("root_addr_cells: %x\n", rac);
1670 prom_debug("root_size_cells: %x\n", rsc);
1671
1672 prom_debug("scanning memory:\n");
1673
1674 for (node = 0; prom_next_node(&node); ) {
1675 type[0] = 0;
1676 prom_getprop(node, "device_type", type, sizeof(type));
1677
1678 if (type[0] == 0) {
1679 /*
1680 * CHRP Longtrail machines have no device_type
1681 * on the memory node, so check the name instead...
1682 */
1683 prom_getprop(node, "name", type, sizeof(type));
1684 }
1685 if (prom_strcmp(type, "memory"))
1686 continue;
1687
1688 plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf));
1689 if (plen > sizeof(regbuf)) {
1690 prom_printf("memory node too large for buffer !\n");
1691 plen = sizeof(regbuf);
1692 }
1693 p = regbuf;
1694 endp = p + (plen / sizeof(cell_t));
1695
1696 #ifdef DEBUG_PROM
1697 memset(prom_scratch, 0, sizeof(prom_scratch));
1698 call_prom("package-to-path", 3, 1, node, prom_scratch,
1699 sizeof(prom_scratch) - 1);
1700 prom_debug(" node %s :\n", prom_scratch);
1701 #endif /* DEBUG_PROM */
1702
1703 while ((endp - p) >= (rac + rsc)) {
1704 unsigned long base, size;
1705
1706 base = prom_next_cell(rac, &p);
1707 size = prom_next_cell(rsc, &p);
1708
1709 if (size == 0)
1710 continue;
1711 prom_debug(" %lx %lx\n", base, size);
1712 if (base == 0 && (of_platform & PLATFORM_LPAR))
1713 rmo_top = size;
1714 if ((base + size) > ram_top)
1715 ram_top = base + size;
1716 }
1717 }
1718
1719 alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000);
1720
1721 /*
1722 * If prom_memory_limit is set we reduce the upper limits *except* for
1723 * alloc_top_high. This must be the real top of RAM so we can put
1724 * TCE's up there.
1725 */
1726
1727 alloc_top_high = ram_top;
1728
1729 if (prom_memory_limit) {
1730 if (prom_memory_limit <= alloc_bottom) {
1731 prom_printf("Ignoring mem=%lx <= alloc_bottom.\n",
1732 prom_memory_limit);
1733 prom_memory_limit = 0;
1734 } else if (prom_memory_limit >= ram_top) {
1735 prom_printf("Ignoring mem=%lx >= ram_top.\n",
1736 prom_memory_limit);
1737 prom_memory_limit = 0;
1738 } else {
1739 ram_top = prom_memory_limit;
1740 rmo_top = min(rmo_top, prom_memory_limit);
1741 }
1742 }
1743
1744 /*
1745 * Setup our top alloc point, that is top of RMO or top of
1746 * segment 0 when running non-LPAR.
1747 * Some RS64 machines have buggy firmware where claims up at
1748 * 1GB fail. Cap at 768MB as a workaround.
1749 * Since 768MB is plenty of room, and we need to cap to something
1750 * reasonable on 32-bit, cap at 768MB on all machines.
1751 */
1752 if (!rmo_top)
1753 rmo_top = ram_top;
1754 rmo_top = min(0x30000000ul, rmo_top);
1755 alloc_top = rmo_top;
1756 alloc_top_high = ram_top;
1757
1758 /*
1759 * Check if we have an initrd after the kernel but still inside
1760 * the RMO. If we do, move our bottom point to after it.
1761 */
1762 if (prom_initrd_start &&
1763 prom_initrd_start < rmo_top &&
1764 prom_initrd_end > alloc_bottom)
1765 alloc_bottom = PAGE_ALIGN(prom_initrd_end);
1766
1767 prom_printf("memory layout at init:\n");
1768 prom_printf(" memory_limit : %lx (16 MB aligned)\n",
1769 prom_memory_limit);
1770 prom_printf(" alloc_bottom : %lx\n", alloc_bottom);
1771 prom_printf(" alloc_top : %lx\n", alloc_top);
1772 prom_printf(" alloc_top_hi : %lx\n", alloc_top_high);
1773 prom_printf(" rmo_top : %lx\n", rmo_top);
1774 prom_printf(" ram_top : %lx\n", ram_top);
1775 }
1776
1777 static void __init prom_close_stdin(void)
1778 {
1779 __be32 val;
1780 ihandle stdin;
1781
1782 if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) {
1783 stdin = be32_to_cpu(val);
1784 call_prom("close", 1, 0, stdin);
1785 }
1786 }
1787
1788 #ifdef CONFIG_PPC_SVM
1789 static int prom_rtas_hcall(uint64_t args)
1790 {
1791 register uint64_t arg1 asm("r3") = H_RTAS;
1792 register uint64_t arg2 asm("r4") = args;
1793
1794 asm volatile("sc 1\n" : "=r" (arg1) :
1795 "r" (arg1),
1796 "r" (arg2) :);
1797 srr_regs_clobbered();
1798
1799 return arg1;
1800 }
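/*
 * Descriptive note: "sc 1" traps to the hypervisor with r3 holding the H_RTAS
 * hypercall number and r4 the address of the struct rtas_args to execute;
 * the hypercall status comes back in r3.
 */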
1801
1802 static struct rtas_args __prombss os_term_args;
1803
1804 static void __init prom_rtas_os_term(char *str)
1805 {
1806 phandle rtas_node;
1807 __be32 val;
1808 u32 token;
1809
1810 prom_debug("%s: start...\n", __func__);
1811 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1812 prom_debug("rtas_node: %x\n", rtas_node);
1813 if (!PHANDLE_VALID(rtas_node))
1814 return;
1815
1816 val = 0;
1817 prom_getprop(rtas_node, "ibm,os-term", &val, sizeof(val));
1818 token = be32_to_cpu(val);
1819 prom_debug("ibm,os-term: %x\n", token);
1820 if (token == 0)
1821 prom_panic("Could not get token for ibm,os-term\n");
1822 os_term_args.token = cpu_to_be32(token);
1823 os_term_args.nargs = cpu_to_be32(1);
1824 os_term_args.nret = cpu_to_be32(1);
1825 os_term_args.args[0] = cpu_to_be32(__pa(str));
1826 prom_rtas_hcall((uint64_t)&os_term_args);
1827 }
1828 #endif /* CONFIG_PPC_SVM */
1829
1830 /*
1831 * Allocate room for and instantiate RTAS
1832 */
1833 static void __init prom_instantiate_rtas(void)
1834 {
1835 phandle rtas_node;
1836 ihandle rtas_inst;
1837 u32 base, entry = 0;
1838 __be32 val;
1839 u32 size = 0;
1840
1841 prom_debug("prom_instantiate_rtas: start...\n");
1842
1843 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1844 prom_debug("rtas_node: %x\n", rtas_node);
1845 if (!PHANDLE_VALID(rtas_node))
1846 return;
1847
1848 val = 0;
1849 prom_getprop(rtas_node, "rtas-size", &val, sizeof(size));
1850 size = be32_to_cpu(val);
1851 if (size == 0)
1852 return;
1853
1854 base = alloc_down(size, PAGE_SIZE, 0);
1855 if (base == 0)
1856 prom_panic("Could not allocate memory for RTAS\n");
1857
1858 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
1859 if (!IHANDLE_VALID(rtas_inst)) {
1860 prom_printf("opening rtas package failed (%x)\n", rtas_inst);
1861 return;
1862 }
1863
1864 prom_printf("instantiating rtas at 0x%x...", base);
1865
1866 if (call_prom_ret("call-method", 3, 2, &entry,
1867 ADDR("instantiate-rtas"),
1868 rtas_inst, base) != 0
1869 || entry == 0) {
1870 prom_printf(" failed\n");
1871 return;
1872 }
1873 prom_printf(" done\n");
1874
1875 reserve_mem(base, size);
1876
1877 val = cpu_to_be32(base);
1878 prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
1879 &val, sizeof(val));
1880 val = cpu_to_be32(entry);
1881 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
1882 &val, sizeof(val));
1883
1884 /* Check if it supports "query-cpu-stopped-state" */
1885 if (prom_getprop(rtas_node, "query-cpu-stopped-state",
1886 &val, sizeof(val)) != PROM_ERROR)
1887 rtas_has_query_cpu_stopped = true;
1888
1889 prom_debug("rtas base = 0x%x\n", base);
1890 prom_debug("rtas entry = 0x%x\n", entry);
1891 prom_debug("rtas size = 0x%x\n", size);
1892
1893 prom_debug("prom_instantiate_rtas: end...\n");
1894 }
1895
1896 #ifdef CONFIG_PPC64
1897 /*
1898 * Allocate room for and instantiate Stored Measurement Log (SML)
1899 */
1900 static void __init prom_instantiate_sml(void)
1901 {
1902 phandle ibmvtpm_node;
1903 ihandle ibmvtpm_inst;
1904 u32 entry = 0, size = 0, succ = 0;
1905 u64 base;
1906 __be32 val;
1907
1908 prom_debug("prom_instantiate_sml: start...\n");
1909
1910 ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/vdevice/vtpm"));
1911 prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
1912 if (!PHANDLE_VALID(ibmvtpm_node))
1913 return;
1914
1915 ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/vdevice/vtpm"));
1916 if (!IHANDLE_VALID(ibmvtpm_inst)) {
1917 prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);
1918 return;
1919 }
1920
1921 if (prom_getprop(ibmvtpm_node, "ibm,sml-efi-reformat-supported",
1922 &val, sizeof(val)) != PROM_ERROR) {
1923 if (call_prom_ret("call-method", 2, 2, &succ,
1924 ADDR("reformat-sml-to-efi-alignment"),
1925 ibmvtpm_inst) != 0 || succ == 0) {
1926 prom_printf("Reformat SML to EFI alignment failed\n");
1927 return;
1928 }
1929
1930 if (call_prom_ret("call-method", 2, 2, &size,
1931 ADDR("sml-get-allocated-size"),
1932 ibmvtpm_inst) != 0 || size == 0) {
1933 prom_printf("SML get allocated size failed\n");
1934 return;
1935 }
1936 } else {
1937 if (call_prom_ret("call-method", 2, 2, &size,
1938 ADDR("sml-get-handover-size"),
1939 ibmvtpm_inst) != 0 || size == 0) {
1940 prom_printf("SML get handover size failed\n");
1941 return;
1942 }
1943 }
1944
1945 base = alloc_down(size, PAGE_SIZE, 0);
1946 if (base == 0)
1947 prom_panic("Could not allocate memory for sml\n");
1948
1949 prom_printf("instantiating sml at 0x%llx...", base);
1950
1951 memset((void *)base, 0, size);
1952
1953 if (call_prom_ret("call-method", 4, 2, &entry,
1954 ADDR("sml-handover"),
1955 ibmvtpm_inst, size, base) != 0 || entry == 0) {
1956 prom_printf("SML handover failed\n");
1957 return;
1958 }
1959 prom_printf(" done\n");
1960
1961 reserve_mem(base, size);
1962
1963 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-base",
1964 &base, sizeof(base));
1965 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size",
1966 &size, sizeof(size));
1967
1968 prom_debug("sml base = 0x%llx\n", base);
1969 prom_debug("sml size = 0x%x\n", size);
1970
1971 prom_debug("prom_instantiate_sml: end...\n");
1972 }
1973
1974 /*
1975 * Allocate room for and initialize TCE tables
1976 */
1977 #ifdef __BIG_ENDIAN__
1978 static void __init prom_initialize_tce_table(void)
1979 {
1980 phandle node;
1981 ihandle phb_node;
1982 char compatible[64], type[64], model[64];
1983 char *path = prom_scratch;
1984 u64 base, align;
1985 u32 minalign, minsize;
1986 u64 tce_entry, *tce_entryp;
1987 u64 local_alloc_top, local_alloc_bottom;
1988 u64 i;
1989
1990 if (prom_iommu_off)
1991 return;
1992
1993 prom_debug("starting prom_initialize_tce_table\n");
1994
1995 /* Cache current top of allocs so we reserve a single block */
1996 local_alloc_top = alloc_top_high;
1997 local_alloc_bottom = local_alloc_top;
1998
1999 /* Search all nodes looking for PHBs. */
2000 for (node = 0; prom_next_node(&node); ) {
2001 compatible[0] = 0;
2002 type[0] = 0;
2003 model[0] = 0;
2004 prom_getprop(node, "compatible",
2005 compatible, sizeof(compatible));
2006 prom_getprop(node, "device_type", type, sizeof(type));
2007 prom_getprop(node, "model", model, sizeof(model));
2008
2009 if ((type[0] == 0) || (prom_strstr(type, "pci") == NULL))
2010 continue;
2011
2012 /* Keep the old logic intact to avoid regression. */
2013 if (compatible[0] != 0) {
2014 if ((prom_strstr(compatible, "python") == NULL) &&
2015 (prom_strstr(compatible, "Speedwagon") == NULL) &&
2016 (prom_strstr(compatible, "Winnipeg") == NULL))
2017 continue;
2018 } else if (model[0] != 0) {
2019 if ((prom_strstr(model, "ython") == NULL) &&
2020 (prom_strstr(model, "peedwagon") == NULL) &&
2021 (prom_strstr(model, "innipeg") == NULL))
2022 continue;
2023 }
2024
2025 if (prom_getprop(node, "tce-table-minalign", &minalign,
2026 sizeof(minalign)) == PROM_ERROR)
2027 minalign = 0;
2028 if (prom_getprop(node, "tce-table-minsize", &minsize,
2029 sizeof(minsize)) == PROM_ERROR)
2030 minsize = 4UL << 20;
2031
2032 /*
2033 * Even though we read what OF wants, we just set the table
2034 * size to 4 MB. This is enough to map 2GB of PCI DMA space.
2035 * By doing this, we avoid the pitfalls of trying to DMA to
2036 * MMIO space and the DMA alias hole.
2037 */
2038 minsize = 4UL << 20;
2039
2040 /* Align to the greater of the align or size */
2041 align = max(minalign, minsize);
2042 base = alloc_down(minsize, align, 1);
2043 if (base == 0)
2044 prom_panic("ERROR, cannot find space for TCE table.\n");
2045 if (base < local_alloc_bottom)
2046 local_alloc_bottom = base;
2047
2048 /* It seems OF doesn't null-terminate the path :-( */
2049 memset(path, 0, sizeof(prom_scratch));
2050 /* Call OF to setup the TCE hardware */
2051 if (call_prom("package-to-path", 3, 1, node,
2052 path, sizeof(prom_scratch) - 1) == PROM_ERROR) {
2053 prom_printf("package-to-path failed\n");
2054 }
2055
2056 /* Save away the TCE table attributes for later use. */
2057 prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
2058 prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
2059
2060 prom_debug("TCE table: %s\n", path);
2061 prom_debug("\tnode = 0x%x\n", node);
2062 prom_debug("\tbase = 0x%llx\n", base);
2063 prom_debug("\tsize = 0x%x\n", minsize);
2064
2065 /* Initialize the table to have a one-to-one mapping
2066 * over the allocated size.
2067 */
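/* The low 0x3 bits of each 64-bit entry mark the page as readable and
 * writable for the device; the rest is the real page address.
 */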
2068 tce_entryp = (u64 *)base;
2069 for (i = 0; i < (minsize >> 3); tce_entryp++, i++) {
2070 tce_entry = (i << PAGE_SHIFT);
2071 tce_entry |= 0x3;
2072 *tce_entryp = tce_entry;
2073 }
2074
2075 prom_printf("opening PHB %s", path);
2076 phb_node = call_prom("open", 1, 1, path);
2077 if (phb_node == 0)
2078 prom_printf("... failed\n");
2079 else
2080 prom_printf("... done\n");
2081
2082 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
2083 phb_node, -1, minsize,
2084 (u32) base, (u32) (base >> 32));
2085 call_prom("close", 1, 0, phb_node);
2086 }
2087
2088 reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
2089
2090 /* These are only really needed if there is a memory limit in
2091 * effect, but we don't know, so export them always. */
2092 prom_tce_alloc_start = local_alloc_bottom;
2093 prom_tce_alloc_end = local_alloc_top;
2094
2095 /* Flag the first invalid entry */
2096 prom_debug("ending prom_initialize_tce_table\n");
2097 }
2098 #endif /* __BIG_ENDIAN__ */
2099 #endif /* CONFIG_PPC64 */
2100
2101 /*
2102 * With CHRP SMP we need to use the OF to start the other processors.
2103 * We can't wait until smp_boot_cpus (the OF is trashed by then)
2104 * so we have to put the processors into a holding pattern controlled
2105 * by the kernel (not OF) before we destroy the OF.
2106 *
2107 * This uses a chunk of low memory, puts some holding pattern
2108 * code there and sends the other processors off to there until
2109 * smp_boot_cpus tells them to do something. The holding pattern
2110 * checks that address until its cpu # appears there; when it does, that
2111 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care
2112 * of setting those values.
2113 *
2114 * We also use physical address 0x4 here to tell when a cpu
2115 * is in its holding pattern code.
2116 *
2117 * -- Cort
2118 */
2119 /*
2120 * We want to reference the copy of __secondary_hold_* in the
2121 * 0 - 0x100 address range
2122 */
2123 #define LOW_ADDR(x) (((unsigned long) &(x)) & 0xff)
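/* For example, a symbol linked at offset 0xa4 in the image yields 0xa4,
 * i.e. its location within the first 0x100 bytes that prom_init() copies
 * down to physical address 0 with copy_and_flush().
 */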
2124
2125 static void __init prom_hold_cpus(void)
2126 {
2127 unsigned long i;
2128 phandle node;
2129 char type[64];
2130 unsigned long *spinloop
2131 = (void *) LOW_ADDR(__secondary_hold_spinloop);
2132 unsigned long *acknowledge
2133 = (void *) LOW_ADDR(__secondary_hold_acknowledge);
2134 unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
2135
2136 /*
2137 * On pseries, if RTAS supports "query-cpu-stopped-state",
2138 * we skip this stage, the CPUs will be started by the
2139 * kernel using RTAS.
2140 */
2141 if ((of_platform == PLATFORM_PSERIES ||
2142 of_platform == PLATFORM_PSERIES_LPAR) &&
2143 rtas_has_query_cpu_stopped) {
2144 prom_printf("prom_hold_cpus: skipped\n");
2145 return;
2146 }
2147
2148 prom_debug("prom_hold_cpus: start...\n");
2149 prom_debug(" 1) spinloop = 0x%lx\n", (unsigned long)spinloop);
2150 prom_debug(" 1) *spinloop = 0x%lx\n", *spinloop);
2151 prom_debug(" 1) acknowledge = 0x%lx\n",
2152 (unsigned long)acknowledge);
2153 prom_debug(" 1) *acknowledge = 0x%lx\n", *acknowledge);
2154 prom_debug(" 1) secondary_hold = 0x%lx\n", secondary_hold);
2155
2156 /* Set the common spinloop variable, so all of the secondary cpus
2157 * will block when they are awakened from their OF spinloop.
2158 * This must occur for both SMP and non-SMP kernels, since OF will
2159 * be trashed when we move the kernel.
2160 */
2161 *spinloop = 0;
2162
2163 /* look for cpus */
2164 for (node = 0; prom_next_node(&node); ) {
2165 unsigned int cpu_no;
2166 __be32 reg;
2167
2168 type[0] = 0;
2169 prom_getprop(node, "device_type", type, sizeof(type));
2170 if (prom_strcmp(type, "cpu") != 0)
2171 continue;
2172
2173 /* Skip non-configured cpus. */
2174 if (prom_getprop(node, "status", type, sizeof(type)) > 0)
2175 if (prom_strcmp(type, "okay") != 0)
2176 continue;
2177
2178 reg = cpu_to_be32(-1); /* make sparse happy */
2179 prom_getprop(node, "reg", &reg, sizeof(reg));
2180 cpu_no = be32_to_cpu(reg);
2181
2182 prom_debug("cpu hw idx = %u\n", cpu_no);
2183
2184 /* Init the acknowledge var which will be reset by
2185 * the secondary cpu when it awakens from its OF
2186 * spinloop.
2187 */
2188 *acknowledge = (unsigned long)-1;
2189
2190 if (cpu_no != prom.cpu) {
2191 /* Primary Thread of non-boot cpu or any thread */
2192 prom_printf("starting cpu hw idx %u... ", cpu_no);
2193 call_prom("start-cpu", 3, 0, node,
2194 secondary_hold, cpu_no);
2195
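/* Spin for a bounded number of iterations waiting for the
 * secondary to write its hw index into *acknowledge.
 */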
2196 for (i = 0; (i < 100000000) &&
2197 (*acknowledge == ((unsigned long)-1)); i++ )
2198 mb();
2199
2200 if (*acknowledge == cpu_no)
2201 prom_printf("done\n");
2202 else
2203 prom_printf("failed: %lx\n", *acknowledge);
2204 }
2205 #ifdef CONFIG_SMP
2206 else
2207 prom_printf("boot cpu hw idx %u\n", cpu_no);
2208 #endif /* CONFIG_SMP */
2209 }
2210
2211 prom_debug("prom_hold_cpus: end...\n");
2212 }
2213
2214
2215 static void __init prom_init_client_services(unsigned long pp)
2216 {
2217 /* Get a handle to the prom entry point before anything else */
2218 prom_entry = pp;
2219
2220 /* get a handle for the stdout device */
2221 prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
2222 if (!PHANDLE_VALID(prom.chosen))
2223 prom_panic("cannot find chosen"); /* msg won't be printed :( */
2224
2225 /* get device tree root */
2226 prom.root = call_prom("finddevice", 1, 1, ADDR("/"));
2227 if (!PHANDLE_VALID(prom.root))
2228 prom_panic("cannot find device tree root"); /* msg won't be printed :( */
2229
2230 prom.mmumap = 0;
2231 }
2232
2233 #ifdef CONFIG_PPC32
2234 /*
2235 * For really old powermacs, we need to map things we claim.
2236 * For that, we need the ihandle of the mmu.
2237 * Also, on the longtrail, we need to work around other bugs.
2238 */
2239 static void __init prom_find_mmu(void)
2240 {
2241 phandle oprom;
2242 char version[64];
2243
2244 oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
2245 if (!PHANDLE_VALID(oprom))
2246 return;
2247 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
2248 return;
2249 version[sizeof(version) - 1] = 0;
2250 /* XXX might need to add other versions here */
2251 if (prom_strcmp(version, "Open Firmware, 1.0.5") == 0)
2252 of_workarounds = OF_WA_CLAIM;
2253 else if (prom_strncmp(version, "FirmWorks,3.", 12) == 0) {
2254 of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
2255 call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
2256 } else
2257 return;
2258 prom.memory = call_prom("open", 1, 1, ADDR("/memory"));
2259 prom_getprop(prom.chosen, "mmu", &prom.mmumap,
2260 sizeof(prom.mmumap));
2261 prom.mmumap = be32_to_cpu(prom.mmumap);
2262 if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap))
2263 of_workarounds &= ~OF_WA_CLAIM; /* hmmm */
2264 }
2265 #else
2266 #define prom_find_mmu()
2267 #endif
2268
2269 static void __init prom_init_stdout(void)
2270 {
2271 char *path = of_stdout_device;
2272 char type[16];
2273 phandle stdout_node;
2274 __be32 val;
2275
2276 if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0)
2277 prom_panic("cannot find stdout");
2278
2279 prom.stdout = be32_to_cpu(val);
2280
2281 /* Get the full OF pathname of the stdout device */
2282 memset(path, 0, 256);
2283 call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
2284 prom_printf("OF stdout device is: %s\n", of_stdout_device);
2285 prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
2286 path, prom_strlen(path) + 1);
2287
2288 /* instance-to-package fails on PA-Semi */
2289 stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
2290 if (stdout_node != PROM_ERROR) {
2291 val = cpu_to_be32(stdout_node);
2292
2293 /* If it's a display, note it */
2294 memset(type, 0, sizeof(type));
2295 prom_getprop(stdout_node, "device_type", type, sizeof(type));
2296 if (prom_strcmp(type, "display") == 0)
2297 prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
2298 }
2299 }
2300
2301 static int __init prom_find_machine_type(void)
2302 {
2303 char compat[256];
2304 int len, i = 0;
2305 #ifdef CONFIG_PPC64
2306 phandle rtas;
2307 int x;
2308 #endif
2309
2310 /* Look for a PowerMac or a Cell */
2311 len = prom_getprop(prom.root, "compatible",
2312 compat, sizeof(compat)-1);
2313 if (len > 0) {
2314 compat[len] = 0;
2315 while (i < len) {
2316 char *p = &compat[i];
2317 int sl = prom_strlen(p);
2318 if (sl == 0)
2319 break;
2320 if (prom_strstr(p, "Power Macintosh") ||
2321 prom_strstr(p, "MacRISC"))
2322 return PLATFORM_POWERMAC;
2323 #ifdef CONFIG_PPC64
2324 /* We must make sure we don't detect the IBM Cell
2325 * blades as pSeries due to some firmware issues,
2326 * so we do it here.
2327 */
2328 if (prom_strstr(p, "IBM,CBEA") ||
2329 prom_strstr(p, "IBM,CPBW-1.0"))
2330 return PLATFORM_GENERIC;
2331 #endif /* CONFIG_PPC64 */
2332 i += sl + 1;
2333 }
2334 }
2335 #ifdef CONFIG_PPC64
2336 /* Try to figure out if it's an IBM pSeries or any other
2337 * PAPR compliant platform. We assume it is if:
2338 * - /device_type is "chrp" (please, do NOT use that for future
2339 * non-IBM designs!)
2340 * - it has /rtas
2341 */
2342 len = prom_getprop(prom.root, "device_type",
2343 compat, sizeof(compat)-1);
2344 if (len <= 0)
2345 return PLATFORM_GENERIC;
2346 if (prom_strcmp(compat, "chrp"))
2347 return PLATFORM_GENERIC;
2348
2349 /* Default to pSeries. We need to know if we are running LPAR */
2350 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
2351 if (!PHANDLE_VALID(rtas))
2352 return PLATFORM_GENERIC;
2353 x = prom_getproplen(rtas, "ibm,hypertas-functions");
2354 if (x != PROM_ERROR) {
2355 prom_debug("Hypertas detected, assuming LPAR !\n");
2356 return PLATFORM_PSERIES_LPAR;
2357 }
2358 return PLATFORM_PSERIES;
2359 #else
2360 return PLATFORM_GENERIC;
2361 #endif
2362 }
2363
2364 static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
2365 {
2366 return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
2367 }
2368
2369 /*
2370 * If we have a display that we don't know how to drive,
2371 * we will want to try to execute OF's open method for it
2372 * later. However, OF will probably fall over if we do that
2373 * after we've taken over the MMU.
2374 * So we check whether we will need to open the display,
2375 * and if so, open it now.
2376 */
2377 static void __init prom_check_displays(void)
2378 {
2379 char type[16], *path;
2380 phandle node;
2381 ihandle ih;
2382 int i;
2383
2384 static const unsigned char default_colors[] __initconst = {
2385 0x00, 0x00, 0x00,
2386 0x00, 0x00, 0xaa,
2387 0x00, 0xaa, 0x00,
2388 0x00, 0xaa, 0xaa,
2389 0xaa, 0x00, 0x00,
2390 0xaa, 0x00, 0xaa,
2391 0xaa, 0xaa, 0x00,
2392 0xaa, 0xaa, 0xaa,
2393 0x55, 0x55, 0x55,
2394 0x55, 0x55, 0xff,
2395 0x55, 0xff, 0x55,
2396 0x55, 0xff, 0xff,
2397 0xff, 0x55, 0x55,
2398 0xff, 0x55, 0xff,
2399 0xff, 0xff, 0x55,
2400 0xff, 0xff, 0xff
2401 };
2402 const unsigned char *clut;
2403
2404 prom_debug("Looking for displays\n");
2405 for (node = 0; prom_next_node(&node); ) {
2406 memset(type, 0, sizeof(type));
2407 prom_getprop(node, "device_type", type, sizeof(type));
2408 if (prom_strcmp(type, "display") != 0)
2409 continue;
2410
2411 /* It seems OF doesn't null-terminate the path :-( */
2412 path = prom_scratch;
2413 memset(path, 0, sizeof(prom_scratch));
2414
2415 /*
2416 * leave some room at the end of the path for appending extra
2417 * arguments
2418 */
2419 if (call_prom("package-to-path", 3, 1, node, path,
2420 sizeof(prom_scratch) - 10) == PROM_ERROR)
2421 continue;
2422 prom_printf("found display : %s, opening... ", path);
2423
2424 ih = call_prom("open", 1, 1, path);
2425 if (ih == 0) {
2426 prom_printf("failed\n");
2427 continue;
2428 }
2429
2430 /* Success */
2431 prom_printf("done\n");
2432 prom_setprop(node, path, "linux,opened", NULL, 0);
2433
2434 /* Setup a usable color table when the appropriate
2435 * method is available. Should update this to set-colors */
2436 clut = default_colors;
2437 for (i = 0; i < 16; i++, clut += 3)
2438 if (prom_set_color(ih, i, clut[0], clut[1],
2439 clut[2]) != 0)
2440 break;
2441
2442 #ifdef CONFIG_LOGO_LINUX_CLUT224
2443 clut = PTRRELOC(logo_linux_clut224.clut);
2444 for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3)
2445 if (prom_set_color(ih, i + 32, clut[0], clut[1],
2446 clut[2]) != 0)
2447 break;
2448 #endif /* CONFIG_LOGO_LINUX_CLUT224 */
2449
2450 #ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
2451 if (prom_getprop(node, "linux,boot-display", NULL, 0) !=
2452 PROM_ERROR) {
2453 u32 width, height, pitch, addr;
2454
2455 prom_printf("Setting btext !\n");
2456
2457 if (prom_getprop(node, "width", &width, 4) == PROM_ERROR)
2458 return;
2459
2460 if (prom_getprop(node, "height", &height, 4) == PROM_ERROR)
2461 return;
2462
2463 if (prom_getprop(node, "linebytes", &pitch, 4) == PROM_ERROR)
2464 return;
2465
2466 if (prom_getprop(node, "address", &addr, 4) == PROM_ERROR)
2467 return;
2468
2469 prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
2470 width, height, pitch, addr);
2471 btext_setup_display(width, height, 8, pitch, addr);
2472 btext_prepare_BAT();
2473 }
2474 #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
2475 }
2476 }
2477
2478
2479 /* Return (relocated) pointer to this much memory: moves initrd if reqd. */
2480 static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
2481 unsigned long needed, unsigned long align)
2482 {
2483 void *ret;
2484
2485 *mem_start = ALIGN(*mem_start, align);
2486 while ((*mem_start + needed) > *mem_end) {
2487 unsigned long room, chunk;
2488
2489 prom_debug("Chunk exhausted, claiming more at %lx...\n",
2490 alloc_bottom);
2491 room = alloc_top - alloc_bottom;
2492 if (room > DEVTREE_CHUNK_SIZE)
2493 room = DEVTREE_CHUNK_SIZE;
2494 if (room < PAGE_SIZE)
2495 prom_panic("No memory for flatten_device_tree "
2496 "(no room)\n");
2497 chunk = alloc_up(room, 0);
2498 if (chunk == 0)
2499 prom_panic("No memory for flatten_device_tree "
2500 "(claim failed)\n");
2501 *mem_end = chunk + room;
2502 }
2503
2504 ret = (void *)*mem_start;
2505 *mem_start += needed;
2506
2507 return ret;
2508 }
2509
2510 #define dt_push_token(token, mem_start, mem_end) do { \
2511 void *room = make_room(mem_start, mem_end, 4, 4); \
2512 *(__be32 *)room = cpu_to_be32(token); \
2513 } while(0)
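/* Emit one 32-bit big-endian cell (an FDT token, length or string
 * offset) into the growing structure block.
 */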
2514
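/* Look up a property name in the strings block built so far and return
 * its offset, or 0 if it is not there yet (offset 0 is never a valid
 * string thanks to the 4-byte hole left at dt_string_start).
 */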
2515 static unsigned long __init dt_find_string(char *str)
2516 {
2517 char *s, *os;
2518
2519 s = os = (char *)dt_string_start;
2520 s += 4;
2521 while (s < (char *)dt_string_end) {
2522 if (prom_strcmp(s, str) == 0)
2523 return s - os;
2524 s += prom_strlen(s) + 1;
2525 }
2526 return 0;
2527 }
2528
2529 /*
2530 * The Open Firmware 1275 specification states properties must be 31 bytes or
2531 * less; however, not all firmware obeys this. Make it 64 bytes to be safe.
2532 */
2533 #define MAX_PROPERTY_NAME 64
2534
2535 static void __init scan_dt_build_strings(phandle node,
2536 unsigned long *mem_start,
2537 unsigned long *mem_end)
2538 {
2539 char *prev_name, *namep, *sstart;
2540 unsigned long soff;
2541 phandle child;
2542
2543 sstart = (char *)dt_string_start;
2544
2545 /* get and store all property names */
2546 prev_name = "";
2547 for (;;) {
2548 /* 64 is max len of name including nul. */
2549 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
2550 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
2551 /* No more properties: unwind alloc */
2552 *mem_start = (unsigned long)namep;
2553 break;
2554 }
2555
2556 /* skip "name" */
2557 if (prom_strcmp(namep, "name") == 0) {
2558 *mem_start = (unsigned long)namep;
2559 prev_name = "name";
2560 continue;
2561 }
2562 /* get/create string entry */
2563 soff = dt_find_string(namep);
2564 if (soff != 0) {
2565 *mem_start = (unsigned long)namep;
2566 namep = sstart + soff;
2567 } else {
2568 /* Trim off some if we can */
2569 *mem_start = (unsigned long)namep + prom_strlen(namep) + 1;
2570 dt_string_end = *mem_start;
2571 }
2572 prev_name = namep;
2573 }
2574
2575 /* do all our children */
2576 child = call_prom("child", 1, 1, node);
2577 while (child != 0) {
2578 scan_dt_build_strings(child, mem_start, mem_end);
2579 child = call_prom("peer", 1, 1, child);
2580 }
2581 }
2582
2583 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
2584 unsigned long *mem_end)
2585 {
2586 phandle child;
2587 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
2588 unsigned long soff;
2589 unsigned char *valp;
2590 static char pname[MAX_PROPERTY_NAME] __prombss;
2591 int l, room, has_phandle = 0;
2592
2593 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
2594
2595 /* get the node's full name */
2596 namep = (char *)*mem_start;
2597 room = *mem_end - *mem_start;
2598 if (room > 255)
2599 room = 255;
2600 l = call_prom("package-to-path", 3, 1, node, namep, room);
2601 if (l >= 0) {
2602 /* Didn't fit? Get more room. */
2603 if (l >= room) {
2604 if (l >= *mem_end - *mem_start)
2605 namep = make_room(mem_start, mem_end, l+1, 1);
2606 call_prom("package-to-path", 3, 1, node, namep, l);
2607 }
2608 namep[l] = '\0';
2609
2610 /* Fixup an Apple bug where they have bogus \0 chars in the
2611 * middle of the path in some properties, and extract
2612 * the unit name (everything after the last '/').
2613 */
2614 for (lp = p = namep, ep = namep + l; p < ep; p++) {
2615 if (*p == '/')
2616 lp = namep;
2617 else if (*p != 0)
2618 *lp++ = *p;
2619 }
2620 *lp = 0;
2621 *mem_start = ALIGN((unsigned long)lp + 1, 4);
2622 }
2623
2624 /* get it again for debugging */
2625 path = prom_scratch;
2626 memset(path, 0, sizeof(prom_scratch));
2627 call_prom("package-to-path", 3, 1, node, path, sizeof(prom_scratch) - 1);
2628
2629 /* get and store all properties */
2630 prev_name = "";
2631 sstart = (char *)dt_string_start;
2632 for (;;) {
2633 if (call_prom("nextprop", 3, 1, node, prev_name,
2634 pname) != 1)
2635 break;
2636
2637 /* skip "name" */
2638 if (prom_strcmp(pname, "name") == 0) {
2639 prev_name = "name";
2640 continue;
2641 }
2642
2643 /* find string offset */
2644 soff = dt_find_string(pname);
2645 if (soff == 0) {
2646 prom_printf("WARNING: Can't find string index for"
2647 " <%s>, node %s\n", pname, path);
2648 break;
2649 }
2650 prev_name = sstart + soff;
2651
2652 /* get length */
2653 l = call_prom("getproplen", 2, 1, node, pname);
2654
2655 /* sanity checks */
2656 if (l == PROM_ERROR)
2657 continue;
2658
2659 /* push property head */
2660 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2661 dt_push_token(l, mem_start, mem_end);
2662 dt_push_token(soff, mem_start, mem_end);
2663
2664 /* push property content */
2665 valp = make_room(mem_start, mem_end, l, 4);
2666 call_prom("getprop", 4, 1, node, pname, valp, l);
2667 *mem_start = ALIGN(*mem_start, 4);
2668
2669 if (!prom_strcmp(pname, "phandle"))
2670 has_phandle = 1;
2671 }
2672
2673 /* Add a "phandle" property if none already exists */
2674 if (!has_phandle) {
2675 soff = dt_find_string("phandle");
2676 if (soff == 0)
2677 prom_printf("WARNING: Can't find string index for <phandle> node %s\n", path);
2678 else {
2679 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2680 dt_push_token(4, mem_start, mem_end);
2681 dt_push_token(soff, mem_start, mem_end);
2682 valp = make_room(mem_start, mem_end, 4, 4);
2683 *(__be32 *)valp = cpu_to_be32(node);
2684 }
2685 }
2686
2687 /* do all our children */
2688 child = call_prom("child", 1, 1, node);
2689 while (child != 0) {
2690 scan_dt_build_struct(child, mem_start, mem_end);
2691 child = call_prom("peer", 1, 1, child);
2692 }
2693
2694 dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
2695 }
2696
2697 static void __init flatten_device_tree(void)
2698 {
2699 phandle root;
2700 unsigned long mem_start, mem_end, room;
2701 struct boot_param_header *hdr;
2702 char *namep;
2703 u64 *rsvmap;
2704
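/* The blob is assembled in this order: boot_param_header, memory
 * reserve map, strings block, then the structure block; the offsets of
 * each piece are patched into the header at the end.
 */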
2705 /*
2706 * Check how much room we have between alloc top & bottom (+/- a
2707 * few pages), crop to 1MB, as this is our "chunk" size
2708 */
2709 room = alloc_top - alloc_bottom - 0x4000;
2710 if (room > DEVTREE_CHUNK_SIZE)
2711 room = DEVTREE_CHUNK_SIZE;
2712 prom_debug("starting device tree allocs at %lx\n", alloc_bottom);
2713
2714 /* Now try to claim that */
2715 mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
2716 if (mem_start == 0)
2717 prom_panic("Can't allocate initial device-tree chunk\n");
2718 mem_end = mem_start + room;
2719
2720 /* Get root of tree */
2721 root = call_prom("peer", 1, 1, (phandle)0);
2722 if (root == (phandle)0)
2723 prom_panic ("couldn't get device tree root\n");
2724
2725 /* Build header and make room for mem rsv map */
2726 mem_start = ALIGN(mem_start, 4);
2727 hdr = make_room(&mem_start, &mem_end,
2728 sizeof(struct boot_param_header), 4);
2729 dt_header_start = (unsigned long)hdr;
2730 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
2731
2732 /* Start of strings */
2733 mem_start = PAGE_ALIGN(mem_start);
2734 dt_string_start = mem_start;
2735 mem_start += 4; /* hole */
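/* The 4-byte hole keeps any real string from landing at offset 0,
 * which dt_find_string() uses to mean "not found".
 */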
2736
2737 /* Add "phandle" in there, we'll need it */
2738 namep = make_room(&mem_start, &mem_end, 16, 1);
2739 prom_strscpy_pad(namep, "phandle", sizeof("phandle"));
2740 mem_start = (unsigned long)namep + prom_strlen(namep) + 1;
2741
2742 /* Build string array */
2743 prom_printf("Building dt strings...\n");
2744 scan_dt_build_strings(root, &mem_start, &mem_end);
2745 dt_string_end = mem_start;
2746
2747 /* Build structure */
2748 mem_start = PAGE_ALIGN(mem_start);
2749 dt_struct_start = mem_start;
2750 prom_printf("Building dt structure...\n");
2751 scan_dt_build_struct(root, &mem_start, &mem_end);
2752 dt_push_token(OF_DT_END, &mem_start, &mem_end);
2753 dt_struct_end = PAGE_ALIGN(mem_start);
2754
2755 /* Finish header */
2756 hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu);
2757 hdr->magic = cpu_to_be32(OF_DT_HEADER);
2758 hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start);
2759 hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start);
2760 hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start);
2761 hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start);
2762 hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start);
2763 hdr->version = cpu_to_be32(OF_DT_VERSION);
2764 /* Version 16 is not backward compatible */
2765 hdr->last_comp_version = cpu_to_be32(0x10);
2766
2767 /* Copy the reserve map in */
2768 memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map));
2769
2770 #ifdef DEBUG_PROM
2771 {
2772 int i;
2773 prom_printf("reserved memory map:\n");
2774 for (i = 0; i < mem_reserve_cnt; i++)
2775 prom_printf(" %llx - %llx\n",
2776 be64_to_cpu(mem_reserve_map[i].base),
2777 be64_to_cpu(mem_reserve_map[i].size));
2778 }
2779 #endif
2780 /* Bump mem_reserve_cnt to cause further reservations to fail
2781 * since it's too late.
2782 */
2783 mem_reserve_cnt = MEM_RESERVE_MAP_SIZE;
2784
2785 prom_printf("Device tree strings 0x%lx -> 0x%lx\n",
2786 dt_string_start, dt_string_end);
2787 prom_printf("Device tree struct 0x%lx -> 0x%lx\n",
2788 dt_struct_start, dt_struct_end);
2789 }
2790
2791 #ifdef CONFIG_PPC_MAPLE
2792 /* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property.
2793 * The values are bad, and it doesn't even have the right number of cells. */
2794 static void __init fixup_device_tree_maple(void)
2795 {
2796 phandle isa;
2797 u32 rloc = 0x01002000; /* IO space; PCI device = 4 */
2798 u32 isa_ranges[6];
2799 char *name;
2800
2801 name = "/ht@0/isa@4";
2802 isa = call_prom("finddevice", 1, 1, ADDR(name));
2803 if (!PHANDLE_VALID(isa)) {
2804 name = "/ht@0/isa@6";
2805 isa = call_prom("finddevice", 1, 1, ADDR(name));
2806 rloc = 0x01003000; /* IO space; PCI device = 6 */
2807 }
2808 if (!PHANDLE_VALID(isa))
2809 return;
2810
2811 if (prom_getproplen(isa, "ranges") != 12)
2812 return;
2813 if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges))
2814 == PROM_ERROR)
2815 return;
2816
2817 if (isa_ranges[0] != 0x1 ||
2818 isa_ranges[1] != 0xf4000000 ||
2819 isa_ranges[2] != 0x00010000)
2820 return;
2821
2822 prom_printf("Fixing up bogus ISA range on Maple/Apache...\n");
2823
2824 isa_ranges[0] = 0x1;
2825 isa_ranges[1] = 0x0;
2826 isa_ranges[2] = rloc;
2827 isa_ranges[3] = 0x0;
2828 isa_ranges[4] = 0x0;
2829 isa_ranges[5] = 0x00010000;
2830 prom_setprop(isa, name, "ranges",
2831 isa_ranges, sizeof(isa_ranges));
2832 }
2833
2834 #define CPC925_MC_START 0xf8000000
2835 #define CPC925_MC_LENGTH 0x1000000
2836 /* The values for memory-controller don't have the right number of cells */
2837 static void __init fixup_device_tree_maple_memory_controller(void)
2838 {
2839 phandle mc;
2840 u32 mc_reg[4];
2841 char *name = "/hostbridge@f8000000";
2842 u32 ac, sc;
2843
2844 mc = call_prom("finddevice", 1, 1, ADDR(name));
2845 if (!PHANDLE_VALID(mc))
2846 return;
2847
2848 if (prom_getproplen(mc, "reg") != 8)
2849 return;
2850
2851 prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac));
2852 prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc));
2853 if ((ac != 2) || (sc != 2))
2854 return;
2855
2856 if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR)
2857 return;
2858
2859 if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH)
2860 return;
2861
2862 prom_printf("Fixing up bogus hostbridge on Maple...\n");
2863
2864 mc_reg[0] = 0x0;
2865 mc_reg[1] = CPC925_MC_START;
2866 mc_reg[2] = 0x0;
2867 mc_reg[3] = CPC925_MC_LENGTH;
2868 prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg));
2869 }
2870 #else
2871 #define fixup_device_tree_maple()
2872 #define fixup_device_tree_maple_memory_controller()
2873 #endif
2874
2875 #ifdef CONFIG_PPC_CHRP
2876 /*
2877 * Pegasos and BriQ lack the "ranges" property in the isa node
2878 * Pegasos needs decimal IRQ 14/15, not hexadecimal
2879 * Pegasos has the IDE configured in legacy mode, but advertised as native
2880 */
2881 static void __init fixup_device_tree_chrp(void)
2882 {
2883 phandle ph;
2884 u32 prop[6];
2885 u32 rloc = 0x01006000; /* IO space; PCI device = 12 */
2886 char *name;
2887 int rc;
2888
2889 name = "/pci@80000000/isa@c";
2890 ph = call_prom("finddevice", 1, 1, ADDR(name));
2891 if (!PHANDLE_VALID(ph)) {
2892 name = "/pci@ff500000/isa@6";
2893 ph = call_prom("finddevice", 1, 1, ADDR(name));
2894 rloc = 0x01003000; /* IO space; PCI device = 6 */
2895 }
2896 if (PHANDLE_VALID(ph)) {
2897 rc = prom_getproplen(ph, "ranges");
2898 if (rc == 0 || rc == PROM_ERROR) {
2899 prom_printf("Fixing up missing ISA range on Pegasos...\n");
2900
2901 prop[0] = 0x1;
2902 prop[1] = 0x0;
2903 prop[2] = rloc;
2904 prop[3] = 0x0;
2905 prop[4] = 0x0;
2906 prop[5] = 0x00010000;
2907 prom_setprop(ph, name, "ranges", prop, sizeof(prop));
2908 }
2909 }
2910
2911 name = "/pci@80000000/ide@C,1";
2912 ph = call_prom("finddevice", 1, 1, ADDR(name));
2913 if (PHANDLE_VALID(ph)) {
2914 prom_printf("Fixing up IDE interrupt on Pegasos...\n");
2915 prop[0] = 14;
2916 prop[1] = 0x0;
2917 prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32));
2918 prom_printf("Fixing up IDE class-code on Pegasos...\n");
2919 rc = prom_getprop(ph, "class-code", prop, sizeof(u32));
2920 if (rc == sizeof(u32)) {
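/* Clear the programming-interface bits that advertise native
 * mode so both IDE channels are reported in legacy mode.
 */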
2921 prop[0] &= ~0x5;
2922 prom_setprop(ph, name, "class-code", prop, sizeof(u32));
2923 }
2924 }
2925 }
2926 #else
2927 #define fixup_device_tree_chrp()
2928 #endif
2929
2930 #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
2931 static void __init fixup_device_tree_pmac(void)
2932 {
2933 phandle u3, i2c, mpic;
2934 u32 u3_rev;
2935 u32 interrupts[2];
2936 u32 parent;
2937
2938 /* Some G5s have a missing interrupt definition, fix it up here */
2939 u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
2940 if (!PHANDLE_VALID(u3))
2941 return;
2942 i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
2943 if (!PHANDLE_VALID(i2c))
2944 return;
2945 mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
2946 if (!PHANDLE_VALID(mpic))
2947 return;
2948
2949 /* check if proper rev of u3 */
2950 if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
2951 == PROM_ERROR)
2952 return;
2953 if (u3_rev < 0x35 || u3_rev > 0x39)
2954 return;
2955 /* does it need fixup ? */
2956 if (prom_getproplen(i2c, "interrupts") > 0)
2957 return;
2958
2959 prom_printf("fixing up bogus interrupts for u3 i2c...\n");
2960
2961 /* interrupt on this revision of u3 is number 0 and level */
2962 interrupts[0] = 0;
2963 interrupts[1] = 1;
2964 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
2965 &interrupts, sizeof(interrupts));
2966 parent = (u32)mpic;
2967 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
2968 &parent, sizeof(parent));
2969 }
2970 #else
2971 #define fixup_device_tree_pmac()
2972 #endif
2973
2974 #ifdef CONFIG_PPC_EFIKA
2975 /*
2976 * The MPC5200 FEC driver requires a phy-handle property to tell it how
2977 * to talk to the phy. If the phy-handle property is missing, then this
2978 * function is called to add the appropriate nodes and link it to the
2979 * ethernet node.
2980 */
2981 static void __init fixup_device_tree_efika_add_phy(void)
2982 {
2983 u32 node;
2984 char prop[64];
2985 int rv;
2986
2987 /* Check if /builtin/ethernet exists - bail if it doesn't */
2988 node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet"));
2989 if (!PHANDLE_VALID(node))
2990 return;
2991
2992 /* Check if the phy-handle property exists - bail if it does */
2993 rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
2994 if (rv <= 0)
2995 return;
2996
2997 /*
2998 * At this point the ethernet device doesn't have a phy described.
2999 * Now we need to add the missing phy node and linkage
3000 */
3001
3002 /* Check for an MDIO bus node - if missing then create one */
3003 node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
3004 if (!PHANDLE_VALID(node)) {
3005 prom_printf("Adding Ethernet MDIO node\n");
3006 call_prom("interpret", 1, 1,
3007 " s\" /builtin\" find-device"
3008 " new-device"
3009 " 1 encode-int s\" #address-cells\" property"
3010 " 0 encode-int s\" #size-cells\" property"
3011 " s\" mdio\" device-name"
3012 " s\" fsl,mpc5200b-mdio\" encode-string"
3013 " s\" compatible\" property"
3014 " 0xf0003000 0x400 reg"
3015 " 0x2 encode-int"
3016 " 0x5 encode-int encode+"
3017 " 0x3 encode-int encode+"
3018 " s\" interrupts\" property"
3019 " finish-device");
3020 }
3021
3022 /* Check for a PHY device node - if missing then create one and
3023 * give its phandle to the ethernet node */
3024 node = call_prom("finddevice", 1, 1,
3025 ADDR("/builtin/mdio/ethernet-phy"));
3026 if (!PHANDLE_VALID(node)) {
3027 prom_printf("Adding Ethernet PHY node\n");
3028 call_prom("interpret", 1, 1,
3029 " s\" /builtin/mdio\" find-device"
3030 " new-device"
3031 " s\" ethernet-phy\" device-name"
3032 " 0x10 encode-int s\" reg\" property"
3033 " my-self"
3034 " ihandle>phandle"
3035 " finish-device"
3036 " s\" /builtin/ethernet\" find-device"
3037 " encode-int"
3038 " s\" phy-handle\" property"
3039 " device-end");
3040 }
3041 }
3042
3043 static void __init fixup_device_tree_efika(void)
3044 {
3045 int sound_irq[3] = { 2, 2, 0 };
3046 int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
3047 3,4,0, 3,5,0, 3,6,0, 3,7,0,
3048 3,8,0, 3,9,0, 3,10,0, 3,11,0,
3049 3,12,0, 3,13,0, 3,14,0, 3,15,0 };
3050 u32 node;
3051 char prop[64];
3052 int rv, len;
3053
3054 /* Check if we're really running on an EFIKA */
3055 node = call_prom("finddevice", 1, 1, ADDR("/"));
3056 if (!PHANDLE_VALID(node))
3057 return;
3058
3059 rv = prom_getprop(node, "model", prop, sizeof(prop));
3060 if (rv == PROM_ERROR)
3061 return;
3062 if (prom_strcmp(prop, "EFIKA5K2"))
3063 return;
3064
3065 prom_printf("Applying EFIKA device tree fixups\n");
3066
3067 /* Claiming to be 'chrp' is death */
3068 node = call_prom("finddevice", 1, 1, ADDR("/"));
3069 rv = prom_getprop(node, "device_type", prop, sizeof(prop));
3070 if (rv != PROM_ERROR && (prom_strcmp(prop, "chrp") == 0))
3071 prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));
3072
3073 /* CODEGEN,description is exposed in /proc/cpuinfo so
3074 fix that too */
3075 rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop));
3076 if (rv != PROM_ERROR && (prom_strstr(prop, "CHRP")))
3077 prom_setprop(node, "/", "CODEGEN,description",
3078 "Efika 5200B PowerPC System",
3079 sizeof("Efika 5200B PowerPC System"));
3080
3081 /* Fixup bestcomm interrupts property */
3082 node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
3083 if (PHANDLE_VALID(node)) {
3084 len = prom_getproplen(node, "interrupts");
3085 if (len == 12) {
3086 prom_printf("Fixing bestcomm interrupts property\n");
3087 prom_setprop(node, "/builtin/bestcomm", "interrupts",
3088 bcomm_irq, sizeof(bcomm_irq));
3089 }
3090 }
3091
3092 /* Fixup sound interrupts property */
3093 node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
3094 if (PHANDLE_VALID(node)) {
3095 rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
3096 if (rv == PROM_ERROR) {
3097 prom_printf("Adding sound interrupts property\n");
3098 prom_setprop(node, "/builtin/sound", "interrupts",
3099 sound_irq, sizeof(sound_irq));
3100 }
3101 }
3102
3103 /* Make sure ethernet phy-handle property exists */
3104 fixup_device_tree_efika_add_phy();
3105 }
3106 #else
3107 #define fixup_device_tree_efika()
3108 #endif
3109
3110 #ifdef CONFIG_PPC_PASEMI_NEMO
3111 /*
3112 * CFE supplied on Nemo is broken in several ways; the biggest
3113 * problem is that it reassigns ISA interrupts to unused mpic ints.
3114 * Add an interrupt-controller property for the io-bridge to use
3115 * and correct the ints so we can attach them to an irq_domain
3116 */
3117 static void __init fixup_device_tree_pasemi(void)
3118 {
3119 u32 interrupts[2], parent, rval, val = 0;
3120 char *name, *pci_name;
3121 phandle iob, node;
3122
3123 /* Find the root pci node */
3124 name = "/pxp@0,e0000000";
3125 iob = call_prom("finddevice", 1, 1, ADDR(name));
3126 if (!PHANDLE_VALID(iob))
3127 return;
3128
3129 /* check if the interrupt-controller property is set yet */
3130 if (prom_getproplen(iob, "interrupt-controller") != PROM_ERROR)
3131 return;
3132
3133 prom_printf("adding interrupt-controller property for SB600...\n");
3134
3135 prom_setprop(iob, name, "interrupt-controller", &val, 0);
3136
3137 pci_name = "/pxp@0,e0000000/pci@11";
3138 node = call_prom("finddevice", 1, 1, ADDR(pci_name));
3139 parent = ADDR(iob);
3140
3141 for ( ; prom_next_node(&node); ) {
3142 /* scan each node for one with an interrupt */
3143 if (!PHANDLE_VALID(node))
3144 continue;
3145
3146 rval = prom_getproplen(node, "interrupts");
3147 if (rval == 0 || rval == PROM_ERROR)
3148 continue;
3149
3150 prom_getprop(node, "interrupts", &interrupts, sizeof(interrupts));
3151 if ((interrupts[0] < 212) || (interrupts[0] > 222))
3152 continue;
3153
3154 /* found a node, update both interrupts and interrupt-parent */
3155 if ((interrupts[0] >= 212) && (interrupts[0] <= 215))
3156 interrupts[0] -= 203;
3157 if ((interrupts[0] >= 216) && (interrupts[0] <= 220))
3158 interrupts[0] -= 213;
3159 if (interrupts[0] == 221)
3160 interrupts[0] = 14;
3161 if (interrupts[0] == 222)
3162 interrupts[0] = 8;
3163
3164 prom_setprop(node, pci_name, "interrupts", interrupts,
3165 sizeof(interrupts));
3166 prom_setprop(node, pci_name, "interrupt-parent", &parent,
3167 sizeof(parent));
3168 }
3169
3170 /*
3171 * The io-bridge has device_type set to 'io-bridge'; change it to 'isa'
3172 * so that generic isa-bridge code can add the SB600 and its on-board
3173 * peripherals.
3174 */
3175 name = "/pxp@0,e0000000/io-bridge@0";
3176 iob = call_prom("finddevice", 1, 1, ADDR(name));
3177 if (!PHANDLE_VALID(iob))
3178 return;
3179
3180 /* device_type is already set, just change it. */
3181
3182 prom_printf("Changing device_type of SB600 node...\n");
3183
3184 prom_setprop(iob, name, "device_type", "isa", sizeof("isa"));
3185 }
3186 #else /* !CONFIG_PPC_PASEMI_NEMO */
3187 static inline void fixup_device_tree_pasemi(void) { }
3188 #endif
3189
3190 static void __init fixup_device_tree(void)
3191 {
3192 fixup_device_tree_maple();
3193 fixup_device_tree_maple_memory_controller();
3194 fixup_device_tree_chrp();
3195 fixup_device_tree_pmac();
3196 fixup_device_tree_efika();
3197 fixup_device_tree_pasemi();
3198 }
3199
3200 static void __init prom_find_boot_cpu(void)
3201 {
3202 __be32 rval;
3203 ihandle prom_cpu;
3204 phandle cpu_pkg;
3205
3206 rval = 0;
3207 if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0)
3208 return;
3209 prom_cpu = be32_to_cpu(rval);
3210
3211 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
3212
3213 if (!PHANDLE_VALID(cpu_pkg))
3214 return;
3215
3216 prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
3217 prom.cpu = be32_to_cpu(rval);
3218
3219 prom_debug("Booting CPU hw index = %d\n", prom.cpu);
3220 }
3221
3222 static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
3223 {
3224 #ifdef CONFIG_BLK_DEV_INITRD
3225 if (r3 && r4 && r4 != 0xdeadbeef) {
3226 __be64 val;
3227
3228 prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3;
3229 prom_initrd_end = prom_initrd_start + r4;
3230
3231 val = cpu_to_be64(prom_initrd_start);
3232 prom_setprop(prom.chosen, "/chosen", "linux,initrd-start",
3233 &val, sizeof(val));
3234 val = cpu_to_be64(prom_initrd_end);
3235 prom_setprop(prom.chosen, "/chosen", "linux,initrd-end",
3236 &val, sizeof(val));
3237
3238 reserve_mem(prom_initrd_start,
3239 prom_initrd_end - prom_initrd_start);
3240
3241 prom_debug("initrd_start=0x%lx\n", prom_initrd_start);
3242 prom_debug("initrd_end=0x%lx\n", prom_initrd_end);
3243 }
3244 #endif /* CONFIG_BLK_DEV_INITRD */
3245 }
3246
3247 #ifdef CONFIG_PPC_SVM
3248 /*
3249 * Perform the Enter Secure Mode ultracall.
3250 */
3251 static int enter_secure_mode(unsigned long kbase, unsigned long fdt)
3252 {
3253 register unsigned long r3 asm("r3") = UV_ESM;
3254 register unsigned long r4 asm("r4") = kbase;
3255 register unsigned long r5 asm("r5") = fdt;
3256
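/* "sc 2" is the ultracall form of the system-call instruction; r3
 * carries the UV_ESM opcode in and the return status out.
 */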
3257 asm volatile("sc 2" : "+r"(r3) : "r"(r4), "r"(r5));
3258
3259 return r3;
3260 }
3261
3262 /*
3263 * Call the Ultravisor to transfer us to secure memory if we have an ESM blob.
3264 */
3265 static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt)
3266 {
3267 int ret;
3268
3269 if (!prom_svm_enable)
3270 return;
3271
3272 /* Switch to secure mode. */
3273 prom_printf("Switching to secure mode.\n");
3274
3275 /*
3276 * The ultravisor will do an integrity check of the kernel image but we
3277 * relocated it so the check will fail. Restore the original image by
3278 * relocating it back to the kernel virtual base address.
3279 */
3280 relocate(KERNELBASE);
3281
3282 ret = enter_secure_mode(kbase, fdt);
3283
3284 /* Relocate the kernel again. */
3285 relocate(kbase);
3286
3287 if (ret != U_SUCCESS) {
3288 prom_printf("Returned %d from switching to secure mode.\n", ret);
3289 prom_rtas_os_term("Switch to secure mode failed.\n");
3290 }
3291 }
3292 #else
3293 static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt)
3294 {
3295 }
3296 #endif /* CONFIG_PPC_SVM */
3297
3298 /*
3299 * We enter here early on, when the Open Firmware prom is still
3300 * handling exceptions and the MMU hash table for us.
3301 */
3302
3303 unsigned long __init prom_init(unsigned long r3, unsigned long r4,
3304 unsigned long pp,
3305 unsigned long r6, unsigned long r7,
3306 unsigned long kbase)
3307 {
3308 unsigned long hdr;
3309
3310 #ifdef CONFIG_PPC32
3311 unsigned long offset = reloc_offset();
3312 reloc_got2(offset);
3313 #endif
3314
3315 /*
3316 * First zero the BSS
3317 */
3318 memset(&__bss_start, 0, __bss_stop - __bss_start);
3319
3320 /*
3321 * Init interface to Open Firmware, get some node references,
3322 * like /chosen
3323 */
3324 prom_init_client_services(pp);
3325
3326 /*
3327 * See if this OF is old enough that we need to do explicit maps
3328 * and other workarounds
3329 */
3330 prom_find_mmu();
3331
3332 /*
3333 * Init prom stdout device
3334 */
3335 prom_init_stdout();
3336
3337 prom_printf("Preparing to boot %s", linux_banner);
3338
3339 /*
3340 * Get default machine type. At this point, we do not differentiate
3341 * between pSeries SMP and pSeries LPAR
3342 */
3343 of_platform = prom_find_machine_type();
3344 prom_printf("Detected machine type: %x\n", of_platform);
3345
3346 #ifndef CONFIG_NONSTATIC_KERNEL
3347 /* Bail if this is a kdump kernel. */
3348 if (PHYSICAL_START > 0)
3349 prom_panic("Error: You can't boot a kdump kernel from OF!\n");
3350 #endif
3351
3352 /*
3353 * Check for an initrd
3354 */
3355 prom_check_initrd(r3, r4);
3356
3357 /*
3358 * Do early parsing of command line
3359 */
3360 early_cmdline_parse();
3361
3362 #ifdef CONFIG_PPC_PSERIES
3363 /*
3364 * On pSeries, inform the firmware about our capabilities
3365 */
3366 if (of_platform == PLATFORM_PSERIES ||
3367 of_platform == PLATFORM_PSERIES_LPAR)
3368 prom_send_capabilities();
3369 #endif
3370
3371 /*
3372 * Copy the CPU hold code
3373 */
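/* This copies the first 0x100 bytes of the kernel image (which contain
 * the secondary hold code) down to physical address 0, where
 * prom_hold_cpus() reaches it via LOW_ADDR().
 */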
3374 if (of_platform != PLATFORM_POWERMAC)
3375 copy_and_flush(0, kbase, 0x100, 0);
3376
3377 /*
3378 * Initialize memory management within prom_init
3379 */
3380 prom_init_mem();
3381
3382 /*
3383 * Determine which cpu is actually running right _now_
3384 */
3385 prom_find_boot_cpu();
3386
3387 /*
3388 * Initialize display devices
3389 */
3390 prom_check_displays();
3391
3392 #if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__)
3393 /*
3394 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
3395 * that uses the allocator; we need to make sure we get the top of memory
3396 * available for us here...
3397 */
3398 if (of_platform == PLATFORM_PSERIES)
3399 prom_initialize_tce_table();
3400 #endif
3401
3402 /*
3403 * On non-powermacs, try to instantiate RTAS. PowerMacs don't
3404 * have a usable RTAS implementation.
3405 */
3406 if (of_platform != PLATFORM_POWERMAC)
3407 prom_instantiate_rtas();
3408
3409 #ifdef CONFIG_PPC64
3410 /* instantiate sml */
3411 prom_instantiate_sml();
3412 #endif
3413
3414 /*
3415 * On non-powermacs, put all CPUs in spin-loops.
3416 *
3417 * PowerMacs use a different mechanism to spin CPUs
3418 *
3419 * (This must be done after instantiating RTAS)
3420 */
3421 if (of_platform != PLATFORM_POWERMAC)
3422 prom_hold_cpus();
3423
3424 /*
3425 * Fill in some infos for use by the kernel later on
3426 */
3427 if (prom_memory_limit) {
3428 __be64 val = cpu_to_be64(prom_memory_limit);
3429 prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
3430 &val, sizeof(val));
3431 }
3432 #ifdef CONFIG_PPC64
3433 if (prom_iommu_off)
3434 prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
3435 NULL, 0);
3436
3437 if (prom_iommu_force_on)
3438 prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on",
3439 NULL, 0);
3440
3441 if (prom_tce_alloc_start) {
3442 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start",
3443 &prom_tce_alloc_start,
3444 sizeof(prom_tce_alloc_start));
3445 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end",
3446 &prom_tce_alloc_end,
3447 sizeof(prom_tce_alloc_end));
3448 }
3449 #endif
3450
3451 /*
3452 * Fixup any known bugs in the device-tree
3453 */
3454 fixup_device_tree();
3455
3456 /*
3457 * Now finally create the flattened device-tree
3458 */
3459 prom_printf("copying OF device tree...\n");
3460 flatten_device_tree();
3461
3462 /*
3463 * in case stdin is USB and still active on IBM machines...
3464 * Unfortunately quiesce crashes on some powermacs if we have
3465 * closed stdin already (in particular the powerbook 101).
3466 */
3467 if (of_platform != PLATFORM_POWERMAC)
3468 prom_close_stdin();
3469
3470 /*
3471 * Call OF "quiesce" method to shut down pending DMA's from
3472 * devices etc...
3473 */
3474 prom_printf("Quiescing Open Firmware ...\n");
3475 call_prom("quiesce", 0, 0);
3476
3477 /*
3478 * And finally, call the kernel passing it the flattened device
3479 * tree and NULL as r5, thus triggering the new entry point which
3480 * is common to us and kexec
3481 */
3482 hdr = dt_header_start;
3483
3484 prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
3485 prom_debug("->dt_header_start=0x%lx\n", hdr);
3486
3487 #ifdef CONFIG_PPC32
3488 reloc_got2(-offset);
3489 #endif
3490
3491 /* Move to secure memory if we're supposed to be secure guests. */
3492 setup_secure_guest(kbase, hdr);
3493
3494 __start(hdr, kbase, 0, 0, 0, 0, 0);
3495
3496 return 0;
3497 }
3498