1 /*
2 * Procedures for interfacing to Open Firmware.
3 *
4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras.
6 *
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16 #undef DEBUG_PROM
17
18 /* we cannot use FORTIFY as it brings in new symbols */
19 #define __NO_FORTIFY
20
21 #include <stdarg.h>
22 #include <linux/kernel.h>
23 #include <linux/string.h>
24 #include <linux/init.h>
25 #include <linux/threads.h>
26 #include <linux/spinlock.h>
27 #include <linux/types.h>
28 #include <linux/pci.h>
29 #include <linux/proc_fs.h>
30 #include <linux/stringify.h>
31 #include <linux/delay.h>
32 #include <linux/initrd.h>
33 #include <linux/bitops.h>
34 #include <asm/prom.h>
35 #include <asm/rtas.h>
36 #include <asm/page.h>
37 #include <asm/processor.h>
38 #include <asm/irq.h>
39 #include <asm/io.h>
40 #include <asm/smp.h>
41 #include <asm/mmu.h>
42 #include <asm/pgtable.h>
43 #include <asm/iommu.h>
44 #include <asm/btext.h>
45 #include <asm/sections.h>
46 #include <asm/machdep.h>
47 #include <asm/opal.h>
48 #include <asm/asm-prototypes.h>
49
50 #include <linux/linux_logo.h>
51
52 /*
53 * Eventually bump that one up
54 */
55 #define DEVTREE_CHUNK_SIZE 0x100000
56
57 /*
58 * This is the size of the local memory reserve map that gets copied
59 * into the boot params passed to the kernel. That size is totally
60 * flexible as the kernel just reads the list until it encounters an
61 * entry with size 0, so it can be changed without breaking binary
62 * compatibility
63 */
64 #define MEM_RESERVE_MAP_SIZE 8
65
66 /*
67 * prom_init() is called very early on, before the kernel text
68 * and data have been mapped to KERNELBASE. At this point the code
69 * is running at whatever address it has been loaded at.
70 * On ppc32 we compile with -mrelocatable, which means that references
71 * to extern and static variables get relocated automatically.
72 * ppc64 objects are always relocatable, we just need to relocate the
73 * TOC.
74 *
75 * Because OF may have mapped I/O devices into the area starting at
76 * KERNELBASE, particularly on CHRP machines, we can't safely call
77 * OF once the kernel has been mapped to KERNELBASE. Therefore all
78 * OF calls must be done within prom_init().
79 *
80 * ADDR is used in calls to call_prom. The 4th and following
81 * arguments to call_prom should be 32-bit values.
82 * On ppc64, 64 bit values are truncated to 32 bits (and
83 * fortunately don't get interpreted as two arguments).
84 */
85 #define ADDR(x) (u32)(unsigned long)(x)
86
87 #ifdef CONFIG_PPC64
88 #define OF_WORKAROUNDS 0
89 #else
90 #define OF_WORKAROUNDS of_workarounds
91 int of_workarounds;
92 #endif
93
94 #define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */
95 #define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */
96
97 #define PROM_BUG() do { \
98 prom_printf("kernel BUG at %s line 0x%x!\n", \
99 __FILE__, __LINE__); \
100 __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \
101 } while (0)
102
103 #ifdef DEBUG_PROM
104 #define prom_debug(x...) prom_printf(x)
105 #else
106 #define prom_debug(x...)
107 #endif
108
109
110 typedef u32 prom_arg_t;
111
112 struct prom_args {
113 __be32 service;
114 __be32 nargs;
115 __be32 nret;
116 __be32 args[10];
117 };
118
119 struct prom_t {
120 ihandle root;
121 phandle chosen;
122 int cpu;
123 ihandle stdout;
124 ihandle mmumap;
125 ihandle memory;
126 };
127
128 struct mem_map_entry {
129 __be64 base;
130 __be64 size;
131 };
132
133 typedef __be32 cell_t;
134
135 extern void __start(unsigned long r3, unsigned long r4, unsigned long r5,
136 unsigned long r6, unsigned long r7, unsigned long r8,
137 unsigned long r9);
138
139 #ifdef CONFIG_PPC64
140 extern int enter_prom(struct prom_args *args, unsigned long entry);
141 #else
enter_prom(struct prom_args * args,unsigned long entry)142 static inline int enter_prom(struct prom_args *args, unsigned long entry)
143 {
144 return ((int (*)(struct prom_args *))entry)(args);
145 }
146 #endif
147
148 extern void copy_and_flush(unsigned long dest, unsigned long src,
149 unsigned long size, unsigned long offset);
150
151 /* prom structure */
152 static struct prom_t __initdata prom;
153
154 static unsigned long prom_entry __initdata;
155
156 #define PROM_SCRATCH_SIZE 256
157
158 static char __initdata of_stdout_device[256];
159 static char __initdata prom_scratch[PROM_SCRATCH_SIZE];
160
161 static unsigned long __initdata dt_header_start;
162 static unsigned long __initdata dt_struct_start, dt_struct_end;
163 static unsigned long __initdata dt_string_start, dt_string_end;
164
165 static unsigned long __initdata prom_initrd_start, prom_initrd_end;
166
167 #ifdef CONFIG_PPC64
168 static int __initdata prom_iommu_force_on;
169 static int __initdata prom_iommu_off;
170 static unsigned long __initdata prom_tce_alloc_start;
171 static unsigned long __initdata prom_tce_alloc_end;
172 #endif
173
174 static bool __initdata prom_radix_disable;
175
176 struct platform_support {
177 bool hash_mmu;
178 bool radix_mmu;
179 bool radix_gtse;
180 bool xive;
181 };
182
183 /* Platforms codes are now obsolete in the kernel. Now only used within this
184 * file and ultimately gone too. Feel free to change them if you need, they
185 * are not shared with anything outside of this file anymore
186 */
187 #define PLATFORM_PSERIES 0x0100
188 #define PLATFORM_PSERIES_LPAR 0x0101
189 #define PLATFORM_LPAR 0x0001
190 #define PLATFORM_POWERMAC 0x0400
191 #define PLATFORM_GENERIC 0x0500
192 #define PLATFORM_OPAL 0x0600
193
194 static int __initdata of_platform;
195
196 static char __initdata prom_cmd_line[COMMAND_LINE_SIZE];
197
198 static unsigned long __initdata prom_memory_limit;
199
200 static unsigned long __initdata alloc_top;
201 static unsigned long __initdata alloc_top_high;
202 static unsigned long __initdata alloc_bottom;
203 static unsigned long __initdata rmo_top;
204 static unsigned long __initdata ram_top;
205
206 static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE];
207 static int __initdata mem_reserve_cnt;
208
209 static cell_t __initdata regbuf[1024];
210
211 static bool rtas_has_query_cpu_stopped;
212
213
214 /*
215 * Error results ... some OF calls will return "-1" on error, some
216 * will return 0, some will return either. To simplify, here are
217 * macros to use with any ihandle or phandle return value to check if
218 * it is valid
219 */
220
221 #define PROM_ERROR (-1u)
222 #define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR)
223 #define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR)
224
225
226 /* This is the one and *ONLY* place where we actually call open
227 * firmware.
228 */
229
call_prom(const char * service,int nargs,int nret,...)230 static int __init call_prom(const char *service, int nargs, int nret, ...)
231 {
232 int i;
233 struct prom_args args;
234 va_list list;
235
236 args.service = cpu_to_be32(ADDR(service));
237 args.nargs = cpu_to_be32(nargs);
238 args.nret = cpu_to_be32(nret);
239
240 va_start(list, nret);
241 for (i = 0; i < nargs; i++)
242 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
243 va_end(list);
244
245 for (i = 0; i < nret; i++)
246 args.args[nargs+i] = 0;
247
248 if (enter_prom(&args, prom_entry) < 0)
249 return PROM_ERROR;
250
251 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
252 }
253
call_prom_ret(const char * service,int nargs,int nret,prom_arg_t * rets,...)254 static int __init call_prom_ret(const char *service, int nargs, int nret,
255 prom_arg_t *rets, ...)
256 {
257 int i;
258 struct prom_args args;
259 va_list list;
260
261 args.service = cpu_to_be32(ADDR(service));
262 args.nargs = cpu_to_be32(nargs);
263 args.nret = cpu_to_be32(nret);
264
265 va_start(list, rets);
266 for (i = 0; i < nargs; i++)
267 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
268 va_end(list);
269
270 for (i = 0; i < nret; i++)
271 args.args[nargs+i] = 0;
272
273 if (enter_prom(&args, prom_entry) < 0)
274 return PROM_ERROR;
275
276 if (rets != NULL)
277 for (i = 1; i < nret; ++i)
278 rets[i-1] = be32_to_cpu(args.args[nargs+i]);
279
280 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
281 }
282
283
prom_print(const char * msg)284 static void __init prom_print(const char *msg)
285 {
286 const char *p, *q;
287
288 if (prom.stdout == 0)
289 return;
290
291 for (p = msg; *p != 0; p = q) {
292 for (q = p; *q != 0 && *q != '\n'; ++q)
293 ;
294 if (q > p)
295 call_prom("write", 3, 1, prom.stdout, p, q - p);
296 if (*q == 0)
297 break;
298 ++q;
299 call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
300 }
301 }
302
303
prom_print_hex(unsigned long val)304 static void __init prom_print_hex(unsigned long val)
305 {
306 int i, nibbles = sizeof(val)*2;
307 char buf[sizeof(val)*2+1];
308
309 for (i = nibbles-1; i >= 0; i--) {
310 buf[i] = (val & 0xf) + '0';
311 if (buf[i] > '9')
312 buf[i] += ('a'-'0'-10);
313 val >>= 4;
314 }
315 buf[nibbles] = '\0';
316 call_prom("write", 3, 1, prom.stdout, buf, nibbles);
317 }
318
319 /* max number of decimal digits in an unsigned long */
320 #define UL_DIGITS 21
prom_print_dec(unsigned long val)321 static void __init prom_print_dec(unsigned long val)
322 {
323 int i, size;
324 char buf[UL_DIGITS+1];
325
326 for (i = UL_DIGITS-1; i >= 0; i--) {
327 buf[i] = (val % 10) + '0';
328 val = val/10;
329 if (val == 0)
330 break;
331 }
332 /* shift stuff down */
333 size = UL_DIGITS - i;
334 call_prom("write", 3, 1, prom.stdout, buf+i, size);
335 }
336
337 __printf(1, 2)
prom_printf(const char * format,...)338 static void __init prom_printf(const char *format, ...)
339 {
340 const char *p, *q, *s;
341 va_list args;
342 unsigned long v;
343 long vs;
344
345 va_start(args, format);
346 for (p = format; *p != 0; p = q) {
347 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
348 ;
349 if (q > p)
350 call_prom("write", 3, 1, prom.stdout, p, q - p);
351 if (*q == 0)
352 break;
353 if (*q == '\n') {
354 ++q;
355 call_prom("write", 3, 1, prom.stdout,
356 ADDR("\r\n"), 2);
357 continue;
358 }
359 ++q;
360 if (*q == 0)
361 break;
362 switch (*q) {
363 case 's':
364 ++q;
365 s = va_arg(args, const char *);
366 prom_print(s);
367 break;
368 case 'x':
369 ++q;
370 v = va_arg(args, unsigned long);
371 prom_print_hex(v);
372 break;
373 case 'd':
374 ++q;
375 vs = va_arg(args, int);
376 if (vs < 0) {
377 prom_print("-");
378 vs = -vs;
379 }
380 prom_print_dec(vs);
381 break;
382 case 'l':
383 ++q;
384 if (*q == 0)
385 break;
386 else if (*q == 'x') {
387 ++q;
388 v = va_arg(args, unsigned long);
389 prom_print_hex(v);
390 } else if (*q == 'u') { /* '%lu' */
391 ++q;
392 v = va_arg(args, unsigned long);
393 prom_print_dec(v);
394 } else if (*q == 'd') { /* %ld */
395 ++q;
396 vs = va_arg(args, long);
397 if (vs < 0) {
398 prom_print("-");
399 vs = -vs;
400 }
401 prom_print_dec(vs);
402 }
403 break;
404 }
405 }
406 va_end(args);
407 }
408
409
prom_claim(unsigned long virt,unsigned long size,unsigned long align)410 static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
411 unsigned long align)
412 {
413
414 if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
415 /*
416 * Old OF requires we claim physical and virtual separately
417 * and then map explicitly (assuming virtual mode)
418 */
419 int ret;
420 prom_arg_t result;
421
422 ret = call_prom_ret("call-method", 5, 2, &result,
423 ADDR("claim"), prom.memory,
424 align, size, virt);
425 if (ret != 0 || result == -1)
426 return -1;
427 ret = call_prom_ret("call-method", 5, 2, &result,
428 ADDR("claim"), prom.mmumap,
429 align, size, virt);
430 if (ret != 0) {
431 call_prom("call-method", 4, 1, ADDR("release"),
432 prom.memory, size, virt);
433 return -1;
434 }
435 /* the 0x12 is M (coherence) + PP == read/write */
436 call_prom("call-method", 6, 1,
437 ADDR("map"), prom.mmumap, 0x12, size, virt, virt);
438 return virt;
439 }
440 return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
441 (prom_arg_t)align);
442 }
443
prom_panic(const char * reason)444 static void __init __attribute__((noreturn)) prom_panic(const char *reason)
445 {
446 prom_print(reason);
447 /* Do not call exit because it clears the screen on pmac
448 * it also causes some sort of double-fault on early pmacs */
449 if (of_platform == PLATFORM_POWERMAC)
450 asm("trap\n");
451
452 /* ToDo: should put up an SRC here on pSeries */
453 call_prom("exit", 0, 0);
454
455 for (;;) /* should never get here */
456 ;
457 }
458
459
prom_next_node(phandle * nodep)460 static int __init prom_next_node(phandle *nodep)
461 {
462 phandle node;
463
464 if ((node = *nodep) != 0
465 && (*nodep = call_prom("child", 1, 1, node)) != 0)
466 return 1;
467 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
468 return 1;
469 for (;;) {
470 if ((node = call_prom("parent", 1, 1, node)) == 0)
471 return 0;
472 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
473 return 1;
474 }
475 }
476
prom_getprop(phandle node,const char * pname,void * value,size_t valuelen)477 static inline int prom_getprop(phandle node, const char *pname,
478 void *value, size_t valuelen)
479 {
480 return call_prom("getprop", 4, 1, node, ADDR(pname),
481 (u32)(unsigned long) value, (u32) valuelen);
482 }
483
prom_getproplen(phandle node,const char * pname)484 static inline int prom_getproplen(phandle node, const char *pname)
485 {
486 return call_prom("getproplen", 2, 1, node, ADDR(pname));
487 }
488
add_string(char ** str,const char * q)489 static void add_string(char **str, const char *q)
490 {
491 char *p = *str;
492
493 while (*q)
494 *p++ = *q++;
495 *p++ = ' ';
496 *str = p;
497 }
498
tohex(unsigned int x)499 static char *tohex(unsigned int x)
500 {
501 static char digits[] = "0123456789abcdef";
502 static char result[9];
503 int i;
504
505 result[8] = 0;
506 i = 8;
507 do {
508 --i;
509 result[i] = digits[x & 0xf];
510 x >>= 4;
511 } while (x != 0 && i > 0);
512 return &result[i];
513 }
514
prom_setprop(phandle node,const char * nodename,const char * pname,void * value,size_t valuelen)515 static int __init prom_setprop(phandle node, const char *nodename,
516 const char *pname, void *value, size_t valuelen)
517 {
518 char cmd[256], *p;
519
520 if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
521 return call_prom("setprop", 4, 1, node, ADDR(pname),
522 (u32)(unsigned long) value, (u32) valuelen);
523
524 /* gah... setprop doesn't work on longtrail, have to use interpret */
525 p = cmd;
526 add_string(&p, "dev");
527 add_string(&p, nodename);
528 add_string(&p, tohex((u32)(unsigned long) value));
529 add_string(&p, tohex(valuelen));
530 add_string(&p, tohex(ADDR(pname)));
531 add_string(&p, tohex(strlen(pname)));
532 add_string(&p, "property");
533 *p = 0;
534 return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
535 }
536
537 /* We can't use the standard versions because of relocation headaches. */
538 #define isxdigit(c) (('0' <= (c) && (c) <= '9') \
539 || ('a' <= (c) && (c) <= 'f') \
540 || ('A' <= (c) && (c) <= 'F'))
541
542 #define isdigit(c) ('0' <= (c) && (c) <= '9')
543 #define islower(c) ('a' <= (c) && (c) <= 'z')
544 #define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c))
545
prom_strtoul(const char * cp,const char ** endp)546 static unsigned long prom_strtoul(const char *cp, const char **endp)
547 {
548 unsigned long result = 0, base = 10, value;
549
550 if (*cp == '0') {
551 base = 8;
552 cp++;
553 if (toupper(*cp) == 'X') {
554 cp++;
555 base = 16;
556 }
557 }
558
559 while (isxdigit(*cp) &&
560 (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
561 result = result * base + value;
562 cp++;
563 }
564
565 if (endp)
566 *endp = cp;
567
568 return result;
569 }
570
prom_memparse(const char * ptr,const char ** retptr)571 static unsigned long prom_memparse(const char *ptr, const char **retptr)
572 {
573 unsigned long ret = prom_strtoul(ptr, retptr);
574 int shift = 0;
575
576 /*
577 * We can't use a switch here because GCC *may* generate a
578 * jump table which won't work, because we're not running at
579 * the address we're linked at.
580 */
581 if ('G' == **retptr || 'g' == **retptr)
582 shift = 30;
583
584 if ('M' == **retptr || 'm' == **retptr)
585 shift = 20;
586
587 if ('K' == **retptr || 'k' == **retptr)
588 shift = 10;
589
590 if (shift) {
591 ret <<= shift;
592 (*retptr)++;
593 }
594
595 return ret;
596 }
597
598 /*
599 * Early parsing of the command line passed to the kernel, used for
600 * "mem=x" and the options that affect the iommu
601 */
early_cmdline_parse(void)602 static void __init early_cmdline_parse(void)
603 {
604 const char *opt;
605
606 char *p;
607 int l = 0;
608
609 prom_cmd_line[0] = 0;
610 p = prom_cmd_line;
611 if ((long)prom.chosen > 0)
612 l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
613 #ifdef CONFIG_CMDLINE
614 if (l <= 0 || p[0] == '\0') /* dbl check */
615 strlcpy(prom_cmd_line,
616 CONFIG_CMDLINE, sizeof(prom_cmd_line));
617 #endif /* CONFIG_CMDLINE */
618 prom_printf("command line: %s\n", prom_cmd_line);
619
620 #ifdef CONFIG_PPC64
621 opt = strstr(prom_cmd_line, "iommu=");
622 if (opt) {
623 prom_printf("iommu opt is: %s\n", opt);
624 opt += 6;
625 while (*opt && *opt == ' ')
626 opt++;
627 if (!strncmp(opt, "off", 3))
628 prom_iommu_off = 1;
629 else if (!strncmp(opt, "force", 5))
630 prom_iommu_force_on = 1;
631 }
632 #endif
633 opt = strstr(prom_cmd_line, "mem=");
634 if (opt) {
635 opt += 4;
636 prom_memory_limit = prom_memparse(opt, (const char **)&opt);
637 #ifdef CONFIG_PPC64
638 /* Align to 16 MB == size of ppc64 large page */
639 prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
640 #endif
641 }
642
643 opt = strstr(prom_cmd_line, "disable_radix");
644 if (opt) {
645 prom_debug("Radix disabled from cmdline\n");
646 prom_radix_disable = true;
647 }
648 }
649
650 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
651 /*
652 * The architecture vector has an array of PVR mask/value pairs,
653 * followed by # option vectors - 1, followed by the option vectors.
654 *
655 * See prom.h for the definition of the bits specified in the
656 * architecture vector.
657 */
658
659 /* Firmware expects the value to be n - 1, where n is the # of vectors */
660 #define NUM_VECTORS(n) ((n) - 1)
661
662 /*
663 * Firmware expects 1 + n - 2, where n is the length of the option vector in
664 * bytes. The 1 accounts for the length byte itself, the - 2 .. ?
665 */
666 #define VECTOR_LENGTH(n) (1 + (n) - 2)
667
668 struct option_vector1 {
669 u8 byte1;
670 u8 arch_versions;
671 u8 arch_versions3;
672 } __packed;
673
674 struct option_vector2 {
675 u8 byte1;
676 __be16 reserved;
677 __be32 real_base;
678 __be32 real_size;
679 __be32 virt_base;
680 __be32 virt_size;
681 __be32 load_base;
682 __be32 min_rma;
683 __be32 min_load;
684 u8 min_rma_percent;
685 u8 max_pft_size;
686 } __packed;
687
688 struct option_vector3 {
689 u8 byte1;
690 u8 byte2;
691 } __packed;
692
693 struct option_vector4 {
694 u8 byte1;
695 u8 min_vp_cap;
696 } __packed;
697
698 struct option_vector5 {
699 u8 byte1;
700 u8 byte2;
701 u8 byte3;
702 u8 cmo;
703 u8 associativity;
704 u8 bin_opts;
705 u8 micro_checkpoint;
706 u8 reserved0;
707 __be32 max_cpus;
708 __be16 papr_level;
709 __be16 reserved1;
710 u8 platform_facilities;
711 u8 reserved2;
712 __be16 reserved3;
713 u8 subprocessors;
714 u8 byte22;
715 u8 intarch;
716 u8 mmu;
717 u8 hash_ext;
718 u8 radix_ext;
719 } __packed;
720
721 struct option_vector6 {
722 u8 reserved;
723 u8 secondary_pteg;
724 u8 os_name;
725 } __packed;
726
727 struct ibm_arch_vec {
728 struct { u32 mask, val; } pvrs[12];
729
730 u8 num_vectors;
731
732 u8 vec1_len;
733 struct option_vector1 vec1;
734
735 u8 vec2_len;
736 struct option_vector2 vec2;
737
738 u8 vec3_len;
739 struct option_vector3 vec3;
740
741 u8 vec4_len;
742 struct option_vector4 vec4;
743
744 u8 vec5_len;
745 struct option_vector5 vec5;
746
747 u8 vec6_len;
748 struct option_vector6 vec6;
749 } __packed;
750
751 struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
752 .pvrs = {
753 {
754 .mask = cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */
755 .val = cpu_to_be32(0x003a0000),
756 },
757 {
758 .mask = cpu_to_be32(0xffff0000), /* POWER6 */
759 .val = cpu_to_be32(0x003e0000),
760 },
761 {
762 .mask = cpu_to_be32(0xffff0000), /* POWER7 */
763 .val = cpu_to_be32(0x003f0000),
764 },
765 {
766 .mask = cpu_to_be32(0xffff0000), /* POWER8E */
767 .val = cpu_to_be32(0x004b0000),
768 },
769 {
770 .mask = cpu_to_be32(0xffff0000), /* POWER8NVL */
771 .val = cpu_to_be32(0x004c0000),
772 },
773 {
774 .mask = cpu_to_be32(0xffff0000), /* POWER8 */
775 .val = cpu_to_be32(0x004d0000),
776 },
777 {
778 .mask = cpu_to_be32(0xffff0000), /* POWER9 */
779 .val = cpu_to_be32(0x004e0000),
780 },
781 {
782 .mask = cpu_to_be32(0xffffffff), /* all 3.00-compliant */
783 .val = cpu_to_be32(0x0f000005),
784 },
785 {
786 .mask = cpu_to_be32(0xffffffff), /* all 2.07-compliant */
787 .val = cpu_to_be32(0x0f000004),
788 },
789 {
790 .mask = cpu_to_be32(0xffffffff), /* all 2.06-compliant */
791 .val = cpu_to_be32(0x0f000003),
792 },
793 {
794 .mask = cpu_to_be32(0xffffffff), /* all 2.05-compliant */
795 .val = cpu_to_be32(0x0f000002),
796 },
797 {
798 .mask = cpu_to_be32(0xfffffffe), /* all 2.04-compliant and earlier */
799 .val = cpu_to_be32(0x0f000001),
800 },
801 },
802
803 .num_vectors = NUM_VECTORS(6),
804
805 .vec1_len = VECTOR_LENGTH(sizeof(struct option_vector1)),
806 .vec1 = {
807 .byte1 = 0,
808 .arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
809 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
810 .arch_versions3 = OV1_PPC_3_00,
811 },
812
813 .vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)),
814 /* option vector 2: Open Firmware options supported */
815 .vec2 = {
816 .byte1 = OV2_REAL_MODE,
817 .reserved = 0,
818 .real_base = cpu_to_be32(0xffffffff),
819 .real_size = cpu_to_be32(0xffffffff),
820 .virt_base = cpu_to_be32(0xffffffff),
821 .virt_size = cpu_to_be32(0xffffffff),
822 .load_base = cpu_to_be32(0xffffffff),
823 .min_rma = cpu_to_be32(512), /* 512MB min RMA */
824 .min_load = cpu_to_be32(0xffffffff), /* full client load */
825 .min_rma_percent = 0, /* min RMA percentage of total RAM */
826 .max_pft_size = 48, /* max log_2(hash table size) */
827 },
828
829 .vec3_len = VECTOR_LENGTH(sizeof(struct option_vector3)),
830 /* option vector 3: processor options supported */
831 .vec3 = {
832 .byte1 = 0, /* don't ignore, don't halt */
833 .byte2 = OV3_FP | OV3_VMX | OV3_DFP,
834 },
835
836 .vec4_len = VECTOR_LENGTH(sizeof(struct option_vector4)),
837 /* option vector 4: IBM PAPR implementation */
838 .vec4 = {
839 .byte1 = 0, /* don't halt */
840 .min_vp_cap = OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */
841 },
842
843 .vec5_len = VECTOR_LENGTH(sizeof(struct option_vector5)),
844 /* option vector 5: PAPR/OF options */
845 .vec5 = {
846 .byte1 = 0, /* don't ignore, don't halt */
847 .byte2 = OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
848 OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
849 #ifdef CONFIG_PCI_MSI
850 /* PCIe/MSI support. Without MSI full PCIe is not supported */
851 OV5_FEAT(OV5_MSI),
852 #else
853 0,
854 #endif
855 .byte3 = 0,
856 .cmo =
857 #ifdef CONFIG_PPC_SMLPAR
858 OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
859 #else
860 0,
861 #endif
862 .associativity = OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN),
863 .bin_opts = OV5_FEAT(OV5_RESIZE_HPT) | OV5_FEAT(OV5_HP_EVT),
864 .micro_checkpoint = 0,
865 .reserved0 = 0,
866 .max_cpus = cpu_to_be32(NR_CPUS), /* number of cores supported */
867 .papr_level = 0,
868 .reserved1 = 0,
869 .platform_facilities = OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | OV5_FEAT(OV5_PFO_HW_842),
870 .reserved2 = 0,
871 .reserved3 = 0,
872 .subprocessors = 1,
873 .intarch = 0,
874 .mmu = 0,
875 .hash_ext = 0,
876 .radix_ext = 0,
877 },
878
879 /* option vector 6: IBM PAPR hints */
880 .vec6_len = VECTOR_LENGTH(sizeof(struct option_vector6)),
881 .vec6 = {
882 .reserved = 0,
883 .secondary_pteg = 0,
884 .os_name = OV6_LINUX,
885 },
886 };
887
888 /* Old method - ELF header with PT_NOTE sections only works on BE */
889 #ifdef __BIG_ENDIAN__
890 static struct fake_elf {
891 Elf32_Ehdr elfhdr;
892 Elf32_Phdr phdr[2];
893 struct chrpnote {
894 u32 namesz;
895 u32 descsz;
896 u32 type;
897 char name[8]; /* "PowerPC" */
898 struct chrpdesc {
899 u32 real_mode;
900 u32 real_base;
901 u32 real_size;
902 u32 virt_base;
903 u32 virt_size;
904 u32 load_base;
905 } chrpdesc;
906 } chrpnote;
907 struct rpanote {
908 u32 namesz;
909 u32 descsz;
910 u32 type;
911 char name[24]; /* "IBM,RPA-Client-Config" */
912 struct rpadesc {
913 u32 lpar_affinity;
914 u32 min_rmo_size;
915 u32 min_rmo_percent;
916 u32 max_pft_size;
917 u32 splpar;
918 u32 min_load;
919 u32 new_mem_def;
920 u32 ignore_me;
921 } rpadesc;
922 } rpanote;
923 } fake_elf = {
924 .elfhdr = {
925 .e_ident = { 0x7f, 'E', 'L', 'F',
926 ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
927 .e_type = ET_EXEC, /* yeah right */
928 .e_machine = EM_PPC,
929 .e_version = EV_CURRENT,
930 .e_phoff = offsetof(struct fake_elf, phdr),
931 .e_phentsize = sizeof(Elf32_Phdr),
932 .e_phnum = 2
933 },
934 .phdr = {
935 [0] = {
936 .p_type = PT_NOTE,
937 .p_offset = offsetof(struct fake_elf, chrpnote),
938 .p_filesz = sizeof(struct chrpnote)
939 }, [1] = {
940 .p_type = PT_NOTE,
941 .p_offset = offsetof(struct fake_elf, rpanote),
942 .p_filesz = sizeof(struct rpanote)
943 }
944 },
945 .chrpnote = {
946 .namesz = sizeof("PowerPC"),
947 .descsz = sizeof(struct chrpdesc),
948 .type = 0x1275,
949 .name = "PowerPC",
950 .chrpdesc = {
951 .real_mode = ~0U, /* ~0 means "don't care" */
952 .real_base = ~0U,
953 .real_size = ~0U,
954 .virt_base = ~0U,
955 .virt_size = ~0U,
956 .load_base = ~0U
957 },
958 },
959 .rpanote = {
960 .namesz = sizeof("IBM,RPA-Client-Config"),
961 .descsz = sizeof(struct rpadesc),
962 .type = 0x12759999,
963 .name = "IBM,RPA-Client-Config",
964 .rpadesc = {
965 .lpar_affinity = 0,
966 .min_rmo_size = 64, /* in megabytes */
967 .min_rmo_percent = 0,
968 .max_pft_size = 48, /* 2^48 bytes max PFT size */
969 .splpar = 1,
970 .min_load = ~0U,
971 .new_mem_def = 0
972 }
973 }
974 };
975 #endif /* __BIG_ENDIAN__ */
976
prom_count_smt_threads(void)977 static int __init prom_count_smt_threads(void)
978 {
979 phandle node;
980 char type[64];
981 unsigned int plen;
982
983 /* Pick up th first CPU node we can find */
984 for (node = 0; prom_next_node(&node); ) {
985 type[0] = 0;
986 prom_getprop(node, "device_type", type, sizeof(type));
987
988 if (strcmp(type, "cpu"))
989 continue;
990 /*
991 * There is an entry for each smt thread, each entry being
992 * 4 bytes long. All cpus should have the same number of
993 * smt threads, so return after finding the first.
994 */
995 plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s");
996 if (plen == PROM_ERROR)
997 break;
998 plen >>= 2;
999 prom_debug("Found %lu smt threads per core\n", (unsigned long)plen);
1000
1001 /* Sanity check */
1002 if (plen < 1 || plen > 64) {
1003 prom_printf("Threads per core %lu out of bounds, assuming 1\n",
1004 (unsigned long)plen);
1005 return 1;
1006 }
1007 return plen;
1008 }
1009 prom_debug("No threads found, assuming 1 per core\n");
1010
1011 return 1;
1012
1013 }
1014
prom_parse_mmu_model(u8 val,struct platform_support * support)1015 static void __init prom_parse_mmu_model(u8 val,
1016 struct platform_support *support)
1017 {
1018 switch (val) {
1019 case OV5_FEAT(OV5_MMU_DYNAMIC):
1020 case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */
1021 prom_debug("MMU - either supported\n");
1022 support->radix_mmu = !prom_radix_disable;
1023 support->hash_mmu = true;
1024 break;
1025 case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */
1026 prom_debug("MMU - radix only\n");
1027 if (prom_radix_disable) {
1028 /*
1029 * If we __have__ to do radix, we're better off ignoring
1030 * the command line rather than not booting.
1031 */
1032 prom_printf("WARNING: Ignoring cmdline option disable_radix\n");
1033 }
1034 support->radix_mmu = true;
1035 break;
1036 case OV5_FEAT(OV5_MMU_HASH):
1037 prom_debug("MMU - hash only\n");
1038 support->hash_mmu = true;
1039 break;
1040 default:
1041 prom_debug("Unknown mmu support option: 0x%x\n", val);
1042 break;
1043 }
1044 }
1045
prom_parse_xive_model(u8 val,struct platform_support * support)1046 static void __init prom_parse_xive_model(u8 val,
1047 struct platform_support *support)
1048 {
1049 switch (val) {
1050 case OV5_FEAT(OV5_XIVE_EITHER): /* Either Available */
1051 prom_debug("XIVE - either mode supported\n");
1052 support->xive = true;
1053 break;
1054 case OV5_FEAT(OV5_XIVE_EXPLOIT): /* Only Exploitation mode */
1055 prom_debug("XIVE - exploitation mode supported\n");
1056 support->xive = true;
1057 break;
1058 case OV5_FEAT(OV5_XIVE_LEGACY): /* Only Legacy mode */
1059 prom_debug("XIVE - legacy mode supported\n");
1060 break;
1061 default:
1062 prom_debug("Unknown xive support option: 0x%x\n", val);
1063 break;
1064 }
1065 }
1066
prom_parse_platform_support(u8 index,u8 val,struct platform_support * support)1067 static void __init prom_parse_platform_support(u8 index, u8 val,
1068 struct platform_support *support)
1069 {
1070 switch (index) {
1071 case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */
1072 prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support);
1073 break;
1074 case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */
1075 if (val & OV5_FEAT(OV5_RADIX_GTSE)) {
1076 prom_debug("Radix - GTSE supported\n");
1077 support->radix_gtse = true;
1078 }
1079 break;
1080 case OV5_INDX(OV5_XIVE_SUPPORT): /* Interrupt mode */
1081 prom_parse_xive_model(val & OV5_FEAT(OV5_XIVE_SUPPORT),
1082 support);
1083 break;
1084 }
1085 }
1086
prom_check_platform_support(void)1087 static void __init prom_check_platform_support(void)
1088 {
1089 struct platform_support supported = {
1090 .hash_mmu = false,
1091 .radix_mmu = false,
1092 .radix_gtse = false,
1093 .xive = false
1094 };
1095 int prop_len = prom_getproplen(prom.chosen,
1096 "ibm,arch-vec-5-platform-support");
1097 if (prop_len > 1) {
1098 int i;
1099 u8 vec[prop_len];
1100 prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n",
1101 prop_len);
1102 prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support",
1103 &vec, sizeof(vec));
1104 for (i = 0; i < prop_len; i += 2) {
1105 prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2
1106 , vec[i]
1107 , vec[i + 1]);
1108 prom_parse_platform_support(vec[i], vec[i + 1],
1109 &supported);
1110 }
1111 }
1112
1113 if (supported.radix_mmu && supported.radix_gtse) {
1114 /* Radix preferred - but we require GTSE for now */
1115 prom_debug("Asking for radix with GTSE\n");
1116 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX);
1117 ibm_architecture_vec.vec5.radix_ext = OV5_FEAT(OV5_RADIX_GTSE);
1118 } else if (supported.hash_mmu) {
1119 /* Default to hash mmu (if we can) */
1120 prom_debug("Asking for hash\n");
1121 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH);
1122 } else {
1123 /* We're probably on a legacy hypervisor */
1124 prom_debug("Assuming legacy hash support\n");
1125 }
1126
1127 if (supported.xive) {
1128 prom_debug("Asking for XIVE\n");
1129 ibm_architecture_vec.vec5.intarch = OV5_FEAT(OV5_XIVE_EXPLOIT);
1130 }
1131 }
1132
prom_send_capabilities(void)1133 static void __init prom_send_capabilities(void)
1134 {
1135 ihandle root;
1136 prom_arg_t ret;
1137 u32 cores;
1138
1139 /* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */
1140 prom_check_platform_support();
1141
1142 root = call_prom("open", 1, 1, ADDR("/"));
1143 if (root != 0) {
1144 /* We need to tell the FW about the number of cores we support.
1145 *
1146 * To do that, we count the number of threads on the first core
1147 * (we assume this is the same for all cores) and use it to
1148 * divide NR_CPUS.
1149 */
1150
1151 cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
1152 prom_printf("Max number of cores passed to firmware: %u (NR_CPUS = %d)\n",
1153 cores, NR_CPUS);
1154
1155 ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores);
1156
1157 /* try calling the ibm,client-architecture-support method */
1158 prom_printf("Calling ibm,client-architecture-support...");
1159 if (call_prom_ret("call-method", 3, 2, &ret,
1160 ADDR("ibm,client-architecture-support"),
1161 root,
1162 ADDR(&ibm_architecture_vec)) == 0) {
1163 /* the call exists... */
1164 if (ret)
1165 prom_printf("\nWARNING: ibm,client-architecture"
1166 "-support call FAILED!\n");
1167 call_prom("close", 1, 0, root);
1168 prom_printf(" done\n");
1169 return;
1170 }
1171 call_prom("close", 1, 0, root);
1172 prom_printf(" not implemented\n");
1173 }
1174
1175 #ifdef __BIG_ENDIAN__
1176 {
1177 ihandle elfloader;
1178
1179 /* no ibm,client-architecture-support call, try the old way */
1180 elfloader = call_prom("open", 1, 1,
1181 ADDR("/packages/elf-loader"));
1182 if (elfloader == 0) {
1183 prom_printf("couldn't open /packages/elf-loader\n");
1184 return;
1185 }
1186 call_prom("call-method", 3, 1, ADDR("process-elf-header"),
1187 elfloader, ADDR(&fake_elf));
1188 call_prom("close", 1, 0, elfloader);
1189 }
1190 #endif /* __BIG_ENDIAN__ */
1191 }
1192 #endif /* #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
1193
1194 /*
1195 * Memory allocation strategy... our layout is normally:
1196 *
1197 * at 14Mb or more we have vmlinux, then a gap and initrd. In some
1198 * rare cases, initrd might end up being before the kernel though.
1199 * We assume this won't override the final kernel at 0, we have no
1200 * provision to handle that in this version, but it should hopefully
1201 * never happen.
1202 *
1203 * alloc_top is set to the top of RMO, eventually shrink down if the
1204 * TCEs overlap
1205 *
1206 * alloc_bottom is set to the top of kernel/initrd
1207 *
1208 * from there, allocations are done this way : rtas is allocated
1209 * topmost, and the device-tree is allocated from the bottom. We try
1210 * to grow the device-tree allocation as we progress. If we can't,
1211 * then we fail, we don't currently have a facility to restart
1212 * elsewhere, but that shouldn't be necessary.
1213 *
1214 * Note that calls to reserve_mem have to be done explicitly, memory
1215 * allocated with either alloc_up or alloc_down isn't automatically
1216 * reserved.
1217 */
1218
1219
1220 /*
1221 * Allocates memory in the RMO upward from the kernel/initrd
1222 *
1223 * When align is 0, this is a special case, it means to allocate in place
1224 * at the current location of alloc_bottom or fail (that is basically
1225 * extending the previous allocation). Used for the device-tree flattening
1226 */
alloc_up(unsigned long size,unsigned long align)1227 static unsigned long __init alloc_up(unsigned long size, unsigned long align)
1228 {
1229 unsigned long base = alloc_bottom;
1230 unsigned long addr = 0;
1231
1232 if (align)
1233 base = _ALIGN_UP(base, align);
1234 prom_debug("%s(%lx, %lx)\n", __func__, size, align);
1235 if (ram_top == 0)
1236 prom_panic("alloc_up() called with mem not initialized\n");
1237
1238 if (align)
1239 base = _ALIGN_UP(alloc_bottom, align);
1240 else
1241 base = alloc_bottom;
1242
1243 for(; (base + size) <= alloc_top;
1244 base = _ALIGN_UP(base + 0x100000, align)) {
1245 prom_debug(" trying: 0x%lx\n\r", base);
1246 addr = (unsigned long)prom_claim(base, size, 0);
1247 if (addr != PROM_ERROR && addr != 0)
1248 break;
1249 addr = 0;
1250 if (align == 0)
1251 break;
1252 }
1253 if (addr == 0)
1254 return 0;
1255 alloc_bottom = addr + size;
1256
1257 prom_debug(" -> %lx\n", addr);
1258 prom_debug(" alloc_bottom : %lx\n", alloc_bottom);
1259 prom_debug(" alloc_top : %lx\n", alloc_top);
1260 prom_debug(" alloc_top_hi : %lx\n", alloc_top_high);
1261 prom_debug(" rmo_top : %lx\n", rmo_top);
1262 prom_debug(" ram_top : %lx\n", ram_top);
1263
1264 return addr;
1265 }
1266
1267 /*
1268 * Allocates memory downward, either from top of RMO, or if highmem
1269 * is set, from the top of RAM. Note that this one doesn't handle
1270 * failures. It does claim memory if highmem is not set.
1271 */
alloc_down(unsigned long size,unsigned long align,int highmem)1272 static unsigned long __init alloc_down(unsigned long size, unsigned long align,
1273 int highmem)
1274 {
1275 unsigned long base, addr = 0;
1276
1277 prom_debug("%s(%lx, %lx, %s)\n", __func__, size, align,
1278 highmem ? "(high)" : "(low)");
1279 if (ram_top == 0)
1280 prom_panic("alloc_down() called with mem not initialized\n");
1281
1282 if (highmem) {
1283 /* Carve out storage for the TCE table. */
1284 addr = _ALIGN_DOWN(alloc_top_high - size, align);
1285 if (addr <= alloc_bottom)
1286 return 0;
1287 /* Will we bump into the RMO ? If yes, check out that we
1288 * didn't overlap existing allocations there, if we did,
1289 * we are dead, we must be the first in town !
1290 */
1291 if (addr < rmo_top) {
1292 /* Good, we are first */
1293 if (alloc_top == rmo_top)
1294 alloc_top = rmo_top = addr;
1295 else
1296 return 0;
1297 }
1298 alloc_top_high = addr;
1299 goto bail;
1300 }
1301
1302 base = _ALIGN_DOWN(alloc_top - size, align);
1303 for (; base > alloc_bottom;
1304 base = _ALIGN_DOWN(base - 0x100000, align)) {
1305 prom_debug(" trying: 0x%lx\n\r", base);
1306 addr = (unsigned long)prom_claim(base, size, 0);
1307 if (addr != PROM_ERROR && addr != 0)
1308 break;
1309 addr = 0;
1310 }
1311 if (addr == 0)
1312 return 0;
1313 alloc_top = addr;
1314
1315 bail:
1316 prom_debug(" -> %lx\n", addr);
1317 prom_debug(" alloc_bottom : %lx\n", alloc_bottom);
1318 prom_debug(" alloc_top : %lx\n", alloc_top);
1319 prom_debug(" alloc_top_hi : %lx\n", alloc_top_high);
1320 prom_debug(" rmo_top : %lx\n", rmo_top);
1321 prom_debug(" ram_top : %lx\n", ram_top);
1322
1323 return addr;
1324 }
1325
1326 /*
1327 * Parse a "reg" cell
1328 */
prom_next_cell(int s,cell_t ** cellp)1329 static unsigned long __init prom_next_cell(int s, cell_t **cellp)
1330 {
1331 cell_t *p = *cellp;
1332 unsigned long r = 0;
1333
1334 /* Ignore more than 2 cells */
1335 while (s > sizeof(unsigned long) / 4) {
1336 p++;
1337 s--;
1338 }
1339 r = be32_to_cpu(*p++);
1340 #ifdef CONFIG_PPC64
1341 if (s > 1) {
1342 r <<= 32;
1343 r |= be32_to_cpu(*(p++));
1344 }
1345 #endif
1346 *cellp = p;
1347 return r;
1348 }
1349
1350 /*
1351 * Very dumb function for adding to the memory reserve list, but
1352 * we don't need anything smarter at this point
1353 *
1354 * XXX Eventually check for collisions. They should NEVER happen.
1355 * If problems seem to show up, it would be a good start to track
1356 * them down.
1357 */
reserve_mem(u64 base,u64 size)1358 static void __init reserve_mem(u64 base, u64 size)
1359 {
1360 u64 top = base + size;
1361 unsigned long cnt = mem_reserve_cnt;
1362
1363 if (size == 0)
1364 return;
1365
1366 /* We need to always keep one empty entry so that we
1367 * have our terminator with "size" set to 0 since we are
1368 * dumb and just copy this entire array to the boot params
1369 */
1370 base = _ALIGN_DOWN(base, PAGE_SIZE);
1371 top = _ALIGN_UP(top, PAGE_SIZE);
1372 size = top - base;
1373
1374 if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
1375 prom_panic("Memory reserve map exhausted !\n");
1376 mem_reserve_map[cnt].base = cpu_to_be64(base);
1377 mem_reserve_map[cnt].size = cpu_to_be64(size);
1378 mem_reserve_cnt = cnt + 1;
1379 }
1380
1381 /*
1382 * Initialize memory allocation mechanism, parse "memory" nodes and
1383 * obtain that way the top of memory and RMO to setup out local allocator
1384 */
prom_init_mem(void)1385 static void __init prom_init_mem(void)
1386 {
1387 phandle node;
1388 char *path, type[64];
1389 unsigned int plen;
1390 cell_t *p, *endp;
1391 __be32 val;
1392 u32 rac, rsc;
1393
1394 /*
1395 * We iterate the memory nodes to find
1396 * 1) top of RMO (first node)
1397 * 2) top of memory
1398 */
1399 val = cpu_to_be32(2);
1400 prom_getprop(prom.root, "#address-cells", &val, sizeof(val));
1401 rac = be32_to_cpu(val);
1402 val = cpu_to_be32(1);
1403 prom_getprop(prom.root, "#size-cells", &val, sizeof(rsc));
1404 rsc = be32_to_cpu(val);
1405 prom_debug("root_addr_cells: %x\n", rac);
1406 prom_debug("root_size_cells: %x\n", rsc);
1407
1408 prom_debug("scanning memory:\n");
1409 path = prom_scratch;
1410
1411 for (node = 0; prom_next_node(&node); ) {
1412 type[0] = 0;
1413 prom_getprop(node, "device_type", type, sizeof(type));
1414
1415 if (type[0] == 0) {
1416 /*
1417 * CHRP Longtrail machines have no device_type
1418 * on the memory node, so check the name instead...
1419 */
1420 prom_getprop(node, "name", type, sizeof(type));
1421 }
1422 if (strcmp(type, "memory"))
1423 continue;
1424
1425 plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf));
1426 if (plen > sizeof(regbuf)) {
1427 prom_printf("memory node too large for buffer !\n");
1428 plen = sizeof(regbuf);
1429 }
1430 p = regbuf;
1431 endp = p + (plen / sizeof(cell_t));
1432
1433 #ifdef DEBUG_PROM
1434 memset(path, 0, PROM_SCRATCH_SIZE);
1435 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
1436 prom_debug(" node %s :\n", path);
1437 #endif /* DEBUG_PROM */
1438
1439 while ((endp - p) >= (rac + rsc)) {
1440 unsigned long base, size;
1441
1442 base = prom_next_cell(rac, &p);
1443 size = prom_next_cell(rsc, &p);
1444
1445 if (size == 0)
1446 continue;
1447 prom_debug(" %lx %lx\n", base, size);
1448 if (base == 0 && (of_platform & PLATFORM_LPAR))
1449 rmo_top = size;
1450 if ((base + size) > ram_top)
1451 ram_top = base + size;
1452 }
1453 }
1454
1455 alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000);
1456
1457 /*
1458 * If prom_memory_limit is set we reduce the upper limits *except* for
1459 * alloc_top_high. This must be the real top of RAM so we can put
1460 * TCE's up there.
1461 */
1462
1463 alloc_top_high = ram_top;
1464
1465 if (prom_memory_limit) {
1466 if (prom_memory_limit <= alloc_bottom) {
1467 prom_printf("Ignoring mem=%lx <= alloc_bottom.\n",
1468 prom_memory_limit);
1469 prom_memory_limit = 0;
1470 } else if (prom_memory_limit >= ram_top) {
1471 prom_printf("Ignoring mem=%lx >= ram_top.\n",
1472 prom_memory_limit);
1473 prom_memory_limit = 0;
1474 } else {
1475 ram_top = prom_memory_limit;
1476 rmo_top = min(rmo_top, prom_memory_limit);
1477 }
1478 }
1479
1480 /*
1481 * Setup our top alloc point, that is top of RMO or top of
1482 * segment 0 when running non-LPAR.
1483 * Some RS64 machines have buggy firmware where claims up at
1484 * 1GB fail. Cap at 768MB as a workaround.
1485 * Since 768MB is plenty of room, and we need to cap to something
1486 * reasonable on 32-bit, cap at 768MB on all machines.
1487 */
1488 if (!rmo_top)
1489 rmo_top = ram_top;
1490 rmo_top = min(0x30000000ul, rmo_top);
1491 alloc_top = rmo_top;
1492 alloc_top_high = ram_top;
1493
1494 /*
1495 * Check if we have an initrd after the kernel but still inside
1496 * the RMO. If we do move our bottom point to after it.
1497 */
1498 if (prom_initrd_start &&
1499 prom_initrd_start < rmo_top &&
1500 prom_initrd_end > alloc_bottom)
1501 alloc_bottom = PAGE_ALIGN(prom_initrd_end);
1502
1503 prom_printf("memory layout at init:\n");
1504 prom_printf(" memory_limit : %lx (16 MB aligned)\n",
1505 prom_memory_limit);
1506 prom_printf(" alloc_bottom : %lx\n", alloc_bottom);
1507 prom_printf(" alloc_top : %lx\n", alloc_top);
1508 prom_printf(" alloc_top_hi : %lx\n", alloc_top_high);
1509 prom_printf(" rmo_top : %lx\n", rmo_top);
1510 prom_printf(" ram_top : %lx\n", ram_top);
1511 }
1512
prom_close_stdin(void)1513 static void __init prom_close_stdin(void)
1514 {
1515 __be32 val;
1516 ihandle stdin;
1517
1518 if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) {
1519 stdin = be32_to_cpu(val);
1520 call_prom("close", 1, 0, stdin);
1521 }
1522 }
1523
1524 #ifdef CONFIG_PPC_POWERNV
1525
1526 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
1527 static u64 __initdata prom_opal_base;
1528 static u64 __initdata prom_opal_entry;
1529 #endif
1530
1531 /*
1532 * Allocate room for and instantiate OPAL
1533 */
prom_instantiate_opal(void)1534 static void __init prom_instantiate_opal(void)
1535 {
1536 phandle opal_node;
1537 ihandle opal_inst;
1538 u64 base, entry;
1539 u64 size = 0, align = 0x10000;
1540 __be64 val64;
1541 u32 rets[2];
1542
1543 prom_debug("prom_instantiate_opal: start...\n");
1544
1545 opal_node = call_prom("finddevice", 1, 1, ADDR("/ibm,opal"));
1546 prom_debug("opal_node: %x\n", opal_node);
1547 if (!PHANDLE_VALID(opal_node))
1548 return;
1549
1550 val64 = 0;
1551 prom_getprop(opal_node, "opal-runtime-size", &val64, sizeof(val64));
1552 size = be64_to_cpu(val64);
1553 if (size == 0)
1554 return;
1555 val64 = 0;
1556 prom_getprop(opal_node, "opal-runtime-alignment", &val64,sizeof(val64));
1557 align = be64_to_cpu(val64);
1558
1559 base = alloc_down(size, align, 0);
1560 if (base == 0) {
1561 prom_printf("OPAL allocation failed !\n");
1562 return;
1563 }
1564
1565 opal_inst = call_prom("open", 1, 1, ADDR("/ibm,opal"));
1566 if (!IHANDLE_VALID(opal_inst)) {
1567 prom_printf("opening opal package failed (%x)\n", opal_inst);
1568 return;
1569 }
1570
1571 prom_printf("instantiating opal at 0x%llx...", base);
1572
1573 if (call_prom_ret("call-method", 4, 3, rets,
1574 ADDR("load-opal-runtime"),
1575 opal_inst,
1576 base >> 32, base & 0xffffffff) != 0
1577 || (rets[0] == 0 && rets[1] == 0)) {
1578 prom_printf(" failed\n");
1579 return;
1580 }
1581 entry = (((u64)rets[0]) << 32) | rets[1];
1582
1583 prom_printf(" done\n");
1584
1585 reserve_mem(base, size);
1586
1587 prom_debug("opal base = 0x%llx\n", base);
1588 prom_debug("opal align = 0x%llx\n", align);
1589 prom_debug("opal entry = 0x%llx\n", entry);
1590 prom_debug("opal size = 0x%llx\n", size);
1591
1592 prom_setprop(opal_node, "/ibm,opal", "opal-base-address",
1593 &base, sizeof(base));
1594 prom_setprop(opal_node, "/ibm,opal", "opal-entry-address",
1595 &entry, sizeof(entry));
1596
1597 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
1598 prom_opal_base = base;
1599 prom_opal_entry = entry;
1600 #endif
1601 prom_debug("prom_instantiate_opal: end...\n");
1602 }
1603
1604 #endif /* CONFIG_PPC_POWERNV */
1605
1606 /*
1607 * Allocate room for and instantiate RTAS
1608 */
prom_instantiate_rtas(void)1609 static void __init prom_instantiate_rtas(void)
1610 {
1611 phandle rtas_node;
1612 ihandle rtas_inst;
1613 u32 base, entry = 0;
1614 __be32 val;
1615 u32 size = 0;
1616
1617 prom_debug("prom_instantiate_rtas: start...\n");
1618
1619 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1620 prom_debug("rtas_node: %x\n", rtas_node);
1621 if (!PHANDLE_VALID(rtas_node))
1622 return;
1623
1624 val = 0;
1625 prom_getprop(rtas_node, "rtas-size", &val, sizeof(size));
1626 size = be32_to_cpu(val);
1627 if (size == 0)
1628 return;
1629
1630 base = alloc_down(size, PAGE_SIZE, 0);
1631 if (base == 0)
1632 prom_panic("Could not allocate memory for RTAS\n");
1633
1634 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
1635 if (!IHANDLE_VALID(rtas_inst)) {
1636 prom_printf("opening rtas package failed (%x)\n", rtas_inst);
1637 return;
1638 }
1639
1640 prom_printf("instantiating rtas at 0x%x...", base);
1641
1642 if (call_prom_ret("call-method", 3, 2, &entry,
1643 ADDR("instantiate-rtas"),
1644 rtas_inst, base) != 0
1645 || entry == 0) {
1646 prom_printf(" failed\n");
1647 return;
1648 }
1649 prom_printf(" done\n");
1650
1651 reserve_mem(base, size);
1652
1653 val = cpu_to_be32(base);
1654 prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
1655 &val, sizeof(val));
1656 val = cpu_to_be32(entry);
1657 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
1658 &val, sizeof(val));
1659
1660 /* Check if it supports "query-cpu-stopped-state" */
1661 if (prom_getprop(rtas_node, "query-cpu-stopped-state",
1662 &val, sizeof(val)) != PROM_ERROR)
1663 rtas_has_query_cpu_stopped = true;
1664
1665 prom_debug("rtas base = 0x%x\n", base);
1666 prom_debug("rtas entry = 0x%x\n", entry);
1667 prom_debug("rtas size = 0x%x\n", size);
1668
1669 prom_debug("prom_instantiate_rtas: end...\n");
1670 }
1671
1672 #ifdef CONFIG_PPC64
1673 /*
1674 * Allocate room for and instantiate Stored Measurement Log (SML)
1675 */
prom_instantiate_sml(void)1676 static void __init prom_instantiate_sml(void)
1677 {
1678 phandle ibmvtpm_node;
1679 ihandle ibmvtpm_inst;
1680 u32 entry = 0, size = 0, succ = 0;
1681 u64 base;
1682 __be32 val;
1683
1684 prom_debug("prom_instantiate_sml: start...\n");
1685
1686 ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/vdevice/vtpm"));
1687 prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
1688 if (!PHANDLE_VALID(ibmvtpm_node))
1689 return;
1690
1691 ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/vdevice/vtpm"));
1692 if (!IHANDLE_VALID(ibmvtpm_inst)) {
1693 prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);
1694 return;
1695 }
1696
1697 if (prom_getprop(ibmvtpm_node, "ibm,sml-efi-reformat-supported",
1698 &val, sizeof(val)) != PROM_ERROR) {
1699 if (call_prom_ret("call-method", 2, 2, &succ,
1700 ADDR("reformat-sml-to-efi-alignment"),
1701 ibmvtpm_inst) != 0 || succ == 0) {
1702 prom_printf("Reformat SML to EFI alignment failed\n");
1703 return;
1704 }
1705
1706 if (call_prom_ret("call-method", 2, 2, &size,
1707 ADDR("sml-get-allocated-size"),
1708 ibmvtpm_inst) != 0 || size == 0) {
1709 prom_printf("SML get allocated size failed\n");
1710 return;
1711 }
1712 } else {
1713 if (call_prom_ret("call-method", 2, 2, &size,
1714 ADDR("sml-get-handover-size"),
1715 ibmvtpm_inst) != 0 || size == 0) {
1716 prom_printf("SML get handover size failed\n");
1717 return;
1718 }
1719 }
1720
1721 base = alloc_down(size, PAGE_SIZE, 0);
1722 if (base == 0)
1723 prom_panic("Could not allocate memory for sml\n");
1724
1725 prom_printf("instantiating sml at 0x%llx...", base);
1726
1727 memset((void *)base, 0, size);
1728
1729 if (call_prom_ret("call-method", 4, 2, &entry,
1730 ADDR("sml-handover"),
1731 ibmvtpm_inst, size, base) != 0 || entry == 0) {
1732 prom_printf("SML handover failed\n");
1733 return;
1734 }
1735 prom_printf(" done\n");
1736
1737 reserve_mem(base, size);
1738
1739 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-base",
1740 &base, sizeof(base));
1741 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size",
1742 &size, sizeof(size));
1743
1744 prom_debug("sml base = 0x%llx\n", base);
1745 prom_debug("sml size = 0x%x\n", size);
1746
1747 prom_debug("prom_instantiate_sml: end...\n");
1748 }
1749
1750 /*
1751 * Allocate room for and initialize TCE tables
1752 */
1753 #ifdef __BIG_ENDIAN__
prom_initialize_tce_table(void)1754 static void __init prom_initialize_tce_table(void)
1755 {
1756 phandle node;
1757 ihandle phb_node;
1758 char compatible[64], type[64], model[64];
1759 char *path = prom_scratch;
1760 u64 base, align;
1761 u32 minalign, minsize;
1762 u64 tce_entry, *tce_entryp;
1763 u64 local_alloc_top, local_alloc_bottom;
1764 u64 i;
1765
1766 if (prom_iommu_off)
1767 return;
1768
1769 prom_debug("starting prom_initialize_tce_table\n");
1770
1771 /* Cache current top of allocs so we reserve a single block */
1772 local_alloc_top = alloc_top_high;
1773 local_alloc_bottom = local_alloc_top;
1774
1775 /* Search all nodes looking for PHBs. */
1776 for (node = 0; prom_next_node(&node); ) {
1777 compatible[0] = 0;
1778 type[0] = 0;
1779 model[0] = 0;
1780 prom_getprop(node, "compatible",
1781 compatible, sizeof(compatible));
1782 prom_getprop(node, "device_type", type, sizeof(type));
1783 prom_getprop(node, "model", model, sizeof(model));
1784
1785 if ((type[0] == 0) || (strstr(type, "pci") == NULL))
1786 continue;
1787
1788 /* Keep the old logic intact to avoid regression. */
1789 if (compatible[0] != 0) {
1790 if ((strstr(compatible, "python") == NULL) &&
1791 (strstr(compatible, "Speedwagon") == NULL) &&
1792 (strstr(compatible, "Winnipeg") == NULL))
1793 continue;
1794 } else if (model[0] != 0) {
1795 if ((strstr(model, "ython") == NULL) &&
1796 (strstr(model, "peedwagon") == NULL) &&
1797 (strstr(model, "innipeg") == NULL))
1798 continue;
1799 }
1800
1801 if (prom_getprop(node, "tce-table-minalign", &minalign,
1802 sizeof(minalign)) == PROM_ERROR)
1803 minalign = 0;
1804 if (prom_getprop(node, "tce-table-minsize", &minsize,
1805 sizeof(minsize)) == PROM_ERROR)
1806 minsize = 4UL << 20;
1807
1808 /*
1809 * Even though we read what OF wants, we just set the table
1810 * size to 4 MB. This is enough to map 2GB of PCI DMA space.
1811 * By doing this, we avoid the pitfalls of trying to DMA to
1812 * MMIO space and the DMA alias hole.
1813 *
1814 * On POWER4, firmware sets the TCE region by assuming
1815 * each TCE table is 8MB. Using this memory for anything
1816 * else will impact performance, so we always allocate 8MB.
1817 * Anton
1818 */
1819 if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p))
1820 minsize = 8UL << 20;
1821 else
1822 minsize = 4UL << 20;
1823
1824 /* Align to the greater of the align or size */
1825 align = max(minalign, minsize);
1826 base = alloc_down(minsize, align, 1);
1827 if (base == 0)
1828 prom_panic("ERROR, cannot find space for TCE table.\n");
1829 if (base < local_alloc_bottom)
1830 local_alloc_bottom = base;
1831
1832 /* It seems OF doesn't null-terminate the path :-( */
1833 memset(path, 0, PROM_SCRATCH_SIZE);
1834 /* Call OF to setup the TCE hardware */
1835 if (call_prom("package-to-path", 3, 1, node,
1836 path, PROM_SCRATCH_SIZE-1) == PROM_ERROR) {
1837 prom_printf("package-to-path failed\n");
1838 }
1839
1840 /* Save away the TCE table attributes for later use. */
1841 prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
1842 prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
1843
1844 prom_debug("TCE table: %s\n", path);
1845 prom_debug("\tnode = 0x%x\n", node);
1846 prom_debug("\tbase = 0x%llx\n", base);
1847 prom_debug("\tsize = 0x%x\n", minsize);
1848
1849 /* Initialize the table to have a one-to-one mapping
1850 * over the allocated size.
1851 */
1852 tce_entryp = (u64 *)base;
1853 for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
1854 tce_entry = (i << PAGE_SHIFT);
1855 tce_entry |= 0x3;
1856 *tce_entryp = tce_entry;
1857 }
1858
1859 prom_printf("opening PHB %s", path);
1860 phb_node = call_prom("open", 1, 1, path);
1861 if (phb_node == 0)
1862 prom_printf("... failed\n");
1863 else
1864 prom_printf("... done\n");
1865
1866 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
1867 phb_node, -1, minsize,
1868 (u32) base, (u32) (base >> 32));
1869 call_prom("close", 1, 0, phb_node);
1870 }
1871
1872 reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
1873
1874 /* These are only really needed if there is a memory limit in
1875 * effect, but we don't know so export them always. */
1876 prom_tce_alloc_start = local_alloc_bottom;
1877 prom_tce_alloc_end = local_alloc_top;
1878
1879 /* Flag the first invalid entry */
1880 prom_debug("ending prom_initialize_tce_table\n");
1881 }
1882 #endif /* __BIG_ENDIAN__ */
1883 #endif /* CONFIG_PPC64 */
1884
1885 /*
1886 * With CHRP SMP we need to use the OF to start the other processors.
1887 * We can't wait until smp_boot_cpus (the OF is trashed by then)
1888 * so we have to put the processors into a holding pattern controlled
1889 * by the kernel (not OF) before we destroy the OF.
1890 *
1891 * This uses a chunk of low memory, puts some holding pattern
1892 * code there and sends the other processors off to there until
1893 * smp_boot_cpus tells them to do something. The holding pattern
1894 * checks that address until its cpu # is there, when it is that
1895 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care
1896 * of setting those values.
1897 *
1898 * We also use physical address 0x4 here to tell when a cpu
1899 * is in its holding pattern code.
1900 *
1901 * -- Cort
1902 */
1903 /*
1904 * We want to reference the copy of __secondary_hold_* in the
1905 * 0 - 0x100 address range
1906 */
1907 #define LOW_ADDR(x) (((unsigned long) &(x)) & 0xff)
1908
prom_hold_cpus(void)1909 static void __init prom_hold_cpus(void)
1910 {
1911 unsigned long i;
1912 phandle node;
1913 char type[64];
1914 unsigned long *spinloop
1915 = (void *) LOW_ADDR(__secondary_hold_spinloop);
1916 unsigned long *acknowledge
1917 = (void *) LOW_ADDR(__secondary_hold_acknowledge);
1918 unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
1919
1920 /*
1921 * On pseries, if RTAS supports "query-cpu-stopped-state",
1922 * we skip this stage, the CPUs will be started by the
1923 * kernel using RTAS.
1924 */
1925 if ((of_platform == PLATFORM_PSERIES ||
1926 of_platform == PLATFORM_PSERIES_LPAR) &&
1927 rtas_has_query_cpu_stopped) {
1928 prom_printf("prom_hold_cpus: skipped\n");
1929 return;
1930 }
1931
1932 prom_debug("prom_hold_cpus: start...\n");
1933 prom_debug(" 1) spinloop = 0x%lx\n", (unsigned long)spinloop);
1934 prom_debug(" 1) *spinloop = 0x%lx\n", *spinloop);
1935 prom_debug(" 1) acknowledge = 0x%lx\n",
1936 (unsigned long)acknowledge);
1937 prom_debug(" 1) *acknowledge = 0x%lx\n", *acknowledge);
1938 prom_debug(" 1) secondary_hold = 0x%lx\n", secondary_hold);
1939
1940 /* Set the common spinloop variable, so all of the secondary cpus
1941 * will block when they are awakened from their OF spinloop.
1942 * This must occur for both SMP and non SMP kernels, since OF will
1943 * be trashed when we move the kernel.
1944 */
1945 *spinloop = 0;
1946
1947 /* look for cpus */
1948 for (node = 0; prom_next_node(&node); ) {
1949 unsigned int cpu_no;
1950 __be32 reg;
1951
1952 type[0] = 0;
1953 prom_getprop(node, "device_type", type, sizeof(type));
1954 if (strcmp(type, "cpu") != 0)
1955 continue;
1956
1957 /* Skip non-configured cpus. */
1958 if (prom_getprop(node, "status", type, sizeof(type)) > 0)
1959 if (strcmp(type, "okay") != 0)
1960 continue;
1961
1962 reg = cpu_to_be32(-1); /* make sparse happy */
1963 prom_getprop(node, "reg", ®, sizeof(reg));
1964 cpu_no = be32_to_cpu(reg);
1965
1966 prom_debug("cpu hw idx = %u\n", cpu_no);
1967
1968 /* Init the acknowledge var which will be reset by
1969 * the secondary cpu when it awakens from its OF
1970 * spinloop.
1971 */
1972 *acknowledge = (unsigned long)-1;
1973
1974 if (cpu_no != prom.cpu) {
1975 /* Primary Thread of non-boot cpu or any thread */
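			/* OF's "start-cpu" method takes the cpu node, an
			 * entry point (here the low-memory copy of
			 * __secondary_hold) and a single argument handed
			 * to the started cpu (its hw index). */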
1976 prom_printf("starting cpu hw idx %u... ", cpu_no);
1977 call_prom("start-cpu", 3, 0, node,
1978 secondary_hold, cpu_no);
1979
1980 for (i = 0; (i < 100000000) &&
1981 (*acknowledge == ((unsigned long)-1)); i++ )
1982 mb();
1983
1984 if (*acknowledge == cpu_no)
1985 prom_printf("done\n");
1986 else
1987 prom_printf("failed: %lx\n", *acknowledge);
1988 }
1989 #ifdef CONFIG_SMP
1990 else
1991 prom_printf("boot cpu hw idx %u\n", cpu_no);
1992 #endif /* CONFIG_SMP */
1993 }
1994
1995 prom_debug("prom_hold_cpus: end...\n");
1996 }
1997
1998
1999 static void __init prom_init_client_services(unsigned long pp)
2000 {
2001 /* Get a handle to the prom entry point before anything else */
2002 prom_entry = pp;
2003
2004 /* get a handle for the stdout device */
2005 prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
2006 if (!PHANDLE_VALID(prom.chosen))
2007 prom_panic("cannot find chosen"); /* msg won't be printed :( */
2008
2009 /* get device tree root */
2010 prom.root = call_prom("finddevice", 1, 1, ADDR("/"));
2011 if (!PHANDLE_VALID(prom.root))
2012 prom_panic("cannot find device tree root"); /* msg won't be printed :( */
2013
2014 prom.mmumap = 0;
2015 }
2016
2017 #ifdef CONFIG_PPC32
2018 /*
2019 * For really old powermacs, we need to map things we claim.
2020 * For that, we need the ihandle of the mmu.
2021 * Also, on the longtrail, we need to work around other bugs.
2022 */
2023 static void __init prom_find_mmu(void)
2024 {
2025 phandle oprom;
2026 char version[64];
2027
2028 oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
2029 if (!PHANDLE_VALID(oprom))
2030 return;
2031 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
2032 return;
2033 version[sizeof(version) - 1] = 0;
2034 /* XXX might need to add other versions here */
2035 if (strcmp(version, "Open Firmware, 1.0.5") == 0)
2036 of_workarounds = OF_WA_CLAIM;
2037 else if (strncmp(version, "FirmWorks,3.", 12) == 0) {
2038 of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
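		/* The interpreted Forth below selects the /memory node
		 * ("dev /memory") and stores 0 into its allow-reclaim
		 * value; the exact effect is LongTrail-firmware specific
		 * (part of the OF_WA_LONGTRAIL workaround). */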
2039 call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
2040 } else
2041 return;
2042 prom.memory = call_prom("open", 1, 1, ADDR("/memory"));
2043 prom_getprop(prom.chosen, "mmu", &prom.mmumap,
2044 sizeof(prom.mmumap));
2045 prom.mmumap = be32_to_cpu(prom.mmumap);
2046 if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap))
2047 of_workarounds &= ~OF_WA_CLAIM; /* hmmm */
2048 }
2049 #else
2050 #define prom_find_mmu()
2051 #endif
2052
2053 static void __init prom_init_stdout(void)
2054 {
2055 char *path = of_stdout_device;
2056 char type[16];
2057 phandle stdout_node;
2058 __be32 val;
2059
2060 if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0)
2061 prom_panic("cannot find stdout");
2062
2063 prom.stdout = be32_to_cpu(val);
2064
2065 /* Get the full OF pathname of the stdout device */
2066 memset(path, 0, 256);
2067 call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
2068 prom_printf("OF stdout device is: %s\n", of_stdout_device);
2069 prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
2070 path, strlen(path) + 1);
2071
2072 /* instance-to-package fails on PA-Semi */
2073 stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
2074 if (stdout_node != PROM_ERROR) {
2075 val = cpu_to_be32(stdout_node);
2076 prom_setprop(prom.chosen, "/chosen", "linux,stdout-package",
2077 &val, sizeof(val));
2078
2079 /* If it's a display, note it */
2080 memset(type, 0, sizeof(type));
2081 prom_getprop(stdout_node, "device_type", type, sizeof(type));
2082 if (strcmp(type, "display") == 0)
2083 prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
2084 }
2085 }
2086
2087 static int __init prom_find_machine_type(void)
2088 {
2089 char compat[256];
2090 int len, i = 0;
2091 #ifdef CONFIG_PPC64
2092 phandle rtas;
2093 int x;
2094 #endif
2095
2096 /* Look for a PowerMac or a Cell */
2097 len = prom_getprop(prom.root, "compatible",
2098 compat, sizeof(compat)-1);
2099 if (len > 0) {
2100 compat[len] = 0;
2101 while (i < len) {
2102 char *p = &compat[i];
2103 int sl = strlen(p);
2104 if (sl == 0)
2105 break;
2106 if (strstr(p, "Power Macintosh") ||
2107 strstr(p, "MacRISC"))
2108 return PLATFORM_POWERMAC;
2109 #ifdef CONFIG_PPC64
2110 /* We must make sure we don't detect the IBM Cell
2111 * blades as pSeries due to some firmware issues,
2112 * so we do it here.
2113 */
2114 if (strstr(p, "IBM,CBEA") ||
2115 strstr(p, "IBM,CPBW-1.0"))
2116 return PLATFORM_GENERIC;
2117 #endif /* CONFIG_PPC64 */
2118 i += sl + 1;
2119 }
2120 }
2121 #ifdef CONFIG_PPC64
2122 /* Try to detect OPAL */
2123 if (PHANDLE_VALID(call_prom("finddevice", 1, 1, ADDR("/ibm,opal"))))
2124 return PLATFORM_OPAL;
2125
2126 /* Try to figure out if it's an IBM pSeries or any other
2127 * PAPR compliant platform. We assume it is if:
2128 * - /device_type is "chrp" (please, do NOT use that for future
2129 * non-IBM designs!)
2130 * - it has /rtas
2131 */
2132 len = prom_getprop(prom.root, "device_type",
2133 compat, sizeof(compat)-1);
2134 if (len <= 0)
2135 return PLATFORM_GENERIC;
2136 if (strcmp(compat, "chrp"))
2137 return PLATFORM_GENERIC;
2138
2139 /* Default to pSeries. We need to know if we are running LPAR */
2140 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
2141 if (!PHANDLE_VALID(rtas))
2142 return PLATFORM_GENERIC;
2143 x = prom_getproplen(rtas, "ibm,hypertas-functions");
2144 if (x != PROM_ERROR) {
2145 prom_debug("Hypertas detected, assuming LPAR !\n");
2146 return PLATFORM_PSERIES_LPAR;
2147 }
2148 return PLATFORM_PSERIES;
2149 #else
2150 return PLATFORM_GENERIC;
2151 #endif
2152 }
2153
2154 static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
2155 {
2156 return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
2157 }
2158
2159 /*
2160 * If we have a display that we don't know how to drive,
2161 * we will want to try to execute OF's open method for it
2162 * later. However, OF will probably fall over if we do that
2163 * after we've taken over the MMU.
2164 * So we check whether we will need to open the display,
2165 * and if so, open it now.
2166 */
2167 static void __init prom_check_displays(void)
2168 {
2169 char type[16], *path;
2170 phandle node;
2171 ihandle ih;
2172 int i;
2173
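	/* Fallback colour map: 16 r,g,b byte triplets (the eight dark
	 * colours followed by their bright variants), installed via OF's
	 * color! method below. */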
2174 static unsigned char default_colors[] = {
2175 0x00, 0x00, 0x00,
2176 0x00, 0x00, 0xaa,
2177 0x00, 0xaa, 0x00,
2178 0x00, 0xaa, 0xaa,
2179 0xaa, 0x00, 0x00,
2180 0xaa, 0x00, 0xaa,
2181 0xaa, 0xaa, 0x00,
2182 0xaa, 0xaa, 0xaa,
2183 0x55, 0x55, 0x55,
2184 0x55, 0x55, 0xff,
2185 0x55, 0xff, 0x55,
2186 0x55, 0xff, 0xff,
2187 0xff, 0x55, 0x55,
2188 0xff, 0x55, 0xff,
2189 0xff, 0xff, 0x55,
2190 0xff, 0xff, 0xff
2191 };
2192 const unsigned char *clut;
2193
2194 prom_debug("Looking for displays\n");
2195 for (node = 0; prom_next_node(&node); ) {
2196 memset(type, 0, sizeof(type));
2197 prom_getprop(node, "device_type", type, sizeof(type));
2198 if (strcmp(type, "display") != 0)
2199 continue;
2200
2201 /* It seems OF doesn't null-terminate the path :-( */
2202 path = prom_scratch;
2203 memset(path, 0, PROM_SCRATCH_SIZE);
2204
2205 /*
2206 * leave some room at the end of the path for appending extra
2207 * arguments
2208 */
2209 if (call_prom("package-to-path", 3, 1, node, path,
2210 PROM_SCRATCH_SIZE-10) == PROM_ERROR)
2211 continue;
2212 prom_printf("found display : %s, opening... ", path);
2213
2214 ih = call_prom("open", 1, 1, path);
2215 if (ih == 0) {
2216 prom_printf("failed\n");
2217 continue;
2218 }
2219
2220 /* Success */
2221 prom_printf("done\n");
2222 prom_setprop(node, path, "linux,opened", NULL, 0);
2223
2224 /* Setup a usable color table when the appropriate
2225 * method is available. Should update this to set-colors */
2226 clut = default_colors;
2227 for (i = 0; i < 16; i++, clut += 3)
2228 if (prom_set_color(ih, i, clut[0], clut[1],
2229 clut[2]) != 0)
2230 break;
2231
2232 #ifdef CONFIG_LOGO_LINUX_CLUT224
2233 clut = PTRRELOC(logo_linux_clut224.clut);
2234 for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3)
2235 if (prom_set_color(ih, i + 32, clut[0], clut[1],
2236 clut[2]) != 0)
2237 break;
2238 #endif /* CONFIG_LOGO_LINUX_CLUT224 */
2239
2240 #ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
2241 if (prom_getprop(node, "linux,boot-display", NULL, 0) !=
2242 PROM_ERROR) {
2243 u32 width, height, pitch, addr;
2244
2245 prom_printf("Setting btext !\n");
2246 prom_getprop(node, "width", &width, 4);
2247 prom_getprop(node, "height", &height, 4);
2248 prom_getprop(node, "linebytes", &pitch, 4);
2249 prom_getprop(node, "address", &addr, 4);
2250 prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
2251 width, height, pitch, addr);
2252 btext_setup_display(width, height, 8, pitch, addr);
2253 }
2254 #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
2255 }
2256 }
2257
2258
2259 /* Return (relocated) pointer to this much memory: moves initrd if reqd. */
2260 static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
2261 unsigned long needed, unsigned long align)
2262 {
2263 void *ret;
2264
2265 *mem_start = _ALIGN(*mem_start, align);
2266 while ((*mem_start + needed) > *mem_end) {
2267 unsigned long room, chunk;
2268
2269 prom_debug("Chunk exhausted, claiming more at %lx...\n",
2270 alloc_bottom);
2271 room = alloc_top - alloc_bottom;
2272 if (room > DEVTREE_CHUNK_SIZE)
2273 room = DEVTREE_CHUNK_SIZE;
2274 if (room < PAGE_SIZE)
2275 prom_panic("No memory for flatten_device_tree "
2276 "(no room)\n");
2277 chunk = alloc_up(room, 0);
2278 if (chunk == 0)
2279 prom_panic("No memory for flatten_device_tree "
2280 "(claim failed)\n");
2281 *mem_end = chunk + room;
2282 }
2283
2284 ret = (void *)*mem_start;
2285 *mem_start += needed;
2286
2287 return ret;
2288 }
2289
2290 #define dt_push_token(token, mem_start, mem_end) do { \
2291 void *room = make_room(mem_start, mem_end, 4, 4); \
2292 *(__be32 *)room = cpu_to_be32(token); \
2293 } while(0)
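/*
 * For reference: the structure block built below is a stream of 32-bit
 * big-endian tokens, roughly
 *
 *	OF_DT_BEGIN_NODE  <node unit name, NUL terminated, padded to 4>
 *	OF_DT_PROP        <value len> <name offset in string block> <value>
 *	...child nodes, recursively...
 *	OF_DT_END_NODE
 *
 * with a single OF_DT_END token closing the whole block, and property
 * names deduplicated into a separate string block.
 */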
2294
2295 static unsigned long __init dt_find_string(char *str)
2296 {
2297 char *s, *os;
2298
2299 s = os = (char *)dt_string_start;
2300 s += 4;
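	/* Skip the 4-byte hole reserved at the start of the string block;
	 * it exists so that a return value of 0 can mean "not found". */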
2301 while (s < (char *)dt_string_end) {
2302 if (strcmp(s, str) == 0)
2303 return s - os;
2304 s += strlen(s) + 1;
2305 }
2306 return 0;
2307 }
2308
2309 /*
2310 * The Open Firmware 1275 specification states properties must be 31 bytes or
2311 * less, however not all firmwares obey this. Make it 64 bytes to be safe.
2312 */
2313 #define MAX_PROPERTY_NAME 64
2314
2315 static void __init scan_dt_build_strings(phandle node,
2316 unsigned long *mem_start,
2317 unsigned long *mem_end)
2318 {
2319 char *prev_name, *namep, *sstart;
2320 unsigned long soff;
2321 phandle child;
2322
2323 sstart = (char *)dt_string_start;
2324
2325 /* get and store all property names */
2326 prev_name = "";
2327 for (;;) {
2328 /* 64 is max len of name including nul. */
2329 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
2330 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
2331 /* No more nodes: unwind alloc */
2332 *mem_start = (unsigned long)namep;
2333 break;
2334 }
2335
2336 /* skip "name" */
2337 if (strcmp(namep, "name") == 0) {
2338 *mem_start = (unsigned long)namep;
2339 prev_name = "name";
2340 continue;
2341 }
2342 /* get/create string entry */
2343 soff = dt_find_string(namep);
2344 if (soff != 0) {
2345 *mem_start = (unsigned long)namep;
2346 namep = sstart + soff;
2347 } else {
2348 /* Trim off some if we can */
2349 *mem_start = (unsigned long)namep + strlen(namep) + 1;
2350 dt_string_end = *mem_start;
2351 }
2352 prev_name = namep;
2353 }
2354
2355 /* do all our children */
2356 child = call_prom("child", 1, 1, node);
2357 while (child != 0) {
2358 scan_dt_build_strings(child, mem_start, mem_end);
2359 child = call_prom("peer", 1, 1, child);
2360 }
2361 }
2362
2363 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
2364 unsigned long *mem_end)
2365 {
2366 phandle child;
2367 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
2368 unsigned long soff;
2369 unsigned char *valp;
2370 static char pname[MAX_PROPERTY_NAME];
2371 int l, room, has_phandle = 0;
2372
2373 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
2374
2375 /* get the node's full name */
2376 namep = (char *)*mem_start;
2377 room = *mem_end - *mem_start;
2378 if (room > 255)
2379 room = 255;
2380 l = call_prom("package-to-path", 3, 1, node, namep, room);
2381 if (l >= 0) {
2382 /* Didn't fit? Get more room. */
2383 if (l >= room) {
2384 if (l >= *mem_end - *mem_start)
2385 namep = make_room(mem_start, mem_end, l+1, 1);
2386 call_prom("package-to-path", 3, 1, node, namep, l);
2387 }
2388 namep[l] = '\0';
2389
2390 /* Fixup an Apple bug where they have bogus \0 chars in the
2391 * middle of the path in some properties, and extract
2392 * the unit name (everything after the last '/').
2393 */
2394 for (lp = p = namep, ep = namep + l; p < ep; p++) {
2395 if (*p == '/')
2396 lp = namep;
2397 else if (*p != 0)
2398 *lp++ = *p;
2399 }
2400 *lp = 0;
2401 *mem_start = _ALIGN((unsigned long)lp + 1, 4);
2402 }
2403
2404 /* get it again for debugging */
2405 path = prom_scratch;
2406 memset(path, 0, PROM_SCRATCH_SIZE);
2407 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
2408
2409 /* get and store all properties */
2410 prev_name = "";
2411 sstart = (char *)dt_string_start;
2412 for (;;) {
2413 if (call_prom("nextprop", 3, 1, node, prev_name,
2414 pname) != 1)
2415 break;
2416
2417 /* skip "name" */
2418 if (strcmp(pname, "name") == 0) {
2419 prev_name = "name";
2420 continue;
2421 }
2422
2423 /* find string offset */
2424 soff = dt_find_string(pname);
2425 if (soff == 0) {
2426 prom_printf("WARNING: Can't find string index for"
2427 " <%s>, node %s\n", pname, path);
2428 break;
2429 }
2430 prev_name = sstart + soff;
2431
2432 /* get length */
2433 l = call_prom("getproplen", 2, 1, node, pname);
2434
2435 /* sanity checks */
2436 if (l == PROM_ERROR)
2437 continue;
2438
2439 /* push property head */
2440 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2441 dt_push_token(l, mem_start, mem_end);
2442 dt_push_token(soff, mem_start, mem_end);
2443
2444 /* push property content */
2445 valp = make_room(mem_start, mem_end, l, 4);
2446 call_prom("getprop", 4, 1, node, pname, valp, l);
2447 *mem_start = _ALIGN(*mem_start, 4);
2448
2449 if (!strcmp(pname, "phandle"))
2450 has_phandle = 1;
2451 }
2452
2453 /* Add a "linux,phandle" property if no "phandle" property already
2454 * existed (can happen with OPAL)
2455 */
2456 if (!has_phandle) {
2457 soff = dt_find_string("linux,phandle");
2458 if (soff == 0)
2459 prom_printf("WARNING: Can't find string index for"
2460 " <linux-phandle> node %s\n", path);
2461 else {
2462 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2463 dt_push_token(4, mem_start, mem_end);
2464 dt_push_token(soff, mem_start, mem_end);
2465 valp = make_room(mem_start, mem_end, 4, 4);
2466 *(__be32 *)valp = cpu_to_be32(node);
2467 }
2468 }
2469
2470 /* do all our children */
2471 child = call_prom("child", 1, 1, node);
2472 while (child != 0) {
2473 scan_dt_build_struct(child, mem_start, mem_end);
2474 child = call_prom("peer", 1, 1, child);
2475 }
2476
2477 dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
2478 }
2479
2480 static void __init flatten_device_tree(void)
2481 {
2482 phandle root;
2483 unsigned long mem_start, mem_end, room;
2484 struct boot_param_header *hdr;
2485 char *namep;
2486 u64 *rsvmap;
2487
2488 /*
2489 * Check how much room we have between alloc top & bottom (+/- a
2490 * few pages), crop to 1MB, as this is our "chunk" size
2491 */
2492 room = alloc_top - alloc_bottom - 0x4000;
2493 if (room > DEVTREE_CHUNK_SIZE)
2494 room = DEVTREE_CHUNK_SIZE;
2495 prom_debug("starting device tree allocs at %lx\n", alloc_bottom);
2496
2497 /* Now try to claim that */
2498 mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
2499 if (mem_start == 0)
2500 prom_panic("Can't allocate initial device-tree chunk\n");
2501 mem_end = mem_start + room;
2502
2503 /* Get root of tree */
2504 root = call_prom("peer", 1, 1, (phandle)0);
2505 if (root == (phandle)0)
2506 prom_panic ("couldn't get device tree root\n");
2507
2508 /* Build header and make room for mem rsv map */
2509 mem_start = _ALIGN(mem_start, 4);
2510 hdr = make_room(&mem_start, &mem_end,
2511 sizeof(struct boot_param_header), 4);
2512 dt_header_start = (unsigned long)hdr;
2513 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
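	/*
	 * Resulting blob layout: boot_param_header, memory reserve map,
	 * strings block, then structure block. The header offsets filled
	 * in further down point at each of these pieces.
	 */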
2514
2515 /* Start of strings */
2516 mem_start = PAGE_ALIGN(mem_start);
2517 dt_string_start = mem_start;
2518 mem_start += 4; /* hole */
2519
2520 /* Add "linux,phandle" in there, we'll need it */
2521 namep = make_room(&mem_start, &mem_end, 16, 1);
2522 strcpy(namep, "linux,phandle");
2523 mem_start = (unsigned long)namep + strlen(namep) + 1;
2524
2525 /* Build string array */
2526 prom_printf("Building dt strings...\n");
2527 scan_dt_build_strings(root, &mem_start, &mem_end);
2528 dt_string_end = mem_start;
2529
2530 /* Build structure */
2531 mem_start = PAGE_ALIGN(mem_start);
2532 dt_struct_start = mem_start;
2533 prom_printf("Building dt structure...\n");
2534 scan_dt_build_struct(root, &mem_start, &mem_end);
2535 dt_push_token(OF_DT_END, &mem_start, &mem_end);
2536 dt_struct_end = PAGE_ALIGN(mem_start);
2537
2538 /* Finish header */
2539 hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu);
2540 hdr->magic = cpu_to_be32(OF_DT_HEADER);
2541 hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start);
2542 hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start);
2543 hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start);
2544 hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start);
2545 hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start);
2546 hdr->version = cpu_to_be32(OF_DT_VERSION);
2547 /* Version 16 is not backward compatible */
2548 hdr->last_comp_version = cpu_to_be32(0x10);
2549
2550 /* Copy the reserve map in */
2551 memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map));
2552
2553 #ifdef DEBUG_PROM
2554 {
2555 int i;
2556 prom_printf("reserved memory map:\n");
2557 for (i = 0; i < mem_reserve_cnt; i++)
2558 prom_printf(" %llx - %llx\n",
2559 be64_to_cpu(mem_reserve_map[i].base),
2560 be64_to_cpu(mem_reserve_map[i].size));
2561 }
2562 #endif
2563 /* Bump mem_reserve_cnt to cause further reservations to fail
2564 * since it's too late.
2565 */
2566 mem_reserve_cnt = MEM_RESERVE_MAP_SIZE;
2567
2568 prom_printf("Device tree strings 0x%lx -> 0x%lx\n",
2569 dt_string_start, dt_string_end);
2570 prom_printf("Device tree struct 0x%lx -> 0x%lx\n",
2571 dt_struct_start, dt_struct_end);
2572 }
2573
2574 #ifdef CONFIG_PPC_MAPLE
2575 /* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property.
2576 * The values are bad, and it doesn't even have the right number of cells. */
2577 static void __init fixup_device_tree_maple(void)
2578 {
2579 phandle isa;
2580 u32 rloc = 0x01002000; /* IO space; PCI device = 4 */
2581 u32 isa_ranges[6];
2582 char *name;
2583
2584 name = "/ht@0/isa@4";
2585 isa = call_prom("finddevice", 1, 1, ADDR(name));
2586 if (!PHANDLE_VALID(isa)) {
2587 name = "/ht@0/isa@6";
2588 isa = call_prom("finddevice", 1, 1, ADDR(name));
2589 rloc = 0x01003000; /* IO space; PCI device = 6 */
2590 }
2591 if (!PHANDLE_VALID(isa))
2592 return;
2593
2594 if (prom_getproplen(isa, "ranges") != 12)
2595 return;
2596 if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges))
2597 == PROM_ERROR)
2598 return;
2599
2600 if (isa_ranges[0] != 0x1 ||
2601 isa_ranges[1] != 0xf4000000 ||
2602 isa_ranges[2] != 0x00010000)
2603 return;
2604
2605 prom_printf("Fixing up bogus ISA range on Maple/Apache...\n");
2606
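	/* ranges is <ISA phys.hi phys.lo> <PCI phys.hi phys.mid phys.lo>
	 * <size>: map the 64k ISA I/O space into PCI I/O space on the
	 * device encoded in rloc. */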
2607 isa_ranges[0] = 0x1;
2608 isa_ranges[1] = 0x0;
2609 isa_ranges[2] = rloc;
2610 isa_ranges[3] = 0x0;
2611 isa_ranges[4] = 0x0;
2612 isa_ranges[5] = 0x00010000;
2613 prom_setprop(isa, name, "ranges",
2614 isa_ranges, sizeof(isa_ranges));
2615 }
2616
2617 #define CPC925_MC_START 0xf8000000
2618 #define CPC925_MC_LENGTH 0x1000000
2619 /* The values for memory-controller don't have the right number of cells */
2620 static void __init fixup_device_tree_maple_memory_controller(void)
2621 {
2622 phandle mc;
2623 u32 mc_reg[4];
2624 char *name = "/hostbridge@f8000000";
2625 u32 ac, sc;
2626
2627 mc = call_prom("finddevice", 1, 1, ADDR(name));
2628 if (!PHANDLE_VALID(mc))
2629 return;
2630
2631 if (prom_getproplen(mc, "reg") != 8)
2632 return;
2633
2634 prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac));
2635 prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc));
2636 if ((ac != 2) || (sc != 2))
2637 return;
2638
2639 if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR)
2640 return;
2641
2642 if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH)
2643 return;
2644
2645 prom_printf("Fixing up bogus hostbridge on Maple...\n");
2646
2647 mc_reg[0] = 0x0;
2648 mc_reg[1] = CPC925_MC_START;
2649 mc_reg[2] = 0x0;
2650 mc_reg[3] = CPC925_MC_LENGTH;
2651 prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg));
2652 }
2653 #else
2654 #define fixup_device_tree_maple()
2655 #define fixup_device_tree_maple_memory_controller()
2656 #endif
2657
2658 #ifdef CONFIG_PPC_CHRP
2659 /*
2660 * Pegasos and BriQ lack the "ranges" property in the isa node
2661 * Pegasos needs decimal IRQ 14/15, not hexadecimal
2662 * Pegasos has the IDE configured in legacy mode, but advertised as native
2663 */
2664 static void __init fixup_device_tree_chrp(void)
2665 {
2666 phandle ph;
2667 u32 prop[6];
2668 u32 rloc = 0x01006000; /* IO space; PCI device = 12 */
2669 char *name;
2670 int rc;
2671
2672 name = "/pci@80000000/isa@c";
2673 ph = call_prom("finddevice", 1, 1, ADDR(name));
2674 if (!PHANDLE_VALID(ph)) {
2675 name = "/pci@ff500000/isa@6";
2676 ph = call_prom("finddevice", 1, 1, ADDR(name));
2677 rloc = 0x01003000; /* IO space; PCI device = 6 */
2678 }
2679 if (PHANDLE_VALID(ph)) {
2680 rc = prom_getproplen(ph, "ranges");
2681 if (rc == 0 || rc == PROM_ERROR) {
2682 prom_printf("Fixing up missing ISA range on Pegasos...\n");
2683
2684 prop[0] = 0x1;
2685 prop[1] = 0x0;
2686 prop[2] = rloc;
2687 prop[3] = 0x0;
2688 prop[4] = 0x0;
2689 prop[5] = 0x00010000;
2690 prom_setprop(ph, name, "ranges", prop, sizeof(prop));
2691 }
2692 }
2693
2694 name = "/pci@80000000/ide@C,1";
2695 ph = call_prom("finddevice", 1, 1, ADDR(name));
2696 if (PHANDLE_VALID(ph)) {
2697 prom_printf("Fixing up IDE interrupt on Pegasos...\n");
2698 prop[0] = 14;
2699 prop[1] = 0x0;
2700 prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32));
2701 prom_printf("Fixing up IDE class-code on Pegasos...\n");
2702 rc = prom_getprop(ph, "class-code", prop, sizeof(u32));
2703 if (rc == sizeof(u32)) {
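			/* Clear prog-if bits 0 and 2 so both IDE channels
			 * report legacy rather than native mode. */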
2704 prop[0] &= ~0x5;
2705 prom_setprop(ph, name, "class-code", prop, sizeof(u32));
2706 }
2707 }
2708 }
2709 #else
2710 #define fixup_device_tree_chrp()
2711 #endif
2712
2713 #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
2714 static void __init fixup_device_tree_pmac(void)
2715 {
2716 phandle u3, i2c, mpic;
2717 u32 u3_rev;
2718 u32 interrupts[2];
2719 u32 parent;
2720
2721 /* Some G5s have a missing interrupt definition, fix it up here */
2722 u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
2723 if (!PHANDLE_VALID(u3))
2724 return;
2725 i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
2726 if (!PHANDLE_VALID(i2c))
2727 return;
2728 mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
2729 if (!PHANDLE_VALID(mpic))
2730 return;
2731
2732 /* check if proper rev of u3 */
2733 if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
2734 == PROM_ERROR)
2735 return;
2736 if (u3_rev < 0x35 || u3_rev > 0x39)
2737 return;
2738 /* does it need fixup ? */
2739 if (prom_getproplen(i2c, "interrupts") > 0)
2740 return;
2741
2742 prom_printf("fixing up bogus interrupts for u3 i2c...\n");
2743
2744 /* interrupt on this revision of u3 is number 0 and level */
2745 interrupts[0] = 0;
2746 interrupts[1] = 1;
2747 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
2748 &interrupts, sizeof(interrupts));
2749 parent = (u32)mpic;
2750 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
2751 &parent, sizeof(parent));
2752 }
2753 #else
2754 #define fixup_device_tree_pmac()
2755 #endif
2756
2757 #ifdef CONFIG_PPC_EFIKA
2758 /*
2759 * The MPC5200 FEC driver requires a phy-handle property to tell it how
2760 * to talk to the phy. If the phy-handle property is missing, then this
2761 * function is called to add the appropriate nodes and link it to the
2762 * ethernet node.
2763 */
2764 static void __init fixup_device_tree_efika_add_phy(void)
2765 {
2766 u32 node;
2767 char prop[64];
2768 int rv;
2769
2770 /* Check if /builtin/ethernet exists - bail if it doesn't */
2771 node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet"));
2772 if (!PHANDLE_VALID(node))
2773 return;
2774
2775 /* Check if the phy-handle property exists - bail if it does */
2776 rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
2777 if (!rv)
2778 return;
2779
2780 /*
2781 * At this point the ethernet device doesn't have a phy described.
2782 * Now we need to add the missing phy node and linkage
2783 */
2784
2785 /* Check for an MDIO bus node - if missing then create one */
2786 node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
2787 if (!PHANDLE_VALID(node)) {
2788 prom_printf("Adding Ethernet MDIO node\n");
2789 call_prom("interpret", 1, 1,
2790 " s\" /builtin\" find-device"
2791 " new-device"
2792 " 1 encode-int s\" #address-cells\" property"
2793 " 0 encode-int s\" #size-cells\" property"
2794 " s\" mdio\" device-name"
2795 " s\" fsl,mpc5200b-mdio\" encode-string"
2796 " s\" compatible\" property"
2797 " 0xf0003000 0x400 reg"
2798 " 0x2 encode-int"
2799 " 0x5 encode-int encode+"
2800 " 0x3 encode-int encode+"
2801 " s\" interrupts\" property"
2802 " finish-device");
2803 }
2804
2805 /* Check for a PHY device node - if missing then create one and
2806 * give its phandle to the ethernet node */
2807 node = call_prom("finddevice", 1, 1,
2808 ADDR("/builtin/mdio/ethernet-phy"));
2809 if (!PHANDLE_VALID(node)) {
2810 prom_printf("Adding Ethernet PHY node\n");
2811 call_prom("interpret", 1, 1,
2812 " s\" /builtin/mdio\" find-device"
2813 " new-device"
2814 " s\" ethernet-phy\" device-name"
2815 " 0x10 encode-int s\" reg\" property"
2816 " my-self"
2817 " ihandle>phandle"
2818 " finish-device"
2819 " s\" /builtin/ethernet\" find-device"
2820 " encode-int"
2821 " s\" phy-handle\" property"
2822 " device-end");
2823 }
2824 }
2825
2826 static void __init fixup_device_tree_efika(void)
2827 {
2828 int sound_irq[3] = { 2, 2, 0 };
2829 int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
2830 3,4,0, 3,5,0, 3,6,0, 3,7,0,
2831 3,8,0, 3,9,0, 3,10,0, 3,11,0,
2832 3,12,0, 3,13,0, 3,14,0, 3,15,0 };
2833 u32 node;
2834 char prop[64];
2835 int rv, len;
2836
2837 /* Check if we're really running on an EFIKA */
2838 node = call_prom("finddevice", 1, 1, ADDR("/"));
2839 if (!PHANDLE_VALID(node))
2840 return;
2841
2842 rv = prom_getprop(node, "model", prop, sizeof(prop));
2843 if (rv == PROM_ERROR)
2844 return;
2845 if (strcmp(prop, "EFIKA5K2"))
2846 return;
2847
2848 prom_printf("Applying EFIKA device tree fixups\n");
2849
2850 /* Claiming to be 'chrp' is death */
2851 node = call_prom("finddevice", 1, 1, ADDR("/"));
2852 rv = prom_getprop(node, "device_type", prop, sizeof(prop));
2853 if (rv != PROM_ERROR && (strcmp(prop, "chrp") == 0))
2854 prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));
2855
2856 /* CODEGEN,description is exposed in /proc/cpuinfo so
2857 fix that too */
2858 rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop));
2859 if (rv != PROM_ERROR && (strstr(prop, "CHRP")))
2860 prom_setprop(node, "/", "CODEGEN,description",
2861 "Efika 5200B PowerPC System",
2862 sizeof("Efika 5200B PowerPC System"));
2863
2864 /* Fixup bestcomm interrupts property */
2865 node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
2866 if (PHANDLE_VALID(node)) {
2867 len = prom_getproplen(node, "interrupts");
2868 if (len == 12) {
2869 prom_printf("Fixing bestcomm interrupts property\n");
2870 prom_setprop(node, "/builtin/bestcom", "interrupts",
2871 bcomm_irq, sizeof(bcomm_irq));
2872 }
2873 }
2874
2875 /* Fixup sound interrupts property */
2876 node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
2877 if (PHANDLE_VALID(node)) {
2878 rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
2879 if (rv == PROM_ERROR) {
2880 prom_printf("Adding sound interrupts property\n");
2881 prom_setprop(node, "/builtin/sound", "interrupts",
2882 sound_irq, sizeof(sound_irq));
2883 }
2884 }
2885
2886 /* Make sure ethernet phy-handle property exists */
2887 fixup_device_tree_efika_add_phy();
2888 }
2889 #else
2890 #define fixup_device_tree_efika()
2891 #endif
2892
2893 #ifdef CONFIG_PPC_PASEMI_NEMO
2894 /*
2895 * CFE supplied on Nemo is broken in several ways; the biggest
2896 * problem is that it reassigns ISA interrupts to unused mpic ints.
2897 * Add an interrupt-controller property for the io-bridge to use
2898 * and correct the ints so we can attach them to an irq_domain
2899 */
2900 static void __init fixup_device_tree_pasemi(void)
2901 {
2902 u32 interrupts[2], parent, rval, val = 0;
2903 char *name, *pci_name;
2904 phandle iob, node;
2905
2906 /* Find the root pci node */
2907 name = "/pxp@0,e0000000";
2908 iob = call_prom("finddevice", 1, 1, ADDR(name));
2909 if (!PHANDLE_VALID(iob))
2910 return;
2911
2912 /* check if interrupt-controller node set yet */
2913 if (prom_getproplen(iob, "interrupt-controller") !=PROM_ERROR)
2914 return;
2915
2916 prom_printf("adding interrupt-controller property for SB600...\n");
2917
2918 prom_setprop(iob, name, "interrupt-controller", &val, 0);
2919
2920 pci_name = "/pxp@0,e0000000/pci@11";
2921 node = call_prom("finddevice", 1, 1, ADDR(pci_name));
2922 parent = ADDR(iob);
2923
2924 for( ; prom_next_node(&node); ) {
2925 /* scan each node for one with an interrupt */
2926 if (!PHANDLE_VALID(node))
2927 continue;
2928
2929 rval = prom_getproplen(node, "interrupts");
2930 if (rval == 0 || rval == PROM_ERROR)
2931 continue;
2932
2933 prom_getprop(node, "interrupts", &interrupts, sizeof(interrupts));
2934 if ((interrupts[0] < 212) || (interrupts[0] > 222))
2935 continue;
2936
2937 /* found a node, update both interrupts and interrupt-parent */
2938 if ((interrupts[0] >= 212) && (interrupts[0] <= 215))
2939 interrupts[0] -= 203;
2940 if ((interrupts[0] >= 216) && (interrupts[0] <= 220))
2941 interrupts[0] -= 213;
2942 if (interrupts[0] == 221)
2943 interrupts[0] = 14;
2944 if (interrupts[0] == 222)
2945 interrupts[0] = 8;
2946
2947 prom_setprop(node, pci_name, "interrupts", interrupts,
2948 sizeof(interrupts));
2949 prom_setprop(node, pci_name, "interrupt-parent", &parent,
2950 sizeof(parent));
2951 }
2952
2953 /*
2954 * The io-bridge has device_type set to 'io-bridge'; change it to 'isa'
2955 * so that generic isa-bridge code can add the SB600 and its on-board
2956 * peripherals.
2957 */
2958 name = "/pxp@0,e0000000/io-bridge@0";
2959 iob = call_prom("finddevice", 1, 1, ADDR(name));
2960 if (!PHANDLE_VALID(iob))
2961 return;
2962
2963 /* device_type is already set, just change it. */
2964
2965 prom_printf("Changing device_type of SB600 node...\n");
2966
2967 prom_setprop(iob, name, "device_type", "isa", sizeof("isa"));
2968 }
2969 #else /* !CONFIG_PPC_PASEMI_NEMO */
2970 static inline void fixup_device_tree_pasemi(void) { }
2971 #endif
2972
2973 static void __init fixup_device_tree(void)
2974 {
2975 fixup_device_tree_maple();
2976 fixup_device_tree_maple_memory_controller();
2977 fixup_device_tree_chrp();
2978 fixup_device_tree_pmac();
2979 fixup_device_tree_efika();
2980 fixup_device_tree_pasemi();
2981 }
2982
2983 static void __init prom_find_boot_cpu(void)
2984 {
2985 __be32 rval;
2986 ihandle prom_cpu;
2987 phandle cpu_pkg;
2988
2989 rval = 0;
2990 if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0)
2991 return;
2992 prom_cpu = be32_to_cpu(rval);
2993
2994 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
2995
2996 if (!PHANDLE_VALID(cpu_pkg))
2997 return;
2998
2999 prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
3000 prom.cpu = be32_to_cpu(rval);
3001
3002 prom_debug("Booting CPU hw index = %d\n", prom.cpu);
3003 }
3004
3005 static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
3006 {
3007 #ifdef CONFIG_BLK_DEV_INITRD
3008 if (r3 && r4 && r4 != 0xdeadbeef) {
3009 __be64 val;
3010
3011 prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3;
3012 prom_initrd_end = prom_initrd_start + r4;
3013
3014 val = cpu_to_be64(prom_initrd_start);
3015 prom_setprop(prom.chosen, "/chosen", "linux,initrd-start",
3016 &val, sizeof(val));
3017 val = cpu_to_be64(prom_initrd_end);
3018 prom_setprop(prom.chosen, "/chosen", "linux,initrd-end",
3019 &val, sizeof(val));
3020
3021 reserve_mem(prom_initrd_start,
3022 prom_initrd_end - prom_initrd_start);
3023
3024 prom_debug("initrd_start=0x%lx\n", prom_initrd_start);
3025 prom_debug("initrd_end=0x%lx\n", prom_initrd_end);
3026 }
3027 #endif /* CONFIG_BLK_DEV_INITRD */
3028 }
3029
3030 #ifdef CONFIG_PPC64
3031 #ifdef CONFIG_RELOCATABLE
3032 static void reloc_toc(void)
3033 {
3034 }
3035
3036 static void unreloc_toc(void)
3037 {
3038 }
3039 #else
3040 static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
3041 {
3042 unsigned long i;
3043 unsigned long *toc_entry;
3044
3045 /* Get the start of the TOC by using r2 directly. */
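	/* The 64-bit ABI biases r2 0x8000 bytes into the TOC so that
	 * 16-bit signed offsets can reach the whole 64kB region; subtract
	 * the bias back off to get the true start. */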
3046 asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));
3047
3048 for (i = 0; i < nr_entries; i++) {
3049 *toc_entry = *toc_entry + offset;
3050 toc_entry++;
3051 }
3052 }
3053
3054 static void reloc_toc(void)
3055 {
3056 unsigned long offset = reloc_offset();
3057 unsigned long nr_entries =
3058 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
3059
3060 __reloc_toc(offset, nr_entries);
3061
3062 mb();
3063 }
3064
3065 static void unreloc_toc(void)
3066 {
3067 unsigned long offset = reloc_offset();
3068 unsigned long nr_entries =
3069 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
3070
3071 mb();
3072
3073 __reloc_toc(-offset, nr_entries);
3074 }
3075 #endif
3076 #endif
3077
3078 /*
3079 * We enter here early on, when the Open Firmware prom is still
3080 * handling exceptions and the MMU hash table for us.
3081 */
3082
3083 unsigned long __init prom_init(unsigned long r3, unsigned long r4,
3084 unsigned long pp,
3085 unsigned long r6, unsigned long r7,
3086 unsigned long kbase)
3087 {
3088 unsigned long hdr;
3089
3090 #ifdef CONFIG_PPC32
3091 unsigned long offset = reloc_offset();
3092 reloc_got2(offset);
3093 #else
3094 reloc_toc();
3095 #endif
3096
3097 /*
3098 * First zero the BSS
3099 */
3100 memset(&__bss_start, 0, __bss_stop - __bss_start);
3101
3102 /*
3103 * Init interface to Open Firmware, get some node references,
3104 * like /chosen
3105 */
3106 prom_init_client_services(pp);
3107
3108 /*
3109 * See if this OF is old enough that we need to do explicit maps
3110 * and other workarounds
3111 */
3112 prom_find_mmu();
3113
3114 /*
3115 * Init prom stdout device
3116 */
3117 prom_init_stdout();
3118
3119 prom_printf("Preparing to boot %s", linux_banner);
3120
3121 /*
3122 * Get default machine type. At this point, we do not differentiate
3123 * between pSeries SMP and pSeries LPAR
3124 */
3125 of_platform = prom_find_machine_type();
3126 prom_printf("Detected machine type: %x\n", of_platform);
3127
3128 #ifndef CONFIG_NONSTATIC_KERNEL
3129 /* Bail if this is a kdump kernel. */
3130 if (PHYSICAL_START > 0)
3131 prom_panic("Error: You can't boot a kdump kernel from OF!\n");
3132 #endif
3133
3134 /*
3135 * Check for an initrd
3136 */
3137 prom_check_initrd(r3, r4);
3138
3139 /*
3140 * Do early parsing of command line
3141 */
3142 early_cmdline_parse();
3143
3144 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
3145 /*
3146 * On pSeries, inform the firmware about our capabilities
3147 */
3148 if (of_platform == PLATFORM_PSERIES ||
3149 of_platform == PLATFORM_PSERIES_LPAR)
3150 prom_send_capabilities();
3151 #endif
3152
3153 /*
3154 * Copy the CPU hold code
3155 */
3156 if (of_platform != PLATFORM_POWERMAC)
3157 copy_and_flush(0, kbase, 0x100, 0);
3158
3159 /*
3160 * Initialize memory management within prom_init
3161 */
3162 prom_init_mem();
3163
3164 /*
3165 * Determine which cpu is actually running right _now_
3166 */
3167 prom_find_boot_cpu();
3168
3169 /*
3170 * Initialize display devices
3171 */
3172 prom_check_displays();
3173
3174 #if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__)
3175 /*
3176 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
3177 * that uses the allocator; we need to make sure we get the top of memory
3178 * available for us here...
3179 */
3180 if (of_platform == PLATFORM_PSERIES)
3181 prom_initialize_tce_table();
3182 #endif
3183
3184 /*
3185 * On non-powermacs, try to instantiate RTAS. PowerMacs don't
3186 * have a usable RTAS implementation.
3187 */
3188 if (of_platform != PLATFORM_POWERMAC &&
3189 of_platform != PLATFORM_OPAL)
3190 prom_instantiate_rtas();
3191
3192 #ifdef CONFIG_PPC_POWERNV
3193 if (of_platform == PLATFORM_OPAL)
3194 prom_instantiate_opal();
3195 #endif /* CONFIG_PPC_POWERNV */
3196
3197 #ifdef CONFIG_PPC64
3198 /* instantiate sml */
3199 prom_instantiate_sml();
3200 #endif
3201
3202 /*
3203 * On non-powermacs, put all CPUs in spin-loops.
3204 *
3205 * PowerMacs use a different mechanism to spin CPUs
3206 *
3207 * (This must be done after instantiating RTAS)
3208 */
3209 if (of_platform != PLATFORM_POWERMAC &&
3210 of_platform != PLATFORM_OPAL)
3211 prom_hold_cpus();
3212
3213 /*
3214 * Fill in some info for use by the kernel later on
3215 */
3216 if (prom_memory_limit) {
3217 __be64 val = cpu_to_be64(prom_memory_limit);
3218 prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
3219 &val, sizeof(val));
3220 }
3221 #ifdef CONFIG_PPC64
3222 if (prom_iommu_off)
3223 prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
3224 NULL, 0);
3225
3226 if (prom_iommu_force_on)
3227 prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on",
3228 NULL, 0);
3229
3230 if (prom_tce_alloc_start) {
3231 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start",
3232 &prom_tce_alloc_start,
3233 sizeof(prom_tce_alloc_start));
3234 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end",
3235 &prom_tce_alloc_end,
3236 sizeof(prom_tce_alloc_end));
3237 }
3238 #endif
3239
3240 /*
3241 * Fixup any known bugs in the device-tree
3242 */
3243 fixup_device_tree();
3244
3245 /*
3246 * Now finally create the flattened device-tree
3247 */
3248 prom_printf("copying OF device tree...\n");
3249 flatten_device_tree();
3250
3251 /*
3252 * Close stdin in case it is USB and still active on IBM machines.
3253 * Unfortunately quiesce crashes on some powermacs if we have
3254 * closed stdin already (in particular the powerbook 101). It
3255 * appears that the OPAL version of OFW doesn't like it either.
3256 */
3257 if (of_platform != PLATFORM_POWERMAC &&
3258 of_platform != PLATFORM_OPAL)
3259 prom_close_stdin();
3260
3261 /*
3262 * Call OF "quiesce" method to shut down pending DMA's from
3263 * devices etc...
3264 */
3265 prom_printf("Quiescing Open Firmware ...\n");
3266 call_prom("quiesce", 0, 0);
3267
3268 /*
3269 * And finally, call the kernel passing it the flattened device
3270 * tree and NULL as r5, thus triggering the new entry point which
3271 * is common to us and kexec
3272 */
3273 hdr = dt_header_start;
3274
3275 /* Don't print anything after quiesce under OPAL, it crashes OFW */
3276 if (of_platform != PLATFORM_OPAL) {
3277 prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
3278 prom_debug("->dt_header_start=0x%lx\n", hdr);
3279 }
3280
3281 #ifdef CONFIG_PPC32
3282 reloc_got2(-offset);
3283 #else
3284 unreloc_toc();
3285 #endif
3286
3287 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
3288 /* OPAL early debug gets the OPAL base & entry in r8 and r9 */
3289 __start(hdr, kbase, 0, 0, 0,
3290 prom_opal_base, prom_opal_entry);
3291 #else
3292 __start(hdr, kbase, 0, 0, 0, 0, 0);
3293 #endif
3294
3295 return 0;
3296 }
3297