// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/sparc64/kernel/setup.c
 *
 *  Copyright (C) 1995,1996  David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1997       Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <asm/smp.h>
#include <linux/user.h>
#include <linux/screen_info.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/kdev_t.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/inet.h>
#include <linux/console.h>
#include <linux/root_dev.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/initrd.h>
#include <linux/module.h>
#include <linux/start_kernel.h>
#include <linux/memblock.h>
#include <uapi/linux/mount.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/oplib.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/idprom.h>
#include <asm/head.h>
#include <asm/starfire.h>
#include <asm/mmu_context.h>
#include <asm/timer.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/mmu.h>
#include <asm/ns87303.h>
#include <asm/btext.h>
#include <asm/elf.h>
#include <asm/mdesc.h>
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/irq.h>

#ifdef CONFIG_IP_PNP
#include <net/ipconfig.h>
#endif

#include "entry.h"
#include "kernel.h"

/* Used to synchronize accesses to NatSemi SUPER I/O chip configure
 * operations in asm/ns87303.h
 */
DEFINE_SPINLOCK(ns87303_lock);
EXPORT_SYMBOL(ns87303_lock);

struct screen_info screen_info = {
	0, 0,			/* orig-x, orig-y */
	0,			/* unused */
	0,			/* orig-video-page */
	0,			/* orig-video-mode */
	128,			/* orig-video-cols */
	0, 0, 0,		/* unused, ega_bx, unused */
	54,			/* orig-video-lines */
	0,                      /* orig-video-isVGA */
	16                      /* orig-video-points */
};

static void
prom_console_write(struct console *con, const char *s, unsigned int n)
{
	prom_write(s, n);
}

/* Exported for mm/init.c:paging_init. */
unsigned long cmdline_memory_size = 0;

static struct console prom_early_console = {
	.name =		"earlyprom",
	.write =	prom_console_write,
	.flags =	CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME,
	.index =	-1,
};

/*
 * Process kernel command line switches that are specific to the
 * SPARC or that require special low-level processing.
 */
static void __init process_switch(char c)
{
	switch (c) {
	case 'd':
	case 's':
		break;
	case 'h':
		prom_printf("boot_flags_init: Halt!\n");
		prom_halt();
		break;
	case 'p':
		prom_early_console.flags &= ~CON_BOOT;
		break;
	case 'P':
		/* Force UltraSPARC-III P-Cache on. */
		if (tlb_type != cheetah) {
			printk("BOOT: Ignoring P-Cache force option.\n");
			break;
		}
		cheetah_pcache_forced_on = 1;
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
		cheetah_enable_pcache();
		break;

	default:
		printk("Unknown boot switch (-%c)\n", c);
		break;
	}
}

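/* Scan the boot command line for single-character '-' switches and a
 * "mem=" argument, recording the requested memory size for use by
 * paging_init().
 */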
static void __init boot_flags_init(char *commands)
{
	while (*commands) {
		/* Move to the start of the next "argument". */
		while (*commands == ' ')
			commands++;

		/* Process any command switches, otherwise skip it. */
		if (*commands == '\0')
			break;
		if (*commands == '-') {
			commands++;
			while (*commands && *commands != ' ')
				process_switch(*commands++);
			continue;
		}
		if (!strncmp(commands, "mem=", 4))
			cmdline_memory_size = memparse(commands + 4, &commands);

		while (*commands && *commands != ' ')
			commands++;
	}
}

extern unsigned short root_flags;
extern unsigned short root_dev;
extern unsigned short ram_flags;
#define RAMDISK_IMAGE_START_MASK	0x07FF
#define RAMDISK_PROMPT_FLAG		0x8000
#define RAMDISK_LOAD_FLAG		0x4000

extern int root_mountflags;

char reboot_command[COMMAND_LINE_SIZE];

static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };

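/* Rewrite each cpuid-fetch patch site (four instructions per entry in
 * the __cpuid_patch table) with the variant matching the detected CPU
 * type, flushing the I-cache after every modified word.
 */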
static void __init per_cpu_patch(void)
{
	struct cpuid_patch_entry *p;
	unsigned long ver;
	int is_jbus;

	if (tlb_type == spitfire && !this_is_starfire)
		return;

	is_jbus = 0;
	if (tlb_type != hypervisor) {
		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		is_jbus = ((ver >> 32UL) == __JALAPENO_ID ||
			   (ver >> 32UL) == __SERRANO_ID);
	}

	p = &__cpuid_patch;
	while (p < &__cpuid_patch_end) {
		unsigned long addr = p->addr;
		unsigned int *insns;

		switch (tlb_type) {
		case spitfire:
			insns = &p->starfire[0];
			break;
		case cheetah:
		case cheetah_plus:
			if (is_jbus)
				insns = &p->cheetah_jbus[0];
			else
				insns = &p->cheetah_safari[0];
			break;
		case hypervisor:
			insns = &p->sun4v[0];
			break;
		default:
			prom_printf("Unknown cpu type, halting.\n");
			prom_halt();
		}

		*(unsigned int *) (addr +  0) = insns[0];
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  0));

		*(unsigned int *) (addr +  4) = insns[1];
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  4));

		*(unsigned int *) (addr +  8) = insns[2];
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  8));

		*(unsigned int *) (addr + 12) = insns[3];
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr + 12));

		p++;
	}
}

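/* Replace each single-instruction patch site in [start, end) with its
 * sun4v instruction and flush the modified word from the I-cache.
 */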
void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *start,
			     struct sun4v_1insn_patch_entry *end)
{
	while (start < end) {
		unsigned long addr = start->addr;

		*(unsigned int *) (addr +  0) = start->insn;
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  0));

		start++;
	}
}

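/* Same as above, but each patch site receives a two-instruction
 * sun4v sequence.
 */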
void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
			     struct sun4v_2insn_patch_entry *end)
{
	while (start < end) {
		unsigned long addr = start->addr;

		*(unsigned int *) (addr +  0) = start->insns[0];
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  0));

		*(unsigned int *) (addr +  4) = start->insns[1];
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  4));

		start++;
	}
}

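/* Two-instruction patching for the M7/M8/SN specific patch tables. */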
void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
			     struct sun4v_2insn_patch_entry *end)
{
	while (start < end) {
		unsigned long addr = start->addr;

		*(unsigned int *) (addr +  0) = start->insns[0];
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  0));

		*(unsigned int *) (addr +  4) = start->insns[1];
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  4));

		start++;
	}
}

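/* On sun4v (hypervisor) systems, apply the generic sun4v patch ranges,
 * the chip-specific M7/M8/SN ranges, and the fast window-control
 * patches, then negotiate the hypervisor API.
 */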
static void __init sun4v_patch(void)
{
	extern void sun4v_hvapi_init(void);

	if (tlb_type != hypervisor)
		return;

	sun4v_patch_1insn_range(&__sun4v_1insn_patch,
				&__sun4v_1insn_patch_end);

	sun4v_patch_2insn_range(&__sun4v_2insn_patch,
				&__sun4v_2insn_patch_end);

	switch (sun4v_chip_type) {
	case SUN4V_CHIP_SPARC_M7:
	case SUN4V_CHIP_SPARC_M8:
	case SUN4V_CHIP_SPARC_SN:
		sun4v_patch_1insn_range(&__sun_m7_1insn_patch,
					&__sun_m7_1insn_patch_end);
		sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
					 &__sun_m7_2insn_patch_end);
		break;
	default:
		break;
	}

	if (sun4v_chip_type != SUN4V_CHIP_NIAGARA1) {
		sun4v_patch_1insn_range(&__fast_win_ctrl_1insn_patch,
					&__fast_win_ctrl_1insn_patch_end);
	}

	sun4v_hvapi_init();
}

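/* When the CPU advertises a hardware POPC instruction, rewrite the
 * three- and six-instruction software fallback sites to use it.
 */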
static void __init popc_patch(void)
{
	struct popc_3insn_patch_entry *p3;
	struct popc_6insn_patch_entry *p6;

	p3 = &__popc_3insn_patch;
	while (p3 < &__popc_3insn_patch_end) {
		unsigned long i, addr = p3->addr;

		for (i = 0; i < 3; i++) {
			*(unsigned int *) (addr +  (i * 4)) = p3->insns[i];
			wmb();
			__asm__ __volatile__("flush	%0"
					     : : "r" (addr +  (i * 4)));
		}

		p3++;
	}

	p6 = &__popc_6insn_patch;
	while (p6 < &__popc_6insn_patch_end) {
		unsigned long i, addr = p6->addr;

		for (i = 0; i < 6; i++) {
			*(unsigned int *) (addr +  (i * 4)) = p6->insns[i];
			wmb();
			__asm__ __volatile__("flush	%0"
					     : : "r" (addr +  (i * 4)));
		}

		p6++;
	}
}

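/* When the CPU advertises the PAUSE instruction, rewrite the
 * three-instruction busy-wait sites to use it.
 */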
static void __init pause_patch(void)
{
	struct pause_patch_entry *p;

	p = &__pause_3insn_patch;
	while (p < &__pause_3insn_patch_end) {
		unsigned long i, addr = p->addr;

		for (i = 0; i < 3; i++) {
			*(unsigned int *) (addr +  (i * 4)) = p->insns[i];
			wmb();
			__asm__ __volatile__("flush	%0"
					     : : "r" (addr +  (i * 4)));
		}

		p++;
	}
}

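/* First C code run on the boot CPU: apply the runtime code patches,
 * record the boot CPU id, do early timer and PROM setup, then hand
 * off to the generic start_kernel().
 */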
void __init start_early_boot(void)
{
	int cpu;

	check_if_starfire();
	per_cpu_patch();
	sun4v_patch();
	smp_init_cpu_poke();

	cpu = hard_smp_processor_id();
	if (cpu >= NR_CPUS) {
		prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
			    cpu, NR_CPUS);
		prom_halt();
	}
	current_thread_info()->cpu = cpu;

	time_init_early();
	prom_init_report();
	start_kernel();
}

/* On Ultra, we support all of the v8 capabilities. */
unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
				   HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV |
				   HWCAP_SPARC_V9);
EXPORT_SYMBOL(sparc64_elf_hwcap);

static const char *hwcaps[] = {
	"flush", "stbar", "swap", "muldiv", "v9",
	"ultra3", "blkinit", "n2",

	/* These strings are as they appear in the machine description
	 * 'hwcap-list' property for cpu nodes.
	 */
	"mul32", "div32", "fsmuld", "v8plus", "popc", "vis", "vis2",
	"ASIBlkInit", "fmaf", "vis3", "hpc", "random", "trans", "fjfmau",
	"ima", "cspare", "pause", "cbcond", NULL /*reserved for crypto */,
	"adp",
};

static const char *crypto_hwcaps[] = {
	"aes", "des", "kasumi", "camellia", "md5", "sha1", "sha256",
	"sha512", "mpmul", "montmul", "montsqr", "crc32c",
};

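/* Emit the "cpucaps" line of /proc/cpuinfo: the hwcap names set in
 * sparc64_elf_hwcap plus, if present, the crypto capabilities read
 * from the CFR register (%asr26).
 */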
void cpucap_info(struct seq_file *m)
{
	unsigned long caps = sparc64_elf_hwcap;
	int i, printed = 0;

	seq_puts(m, "cpucaps\t\t: ");
	for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
		unsigned long bit = 1UL << i;
		if (hwcaps[i] && (caps & bit)) {
			seq_printf(m, "%s%s",
				   printed ? "," : "", hwcaps[i]);
			printed++;
		}
	}
	if (caps & HWCAP_SPARC_CRYPTO) {
		unsigned long cfr;

		__asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
		for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) {
			unsigned long bit = 1UL << i;
			if (cfr & bit) {
				seq_printf(m, "%s%s",
					   printed ? "," : "", crypto_hwcaps[i]);
				printed++;
			}
		}
	}
	seq_putc(m, '\n');
}

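/* Print one capability name to the boot log, starting a new
 * "CPU CAPS: [" group every eight entries.
 */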
static void __init report_one_hwcap(int *printed, const char *name)
{
	if ((*printed) == 0)
		printk(KERN_INFO "CPU CAPS: [");
	printk(KERN_CONT "%s%s",
	       (*printed) ? "," : "", name);
	if (++(*printed) == 8) {
		printk(KERN_CONT "]\n");
		*printed = 0;
	}
}

static void __init report_crypto_hwcaps(int *printed)
{
	unsigned long cfr;
	int i;

	__asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));

	for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) {
		unsigned long bit = 1UL << i;
		if (cfr & bit)
			report_one_hwcap(printed, crypto_hwcaps[i]);
	}
}

static void __init report_hwcaps(unsigned long caps)
{
	int i, printed = 0;

	for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
		unsigned long bit = 1UL << i;
		if (hwcaps[i] && (caps & bit))
			report_one_hwcap(&printed, hwcaps[i]);
	}
	if (caps & HWCAP_SPARC_CRYPTO)
		report_crypto_hwcaps(&printed);
	if (printed != 0)
		printk(KERN_CONT "]\n");
}

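/* Build a hwcap mask from the machine description: walk the
 * 'hwcap-list' property of the first cpu node and translate each
 * string into its bit in hwcaps[] (or into HWCAP_SPARC_CRYPTO).
 */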
static unsigned long __init mdesc_cpu_hwcap_list(void)
{
	struct mdesc_handle *hp;
	unsigned long caps = 0;
	const char *prop;
	int len;
	u64 pn;

	hp = mdesc_grab();
	if (!hp)
		return 0;

	pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "cpu");
	if (pn == MDESC_NODE_NULL)
		goto out;

	prop = mdesc_get_property(hp, pn, "hwcap-list", &len);
	if (!prop)
		goto out;

	while (len) {
		int i, plen;

		for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
			unsigned long bit = 1UL << i;

			if (hwcaps[i] && !strcmp(prop, hwcaps[i])) {
				caps |= bit;
				break;
			}
		}
		for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) {
			if (!strcmp(prop, crypto_hwcaps[i]))
				caps |= HWCAP_SPARC_CRYPTO;
		}

		plen = strlen(prop) + 1;
		prop += plen;
		len -= plen;
	}

out:
	mdesc_release(hp);
	return caps;
}

/* This yields a mask that user programs can use to figure out what
 * instruction set this cpu supports.
 */
static void __init init_sparc64_elf_hwcap(void)
{
	unsigned long cap = sparc64_elf_hwcap;
	unsigned long mdesc_caps;

	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cap |= HWCAP_SPARC_ULTRA3;
	else if (tlb_type == hypervisor) {
		if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
			cap |= HWCAP_SPARC_BLKINIT;
		if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
			cap |= HWCAP_SPARC_N2;
	}

	cap |= (AV_SPARC_MUL32 | AV_SPARC_DIV32 | AV_SPARC_V8PLUS);

	mdesc_caps = mdesc_cpu_hwcap_list();
	if (!mdesc_caps) {
		if (tlb_type == spitfire)
			cap |= AV_SPARC_VIS;
		if (tlb_type == cheetah || tlb_type == cheetah_plus)
			cap |= AV_SPARC_VIS | AV_SPARC_VIS2;
		if (tlb_type == cheetah_plus) {
			unsigned long impl, ver;

			__asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
			impl = ((ver >> 32) & 0xffff);
			if (impl == PANTHER_IMPL)
				cap |= AV_SPARC_POPC;
		}
		if (tlb_type == hypervisor) {
			if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1)
				cap |= AV_SPARC_ASI_BLK_INIT;
			if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
				cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
					AV_SPARC_ASI_BLK_INIT |
					AV_SPARC_POPC);
			if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
				cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
					AV_SPARC_FMAF);
		}
	}
	sparc64_elf_hwcap = cap | mdesc_caps;

	report_hwcaps(sparc64_elf_hwcap);

	if (sparc64_elf_hwcap & AV_SPARC_POPC)
		popc_patch();
	if (sparc64_elf_hwcap & AV_SPARC_PAUSE)
		pause_patch();
}

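/* Allocate the per-cpu hard and soft IRQ stacks from memblock, placing
 * each stack on the NUMA node of its CPU.
 */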
void __init alloc_irqstack_bootmem(void)
{
	unsigned int i, node;

	for_each_possible_cpu(i) {
		node = cpu_to_node(i);

		softirq_stack[i] = memblock_alloc_node(THREAD_SIZE,
						       THREAD_SIZE, node);
		if (!softirq_stack[i])
			panic("%s: Failed to allocate %lu bytes align=%lx nid=%d\n",
			      __func__, THREAD_SIZE, THREAD_SIZE, node);
		hardirq_stack[i] = memblock_alloc_node(THREAD_SIZE,
						       THREAD_SIZE, node);
		if (!hardirq_stack[i])
			panic("%s: Failed to allocate %lu bytes align=%lx nid=%d\n",
			      __func__, THREAD_SIZE, THREAD_SIZE, node);
	}
}

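/* Architecture-specific setup called from start_kernel(): fetch the
 * PROM boot arguments, register the early console, decode the root
 * and ramdisk flags, set up paging and ELF hwcaps, and allocate the
 * per-cpu IRQ stacks.
 */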
void __init setup_arch(char **cmdline_p)
{
	/* Initialize PROM console and command line. */
	*cmdline_p = prom_getbootargs();
	strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
	parse_early_param();

	boot_flags_init(*cmdline_p);
#ifdef CONFIG_EARLYFB
	if (btext_find_display())
#endif
		register_console(&prom_early_console);

	if (tlb_type == hypervisor)
		pr_info("ARCH: SUN4V\n");
	else
		pr_info("ARCH: SUN4U\n");

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

	idprom_init();

	if (!root_flags)
		root_mountflags &= ~MS_RDONLY;
	ROOT_DEV = old_decode_dev(root_dev);
#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
#endif

	task_thread_info(&init_task)->kregs = &fake_swapper_regs;

#ifdef CONFIG_IP_PNP
	if (!ic_set_manually) {
		phandle chosen = prom_finddevice("/chosen");
		u32 cl, sv, gw;

		cl = prom_getintdefault (chosen, "client-ip", 0);
		sv = prom_getintdefault (chosen, "server-ip", 0);
		gw = prom_getintdefault (chosen, "gateway-ip", 0);
		if (cl && sv) {
			ic_myaddr = cl;
			ic_servaddr = sv;
			if (gw)
				ic_gateway = gw;
#if defined(CONFIG_IP_PNP_BOOTP) || defined(CONFIG_IP_PNP_RARP)
			ic_proto_enabled = 0;
#endif
		}
	}
#endif

	/* Get boot processor trap_block[] setup.  */
	init_cur_cpu_trap(current_thread_info());

	paging_init();
	init_sparc64_elf_hwcap();
	smp_fill_in_cpu_possible_map();
	/*
	 * Once the OF device tree and MDESC have been setup and nr_cpus has
	 * been parsed, we know the list of possible cpus.  Therefore we can
	 * allocate the IRQ stacks.
	 */
	alloc_irqstack_bootmem();
}

extern int stop_a_enabled;

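/* Drop into the PROM command prompt when Stop-A (L1-A) is pressed,
 * unless it has been disabled via stop_a_enabled.
 */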
void sun_do_break(void)
{
	if (!stop_a_enabled)
		return;

	prom_printf("\n");
	flush_user_windows();

	prom_cmdline();
}
EXPORT_SYMBOL(sun_do_break);

int stop_a_enabled = 1;
EXPORT_SYMBOL(stop_a_enabled);