/* This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2007 by Helge Deller <deller@gmx.de>
 * Copyright 1999 SuSE GmbH (Philipp Rumpf)
 * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 * Copyright 2000 Hewlett Packard (Paul Bame, bame@puffin.external.hp.com)
 * Copyright (C) 2001 Grant Grundler (Hewlett Packard)
 * Copyright (C) 2004 Kyle McMartin <kyle@debian.org>
 *
 * Initial Version 04-23-1999 by Helge Deller <deller@gmx.de>
 */

#include <asm/asm-offsets.h>
#include <asm/psw.h>
#include <asm/pdc.h>

#include <asm/assembly.h>

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>

	.level	1.1

	__INITDATA
ENTRY(boot_args)
	.word 0 /* arg0 */
	.word 0 /* arg1 */
	.word 0 /* arg2 */
	.word 0 /* arg3 */
END(boot_args)

	__HEAD

	.align	4
	.import init_thread_union,data
	.import fault_vector_20,code    /* IVA parisc 2.0 32 bit */
#ifndef CONFIG_64BIT
        .import fault_vector_11,code    /* IVA parisc 1.1 32 bit */
	.import	$global$		/* forward declaration */
#endif /*!CONFIG_64BIT*/
ENTRY(parisc_kernel_start)
	.proc
	.callinfo

	/* Make sure sr4-sr7 are set to zero for the kernel address space */
	mtsp	%r0,%sr4
	mtsp	%r0,%sr5
	mtsp	%r0,%sr6
	mtsp	%r0,%sr7

	/* Clear BSS (shouldn't the boot loader do this?) */

	.import __bss_start,data
	.import __bss_stop,data

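	/* Note: cmpb,<<,n loops while %r3 < %r4 (unsigned); the stw,ma in
	 * its delay slot stores a zero word and post-increments %r3 by 4,
	 * and is nullified on the final, not-taken iteration. */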
	load32		PA(__bss_start),%r3
	load32		PA(__bss_stop),%r4
$bss_loop:
	cmpb,<<,n       %r3,%r4,$bss_loop
	stw,ma          %r0,4(%r3)

	/* Save away the arguments the boot loader passed in (32 bit args) */
	load32		PA(boot_args),%r1
	stw,ma          %arg0,4(%r1)
	stw,ma          %arg1,4(%r1)
	stw,ma          %arg2,4(%r1)
	stw,ma          %arg3,4(%r1)

#if defined(CONFIG_PA20)
	/* check for a 64-bit capable CPU as required by the current kernel */
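	/* The test below writes 32 to the shift-amount register (%cr11, SAR)
	 * and reads it back: SAR is 6 bits wide on PA2.0 but only 5 bits on
	 * PA1.1, so a non-zero result indicates a PA2.0 CPU. */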
	ldi		32,%r10
	mtctl		%r10,%cr11
	.level 2.0
	mfctl,w		%cr11,%r10
	.level 1.1
	comib,<>,n	0,%r10,$cpu_ok

	load32		PA(msg1),%arg0
	ldi		msg1_end-msg1,%arg1
$iodc_panic:
	copy		%arg0, %r10
	copy		%arg1, %r11
	load32		PA(init_stack),%sp
#define MEM_CONS 0x3A0
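	/* 0x3A0 appears to be the offset of PAGE0->mem_cons (the console's
	 * pz_device descriptor) in PDC page zero; the loads below pick up
	 * its hpa, spa, layers and iodc_io fields for the IODC console
	 * output call. */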
	ldw		MEM_CONS+32(%r0),%arg0	// HPA
	ldi		ENTRY_IO_COUT,%arg1
	ldw		MEM_CONS+36(%r0),%arg2	// SPA
	ldw		MEM_CONS+8(%r0),%arg3	// layers
	load32		PA(__bss_start),%r1
	stw		%r1,-52(%sp)		// arg4
	stw		%r0,-56(%sp)		// arg5
	stw		%r10,-60(%sp)		// arg6 = ptr to text
	stw		%r11,-64(%sp)		// arg7 = len
	stw		%r0,-68(%sp)		// arg8
	load32		PA(.iodc_panic_ret), %rp
	ldw		MEM_CONS+40(%r0),%r1	// ENTRY_IODC
	bv,n		(%r1)
.iodc_panic_ret:
	b .				/* wait endlessly with ... */
	or		%r10,%r10,%r10	/* qemu idle sleep */
msg1:	.ascii "Can't boot a kernel built for PA8x00 CPUs on this machine.\r\n"
msg1_end:

$cpu_ok:
#endif

	.level	PA_ASM_LEVEL

	/* Initialize startup VM. Just map first 16/32 MB of memory */
	load32		PA(swapper_pg_dir),%r4
	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
	mtctl		%r4,%cr25	/* Initialize user root pointer */

#if CONFIG_PGTABLE_LEVELS == 3
	/* Set pmd in pgd */
	load32		PA(pmd0),%r5
	shrd            %r5,PxD_VALUE_SHIFT,%r3
	ldo		(PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
	stw		%r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4)
	ldo		ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r4
#else
	/* 2-level page table, so pmd == pgd */
	ldo		ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
#endif

	/* Fill in pmd with enough pte directories */
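	/* Each entry written below holds the physical address of a page of
	 * PTEs shifted right by PxD_VALUE_SHIFT, plus the PRESENT and VALID
	 * flag bits; adding PAGE_SIZE >> PxD_VALUE_SHIFT therefore advances
	 * to the next PTE page. */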
	load32		PA(pg0),%r1
	SHRREG		%r1,PxD_VALUE_SHIFT,%r3
	ldo		(PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3

	ldi		ASM_PT_INITIAL,%r1

1:
	stw		%r3,0(%r4)
	ldo		(PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
	addib,>		-1,%r1,1b
#if CONFIG_PGTABLE_LEVELS == 3
	ldo             ASM_PMD_ENTRY_SIZE(%r4),%r4
#else
	ldo             ASM_PGD_ENTRY_SIZE(%r4),%r4
#endif


	/* Now initialize the PTEs themselves.  We use RWX for
	 * everything ... it will get remapped correctly later */
	ldo		0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
	load32		(1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
	load32		PA(pg0),%r1

$pgt_fill_loop:
	STREGM          %r3,ASM_PTE_ENTRY_SIZE(%r1)
	ldo		(1<<PFN_PTE_SHIFT)(%r3),%r3 /* add one PFN */
	addib,>		-1,%r11,$pgt_fill_loop
	nop

	/* Load the return address...er...crash 'n burn */
	copy		%r0,%r2

	/* And the RFI Target address too */
	load32		start_parisc,%r11

	/* And the initial task pointer */
	load32		init_thread_union,%r6
	mtctl           %r6,%cr30

	/* And the stack pointer too */
	ldo             THREAD_SZ_ALGN(%r6),%sp

#if defined(CONFIG_64BIT) && defined(CONFIG_FUNCTION_TRACER)
	.import _mcount,data
	/* initialize mcount FPTR */
	/* Get the global data pointer */
	loadgp
	load32		PA(_mcount), %r10
	std		%dp,0x18(%r10)
#endif

#ifdef CONFIG_64BIT
	/* Get PDCE_PROC for monarch CPU. */
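	/* The two page-zero words below presumably correspond to
	 * PAGE0->mem_pdc (low 32 bits, at 0x388) and PAGE0->mem_pdc_hi
	 * (upper 32 bits, at 0x35C) of the PDC entry point. */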
#define MEM_PDC_LO 0x388
#define MEM_PDC_HI 0x35C
	ldw             MEM_PDC_LO(%r0),%r3
	ldw             MEM_PDC_HI(%r0),%r10
	depd            %r10, 31, 32, %r3        /* move to upper word */
#endif


#ifdef CONFIG_SMP
	/* Set the smp rendezvous address into page zero.
	** It would be safer to do this in init_smp_config() but
	** it's just way easier to deal with here because
	** of 64-bit function ptrs and the address is local to this file.
	*/
	load32		PA(smp_slave_stext),%r10
	stw		%r10,0x10(%r0)	/* MEM_RENDEZ */
	stw		%r0,0x28(%r0)	/* MEM_RENDEZ_HI - assume addr < 4GB */

	/* FALLTHROUGH */
	.procend

	/*
	** Code Common to both Monarch and Slave processors.
	** Entry:
	**
	**  1.1:
	**    %r11 must contain RFI target address.
	**    %r25/%r26 args to pass to target function
	**    %r2  in case rfi target decides it didn't like something
	**
	**  2.0w:
	**    %r3  PDCE_PROC address
	**    %r11 RFI target address
	**
	** Caller must init: SR4-7, %sp, %r10, %cr24/25,
	*/
common_stext:
	.proc
	.callinfo
#else
	/* Clear PDC entry point - we won't use it */
	stw		%r0,0x10(%r0)	/* MEM_RENDEZ */
	stw		%r0,0x28(%r0)	/* MEM_RENDEZ_HI */
#endif /*CONFIG_SMP*/

#ifdef CONFIG_64BIT
	tophys_r1	%sp

	/* Save the rfi target address */
	ldd             TI_TASK-THREAD_SZ_ALGN(%sp), %r10
	tophys_r1       %r10
	std             %r11,  TASK_PT_GR11(%r10)
	/* Switch to wide mode; Superdome doesn't support narrow PDC
	** calls.
	*/
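	/* The bv below targets 2f with the upper 32 address bits cleared,
	 * while the ssm in its delay slot sets the PSW W-bit, so execution
	 * resumes at 2f already in wide mode. */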
1:	mfia            %rp             /* clear upper part of pcoq */
	ldo             2f-1b(%rp),%rp
	depdi           0,31,32,%rp
	bv              (%rp)
	ssm             PSW_SM_W,%r0

        /* Set Wide mode as the "Default" (e.g. for traps)
        ** First trap occurs *right* after (or part of) rfi for slave CPUs.
        ** Someday, palo might not do this for the Monarch either.
        */
2:
	mfctl		%cr30,%r6		/* PCX-W2 firmware bug */

	ldo             PDC_PSW(%r0),%arg0              /* 21 */
	ldo             PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
	ldo             PDC_PSW_WIDE_BIT(%r0),%arg2     /* 2 */
	load32          PA(stext_pdc_ret), %rp
	bv              (%r3)
	copy            %r0,%arg3

stext_pdc_ret:
	mtctl		%r6,%cr30		/* restore task thread info */

	/* restore rfi target address */
	ldd             TI_TASK-THREAD_SZ_ALGN(%sp), %r10
	tophys_r1       %r10
	ldd             TASK_PT_GR11(%r10), %r11
	tovirt_r1       %sp
#endif

	/* PARANOID: clear user scratch/user space SR's */
	mtsp	%r0,%sr0
	mtsp	%r0,%sr1
	mtsp	%r0,%sr2
	mtsp	%r0,%sr3

	/* Initialize Protection Registers */
	mtctl	%r0,%cr8
	mtctl	%r0,%cr9
	mtctl	%r0,%cr12
	mtctl	%r0,%cr13

	/* Initialize the global data pointer */
	loadgp

	/* Set up our interrupt table.  HPMCs might not work after this!
	 *
	 * We need to install the correct iva for PA1.1 or PA2.0. The
	 * following short sequence of instructions can determine this
	 * (without being illegal on a PA1.1 machine).
	 */
#ifndef CONFIG_64BIT
	ldi		32,%r10
	mtctl		%r10,%cr11
	.level 2.0
	mfctl,w		%cr11,%r10
	.level 1.1
	comib,<>,n	0,%r10,$is_pa20
	ldil		L%PA(fault_vector_11),%r10
	b		$install_iva
	ldo		R%PA(fault_vector_11)(%r10),%r10

$is_pa20:
	.level		PA_ASM_LEVEL /* restore 1.1 || 2.0w */
#endif /*!CONFIG_64BIT*/
	load32		PA(fault_vector_20),%r10

$install_iva:
	mtctl		%r10,%cr14

	b		aligned_rfi  /* Prepare to RFI! Man all the cannons! */
	nop

	.align 128
aligned_rfi:
	pcxt_ssm_bug

	copy		%r3, %arg0	/* PDCE_PROC for smp_callin() */

	rsm		PSW_SM_QUIET,%r0	/* off troublesome PSW bits */
	/* Don't need NOPs, have 8 compliant insn before rfi */

	mtctl		%r0,%cr17	/* Clear IIASQ tail */
	mtctl		%r0,%cr17	/* Clear IIASQ head */

	/* Load RFI target into PC queue */
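	/* rfi reloads the PSW from %ipsw and the front/back of the IA
	 * queues from %cr17/%cr18, so head gets the target and tail gets
	 * target + 4. */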
	mtctl		%r11,%cr18	/* IIAOQ head */
	ldo		4(%r11),%r11
	mtctl		%r11,%cr18	/* IIAOQ tail */

	load32		KERNEL_PSW,%r10
	mtctl		%r10,%ipsw

	/* Jump through hyperspace to Virt Mode */
	rfi
	nop

	.procend

#ifdef CONFIG_SMP

	.import smp_init_current_idle_task,data
	.import	smp_callin,code

#ifndef CONFIG_64BIT
smp_callin_rtn:
        .proc
	.callinfo
	break	1,1		/*  Break if returned from start_secondary */
	nop
	nop
        .procend
#endif /*!CONFIG_64BIT*/

/***************************************************************************
* smp_slave_stext is executed by all non-monarch Processors when the Monarch
* pokes the slave CPUs in smp.c:smp_boot_cpus().
*
* Once here, register values are initialized in order to branch to virtual
* mode. Once all available/eligible CPUs are in virtual mode, all are
* released and start out by executing their own idle task.
*****************************************************************************/
smp_slave_stext:
        .proc
	.callinfo

	/*
	** Initialize Space registers
	*/
	mtsp	   %r0,%sr4
	mtsp	   %r0,%sr5
	mtsp	   %r0,%sr6
	mtsp	   %r0,%sr7

#ifdef CONFIG_64BIT
	/*
	 *  Enable Wide mode early, in case the task_struct for the idle
	 *  task in smp_init_current_idle_task was allocated above 4GB.
	 */
1:	mfia            %rp             /* clear upper part of pcoq */
	ldo             2f-1b(%rp),%rp
	depdi           0,31,32,%rp
	bv              (%rp)
	ssm             PSW_SM_W,%r0
2:
#endif

	/*  Initialize the SP - monarch sets up smp_init_current_idle_task */
	load32		PA(smp_init_current_idle_task),%sp
	LDREG		0(%sp),%sp	/* load task address */
	tophys_r1	%sp
	LDREG		TASK_THREAD_INFO(%sp),%sp
	mtctl           %sp,%cr30       /* store in cr30 */
	ldo             THREAD_SZ_ALGN(%sp),%sp

	/* point CPU to kernel page tables */
	load32		PA(swapper_pg_dir),%r4
	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
	mtctl		%r4,%cr25	/* Initialize user root pointer */

#ifdef CONFIG_64BIT
	/* Setup PDCE_PROC entry */
	copy            %arg0,%r3
#else
	/* Load RFI *return* address in case smp_callin bails */
	load32		smp_callin_rtn,%r2
#endif

	/* Load RFI target address.  */
	load32		smp_callin,%r11

	/* ok...common code can handle the rest */
	b		common_stext
	nop

	.procend
#endif /* CONFIG_SMP */

ENDPROC(parisc_kernel_start)

#ifndef CONFIG_64BIT
	.section .data..ro_after_init

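	/* $global$ anchors the 32-bit kernel's global data area: the loadgp
	 * used above is expected to point %dp (%r27) here so that data can
	 * be addressed dp-relative. */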
	.align	4
	.export	$global$,data

	.type	$global$,@object
	.size	$global$,4
$global$:
	.word 0
#endif /*!CONFIG_64BIT*/