/*
 * arch/sh/kernel/head_64.S
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/init.h>

#include <asm/page.h>
#include <asm/cache.h>
#include <asm/tlb.h>
#include <cpu/registers.h>
#include <cpu/mmu_context.h>
#include <asm/thread_info.h>

/*
 * MMU defines: TLB boundaries.
 */

#define MMUIR_FIRST	ITLB_FIXED
#define MMUIR_END	(ITLB_LAST_VAR_UNRESTRICTED + TLB_STEP)
#define MMUIR_STEP	TLB_STEP

#define MMUDR_FIRST	DTLB_FIXED
#define MMUDR_END	(DTLB_LAST_VAR_UNRESTRICTED + TLB_STEP)
#define MMUDR_STEP	TLB_STEP

/* Safety check: CONFIG_PAGE_OFFSET has to be a multiple of 512Mb */
#if (CONFIG_PAGE_OFFSET & ((1UL<<29)-1))
#error "CONFIG_PAGE_OFFSET must be a multiple of 512Mb"
#endif
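
/*
 * Worked example of the check above: (1UL<<29) - 1 == 0x1fffffff, so the
 * #if tests the low 29 bits of CONFIG_PAGE_OFFSET; the build only passes
 * when they are all zero, i.e. when the offset is a multiple of
 * 0x20000000 (512Mb).
 */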

/*
 * MMU defines: Fixed TLBs.
 */
/* Deal safely with the case where the base of RAM is not 512Mb aligned */

#define ALIGN_512M_MASK (0xffffffffe0000000)
#define ALIGNED_EFFECTIVE ((CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START) & ALIGN_512M_MASK)
#define ALIGNED_PHYSICAL (CONFIG_MEMORY_START & ALIGN_512M_MASK)

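/*
 * ALIGN_512M_MASK clears the low 29 bits, so ALIGNED_EFFECTIVE and
 * ALIGNED_PHYSICAL are the kernel's effective and physical base addresses
 * rounded down to the nearest 512Mb boundary.  They are OR'd with the
 * control bits below to form the PTEH/PTEL values of the single fixed
 * 512Mb TLB entries.
 */
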
#define MMUIR_TEXT_H	(0x0000000000000003 | ALIGNED_EFFECTIVE)
			/* Enabled, Shared, ASID 0, Eff. Add. = ALIGNED_EFFECTIVE */

#define MMUIR_TEXT_L	(0x000000000000009a | ALIGNED_PHYSICAL)
			/* 512 Mb, Cacheable, Write-back, execute, Not User, Ph. Add. */

#define MMUDR_CACHED_H	(0x0000000000000003 | ALIGNED_EFFECTIVE)
			/* Enabled, Shared, ASID 0, Eff. Add. = ALIGNED_EFFECTIVE */
#define MMUDR_CACHED_L	(0x000000000000015a | ALIGNED_PHYSICAL)
			/* 512 Mb, Cacheable, Write-back, read/write, Not User, Ph. Add. */

#ifdef CONFIG_CACHE_OFF
#define	ICCR0_INIT_VAL	ICCR0_OFF			/* ICACHE off */
#else
#define	ICCR0_INIT_VAL	(ICCR0_ON | ICCR0_ICI)		/* ICE + ICI */
#endif
#define	ICCR1_INIT_VAL	ICCR1_NOLOCK			/* No locking */

#if defined (CONFIG_CACHE_OFF)
#define	OCCR0_INIT_VAL	OCCR0_OFF			   /* D-cache: off  */
#elif defined (CONFIG_CACHE_WRITETHROUGH)
#define	OCCR0_INIT_VAL	(OCCR0_ON | OCCR0_OCI | OCCR0_WT)  /* D-cache: on,   */
							   /* WT, invalidate */
#elif defined (CONFIG_CACHE_WRITEBACK)
#define	OCCR0_INIT_VAL	(OCCR0_ON | OCCR0_OCI | OCCR0_WB)  /* D-cache: on,   */
							   /* WB, invalidate */
#else
#error preprocessor flag CONFIG_CACHE_... not recognized!
#endif

#define	OCCR1_INIT_VAL	OCCR1_NOLOCK			   /* No locking     */

	.section	.empty_zero_page, "aw"
	.global empty_zero_page

empty_zero_page:
	.long	1		/* MOUNT_ROOT_RDONLY */
	.long	0		/* RAMDISK_FLAGS */
	.long	0x0200		/* ORIG_ROOT_DEV */
	.long	1		/* LOADER_TYPE */
	.long	0x00800000	/* INITRD_START */
	.long	0x00800000	/* INITRD_SIZE */
	.long	0

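/*
 * Note: these .long values are the default boot parameters kept at the
 * start of the zero page (read-only root mount, no ramdisk flags, root
 * device 0x0200, loader type, initrd address/size); presumably a boot
 * loader overrides them as needed.
 */
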
	.text
	.balign 4096,0,4096

	.section	.data, "aw"
	.balign	PAGE_SIZE

	.global mmu_pdtp_cache
mmu_pdtp_cache:
	.space PAGE_SIZE, 0

	.global empty_bad_page
empty_bad_page:
	.space PAGE_SIZE, 0

	.global empty_bad_pte_table
empty_bad_pte_table:
	.space PAGE_SIZE, 0

	.global	fpu_in_use
fpu_in_use:	.quad	0


	__HEAD
	.balign L1_CACHE_BYTES
/*
 * Condition at the entry of __stext:
 * . Reset state:
 *   . SR.FD    = 1		(FPU disabled)
 *   . SR.BL    = 1		(Exceptions disabled)
 *   . SR.MD    = 1		(Privileged Mode)
 *   . SR.MMU   = 0		(MMU Disabled)
 *   . SR.CD    = 0		(CTC User Visible)
 *   . SR.IMASK = Undefined	(Interrupt Mask)
 *
 * Operations supposed to be performed by __stext:
 * . prevent speculative fetch onto device memory while MMU is off
 * . reflect the SH5 ABI as much as possible (r15, r26, r27, r18)
 * . first, save CPU state and set it to something harmless
 * . any CPU detection and/or endianness settings (?)
 * . initialize EMI/LMI (but not TMU/RTC/INTC/SCIF): TBD
 * . set initial TLB entries for cached and uncached regions
 *   (no fine granularity paging)
 * . set initial cache state
 * . enable MMU and caches
 * . set CPU to a consistent state
 *   . registers (including stack pointer and current/KCR0)
 *   . NOT expecting to set Exception handling nor VBR/RESVEC/DCR
 *     at this stage. This is all left to later Linux initialization steps.
 *   . initialize FPU
 * . clear BSS
 * . jump into start_kernel()
 * . be prepared for the hopeless case where start_kernel() returns.
 *
 */
	.global _stext
_stext:
	/*
	 * Prevent speculative fetch on device memory due to
	 * uninitialized target registers.
	 */
	ptabs/u	ZERO, tr0
	ptabs/u	ZERO, tr1
	ptabs/u	ZERO, tr2
	ptabs/u	ZERO, tr3
	ptabs/u	ZERO, tr4
	ptabs/u	ZERO, tr5
	ptabs/u	ZERO, tr6
	ptabs/u	ZERO, tr7
	synci

	/*
	 * Read/Set CPU state. After this block:
	 * r29 = Initial SR
	 */
	getcon	SR, r29
	movi	SR_HARMLESS, r20
	putcon	r20, SR

	/*
	 * Initialize EMI/LMI. To Be Done.
	 */

	/*
	 * CPU detection and/or endianness settings (?). To Be Done.
	 * Pure PIC code here, please! Just save state into r30.
	 * After this block:
	 * r30 = CPU type/Platform Endianness
	 */

	/*
	 * Set initial TLB entries for cached and uncached regions.
	 * Note: PTA/BLINK is PIC code, PTABS/BLINK isn't!
	 */
	/* Clear ITLBs */
	pta	clear_ITLB, tr1
	movi	MMUIR_FIRST, r21
	movi	MMUIR_END, r22
clear_ITLB:
	putcfg	r21, 0, ZERO		/* Clear MMUIR[n].PTEH.V */
	addi	r21, MMUIR_STEP, r21
	bne	r21, r22, tr1

	/* Clear DTLBs */
	pta	clear_DTLB, tr1
	movi	MMUDR_FIRST, r21
	movi	MMUDR_END, r22
clear_DTLB:
	putcfg	r21, 0, ZERO		/* Clear MMUDR[n].PTEH.V */
	addi	r21, MMUDR_STEP, r21
	bne	r21, r22, tr1

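	/*
	 * Each loop above steps r21 from *_FIRST up to (but not including)
	 * *_END in TLB_STEP increments, writing zero to configuration
	 * index 0 (PTEH) of every entry, which clears the valid bit so no
	 * stale translations remain.
	 */
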
	/* Map one big (512Mb) page for ITLB */
	movi	MMUIR_FIRST, r21
	movi	MMUIR_TEXT_L, r22	/* PTEL first */
	add.l	r22, r63, r22		/* Sign extend */
	putcfg	r21, 1, r22		/* Set MMUIR[0].PTEL */
	movi	MMUIR_TEXT_H, r22	/* PTEH last */
	add.l	r22, r63, r22		/* Sign extend */
	putcfg	r21, 0, r22		/* Set MMUIR[0].PTEH */

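	/*
	 * In both this block and the DTLB one below, PTEL (index 1) is
	 * written before PTEH (index 0): PTEH carries the valid bit, so the
	 * entry only becomes live once PTEL is already programmed.  The
	 * "add.l rN, r63, rN" (r63/ZERO always reads as zero) merely
	 * sign-extends the 32-bit constant built by movi into 64 bits.
	 */
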
	/* Map one big CACHED (512Mb) page for DTLB */
	movi	MMUDR_FIRST, r21
	movi	MMUDR_CACHED_L, r22	/* PTEL first */
	add.l	r22, r63, r22		/* Sign extend */
	putcfg	r21, 1, r22		/* Set MMUDR[0].PTEL */
	movi	MMUDR_CACHED_H, r22	/* PTEH last */
	add.l	r22, r63, r22		/* Sign extend */
	putcfg	r21, 0, r22		/* Set MMUDR[0].PTEH */

	/*
	 * Setup a DTLB translation for SCIF phys.
	 */
	addi	r21, MMUDR_STEP, r21
	movi	0x0a03, r22	/* SCIF phys */
	shori	0x0148, r22
	putcfg	r21, 1, r22	/* PTEL first */
	movi	0xfa03, r22	/* 0xfa030000, fixed SCIF virt */
	shori	0x0003, r22
	putcfg	r21, 0, r22	/* PTEH last */

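	/*
	 * The movi/shori pairs above build PTEL = 0x0a030148 and
	 * PTEH = 0xfa030003, i.e. the SCIF registers at physical 0x0a030000
	 * stay reachable at the fixed virtual address 0xfa030000 once the
	 * MMU is on, so the early serial console keeps working; the PTEL
	 * control bits presumably select a small uncached device mapping
	 * (compare the 0x15a used for the cached 512Mb page above).
	 */
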
	/*
	 * Set cache behaviours.
	 */
	/* ICache */
	movi	ICCR_BASE, r21
	movi	ICCR0_INIT_VAL, r22
	movi	ICCR1_INIT_VAL, r23
	putcfg	r21, ICCR_REG0, r22
	putcfg	r21, ICCR_REG1, r23

	/* OCache */
	movi	OCCR_BASE, r21
	movi	OCCR0_INIT_VAL, r22
	movi	OCCR1_INIT_VAL, r23
	putcfg	r21, OCCR_REG0, r22
	putcfg	r21, OCCR_REG1, r23


	/*
	 * Enable Caches and MMU. Do the first non-PIC jump.
	 * Now head.S global variables, constants and externs
	 * can be used.
	 */
	getcon	SR, r21
	movi	SR_ENABLE_MMU, r22
	or	r21, r22, r21
	putcon	r21, SSR
	movi	hyperspace, r22
	ori	r22, 1, r22	    /* Make it SHmedia; not strictly required, but.. */
	putcon	r22, SPC
	synco
	rte			    /* And now go into the hyperspace ... */
hyperspace:			    /* ... that's the next instruction! */

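	/*
	 * The rte above is what actually turns the MMU on: the updated SR
	 * (with SR_ENABLE_MMU set) was placed in SSR and the address of
	 * hyperspace (with the SHmedia mode bit set by the ori) in SPC, so
	 * the return-from-exception restores both atomically and execution
	 * resumes here with translation active.
	 */
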
	/*
	 * Set CPU to a consistent state.
	 * r31 = FPU support flag
	 * tr0/tr7 in use. Others give a chance to loop somewhere safe
	 */
	movi	start_kernel, r32
	ori	r32, 1, r32

	ptabs	r32, tr0		    /* r32 = start_kernel address */
	pta/u	hopeless, tr1
	pta/u	hopeless, tr2
	pta/u	hopeless, tr3
	pta/u	hopeless, tr4
	pta/u	hopeless, tr5
	pta/u	hopeless, tr6
	pta/u	hopeless, tr7
	gettr	tr1, r28			/* r28 = hopeless address */

	/* Set initial stack pointer */
	movi	init_thread_union, SP
	putcon	SP, KCR0		/* Set current to init_task */
	movi	THREAD_SIZE, r22	/* Point to the end */
	add	SP, r22, SP

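	/*
	 * SP now points at init_thread_union + THREAD_SIZE, i.e. the top of
	 * the initial thread's stack (stacks grow downwards), while KCR0
	 * was loaded with init_thread_union so that "current" resolves to
	 * the init task from here on.
	 */
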
	/*
	 * Initialize FPU.
	 * Keep FPU flag in r31. After this block:
	 * r31 = FPU flag
	 */
	movi	fpu_in_use, r31	/* Temporary */

#ifdef CONFIG_SH_FPU
	getcon	SR, r21
	movi	SR_ENABLE_FPU, r22
	and	r21, r22, r22
	putcon	r22, SR			/* Try to enable */
	getcon	SR, r22
	xor	r21, r22, r21
	shlri	r21, 15, r21		/* Supposedly 0/1 */
	st.q	r31, 0, r21		/* Set fpu_in_use */
#else
	movi	0, r21
	st.q	r31, 0, r21		/* Set fpu_in_use */
#endif
	or	r21, ZERO, r31		/* Set FPU flag at last */

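	/*
	 * The CONFIG_SH_FPU path above is a probe: SR is ANDed with
	 * SR_ENABLE_FPU and written back, then re-read; the XOR of old and
	 * new SR flags the bits that changed, and the shift right by 15
	 * extracts SR.FD (the FPU disable bit), supposedly leaving 0 or 1,
	 * which is stored in fpu_in_use and kept in r31 as the FPU flag.
	 */
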
#ifndef CONFIG_SH_NO_BSS_INIT
/* Don't clear BSS if running on slow platforms such as an RTL simulation,
   remote memory via SHdebug link, etc.  For these, the memory can be
   guaranteed to be all zero on boot anyway. */
	/*
	 * Clear bss
	 */
	pta	clear_quad, tr1
	movi	__bss_start, r22
	movi	_end, r23
clear_quad:
	st.q	r22, 0, ZERO
	addi	r22, 8, r22
	bne	r22, r23, tr1		/* Both quad aligned, see vmlinux.lds.S */
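
	/*
	 * The loop stores zero one quadword (8 bytes) at a time from
	 * __bss_start up to _end; vmlinux.lds.S guarantees both symbols are
	 * quad-aligned, so the bne termination test hits exactly.
	 */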
#endif
	pta/u	hopeless, tr1

	/* Say bye to head.S, but be prepared in case we wrongly get back ... */
	blink	tr0, LINK

	/* If we ever get back here through LINK/tr1-tr7 */
	pta/u	hopeless, tr7

hopeless:
	/*
	 * Something's badly wrong here. Loop endlessly,
	 * there's nothing more we can do about it.
	 *
	 * Note on hopeless: it can be reached either before or after the
	 * jump into hyperspace. The only requirement is that it be called
	 * PIC-style (PTA) before, and either way (PTA/PTABS) after.
	 * Given the virtual-to-physical mapping, a simulator/emulator can
	 * easily tell which case applies just by looking at the hopeless
	 * (PC) address.
	 *
	 * For debugging purposes:
	 * (r28) hopeless/loop address
	 * (r29) Original SR
	 * (r30) CPU type/Platform endianness
	 * (r31) FPU Support
	 * (r32) start_kernel address
	 */
	blink	tr7, ZERO
358