/*
 * This file contains low-level CPU setup functions.
 * Kumar Gala <galak@kernel.crashing.org>
 * Copyright 2009 Freescale Semiconductor, Inc.
 *
 * Based on cpu_setup_6xx code by
 * Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <asm/page.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/mmu-book3e.h>
#include <asm/asm-offsets.h>
#include <asm/mpc85xx.h>

_GLOBAL(__e500_icache_setup)
	mfspr	r0, SPRN_L1CSR1
	andi.	r3, r0, L1CSR1_ICE
	bnelr				/* Already enabled */
	oris	r0, r0, L1CSR1_CPE@h
	ori	r0, r0, (L1CSR1_ICFI | L1CSR1_ICLFR | L1CSR1_ICE)
	mtspr	SPRN_L1CSR1, r0		/* Enable I-Cache */
	isync
	blr

_GLOBAL(__e500_dcache_setup)
	mfspr	r0, SPRN_L1CSR0
	andi.	r3, r0, L1CSR0_DCE
	bnelr				/* Already enabled */
	msync
	isync
	li	r0, 0
	mtspr	SPRN_L1CSR0, r0		/* Disable */
	msync
	isync
	li	r0, (L1CSR0_DCFI | L1CSR0_CLFC)
	mtspr	SPRN_L1CSR0, r0		/* Invalidate */
	isync
1:	mfspr	r0, SPRN_L1CSR0
	andi.	r3, r0, L1CSR0_CLFC
	bne+	1b			/* Wait for lock bits reset */
	oris	r0, r0, L1CSR0_CPE@h
	ori	r0, r0, L1CSR0_DCE
	msync
	isync
	mtspr	SPRN_L1CSR0, r0		/* Enable */
	isync
	blr

/*
 * FIXME - we haven't yet done testing to determine a reasonable default
 * value for PW20_WAIT_IDLE_BIT.
 */
#define PW20_WAIT_IDLE_BIT		50 /* 1ms, TB frequency is 41.66MHz */
_GLOBAL(setup_pw20_idle)
	mfspr	r3, SPRN_PWRMGTCR0

	/* Set PW20_WAIT bit, enable pw20 state */
	ori	r3, r3, PWRMGTCR0_PW20_WAIT
	li	r11, PW20_WAIT_IDLE_BIT

	/* Set Automatic PW20 Core Idle Count */
	rlwimi	r3, r11, PWRMGTCR0_PW20_ENT_SHIFT, PWRMGTCR0_PW20_ENT

	mtspr	SPRN_PWRMGTCR0, r3

	blr

/*
 * FIXME - we haven't yet done testing to determine a reasonable default
 * value for AV_WAIT_IDLE_BIT.
 */
#define AV_WAIT_IDLE_BIT		50 /* 1ms, TB frequency is 41.66MHz */
_GLOBAL(setup_altivec_idle)
	mfspr	r3, SPRN_PWRMGTCR0

	/* Enable AltiVec Idle */
	oris	r3, r3, PWRMGTCR0_AV_IDLE_PD_EN@h
	li	r11, AV_WAIT_IDLE_BIT

	/* Set Automatic AltiVec Idle Count */
	rlwimi	r3, r11, PWRMGTCR0_AV_IDLE_CNT_SHIFT, PWRMGTCR0_AV_IDLE_CNT

	mtspr	SPRN_PWRMGTCR0, r3

	blr

#ifdef CONFIG_PPC_E500MC
_GLOBAL(__setup_cpu_e6500)
	mflr	r6
#ifdef CONFIG_PPC64
	bl	setup_altivec_ivors
	/* Touch IVOR42 only if the CPU supports E.HV category */
	mfspr	r10,SPRN_MMUCFG
	rlwinm.	r10,r10,0,MMUCFG_LPIDSIZE
	beq	1f
	bl	setup_lrat_ivor
1:
#endif
	bl	setup_pw20_idle
	bl	setup_altivec_idle
	bl	__setup_cpu_e5500
	mtlr	r6
	blr
#endif /* CONFIG_PPC_E500MC */

#ifdef CONFIG_PPC32
#ifdef CONFIG_E200
_GLOBAL(__setup_cpu_e200)
	/* enable dedicated debug exception handling resources (Debug APU) */
	mfspr	r3,SPRN_HID0
	ori	r3,r3,HID0_DAPUEN@l
	mtspr	SPRN_HID0,r3
	b	__setup_e200_ivors
#endif /* CONFIG_E200 */

#ifdef CONFIG_E500
#ifndef CONFIG_PPC_E500MC
_GLOBAL(__setup_cpu_e500v1)
_GLOBAL(__setup_cpu_e500v2)
	mflr	r4
	bl	__e500_icache_setup
	bl	__e500_dcache_setup
	bl	__setup_e500_ivors
#if defined(CONFIG_FSL_RIO) || defined(CONFIG_FSL_PCI)
	/* Ensure that RFXE is set */
	mfspr	r3,SPRN_HID1
	oris	r3,r3,HID1_RFXE@h
	mtspr	SPRN_HID1,r3
#endif
	mtlr	r4
	blr
#else /* CONFIG_PPC_E500MC */
_GLOBAL(__setup_cpu_e500mc)
_GLOBAL(__setup_cpu_e5500)
	mflr	r5
	bl	__e500_icache_setup
	bl	__e500_dcache_setup
	bl	__setup_e500mc_ivors
	/*
	 * We only want to touch IVOR38-41 if we're running on hardware
	 * that supports category E.HV.  The architectural way to determine
	 * this is MMUCFG[LPIDSIZE].
	 */
	mfspr	r3, SPRN_MMUCFG
	rlwinm.	r3, r3, 0, MMUCFG_LPIDSIZE
	beq	1f
	bl	__setup_ehv_ivors
	b	2f
1:
	lwz	r3, CPU_SPEC_FEATURES(r4)
	/*
	 * We need this check because cpu_setup is also called for the
	 * secondary cores.  If we have already cleared the feature on
	 * the primary core, avoid doing it again on the secondaries.
	 */
	andis.	r6, r3, CPU_FTR_EMB_HV@h
	beq	2f
	rlwinm	r3, r3, 0, ~CPU_FTR_EMB_HV
	stw	r3, CPU_SPEC_FEATURES(r4)
2:
	mtlr	r5
	blr
#endif /* CONFIG_PPC_E500MC */
#endif /* CONFIG_E500 */
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC_BOOK3E_64
_GLOBAL(__restore_cpu_e6500)
	mflr	r5
	bl	setup_altivec_ivors
	/* Touch IVOR42 only if the CPU supports E.HV category */
	mfspr	r10,SPRN_MMUCFG
	rlwinm.	r10,r10,0,MMUCFG_LPIDSIZE
	beq	1f
	bl	setup_lrat_ivor
1:
	bl	setup_pw20_idle
	bl	setup_altivec_idle
	bl	__restore_cpu_e5500
	mtlr	r5
	blr

_GLOBAL(__restore_cpu_e5500)
	mflr	r4
	bl	__e500_icache_setup
	bl	__e500_dcache_setup
	bl	__setup_base_ivors
	bl	setup_perfmon_ivor
	bl	setup_doorbell_ivors
	/*
	 * We only want to touch IVOR38-41 if we're running on hardware
	 * that supports category E.HV.  The architectural way to determine
	 * this is MMUCFG[LPIDSIZE].
	 */
	mfspr	r10,SPRN_MMUCFG
	rlwinm.	r10,r10,0,MMUCFG_LPIDSIZE
	beq	1f
	bl	setup_ehv_ivors
1:
	mtlr	r4
	blr

_GLOBAL(__setup_cpu_e5500)
	mflr	r5
	bl	__e500_icache_setup
	bl	__e500_dcache_setup
	bl	__setup_base_ivors
	bl	setup_perfmon_ivor
	bl	setup_doorbell_ivors
	/*
	 * We only want to touch IVOR38-41 if we're running on hardware
	 * that supports category E.HV.  The architectural way to determine
	 * this is MMUCFG[LPIDSIZE].
	 */
	mfspr	r10,SPRN_MMUCFG
	rlwinm.	r10,r10,0,MMUCFG_LPIDSIZE
	beq	1f
	bl	setup_ehv_ivors
	b	2f
1:
	ld	r10,CPU_SPEC_FEATURES(r4)
	LOAD_REG_IMMEDIATE(r9,CPU_FTR_EMB_HV)
	andc	r10,r10,r9
	std	r10,CPU_SPEC_FEATURES(r4)
2:
	mtlr	r5
	blr
#endif

/* flush the L1 data cache; applies to e500v2, e500mc and e5500 */
_GLOBAL(flush_dcache_L1)
	mfmsr	r10
	wrteei	0

	mfspr	r3,SPRN_L1CFG0
	rlwinm	r5,r3,9,3	/* Extract cache block size */
	twlgti	r5,1		/* Only 32 and 64 byte cache blocks
				 * are currently defined.
				 */
	li	r4,32
	subfic	r6,r5,2		/* r6 = log2(1KiB / cache block size) -
				 *      log2(number of ways)
				 */
	slw	r5,r4,r5	/* r5 = cache block size */

	rlwinm	r7,r3,0,0xff	/* Extract number of KiB in the cache */
	mulli	r7,r7,13	/* An 8-way cache will require 13
				 * loads per set.
				 */
	slw	r7,r7,r6
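	/*
	 * Illustrative example, assuming the 8-way cache the comment
	 * above refers to: a 32 KiB L1 with 64-byte blocks has
	 * CBSIZE = 1, so r5 = 32 << 1 = 64, r6 = 2 - 1 = 1, and
	 * r7 = 32 * 13 << 1 = 832 loads, i.e. 13 loads for each of
	 * the cache's 64 sets.
	 */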

	/* save off HID0 and set DCFA */
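	/*
	 * HID0[DCFA] is the data cache flush assist bit; it is set here
	 * so that line replacement during the load pass below displaces
	 * every existing line, making the displacement flush reliable.
	 */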
	mfspr	r8,SPRN_HID0
	ori	r9,r8,HID0_DCFA@l
	mtspr	SPRN_HID0,r9
	isync

	LOAD_REG_IMMEDIATE(r6, KERNELBASE)
	mr	r4, r6
	mtctr	r7

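	/*
	 * Displacement flush: loading enough kernel memory (13 loads per
	 * set, as computed above) pushes out, and thereby writes back,
	 * whatever dirty lines the cache currently holds; the dcbf pass
	 * that follows then flushes the lines these loads brought in.
	 */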
1:	lwz	r3,0(r4)	/* Load... */
	add	r4,r4,r5
	bdnz	1b

	msync
	mr	r4, r6
	mtctr	r7

1:	dcbf	0,r4		/* ...and flush. */
	add	r4,r4,r5
	bdnz	1b

	/* restore HID0 */
	mtspr	SPRN_HID0,r8
	isync

	wrtee	r10

	blr

has_L2_cache:
	/* skip L2 cache on P2040/P2040E as they have no L2 cache */
	mfspr	r3, SPRN_SVR
	/* shift right by 8 bits and clear E bit of SVR */
	rlwinm	r4, r3, 24, ~0x800
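	/*
	 * The rotate drops the low (revision) byte of the SVR and the
	 * ~0x800 mask clears the "E" (security-enabled) variant bit, so
	 * both P2040 and P2040E compare equal to SVR_P2040 below.
	 */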

	lis	r3, SVR_P2040@h
	ori	r3, r3, SVR_P2040@l
	cmpw	r4, r3
	beq	1f

	li	r3, 1
	blr
1:
	li	r3, 0
	blr

/* flush backside L2 cache */
flush_backside_L2_cache:
	mflr	r10
	bl	has_L2_cache
	mtlr	r10
	cmpwi	r3, 0
	beq	2f

	/* Flush the L2 cache */
	mfspr	r3, SPRN_L2CSR0
	ori	r3, r3, L2CSR0_L2FL@l
	msync
	isync
	mtspr	SPRN_L2CSR0,r3
	isync

	/* wait for the flush to complete */
1:	mfspr	r3,SPRN_L2CSR0
	andi.	r3, r3, L2CSR0_L2FL@l
	bne	1b
2:
	blr

_GLOBAL(cpu_down_flush_e500v2)
	mflr	r0
	bl	flush_dcache_L1
	mtlr	r0
	blr

_GLOBAL(cpu_down_flush_e500mc)
_GLOBAL(cpu_down_flush_e5500)
	mflr	r0
	bl	flush_dcache_L1
	bl	flush_backside_L2_cache
	mtlr	r0
	blr

/* The L1 data cache of the e6500 contains no modified data, so no flush is required */
_GLOBAL(cpu_down_flush_e6500)
	blr
