• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1#include <config.h>
2
3#define P15_CTRL_RR	(1<<14)			/* cache replace method bit */
4#define P15_CTRL_IC	(1<<12)			/* I-cache enable bit */
5#define P15_CTRL_DC	(1<<2)			/* D-cache enable bit */
6#define P15_CTRL_MMU	(1<<0)			/* MMU enable bit */
7
/* AP=0b11 (full access) | domain 0 | bit 4 must be 1 | section descriptor (0b10) */
9#define MMU_SEC_DESC	( (3<<10) | (0<<5) | (1<<4) | (2<<0) )
10#define MMU_SEC_CB	( (1<<3) | (1<<2) )	/* cached, write back */
11#define MMU_SEC_NCB	~((1<<3) | (1<<2))	/* no cached, no writebuf */
12
13#define VM_EXEC 0x00000004
14
15/* r0: page directory address, must align to 16KB */
/*
 * mmu_pagedir_init -- fill every first-level entry with a 1:1 section map.
 *
 * In:       r0 = page directory base (must be 16 KB aligned)
 * Clobbers: r0, r2, r3 (r0 ends one past the table)
 */
.global mmu_pagedir_init
mmu_pagedir_init:

	ldr	r2, =MMU_SEC_DESC	/* descriptor for section 0 */
	add	r3, r0, #SZ_16K		/* r3 = one past the last entry */
.Lpgdir_fill:
	str	r2, [r0], #4		/* write entry, advance pointer */
	add	r2, r2, #SZ_1M		/* next 1 MB section base address */
	cmp	r0, r3			/* end of table reached? */
	bne	.Lpgdir_fill

	mov	pc, lr
31
32/*
33 * r0: page directory address, must align to 16KB
34 * r1: start of cached MEM, must align to 1MB
35 * r2: size of cached MEM, must align to 1MB
36 */
/*
 * mmu_pagedir_cached_range -- mark sections in [start, start+size) as
 * cached + write-buffered (set the C and B descriptor bits).
 *
 * In:       r0 = page directory base, r1 = start (1 MB aligned),
 *           r2 = size (1 MB aligned)
 * Clobbers: r0, r1, r2
 */
.global mmu_pagedir_cached_range
mmu_pagedir_cached_range:

	/*
	 * addr >> 20 is the section index and each entry is 4 bytes,
	 * so the entry address is pgdir + (addr >> 20) * 4 = pgdir + (addr >> 18).
	 */
	add	r1, r0, r1, lsr #18	/* r1 = &pgdir[start >> 20] */
	add	r2, r1, r2, lsr #18	/* r2 = one past the last entry */

1:
	cmp	r1, r2			/* unsigned compare: these are addresses */
	bhs	2f			/* done -- also handles size == 0 */
	ldr	r0, [r1]
	orr	r0, #MMU_SEC_CB		/* set C (cacheable) and B (bufferable) */
	str	r0, [r1], #4		/* store and advance to the next entry */
	b	1b
2:
	mov	pc, lr
55
56/*
57 * r0: page directory address, must align to 16KB
58 * r1: start of cached MEM, must align to 1MB
59 * r2: size of cached MEM, must align to 1MB
60 */
/*
 * mmu_pagedir_nocached_range -- mark sections in [start, start+size) as
 * uncached / unbuffered (clear the C and B descriptor bits).
 *
 * In:       r0 = page directory base, r1 = start (1 MB aligned),
 *           r2 = size (1 MB aligned)
 * Clobbers: r0, r1, r2, r3, flags
 *
 * NOTE(review): the "test, clean and invalidate" opcode below is the
 * ARMv5/ARM926-style loop; it is not valid on ARMv7 -- confirm target core.
 */
.global mmu_pagedir_nocached_range
mmu_pagedir_nocached_range:

	/*
	 * Clean + invalidate the whole D-cache first so that no dirty
	 * line for the range is written back after it becomes uncached.
	 */
clean_loop:
	mrc	p15, 0, r15, c7, c14, 3	/* test, clean and invalidate D-cache */
	bne	clean_loop		/* repeat until the cache is clean */

	mov	r3, #0
	mcr	p15, 0, r3, c8, c7, 0	/* invalidate unified TLBs */

	add	r1, r0, r1, lsr #18	/* r1 = &pgdir[start >> 20] */
	add	r2, r1, r2, lsr #18	/* r2 = one past the last entry */

1:
	cmp	r1, r2			/* unsigned compare (was signed blt) */
	bhs	2f			/* done -- also handles size == 0 */
	ldr	r0, [r1]
	bic	r0, r0, #MMU_SEC_CB	/* clear C and B: no cache, no writebuf */
	str	r0, [r1], #4
	b	1b
2:
	mov	pc, lr
86
/*
 * flush_cache_off -- invalidate the TLBs.
 *
 * Clobbers: r0
 *
 * NOTE(review): the read-then-write of the CP15 control register below
 * writes back an unmodified value, which has no architectural effect
 * beyond possible pipeline serialization on some cores.  If the intent
 * was to disable the caches here, no enable bit is ever cleared -- confirm.
 */
.global flush_cache_off
flush_cache_off:

	mrc	p15, 0, r0, c1, c0, 0	/* read CP15 control register into r0 */
	mcr	p15, 0, r0, c1, c0, 0	/* write it back unchanged (see NOTE) */

	mov	r0, #0
	mcr	p15, 0, r0, c8, c7, 0	/* invalidate entire unified TLB */

	mov	pc,lr
100
101/* r0: page directory address, must align to 16KB */
.global mmu_startup
mmu_startup:

	/*
	 * Program the translation table base and domain access rights,
	 * then enable the MMU and D-cache.
	 *
	 * In:       r0 = page directory base (must be 16 KB aligned)
	 * Clobbers: r1, r2, r3
	 */
	stmdb	sp!, {r0, lr}		/* the call below clobbers r0 and lr */
	bl	flush_cache_off		/* invalidate TLBs before enabling */
	ldmia	sp!, {r0, lr}

	mrc	p15, 0, r3, c1, c0, 0	/* read CP15 control register */
	bic	r3, r3, #P15_CTRL_RR	/* clear cache-replacement (RR) bit */
	orr	r3, r3, #P15_CTRL_MMU	/* MMU enable bit */
	orr     r3, r3, #P15_CTRL_DC    /* D-cache enable bit */

	mov	r2, #0			/* operand for TLB-invalidate (SBZ) */
	mov	r1, #-1			/* all 16 domains: manager access */

	mcr	p15, 0, r0, c2, c0, 0	/* TTBR <- page table base */
	mcr	p15, 0, r1, c3, c0, 0	/* domain access control <- all-manager */
	mcr	p15, 0, r3, c1, c0, 0	/* control reg <- MMU + D-cache on */
	mcr	p15, 0, r2, c8, c7, 0	/* invalidate TLBs (after enable) */
#if __ARM_ARCH__ >= 7
	isb				/* ensure the enable takes effect */
#endif

	mov	pc,lr
129
.global mmu_turnoff
mmu_turnoff:

	/*
	 * Disable the D-cache and the MMU.
	 *
	 * Clobbers: r3
	 *
	 * NOTE(review): the D-cache is disabled without a prior clean --
	 * dirty lines are not written back.  Callers that need the data
	 * must flush first (e.g. dcache_flush_all) -- confirm call sites.
	 */
	mrc	p15, 0, r3, c1, c0, 0	/* read CP15 control register */
	bic	r3, r3, #P15_CTRL_DC	/* clear D-cache enable bit */
	bic	r3, r3, #P15_CTRL_MMU	/* clear MMU enable bit */
	mcr	p15, 0, r3, c1, c0, 0	/* write control register back */

#if __ARM_ARCH__ >= 7
	isb				/* ensure the disable takes effect */
#endif

	mov	pc,lr
146
.global dcache_stop
dcache_stop:

	/*
	 * Clear the D-cache enable bit in the CP15 control register.
	 *
	 * Clobbers: r0
	 *
	 * NOTE(review): despite the name, nothing is cleaned or flushed
	 * before disabling -- this body is identical to dcache_stop_noflush.
	 */
	mrc	p15, 0, r0, c1, c0, 0	/* read control register */
	bic	r0, r0, #P15_CTRL_DC	/* clear D-cache enable bit */
	mcr	p15, 0, r0, c1, c0, 0	/* write it back */

	mov	pc,lr
155
.global dcache_start
dcache_start:
	/*
	 * Set the D-cache enable bit in the CP15 control register.
	 *
	 * Clobbers: r0
	 */
	mrc	p15, 0, r0, c1, c0, 0	/* read control register */
	orr	r0, r0, #P15_CTRL_DC	/* set D-cache enable bit */
	mcr	p15, 0, r0, c1, c0, 0	/* write it back */

	mov	pc,lr
163
.global dcache_stop_noflush
dcache_stop_noflush:

	/*
	 * Clear the D-cache enable bit without cleaning the cache first
	 * (dirty lines are deliberately discarded from coherency's view).
	 *
	 * Clobbers: r0
	 */
	mrc	p15, 0, r0, c1, c0, 0	/* read control register */
	bic	r0, r0, #P15_CTRL_DC	/* clear D-cache enable bit */
	mcr	p15, 0, r0, c1, c0, 0	/* write it back */

	mov	pc,lr
172
173
/*
 * dcache_flush_all -- clean + invalidate the L1 D-cache; also
 * invalidates the I-cache and the TLBs.
 *
 * Clobbers: r0-r3, flags (v7 path); r0-r1 preserved on pre-v7 path
 *
 * NOTE(review): the v7 way count (4), way shift (30) and set shift (5)
 * are hard-coded for a 4-way cache with 32-byte lines rather than
 * derived from CCSIDR -- confirm against the target core's geometry.
 */
.global dcache_flush_all
dcache_flush_all:
#if __ARM_ARCH__ >= 7
	mov	r0, #0			/* set up for MCR */
	mcr	p15, 0, r0, c8, c7, 0	/* invalidate TLBs */
	mcr	p15, 0, r0, c7, c5, 0	/* invalidate I-cache */

	mcr     p15, 2, r0, c0, c0, 0	/* CSSELR <- 0: select L1 data cache */
	isb				/* required before CCSIDR reflects CSSELR */

	mrc     p15, 1, r3, c0, c0, 0	/* read CCSIDR */
	ldr     r1, =0x1ff
	and     r3, r1, r3, LSR #13	/* r3 = number of sets - 1 */
	mov     r0, #0			/* r0 = way counter */
way_loop:
	mov     r1, #0			/* r1 = set counter */
line_loop:
	mov     r2, r0, LSL #30		/* way into bits [31:30] */
	orr     r2, r1, LSL #5		/* set into bits [..:5] (set/way format) */

	mcr     p15, 0, r2, c7, c14, 2	/* DCCISW: clean+invalidate by set/way */
	add     r1, r1, #1		/* next set */
	cmp     r1, r3			/* last set reached? */
	ble     line_loop		/* if not, continue the set loop */
	add     r0, r0, #1		/* else, next way */
	cmp     r0, #4			/* last way reached? */
	blt     way_loop		/* if not, continue the way loop */

	dsb				/* ensure maintenance completes */
	mov	pc,lr
#else
	stmfd	r13!, {r2, ip, lr}
	mov     r2, #VM_EXEC
	mov     ip, #0
1:	mrc     p15, 0, r15, c7, c14, 3         @ test, clean, invalidate D-cache
	bne     1b			        @ repeat until clean
	tst     r2, #VM_EXEC
	mcrne   p15, 0, ip, c7, c5, 0           @ invalidate I-cache
	mcrne   p15, 0, ip, c7, c10, 4          @ drain write buffer
	ldmfd	r13!, {r2, ip, pc}
#endif
217
/*
 * dcache_inv_all -- invalidate (without cleaning) the L1 D-cache;
 * also invalidates the I-cache and TLBs on the v7 path.
 *
 * Clobbers: r0-r3, flags
 *
 * NOTE(review): as in dcache_flush_all, the v7 way/set geometry is
 * hard-coded (4 ways, 32-byte lines) -- confirm for the target core.
 */
.global dcache_inv_all
dcache_inv_all:
#if __ARM_ARCH__ >= 7
	mov	r0, #0			/* set up for MCR */
	mcr	p15, 0, r0, c8, c7, 0	/* invalidate TLBs */
	mcr	p15, 0, r0, c7, c5, 0	/* invalidate I-cache */

	mcr     p15, 2, r0, c0, c0, 0	/* CSSELR <- 0: select L1 data cache */
	isb				/* required before CCSIDR reflects CSSELR */

	mrc     p15, 1, r3, c0, c0, 0	/* read CCSIDR */
	ldr     r1, =0x1ff
	and     r3, r1, r3, LSR #13	/* r3 = number of sets - 1 */
	mov     r0, #0			/* r0 = way counter */
way_lp:
	mov     r1, #0			/* r1 = set counter */
line_lp:
	mov     r2, r0, LSL #30		/* way into bits [31:30] */
	orr     r2, r1, LSL #5		/* set into bits [..:5] (set/way format) */
	mcr     p15, 0, r2, c7, c6, 2	/* DCISW: invalidate line by set/way */
	add     r1, r1, #1		/* next set */
	cmp     r1, r3			/* last set reached? */
	ble     line_lp			/* if not, continue the set loop */
	add     r0, r0, #1		/* else, next way */
	cmp     r0, #4			/* last way reached? */
	blt     way_lp			/* if not, continue the way loop */

	dsb				/* ensure maintenance completes */
	mov	pc,lr
#else
	mov	r0, #0			/* operand is SBZ -- was uninitialized */
	mcr	p15, 0, r0, c7, c7, 0	/* invalidate both I and D caches */
	mov     pc,lr
#endif
251