/*
 *  linux/arch/arm/mm/proc-mohawk.S: MMU functions for Marvell PJ1 core
 *
 *  PJ1 (codename Mohawk) is a hybrid of the xscale3 and Marvell's own core.
 *
 *  Heavily based on proc-arm926.S and proc-xsc3.S
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be flushed.  If the
 * area is larger than this, then we flush the whole cache.
 */
#define CACHE_DLIMIT	32768

/*
 * The cache line size of the L1 D cache.
 */
#define CACHE_DLINESIZE	32

/*
 * cpu_mohawk_proc_init()
 */
ENTRY(cpu_mohawk_proc_init)
	mov	pc, lr

/*
 * cpu_mohawk_proc_fin()
 */
ENTRY(cpu_mohawk_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...iz...........
	bic	r0, r0, #0x0006			@ .............ca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	mov	pc, lr

/*
 * cpu_mohawk_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 *
 * (same as arm926)
 */
	.align	5
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_mohawk_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x0007			@ .............cam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	mov	pc, r0
ENDPROC(cpu_mohawk_reset)
	.popsection

/*
 * cpu_mohawk_do_idle()
 *
 * Called with IRQs disabled
 */
	.align	5
ENTRY(cpu_mohawk_do_idle)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mcr	p15, 0, r0, c7, c0, 4		@ wait for interrupt
	mov	pc, lr

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(mohawk_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mov	pc, lr
ENDPROC(mohawk_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Clean and invalidate all cache entries in a particular
 *	address space.
 */
ENTRY(mohawk_flush_user_cache_all)
	/* FALLTHROUGH */
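	/*
	 * There is no cheap way to flush only one address space's
	 * entries here, so the user variant falls through to the full
	 * clean+invalidate in mohawk_flush_kern_cache_all below
	 * (same approach as arm926).
	 */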
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(mohawk_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	mcr	p15, 0, ip, c7, c14, 0		@ clean & invalidate all D cache
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 0		@ drain write buffer
	mov	pc, lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the
 *	specified address range.
 *
 *	- start	- start address (inclusive)
 *	- end	- end address (exclusive)
 *	- flags	- vm_flags describing address space
 *
 *	(same as arm926)
 */
ENTRY(mohawk_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bgt	__flush_whole_cache
1:	tst	r2, #VM_EXEC
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(mohawk_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 *	(same as arm926)
 */
ENTRY(mohawk_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(mohawk_flush_kern_dcache_area)
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 *	(same as v4wb)
 */
mohawk_dma_inv_range:
	tst	r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 *	(same as v4wb)
 */
mohawk_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(mohawk_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:
	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
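	/*
	 * r2 holds an enum dma_data_direction value: DMA_BIDIRECTIONAL (0),
	 * DMA_TO_DEVICE (1) or DMA_FROM_DEVICE (2).  The compare against
	 * DMA_TO_DEVICE selects clean-only for TO_DEVICE (beq), invalidate
	 * for FROM_DEVICE (bcs, i.e. unsigned "higher or same"), and
	 * clean+invalidate for BIDIRECTIONAL (fall through).
	 */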
ENTRY(mohawk_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	mohawk_dma_clean_range
	bcs	mohawk_dma_inv_range
	b	mohawk_dma_flush_range
ENDPROC(mohawk_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(mohawk_dma_unmap_area)
	mov	pc, lr
ENDPROC(mohawk_dma_unmap_area)

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions mohawk

ENTRY(cpu_mohawk_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * cpu_mohawk_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
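/*
 * As on arm926, the whole D cache is cleaned and the I cache
 * invalidated before the new page table base is installed: cache
 * contents belonging to the outgoing mapping would otherwise go
 * stale across the switch on this virtually-addressed cache.
 */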
	.align	5
ENTRY(cpu_mohawk_switch_mm)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c14, 0		@ clean & invalidate all D cache
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	orr	r0, r0, #0x18			@ cache the page table in L2
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mov	pc, lr

/*
 * cpu_mohawk_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
ENTRY(cpu_mohawk_set_pte_ext)
	armv3_set_pte_ext
	mov	r0, r0
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

	__CPUINIT

	.type	__mohawk_setup, #function
__mohawk_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs
	orr	r4, r4, #0x18			@ cache the page table in L2
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer

	mov	r0, #0				@ don't allow CP access
	mcr	p15, 0, r0, c15, c1, 0		@ write CP access register

	adr	r5, mohawk_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0		@ get control register
	bic	r0, r0, r5
	orr	r0, r0, r6
	mov	pc, lr

	.size	__mohawk_setup, . - __mohawk_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * .011 1001 ..00 0101
	 *
	 */
	.type	mohawk_crval, #object
mohawk_crval:
	crval	clear=0x00007f3f, mmuset=0x00003905, ucset=0x00001134

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions mohawk, dabort=v5t_early_abort, pabort=legacy_pabort

	.section ".rodata"

	string	cpu_arch_name, "armv5te"
	string	cpu_elf_name, "v5"
	string	cpu_mohawk_name, "Marvell 88SV331x"

	.align

	.section ".proc.info.init", #alloc, #execinstr

	.type	__88sv331x_proc_info,#object
__88sv331x_proc_info:
	.long	0x56158000			@ Marvell 88SV331x (MOHAWK)
	.long	0xfffff000
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__mohawk_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_mohawk_name
	.long	mohawk_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
	.long	mohawk_cache_fns
	.size	__88sv331x_proc_info, . - __88sv331x_proc_info