/*
 *  linux/arch/arm/mm/proc-arm1026.S: MMU functions for ARM1026EJ-S
 *
 *  Copyright (C) 2000 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
 *  hacked for non-paged-MM by Hyok S. Choi, 2003.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 *
 * These are the low level assembler routines for performing cache
 * and TLB functions on the ARM1026EJ-S.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>

#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be invalidated
 * using the single invalidate entry instructions.  Anything larger
 * than this, and we go for the whole cache.
 *
 * This value should be chosen such that we choose the cheapest
 * alternative.
 */
#define MAX_AREA_SIZE	32768

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	16

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 */
#define CACHE_DLIMIT	32768

	.text
/*
 * cpu_arm1026_proc_init()
 */
ENTRY(cpu_arm1026_proc_init)
	mov	pc, lr

/*
 * cpu_arm1026_proc_fin()
 */
ENTRY(cpu_arm1026_proc_fin)
	stmfd	sp!, {lr}
	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
	msr	cpsr_c, ip
	bl	arm1026_flush_kern_cache_all
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ldmfd	sp!, {pc}

/*
 * cpu_arm1026_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
ENTRY(cpu_arm1026_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#ifdef CONFIG_MMU
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x000f			@ ............wcam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	mov	pc, r0

/*
 * cpu_arm1026_do_idle()
 */
	.align	5
ENTRY(cpu_arm1026_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	mov	pc, lr

/* ================================= CACHE ================================ */

	.align	5
/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(arm1026_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
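 *
 *	The loop below issues the ARM1026's "test, clean and
 *	invalidate" operation (c7, c14, 3) until the D cache holds
 *	no more dirty lines; reading the result into r15 updates the
 *	condition flags that the bne tests.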
 */
ENTRY(arm1026_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
#ifndef CONFIG_CPU_DCACHE_DISABLE
1:	mrc	p15, 0, r15, c7, c14, 3		@ test, clean, invalidate
	bne	1b
#endif
	tst	r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
#endif
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start	- start address (inclusive)
 *	- end	- end address (exclusive)
 *	- flags	- vm_flags for this space
 */
ENTRY(arm1026_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache

#ifndef CONFIG_CPU_DCACHE_DISABLE
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	tst	r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
#endif
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm1026_coherent_kern_range)
	/* FALLTHROUGH */
/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm1026_coherent_user_range)
	mov	ip, #0
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#endif
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
#endif
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	flush_kern_dcache_page(void *page)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- page	- page aligned address
 */
ENTRY(arm1026_flush_kern_dcache_page)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	add	r1, r0, #PAGE_SZ
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
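 *	(The mcrne operations below clean the partially-overlapped
 *	start and end lines first, so that unrelated data sharing
 *	those lines is not discarded by the invalidate loop.)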
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as v4wb)
 */
ENTRY(arm1026_dma_inv_range)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as v4wb)
 */
ENTRY(arm1026_dma_clean_range)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm1026_dma_flush_range)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

ENTRY(arm1026_cache_fns)
	.long	arm1026_flush_kern_cache_all
	.long	arm1026_flush_user_cache_all
	.long	arm1026_flush_user_cache_range
	.long	arm1026_coherent_kern_range
	.long	arm1026_coherent_user_range
	.long	arm1026_flush_kern_dcache_page
	.long	arm1026_dma_inv_range
	.long	arm1026_dma_clean_range
	.long	arm1026_dma_flush_range

	.align	5
ENTRY(cpu_arm1026_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mov	ip, #0
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
#endif
	mov	pc, lr

/* =============================== PageTable ============================== */

/*
 * cpu_arm1026_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
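 *
 * The ARM1026's caches are virtually indexed and the TLBs hold
 * entries for the outgoing tables, so the D cache is cleaned, the
 * I cache invalidated and the write buffer drained before the new
 * base is loaded, and the TLBs are invalidated afterwards.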
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_arm1026_switch_mm)
#ifdef CONFIG_MMU
	mov	r1, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
1:	mrc	p15, 0, r15, c7, c14, 3		@ test, clean, invalidate
	bne	1b
#endif
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcr	p15, 0, r1, c7, c5, 0		@ invalidate I cache
#endif
	mcr	p15, 0, r1, c7, c10, 4		@ drain WB
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, r1, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mov	pc, lr

/*
 * cpu_arm1026_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
ENTRY(cpu_arm1026_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext
	mov	r0, r0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#endif
#endif /* CONFIG_MMU */
	mov	pc, lr


	__INIT

	.type	__arm1026_setup, #function
__arm1026_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
	mcr	p15, 0, r4, c2, c0		@ load page table pointer
#endif
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r0, #4				@ explicitly disable writeback
	mcr	p15, 7, r0, c15, c0, 0
#endif
	adr	r5, arm1026_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0		@ get control register v4
	bic	r0, r0, r5
	orr	r0, r0, r6
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
	orr	r0, r0, #0x4000			@ .R.. .... .... ....
#endif
	mov	pc, lr
	.size	__arm1026_setup, . - __arm1026_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * .011 1001 ..11 0101
	 *
	 */
	.type	arm1026_crval, #object
arm1026_crval:
	crval	clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001934

	__INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 */
	.type	arm1026_processor_functions, #object
arm1026_processor_functions:
	.word	v5t_early_abort
	.word	pabort_noifar
	.word	cpu_arm1026_proc_init
	.word	cpu_arm1026_proc_fin
	.word	cpu_arm1026_reset
	.word	cpu_arm1026_do_idle
	.word	cpu_arm1026_dcache_clean_area
	.word	cpu_arm1026_switch_mm
	.word	cpu_arm1026_set_pte_ext
	.size	arm1026_processor_functions, . - arm1026_processor_functions

	.section .rodata

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv5tej"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v5"
	.size	cpu_elf_name, . - cpu_elf_name
	.align

	.type	cpu_arm1026_name, #object
cpu_arm1026_name:
	.asciz	"ARM1026EJ-S"
	.size	cpu_arm1026_name, . - cpu_arm1026_name

	.align

	.section ".proc.info.init", #alloc, #execinstr

	.type	__arm1026_proc_info,#object
__arm1026_proc_info:
	.long	0x4106a260			@ ARM 1026EJ-S (v5TEJ)
	.long	0xff0ffff0
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__arm1026_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA
	.long	cpu_arm1026_name
	.long	arm1026_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
	.long	arm1026_cache_fns
	.size	__arm1026_proc_info, . - __arm1026_proc_info