/*
 *  linux/arch/arm/mm/proc-v7.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This is the "shell" of the ARMv7 processor support.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>

#include "proc-macros.S"

#define TTB_C		(1 << 0)
#define TTB_S		(1 << 1)
#define TTB_RGN_NC	(0 << 3)
#define TTB_RGN_OC_WBWA	(1 << 3)
#define TTB_RGN_OC_WT	(2 << 3)
#define TTB_RGN_OC_WB	(3 << 3)

#ifndef CONFIG_SMP
#define TTB_FLAGS	TTB_C|TTB_RGN_OC_WB		@ mark PTWs cacheable, outer WB
#else
#define TTB_FLAGS	TTB_C|TTB_S|TTB_RGN_OC_WBWA	@ mark PTWs cacheable and shared, outer WBWA
#endif

ENTRY(cpu_v7_proc_init)
	mov	pc, lr
ENDPROC(cpu_v7_proc_init)

ENTRY(cpu_v7_proc_fin)
	mov	pc, lr
ENDPROC(cpu_v7_proc_fin)

/*
 *	cpu_v7_reset(loc)
 *
 *	Perform a soft reset of the system.  Put the CPU into the
 *	same state as it would be if it had been reset, and branch
 *	to what would be the reset vector.
 *
 *	- loc   - location to jump to for soft reset
 */
	.align	5
ENTRY(cpu_v7_reset)
	mov	pc, r0
ENDPROC(cpu_v7_reset)

/*
 *	cpu_v7_do_idle()
 *
 *	Idle the processor (e.g. wait for interrupt).
 *
 *	IRQs are already disabled.
 */
ENTRY(cpu_v7_do_idle)
	dsb					@ WFI may enter a low-power mode
	wfi
	mov	pc, lr
ENDPROC(cpu_v7_do_idle)

ENTRY(cpu_v7_dcache_clean_area)
#ifndef TLB_CAN_READ_FROM_L1_CACHE
	dcache_line_size r2, r3
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, r2
	subs	r1, r1, r2
	bhi	1b
	dsb
#endif
	mov	pc, lr
ENDPROC(cpu_v7_dcache_clean_area)

/*
 *	cpu_v7_switch_mm(pgd_phys, tsk)
 *
 *	Set the translation table base pointer to be pgd_phys
 *
 *	- pgd_phys - physical address of new TTB
 *
 *	It is assumed that:
 *	- we are not using split page tables
 */
ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
	mov	r2, #0
	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
	orr	r0, r0, #TTB_FLAGS
	mcr	p15, 0, r2, c13, c0, 1		@ set reserved context ID
	isb
1:	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
	isb
	mcr	p15, 0, r1, c13, c0, 1		@ set context ID
	isb
#endif
	mov	pc, lr
ENDPROC(cpu_v7_switch_mm)
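
/*
 * The MCR/ISB sequence above avoids a window in which the old ASID is
 * live against the new page tables.  A rough C-like sketch (illustrative
 * pseudocode only, not a kernel interface; register names follow the
 * ARMv7 ARM):
 *
 *	CONTEXTIDR = 0;			// reserved ASID, no user mappings
 *	isb();
 *	TTBR0 = pgd_phys | TTB_FLAGS;	// switch to the new tables
 *	isb();
 *	CONTEXTIDR = mm->context.id;	// new ASID now paired with new TTB
 *	isb();
 */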
/*
 *	cpu_v7_set_pte_ext(ptep, pte, ext)
 *
 *	Set a level 2 translation table entry.
 *
 *	- ptep  - pointer to level 2 translation table entry
 *		  (hardware version is stored at -2048 bytes)
 *	- pte   - PTE value to store
 *	- ext	- value for extended PTE bits
 */
ENTRY(cpu_v7_set_pte_ext)
#ifdef CONFIG_MMU
	str	r1, [r0], #-2048		@ linux version

	bic	r3, r1, #0x000003f0
	bic	r3, r3, #PTE_TYPE_MASK
	orr	r3, r3, r2
	orr	r3, r3, #PTE_EXT_AP0 | 2

	tst	r1, #1 << 4
	orrne	r3, r3, #PTE_EXT_TEX(1)

	tst	r1, #L_PTE_WRITE
	tstne	r1, #L_PTE_DIRTY
	orreq	r3, r3, #PTE_EXT_APX

	tst	r1, #L_PTE_USER
	orrne	r3, r3, #PTE_EXT_AP1
	tstne	r3, #PTE_EXT_APX
	bicne	r3, r3, #PTE_EXT_APX | PTE_EXT_AP0

	tst	r1, #L_PTE_EXEC
	orreq	r3, r3, #PTE_EXT_XN

	tst	r1, #L_PTE_YOUNG
	tstne	r1, #L_PTE_PRESENT
	moveq	r3, #0

	str	r3, [r0]
	mcr	p15, 0, r0, c7, c10, 1		@ flush_pte
#endif
	mov	pc, lr
ENDPROC(cpu_v7_set_pte_ext)

cpu_v7_name:
	.ascii	"ARMv7 Processor"
	.align

	.section ".text.init", #alloc, #execinstr

/*
 *	__v7_setup
 *
 *	Initialise TLB, Caches, and MMU state ready to switch the MMU
 *	on.  Return in r0 the new CP15 C1 control register setting.
 *
 *	We automatically detect if we have a Harvard cache, and use the
 *	Harvard cache control instructions instead of the unified cache
 *	control instructions.
 *
 *	This should be able to cover all ARMv7 cores.
 *
 *	It is assumed that:
 *	- cache type register is implemented
 */
__v7_setup:
#ifdef CONFIG_SMP
	mrc	p15, 0, r0, c1, c0, 1		@ Enable SMP/nAMP mode
	orr	r0, r0, #(0x1 << 6)
	mcr	p15, 0, r0, c1, c0, 1
#endif
	adr	r12, __v7_setup_stack		@ the local stack
	stmia	r12, {r0-r5, r7, r9, r11, lr}
	bl	v7_flush_dcache_all
	ldmia	r12, {r0-r5, r7, r9, r11, lr}
	mov	r10, #0
#ifdef HARVARD_CACHE
	mcr	p15, 0, r10, c7, c5, 0		@ I+BTB cache invalidate
#endif
	dsb
#ifdef CONFIG_MMU
	mcr	p15, 0, r10, c8, c7, 0		@ invalidate I + D TLBs
	mcr	p15, 0, r10, c2, c0, 2		@ TTB control register
	orr	r4, r4, #TTB_FLAGS
	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
	mov	r10, #0x1f			@ domains 0, 1 = manager
	mcr	p15, 0, r10, c3, c0, 0		@ load domain access register
#endif
	ldr	r5, =0xff0aa1a8
	ldr	r6, =0x40e040e0
	mcr	p15, 0, r5, c10, c2, 0		@ write PRRR
	mcr	p15, 0, r6, c10, c2, 1		@ write NMRR
	adr	r5, v7_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0, 0		@ read control register
	bic	r0, r0, r5			@ clear bits
	orr	r0, r0, r6			@ set them
	mov	pc, lr				@ return to head.S:__ret
ENDPROC(__v7_setup)
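
/*
 * In effect __v7_setup hands back, in r0 (illustrative pseudocode; the
 * write to the control register happens later, in the caller):
 *
 *	r0 = (SCTLR & ~clear) | mmuset;	// clear/mmuset from v7_crval below
 */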
	/*   AT
	 *  TFR   EV X F   I D LR
	 * .EEE ..EE PUI. .T.T 4RVI ZFRS BLDP WCAM
	 * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx	< forced
	 *    1    0 110       0011 1.00 .111 1101	< we want
	 */
	.type	v7_crval, #object
v7_crval:
	crval	clear=0x0120c302, mmuset=0x10c0387d, ucset=0x00c0187c

__v7_setup_stack:
	.space	4 * 11				@ 11 registers

	.type	v7_processor_functions, #object
ENTRY(v7_processor_functions)
	.word	v7_early_abort
	.word	pabort_ifar
	.word	cpu_v7_proc_init
	.word	cpu_v7_proc_fin
	.word	cpu_v7_reset
	.word	cpu_v7_do_idle
	.word	cpu_v7_dcache_clean_area
	.word	cpu_v7_switch_mm
	.word	cpu_v7_set_pte_ext
	.size	v7_processor_functions, . - v7_processor_functions

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv7"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v7"
	.size	cpu_elf_name, . - cpu_elf_name
	.align

	.section ".proc.info.init", #alloc, #execinstr

	/*
	 * Match any ARMv7 processor core.
	 */
	.type	__v7_proc_info, #object
__v7_proc_info:
	.long	0x000f0000		@ Required ID value
	.long	0x000f0000		@ Mask for ID
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_XN | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__v7_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_v7_name
	.long	v7_processor_functions
	.long	v7wbi_tlb_fns
	.long	v6_user_fns
	.long	v7_cache_fns
	.size	__v7_proc_info, . - __v7_proc_info
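
/*
 * Boot-time matching of this record is assumed to be
 * (MIDR & mask) == value, so the 0x000f0000/0x000f0000 pair above
 * accepts any core whose main ID register reports the CPUID-scheme
 * (ARMv7) architecture field (see __lookup_processor_type).
 */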