/*
 *  linux/arch/arm/mm/cache-v3.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(v3_flush_icache_all)
	mov	pc, lr
ENDPROC(v3_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 *
 *	- mm	- mm_struct describing address space
 */
ENTRY(v3_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v3_flush_kern_cache_all)
	/* FALLTHROUGH */

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start	- start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- flags	- vm_area_struct flags describing address space
 */
ENTRY(v3_flush_user_cache_range)
	mov	ip, #0
	mcreq	p15, 0, ip, c7, c0, 0		@ flush ID cache
	mov	pc, lr

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(v3_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(v3_coherent_user_range)
	mov	pc, lr

/*
 *	flush_kern_dcache_area(void *page, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v3_flush_kern_dcache_area)
	/* FALLTHROUGH */

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(v3_dma_flush_range)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c0, 0		@ flush ID cache
	mov	pc, lr

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v3_dma_unmap_area)
	teq	r2, #DMA_TO_DEVICE
	bne	v3_dma_flush_range
	/* FALLTHROUGH */

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v3_dma_map_area)
	mov	pc, lr
ENDPROC(v3_dma_unmap_area)
ENDPROC(v3_dma_map_area)

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v3
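
/*
 * For reference: "define_cache_functions v3" above emits the per-CPU cache
 * dispatch table consumed by multi-cache kernels.  As an illustrative sketch
 * only (assuming the define_cache_functions macro in proc-macros.S and the
 * field layout of struct cpu_cache_fns in <asm/cacheflush.h> of this kernel
 * era), the emitted v3_cache_fns table is roughly equivalent to the
 * following C initializer:
 *
 *	struct cpu_cache_fns v3_cache_fns = {
 *		.flush_icache_all	= v3_flush_icache_all,
 *		.flush_kern_all		= v3_flush_kern_cache_all,
 *		.flush_user_all		= v3_flush_user_cache_all,
 *		.flush_user_range	= v3_flush_user_cache_range,
 *		.coherent_kern_range	= v3_coherent_kern_range,
 *		.coherent_user_range	= v3_coherent_user_range,
 *		.flush_kern_dcache_area	= v3_flush_kern_dcache_area,
 *		.dma_map_area		= v3_dma_map_area,
 *		.dma_unmap_area		= v3_dma_unmap_area,
 *		.dma_flush_range	= v3_dma_flush_range,
 *	};
 */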