/*
 * Based on arch/arm/mm/flush.c
 *
 * Copyright (C) 1995-2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/cache.h>
#include <asm/tlbflush.h>

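/*
 * Make the I-cache coherent with data recently written through the kernel
 * mapping at @kaddr. With an aliasing I-cache, the D-cache is cleaned to
 * the point of unification and the entire I-cache is invalidated;
 * otherwise flushing only the affected range is sufficient.
 */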
void sync_icache_aliases(void *kaddr, unsigned long len)
{
	unsigned long addr = (unsigned long)kaddr;

	if (icache_is_aliasing()) {
		__clean_dcache_area_pou(kaddr, len);
		__flush_icache_all();
	} else {
		/*
		 * Don't issue kick_all_cpus_sync() after I-cache invalidation
		 * for user mappings.
		 */
		__flush_icache_range(addr, addr + len);
	}
}

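/*
 * Called from copy_to_user_page() below. Cache maintenance is only needed
 * when the kernel has written into an executable mapping, so that stale
 * instructions are not fetched from the I-cache afterwards.
 */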
static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
				unsigned long uaddr, void *kaddr,
				unsigned long len)
{
	if (vma->vm_flags & VM_EXEC)
		sync_icache_aliases(kaddr, len);
}

/*
 * Copy user data from/to a page which is mapped into a different process's
 * address space. Really, we want to allow our "user space" model to handle
 * this.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
}

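/*
 * Bring the D-cache and I-cache into sync when a page is first mapped
 * executable into user space. PG_dcache_clean records that the maintenance
 * has been done, so it is not repeated until flush_dcache_page() marks the
 * page as dirty again.
 */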
void __sync_icache_dcache(pte_t pte)
{
	struct page *page = pte_page(pte);

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		sync_icache_aliases(page_address(page),
				    PAGE_SIZE << compound_order(page));
}
EXPORT_SYMBOL_GPL(__sync_icache_dcache);

/*
 * This function is called when a page has been modified by the kernel. Mark
 * it as dirty for later flushing when mapped in user space (if executable,
 * see __sync_icache_dcache).
 */
void flush_dcache_page(struct page *page)
{
	if (test_bit(PG_dcache_clean, &page->flags))
		clear_bit(PG_dcache_clean, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * Additional functions defined in assembly.
 */
EXPORT_SYMBOL(__flush_icache_range);

#ifdef CONFIG_ARCH_HAS_PMEM_API
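/*
 * Write back (clean) the D-cache for a persistent-memory range so that the
 * data reaches the point of persistence.
 */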
void arch_wb_cache_pmem(void *addr, size_t size)
{
	/* Ensure order against any prior non-cacheable writes */
	dmb(osh);
	__clean_dcache_area_pop(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);

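/*
 * Invalidate the D-cache for a persistent-memory range so that subsequent
 * reads fetch the data from the underlying media rather than from stale
 * cache lines.
 */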
void arch_invalidate_pmem(void *addr, size_t size)
{
	__inval_dcache_area(addr, size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
#endif