// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/unicore32/mm/flush.c
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 */
9 #include <linux/module.h>
10 #include <linux/mm.h>
11 #include <linux/pagemap.h>
12
13 #include <asm/cacheflush.h>
14 #include <asm/tlbflush.h>
15
/*
 * Flush all cache entries for the given mm.
 *
 * The D-cache here is VIPT non-aliasing (see flush_ptrace_access), so
 * no maintenance is required when an address space goes away; this is
 * deliberately a no-op.
 */
void flush_cache_mm(struct mm_struct *mm)
{
}
19
flush_cache_range(struct vm_area_struct * vma,unsigned long start,unsigned long end)20 void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
21 unsigned long end)
22 {
23 if (vma->vm_flags & VM_EXEC)
24 __flush_icache_all();
25 }
26
/*
 * Flush the cache for a single user page.
 *
 * Nothing to do on this architecture: the VIPT non-aliasing D-cache
 * stays coherent, and per-page I-cache maintenance is handled by the
 * range/ptrace paths. Intentionally empty.
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr,
		unsigned long pfn)
{
}
31
flush_ptrace_access(struct vm_area_struct * vma,struct page * page,unsigned long uaddr,void * kaddr,unsigned long len)32 static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
33 unsigned long uaddr, void *kaddr, unsigned long len)
34 {
35 /* VIPT non-aliasing D-cache */
36 if (vma->vm_flags & VM_EXEC) {
37 unsigned long addr = (unsigned long)kaddr;
38
39 __cpuc_coherent_kern_range(addr, addr + len);
40 }
41 }
42
43 /*
44 * Copy user data from/to a page which is mapped into a different
45 * processes address space. Really, we want to allow our "user
46 * space" model to handle this.
47 *
48 * Note that this code needs to run on the current CPU.
49 */
/*
 * Copy @len bytes from @src into the kernel alias @dst of a page that
 * is also mapped in another process (ptrace poke path), then restore
 * cache coherency for that process's mapping. Must run on the current
 * CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long uaddr, void *dst, const void *src,
		unsigned long len)
{
	memcpy(dst, src, len);
	/* keep the I-cache in step with what was just written */
	flush_ptrace_access(vma, page, uaddr, dst, len);
}
57
__flush_dcache_page(struct address_space * mapping,struct page * page)58 void __flush_dcache_page(struct address_space *mapping, struct page *page)
59 {
60 /*
61 * Writeback any data associated with the kernel mapping of this
62 * page. This ensures that data in the physical page is mutually
63 * coherent with the kernels mapping.
64 */
65 __cpuc_flush_kern_dcache_area(page_address(page), PAGE_SIZE);
66 }
67
68 /*
69 * Ensure cache coherency between kernel mapping and userspace mapping
70 * of this page.
71 */
flush_dcache_page(struct page * page)72 void flush_dcache_page(struct page *page)
73 {
74 struct address_space *mapping;
75
76 /*
77 * The zero page is never written to, so never has any dirty
78 * cache lines, and therefore never needs to be flushed.
79 */
80 if (page == ZERO_PAGE(0))
81 return;
82
83 mapping = page_mapping_file(page);
84
85 if (mapping && !mapping_mapped(mapping))
86 clear_bit(PG_dcache_clean, &page->flags);
87 else {
88 __flush_dcache_page(mapping, page);
89 if (mapping)
90 __flush_icache_all();
91 set_bit(PG_dcache_clean, &page->flags);
92 }
93 }
94 EXPORT_SYMBOL(flush_dcache_page);
95