/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _METAG_CACHEFLUSH_H
#define _METAG_CACHEFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/io.h>

#include <asm/l2cache.h>
#include <asm/metag_isa.h>
#include <asm/metag_mem.h>

void metag_cache_probe(void);

void metag_data_cache_flush_all(const void *start);
void metag_code_cache_flush_all(const void *start);

/*
 * Routines to flush physical cache lines that may be used to cache data or
 * code normally accessed via the linear address range supplied. The region
 * flushed must lie entirely in either local or global address space, as
 * determined by the top bit of the start address. If bytes is >= 4K then the
 * whole of the related cache state will be flushed rather than a limited
 * range.
 */
void metag_data_cache_flush(const void *start, int bytes);
void metag_code_cache_flush(const void *start, int bytes);
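
/*
 * Usage sketch (illustrative only, not part of the original header; the
 * buffer name and length are assumed): after writing instructions into a
 * kernel buffer, discard any stale instruction-cache lines for it. Remember
 * that a length >= 4K flushes the whole cache instead of just the range.
 *
 *	metag_code_cache_flush(code_buf, code_len);
 */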

#ifdef CONFIG_METAG_META12

/* Write through, virtually tagged, split I/D cache. */

static inline void __flush_cache_all(void)
{
	metag_code_cache_flush_all((void *) PAGE_OFFSET);
	metag_data_cache_flush_all((void *) PAGE_OFFSET);
}

#define flush_cache_all() __flush_cache_all()

/* flush the entire user address space referenced in this mm structure */
static inline void flush_cache_mm(struct mm_struct *mm)
{
	if (mm == current->mm)
		__flush_cache_all();
}

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/* flush a range of addresses from this mm */
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end)
{
	flush_cache_mm(vma->vm_mm);
}

static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr, unsigned long pfn)
{
	flush_cache_mm(vma->vm_mm);
}

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	1
static inline void flush_dcache_page(struct page *page)
{
	metag_data_cache_flush_all((void *) PAGE_OFFSET);
}

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

static inline void flush_icache_page(struct vm_area_struct *vma,
				     struct page *page)
{
	metag_code_cache_flush(page_to_virt(page), PAGE_SIZE);
}

static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	metag_data_cache_flush_all((void *) PAGE_OFFSET);
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	metag_data_cache_flush_all((void *) PAGE_OFFSET);
}

#else

/* Write through, physically tagged, split I/D cache. */

#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_icache_page(vma, pg)		do { } while (0)
#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	1
static inline void flush_dcache_page(struct page *page)
{
	/* FIXME: We can do better than this. All we are trying to do is
	 * make the i-cache coherent, we should use the PG_arch_1 bit like
	 * e.g. powerpc.
	 */
#ifdef CONFIG_SMP
	metag_out32(1, SYSC_ICACHE_FLUSH);
#else
	metag_code_cache_flush_all((void *) PAGE_OFFSET);
#endif
}

#endif

/* Flush the icache for a range of kernel virtual addresses, e.g. after writing code there */
static inline void flush_icache_range(unsigned long address,
				      unsigned long endaddr)
{
#ifdef CONFIG_SMP
	metag_out32(1, SYSC_ICACHE_FLUSH);
#else
	metag_code_cache_flush((void *) address, endaddr - address);
#endif
}
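
/*
 * Illustrative sketch (assumed names, mirroring copy_to_user_page() below):
 * after storing instructions through a kernel virtual address, make them
 * visible to instruction fetch.
 *
 *	memcpy(dst, insns, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 */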

static inline void flush_cache_sigtramp(unsigned long addr, int size)
{
	/*
	 * Flush the icache in case there was previously some code
	 * fetched from this address, perhaps a previous sigtramp.
	 *
	 * We don't need to flush the dcache, it's write through and
	 * we just wrote the sigtramp code through it.
	 */
#ifdef CONFIG_SMP
	metag_out32(1, SYSC_ICACHE_FLUSH);
#else
	metag_code_cache_flush((void *) addr, size);
#endif
}
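
/*
 * Illustrative sketch (assumed names, not from this header): after the
 * signal code stores a trampoline instruction on the user stack, only the
 * icache needs flushing, since the write-through dcache already holds the
 * up-to-date data.
 *
 *	__put_user(instr, (unsigned int __user *)tramp);
 *	flush_cache_sigtramp((unsigned long)tramp, sizeof(instr));
 */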

#ifdef CONFIG_METAG_L2C

/*
 * Perform a single specific CACHEWD operation on an address, masking lower bits
 * of address first.
 */
static inline void cachewd_line(void *addr, unsigned int data)
{
	unsigned long masked = (unsigned long)addr & -0x40;
	__builtin_meta2_cachewd((void *)masked, data);
}
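
/*
 * Worked example of the masking above (address value is illustrative):
 * cache lines are 64 bytes and -0x40 == ~0x3fUL, so an address such as
 * 0x80001234 is rounded down to the start of its line, 0x80001200.
 */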

/* Perform a certain CACHEW op on each cache line in a range */
static inline void cachew_region_op(void *start, unsigned long size,
				    unsigned int op)
{
	unsigned long offset = (unsigned long)start & 0x3f;
	int i;
	if (offset) {
		size += offset;
		start -= offset;
	}
	i = (size - 1) >> 6;
	do {
		__builtin_meta2_cachewd(start, op);
		start += 0x40;
	} while (i--);
}
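
/*
 * Worked example of the loop count above (values are illustrative): for
 * start = 0x80001010 and size = 0x70, offset = 0x10, so size becomes 0x80
 * and i = (0x80 - 1) >> 6 = 1, giving two CACHEWD operations, one for the
 * line at 0x80001000 and one for the line at 0x80001040.
 */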

/* prevent write fence and flushbacks being reordered in L2 */
static inline void l2c_fence_flush(void *addr)
{
	/*
	 * Synchronise by reading back and re-flushing.
	 * It is assumed this access will miss, as the caller should have just
	 * flushed the cache line.
	 */
	(void)*(volatile u8 *)addr;
	cachewd_line(addr, CACHEW_FLUSH_L1D_L2);
}

/* prevent write fence and writebacks being reordered in L2 */
static inline void l2c_fence(void *addr)
{
	/*
	 * A write back has occurred, but not necessarily an invalidate, so the
	 * readback in l2c_fence_flush() would hit in the cache and have no
	 * effect. Therefore fully flush the line first.
	 */
	cachewd_line(addr, CACHEW_FLUSH_L1D_L2);
	l2c_fence_flush(addr);
}

/* Used to keep memory consistent when doing DMA. */
static inline void flush_dcache_region(void *start, unsigned long size)
{
	/* metag_data_cache_flush won't flush L2 cache lines if size >= 4096 */
	if (meta_l2c_is_enabled()) {
		cachew_region_op(start, size, CACHEW_FLUSH_L1D_L2);
		if (meta_l2c_is_writeback())
			l2c_fence_flush(start + size - 1);
	} else {
		metag_data_cache_flush(start, size);
	}
}
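
/*
 * Illustrative sketch (buffer name and length are assumed): keep a buffer
 * consistent with memory before handing it to a device for DMA. With a
 * write-back L2 this flushes L1 and L2 and fences the last line.
 *
 *	flush_dcache_region(dma_buf, dma_len);
 */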

/* Write back dirty lines to memory (or do nothing if no writeback caches) */
static inline void writeback_dcache_region(void *start, unsigned long size)
{
	if (meta_l2c_is_enabled() && meta_l2c_is_writeback()) {
		cachew_region_op(start, size, CACHEW_WRITEBACK_L1D_L2);
		l2c_fence(start + size - 1);
	}
}

/* Invalidate (may also write back if necessary) */
static inline void invalidate_dcache_region(void *start, unsigned long size)
{
	if (meta_l2c_is_enabled())
		cachew_region_op(start, size, CACHEW_INVALIDATE_L1D_L2);
	else
		metag_data_cache_flush(start, size);
}
#else
#define flush_dcache_region(s, l)	metag_data_cache_flush((s), (l))
#define writeback_dcache_region(s, l)	do {} while (0)
#define invalidate_dcache_region(s, l)	flush_dcache_region((s), (l))
#endif
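
/*
 * Illustrative sketch (assumed names, not from this header): a typical DMA
 * round trip pairs writeback_dcache_region() and invalidate_dcache_region(),
 * writing back dirty lines before the device reads the buffer and
 * invalidating stale lines before the CPU reads what the device wrote.
 *
 *	writeback_dcache_region(dma_buf, dma_len);
 *	invalidate_dcache_region(dma_buf, dma_len);
 */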

static inline void copy_to_user_page(struct vm_area_struct *vma,
				     struct page *page, unsigned long vaddr,
				     void *dst, const void *src,
				     unsigned long len)
{
	memcpy(dst, src, len);
	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
}

static inline void copy_from_user_page(struct vm_area_struct *vma,
				       struct page *page, unsigned long vaddr,
				       void *dst, const void *src,
				       unsigned long len)
{
	memcpy(dst, src, len);
}

#endif /* _METAG_CACHEFLUSH_H */