/*
 * arch/sh/mm/cache-sh5.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2002  Benedict Gaster
 * Copyright (C) 2003  Richard Curnow
 * Copyright (C) 2003 - 2008  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>

/* Wired TLB entry for the D-cache */
static unsigned long long dtlb_cache_slot;
void __init p3_cache_init(void)
{
	/* Reserve a slot for dcache colouring in the DTLB */
	dtlb_cache_slot = sh64_get_wired_dtlb_entry();
}

#ifdef CONFIG_DCACHE_DISABLED
#define sh64_dcache_purge_all()					do { } while (0)
#define sh64_dcache_purge_coloured_phy_page(paddr, eaddr)	do { } while (0)
#define sh64_dcache_purge_user_range(mm, start, end)		do { } while (0)
#define sh64_dcache_purge_phy_page(paddr)			do { } while (0)
#define sh64_dcache_purge_virt_page(mm, eaddr)			do { } while (0)
#endif

/*
 * The following group of functions deal with mapping and unmapping a
 * temporary page into a DTLB slot that has been set aside for exclusive
 * use.
 */
static inline void
sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid,
			   unsigned long paddr)
{
	local_irq_disable();
	sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
}

static inline void sh64_teardown_dtlb_cache_slot(void)
{
	sh64_teardown_tlb_slot(dtlb_cache_slot);
	local_irq_enable();
}

#ifndef CONFIG_ICACHE_DISABLED
static inline void sh64_icache_inv_all(void)
{
	unsigned long long addr, flag, data;
	unsigned long flags;

	addr = ICCR0;
	flag = ICCR0_ICI;
	data = 0;

	/* Make this a critical section for safety (probably not strictly necessary.) */
	local_irq_save(flags);

	/* Without the "0" (data) input operand (%1) the result is inexplicably wrong */
	__asm__ __volatile__ (
		"getcfg	%3, 0, %0\n\t"
		"or	%0, %2, %0\n\t"
		"putcfg	%3, 0, %0\n\t"
		"synci"
		: "=&r" (data)
		: "0" (data), "r" (flag), "r" (addr));

	local_irq_restore(flags);
}

static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end)
{
	/* Invalidate range of addresses [start,end] from the I-cache, where
	 * the addresses lie in the kernel superpage. */

	unsigned long long ullend, addr, aligned_start;
	aligned_start = (unsigned long long)(signed long long)(signed long) start;
	addr = L1_CACHE_ALIGN(aligned_start);
	ullend = (unsigned long long) (signed long long) (signed long) end;

	while (addr <= ullend) {
		__asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}

static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr)
{
	/* If we get called, we know that vma->vm_flags contains VM_EXEC.
	   Also, eaddr is page-aligned. */
	unsigned int cpu = smp_processor_id();
	unsigned long long addr, end_addr;
	unsigned long flags = 0;
	unsigned long running_asid, vma_asid;
	addr = eaddr;
	end_addr = addr + PAGE_SIZE;

	/* Check whether we can use the current ASID for the I-cache
	   invalidation.  For example, if we're called via
	   access_process_vm->flush_cache_page->here, (e.g. when reading from
	   /proc), 'running_asid' will be that of the reader, not of the
	   victim.

	   Also, note the risk that we might get pre-empted between the ASID
	   compare and blocking IRQs, and before we regain control, the
	   pid->ASID mapping changes.  However, the whole cache will get
	   invalidated when the mapping is renewed, so the worst that can
	   happen is that the loop below ends up invalidating somebody else's
	   cache entries.
	*/

	running_asid = get_asid();
	vma_asid = cpu_asid(cpu, vma->vm_mm);
	if (running_asid != vma_asid) {
		local_irq_save(flags);
		switch_and_save_asid(vma_asid);
	}
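	/*
	 * Invalidate the page four cache lines at a time.  The 128-byte
	 * stride assumes 32-byte I-cache lines (4 * 32 == 128), matching
	 * the icbi displacements below.
	 */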
	while (addr < end_addr) {
		/* Worth unrolling a little */
		__asm__ __volatile__("icbi %0,  0" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 32" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 64" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 96" : : "r" (addr));
		addr += 128;
	}
	if (running_asid != vma_asid) {
		switch_and_save_asid(running_asid);
		local_irq_restore(flags);
	}
}

static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
			  unsigned long start, unsigned long end)
{
	/* Used for invalidating big chunks of I-cache, i.e. assume the range
	   is whole pages.  If 'start' or 'end' is not page aligned, the code
	   is conservative and invalidates to the ends of the enclosing pages.
	   This is functionally OK, just a performance loss. */

	/* See the comments below in sh64_dcache_purge_user_range() regarding
	   the choice of algorithm.  However, for the I-cache option (2) isn't
	   available because there are no physical tags so aliases can't be
	   resolved.  The icbi instruction has to be used through the user
	   mapping.  Because icbi is cheaper than ocbp on a cache hit, the
	   selective code is worthwhile for a larger range than it would be
	   for the D-cache.  Just assume 64 pages for now as a working
	   figure.
	   */
	int n_pages;

	if (!mm)
		return;

	n_pages = ((end - start) >> PAGE_SHIFT);
	if (n_pages >= 64) {
		sh64_icache_inv_all();
	} else {
		unsigned long aligned_start;
		unsigned long eaddr;
		unsigned long after_last_page_start;
		unsigned long mm_asid, current_asid;
		unsigned long flags = 0;

		mm_asid = cpu_asid(smp_processor_id(), mm);
		current_asid = get_asid();

		if (mm_asid != current_asid) {
			/* Switch ASID and run the invalidate loop under cli */
			local_irq_save(flags);
			switch_and_save_asid(mm_asid);
		}

		aligned_start = start & PAGE_MASK;
		after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK);

		while (aligned_start < after_last_page_start) {
			struct vm_area_struct *vma;
			unsigned long vma_end;
			vma = find_vma(mm, aligned_start);
			if (!vma || aligned_start < vma->vm_start) {
				/* In a hole between VMAs (or past the last
				   one): skip a page at a time rather than
				   getting stuck in an error condition */
				aligned_start += PAGE_SIZE;
				continue;
			}
			vma_end = vma->vm_end;
			if (vma->vm_flags & VM_EXEC) {
				/* Executable */
				eaddr = aligned_start;
				while (eaddr < vma_end) {
					sh64_icache_inv_user_page(vma, eaddr);
					eaddr += PAGE_SIZE;
				}
			}
			aligned_start = vma->vm_end; /* Skip to start of next region */
		}

		if (mm_asid != current_asid) {
			switch_and_save_asid(current_asid);
			local_irq_restore(flags);
		}
	}
}

/*
 * Invalidate a small range of user context I-cache, not necessarily page
 * (or even cache-line) aligned.
 *
 * Since this is used inside ptrace, the ASID in the mm context typically
 * won't match current_asid.  We'll have to switch ASID to do this.  For
 * safety, and given that the range will be small, do all this under cli.
 *
 * Note, there is a hazard that the ASID in mm->context is no longer
 * actually associated with mm, i.e. if the mm->context has started a new
 * cycle since mm was last active.  However, this is just a performance
 * issue: all that happens is that we invalidate lines belonging to
 * another mm, so the owning process has to refill them when that mm goes
 * live again.  mm itself can't have any cache entries because there will
 * have been a flush_cache_all when the new mm->context cycle started.
 */
static void sh64_icache_inv_user_small_range(struct mm_struct *mm,
						unsigned long start, int len)
{
	unsigned long long eaddr;
	unsigned long long eaddr_end;
	unsigned long current_asid, mm_asid;
	unsigned long flags;

	/*
	 * Align down to the start of the cache line.  Otherwise, suppose
	 * len==8 and start was at 32N+28 : the line holding the last 4
	 * bytes wouldn't get invalidated.
	 */
	eaddr = start & ~(L1_CACHE_BYTES - 1UL);
	eaddr_end = start + len;

	mm_asid = cpu_asid(smp_processor_id(), mm);
	local_irq_save(flags);
	current_asid = switch_and_save_asid(mm_asid);

	while (eaddr < eaddr_end) {
		__asm__ __volatile__("icbi %0, 0" : : "r" (eaddr));
		eaddr += L1_CACHE_BYTES;
	}
	switch_and_save_asid(current_asid);
	local_irq_restore(flags);
}

static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end)
{
	/* The icbi instruction never raises ITLBMISS.  i.e. if there's not a
	   cache hit on the virtual tag the instruction ends there, without a
	   TLB lookup. */

	unsigned long long aligned_start;
	unsigned long long ull_end;
	unsigned long long addr;

	ull_end = end;

	/* Just invalidate over the range using the natural addresses.  TLB
	   miss handling will be OK (TBC).  Since it's for the current process,
	   either we're already in the right ASID context, or the ASIDs have
	   been recycled since we were last active in which case we might just
	   invalidate another process's I-cache entries : no worries, just a
	   performance drop for it. */
	aligned_start = L1_CACHE_ALIGN(start);
	addr = aligned_start;
	while (addr < ull_end) {
		__asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
		__asm__ __volatile__ ("nop");
		__asm__ __volatile__ ("nop");
		addr += L1_CACHE_BYTES;
	}
}
#endif /* !CONFIG_ICACHE_DISABLED */

#ifndef CONFIG_DCACHE_DISABLED
/* Buffer used as the target of alloco instructions to purge data from cache
   sets by natural eviction. -- RPC */
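/* With 32-byte cache lines this works out to 32 KiB plus a page of slack,
   i.e. (32 << 10) + 4096 bytes. */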
#define DUMMY_ALLOCO_AREA_SIZE ((L1_CACHE_BYTES << 10) + (1024 * 4))
static unsigned char dummy_alloco_area[DUMMY_ALLOCO_AREA_SIZE] __cacheline_aligned = { 0, };

static inline void sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets)
{
	/* Purge all ways in a particular block of sets, specified by the base
	   set number and number of sets.  Can handle wrap-around, if that's
	   needed.  */

	int dummy_buffer_base_set;
	unsigned long long eaddr, eaddr0, eaddr1;
	int j;
	int set_offset;

	dummy_buffer_base_set = ((int)&dummy_alloco_area &
				 cpu_data->dcache.entry_mask) >>
				 cpu_data->dcache.entry_shift;
	set_offset = sets_to_purge_base - dummy_buffer_base_set;

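	/*
	 * For each set to purge, compute the address within the dummy
	 * buffer that indexes that set.  set_offset is relative to the
	 * set the buffer itself starts in, taken modulo the number of
	 * sets in the cache.
	 */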
	for (j = 0; j < n_sets; j++, set_offset++) {
		set_offset &= (cpu_data->dcache.sets - 1);
		eaddr0 = (unsigned long long)dummy_alloco_area +
			(set_offset << cpu_data->dcache.entry_shift);

		/*
		 * Do one alloco which hits the required set per cache
		 * way.  For write-back mode, this will purge the #ways
		 * resident lines.  There's little point unrolling this
		 * loop because the allocos stall more if they're too
		 * close together.
		 */
		eaddr1 = eaddr0 + cpu_data->dcache.way_size *
				  cpu_data->dcache.ways;

		for (eaddr = eaddr0; eaddr < eaddr1;
		     eaddr += cpu_data->dcache.way_size) {
			__asm__ __volatile__ ("alloco %0, 0" : : "r" (eaddr));
			__asm__ __volatile__ ("synco"); /* TAKum03020 */
		}

		for (eaddr = eaddr0; eaddr < eaddr1;
		     eaddr += cpu_data->dcache.way_size) {
			/*
			 * Load from each address.  Required because
			 * alloco is a NOP if the cache is write-through.
			 */
			if (test_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags)))
				ctrl_inb(eaddr);
		}
	}

	/*
	 * Don't use OCBI to invalidate the lines.  That costs cycles
	 * directly.  If the dummy block is just left resident, it will
	 * naturally get evicted as required.
	 */
}

/*
 * Purge the entire contents of the dcache.  The most efficient way to
 * achieve this is to use alloco instructions on a region of unused
 * memory equal in size to the cache, thereby causing the current
 * contents to be discarded by natural eviction.  The alternative, namely
 * reading every tag, setting up a mapping for the corresponding page and
 * doing an OCBP for the line, would be much more expensive.
 */
static void sh64_dcache_purge_all(void)
{
	sh64_dcache_purge_sets(0, cpu_data->dcache.sets);
}

/* Assumes this address (+ (2**n_synbits) pages up from it) aren't used for
   anything else in the kernel */
#define MAGIC_PAGE0_START 0xffffffffec000000ULL

/* Purge the physical page 'paddr' from the cache.  It's known that any
 * cache lines requiring attention have the same page colour as the
 * address 'eaddr'.
 *
 * This relies on the fact that the D-cache matches on physical tags when
 * no virtual tag matches.  So we create an alias for the original page
 * and purge through that.  (Alternatively, we could have done this by
 * switching ASID to match the original mapping and purged through that,
 * but that involves ASID switching cost + probably a TLBMISS + refill
 * anyway.)
 */
static void sh64_dcache_purge_coloured_phy_page(unsigned long paddr,
					        unsigned long eaddr)
{
	unsigned long long magic_page_start;
	unsigned long long magic_eaddr, magic_eaddr_end;

	magic_page_start = MAGIC_PAGE0_START + (eaddr & CACHE_OC_SYN_MASK);

	/* As long as the kernel is not pre-emptible, this doesn't need to be
	   under cli/sti. */
	sh64_setup_dtlb_cache_slot(magic_page_start, get_asid(), paddr);

	magic_eaddr = magic_page_start;
	magic_eaddr_end = magic_eaddr + PAGE_SIZE;

	while (magic_eaddr < magic_eaddr_end) {
		/* Little point in unrolling this loop - the OCBPs are blocking
		   and won't go any quicker (i.e. the loop overhead is parallel
		   to part of the OCBP execution.) */
		__asm__ __volatile__ ("ocbp %0, 0" : : "r" (magic_eaddr));
		magic_eaddr += L1_CACHE_BYTES;
	}

	sh64_teardown_dtlb_cache_slot();
}

/*
 * Purge a page given its physical start address, by creating a temporary
 * 1 page mapping and purging across that.  Even if we know the virtual
 * address (& vma or mm) of the page, the method here is more elegant
 * because it avoids issues of coping with page faults on the purge
 * instructions (i.e. no special-case code required in the critical path
 * in the TLB miss handling).
 */
static void sh64_dcache_purge_phy_page(unsigned long paddr)
{
	unsigned long long eaddr_start, eaddr, eaddr_end;
	int i;

	/* As long as the kernel is not pre-emptible, this doesn't need to be
	   under cli/sti. */
	eaddr_start = MAGIC_PAGE0_START;
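	/* The page's mapping colour isn't known here, so purge through
	   every one of the (1 << CACHE_OC_N_SYNBITS) possible colour
	   aliases in turn. */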
	for (i = 0; i < (1 << CACHE_OC_N_SYNBITS); i++) {
		sh64_setup_dtlb_cache_slot(eaddr_start, get_asid(), paddr);

		eaddr = eaddr_start;
		eaddr_end = eaddr + PAGE_SIZE;
		while (eaddr < eaddr_end) {
			__asm__ __volatile__ ("ocbp %0, 0" : : "r" (eaddr));
			eaddr += L1_CACHE_BYTES;
		}

		sh64_teardown_dtlb_cache_slot();
		eaddr_start += PAGE_SIZE;
	}
}

static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
				unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	spinlock_t *ptl;
	unsigned long paddr;

	if (!mm)
		return; /* No way to find physical address of page */

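	/* The caller guarantees that [addr, end) lies within a single pmd
	   (see sh64_dcache_purge_user_range()), so one descent of the page
	   tables is enough. */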
	pgd = pgd_offset(mm, addr);
	if (pgd_bad(*pgd))
		return;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || pud_bad(*pud))
		return;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	do {
		entry = *pte;
		if (pte_none(entry) || !pte_present(entry))
			continue;
		paddr = pte_val(entry) & PAGE_MASK;
		sh64_dcache_purge_coloured_phy_page(paddr, addr);
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
}

/*
 * There are at least 5 choices for the implementation of this, with
 * pros (+), cons(-), comments(*):
 *
 * 1. ocbp each line in the range through the original user's ASID
 *    + no lines spuriously evicted
 *    - tlbmiss handling (must either handle faults on demand => extra
 *	special-case code in tlbmiss critical path), or map the page in
 *	advance (=> flush_tlb_range in advance to avoid multiple hits)
 *    - ASID switching
 *    - expensive for large ranges
 *
 * 2. temporarily map each page in the range to a special effective
 *    address and ocbp through the temporary mapping; relies on the
 *    fact that SH-5 OCB* always do TLB lookup and match on ptags (they
 *    never look at the etags)
 *    + no spurious evictions
 *    - expensive for large ranges
 *    * surely cheaper than (1)
 *
 * 3. walk all the lines in the cache, check the tags, if a match
 *    occurs create a page mapping to ocbp the line through
 *    + no spurious evictions
 *    - tag inspection overhead
 *    - (especially for small ranges)
 *    - potential cost of setting up/tearing down page mapping for
 *	every line that matches the range
 *    * cost partly independent of range size
 *
 * 4. walk all the lines in the cache, check the tags, if a match
 *    occurs use 4 * alloco to purge the line (+3 other probably
 *    innocent victims) by natural eviction
 *    + no tlb mapping overheads
 *    - spurious evictions
 *    - tag inspection overhead
 *
 * 5. implement like flush_cache_all
 *    + no tag inspection overhead
 *    - spurious evictions
 *    - bad for small ranges
 *
 * (1) can be ruled out as more expensive than (2).  (2) appears best
 * for small ranges.  The choice between (3), (4) and (5) for large
 * ranges and the range size for the large/small boundary need
 * benchmarking to determine.
 *
 * For now use approach (2) for small ranges and (5) for large ones.
 */
static void sh64_dcache_purge_user_range(struct mm_struct *mm,
			  unsigned long start, unsigned long end)
{
	int n_pages = ((end - start) >> PAGE_SHIFT);

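	/* Fall back to a full purge for big ranges, or whenever the range
	   straddles a pmd boundary: sh64_dcache_purge_user_pages() only
	   walks a single page-table page. */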
	if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) {
		sh64_dcache_purge_all();
	} else {
		/* Small range, covered by a single page table page */
		start &= PAGE_MASK;	/* should already be so */
		end = PAGE_ALIGN(end);	/* should already be so */
		sh64_dcache_purge_user_pages(mm, start, end);
	}
}

/*
 * Purge the range of addresses from the D-cache.
 *
 * The addresses lie in the superpage mapping. There's no harm if we
 * overpurge at either end - just a small performance loss.
 */
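/*
 * The (unsigned long long)(signed long long)(signed long) cast chains
 * below sign-extend the 32-bit pointer so that the resulting 64-bit
 * effective address falls within the superpage mapping.
 */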
void __flush_purge_region(void *start, int size)
{
	unsigned long long ullend, addr, aligned_start;

	aligned_start = (unsigned long long)(signed long long)(signed long) start;
	addr = L1_CACHE_ALIGN(aligned_start);
	ullend = (unsigned long long) (signed long long) (signed long) start + size;

	while (addr <= ullend) {
		__asm__ __volatile__ ("ocbp %0, 0" : : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}

void __flush_wback_region(void *start, int size)
{
	unsigned long long ullend, addr, aligned_start;

	aligned_start = (unsigned long long)(signed long long)(signed long) start;
	addr = L1_CACHE_ALIGN(aligned_start);
	ullend = (unsigned long long) (signed long long) (signed long) start + size;

	while (addr < ullend) {
		__asm__ __volatile__ ("ocbwb %0, 0" : : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}

void __flush_invalidate_region(void *start, int size)
{
	unsigned long long ullend, addr, aligned_start;

	aligned_start = (unsigned long long)(signed long long)(signed long) start;
	addr = L1_CACHE_ALIGN(aligned_start);
	ullend = (unsigned long long) (signed long long) (signed long) start + size;

	while (addr < ullend) {
		__asm__ __volatile__ ("ocbi %0, 0" : : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}
#endif /* !CONFIG_DCACHE_DISABLED */

/*
 * Invalidate the entire contents of both caches, after writing back to
 * memory any dirty data from the D-cache.
 */
void flush_cache_all(void)
{
	sh64_dcache_purge_all();
	sh64_icache_inv_all();
}

/*
 * Invalidate an entire user-address space from both caches, after
 * writing back dirty data (e.g. for shared mmap etc).
 *
 * This could be coded selectively by inspecting all the tags then
 * doing 4*alloco on any set containing a match (as for
 * flush_cache_range), but fork/exit/execve (where this is called from)
 * are expensive anyway.
 *
 * Have to do a purge here, despite the comments re I-cache below.
 * There could be odd-coloured dirty data associated with the mm still
 * in the cache - if this gets written out through natural eviction
 * after the kernel has reused the page there will be chaos.
 *
 * The mm being torn down won't ever be active again, so any Icache
 * lines tagged with its ASID won't be visible for the rest of the
 * lifetime of this ASID cycle.  Before the ASID gets reused, there
 * will be a flush_cache_all.  Hence we don't need to touch the
 * I-cache.  This is similar to the lack of action needed in
 * flush_tlb_mm - see fault.c.
 */
void flush_cache_mm(struct mm_struct *mm)
{
	sh64_dcache_purge_all();
}

/*
 * Invalidate (from both caches) the range [start,end) of virtual
 * addresses from the user address space specified by mm, after writing
 * back any dirty data.
 *
 * Note, 'end' is 1 byte beyond the end of the range to flush.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	sh64_dcache_purge_user_range(mm, start, end);
	sh64_icache_inv_user_page_range(mm, start, end);
}

/*
 * Invalidate any entries in either cache for the vma within the user
 * address space vma->vm_mm for the page starting at virtual address
 * 'eaddr'.   This seems to be used primarily in breaking COW.  Note,
 * the I-cache must be searched too in case the page in question is
 * both writable and being executed from (e.g. stack trampolines.)
 *
 * Note, this is called with pte lock held.
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr,
		      unsigned long pfn)
{
	sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);

	if (vma->vm_flags & VM_EXEC)
		sh64_icache_inv_user_page(vma, eaddr);
}

void flush_dcache_page(struct page *page)
{
	sh64_dcache_purge_phy_page(page_to_phys(page));
	wmb();
}

/*
 * Flush the range [start,end] of kernel virtual address space from
 * the I-cache.  The corresponding range must be purged from the
 * D-cache also because the SH-5 doesn't have cache snooping between
 * the caches.  The addresses will be visible through the superpage
 * mapping, therefore it's guaranteed that there are no cache entries
 * for the range in cache sets of the wrong colour.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	/* __flush_purge_region() takes a length, not an end address */
	__flush_purge_region((void *)start, end - start);
	wmb();
	sh64_icache_inv_kernel_range(start, end);
}
682 
683 /*
684  * Flush the range of user (defined by vma->vm_mm) address space starting
685  * at 'addr' for 'len' bytes from the cache.  The range does not straddle
686  * a page boundary, the unique physical page containing the range is
687  * 'page'.  This seems to be used mainly for invalidating an address
688  * range following a poke into the program text through the ptrace() call
689  * from another process (e.g. for BRK instruction insertion).
690  */
flush_icache_user_range(struct vm_area_struct * vma,struct page * page,unsigned long addr,int len)691 void flush_icache_user_range(struct vm_area_struct *vma,
692 			struct page *page, unsigned long addr, int len)
693 {
694 
695 	sh64_dcache_purge_coloured_phy_page(page_to_phys(page), addr);
696 	mb();
697 
698 	if (vma->vm_flags & VM_EXEC)
699 		sh64_icache_inv_user_small_range(vma->vm_mm, addr, len);
700 }
701 
702 /*
703  * For the address range [start,end), write back the data from the
704  * D-cache and invalidate the corresponding region of the I-cache for the
705  * current process.  Used to flush signal trampolines on the stack to
706  * make them executable.
707  */
flush_cache_sigtramp(unsigned long vaddr)708 void flush_cache_sigtramp(unsigned long vaddr)
709 {
710 	unsigned long end = vaddr + L1_CACHE_BYTES;
711 
712 	__flush_wback_region((void *)vaddr, L1_CACHE_BYTES);
713 	wmb();
714 	sh64_icache_inv_current_user_range(vaddr, end);
715 }

#ifdef CONFIG_MMU
/*
 * These *MUST* lie in an area of virtual address space that's otherwise
 * unused.
 */
#define UNIQUE_EADDR_START 0xe0000000UL
#define UNIQUE_EADDR_END   0xe8000000UL
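/*
 * This gives a 128 MiB window; sh64_make_unique_eaddr() below advances
 * through it in steps of (PAGE_SIZE << CACHE_OC_N_SYNBITS) and purges
 * the whole D-cache each time the window wraps.
 */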

/*
 * Given a physical address paddr, and a user virtual address user_eaddr
 * which will eventually be mapped to it, create a one-off kernel-private
 * eaddr mapped to the same paddr.  This is used for creating special
 * destination pages for copy_user_page and clear_user_page.
 */
static unsigned long sh64_make_unique_eaddr(unsigned long user_eaddr,
					    unsigned long paddr)
{
	static unsigned long current_pointer = UNIQUE_EADDR_START;
	unsigned long coloured_pointer;

	if (current_pointer == UNIQUE_EADDR_END) {
		sh64_dcache_purge_all();
		current_pointer = UNIQUE_EADDR_START;
	}

	coloured_pointer = (current_pointer & ~CACHE_OC_SYN_MASK) |
				(user_eaddr & CACHE_OC_SYN_MASK);
	sh64_setup_dtlb_cache_slot(coloured_pointer, get_asid(), paddr);

	current_pointer += (PAGE_SIZE << CACHE_OC_N_SYNBITS);

	return coloured_pointer;
}
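
/*
 * Note that sh64_make_unique_eaddr() leaves the wired DTLB slot set up
 * (with interrupts disabled); each caller must invoke
 * sh64_teardown_dtlb_cache_slot() once it has finished with the
 * returned address, as the two helpers below do.
 */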

static void sh64_copy_user_page_coloured(void *to, void *from,
					 unsigned long address)
{
	void *coloured_to;

	/*
	 * Discard any existing cache entries of the wrong colour.  These are
	 * present quite often, if the kernel has recently used the page
	 * internally, then given it up, then it's been allocated to the user.
	 */
	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to);

	coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to));
	/* copy_page(to, from): copy into the coloured alias of 'to' */
	copy_page(coloured_to, from);

	sh64_teardown_dtlb_cache_slot();
}

static void sh64_clear_user_page_coloured(void *to, unsigned long address)
{
	void *coloured_to;

	/*
	 * Discard any existing kernel-originated lines of the wrong
	 * colour (as above)
	 */
	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to);

	coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to));
	clear_page(coloured_to);

	sh64_teardown_dtlb_cache_slot();
}

/*
 * 'from' and 'to' are kernel virtual addresses (within the superpage
 * mapping of the physical RAM).  'address' is the user virtual address
 * where the copy 'to' will subsequently be mapped.  This allows a custom
 * mapping to be used to ensure that the new copy is placed in the
 * right cache sets for the user to see it without having to bounce it
 * out via memory.  Note however : the call to flush_page_to_ram in
 * (generic)/mm/memory.c:(break_cow) undoes all this good work in that one
 * very important case!
 *
 * TBD : can we guarantee that on every call, any cache entries for
 * 'from' are in the same colour sets as 'address' also?  i.e. is this
 * always used just to deal with COW?  (I suspect not).
 *
 * There are two possibilities here for when the page 'from' was last accessed:
 * - by the kernel : this is OK, no purge required.
 * - by the/a user (e.g. for break_COW) : need to purge.
 *
 * If the potential user mapping at 'address' is the same colour as
 * 'from' there is no need to purge any cache lines from the 'from'
 * page mapped into cache sets of colour 'address'.  (The copy will be
 * accessing the page through 'from').
 */
void copy_user_page(void *to, void *from, unsigned long address,
		    struct page *page)
{
	if (((address ^ (unsigned long) from) & CACHE_OC_SYN_MASK) != 0)
		sh64_dcache_purge_coloured_phy_page(__pa(from), address);

	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0)
		copy_page(to, from);
	else
		sh64_copy_user_page_coloured(to, from, address);
}

/*
 * 'to' is a kernel virtual address (within the superpage mapping of the
 * physical RAM).  'address' is the user virtual address where the 'to'
 * page will subsequently be mapped.  This allows a custom mapping to be
 * used to ensure that the new copy is placed in the right cache sets for
 * the user to see it without having to bounce it out via memory.
 */
void clear_user_page(void *to, unsigned long address, struct page *page)
{
	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0)
		clear_page(to);
	else
		sh64_clear_user_page_coloured(to, address);
}
#endif