Lines Matching +full:d +full:- +full:tlb +full:- +full:sets
 * arch/sh/mm/cache-sh5.c
 * Copyright (C) 2003 - 2008 Paul Mundt
#include <asm/tlb.h>
/* Wired TLB entry for the D-cache */
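That wired entry is the backbone of the purge helpers further down: one DTLB slot is claimed up front and later re-pointed at whatever page needs a temporary mapping. A minimal sketch, assuming the sh64 helper sh64_get_wired_dtlb_entry() and mirroring the file's dtlb_cache_slot variable (the init-function name here is hypothetical):

	static unsigned long long dtlb_cache_slot;

	static void __init sh5_cache_init_slot(void)	/* hypothetical name */
	{
		/* Claim one wired DTLB entry once at init; the purge
		 * helpers below re-point it at whichever page they
		 * need mapped. */
		dtlb_cache_slot = sh64_get_wired_dtlb_entry();
	}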
/* Invalidate range of addresses [start,end] from the I-cache, where in sh64_icache_inv_kernel_range()
/* If we get called, we know that vma->vm_flags contains VM_EXEC. in sh64_icache_inv_user_page()
   Also, eaddr is page-aligned. */ in sh64_icache_inv_user_page()
/* Check whether we can use the current ASID for the I-cache in sh64_icache_inv_user_page()
   access_process_vm->flush_cache_page->here, (e.g. when reading from in sh64_icache_inv_user_page()
   Also, note the risk that we might get pre-empted between the ASID in sh64_icache_inv_user_page()
   pid->ASID mapping changes. However, the whole cache will get in sh64_icache_inv_user_page()
vma_asid = cpu_asid(cpu, vma->vm_mm); in sh64_icache_inv_user_page()
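The excerpts only show the ASID comparison being set up; the shape of the surrounding invalidation is roughly the following. This is a sketch: get_asid(), switch_and_save_asid() and the ICBI instruction are standard sh64 pieces, but the exact control flow is reconstructed, not quoted.

	running_asid = get_asid();
	vma_asid = cpu_asid(cpu, vma->vm_mm);
	if (running_asid != vma_asid) {
		/* Run the loop under the vma's ASID, with IRQs blocked
		 * so the pid->ASID mapping can't change underneath us. */
		local_irq_save(flags);
		switch_and_save_asid(vma_asid);
	}

	/* ICBI one cache line at a time across the page */
	for (addr = eaddr; addr < eaddr + PAGE_SIZE; addr += L1_CACHE_BYTES)
		__asm__ __volatile__("icbi %0, 0" : : "r" (addr));

	if (running_asid != vma_asid) {
		switch_and_save_asid(running_asid);
		local_irq_restore(flags);
	}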
/* Used for invalidating big chunks of I-cache, i.e. assume the range in sh64_icache_inv_user_page_range()
   the choice of algorithm. However, for the I-cache option (2) isn't in sh64_icache_inv_user_page_range()
   possible with the D-cache. Just assume 64 for now as a working in sh64_icache_inv_user_page_range()
n_pages = ((end - start) >> PAGE_SHIFT); in sh64_icache_inv_user_page_range()
after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK); in sh64_icache_inv_user_page_range()
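The two address computations bracket the range at page granularity: aligned_start rounds start down, and after_last_page_start is the first byte of the page following end - 1. A worked example with 4 KiB pages (values illustrative):

	/* start = 0x1234, end = 0x3001, PAGE_SIZE = 0x1000:
	 *   aligned_start         = 0x1234 & PAGE_MASK            = 0x1000
	 *   after_last_page_start = 0x1000 + (0x3000 & PAGE_MASK) = 0x4000
	 * i.e. three whole pages get invalidated, conservatively
	 * covering the partial pages at both ends. */
	aligned_start = start & PAGE_MASK;
	after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK);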
if (!vma || (aligned_start <= vma->vm_end)) { in sh64_icache_inv_user_page_range()
vma_end = vma->vm_end; in sh64_icache_inv_user_page_range()
if (vma->vm_flags & VM_EXEC) { in sh64_icache_inv_user_page_range()
aligned_start = vma->vm_end; /* Skip to start of next region */ in sh64_icache_inv_user_page_range()
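Assembled, those four excerpts are the skeleton of a vma walk: find the region covering the current page, invalidate it page-by-page only if it is executable, then hop to the next region. A sketch of that loop follows; note the excerpted guard (aligned_start <= vma->vm_end) reads inverted relative to the comment's intent, so the sketch tests for a gap before the region instead.

	while (aligned_start < after_last_page_start) {
		struct vm_area_struct *vma = find_vma(mm, aligned_start);

		if (!vma || aligned_start < vma->vm_start) {
			/* Page not covered by any region: step and retry */
			aligned_start += PAGE_SIZE;
			continue;
		}
		vma_end = vma->vm_end;
		if (vma->vm_flags & VM_EXEC) {
			for (eaddr = aligned_start; eaddr < vma_end;
			     eaddr += PAGE_SIZE)
				sh64_icache_inv_user_page(vma, eaddr);
		}
		aligned_start = vma->vm_end; /* Skip to start of next region */
	}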
TLB lookup. */ in sh64_icache_inv_current_user_range()
/* Just invalidate over the range using the natural addresses. TLB in sh64_icache_inv_current_user_range()
   invalidate another process's I-cache entries: no worries, just a in sh64_icache_inv_current_user_range()
   sets by natural eviction. -- RPC */
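The loop that comment describes is simply ICBI stepped through the range at cache-line granularity under whatever ASID is live, accepting the harmless spurious invalidations it mentions. A sketch:

	unsigned long addr = start & ~(L1_CACHE_BYTES - 1);

	while (addr < end) {
		/* ICBI matches on (eaddr, current ASID); a miss, or a
		 * hit on some other process's line, is harmless here. */
		__asm__ __volatile__("icbi %0, 0" : : "r" (addr));
		addr += L1_CACHE_BYTES;
	}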
/* Purge all ways in a particular block of sets, specified by the base in sh64_dcache_purge_sets()
   set number and number of sets. Can handle wrap-around, if that's in sh64_dcache_purge_sets()
cpu_data->dcache.entry_mask) >> in sh64_dcache_purge_sets()
cpu_data->dcache.entry_shift; in sh64_dcache_purge_sets()
set_offset = sets_to_purge_base - dummy_buffer_base_set; in sh64_dcache_purge_sets()
set_offset &= (cpu_data->dcache.sets - 1); in sh64_dcache_purge_sets()
(set_offset << cpu_data->dcache.entry_shift); in sh64_dcache_purge_sets()
 * way. For write-back mode, this will purge the #ways in sh64_dcache_purge_sets()
eaddr1 = eaddr0 + cpu_data->dcache.way_size * in sh64_dcache_purge_sets()
cpu_data->dcache.ways; in sh64_dcache_purge_sets()
eaddr += cpu_data->dcache.way_size) { in sh64_dcache_purge_sets()
eaddr1 = eaddr0 + cpu_data->dcache.way_size * in sh64_dcache_purge_sets()
cpu_data->dcache.ways; in sh64_dcache_purge_sets()
eaddr += cpu_data->dcache.way_size) { in sh64_dcache_purge_sets()
 * alloco is a NOP if the cache is write-through. in sh64_dcache_purge_sets()
if (test_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags))) in sh64_dcache_purge_sets()
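The arithmetic in those excerpts maps a victim set onto an address inside the wired dummy buffer, then touches that set once per way. A worked example with illustrative geometry (32-byte lines, so entry_shift == 5; 512 sets; suppose the buffer base happens to index set 3; dummy_base, way_size and ways are stand-in names):

	/* To purge set 10 when the dummy buffer base sits in set 3: */
	set_offset = (10 - 3) & (512 - 1);		/* == 7 */
	eaddr0 = dummy_base + (set_offset << 5);	/* indexes set 10 */

	/* Addresses at strides of way_size alias the same set, so one
	 * alloco per stride replaces every way of that set. */
	for (eaddr = eaddr0; eaddr < eaddr0 + way_size * ways;
	     eaddr += way_size)
		__asm__ __volatile__("alloco %0, 0" : : "r" (eaddr));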
sh64_dcache_purge_sets(0, cpu_data->dcache.sets); in sh64_dcache_purge_all()
 * This relies on the fact that the D-cache matches on physical tags when
/* As long as the kernel is not pre-emptible, this doesn't need to be in sh64_dcache_purge_coloured_phy_page()
/* Little point in unrolling this loop - the OCBPs are blocking in sh64_dcache_purge_coloured_phy_page()
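For context, the blocking-OCBP loop the comment refers to purges one coloured alias of the page through the wired DTLB slot. A reconstruction, not a verbatim quote: the setup/teardown helper names follow this file's conventions but are quoted from memory, so treat them as assumptions.

	/* Map the target physical page at an eaddr of the right colour,
	 * then push each of its lines out with OCBP. */
	sh64_setup_dtlb_cache_slot(magic_eaddr, get_asid(), paddr);
	for (addr = magic_eaddr; addr < magic_eaddr + PAGE_SIZE;
	     addr += L1_CACHE_BYTES)
		__asm__ __volatile__("ocbp %0, 0" : : "r" (addr));
	sh64_teardown_dtlb_cache_slot();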
 * instructions (i.e. no special-case code required in the critical path
 * of the TLB miss handling).
/* As long as the kernel is not pre-emptible, this doesn't need to be in sh64_dcache_purge_phy_page()
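sh64_dcache_purge_phy_page() cannot know which eaddrs mapped the page, so it repeats the coloured purge for every possible colour. A sketch (CACHE_OC_N_SYNBITS names the synonym bits per the sh64 cache headers; eaddr_base is a stand-in and the loop shape is reconstructed):

	/* One pass per cache colour: any line holding data from paddr
	 * must sit in one of these alias positions. */
	for (i = 0; i < (1 << CACHE_OC_N_SYNBITS); i++)
		sh64_dcache_purge_coloured_phy_page(paddr,
					eaddr_base + i * PAGE_SIZE);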
pte_unmap_unlock(pte - 1, ptl); in sh64_dcache_purge_user_pages()
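That pte_unmap_unlock() closes a per-pte walk over one page-table page. A sketch of the loop it ends (helper names per mainline mm; a reconstruction, not a quote):

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	do {
		pte_t entry = *pte;

		/* Only mapped pages can have lines in the cache */
		if (pte_present(entry))
			sh64_dcache_purge_coloured_phy_page(
				pte_val(entry) & PAGE_MASK, addr);
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);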
 * pros (+), cons (-), comments (*):
 * - tlbmiss handling (must either handle faults on demand => extra
 *   special-case code in tlbmiss critical path), or map the page in
 * - ASID switching
 * - expensive for large ranges
 * fact that SH-5 OCB* always do TLB lookup and match on ptags (they
 * - expensive for large ranges
 * - tag inspection overhead
 *   (especially for small ranges)
 * - potential cost of setting up/tearing down page mapping for
 * + no tlb mapping overheads
 * - spurious evictions
 * - tag inspection overhead
 * - spurious evictions
 * - bad for small ranges
int n_pages = ((end - start) >> PAGE_SHIFT); in sh64_dcache_purge_user_range()
if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) { in sh64_dcache_purge_user_range()
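That guard is the policy the cost list above boils down to: (start ^ (end - 1)) & PMD_MASK is nonzero exactly when start and end - 1 fall in different PMD-sized regions, i.e. when the range is not covered by a single page-table page. A sketch of the whole decision, using only helpers excerpted elsewhere in this listing:

	if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) {
		/* Big or PMD-crossing range: purging every set once is
		 * cheaper than touching each page individually. */
		sh64_dcache_purge_all();
	} else {
		/* Small range, covered by a single page-table page */
		sh64_dcache_purge_user_pages(mm, start, end);
	}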
 * memory any dirty data from the D-cache.
 * Invalidate an entire user-address space from both caches, after
 * Have to do a purge here, despite the comments re I-cache below.
 * There could be odd-coloured dirty data associated with the mm still
 * in the cache - if this gets written out through natural eviction
 * I-cache. This is similar to the lack of action needed in
 * flush_tlb_mm - see fault.c.
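That reasoning is why the whole-mm flush collapses to a single full D-cache purge with no per-page or I-cache work; the handler is effectively:

	static void sh5_flush_cache_mm(void *unused)
	{
		sh64_dcache_purge_all();
	}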
vma = data->vma; in sh5_flush_cache_range()
start = data->addr1; in sh5_flush_cache_range()
end = data->addr2; in sh5_flush_cache_range()
sh64_dcache_purge_user_range(vma->vm_mm, start, end); in sh5_flush_cache_range()
sh64_icache_inv_user_page_range(vma->vm_mm, start, end); in sh5_flush_cache_range()
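The data pointer unpacked here follows the arch/sh flusher convention: callers bundle the arguments into a struct flusher_data and dispatch the flusher on each CPU. A sketch of the calling side (cacheop_on_each_cpu() and struct flusher_data are the arch/sh mechanisms; the direct call shown is an illustrative simplification):

	struct flusher_data data = {
		.vma   = vma,
		.addr1 = start,
		.addr2 = end,
	};

	/* Run the SH-5 flusher above on every CPU */
	cacheop_on_each_cpu(sh5_flush_cache_range, &data, 1);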
 * address space vma->vm_mm for the page starting at virtual address
 * the I-cache must be searched too in case the page in question is
vma = data->vma; in sh5_flush_cache_page()
eaddr = data->addr1; in sh5_flush_cache_page()
pfn = data->addr2; in sh5_flush_cache_page()
if (vma->vm_flags & VM_EXEC) in sh5_flush_cache_page()
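Put together, the body of sh5_flush_cache_page() is essentially: purge the physical page from the D-cache unconditionally, then search the I-cache only when the mapping is executable. A sketch using the helpers excerpted elsewhere in this listing:

	sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);

	if (vma->vm_flags & VM_EXEC)
		sh64_icache_inv_user_page(vma, eaddr);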
 * the I-cache. The corresponding range must be purged from the
 * D-cache also because the SH-5 doesn't have cache snooping between
 * the range in cache sets of the wrong colour.
start = data->addr1; in sh5_flush_icache_range()
end = data->addr2; in sh5_flush_icache_range()
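The ordering that comment block insists on: write the newly stored instructions back out of the D-cache first, then drop any stale I-cache lines over the same span. A sketch; the D-side helper name below is hypothetical, while sh64_icache_inv_kernel_range() is excerpted above.

	/* 1: push the freshly written code out of the D-cache ... */
	sh64_dcache_purge_kernel_range(start, end);	/* hypothetical name */
	/* 2: ... then invalidate the I-cache over the same range */
	sh64_icache_inv_kernel_range(start, end);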
 * D-cache and invalidate the corresponding region of the I-cache for the