Lines Matching +full:data +full:-mapping (all hits below come from the ARM cache-maintenance code in arch/arm/mm/flush.c)
4 * Copyright (C) 1995-2002 Russell King
51 : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero) in flush_pfn_alias()
58 unsigned long offset = vaddr & (PAGE_SIZE - 1); in flush_icache_alias()
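The two hits above (lines 51 and 58) come from the alias-window helpers near the top of arch/arm/mm/flush.c. As a sketch, reconstructed from a v4.x/v5.x tree, they look roughly as below; FLUSH_ALIAS_START, CACHE_COLOUR() and set_top_pte() are kernel-internal helpers, and exact details vary by version. The idea is to map the physical page at a kernel alias address with the same cache colour as the user address, so that cleaning the alias cleans the user-visible cache lines:

static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	/* Pick an alias address with the same cache colour as vaddr. */
	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

	asm(	"mcrr	p15, 0, %1, %0, c14\n"	/* clean+invalidate D range */
	"	mcr	p15, 0, %2, c7, c10, 4"	/* drain write buffer */
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)	/* line 51 above */
	    : "cc");
}

static void flush_icache_alias(unsigned long pfn, unsigned long vaddr,
			       unsigned long len)
{
	unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	unsigned long offset = vaddr & (PAGE_SIZE - 1);		/* line 58 above */
	unsigned long to;

	set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
	to = va + offset;
	flush_icache_range(to, to + len);
}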
97 if (vma->vm_flags & VM_EXEC) in flush_cache_range()
113 if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged()) in flush_cache_page()
148 /* VIPT non-aliasing D-cache */ in __flush_ptrace_access()
166 if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) in flush_ptrace_access()
168 if (vma->vm_flags & VM_EXEC) in flush_ptrace_access()
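The three hits at lines 148-168 belong to the ptrace access path. A sketch of how they fit together, reconstructed from this era of the file; treat the FLAG_PA_* names as an assumption, though the structure (a flag-gathering wrapper around a cache-type dispatch) matches the listing:

static void __flush_ptrace_access(struct page *page, unsigned long uaddr,
				  void *kaddr, unsigned long len,
				  unsigned int flags)
{
	if (cache_is_vivt()) {
		if (flags & FLAG_PA_CORE_IN_MM) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing D-cache (line 148 above) */
	if (flags & FLAG_PA_IS_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_vipt_aliasing())
			flush_icache_alias(page_to_pfn(page), uaddr, len);
		else
			__cpuc_coherent_kern_range(addr, addr + len);
	}
}

void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr, unsigned long len)
{
	unsigned int flags = 0;

	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
		flags |= FLAG_PA_CORE_IN_MM;	/* line 166 above */
	if (vma->vm_flags & VM_EXEC)
		flags |= FLAG_PA_IS_EXEC;	/* line 168 above */
	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}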
182 * Copy user data from/to a page which is mapped into a different
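The comment at line 182 heads copy_to_user_page(), whose body is short enough to quote almost in full (sketch; the CONFIG_SMP preemption guards exist because, as the surrounding comment notes, the flush must run on the current CPU):

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	preempt_disable();	/* stay on this CPU across the flush */
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}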
202 void __flush_dcache_page(struct address_space *mapping, struct page *page) in __flush_dcache_page() argument
205 * Writeback any data associated with the kernel mapping of this in __flush_dcache_page()
206 * page. This ensures that data in the physical page is mutually in __flush_dcache_page()
207  * coherent with the kernel's mapping. in __flush_dcache_page()
233 * we only need to do one flush - which would be at the relevant in __flush_dcache_page()
234 * userspace colour, which is congruent with page->index. in __flush_dcache_page()
236 if (mapping && cache_is_vipt_aliasing()) in __flush_dcache_page()
238 page->index << PAGE_SHIFT); in __flush_dcache_page()
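Reassembling the matched comment lines (205-238) into the function body gives roughly the sketch below (the highmem path is elided; page_size() is the newer-kernel spelling, older trees used PAGE_SIZE directly):

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	if (!PageHighMem(page)) {
		__cpuc_flush_dcache_area(page_address(page), page_size(page));
	} else {
		/* highmem: flush each subpage through a transient
		 * kmap_atomic()/kmap_high_get() mapping (elided) */
	}

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_SHIFT);
}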
241 static void __flush_dcache_aliases(struct address_space *mapping, struct page *page) in __flush_dcache_aliases() argument
243 struct mm_struct *mm = current->active_mm; in __flush_dcache_aliases()
249 * - VIVT cache: we need to also write back and invalidate all user in __flush_dcache_aliases()
250 * data in the current VM view associated with this page. in __flush_dcache_aliases()
251 * - aliasing VIPT: we only need to find one mapping of this page. in __flush_dcache_aliases()
253 pgoff = page->index; in __flush_dcache_aliases()
255 flush_dcache_mmap_lock(mapping); in __flush_dcache_aliases()
256 vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { in __flush_dcache_aliases()
262 if (mpnt->vm_mm != mm) in __flush_dcache_aliases()
264 if (!(mpnt->vm_flags & VM_MAYSHARE)) in __flush_dcache_aliases()
266 offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT; in __flush_dcache_aliases()
267 flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page)); in __flush_dcache_aliases()
269 flush_dcache_mmap_unlock(mapping); in __flush_dcache_aliases()
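The matches at lines 241-269 cover nearly all of __flush_dcache_aliases(); assembled into a readable whole, with the declarations and skip logic filled in from the file:

static void __flush_dcache_aliases(struct address_space *mapping,
				   struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: write back and invalidate all user data in the
	 *   current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index;

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/* VMAs in other address spaces, or without a shared
		 * mapping of the page, hold no aliases we must chase. */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;

		/* Translate the file offset back to the user address
		 * inside this VMA and flush that alias. */
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset,
				 page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}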
277 struct address_space *mapping; in __sync_icache_dcache() local
280 /* only flush non-aliasing VIPT caches for exec mappings */ in __sync_icache_dcache()
288 mapping = page_mapping_file(page); in __sync_icache_dcache()
290 mapping = NULL; in __sync_icache_dcache()
292 if (!test_and_set_bit(PG_dcache_clean, &page->flags)) in __sync_icache_dcache()
293 __flush_dcache_page(mapping, page); in __sync_icache_dcache()
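__sync_icache_dcache() is invoked from the PTE-set path. A sketch of how the matched lines (277-293) fit together (reconstructed, details vary by version): the mapping only matters for aliasing VIPT caches, and PG_dcache_clean defers the flush to the first time a dirty page is actually mapped:

void __sync_icache_dcache(pte_t pteval)
{
	unsigned long pfn;
	struct page *page;
	struct address_space *mapping;

	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
		/* only flush non-aliasing VIPT caches for exec mappings */
		return;
	pfn = pte_pfn(pteval);
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (cache_is_vipt_aliasing())
		mapping = page_mapping_file(page);
	else
		mapping = NULL;

	/* flush only if nobody has flushed this page since it was dirtied */
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);

	if (pte_exec(pteval))
		__flush_icache_all();
}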
301 * Ensure cache coherency between kernel mapping and userspace mapping
305 * - VIPT non-aliasing cache: fully coherent so nothing required.
306 * - VIVT: fully aliasing, so we need to handle every alias in our
308 * - VIPT aliasing: need to handle one alias in our current VM view.
321 struct address_space *mapping; in flush_dcache_page() local
331 if (test_bit(PG_dcache_clean, &page->flags)) in flush_dcache_page()
332 clear_bit(PG_dcache_clean, &page->flags); in flush_dcache_page()
336 mapping = page_mapping_file(page); in flush_dcache_page()
339 mapping && !page_mapcount(page)) in flush_dcache_page()
340 clear_bit(PG_dcache_clean, &page->flags); in flush_dcache_page()
342 __flush_dcache_page(mapping, page); in flush_dcache_page()
343 if (mapping && cache_is_vivt()) in flush_dcache_page()
344 __flush_dcache_aliases(mapping, page); in flush_dcache_page()
345 else if (mapping) in flush_dcache_page()
347 set_bit(PG_dcache_clean, &page->flags); in flush_dcache_page()
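Assembled from the fragments matched above, flush_dcache_page() implements exactly the three cases in the header comment; a sketch (cache_ops_need_broadcast() is the SMP check used in this era of the file):

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/* The zero page is never written to, so never has dirty
	 * cache lines and never needs flushing. */
	if (page == ZERO_PAGE(0))
		return;

	/* VIPT non-aliasing: nothing to flush, just mark lazily dirty */
	if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) {
		if (test_bit(PG_dcache_clean, &page->flags))
			clear_bit(PG_dcache_clean, &page->flags);
		return;
	}

	mapping = page_mapping_file(page);

	if (!cache_ops_need_broadcast() &&
	    mapping && !page_mapcount(page))
		/* no user mappings yet: defer to __sync_icache_dcache() */
		clear_bit(PG_dcache_clean, &page->flags);
	else {
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}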
353 * Ensure cache coherency for the kernel mapping of this page. We can
357 * space mappings, this is a no-op since the page was already marked
364 struct address_space *mapping; in flush_kernel_dcache_page() local
366 mapping = page_mapping_file(page); in flush_kernel_dcache_page()
368 if (!mapping || mapping_mapped(mapping)) { in flush_kernel_dcache_page()
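A sketch of the rest of flush_kernel_dcache_page() as I'd expect it in this era (treat the highmem detail as an assumption): the function only acts on aliasing caches, and only when the page has, or could have, user mappings.

void flush_kernel_dcache_page(struct page *page)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing()) {
		struct address_space *mapping;

		mapping = page_mapping_file(page);

		if (!mapping || mapping_mapped(mapping)) {
			void *addr = page_address(page);

			/* highmem pages without a permanent kernel
			 * mapping are handled by kunmap_atomic() */
			if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
		}
	}
}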
387 * can safely access the data. The expected sequence is:
390 * -> flush_anon_page
398 /* VIPT non-aliasing caches need do nothing */ in __flush_anon_page()
403 * Write back and invalidate userspace mapping. in __flush_anon_page()
418 * Invalidate kernel mapping. No data should be contained in __flush_anon_page()
419 * in this mapping of the page. FIXME: this is overkill in __flush_anon_page()
420 * since we actually ask for a write-back and invalidate. in __flush_anon_page()
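The get_user_pages() sequence described at lines 387-390 belongs to __flush_anon_page(); a sketch of its body (reconstructed; flush_pfn_alias() is the alias-window helper from line 51 above):

static void __flush_anon_page(struct vm_area_struct *vma,
			      struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/* aliasing VIPT: flush a congruent kernel alias instead */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}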