
Lines Matching +full:armv8 +full:- +full:based

1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * Based on arch/arm/include/asm/tlbflush.h
5 * Copyright (C) 1999-2003 Russell King
68 * - 4KB : 1
69 * - 16KB : 2
70 * - 64KB : 3
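These codes populate the TG (translation granule) field of TTL hints and range operands. A minimal sketch of deriving the value from PAGE_SIZE (the helper name and SZ_* constants follow common kernel conventions; treat this as an illustration, not the file's exact code):

    /* Map the configured page size to the TG encoding listed above. */
    static inline unsigned long get_trans_granule(void)
    {
        switch (PAGE_SIZE) {
        case SZ_4K:
            return 1;
        case SZ_16K:
            return 2;
        case SZ_64K:
            return 3;
        default:
            return 0;    /* unknown granule: encode "no hint" */
        }
    }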
91 * Level-based TLBI operations.
93 * When ARMv8.4-TTL exists, TLBI operations take an additional hint for the level at which the invalidation must take place. If the level cannot be easily determined, a 0 value for the level parameter will
97 * perform a non-hinted invalidation.
99 * For Stage-2 invalidation, use the level values provided to that effect in asm/stage2_pgtable.h.
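As a sketch of how the hint is applied, the level can be folded into bits [47:44] of the TLBI VA operand (granule in [47:46], level in [45:44], per the ARMv8.4-TTL layout); the helper name here is illustrative and reuses the get_trans_granule() sketch above:

    #define TLBI_TTL_MASK    GENMASK_ULL(47, 44)

    /* Fold a translation-table-level hint into a TLBI VA operand. */
    static inline unsigned long tlbi_encode_ttl(unsigned long arg,
                                                unsigned int level)
    {
        unsigned long ttl;

        if (!level)
            return arg;    /* level 0: perform a non-hinted invalidation */

        ttl = (level & 3) | (get_trans_granule() << 2);
        arg &= ~TLBI_TTL_MASK;
        arg |= ttl << 44;
        return arg;
    }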
127 * +----------+------+-------+-------+-------+----------------------+
128 * |   ASID   |  TG  | SCALE |  NUM  |  TTL  |        BADDR         |
129 * +-----------------+-------+-------+-------+----------------------+
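The operation then invalidates [BADDR, BADDR + (NUM + 1) * 2^(5*SCALE + 1) * PAGESIZE). A hedged sketch of packing these fields into a range-TLBI operand (the function name is invented for illustration; bit positions follow the table: BADDR [36:0], TTL [38:37], NUM [43:39], SCALE [45:44], TG [47:46], ASID [63:48]):

    static inline unsigned long tlbi_vaddr_range(unsigned long addr,
                                                 unsigned long asid,
                                                 unsigned long scale,
                                                 unsigned long num,
                                                 unsigned long ttl)
    {
        unsigned long ta = (addr >> PAGE_SHIFT) & GENMASK_ULL(36, 0);

        ta |= ttl << 37;
        ta |= num << 39;
        ta |= scale << 44;
        ta |= get_trans_granule() << 46;    /* TG, from the sketch above */
        ta |= asid << 48;
        return ta;
    }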
154 * Generate 'num' values from -1 to 30 with -1 rejected by the __flush_tlb_range() loop below.
159 #define __TLBI_RANGE_NUM(pages, scale) ((((pages) >> (5 * (scale) + 1)) & TLBI_RANGE_MASK) - 1)
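Worked example, assuming 4KB pages: for pages = 512 and scale = 1, the macro yields ((512 >> 6) & 0x1f) - 1 = 8 - 1 = 7; a single range operation with SCALE = 1, NUM = 7 then covers (7 + 1) * 2^(5*1 + 1) = 512 pages, i.e. the whole 2MB run in one TLBI.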
165 * This header file implements the low-level TLB invalidation routines for arm64 (sometimes referred to as "flushing" in the kernel documentation).
170 * DSB ISHST // Ensure prior page-table updates have completed
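This DSB is the first step of the four-step template that every invalidation in this file follows; the remaining steps (reconstructed from the same comment block) look roughly like this when written with the kernel's barrier helpers, using 'vae1is' as an example operation:

    dsb(ishst);              /* ensure prior page-table updates have completed */
    __tlbi(vae1is, addr);    /* invalidate the TLB entry */
    dsb(ish);                /* ensure the TLBI has completed */
    isb();                   /* discard instructions fetched from the old mapping */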
178 * The following functions form part of the "core" TLB invalidation API, as documented in Documentation/core-api/cachetlb.rst:
188 * Invalidate the virtual-address range '[start, end)' on all
189 * CPUs for the user address space corresponding to 'vma->vm_mm'.
190 * Note that this operation also invalidates any walk-cache entries associated with translations for the specified address range.
202 * Invalidate a single user mapping for address 'addr' in the address space corresponding to 'vma->vm_mm'. Note that this
203 * operation only invalidates a single, last-level page-table
204 * entry and therefore does not affect any walk-caches.
215 * CPUs, ensuring that any walk-cache entries associated with the translation are also invalidated.
219 * Invalidate the virtual-address range '[start, end)' on all
220 * CPUs for the user address space corresponding to 'vma->vm_mm'.
222 * The invalidation operations are issued at a granularity determined by 'stride' and only affect walk-cache entries if 'last_level' is false (see the example call below).
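As a purely hypothetical call matching this description (the parameter order (vma, start, end, stride, last_level) is inferred, not taken from the matched lines), invalidating one 2MB block at PMD granularity without touching walk-caches might look like:

    __flush_tlb_range(vma, addr, addr + PMD_SIZE, PMD_SIZE, true);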
263 addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm)); in flush_tlb_page_nosync()
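A sketch of what __TLBI_VADDR produces for this call (the TLBI VA operand carries the page number of the VA, in 4KB units, in its low bits and the ASID in bits [63:48]):

    /* Illustrative expansion of __TLBI_VADDR(uaddr, asid): */
    unsigned long ta = (uaddr >> 12) & GENMASK_ULL(43, 0);
    ta |= (unsigned long)asid << 48;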
276 * This is meant to avoid soft lock-ups on large TLB flushing ranges and not necessarily a performance improvement.
292 pages = (end - start) >> PAGE_SHIFT; in __flush_tlb_range()
296 * Without TLB range ops, we can handle up to (MAX_TLBI_OPS - 1) pages; in __flush_tlb_range()
298 * with TLB range ops, we can handle up to (MAX_TLBI_RANGE_PAGES - 1) pages. in __flush_tlb_range()
301 (end - start) >= (MAX_TLBI_OPS * stride)) || in __flush_tlb_range()
303 flush_tlb_mm(vma->vm_mm); in __flush_tlb_range()
308 asid = ASID(vma->vm_mm); in __flush_tlb_range()
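The three fragments above belong to the guard at the top of __flush_tlb_range(); reconstructed under the assumption that the surrounding control flow matches these pieces, it reads roughly:

    /* Range too large for per-page ops? Flush the whole ASID instead. */
    if ((!system_supports_tlb_range() &&
         (end - start) >= (MAX_TLBI_OPS * stride)) ||
        pages >= MAX_TLBI_RANGE_PAGES) {
        flush_tlb_mm(vma->vm_mm);
        return;
    }

    dsb(ishst);
    asid = ASID(vma->vm_mm);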
315 * 1. If 'pages' is odd, flush the first page through non-range operations; in __flush_tlb_range()
340 pages -= stride >> PAGE_SHIFT; in __flush_tlb_range()
356 pages -= __TLBI_RANGE_PAGES(num, scale); in __flush_tlb_range()
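A condensed sketch of the loop these two decrements belong to (locals as in the surrounding function, with scale starting at 0; the user-address variants and last_level handling are elided):

    while (pages > 0) {
        if (!system_supports_tlb_range() || pages % 2 == 1) {
            /* Rule 1: peel one 'stride' off with a non-range op. */
            addr = __TLBI_VADDR(start, asid);
            __tlbi(vae1is, addr);
            start += stride;
            pages -= stride >> PAGE_SHIFT;
            continue;
        }

        /* Rule 2: one range op covers (num + 1) << (5*scale + 1) pages. */
        num = __TLBI_RANGE_NUM(pages, scale);
        if (num >= 0) {
            addr = __TLBI_VADDR_RANGE(start, asid, scale, num, tlb_level);
            __tlbi(rvae1is, addr);
            start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT;
            pages -= __TLBI_RANGE_PAGES(num, scale);
        }
        scale++;
    }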
367 * We cannot use leaf-only invalidation here, since we may be invalidating table entries as part of collapsing hugepages or moving page tables. in flush_tlb_range()
378 if ((end - start) > (MAX_TLBI_OPS * PAGE_SIZE)) { in flush_tlb_kernel_range()
387 for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) in flush_tlb_kernel_range()
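A reconstruction of the surrounding function, assuming it follows the standard template above (the loop steps by 1 << (PAGE_SHIFT - 12) because the TLBI operand counts the VA in 4KB units regardless of PAGE_SIZE):

    static inline void flush_tlb_kernel_range(unsigned long start,
                                              unsigned long end)
    {
        unsigned long addr;

        if ((end - start) > (MAX_TLBI_OPS * PAGE_SIZE)) {
            flush_tlb_all();    /* range too large: invalidate everything */
            return;
        }

        start = __TLBI_VADDR(start, 0);
        end = __TLBI_VADDR(end, 0);

        dsb(ishst);
        for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
            __tlbi(vaale1is, addr);    /* kernel VA, all ASIDs, last level */
        dsb(ish);
        isb();
    }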