Lines matching "tlb" in include/asm-generic/tlb.h

/* include/asm-generic/tlb.h
 *
 * Generic TLB shootdown code
 *
 * The mmu_gather API implements the correct and efficient ordering of
 * freeing pages and TLB invalidations:
 *
 *	1) unhook the page from the page tables
 *	2) TLB invalidate page
 *	3) free the page
 *
 * tlb_finish_mmu() in particular will issue a (final) TLB invalidate and
 * free all (remaining) queued pages.
 *
 * tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets the
 *			     gathered range)
 * tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
 *		     whatever pages are still batched
 *
 * mmu_gather::need_flush_all can be set by the arch code to force-flush
 * the entire TLB irrespective of the range; for instance, x86-PAE needs
 * this when changing top-level entries.
 *
 * tlb_get_unmap_shift() / tlb_get_unmap_size()
 *	returns the smallest TLB entry size unmapped in this range.
 *
 * Tracking the page size might be useful if your architecture has
 * size-specific TLB invalidation instructions. RCU-deferred table freeing
 * is useful if your architecture doesn't use IPIs for remote TLB
 * invalidates and therefore doesn't naturally serialize with software
 * page-table walkers. Skipping the page gather entirely is useful if your
 * architecture already flushes TLB entries in the flush_cache_range()
 * path.
 */
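/*
 * To make the ordering above concrete, a minimal sketch of the intended
 * call pattern follows. The caller is hypothetical, and the range-taking
 * tlb_gather_mmu()/tlb_finish_mmu() signatures shown are the older ones;
 * newer kernels take just (tlb, mm) and (tlb) respectively.
 */
static void unmap_region_sketch(struct mm_struct *mm,
				struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);
	tlb_start_vma(&tlb, vma);
	/*
	 * 1) unhook pages: clear the PTEs, and for each cleared entry
	 *	tlb_remove_tlb_entry(&tlb, ptep, addr);	 record the range
	 *	tlb_remove_page(&tlb, page);		 queue the free
	 */
	tlb_end_vma(&tlb, vma);		  /* 2) TLB invalidate at the boundary */
	tlb_finish_mmu(&tlb, start, end); /* 2+3) final invalidate, then free */
}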
/* With RCU-deferred table freeing configured: */
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
/* ... otherwise, table pages are freed like ordinary pages: */
#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))

extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
				   int page_size);

/* mmu_gather::need_flush_all: we have performed an operation which
 * requires a complete flush of the tlb. */

void tlb_flush_mmu(struct mmu_gather *tlb);
static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
	tlb->freed_tables = 0;
	tlb->cleared_ptes = 0;
	tlb->cleared_pmds = 0;
	tlb->cleared_puds = 0;
	tlb->cleared_p4ds = 0;
}
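/*
 * The min/max accumulation in __tlb_adjust_range() is easy to check in
 * isolation. Below is a standalone userspace analogue (illustration
 * only; the toy_* names are invented):
 */
#include <stdio.h>

struct toy_gather { unsigned long start, end; };

static void toy_adjust(struct toy_gather *g, unsigned long address,
		       unsigned long size)
{
	g->start = address < g->start ? address : g->start;		/* min() */
	g->end = address + size > g->end ? address + size : g->end;	/* max() */
}

int main(void)
{
	struct toy_gather g = { .start = ~0UL, .end = 0 };	/* reset state */

	toy_adjust(&g, 0x5000, 0x1000);
	toy_adjust(&g, 0x1000, 0x1000);
	printf("flush range: [%#lx, %#lx)\n", g.start, g.end);
	/* prints: flush range: [0x1000, 0x6000) */
	return 0;
}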
/* MMU_GATHER_NO_RANGE variant: without an efficient flush_tlb_range(),
 * any pending range degrades to a full-mm flush. */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->end)
		flush_tlb_mm(tlb->mm);
}

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
/* Default tlb_flush(): a stack vm_area_struct carries just the flags
 * that flush_tlb_range() implementations look at. */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end) {
		struct vm_area_struct vma = {
			.vm_mm = tlb->mm,
			.vm_flags = (tlb->vma_exec ? VM_EXEC    : 0) |
				    (tlb->vma_huge ? VM_HUGETLB : 0),
		};

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}
static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/* flush_tlb_range() implementations that flush I-TLB also flush
	 * D-TLB, so it is OK to just add VM_EXEC to an existing range. */
	tlb->vma_huge = is_vm_hugetlb_page(vma);
	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
}

/* ... and the stub for configurations that don't use the default tlb_flush(): */
static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	/* Anything calling __tlb_adjust_range() also sets at least one of
	 * these bits, so the flush can be skipped if none are set. */
	if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
	      tlb->cleared_puds || tlb->cleared_p4ds))
		return;

	tlb_flush(tlb);
	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
	__tlb_reset_range(tlb);
}
static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, page, page_size))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

/* Like __tlb_remove_page(), but calls tlb_flush_mmu() itself when required. */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}
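/*
 * A sketch of driving the low-level primitive directly (hypothetical
 * helper); the loop mirrors what tlb_remove_page_size() does above. The
 * page is queued either way: a true return only means the batch is now
 * full and must be flushed before queueing more.
 */
static void queue_pages_sketch(struct mmu_gather *tlb,
			       struct page **pages, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		if (__tlb_remove_page(tlb, pages[i]))
			tlb_flush_mmu(tlb);	/* TLB invalidate + free batch */
	}
}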
static inline void tlb_change_page_size(struct mmu_gather *tlb,
					unsigned int page_size)
{
	if (tlb->page_size && tlb->page_size != page_size) {
		if (!tlb->fullmm && !tlb->need_flush_all)
			tlb_flush_mmu(tlb);
	}

	tlb->page_size = page_size;
}
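/*
 * Example (hypothetical teardown mixing page sizes in one gather):
 *
 *	tlb_change_page_size(tlb, PAGE_SIZE);	    about to batch 4K pages
 *	... queue the 4K pages ...
 *	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);  size changed: the 4K
 *						    batch is flushed first
 *						    (unless fullmm)
 */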
static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
	if (tlb->cleared_ptes)
		return PAGE_SHIFT;
	if (tlb->cleared_pmds)
		return PMD_SHIFT;
	if (tlb->cleared_puds)
		return PUD_SHIFT;
	if (tlb->cleared_p4ds)
		return P4D_SHIFT;
	return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	return 1UL << tlb_get_unmap_shift(tlb);
}
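/*
 * Worked reading of the two helpers, assuming x86-64 constants
 * (PAGE_SHIFT = 12, PMD_SHIFT = 21): after a teardown that set only
 * tlb->cleared_pmds,
 *	tlb_get_unmap_shift(tlb) == PMD_SHIFT		(21)
 *	tlb_get_unmap_size(tlb)	 == 1UL << PMD_SHIFT	(2 MiB)
 * so the architecture can invalidate at 2 MiB granularity instead of
 * looping over 4 KiB pages.
 */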
/* In the case of tlb vma handling, we can optimise these away when
 * doing a full-mm flush. */
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;
	tlb_update_vma_flags(tlb, vma);
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
}

static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;
	/* Do a TLB flush and reset the range at VMA boundaries; this avoids
	 * the range growing over the unused space between consecutive VMAs. */
	tlb_flush_mmu_tlbonly(tlb);
}
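/*
 * Why tlb_end_vma() flushes eagerly, with concrete (illustrative)
 * numbers: unmapping one VMA at 0x00400000 and another at
 * 0x7f0000000000 without an intermediate flush would grow
 * tlb->{start,end} into one huge range spanning both, turning the
 * eventual flush_tlb_range() into what is effectively a full-TLB flush.
 * Flushing and resetting at each VMA boundary keeps every flush
 * tightly bounded.
 */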
/*
 * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
 * and set the corresponding cleared_* bit.
 */
static inline void tlb_flush_pte_range(struct mmu_gather *tlb, unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_ptes = 1;
}

static inline void tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_pmds = 1;
}

static inline void tlb_flush_pud_range(struct mmu_gather *tlb, unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_puds = 1;
}

static inline void tlb_flush_p4d_range(struct mmu_gather *tlb, unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_p4ds = 1;
}
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)

/*
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 * Record the fact that ptes were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate. This helps when
 * userspace is unmapping already-unmapped pages.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb_flush_pte_range(tlb, address, PAGE_SIZE);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	do {							\
		unsigned long _sz = huge_page_size(h);		\
		if (_sz >= P4D_SIZE)				\
			tlb_flush_p4d_range(tlb, address, _sz);	\
		else if (_sz >= PUD_SIZE)			\
			tlb_flush_pud_range(tlb, address, _sz);	\
		else if (_sz >= PMD_SIZE)			\
			tlb_flush_pmd_range(tlb, address, _sz);	\
		else						\
			tlb_flush_pte_range(tlb, address, _sz);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
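/*
 * Worked case for tlb_remove_huge_tlb_entry(), assuming a 1 GiB hugetlb
 * page on x86-64: huge_page_size(h) == PUD_SIZE, which is >= PUD_SIZE
 * but below P4D_SIZE, so tlb_flush_pud_range() runs and
 * tlb->cleared_puds is set; tlb_get_unmap_shift() then reports
 * PUD_SHIFT (30).
 */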
/* tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it. */
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE);	\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)

/* tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it. */
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)

#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
		tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE);	\
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)
/* ... explicit flushing for that, likely *separate* from a regular TLB
 * entry flush ... */
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		tlb_flush_pmd_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)

#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		tlb_flush_pud_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)

#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		tlb_flush_p4d_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)

#define p4d_free_tlb(tlb, p4dp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__p4d_free_tlb(tlb, p4dp, address);		\
	} while (0)
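/*
 * The one-level shift in the macros above is deliberate: pte_free_tlb()
 * frees a PTE *table*, which means a PMD *entry* was cleared, hence
 * tlb_flush_pmd_range(); likewise pmd_free_tlb() clears a PUD entry and
 * pud_free_tlb() a P4D entry. Setting tlb->freed_tables additionally
 * tells architectures whose hardware walkers cache intermediate levels
 * that a table page itself went away, not just leaf entries.
 */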