Lines Matching refs:tlb

187 extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
195 #define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page)) argument
242 extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
303 void tlb_flush_mmu(struct mmu_gather *tlb);
305 static inline void __tlb_adjust_range(struct mmu_gather *tlb, in __tlb_adjust_range() argument
309 tlb->start = min(tlb->start, address); in __tlb_adjust_range()
310 tlb->end = max(tlb->end, address + range_size); in __tlb_adjust_range()
313 static inline void __tlb_reset_range(struct mmu_gather *tlb) in __tlb_reset_range() argument
315 if (tlb->fullmm) { in __tlb_reset_range()
316 tlb->start = tlb->end = ~0; in __tlb_reset_range()
318 tlb->start = TASK_SIZE; in __tlb_reset_range()
319 tlb->end = 0; in __tlb_reset_range()
321 tlb->freed_tables = 0; in __tlb_reset_range()
322 tlb->cleared_ptes = 0; in __tlb_reset_range()
323 tlb->cleared_pmds = 0; in __tlb_reset_range()
324 tlb->cleared_puds = 0; in __tlb_reset_range()
325 tlb->cleared_p4ds = 0; in __tlb_reset_range()
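
The hits at lines 305-325 are the mmu_gather range-tracking helpers. A minimal sketch of how those fragments fit together as two inline functions (the parameter list of __tlb_adjust_range() is an assumption, not part of the matched lines):

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	/* Grow the pending flush range to cover [address, address + range_size). */
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		/* Full-mm teardown: keep the "flush everything" range. */
		tlb->start = tlb->end = ~0;
	} else {
		/* Empty range; the next __tlb_adjust_range() re-seeds it. */
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
	tlb->freed_tables = 0;
	tlb->cleared_ptes = 0;
	tlb->cleared_pmds = 0;
	tlb->cleared_puds = 0;
	tlb->cleared_p4ds = 0;
}
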
347 static inline void tlb_flush(struct mmu_gather *tlb) in tlb_flush() argument
349 if (tlb->end) in tlb_flush()
350 flush_tlb_mm(tlb->mm); in tlb_flush()
354 tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { } in tlb_update_vma_flags() argument
357 static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { } in tlb_end_vma() argument
372 static inline void tlb_flush(struct mmu_gather *tlb) in tlb_flush() argument
374 if (tlb->fullmm || tlb->need_flush_all) { in tlb_flush()
375 flush_tlb_mm(tlb->mm); in tlb_flush()
376 } else if (tlb->end) { in tlb_flush()
378 .vm_mm = tlb->mm, in tlb_flush()
379 .vm_flags = (tlb->vma_exec ? VM_EXEC : 0) | in tlb_flush()
380 (tlb->vma_huge ? VM_HUGETLB : 0), in tlb_flush()
383 flush_tlb_range(&vma, tlb->start, tlb->end); in tlb_flush()
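
Lines 347-350 and 372-383 are two alternative tlb_flush() implementations: a coarse flush_tlb_mm() fallback and a ranged variant that rebuilds a stack vm_area_struct from the cached vma_exec/vma_huge bits. A sketch of the ranged variant, stitched together from the fragments above (the local variable name vma and the surrounding braces are assumptions):

static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end) {
		/* Rebuild just enough of a VMA for flush_tlb_range(). */
		struct vm_area_struct vma = {
			.vm_mm    = tlb->mm,
			.vm_flags = (tlb->vma_exec ? VM_EXEC    : 0) |
				    (tlb->vma_huge ? VM_HUGETLB : 0),
		};

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}
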
388 tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) in tlb_update_vma_flags() argument
401 tlb->vma_huge = is_vm_hugetlb_page(vma); in tlb_update_vma_flags()
402 tlb->vma_exec = !!(vma->vm_flags & VM_EXEC); in tlb_update_vma_flags()
408 tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { } in tlb_update_vma_flags() argument
414 static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) in tlb_flush_mmu_tlbonly() argument
420 if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds || in tlb_flush_mmu_tlbonly()
421 tlb->cleared_puds || tlb->cleared_p4ds)) in tlb_flush_mmu_tlbonly()
424 tlb_flush(tlb); in tlb_flush_mmu_tlbonly()
425 mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end); in tlb_flush_mmu_tlbonly()
426 __tlb_reset_range(tlb); in tlb_flush_mmu_tlbonly()
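
Lines 414-426 are tlb_flush_mmu_tlbonly(), which flushes only when some range or page-table state has been accumulated, notifies MMU notifiers, and resets the gather. A sketch assembled from those fragments (only the early return and braces are added):

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	/*
	 * Nothing was accumulated: every user of __tlb_adjust_range() also
	 * sets at least one of these bits, so there is nothing to flush.
	 */
	if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
	      tlb->cleared_puds || tlb->cleared_p4ds))
		return;

	tlb_flush(tlb);
	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
	__tlb_reset_range(tlb);
}
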
429 static inline void tlb_remove_page_size(struct mmu_gather *tlb, in tlb_remove_page_size() argument
432 if (__tlb_remove_page_size(tlb, page, page_size)) in tlb_remove_page_size()
433 tlb_flush_mmu(tlb); in tlb_remove_page_size()
436 static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page) in __tlb_remove_page() argument
438 return __tlb_remove_page_size(tlb, page, PAGE_SIZE); in __tlb_remove_page()
445 static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) in tlb_remove_page() argument
447 return tlb_remove_page_size(tlb, page, PAGE_SIZE); in tlb_remove_page()
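
Lines 429-447 are the page-removal wrappers: tlb_remove_page() is tlb_remove_page_size() at PAGE_SIZE granularity, and a batch overflow reported by __tlb_remove_page_size() forces an immediate tlb_flush_mmu(). A sketch reconstructed from the fragments (parameter lists follow the extern declaration at line 242):

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	/* A true return means the batch is full; flush and free now. */
	if (__tlb_remove_page_size(tlb, page, page_size))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}
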
450 static inline void tlb_change_page_size(struct mmu_gather *tlb, in tlb_change_page_size() argument
454 if (tlb->page_size && tlb->page_size != page_size) { in tlb_change_page_size()
455 if (!tlb->fullmm && !tlb->need_flush_all) in tlb_change_page_size()
456 tlb_flush_mmu(tlb); in tlb_change_page_size()
459 tlb->page_size = page_size; in tlb_change_page_size()
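
Lines 450-459 are tlb_change_page_size(): when page-size tracking is compiled in, switching page sizes mid-gather forces a flush first. A hedged sketch; the CONFIG_MMU_GATHER_PAGE_SIZE guard is an assumption about the surrounding #ifdef, which the matched lines do not show:

static inline void tlb_change_page_size(struct mmu_gather *tlb,
					unsigned int page_size)
{
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE	/* assumed guard; not in the hits above */
	if (tlb->page_size && tlb->page_size != page_size) {
		if (!tlb->fullmm && !tlb->need_flush_all)
			tlb_flush_mmu(tlb);
	}

	tlb->page_size = page_size;
#endif
}
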
463 static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb) in tlb_get_unmap_shift() argument
465 if (tlb->cleared_ptes) in tlb_get_unmap_shift()
467 if (tlb->cleared_pmds) in tlb_get_unmap_shift()
469 if (tlb->cleared_puds) in tlb_get_unmap_shift()
471 if (tlb->cleared_p4ds) in tlb_get_unmap_shift()
477 static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb) in tlb_get_unmap_size() argument
479 return 1UL << tlb_get_unmap_shift(tlb); in tlb_get_unmap_size()
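
Lines 463-479 derive the smallest granule cleared so far: tlb_get_unmap_shift() maps the cleared_* bits to a page-table shift and tlb_get_unmap_size() turns that into bytes. A sketch with the return values filled in from the usual shift macros (PAGE_SHIFT, PMD_SHIFT, PUD_SHIFT, P4D_SHIFT), which the matched lines themselves do not show:

static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
	if (tlb->cleared_ptes)
		return PAGE_SHIFT;
	if (tlb->cleared_pmds)
		return PMD_SHIFT;
	if (tlb->cleared_puds)
		return PUD_SHIFT;
	if (tlb->cleared_p4ds)
		return P4D_SHIFT;

	return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	return 1UL << tlb_get_unmap_shift(tlb);
}
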
488 static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) in tlb_start_vma() argument
490 if (tlb->fullmm) in tlb_start_vma()
493 tlb_update_vma_flags(tlb, vma); in tlb_start_vma()
499 static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) in tlb_end_vma() argument
501 if (tlb->fullmm) in tlb_end_vma()
510 tlb_flush_mmu_tlbonly(tlb); in tlb_end_vma()
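
Lines 488-510 are the per-VMA hooks: both are no-ops for a full-mm teardown; tlb_start_vma() caches the VMA flags and tlb_end_vma() flushes at the VMA boundary so the gathered range does not grow across the gaps between VMAs. A sketch; the flush_cache_range() call is an assumption about the unmatched body of tlb_start_vma():

static inline void tlb_start_vma(struct mmu_gather *tlb,
				 struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	tlb_update_vma_flags(tlb, vma);
	/* Assumed: the header also flushes the cache for this VMA here. */
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
}

static inline void tlb_end_vma(struct mmu_gather *tlb,
			       struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	/* Flush and reset the range at the VMA boundary. */
	tlb_flush_mmu_tlbonly(tlb);
}
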
518 static inline void tlb_flush_pte_range(struct mmu_gather *tlb, in tlb_flush_pte_range() argument
521 __tlb_adjust_range(tlb, address, size); in tlb_flush_pte_range()
522 tlb->cleared_ptes = 1; in tlb_flush_pte_range()
525 static inline void tlb_flush_pmd_range(struct mmu_gather *tlb, in tlb_flush_pmd_range() argument
528 __tlb_adjust_range(tlb, address, size); in tlb_flush_pmd_range()
529 tlb->cleared_pmds = 1; in tlb_flush_pmd_range()
532 static inline void tlb_flush_pud_range(struct mmu_gather *tlb, in tlb_flush_pud_range() argument
535 __tlb_adjust_range(tlb, address, size); in tlb_flush_pud_range()
536 tlb->cleared_puds = 1; in tlb_flush_pud_range()
539 static inline void tlb_flush_p4d_range(struct mmu_gather *tlb, in tlb_flush_p4d_range() argument
542 __tlb_adjust_range(tlb, address, size); in tlb_flush_p4d_range()
543 tlb->cleared_p4ds = 1; in tlb_flush_p4d_range()
547 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) argument
557 #define tlb_remove_tlb_entry(tlb, ptep, address) \ argument
559 tlb_flush_pte_range(tlb, address, PAGE_SIZE); \
560 __tlb_remove_tlb_entry(tlb, ptep, address); \
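
Lines 547-560 are the PTE-level removal hooks: __tlb_remove_tlb_entry() defaults to a no-op and tlb_remove_tlb_entry() records a PAGE_SIZE range before invoking it. The macro as it reads once the continuation lines are joined (the do/while wrapper is an assumption):

#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb_flush_pte_range(tlb, address, PAGE_SIZE);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
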
563 #define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \ argument
567 tlb_flush_pmd_range(tlb, address, _sz); \
569 tlb_flush_pud_range(tlb, address, _sz); \
570 __tlb_remove_tlb_entry(tlb, ptep, address); \
578 #define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0) argument
581 #define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \ argument
583 tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE); \
584 __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
592 #define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0) argument
595 #define tlb_remove_pud_tlb_entry(tlb, pudp, address) \ argument
597 tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE); \
598 __tlb_remove_pud_tlb_entry(tlb, pudp, address); \
620 #define pte_free_tlb(tlb, ptep, address) \ argument
622 tlb_flush_pmd_range(tlb, address, PAGE_SIZE); \
623 tlb->freed_tables = 1; \
624 __pte_free_tlb(tlb, ptep, address); \
629 #define pmd_free_tlb(tlb, pmdp, address) \ argument
631 tlb_flush_pud_range(tlb, address, PAGE_SIZE); \
632 tlb->freed_tables = 1; \
633 __pmd_free_tlb(tlb, pmdp, address); \
638 #define pud_free_tlb(tlb, pudp, address) \ argument
640 tlb_flush_p4d_range(tlb, address, PAGE_SIZE); \
641 tlb->freed_tables = 1; \
642 __pud_free_tlb(tlb, pudp, address); \
647 #define p4d_free_tlb(tlb, pudp, address) \ argument
649 __tlb_adjust_range(tlb, address, PAGE_SIZE); \
650 tlb->freed_tables = 1; \
651 __p4d_free_tlb(tlb, pudp, address); \
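
Lines 620-651 are the four page-table freeing macros; each grows the flush range, sets freed_tables, and then calls the architecture's __*_free_tlb() hook. The first of them, reconstructed from lines 620-624 (the do/while wrapper and the #ifndef guard are assumptions):

#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		tlb_flush_pmd_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
#endif
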