Matches for the identifier "tlb" under mm/ (numbers are source line numbers within each file):
mm/mmu_gather.c
  in tlb_next_batch() (tlb: argument):
    16  static bool tlb_next_batch(struct mmu_gather *tlb)
    20  batch = tlb->active;
    22  tlb->active = batch->next;
    26  if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
    33  tlb->batch_count++;
    38  tlb->active->next = batch;
    39  tlb->active = batch;
  in tlb_batch_pages_flush() (tlb: argument):
    44  static void tlb_batch_pages_flush(struct mmu_gather *tlb)
    48  for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
    52  tlb->active = &tlb->local;
  [further matches elided]
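mm/mmu_gather.c implements the batching itself: pages scheduled for freeing accumulate in a chain of mmu_gather_batch structures, tlb_next_batch() grows that chain on demand (capped at MAX_GATHER_BATCH_COUNT so one huge unmap cannot pin unbounded memory), and tlb_batch_pages_flush() walks the chain from tlb->local, releasing the gathered pages and resetting tlb->active. Below is a reconstruction of tlb_next_batch() built around the matched lines; the __get_free_pages() allocation and the field initialisation of the new batch follow the upstream function of this vintage and are assumptions beyond what the matches show.

    static bool tlb_next_batch(struct mmu_gather *tlb)
    {
            struct mmu_gather_batch *batch;

            batch = tlb->active;
            if (batch->next) {                      /* reuse an already-allocated batch */
                    tlb->active = batch->next;
                    return true;
            }

            if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
                    return false;                   /* cap the memory pinned by batches */

            batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
            if (!batch)
                    return false;                   /* caller must flush and retry */

            tlb->batch_count++;
            batch->next = NULL;
            batch->nr   = 0;
            batch->max  = MAX_GATHER_BATCH;

            tlb->active->next = batch;              /* append and switch to the new batch */
            tlb->active = batch;

            return true;
    }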
mm/madvise.c
  struct member:
    39  struct mmu_gather *tlb;
  in madvise_cold_or_pageout_pte_range() (tlb: local):
    315  struct mmu_gather *tlb = private->tlb;
    318  struct mm_struct *mm = tlb->mm;
    338  tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
    381  tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
    406  tlb_change_page_size(tlb, PAGE_SIZE);
    470  tlb->fullmm);
    473  tlb_remove_tlb_entry(tlb, pte, addr);
  in madvise_cold_page_range() (tlb: argument):
    510  static void madvise_cold_page_range(struct mmu_gather *tlb,
    516  .tlb = tlb,
  [further matches elided]
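MADV_COLD and MADV_PAGEOUT reuse the caller's mmu_gather rather than creating their own: the pointer is stashed in a walk-private structure (the "struct member" hit at line 39), fetched back inside the pte-range walker, which tells the gather what page size it is operating on and records every entry it clears. A sketch of that plumbing follows; the madvise_walk_private and cold_walk_ops names are taken from the upstream implementation and should be treated as assumptions here.

    struct madvise_walk_private {
            struct mmu_gather *tlb;         /* the caller's gather (line 39) */
            bool pageout;                   /* false: MADV_COLD, true: MADV_PAGEOUT */
    };

    static void madvise_cold_page_range(struct mmu_gather *tlb,
                                        struct vm_area_struct *vma,
                                        unsigned long addr, unsigned long end)
    {
            struct madvise_walk_private walk_private = {
                    .pageout = false,       /* deactivate only, do not reclaim */
                    .tlb = tlb,             /* line 516 */
            };

            tlb_start_vma(tlb, vma);
            walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
            tlb_end_vma(tlb, vma);
    }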
mm/memory.c
  in free_pte_range() (tlb: argument):
    245  static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
    256  spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
    260  pte_free_tlb(tlb, token, addr);
    261  mm_dec_nr_ptes(tlb->mm);
  in free_pmd_range() (tlb: argument):
    264  static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
    278  free_pte_range(tlb, pmd, addr);
    294  pmd_free_tlb(tlb, pmd, start);
    295  mm_dec_nr_pmds(tlb->mm);
  in free_pud_range() (tlb: argument):
    298  static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
    312  free_pmd_range(tlb, pud, addr, next, floor, ceiling);
  [further matches elided]
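memory.c is where emptied page tables are funnelled into the gather: free_pgtables() recurses down the hierarchy, and at each level the detached table page goes through pte_free_tlb()/pmd_free_tlb() rather than being freed outright, so no CPU can still be walking it when the page is reused; mm_dec_nr_ptes()/mm_dec_nr_pmds() keep the page-table accounting in step. A sketch of the leaf step is below; this tree evidently takes the pmd lock around the clear (the hit at line 256), and the unlock placement shown is an assumption.

    static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
                               unsigned long addr)
    {
            spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
            pgtable_t token = pmd_pgtable(*pmd);    /* the PTE page being freed */

            pmd_clear(pmd);                         /* unhook it from the pmd */
            spin_unlock(ptl);

            pte_free_tlb(tlb, token, addr);         /* queue it; freed after the flush */
            mm_dec_nr_ptes(tlb->mm);                /* page-table accounting */
    }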
mm/huge_memory.c
  in madvise_free_huge_pmd() (tlb: argument):
    1571  bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
    1577  struct mm_struct *mm = tlb->mm;
    1580  tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
    1630  tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
  in zap_huge_pmd() (tlb: argument):
    1650  int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
    1656  tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
    1668  tlb->fullmm);
    1669  tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
    1672  zap_deposited_table(tlb->mm, pmd);
    1675  tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
  [further matches elided]
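For transparent huge pages the same gather pattern runs at PMD granularity: tlb_change_page_size() declares the flush unit, the PMD is cleared atomically (the tlb->fullmm flag lets a whole-mm teardown take a cheaper path), the stale entry is recorded, the PTE table deposited at fault time is given back, and the huge page itself is queued. Below is a sketch of the anonymous-page path through zap_huge_pmd() under a hypothetical name; locking, rmap and statistics updates are elided, and whether pmdp_huge_get_and_clear_full() takes tlb->mm or the vma as its first argument varies across kernel versions.

    static void zap_huge_pmd_anon_sketch(struct mmu_gather *tlb,
                                         pmd_t *pmd, unsigned long addr)
    {
            pmd_t orig_pmd;

            tlb_change_page_size(tlb, HPAGE_PMD_SIZE);      /* flush at PMD granularity */
            orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
                                                    tlb->fullmm);
            tlb_remove_pmd_tlb_entry(tlb, pmd, addr);       /* record the stale entry */

            zap_deposited_table(tlb->mm, pmd);              /* free the deposited PTE table */
            tlb_remove_page_size(tlb, pmd_page(orig_pmd),
                                 HPAGE_PMD_SIZE);           /* queue the huge page */
    }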
mm/oom_kill.c
  in __oom_reap_task_mm() (tlb: local):
    583  struct mmu_gather tlb;
    588  tlb_gather_mmu(&tlb, mm, range.start, range.end);
    590  tlb_finish_mmu(&tlb, range.start, range.end);
    594  unmap_page_range(&tlb, vma, range.start, range.end, NULL);
    596  tlb_finish_mmu(&tlb, range.start, range.end);
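The OOM reaper is the smallest complete user of the API: a stack-allocated gather, tlb_gather_mmu() over one VMA's range, the unmap, then tlb_finish_mmu() to flush the TLB and free the gathered pages. The two tlb_finish_mmu() hits (lines 590 and 596) correspond to the bail-out and success paths; upstream bails when the mmu notifiers cannot be invoked without blocking. A sketch of the per-VMA loop body, with the notifier calls as in the upstream reaper:

    struct mmu_gather tlb;

    tlb_gather_mmu(&tlb, mm, range.start, range.end);
    if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
            /* a notifier would block: abandon this VMA (line 590) */
            tlb_finish_mmu(&tlb, range.start, range.end);
            ret = false;
            continue;
    }
    unmap_page_range(&tlb, vma, range.start, range.end, NULL);
    mmu_notifier_invalidate_range_end(&range);
    tlb_finish_mmu(&tlb, range.start, range.end);   /* flush TLB, free pages (line 596) */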
mm/mmap.c
  in unmap_region() (tlb: local):
    2787  struct mmu_gather tlb;
    2791  tlb_gather_mmu(&tlb, mm, start, end);
    2793  unmap_vmas(&tlb, vma, start, end);
    2805  tlb_flush_mmu(&tlb);
    2810  free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
    2812  tlb_finish_mmu(&tlb, start, end);
  in exit_mmap() (tlb: local):
    3302  struct mmu_gather tlb;
    3352  tlb_gather_mmu(&tlb, mm, 0, -1);
    3355  unmap_vmas(&tlb, vma, 0, -1);
    3356  free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
  [further matches elided]
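unmap_region() runs the munmap() side of the same sequence, with one extra step: once the PTEs are gone, free_pgtables() prunes the now-empty page tables, bounded by the neighbouring VMAs (floor and ceiling) so upper-level tables still in use survive. exit_mmap() passes the whole address space (0, -1), which marks the gather as fullmm and allows a cheaper teardown. In the sketch below the ceiling expression (next ? next->vm_start : USER_PGTABLES_CEILING) is assumed from upstream, since the match at line 2810 is truncated.

    struct mmu_gather tlb;

    tlb_gather_mmu(&tlb, mm, start, end);
    unmap_vmas(&tlb, vma, start, end);              /* clear PTEs, queue the pages */
    free_pgtables(&tlb, vma,
                  prev ? prev->vm_end : FIRST_USER_ADDRESS,
                  next ? next->vm_start : USER_PGTABLES_CEILING);
    tlb_finish_mmu(&tlb, start, end);               /* flush, then free everything queued */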
mm/hugetlb.c
  in __unmap_hugepage_range() (tlb: argument):
    3926  void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
    3949  tlb_change_page_size(tlb, sz);
    3950  tlb_start_vma(tlb, vma);
    3968  tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
    4009  tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
    4017  tlb_remove_page_size(tlb, page, huge_page_size(h));
    4025  tlb_end_vma(tlb, vma);
    4041  tlb_flush_mmu_tlbonly(tlb);
  in __unmap_hugepage_range_final() (tlb: argument):
    4044  void __unmap_hugepage_range_final(struct mmu_gather *tlb,
    4048  __unmap_hugepage_range(tlb, vma, start, end, ref_page);
  [further matches elided]
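Hugetlb teardown brackets the walk with tlb_start_vma()/tlb_end_vma(), declares the huge page size up front, and uses the huge-page variants to record cleared entries and queue pages; tlb_flush_pmd_range() widens the flush range for the shared-PMD case and tlb_flush_mmu_tlbonly() forces the TLB flush without freeing. A skeleton of __unmap_hugepage_range() under a hypothetical name; per-PTE locking and the shared-PMD unmap are elided.

    static void unmap_hugepage_range_sketch(struct mmu_gather *tlb,
                                            struct vm_area_struct *vma,
                                            unsigned long start, unsigned long end)
    {
            struct hstate *h = hstate_vma(vma);
            unsigned long sz = huge_page_size(h);
            struct mm_struct *mm = vma->vm_mm;
            unsigned long address;
            pte_t *ptep, pte;

            tlb_change_page_size(tlb, sz);          /* gather at huge-page granularity */
            tlb_start_vma(tlb, vma);

            for (address = start; address < end; address += sz) {
                    ptep = huge_pte_offset(mm, address, sz);
                    if (!ptep)
                            continue;
                    pte = huge_ptep_get(ptep);
                    if (huge_pte_none(pte))
                            continue;               /* nothing mapped here */
                    /* pte lock and shared-PMD handling elided */
                    pte = huge_ptep_get_and_clear(mm, address, ptep);
                    tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
                    tlb_remove_page_size(tlb, pte_page(pte), huge_page_size(h));
            }

            tlb_end_vma(tlb, vma);
    }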
mm/internal.h
  declarations:
    59  void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
    67  void unmap_page_range(struct mmu_gather *tlb,
mm/Kconfig
  help text:
    395  huge tlb transparently to the applications whenever possible.
    398  allocation, by reducing the number of tlb misses and by speeding