/mm/
D | mmu_gather.c |
    16  static bool tlb_next_batch(struct mmu_gather *tlb)  in tlb_next_batch() argument
    20  batch = tlb->active;  in tlb_next_batch()
    22  tlb->active = batch->next;  in tlb_next_batch()
    26  if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)  in tlb_next_batch()
    33  tlb->batch_count++;  in tlb_next_batch()
    38  tlb->active->next = batch;  in tlb_next_batch()
    39  tlb->active = batch;  in tlb_next_batch()
    44  static void tlb_batch_pages_flush(struct mmu_gather *tlb)  in tlb_batch_pages_flush() argument
    48  for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {  in tlb_batch_pages_flush()
    52  tlb->active = &tlb->local;  in tlb_batch_pages_flush()
    [all …]
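The two helpers above implement the page batching behind the TLB gather: tlb_next_batch() chains a fresh batch page onto the gather when the active one fills (capped at MAX_GATHER_BATCH_COUNT), and tlb_batch_pages_flush() walks the chain releasing everything queued. A minimal reconstruction of the flush walk, assuming the mmu_gather_batch layout from include/asm-generic/tlb.h in this kernel series:

    static void tlb_batch_pages_flush(struct mmu_gather *tlb)
    {
            struct mmu_gather_batch *batch;

            /* Release every queued page, batch by batch. */
            for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
                    free_pages_and_swap_cache(batch->pages, batch->nr);
                    batch->nr = 0;
            }
            /* Reset the gather to its embedded "local" batch. */
            tlb->active = &tlb->local;
    }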
D | madvise.c |
    36  struct mmu_gather *tlb;  member
   305  struct mmu_gather *tlb = private->tlb;  in madvise_cold_or_pageout_pte_range() local
   307  struct mm_struct *mm = tlb->mm;  in madvise_cold_or_pageout_pte_range()
   322  tlb_change_page_size(tlb, HPAGE_PMD_SIZE);  in madvise_cold_or_pageout_pte_range()
   360  tlb_remove_pmd_tlb_entry(tlb, pmd, addr);  in madvise_cold_or_pageout_pte_range()
   385  tlb_change_page_size(tlb, PAGE_SIZE);  in madvise_cold_or_pageout_pte_range()
   433  tlb->fullmm);  in madvise_cold_or_pageout_pte_range()
   436  tlb_remove_tlb_entry(tlb, pte, addr);  in madvise_cold_or_pageout_pte_range()
   471  static void madvise_cold_page_range(struct mmu_gather *tlb,  in madvise_cold_page_range() argument
   477  .tlb = tlb,  in madvise_cold_page_range()
   [all …]
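madvise_cold_or_pageout_pte_range() walks the PTEs of an MADV_COLD/MADV_PAGEOUT range, and every entry it touches is reported to the gather so the stale TLB entries are flushed in one batch rather than one IPI per page. A simplified sketch of the per-PTE aging step, pieced together from the calls visible above (locking and page checks elided):

    /*
     * Clear the accessed bit without racing the hardware walker:
     * atomically clear the PTE, age it, write it back, then queue
     * a TLB flush for this address on the gather.
     */
    ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
    ptent = pte_mkold(ptent);
    set_pte_at(mm, addr, pte, ptent);
    tlb_remove_tlb_entry(tlb, pte, addr);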
D | memory.c |
   214  static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,  in free_pte_range() argument
   219  pte_free_tlb(tlb, token, addr);  in free_pte_range()
   220  mm_dec_nr_ptes(tlb->mm);  in free_pte_range()
   223  static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,  in free_pmd_range() argument
   237  free_pte_range(tlb, pmd, addr);  in free_pmd_range()
   253  pmd_free_tlb(tlb, pmd, start);  in free_pmd_range()
   254  mm_dec_nr_pmds(tlb->mm);  in free_pmd_range()
   257  static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,  in free_pud_range() argument
   271  free_pmd_range(tlb, pud, addr, next, floor, ceiling);  in free_pud_range()
   287  pud_free_tlb(tlb, pud, start);  in free_pud_range()
   [all …]
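These functions free page-table pages level by level: each clears the upper-level entry, hands the now-unreachable table page to the gather via the matching *_free_tlb() helper, and drops the mm's page-table accounting. A reconstruction of the elided leaf case, free_pte_range(), built from the lines shown above:

    static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
                               unsigned long addr)
    {
            pgtable_t token = pmd_pgtable(*pmd);

            pmd_clear(pmd);                 /* detach the PTE table from the PMD */
            pte_free_tlb(tlb, token, addr); /* queue it; freed after the TLB flush */
            mm_dec_nr_ptes(tlb->mm);        /* drop the page-table accounting */
    }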
D | huge_memory.c |
  1703  bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,  in madvise_free_huge_pmd() argument
  1709  struct mm_struct *mm = tlb->mm;  in madvise_free_huge_pmd()
  1712  tlb_change_page_size(tlb, HPAGE_PMD_SIZE);  in madvise_free_huge_pmd()
  1762  tlb_remove_pmd_tlb_entry(tlb, pmd, addr);  in madvise_free_huge_pmd()
  1782  int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,  in zap_huge_pmd() argument
  1788  tlb_change_page_size(tlb, HPAGE_PMD_SIZE);  in zap_huge_pmd()
  1799  orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,  in zap_huge_pmd()
  1800  tlb->fullmm);  in zap_huge_pmd()
  1801  tlb_remove_pmd_tlb_entry(tlb, pmd, addr);  in zap_huge_pmd()
  1804  zap_deposited_table(tlb->mm, pmd);  in zap_huge_pmd()
  [all …]
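Huge-PMD teardown follows the same clear-then-queue discipline at PMD granularity. tlb_change_page_size() first declares the new entry size; on architectures that track it, mixing sizes forces an intermediate flush so differently sized entries are never batched together. A sketch of the zap sequence, taken directly from the zap_huge_pmd() lines above:

    tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
    /* Atomically clear the huge PMD... */
    orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
                                            tlb->fullmm);
    /* ...and record a PMD-sized TLB flush for this address. */
    tlb_remove_pmd_tlb_entry(tlb, pmd, addr);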
D | oom_kill.c |
   541  struct mmu_gather tlb;  in __oom_reap_task_mm() local
   546  tlb_gather_mmu(&tlb, mm, range.start, range.end);  in __oom_reap_task_mm()
   548  tlb_finish_mmu(&tlb, range.start, range.end);  in __oom_reap_task_mm()
   552  unmap_page_range(&tlb, vma, range.start, range.end, NULL);  in __oom_reap_task_mm()
   554  tlb_finish_mmu(&tlb, range.start, range.end);  in __oom_reap_task_mm()
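The OOM reaper tears down each anonymous VMA of the victim with its own short-lived gather, so memory is returned (and TLBs flushed) incrementally rather than only after the whole mm is processed. A sketch of that per-VMA step, assuming the nonblocking mmu-notifier handshake __oom_reap_task_mm() uses in this series:

    tlb_gather_mmu(&tlb, mm, range.start, range.end);
    if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
            /* A notifier would have to block: undo the gather and
             * report failure so the reaper retries this mm later. */
            tlb_finish_mmu(&tlb, range.start, range.end);
            return false;
    }
    unmap_page_range(&tlb, vma, range.start, range.end, NULL);
    mmu_notifier_invalidate_range_end(&range);
    tlb_finish_mmu(&tlb, range.start, range.end);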
D | internal.h |
    39  void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
    47  void unmap_page_range(struct mmu_gather *tlb,
D | mmap.c |
  2623  struct mmu_gather tlb;  in unmap_region() local
  2626  tlb_gather_mmu(&tlb, mm, start, end);  in unmap_region()
  2628  unmap_vmas(&tlb, vma, start, end);  in unmap_region()
  2629  free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,  in unmap_region()
  2631  tlb_finish_mmu(&tlb, start, end);  in unmap_region()
  3113  struct mmu_gather tlb;  in exit_mmap() local
  3161  tlb_gather_mmu(&tlb, mm, 0, -1);  in exit_mmap()
  3164  unmap_vmas(&tlb, vma, 0, -1);  in exit_mmap()
  3165  free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);  in exit_mmap()
  3166  tlb_finish_mmu(&tlb, 0, -1);  in exit_mmap()
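unmap_region() is the munmap() path: it gathers over just the dying range, and its free_pgtables() call is bounded by the neighbouring VMAs so page-table pages still in use survive. exit_mmap() instead uses the "fullmm" form (range 0..-1), which lets architectures skip fine-grained flushing because the whole address space is about to disappear. An annotated sketch of the unmap_region() sequence, with the elided lines filled in from the helpers this function normally calls in this series:

    struct mmu_gather tlb;

    lru_add_drain();                        /* drain per-CPU LRU batches first */
    tlb_gather_mmu(&tlb, mm, start, end);   /* ranged (non-fullmm) gather */
    update_hiwater_rss(mm);                 /* snapshot RSS before pages go */
    unmap_vmas(&tlb, vma, start, end);      /* drop the leaf mappings */
    free_pgtables(&tlb, vma,
                  prev ? prev->vm_end : FIRST_USER_ADDRESS,
                  next ? next->vm_start : USER_PGTABLES_CEILING);
    tlb_finish_mmu(&tlb, start, end);       /* flush, then free queued pages */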
D | hugetlb.c |
  3519  void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,  in __unmap_hugepage_range() argument
  3541  tlb_change_page_size(tlb, sz);  in __unmap_hugepage_range()
  3542  tlb_start_vma(tlb, vma);  in __unmap_hugepage_range()
  3603  tlb_remove_huge_tlb_entry(h, tlb, ptep, address);  in __unmap_hugepage_range()
  3611  tlb_remove_page_size(tlb, page, huge_page_size(h));  in __unmap_hugepage_range()
  3619  tlb_end_vma(tlb, vma);  in __unmap_hugepage_range()
  3622  void __unmap_hugepage_range_final(struct mmu_gather *tlb,  in __unmap_hugepage_range_final() argument
  3626  __unmap_hugepage_range(tlb, vma, start, end, ref_page);  in __unmap_hugepage_range_final()
  3645  struct mmu_gather tlb;  in unmap_hugepage_range() local
  3660  tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);  in unmap_hugepage_range()
  [all …]
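Hugetlb unmap is bracketed by tlb_start_vma()/tlb_end_vma() and declares its page size up front with tlb_change_page_size(tlb, sz), so the gather flushes at huge-page granularity. A simplified sketch of the per-page core between those brackets, based on the calls visible above (shared-PMD, migration-entry and ref_page handling elided):

    /* Clear the huge PTE and queue a huge-sized TLB flush. */
    pte = huge_ptep_get_and_clear(mm, address, ptep);
    tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
    if (huge_pte_dirty(pte))
            set_page_dirty(page);
    page_remove_rmap(page, true);   /* drop the reverse mapping (compound) */
    /* Queue the page at its real size so the gather frees and
     * accounts for it correctly after the flush. */
    tlb_remove_page_size(tlb, page, huge_page_size(h));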
D | Kconfig |
   377  huge tlb transparently to the applications whenever possible.
   380  allocation, by reducing the number of tlb misses and by speeding
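These two help-text lines belong to the TRANSPARENT_HUGEPAGE option. For context, a sketch of the surrounding Kconfig entry as it typically reads in this series (the exact select list varies between versions):

    config TRANSPARENT_HUGEPAGE
            bool "Transparent Hugepage Support"
            depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE
            select COMPACTION
            help
              Transparent Hugepages allows the kernel to use huge pages and
              huge tlb transparently to the applications whenever possible.
              This feature can improve computing performance to certain
              applications by speeding up page faults during memory
              allocation, by reducing the number of tlb misses and by speeding
              up the pagetable walking.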