Searched refs:tlb (Results 1 – 6 of 6) sorted by relevance
/mm/
memory.c
    184  static bool tlb_next_batch(struct mmu_gather *tlb)  in tlb_next_batch() argument
    188  batch = tlb->active;  in tlb_next_batch()
    190  tlb->active = batch->next;  in tlb_next_batch()
    194  if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)  in tlb_next_batch()
    201  tlb->batch_count++;  in tlb_next_batch()
    206  tlb->active->next = batch;  in tlb_next_batch()
    207  tlb->active = batch;  in tlb_next_batch()
    217  void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned lon…  in tlb_gather_mmu() argument
    219  tlb->mm = mm;  in tlb_gather_mmu()
    222  tlb->fullmm = !(start | (end+1));  in tlb_gather_mmu()
    [all …]
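The memory.c hits show the producer side of the gather machinery: tlb_gather_mmu() initialises a struct mmu_gather for a range, and tlb_next_batch() keeps chaining page batches onto tlb->active until MAX_GATHER_BATCH_COUNT is reached, at which point the caller has to flush instead. The standalone C sketch below models only that chaining/capping logic; every name in it (demo_gather, demo_batch, DEMO_MAX_BATCHES) is invented for illustration and is not the kernel's code.

    /*
     * Simplified, userspace-only model of the batch chaining done by
     * tlb_next_batch() above.  All names are invented for this sketch;
     * the real structures live in asm-generic/tlb.h and mm/memory.c.
     */
    #include <stdio.h>
    #include <stdlib.h>

    #define DEMO_MAX_BATCHES 4              /* stands in for MAX_GATHER_BATCH_COUNT */

    struct demo_batch {
            struct demo_batch *next;
    };

    struct demo_gather {
            struct demo_batch first;        /* embedded bootstrap batch */
            struct demo_batch *active;      /* batch currently being filled */
            unsigned int batch_count;
    };

    /* Returns 1 when another batch is available, 0 when the caller must flush. */
    static int demo_next_batch(struct demo_gather *g)
    {
            struct demo_batch *batch = g->active;

            if (batch->next) {              /* reuse a batch from an earlier round */
                    g->active = batch->next;
                    return 1;
            }
            if (g->batch_count == DEMO_MAX_BATCHES)
                    return 0;               /* cap reached: no more allocations */

            batch = calloc(1, sizeof(*batch));
            if (!batch)
                    return 0;

            g->batch_count++;
            g->active->next = batch;
            g->active = batch;
            return 1;
    }

    int main(void)
    {
            struct demo_gather g = { .active = &g.first, .batch_count = 1 };
            struct demo_batch *b, *n;

            while (demo_next_batch(&g))
                    ;                       /* keep extending until the cap is hit */
            printf("stopped after %u batches\n", g.batch_count);

            for (b = g.first.next; b; b = n) {      /* free the chained batches */
                    n = b->next;
                    free(b);
            }
            return 0;
    }

Built with a plain cc, this stops after four batches, which is roughly the point at which the real code stops growing the gather and its callers fall back to an explicit tlb_flush_mmu().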
huge_memory.c
    1459  int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,  in zap_huge_pmd() argument
    1473  orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,  in zap_huge_pmd()
    1474  tlb->fullmm);  in zap_huge_pmd()
    1475  tlb_remove_pmd_tlb_entry(tlb, pmd, addr);  in zap_huge_pmd()
    1481  pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));  in zap_huge_pmd()
    1482  atomic_long_dec(&tlb->mm->nr_ptes);  in zap_huge_pmd()
    1489  add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);  in zap_huge_pmd()
    1491  pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));  in zap_huge_pmd()
    1492  atomic_long_dec(&tlb->mm->nr_ptes);  in zap_huge_pmd()
    1494  tlb_remove_page(tlb, page);  in zap_huge_pmd()
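The zap_huge_pmd() hits illustrate the ordering the gather is meant to guarantee: the huge PMD is cleared and its TLB invalidation queued before the deposited page table and the huge page itself are released. The fragment below is a condensed, in-kernel-style sketch of just the anonymous-page branch shown above; sketch_zap_anon_huge_pmd() is an invented name, and locking, the zero-page and file-backed paths, rmap/refcount handling and error checking are all omitted, so this illustrates the call order rather than being a drop-in function.

    #include <linux/mm.h>
    #include <linux/huge_mm.h>
    #include <asm/pgalloc.h>
    #include <asm/pgtable.h>
    #include <asm/tlb.h>

    /* Condensed sketch of the anonymous-THP branch of zap_huge_pmd() above. */
    static void sketch_zap_anon_huge_pmd(struct mmu_gather *tlb, pmd_t *pmd,
                                         unsigned long addr)
    {
            pmd_t orig_pmd;
            struct page *page;

            /* Clear the huge pmd and queue a pmd-sized TLB invalidation first. */
            orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
                                                    tlb->fullmm);
            tlb_remove_pmd_tlb_entry(tlb, pmd, addr);

            page = pmd_page(orig_pmd);
            /* (rmap/refcount handling of 'page' elided) */

            add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
            /* Give back the pte page table deposited at fault time. */
            pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
            atomic_long_dec(&tlb->mm->nr_ptes);

            /* The huge page is only handed to the gather now, so it is not
             * freed or reused before the deferred TLB flush happens. */
            tlb_remove_page(tlb, page);
    }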
hugetlb.c
    3263  void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,  in __unmap_hugepage_range() argument
    3283  tlb_start_vma(tlb, vma);  in __unmap_hugepage_range()
    3299  tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);  in __unmap_hugepage_range()
    3336  tlb_remove_tlb_entry(tlb, ptep, address);  in __unmap_hugepage_range()
    3342  force_flush = !__tlb_remove_page(tlb, page);  in __unmap_hugepage_range()
    3363  tlb_flush_mmu(tlb);  in __unmap_hugepage_range()
    3368  tlb_end_vma(tlb, vma);  in __unmap_hugepage_range()
    3371  void __unmap_hugepage_range_final(struct mmu_gather *tlb,  in __unmap_hugepage_range_final() argument
    3375  __unmap_hugepage_range(tlb, vma, start, end, ref_page);  in __unmap_hugepage_range_final()
    3394  struct mmu_gather tlb;  in unmap_hugepage_range() local
    [all …]
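The hugetlb.c hits show the consumer side of the same machinery: the walk over a VMA is bracketed by tlb_start_vma()/tlb_end_vma(), each page is queued with __tlb_remove_page(), and when that returns 0 there is no more room to queue pages, so the walker calls tlb_flush_mmu() before continuing (the force_flush variable above). The standalone sketch below models only that flush-on-full control flow; demo_remove_page() and demo_flush() are invented stand-ins, not the kernel API.

    /*
     * Userspace-only model of the force_flush pattern seen in
     * __unmap_hugepage_range() above: queue pages until the batch is full,
     * then flush and keep going.  All names are invented for this sketch.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define DEMO_BATCH_PAGES 4

    struct demo_gather {
            int nr;                               /* pages queued so far */
            const char *pages[DEMO_BATCH_PAGES];
    };

    /* Stand-in for tlb_flush_mmu(): invalidate, then release what was queued. */
    static void demo_flush(struct demo_gather *g)
    {
            printf("flush: releasing %d page(s)\n", g->nr);
            g->nr = 0;
    }

    /* Stand-in for __tlb_remove_page(): queue a page, return false once the
     * batch is full and the caller has to flush before queueing more. */
    static bool demo_remove_page(struct demo_gather *g, const char *page)
    {
            g->pages[g->nr++] = page;
            return g->nr < DEMO_BATCH_PAGES;
    }

    int main(void)
    {
            const char *fake_pages[] = { "a", "b", "c", "d", "e", "f" };
            struct demo_gather g = { 0 };
            size_t i;

            for (i = 0; i < sizeof(fake_pages) / sizeof(fake_pages[0]); i++) {
                    bool force_flush = !demo_remove_page(&g, fake_pages[i]);

                    if (force_flush)        /* mirrors the force_flush path above */
                            demo_flush(&g);
            }
            if (g.nr)
                    demo_flush(&g);         /* final flush, as tlb_finish_mmu() would do */
            return 0;
    }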
mmap.c
    2500  struct mmu_gather tlb;  in unmap_region() local
    2503  tlb_gather_mmu(&tlb, mm, start, end);  in unmap_region()
    2505  unmap_vmas(&tlb, vma, start, end);  in unmap_region()
    2506  free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,  in unmap_region()
    2508  tlb_finish_mmu(&tlb, start, end);  in unmap_region()
    2940  struct mmu_gather tlb;  in exit_mmap() local
    2964  tlb_gather_mmu(&tlb, mm, 0, -1);  in exit_mmap()
    2967  unmap_vmas(&tlb, vma, 0, -1);  in exit_mmap()
    2969  free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);  in exit_mmap()
    2970  tlb_finish_mmu(&tlb, 0, -1);  in exit_mmap()
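unmap_region() and exit_mmap() are the two callers that show the full gather lifecycle: set the gather up with tlb_gather_mmu(), tear down the PTEs with unmap_vmas(), reclaim the now-empty page tables with free_pgtables(), and finish with tlb_finish_mmu(), which does the final TLB flush and frees the queued pages. The in-kernel-style fragment below only condenses that sequence from the two hits above (using exit_mmap()'s floor/ceiling arguments); sketch_teardown() is an invented name, and the surrounding locking and accounting steps are omitted, so treat it as an illustration rather than a replacement for either caller.

    #include <linux/mm.h>
    #include <asm/tlb.h>
    #include "internal.h"           /* declares free_pgtables(), see the internal.h hit below */

    /*
     * Condensed sketch of the gather lifecycle as used by unmap_region() and
     * exit_mmap() above.  Callers are assumed to hold the appropriate locks.
     */
    static void sketch_teardown(struct mm_struct *mm, struct vm_area_struct *vma,
                                unsigned long start, unsigned long end)
    {
            struct mmu_gather tlb;

            tlb_gather_mmu(&tlb, mm, start, end);   /* start batching for [start, end) */
            unmap_vmas(&tlb, vma, start, end);      /* clear ptes, queue pages for freeing */
            free_pgtables(&tlb, vma, FIRST_USER_ADDRESS,
                          USER_PGTABLES_CEILING);   /* reclaim the emptied page tables */
            tlb_finish_mmu(&tlb, start, end);       /* final TLB flush + free queued pages */
    }

As the hits show, exit_mmap() passes 0 and -1 so the gather covers the whole address space, while unmap_region() limits both the gather range and the free_pgtables() floor/ceiling to the VMAs actually being unmapped.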
internal.h
    37  void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
Kconfig
    399  huge tlb transparently to the applications whenever possible.
    402  allocation, by reducing the number of tlb misses and by speeding