
Searched refs:tlb (Results 1 – 8 of 8) sorted by relevance

/mm/
memory.c
192 static bool tlb_next_batch(struct mmu_gather *tlb) in tlb_next_batch() argument
196 batch = tlb->active; in tlb_next_batch()
198 tlb->active = batch->next; in tlb_next_batch()
202 if (tlb->batch_count == MAX_GATHER_BATCH_COUNT) in tlb_next_batch()
209 tlb->batch_count++; in tlb_next_batch()
214 tlb->active->next = batch; in tlb_next_batch()
215 tlb->active = batch; in tlb_next_batch()
220 void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, in arch_tlb_gather_mmu() argument
223 tlb->mm = mm; in arch_tlb_gather_mmu()
226 tlb->fullmm = !(start | (end+1)); in arch_tlb_gather_mmu()
[all …]
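The memory.c hits outline the core batching logic of the mmu_gather API: tlb_next_batch() either advances tlb->active to a batch that is already chained in, or appends a fresh one, refusing once tlb->batch_count reaches MAX_GATHER_BATCH_COUNT, while arch_tlb_gather_mmu() records the mm and marks a full-mm teardown when the range is 0..-1. Below is a hedged reconstruction of tlb_next_batch() built from the quoted lines; the batch allocation and the nr/max initialisation between those lines are assumptions, not part of the search output.

static bool tlb_next_batch(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	/* Reuse a batch that is already chained behind the active one (196/198). */
	batch = tlb->active;
	if (batch->next) {
		tlb->active = batch->next;
		return true;
	}

	/* Stop growing the chain once the cap is reached (202). */
	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
		return false;

	/* Assumed: a new batch occupies one freshly allocated page. */
	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
	if (!batch)
		return false;

	tlb->batch_count++;			/* 209 */
	batch->next = NULL;
	batch->nr   = 0;
	batch->max  = MAX_GATHER_BATCH;

	/* Link the new batch in and make it the active one (214/215). */
	tlb->active->next = batch;
	tlb->active = batch;

	return true;
}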
madvise.c
314 struct mmu_gather *tlb = walk->private; in madvise_free_pte_range() local
315 struct mm_struct *mm = tlb->mm; in madvise_free_pte_range()
325 if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next)) in madvise_free_pte_range()
331 tlb_remove_check_page_size_change(tlb, PAGE_SIZE); in madvise_free_pte_range()
353 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); in madvise_free_pte_range()
420 tlb->fullmm); in madvise_free_pte_range()
425 tlb_remove_tlb_entry(tlb, pte, addr); in madvise_free_pte_range()
443 static void madvise_free_page_range(struct mmu_gather *tlb, in madvise_free_page_range() argument
450 .private = tlb, in madvise_free_page_range()
453 tlb_start_vma(tlb, vma); in madvise_free_page_range()
[all …]
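madvise.c feeds the gather through a page-table walk: madvise_free_page_range() stores the mmu_gather in the walk's private field and brackets the walk with tlb_start_vma()/tlb_end_vma(), while madvise_free_pte_range() pulls it back out, hands huge PMDs to madvise_free_huge_pmd(), clears PTEs with pte_clear_not_present_full() and records each cleared entry via tlb_remove_tlb_entry(). A minimal sketch of the outer helper follows, assuming the usual mm_walk/walk_page_range() plumbing of this kernel generation; only .private = tlb and the tlb_start_vma() call appear in the hits above.

static void madvise_free_page_range(struct mmu_gather *tlb,
				    struct vm_area_struct *vma,
				    unsigned long addr, unsigned long end)
{
	/* Assumed walk setup; the search hit only shows .private = tlb. */
	struct mm_walk free_walk = {
		.pmd_entry = madvise_free_pte_range,
		.mm       = vma->vm_mm,
		.private  = tlb,
	};

	tlb_start_vma(tlb, vma);		/* per-VMA bookkeeping for the gather */
	walk_page_range(addr, end, &free_walk);
	tlb_end_vma(tlb, vma);
}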
huge_memory.c
1599 bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, in madvise_free_huge_pmd() argument
1605 struct mm_struct *mm = tlb->mm; in madvise_free_huge_pmd()
1608 tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE); in madvise_free_huge_pmd()
1658 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); in madvise_free_huge_pmd()
1678 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, in zap_huge_pmd() argument
1684 tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE); in zap_huge_pmd()
1695 orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd, in zap_huge_pmd()
1696 tlb->fullmm); in zap_huge_pmd()
1697 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); in zap_huge_pmd()
1700 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
[all …]
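huge_memory.c applies the same gather at PMD granularity: both madvise_free_huge_pmd() and zap_huge_pmd() first call tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE) so a batch gathered at a different page size is flushed, then clear the PMD and record it with tlb_remove_pmd_tlb_entry(). A hedged sketch of the zap path for the plain anonymous case follows; zap_one_huge_pmd is an illustrative name, not a kernel function, and locking, migration entries and the huge-zero-page/DAX branches are left out.

/* Simplified: clear one huge PMD and queue its TLB invalidation. */
static void zap_one_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			     pmd_t *pmd, unsigned long addr)
{
	pmd_t orig_pmd;

	/* Flush the batch first if it was gathered at a different page size. */
	tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE);

	/* Atomically clear the PMD; the flush may be deferred on a full-mm teardown. */
	orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd, tlb->fullmm);

	/* Record the huge entry so tlb_finish_mmu() can invalidate it. */
	tlb_remove_pmd_tlb_entry(tlb, pmd, addr);

	/* Release the page table page deposited for a possible later split. */
	zap_deposited_table(tlb->mm, pmd);

	(void)orig_pmd;	/* the real code goes on to drop the mapped page's refcount */
}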
oom_kill.c
496 struct mmu_gather tlb; in __oom_reap_task_mm() local
498 tlb_gather_mmu(&tlb, mm, vma->vm_start, vma->vm_end); in __oom_reap_task_mm()
499 unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end, in __oom_reap_task_mm()
501 tlb_finish_mmu(&tlb, vma->vm_start, vma->vm_end); in __oom_reap_task_mm()
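oom_kill.c shows the complete lifecycle in its smallest form: the OOM reaper builds a gather for one VMA, unmaps the range into it, and flushes. A minimal sketch of that per-VMA sequence, assuming the zap_details argument of unmap_page_range() (truncated in the hit above) is left NULL:

struct mmu_gather tlb;

/* 1. Start collecting freed pages and pending invalidations for this range. */
tlb_gather_mmu(&tlb, mm, vma->vm_start, vma->vm_end);

/* 2. Tear down the PTEs; freed pages accumulate in the gather's batches. */
unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end, NULL);

/* 3. Flush the TLB for the range and release the batched pages. */
tlb_finish_mmu(&tlb, vma->vm_start, vma->vm_end);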
mmap.c
2530 struct mmu_gather tlb; in unmap_region() local
2533 tlb_gather_mmu(&tlb, mm, start, end); in unmap_region()
2535 unmap_vmas(&tlb, vma, start, end); in unmap_region()
2536 free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, in unmap_region()
2538 tlb_finish_mmu(&tlb, start, end); in unmap_region()
3013 struct mmu_gather tlb; in exit_mmap() local
3063 tlb_gather_mmu(&tlb, mm, 0, -1); in exit_mmap()
3066 unmap_vmas(&tlb, vma, 0, -1); in exit_mmap()
3067 free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING); in exit_mmap()
3068 tlb_finish_mmu(&tlb, 0, -1); in exit_mmap()
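mmap.c runs the same three phases at larger scope: unmap_region() gathers over an explicit [start, end) and also frees the now-empty page tables, while exit_mmap() passes 0 and -1 so that arch_tlb_gather_mmu() (memory.c line 226 above) marks the gather as fullmm and per-entry flushes can be skipped. A condensed sketch of the full-mm teardown from the exit_mmap() hits:

struct mmu_gather tlb;

/* The 0..-1 range makes tlb.fullmm true: the whole address space goes away. */
tlb_gather_mmu(&tlb, mm, 0, -1);

/* Unmap every VMA, batching the freed pages in the gather. */
unmap_vmas(&tlb, vma, 0, -1);

/* Free the page-table pages themselves, from the lowest user address up. */
free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);

/* One final flush, then release everything that was batched. */
tlb_finish_mmu(&tlb, 0, -1);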
internal.h
43 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
51 void unmap_page_range(struct mmu_gather *tlb,
hugetlb.c
3315 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, in __unmap_hugepage_range() argument
3338 tlb_remove_check_page_size_change(tlb, sz); in __unmap_hugepage_range()
3339 tlb_start_vma(tlb, vma); in __unmap_hugepage_range()
3389 tlb_remove_huge_tlb_entry(h, tlb, ptep, address); in __unmap_hugepage_range()
3397 tlb_remove_page_size(tlb, page, huge_page_size(h)); in __unmap_hugepage_range()
3405 tlb_end_vma(tlb, vma); in __unmap_hugepage_range()
3408 void __unmap_hugepage_range_final(struct mmu_gather *tlb, in __unmap_hugepage_range_final() argument
3412 __unmap_hugepage_range(tlb, vma, start, end, ref_page); in __unmap_hugepage_range_final()
3431 struct mmu_gather tlb; in unmap_hugepage_range() local
3435 tlb_gather_mmu(&tlb, mm, start, end); in unmap_hugepage_range()
[all …]
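hugetlb.c follows the same per-VMA pattern at huge-page granularity: __unmap_hugepage_range() switches the gather to the hstate's page size, brackets its loop with tlb_start_vma()/tlb_end_vma(), and records each cleared entry and its page with tlb_remove_huge_tlb_entry() and tlb_remove_page_size(), while unmap_hugepage_range() wraps that in the usual gather/finish pair. A hedged outline of the loop follows; locking, shared-PMD handling and the ref_page check are omitted, and huge_ptep_get_and_clear()/pte_page() do not appear in the hits above, so they are assumptions.

/* Illustrative fragment: the per-entry step of __unmap_hugepage_range(). */
sz = huge_page_size(h);
tlb_remove_check_page_size_change(tlb, sz);	/* batch at huge-page granularity */
tlb_start_vma(tlb, vma);

for (address = start; address < end; address += sz) {
	/* ... look up ptep for this address (omitted) ... */

	/* Assumed: clear the huge PTE and find the page it mapped. */
	pte = huge_ptep_get_and_clear(mm, address, ptep);
	page = pte_page(pte);

	/* Queue the TLB invalidation and the page itself in the gather. */
	tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
	tlb_remove_page_size(tlb, page, huge_page_size(h));
}

tlb_end_vma(tlb, vma);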
Kconfig
393 huge tlb transparently to the applications whenever possible.
396 allocation, by reducing the number of tlb misses and by speeding