
Searched refs:tlb (Results 1 – 12 of 12) sorted by relevance

/mm/
mmu_gather.c
16 static bool tlb_next_batch(struct mmu_gather *tlb) in tlb_next_batch() argument
20 batch = tlb->active; in tlb_next_batch()
22 tlb->active = batch->next; in tlb_next_batch()
26 if (tlb->batch_count == MAX_GATHER_BATCH_COUNT) in tlb_next_batch()
33 tlb->batch_count++; in tlb_next_batch()
38 tlb->active->next = batch; in tlb_next_batch()
39 tlb->active = batch; in tlb_next_batch()
44 static void tlb_batch_pages_flush(struct mmu_gather *tlb) in tlb_batch_pages_flush() argument
48 for (batch = &tlb->local; batch && batch->nr; batch = batch->next) { in tlb_batch_pages_flush()
52 tlb->active = &tlb->local; in tlb_batch_pages_flush()
[all …]
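
The fragments above are enough to reconstruct the batching logic: an mmu_gather chains page-sized batches of struct page pointers, tlb_next_batch() first reuses an already-allocated batch and only allocates a new one when none is left, capped at MAX_GATHER_BATCH_COUNT so a single unmap cannot pin unbounded memory. A rough reconstruction of the elided lines (the exact code in mm/mmu_gather.c may differ in detail):

    static bool tlb_next_batch(struct mmu_gather *tlb)
    {
            struct mmu_gather_batch *batch;

            batch = tlb->active;
            if (batch->next) {
                    /* reuse a batch allocated on an earlier pass */
                    tlb->active = batch->next;
                    return true;
            }

            /* cap the memory pinned by one gather operation */
            if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
                    return false;

            batch = (void *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
            if (!batch)
                    return false;

            tlb->batch_count++;
            batch->next = NULL;
            batch->nr   = 0;
            batch->max  = MAX_GATHER_BATCH;

            tlb->active->next = batch;
            tlb->active = batch;
            return true;
    }
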
madvise.c
41 struct mmu_gather *tlb; member
321 struct mmu_gather *tlb = private->tlb; in madvise_cold_or_pageout_pte_range() local
324 struct mm_struct *mm = tlb->mm; in madvise_cold_or_pageout_pte_range()
343 tlb_change_page_size(tlb, HPAGE_PMD_SIZE); in madvise_cold_or_pageout_pte_range()
386 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); in madvise_cold_or_pageout_pte_range()
411 tlb_change_page_size(tlb, PAGE_SIZE); in madvise_cold_or_pageout_pte_range()
471 tlb->fullmm); in madvise_cold_or_pageout_pte_range()
474 tlb_remove_tlb_entry(tlb, pte, addr); in madvise_cold_or_pageout_pte_range()
509 static void madvise_cold_page_range(struct mmu_gather *tlb, in madvise_cold_page_range() argument
515 .tlb = tlb, in madvise_cold_page_range()
[all …]
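
The member/local annotations above show the plumbing: madvise_cold_page_range() parks the mmu_gather pointer in a private struct handed to the generic page walker, and the pmd-level callback fetches it back out. A minimal sketch of that pattern, assuming cold_walk_ops wires madvise_cold_or_pageout_pte_range() in as the pmd_entry callback:

    static const struct mm_walk_ops cold_walk_ops = {
            .pmd_entry = madvise_cold_or_pageout_pte_range,
    };

    struct madvise_walk_private {
            struct mmu_gather *tlb;
            bool pageout;   /* false for MADV_COLD, true for MADV_PAGEOUT */
    };

    static void madvise_cold_page_range(struct mmu_gather *tlb,
                                        struct vm_area_struct *vma,
                                        unsigned long addr, unsigned long end)
    {
            struct madvise_walk_private walk_private = {
                    .pageout = false,
                    .tlb = tlb,
            };

            tlb_start_vma(tlb, vma);
            walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops,
                            &walk_private);
            tlb_end_vma(tlb, vma);
    }
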
mprotect.c
39 static unsigned long change_pte_range(struct mmu_gather *tlb, in change_pte_range() argument
52 tlb_change_page_size(tlb, PAGE_SIZE); in change_pte_range()
145 tlb_flush_pte_range(tlb, addr, PAGE_SIZE); in change_pte_range()
227 static inline unsigned long change_pmd_range(struct mmu_gather *tlb, in change_pmd_range() argument
273 int nr_ptes = change_huge_pmd(tlb, vma, pmd, in change_pmd_range()
288 this_pages = change_pte_range(tlb, vma, pmd, addr, next, in change_pmd_range()
303 static inline unsigned long change_pud_range(struct mmu_gather *tlb, in change_pud_range() argument
316 pages += change_pmd_range(tlb, vma, pud, addr, next, newprot, in change_pud_range()
323 static inline unsigned long change_p4d_range(struct mmu_gather *tlb, in change_p4d_range() argument
336 pages += change_pud_range(tlb, vma, p4d, addr, next, newprot, in change_p4d_range()
[all …]
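
change_protection() descends the page-table hierarchy with one helper per level (p4d, pud, pmd, pte); each level iterates its entries with the matching p?d_addr_end() helper, skips empty entries, recurses into the next level, and sums the number of updated PTEs. One level as a sketch, with THP and error handling elided:

    static inline unsigned long change_pud_range(struct mmu_gather *tlb,
                    struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr,
                    unsigned long end, pgprot_t newprot, unsigned long cp_flags)
    {
            pud_t *pud;
            unsigned long next;
            unsigned long pages = 0;

            pud = pud_offset(p4d, addr);
            do {
                    next = pud_addr_end(addr, end);
                    if (pud_none_or_clear_bad(pud))
                            continue;
                    /* descend; change_pmd_range() repeats this shape at pmd level */
                    pages += change_pmd_range(tlb, vma, pud, addr, next,
                                              newprot, cp_flags);
            } while (pud++, addr = next, addr != end);

            return pages;
    }
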
memory.c
267 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, in free_pte_range() argument
278 spinlock_t *ptl = pmd_lock(tlb->mm, pmd); in free_pte_range()
290 pte_free_tlb(tlb, token, addr); in free_pte_range()
291 mm_dec_nr_ptes(tlb->mm); in free_pte_range()
294 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud, in free_pmd_range() argument
308 free_pte_range(tlb, pmd, addr); in free_pmd_range()
324 pmd_free_tlb(tlb, pmd, start); in free_pmd_range()
325 mm_dec_nr_pmds(tlb->mm); in free_pmd_range()
328 static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d, in free_pud_range() argument
342 free_pmd_range(tlb, pud, addr, next, floor, ceiling); in free_pud_range()
[all …]
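
The point of routing page-table teardown through the gather is visible in free_pte_range(): the PTE page is detached under the pmd lock but handed to pte_free_tlb() instead of being freed outright, so it cannot be reused until tlb_finish_mmu() has flushed any TLB entries that might still walk through it. Approximately (ordering and locking details are a sketch, not the exact upstream code):

    static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
                               unsigned long addr)
    {
            pgtable_t token;
            spinlock_t *ptl = pmd_lock(tlb->mm, pmd);

            token = pmd_pgtable(*pmd);      /* the page backing this PTE table */
            pmd_clear(pmd);
            spin_unlock(ptl);

            pte_free_tlb(tlb, token, addr); /* defer the free past the TLB flush */
            mm_dec_nr_ptes(tlb->mm);
    }
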
huge_memory.c
1501 bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, in madvise_free_huge_pmd() argument
1507 struct mm_struct *mm = tlb->mm; in madvise_free_huge_pmd()
1510 tlb_change_page_size(tlb, HPAGE_PMD_SIZE); in madvise_free_huge_pmd()
1560 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); in madvise_free_huge_pmd()
1580 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, in zap_huge_pmd() argument
1586 tlb_change_page_size(tlb, HPAGE_PMD_SIZE); in zap_huge_pmd()
1598 tlb->fullmm); in zap_huge_pmd()
1599 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); in zap_huge_pmd()
1602 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
1605 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
[all …]
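
zap_huge_pmd() applies the same pattern at PMD granularity: announce HPAGE_PMD_SIZE to the gather, clear the entry (pmdp_huge_get_and_clear_full() can take a cheaper path when tlb->fullmm signals whole-address-space teardown), and record the huge TLB entry. A heavily trimmed sketch; the anonymous/file page release and the zap_deposited_table() calls seen above are elided:

    int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                     pmd_t *pmd, unsigned long addr)
    {
            pmd_t orig_pmd;
            spinlock_t *ptl;

            tlb_change_page_size(tlb, HPAGE_PMD_SIZE);

            ptl = __pmd_trans_huge_lock(pmd, vma);
            if (!ptl)
                    return 0;       /* no longer huge; caller retries at pte level */

            orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
                                                    tlb->fullmm);
            tlb_remove_pmd_tlb_entry(tlb, pmd, addr);

            /* page release and deposited-PTE-table teardown elided */
            spin_unlock(ptl);
            return 1;
    }
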
oom_kill.c
548 struct mmu_gather tlb; in __oom_reap_task_mm() local
553 tlb_gather_mmu(&tlb, mm); in __oom_reap_task_mm()
555 tlb_finish_mmu(&tlb); in __oom_reap_task_mm()
559 unmap_page_range(&tlb, vma, range.start, range.end, NULL); in __oom_reap_task_mm()
561 tlb_finish_mmu(&tlb); in __oom_reap_task_mm()
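
oom_kill.c shows the mmu_gather life cycle in its minimal form and explains the two tlb_finish_mmu() calls above: tlb_gather_mmu() arms the gather for one mm, the early finish at line 555 is the bail-out path when the mmu notifier cannot be started without blocking, and the normal path unmaps and then finishes. Roughly (names as in the surrounding function; the per-VMA loop is flattened here):

    struct mmu_gather tlb;

    tlb_gather_mmu(&tlb, mm);
    if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
            /* a notifier would block: give up, nothing gathered yet */
            tlb_finish_mmu(&tlb);
            return false;
    }
    unmap_page_range(&tlb, vma, range.start, range.end, NULL);
    mmu_notifier_invalidate_range_end(&range);
    tlb_finish_mmu(&tlb);   /* flush the TLB, then free the batched pages */
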
userfaultfd.c
707 struct mmu_gather tlb; in mwriteprotect_range() local
749 tlb_gather_mmu(&tlb, dst_mm); in mwriteprotect_range()
750 change_protection(&tlb, dst_vma, start, start + len, newprot, in mwriteprotect_range()
752 tlb_finish_mmu(&tlb); in mwriteprotect_range()
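
mwriteprotect_range() wraps the same gather/finish pair around change_protection(); the final flag argument selects userfaultfd write-protect handling rather than a plain permission change. A sketch of the core, assuming the usual flag names from mm/internal.h:

    struct mmu_gather tlb;
    pgprot_t newprot;

    if (enable_wp)
            newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
    else
            newprot = vm_get_page_prot(dst_vma->vm_flags);

    tlb_gather_mmu(&tlb, dst_mm);
    change_protection(&tlb, dst_vma, start, start + len, newprot,
                      enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);
    tlb_finish_mmu(&tlb);
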
mmap.c
2663 struct mmu_gather tlb; in unmap_region() local
2667 tlb_gather_mmu(&tlb, mm); in unmap_region()
2669 unmap_vmas(&tlb, vma, start, end); in unmap_region()
2681 tlb_flush_mmu(&tlb); in unmap_region()
2686 free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, in unmap_region()
2688 tlb_finish_mmu(&tlb); in unmap_region()
3162 struct mmu_gather tlb; in exit_mmap() local
3206 tlb_gather_mmu_fullmm(&tlb, mm); in exit_mmap()
3209 unmap_vmas(&tlb, vma, 0, -1); in exit_mmap()
3210 free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING); in exit_mmap()
[all …]
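
unmap_region() adds the page-table teardown step and fixes the ordering: unmap_vmas() queues the user pages first, and only then does free_pgtables() unlink and queue the now-empty page-table pages, bounded by the neighbouring VMAs (or FIRST_USER_ADDRESS / USER_PGTABLES_CEILING at the edges). A condensed sketch; the mid-sequence tlb_flush_mmu() visible at line 2681 is a special case elided here:

    static void unmap_region(struct mm_struct *mm, struct vm_area_struct *vma,
                             struct vm_area_struct *prev,
                             unsigned long start, unsigned long end)
    {
            struct vm_area_struct *next = prev ? prev->vm_next : mm->mmap;
            struct mmu_gather tlb;

            lru_add_drain();
            tlb_gather_mmu(&tlb, mm);
            update_hiwater_rss(mm);
            unmap_vmas(&tlb, vma, start, end);      /* zap the user pages */
            free_pgtables(&tlb, vma,                /* then the empty page tables */
                          prev ? prev->vm_end : FIRST_USER_ADDRESS,
                          next ? next->vm_start : USER_PGTABLES_CEILING);
            tlb_finish_mmu(&tlb);
    }

exit_mmap() at line 3206 uses tlb_gather_mmu_fullmm() instead: the whole address space is going away, so architectures may skip the per-range flushes an ordinary gather would require.
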
internal.h
40 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
48 void unmap_page_range(struct mmu_gather *tlb,
hugetlb.c
4449 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, in __unmap_hugepage_range() argument
4472 tlb_change_page_size(tlb, sz); in __unmap_hugepage_range()
4473 tlb_start_vma(tlb, vma); in __unmap_hugepage_range()
4491 tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE); in __unmap_hugepage_range()
4532 tlb_remove_huge_tlb_entry(h, tlb, ptep, address); in __unmap_hugepage_range()
4540 tlb_remove_page_size(tlb, page, huge_page_size(h)); in __unmap_hugepage_range()
4548 tlb_end_vma(tlb, vma); in __unmap_hugepage_range()
4564 tlb_flush_mmu_tlbonly(tlb); in __unmap_hugepage_range()
4567 void __unmap_hugepage_range_final(struct mmu_gather *tlb, in __unmap_hugepage_range_final() argument
4571 __unmap_hugepage_range(tlb, vma, start, end, ref_page); in __unmap_hugepage_range_final()
[all …]
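
__unmap_hugepage_range() is the hugetlb counterpart: the gather is told the huge page size up front, each cleared PTE is recorded with tlb_remove_huge_tlb_entry(), and the page itself is queued with tlb_remove_page_size() so batching and flush accounting match the mapping size. The core loop, heavily trimmed (PTE locking, shared-PMD unmap and migration-entry handling elided):

    unsigned long sz = huge_page_size(h);
    unsigned long address;
    pte_t *ptep, pte;
    struct page *page;

    tlb_change_page_size(tlb, sz);
    tlb_start_vma(tlb, vma);

    for (address = start; address < end; address += sz) {
            ptep = huge_pte_offset(mm, address, sz);
            if (!ptep)
                    continue;

            pte = huge_ptep_get_and_clear(mm, address, ptep);
            tlb_remove_huge_tlb_entry(h, tlb, ptep, address);

            page = pte_page(pte);
            tlb_remove_page_size(tlb, page, huge_page_size(h));
    }

    tlb_end_vma(tlb, vma);
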
mempolicy.c
638 struct mmu_gather tlb; in change_prot_numa() local
641 tlb_gather_mmu(&tlb, vma->vm_mm); in change_prot_numa()
643 nr_updated = change_protection(&tlb, vma, addr, end, PAGE_NONE, in change_prot_numa()
648 tlb_finish_mmu(&tlb); in change_prot_numa()
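
change_prot_numa() reuses the same machinery for a different goal: with MM_CP_PROT_NUMA the entries are rewritten to PAGE_NONE, so the next access takes a NUMA-hinting fault rather than a permanent permission change. The call matches the snippet above:

    struct mmu_gather tlb;
    unsigned long nr_updated;

    tlb_gather_mmu(&tlb, vma->vm_mm);
    nr_updated = change_protection(&tlb, vma, addr, end, PAGE_NONE,
                                   MM_CP_PROT_NUMA);
    tlb_finish_mmu(&tlb);
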
Kconfig
379 huge tlb transparently to the applications whenever possible.
382 allocation, by reducing the number of tlb misses and by speeding