/include/asm-generic/

tlb.h
    183  extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
    231  extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
    292  void arch_tlb_gather_mmu(struct mmu_gather *tlb,
    294  void tlb_flush_mmu(struct mmu_gather *tlb);
    295  void arch_tlb_finish_mmu(struct mmu_gather *tlb,
    298  static inline void __tlb_adjust_range(struct mmu_gather *tlb,    in __tlb_adjust_range()  argument
    302  tlb->start = min(tlb->start, address);    in __tlb_adjust_range()
    303  tlb->end = max(tlb->end, address + range_size);    in __tlb_adjust_range()
    306  static inline void __tlb_reset_range(struct mmu_gather *tlb)    in __tlb_reset_range()  argument
    308  if (tlb->fullmm) {    in __tlb_reset_range()
    [all …]
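The __tlb_adjust_range()/__tlb_reset_range() hits above show the range bookkeeping behind mmu_gather batching: each adjust call widens the pending flush window to cover the newly unmapped addresses, while the fullmm case is special-cased. Below is a minimal userspace analogue of that bookkeeping only; struct gather_range, range_reset(), range_adjust() and FAKE_TASK_SIZE are made-up names for illustration, not kernel code.

#include <stdbool.h>
#include <stdio.h>

#define FAKE_TASK_SIZE 0x800000000000UL	/* stand-in for the kernel's TASK_SIZE */

/* Simplified stand-in for the range-tracking fields of struct mmu_gather. */
struct gather_range {
	unsigned long start;
	unsigned long end;
	bool fullmm;
};

/* Analogue of __tlb_reset_range(): a full-mm teardown needs no per-range
 * tracking; otherwise start/end are primed so the first adjust call wins. */
static void range_reset(struct gather_range *g)
{
	if (g->fullmm) {
		g->start = g->end = ~0UL;
	} else {
		g->start = FAKE_TASK_SIZE;
		g->end = 0;
	}
}

/* Analogue of __tlb_adjust_range(): widen the pending flush window so it
 * covers [address, address + range_size). */
static void range_adjust(struct gather_range *g,
			 unsigned long address, unsigned long range_size)
{
	if (address < g->start)
		g->start = address;
	if (address + range_size > g->end)
		g->end = address + range_size;
}

int main(void)
{
	struct gather_range g = { .fullmm = false };

	range_reset(&g);
	range_adjust(&g, 0x1000, 0x1000);
	range_adjust(&g, 0x5000, 0x2000);
	printf("flush range: [%#lx, %#lx)\n", g.start, g.end);	/* [0x1000, 0x7000) */
	return 0;
}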

4level-fixup.h
    31  #define pud_free_tlb(tlb, x, addr) do { } while (0)    argument
    33  #define __pud_free_tlb(tlb, x, addr) do { } while (0)    argument

5level-fixup.h
    52  #define p4d_free_tlb(tlb, x, addr) do { } while (0)    argument
    54  #define __p4d_free_tlb(tlb, x, addr) do { } while (0)    argument

hugetlb.h
    44  static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,    in hugetlb_free_pgd_range()  argument
    48  free_pgd_range(tlb, addr, end, floor, ceiling);    in hugetlb_free_pgd_range()

pgtable-nop4d-hack.h
    58  #define __pud_free_tlb(tlb, x, a) do { } while (0)    argument

pgtable-nopmd.h
    63  #define __pmd_free_tlb(tlb, x, a) do { } while (0)    argument

pgtable-nopud.h
    62  #define __pud_free_tlb(tlb, x, a) do { } while (0)    argument

pgtable-nop4d.h
    53  #define __p4d_free_tlb(tlb, x, a) do { } while (0)    argument
/include/linux/

io-pgtable.h
    112  const struct iommu_flush_ops *tlb;    member
    240  iop->cfg.tlb->tlb_flush_all(iop->cookie);    in io_pgtable_tlb_flush_all()
    247  iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie);    in io_pgtable_tlb_flush_walk()
    254  iop->cfg.tlb->tlb_flush_leaf(iova, size, granule, iop->cookie);    in io_pgtable_tlb_flush_leaf()
    262  if (iop->cfg.tlb->tlb_add_page)    in io_pgtable_tlb_add_page()
    263  iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie);    in io_pgtable_tlb_add_page()
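The io-pgtable.h hits show the indirection used for IOMMU TLB maintenance: every helper dispatches through the iommu_flush_ops table referenced from io_pgtable_cfg, handing the driver back its cookie, and the optional tlb_add_page hook is NULL-checked first. Below is a stripped-down sketch of that ops-plus-cookie dispatch pattern; flush_ops, pgtable and the demo driver are hypothetical stand-ins, and the real helpers take extra arguments (such as the gather state) that are omitted here.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for struct iommu_flush_ops: driver callbacks plus
 * an opaque cookie handed back on every call. */
struct flush_ops {
	void (*flush_all)(void *cookie);
	void (*flush_walk)(unsigned long iova, size_t size, size_t granule,
			   void *cookie);
	void (*add_page)(unsigned long iova, size_t granule, void *cookie);
};

struct pgtable {
	const struct flush_ops *tlb;	/* mirrors io_pgtable_cfg::tlb */
	void *cookie;			/* mirrors the driver cookie */
};

/* Mirrors the io_pgtable_tlb_flush_all() pattern: unconditional dispatch. */
static void pgtable_tlb_flush_all(struct pgtable *iop)
{
	iop->tlb->flush_all(iop->cookie);
}

/* Mirrors the io_pgtable_tlb_add_page() pattern: the hook is optional,
 * so it is NULL-checked before dispatch. */
static void pgtable_tlb_add_page(struct pgtable *iop,
				 unsigned long iova, size_t granule)
{
	if (iop->tlb->add_page)
		iop->tlb->add_page(iova, granule, iop->cookie);
}

/* Example "driver": just logs what it is asked to invalidate. */
static void demo_flush_all(void *cookie)
{
	printf("%s: flush all\n", (const char *)cookie);
}

int main(void)
{
	static const struct flush_ops demo_ops = { .flush_all = demo_flush_all };
	struct pgtable iop = { .tlb = &demo_ops, .cookie = "demo-iommu" };

	pgtable_tlb_flush_all(&iop);
	pgtable_tlb_add_page(&iop, 0x1000, 4096);	/* silently skipped: no hook */
	return 0;
}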

swiotlb.h
    34  int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
    37  extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);

hugetlb.h
    80  void __unmap_hugepage_range_final(struct mmu_gather *tlb,
    84  void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
    186  #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })    argument
    204  static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,    in __unmap_hugepage_range_final()  argument
    211  static inline void __unmap_hugepage_range(struct mmu_gather *tlb,    in __unmap_hugepage_range()  argument

huge_mm.h
    32  extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
    35  extern int zap_huge_pmd(struct mmu_gather *tlb,
    38  extern int zap_huge_pud(struct mmu_gather *tlb,

mm_types.h
    571  extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
    573  extern void tlb_finish_mmu(struct mmu_gather *tlb,

mm.h
    1496  void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
    1501  void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
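Taken together, the mm_types.h and mm.h declarations outline the usual teardown sequence: prime an mmu_gather for the range, unmap the VMAs into it, prune the emptied page tables, then flush and free in one pass. The sketch below is hedged and kernel-internal only (it assumes the start/end-taking variants declared above and would not build outside a kernel tree of this vintage); real callers such as unmap_region() reach free_pgd_range() via free_pgtables() and derive floor/ceiling from neighbouring VMAs, so sketch_teardown_range() is illustrative, not an existing kernel function.

#include <linux/mm.h>
#include <asm/tlb.h>

/* Sketch only: tear down [start, end) of a process address space. */
static void sketch_teardown_range(struct mm_struct *mm,
				  struct vm_area_struct *vma,
				  unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);	/* prime the batch for this range */
	unmap_vmas(&tlb, vma, start, end);	/* clear PTEs, queue pages for freeing */
	free_pgd_range(&tlb, start, end,	/* prune now-empty page-table pages */
		       FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
	tlb_finish_mmu(&tlb, start, end);	/* flush the TLB, then free queued pages */
}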

/include/trace/events/

tlb.h
    3  #define TRACE_SYSTEM tlb