/kernel/linux/linux-5.10/drivers/gpu/drm/i915/ |
D | i915_vma.c |
     51  void i915_vma_free(struct i915_vma *vma)
     53          return kmem_cache_free(global.slab_vmas, vma);
     60  static void vma_print_allocator(struct i915_vma *vma, const char *reason)
     66          if (!vma->node.stack) {
     68                  vma->node.start, vma->node.size, reason);
     72          nr_entries = stack_depot_fetch(vma->node.stack, &entries);
     75                  vma->node.start, vma->node.size, reason, buf);
     80  static void vma_print_allocator(struct i915_vma *vma, const char *reason)
    108          struct i915_vma *vma;
    114          vma = i915_vma_alloc();
    [all …]
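
Note: i915_vma_alloc()/i915_vma_free() above follow the dedicated-slab-cache idiom. A minimal sketch of that idiom, with hypothetical names (struct my_vma, my_vma_cache) standing in for the driver's global.slab_vmas:

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct my_vma {
            u64 start;
            u64 size;
    };

    /* One cache for all my_vma objects, created once at init time. */
    static struct kmem_cache *my_vma_cache;

    static int __init my_vma_cache_init(void)
    {
            my_vma_cache = KMEM_CACHE(my_vma, SLAB_HWCACHE_ALIGN);
            return my_vma_cache ? 0 : -ENOMEM;
    }

    static struct my_vma *my_vma_alloc(void)
    {
            /* Zeroed allocation from the dedicated cache. */
            return kmem_cache_zalloc(my_vma_cache, GFP_KERNEL);
    }

    static void my_vma_free(struct my_vma *vma)
    {
            kmem_cache_free(my_vma_cache, vma);
    }
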
|
D | i915_vma.h |
     50  static inline bool i915_vma_is_active(const struct i915_vma *vma)
     52          return !i915_active_is_idle(&vma->active);
     55  int __must_check __i915_vma_move_to_active(struct i915_vma *vma,
     57  int __must_check i915_vma_move_to_active(struct i915_vma *vma,
     63  static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
     65          return test_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
     68  static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma)
     70          return test_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags(vma));
     73  static inline void i915_vma_set_ggtt_write(struct i915_vma *vma)
     75          GEM_BUG_ON(!i915_vma_is_ggtt(vma));
    [all …]
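
Note: these helpers wrap atomic bitops over a single per-vma flags word. The idiom generalizes; a sketch with made-up flag names (not the i915 ones):

    #include <linux/bitops.h>
    #include <linux/types.h>

    #define MY_VMA_GGTT_BIT         0
    #define MY_VMA_GGTT_WRITE_BIT   1

    struct my_vma {
            unsigned long flags;
    };

    static inline bool my_vma_is_ggtt(const struct my_vma *vma)
    {
            return test_bit(MY_VMA_GGTT_BIT, &vma->flags);
    }

    static inline void my_vma_set_ggtt_write(struct my_vma *vma)
    {
            /* set_bit() is atomic, so concurrent readers see a
             * consistent flags word without extra locking. */
            set_bit(MY_VMA_GGTT_WRITE_BIT, &vma->flags);
    }
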
|
D | i915_gem_evict.c |
     53                  struct i915_vma *vma,
     57          if (i915_vma_is_pinned(vma))
     60          list_add(&vma->evict_link, unwind);
     61          return drm_mm_scan_add_block(scan, &vma->node);
     96          struct i915_vma *vma, *next;
    130          list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) {
    131                  if (vma == active) { /* now seen this vma twice */
    153                  if (active != ERR_PTR(-EAGAIN) && i915_vma_is_active(vma)) {
    155                          active = vma;
    157                  list_move_tail(&vma->vm_link, &vm->bound_list);
    [all …]
|
/kernel/linux/linux-5.10/mm/ |
D | mmap.c |
     87          struct vm_area_struct *vma, struct vm_area_struct *prev,
    132  void vma_set_page_prot(struct vm_area_struct *vma)
    134          unsigned long vm_flags = vma->vm_flags;
    137          vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
    138          if (vma_wants_writenotify(vma, vm_page_prot)) {
    143          WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
    149  static void __remove_shared_vm_struct(struct vm_area_struct *vma,
    152          if (vma->vm_flags & VM_DENYWRITE)
    154          if (vma->vm_flags & VM_SHARED)
    158          vma_interval_tree_remove(vma, &mapping->i_mmap);
    [all …]
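
Note: every mmap(2) from userspace ends up creating or merging one of the vm_area_structs this file manages. A small userspace probe (an illustration, not part of mm/mmap.c) that makes a mapping and prints the vma line the kernel created for it:

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 4096;
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }

            /* Find our mapping in /proc/self/maps. */
            char line[256];
            FILE *maps = fopen("/proc/self/maps", "r");
            while (maps && fgets(line, sizeof(line), maps)) {
                    unsigned long start, end;
                    if (sscanf(line, "%lx-%lx", &start, &end) == 2 &&
                        (unsigned long)p >= start && (unsigned long)p < end)
                            fputs(line, stdout);    /* e.g. "... rw-p ..." */
            }
            if (maps)
                    fclose(maps);
            munmap(p, len);
            return 0;
    }
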
|
D | mremap.c |
     59  static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
     84  static void take_rmap_locks(struct vm_area_struct *vma)
     86          if (vma->vm_file)
     87                  i_mmap_lock_write(vma->vm_file->f_mapping);
     88          if (vma->anon_vma)
     89                  anon_vma_lock_write(vma->anon_vma);
     92  static void drop_rmap_locks(struct vm_area_struct *vma)
     94          if (vma->anon_vma)
     95                  anon_vma_unlock_write(vma->anon_vma);
     96          if (vma->vm_file)
    [all …]
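
Note: the locking helpers above serialize against rmap walkers while mremap(2) moves a vma's page tables. From userspace the whole machinery is driven by one call; a minimal grow-and-maybe-move demo (assumes glibc, which exposes MREMAP_MAYMOVE under _GNU_SOURCE):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED)
                    return 1;
            strcpy(p, "survives the move");

            /* Grow to two pages; the kernel may relocate the vma and
             * migrate its page tables (the path the helpers above lock). */
            char *q = mremap(p, 4096, 8192, MREMAP_MAYMOVE);
            if (q == MAP_FAILED)
                    return 1;

            printf("%p -> %p: %s\n", (void *)p, (void *)q, q);
            munmap(q, 8192);
            return 0;
    }
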
|
D | nommu.c |
    100          struct vm_area_struct *vma;
    102          vma = find_vma(current->mm, (unsigned long)objp);
    103          if (vma)
    104                  return vma->vm_end - vma->vm_start;
    124  int follow_pfn(struct vm_area_struct *vma, unsigned long address,
    127          if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
    173          struct vm_area_struct *vma;
    176          vma = find_vma(current->mm, (unsigned long)ret);
    177          if (vma)
    178                  vma->vm_flags |= VM_USERMAP;
    [all …]
|
D | madvise.c |
     89  struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
     91          mmap_assert_locked(vma->vm_mm);
     93          if (vma->vm_file)
     96          return vma->anon_name;
    100  static int replace_anon_vma_name(struct vm_area_struct *vma,
    103          struct anon_vma_name *orig_name = anon_vma_name(vma);
    106                  vma->anon_name = NULL;
    114          vma->anon_name = anon_vma_name_reuse(anon_name);
    120  static int replace_anon_vma_name(struct vm_area_struct *vma,
    135  static int madvise_update_vma(struct vm_area_struct *vma,
    [all …]
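
Note: madvise(2) applies advice vma by vma, splitting or merging vm_area_structs where ranges do not line up (madvise_update_vma() above is part of that plumbing). A small userspace demo of a common hint:

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 2 * 1024 * 1024;
            char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED)
                    return 1;

            p[0] = 1;       /* fault one page in */

            /* Discard the contents; the next touch sees zero pages. */
            if (madvise(p, len, MADV_DONTNEED))
                    perror("madvise");

            printf("after MADV_DONTNEED: p[0] = %d\n", p[0]);  /* 0 */
            munmap(p, len);
            return 0;
    }
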
|
D | mprotect.c |
     41  static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
     68          pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
     71          if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
     72              atomic_read(&vma->vm_mm->mm_users) == 1)
     75          flush_tlb_batched_pending(vma->vm_mm);
     94                          page = vm_normal_page(vma, addr, oldpte);
     99                          if (is_cow_mapping(vma->vm_flags) &&
    119                          oldpte = ptep_modify_prot_start(vma, addr, pte);
    140                              !(vma->vm_flags & VM_SOFTDIRTY))) {
    146                              unlikely(xpm_integrity_validate_hook(vma, 0, addr,
    [all …]
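
Note: change_pte_range() is the leaf of the mprotect(2) path; it walks one vma's PTEs and rewrites them with the new protection. The userspace view of the same operation:

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED)
                    return 1;
            p[0] = 42;

            /* Drop write permission: the kernel rewrites this vma's
             * PTEs (change_pte_range() above) and flushes the TLB. */
            if (mprotect(p, 4096, PROT_READ))
                    return 1;

            printf("read still works: %d\n", p[0]);
            /* p[0] = 1; here would now die with SIGSEGV */
            munmap(p, 4096);
            return 0;
    }
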
|
D | memory.c |
    392  void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
    395          while (vma) {
    396                  struct vm_area_struct *next = vma->vm_next;
    397                  unsigned long addr = vma->vm_start;
    403                  unlink_anon_vmas(vma);
    404                  unlink_file_vma(vma);
    406                  if (is_vm_hugetlb_page(vma)) {
    407                          hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
    413                          while (next && next->vm_start <= vma->vm_end + PMD_SIZE
    415                                  vma = next;
    [all …]
|
D | mlock.c |
    380                  struct vm_area_struct *vma, struct zone *zone,
    391          pte = get_locked_pte(vma->vm_mm, start, &ptl);
    404                  page = vm_normal_page(vma, start, *pte);
    450  void munlock_vma_pages_range(struct vm_area_struct *vma,
    453          vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
    470                  page = follow_page(vma, start, FOLL_GET | FOLL_DUMP);
    502                          start = __munlock_pagevec_fill(&pvec, vma,
    524  static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
    527          struct mm_struct *mm = vma->vm_mm;
    532          vm_flags_t old_flags = vma->vm_flags;
    [all …]
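
Note: mlock(2)/munlock(2) drive this file; munlock lands in munlock_vma_pages_range() above, which clears VM_LOCKED and returns the pages to the normal LRU. A minimal userspace round trip:

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 4096;
            char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED)
                    return 1;

            /* mlock sets VM_LOCKED and faults the pages in; it may fail
             * with EPERM/ENOMEM if RLIMIT_MEMLOCK is too small. */
            if (mlock(p, len)) {
                    perror("mlock");
                    return 1;
            }
            if (munlock(p, len))
                    perror("munlock");

            munmap(p, len);
            return 0;
    }
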
|
D | rmap.c |
    146  static void anon_vma_chain_link(struct vm_area_struct *vma,
    150          avc->vma = vma;
    152          list_add(&avc->same_vma, &vma->anon_vma_chain);
    184  int __anon_vma_prepare(struct vm_area_struct *vma)
    186          struct mm_struct *mm = vma->vm_mm;
    196                  anon_vma = find_mergeable_anon_vma(vma);
    209          if (likely(!vma->anon_vma)) {
    210                  vma->anon_vma = anon_vma;
    211                  anon_vma_chain_link(vma, avc, anon_vma);
    330  int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
    [all …]
|
D | pgtable-generic.c |
     64  int ptep_set_access_flags(struct vm_area_struct *vma,
     70                  set_pte_at(vma->vm_mm, address, ptep, entry);
     71                  flush_tlb_fix_spurious_fault(vma, address);
     78  int ptep_clear_flush_young(struct vm_area_struct *vma,
     82          young = ptep_test_and_clear_young(vma, address, ptep);
     84                  flush_tlb_page(vma, address);
     90  pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
     93          struct mm_struct *mm = (vma)->vm_mm;
     97                  flush_tlb_page(vma, address);
    105  int pmdp_set_access_flags(struct vm_area_struct *vma,
    [all …]
|
/kernel/linux/linux-5.10/drivers/gpu/drm/ |
D | drm_vm.c |
     61          struct vm_area_struct *vma;
     65  static void drm_vm_open(struct vm_area_struct *vma);
     66  static void drm_vm_close(struct vm_area_struct *vma);
     69                           struct vm_area_struct *vma)
     71          pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
     83                  if (efi_range_is_wc(vma->vm_start, vma->vm_end -
     84                                      vma->vm_start))
     94  static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
     96          pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
    117          struct vm_area_struct *vma = vmf->vma;
    [all …]
|
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/ |
D | nouveau_vmm.c |
     29  nouveau_vma_unmap(struct nouveau_vma *vma)
     31          if (vma->mem) {
     32                  nvif_vmm_unmap(&vma->vmm->vmm, vma->addr);
     33                  vma->mem = NULL;
     38  nouveau_vma_map(struct nouveau_vma *vma, struct nouveau_mem *mem)
     40          struct nvif_vma tmp = { .addr = vma->addr };
     41          int ret = nouveau_mem_map(mem, &vma->vmm->vmm, &tmp);
     44          vma->mem = mem;
     51          struct nouveau_vma *vma;
     53          list_for_each_entry(vma, &nvbo->vma_list, head) {
    [all …]
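
Note: the nouveau_vma_find() loop is truncated above; a plausible completion, where the match condition is my reading of the fields shown rather than a verified copy of the full file:

    struct nouveau_vma *
    nouveau_vma_find(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm)
    {
            struct nouveau_vma *vma;

            list_for_each_entry(vma, &nvbo->vma_list, head) {
                    if (vma->vmm == vmm)    /* assumed match condition */
                            return vma;
            }
            return NULL;
    }
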
|
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ |
D | vmm.c |
    750          struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
    751          if (vma) {
    752                  vma->addr = addr;
    753                  vma->size = size;
    754                  vma->page = NVKM_VMA_PAGE_NONE;
    755                  vma->refd = NVKM_VMA_PAGE_NONE;
    757          return vma;
    761  nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
    765          BUG_ON(vma->size == tail);
    767          if (!(new = nvkm_vma_new(vma->addr + (vma->size - tail), tail)))
    [all …]
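
Note: nvkm_vma_tail() splits the last `tail` bytes of a vma into a new node. A hedged sketch of the likely remainder of the function, based only on the lines visible above (the real code also copies mapping state and relinks the vmm's lists):

    static struct nvkm_vma *
    nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
    {
            struct nvkm_vma *new;

            BUG_ON(vma->size == tail);

            /* Carve [addr + size - tail, addr + size) into a new node
             * and shrink the original to match. */
            if (!(new = nvkm_vma_new(vma->addr + (vma->size - tail), tail)))
                    return NULL;
            vma->size -= tail;

            return new;
    }
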
|
D | uvmm.c |
    116          struct nvkm_vma *vma;
    126          vma = nvkm_vmm_node_search(vmm, addr);
    127          if (ret = -ENOENT, !vma || vma->addr != addr) {
    129                            addr, vma ? vma->addr : ~0ULL);
    133          if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
    135                            vma->user, !client->super, vma->busy);
    139          if (ret = -EINVAL, !vma->memory) {
    144          nvkm_vmm_unmap_locked(vmm, vma, false);
    160          struct nvkm_vma *vma;
    179          if (ret = -ENOENT, !(vma = nvkm_vmm_node_search(vmm, addr))) {
    [all …]
|
/kernel/linux/linux-5.10/drivers/gpu/drm/msm/ |
D | msm_gem_vma.c |
     42                  struct msm_gem_vma *vma)
     44          unsigned size = vma->node.size << PAGE_SHIFT;
     47          if (WARN_ON(vma->inuse > 0))
     51          if (!vma->mapped)
     55          aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);
     57          vma->mapped = false;
     62                  struct msm_gem_vma *vma)
     64          if (!WARN_ON(!vma->iova))
     65                  vma->inuse--;
     70                  struct msm_gem_vma *vma, int prot,
    [all …]
|
/kernel/linux/linux-5.10/drivers/pci/ |
D | mmap.c |
     23                          struct vm_area_struct *vma,
     31          vma->vm_pgoff -= start >> PAGE_SHIFT;
     32          return pci_mmap_resource_range(pdev, bar, vma, mmap_state,
     44                              struct vm_area_struct *vma,
     51          if (vma->vm_pgoff + vma_pages(vma) > size)
     55                  vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
     57                  vma->vm_page_prot = pgprot_device(vma->vm_page_prot);
     60                  ret = pci_iobar_pfn(pdev, bar, vma);
     64          vma->vm_pgoff += (pci_resource_start(pdev, bar) >> PAGE_SHIFT);
     66          vma->vm_ops = &pci_phys_vm_ops;
    [all …]
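
Note: userspace typically reaches pci_mmap_resource_range() by mmap()ing a BAR through sysfs. A hedged sketch (the device path is a placeholder; this needs root and a device whose BAR 0 is actually mappable):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            /* Placeholder address: substitute a real domain:bus:dev.fn. */
            const char *path = "/sys/bus/pci/devices/0000:00:02.0/resource0";
            int fd = open(path, O_RDWR | O_SYNC);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            /* Offset 0 maps the start of BAR 0; the kernel applies
             * pgprot_device()/pgprot_writecombine() as seen above. */
            volatile uint32_t *bar = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                                          MAP_SHARED, fd, 0);
            if (bar == MAP_FAILED) {
                    perror("mmap");
                    close(fd);
                    return 1;
            }

            printf("first register: 0x%08x\n", bar[0]);
            munmap((void *)bar, 4096);
            close(fd);
            return 0;
    }
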
|
/kernel/linux/linux-5.10/include/linux/ |
D | userfaultfd_k.h |
     49  static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
     52          return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;
     55  static inline bool userfaultfd_missing(struct vm_area_struct *vma)
     57          return vma->vm_flags & VM_UFFD_MISSING;
     60  static inline bool userfaultfd_wp(struct vm_area_struct *vma)
     62          return vma->vm_flags & VM_UFFD_WP;
     65  static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma,
     68          return userfaultfd_wp(vma) && pte_uffd_wp(pte);
     71  static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma,
     74          return userfaultfd_wp(vma) && pmd_uffd_wp(pmd);
    [all …]
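
Note: these predicates test the VM_UFFD_* vma flags that UFFDIO_REGISTER sets. A minimal userspace sketch of getting a vma into that state (fault handling omitted; unprivileged use may be disabled via the vm.unprivileged_userfaultfd sysctl):

    #include <fcntl.h>
    #include <linux/userfaultfd.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            long uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
            if (uffd < 0)
                    return 1;

            struct uffdio_api api = { .api = UFFD_API };
            if (ioctl(uffd, UFFDIO_API, &api))
                    return 1;

            size_t len = 4096;
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED)
                    return 1;

            /* After this ioctl the vma carries VM_UFFD_MISSING, which
             * is exactly what userfaultfd_missing() above tests. */
            struct uffdio_register reg = {
                    .range = { .start = (unsigned long)p, .len = len },
                    .mode  = UFFDIO_REGISTER_MODE_MISSING,
            };
            if (ioctl(uffd, UFFDIO_REGISTER, &reg))
                    perror("UFFDIO_REGISTER");

            return 0;
    }
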
|
D | huge_mm.h |
     17                    struct vm_area_struct *vma);
     28  struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
     31  bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
     33  int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
     35  int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
     37  bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
     39  int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
     58          return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
     77          return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
    121  static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
    [all …]
|
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/selftests/ |
D | i915_gem_gtt.c |
    354          struct i915_vma *vma;
    356                  vma = i915_vma_instance(obj, vm, NULL);
    357                  if (!IS_ERR(vma))
    358                          ignored = i915_vma_unbind(vma);
    375          struct i915_vma *vma;
    413                                  vma = i915_vma_instance(obj, vm, NULL);
    414                                  if (IS_ERR(vma))
    423                                  err = i915_vma_pin(vma, 0, 0, offset | flags);
    430                                  if (!drm_mm_node_allocated(&vma->node) ||
    431                                      i915_vma_misplaced(vma, 0, 0, offset | flags)) {
    [all …]
|
D | i915_vma.c |
     36  static bool assert_vma(struct i915_vma *vma,
     42          if (vma->vm != rcu_access_pointer(ctx->vm)) {
     47          if (vma->size != obj->base.size) {
     49                  vma->size, obj->base.size);
     53          if (vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) {
     55                  vma->ggtt_view.type);
     67          struct i915_vma *vma;
     70          vma = i915_vma_instance(obj, vm, view);
     71          if (IS_ERR(vma))
     72                  return vma;
    [all …]
|
/kernel/linux/linux-5.10/arch/powerpc/include/asm/book3s/64/ |
D | tlbflush.h |
     50  static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
     54                  return radix__flush_pmd_tlb_range(vma, start, end);
     55          return hash__flush_tlb_range(vma, start, end);
     59  static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
     64                  return radix__flush_hugetlb_tlb_range(vma, start, end);
     65          return hash__flush_tlb_range(vma, start, end);
     68  static inline void flush_tlb_range(struct vm_area_struct *vma,
     72                  return radix__flush_tlb_range(vma, start, end);
     73          return hash__flush_tlb_range(vma, start, end);
     91  static inline void local_flush_tlb_page(struct vm_area_struct *vma,
    [all …]
|
/kernel/linux/linux-5.10/arch/x86/entry/vdso/ |
D | vma.c |
     60                  struct vm_area_struct *vma, struct vm_fault *vmf)
     62          const struct vdso_image *image = vma->vm_mm->context.vdso_image;
    117  static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
    119          if (likely(vma->vm_mm == current->mm))
    145          struct vm_area_struct *vma;
    149          for (vma = mm->mmap; vma; vma = vma->vm_next) {
    150                  unsigned long size = vma->vm_end - vma->vm_start;
    152                  if (vma_is_special_mapping(vma, &vvar_mapping))
    153                          zap_page_range(vma, vma->vm_start, size);
    160  static inline struct page *find_timens_vvar_page(struct vm_area_struct *vma)
    [all …]
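
Note: the vdso mapping this file installs is advertised to each process through the auxiliary vector; a one-liner userspace check:

    #include <stdio.h>
    #include <sys/auxv.h>

    int main(void)
    {
            /* AT_SYSINFO_EHDR is the address of the [vdso] ELF image
             * that arch/x86/entry/vdso/vma.c mapped into this process. */
            unsigned long vdso = getauxval(AT_SYSINFO_EHDR);
            printf("[vdso] mapped at %#lx\n", vdso);
            return 0;
    }
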
|
/kernel/linux/linux-5.10/fs/proc/ |
D | task_mmu.c |
    145          struct vm_area_struct *vma;
    172          vma = find_vma(mm, last_addr);
    173          if (vma)
    174                  return vma;
    182          struct vm_area_struct *next, *vma = v;
    184          if (vma == priv->tail_vma)
    186          else if (vma->vm_next)
    187                  next = vma->vm_next;
    253  static int is_stack(struct vm_area_struct *vma)
    260          return vma->vm_start <= vma->vm_mm->start_stack &&
    [all …]
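
Note: m_start()/m_next() above iterate the task's vma list to produce /proc/<pid>/maps, and is_stack() decides which vma gets the "[stack]" annotation. Reading the result back from userspace:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char line[256];
            FILE *maps = fopen("/proc/self/maps", "r");
            if (!maps)
                    return 1;

            /* Print only the specially annotated vmas. */
            while (fgets(line, sizeof(line), maps)) {
                    if (strstr(line, "[stack]") || strstr(line, "[heap]"))
                            fputs(line, stdout);
            }
            fclose(maps);
            return 0;
    }
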
|