
Searched refs:vma (Results 1 – 25 of 394) sorted by relevance


/drivers/gpu/drm/i915/
i915_vma.c
48 void i915_vma_free(struct i915_vma *vma) in i915_vma_free() argument
50 return kmem_cache_free(global.slab_vmas, vma); in i915_vma_free()
57 static void vma_print_allocator(struct i915_vma *vma, const char *reason) in vma_print_allocator() argument
63 if (!vma->node.stack) { in vma_print_allocator()
65 vma->node.start, vma->node.size, reason); in vma_print_allocator()
69 nr_entries = stack_depot_fetch(vma->node.stack, &entries); in vma_print_allocator()
72 vma->node.start, vma->node.size, reason, buf); in vma_print_allocator()
77 static void vma_print_allocator(struct i915_vma *vma, const char *reason) in vma_print_allocator() argument
103 struct i915_vma *vma; in vma_create() local
109 vma = i915_vma_alloc(); in vma_create()
[all …]
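
Note: i915_vma_alloc()/i915_vma_free() above wrap a dedicated slab cache
(global.slab_vmas) for the many small, same-sized struct i915_vma objects.
A minimal sketch of that pattern, with hypothetical names (my_vma,
my_vma_cache), not the i915 implementation itself:

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct my_vma {
        u64 start;
        u64 size;
    };

    static struct kmem_cache *my_vma_cache;

    static int __init my_vma_cache_init(void)
    {
        /* A dedicated cache amortises frequent small allocations. */
        my_vma_cache = KMEM_CACHE(my_vma, SLAB_HWCACHE_ALIGN);
        return my_vma_cache ? 0 : -ENOMEM;
    }

    static struct my_vma *my_vma_alloc(void)
    {
        return kmem_cache_zalloc(my_vma_cache, GFP_KERNEL);
    }

    static void my_vma_free(struct my_vma *vma)
    {
        kmem_cache_free(my_vma_cache, vma);
    }
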
i915_vma.h
156 static inline bool i915_vma_is_active(const struct i915_vma *vma) in i915_vma_is_active() argument
158 return !i915_active_is_idle(&vma->active); in i915_vma_is_active()
161 int __must_check i915_vma_move_to_active(struct i915_vma *vma,
165 static inline bool i915_vma_is_ggtt(const struct i915_vma *vma) in i915_vma_is_ggtt() argument
167 return vma->flags & I915_VMA_GGTT; in i915_vma_is_ggtt()
170 static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma) in i915_vma_has_ggtt_write() argument
172 return vma->flags & I915_VMA_GGTT_WRITE; in i915_vma_has_ggtt_write()
175 static inline void i915_vma_set_ggtt_write(struct i915_vma *vma) in i915_vma_set_ggtt_write() argument
177 GEM_BUG_ON(!i915_vma_is_ggtt(vma)); in i915_vma_set_ggtt_write()
178 vma->flags |= I915_VMA_GGTT_WRITE; in i915_vma_set_ggtt_write()
[all …]
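
Note: the i915_vma.h hits show the flag-predicate idiom: static inline
bool helpers that test bits in vma->flags, and setters that assert their
precondition first (i915_vma_set_ggtt_write() requires the vma to be a
GGTT binding). The shape of that idiom, with made-up flag names:

    #include <linux/bits.h>
    #include <linux/bug.h>
    #include <linux/types.h>

    #define MY_VMA_GGTT        BIT(0)
    #define MY_VMA_GGTT_WRITE  BIT(1)

    struct my_vma { unsigned long flags; };

    static inline bool my_vma_is_ggtt(const struct my_vma *vma)
    {
        return vma->flags & MY_VMA_GGTT;
    }

    static inline void my_vma_set_ggtt_write(struct my_vma *vma)
    {
        /* A GGTT-write flag is only meaningful on a GGTT binding. */
        BUG_ON(!my_vma_is_ggtt(vma));
        vma->flags |= MY_VMA_GGTT_WRITE;
    }

(i915 uses GEM_BUG_ON(), which compiles away outside debug builds; plain
BUG_ON() stands in for it here.)
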
i915_gem_fence_reg.c
63 struct i915_vma *vma) in i965_write_fence_reg() argument
81 if (vma) { in i965_write_fence_reg()
82 unsigned int stride = i915_gem_object_get_stride(vma->obj); in i965_write_fence_reg()
84 GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma)); in i965_write_fence_reg()
85 GEM_BUG_ON(!IS_ALIGNED(vma->node.start, I965_FENCE_PAGE)); in i965_write_fence_reg()
86 GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I965_FENCE_PAGE)); in i965_write_fence_reg()
89 val = (vma->node.start + vma->fence_size - I965_FENCE_PAGE) << 32; in i965_write_fence_reg()
90 val |= vma->node.start; in i965_write_fence_reg()
92 if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y) in i965_write_fence_reg()
120 struct i915_vma *vma) in i915_write_fence_reg() argument
[all …]
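
Note: lines 89–90 pack the fence into one 64-bit register write: the
upper half holds the last page covered by the fence, the lower half the
start, both asserted page-aligned by the GEM_BUG_ON()s above them. The
arithmetic in isolation (FENCE_PAGE is an assumed stand-in for
I965_FENCE_PAGE):

    #include <linux/types.h>

    #define FENCE_PAGE 4096  /* assumed fence granularity */

    static u64 pack_fence_val(u64 start, u64 fence_size)
    {
        u64 val;

        /* Upper 32 bits: address of the last fenced page. */
        val = (start + fence_size - FENCE_PAGE) << 32;
        /* Lower 32 bits: address of the first fenced page. */
        val |= start;
        return val;
    }
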
i915_gem_evict.c
57 struct i915_vma *vma, in mark_free() argument
61 if (i915_vma_is_pinned(vma)) in mark_free()
64 list_add(&vma->evict_link, unwind); in mark_free()
65 return drm_mm_scan_add_block(scan, &vma->node); in mark_free()
101 struct i915_vma *vma, *next; in i915_gem_evict_something() local
142 list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) { in i915_gem_evict_something()
158 if (i915_vma_is_active(vma)) { in i915_gem_evict_something()
159 if (vma == active) { in i915_gem_evict_something()
168 active = vma; in i915_gem_evict_something()
170 list_move_tail(&vma->vm_link, &vm->bound_list); in i915_gem_evict_something()
[all …]
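
Note: the eviction scan walks vm->bound_list with the _safe iterator and
collects unpinned VMAs onto a local unwind list via mark_free(). A
sketch of that collection step alone, with hypothetical types and
without the drm_mm scan bookkeeping the real code performs:

    #include <linux/list.h>
    #include <linux/types.h>

    struct my_vma {
        struct list_head vm_link;     /* on the vm's bound_list */
        struct list_head evict_link;  /* on the temporary unwind list */
        bool pinned;
    };

    static void scan_for_eviction(struct list_head *bound_list,
                                  struct list_head *unwind)
    {
        struct my_vma *vma, *next;

        /* _safe form, as in the original, where eviction may unlink
         * entries from bound_list mid-walk. */
        list_for_each_entry_safe(vma, next, bound_list, vm_link) {
            if (vma->pinned)
                continue;
            list_add(&vma->evict_link, unwind);
        }
    }
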
/drivers/gpu/drm/
drm_vm.c
62 struct vm_area_struct *vma; member
66 static void drm_vm_open(struct vm_area_struct *vma);
67 static void drm_vm_close(struct vm_area_struct *vma);
70 struct vm_area_struct *vma) in drm_io_prot() argument
72 pgprot_t tmp = vm_get_page_prot(vma->vm_flags); in drm_io_prot()
84 if (efi_range_is_wc(vma->vm_start, vma->vm_end - in drm_io_prot()
85 vma->vm_start)) in drm_io_prot()
95 static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma) in drm_dma_prot() argument
97 pgprot_t tmp = vm_get_page_prot(vma->vm_flags); in drm_dma_prot()
118 struct vm_area_struct *vma = vmf->vma; in drm_vm_fault() local
[all …]
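
Note: drm_io_prot() derives the mmap protection from the VMA's flags
with vm_get_page_prot() and then adjusts the caching attribute for I/O
memory (write-combining when the range allows it). A trimmed sketch of
that derivation, choosing write-combine unconditionally just to show the
call shape:

    #include <linux/mm.h>

    static pgprot_t my_io_prot(struct vm_area_struct *vma)
    {
        /* Protection implied by VM_READ/VM_WRITE/... on the vma. */
        pgprot_t prot = vm_get_page_prot(vma->vm_flags);

        /* I/O memory is normally mapped write-combined or uncached. */
        return pgprot_writecombine(prot);
    }
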
/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
vmm.c
750 struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); in nvkm_vma_new() local
751 if (vma) { in nvkm_vma_new()
752 vma->addr = addr; in nvkm_vma_new()
753 vma->size = size; in nvkm_vma_new()
754 vma->page = NVKM_VMA_PAGE_NONE; in nvkm_vma_new()
755 vma->refd = NVKM_VMA_PAGE_NONE; in nvkm_vma_new()
757 return vma; in nvkm_vma_new()
761 nvkm_vma_tail(struct nvkm_vma *vma, u64 tail) in nvkm_vma_tail() argument
765 BUG_ON(vma->size == tail); in nvkm_vma_tail()
767 if (!(new = nvkm_vma_new(vma->addr + (vma->size - tail), tail))) in nvkm_vma_tail()
[all …]
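
Note: nvkm_vma_new() is a plain kzalloc-and-initialise constructor, and
nvkm_vma_tail() splits a region by carving the last tail bytes into a
new node. The split arithmetic, sketched with a hypothetical type and
without the page/refcount state the real node carries:

    #include <linux/bug.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct my_vma {
        u64 addr;
        u64 size;
    };

    static struct my_vma *my_vma_new(u64 addr, u64 size)
    {
        struct my_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);

        if (vma) {
            vma->addr = addr;
            vma->size = size;
        }
        return vma;
    }

    /* Carve the last @tail bytes of @vma into a new node. */
    static struct my_vma *my_vma_tail(struct my_vma *vma, u64 tail)
    {
        struct my_vma *new;

        BUG_ON(vma->size == tail);  /* a whole-node "tail" is a caller bug */

        new = my_vma_new(vma->addr + (vma->size - tail), tail);
        if (new)
            vma->size -= tail;
        return new;
    }
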
uvmm.c
116 struct nvkm_vma *vma; in nvkm_uvmm_mthd_unmap() local
126 vma = nvkm_vmm_node_search(vmm, addr); in nvkm_uvmm_mthd_unmap()
127 if (ret = -ENOENT, !vma || vma->addr != addr) { in nvkm_uvmm_mthd_unmap()
129 addr, vma ? vma->addr : ~0ULL); in nvkm_uvmm_mthd_unmap()
133 if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) { in nvkm_uvmm_mthd_unmap()
135 vma->user, !client->super, vma->busy); in nvkm_uvmm_mthd_unmap()
139 if (ret = -EINVAL, !vma->memory) { in nvkm_uvmm_mthd_unmap()
144 nvkm_vmm_unmap_locked(vmm, vma, false); in nvkm_uvmm_mthd_unmap()
160 struct nvkm_vma *vma; in nvkm_uvmm_mthd_map() local
179 if (ret = -ENOENT, !(vma = nvkm_vmm_node_search(vmm, addr))) { in nvkm_uvmm_mthd_map()
[all …]
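
Note: the `if (ret = -ENOENT, ...)` tests in nvkm_uvmm_mthd_unmap() use
the C comma operator: the assignment stages the error code as a side
effect, and the right-hand expression is the actual branch condition, so
ret is only consumed on the error path. The idiom standalone:

    #include <linux/errno.h>
    #include <linux/types.h>

    struct my_vma { u64 addr; };

    static int check_addr(struct my_vma *vma, u64 addr)
    {
        int ret;

        /* Comma operator: stage the error code, then test. */
        if (ret = -ENOENT, !vma || vma->addr != addr)
            return ret;

        return 0;
    }
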
/drivers/gpu/drm/nouveau/
nouveau_vmm.c
29 nouveau_vma_unmap(struct nouveau_vma *vma) in nouveau_vma_unmap() argument
31 if (vma->mem) { in nouveau_vma_unmap()
32 nvif_vmm_unmap(&vma->vmm->vmm, vma->addr); in nouveau_vma_unmap()
33 vma->mem = NULL; in nouveau_vma_unmap()
38 nouveau_vma_map(struct nouveau_vma *vma, struct nouveau_mem *mem) in nouveau_vma_map() argument
40 struct nvif_vma tmp = { .addr = vma->addr }; in nouveau_vma_map()
41 int ret = nouveau_mem_map(mem, &vma->vmm->vmm, &tmp); in nouveau_vma_map()
44 vma->mem = mem; in nouveau_vma_map()
51 struct nouveau_vma *vma; in nouveau_vma_find() local
53 list_for_each_entry(vma, &nvbo->vma_list, head) { in nouveau_vma_find()
[all …]
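
Note: nouveau_vma_find() is a linear list_for_each_entry() search over
the buffer object's vma_list. The lookup shape, with hypothetical types
(the match key inside the loop body is not shown above and is assumed
here):

    #include <linux/list.h>

    struct my_vma {
        struct list_head head;  /* link on the object's vma_list */
        void *vmm;              /* address space this vma belongs to */
    };

    struct my_bo {
        struct list_head vma_list;
    };

    static struct my_vma *my_vma_find(struct my_bo *bo, void *vmm)
    {
        struct my_vma *vma;

        list_for_each_entry(vma, &bo->vma_list, head) {
            if (vma->vmm == vmm)
                return vma;
        }
        return NULL;
    }
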
/drivers/gpu/drm/i915/selftests/
i915_gem_gtt.c
328 struct i915_vma *vma; in close_object_list() local
330 vma = i915_vma_instance(obj, vm, NULL); in close_object_list()
331 if (!IS_ERR(vma)) in close_object_list()
332 ignored = i915_vma_unbind(vma); in close_object_list()
334 if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma)) in close_object_list()
335 i915_vma_close(vma); in close_object_list()
353 struct i915_vma *vma; in fill_hole() local
391 vma = i915_vma_instance(obj, vm, NULL); in fill_hole()
392 if (IS_ERR(vma)) in fill_hole()
401 err = i915_vma_pin(vma, 0, 0, offset | flags); in fill_hole()
[all …]
i915_vma.c
35 static bool assert_vma(struct i915_vma *vma, in assert_vma() argument
41 if (vma->vm != ctx->vm) { in assert_vma()
46 if (vma->size != obj->base.size) { in assert_vma()
48 vma->size, obj->base.size); in assert_vma()
52 if (vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) { in assert_vma()
54 vma->ggtt_view.type); in assert_vma()
66 struct i915_vma *vma; in checked_vma_instance() local
69 vma = i915_vma_instance(obj, vm, view); in checked_vma_instance()
70 if (IS_ERR(vma)) in checked_vma_instance()
71 return vma; in checked_vma_instance()
[all …]
/drivers/pci/
mmap.c
23 struct vm_area_struct *vma, in pci_mmap_page_range() argument
31 vma->vm_pgoff -= start >> PAGE_SHIFT; in pci_mmap_page_range()
32 return pci_mmap_resource_range(pdev, bar, vma, mmap_state, in pci_mmap_page_range()
44 struct vm_area_struct *vma, in pci_mmap_resource_range() argument
51 if (vma->vm_pgoff + vma_pages(vma) > size) in pci_mmap_resource_range()
55 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); in pci_mmap_resource_range()
57 vma->vm_page_prot = pgprot_device(vma->vm_page_prot); in pci_mmap_resource_range()
60 ret = pci_iobar_pfn(pdev, bar, vma); in pci_mmap_resource_range()
64 vma->vm_pgoff += (pci_resource_start(pdev, bar) >> PAGE_SHIFT); in pci_mmap_resource_range()
66 vma->vm_ops = &pci_phys_vm_ops; in pci_mmap_resource_range()
[all …]
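
Note: pci_mmap_resource_range() first rejects a window that runs past
the BAR (vm_pgoff plus vma_pages() against the resource size in pages),
then picks write-combine or device protection before installing vm_ops.
A sketch of those two steps, with the resource size passed in:

    #include <linux/errno.h>
    #include <linux/mm.h>

    static int my_mmap_resource(struct vm_area_struct *vma,
                                unsigned long res_pages, bool write_combine)
    {
        /* The requested window must fit inside the resource. */
        if (vma->vm_pgoff + vma_pages(vma) > res_pages)
            return -EINVAL;

        if (write_combine)
            vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        else
            vma->vm_page_prot = pgprot_device(vma->vm_page_prot);

        return 0;
    }
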
/drivers/gpu/drm/i915/gem/selftests/
huge_pages.c
331 static int igt_check_page_sizes(struct i915_vma *vma) in igt_check_page_sizes() argument
333 struct drm_i915_private *i915 = vma->vm->i915; in igt_check_page_sizes()
335 struct drm_i915_gem_object *obj = vma->obj; in igt_check_page_sizes()
338 if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) { in igt_check_page_sizes()
340 vma->page_sizes.sg & ~supported, supported); in igt_check_page_sizes()
344 if (!HAS_PAGE_SIZES(i915, vma->page_sizes.gtt)) { in igt_check_page_sizes()
346 vma->page_sizes.gtt & ~supported, supported); in igt_check_page_sizes()
350 if (vma->page_sizes.phys != obj->mm.page_sizes.phys) { in igt_check_page_sizes()
352 vma->page_sizes.phys, obj->mm.page_sizes.phys); in igt_check_page_sizes()
356 if (vma->page_sizes.sg != obj->mm.page_sizes.sg) { in igt_check_page_sizes()
[all …]
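
Note: igt_check_page_sizes() validates page-size masks by testing for
set bits outside the supported set; its error messages print exactly
those bits (sg & ~supported). The check reduces to:

    /* True iff every bit in @sizes is also in @supported. */
    static bool page_sizes_valid(unsigned int sizes, unsigned int supported)
    {
        return (sizes & ~supported) == 0;
    }
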
igt_gem_utils.c
39 igt_emit_store_dw(struct i915_vma *vma, in igt_emit_store_dw() argument
45 const int gen = INTEL_GEN(vma->vm->i915); in igt_emit_store_dw()
52 obj = i915_gem_object_create_internal(vma->vm->i915, size); in igt_emit_store_dw()
62 GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size); in igt_emit_store_dw()
63 offset += vma->node.start; in igt_emit_store_dw()
87 vma = i915_vma_instance(obj, vma->vm, NULL); in igt_emit_store_dw()
88 if (IS_ERR(vma)) { in igt_emit_store_dw()
89 err = PTR_ERR(vma); in igt_emit_store_dw()
93 err = i915_vma_pin(vma, 0, 0, PIN_USER); in igt_emit_store_dw()
97 return vma; in igt_emit_store_dw()
[all …]
/drivers/gpu/drm/msm/
msm_gem_vma.c
32 struct msm_gem_vma *vma) in msm_gem_purge_vma() argument
34 unsigned size = vma->node.size << PAGE_SHIFT; in msm_gem_purge_vma()
37 if (WARN_ON(vma->inuse > 0)) in msm_gem_purge_vma()
41 if (!vma->mapped) in msm_gem_purge_vma()
45 aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size); in msm_gem_purge_vma()
47 vma->mapped = false; in msm_gem_purge_vma()
52 struct msm_gem_vma *vma) in msm_gem_unmap_vma() argument
54 if (!WARN_ON(!vma->iova)) in msm_gem_unmap_vma()
55 vma->inuse--; in msm_gem_unmap_vma()
60 struct msm_gem_vma *vma, int prot, in msm_gem_map_vma() argument
[all …]
msm_gem.c
209 struct vm_area_struct *vma) in msm_gem_mmap_obj() argument
213 vma->vm_flags &= ~VM_PFNMAP; in msm_gem_mmap_obj()
214 vma->vm_flags |= VM_MIXEDMAP; in msm_gem_mmap_obj()
217 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); in msm_gem_mmap_obj()
219 vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags)); in msm_gem_mmap_obj()
226 fput(vma->vm_file); in msm_gem_mmap_obj()
228 vma->vm_pgoff = 0; in msm_gem_mmap_obj()
229 vma->vm_file = obj->filp; in msm_gem_mmap_obj()
231 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); in msm_gem_mmap_obj()
237 int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma) in msm_gem_mmap() argument
[all …]
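
Note: msm_gem_mmap_obj() fixes up the vma for GEM-backed pages, clearing
VM_PFNMAP, setting VM_MIXEDMAP, and selecting the page protection from
the object's caching flags; for cached objects it additionally redirects
vma->vm_file to the object's own shmem file (the fput()/vm_pgoff reset
at lines 226–229). A sketch of the protection selection only, with a
hypothetical cache-mode enum in place of the MSM_BO_* flags:

    #include <linux/mm.h>

    enum my_cache_mode { MY_WC, MY_UNCACHED, MY_CACHED };

    static void my_mmap_set_prot(struct vm_area_struct *vma,
                                 enum my_cache_mode mode)
    {
        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;  /* struct pages, not raw PFNs */

        switch (mode) {
        case MY_WC:
            vma->vm_page_prot =
                pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
            break;
        case MY_UNCACHED:
            vma->vm_page_prot =
                pgprot_noncached(vm_get_page_prot(vma->vm_flags));
            break;
        case MY_CACHED:
            vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
            break;
        }
    }
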
/drivers/gpu/drm/i915/gem/
i915_gem_execbuffer.c
221 struct i915_vma **vma; member
352 const struct i915_vma *vma, in eb_vma_misplaced() argument
355 if (vma->node.size < entry->pad_to_size) in eb_vma_misplaced()
358 if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment)) in eb_vma_misplaced()
362 vma->node.start != entry->offset) in eb_vma_misplaced()
366 vma->node.start < BATCH_OFFSET_BIAS) in eb_vma_misplaced()
370 (vma->node.start + vma->node.size - 1) >> 32) in eb_vma_misplaced()
374 !i915_vma_is_map_and_fenceable(vma)) in eb_vma_misplaced()
383 struct i915_vma *vma) in eb_pin_vma() argument
385 unsigned int exec_flags = *vma->exec_flags; in eb_pin_vma()
[all …]
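
Note: eb_vma_misplaced() is a chain of placement predicates: minimum
size (pad_to_size), IS_ALIGNED() on the node start, an exact-offset
check, a bias floor for batches, and a 32-bit ceiling test
((start + size - 1) >> 32). Two of those checks, sketched:

    #include <linux/kernel.h>
    #include <linux/types.h>

    static bool node_misplaced(u64 start, u64 size, u64 alignment,
                               bool needs_32bit)
    {
        if (alignment && !IS_ALIGNED(start, alignment))
            return true;

        /* A 32-bit-addressable object must end below 4 GiB. */
        if (needs_32bit && (start + size - 1) >> 32)
            return true;

        return false;
    }
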
i915_gem_object_blt.c
15 struct i915_vma *vma, in intel_emit_vma_fill_blt() argument
32 count = div_u64(vma->size, block_size); in intel_emit_vma_fill_blt()
47 rem = vma->size; in intel_emit_vma_fill_blt()
48 offset = vma->node.start; in intel_emit_vma_fill_blt()
104 int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq) in intel_emit_vma_mark_active() argument
108 i915_vma_lock(vma); in intel_emit_vma_mark_active()
109 err = i915_request_await_object(rq, vma->obj, false); in intel_emit_vma_mark_active()
111 err = i915_vma_move_to_active(vma, rq, 0); in intel_emit_vma_mark_active()
112 i915_vma_unlock(vma); in intel_emit_vma_mark_active()
116 return intel_engine_pool_mark_active(vma->private, rq); in intel_emit_vma_mark_active()
[all …]
i915_gem_mman.c
20 __vma_matches(struct vm_area_struct *vma, struct file *filp, in __vma_matches() argument
23 if (vma->vm_file != filp) in __vma_matches()
26 return vma->vm_start == addr && in __vma_matches()
27 (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size); in __vma_matches()
89 struct vm_area_struct *vma; in i915_gem_mmap_ioctl() local
95 vma = find_vma(mm, addr); in i915_gem_mmap_ioctl()
96 if (vma && __vma_matches(vma, obj->base.filp, addr, args->size)) in i915_gem_mmap_ioctl()
97 vma->vm_page_prot = in i915_gem_mmap_ioctl()
98 pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); in i915_gem_mmap_ioctl()
220 struct vm_area_struct *area = vmf->vma; in i915_gem_fault()
[all …]
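
Note: i915_gem_mmap_ioctl() re-looks-up the freshly created userspace
mapping with find_vma() and only touches it if __vma_matches() confirms
file, start, and page-aligned length. The predicate, restated as a
self-contained helper:

    #include <linux/fs.h>
    #include <linux/mm.h>

    static bool my_vma_matches(struct vm_area_struct *vma, struct file *filp,
                               unsigned long addr, unsigned long size)
    {
        if (vma->vm_file != filp)
            return false;

        return vma->vm_start == addr &&
               (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
    }

As in the original, the caller must hold the mm's mmap_sem around
find_vma() and any use of the returned vma.
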
i915_gem_client_blt.c
14 struct i915_vma *vma; member
20 static int vma_set_pages(struct i915_vma *vma) in vma_set_pages() argument
22 struct i915_sleeve *sleeve = vma->private; in vma_set_pages()
24 vma->pages = sleeve->pages; in vma_set_pages()
25 vma->page_sizes = sleeve->page_sizes; in vma_set_pages()
30 static void vma_clear_pages(struct i915_vma *vma) in vma_clear_pages() argument
32 GEM_BUG_ON(!vma->pages); in vma_clear_pages()
33 vma->pages = NULL; in vma_clear_pages()
36 static int vma_bind(struct i915_vma *vma, in vma_bind() argument
40 return vma->vm->vma_ops.bind_vma(vma, cache_level, flags); in vma_bind()
[all …]
i915_gem_domain.c
178 struct i915_vma *vma; in i915_gem_object_set_cache_level() local
192 list_for_each_entry(vma, &obj->vma.list, obj_link) { in i915_gem_object_set_cache_level()
193 if (!drm_mm_node_allocated(&vma->node)) in i915_gem_object_set_cache_level()
196 if (i915_vma_is_pinned(vma)) { in i915_gem_object_set_cache_level()
201 if (!i915_vma_is_closed(vma) && in i915_gem_object_set_cache_level()
202 i915_gem_valid_gtt_space(vma, cache_level)) in i915_gem_object_set_cache_level()
205 ret = i915_vma_unbind(vma); in i915_gem_object_set_cache_level()
267 for_each_ggtt_vma(vma, obj) { in i915_gem_object_set_cache_level()
268 ret = i915_vma_revoke_fence(vma); in i915_gem_object_set_cache_level()
287 list_for_each_entry(vma, &obj->vma.list, obj_link) { in i915_gem_object_set_cache_level()
[all …]
/drivers/char/
mspec.c
89 mspec_open(struct vm_area_struct *vma) in mspec_open() argument
93 vdata = vma->vm_private_data; in mspec_open()
104 mspec_close(struct vm_area_struct *vma) in mspec_close() argument
110 vdata = vma->vm_private_data; in mspec_close()
143 struct vma_data *vdata = vmf->vma->vm_private_data; in mspec_fault()
165 return vmf_insert_pfn(vmf->vma, vmf->address, pfn); in mspec_fault()
182 mspec_mmap(struct file *file, struct vm_area_struct *vma, in mspec_mmap() argument
188 if (vma->vm_pgoff != 0) in mspec_mmap()
191 if ((vma->vm_flags & VM_SHARED) == 0) in mspec_mmap()
194 if ((vma->vm_flags & VM_WRITE) == 0) in mspec_mmap()
[all …]
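
Note: mspec_fault() resolves a PFN for the faulting page and installs it
with vmf_insert_pfn(vmf->vma, vmf->address, pfn). A minimal fault
handler of that shape; the backing-store lookup here (base_pfn + pgoff)
is a hypothetical stand-in for mspec's per-vma data:

    #include <linux/mm.h>

    struct my_vdata {
        unsigned long base_pfn;  /* first PFN of the backing region */
    };

    static vm_fault_t my_fault(struct vm_fault *vmf)
    {
        struct my_vdata *vdata = vmf->vma->vm_private_data;
        unsigned long pfn = vdata->base_pfn + vmf->pgoff;

        /* The mmap hook is expected to have set VM_PFNMAP on the vma. */
        return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
    }

    static const struct vm_operations_struct my_vm_ops = {
        .fault = my_fault,
    };
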
/drivers/xen/
privcmd.c
66 struct vm_area_struct *vma,
219 struct vm_area_struct *vma; member
227 struct vm_area_struct *vma = st->vma; in mmap_gfn_range() local
237 ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end)) in mmap_gfn_range()
240 rc = xen_remap_domain_gfn_range(vma, in mmap_gfn_range()
243 vma->vm_page_prot, in mmap_gfn_range()
258 struct vm_area_struct *vma; in privcmd_ioctl_mmap() local
288 vma = find_vma(mm, msg->va); in privcmd_ioctl_mmap()
291 if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data) in privcmd_ioctl_mmap()
293 vma->vm_private_data = PRIV_VMA_LOCKED; in privcmd_ioctl_mmap()
[all …]
/drivers/gpu/drm/ttm/
ttm_bo_vm.c
72 up_read(&vmf->vma->vm_mm->mmap_sem); in ttm_bo_vm_fault_idle()
111 struct vm_area_struct *vma = vmf->vma; in ttm_bo_vm_fault() local
113 vma->vm_private_data; in ttm_bo_vm_fault()
138 up_read(&vmf->vma->vm_mm->mmap_sem); in ttm_bo_vm_fault()
213 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + in ttm_bo_vm_fault()
214 vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node); in ttm_bo_vm_fault()
215 page_last = vma_pages(vma) + vma->vm_pgoff - in ttm_bo_vm_fault()
228 cvma = *vma; in ttm_bo_vm_fault()
275 if (vma->vm_flags & VM_MIXEDMAP) in ttm_bo_vm_fault()
301 static void ttm_bo_vm_open(struct vm_area_struct *vma) in ttm_bo_vm_open() argument
[all …]
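
Note: lines 213–215 translate the faulting user address into a page
index inside the buffer object: offset within the vma, plus the vma's
file offset, minus the object's start in the mmap offset space
(drm_vma_node_start()). That arithmetic on its own:

    #include <linux/mm.h>

    static unsigned long fault_page_offset(struct vm_area_struct *vma,
                                           unsigned long address,
                                           unsigned long node_start)
    {
        return ((address - vma->vm_start) >> PAGE_SHIFT) +
               vma->vm_pgoff - node_start;
    }
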
/drivers/misc/mic/scif/
scif_mmap.c
18 struct vm_area_struct *vma; member
70 struct vm_area_struct *vma; in __scif_zap_mmaps() local
76 vma = info->vma; in __scif_zap_mmaps()
77 size = vma->vm_end - vma->vm_start; in __scif_zap_mmaps()
78 zap_vma_ptes(vma, vma->vm_start, size); in __scif_zap_mmaps()
81 __func__, ep, info->vma, size); in __scif_zap_mmaps()
160 static int scif_insert_vma(struct scif_endpt *ep, struct vm_area_struct *vma) in scif_insert_vma() argument
170 info->vma = vma; in scif_insert_vma()
179 static void scif_delete_vma(struct scif_endpt *ep, struct vm_area_struct *vma) in scif_delete_vma() argument
187 if (info->vma == vma) { in scif_delete_vma()
[all …]
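
Note: __scif_zap_mmaps() tears down every PTE in each tracked mapping
with zap_vma_ptes() so the pages can no longer be reached through it.
The per-vma step in isolation:

    #include <linux/mm.h>

    static void my_zap_whole_vma(struct vm_area_struct *vma)
    {
        unsigned long size = vma->vm_end - vma->vm_start;

        zap_vma_ptes(vma, vma->vm_start, size);
    }
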
/drivers/misc/ocxl/
context.c
96 static vm_fault_t map_afu_irq(struct vm_area_struct *vma, unsigned long address, in map_afu_irq() argument
106 return vmf_insert_pfn(vma, address, trigger_addr >> PAGE_SHIFT); in map_afu_irq()
109 static vm_fault_t map_pp_mmio(struct vm_area_struct *vma, unsigned long address, in map_pp_mmio() argument
132 ret = vmf_insert_pfn(vma, address, pp_mmio_addr >> PAGE_SHIFT); in map_pp_mmio()
139 struct vm_area_struct *vma = vmf->vma; in ocxl_mmap_fault() local
140 struct ocxl_context *ctx = vma->vm_file->private_data; in ocxl_mmap_fault()
149 ret = map_pp_mmio(vma, vmf->address, offset, ctx); in ocxl_mmap_fault()
151 ret = map_afu_irq(vma, vmf->address, offset, ctx); in ocxl_mmap_fault()
160 struct vm_area_struct *vma) in check_mmap_afu_irq() argument
162 int irq_id = ocxl_irq_offset_to_id(ctx, vma->vm_pgoff << PAGE_SHIFT); in check_mmap_afu_irq()
[all …]
