
Searched refs:vma (Results 1 – 25 of 305) sorted by relevance


/drivers/gpu/drm/
drm_vm.c
49 struct vm_area_struct *vma; member
53 static void drm_vm_open(struct vm_area_struct *vma);
54 static void drm_vm_close(struct vm_area_struct *vma);
57 struct vm_area_struct *vma) in drm_io_prot() argument
59 pgprot_t tmp = vm_get_page_prot(vma->vm_flags); in drm_io_prot()
67 if (efi_range_is_wc(vma->vm_start, vma->vm_end - in drm_io_prot()
68 vma->vm_start)) in drm_io_prot()
78 static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma) in drm_dma_prot() argument
80 pgprot_t tmp = vm_get_page_prot(vma->vm_flags); in drm_dma_prot()
99 static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in drm_do_vm_fault() argument
[all …]
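
The drm_io_prot() and drm_dma_prot() lines above show the usual way a driver derives page protections for a mapping: start from vm_get_page_prot(vma->vm_flags) and then tighten the caching attributes. A minimal sketch of that pattern; the helper name and the write_combine flag are illustrative assumptions, not taken from drm_vm.c:

#include <linux/mm.h>

/* Hypothetical helper (not from drm_vm.c): derive a protection value for
 * mapping device memory, starting from the VMA's access flags. */
static pgprot_t my_io_prot(struct vm_area_struct *vma, bool write_combine)
{
	pgprot_t prot = vm_get_page_prot(vma->vm_flags);

	/* Device memory is normally mapped write-combined or uncached. */
	return write_combine ? pgprot_writecombine(prot)
			     : pgprot_noncached(prot);
}
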
/drivers/staging/rdma/ehca/
ehca_uverbs.c
71 static void ehca_mm_open(struct vm_area_struct *vma) in ehca_mm_open() argument
73 u32 *count = (u32 *)vma->vm_private_data; in ehca_mm_open()
76 vma->vm_start, vma->vm_end); in ehca_mm_open()
82 vma->vm_start, vma->vm_end); in ehca_mm_open()
84 vma->vm_start, vma->vm_end, *count); in ehca_mm_open()
87 static void ehca_mm_close(struct vm_area_struct *vma) in ehca_mm_close() argument
89 u32 *count = (u32 *)vma->vm_private_data; in ehca_mm_close()
92 vma->vm_start, vma->vm_end); in ehca_mm_close()
97 vma->vm_start, vma->vm_end, *count); in ehca_mm_close()
105 static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas, in ehca_mmap_fw() argument
[all …]
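
ehca_mm_open()/ehca_mm_close() above keep a use count behind vma->vm_private_data and adjust it whenever the kernel opens or closes a VMA (fork, split, munmap). A hedged sketch of that bookkeeping with hypothetical names, not the ehca code itself:

#include <linux/types.h>
#include <linux/mm.h>

/* Hypothetical open/close pair that track how many VMAs share a mapping. */
static void my_vm_open(struct vm_area_struct *vma)
{
	u32 *count = vma->vm_private_data;

	if (count)
		(*count)++;	/* fork() or a VMA split created another user */
}

static void my_vm_close(struct vm_area_struct *vma)
{
	u32 *count = vma->vm_private_data;

	if (count && *count)
		(*count)--;
}

static const struct vm_operations_struct my_vm_ops = {
	.open  = my_vm_open,
	.close = my_vm_close,
};
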
/drivers/gpu/drm/i915/
i915_gem_evict.c
37 mark_free(struct i915_vma *vma, struct list_head *unwind) in mark_free() argument
39 if (vma->pin_count) in mark_free()
42 if (WARN_ON(!list_empty(&vma->exec_list))) in mark_free()
45 list_add(&vma->exec_list, unwind); in mark_free()
46 return drm_mm_scan_add_block(&vma->node); in mark_free()
80 struct i915_vma *vma; in i915_gem_evict_something() local
119 list_for_each_entry(vma, &vm->inactive_list, mm_list) { in i915_gem_evict_something()
120 if (mark_free(vma, &unwind_list)) in i915_gem_evict_something()
128 list_for_each_entry(vma, &vm->active_list, mm_list) { in i915_gem_evict_something()
129 if (mark_free(vma, &unwind_list)) in i915_gem_evict_something()
[all …]
i915_gem_execbuffer.c
132 struct i915_vma *vma; in eb_lookup_vmas() local
146 vma = i915_gem_obj_lookup_or_create_vma(obj, vm); in eb_lookup_vmas()
147 if (IS_ERR(vma)) { in eb_lookup_vmas()
149 ret = PTR_ERR(vma); in eb_lookup_vmas()
154 list_add_tail(&vma->exec_list, &eb->vmas); in eb_lookup_vmas()
157 vma->exec_entry = &exec[i]; in eb_lookup_vmas()
159 eb->lut[i] = vma; in eb_lookup_vmas()
162 vma->exec_handle = handle; in eb_lookup_vmas()
163 hlist_add_head(&vma->exec_node, in eb_lookup_vmas()
200 struct i915_vma *vma; in eb_get_vma() local
[all …]
i915_gem.c
136 struct i915_vma *vma; in i915_gem_get_aperture_ioctl() local
141 list_for_each_entry(vma, &ggtt->base.active_list, mm_list) in i915_gem_get_aperture_ioctl()
142 if (vma->pin_count) in i915_gem_get_aperture_ioctl()
143 pinned += vma->node.size; in i915_gem_get_aperture_ioctl()
144 list_for_each_entry(vma, &ggtt->base.inactive_list, mm_list) in i915_gem_get_aperture_ioctl()
145 if (vma->pin_count) in i915_gem_get_aperture_ioctl()
146 pinned += vma->node.size; in i915_gem_get_aperture_ioctl()
271 struct i915_vma *vma, *next; in drop_pages() local
275 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) in drop_pages()
276 if (i915_vma_unbind(vma)) in drop_pages()
[all …]
/drivers/xen/
privcmd.c
47 struct vm_area_struct *vma,
198 struct vm_area_struct *vma; member
206 struct vm_area_struct *vma = st->vma; in mmap_gfn_range() local
216 ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end)) in mmap_gfn_range()
219 rc = xen_remap_domain_gfn_range(vma, in mmap_gfn_range()
222 vma->vm_page_prot, in mmap_gfn_range()
236 struct vm_area_struct *vma; in privcmd_ioctl_mmap() local
262 vma = find_vma(mm, msg->va); in privcmd_ioctl_mmap()
265 if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data) in privcmd_ioctl_mmap()
267 vma->vm_private_data = PRIV_VMA_LOCKED; in privcmd_ioctl_mmap()
[all …]
gntdev.c
84 struct vm_area_struct *vma; member
241 unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT; in find_grant_ptes()
406 static void gntdev_vma_open(struct vm_area_struct *vma) in gntdev_vma_open() argument
408 struct grant_map *map = vma->vm_private_data; in gntdev_vma_open()
410 pr_debug("gntdev_vma_open %p\n", vma); in gntdev_vma_open()
414 static void gntdev_vma_close(struct vm_area_struct *vma) in gntdev_vma_close() argument
416 struct grant_map *map = vma->vm_private_data; in gntdev_vma_close()
417 struct file *file = vma->vm_file; in gntdev_vma_close()
420 pr_debug("gntdev_vma_close %p\n", vma); in gntdev_vma_close()
430 map->vma = NULL; in gntdev_vma_close()
[all …]
/drivers/staging/lustre/lustre/llite/
llite_mmap.c
57 struct vm_area_struct *vma, unsigned long addr, in policy_from_vma() argument
60 policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) + in policy_from_vma()
61 (vma->vm_pgoff << PAGE_CACHE_SHIFT); in policy_from_vma()
69 struct vm_area_struct *vma, *ret = NULL; in our_vma() local
74 for (vma = find_vma(mm, addr); in our_vma()
75 vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) { in our_vma()
76 if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops && in our_vma()
77 vma->vm_flags & VM_SHARED) { in our_vma()
78 ret = vma; in our_vma()
98 ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret, in ll_fault_io_init() argument
[all …]
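
our_vma() above walks the VMA list starting at find_vma() to locate a shared mapping owned by the file's vm_ops. A sketch of the same walk; it assumes the caller holds mmap_sem for read and a kernel of the same vintage as these excerpts (VMAs still chained through vm_next), and my_vm_ops/my_find_vma are illustrative names:

#include <linux/mm.h>

static const struct vm_operations_struct my_vm_ops;	/* placeholder */

/* Hypothetical lookup: return the first shared VMA owned by my_vm_ops
 * that overlaps [addr, addr + count).  Caller holds mmap_sem for read. */
static struct vm_area_struct *my_find_vma(struct mm_struct *mm,
					  unsigned long addr, size_t count)
{
	struct vm_area_struct *vma;

	for (vma = find_vma(mm, addr);
	     vma && vma->vm_start < addr + count;
	     vma = vma->vm_next) {
		if (vma->vm_ops == &my_vm_ops && (vma->vm_flags & VM_SHARED))
			return vma;
	}
	return NULL;
}
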
/drivers/gpu/drm/ttm/
ttm_bo_vm.c
45 struct vm_area_struct *vma, in ttm_bo_vm_fault_idle() argument
70 up_read(&vma->vm_mm->mmap_sem); in ttm_bo_vm_fault_idle()
89 static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in ttm_bo_vm_fault() argument
92 vma->vm_private_data; in ttm_bo_vm_fault()
121 up_read(&vma->vm_mm->mmap_sem); in ttm_bo_vm_fault()
165 ret = ttm_bo_vm_fault_idle(bo, vma, vmf); in ttm_bo_vm_fault()
189 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + in ttm_bo_vm_fault()
190 vma->vm_pgoff - drm_vma_node_start(&bo->vma_node); in ttm_bo_vm_fault()
191 page_last = vma_pages(vma) + vma->vm_pgoff - in ttm_bo_vm_fault()
204 cvma = *vma; in ttm_bo_vm_fault()
[all …]
/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
base.c
30 nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node) in nvkm_vm_map_at() argument
32 struct nvkm_vm *vm = vma->vm; in nvkm_vm_map_at()
35 int big = vma->node->type != mmu->func->spg_shift; in nvkm_vm_map_at()
36 u32 offset = vma->node->offset + (delta >> 12); in nvkm_vm_map_at()
37 u32 bits = vma->node->type - 12; in nvkm_vm_map_at()
56 mmu->func->map(vma, pgt, node, pte, len, phys, delta); in nvkm_vm_map_at()
66 delta += (u64)len << vma->node->type; in nvkm_vm_map_at()
74 nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length, in nvkm_vm_map_sg_table() argument
77 struct nvkm_vm *vm = vma->vm; in nvkm_vm_map_sg_table()
79 int big = vma->node->type != mmu->func->spg_shift; in nvkm_vm_map_sg_table()
[all …]
/drivers/misc/mic/scif/
scif_mmap.c
27 struct vm_area_struct *vma; member
79 struct vm_area_struct *vma; in __scif_zap_mmaps() local
85 vma = info->vma; in __scif_zap_mmaps()
86 size = vma->vm_end - vma->vm_start; in __scif_zap_mmaps()
87 zap_vma_ptes(vma, vma->vm_start, size); in __scif_zap_mmaps()
90 __func__, ep, info->vma, size); in __scif_zap_mmaps()
169 static int scif_insert_vma(struct scif_endpt *ep, struct vm_area_struct *vma) in scif_insert_vma() argument
179 info->vma = vma; in scif_insert_vma()
188 static void scif_delete_vma(struct scif_endpt *ep, struct vm_area_struct *vma) in scif_delete_vma() argument
196 if (info->vma == vma) { in scif_delete_vma()
[all …]
/drivers/char/
mspec.c
145 mspec_open(struct vm_area_struct *vma) in mspec_open() argument
149 vdata = vma->vm_private_data; in mspec_open()
160 mspec_close(struct vm_area_struct *vma) in mspec_close() argument
166 vdata = vma->vm_private_data; in mspec_close()
200 mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in mspec_fault() argument
205 struct vma_data *vdata = vma->vm_private_data; in mspec_fault()
236 vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); in mspec_fault()
255 mspec_mmap(struct file *file, struct vm_area_struct *vma, in mspec_mmap() argument
261 if (vma->vm_pgoff != 0) in mspec_mmap()
264 if ((vma->vm_flags & VM_SHARED) == 0) in mspec_mmap()
[all …]
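
mspec_fault() above resolves a fault by translating the faulting offset to a physical frame and installing it with vm_insert_pfn(), returning VM_FAULT_NOPAGE so the core MM does not try to install a page itself. A minimal sketch under the same assumptions: the 4.x-era .fault signature shown in these excerpts, a VMA whose mmap handler set VM_PFNMAP | VM_IO, and a hypothetical per-VMA PFN table:

#include <linux/mm.h>

/* Hypothetical fault handler in the style of mspec_fault(). */
static int my_pfn_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	/* Assumed layout: vm_private_data points at a table of PFNs,
	 * indexed by the page offset into the mapped object. */
	unsigned long *pfns = vma->vm_private_data;
	unsigned long pfn = pfns[vmf->pgoff];

	if (!pfn)
		return VM_FAULT_SIGBUS;

	vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	return VM_FAULT_NOPAGE;
}
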
uv_mmtimer.c
43 static int uv_mmtimer_mmap(struct file *file, struct vm_area_struct *vma);
147 static int uv_mmtimer_mmap(struct file *file, struct vm_area_struct *vma) in uv_mmtimer_mmap() argument
151 if (vma->vm_end - vma->vm_start != PAGE_SIZE) in uv_mmtimer_mmap()
154 if (vma->vm_flags & VM_WRITE) in uv_mmtimer_mmap()
160 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); in uv_mmtimer_mmap()
166 if (remap_pfn_range(vma, vma->vm_start, uv_mmtimer_addr >> PAGE_SHIFT, in uv_mmtimer_mmap()
167 PAGE_SIZE, vma->vm_page_prot)) { in uv_mmtimer_mmap()
mem.c
332 static inline int private_mapping_ok(struct vm_area_struct *vma) in private_mapping_ok() argument
334 return vma->vm_flags & VM_MAYSHARE; in private_mapping_ok()
338 static inline int private_mapping_ok(struct vm_area_struct *vma) in private_mapping_ok() argument
350 static int mmap_mem(struct file *file, struct vm_area_struct *vma) in mmap_mem() argument
352 size_t size = vma->vm_end - vma->vm_start; in mmap_mem()
353 phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; in mmap_mem()
359 if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size)) in mmap_mem()
362 if (!private_mapping_ok(vma)) in mmap_mem()
365 if (!range_is_allowed(vma->vm_pgoff, size)) in mmap_mem()
368 if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size, in mmap_mem()
[all …]
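
uv_mmtimer_mmap() and mmap_mem() above show the classic character-device mmap shape: validate the requested size and flags, make the mapping uncached, then hand the whole range to remap_pfn_range(). A sketch of that shape, mirroring the checks visible above; the device name, base address, and the single-page/read-only policy are assumptions, not taken from either driver:

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mm.h>

#define MY_DEV_PHYS	0xfed00000UL	/* hypothetical MMIO base address */

static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (size != PAGE_SIZE)		/* expose exactly one page */
		return -EINVAL;
	if (vma->vm_flags & VM_WRITE)	/* read-only device window */
		return -EPERM;

	/* Device registers must not be cached by the CPU. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (remap_pfn_range(vma, vma->vm_start, MY_DEV_PHYS >> PAGE_SHIFT,
			    size, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
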
/drivers/media/v4l2-core/
videobuf-dma-contig.c
66 static void videobuf_vm_open(struct vm_area_struct *vma) in videobuf_vm_open() argument
68 struct videobuf_mapping *map = vma->vm_private_data; in videobuf_vm_open()
71 map, map->count, vma->vm_start, vma->vm_end); in videobuf_vm_open()
76 static void videobuf_vm_close(struct vm_area_struct *vma) in videobuf_vm_close() argument
78 struct videobuf_mapping *map = vma->vm_private_data; in videobuf_vm_close()
83 map, map->count, vma->vm_start, vma->vm_end); in videobuf_vm_close()
164 struct vm_area_struct *vma; in videobuf_dma_contig_user_get() local
176 vma = find_vma(mm, vb->baddr); in videobuf_dma_contig_user_get()
177 if (!vma) in videobuf_dma_contig_user_get()
180 if ((vb->baddr + mem->size) > vma->vm_end) in videobuf_dma_contig_user_get()
[all …]
videobuf-vmalloc.c
54 static void videobuf_vm_open(struct vm_area_struct *vma) in videobuf_vm_open() argument
56 struct videobuf_mapping *map = vma->vm_private_data; in videobuf_vm_open()
59 map->count, vma->vm_start, vma->vm_end); in videobuf_vm_open()
64 static void videobuf_vm_close(struct vm_area_struct *vma) in videobuf_vm_close() argument
66 struct videobuf_mapping *map = vma->vm_private_data; in videobuf_vm_close()
71 map->count, vma->vm_start, vma->vm_end); in videobuf_vm_close()
212 rc = remap_vmalloc_range(mem->vma, (void *)vb->baddr, 0); in __videobuf_iolock()
234 struct vm_area_struct *vma) in __videobuf_mmap_mapper() argument
250 buf->baddr = vma->vm_start; in __videobuf_mmap_mapper()
256 pages = PAGE_ALIGN(vma->vm_end - vma->vm_start); in __videobuf_mmap_mapper()
[all …]
videobuf2-memops.c
94 static void vb2_common_vm_open(struct vm_area_struct *vma) in vb2_common_vm_open() argument
96 struct vb2_vmarea_handler *h = vma->vm_private_data; in vb2_common_vm_open()
99 __func__, h, atomic_read(h->refcount), vma->vm_start, in vb2_common_vm_open()
100 vma->vm_end); in vb2_common_vm_open()
112 static void vb2_common_vm_close(struct vm_area_struct *vma) in vb2_common_vm_close() argument
114 struct vb2_vmarea_handler *h = vma->vm_private_data; in vb2_common_vm_close()
117 __func__, h, atomic_read(h->refcount), vma->vm_start, in vb2_common_vm_close()
118 vma->vm_end); in vb2_common_vm_close()
/drivers/sbus/char/
flash.c
36 flash_mmap(struct file *file, struct vm_area_struct *vma) in flash_mmap() argument
46 if ((vma->vm_flags & VM_READ) && in flash_mmap()
47 (vma->vm_flags & VM_WRITE)) { in flash_mmap()
51 if (vma->vm_flags & VM_READ) { in flash_mmap()
54 } else if (vma->vm_flags & VM_WRITE) { in flash_mmap()
64 if ((vma->vm_pgoff << PAGE_SHIFT) > size) in flash_mmap()
66 addr = vma->vm_pgoff + (addr >> PAGE_SHIFT); in flash_mmap()
68 if (vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT)) > size) in flash_mmap()
69 size = vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT)); in flash_mmap()
71 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); in flash_mmap()
[all …]
/drivers/gpu/drm/udl/
udl_gem.c
61 struct vm_area_struct *vma) in update_vm_cache_attr() argument
67 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); in update_vm_cache_attr()
69 vma->vm_page_prot = in update_vm_cache_attr()
70 pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); in update_vm_cache_attr()
72 vma->vm_page_prot = in update_vm_cache_attr()
73 pgprot_noncached(vm_get_page_prot(vma->vm_flags)); in update_vm_cache_attr()
87 int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) in udl_drm_gem_mmap() argument
91 ret = drm_gem_mmap(filp, vma); in udl_drm_gem_mmap()
95 vma->vm_flags &= ~VM_PFNMAP; in udl_drm_gem_mmap()
96 vma->vm_flags |= VM_MIXEDMAP; in udl_drm_gem_mmap()
[all …]
/drivers/staging/rdma/ipath/
ipath_mmap.c
64 static void ipath_vma_open(struct vm_area_struct *vma) in ipath_vma_open() argument
66 struct ipath_mmap_info *ip = vma->vm_private_data; in ipath_vma_open()
71 static void ipath_vma_close(struct vm_area_struct *vma) in ipath_vma_close() argument
73 struct ipath_mmap_info *ip = vma->vm_private_data; in ipath_vma_close()
89 int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) in ipath_mmap() argument
92 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; in ipath_mmap()
93 unsigned long size = vma->vm_end - vma->vm_start; in ipath_mmap()
115 ret = remap_vmalloc_range(vma, ip->obj, 0); in ipath_mmap()
118 vma->vm_ops = &ipath_vm_ops; in ipath_mmap()
119 vma->vm_private_data = ip; in ipath_mmap()
[all …]
/drivers/infiniband/hw/qib/
qib_mmap.c
64 static void qib_vma_open(struct vm_area_struct *vma) in qib_vma_open() argument
66 struct qib_mmap_info *ip = vma->vm_private_data; in qib_vma_open()
71 static void qib_vma_close(struct vm_area_struct *vma) in qib_vma_close() argument
73 struct qib_mmap_info *ip = vma->vm_private_data; in qib_vma_close()
89 int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) in qib_mmap() argument
92 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; in qib_mmap()
93 unsigned long size = vma->vm_end - vma->vm_start; in qib_mmap()
115 ret = remap_vmalloc_range(vma, ip->obj, 0); in qib_mmap()
118 vma->vm_ops = &qib_vm_ops; in qib_mmap()
119 vma->vm_private_data = ip; in qib_mmap()
[all …]
/drivers/staging/rdma/hfi1/
mmap.c
82 static void hfi1_vma_open(struct vm_area_struct *vma) in hfi1_vma_open() argument
84 struct hfi1_mmap_info *ip = vma->vm_private_data; in hfi1_vma_open()
89 static void hfi1_vma_close(struct vm_area_struct *vma) in hfi1_vma_close() argument
91 struct hfi1_mmap_info *ip = vma->vm_private_data; in hfi1_vma_close()
107 int hfi1_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) in hfi1_mmap() argument
110 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; in hfi1_mmap()
111 unsigned long size = vma->vm_end - vma->vm_start; in hfi1_mmap()
133 ret = remap_vmalloc_range(vma, ip->obj, 0); in hfi1_mmap()
136 vma->vm_ops = &hfi1_vm_ops; in hfi1_mmap()
137 vma->vm_private_data = ip; in hfi1_mmap()
[all …]
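
ipath_mmap(), qib_mmap() and hfi1_mmap() above all follow the same recipe for exposing a vmalloc()ed kernel object to userspace: check the offset and size, map the buffer with remap_vmalloc_range(), then install vm_ops and vm_private_data so the open/close callbacks can keep a reference. A hedged sketch of that recipe; struct my_mmap_info and my_mmap are illustrative and assume the buffer was allocated with vmalloc_user():

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Hypothetical bookkeeping for a kernel buffer exported to userspace. */
struct my_mmap_info {
	void		*obj;	/* allocated with vmalloc_user() */
	unsigned long	size;	/* page-aligned size in bytes */
};

static int my_mmap(struct my_mmap_info *ip, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	int ret;

	if (size > ip->size)
		return -EINVAL;

	/* Map the whole buffer starting at page offset 0. */
	ret = remap_vmalloc_range(vma, ip->obj, 0);
	if (ret)
		return ret;

	vma->vm_private_data = ip;
	return 0;
}
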
/drivers/gpu/drm/amd/amdkfd/
kfd_doorbell.c
120 int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma) in kfd_doorbell_mmap() argument
129 if (vma->vm_end - vma->vm_start != doorbell_process_allocation()) in kfd_doorbell_mmap()
133 dev = kfd_device_by_id(vma->vm_pgoff); in kfd_doorbell_mmap()
140 vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE | in kfd_doorbell_mmap()
143 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); in kfd_doorbell_mmap()
147 (unsigned long long) vma->vm_start); in kfd_doorbell_mmap()
149 pr_debug(" vm_flags == 0x%04lX\n", vma->vm_flags); in kfd_doorbell_mmap()
153 return io_remap_pfn_range(vma, in kfd_doorbell_mmap()
154 vma->vm_start, in kfd_doorbell_mmap()
157 vma->vm_page_prot); in kfd_doorbell_mmap()
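
kfd_doorbell_mmap() above marks the doorbell VMA as MMIO that must not be copied on fork, expanded, or given a swap reservation, makes it uncached, then maps it with io_remap_pfn_range(). A minimal sketch of the same steps; the function name and doorbell_phys parameter are assumptions, and the direct assignment to vm_flags matches the kernel era of these excerpts:

#include <linux/errno.h>
#include <linux/mm.h>

/* Hypothetical doorbell mapping in the style of kfd_doorbell_mmap(). */
static int my_doorbell_mmap(struct vm_area_struct *vma, phys_addr_t doorbell_phys)
{
	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	/* MMIO page: never copy on fork, never expand, no swap reservation. */
	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return io_remap_pfn_range(vma, vma->vm_start,
				  doorbell_phys >> PAGE_SHIFT,
				  PAGE_SIZE, vma->vm_page_prot);
}
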
/drivers/misc/sgi-gru/
grufile.c
73 static void gru_vma_close(struct vm_area_struct *vma) in gru_vma_close() argument
79 if (!vma->vm_private_data) in gru_vma_close()
82 vdata = vma->vm_private_data; in gru_vma_close()
83 vma->vm_private_data = NULL; in gru_vma_close()
84 gru_dbg(grudev, "vma %p, file %p, vdata %p\n", vma, vma->vm_file, in gru_vma_close()
107 static int gru_file_mmap(struct file *file, struct vm_area_struct *vma) in gru_file_mmap() argument
109 if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) != (VM_SHARED | VM_WRITE)) in gru_file_mmap()
112 if (vma->vm_start & (GRU_GSEG_PAGESIZE - 1) || in gru_file_mmap()
113 vma->vm_end & (GRU_GSEG_PAGESIZE - 1)) in gru_file_mmap()
116 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_LOCKED | in gru_file_mmap()
[all …]
/drivers/uio/
uio.c
582 static int uio_find_mem_index(struct vm_area_struct *vma) in uio_find_mem_index() argument
584 struct uio_device *idev = vma->vm_private_data; in uio_find_mem_index()
586 if (vma->vm_pgoff < MAX_UIO_MAPS) { in uio_find_mem_index()
587 if (idev->info->mem[vma->vm_pgoff].size == 0) in uio_find_mem_index()
589 return (int)vma->vm_pgoff; in uio_find_mem_index()
594 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in uio_vma_fault() argument
596 struct uio_device *idev = vma->vm_private_data; in uio_vma_fault()
601 int mi = uio_find_mem_index(vma); in uio_vma_fault()
625 static int uio_mmap_logical(struct vm_area_struct *vma) in uio_mmap_logical() argument
627 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; in uio_mmap_logical()
[all …]
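
uio_vma_fault() above handles faults for memory that has a struct page (kernel logical or vmalloc memory): look up the backing page, take a reference, and return it through vmf->page. A sketch of that pattern, assuming a hypothetical page-aligned buffer from __get_free_pages() stored in vm_private_data and a mapping created with vm_pgoff == 0:

#include <linux/mm.h>

/* Hypothetical .fault for mapping kernel logical memory, in the style of
 * uio_vma_fault(). */
static int my_logical_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	void *base = vma->vm_private_data;	/* buffer from __get_free_pages() */
	unsigned long addr = (unsigned long)base + (vmf->pgoff << PAGE_SHIFT);
	struct page *page = virt_to_page(addr);

	get_page(page);		/* reference dropped when the PTE goes away */
	vmf->page = page;
	return 0;
}
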
