/drivers/dax/
D | device.c |
   76  static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn,   in dax_set_mapping() argument
   80          struct file *filp = vmf->vma->vm_file;   in dax_set_mapping()
   88          pgoff = linear_page_index(vmf->vma,   in dax_set_mapping()
   89                          ALIGN(vmf->address, fault_size));   in dax_set_mapping()
  104                                  struct vm_fault *vmf)   in __dev_dax_pte_fault() argument
  111          if (check_vma(dev_dax, vmf->vma, __func__))   in __dev_dax_pte_fault()
  123          phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE);   in __dev_dax_pte_fault()
  125                  dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", vmf->pgoff);   in __dev_dax_pte_fault()
  131          dax_set_mapping(vmf, pfn, fault_size);   in __dev_dax_pte_fault()
  133          return vmf_insert_mixed(vmf->vma, vmf->address, pfn);   in __dev_dax_pte_fault()
  [all …]
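Nearly every handler indexed here follows the shape visible in __dev_dax_pte_fault(): translate vmf->pgoff (or vmf->address) into a physical frame, then install the PTE directly with vmf_insert_mixed()/vmf_insert_pfn() instead of handing a struct page back to the core. A minimal sketch of that shape; my_dev_to_phys() is a hypothetical translation helper, not a kernel API:

    #include <linux/mm.h>
    #include <linux/pfn_t.h>

    /* Hypothetical: resolve the faulting offset to device physical
     * address space; (phys_addr_t)-1 means out of range. */
    static phys_addr_t my_dev_to_phys(struct vm_fault *vmf);

    static vm_fault_t my_dev_pte_fault(struct vm_fault *vmf)
    {
            phys_addr_t phys = my_dev_to_phys(vmf);

            if (phys == (phys_addr_t)-1)
                    return VM_FAULT_SIGBUS;        /* offset beyond the device */

            /*
             * PFN_DEV marks device memory that may have no struct page.
             * vmf_insert_mixed() installs the PTE itself and returns
             * VM_FAULT_NOPAGE on success, so vmf->page stays unset.
             */
            return vmf_insert_mixed(vmf->vma, vmf->address,
                                    phys_to_pfn_t(phys, PFN_DEV));
    }

dev_dax additionally calls dax_set_mapping() to record mapping and index on the backing pages, so they can be found again when the mapping is torn down.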
/drivers/gpu/drm/ttm/
D | ttm_bo_vm.c |
   42                                  struct vm_fault *vmf)   in ttm_bo_vm_fault_idle() argument
   57          if (fault_flag_allow_retry_first(vmf->flags)) {   in ttm_bo_vm_fault_idle()
   58                  if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)   in ttm_bo_vm_fault_idle()
   62                  mmap_read_unlock(vmf->vma->vm_mm);   in ttm_bo_vm_fault_idle()
  117                             struct vm_fault *vmf)   in ttm_bo_vm_reserve() argument
  131          if (fault_flag_allow_retry_first(vmf->flags)) {   in ttm_bo_vm_reserve()
  132                  if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {   in ttm_bo_vm_reserve()
  134                          mmap_read_unlock(vmf->vma->vm_mm);   in ttm_bo_vm_reserve()
  181  vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,   in ttm_bo_vm_fault_reserved() argument
  185          struct vm_area_struct *vma = vmf->vma;   in ttm_bo_vm_fault_reserved()
  [all …]
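These two helpers show the retry protocol for fault handlers that may sleep: when fault_flag_allow_retry_first() says retrying is still possible, drop mmap_lock and return VM_FAULT_RETRY rather than blocking the whole address space, unless FAULT_FLAG_RETRY_NOWAIT forbids dropping the lock. A sketch of the idiom; wait_for_device() is a hypothetical stand-in for whatever the driver sleeps on:

    #include <linux/mm.h>
    #include <linux/mmap_lock.h>

    static void wait_for_device(void);      /* hypothetical blocking wait */

    static vm_fault_t my_fault_wait(struct vm_fault *vmf)
    {
            if (fault_flag_allow_retry_first(vmf->flags)) {
                    /* Caller forbids dropping mmap_lock: just ask for a retry. */
                    if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
                            return VM_FAULT_RETRY;

                    /*
                     * Release mmap_lock before sleeping; the core retakes it
                     * and re-drives the fault. A real handler must pin its
                     * object first (cf. ttm_bo_get() in the original), since
                     * the VMA can disappear once the lock is gone.
                     */
                    mmap_read_unlock(vmf->vma->vm_mm);
                    wait_for_device();
                    return VM_FAULT_RETRY;
            }

            /* Retrying is not allowed; sleep with the lock held. */
            wait_for_device();
            return 0;
    }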
/drivers/video/fbdev/core/
D | fb_defio.c |
   94  static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)   in fb_deferred_io_fault() argument
   98          struct fb_info *info = vmf->vma->vm_private_data;   in fb_deferred_io_fault()
  100          offset = vmf->pgoff << PAGE_SHIFT;   in fb_deferred_io_fault()
  110          if (vmf->vma->vm_file)   in fb_deferred_io_fault()
  111                  page->mapping = vmf->vma->vm_file->f_mapping;   in fb_deferred_io_fault()
  116          page->index = vmf->pgoff; /* for page_mkclean() */   in fb_deferred_io_fault()
  118          vmf->page = page;   in fb_deferred_io_fault()
  197  static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)   in fb_deferred_io_page_mkwrite() argument
  199          unsigned long offset = vmf->pgoff << PAGE_SHIFT;   in fb_deferred_io_page_mkwrite()
  200          struct page *page = vmf->page;   in fb_deferred_io_page_mkwrite()
  [all …]
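fb_deferred_io_fault() is the other flavour of .fault: instead of inserting a PTE itself, it resolves the offset to a struct page, takes a reference, and returns it through vmf->page for the core to map. A sketch of that flavour over a hypothetical my_buf object (udmabuf below is almost exactly this):

    #include <linux/mm.h>

    struct my_buf {                 /* hypothetical driver buffer */
            struct page **pages;
            pgoff_t pagecount;
    };

    static vm_fault_t my_page_fault(struct vm_fault *vmf)
    {
            struct my_buf *buf = vmf->vma->vm_private_data;

            if (vmf->pgoff >= buf->pagecount)
                    return VM_FAULT_SIGBUS;

            vmf->page = buf->pages[vmf->pgoff];
            get_page(vmf->page);    /* the core drops this reference */
            return 0;               /* 0 asks the core to map vmf->page */
    }

The page->mapping and page->index assignments in the excerpt exist, per its own comment, so page_mkclean() can later find and write-protect the page for dirty tracking; handlers that never write-protect their pages have no need for them.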
/drivers/gpu/drm/
D | drm_vm.c |
  110  static vm_fault_t drm_vm_fault(struct vm_fault *vmf)   in drm_vm_fault() argument
  112          struct vm_area_struct *vma = vmf->vma;   in drm_vm_fault()
  139          resource_size_t offset = vmf->address - vma->vm_start;   in drm_vm_fault()
  169          vmf->page = page;   in drm_vm_fault()
  183  static vm_fault_t drm_vm_fault(struct vm_fault *vmf)   in drm_vm_fault() argument
  199  static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)   in drm_vm_shm_fault() argument
  201          struct vm_area_struct *vma = vmf->vma;   in drm_vm_shm_fault()
  210          offset = vmf->address - vma->vm_start;   in drm_vm_shm_fault()
  216          vmf->page = page;   in drm_vm_shm_fault()
  299  static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)   in drm_vm_dma_fault() argument
  [all …]
/drivers/gpu/drm/vmwgfx/
D | vmwgfx_page_dirty.c |
  375  vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)   in vmw_bo_vm_mkwrite() argument
  377          struct vm_area_struct *vma = vmf->vma;   in vmw_bo_vm_mkwrite()
  389          save_flags = vmf->flags;   in vmw_bo_vm_mkwrite()
  390          vmf->flags &= ~FAULT_FLAG_ALLOW_RETRY;   in vmw_bo_vm_mkwrite()
  391          ret = ttm_bo_vm_reserve(bo, vmf);   in vmw_bo_vm_mkwrite()
  392          vmf->flags = save_flags;   in vmw_bo_vm_mkwrite()
  396          page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);   in vmw_bo_vm_mkwrite()
  416  vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)   in vmw_bo_vm_fault() argument
  418          struct vm_area_struct *vma = vmf->vma;   in vmw_bo_vm_fault()
  426          ret = ttm_bo_vm_reserve(bo, vmf);   in vmw_bo_vm_fault()
  [all …]
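.page_mkwrite handlers such as these run on the first write to a page that is already mapped read-only, which is what makes dirty tracking possible; note how vmw_bo_vm_mkwrite() saves vmf->flags and masks out FAULT_FLAG_ALLOW_RETRY around ttm_bo_vm_reserve(), because a mkwrite handler here must not come back with mmap_lock dropped. A minimal sketch of the mkwrite side, with record_dirty() as a hypothetical bookkeeping hook:

    #include <linux/mm.h>
    #include <linux/pagemap.h>

    static void record_dirty(void *priv, pgoff_t pgoff);    /* hypothetical */

    static vm_fault_t my_page_mkwrite(struct vm_fault *vmf)
    {
            struct page *page = vmf->page;  /* already mapped, read-only */

            lock_page(page);
            record_dirty(vmf->vma->vm_private_data, vmf->pgoff);

            /*
             * VM_FAULT_LOCKED tells the core the page comes back locked;
             * it unlocks it after making the PTE writable.
             */
            return VM_FAULT_LOCKED;
    }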
/drivers/xen/
D | privcmd-buf.c |
  117  static vm_fault_t privcmd_buf_vma_fault(struct vm_fault *vmf)   in privcmd_buf_vma_fault() argument
  120                   vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,   in privcmd_buf_vma_fault()
  121                   vmf->pgoff, (void *)vmf->address);   in privcmd_buf_vma_fault()
/drivers/char/
D | mspec.c |
  137  mspec_fault(struct vm_fault *vmf)   in mspec_fault() argument
  141          pgoff_t index = vmf->pgoff;   in mspec_fault()
  142          struct vma_data *vdata = vmf->vma->vm_private_data;   in mspec_fault()
  164          return vmf_insert_pfn(vmf->vma, vmf->address, pfn);   in mspec_fault()
/drivers/misc/cxl/
D | context.c |
  126  static vm_fault_t cxl_mmap_fault(struct vm_fault *vmf)   in cxl_mmap_fault() argument
  128          struct vm_area_struct *vma = vmf->vma;   in cxl_mmap_fault()
  133          offset = vmf->pgoff << PAGE_SHIFT;   in cxl_mmap_fault()
  136                  __func__, ctx->pe, vmf->address, offset);   in cxl_mmap_fault()
  161                  vmf->page = ctx->ff_page;   in cxl_mmap_fault()
  168          ret = vmf_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);   in cxl_mmap_fault()
/drivers/misc/ocxl/
D | sysfs.c |
  109  static vm_fault_t global_mmio_fault(struct vm_fault *vmf)   in global_mmio_fault() argument
  111          struct vm_area_struct *vma = vmf->vma;   in global_mmio_fault()
  115          if (vmf->pgoff >= (afu->config.global_mmio_size >> PAGE_SHIFT))   in global_mmio_fault()
  118          offset = vmf->pgoff;   in global_mmio_fault()
  120          return vmf_insert_pfn(vma, vmf->address, offset);   in global_mmio_fault()
D | context.c |
  139  static vm_fault_t ocxl_mmap_fault(struct vm_fault *vmf)   in ocxl_mmap_fault() argument
  141          struct vm_area_struct *vma = vmf->vma;   in ocxl_mmap_fault()
  146          offset = vmf->pgoff << PAGE_SHIFT;   in ocxl_mmap_fault()
  148                  ctx->pasid, vmf->address, offset);   in ocxl_mmap_fault()
  151                  ret = map_pp_mmio(vma, vmf->address, offset, ctx);   in ocxl_mmap_fault()
  153                  ret = map_afu_irq(vma, vmf->address, offset, ctx);   in ocxl_mmap_fault()
/drivers/char/agp/
D | alpha-agp.c |
   14  static vm_fault_t alpha_core_agp_vm_fault(struct vm_fault *vmf)   in alpha_core_agp_vm_fault() argument
   21          dma_addr = vmf->address - vmf->vma->vm_start + agp->aperture.bus_base;   in alpha_core_agp_vm_fault()
   32          vmf->page = page;   in alpha_core_agp_vm_fault()
/drivers/dma-buf/heaps/
D | cma_heap.c |
  165  static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)   in cma_heap_vm_fault() argument
  167          struct vm_area_struct *vma = vmf->vma;   in cma_heap_vm_fault()
  170          if (vmf->pgoff > buffer->pagecount)   in cma_heap_vm_fault()
  173          return vmf_insert_pfn(vma, vmf->address, page_to_pfn(buffer->pages[vmf->pgoff]));   in cma_heap_vm_fault()
/drivers/gpu/drm/gma500/
D | gem.c |
  109  static vm_fault_t psb_gem_fault(struct vm_fault *vmf);
  254  static vm_fault_t psb_gem_fault(struct vm_fault *vmf)   in psb_gem_fault() argument
  256          struct vm_area_struct *vma = vmf->vma;   in psb_gem_fault()
  290          page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;   in psb_gem_fault()
  297          ret = vmf_insert_pfn(vma, vmf->address, pfn);   in psb_gem_fault()
D | fbdev.c |
   23  static vm_fault_t psb_fbdev_vm_fault(struct vm_fault *vmf)   in psb_fbdev_vm_fault() argument
   25          struct vm_area_struct *vma = vmf->vma;   in psb_fbdev_vm_fault()
   27          unsigned long address = vmf->address - (vmf->pgoff << PAGE_SHIFT);   in psb_fbdev_vm_fault()
/drivers/infiniband/hw/hfi1/
D | file_ops.c |
  (note: apart from the vma_fault() declaration at line 73, "vmf" in this file is a local u8 debug flag in hfi1_file_mmap(), not a struct vm_fault *)
   73  static vm_fault_t vma_fault(struct vm_fault *vmf);
  307  static inline void mmap_cdbg(u16 ctxt, u8 subctxt, u8 type, u8 mapio, u8 vmf,   in mmap_cdbg() argument
  313                  ctxt, subctxt, type, mapio, vmf, !!memdma,   in mmap_cdbg()
  328          u8 subctxt, mapio = 0, vmf = 0, type;   in hfi1_file_mmap() local
  441          mmap_cdbg(ctxt, subctxt, type, mapio, vmf, memaddr,   in hfi1_file_mmap()
  487                  vmf = 1;   in hfi1_file_mmap()
  520                  vmf = 1;   in hfi1_file_mmap()
  526                  vmf = 1;   in hfi1_file_mmap()
  533                  vmf = 1;   in hfi1_file_mmap()
  545                  vmf = 1;   in hfi1_file_mmap()
  [all …]
/drivers/gpu/drm/armada/
D | armada_gem.c |
   20  static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)   in armada_gem_vm_fault() argument
   22          struct drm_gem_object *gobj = vmf->vma->vm_private_data;   in armada_gem_vm_fault()
   26          pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;   in armada_gem_vm_fault()
   27          return vmf_insert_pfn(vmf->vma, vmf->address, pfn);   in armada_gem_vm_fault()
/drivers/dma-buf/
D | udmabuf.c |
   33  static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)   in udmabuf_vm_fault() argument
   35          struct vm_area_struct *vma = vmf->vma;   in udmabuf_vm_fault()
   37          pgoff_t pgoff = vmf->pgoff;   in udmabuf_vm_fault()
   41          vmf->page = ubuf->pages[pgoff];   in udmabuf_vm_fault()
   42          get_page(vmf->page);   in udmabuf_vm_fault()
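What connects all of these handlers to a VMA is the driver's mmap hook: it stashes private data on the VMA and points vm_ops at the fault handler, after which the core calls .fault on every missing page. A sketch of the wiring, reusing the hypothetical my_page_fault() from the fb_defio note above:

    #include <linux/fs.h>
    #include <linux/mm.h>

    static vm_fault_t my_page_fault(struct vm_fault *vmf); /* sketched earlier */

    static const struct vm_operations_struct my_vm_ops = {
            .fault = my_page_fault,
    };

    static int my_mmap(struct file *file, struct vm_area_struct *vma)
    {
            vma->vm_private_data = file->private_data;
            vma->vm_ops = &my_vm_ops;
            return 0;
    }

Handlers that insert raw PFNs (mspec, armada, etnaviv above) additionally need the VMA flagged VM_PFNMAP at mmap time, and vmf_insert_mixed() callers need VM_MIXEDMAP; the insert helpers enforce this.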
/drivers/gpu/drm/omapdrm/
D | omap_gem.c |
  347                  struct vm_area_struct *vma, struct vm_fault *vmf)   in omap_gem_fault_1d() argument
  354          pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;   in omap_gem_fault_1d()
  364          VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,   in omap_gem_fault_1d()
  367          return vmf_insert_mixed(vma, vmf->address,   in omap_gem_fault_1d()
  373                  struct vm_area_struct *vma, struct vm_fault *vmf)   in omap_gem_fault_2d() argument
  403          pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;   in omap_gem_fault_2d()
  414          vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);   in omap_gem_fault_2d()
  459          VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,   in omap_gem_fault_2d()
  490  static vm_fault_t omap_gem_fault(struct vm_fault *vmf)   in omap_gem_fault() argument
  492          struct vm_area_struct *vma = vmf->vma;   in omap_gem_fault()
  [all …]
/drivers/gpu/drm/nouveau/
D | nouveau_dmem.c |
  161  static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)   in nouveau_dmem_migrate_to_ram() argument
  163          struct nouveau_drm *drm = page_to_drm(vmf->page);   in nouveau_dmem_migrate_to_ram()
  172                  .vma = vmf->vma,   in nouveau_dmem_migrate_to_ram()
  173                  .start = vmf->address,   in nouveau_dmem_migrate_to_ram()
  174                  .end = vmf->address + PAGE_SIZE,   in nouveau_dmem_migrate_to_ram()
  178                  .fault_page = vmf->page,   in nouveau_dmem_migrate_to_ram()
  196          dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);   in nouveau_dmem_migrate_to_ram()
/drivers/gpu/drm/amd/amdgpu/
D | amdgpu_gem.c |
   48  static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)   in amdgpu_gem_fault() argument
   50          struct ttm_buffer_object *bo = vmf->vma->vm_private_data;   in amdgpu_gem_fault()
   55          ret = ttm_bo_vm_reserve(bo, vmf);   in amdgpu_gem_fault()
   66          ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,   in amdgpu_gem_fault()
   71                  ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);   in amdgpu_gem_fault()
   73          if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))   in amdgpu_gem_fault()
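amdgpu_gem_fault() (and i915's vm_fault_ttm() further down) is the standard recipe for a TTM-backed fault: reserve the object, fault the reserved range, then unlock, being careful not to touch the reservation when the helper has already dropped mmap_lock for a retry. A condensed sketch of that recipe, assuming the three-argument ttm_bo_vm_fault_reserved() and the drm/ttm/ttm_bo.h header of recent kernels:

    #include <drm/ttm/ttm_bo.h>
    #include <linux/dma-resv.h>

    static vm_fault_t my_ttm_fault(struct vm_fault *vmf)
    {
            struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
            vm_fault_t ret;

            ret = ttm_bo_vm_reserve(bo, vmf);       /* may be VM_FAULT_RETRY */
            if (ret)
                    return ret;

            ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
                                           TTM_BO_VM_NUM_PREFAULT);
            if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
                    return ret;     /* helper already dropped mmap_lock */

            dma_resv_unlock(bo->base.resv);
            return ret;
    }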
/drivers/infiniband/core/
D | uverbs_main.c |
  778  static vm_fault_t rdma_umap_fault(struct vm_fault *vmf)   in rdma_umap_fault() argument
  780          struct ib_uverbs_file *ufile = vmf->vma->vm_file->private_data;   in rdma_umap_fault()
  781          struct rdma_umap_priv *priv = vmf->vma->vm_private_data;   in rdma_umap_fault()
  788          if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) {   in rdma_umap_fault()
  789                  vmf->page = ZERO_PAGE(vmf->address);   in rdma_umap_fault()
  790                  get_page(vmf->page);   in rdma_umap_fault()
  797                          alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0);   in rdma_umap_fault()
  804                  vmf->page = ufile->disassociate_page;   in rdma_umap_fault()
  805                  get_page(vmf->page);   in rdma_umap_fault()
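rdma_umap_fault() services faults on mappings whose device has been disassociated: read-only mappings get the shared zero page, writable ones a private zeroed page cached on the file, so userspace survives hot-unplug without touching stale device memory. A sketch of just the read-only branch:

    #include <linux/mm.h>
    #include <linux/pgtable.h>

    static vm_fault_t my_revoked_fault(struct vm_fault *vmf)
    {
            /*
             * Read-only mapping: the global zero page is indistinguishable
             * from "device gone, reads as zero".
             */
            if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) {
                    vmf->page = ZERO_PAGE(vmf->address);
                    get_page(vmf->page);
                    return 0;
            }

            /*
             * Writable mappings must not share the zero page; the excerpt
             * allocates a private zeroed page instead via
             * alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0).
             */
            return VM_FAULT_SIGBUS;
    }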
/drivers/gpu/drm/etnaviv/
D | etnaviv_gem.c |
  163  static vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)   in etnaviv_gem_fault() argument
  165          struct vm_area_struct *vma = vmf->vma;   in etnaviv_gem_fault()
  191          pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;   in etnaviv_gem_fault()
  195          VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,   in etnaviv_gem_fault()
  198          return vmf_insert_pfn(vma, vmf->address, pfn);   in etnaviv_gem_fault()
/drivers/media/v4l2-core/
D | videobuf-dma-sg.c |
  430  static vm_fault_t videobuf_vm_fault(struct vm_fault *vmf)   in videobuf_vm_fault() argument
  432          struct vm_area_struct *vma = vmf->vma;   in videobuf_vm_fault()
  436                  vmf->address, vma->vm_start, vma->vm_end);   in videobuf_vm_fault()
  441          clear_user_highpage(page, vmf->address);   in videobuf_vm_fault()
  442          vmf->page = page;   in videobuf_vm_fault()
/drivers/virt/gunyah/
D | gunyah_vcpu.c |
  419  static vm_fault_t gunyah_vcpu_fault(struct vm_fault *vmf)   in gunyah_vcpu_fault() argument
  421          struct gunyah_vcpu *vcpu = vmf->vma->vm_file->private_data;   in gunyah_vcpu_fault()
  424          if (vmf->pgoff)   in gunyah_vcpu_fault()
  429          vmf->page = page;   in gunyah_vcpu_fault()
/drivers/gpu/drm/i915/gem/
D | i915_gem_ttm.c |
 1041  static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)   in vm_fault_ttm() argument
 1043          struct vm_area_struct *area = vmf->vma;   in vm_fault_ttm()
 1056          ret = ttm_bo_vm_reserve(bo, vmf);   in vm_fault_ttm()
 1116          ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,   in vm_fault_ttm()
 1120                  ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);   in vm_fault_ttm()
 1123          if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))   in vm_fault_ttm()