
Searched refs:vmf (Results 1 – 9 of 9) sorted by relevance

/arch/x86/entry/vdso/
vma.c 43 struct vm_area_struct *vma, struct vm_fault *vmf) in vdso_fault() argument
47 if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size) in vdso_fault()
50 vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT)); in vdso_fault()
51 get_page(vmf->page); in vdso_fault()
88 struct vm_area_struct *vma, struct vm_fault *vmf) in vvar_fault() argument
96 sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) + in vvar_fault()
110 return vmf_insert_pfn(vma, vmf->address, in vvar_fault()
116 return vmf_insert_pfn_prot(vma, vmf->address, in vvar_fault()
124 return vmf_insert_pfn(vma, vmf->address, in vvar_fault()
/arch/s390/kernel/
vdso.c 51 struct vm_area_struct *vma, struct vm_fault *vmf) in vdso_fault() argument
65 if (vmf->pgoff >= vdso_pages) in vdso_fault()
68 vmf->page = vdso_pagelist[vmf->pgoff]; in vdso_fault()
69 get_page(vmf->page); in vdso_fault()
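For reference, a minimal sketch (hypothetical names, not taken from the files above) of the struct-page style of .fault handler that the two vdso results follow: validate vmf->pgoff, hand the backing page to the core VM through vmf->page, and take a reference on it. The vdso handlers listed here use the older two-argument special-mapping variant of the same pattern.

	static vm_fault_t my_fault(struct vm_fault *vmf)
	{
		/* reject offsets beyond the backing object (my_npages is a placeholder) */
		if (vmf->pgoff >= my_npages)
			return VM_FAULT_SIGBUS;

		vmf->page = my_pages[vmf->pgoff];	/* page the core VM should map */
		get_page(vmf->page);			/* reference is dropped by the VM later */
		return 0;
	}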
/arch/powerpc/platforms/cell/spufs/
file.c 223 spufs_mem_mmap_fault(struct vm_fault *vmf) in spufs_mem_mmap_fault() argument
225 struct vm_area_struct *vma = vmf->vma; in spufs_mem_mmap_fault()
230 offset = vmf->pgoff << PAGE_SHIFT; in spufs_mem_mmap_fault()
235 vmf->address, offset); in spufs_mem_mmap_fault()
247 ret = vmf_insert_pfn(vma, vmf->address, pfn); in spufs_mem_mmap_fault()
303 static vm_fault_t spufs_ps_fault(struct vm_fault *vmf, in spufs_ps_fault() argument
307 struct spu_context *ctx = vmf->vma->vm_file->private_data; in spufs_ps_fault()
308 unsigned long area, offset = vmf->pgoff << PAGE_SHIFT; in spufs_ps_fault()
346 ret = vmf_insert_pfn(vmf->vma, vmf->address, in spufs_ps_fault()
360 static vm_fault_t spufs_cntl_mmap_fault(struct vm_fault *vmf) in spufs_cntl_mmap_fault() argument
[all …]
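The spufs and XIVE results below use the other common pattern: instead of returning a struct page, the handler translates the faulting offset into a physical frame and maps it directly with vmf_insert_pfn(). A minimal sketch under assumed names (my_region_phys and MY_REGION_SIZE are placeholders):

	static vm_fault_t my_pfn_fault(struct vm_fault *vmf)
	{
		unsigned long offset = vmf->pgoff << PAGE_SHIFT;

		if (offset >= MY_REGION_SIZE)
			return VM_FAULT_SIGBUS;

		/* insert the physical frame straight at the faulting address */
		return vmf_insert_pfn(vmf->vma, vmf->address,
				      (my_region_phys + offset) >> PAGE_SHIFT);
	}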
/arch/powerpc/kvm/
book3s_xive_native.c 227 static vm_fault_t xive_native_esb_fault(struct vm_fault *vmf) in xive_native_esb_fault() argument
229 struct vm_area_struct *vma = vmf->vma; in xive_native_esb_fault()
245 page_offset = vmf->pgoff - vma->vm_pgoff; in xive_native_esb_fault()
279 vmf_insert_pfn(vma, vmf->address, page >> PAGE_SHIFT); in xive_native_esb_fault()
287 static vm_fault_t xive_native_tima_fault(struct vm_fault *vmf) in xive_native_tima_fault() argument
289 struct vm_area_struct *vma = vmf->vma; in xive_native_tima_fault()
291 switch (vmf->pgoff - vma->vm_pgoff) { in xive_native_tima_fault()
296 vmf_insert_pfn(vma, vmf->address, xive_tima_os >> PAGE_SHIFT); in xive_native_tima_fault()
book3s_64_vio.c 220 static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf) in kvm_spapr_tce_fault() argument
222 struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data; in kvm_spapr_tce_fault()
225 if (vmf->pgoff >= kvmppc_tce_pages(stt->size)) in kvm_spapr_tce_fault()
228 page = kvm_spapr_get_tce_page(stt, vmf->pgoff); in kvm_spapr_tce_fault()
233 vmf->page = page; in kvm_spapr_tce_fault()
powerpc.c 2080 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) in kvm_arch_vcpu_fault() argument
/arch/s390/kvm/
kvm-s390.c 4490 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) in kvm_arch_vcpu_fault() argument
4493 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET) in kvm_arch_vcpu_fault()
4495 vmf->page = virt_to_page(vcpu->arch.sie_block); in kvm_arch_vcpu_fault()
4496 get_page(vmf->page); in kvm_arch_vcpu_fault()
/arch/mips/kvm/
mips.c 1094 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) in kvm_arch_vcpu_fault() argument
/arch/x86/kvm/
x86.c 4605 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) in kvm_arch_vcpu_fault() argument
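All of these handlers are reached through a vm_operations_struct installed on the VMA. A hypothetical sketch of that wiring (names are placeholders; the kvm_arch_vcpu_fault() results above are instead invoked from KVM's common vcpu fault handler rather than installed directly as vm_ops):

	static const struct vm_operations_struct my_vm_ops = {
		.fault = my_fault,
	};

	static int my_mmap(struct file *file, struct vm_area_struct *vma)
	{
		vma->vm_ops = &my_vm_ops;	/* faults on this VMA go to my_fault() */
		return 0;
	}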