Lines matching refs: vma (identifier cross-reference of the Xen privcmd driver, drivers/xen/privcmd.c)
Entries give the source line number, the matched line, and where available the enclosing function and kind of use (member, local, argument).
47 struct vm_area_struct *vma,
198 struct vm_area_struct *vma; member
206 struct vm_area_struct *vma = st->vma; in mmap_gfn_range() local
216 ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end)) in mmap_gfn_range()
219 rc = xen_remap_domain_gfn_range(vma, in mmap_gfn_range()
222 vma->vm_page_prot, in mmap_gfn_range()
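The fragments above are from mmap_gfn_range(), the per-chunk callback behind the legacy IOCTL_PRIVCMD_MMAP path. A minimal sketch of how they fit together, assuming a pre-4.11 kernel and the usual privcmd includes (<linux/mm.h>, <xen/xen-ops.h>, <xen/features.h>); the state fields other than va and vma (here domain) are assumptions inferred from the remap call:

struct mmap_gfn_state {
	unsigned long va;               /* next expected chunk VA */
	struct vm_area_struct *vma;
	domid_t domain;                 /* assumed: foreign domain to map from */
};

static int mmap_gfn_range(void *data, void *state)
{
	struct privcmd_mmap_entry *msg = data;
	struct mmap_gfn_state *st = state;
	struct vm_area_struct *vma = st->vma;
	int rc;

	/* Chunks must be contiguous in VA space and end inside the VMA. */
	if ((msg->va != st->va) ||
	    ((msg->va + (msg->npages << PAGE_SHIFT)) > vma->vm_end))
		return -EINVAL;

	rc = xen_remap_domain_gfn_range(vma, msg->va & PAGE_MASK,
					msg->mfn, msg->npages,
					vma->vm_page_prot,
					st->domain, NULL);
	if (rc < 0)
		return rc;

	st->va += msg->npages << PAGE_SHIFT;    /* advance the cursor */
	return 0;
}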
236 struct vm_area_struct *vma; in privcmd_ioctl_mmap() local
262 vma = find_vma(mm, msg->va); in privcmd_ioctl_mmap()
265 if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data) in privcmd_ioctl_mmap()
267 vma->vm_private_data = PRIV_VMA_LOCKED; in privcmd_ioctl_mmap()
270 state.va = vma->vm_start; in privcmd_ioctl_mmap()
271 state.vma = vma; in privcmd_ioctl_mmap()
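privcmd_ioctl_mmap() looks up the VMA covering the first requested VA and claims it before walking the entries. A hedged sketch of the lookup-and-claim step around lines 262-271, as a fragment rather than a complete function; the locking, the traverse_pages() helper, and the mmapcmd field names are assumptions from that era of the driver:

	down_write(&mm->mmap_sem);      /* pre-4.11; later kernels use mmap_write_lock() */

	vma = find_vma(mm, msg->va);
	rc = -EINVAL;
	/* Must exist, start exactly at the requested VA, and be unclaimed. */
	if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
		goto out_up;
	vma->vm_private_data = PRIV_VMA_LOCKED; /* ((void *)1): claimed, no page array */

	state.va = vma->vm_start;
	state.vma = vma;
	state.domain = mmapcmd.dom;             /* assumed field names */

	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			    &pagelist, mmap_gfn_range, &state);
out_up:
	up_write(&mm->mmap_sem);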
291 struct vm_area_struct *vma; member
315 struct vm_area_struct *vma = st->vma; in mmap_batch_fn() local
316 struct page **pages = vma->vm_private_data; in mmap_batch_fn()
324 ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr, in mmap_batch_fn()
325 (int *)gfnp, st->vma->vm_page_prot, in mmap_batch_fn()
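mmap_batch_fn() is the batch-path counterpart. xen_remap_domain_gfn_array() takes an array of GFNs plus an int * error array, and privcmd aliases the two (the (int *)gfnp cast above) so per-frame errors overwrite the input in place. A sketch; the mmap_batch_state fields beyond vma (domain, index, global_error) are assumptions:

static int mmap_batch_fn(void *data, int nr, void *state)
{
	xen_pfn_t *gfnp = data;
	struct mmap_batch_state *st = state;
	struct vm_area_struct *vma = st->vma;
	struct page **pages = vma->vm_private_data;
	struct page **cur_pages = NULL;
	int ret;

	/* Auto-translated guests map into the ballooned pages that
	 * alloc_empty_pages() stashed in vm_private_data. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		cur_pages = &pages[st->index];

	ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
					 (int *)gfnp, st->vma->vm_page_prot,
					 st->domain, cur_pages);

	/* Record partial failure without aborting the walk. */
	if (ret > 0)
		st->global_error = 1;
	else if (ret < 0 && st->global_error == 0)
		st->global_error = ret;

	st->va += XEN_PAGE_SIZE * nr;
	st->index += nr / XEN_PFN_PER_PAGE;
	return 0;
}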
395 static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs) in alloc_empty_pages() argument
411 BUG_ON(vma->vm_private_data != NULL); in alloc_empty_pages()
412 vma->vm_private_data = pages; in alloc_empty_pages()
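alloc_empty_pages() runs only on auto-translated guests: it backs the VMA with ballooned-out pages and stashes the array in vm_private_data, which is why a NULL vm_private_data elsewhere in the driver means "not yet set up". A sketch:

static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
{
	int rc;
	struct page **pages;

	pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
	if (pages == NULL)
		return -ENOMEM;

	rc = alloc_xenballooned_pages(numpgs, pages);
	if (rc != 0) {
		kfree(pages);
		return -ENOMEM;
	}

	BUG_ON(vma->vm_private_data != NULL);   /* as in line 411 above */
	vma->vm_private_data = pages;           /* freed by privcmd_close() */
	return 0;
}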
424 struct vm_area_struct *vma; in privcmd_ioctl_mmap_batch() local
472 vma = find_vma(mm, m.addr); in privcmd_ioctl_mmap_batch()
473 if (!vma || in privcmd_ioctl_mmap_batch()
474 vma->vm_ops != &privcmd_vm_ops) { in privcmd_ioctl_mmap_batch()
490 if (vma->vm_private_data == NULL) { in privcmd_ioctl_mmap_batch()
491 if (m.addr != vma->vm_start || in privcmd_ioctl_mmap_batch()
492 m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) { in privcmd_ioctl_mmap_batch()
497 ret = alloc_empty_pages(vma, nr_pages); in privcmd_ioctl_mmap_batch()
501 vma->vm_private_data = PRIV_VMA_LOCKED; in privcmd_ioctl_mmap_batch()
503 if (m.addr < vma->vm_start || in privcmd_ioctl_mmap_batch()
504 m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) { in privcmd_ioctl_mmap_batch()
508 if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) { in privcmd_ioctl_mmap_batch()
515 state.vma = vma; in privcmd_ioctl_mmap_batch()
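Lines 472-515 are the two validation branches of privcmd_ioctl_mmap_batch(). A fresh VMA (vm_private_data still NULL) must match the request exactly and, on auto-translated guests, gets its backing pages allocated; a reused VMA only has to contain the range and have no PTEs populated yet. A condensed sketch of that branch structure, again as a fragment; the error labels and m.* field names are assumptions:

	vma = find_vma(mm, m.addr);
	if (!vma || vma->vm_ops != &privcmd_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (vma->vm_private_data == NULL) {
		/* Fresh VMA: must cover the request exactly. */
		if (m.addr != vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = alloc_empty_pages(vma, nr_pages);
			if (ret < 0)
				goto out_unlock;
		} else
			vma->vm_private_data = PRIV_VMA_LOCKED;
	} else {
		/* Reused VMA: range must fit and must not be mapped yet. */
		if (m.addr < vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end ||
		    privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	state.va = m.addr;
	state.vma = vma;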
582 static void privcmd_close(struct vm_area_struct *vma) in privcmd_close() argument
584 struct page **pages = vma->vm_private_data; in privcmd_close()
585 int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; in privcmd_close()
586 int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT; in privcmd_close()
592 rc = xen_unmap_domain_gfn_range(vma, numgfns, pages); in privcmd_close()
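privcmd_close() is the vm_operations_struct close hook undoing alloc_empty_pages(). Note the two shift widths in lines 585-586: numpgs counts kernel-sized pages for the balloon allocator, numgfns counts Xen-sized frames for the hypervisor unmap; with 64 KiB kernel pages on arm64 the two differ. A sketch:

static void privcmd_close(struct vm_area_struct *vma)
{
	struct page **pages = vma->vm_private_data;
	int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
	int rc;

	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
		return;

	rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
	if (rc == 0)
		free_xenballooned_pages(numpgs, pages);
	else
		pr_crit("unable to unmap gfn range: leaking %d pages. rc=%d\n",
			numpgs, rc);
	kfree(pages);
}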
601 static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in privcmd_fault() argument
604 vma, vma->vm_start, vma->vm_end, in privcmd_fault()
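The vma-taking fault signature dates this driver to pre-4.11 kernels (4.11 moved the VMA into struct vm_fault). privcmd never services faults: every mapping is created up front by the ioctls, so the handler just logs the VMA and returns SIGBUS. A sketch:

static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx\n",
	       vma, vma->vm_start, vma->vm_end, vmf->pgoff);

	return VM_FAULT_SIGBUS;
}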
615 static int privcmd_mmap(struct file *file, struct vm_area_struct *vma) in privcmd_mmap() argument
619 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY | in privcmd_mmap()
621 vma->vm_ops = &privcmd_vm_ops; in privcmd_mmap()
622 vma->vm_private_data = NULL; in privcmd_mmap()
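privcmd_mmap() only tags the VMA; nothing is mapped at mmap() time. VM_IO | VM_PFNMAP mark a raw PFN mapping, VM_DONTCOPY keeps fork() from trying to duplicate foreign mappings it cannot recreate, and vm_private_data starts NULL so the ioctls can use it as the claimed/backed marker. A sketch, with the flag continuation from line 620 (not shown above) assumed to be VM_DONTEXPAND | VM_DONTDUMP:

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* DONTCOPY: copy_page_range() cannot recreate these mappings. */
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
			 VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &privcmd_vm_ops;
	vma->vm_private_data = NULL;

	return 0;
}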
639 struct vm_area_struct *vma, in privcmd_vma_range_is_mapped() argument
643 return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT, in privcmd_vma_range_is_mapped()
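privcmd_vma_range_is_mapped() (declared near line 47, defined here) asks whether any PTE already exists in the range by walking it with apply_to_page_range() and a callback that fails on the first populated entry. A sketch using the pte_fn_t signature of that era (the pgtable_t token argument was dropped in later kernels); the callback name is an assumption:

static int is_mapped_fn(pte_t *pte, pgtable_t token,
			unsigned long addr, void *data)
{
	return pte_none(*pte) ? 0 : -EBUSY;
}

static int privcmd_vma_range_is_mapped(
	struct vm_area_struct *vma,
	unsigned long addr,
	unsigned long nr_pages)
{
	return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
				   is_mapped_fn, NULL) != 0;
}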