Cross-reference listing — lines matching references to `vma` (format: kernel line number, code excerpt, enclosing function)
5975 struct perf_event *event = vmf->vma->vm_file->private_data; in perf_mmap_fault()
5998 vmf->page->mapping = vmf->vma->vm_file->f_mapping; in perf_mmap_fault()
6113 static void perf_mmap_open(struct vm_area_struct *vma) in perf_mmap_open() argument
6115 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_open()
6120 if (vma->vm_pgoff) in perf_mmap_open()
6124 event->pmu->event_mapped(event, vma->vm_mm); in perf_mmap_open()
6137 static void perf_mmap_close(struct vm_area_struct *vma) in perf_mmap_close() argument
6139 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_close()
6147 event->pmu->event_unmapped(event, vma->vm_mm); in perf_mmap_close()
6154 if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff && in perf_mmap_close()
6166 atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm); in perf_mmap_close()
6241 atomic64_sub(mmap_locked, &vma->vm_mm->pinned_vm); in perf_mmap_close()
6255 static int perf_mmap(struct file *file, struct vm_area_struct *vma) in perf_mmap() argument
6275 if (!(vma->vm_flags & VM_SHARED)) in perf_mmap()
6282 vma_size = vma->vm_end - vma->vm_start; in perf_mmap()
6284 if (vma->vm_pgoff == 0) { in perf_mmap()
6312 if (aux_offset != vma->vm_pgoff << PAGE_SHIFT) in perf_mmap()
6316 if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff) in perf_mmap()
6407 locked = atomic64_read(&vma->vm_mm->pinned_vm) + extra; in perf_mmap()
6417 if (vma->vm_flags & VM_WRITE) in perf_mmap()
6440 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, in perf_mmap()
6449 atomic64_add(extra, &vma->vm_mm->pinned_vm); in perf_mmap()
6462 vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP); in perf_mmap()
6463 vma->vm_ops = &perf_mmap_vmops; in perf_mmap()
6466 event->pmu->event_mapped(event, vma->vm_mm); in perf_mmap()
8379 struct vm_area_struct *vma; member
8405 struct vm_area_struct *vma = mmap_event->vma; in perf_event_mmap_match() local
8406 int executable = vma->vm_flags & VM_EXEC; in perf_event_mmap_match()
8481 struct vm_area_struct *vma = mmap_event->vma; in perf_event_mmap_event() local
8482 struct file *file = vma->vm_file; in perf_event_mmap_event()
8491 if (vma->vm_flags & VM_READ) in perf_event_mmap_event()
8493 if (vma->vm_flags & VM_WRITE) in perf_event_mmap_event()
8495 if (vma->vm_flags & VM_EXEC) in perf_event_mmap_event()
8498 if (vma->vm_flags & VM_MAYSHARE) in perf_event_mmap_event()
8503 if (vma->vm_flags & VM_LOCKED) in perf_event_mmap_event()
8505 if (is_vm_hugetlb_page(vma)) in perf_event_mmap_event()
8527 inode = file_inode(vma->vm_file); in perf_event_mmap_event()
8536 if (vma->vm_ops && vma->vm_ops->name) { in perf_event_mmap_event()
8537 name = (char *) vma->vm_ops->name(vma); in perf_event_mmap_event()
8542 name = (char *)arch_vma_name(vma); in perf_event_mmap_event()
8546 if (vma->vm_start <= vma->vm_mm->start_brk && in perf_event_mmap_event()
8547 vma->vm_end >= vma->vm_mm->brk) { in perf_event_mmap_event()
8551 if (vma->vm_start <= vma->vm_mm->start_stack && in perf_event_mmap_event()
8552 vma->vm_end >= vma->vm_mm->start_stack) { in perf_event_mmap_event()
8583 if (!(vma->vm_flags & VM_EXEC)) in perf_event_mmap_event()
8589 build_id_parse(vma, mmap_event->build_id, &mmap_event->build_id_size); in perf_event_mmap_event()
8622 struct vm_area_struct *vma, in perf_addr_filter_vma_adjust() argument
8625 unsigned long vma_size = vma->vm_end - vma->vm_start; in perf_addr_filter_vma_adjust()
8626 unsigned long off = vma->vm_pgoff << PAGE_SHIFT; in perf_addr_filter_vma_adjust()
8627 struct file *file = vma->vm_file; in perf_addr_filter_vma_adjust()
8633 fr->start = vma->vm_start; in perf_addr_filter_vma_adjust()
8636 fr->start = vma->vm_start + filter->offset - off; in perf_addr_filter_vma_adjust()
8637 fr->size = min(vma->vm_end - fr->start, filter->size); in perf_addr_filter_vma_adjust()
8646 struct vm_area_struct *vma = data; in __perf_addr_filters_adjust() local
8654 if (!vma->vm_file) in __perf_addr_filters_adjust()
8659 if (perf_addr_filter_vma_adjust(filter, vma, in __perf_addr_filters_adjust()
8677 static void perf_addr_filters_adjust(struct vm_area_struct *vma) in perf_addr_filters_adjust() argument
8686 if (!(vma->vm_flags & VM_EXEC)) in perf_addr_filters_adjust()
8695 perf_iterate_ctx(ctx, __perf_addr_filters_adjust, vma, true); in perf_addr_filters_adjust()
8700 void perf_event_mmap(struct vm_area_struct *vma) in perf_event_mmap() argument
8708 .vma = vma, in perf_event_mmap()
8719 .start = vma->vm_start, in perf_event_mmap()
8720 .len = vma->vm_end - vma->vm_start, in perf_event_mmap()
8721 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT, in perf_event_mmap()
8731 perf_addr_filters_adjust(vma); in perf_event_mmap()
10438 struct vm_area_struct *vma; in perf_addr_filter_apply() local
10441 for_each_vma(vmi, vma) { in perf_addr_filter_apply()
10442 if (!vma->vm_file) in perf_addr_filter_apply()
10445 if (perf_addr_filter_vma_adjust(filter, vma, fr)) in perf_addr_filter_apply()