/kernel/events/
uprobes.c
  121  static bool valid_vma(struct vm_area_struct *vma, bool is_register)    in valid_vma() argument
  128  return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;    in valid_vma()
  131  static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)    in offset_to_vaddr() argument
  133  return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);    in offset_to_vaddr()
  136  static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)    in vaddr_to_offset() argument
  138  return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);    in vaddr_to_offset()
  154  static int __replace_page(struct vm_area_struct *vma, unsigned long addr,    in __replace_page() argument
  159  struct mm_struct *mm = vma->vm_mm;    in __replace_page()
  160  DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0);    in __replace_page()
  164  mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,    in __replace_page()
  [all …]
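The offset_to_vaddr()/vaddr_to_offset() helpers above are plain arithmetic between a file offset and a user virtual address within a mapping. A minimal standalone sketch of the same conversion, using a simplified stand-in struct rather than the real struct vm_area_struct (field names kept for clarity; PAGE_SHIFT value is an assumption):

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumption: 4 KiB pages */

    /* simplified stand-in for the two vm_area_struct fields used here */
    struct vma_view {
        unsigned long vm_start;   /* first user address covered by the mapping */
        unsigned long vm_pgoff;   /* file offset of vm_start, in pages */
    };

    /* file offset -> user virtual address, mirroring offset_to_vaddr() */
    static unsigned long offset_to_vaddr(const struct vma_view *v, long long offset)
    {
        return v->vm_start + offset - ((long long)v->vm_pgoff << PAGE_SHIFT);
    }

    /* user virtual address -> file offset, mirroring vaddr_to_offset() */
    static long long vaddr_to_offset(const struct vma_view *v, unsigned long vaddr)
    {
        return ((long long)v->vm_pgoff << PAGE_SHIFT) + (vaddr - v->vm_start);
    }

    int main(void)
    {
        struct vma_view v = { .vm_start = 0x7f0000001000UL, .vm_pgoff = 1 };
        unsigned long va = offset_to_vaddr(&v, 0x1234);
        printf("vaddr %#lx -> offset %#llx\n", va, vaddr_to_offset(&v, va));
        return 0;
    }

Round-tripping an offset through both helpers returns the original value, which is what uprobes relies on when translating probe offsets into addresses inside a mapped executable.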
core.c
  5975  struct perf_event *event = vmf->vma->vm_file->private_data;    in perf_mmap_fault()
  5998  vmf->page->mapping = vmf->vma->vm_file->f_mapping;    in perf_mmap_fault()
  6113  static void perf_mmap_open(struct vm_area_struct *vma)    in perf_mmap_open() argument
  6115  struct perf_event *event = vma->vm_file->private_data;    in perf_mmap_open()
  6120  if (vma->vm_pgoff)    in perf_mmap_open()
  6124  event->pmu->event_mapped(event, vma->vm_mm);    in perf_mmap_open()
  6137  static void perf_mmap_close(struct vm_area_struct *vma)    in perf_mmap_close() argument
  6139  struct perf_event *event = vma->vm_file->private_data;    in perf_mmap_close()
  6147  event->pmu->event_unmapped(event, vma->vm_mm);    in perf_mmap_close()
  6154  if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&    in perf_mmap_close()
  [all …]
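perf_mmap_open()/perf_mmap_close() run when userspace maps or unmaps the perf ring buffer for an event fd. A hedged sketch of the userspace side, following the documented perf_event_open(2) + mmap() pattern of one metadata page plus a power-of-two number of data pages; the event type and buffer size are arbitrary examples:

    #include <linux/perf_event.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <string.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        struct perf_event_attr attr;
        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_SOFTWARE;
        attr.size = sizeof(attr);
        attr.config = PERF_COUNT_SW_CPU_CLOCK;
        attr.sample_period = 100000;
        attr.sample_type = PERF_SAMPLE_IP;

        /* measure the calling thread on any CPU */
        int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) { perror("perf_event_open"); return 1; }

        /* 1 metadata page + 8 data pages; this mmap() reaches perf_mmap_open() */
        size_t len = (1 + 8) * (size_t)sysconf(_SC_PAGESIZE);
        void *rb = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (rb == MAP_FAILED) { perror("mmap"); return 1; }

        struct perf_event_mmap_page *meta = rb;
        printf("data_head = %llu\n", (unsigned long long)meta->data_head);

        munmap(rb, len);   /* unmapping is what drives perf_mmap_close() */
        close(fd);
        return 0;
    }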
/kernel/dma/
ops_helpers.c
  35  int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,    in dma_common_mmap() argument
  40  unsigned long user_count = vma_pages(vma);    in dma_common_mmap()
  42  unsigned long off = vma->vm_pgoff;    in dma_common_mmap()
  46  vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);    in dma_common_mmap()
  48  if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))    in dma_common_mmap()
  54  return remap_pfn_range(vma, vma->vm_start,    in dma_common_mmap()
  55  page_to_pfn(page) + vma->vm_pgoff,    in dma_common_mmap()
  56  user_count << PAGE_SHIFT, vma->vm_page_prot);    in dma_common_mmap()
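dma_common_mmap() is the generic helper that ops-based DMA implementations fall back to. From a driver's point of view the usual entry point is dma_mmap_coherent() called from its fops->mmap handler; a minimal sketch, assuming a hypothetical driver that has already allocated a coherent buffer (my_dev, buf_cpu, buf_dma and BUF_SIZE are placeholders, not real kernel symbols):

    /* sketch: driver ->mmap handler exporting a coherent DMA buffer to userspace */
    #include <linux/dma-mapping.h>
    #include <linux/fs.h>

    /* assumptions: set up elsewhere by the (hypothetical) driver */
    extern struct device *my_dev;
    extern void *buf_cpu;          /* returned by dma_alloc_coherent() */
    extern dma_addr_t buf_dma;
    #define BUF_SIZE (64 * 1024)

    static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
    {
        /* delegates to dma_mmap_attrs(), which ends up in dma_direct_mmap()
         * or dma_common_mmap() depending on the dma_map_ops in use */
        return dma_mmap_coherent(my_dev, vma, buf_cpu, buf_dma, BUF_SIZE);
    }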
coherent.c
  235  struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)    in __dma_mmap_from_coherent() argument
  239  unsigned long off = vma->vm_pgoff;    in __dma_mmap_from_coherent()
  241  unsigned long user_count = vma_pages(vma);    in __dma_mmap_from_coherent()
  247  *ret = remap_pfn_range(vma, vma->vm_start, pfn,    in __dma_mmap_from_coherent()
  249  vma->vm_page_prot);    in __dma_mmap_from_coherent()
  271  int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,    in dma_mmap_from_dev_coherent() argument
  276  return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);    in dma_mmap_from_dev_coherent()
  301  int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,    in dma_mmap_from_global_coherent() argument
  307  return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,    in dma_mmap_from_global_coherent()
mapping.c
  460  int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,    in dma_mmap_attrs() argument
  467  return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,    in dma_mmap_attrs()
  471  return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);    in dma_mmap_attrs()
  598  int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,    in dma_mmap_pages() argument
  603  if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)    in dma_mmap_pages()
  605  return remap_pfn_range(vma, vma->vm_start,    in dma_mmap_pages()
  606  page_to_pfn(page) + vma->vm_pgoff,    in dma_mmap_pages()
  607  vma_pages(vma) << PAGE_SHIFT, vma->vm_page_prot);    in dma_mmap_pages()
  700  int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,    in dma_mmap_noncontiguous() argument
  708  if (vma->vm_pgoff >= count ||    in dma_mmap_noncontiguous()
  [all …]
direct.c
  560  int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,    in dma_direct_mmap() argument
  564  unsigned long user_count = vma_pages(vma);    in dma_direct_mmap()
  569  vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);    in dma_direct_mmap()
  571  vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);    in dma_direct_mmap()
  573  if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))    in dma_direct_mmap()
  575  if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))    in dma_direct_mmap()
  578  if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)    in dma_direct_mmap()
  580  return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,    in dma_direct_mmap()
  581  user_count << PAGE_SHIFT, vma->vm_page_prot);    in dma_direct_mmap()
dummy.c
  7  static int dma_dummy_mmap(struct device *dev, struct vm_area_struct *vma,    in dma_dummy_mmap() argument
direct.h
  17  int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
/kernel/bpf/
task_iter.c
  442  struct vm_area_struct *vma;    member
  471  curr_vma = info->vma;    in task_vma_seq_get_next()
  583  info->vma = curr_vma;    in task_vma_seq_get_next()
  601  info->vma = NULL;    in task_vma_seq_get_next()
  609  struct vm_area_struct *vma;    in task_vma_seq_start() local
  611  vma = task_vma_seq_get_next(info);    in task_vma_seq_start()
  612  if (vma && *pos == 0)    in task_vma_seq_start()
  615  return vma;    in task_vma_seq_start()
  629  __bpf_md_ptr(struct vm_area_struct *, vma);
  633  struct task_struct *task, struct vm_area_struct *vma)    in DEFINE_BPF_ITER_FUNC() argument
  [all …]
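These lines implement the task_vma BPF iterator, which hands each VMA of each task to a BPF program through a bpf_iter__task_vma context. A hedged sketch of a matching libbpf-style iterator program, assuming a vmlinux.h generated with bpftool; the printed fields are only an example:

    // SPDX-License-Identifier: GPL-2.0
    /* sketch of a task_vma iterator program */
    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    char LICENSE[] SEC("license") = "GPL";

    SEC("iter/task_vma")
    int dump_task_vma(struct bpf_iter__task_vma *ctx)
    {
        struct seq_file *seq = ctx->meta->seq;
        struct vm_area_struct *vma = ctx->vma;
        struct task_struct *task = ctx->task;

        /* the iterator calls the program once more with NULL at the end */
        if (!vma || !task)
            return 0;

        BPF_SEQ_PRINTF(seq, "%d %08lx-%08lx\n",
                       task->pid, vma->vm_start, vma->vm_end);
        return 0;
    }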
ringbuf.c
  261  static int ringbuf_map_mmap_kern(struct bpf_map *map, struct vm_area_struct *vma)    in ringbuf_map_mmap_kern() argument
  267  if (vma->vm_flags & VM_WRITE) {    in ringbuf_map_mmap_kern()
  269  if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE)    in ringbuf_map_mmap_kern()
  272  vm_flags_clear(vma, VM_MAYWRITE);    in ringbuf_map_mmap_kern()
  275  return remap_vmalloc_range(vma, rb_map->rb,    in ringbuf_map_mmap_kern()
  276  vma->vm_pgoff + RINGBUF_PGOFF);    in ringbuf_map_mmap_kern()
  279  static int ringbuf_map_mmap_user(struct bpf_map *map, struct vm_area_struct *vma)    in ringbuf_map_mmap_user() argument
  285  if (vma->vm_flags & VM_WRITE) {    in ringbuf_map_mmap_user()
  286  if (vma->vm_pgoff == 0)    in ringbuf_map_mmap_user()
  293  vm_flags_clear(vma, VM_MAYWRITE);    in ringbuf_map_mmap_user()
  [all …]
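ringbuf_map_mmap_kern() backs the mmap() that libbpf performs when a consumer attaches to a BPF_MAP_TYPE_RINGBUF map. A hedged userspace sketch using the libbpf ring_buffer API, assuming map_fd already refers to such a map:

    #include <bpf/libbpf.h>
    #include <stdio.h>

    /* callback invoked for every record committed by the BPF program */
    static int handle_event(void *ctx, void *data, size_t size)
    {
        printf("got %zu-byte record\n", size);
        return 0;
    }

    /* map_fd: fd of an existing BPF_MAP_TYPE_RINGBUF map (assumption) */
    int consume_ringbuf(int map_fd)
    {
        struct ring_buffer *rb = ring_buffer__new(map_fd, handle_event, NULL, NULL);
        if (!rb)
            return -1;

        /* ring_buffer__new() mmap()s the map, which lands in ringbuf_map_mmap_kern() */
        while (ring_buffer__poll(rb, 100 /* ms */) >= 0)
            ;

        ring_buffer__free(rb);
        return 0;
    }

ringbuf_map_mmap_user() is the analogous handler for BPF_MAP_TYPE_USER_RINGBUF, where the roles of producer and consumer pages are reversed.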
stackmap.c
  133  struct vm_area_struct *vma, *prev_vma = NULL;    in stack_map_get_build_id_offset() local
  153  vma = prev_vma;    in stack_map_get_build_id_offset()
  158  vma = find_vma(current->mm, ips[i]);    in stack_map_get_build_id_offset()
  159  if (!vma || build_id_parse(vma, id_offs[i].build_id, NULL)) {    in stack_map_get_build_id_offset()
  167  id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]    in stack_map_get_build_id_offset()
  168  - vma->vm_start;    in stack_map_get_build_id_offset()
  170  prev_vma = vma;    in stack_map_get_build_id_offset()
arraymap.c
  529  static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)    in array_map_mmap() argument
  537  if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >    in array_map_mmap()
  541  return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),    in array_map_mmap()
  542  vma->vm_pgoff + pgoff);    in array_map_mmap()
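array_map_mmap() services mmap() on array maps created with BPF_F_MMAPABLE, letting userspace read and write values without bpf() syscalls. A hedged sketch using libbpf's bpf_map_create() plus a plain mmap(); the map name and sizes are arbitrary:

    #include <bpf/bpf.h>
    #include <linux/bpf.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int map_array(void)
    {
        LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
        int value_size = 8, max_entries = 512;

        int fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "mmap_array",
                                sizeof(int), value_size, max_entries, &opts);
        if (fd < 0)
            return -1;

        /* this mmap() ends up in array_map_mmap() above */
        size_t len = (size_t)value_size * max_entries;
        void *data = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (data == MAP_FAILED) {
            close(fd);
            return -1;
        }

        ((unsigned long *)data)[0] = 42;   /* direct access, no syscall */
        munmap(data, len);
        close(fd);
        return 0;
    }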
syscall.c
  756  static void bpf_map_mmap_open(struct vm_area_struct *vma)    in bpf_map_mmap_open() argument
  758  struct bpf_map *map = vma->vm_file->private_data;    in bpf_map_mmap_open()
  760  if (vma->vm_flags & VM_MAYWRITE)    in bpf_map_mmap_open()
  765  static void bpf_map_mmap_close(struct vm_area_struct *vma)    in bpf_map_mmap_close() argument
  767  struct bpf_map *map = vma->vm_file->private_data;    in bpf_map_mmap_close()
  769  if (vma->vm_flags & VM_MAYWRITE)    in bpf_map_mmap_close()
  778  static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)    in bpf_map_mmap() argument
  787  if (!(vma->vm_flags & VM_SHARED))    in bpf_map_mmap()
  792  if (vma->vm_flags & VM_WRITE) {    in bpf_map_mmap()
  809  vma->vm_ops = &bpf_map_default_vmops;    in bpf_map_mmap()
  [all …]
/kernel/
fork.c
  466  static bool vma_lock_alloc(struct vm_area_struct *vma)    in vma_lock_alloc() argument
  468  vma->vm_lock = kmem_cache_alloc(vma_lock_cachep, GFP_KERNEL);    in vma_lock_alloc()
  469  if (!vma->vm_lock)    in vma_lock_alloc()
  472  init_rwsem(&vma->vm_lock->lock);    in vma_lock_alloc()
  473  vma->vm_lock_seq = -1;    in vma_lock_alloc()
  478  static inline void vma_lock_free(struct vm_area_struct *vma)    in vma_lock_free() argument
  480  kmem_cache_free(vma_lock_cachep, vma->vm_lock);    in vma_lock_free()
  485  static inline bool vma_lock_alloc(struct vm_area_struct *vma) { return true; }    in vma_lock_alloc() argument
  486  static inline void vma_lock_free(struct vm_area_struct *vma) {}    in vma_lock_free() argument
  492  struct vm_area_struct *vma;    in vm_area_alloc() local
  [all …]
kcov.c
  476  static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)    in kcov_mmap() argument
  479  struct kcov *kcov = vma->vm_file->private_data;    in kcov_mmap()
  486  if (kcov->area == NULL || vma->vm_pgoff != 0 ||    in kcov_mmap()
  487  vma->vm_end - vma->vm_start != size) {    in kcov_mmap()
  492  vm_flags_set(vma, VM_DONTEXPAND);    in kcov_mmap()
  495  res = vm_insert_page(vma, vma->vm_start + off, page);    in kcov_mmap()
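kcov_mmap() inserts the coverage buffer pages into the caller's address space. A hedged sketch of the userspace sequence documented in Documentation/dev-tools/kcov.rst: initialize, mmap, enable, run the code of interest, then read the PC count from cover[0]:

    #include <linux/kcov.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    #define COVER_SIZE (64 * 1024)   /* number of unsigned long entries */

    int main(void)
    {
        int fd = open("/sys/kernel/debug/kcov", O_RDWR);
        if (fd < 0) { perror("open"); return 1; }

        if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE)) { perror("KCOV_INIT_TRACE"); return 1; }

        /* this mmap() is handled by kcov_mmap() above */
        unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
                                    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (cover == MAP_FAILED) { perror("mmap"); return 1; }

        if (ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC)) { perror("KCOV_ENABLE"); return 1; }
        __atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);

        read(-1, NULL, 0);   /* any syscall whose coverage we want to trace */

        unsigned long n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
        printf("collected %lu PCs\n", n);

        ioctl(fd, KCOV_DISABLE, 0);
        close(fd);
        return 0;
    }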
relay.c
  36  struct rchan_buf *buf = vmf->vma->vm_private_data;    in relay_buf_fault()
  83  static int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma)    in relay_mmap_buf() argument
  85  unsigned long length = vma->vm_end - vma->vm_start;    in relay_mmap_buf()
  93  vma->vm_ops = &relay_file_mmap_ops;    in relay_mmap_buf()
  94  vm_flags_set(vma, VM_DONTEXPAND);    in relay_mmap_buf()
  95  vma->vm_private_data = buf;    in relay_mmap_buf()
  829  static int relay_file_mmap(struct file *filp, struct vm_area_struct *vma)    in relay_file_mmap() argument
  832  return relay_mmap_buf(buf, vma);    in relay_file_mmap()
acct.c
  562  struct vm_area_struct *vma;    in acct_collect() local
  565  for_each_vma(vmi, vma)    in acct_collect()
  566  vsize += vma->vm_end - vma->vm_start;    in acct_collect()
sys.c
  2131  struct vm_area_struct *vma;    in prctl_set_mm() local
  2164  vma = find_vma(mm, addr);    in prctl_set_mm()
  2234  if (!vma) {    in prctl_set_mm()
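prctl_set_mm() is the backend of prctl(PR_SET_MM, ...), used mainly by checkpoint/restore tools; it requires CAP_SYS_RESOURCE and a kernel built with CONFIG_CHECKPOINT_RESTORE. A hedged sketch of one such call; the address passed is purely illustrative, and the kernel validates it against an existing VMA (the find_vma() above) before accepting it:

    #include <sys/prctl.h>
    #include <stdio.h>

    /* move the recorded start-of-stack marker for the calling process */
    int set_stack_start(unsigned long new_start_stack)
    {
        if (prctl(PR_SET_MM, PR_SET_MM_START_STACK, new_start_stack, 0, 0)) {
            perror("prctl(PR_SET_MM)");
            return -1;
        }
        return 0;
    }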
signal.c
  4700  __weak const char *arch_vma_name(struct vm_area_struct *vma)    in arch_vma_name() argument
/kernel/trace/
trace_events_user.c
  1748  static int user_status_mmap(struct file *file, struct vm_area_struct *vma)    in user_status_mmap() argument
  1752  unsigned long size = vma->vm_end - vma->vm_start;    in user_status_mmap()
  1762  return remap_pfn_range(vma, vma->vm_start,    in user_status_mmap()
trace_output.c
  395  const struct vm_area_struct *vma;    in seq_print_user_ip() local
  398  vma = find_vma(mm, ip);    in seq_print_user_ip()
  399  if (vma) {    in seq_print_user_ip()
  400  file = vma->vm_file;    in seq_print_user_ip()
  401  vmstart = vma->vm_start;    in seq_print_user_ip()
/kernel/sched/
fair.c
  2942  struct vm_area_struct *vma;    in task_numa_work() local
  2999  vma = mas_find(&mas, ULONG_MAX);    in task_numa_work()
  3000  if (!vma) {    in task_numa_work()
  3004  vma = mas_find(&mas, ULONG_MAX);    in task_numa_work()
  3007  for (; vma; vma = mas_find(&mas, ULONG_MAX)) {    in task_numa_work()
  3008  if (!vma_migratable(vma) || !vma_policy_mof(vma) ||    in task_numa_work()
  3009  is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {    in task_numa_work()
  3019  if (!vma->vm_mm ||    in task_numa_work()
  3020  (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))    in task_numa_work()
  3027  if (!vma_is_accessible(vma))    in task_numa_work()
  [all …]