
Searched refs:vma (Results 1 – 21 of 21) sorted by relevance

/kernel/events/
uprobes.c
121 static bool valid_vma(struct vm_area_struct *vma, bool is_register) in valid_vma() argument
128 return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC; in valid_vma()
131 static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset) in offset_to_vaddr() argument
133 return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT); in offset_to_vaddr()
136 static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr) in vaddr_to_offset() argument
138 return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start); in vaddr_to_offset()
154 static int __replace_page(struct vm_area_struct *vma, unsigned long addr, in __replace_page() argument
157 struct mm_struct *mm = vma->vm_mm; in __replace_page()
160 .vma = vma, in __replace_page()
166 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr, in __replace_page()
[all …]
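
The two helpers at 131 and 136 are inverse address translations: a file offset maps to vm_start + offset - (vm_pgoff << PAGE_SHIFT), and a virtual address maps back the same way. A minimal userspace model of that arithmetic, assuming a stand-in struct for the three vm_area_struct fields the helpers touch (vma_model is hypothetical, not kernel code):

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* Stand-in for the vm_area_struct fields offset_to_vaddr()/vaddr_to_offset() use. */
    struct vma_model {
        unsigned long vm_start;  /* first mapped virtual address */
        unsigned long vm_pgoff;  /* file offset of vm_start, in pages */
    };

    static unsigned long offset_to_vaddr(const struct vma_model *vma, long long offset)
    {
        return vma->vm_start + offset - ((long long)vma->vm_pgoff << PAGE_SHIFT);
    }

    static long long vaddr_to_offset(const struct vma_model *vma, unsigned long vaddr)
    {
        return ((long long)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
    }

    int main(void)
    {
        /* A mapping at 0x400000 covering the file from page 3 onward. */
        struct vma_model vma = { .vm_start = 0x400000, .vm_pgoff = 3 };
        long long off = 0x3123;  /* a file offset inside the mapping */
        unsigned long va = offset_to_vaddr(&vma, off);

        printf("offset 0x%llx -> vaddr 0x%lx\n", off, va);
        assert(vaddr_to_offset(&vma, va) == off);  /* round trip is exact */
        return 0;
    }
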
core.c
6050 struct perf_event *event = vmf->vma->vm_file->private_data; in perf_mmap_fault()
6073 vmf->page->mapping = vmf->vma->vm_file->f_mapping; in perf_mmap_fault()
6188 static void perf_mmap_open(struct vm_area_struct *vma) in perf_mmap_open() argument
6190 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_open()
6195 if (vma->vm_pgoff) in perf_mmap_open()
6199 event->pmu->event_mapped(event, vma->vm_mm); in perf_mmap_open()
6212 static void perf_mmap_close(struct vm_area_struct *vma) in perf_mmap_close() argument
6214 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_close()
6222 event->pmu->event_unmapped(event, vma->vm_mm); in perf_mmap_close()
6229 if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff && in perf_mmap_close()
[all …]
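
perf_mmap_open() and perf_mmap_close() are the vm_operations_struct open/close hooks; perf uses them to keep the ring buffer alive while any mapping still references it, including mappings duplicated by fork(). A minimal sketch of the same refcount pattern, with a hypothetical demo_buf in place of perf's ring buffer and none of perf's pmu callbacks:

    #include <linux/atomic.h>
    #include <linux/fs.h>
    #include <linux/mm.h>

    /* Hypothetical per-file state, refcounted across vma duplication. */
    struct demo_buf {
        atomic_t mmap_count;
    };

    /* Called when a vma referencing the file is duplicated (e.g. fork()). */
    static void demo_mmap_open(struct vm_area_struct *vma)
    {
        struct demo_buf *buf = vma->vm_file->private_data;

        atomic_inc(&buf->mmap_count);
    }

    /* Called when a vma goes away (munmap(), process exit). */
    static void demo_mmap_close(struct vm_area_struct *vma)
    {
        struct demo_buf *buf = vma->vm_file->private_data;

        if (atomic_dec_and_test(&buf->mmap_count)) {
            /* last mapping is gone; safe to tear the buffer down here */
        }
    }

    static const struct vm_operations_struct demo_vm_ops = {
        .open  = demo_mmap_open,
        .close = demo_mmap_close,
    };
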
/kernel/dma/
ops_helpers.c
34 int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, in dma_common_mmap() argument
39 unsigned long user_count = vma_pages(vma); in dma_common_mmap()
41 unsigned long off = vma->vm_pgoff; in dma_common_mmap()
45 vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs); in dma_common_mmap()
47 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) in dma_common_mmap()
53 return remap_pfn_range(vma, vma->vm_start, in dma_common_mmap()
54 page_to_pfn(page) + vma->vm_pgoff, in dma_common_mmap()
55 user_count << PAGE_SHIFT, vma->vm_page_prot); in dma_common_mmap()
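
dma_common_mmap() is the stock recipe for exporting device memory to userspace: fix up vm_page_prot, give any per-device coherent region first claim on the request, then hand the remainder to remap_pfn_range(). A hedged sketch of just the bounds check and remap step, assuming a hypothetical driver that owns a contiguous allocation (demo_pages, demo_page_count):

    #include <linux/errno.h>
    #include <linux/mm.h>

    /* Hypothetical: a contiguous page allocation the driver exports. */
    static struct page *demo_pages;
    static unsigned long demo_page_count;

    static int demo_mmap(struct file *file, struct vm_area_struct *vma)
    {
        unsigned long user_count = vma_pages(vma);  /* pages userspace asked for */

        /* Refuse mappings that start or run past the buffer. */
        if (vma->vm_pgoff >= demo_page_count ||
            user_count > demo_page_count - vma->vm_pgoff)
            return -ENXIO;

        /* Install the PFNs directly; no struct page refcounting happens here. */
        return remap_pfn_range(vma, vma->vm_start,
                               page_to_pfn(demo_pages) + vma->vm_pgoff,
                               user_count << PAGE_SHIFT, vma->vm_page_prot);
    }
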
coherent.c
236 struct vm_area_struct *vma, void *vaddr, size_t size, int *ret) in __dma_mmap_from_coherent() argument
240 unsigned long off = vma->vm_pgoff; in __dma_mmap_from_coherent()
242 unsigned long user_count = vma_pages(vma); in __dma_mmap_from_coherent()
248 *ret = remap_pfn_range(vma, vma->vm_start, pfn, in __dma_mmap_from_coherent()
250 vma->vm_page_prot); in __dma_mmap_from_coherent()
272 int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma, in dma_mmap_from_dev_coherent() argument
277 return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret); in dma_mmap_from_dev_coherent()
302 int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr, in dma_mmap_from_global_coherent() argument
308 return __dma_mmap_from_coherent(dma_coherent_default_memory, vma, in dma_mmap_from_global_coherent()
mapping.c
455 int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, in dma_mmap_attrs() argument
462 return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size, in dma_mmap_attrs()
466 return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); in dma_mmap_attrs()
593 int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma, in dma_mmap_pages() argument
598 if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff) in dma_mmap_pages()
600 return remap_pfn_range(vma, vma->vm_start, in dma_mmap_pages()
601 page_to_pfn(page) + vma->vm_pgoff, in dma_mmap_pages()
602 vma_pages(vma) << PAGE_SHIFT, vma->vm_page_prot); in dma_mmap_pages()
695 int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma, in dma_mmap_noncontiguous() argument
703 if (vma->vm_pgoff >= count || in dma_mmap_noncontiguous()
[all …]
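
The guard in dma_mmap_pages() at 598, vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff, is phrased to avoid ever computing vm_pgoff + vma_pages(vma), which could wrap around. The same idiom in a small userspace demonstration (range_fits is a made-up name):

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Overflow-safe "does [pgoff, pgoff + npages) fit in count pages?" check,
     * written the way dma_mmap_pages() writes it: never add pgoff + npages.
     */
    static bool range_fits(unsigned long pgoff, unsigned long npages,
                           unsigned long count)
    {
        return pgoff < count && npages <= count - pgoff;
    }

    int main(void)
    {
        /* pgoff + npages would wrap to 0 here; the check still rejects it. */
        printf("%d\n", range_fits(1, (unsigned long)-1, 16));  /* 0: rejected */
        printf("%d\n", range_fits(2, 14, 16));                 /* 1: exact fit */
        return 0;
    }
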
direct.c
494 int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma, in dma_direct_mmap() argument
498 unsigned long user_count = vma_pages(vma); in dma_direct_mmap()
503 vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs); in dma_direct_mmap()
505 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) in dma_direct_mmap()
507 if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret)) in dma_direct_mmap()
510 if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff) in dma_direct_mmap()
512 return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff, in dma_direct_mmap()
513 user_count << PAGE_SHIFT, vma->vm_page_prot); in dma_direct_mmap()
dummy.c
7 static int dma_dummy_mmap(struct device *dev, struct vm_area_struct *vma, in dma_dummy_mmap() argument
direct.h
16 int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
/kernel/bpf/
task_iter.c
295 struct vm_area_struct *vma; member
323 curr_vma = info->vma; in task_vma_seq_get_next()
429 info->vma = curr_vma; in task_vma_seq_get_next()
442 info->vma = NULL; in task_vma_seq_get_next()
449 struct vm_area_struct *vma; in task_vma_seq_start() local
451 vma = task_vma_seq_get_next(info); in task_vma_seq_start()
452 if (vma && *pos == 0) in task_vma_seq_start()
455 return vma; in task_vma_seq_start()
469 __bpf_md_ptr(struct vm_area_struct *, vma);
473 struct task_struct *task, struct vm_area_struct *vma) in DEFINE_BPF_ITER_FUNC() argument
[all …]
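
task_iter.c stores the scan position in info->vma so the seq_file iterator can pause and resume between reads, dropping the mmap lock in between. A one-shot walk is simpler; a sketch of the linked-list traversal this kernel still uses (vma->vm_next, before the maple tree conversion), with a hypothetical demo_visit() for the per-vma work:

    #include <linux/mm.h>
    #include <linux/sched/mm.h>

    /* Hypothetical per-vma work. */
    static void demo_visit(struct vm_area_struct *vma)
    {
    }

    static void demo_walk_task_vmas(struct task_struct *task)
    {
        struct mm_struct *mm = get_task_mm(task);  /* pins the mm, or NULL */
        struct vm_area_struct *vma;

        if (!mm)
            return;

        mmap_read_lock(mm);
        for (vma = mm->mmap; vma; vma = vma->vm_next)
            demo_visit(vma);
        mmap_read_unlock(mm);
        mmput(mm);
    }
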
ringbuf.c
226 static int ringbuf_map_mmap(struct bpf_map *map, struct vm_area_struct *vma) in ringbuf_map_mmap() argument
232 if (vma->vm_flags & VM_WRITE) { in ringbuf_map_mmap()
234 if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE) in ringbuf_map_mmap()
237 vma->vm_flags &= ~VM_MAYWRITE; in ringbuf_map_mmap()
240 return remap_vmalloc_range(vma, rb_map->rb, in ringbuf_map_mmap()
241 vma->vm_pgoff + RINGBUF_PGOFF); in ringbuf_map_mmap()
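
ringbuf_map_mmap() only lets a writable mapping cover the single consumer-position page; every other mapping loses VM_MAYWRITE, so a later mprotect(PROT_WRITE) fails as well. A sketch of that permanently-read-only idiom for a hypothetical vmalloc_user() buffer (demo_mmap_ro shows the pattern, it is not the ringbuf code):

    #include <linux/errno.h>
    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    /* 'buf' is assumed to be a vmalloc_user() allocation owned by the driver. */
    static int demo_mmap_ro(struct vm_area_struct *vma, void *buf)
    {
        if (vma->vm_flags & VM_WRITE)
            return -EPERM;

        /*
         * Clearing VM_MAYWRITE is the key step: without it, userspace could
         * flip the mapping writable later with mprotect(PROT_WRITE).
         */
        vma->vm_flags &= ~VM_MAYWRITE;

        return remap_vmalloc_range(vma, buf, vma->vm_pgoff);
    }
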
stackmap.c
151 struct vm_area_struct *vma; in stack_map_get_build_id_offset() local
193 vma = find_vma(current->mm, ips[i]); in stack_map_get_build_id_offset()
194 if (!vma || build_id_parse(vma, id_offs[i].build_id, NULL)) { in stack_map_get_build_id_offset()
201 id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i] in stack_map_get_build_id_offset()
202 - vma->vm_start; in stack_map_get_build_id_offset()
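
The computation at 201 is vaddr_to_offset() again, applied to each stack trace entry. The subtlety is at 193: find_vma() returns the first vma whose vm_end lies above the address, so the result can start beyond the address and must be validated before the arithmetic means anything. A hedged sketch (demo_ip_to_file_offset is hypothetical; the caller would hold mmap_read_lock on mm):

    #include <linux/errno.h>
    #include <linux/mm.h>

    /* Map a user instruction pointer to a file-relative offset. */
    static long demo_ip_to_file_offset(struct mm_struct *mm, unsigned long ip,
                                       unsigned long *offset)
    {
        struct vm_area_struct *vma = find_vma(mm, ip);

        /* find_vma() may return a vma that starts above ip; reject that. */
        if (!vma || ip < vma->vm_start || !vma->vm_file)
            return -ENOENT;

        *offset = (vma->vm_pgoff << PAGE_SHIFT) + (ip - vma->vm_start);
        return 0;
    }
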
arraymap.c
487 static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma) in array_map_mmap() argument
495 if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) > in array_map_mmap()
499 return remap_vmalloc_range(vma, array_map_vmalloc_addr(array), in array_map_mmap()
500 vma->vm_pgoff + pgoff); in array_map_mmap()
syscall.c
612 static void bpf_map_mmap_open(struct vm_area_struct *vma) in bpf_map_mmap_open() argument
614 struct bpf_map *map = vma->vm_file->private_data; in bpf_map_mmap_open()
616 if (vma->vm_flags & VM_MAYWRITE) in bpf_map_mmap_open()
621 static void bpf_map_mmap_close(struct vm_area_struct *vma) in bpf_map_mmap_close() argument
623 struct bpf_map *map = vma->vm_file->private_data; in bpf_map_mmap_close()
625 if (vma->vm_flags & VM_MAYWRITE) in bpf_map_mmap_close()
634 static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma) in bpf_map_mmap() argument
643 if (!(vma->vm_flags & VM_SHARED)) in bpf_map_mmap()
648 if (vma->vm_flags & VM_WRITE) { in bpf_map_mmap()
665 vma->vm_ops = &bpf_map_default_vmops; in bpf_map_mmap()
[all …]
/kernel/
acct.c
541 struct vm_area_struct *vma; in acct_collect() local
544 vma = current->mm->mmap; in acct_collect()
545 while (vma) { in acct_collect()
546 vsize += vma->vm_end - vma->vm_start; in acct_collect()
547 vma = vma->vm_next; in acct_collect()
fork.c
357 struct vm_area_struct *vma; in vm_area_alloc() local
359 vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); in vm_area_alloc()
360 if (vma) in vm_area_alloc()
361 vma_init(vma, mm); in vm_area_alloc()
362 return vma; in vm_area_alloc()
387 struct vm_area_struct *vma = container_of(head, struct vm_area_struct, in __free_vm_area_struct() local
389 kmem_cache_free(vm_area_cachep, vma); in __free_vm_area_struct()
392 static inline void free_vm_area_struct(struct vm_area_struct *vma) in free_vm_area_struct() argument
394 call_rcu(&vma->vm_rcu, __free_vm_area_struct); in free_vm_area_struct()
397 static inline void free_vm_area_struct(struct vm_area_struct *vma) in free_vm_area_struct() argument
[all …]
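
fork.c pairs a slab cache with an RCU-deferred free: free_vm_area_struct() hands the vma to call_rcu(), so lockless readers that looked the vma up before it was unlinked can keep dereferencing it for a grace period. The generic shape of that pattern, with a hypothetical demo_obj and cache:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    /* Hypothetical object freed via RCU, mirroring the vm_area_struct pattern. */
    struct demo_obj {
        int value;
        struct rcu_head rcu;
    };

    /* Assumed to be created at init with kmem_cache_create(). */
    static struct kmem_cache *demo_cachep;

    static void __demo_free(struct rcu_head *head)
    {
        struct demo_obj *obj = container_of(head, struct demo_obj, rcu);

        kmem_cache_free(demo_cachep, obj);
    }

    /* Readers inside rcu_read_lock() may still hold a pointer; defer the free. */
    static void demo_free(struct demo_obj *obj)
    {
        call_rcu(&obj->rcu, __demo_free);
    }
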
kcov.c
456 static int kcov_mmap(struct file *filep, struct vm_area_struct *vma) in kcov_mmap() argument
460 struct kcov *kcov = vma->vm_file->private_data; in kcov_mmap()
465 area = vmalloc_user(vma->vm_end - vma->vm_start); in kcov_mmap()
471 if (kcov->mode != KCOV_MODE_INIT || vma->vm_pgoff != 0 || in kcov_mmap()
472 vma->vm_end - vma->vm_start != size) { in kcov_mmap()
478 vma->vm_flags |= VM_DONTEXPAND; in kcov_mmap()
482 if (vm_insert_page(vma, vma->vm_start + off, page)) in kcov_mmap()
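
kcov_mmap() backs the region with vmalloc_user() memory and installs it one page at a time with vm_insert_page(); remap_pfn_range() would be wrong here because vmalloc memory is not physically contiguous. A sketch of that loop, minus kcov's locking and mode checks (demo_map_vmalloc_buf is hypothetical):

    #include <linux/errno.h>
    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    /* 'area' is assumed to be a vmalloc_user() allocation of 'size' bytes. */
    static int demo_map_vmalloc_buf(struct vm_area_struct *vma, void *area,
                                    unsigned long size)
    {
        unsigned long off;

        if (vma->vm_pgoff || vma->vm_end - vma->vm_start != size)
            return -EINVAL;

        vma->vm_flags |= VM_DONTEXPAND;  /* mremap() may not grow this vma */
        for (off = 0; off < size; off += PAGE_SIZE) {
            struct page *page = vmalloc_to_page(area + off);
            int ret = vm_insert_page(vma, vma->vm_start + off, page);

            if (ret)
                return ret;
        }
        return 0;
    }
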
relay.c
36 struct rchan_buf *buf = vmf->vma->vm_private_data; in relay_buf_fault()
86 static int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma) in relay_mmap_buf() argument
88 unsigned long length = vma->vm_end - vma->vm_start; in relay_mmap_buf()
96 vma->vm_ops = &relay_file_mmap_ops; in relay_mmap_buf()
97 vma->vm_flags |= VM_DONTEXPAND; in relay_mmap_buf()
98 vma->vm_private_data = buf; in relay_mmap_buf()
832 static int relay_file_mmap(struct file *filp, struct vm_area_struct *vma) in relay_file_mmap() argument
835 return relay_mmap_buf(buf, vma); in relay_file_mmap()
sys.c
2123 struct vm_area_struct *vma; in prctl_set_mm() local
2156 vma = find_vma(mm, addr); in prctl_set_mm()
2226 if (!vma) { in prctl_set_mm()
signal.c
4678 __weak const char *arch_vma_name(struct vm_area_struct *vma) in arch_vma_name() argument
/kernel/trace/
trace_output.c
404 const struct vm_area_struct *vma; in seq_print_user_ip() local
407 vma = find_vma(mm, ip); in seq_print_user_ip()
408 if (vma) { in seq_print_user_ip()
409 file = vma->vm_file; in seq_print_user_ip()
410 vmstart = vma->vm_start; in seq_print_user_ip()
/kernel/sched/
fair.c
2743 struct vm_area_struct *vma; in task_numa_work() local
2799 vma = find_vma(mm, start); in task_numa_work()
2800 if (!vma) { in task_numa_work()
2803 vma = mm->mmap; in task_numa_work()
2805 for (; vma; vma = vma->vm_next) { in task_numa_work()
2806 if (!vma_migratable(vma) || !vma_policy_mof(vma) || in task_numa_work()
2807 is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) { in task_numa_work()
2817 if (!vma->vm_mm || in task_numa_work()
2818 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ))) in task_numa_work()
2825 if (!vma_is_accessible(vma)) in task_numa_work()
[all …]
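
task_numa_work() skips vmas where NUMA hinting faults would be wasted: not migratable, no MOF memory policy, hugetlb, VM_MIXEDMAP, lacking an mm, inaccessible, or read-only file mappings (typically shared text, where faults say little about this task's placement). The conditions above, condensed into a single predicate (demo_vma_scannable is a paraphrase, not kernel code):

    #include <linux/hugetlb.h>
    #include <linux/mempolicy.h>
    #include <linux/mm.h>

    static bool demo_vma_scannable(struct vm_area_struct *vma)
    {
        /* Unmovable memory, or no scan-triggering policy: faults are useless. */
        if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
            is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP))
            return false;

        /* Skip vdso-like vmas and read-only file mappings (shared text). */
        if (!vma->vm_mm ||
            (vma->vm_file && (vma->vm_flags & (VM_READ | VM_WRITE)) == VM_READ))
            return false;

        return vma_is_accessible(vma);
    }
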