/mm/ (matches for the local variable ret)
maccess.c
      12  long ret;  in probe_read_common() local
      15  ret = __copy_from_user_inatomic(dst, src, size);  in probe_read_common()
      18  return ret ? -EFAULT : 0;  in probe_read_common()
      24  long ret;  in probe_write_common() local
      27  ret = __copy_to_user_inatomic(dst, src, size);  in probe_write_common()
      30  return ret ? -EFAULT : 0;  in probe_write_common()
      53  long ret;  in __probe_kernel_read() local
      57  ret = probe_read_common(dst, (__force const void __user *)src, size);  in __probe_kernel_read()
      60  return ret;  in __probe_kernel_read()
      79  long ret = -EFAULT;  in __probe_user_read() local
    [all …]

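Every probe_* hit above follows one shape: ret takes the number of bytes __copy_from_user_inatomic()/__copy_to_user_inatomic() left uncopied, and any nonzero remainder is collapsed to a single -EFAULT. A minimal userspace sketch of that shape; copy_step() is a hypothetical stand-in for the inatomic copy, not a kernel API:

    #include <errno.h>
    #include <string.h>

    /* Hypothetical stand-in for __copy_from_user_inatomic(): returns the
     * number of bytes left uncopied, 0 on success.  memcpy() cannot fault
     * here, so this stand-in always succeeds. */
    static long copy_step(void *dst, const void *src, size_t size)
    {
            memcpy(dst, src, size);
            return 0;
    }

    /* The probe_read_common() shape: a nonzero "bytes left" count is
     * collapsed into a single -EFAULT error code. */
    static long probe_read_sketch(void *dst, const void *src, size_t size)
    {
            long ret = copy_step(dst, src, size);

            return ret ? -EFAULT : 0;
    }

    int main(void)
    {
            char src[4] = "mm", dst[4];

            return (int)probe_read_sketch(dst, src, sizeof(src));
    }
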
backing-dev.c
     140  ssize_t ret;  in read_ahead_kb_store() local
     142  ret = kstrtoul(buf, 10, &read_ahead_kb);  in read_ahead_kb_store()
     143  if (ret < 0)  in read_ahead_kb_store()
     144  return ret;  in read_ahead_kb_store()
     170  ssize_t ret;  in min_ratio_store() local
     172  ret = kstrtouint(buf, 10, &ratio);  in min_ratio_store()
     173  if (ret < 0)  in min_ratio_store()
     174  return ret;  in min_ratio_store()
     176  ret = bdi_set_min_ratio(bdi, ratio);  in min_ratio_store()
     177  if (!ret)  in min_ratio_store()
    [all …]

memory_hotplug.c
     791  int ret;  in online_pages() local
     813  ret = memory_notify(MEM_GOING_ONLINE, &arg);  in online_pages()
     814  ret = notifier_to_errno(ret);  in online_pages()
     815  if (ret)  in online_pages()
     828  ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,  in online_pages()
     830  if (ret) {  in online_pages()
     871  return ret;  in online_pages()
     968  int ret = 1;  in __try_online_node() local
     976  ret = -ENOMEM;  in __try_online_node()
     982  ret = register_one_node(nid);  in __try_online_node()
    [all …]

page_io.c
     161  int ret;  in generic_swapfile_activate() local
     217  ret = add_swap_extent(sis, page_no, 1, first_block);  in generic_swapfile_activate()
     218  if (ret < 0)  in generic_swapfile_activate()
     220  nr_extents += ret;  in generic_swapfile_activate()
     226  ret = nr_extents;  in generic_swapfile_activate()
     234  return ret;  in generic_swapfile_activate()
     237  ret = -EINVAL;  in generic_swapfile_activate()
     247  int ret = 0;  in swap_writepage() local
     259  ret = __swap_writepage(page, wbc, end_swap_bio_write);  in swap_writepage()
     261  return ret;  in swap_writepage()
    [all …]

gup.c
     234  int ret;  in follow_page_pte() local
     236  ret = follow_pfn_pte(vma, address, ptep, flags);  in follow_page_pte()
     237  page = ERR_PTR(ret);  in follow_page_pte()
     243  int ret;  in follow_page_pte() local
     247  ret = split_huge_page(page);  in follow_page_pte()
     250  if (ret)  in follow_page_pte()
     251  return ERR_PTR(ret);  in follow_page_pte()
     388  int ret;  in follow_pmd_mask() local
     392  ret = 0;  in follow_pmd_mask()
     395  ret = -EBUSY;  in follow_pmd_mask()
    [all …]

util.c
     428  int ret = 0;  in __account_locked_vm() local
     437  ret = -ENOMEM;  in __account_locked_vm()
     439  if (!ret)  in __account_locked_vm()
     449  ret ? " - exceeded" : "");  in __account_locked_vm()
     451  return ret;  in __account_locked_vm()
     469  int ret;  in account_locked_vm() local
     475  ret = __account_locked_vm(mm, pages, inc, current,  in account_locked_vm()
     479  return ret;  in account_locked_vm()
     487  unsigned long ret;  in vm_mmap_pgoff() local
     492  ret = security_mmap_file(file, prot, flag);  in vm_mmap_pgoff()
    [all …]

memory-failure.c
     213  int ret;  in kill_proc() local
     219  ret = force_sig_mceerr(BUS_MCEERR_AR, (void __user *)tk->addr,  in kill_proc()
     228  ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr,  in kill_proc()
     231  if (ret < 0)  in kill_proc()
     233  t->comm, t->pid, ret);  in kill_proc()
     234  return ret;  in kill_proc()
     596  int ret = MF_FAILED;  in truncate_error_page() local
     609  ret = MF_RECOVERED;  in truncate_error_page()
     617  ret = MF_RECOVERED;  in truncate_error_page()
     623  return ret;  in truncate_error_page()
    [all …]

page-writeback.c
     518  int ret;  in dirty_background_ratio_handler() local
     520  ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);  in dirty_background_ratio_handler()
     521  if (ret == 0 && write)  in dirty_background_ratio_handler()
     523  return ret;  in dirty_background_ratio_handler()
     530  int ret;  in dirty_background_bytes_handler() local
     532  ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);  in dirty_background_bytes_handler()
     533  if (ret == 0 && write)  in dirty_background_bytes_handler()
     535  return ret;  in dirty_background_bytes_handler()
     543  int ret;  in dirty_ratio_handler() local
     545  ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);  in dirty_ratio_handler()
    [all …]

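The dirty_*_handler() hits share the usual sysctl-handler contract: let the proc helper parse the value, then apply side effects only when parsing succeeded and the access was a write. A sketch of that control flow; parse_value() and recalc_limits() are hypothetical stand-ins for proc_dointvec_minmax() and the kernel's recalculation step:

    #include <stdlib.h>

    /* Hypothetical parser standing in for proc_dointvec_minmax():
     * returns 0 on success and stores the parsed ratio. */
    static int parse_value(const char *buf, int *val)
    {
            char *end;
            long v = strtol(buf, &end, 10);

            if (end == buf || v < 0 || v > 100)
                    return -22;     /* -EINVAL */
            *val = (int)v;
            return 0;
    }

    /* Side effect applied only after a successful write. */
    static void recalc_limits(int ratio)
    {
            (void)ratio;
    }

    /* The dirty_background_ratio_handler() shape: parse first, act
     * only if parsing succeeded and this was a write. */
    static int ratio_handler(const char *buf, int write, int *ratio)
    {
            int ret = parse_value(buf, ratio);

            if (ret == 0 && write)
                    recalc_limits(*ratio);
            return ret;
    }

    int main(void)
    {
            int ratio = 0;

            return ratio_handler("42", 1, &ratio);
    }
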
zswap.c
     509  int ret;  in zswap_pool_create() local
     543  ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,  in zswap_pool_create()
     545  if (ret)  in zswap_pool_create()
     671  int ret;  in __zswap_param_set() local
     720  ret = param_set_charp(s, kp);  in __zswap_param_set()
     722  ret = -EINVAL;  in __zswap_param_set()
     726  if (!ret) {  in __zswap_param_set()
     749  ret = param_set_charp(s, kp);  in __zswap_param_set()
     758  return ret;  in __zswap_param_set()
     851  int ret;  in zswap_writeback_entry() local
    [all …]

memory.c
     965  int ret;  in copy_page_range() local
     985  ret = track_pfn_copy(vma);  in copy_page_range()
     986  if (ret)  in copy_page_range()
     987  return ret;  in copy_page_range()
    1004  ret = 0;  in copy_page_range()
    1013  ret = -ENOMEM;  in copy_page_range()
    1020  return ret;  in copy_page_range()
    1535  int ret, i;  in __vm_map_pages() local
    1546  ret = vm_insert_page(vma, uaddr, pages[offset + i]);  in __vm_map_pages()
    1547  if (ret < 0)  in __vm_map_pages()
    [all …]

frame_vector.c
      39  int ret = 0;  in get_vaddr_frames() local
      55  ret = -EFAULT;  in get_vaddr_frames()
      68  ret = -EOPNOTSUPP;  in get_vaddr_frames()
      75  ret = get_user_pages_locked(start, nr_frames,  in get_vaddr_frames()
      85  while (ret < nr_frames && start + PAGE_SIZE <= vma->vm_end) {  in get_vaddr_frames()
      86  err = follow_pfn(vma, start, &nums[ret]);  in get_vaddr_frames()
      88  if (ret == 0)  in get_vaddr_frames()
      89  ret = err;  in get_vaddr_frames()
      93  ret++;  in get_vaddr_frames()
      99  if (ret >= nr_frames || start < vma->vm_end)  in get_vaddr_frames()
    [all …]

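get_vaddr_frames() uses ret as the count of frames gathered so far, and the hits at lines 88-89 encode a common convention: a late error is swallowed in favor of the partial count, and only a failure before any progress is reported as an error. A self-contained sketch; next_frame() is a hypothetical stand-in for follow_pfn():

    #include <errno.h>

    /* Hypothetical lookup standing in for follow_pfn(): succeeds three
     * times, then fails, to exercise both paths below. */
    static int next_frame(unsigned long *num)
    {
            static int calls;

            if (++calls > 3)
                    return -EFAULT;
            *num = 0x1000ul * calls;
            return 0;
    }

    /* The get_vaddr_frames() convention: ret counts successes; an error
     * is returned only if nothing was gathered, otherwise the partial
     * count wins. */
    static int gather_frames(unsigned long *nums, int nr_frames)
    {
            int ret = 0;

            while (ret < nr_frames) {
                    int err = next_frame(&nums[ret]);

                    if (err) {
                            if (ret == 0)
                                    ret = err;
                            break;
                    }
                    ret++;
            }
            return ret;
    }

    int main(void)
    {
            unsigned long nums[8];

            return gather_frames(nums, 8) == 3 ? 0 : 1;
    }
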
nommu.c
     160  void *ret;  in vmalloc_user() local
     162  ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);  in vmalloc_user()
     163  if (ret) {  in vmalloc_user()
     167  vma = find_vma(current->mm, (unsigned long)ret);  in vmalloc_user()
     173  return ret;  in vmalloc_user()
     440  int ret;  in mmap_init() local
     442  ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);  in mmap_init()
     443  VM_BUG_ON(ret);  in mmap_init()
     789  int ret;  in validate_mmap_request() local
     928  ret = security_mmap_addr(addr);  in validate_mmap_request()
    [all …]

cma.c
     154  int ret = cma_activate_area(&cma_areas[i]);  in cma_init_reserved_areas() local
     156  if (ret)  in cma_init_reserved_areas()
     157  return ret;  in cma_init_reserved_areas()
     252  int ret = 0;  in cma_declare_contiguous() local
     284  ret = -EINVAL;  in cma_declare_contiguous()
     305  ret = -EINVAL;  in cma_declare_contiguous()
     320  ret = -EINVAL;  in cma_declare_contiguous()
     330  ret = -EBUSY;  in cma_declare_contiguous()
     352  ret = -ENOMEM;  in cma_declare_contiguous()
     365  ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);  in cma_declare_contiguous()
    [all …]

frontswap.c
     247  int ret = -1;  in __frontswap_store() local
     272  ret = ops->store(type, offset, page);  in __frontswap_store()
     273  if (!ret) /* successful store */  in __frontswap_store()
     276  if (ret == 0) {  in __frontswap_store()
     284  ret = -1;  in __frontswap_store()
     285  return ret;  in __frontswap_store()
     296  int ret = -1;  in __frontswap_load() local
     312  ret = ops->load(type, offset, page);  in __frontswap_load()
     313  if (!ret) /* successful load */  in __frontswap_load()
     316  if (ret == 0) {  in __frontswap_load()
    [all …]

huge_memory.c
     178  ssize_t ret = count;  in enabled_store() local
     193  ret = -EINVAL;  in enabled_store()
     195  if (ret > 0) {  in enabled_store()
     198  ret = err;  in enabled_store()
     200  return ret;  in enabled_store()
     219  int ret;  in single_hugepage_flag_store() local
     221  ret = kstrtoul(buf, 10, &value);  in single_hugepage_flag_store()
     222  if (ret < 0)  in single_hugepage_flag_store()
     223  return ret;  in single_hugepage_flag_store()
     463  int ret = 0;  in setup_transparent_hugepage() local
    [all …]

mremap.c
     506  unsigned long ret = -EINVAL;  in mremap_to() local
     537  ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);  in mremap_to()
     538  if (ret)  in mremap_to()
     542  ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);  in mremap_to()
     543  if (ret && old_len != new_len)  in mremap_to()
     550  ret = PTR_ERR(vma);  in mremap_to()
     558  ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +  in mremap_to()
     561  if (offset_in_page(ret))  in mremap_to()
     564  ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, uf,  in mremap_to()
     566  if (!(offset_in_page(ret)))  in mremap_to()
    [all …]

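mremap_to() packs two outcomes into one unsigned long: on success ret is a page-aligned mapping address, on failure a small negative errno, and offset_in_page(ret) separates the two because errno values cast to unsigned long are never page-aligned. A userspace sketch of the trick; PAGE_SIZE and offset_in_page() are reimplemented locally for the sketch rather than taken from <linux/mm.h>:

    #include <errno.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096ul        /* illustrative; arch-dependent in the kernel */

    /* Local reimplementation of the kernel's offset_in_page(). */
    static unsigned long offset_in_page(unsigned long val)
    {
            return val & (PAGE_SIZE - 1);
    }

    int main(void)
    {
            unsigned long addr = 0x7f0000400000ul;          /* page-aligned: success */
            unsigned long err  = (unsigned long)-EINVAL;    /* low bits set: error */

            printf("addr -> %s\n", offset_in_page(addr) ? "error" : "address");
            printf("err  -> %s\n", offset_in_page(err)  ? "error" : "address");
            return 0;
    }
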
page_owner.c
     342  int ret, pageblock_mt, page_mt;  in print_page_owner() local
     352  ret = snprintf(kbuf, count,  in print_page_owner()
     357  if (ret >= count)  in print_page_owner()
     363  ret += snprintf(kbuf + ret, count - ret,  in print_page_owner()
     371  if (ret >= count)  in print_page_owner()
     375  ret += stack_trace_snprint(kbuf + ret, count - ret, entries, nr_entries, 0);  in print_page_owner()
     376  if (ret >= count)  in print_page_owner()
     380  ret += snprintf(kbuf + ret, count - ret,  in print_page_owner()
     383  if (ret >= count)  in print_page_owner()
     387  ret += snprintf(kbuf + ret, count - ret, "\n");  in print_page_owner()
    [all …]

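print_page_owner() grows one report with repeated snprintf() calls: each call appends at kbuf + ret, and because snprintf() returns the length it would have produced, the running ret >= count check catches truncation after every step. A sketch with illustrative report fields (the real ones come from the page_owner record):

    #include <stdio.h>

    /* The print_page_owner() appending idiom, with made-up content. */
    static int fill_report(char *kbuf, size_t count)
    {
            int ret;

            ret = snprintf(kbuf, count, "Page allocated via order %u\n", 3u);
            if ((size_t)ret >= count)
                    return -1;      /* truncated */

            ret += snprintf(kbuf + ret, count - ret, "PFN 0x%lx\n", 0x1000ul);
            if ((size_t)ret >= count)
                    return -1;      /* truncated */

            return ret;             /* total bytes of report text */
    }

    int main(void)
    {
            char buf[64];
            int n = fill_report(buf, sizeof(buf));

            if (n > 0)
                    fputs(buf, stdout);
            return 0;
    }
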
filemap.c
     366  int ret = 0;  in filemap_check_errors() local
     370  ret = -ENOSPC;  in filemap_check_errors()
     373  ret = -EIO;  in filemap_check_errors()
     374  return ret;  in filemap_check_errors()
     408  int ret;  in __filemap_fdatawrite_range() local
     421  ret = do_writepages(mapping, &wbc);  in __filemap_fdatawrite_range()
     423  return ret;  in __filemap_fdatawrite_range()
     940  int ret;  in add_to_page_cache_lru() local
     943  ret = __add_to_page_cache_locked(page, mapping, offset,  in add_to_page_cache_lru()
     945  if (unlikely(ret))  in add_to_page_cache_lru()
    [all …]

hmm.c
     104  int ret = 0;  in hmm_invalidate_range_start() local
     119  ret = -EAGAIN;  in hmm_invalidate_range_start()
     131  ret = -EAGAIN;  in hmm_invalidate_range_start()
     138  if (ret)  in hmm_invalidate_range_start()
     140  return ret;  in hmm_invalidate_range_start()
     230  vm_fault_t ret;  in hmm_vma_do_fault() local
     240  ret = handle_mm_fault(vma, addr, flags);  in hmm_vma_do_fault()
     241  if (ret & VM_FAULT_RETRY) {  in hmm_vma_do_fault()
     245  if (ret & VM_FAULT_ERROR)  in hmm_vma_do_fault()
     301  int ret;  in hmm_vma_walk_hole_() local
    [all …]

vmpressure.c
     163  bool ret = false;  in vmpressure_event() local
     174  ret = true;  in vmpressure_event()
     178  return ret;  in vmpressure_event()
     371  int ret = 0;  in vmpressure_register_event() local
     375  ret = -ENOMEM;  in vmpressure_register_event()
     381  ret = match_string(vmpressure_str_levels, VMPRESSURE_NUM_LEVELS, token);  in vmpressure_register_event()
     382  if (ret < 0)  in vmpressure_register_event()
     384  level = ret;  in vmpressure_register_event()
     389  ret = match_string(vmpressure_str_modes, VMPRESSURE_NUM_MODES, token);  in vmpressure_register_event()
     390  if (ret < 0)  in vmpressure_register_event()
    [all …]

hugetlb_cgroup.c
      79  int ret;  in hugetlb_cgroup_init() local
      87  ret = page_counter_set_max(counter, limit);  in hugetlb_cgroup_init()
      88  VM_BUG_ON(ret);  in hugetlb_cgroup_init()
     184  int ret = 0;  in hugetlb_cgroup_charge_cgroup() local
     206  ret = -ENOMEM;  in hugetlb_cgroup_charge_cgroup()
     210  return ret;  in hugetlb_cgroup_charge_cgroup()
     291  int ret, idx;  in hugetlb_cgroup_write() local
     299  ret = page_counter_memparse(buf, "-1", &nr_pages);  in hugetlb_cgroup_write()
     300  if (ret)  in hugetlb_cgroup_write()
     301  return ret;  in hugetlb_cgroup_write()
    [all …]

memcontrol.c
     382  int nid, size, ret = 0;  in memcg_alloc_shrinker_maps() local
     393  ret = -ENOMEM;  in memcg_alloc_shrinker_maps()
     400  return ret;  in memcg_alloc_shrinker_maps()
     405  int size, old_size, ret = 0;  in memcg_expand_shrinker_maps() local
     420  ret = memcg_expand_one_shrinker_map(memcg, size, old_size);  in memcg_expand_shrinker_maps()
     421  if (ret)  in memcg_expand_shrinker_maps()
     425  if (!ret)  in memcg_expand_shrinker_maps()
     428  return ret;  in memcg_expand_shrinker_maps()
    1205  int ret = 0;  in mem_cgroup_scan_tasks() local
    1214  while (!ret && (task = css_task_iter_next(&it)))  in mem_cgroup_scan_tasks()
    [all …]

hugetlb.c
     137  long ret = delta;  in hugepage_subpool_get_pages() local
     140  return ret;  in hugepage_subpool_get_pages()
     148  ret = -ENOMEM;  in hugepage_subpool_get_pages()
     160  ret = delta - spool->rsv_hpages;  in hugepage_subpool_get_pages()
     163  ret = 0; /* reserves already accounted for */  in hugepage_subpool_get_pages()
     170  return ret;  in hugepage_subpool_get_pages()
     182  long ret = delta;  in hugepage_subpool_put_pages() local
     195  ret = 0;  in hugepage_subpool_put_pages()
     197  ret = spool->rsv_hpages + delta - spool->min_hpages;  in hugepage_subpool_put_pages()
     210  return ret;  in hugepage_subpool_put_pages()
    [all …]

mmu_notifier.c
     165  int ret = 0;  in __mmu_notifier_invalidate_range_start() local
     184  ret = _ret;  in __mmu_notifier_invalidate_range_start()
     190  return ret;  in __mmu_notifier_invalidate_range_start()
     252  int ret;  in __mmu_notifier_register() local
     282  ret = mm_take_all_locks(mm);  in __mmu_notifier_register()
     283  if (unlikely(ret))  in __mmu_notifier_register()
     310  return ret;  in __mmu_notifier_register()
     335  int ret;  in mmu_notifier_register() local
     338  ret = __mmu_notifier_register(mn, mm);  in mmu_notifier_register()
     340  return ret;  in mmu_notifier_register()
    [all …]

oom_kill.c
      89  bool ret = false;  in oom_cpuset_eligible() local
     104  ret = mempolicy_nodemask_intersects(tsk, mask);  in oom_cpuset_eligible()
     110  ret = cpuset_mems_allowed_intersects(current, tsk);  in oom_cpuset_eligible()
     112  if (ret)  in oom_cpuset_eligible()
     117  return ret;  in oom_cpuset_eligible()
     515  bool ret = true;  in __oom_reap_task_mm() local
     549  ret = false;  in __oom_reap_task_mm()
     558  return ret;  in __oom_reap_task_mm()
     569  bool ret = true;  in oom_reap_task_mm() local
     590  ret = __oom_reap_task_mm(mm);  in oom_reap_task_mm()
    [all …]