/mm/

D | memory_hotplug.c |
    350  int ret;  in move_pfn_range_left() local
    354  ret = ensure_zone_is_initialized(z1, start_pfn, end_pfn - start_pfn);  in move_pfn_range_left()
    355  if (ret)  in move_pfn_range_left()
    356  return ret;  in move_pfn_range_left()
    392  int ret;  in move_pfn_range_right() local
    396  ret = ensure_zone_is_initialized(z2, start_pfn, end_pfn - start_pfn);  in move_pfn_range_right()
    397  if (ret)  in move_pfn_range_right()
    398  return ret;  in move_pfn_range_right()
    450  int ret;  in __add_zone() local
    453  ret = ensure_zone_is_initialized(zone, phys_start_pfn, nr_pages);  in __add_zone()
    [all …]

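Every memory_hotplug.c hit above is the kernel's stock errno-propagation shape: call a helper that returns 0 on success or a negative errno, and on failure hand that value up unchanged. Below is a minimal userspace C sketch of just that control flow; ensure_initialized_stub and move_range_stub are hypothetical stand-ins, not the kernel functions.

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical stand-in for ensure_zone_is_initialized():
     * 0 on success, negative errno on failure (kernel convention). */
    static int ensure_initialized_stub(unsigned long start, unsigned long count)
    {
        return count ? 0 : -EINVAL;
    }

    static int move_range_stub(unsigned long start, unsigned long end)
    {
        int ret;

        ret = ensure_initialized_stub(start, end - start);
        if (ret)                /* nonzero means failure: propagate as-is */
            return ret;

        /* ... the actual range move would happen here ... */
        return 0;
    }

    int main(void)
    {
        printf("%d %d\n", move_range_stub(0, 8), move_range_stub(4, 4));
        return 0;
    }
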
D | page_io.c |
    150  int ret;  in generic_swapfile_activate() local
    204  ret = add_swap_extent(sis, page_no, 1, first_block);  in generic_swapfile_activate()
    205  if (ret < 0)  in generic_swapfile_activate()
    207  nr_extents += ret;  in generic_swapfile_activate()
    213  ret = nr_extents;  in generic_swapfile_activate()
    221  return ret;  in generic_swapfile_activate()
    224  ret = -EINVAL;  in generic_swapfile_activate()
    234  int ret = 0;  in swap_writepage() local
    246  ret = __swap_writepage(page, wbc, end_swap_bio_write);  in swap_writepage()
    248  return ret;  in swap_writepage()
    [all …]

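In generic_swapfile_activate() the same ret does double duty: each add_swap_extent() call returns a count (>= 0) or a negative errno, the counts accumulate into nr_extents, and the function itself returns either that total or the first error. A sketch of the shape, with add_extent_stub as a hypothetical stand-in:

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical stand-in for add_swap_extent(): number of extents
     * added (>= 0) on success, negative errno on failure. */
    static int add_extent_stub(int block)
    {
        return block >= 0 ? 1 : -EINVAL;
    }

    /* Accumulate per-call results; return the total on success or the
     * first error, mirroring the generic_swapfile_activate() shape. */
    static int activate_stub(const int *blocks, int n)
    {
        int nr_extents = 0;
        int ret;

        for (int i = 0; i < n; i++) {
            ret = add_extent_stub(blocks[i]);
            if (ret < 0)
                return ret;        /* negative errno aborts the walk */
            nr_extents += ret;
        }
        return nr_extents;         /* >= 0: how many extents were added */
    }

    int main(void)
    {
        int good[] = { 1, 2, 3 }, bad[] = { 1, -1 };

        printf("%d %d\n", activate_stub(good, 3), activate_stub(bad, 2));
        return 0;
    }
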
D | memory-failure.c |
    194  int ret;  in kill_proc() local
    209  ret = force_sig_info(SIGBUS, &si, current);  in kill_proc()
    218  ret = send_sig_info(SIGBUS, &si, t); /* synchronous? */  in kill_proc()
    220  if (ret < 0)  in kill_proc()
    222  t->comm, t->pid, ret);  in kill_proc()
    223  return ret;  in kill_proc()
    592  int ret = FAILED;  in me_pagecache_clean() local
    633  ret = RECOVERED;  in me_pagecache_clean()
    641  ret = RECOVERED;  in me_pagecache_clean()
    646  return ret;  in me_pagecache_clean()
    [all …]

D | page-writeback.c |
    336  int ret;  in dirty_background_ratio_handler() local
    338  ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);  in dirty_background_ratio_handler()
    339  if (ret == 0 && write)  in dirty_background_ratio_handler()
    341  return ret;  in dirty_background_ratio_handler()
    348  int ret;  in dirty_background_bytes_handler() local
    350  ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);  in dirty_background_bytes_handler()
    351  if (ret == 0 && write)  in dirty_background_bytes_handler()
    353  return ret;  in dirty_background_bytes_handler()
    361  int ret;  in dirty_ratio_handler() local
    363  ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);  in dirty_ratio_handler()
    [all …]

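All three handlers follow the sysctl-handler pattern: delegate parsing to proc_dointvec_minmax()/proc_doulongvec_minmax(), then recompute derived state only when the parse succeeded and the access was a write. A userspace sketch of that "if (ret == 0 && write)" gate; parse_stub is a hypothetical stand-in for the proc_* parser:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int dirty_ratio;          /* the tunable itself */
    static long dirty_threshold;     /* derived state, recomputed on change */

    /* Hypothetical parser: fill *val, 0 on success, -EINVAL on bad input. */
    static int parse_stub(const char *buf, int *val)
    {
        char *end;
        long v = strtol(buf, &end, 10);

        if (end == buf || v < 0 || v > 100)
            return -EINVAL;
        *val = (int)v;
        return 0;
    }

    static int dirty_ratio_handler_stub(const char *buf, int write)
    {
        int ret = parse_stub(buf, &dirty_ratio);

        /* Side effects only when parsing succeeded AND it was a write. */
        if (ret == 0 && write)
            dirty_threshold = dirty_ratio * 1024L;
        return ret;
    }

    int main(void)
    {
        int ret = dirty_ratio_handler_stub("20", 1);

        printf("%d thr=%ld\n", ret, dirty_threshold);
        ret = dirty_ratio_handler_stub("junk", 1);
        printf("%d thr=%ld\n", ret, dirty_threshold);
        return 0;
    }
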
D | frontswap.c |
    194  bool ret = false;  in __frontswap_test() local
    197  ret = test_bit(offset, sis->frontswap_map);  in __frontswap_test()
    198  return ret;  in __frontswap_test()
    218  int ret = -1, dup = 0;  in __frontswap_store() local
    229  return ret;  in __frontswap_store()
    235  ret = frontswap_ops->store(type, offset, page);  in __frontswap_store()
    236  if (ret == 0) {  in __frontswap_store()
    254  ret = -1;  in __frontswap_store()
    255  return ret;  in __frontswap_store()
    266  int ret = -1;  in __frontswap_load() local
    [all …]

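__frontswap_store() initializes ret to -1 so every early bail-out is automatically a failure, and only the fully successful path overwrites it. A sketch of that default-to-failure shape; backend_store_stub and backend_ready are hypothetical:

    #include <stdbool.h>
    #include <stdio.h>

    static bool backend_ready;

    /* Hypothetical stand-in for frontswap_ops->store(): 0 on success. */
    static int backend_store_stub(int offset)
    {
        return offset >= 0 ? 0 : -1;
    }

    static int store_stub(int offset)
    {
        int ret = -1;                 /* pessimistic default */

        if (!backend_ready)
            return ret;               /* early exit is already a failure */

        ret = backend_store_stub(offset);
        if (ret == 0) {
            /* success-only bookkeeping would go here */
        }
        return ret;
    }

    int main(void)
    {
        printf("%d ", store_stub(3));     /* -1: backend not ready */
        backend_ready = true;
        printf("%d\n", store_stub(3));    /* 0: stored */
        return 0;
    }
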
D | gup.c |
    230  int ret = -EFAULT;  in get_gate_page() local
    260  ret = 0;  in get_gate_page()
    263  return ret;  in get_gate_page()
    276  int ret;  in faultin_page() local
    289  ret = handle_mm_fault(mm, vma, address, fault_flags);  in faultin_page()
    290  if (ret & VM_FAULT_ERROR) {  in faultin_page()
    291  if (ret & VM_FAULT_OOM)  in faultin_page()
    293  if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))  in faultin_page()
    295  if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))  in faultin_page()
    301  if (ret & VM_FAULT_MAJOR)  in faultin_page()
    [all …]

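faultin_page() receives a VM_FAULT_* bitmask from handle_mm_fault() and translates each error bit into a distinct negative errno for its caller. A userspace sketch with hypothetical FAULT_* bits; the kernel maps the poison case to -EHWPOISON, for which -EIO stands in here for portability:

    #include <errno.h>
    #include <stdio.h>

    #define FAULT_OOM      0x01
    #define FAULT_SIGBUS   0x02
    #define FAULT_HWPOISON 0x04
    #define FAULT_MAJOR    0x08   /* statistics only, not an error */

    /* Decode a fault bitmask into a single negative errno, mirroring
     * the faultin_page() shape; FAULT_MAJOR deliberately falls through. */
    static int faultin_stub(unsigned int fault)
    {
        if (fault & FAULT_OOM)
            return -ENOMEM;
        if (fault & FAULT_HWPOISON)
            return -EIO;              /* -EHWPOISON in the kernel */
        if (fault & FAULT_SIGBUS)
            return -EFAULT;
        return 0;
    }

    int main(void)
    {
        printf("%d %d %d\n", faultin_stub(0), faultin_stub(FAULT_OOM),
               faultin_stub(FAULT_SIGBUS | FAULT_MAJOR));
        return 0;
    }
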
D | mremap.c |
    400  unsigned long ret = -EINVAL;  in mremap_to() local
    419  ret = do_munmap(mm, new_addr, new_len);  in mremap_to()
    420  if (ret)  in mremap_to()
    424  ret = do_munmap(mm, addr+new_len, old_len - new_len);  in mremap_to()
    425  if (ret && old_len != new_len)  in mremap_to()
    432  ret = PTR_ERR(vma);  in mremap_to()
    440  ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +  in mremap_to()
    443  if (ret & ~PAGE_MASK)  in mremap_to()
    446  ret = move_vma(vma, addr, old_len, new_len, new_addr, locked);  in mremap_to()
    447  if (!(ret & ~PAGE_MASK))  in mremap_to()
    [all …]

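mremap_to() packs both outcomes into one unsigned long: a page-aligned address on success, or a small negative errno (whose bit pattern is never page-aligned) on failure, which is why "ret & ~PAGE_MASK" distinguishes the two. A sketch assuming 4 KiB pages; remap_stub is hypothetical:

    #include <errno.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* Success: a page-aligned address. Failure: a negative errno whose
     * low bits are necessarily set, so the two cases cannot collide. */
    static unsigned long remap_stub(int fail)
    {
        if (fail)
            return (unsigned long)-EINVAL;  /* error encoded in-band */
        return 0x7f0000000000UL;            /* page-aligned new address */
    }

    int main(void)
    {
        unsigned long ret = remap_stub(0);

        if (!(ret & ~PAGE_MASK))            /* low bits clear: an address */
            printf("moved to %#lx\n", ret);

        ret = remap_stub(1);
        if (ret & ~PAGE_MASK)               /* low bits set: an errno */
            printf("error %ld\n", (long)ret);
        return 0;
    }
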
D | backing-dev.c |
    168  ssize_t ret;  in read_ahead_kb_store() local
    170  ret = kstrtoul(buf, 10, &read_ahead_kb);  in read_ahead_kb_store()
    171  if (ret < 0)  in read_ahead_kb_store()
    172  return ret;  in read_ahead_kb_store()
    198  ssize_t ret;  in min_ratio_store() local
    200  ret = kstrtouint(buf, 10, &ratio);  in min_ratio_store()
    201  if (ret < 0)  in min_ratio_store()
    202  return ret;  in min_ratio_store()
    204  ret = bdi_set_min_ratio(bdi, ratio);  in min_ratio_store()
    205  if (!ret)  in min_ratio_store()
    [all …]

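The bdi sysfs stores all parse into a local with kstrtoul()/kstrtouint(), return the parse error untouched, and only then commit the value, reporting the consumed byte count on success. A userspace sketch; kstrtoul_stub is a hypothetical stand-in for kstrtoul():

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static unsigned long read_ahead_kb;

    /* Hypothetical kstrtoul() stand-in: 0 on success, -EINVAL on junk. */
    static int kstrtoul_stub(const char *s, unsigned long *res)
    {
        char *end;
        unsigned long v = strtoul(s, &end, 10);

        if (end == s || *end != '\0')
            return -EINVAL;
        *res = v;
        return 0;
    }

    /* Parse first, commit only on success, mirroring read_ahead_kb_store(). */
    static long store_stub(const char *buf, long count)
    {
        unsigned long val;
        long ret = kstrtoul_stub(buf, &val);

        if (ret < 0)
            return ret;           /* negative errno, nothing committed */
        read_ahead_kb = val;
        return count;             /* sysfs convention: bytes consumed */
    }

    int main(void)
    {
        long ret = store_stub("512", 4);

        printf("%ld %lu\n", ret, read_ahead_kb);
        ret = store_stub("junk", 5);
        printf("%ld %lu\n", ret, read_ahead_kb);
        return 0;
    }
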
D | mlock.c |
    122  int ret = SWAP_AGAIN;  in __munlock_isolated_page() local
    129  ret = try_to_munlock(page);  in __munlock_isolated_page()
    132  if (ret != SWAP_MLOCK)  in __munlock_isolated_page()
    561  int ret = 0;  in mlock_fixup() local
    578  ret = split_vma(mm, vma, start, 1);  in mlock_fixup()
    579  if (ret)  in mlock_fixup()
    584  ret = split_vma(mm, vma, end, 0);  in mlock_fixup()
    585  if (ret)  in mlock_fixup()
    611  return ret;  in mlock_fixup()
    678  long ret = 0;  in __mm_populate() local
    [all …]

D | filemap.c |
    246  int ret = 0;  in filemap_check_errors() local
    250  ret = -ENOSPC;  in filemap_check_errors()
    253  ret = -EIO;  in filemap_check_errors()
    254  return ret;  in filemap_check_errors()
    275  int ret;  in __filemap_fdatawrite_range() local
    286  ret = do_writepages(mapping, &wbc);  in __filemap_fdatawrite_range()
    287  return ret;  in __filemap_fdatawrite_range()
    338  int ret2, ret = 0;  in filemap_fdatawait_range() local
    359  ret = -EIO;  in filemap_fdatawait_range()
    366  if (!ret)  in filemap_fdatawait_range()
    [all …]

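filemap_fdatawait_range()'s ret/ret2 pair implements first-error-wins: every step still runs, but once ret is nonzero no later result may overwrite it. A sketch of the "if (!ret) ret = ret2;" merge:

    #include <stdio.h>

    /* Run both steps regardless, but remember only the FIRST failure. */
    static int wait_stub(int step1_err, int step2_err)
    {
        int ret = 0;
        int ret2;

        if (step1_err)
            ret = step1_err;      /* e.g. -EIO noticed during the walk */

        ret2 = step2_err;         /* e.g. filemap_check_errors() result */
        if (!ret)
            ret = ret2;           /* fills in only if still "success" */
        return ret;
    }

    int main(void)
    {
        /* -5 (EIO) wins over a later -28 (ENOSPC); -28 alone shows through. */
        printf("%d %d %d\n", wait_stub(0, 0), wait_stub(-5, -28),
               wait_stub(0, -28));
        return 0;
    }
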
D | zswap.c |
    115  int ret;  in zswap_comp_op() local
    120  ret = crypto_comp_compress(tfm, src, slen, dst, dlen);  in zswap_comp_op()
    123  ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);  in zswap_comp_op()
    126  ret = -EINVAL;  in zswap_comp_op()
    130  return ret;  in zswap_comp_op()
    543  int ret;  in zswap_writeback_entry() local
    569  ret = -ENOMEM;  in zswap_writeback_entry()
    575  ret = -EEXIST;  in zswap_writeback_entry()
    584  ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src,  in zswap_writeback_entry()
    588  BUG_ON(ret);  in zswap_writeback_entry()
    [all …]

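zswap_comp_op() dispatches on an op code through a switch in which every branch assigns ret and the default branch is an explicit -EINVAL, so an unknown op can never fall through as success. A sketch of that shape; the real branches call crypto_comp_compress()/crypto_comp_decompress():

    #include <errno.h>
    #include <stdio.h>

    enum comp_op { COMPOP_COMPRESS, COMPOP_DECOMPRESS };

    static int comp_op_stub(enum comp_op op)
    {
        int ret;

        switch (op) {
        case COMPOP_COMPRESS:
            ret = 0;              /* crypto_comp_compress(...) here */
            break;
        case COMPOP_DECOMPRESS:
            ret = 0;              /* crypto_comp_decompress(...) here */
            break;
        default:
            ret = -EINVAL;        /* unknown op is always an error */
        }
        return ret;
    }

    int main(void)
    {
        printf("%d %d\n", comp_op_stub(COMPOP_COMPRESS),
               comp_op_stub((enum comp_op)99));
        return 0;
    }
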
D | nommu.c |
    257  void *ret;  in vmalloc_user() local
    259  ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,  in vmalloc_user()
    261  if (ret) {  in vmalloc_user()
    265  vma = find_vma(current->mm, (unsigned long)ret);  in vmalloc_user()
    271  return ret;  in vmalloc_user()
    540  int ret;  in mmap_init() local
    542  ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);  in mmap_init()
    543  VM_BUG_ON(ret);  in mmap_init()
    922  int ret;  in validate_mmap_request() local
    1075  ret = security_mmap_addr(addr);  in validate_mmap_request()
    [all …]

D | util.c |
    243  int ret;  in get_user_pages_fast() local
    246  ret = get_user_pages(current, mm, start, nr_pages,  in get_user_pages_fast()
    250  return ret;  in get_user_pages_fast()
    258  unsigned long ret;  in vm_mmap_pgoff() local
    262  ret = security_mmap_file(file, prot, flag);  in vm_mmap_pgoff()
    263  if (!ret) {  in vm_mmap_pgoff()
    265  ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,  in vm_mmap_pgoff()
    269  mm_populate(ret, populate);  in vm_mmap_pgoff()
    271  return ret;  in vm_mmap_pgoff()
    319  int ret;  in overcommit_ratio_handler() local
    [all …]

D | workingset.c |
    311  int ret;  in shadow_lru_isolate() local
    331  ret = LRU_RETRY;  in shadow_lru_isolate()
    363  ret = LRU_REMOVED_RETRY;  in shadow_lru_isolate()
    369  return ret;  in shadow_lru_isolate()
    375  unsigned long ret;  in scan_shadow_nodes() local
    379  ret = list_lru_walk_node(&workingset_shadow_nodes, sc->nid,  in scan_shadow_nodes()
    382  return ret;  in scan_shadow_nodes()
    400  int ret;  in workingset_init() local
    402  ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key);  in workingset_init()
    403  if (ret)  in workingset_init()
    [all …]

D | memory.c |
    1014  int ret;  in copy_page_range() local
    1036  ret = track_pfn_copy(vma);  in copy_page_range()
    1037  if (ret)  in copy_page_range()
    1038  return ret;  in copy_page_range()
    1054  ret = 0;  in copy_page_range()
    1063  ret = -ENOMEM;  in copy_page_range()
    1070  return ret;  in copy_page_range()
    1590  int ret;  in vm_insert_pfn() local
    1609  ret = insert_pfn(vma, addr, pfn, pgprot);  in vm_insert_pfn()
    1611  return ret;  in vm_insert_pfn()
    [all …]

D | memcontrol.c |
    1434  bool ret;  in mem_cgroup_same_or_subtree() local
    1437  ret = __mem_cgroup_same_or_subtree(root_memcg, memcg);  in mem_cgroup_same_or_subtree()
    1439  return ret;  in mem_cgroup_same_or_subtree()
    1447  bool ret;  in task_in_mem_cgroup() local
    1471  ret = mem_cgroup_same_or_subtree(memcg, curr);  in task_in_mem_cgroup()
    1473  return ret;  in task_in_mem_cgroup()
    1566  bool ret = false;  in mem_cgroup_under_move() local
    1577  ret = mem_cgroup_same_or_subtree(memcg, from)  in mem_cgroup_under_move()
    1581  return ret;  in mem_cgroup_under_move()
    2319  bool ret = true;  in consume_stock() local
    [all …]

D | readahead.c |
    87  int ret = 0;  in read_cache_pages() local
    99  ret = filler(data, page);  in read_cache_pages()
    100  if (unlikely(ret)) {  in read_cache_pages()
    106  return ret;  in read_cache_pages()
    116  int ret;  in read_pages() local
    121  ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);  in read_pages()
    136  ret = 0;  in read_pages()
    141  return ret;  in read_pages()
    161  int ret = 0;  in __do_page_cache_readahead() local
    191  ret++;  in __do_page_cache_readahead()
    [all …]

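read_cache_pages() hands each page to a caller-supplied filler callback and stops the walk at the first nonzero return, which then becomes the function's own result. A sketch with hypothetical read_pages_stub and filler_stub:

    #include <errno.h>
    #include <stdio.h>

    /* Walk the pages, stop at the first filler failure; ret stays 0
     * only if every callback succeeded. */
    static int read_pages_stub(int (*filler)(int page), const int *pages,
                               int n)
    {
        int ret = 0;

        for (int i = 0; i < n; i++) {
            ret = filler(pages[i]);
            if (ret)    /* the original also discards unread pages here */
                break;
        }
        return ret;
    }

    static int filler_stub(int page)
    {
        return page < 0 ? -EIO : 0;   /* hypothetical per-page reader */
    }

    int main(void)
    {
        int ok[] = { 1, 2 }, bad[] = { 1, -1, 2 };

        printf("%d %d\n", read_pages_stub(filler_stub, ok, 2),
               read_pages_stub(filler_stub, bad, 3));
        return 0;
    }
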
D | rmap.c |
    822  int ret;  in page_referenced() local
    856  ret = rmap_walk(page, &rwc);  in page_referenced()
    871  int ret = 0;  in page_mkclean_one() local
    886  ret = 1;  in page_mkclean_one()
    891  if (ret) {  in page_mkclean_one()
    1192  int ret = SWAP_AGAIN;  in try_to_unmap_one() local
    1213  ret = SWAP_FAIL;  in try_to_unmap_one()
    1259  ret = SWAP_FAIL;  in try_to_unmap_one()
    1298  if (ret != SWAP_FAIL && !(flags & TTU_MUNLOCK))  in try_to_unmap_one()
    1301  return ret;  in try_to_unmap_one()
    [all …]

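try_to_unmap_one() returns SWAP_* status codes rather than errnos: ret starts at SWAP_AGAIN, is downgraded to SWAP_FAIL on any obstacle, and the final cleanup step is gated on "ret != SWAP_FAIL". A sketch with a hypothetical enum standing in for the kernel's SWAP_* constants:

    #include <stdio.h>

    enum swap_status { SWAP_AGAIN, SWAP_FAIL, SWAP_MLOCK };

    static int cleanups;    /* counts how often the gated step ran */

    /* Start optimistic, downgrade on obstacles, gate cleanup on status. */
    static enum swap_status unmap_one_stub(int pte_busy, int page_mlocked)
    {
        enum swap_status ret = SWAP_AGAIN;

        if (pte_busy)
            ret = SWAP_FAIL;
        else if (page_mlocked)
            ret = SWAP_MLOCK;

        if (ret != SWAP_FAIL)
            cleanups++;     /* stands in for the gated final step */
        return ret;
    }

    int main(void)
    {
        enum swap_status a = unmap_one_stub(0, 0);
        enum swap_status b = unmap_one_stub(1, 0);
        enum swap_status c = unmap_one_stub(0, 1);

        printf("%d %d %d (cleanups=%d)\n", a, b, c, cleanups);
        return 0;
    }
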
D | huge_memory.c |
    298  ssize_t ret;  in enabled_store() local
    300  ret = double_flag_store(kobj, attr, buf, count,  in enabled_store()
    304  if (ret > 0) {  in enabled_store()
    312  ret = err;  in enabled_store()
    315  return ret;  in enabled_store()
    334  int ret;  in single_flag_store() local
    336  ret = kstrtoul(buf, 10, &value);  in single_flag_store()
    337  if (ret < 0)  in single_flag_store()
    338  return ret;  in single_flag_store()
    669  int ret = 0;  in setup_transparent_hugepage() local
    [all …]

D | slab_common.c |
    171  int ret = 0;  in memcg_update_all_caches() local
    178  ret = memcg_update_cache_params(s, num_memcgs);  in memcg_update_all_caches()
    183  if (ret)  in memcg_update_all_caches()
    190  return ret;  in memcg_update_all_caches()
    553  int ret;  in kmem_cache_shrink() local
    557  ret = __kmem_cache_shrink(cachep);  in kmem_cache_shrink()
    560  return ret;  in kmem_cache_shrink()
    785  void *ret;  in kmalloc_order() local
    790  ret = page ? page_address(page) : NULL;  in kmalloc_order()
    791  kmemleak_alloc(ret, size, 1, flags);  in kmalloc_order()
    [all …]

D | cma.c |
    150  int ret = cma_activate_area(&cma_areas[i]);  in cma_init_reserved_areas() local
    152  if (ret)  in cma_init_reserved_areas()
    153  return ret;  in cma_init_reserved_areas()
    236  int ret = 0;  in cma_declare_contiguous() local
    288  ret = -EINVAL;  in cma_declare_contiguous()
    306  ret = -EBUSY;  in cma_declare_contiguous()
    328  ret = -ENOMEM;  in cma_declare_contiguous()
    341  ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);  in cma_declare_contiguous()
    342  if (ret)  in cma_declare_contiguous()
    351  return ret;  in cma_declare_contiguous()
    [all …]

D | hugetlb.c |
    103  int ret = 0;  in hugepage_subpool_get_pages() local
    112  ret = -ENOMEM;  in hugepage_subpool_get_pages()
    116  return ret;  in hugepage_subpool_get_pages()
    747  unsigned long ret, pfn, flags;  in alloc_gigantic_page() local
    765  ret = __alloc_gigantic_page(pfn, nr_pages);  in alloc_gigantic_page()
    766  if (!ret)  in alloc_gigantic_page()
    1032  int ret = 0;  in alloc_fresh_huge_page() local
    1037  ret = 1;  in alloc_fresh_huge_page()
    1042  if (ret)  in alloc_fresh_huge_page()
    1047  return ret;  in alloc_fresh_huge_page()
    [all …]

D | hugetlb_cgroup.c |
    168  int ret = 0;  in hugetlb_cgroup_charge_cgroup() local
    190  ret = res_counter_charge(&h_cg->hugepage[idx], csize, &fail_res);  in hugetlb_cgroup_charge_cgroup()
    194  return ret;  in hugetlb_cgroup_charge_cgroup()
    259  int idx, name, ret;  in hugetlb_cgroup_write() local
    271  ret = -EINVAL;  in hugetlb_cgroup_write()
    275  ret = res_counter_memparse_write_strategy(buf, &val);  in hugetlb_cgroup_write()
    276  if (ret)  in hugetlb_cgroup_write()
    279  ret = res_counter_set_limit(&h_cg->hugepage[idx], val);  in hugetlb_cgroup_write()
    282  ret = -EINVAL;  in hugetlb_cgroup_write()
    285  return ret ?: nbytes;  in hugetlb_cgroup_write()
    [all …]

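hugetlb_cgroup_write()'s "return ret ?: nbytes;" uses the GNU "a ?: b" extension: it yields ret when nonzero (the error) and nbytes otherwise (the consumed byte count a cgroup write handler reports on success). A sketch; building it requires GCC or Clang for the extension:

    #include <errno.h>
    #include <stdio.h>

    /* ret is 0 or a negative errno; "?:" picks the error if there is
     * one, else the success byte count. */
    static long write_stub(int parse_err, long nbytes)
    {
        long ret = parse_err;

        return ret ?: nbytes;
    }

    int main(void)
    {
        printf("%ld %ld\n", write_stub(0, 16), write_stub(-EINVAL, 16));
        return 0;
    }
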
D | filemap_xip.c |
    275  int err, ret = VM_FAULT_OOM;  in xip_file_fault() local
    297  ret = VM_FAULT_NOPAGE;  in xip_file_fault()
    302  return ret;  in xip_file_fault()
    405  ssize_t ret;  in xip_file_write() local
    410  ret=-EFAULT;  in xip_file_write()
    420  ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));  in xip_file_write()
    421  if (ret)  in xip_file_write()
    426  ret = file_remove_suid(filp);  in xip_file_write()
    427  if (ret)  in xip_file_write()
    430  ret = file_update_time(filp);  in xip_file_write()
    [all …]

D | maccess.c |
    23  long ret;  in __probe_kernel_read() local
    28  ret = __copy_from_user_inatomic(dst,  in __probe_kernel_read()
    33  return ret ? -EFAULT : 0;  in __probe_kernel_read()
    51  long ret;  in __probe_kernel_write() local
    56  ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);  in __probe_kernel_write()
    60  return ret ? -EFAULT : 0;  in __probe_kernel_write()

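Both probe helpers end with "return ret ? -EFAULT : 0;": the inatomic copy reports how many bytes it could not copy, but callers only need pass/fail, so any nonzero remainder collapses into a single -EFAULT. A userspace sketch; probe_read_stub is hypothetical and its uncopied parameter simulates the copy's remainder:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Normalize "bytes left uncopied" into pass/fail: 0 or -EFAULT. */
    static long probe_read_stub(void *dst, const void *src, size_t size,
                                size_t uncopied)
    {
        long ret = (long)uncopied;  /* what the raw copy would leave over */

        if (!ret)
            memcpy(dst, src, size); /* stand-in for the real copy */
        return ret ? -EFAULT : 0;
    }

    int main(void)
    {
        char buf[8];

        printf("%ld %ld\n", probe_read_stub(buf, "data", 5, 0),
               probe_read_stub(buf, "data", 5, 3));
        return 0;
    }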