/mm/
D | pagewalk.c |
      9  int err = 0;  in walk_pte_range() local
     13  err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);  in walk_pte_range()
     14  if (err)  in walk_pte_range()
     23  return err;  in walk_pte_range()
     31  int err = 0;  in walk_pmd_range() local
     38  err = walk->pte_hole(addr, next, walk);  in walk_pmd_range()
     39  if (err)  in walk_pmd_range()
     44  err = walk->pmd_entry(pmd, addr, next, walk);  in walk_pmd_range()
     45  if (!err && walk->pte_entry)  in walk_pmd_range()
     46  err = walk_pte_range(pmd, addr, next, walk);  in walk_pmd_range()
         [all …]
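The pagewalk.c hits above all follow one convention: each callback returns 0 to continue or a non-zero error to abort, and the walker forwards the first failure to its caller. Below is a minimal userspace sketch of that convention; struct range_walk, walk_range() and count_entries() are illustrative stand-ins, not the kernel's walk_page_range() API.

    /* Walk a range, calling a per-entry callback; stop on the first error. */
    #include <errno.h>
    #include <stdio.h>

    struct range_walk {
        int (*entry_fn)(unsigned long addr, void *private);  /* 0 = keep going */
        void *private;
    };

    static int walk_range(unsigned long start, unsigned long end,
                          unsigned long step, struct range_walk *walk)
    {
        int err = 0;

        for (unsigned long addr = start; addr < end; addr += step) {
            err = walk->entry_fn(addr, walk->private);
            if (err)
                break;              /* stop at the first failing entry */
        }
        return err;                 /* 0, or the callback's error */
    }

    static int count_entries(unsigned long addr, void *private)
    {
        int *count = private;

        (void)addr;
        return (++(*count) > 4) ? -ERANGE : 0;   /* pretend something failed */
    }

    int main(void)
    {
        int count = 0;
        struct range_walk walk = { .entry_fn = count_entries, .private = &count };

        printf("walk returned %d after %d entries\n",
               walk_range(0, 0x8000, 0x1000, &walk), count);
        return 0;
    }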
D | mempolicy.c |
    489  int err;  in check_range() local
    514  err = check_pgd_range(vma, start, endvma, nodes,  in check_range()
    516  if (err) {  in check_range()
    517  first = ERR_PTR(err);  in check_range()
    529  int err = 0;  in policy_vma() local
    538  err = vma->vm_ops->set_policy(vma, new);  in policy_vma()
    539  if (!err) {  in policy_vma()
    544  return err;  in policy_vma()
    552  int err;  in mbind_range() local
    554  err = 0;  in mbind_range()
         [all …]
D | migrate.c |
    819  int err;  in do_move_page_to_node_array() local
    833  err = -EFAULT;  in do_move_page_to_node_array()
    840  err = PTR_ERR(page);  in do_move_page_to_node_array()
    844  err = -ENOENT;  in do_move_page_to_node_array()
    852  err = page_to_nid(page);  in do_move_page_to_node_array()
    854  if (err == pp->node)  in do_move_page_to_node_array()
    860  err = -EACCES;  in do_move_page_to_node_array()
    865  err = isolate_lru_page(page);  in do_move_page_to_node_array()
    866  if (!err)  in do_move_page_to_node_array()
    876  pp->status = err;  in do_move_page_to_node_array()
         [all …]
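In the do_move_page_to_node_array() hits, err is dual-purpose: it holds either a non-negative node id from page_to_nid() or a negative errno, and whichever it ends up as is written to the per-page pp->status field. A small sketch of that convention; struct page_req, page_to_node() and move_page() are made-up stand-ins, not the kernel types.

    /* Report either a node id, 0, or a negative errno through one status int. */
    #include <errno.h>
    #include <stdio.h>

    struct page_req {
        int want_node;   /* node the caller asked for */
        int status;      /* filled with node id, 0, or a negative errno */
    };

    /* Pretend lookup: even "addresses" live on node 0, odd ones are not mapped. */
    static int page_to_node(unsigned long addr)
    {
        return (addr & 1) ? -ENOENT : 0;
    }

    static int move_page(unsigned long addr, int node)
    {
        (void)addr;
        (void)node;
        return 0;        /* stand-in for the actual migration */
    }

    static void resolve_one(unsigned long addr, struct page_req *pp)
    {
        int err = page_to_node(addr);          /* node id or negative errno */

        if (err >= 0 && err != pp->want_node)
            err = move_page(addr, pp->want_node);

        pp->status = err;                      /* report back, whatever it is */
    }

    int main(void)
    {
        struct page_req pp = { .want_node = 1 };

        resolve_one(3, &pp);
        printf("status = %d\n", pp.status);
        return 0;
    }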
D | fremap.c |
     59  int err = -ENOMEM;  in install_file_pte() local
     79  err = 0;  in install_file_pte()
     81  return err;  in install_file_pte()
     87  int err;  in populate_range() local
     90  err = install_file_pte(mm, vma, addr, pgoff, vma->vm_page_prot);  in populate_range()
     91  if (err)  in populate_range()
     92  return err;  in populate_range()
    130  int err = -EINVAL;  in SYSCALL_DEFINE5() local
    134  return err;  in SYSCALL_DEFINE5()
    143  return err;  in SYSCALL_DEFINE5()
         [all …]
D | bounce.c |
    125  static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)  in bounce_end_io() argument
    146  bio_endio(bio_orig, err);  in bounce_end_io()
    150  static void bounce_end_io_write(struct bio *bio, int err)  in bounce_end_io_write() argument
    152  bounce_end_io(bio, page_pool, err);  in bounce_end_io_write()
    155  static void bounce_end_io_write_isa(struct bio *bio, int err)  in bounce_end_io_write_isa() argument
    158  bounce_end_io(bio, isa_page_pool, err);  in bounce_end_io_write_isa()
    161  static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)  in __bounce_end_io_read() argument
    168  bounce_end_io(bio, pool, err);  in __bounce_end_io_read()
    171  static void bounce_end_io_read(struct bio *bio, int err)  in bounce_end_io_read() argument
    173  __bounce_end_io_read(bio, page_pool, err);  in bounce_end_io_read()
         [all …]
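The bounce.c completion handlers all take the error as an argument and pass it down unchanged until bio_endio() reports it against the original bio. A sketch of that forwarding chain with simplified stand-in types (struct request and finish_original() are not the block-layer API):

    /* Error code travels untouched through a chain of completion handlers. */
    #include <errno.h>
    #include <stdio.h>

    struct request { int id; };

    static void finish_original(struct request *orig, int err)
    {
        printf("request %d finished with err=%d\n", orig->id, err);
    }

    static void bounce_end(struct request *bounce, struct request *orig, int err)
    {
        (void)bounce;                 /* a real version would free bounce pages */
        finish_original(orig, err);   /* forward the error untouched */
    }

    static void bounce_end_write(struct request *bounce, struct request *orig, int err)
    {
        bounce_end(bounce, orig, err);
    }

    int main(void)
    {
        struct request bounce = { 1 }, orig = { 2 };

        bounce_end_write(&bounce, &orig, -EIO);
        return 0;
    }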
D | filemap.c |
    394  int err = 0;  in filemap_write_and_wait() local
    397  err = filemap_fdatawrite(mapping);  in filemap_write_and_wait()
    404  if (err != -EIO) {  in filemap_write_and_wait()
    406  if (!err)  in filemap_write_and_wait()
    407  err = err2;  in filemap_write_and_wait()
    410  return err;  in filemap_write_and_wait()
    428  int err = 0;  in filemap_write_and_wait_range() local
    431  err = __filemap_fdatawrite_range(mapping, lstart, lend,  in filemap_write_and_wait_range()
    434  if (err != -EIO) {  in filemap_write_and_wait_range()
    438  if (!err)  in filemap_write_and_wait_range()
         [all …]
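filemap_write_and_wait() merges two phases' errors: it skips the wait when writeback already failed with -EIO, and otherwise reports the writeback error in preference to the wait error. A userspace sketch of that merging rule, with start_writeback() and wait_for_writeback() as stand-ins for the fdatawrite/fdatawait steps:

    /* Run two phases, keep the first phase's error unless it was -EIO. */
    #include <errno.h>
    #include <stdio.h>

    static int start_writeback(void)    { return 0; }        /* stand-in */
    static int wait_for_writeback(void) { return -ENOSPC; }  /* stand-in */

    static int write_and_wait(void)
    {
        int err = start_writeback();

        if (err != -EIO) {                 /* -EIO means data is already lost */
            int err2 = wait_for_writeback();

            if (!err)
                err = err2;                /* keep the earlier error if any */
        }
        return err;
    }

    int main(void)
    {
        printf("write_and_wait() = %d\n", write_and_wait());
        return 0;
    }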
D | failslab.c |
     37  int err;  in failslab_debugfs_init() local
     39  err = init_fault_attr_dentries(&failslab.attr, "failslab");  in failslab_debugfs_init()
     40  if (err)  in failslab_debugfs_init()
     41  return err;  in failslab_debugfs_init()
     49  err = -ENOMEM;  in failslab_debugfs_init()
     54  return err;  in failslab_debugfs_init()
D | filemap_xip.c |
    248  int err;  in xip_file_fault() local
    261  err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,  in xip_file_fault()
    263  if (err == -ENOMEM)  in xip_file_fault()
    265  BUG_ON(err);  in xip_file_fault()
    268  int err, ret = VM_FAULT_OOM;  in xip_file_fault() local
    285  err = vm_insert_page(vma, (unsigned long)vmf->virtual_address,  in xip_file_fault()
    287  if (err == -ENOMEM)  in xip_file_fault()
    449  int err;  in xip_truncate_page() local
    462  err = mapping->a_ops->get_xip_mem(mapping, index, 0,  in xip_truncate_page()
    464  if (unlikely(err)) {  in xip_truncate_page()
         [all …]
D | readahead.c |
    191  int err;  in force_page_cache_readahead() local
    197  err = __do_page_cache_readahead(mapping, filp,  in force_page_cache_readahead()
    199  if (err < 0) {  in force_page_cache_readahead()
    200  ret = err;  in force_page_cache_readahead()
    203  ret += err;  in force_page_cache_readahead()
    238  int err;  in readahead_init() local
    240  err = bdi_init(&default_backing_dev_info);  in readahead_init()
    241  if (!err)  in readahead_init()
    244  return err;  in readahead_init()
D | backing-dev.c |
    217  int err;  in bdi_init() local
    226  err = percpu_counter_init(&bdi->bdi_stat[i], 0);  in bdi_init()
    227  if (err)  in bdi_init()
    228  goto err;  in bdi_init()
    232  err = prop_local_init_percpu(&bdi->completions);  in bdi_init()
    234  if (err) {  in bdi_init()
    235  err:  in bdi_init()
    240  return err;  in bdi_init()
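bdi_init() shows the goto-label unwind idiom: initialise resources in order and, on the first failure, jump to a label that tears down only what was already set up before returning the error. A self-contained sketch of the same idiom, where dev_info_init() and its malloc'd buffers stand in for the per-CPU counters:

    /* Allocate forward, unwind backward from a single error label. */
    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define NR_STATS 4

    struct dev_info {
        void *stat[NR_STATS];
    };

    static int dev_info_init(struct dev_info *di)
    {
        int i, err = 0;

        for (i = 0; i < NR_STATS; i++) {
            di->stat[i] = malloc(64);
            if (!di->stat[i]) {
                err = -ENOMEM;
                goto err;
            }
        }
        return 0;

    err:
        while (--i >= 0)          /* free only what was initialised */
            free(di->stat[i]);
        return err;
    }

    int main(void)
    {
        struct dev_info di;

        printf("dev_info_init() = %d\n", dev_info_init(&di));
        return 0;
    }

The same shape (allocate forward, unwind backward from one label) is what the goto err / err: pair at lines 228 and 235 implements.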
D | swap_state.c |
    138  int err;  in add_to_swap() local
    159  err = add_to_swap_cache(page, entry,  in add_to_swap()
    162  switch (err) {  in add_to_swap()
    274  int err;  in read_swap_cache_async() local
    311  err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);  in read_swap_cache_async()
    312  if (likely(!err)) {  in read_swap_cache_async()
    323  } while (err != -ENOMEM);  in read_swap_cache_async()
D | vmalloc.c |
    161  int err = 0;  in vmap_page_range() local
    168  err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);  in vmap_page_range()
    169  if (err)  in vmap_page_range()
    174  if (unlikely(err))  in vmap_page_range()
    175  return err;  in vmap_page_range()
    703  int node, err;  in new_vmap_block() local
    720  err = radix_tree_preload(gfp_mask);  in new_vmap_block()
    721  if (unlikely(err)) {  in new_vmap_block()
    724  return ERR_PTR(err);  in new_vmap_block()
    738  err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);  in new_vmap_block()
         [all …]
D | memory.c |
   1648  int err;  in remap_pfn_range() local
   1675  err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size));  in remap_pfn_range()
   1676  if (err) {  in remap_pfn_range()
   1691  err = remap_pud_range(mm, pgd, addr, next,  in remap_pfn_range()
   1693  if (err)  in remap_pfn_range()
   1697  if (err)  in remap_pfn_range()
   1700  return err;  in remap_pfn_range()
   1709  int err;  in apply_to_pte_range() local
   1726  err = fn(pte, token, addr, data);  in apply_to_pte_range()
   1727  if (err)  in apply_to_pte_range()
         [all …]
D | hugetlb.c |
    936  int err;  in vma_needs_reservation() local
    940  err = region_chg(&reservations->regions, idx, idx + 1);  in vma_needs_reservation()
    941  if (err < 0)  in vma_needs_reservation()
    942  return err;  in vma_needs_reservation()
   1245  int err;  in nr_hugepages_store() local
   1249  err = strict_strtoul(buf, 10, &input);  in nr_hugepages_store()
   1250  if (err)  in nr_hugepages_store()
   1268  int err;  in nr_overcommit_hugepages_store() local
   1272  err = strict_strtoul(buf, 10, &input);  in nr_overcommit_hugepages_store()
   1273  if (err)  in nr_overcommit_hugepages_store()
         [all …]
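The nr_hugepages_store() and nr_overcommit_hugepages_store() hits use the parse-then-check idiom: strict_strtoul() returns 0 or a negative errno, and the parsed value is only used when the helper succeeded. A userspace sketch, where parse_ulong() is an illustrative stand-in for strict_strtoul()/kstrtoul() and store_nr_pages() for the sysfs store handler:

    /* Strictly parse a decimal string; on any error, leave the state untouched. */
    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int parse_ulong(const char *buf, int base, unsigned long *res)
    {
        char *end;

        errno = 0;
        *res = strtoul(buf, &end, base);
        if (errno || end == buf || *end != '\0')
            return -EINVAL;        /* reject trailing junk and overflow */
        return 0;
    }

    static int store_nr_pages(const char *buf, unsigned long *nr_pages)
    {
        unsigned long input;
        int err = parse_ulong(buf, 10, &input);

        if (err)
            return err;            /* propagate -EINVAL */
        *nr_pages = input;
        return 0;
    }

    int main(void)
    {
        unsigned long nr = 0;

        printf("store(\"128\")  = %d, nr = %lu\n", store_nr_pages("128", &nr), nr);
        printf("store(\"12x8\") = %d, nr = %lu\n", store_nr_pages("12x8", &nr), nr);
        return 0;
    }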
D | slub.c |
   3138  goto err;  in kmem_cache_create()
   3154  goto err;  in kmem_cache_create()
   3162  err:  in kmem_cache_create()
   3820  int err;  in order_store() local
   3822  err = strict_strtoul(buf, 10, &order);  in order_store()
   3823  if (err)  in order_store()
   3824  return err;  in order_store()
   4078  int err;  in remote_node_defrag_ratio_store() local
   4080  err = strict_strtoul(buf, 10, &ratio);  in remote_node_defrag_ratio_store()
   4081  if (err)  in remote_node_defrag_ratio_store()
         [all …]
D | swapfile.c |
     96  int err = 0;  in discard_swap() local
    110  err = blkdev_issue_discard(si->bdev, start_block,  in discard_swap()
    112  if (err)  in discard_swap()
    117  return err; /* That will often be -EOPNOTSUPP */  in discard_swap()
   1392  int err;  in SYSCALL_DEFINE1() local
   1398  err = PTR_ERR(pathname);  in SYSCALL_DEFINE1()
   1404  err = PTR_ERR(victim);  in SYSCALL_DEFINE1()
   1420  err = -EINVAL;  in SYSCALL_DEFINE1()
   1427  err = -ENOMEM;  in SYSCALL_DEFINE1()
   1451  err = try_to_unuse(type);  in SYSCALL_DEFINE1()
         [all …]
D | memory_hotplug.c |
    281  int err = 0;  in __add_pages() local
    288  err = __add_section(nid, zone, i << PFN_SECTION_SHIFT);  in __add_pages()
    295  if (err && (err != -EEXIST))  in __add_pages()
    297  err = 0;  in __add_pages()
    300  return err;  in __add_pages()
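__add_pages() treats -EEXIST as harmless: a section that is already present does not abort the loop, and err is reset to 0 afterwards so a trailing -EEXIST is not reported to the caller. A sketch of that tolerance, with add_section() as a stand-in for __add_section():

    /* Loop over sections; only real failures abort, -EEXIST is ignored. */
    #include <errno.h>
    #include <stdio.h>

    static int add_section(int i)
    {
        return (i == 2) ? -EEXIST : 0;   /* pretend section 2 already exists */
    }

    static int add_sections(int nr)
    {
        int i, err = 0;

        for (i = 0; i < nr; i++) {
            err = add_section(i);
            if (err && err != -EEXIST)   /* only real failures abort the loop */
                break;
            err = 0;
        }
        return err;
    }

    int main(void)
    {
        printf("add_sections(4) = %d\n", add_sections(4));
        return 0;
    }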
D | page_io.c |
     47  static void end_swap_bio_write(struct bio *bio, int err)  in end_swap_bio_write() argument
     73  void end_swap_bio_read(struct bio *bio, int err)  in end_swap_bio_read() argument
D | bootmem.c |
    299  int err;  in mark_bootmem() local
    310  err = mark_bootmem_node(bdata, pos, max, reserve, flags);  in mark_bootmem()
    311  if (reserve && err) {  in mark_bootmem()
    313  return err;  in mark_bootmem()
D | page_alloc.c |
   1191  int err;  in fail_page_alloc_debugfs() local
   1193  err = init_fault_attr_dentries(&fail_page_alloc.attr,  in fail_page_alloc_debugfs()
   1195  if (err)  in fail_page_alloc_debugfs()
   1196  return err;  in fail_page_alloc_debugfs()
   1213  err = -ENOMEM;  in fail_page_alloc_debugfs()
   1220  return err;  in fail_page_alloc_debugfs()
   2871  int err;  in setup_per_cpu_pageset() local
   2877  err = process_zones(smp_processor_id());  in setup_per_cpu_pageset()
   2878  BUG_ON(err);  in setup_per_cpu_pageset()
D | slab.c |
   1329  int err = 0;  in cpuup_callback() local
   1335  err = cpuup_prepare(cpu);  in cpuup_callback()
   1378  return err ? NOTIFY_BAD : NOTIFY_OK;  in cpuup_callback()
   3894  int err;  in enable_cpucache() local
   3938  err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);  in enable_cpucache()
   3939  if (err)  in enable_cpucache()
   3941  cachep->name, -err);  in enable_cpucache()
   3942  return err;  in enable_cpucache()
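cpuup_callback() translates at the boundary: helpers like cpuup_prepare() speak in negative errnos, but the CPU notifier expects NOTIFY_OK or NOTIFY_BAD, hence the final "return err ? NOTIFY_BAD : NOTIFY_OK;". A sketch of that translation; the NOTIFY_* constants and cpuup_prepare() stub below are local placeholders, not <linux/notifier.h>.

    /* Internal errno, external notifier verdict: translate on the way out. */
    #include <errno.h>
    #include <stdio.h>

    #define NOTIFY_OK   0x0001   /* placeholder values for the sketch */
    #define NOTIFY_BAD  0x8002

    static int cpuup_prepare(int cpu)
    {
        return (cpu < 0) ? -ENOMEM : 0;   /* stand-in for per-cpu allocation */
    }

    static int cpuup_callback(int cpu)
    {
        int err = cpuup_prepare(cpu);

        return err ? NOTIFY_BAD : NOTIFY_OK;
    }

    int main(void)
    {
        printf("cpuup_callback(1)  = 0x%x\n", cpuup_callback(1));
        printf("cpuup_callback(-1) = 0x%x\n", cpuup_callback(-1));
        return 0;
    }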
D | shmem.c |
   2287  int err = -ENOMEM;  in shmem_fill_super() local
   2313  err = -EINVAL;  in shmem_fill_super()
   2352  return err;  in shmem_fill_super()