/mm/
pagewalk.c
  walk_pte_range():
    11: int err = 0;   (local)
    16: err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
    17: if (err)
    26: return err;
  walk_pmd_range():
    35: int err = 0;   (local)
    43: err = ops->pte_hole(addr, next, walk);
    44: if (err)
    53: err = ops->pmd_entry(pmd, addr, next, walk);
    54: if (err)
    67: err = walk_pte_range(pmd, addr, next, walk);
  [all …]

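The pagewalk.c matches show a first-error-wins callback pattern: each ops-> callback's return value is captured in err, and any non-zero value aborts the walk and is propagated up. Below is a minimal userspace sketch of that pattern; walk_ops, walk_range() and print_entry() are illustrative names, not the kernel API.

```c
#include <stdio.h>

struct walk_ops {
	/* return 0 to continue the walk, non-zero to abort with that error */
	int (*entry)(unsigned long addr, void *private);
};

static int walk_range(unsigned long start, unsigned long end,
		      unsigned long step, const struct walk_ops *ops,
		      void *private)
{
	int err = 0;
	unsigned long addr;

	for (addr = start; addr < end; addr += step) {
		err = ops->entry(addr, private);
		if (err)
			break;			/* first error wins */
	}
	return err;				/* 0, or the callback's error */
}

static int print_entry(unsigned long addr, void *private)
{
	(void)private;
	printf("visiting %#lx\n", addr);
	return addr >= 0x3000 ? -1 : 0;		/* pretend failure past 0x3000 */
}

int main(void)
{
	const struct walk_ops ops = { .entry = print_entry };

	return walk_range(0x1000, 0x8000, 0x1000, &ops, NULL) ? 1 : 0;
}
```
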
userfaultfd.c
  __mcopy_atomic_hugetlb():
    182: ssize_t err;   (local)
    213: err = -EINVAL;
    223: err = -ENOENT;
    239: err = -EINVAL;
    253: err = -ENOMEM;
    275: err = -ENOMEM;
    282: err = -EEXIST;
    289: err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
    297: if (unlikely(err == -ENOENT)) {
    301: err = copy_huge_page_from_user(page,
  [all …]

ksm.c
  unmerge_ksm_pages():
    847: int err = 0;   (local)
    849: for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
    853: err = -ERESTARTSYS;
    855: err = break_ksm(vma, addr);
    857: return err;
  remove_stable_node():
    878: int err;   (local)
    893: err = -EBUSY;
    905: err = 0;
    910: return err;
  remove_all_stable_nodes():
    942: int err = 0;   (local)
  [all …]

mempolicy.c
  vma_replace_policy():
    701: int err;   (local)
    715: err = vma->vm_ops->set_policy(vma, new);
    716: if (err)
    727: return err;
  mbind_range():
    737: int err = 0;   (local)
    773: err = split_vma(vma->vm_mm, vma, vmstart, 1);
    774: if (err)
    778: err = split_vma(vma->vm_mm, vma, vmend, 0);
    779: if (err)
    783: err = vma_replace_policy(vma, new_pol);
  [all …]

migrate.c
  do_move_pages_to_node():
    1504: int err;   (local)
    1509: err = migrate_pages(pagelist, alloc_new_node_page, NULL, node,
    1511: if (err)
    1513: return err;
  add_page_for_migration():
    1531: int err;   (local)
    1534: err = -EFAULT;
    1543: err = PTR_ERR(page);
    1547: err = -ENOENT;
    1551: err = 0;
    1555: err = -EACCES;
  [all …]

filemap.c
  filemap_write_and_wait():
    637: int err = 0;   (local)
    640: err = filemap_fdatawrite(mapping);
    647: if (err != -EIO) {
    649: if (!err)
    650: err = err2;
    656: err = filemap_check_errors(mapping);
    658: return err;
  filemap_write_and_wait_range():
    678: int err = 0;   (local)
    681: err = __filemap_fdatawrite_range(mapping, lstart, lend,
    684: if (err != -EIO) {
  [all …]

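The filemap_write_and_wait() matches illustrate two related rules: the wait step still runs after a write-back failure (unless it was a hard -EIO), and a later error must not overwrite the one seen first. A self-contained sketch of that combination, with start_writeback() and wait_for_writeback() as invented stand-ins for the two phases:

```c
#include <errno.h>
#include <stdio.h>

/* Stand-ins for the two phases; both return 0 or a negative errno. */
static int start_writeback(void)	{ return 0; }
static int wait_for_writeback(void)	{ return -EIO; }

static int write_and_wait(void)
{
	int err = start_writeback();

	if (err != -EIO) {		/* after a hard error, waiting is pointless */
		int err2 = wait_for_writeback();

		if (!err)
			err = err2;	/* keep the first error, not the last */
	}
	return err;
}

int main(void)
{
	printf("write_and_wait() = %d\n", write_and_wait());
	return 0;
}
```
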
slab_common.c
  create_cache():
    386: int err;   (local)
    391: err = -ENOMEM;
    403: err = init_memcg_params(s, root_cache);
    404: if (err)
    407: err = __kmem_cache_create(s, flags);
    408: if (err)
    415: if (err)
    416: return ERR_PTR(err);
  kmem_cache_create_usercopy():
    462: int err;   (local)
    470: err = kmem_cache_sanity_check(name, size);
  [all …]

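create_cache() returns ERR_PTR(err) on failure: a pointer-returning function encodes a negative errno in the pointer value instead of returning NULL plus a separate error code. The sketch below uses simplified userspace stand-ins for ERR_PTR()/IS_ERR()/PTR_ERR() to show the idiom; they are not the kernel's definitions.

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(). */
static inline void *ERR_PTR(long err)		{ return (void *)err; }
static inline long PTR_ERR(const void *ptr)	{ return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-4095;
}

static void *create_object(size_t size)
{
	void *obj = malloc(size);

	if (!obj)
		return ERR_PTR(-ENOMEM);	/* failure: errno lives in the pointer */
	return obj;				/* success: an ordinary pointer */
}

int main(void)
{
	void *obj = create_object(64);

	if (IS_ERR(obj)) {
		fprintf(stderr, "create_object: error %ld\n", PTR_ERR(obj));
		return 1;
	}
	free(obj);
	return 0;
}
```
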
percpu-vm.c
  pcpu_alloc_pages():
    96: goto err;
    101: err:
  pcpu_map_pages():
    217: int i, err;   (local)
    220: err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
    223: if (err < 0)
    224: goto err;
    231: err:
    239: return err;

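pcpu_map_pages() uses the common "goto err" unwind: on the first failing iteration, jump to a label that rolls back the iterations that already succeeded and then return the error. A minimal sketch of that shape, with hypothetical setup_one()/teardown_one() helpers:

```c
#include <errno.h>
#include <stdlib.h>

static int setup_one(void **slot)
{
	*slot = malloc(32);
	return *slot ? 0 : -ENOMEM;
}

static void teardown_one(void **slot)
{
	free(*slot);
	*slot = NULL;
}

static int setup_all(void **slots, int n)
{
	int i, err;

	for (i = 0; i < n; i++) {
		err = setup_one(&slots[i]);
		if (err < 0)
			goto err;
	}
	return 0;

err:
	while (--i >= 0)		/* undo only what already succeeded */
		teardown_one(&slots[i]);
	return err;
}

int main(void)
{
	void *slots[4] = { NULL };
	int i;

	if (setup_all(slots, 4))
		return 1;
	for (i = 0; i < 4; i++)
		teardown_one(&slots[i]);
	return 0;
}
```
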
swap_state.c
  add_to_swap():
    191: int err;   (local)
    211: err = add_to_swap_cache(page, entry,
    213: if (err)
  __read_swap_cache_async():
    365: int err;   (local)
    406: err = swapcache_prepare(entry);
    407: if (err == -EEXIST) {
    415: } else if (err) /* swp entry is obsolete ? */
    421: err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
    422: if (likely(!err)) {
    435: } while (err != -ENOMEM);
  [all …]

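__read_swap_cache_async() shows a racy-insert retry loop: claim the entry, treat -EEXIST as "someone else got there first, back off and retry", treat other errors as "the entry went away, stop", and keep looping until the insert succeeds or fails with a hard -ENOMEM. A rough userspace sketch of that control flow, with try_claim() and try_insert() as invented stand-ins:

```c
#include <errno.h>
#include <stdio.h>

/* Invented stand-ins: both return 0 on success or a negative errno. */
static int try_claim(int attempt)	{ return attempt < 2 ? -EEXIST : 0; }
static int try_insert(void)		{ return 0; }

static int read_entry(void)
{
	int err;
	int attempt = 0;

	do {
		err = try_claim(attempt++);
		if (err == -EEXIST) {
			/* a racing claimant is working on it; retry */
			continue;
		} else if (err) {
			/* the entry is gone; nothing left to do */
			return 0;
		}

		err = try_insert();
		if (!err)
			return 0;		/* success */

		/* insertion failed: retry unless we are truly out of memory */
	} while (err != -ENOMEM);

	return err;
}

int main(void)
{
	printf("read_entry() = %d\n", read_entry());
	return 0;
}
```
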
memory.c
  copy_pmd_range():
    881: int err;   (local)
    883: err = copy_huge_pmd(dst_mm, src_mm,
    885: if (err == -ENOMEM)
    887: if (!err)
  copy_pud_range():
    914: int err;   (local)
    917: err = copy_huge_pud(dst_mm, src_mm,
    919: if (err == -ENOMEM)
    921: if (!err)
  __vm_insert_mixed():
    1744: int err;   (local)
    1773: err = insert_page(vma, addr, page, pgprot);
  [all …]

backing-dev.c
  default_bdi_init():
    240: int err;   (local)
    247: err = bdi_init(&noop_backing_dev_info);
    249: return err;
  wb_init():
    286: int i, err;   (local)
    313: err = -ENOMEM;
    317: err = fprop_local_init_percpu(&wb->completions, gfp);
    318: if (err)
    322: err = percpu_counter_init(&wb->stat[i], 0, gfp);
    323: if (err)
    338: return err;
  [all …]

swapfile.c
  discard_swap():
    176: int err = 0;   (local)
    183: err = blkdev_issue_discard(si->bdev, start_block,
    185: if (err)
    186: return err;
    194: err = blkdev_issue_discard(si->bdev, start_block,
    196: if (err)
    201: return err; /* That will often be -EOPNOTSUPP */
  SYSCALL_DEFINE1():
    2525: int err, found = 0;   (local)
    2538: err = PTR_ERR(victim);
    2553: err = -EINVAL;
  [all …]

khugepaged.c
  scan_sleep_millisecs_store():
    129: int err;   (local)
    131: err = kstrtoul(buf, 10, &msecs);
    132: if (err || msecs > UINT_MAX)
  alloc_sleep_millisecs_store():
    157: int err;   (local)
    159: err = kstrtoul(buf, 10, &msecs);
    160: if (err || msecs > UINT_MAX)
  pages_to_scan_store():
    183: int err;   (local)
    186: err = kstrtoul(buf, 10, &pages);
    187: if (err || !pages || pages > UINT_MAX)
  khugepaged_max_ptes_none_store():
    251: int err;   (local)
  [all …]

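The khugepaged store handlers all share one parse-and-validate shape: convert the user-supplied string with kstrtoul(), then reject a conversion error and an out-of-range value in a single check. The userspace sketch below uses strtoul() behind a hypothetical parse_ulong() helper as a stand-in for kstrtoul(), which is kernel-only:

```c
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Rough userspace stand-in for kstrtoul(): returns 0 or a negative errno. */
static int parse_ulong(const char *buf, unsigned long *res)
{
	char *end;

	errno = 0;
	*res = strtoul(buf, &end, 10);
	if (errno || end == buf || (*end != '\0' && *end != '\n'))
		return -EINVAL;
	return 0;
}

/* Mirrors the store-handler shape: parse, then range-check in one test. */
static int millisecs_store(const char *buf)
{
	unsigned long msecs;
	int err;

	err = parse_ulong(buf, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	/* ... store msecs; a real sysfs handler would return the byte count ... */
	return 0;
}

int main(void)
{
	printf("%d\n", millisecs_store("1500\n"));
	printf("%d\n", millisecs_store("not-a-number\n"));
	return 0;
}
```
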
frame_vector.c
  get_vaddr_frames():
    40: int err;   (local)
    86: err = follow_pfn(vma, start, &nums[ret]);
    87: if (err) {
    89: ret = err;

slub.c
  init_cache_random_seq():
    1512: int err;   (local)
    1518: err = cache_random_seq_create(s, count, GFP_KERNEL);
    1519: if (err) {
    1522: return err;
  __kmem_cache_create():
    4303: int err;   (local)
    4305: err = kmem_cache_open(s, flags);
    4306: if (err)
    4307: return err;
    4314: err = sysfs_slab_add(s);
    4315: if (err)
  [all …]

mremap.c
  move_vma():
    332: int err;   (local)
    349: err = ksm_madvise(vma, old_addr, old_addr + old_len,
    351: if (err)
    352: return err;
    363: err = -ENOMEM;
    365: err = vma->vm_ops->mremap(new_vma);
    368: if (unlikely(err)) {
    379: new_addr = err;

cma.c
  cma_declare_contiguous():
    287: goto err;
    308: goto err;
    323: goto err;
    331: goto err;
    353: goto err;
    375: err:

page_idle.c
  page_idle_init():
    229: int err;   (local)
    231: err = sysfs_create_group(mm_kobj, &page_idle_attr_group);
    232: if (err) {
    234: return err;

hwpoison-inject.c
  hwpoison_inject():
    19: int err;   (local)
    50: err = hwpoison_filter(hpage);
    51: if (err)

mincore.c
  do_mincore():
    211: int err;   (local)
    222: err = walk_page_range(vma->vm_mm, addr, end, &mincore_walk_ops, vec);
    223: if (err < 0)
    224: return err;

page_owner.c
  print_page_owner():
    358: goto err;
    372: goto err;
    377: goto err;
    384: goto err;
    389: goto err;
    397: err:

hugetlb.c
  dequeue_huge_page_vma():
    957: goto err;
    961: goto err;
    974: err:
  __nr_hugepages_store_common():
    2598: int err;   (local)
    2622: err = set_max_huge_pages(h, count, nid, n_mask);
    2624: return err ? err : len;
  nr_hugepages_store_common():
    2634: int err;   (local)
    2636: err = kstrtoul(buf, 10, &count);
    2637: if (err)
    2638: return err;
  [all …]

huge_memory.c
  enabled_store():
    196: int err = start_stop_khugepaged();   (local)
    197: if (err)
    198: ret = err;
  hugepage_init_sysfs():
    352: int err;   (local)
    360: err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
    361: if (err) {
    366: err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
    367: if (err) {
    378: return err;
  hugepage_init():
    400: int err;   (local)
  [all …]

mmap.c
  vma_merge():
    1152: int err;   (local)
    1196: err = __vma_adjust(prev, prev->vm_start,
    1200: err = __vma_adjust(prev, prev->vm_start,
    1202: if (err)
    1218: err = __vma_adjust(prev, prev->vm_start,
    1221: err = __vma_adjust(area, addr, next->vm_end,
    1230: if (err)
  __split_vma():
    2673: int err;   (local)
    2676: err = vma->vm_ops->split(vma, addr);
    2677: if (err)
  [all …]

shmem.c
  shmem_alloc_and_acct_page():
    1511: int err = -ENOSPC;   (local)
    1530: err = -ENOMEM;
    1533: return ERR_PTR(err);
  shmem_fault():
    1996: int err;   (local)
    2066: err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
    2068: if (err)
    2069: return vmf_error(err);
  shmem_parse_options():
    3492: int err = security_sb_eat_lsm_opts(options, &fc->security);   (local)
    3493: if (err)
    3494: return err;
  [all …]