/mm/ |
D | gup.c |
     950  unsigned long address, unsigned int *flags, int *locked)  in faultin_page() argument
     964  if (locked)  in faultin_page()
     986  if (locked && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))  in faultin_page()
     987  *locked = 0;  in faultin_page()
    1121  struct vm_area_struct **vmas, int *locked)  in __get_user_pages() argument
    1171  gup_flags, locked);  in __get_user_pages()
    1172  if (locked && *locked == 0) {  in __get_user_pages()
    1197  ret = faultin_page(vma, start, &foll_flags, locked);  in __get_user_pages()
    1351  int *locked,  in __get_user_pages_locked() argument
    1357  if (locked) {  in __get_user_pages_locked()
    [all …]
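
These gup.c hits show the convention behind the `int *locked` parameter: the caller enters with mmap_lock held and `*locked == 1`; if faultin_page() has to drop the lock to wait for I/O, it sets `*locked = 0` so the caller knows the lock was lost and must be re-taken before any state is trusted again. A minimal standalone model of that hand-back convention, using a pthread mutex; the names (do_work_locked, big_lock) are illustrative, not kernel API:

/*
 * Standalone model of the gup-style "*locked" hand-back: the callee
 * may drop the caller's lock to sleep, and reports that through
 * *locked so the caller can re-take it and revalidate.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

/* Called with big_lock held; may drop it, reporting via *locked. */
static int do_work_locked(int must_sleep, int *locked)
{
	if (must_sleep) {
		/* Cannot sleep with the lock held: drop it first. */
		pthread_mutex_unlock(&big_lock);
		*locked = 0;
		usleep(1000);		/* stand-in for waiting on I/O */
	}
	return 0;
}

int main(void)
{
	int locked = 1;

	pthread_mutex_lock(&big_lock);
	do_work_locked(1, &locked);
	if (!locked) {
		/* Lock was dropped: re-take it and revalidate state. */
		pthread_mutex_lock(&big_lock);
		locked = 1;
		printf("lock was dropped and re-taken; state must be revalidated\n");
	}
	pthread_mutex_unlock(&big_lock);
	return 0;
}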
|
D | mlock.c |
     650  unsigned long locked;  in do_mlock() local
     664  locked = len >> PAGE_SHIFT;  in do_mlock()
     669  locked += current->mm->locked_vm;  in do_mlock()
     670  if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) {  in do_mlock()
     677  locked -= count_mm_mlocked_page_nr(current->mm,  in do_mlock()
     682  if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))  in do_mlock()
     824  unsigned long lock_limit, locked;  in user_shm_lock() local
     828  locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;  in user_shm_lock()
     833  memlock = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);  in user_shm_lock()
     836  dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);  in user_shm_lock()
    [all …]
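
do_mlock() converts the request to pages (`len >> PAGE_SHIFT`), adds the pages already charged to `current->mm->locked_vm`, and rejects the call when the total exceeds RLIMIT_MEMLOCK unless the task has CAP_IPC_LOCK. That check is observable from userspace; a small probe, assuming a finite soft limit (the behavior when the limit is exceeded is ENOMEM per mlock(2)):

/* Userspace probe of the RLIMIT_MEMLOCK check that do_mlock() enforces. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;
	size_t len;
	void *buf;

	if (getrlimit(RLIMIT_MEMLOCK, &rl) != 0) {
		perror("getrlimit");
		return 1;
	}
	if (rl.rlim_cur == RLIM_INFINITY) {
		printf("RLIMIT_MEMLOCK is unlimited; nothing to trip\n");
		return 0;
	}
	printf("RLIMIT_MEMLOCK soft limit: %llu bytes\n",
	       (unsigned long long)rl.rlim_cur);

	/* Ask for twice the limit: should fail with ENOMEM unless we
	   hold CAP_IPC_LOCK (cf. the capable() escape in do_mlock()). */
	len = (size_t)rl.rlim_cur * 2;
	buf = malloc(len);
	if (!buf)
		return 1;
	if (mlock(buf, len) != 0)
		printf("mlock(%zu) failed as expected: %s\n",
		       len, strerror(errno));
	else
		printf("mlock succeeded (CAP_IPC_LOCK?)\n");
	free(buf);
	return 0;
}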
|
D | mremap.c |
     619  bool *locked, unsigned long flags,  in move_vma() argument
     751  *locked = true;  in move_vma()
     815  unsigned long locked, lock_limit;  in vma_to_resize() local
     816  locked = mm->locked_vm << PAGE_SHIFT;  in vma_to_resize()
     818  locked += new_len - old_len;  in vma_to_resize()
     819  if (locked > lock_limit && !capable(CAP_IPC_LOCK))  in vma_to_resize()
     838  unsigned long new_addr, unsigned long new_len, bool *locked,  in mremap_to() argument
     918  ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,  in mremap_to()
     959  bool locked = false;  in SYSCALL_DEFINE5() local
    1011  &locked, flags, &uf, &uf_unmap_early,  in SYSCALL_DEFINE5()
    [all …]
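
vma_to_resize() repeats the same accounting when a VM_LOCKED mapping grows: the current locked_vm in bytes plus the growth (`new_len - old_len`) is checked against the rlimit, and move_vma() sets `*locked = true` so the syscall can mlock the moved range afterwards. Growing an mlocked mapping with mremap(2) exercises exactly this path; a minimal demo, assuming the rlimit permits 2 MiB:

/* Grow an mlock()ed anonymous mapping; the kernel re-checks
   RLIMIT_MEMLOCK in vma_to_resize() before allowing the growth. */
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t old_len = 1 << 20, new_len = 2 << 20;	/* 1 MiB -> 2 MiB */
	void *p, *q;

	p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_LOCKED, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap(MAP_LOCKED)");	/* likely RLIMIT_MEMLOCK */
		return 1;
	}
	q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
	if (q == MAP_FAILED)
		printf("mremap failed: %s (locked_vm would exceed the rlimit?)\n",
		       strerror(errno));
	else
		printf("grown to %zu bytes; the new tail is mlocked too\n", new_len);
	return 0;
}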
|
D | compaction.c |
     530  unsigned long flags, bool *locked, struct compact_control *cc)  in compact_unlock_should_abort() argument
     532  if (*locked) {  in compact_unlock_should_abort()
     534  *locked = false;  in compact_unlock_should_abort()
     562  bool locked = false;  in isolate_freepages_block() local
     584  &locked, cc))  in isolate_freepages_block()
     615  if (!locked) {  in isolate_freepages_block()
     616  locked = compact_lock_irqsave(&cc->zone->lock,  in isolate_freepages_block()
     652  if (locked)  in isolate_freepages_block()
     829  struct lruvec *locked = NULL;  in isolate_migratepages_block()  local
     898  if (locked) {  in isolate_migratepages_block()
    [all …]
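
compact_unlock_should_abort() is the batching half of a lock-latency idiom: isolate_freepages_block() takes `cc->zone->lock` lazily via compact_lock_irqsave(), works on a batch of pages, and periodically drops the spinlock (IRQs off in the real code) so it never holds it for too long, tracking the state in a local `bool locked`. (isolate_migratepages_block() does the same with a `struct lruvec *locked` that records *which* lruvec lock is held.) A standalone sketch of the idiom with a pthread mutex; BATCH and the loop body are made up:

/* Lock-batching idiom from isolate_freepages_block(): take the lock
   lazily, and drop it every BATCH items so other contenders can run. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define BATCH 32

static pthread_mutex_t zone_lock = PTHREAD_MUTEX_INITIALIZER;

static void scan(int nr_items)
{
	bool locked = false;

	for (int i = 0; i < nr_items; i++) {
		/* Periodically release the lock, like
		   compact_unlock_should_abort() does. */
		if (locked && i % BATCH == 0) {
			pthread_mutex_unlock(&zone_lock);
			locked = false;
		}
		/* Take the lock only when we actually need it. */
		if (!locked) {
			pthread_mutex_lock(&zone_lock);
			locked = true;
		}
		/* ... isolate item i under the lock ... */
	}
	if (locked)
		pthread_mutex_unlock(&zone_lock);
}

int main(void)
{
	scan(1000);
	printf("done\n");
	return 0;
}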
|
D | process_vm_access.c |
      96  int locked = 1;  in process_vm_rw_single_vec() local
     107  NULL, &locked);  in process_vm_rw_single_vec()
     108  if (locked)  in process_vm_rw_single_vec()
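
process_vm_rw_single_vec() starts with `int locked = 1`, passes `&locked` into the remote page-pinning call, and afterwards drops mmap_lock only if `locked` is still set, i.e. only if the callee did not already drop it. From userspace this machinery backs process_vm_readv(2)/process_vm_writev(2); a trivial self-read demo:

/* process_vm_readv() demo: read this process's own memory through
   the remote-access path that process_vm_rw_single_vec() implements. */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char src[] = "hello from the remote side";
	char dst[sizeof(src)];
	struct iovec local = { .iov_base = dst, .iov_len = sizeof(dst) };
	struct iovec remote = { .iov_base = src, .iov_len = sizeof(src) };

	ssize_t n = process_vm_readv(getpid(), &local, 1, &remote, 1, 0);
	if (n < 0) {
		perror("process_vm_readv");
		return 1;
	}
	printf("read %zd bytes: \"%s\"\n", n, dst);
	return 0;
}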
|
D | z3fold.c |
     263  int locked = 0;  in get_z3fold_header() local
     273  locked = z3fold_page_trylock(zhdr);  in get_z3fold_header()
     275  if (locked)  in get_z3fold_header()
     504  static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)  in __release_z3fold_page() argument
     517  if (locked)  in __release_z3fold_page()
     802  static void do_compact_page(struct z3fold_header *zhdr, bool locked)  in do_compact_page() argument
     808  if (locked)  in do_compact_page()
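
get_z3fold_header() acquires the page lock with z3fold_page_trylock() in a retry loop rather than a blocking lock, because the header it is locking can be freed out from under a blocked waiter; between attempts the real code revalidates the object. A standalone sketch of trylock-and-retry with pthread_mutex_trylock(); the yield-based backoff is illustrative:

/* Trylock-and-retry idiom from get_z3fold_header(): attempt the
   lock without blocking, back off, and retry on failure. */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;

static int lock_with_retry(void)
{
	int locked;

	do {
		locked = (pthread_mutex_trylock(&page_lock) == 0);
		if (!locked)
			sched_yield();	/* real code revalidates the object here */
	} while (!locked);
	return locked;
}

int main(void)
{
	if (lock_with_retry()) {
		printf("lock acquired\n");
		pthread_mutex_unlock(&page_lock);
	}
	return 0;
}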
|
D | rmap.c |
    2321  bool locked)  in rmap_walk_anon() argument
    2327  if (locked) {  in rmap_walk_anon()
    2356  if (!locked)  in rmap_walk_anon()
    2374  bool locked)  in rmap_walk_file() argument
    2393  if (!locked) {  in rmap_walk_file()
    2422  if (!locked)  in rmap_walk_file()
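
rmap_walk_anon() and rmap_walk_file() take `bool locked` with the opposite meaning from gup's pointer: here it tells the walk that the caller *already* holds the anon_vma or i_mmap lock, so both the acquire and the release are skipped. A standalone sketch of that "caller may already hold the lock" convention; the rwlock and names are illustrative:

/* "Caller may already hold the lock" convention from rmap_walk_*():
   take and drop the lock only when the caller did not (locked == false). */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t mapping_lock = PTHREAD_RWLOCK_INITIALIZER;

static void walk(bool locked)
{
	if (!locked)
		pthread_rwlock_rdlock(&mapping_lock);

	/* ... visit every mapping under the lock ... */
	printf("walking (lock %s by caller)\n",
	       locked ? "held" : "taken here");

	if (!locked)
		pthread_rwlock_unlock(&mapping_lock);
}

int main(void)
{
	walk(false);				/* walk takes the lock itself */

	pthread_rwlock_rdlock(&mapping_lock);	/* caller takes it */
	walk(true);
	pthread_rwlock_unlock(&mapping_lock);
	return 0;
}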
|
D | internal.h |
     356  unsigned long start, unsigned long end, int *locked);
     359  bool write, int *locked);
|
D | madvise.c |
     864  int locked = 1;  in madvise_populate() local
     883  &locked);  in madvise_populate()
     884  if (!locked) {  in madvise_populate()
     886  locked = 1;  in madvise_populate()
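
madvise_populate() loops on the faultin helper (the `int *locked` prototype at internal.h:359), and whenever the helper dropped mmap_lock (`!locked`) it re-takes the lock and resets `locked = 1` before retrying. Userspace reaches this path through MADV_POPULATE_READ/MADV_POPULATE_WRITE, added in Linux 5.14; the fallback #define below uses the uapi value in case the libc headers predate it:

/* MADV_POPULATE_READ demo: prefault a mapping through
   madvise_populate()'s lock-drop-and-retry loop (Linux 5.14+). */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

#ifndef MADV_POPULATE_READ
#define MADV_POPULATE_READ 22	/* uapi value; older libc headers lack it */
#endif

int main(void)
{
	size_t len = 1 << 20;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	if (madvise(p, len, MADV_POPULATE_READ) != 0) {
		perror("madvise(MADV_POPULATE_READ)");	/* EINVAL before 5.14 */
		return 1;
	}
	printf("1 MiB prefaulted\n");
	return 0;
}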
|
D | mmap.c |
    1367  unsigned long locked, lock_limit;  in mlock_future_check() local
    1371  locked = len >> PAGE_SHIFT;  in mlock_future_check()
    1372  locked += mm->locked_vm;  in mlock_future_check()
    1375  if (locked > lock_limit && !capable(CAP_IPC_LOCK))  in mlock_future_check()
    2369  unsigned long locked;  in acct_stack_growth() local
    2371  locked = mm->locked_vm + grow;  in acct_stack_growth()
    2374  if (locked > limit && !capable(CAP_IPC_LOCK))  in acct_stack_growth()
|
D | memcontrol.c |
    1848  bool locked;  in mem_cgroup_oom() local
    1886  locked = mem_cgroup_oom_trylock(memcg);  in mem_cgroup_oom()
    1888  if (locked)  in mem_cgroup_oom()
    1897  if (locked)  in mem_cgroup_oom()
    1924  bool locked;  in mem_cgroup_oom_synchronize() local
    1942  locked = mem_cgroup_oom_trylock(memcg);  in mem_cgroup_oom_synchronize()
    1944  if (locked)  in mem_cgroup_oom_synchronize()
    1947  if (locked && !memcg->oom_kill_disable) {  in mem_cgroup_oom_synchronize()
    1958  if (locked) {  in mem_cgroup_oom_synchronize()
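
Here the trylock is an election: mem_cgroup_oom_trylock() picks exactly one task to run the memcg OOM killer, and the losers wait for the winner instead of piling on with redundant kills. A standalone sketch of trylock-as-election with pthreads; the thread count and the "recovery" body are made up, and the real code parks losers on an OOM waitqueue rather than returning:

/* Trylock-as-election, as in mem_cgroup_oom_trylock(): exactly one
   contender wins and performs the expensive recovery; the rest skip it. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t oom_lock = PTHREAD_MUTEX_INITIALIZER;

static void *task(void *arg)
{
	bool locked = (pthread_mutex_trylock(&oom_lock) == 0);

	if (locked) {
		printf("task %ld won the election: running recovery\n",
		       (long)arg);
		/* ... pick a victim / reclaim ... */
		pthread_mutex_unlock(&oom_lock);
	} else {
		printf("task %ld lost: waiting for the winner\n", (long)arg);
		/* The real code sleeps on an OOM waitqueue here. */
	}
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (long i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, task, (void *)i);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	return 0;
}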
|
D | memory.c |
    2926  bool locked = false;  in cow_user_page() local
    2956  locked = true;  in cow_user_page()
    2979  if (locked)  in cow_user_page()
    2984  locked = true;  in cow_user_page()
    3010  if (locked)  in cow_user_page()
    3718  int locked;  in do_swap_page() local
    3851  locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);  in do_swap_page()
    3854  if (!locked) {  in do_swap_page()
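
In do_swap_page(), lock_page_or_retry() embodies "don't sleep on an inner lock while holding the outer one": rather than blocking on the page lock with mmap_lock held, it can give up (returning `locked == 0`), and the fault is retried from the top. A standalone sketch of that idiom under stated assumptions: two pthread mutexes stand in for mmap_lock and the page lock, and the helper name mirrors the kernel's but is not its implementation:

/* lock_page_or_retry()-style idiom: never block on the inner lock
   while holding the outer one; on contention, drop the outer lock
   and report a retry. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mmap_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns 1 with both locks held, or 0 with the outer lock released. */
static int lock_page_or_retry(void)
{
	if (pthread_mutex_trylock(&page_lock) == 0)
		return 1;
	/* Contended: drop the outer lock instead of sleeping under it. */
	pthread_mutex_unlock(&mmap_lock);
	return 0;
}

int main(void)
{
	pthread_mutex_lock(&mmap_lock);
	int locked = lock_page_or_retry();
	if (!locked) {
		printf("retry: outer lock dropped\n");
		return 0;
	}
	/* ... fault handling with both locks held ... */
	pthread_mutex_unlock(&page_lock);
	pthread_mutex_unlock(&mmap_lock);
	return 0;
}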
|
D | mempolicy.c |
     904  int locked = 1;  in lookup_node() local
     905  err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);  in lookup_node()
     910  if (locked)  in lookup_node()
|
D | migrate.c |
     272  void remove_migration_ptes(struct page *old, struct page *new, bool locked)  in remove_migration_ptes() argument
     279  if (locked)  in remove_migration_ptes()
|
D | hugetlb.c |
    5461  long i, unsigned int flags, int *locked)  in follow_hugetlb_page() argument
    5532  if (locked)  in follow_hugetlb_page()
    5552  if (locked &&  in follow_hugetlb_page()
    5554  *locked = 0;  in follow_hugetlb_page()
|
D | Kconfig |
     171  # a per-page lock leads to problems when multiple tables need to be locked
|