/mm/  (matches for gfp_mask)

mempool.c
  186  gfp_t gfp_mask, int node_id)    in mempool_create_node() argument
  189  pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);    in mempool_create_node()
  193  gfp_mask, node_id);    in mempool_create_node()
  211  element = pool->alloc(gfp_mask, pool->pool_data);    in mempool_create_node()
  312  void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)    in mempool_alloc() argument
  319  VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);    in mempool_alloc()
  320  might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);    in mempool_alloc()
  322  gfp_mask |= __GFP_NOMEMALLOC; /* don't allocate emergency reserves */    in mempool_alloc()
  323  gfp_mask |= __GFP_NORETRY; /* don't loop in __alloc_pages */    in mempool_alloc()
  324  gfp_mask |= __GFP_NOWARN; /* failures are OK */    in mempool_alloc()
  [all …]

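Lines 312-324 above show mempool_alloc() first hardening the caller's mask (no emergency reserves, no retry loops, no warnings) before trying the backing allocator, and only then falling back to the reserved elements. For context, a minimal caller-side sketch of the mempool API; the element type, cache name, reserve size, and GFP_NOIO context are assumptions made up for the example:

#include <linux/mempool.h>
#include <linux/slab.h>

/* Hypothetical element type and pool, purely for illustration. */
struct my_elem { int payload; };

static struct kmem_cache *my_cache;
static mempool_t *my_pool;

static int my_pool_init(void)
{
	my_cache = kmem_cache_create("my_elem", sizeof(struct my_elem),
				     0, 0, NULL);
	if (!my_cache)
		return -ENOMEM;

	/* Keep at least 4 elements in reserve for forward progress. */
	my_pool = mempool_create(4, mempool_alloc_slab, mempool_free_slab,
				 my_cache);
	if (!my_pool) {
		kmem_cache_destroy(my_cache);
		return -ENOMEM;
	}
	return 0;
}

static void my_pool_example(void)
{
	/*
	 * mempool_alloc() first tries the backing allocator with
	 * __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN OR'ed into this
	 * mask (lines 322-324 above) and only then uses the reserve.
	 */
	struct my_elem *e = mempool_alloc(my_pool, GFP_NOIO);

	if (e)
		mempool_free(e, my_pool);
}

A mask that allows direct reclaim lets mempool_alloc() sleep until an element is returned to the pool; non-sleeping masks make it return NULL once both the allocator and the reserve are exhausted.
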
page_owner.c
  24   gfp_t gfp_mask;    member
  173  depot_stack_handle_t handle, unsigned int order, gfp_t gfp_mask)    in __set_page_owner_handle() argument
  180  page_owner->gfp_mask = gfp_mask;    in __set_page_owner_handle()
  187  gfp_t gfp_mask)    in __set_page_owner() argument
  195  handle = save_stack(gfp_mask);    in __set_page_owner()
  196  __set_page_owner_handle(page_ext, handle, order, gfp_mask);    in __set_page_owner()
  238  new_page_owner->gfp_mask = old_page_owner->gfp_mask;    in __copy_page_owner()
  318  page_owner->gfp_mask);    in pagetypeinfo_showmixedcount_print()
  361  page_owner->order, page_owner->gfp_mask,    in print_page_owner()
  362  &page_owner->gfp_mask);    in print_page_owner()
  [all …]

page_alloc.c
  2892  static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)    in should_fail_alloc_page() argument
  2896  if (gfp_mask & __GFP_NOFAIL)    in should_fail_alloc_page()
  2898  if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))    in should_fail_alloc_page()
  2901  (gfp_mask & __GFP_DIRECT_RECLAIM))    in should_fail_alloc_page()
  2942  static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)    in should_fail_alloc_page() argument
  3097  get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,    in get_page_from_freelist() argument
  3115  !__cpuset_zone_allowed(zone, gfp_mask))    in get_page_from_freelist()
  3160  ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);    in get_page_from_freelist()
  3180  gfp_mask, alloc_flags, ac->migratetype);    in get_page_from_freelist()
  3182  prep_new_page(page, order, gfp_mask, alloc_flags);    in get_page_from_freelist()
  [all …]

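should_fail_alloc_page() and get_page_from_freelist() both branch on individual bits of the caller's mask (__GFP_NOFAIL, __GFP_HIGHMEM, __GFP_DIRECT_RECLAIM, cpuset checks). A sketch of the caller's side of that contract, using a hypothetical wrapper name:

#include <linux/gfp.h>

/* Hypothetical wrapper, for illustration only. */
static struct page *grab_pages(bool can_sleep, unsigned int order)
{
	/*
	 * GFP_KERNEL carries __GFP_DIRECT_RECLAIM, so a failed
	 * get_page_from_freelist() pass may be retried after reclaim or
	 * compaction; GFP_ATOMIC does not, which is also the bit the
	 * should_fail_alloc_page() check at line 2901 tests.
	 */
	gfp_t gfp = can_sleep ? GFP_KERNEL : GFP_ATOMIC;

	return alloc_pages(gfp, order);
}
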
vmscan.c
  70    gfp_t gfp_mask;    member
  461   static unsigned long shrink_slab(gfp_t gfp_mask, int nid,    in shrink_slab() argument
  488   .gfp_mask = gfp_mask,    in shrink_slab()
  1021  may_enter_fs = (sc->gfp_mask & __GFP_FS) ||    in shrink_page_list()
  1022  (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));    in shrink_page_list()
  1148  if (!(sc->gfp_mask & __GFP_IO))    in shrink_page_list()
  1290  if (!try_to_release_page(page, sc->gfp_mask))    in shrink_page_list()
  1387  .gfp_mask = GFP_KERNEL,    in reclaim_clean_pages_from_list()
  1691  if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))    in too_many_isolated()
  2552  if (sc->gfp_mask & __GFP_RETRY_MAYFAIL) {    in should_continue_reclaim()
  [all …]

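The sc->gfp_mask tests above decide whether reclaim may enter filesystems or start I/O for a given page, and shrink_slab() forwards the same mask to registered shrinkers through shrink_control. A rough sketch of a shrinker honouring it, with invented object accounting:

#include <linux/shrinker.h>
#include <linux/atomic.h>
#include <linux/gfp.h>

/* Invented accounting for a hypothetical object cache. */
static atomic_long_t my_cached_objects = ATOMIC_LONG_INIT(0);

static unsigned long my_count(struct shrinker *s, struct shrink_control *sc)
{
	return atomic_long_read(&my_cached_objects);
}

static unsigned long my_scan(struct shrinker *s, struct shrink_control *sc)
{
	/*
	 * sc->gfp_mask is the mask of the allocation that triggered reclaim,
	 * propagated through shrink_slab() above; back off if freeing our
	 * objects would need filesystem re-entry that the mask forbids.
	 */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	/* ... free up to sc->nr_to_scan objects here, return how many ... */
	return 0;
}

static struct shrinker my_shrinker = {
	.count_objects = my_count,
	.scan_objects  = my_scan,
	.seeks         = DEFAULT_SEEKS,
};

/* register_shrinker(&my_shrinker) at init, unregister_shrinker() on teardown. */
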
swap_state.c
  160  int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)    in add_to_swap_cache() argument
  164  error = radix_tree_maybe_preload_order(gfp_mask, compound_order(page));    in add_to_swap_cache()
  364  struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,    in __read_swap_cache_async() argument
  398  new_page = alloc_page_vma(gfp_mask, vma, addr);    in __read_swap_cache_async()
  406  err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);    in __read_swap_cache_async()
  463  struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,    in read_swap_cache_async() argument
  467  struct page *retpage = __read_swap_cache_async(entry, gfp_mask,    in read_swap_cache_async()
  555  struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,    in swapin_readahead() argument
  582  gfp_mask, vma, addr, &page_allocated);    in swapin_readahead()
  599  return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);    in swapin_readahead()
  [all …]

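The mask threaded through these helpers comes from the fault and readahead paths and ends up at alloc_page_vma() on line 398. A sketch of the calling convention, assuming a faulting vma and address and omitting all surrounding fault handling:

#include <linux/swap.h>
#include <linux/mm.h>

/*
 * Illustrative only: pages that will back a user mapping may live in
 * highmem and should be movable, hence GFP_HIGHUSER_MOVABLE.
 */
static struct page *swapin_one(swp_entry_t entry, struct vm_area_struct *vma,
			       unsigned long address)
{
	return read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
				     vma, address, false);
}
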
vmalloc.c
  405  int node, gfp_t gfp_mask)    in alloc_vmap_area() argument
  420  gfp_mask & GFP_RECLAIM_MASK, node);    in alloc_vmap_area()
  428  kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);    in alloc_vmap_area()
  529  if (gfpflags_allow_blocking(gfp_mask)) {    in alloc_vmap_area()
  538  if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())    in alloc_vmap_area()
  866  static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)    in new_vmap_block() argument
  878  gfp_mask & GFP_RECLAIM_MASK, node);    in new_vmap_block()
  884  node, gfp_mask);    in new_vmap_block()
  890  err = radix_tree_preload(gfp_mask);    in new_vmap_block()
  982  static void *vb_alloc(unsigned long size, gfp_t gfp_mask)    in vb_alloc() argument
  [all …]

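alloc_vmap_area() and new_vmap_block() only forward the GFP_RECLAIM_MASK bits of the caller's mask to their metadata allocations, so a restricted mask handed to __vmalloc() is respected throughout. A minimal sketch, assuming the three-argument __vmalloc() of this kernel generation (compare the nommu.c entry below) and a hypothetical helper name:

#include <linux/vmalloc.h>
#include <linux/mm.h>

/*
 * Hypothetical helper: a large allocation from a context that must not
 * recurse into filesystems, so __GFP_FS is left out of the mask.
 */
static void *grab_big_buffer_nofs(unsigned long bytes)
{
	return __vmalloc(bytes, GFP_NOFS | __GFP_HIGHMEM, PAGE_KERNEL);
}
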
oom_kill.c
  239   enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask);    in constrained_alloc()
  261   if (oc->gfp_mask & __GFP_THISNODE)    in constrained_alloc()
  280   if (!cpuset_zone_allowed(zone, oc->gfp_mask))    in constrained_alloc()
  411   current->comm, oc->gfp_mask, &oc->gfp_mask);    in dump_header()
  1056  if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc))    in out_of_memory()
  1106  .gfp_mask = 0,    in pagefault_out_of_memory()

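dump_header() on line 411 passes oc->gfp_mask twice, once by value for %#x and once by address for the %pGg printk extension, which decodes a gfp_t into symbolic flag names. A tiny sketch of the same idiom (function name and message text invented):

#include <linux/printk.h>
#include <linux/gfp.h>

static void report_failed_mask(unsigned int order, gfp_t gfp_mask)
{
	/* %pGg takes a pointer to the mask and prints e.g. GFP_KERNEL|__GFP_NOWARN */
	pr_warn("allocation failure: order:%u, mask:%#x(%pGg)\n",
		order, gfp_mask, &gfp_mask);
}
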
hugetlb.c
  891   static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,    in dequeue_huge_page_nodemask() argument
  900   zonelist = node_zonelist(nid, gfp_mask);    in dequeue_huge_page_nodemask()
  904   for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {    in dequeue_huge_page_nodemask()
  907   if (!cpuset_zone_allowed(zone, gfp_mask))    in dequeue_huge_page_nodemask()
  943   gfp_t gfp_mask;    in dequeue_huge_page_vma() local
  960   gfp_mask = htlb_alloc_mask(h);    in dequeue_huge_page_vma()
  961   nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);    in dequeue_huge_page_vma()
  962   page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);    in dequeue_huge_page_vma()
  1070  unsigned long nr_pages, gfp_t gfp_mask)    in __alloc_gigantic_page() argument
  1074  gfp_mask);    in __alloc_gigantic_page()
  [all …]

filemap.c
  706  int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)    in replace_page_cache_page() argument
  714  error = radix_tree_preload(gfp_mask & GFP_RECLAIM_MASK);    in replace_page_cache_page()
  753  pgoff_t offset, gfp_t gfp_mask,    in __add_to_page_cache_locked() argument
  765  gfp_mask, &memcg, false);    in __add_to_page_cache_locked()
  770  error = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK);    in __add_to_page_cache_locked()
  816  pgoff_t offset, gfp_t gfp_mask)    in add_to_page_cache_locked() argument
  819  gfp_mask, NULL);    in add_to_page_cache_locked()
  824  pgoff_t offset, gfp_t gfp_mask)    in add_to_page_cache_lru() argument
  831  gfp_mask, &shadow);    in add_to_page_cache_lru()
  844  if (!(gfp_mask & __GFP_WRITE) && shadow)    in add_to_page_cache_lru()
  [all …]

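add_to_page_cache_lru() and add_to_page_cache_locked() take the mask from the filesystem rather than hard-coding one. A short sketch of the usual caller pattern, with a hypothetical helper, using mapping_gfp_constraint() to drop bits the calling context cannot tolerate:

#include <linux/pagemap.h>

/* Hypothetical helper: insert an already-allocated page at @index. */
static int insert_cache_page(struct address_space *mapping,
			     struct page *page, pgoff_t index)
{
	/* Honour the mapping's mask but never allow FS re-entry here. */
	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL & ~__GFP_FS);

	return add_to_page_cache_lru(page, mapping, index, gfp);
}
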
compaction.c
  822   if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))    in isolate_migratepages_block()
  1532  cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask);    in compact_zone()
  1690  gfp_t gfp_mask, enum compact_priority prio,    in compact_zone_order() argument
  1696  .gfp_mask = gfp_mask,    in compact_zone_order()
  1728  enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,    in try_to_compact_pages() argument
  1732  int may_perform_io = gfp_mask & __GFP_IO;    in try_to_compact_pages()
  1744  trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);    in try_to_compact_pages()
  1757  status = compact_zone_order(zone, order, gfp_mask, prio,    in try_to_compact_pages()
  1808  .gfp_mask = GFP_KERNEL,    in compact_node()
  1930  .gfp_mask = GFP_KERNEL,    in kcompactd_do_work()

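compact_zone() on line 1532 derives the migratetype compaction works toward from the allocation mask via gfpflags_to_migratetype(). A small sketch of that mapping (helper function invented):

#include <linux/gfp.h>
#include <linux/mmzone.h>
#include <linux/printk.h>

static void migratetype_example(void)
{
	/*
	 * Masks without mobility bits map to MIGRATE_UNMOVABLE; userspace
	 * masks carrying __GFP_MOVABLE normally map to MIGRATE_MOVABLE,
	 * which is what cc->migratetype typically ends up as for direct
	 * compaction triggered from the page allocator.
	 */
	int kernel_mt = gfpflags_to_migratetype(GFP_KERNEL);
	int user_mt = gfpflags_to_migratetype(GFP_HIGHUSER_MOVABLE);

	pr_info("kernel=%d user=%d\n", kernel_mt, user_mt);
}
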
memcontrol.c
  1261  static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,    in mem_cgroup_out_of_memory() argument
  1268  .gfp_mask = gfp_mask,    in mem_cgroup_out_of_memory()
  1375  gfp_t gfp_mask,    in mem_cgroup_soft_reclaim() argument
  1414  total += mem_cgroup_shrink_node(victim, gfp_mask, false,    in mem_cgroup_soft_reclaim()
  1888  gfp_t gfp_mask)    in reclaim_high() argument
  1894  try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);    in reclaim_high()
  1924  static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,    in try_charge() argument
  1981  if (!gfpflags_allow_blocking(gfp_mask))    in try_charge()
  1987  gfp_mask, may_swap);    in try_charge()
  1998  if (gfp_mask & __GFP_NORETRY)    in try_charge()
  [all …]

readahead.c
  161  gfp_t gfp_mask = readahead_gfp_mask(mapping);    in __do_page_cache_readahead() local
  183  page = __page_cache_alloc(gfp_mask);    in __do_page_cache_readahead()
  199  read_pages(mapping, filp, &page_pool, ret, gfp_mask);    in __do_page_cache_readahead()

cma.c
  418  gfp_t gfp_mask)    in cma_alloc() argument
  464  gfp_mask);    in cma_alloc()
  483  if (ret && !(gfp_mask & __GFP_NOWARN)) {    in cma_alloc()

backing-dev.c
  855  struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id)    in bdi_alloc_node() argument
  860  gfp_mask | __GFP_ZERO, node_id);    in bdi_alloc_node()

internal.h
  194  const gfp_t gfp_mask; /* gfp mask of a direct compactor */    member

nommu.c
  230  void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)    in __vmalloc() argument
  236  return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);    in __vmalloc()

swapfile.c
  3514  int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)    in add_swap_count_continuation() argument
  3528  page = alloc_page(gfp_mask | __GFP_HIGHMEM);    in add_swap_count_continuation()

memory.c
  4041  .gfp_mask = __get_fault_gfp_mask(vma),    in __handle_mm_fault()