
Searched refs:gfp_mask (Results 1 – 19 of 19) sorted by relevance

/mm/
mempool.c
182 gfp_t gfp_mask, int node_id) in mempool_init_node() argument
192 gfp_mask, node_id); in mempool_init_node()
202 element = pool->alloc(gfp_mask, pool->pool_data); in mempool_init_node()
263 gfp_t gfp_mask, int node_id) in mempool_create_node() argument
267 pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id); in mempool_create_node()
272 gfp_mask, node_id)) { in mempool_create_node()
375 void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) in mempool_alloc() argument
382 VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO); in mempool_alloc()
383 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM); in mempool_alloc()
385 gfp_mask |= __GFP_NOMEMALLOC; /* don't allocate emergency reserves */ in mempool_alloc()
[all …]
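The mempool.c hits above show the caller's gfp_mask flowing into pool->alloc(), with mempool_alloc() ORing in __GFP_NOMEMALLOC itself (line 385). Below is a minimal, hedged sketch of the caller side; the demo_ names, element size, and the GFP_NOIO choice are illustrative assumptions, not taken from the source.

```c
/*
 * Sketch only: how a gfp_mask typically reaches mempool_alloc().
 * The demo_ identifiers and sizes are hypothetical.
 */
#include <linux/mempool.h>
#include <linux/slab.h>

static struct kmem_cache *demo_cache;
static mempool_t *demo_pool;

static int demo_pool_init(void)
{
	demo_cache = kmem_cache_create("demo_elem", 256, 0, 0, NULL);
	if (!demo_cache)
		return -ENOMEM;

	/* Keep at least 16 pre-allocated elements in reserve for forward progress. */
	demo_pool = mempool_create_slab_pool(16, demo_cache);
	if (!demo_pool) {
		kmem_cache_destroy(demo_cache);
		return -ENOMEM;
	}
	return 0;
}

static void *demo_get_element(void)
{
	/* GFP_NOIO: usable from contexts that must not recurse into I/O. */
	return mempool_alloc(demo_pool, GFP_NOIO);
}

static void demo_put_element(void *elem)
{
	mempool_free(elem, demo_pool);
}
```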
page_owner.c
25 gfp_t gfp_mask; member
164 unsigned int order, gfp_t gfp_mask) in __set_page_owner_handle() argument
173 page_owner->gfp_mask = gfp_mask; in __set_page_owner_handle()
183 gfp_t gfp_mask) in __set_page_owner() argument
191 handle = save_stack(gfp_mask); in __set_page_owner()
192 __set_page_owner_handle(page, page_ext, handle, order, gfp_mask); in __set_page_owner()
235 new_page_owner->gfp_mask = old_page_owner->gfp_mask; in __copy_page_owner()
316 page_owner->gfp_mask); in pagetypeinfo_showmixedcount_print()
354 page_owner->order, page_owner->gfp_mask, in print_page_owner()
355 &page_owner->gfp_mask); in print_page_owner()
[all …]
page_alloc.c
3343 static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in __should_fail_alloc_page() argument
3347 if (gfp_mask & __GFP_NOFAIL) in __should_fail_alloc_page()
3349 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) in __should_fail_alloc_page()
3352 (gfp_mask & __GFP_DIRECT_RECLAIM)) in __should_fail_alloc_page()
3383 static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in __should_fail_alloc_page() argument
3390 static noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
3392 return __should_fail_alloc_page(gfp_mask, order); in should_fail_alloc_page()
3548 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) in alloc_flags_nofragment() argument
3552 if (gfp_mask & __GFP_KSWAPD_RECLAIM) in alloc_flags_nofragment()
3581 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, in get_page_from_freelist() argument
[all …]
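These page_alloc.c paths all key off individual bits in the caller's gfp_mask (__GFP_NOFAIL, __GFP_DIRECT_RECLAIM, __GFP_KSWAPD_RECLAIM, __GFP_HIGHMEM). As a hedged illustration of the caller side only, the fragment below allocates and frees an order-2 block; the flag combination and demo_ names are assumptions for the example.

```c
/*
 * Sketch only: a typical gfp_mask handed to the page allocator.
 * GFP_KERNEL already contains __GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM;
 * __GFP_NOWARN suppresses the failure warning, so the caller must handle NULL.
 */
#include <linux/gfp.h>

static struct page *demo_alloc_block(void)
{
	gfp_t gfp_mask = GFP_KERNEL | __GFP_NOWARN;

	return alloc_pages(gfp_mask, 2);	/* 2^2 = 4 contiguous pages, may be NULL */
}

static void demo_free_block(struct page *page)
{
	if (page)
		__free_pages(page, 2);
}
```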
vmscan.c
114 gfp_t gfp_mask; member
593 static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, in shrink_slab_memcg() argument
613 .gfp_mask = gfp_mask, in shrink_slab_memcg()
668 static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, in shrink_slab_memcg() argument
695 static unsigned long shrink_slab(gfp_t gfp_mask, int nid, in shrink_slab() argument
710 return shrink_slab_memcg(gfp_mask, nid, memcg, priority); in shrink_slab()
717 .gfp_mask = gfp_mask, in shrink_slab()
1163 may_enter_fs = (sc->gfp_mask & __GFP_FS) || in shrink_page_list()
1164 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); in shrink_page_list()
1290 if (!(sc->gfp_mask & __GFP_IO)) in shrink_page_list()
[all …]
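shrink_slab() and shrink_slab_memcg() above forward the gfp_mask into a shrink_control, which is how individual shrinkers learn the allocation context they are being invoked under. A hedged sketch of that consumer side, with hypothetical demo_ names:

```c
/*
 * Sketch only: a shrinker reading the gfp_mask propagated by shrink_slab()
 * through struct shrink_control. Object counts are made up.
 */
#include <linux/shrinker.h>
#include <linux/gfp.h>

static unsigned long demo_count(struct shrinker *s, struct shrink_control *sc)
{
	return 128;	/* pretend 128 objects are reclaimable */
}

static unsigned long demo_scan(struct shrinker *s, struct shrink_control *sc)
{
	/* Respect the allocation context: bail out if FS recursion is not allowed. */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	/* ... free up to sc->nr_to_scan objects here ... */
	return sc->nr_to_scan;
}

static struct shrinker demo_shrinker = {
	.count_objects = demo_count,
	.scan_objects  = demo_scan,
	.seeks         = DEFAULT_SEEKS,
};

/* register_shrinker(&demo_shrinker) would wire this in at init time. */
```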
swap_state.c
359 struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, in __read_swap_cache_async() argument
398 new_page = alloc_page_vma(gfp_mask, vma, addr); in __read_swap_cache_async()
421 err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL); in __read_swap_cache_async()
448 struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, in read_swap_cache_async() argument
452 struct page *retpage = __read_swap_cache_async(entry, gfp_mask, in read_swap_cache_async()
539 struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask, in swap_cluster_readahead() argument
578 gfp_mask, vma, addr, &page_allocated); in swap_cluster_readahead()
594 return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll); in swap_cluster_readahead()
722 static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask, in swap_vma_readahead() argument
749 page = __read_swap_cache_async(entry, gfp_mask, vma, in swap_vma_readahead()
[all …]
vmalloc.c
1052 int node, gfp_t gfp_mask) in alloc_vmap_area() argument
1068 gfp_mask & GFP_RECLAIM_MASK, node); in alloc_vmap_area()
1076 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK); in alloc_vmap_area()
1135 if (gfpflags_allow_blocking(gfp_mask)) { in alloc_vmap_area()
1144 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) in alloc_vmap_area()
1456 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) in new_vmap_block() argument
1468 gfp_mask & GFP_RECLAIM_MASK, node); in new_vmap_block()
1474 node, gfp_mask); in new_vmap_block()
1480 err = radix_tree_preload(gfp_mask); in new_vmap_block()
1572 static void *vb_alloc(unsigned long size, gfp_t gfp_mask) in vb_alloc() argument
[all …]
hugetlb.c
894 static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid, in dequeue_huge_page_nodemask() argument
903 zonelist = node_zonelist(nid, gfp_mask); in dequeue_huge_page_nodemask()
907 for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) { in dequeue_huge_page_nodemask()
910 if (!cpuset_zone_allowed(zone, gfp_mask)) in dequeue_huge_page_nodemask()
946 gfp_t gfp_mask; in dequeue_huge_page_vma() local
963 gfp_mask = htlb_alloc_mask(h); in dequeue_huge_page_vma()
964 nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask); in dequeue_huge_page_vma()
965 page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask); in dequeue_huge_page_vma()
1074 unsigned long nr_pages, gfp_t gfp_mask) in __alloc_gigantic_page() argument
1078 gfp_mask); in __alloc_gigantic_page()
[all …]
oom_kill.c
256 enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask); in constrained_alloc()
278 if (oc->gfp_mask & __GFP_THISNODE) in constrained_alloc()
297 if (!cpuset_zone_allowed(zone, oc->gfp_mask)) in constrained_alloc()
455 current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order, in dump_header()
1075 if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc)) in out_of_memory()
1127 .gfp_mask = 0, in pagefault_out_of_memory()
filemap.c
811 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) in replace_page_cache_page() argument
852 pgoff_t offset, gfp_t gfp_mask, in __add_to_page_cache_locked() argument
867 gfp_mask, &memcg, false); in __add_to_page_cache_locked()
897 } while (xas_nomem(&xas, gfp_mask & GFP_RECLAIM_MASK)); in __add_to_page_cache_locked()
929 pgoff_t offset, gfp_t gfp_mask) in add_to_page_cache_locked() argument
932 gfp_mask, NULL); in add_to_page_cache_locked()
937 pgoff_t offset, gfp_t gfp_mask) in add_to_page_cache_lru() argument
944 gfp_mask, &shadow); in add_to_page_cache_lru()
957 if (!(gfp_mask & __GFP_WRITE) && shadow) in add_to_page_cache_lru()
1630 int fgp_flags, gfp_t gfp_mask) in pagecache_get_page() argument
[all …]
readahead.c
166 gfp_t gfp_mask = readahead_gfp_mask(mapping); in __do_page_cache_readahead() local
191 gfp_mask); in __do_page_cache_readahead()
196 page = __page_cache_alloc(gfp_mask); in __do_page_cache_readahead()
212 read_pages(mapping, filp, &page_pool, nr_pages, gfp_mask); in __do_page_cache_readahead()
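The readahead.c hits show the standard page-cache pattern: derive a gfp_mask from the mapping, allocate a page with it, then insert under the same mask. A minimal hedged sketch of that pattern follows; the demo_ name and the simplified error handling are assumptions.

```c
/*
 * Sketch only: the allocate-then-insert pattern visible in
 * __do_page_cache_readahead() above. readahead_gfp_mask() adds
 * __GFP_NORETRY | __GFP_NOWARN on top of the mapping's gfp mask.
 */
#include <linux/mm.h>
#include <linux/pagemap.h>

static int demo_add_readahead_page(struct address_space *mapping, pgoff_t index)
{
	gfp_t gfp_mask = readahead_gfp_mask(mapping);
	struct page *page = __page_cache_alloc(gfp_mask);
	int err;

	if (!page)
		return -ENOMEM;

	err = add_to_page_cache_lru(page, mapping, index, gfp_mask);
	if (err)
		put_page(page);	/* drop our reference if insertion failed */
	return err;
}
```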
sparse-vmemmap.c
53 gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN; in vmemmap_alloc_block() local
58 page = alloc_pages_node(node, gfp_mask, order); in vmemmap_alloc_block()
63 warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL, in vmemmap_alloc_block()
compaction.c
948 if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page)) in isolate_migratepages_block()
2092 cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask); in compact_zone()
2289 gfp_t gfp_mask, enum compact_priority prio, in compact_zone_order() argument
2297 .gfp_mask = gfp_mask, in compact_zone_order()
2339 enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, in try_to_compact_pages() argument
2343 int may_perform_io = gfp_mask & __GFP_IO; in try_to_compact_pages()
2355 trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio); in try_to_compact_pages()
2368 status = compact_zone_order(zone, order, gfp_mask, prio, in try_to_compact_pages()
2419 .gfp_mask = GFP_KERNEL, in compact_node()
2534 .gfp_mask = GFP_KERNEL, in kcompactd_do_work()
memcontrol.c
1575 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, in mem_cgroup_out_of_memory() argument
1582 .gfp_mask = gfp_mask, in mem_cgroup_out_of_memory()
1698 gfp_t gfp_mask, in mem_cgroup_soft_reclaim() argument
1737 total += mem_cgroup_shrink_node(victim, gfp_mask, false, in mem_cgroup_soft_reclaim()
2343 gfp_t gfp_mask) in reclaim_high() argument
2349 try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true); in reclaim_high()
2502 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, in try_charge() argument
2543 if (gfp_mask & __GFP_ATOMIC) in try_charge()
2567 if (!gfpflags_allow_blocking(gfp_mask)) in try_charge()
2573 gfp_mask, may_swap); in try_charge()
[all …]
gup.c
1396 gfp_t gfp_mask = GFP_USER | __GFP_NOWARN; in new_non_cma_page() local
1399 gfp_mask |= __GFP_HIGHMEM; in new_non_cma_page()
1408 return alloc_migrate_huge_page(h, gfp_mask, nid, NULL); in new_non_cma_page()
1430 return __alloc_pages_node(nid, gfp_mask, 0); in new_non_cma_page()
backing-dev.c
867 struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id) in bdi_alloc_node() argument
872 gfp_mask | __GFP_ZERO, node_id); in bdi_alloc_node()
internal.h
193 const gfp_t gfp_mask; /* gfp mask of a direct compactor */ member
swapfile.c
3522 int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask) in add_swap_count_continuation() argument
3537 page = alloc_page(gfp_mask | __GFP_HIGHMEM); in add_swap_count_continuation()
3742 gfp_t gfp_mask) in mem_cgroup_throttle_swaprate() argument
3745 if (!(gfp_mask & __GFP_IO) || !memcg) in mem_cgroup_throttle_swaprate()
nommu.c
143 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) in __vmalloc() argument
149 return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM); in __vmalloc()
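nommu.c's __vmalloc() above simply rewrites the caller's gfp_mask and falls through to kmalloc(). Matching the three-argument signature listed there (older kernels; newer ones dropped the pgprot argument), here is a hedged caller-side sketch; the demo_ names and flag choice are illustrative.

```c
/*
 * Sketch only, against the __vmalloc(size, gfp_mask, prot) signature shown
 * in the nommu.c hit above. GFP_KERNEL | __GFP_ZERO makes this roughly
 * equivalent to vzalloc() on MMU kernels of the same era.
 */
#include <linux/vmalloc.h>
#include <linux/gfp.h>

static void *demo_big_buffer(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
}

static void demo_big_buffer_free(void *buf)
{
	vfree(buf);
}
```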
memory.c
3936 .gfp_mask = __get_fault_gfp_mask(vma), in __handle_mm_fault()