
Searched refs:gfp_mask (Results 1 – 15 of 15) sorted by relevance

/mm/
mempool.c
74 gfp_t gfp_mask, int node_id) in mempool_create_node() argument
77 pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id); in mempool_create_node()
81 gfp_mask, node_id); in mempool_create_node()
99 element = pool->alloc(gfp_mask, pool->pool_data); in mempool_create_node()
126 int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask) in mempool_resize() argument
148 new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask); in mempool_resize()
167 element = pool->alloc(gfp_mask, pool->pool_data); in mempool_resize()
198 void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask) in mempool_alloc() argument
205 VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO); in mempool_alloc()
206 might_sleep_if(gfp_mask & __GFP_WAIT); in mempool_alloc()
[all …]
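The mempool.c hits above hand the caller's gfp_mask straight through to pool->alloc() (mempool.c:99) and use it to decide whether the allocation may sleep (mempool.c:206). A minimal caller-side sketch of that flow, assuming the stock slab helpers from <linux/mempool.h>; the io_req cache and pool names are illustrative, not taken from the results:

    #include <linux/mempool.h>
    #include <linux/slab.h>

    struct io_req { int data; };                    /* illustrative payload */

    static struct kmem_cache *io_req_cache;
    static mempool_t *io_req_pool;

    static int io_req_pool_init(void)
    {
        io_req_cache = kmem_cache_create("io_req", sizeof(struct io_req),
                                         0, 0, NULL);
        if (!io_req_cache)
            return -ENOMEM;

        /* mempool_alloc_slab() simply forwards whatever gfp_mask the caller
         * of mempool_alloc() passes in, i.e. the pool->alloc(gfp_mask, ...)
         * call seen at mempool.c:99. */
        io_req_pool = mempool_create(16, mempool_alloc_slab,
                                     mempool_free_slab, io_req_cache);
        if (!io_req_pool) {
            kmem_cache_destroy(io_req_cache);
            return -ENOMEM;
        }
        return 0;
    }

A writeback or I/O path would then call mempool_alloc(io_req_pool, GFP_NOIO); the might_sleep_if(gfp_mask & __GFP_WAIT) check at mempool.c:206 is why atomic contexts must pass a mask without __GFP_WAIT.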
oom_kill.c
199 gfp_t gfp_mask, nodemask_t *nodemask, in constrained_alloc() argument
204 enum zone_type high_zoneidx = gfp_zone(gfp_mask); in constrained_alloc()
218 if (gfp_mask & __GFP_THISNODE) in constrained_alloc()
236 if (!cpuset_zone_allowed_softwall(zone, gfp_mask)) in constrained_alloc()
249 gfp_t gfp_mask, nodemask_t *nodemask, in constrained_alloc() argument
388 static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order, in dump_header() argument
394 current->comm, gfp_mask, order, in dump_header()
429 void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, in oom_kill_process() argument
453 dump_header(p, gfp_mask, order, memcg, nodemask); in oom_kill_process()
539 void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask, in check_panic_on_oom() argument
[all …]
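constrained_alloc() above derives the OOM scope from the failing allocation's gfp_mask (gfp_zone() at oom_kill.c:204, the __GFP_THISNODE test at oom_kill.c:218). A hedged, illustrative sketch of the kind of caller-side mask that the __GFP_THISNODE check reacts to; the helper name is hypothetical:

    #include <linux/gfp.h>

    /* __GFP_THISNODE forbids falling back to other NUMA nodes, which is
     * one of the properties constrained_alloc() examines when classifying
     * an out-of-memory situation. */
    static struct page *alloc_strictly_on_node(int nid, unsigned int order)
    {
        return alloc_pages_node(nid, GFP_KERNEL | __GFP_THISNODE, order);
    }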
page_alloc.c
1683 static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
1687 if (gfp_mask & __GFP_NOFAIL) in should_fail_alloc_page()
1689 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) in should_fail_alloc_page()
1691 if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT)) in should_fail_alloc_page()
1732 static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
1975 get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order, in get_page_from_freelist() argument
1986 (gfp_mask & __GFP_WRITE); in get_page_from_freelist()
2006 !cpuset_zone_allowed_softwall(zone, gfp_mask)) in get_page_from_freelist()
2085 ret = zone_reclaim(zone, gfp_mask, order); in get_page_from_freelist()
2118 gfp_mask, migratetype); in get_page_from_freelist()
[all …]
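In should_fail_alloc_page() and get_page_from_freelist() the gfp_mask decides whether the request may sleep (__GFP_WAIT), may use highmem, may dirty pages (__GFP_WRITE) and which cpuset zones are eligible. A minimal, hedged caller-side sketch of choosing such a mask for alloc_pages(); the function name is illustrative:

    #include <linux/gfp.h>

    static struct page *grab_scratch_page(bool atomic_ctx)
    {
        /* GFP_ATOMIC carries no __GFP_WAIT, so the allocator will not enter
         * direct reclaim; GFP_KERNEL may sleep, do I/O and re-enter the fs. */
        gfp_t gfp_mask = atomic_ctx ? GFP_ATOMIC : GFP_KERNEL;

        return alloc_pages(gfp_mask, 0);        /* order 0: a single page */
    }

The page is returned with __free_pages(page, 0) once it is no longer needed.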
vmscan.c
67 gfp_t gfp_mask; member
197 sc.gfp_mask = -1; in debug_shrinker_show()
898 may_enter_fs = (sc->gfp_mask & __GFP_FS) || in shrink_page_list()
899 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); in shrink_page_list()
1009 if (!(sc->gfp_mask & __GFP_IO)) in shrink_page_list()
1112 if (!try_to_release_page(page, sc->gfp_mask)) in shrink_page_list()
1192 .gfp_mask = GFP_KERNEL, in reclaim_clean_pages_from_list()
1432 if ((sc->gfp_mask & GFP_IOFS) == GFP_IOFS) in too_many_isolated()
2222 throttle_vm_writeout(sc->gfp_mask); in shrink_lruvec()
2256 if (sc->gfp_mask & __GFP_REPEAT) { in should_continue_reclaim()
[all …]
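shrink_page_list() above only performs filesystem or swap I/O when the reclaiming gfp_mask permits it (__GFP_FS at vmscan.c:898, __GFP_IO at vmscan.c:1009). The same contract applies to shrinkers, which receive the mask through struct shrink_control; a hedged sketch with illustrative names:

    #include <linux/shrinker.h>

    static unsigned long demo_cache_count(struct shrinker *s,
                                          struct shrink_control *sc)
    {
        return 0;       /* freeable objects in the (illustrative) cache */
    }

    static unsigned long demo_cache_scan(struct shrinker *s,
                                         struct shrink_control *sc)
    {
        /* Mirror the __GFP_FS gating at vmscan.c:898: if the allocation
         * that triggered reclaim may not re-enter the filesystem, do not
         * free fs-backed objects from here. */
        if (!(sc->gfp_mask & __GFP_FS))
            return SHRINK_STOP;

        /* ... free up to sc->nr_to_scan objects and return that count ... */
        return 0;
    }

    static struct shrinker demo_cache_shrinker = {
        .count_objects = demo_cache_count,
        .scan_objects  = demo_cache_scan,
        .seeks         = DEFAULT_SEEKS,
    };

register_shrinker(&demo_cache_shrinker) at init time would hook this into the reclaim paths listed above.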
swap_state.c
126 int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask) in add_to_swap_cache() argument
130 error = radix_tree_maybe_preload(gfp_mask); in add_to_swap_cache()
304 struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, in read_swap_cache_async() argument
325 new_page = alloc_page_vma(gfp_mask, vma, addr); in read_swap_cache_async()
333 err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL); in read_swap_cache_async()
457 struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask, in swapin_readahead() argument
481 gfp_mask, vma, addr); in swapin_readahead()
492 return read_swap_cache_async(entry, gfp_mask, vma, addr); in swapin_readahead()
vmalloc.c
347 int node, gfp_t gfp_mask) in alloc_vmap_area() argument
360 gfp_mask & GFP_RECLAIM_MASK, node); in alloc_vmap_area()
368 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK); in alloc_vmap_area()
795 static struct vmap_block *new_vmap_block(gfp_t gfp_mask) in new_vmap_block() argument
806 gfp_mask & GFP_RECLAIM_MASK, node); in new_vmap_block()
812 node, gfp_mask); in new_vmap_block()
818 err = radix_tree_preload(gfp_mask); in new_vmap_block()
905 static void *vb_alloc(unsigned long size, gfp_t gfp_mask) in vb_alloc() argument
954 vb = new_vmap_block(gfp_mask); in vb_alloc()
1311 unsigned long end, int node, gfp_t gfp_mask, const void *caller) in __get_vm_area_node() argument
[all …]
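alloc_vmap_area() and new_vmap_block() above filter the caller's mask through GFP_RECLAIM_MASK before using it for their own bookkeeping allocations. From the caller's side the mask enters through __vmalloc(), which in this tree still takes the three-argument form shown for nommu.c further down. A hedged sketch; the helper name is illustrative:

    #include <linux/gfp.h>
    #include <linux/vmalloc.h>

    static void *alloc_big_table(unsigned long size)
    {
        /* The backing pages are only ever reached through the vmalloc
         * mapping, so __GFP_HIGHMEM is safe to add on 32-bit systems. */
        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
    }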
compaction.c
1194 const int migratetype = gfpflags_to_migratetype(cc->gfp_mask); in compact_zone()
1286 gfp_t gfp_mask, enum migrate_mode mode, int *contended) in compact_zone_order() argument
1293 .gfp_mask = gfp_mask, in compact_zone_order()
1325 int order, gfp_t gfp_mask, nodemask_t *nodemask, in try_to_compact_pages() argument
1329 enum zone_type high_zoneidx = gfp_zone(gfp_mask); in try_to_compact_pages()
1330 int may_enter_fs = gfp_mask & __GFP_FS; in try_to_compact_pages()
1331 int may_perform_io = gfp_mask & __GFP_IO; in try_to_compact_pages()
1345 if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) in try_to_compact_pages()
1357 status = compact_zone_order(zone, order, gfp_mask, mode, in try_to_compact_pages()
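compact_zone() and try_to_compact_pages() above derive the migratetype and the permitted I/O (__GFP_FS, __GFP_IO) from the compacting allocation's gfp_mask. A small hedged illustration of the gfpflags_to_migratetype() relationship used at compaction.c:1345; the helper is hypothetical:

    #include <linux/gfp.h>

    static bool demo_alloc_is_movable(gfp_t gfp_mask)
    {
        /* GFP_HIGHUSER_MOVABLE sets __GFP_MOVABLE, so compaction treats
         * such requests as MIGRATE_MOVABLE allocations. */
        return gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE;
    }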
filemap.c
139 mapping->page_tree.gfp_mask &= __GFP_BITS_MASK; in page_cache_tree_delete()
463 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) in replace_page_cache_page() argument
471 error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM); in replace_page_cache_page()
547 pgoff_t offset, gfp_t gfp_mask, in __add_to_page_cache_locked() argument
559 gfp_mask, &memcg); in __add_to_page_cache_locked()
564 error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM); in __add_to_page_cache_locked()
607 pgoff_t offset, gfp_t gfp_mask) in add_to_page_cache_locked() argument
610 gfp_mask, NULL); in add_to_page_cache_locked()
615 pgoff_t offset, gfp_t gfp_mask) in add_to_page_cache_lru() argument
622 gfp_mask, &shadow); in add_to_page_cache_lru()
[all …]
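__add_to_page_cache_locked() and add_to_page_cache_lru() above take the mask that charges memcg and preloads the radix tree (with __GFP_HIGHMEM masked off at filemap.c:564). A hedged sketch of the usual caller-side derivation via mapping_gfp_mask(); the helper name is illustrative:

    #include <linux/pagemap.h>

    static struct page *demo_add_new_page(struct address_space *mapping,
                                          pgoff_t index)
    {
        /* Honour the per-mapping mask; a filesystem may have restricted
         * it (e.g. to avoid recursing into itself during reclaim). */
        gfp_t gfp_mask = mapping_gfp_mask(mapping);
        struct page *page = __page_cache_alloc(gfp_mask);

        if (!page)
            return NULL;

        if (add_to_page_cache_lru(page, mapping, index, gfp_mask)) {
            page_cache_release(page);
            return NULL;
        }
        return page;
    }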
page_isolation.c
294 gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE; in alloc_migrate_target() local
310 gfp_mask |= __GFP_HIGHMEM; in alloc_migrate_target()
312 return alloc_page(gfp_mask); in alloc_migrate_target()
memcontrol.c
1723 static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, in mem_cgroup_out_of_memory() argument
1742 check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL); in mem_cgroup_out_of_memory()
1790 oom_kill_process(chosen, gfp_mask, order, points, totalpages, memcg, in mem_cgroup_out_of_memory()
1933 gfp_t gfp_mask, in mem_cgroup_soft_reclaim() argument
1974 total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false, in mem_cgroup_soft_reclaim()
2127 current->memcg_oom.gfp_mask = mask; in mem_cgroup_oom()
2178 mem_cgroup_out_of_memory(memcg, current->memcg_oom.gfp_mask, in mem_cgroup_oom_synchronize()
2503 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, in try_charge() argument
2554 if (!(gfp_mask & __GFP_WAIT)) in try_charge()
2558 gfp_mask, may_swap); in try_charge()
[all …]
internal.h
171 const gfp_t gfp_mask; /* gfp mask of a direct compactor */ member
page-writeback.c
1624 void throttle_vm_writeout(gfp_t gfp_mask) in throttle_vm_writeout() argument
1649 if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO)) in throttle_vm_writeout()
nommu.c
245 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) in __vmalloc() argument
251 return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM); in __vmalloc()
memory-failure.c
250 .gfp_mask = GFP_KERNEL, in shake_page()
swapfile.c
2785 int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask) in add_swap_count_continuation() argument
2798 page = alloc_page(gfp_mask | __GFP_HIGHMEM); in add_swap_count_continuation()