Searched refs:gfp_mask (Results 1 – 13 of 13) sorted by relevance

/mm/
mempool.c
114 int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask) in mempool_resize() argument
136 new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask); in mempool_resize()
155 element = pool->alloc(gfp_mask, pool->pool_data); in mempool_resize()
202 void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask) in mempool_alloc() argument
209 might_sleep_if(gfp_mask & __GFP_WAIT); in mempool_alloc()
211 gfp_mask |= __GFP_NOMEMALLOC; /* don't allocate emergency reserves */ in mempool_alloc()
212 gfp_mask |= __GFP_NORETRY; /* don't loop in __alloc_pages */ in mempool_alloc()
213 gfp_mask |= __GFP_NOWARN; /* failures are OK */ in mempool_alloc()
215 gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO); in mempool_alloc()
232 if (!(gfp_mask & __GFP_WAIT)) in mempool_alloc()
[all …]
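
The adjustments above show mempool_alloc() hardening the caller's mask: __GFP_NOMEMALLOC, __GFP_NORETRY and __GFP_NOWARN are added, and the first pass through the underlying allocator runs with __GFP_WAIT and __GFP_IO stripped, so the pool's preallocated elements are the real fallback. A minimal caller-side sketch, assuming a hypothetical kmalloc-backed pool of 16 buffers of 256 bytes on an I/O path (names and sizes are illustrative, not taken from the listing):

#include <linux/mempool.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static mempool_t *buf_pool;	/* hypothetical pool for an I/O path */

static int buf_pool_init(void)
{
	/* 16 preallocated 256-byte elements backed by kmalloc/kfree. */
	buf_pool = mempool_create_kmalloc_pool(16, 256);
	return buf_pool ? 0 : -ENOMEM;
}

static void *buf_get(void)
{
	/*
	 * With GFP_NOIO the allocation may sleep but will not recurse
	 * into the I/O layer; if kmalloc cannot satisfy it, mempool_alloc
	 * hands out one of the 16 reserved elements instead.
	 */
	return mempool_alloc(buf_pool, GFP_NOIO);
}

static void buf_put(void *buf)
{
	mempool_free(buf, buf_pool);
}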

oom_kill.c
175 gfp_t gfp_mask) in constrained_alloc() argument
180 enum zone_type high_zoneidx = gfp_zone(gfp_mask); in constrained_alloc()
184 if (cpuset_zone_allowed_softwall(zone, gfp_mask)) in constrained_alloc()
385 static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, in oom_kill_process() argument
394 current->comm, gfp_mask, order, current->oomkilladj); in oom_kill_process()
427 void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask) in mem_cgroup_out_of_memory() argument
441 if (oom_kill_process(p, gfp_mask, 0, points, mem, in mem_cgroup_out_of_memory()
468 int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_mask) in try_set_zone_oom() argument
475 for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) { in try_set_zone_oom()
482 for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) { in try_set_zone_oom()
[all …]
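
Both constrained_alloc() and try_set_zone_oom() derive the highest zone an allocation may use from the mask via gfp_zone(), and walk only zones up to that index. A small illustration of the mapping (zone names assume a typical 32-bit configuration with ZONE_DMA and ZONE_HIGHMEM enabled; the exact result depends on the kernel config):

#include <linux/gfp.h>

static void gfp_zone_examples(void)
{
	enum zone_type z;

	z = gfp_zone(GFP_KERNEL);		   /* ZONE_NORMAL: no zone modifier set */
	z = gfp_zone(GFP_KERNEL | __GFP_HIGHMEM);  /* ZONE_HIGHMEM, if configured       */
	z = gfp_zone(GFP_KERNEL | GFP_DMA);	   /* ZONE_DMA: ISA-style allocations   */
	(void)z;
}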

swap_state.c
72 int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask) in add_to_swap_cache() argument
80 error = radix_tree_preload(gfp_mask); in add_to_swap_cache()
270 struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, in read_swap_cache_async() argument
290 new_page = alloc_page_vma(gfp_mask, vma, addr); in read_swap_cache_async()
311 err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL); in read_swap_cache_async()
349 struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask, in swapin_readahead() argument
368 gfp_mask, vma, addr); in swapin_readahead()
374 return read_swap_cache_async(entry, gfp_mask, vma, addr); in swapin_readahead()
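
add_to_swap_cache() (and add_to_page_cache_locked() in filemap.c below) preload the radix tree with the caller's mask before taking the tree lock, so no allocation happens under the lock. The general pattern, sketched against a hypothetical tree and lock rather than the swap cache itself:

#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

static RADIX_TREE(my_tree, GFP_ATOMIC);		/* hypothetical tree */
static DEFINE_SPINLOCK(my_tree_lock);

static int my_tree_insert(unsigned long index, void *item, gfp_t gfp_mask)
{
	int error;

	/* Allocate tree nodes up front, while sleeping is still allowed. */
	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (error)
		return error;

	spin_lock_irq(&my_tree_lock);
	error = radix_tree_insert(&my_tree, index, item);
	spin_unlock_irq(&my_tree_lock);

	/* Ends the preload section (re-enables preemption). */
	radix_tree_preload_end();
	return error;
}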

vmscan.c
59 gfp_t gfp_mask; member
195 unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask, in shrink_slab() argument
210 unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask); in shrink_slab()
238 nr_before = (*shrinker->shrink)(0, gfp_mask); in shrink_slab()
239 shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask); in shrink_slab()
616 may_enter_fs = (sc->gfp_mask & __GFP_FS) || in shrink_page_list()
617 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); in shrink_page_list()
645 if (!(sc->gfp_mask & __GFP_IO)) in shrink_page_list()
724 if (!try_to_release_page(page, sc->gfp_mask)) in shrink_page_list()
1521 throttle_vm_writeout(sc->gfp_mask); in shrink_zone()
[all …]

vmalloc.c
319 int node, gfp_t gfp_mask) in alloc_vmap_area() argument
330 gfp_mask & GFP_RECLAIM_MASK, node); in alloc_vmap_area()
697 static struct vmap_block *new_vmap_block(gfp_t gfp_mask) in new_vmap_block() argument
708 gfp_mask & GFP_RECLAIM_MASK, node); in new_vmap_block()
714 node, gfp_mask); in new_vmap_block()
720 err = radix_tree_preload(gfp_mask); in new_vmap_block()
782 static void *vb_alloc(unsigned long size, gfp_t gfp_mask) in vb_alloc() argument
822 vb = new_vmap_block(gfp_mask); in vb_alloc()
1051 int node, gfp_t gfp_mask, void *caller) in __get_vm_area_node() argument
1074 area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); in __get_vm_area_node()
[all …]
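
__get_vm_area_node() passes only the reclaim-related bits (gfp_mask & GFP_RECLAIM_MASK) to kmalloc_node() for the vm_struct itself; the backing pages are allocated with the caller's full mask. From the caller's side the usual entry points are vmalloc() or, when non-default flags are wanted, __vmalloc(); plain vmalloc() corresponds to the flags in the sketch below (the function and its use are illustrative):

#include <linux/vmalloc.h>
#include <linux/gfp.h>
#include <linux/mm.h>		/* PAGE_KERNEL */

static void *alloc_big_table(unsigned long bytes)
{
	/* Backing pages may come from highmem; mapped with kernel protections. */
	return __vmalloc(bytes, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}

static void free_big_table(void *table)
{
	vfree(table);
}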

page_alloc.c
1171 static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
1175 if (gfp_mask & __GFP_NOFAIL) in should_fail_alloc_page()
1177 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) in should_fail_alloc_page()
1179 if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT)) in should_fail_alloc_page()
1229 static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
1393 get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order, in get_page_from_freelist() argument
1422 !cpuset_zone_allowed_softwall(zone, gfp_mask)) in get_page_from_freelist()
1436 !zone_reclaim(zone, gfp_mask, order)) in get_page_from_freelist()
1441 page = buffered_rmqueue(preferred_zone, zone, order, gfp_mask); in get_page_from_freelist()
1468 __alloc_pages_internal(gfp_t gfp_mask, unsigned int order, in __alloc_pages_internal() argument
[all …]
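
should_fail_alloc_page() is the fault-injection hook in the page allocator: with CONFIG_FAIL_PAGE_ALLOC it can force allocation failures, but never for __GFP_NOFAIL requests, and it can be told to leave __GFP_HIGHMEM or __GFP_WAIT allocations alone. The flags that feed this check are simply whatever the caller hands to alloc_pages(); a trivial, illustrative caller:

#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *grab_two_pages(void)
{
	/*
	 * Order-1, may sleep, may fail (injected or real). Adding
	 * __GFP_NOFAIL would bypass the injector and make the allocator
	 * retry forever, so it is reserved for callers that genuinely
	 * cannot handle failure.
	 */
	return alloc_pages(GFP_KERNEL, 1);
}

static void drop_two_pages(struct page *page)
{
	if (page)
		__free_pages(page, 1);
}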

memcontrol.c
765 gfp_t gfp_mask, bool noswap) in mem_cgroup_hierarchical_reclaim() argument
777 ret += try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap, in mem_cgroup_hierarchical_reclaim()
791 ret += try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap, in mem_cgroup_hierarchical_reclaim()
821 gfp_t gfp_mask, struct mem_cgroup **memcg, in __mem_cgroup_try_charge() argument
874 if (!(gfp_mask & __GFP_WAIT)) in __mem_cgroup_try_charge()
877 ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask, in __mem_cgroup_try_charge()
896 mem_cgroup_out_of_memory(mem_over_limit, gfp_mask); in __mem_cgroup_try_charge()
1018 gfp_t gfp_mask) in mem_cgroup_move_parent() argument
1034 ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false); in mem_cgroup_move_parent()
1077 gfp_t gfp_mask, enum charge_type ctype, in mem_cgroup_charge_common() argument
[all …]

filemap.c
456 pgoff_t offset, gfp_t gfp_mask) in add_to_page_cache_locked() argument
463 gfp_mask & GFP_RECLAIM_MASK); in add_to_page_cache_locked()
467 error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM); in add_to_page_cache_locked()
494 pgoff_t offset, gfp_t gfp_mask) in add_to_page_cache_lru() argument
507 ret = add_to_page_cache(page, mapping, offset, gfp_mask); in add_to_page_cache_lru()
734 pgoff_t index, gfp_t gfp_mask) in find_or_create_page() argument
741 page = __page_cache_alloc(gfp_mask); in find_or_create_page()
751 (gfp_mask & GFP_RECLAIM_MASK)); in find_or_create_page()
2471 int try_to_release_page(struct page *page, gfp_t gfp_mask) in try_to_release_page() argument
2480 return mapping->a_ops->releasepage(page, gfp_mask); in try_to_release_page()
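
find_or_create_page() (line 734 above) uses the mask both for __page_cache_alloc() and, restricted to GFP_RECLAIM_MASK, for the radix-tree insert. Filesystems typically derive the mask from the address_space rather than hard-coding GFP_KERNEL; a sketch along those lines (the helper itself is hypothetical):

#include <linux/pagemap.h>
#include <linux/gfp.h>

static struct page *get_fs_page(struct address_space *mapping, pgoff_t index)
{
	/* Respect per-mapping allocation constraints (e.g. GFP_NOFS mappings). */
	struct page *page = find_or_create_page(mapping, index,
						mapping_gfp_mask(mapping));
	if (!page)
		return NULL;

	/*
	 * Comes back locked with an extra reference; drop the reference
	 * with page_cache_release() when done.
	 */
	unlock_page(page);
	return page;
}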

bounce.c
71 static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data) in mempool_alloc_pages_isa() argument
73 return mempool_alloc_pages(gfp_mask | GFP_DMA, data); in mempool_alloc_pages_isa()

ashmem.c
279 static int ashmem_shrink(int nr_to_scan, gfp_t gfp_mask) in ashmem_shrink() argument
284 if (nr_to_scan && !(gfp_mask & __GFP_FS)) in ashmem_shrink()
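
ashmem_shrink() follows the contract that shrink_slab() (vmscan.c above) expects from a shrinker in this kernel generation: a call with nr_to_scan == 0 only reports how many objects could be freed, and the callback should refuse filesystem work when the allocation that triggered reclaim lacks __GFP_FS. A minimal registration sketch, with a hypothetical object cache standing in for the real thing:

#include <linux/mm.h>
#include <linux/gfp.h>

/* Hypothetical object cache; not part of the code above. */
extern int my_cache_count(void);
extern void my_cache_free_some(int nr);

static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	/* Query pass from shrink_slab(): just report the pool size. */
	if (!nr_to_scan)
		return my_cache_count();

	/* Freeing may touch the filesystem; bail out if that is not allowed. */
	if (!(gfp_mask & __GFP_FS))
		return -1;

	my_cache_free_some(nr_to_scan);
	return my_cache_count();
}

static struct shrinker my_cache_shrinker = {
	.shrink = my_cache_shrink,
	.seeks = DEFAULT_SEEKS,
};

/* register_shrinker(&my_cache_shrinker) at init, unregister_shrinker() on exit. */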

page-writeback.c
651 void throttle_vm_writeout(gfp_t gfp_mask) in throttle_vm_writeout() argument
675 if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO)) in throttle_vm_writeout()

nommu.c
249 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) in __vmalloc() argument
255 return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM); in __vmalloc()

shmem.c
109 static inline struct page *shmem_dir_alloc(gfp_t gfp_mask) in shmem_dir_alloc() argument
118 return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO, in shmem_dir_alloc()
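
shmem_dir_alloc() strips the movability bits (GFP_MOVABLE_MASK) so this long-lived index page is not grouped with movable pages, and adds __GFP_ZERO so the allocator returns an already-cleared page. The __GFP_ZERO shortcut is general purpose; a one-function illustration:

#include <linux/gfp.h>

static unsigned long grab_zeroed_page(void)
{
	/*
	 * __GFP_ZERO makes the page allocator clear the page before
	 * returning it; get_zeroed_page(GFP_KERNEL) is the equivalent
	 * convenience helper.
	 */
	return __get_free_page(GFP_KERNEL | __GFP_ZERO);
}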