Lines matching refs: gfp_mask (mm/page_alloc.c)

2892 static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)  in should_fail_alloc_page()  argument
2896 if (gfp_mask & __GFP_NOFAIL) in should_fail_alloc_page()
2898 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) in should_fail_alloc_page()
2901 (gfp_mask & __GFP_DIRECT_RECLAIM)) in should_fail_alloc_page()
2942 static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
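
Context for the hits above: a minimal sketch of the fault-injection gate (illustrative, not the verbatim kernel code; the ignore_gfp_reclaim field name is assumed from the __GFP_DIRECT_RECLAIM check at line 2901). The second definition at line 2942 is the always-false stub used when CONFIG_FAIL_PAGE_ALLOC is off.

static bool should_fail_alloc_page_sketch(gfp_t gfp_mask, unsigned int order)
{
        if (gfp_mask & __GFP_NOFAIL)
                return false;   /* must-succeed allocations are never failed */
        if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
                return false;
        if (fail_page_alloc.ignore_gfp_reclaim &&      /* assumed field name */
            (gfp_mask & __GFP_DIRECT_RECLAIM))
                return false;
        return should_fail(&fail_page_alloc.attr, 1 << order);
}
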
3097 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, in get_page_from_freelist() argument
3115 !__cpuset_zone_allowed(zone, gfp_mask)) in get_page_from_freelist()
3160 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); in get_page_from_freelist()
3180 gfp_mask, alloc_flags, ac->migratetype); in get_page_from_freelist()
3182 prep_new_page(page, order, gfp_mask, alloc_flags); in get_page_from_freelist()
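
These hits trace gfp_mask through the zonelist walk: cpuset admission, a node_reclaim() fallback, and final page preparation. A condensed sketch of that loop, with the watermark logic elided (ALLOC_CPUSET and cpusets_enabled() are assumed from the surrounding mm code, not shown in the hits):

for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
                                ac->high_zoneidx, ac->nodemask) {
        if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
            !__cpuset_zone_allowed(zone, gfp_mask))
                continue;       /* zone forbidden by the task's cpuset */
        /* (watermark checks and node_reclaim() retry elided) */
        page = rmqueue(ac->preferred_zoneref->zone, zone, order,
                       gfp_mask, alloc_flags, ac->migratetype);
        if (page) {
                prep_new_page(page, order, gfp_mask, alloc_flags);
                return page;
        }
}
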
3212 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) in warn_alloc_show_mem() argument
3225 if (!(gfp_mask & __GFP_NOMEMALLOC)) in warn_alloc_show_mem()
3229 if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) in warn_alloc_show_mem()
3235 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) in warn_alloc() argument
3242 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs)) in warn_alloc()
3253 pr_cont(", mode:%#x(%pGg), nodemask=", gfp_mask, &gfp_mask); in warn_alloc()
3262 warn_alloc_show_mem(gfp_mask, nodemask); in warn_alloc()
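
warn_alloc() is the allocation-failure reporter; the checks above silence it for __GFP_NOWARN callers and rate-limit everyone else via nopage_rs. A trimmed sketch of the gate (the full function also prints the caller-supplied format string and the nodemask):

if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
        return;
pr_warn("%s: page allocation failure, mode:%#x(%pGg)\n",
        current->comm, gfp_mask, &gfp_mask);    /* %pGg decodes GFP flags */
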
3266 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, in __alloc_pages_cpuset_fallback() argument
3272 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_cpuset_fallback()
3279 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_cpuset_fallback()
3286 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, in __alloc_pages_may_oom() argument
3293 .gfp_mask = gfp_mask, in __alloc_pages_may_oom()
3317 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & in __alloc_pages_may_oom()
3335 if (gfp_mask & __GFP_RETRY_MAYFAIL) in __alloc_pages_may_oom()
3353 if (gfp_mask & __GFP_THISNODE) in __alloc_pages_may_oom()
3357 if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) { in __alloc_pages_may_oom()
3364 if (gfp_mask & __GFP_NOFAIL) in __alloc_pages_may_oom()
3365 page = __alloc_pages_cpuset_fallback(gfp_mask, order, in __alloc_pages_may_oom()
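
The OOM path honours several opt-outs visible above: __GFP_RETRY_MAYFAIL callers would rather fail than kill, __GFP_THISNODE requests are too constrained for a kill to help, and __GFP_NOFAIL gets one more pass through the cpuset fallback. A hypothetical caller using the first of these:

/* Hypothetical caller: a large optional buffer that prefers NULL over
 * invoking the OOM killer. */
struct page *page = alloc_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL, 3);
if (!page)
        return -ENOMEM; /* degrade gracefully instead of killing tasks */
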
3382 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
3396 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, in __alloc_pages_direct_compact()
3411 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_direct_compact()
3504 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
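
Both hits are __alloc_pages_direct_compact(): the CONFIG_COMPACTION implementation at line 3382 and what is presumably its no-op stub at line 3504. Compaction mainly serves costly (high-order) requests, which callers often pair with __GFP_NORETRY to keep the slow path to a single pass; a hypothetical example:

/* Hypothetical opportunistic high-order allocation: at most one
 * compaction/reclaim attempt, then fall back to order-0 chunks. */
struct page *page = alloc_pages(GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN, 4);
if (!page)
        page = alloc_pages(GFP_KERNEL, 0);
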
3544 static bool __need_fs_reclaim(gfp_t gfp_mask) in __need_fs_reclaim() argument
3546 gfp_mask = current_gfp_context(gfp_mask); in __need_fs_reclaim()
3549 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) in __need_fs_reclaim()
3557 if (!(gfp_mask & __GFP_FS)) in __need_fs_reclaim()
3560 if (gfp_mask & __GFP_NOLOCKDEP) in __need_fs_reclaim()
3566 void fs_reclaim_acquire(gfp_t gfp_mask) in fs_reclaim_acquire() argument
3568 if (__need_fs_reclaim(gfp_mask)) in fs_reclaim_acquire()
3573 void fs_reclaim_release(gfp_t gfp_mask) in fs_reclaim_release() argument
3575 if (__need_fs_reclaim(gfp_mask)) in fs_reclaim_release()
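
fs_reclaim_acquire()/fs_reclaim_release() wrap direct reclaim in a lockdep map so that "allocate while holding filesystem locks" deadlocks are reported even on runs where reclaim never actually fires. Per __need_fs_reclaim() above, masks lacking __GFP_DIRECT_RECLAIM or __GFP_FS, or carrying __GFP_NOLOCKDEP, are exempt, which is why filesystem code commonly allocates like this:

/* GFP_NOFS clears __GFP_FS: no fs_reclaim lockdep dependency is
 * recorded and reclaim cannot recurse into the filesystem. */
struct page *page = alloc_page(GFP_NOFS);
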
3583 __perform_reclaim(gfp_t gfp_mask, unsigned int order, in __perform_reclaim() argument
3597 fs_reclaim_acquire(gfp_mask); in __perform_reclaim()
3601 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, in __perform_reclaim()
3605 fs_reclaim_release(gfp_mask); in __perform_reclaim()
3616 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_reclaim() argument
3623 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); in __alloc_pages_direct_reclaim()
3628 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_direct_reclaim()
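
__perform_reclaim() brackets try_to_free_pages() with the lockdep annotation and a no-recursion flag. A sketch of the core (assuming the memalloc_noreclaim_save()/restore() helpers; not the verbatim body):

unsigned int noreclaim_flag;
unsigned long progress;

fs_reclaim_acquire(gfp_mask);
noreclaim_flag = memalloc_noreclaim_save();     /* sets PF_MEMALLOC so reclaim
                                                 * cannot recurse into itself */
progress = try_to_free_pages(ac->zonelist, order, gfp_mask, ac->nodemask);
memalloc_noreclaim_restore(noreclaim_flag);
fs_reclaim_release(gfp_mask);
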
3660 gfp_to_alloc_flags(gfp_t gfp_mask) in gfp_to_alloc_flags() argument
3673 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH); in gfp_to_alloc_flags()
3675 if (gfp_mask & __GFP_ATOMIC) { in gfp_to_alloc_flags()
3680 if (!(gfp_mask & __GFP_NOMEMALLOC)) in gfp_to_alloc_flags()
3691 if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) in gfp_to_alloc_flags()
3716 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask) in __gfp_pfmemalloc_flags() argument
3718 if (unlikely(gfp_mask & __GFP_NOMEMALLOC)) in __gfp_pfmemalloc_flags()
3720 if (gfp_mask & __GFP_MEMALLOC) in __gfp_pfmemalloc_flags()
3734 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) in gfp_pfmemalloc_allowed() argument
3736 return !!__gfp_pfmemalloc_flags(gfp_mask); in gfp_pfmemalloc_allowed()
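
Access to the memory reserves is decided from gfp_mask and task state, with __GFP_NOMEMALLOC always overriding __GFP_MEMALLOC. A trimmed sketch of the flag-only part (the PF_MEMALLOC and OOM-victim task checks are elided):

static inline int gfp_pfmemalloc_sketch(gfp_t gfp_mask)
{
        if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
                return 0;                       /* explicitly barred */
        if (gfp_mask & __GFP_MEMALLOC)
                return ALLOC_NO_WATERMARKS;     /* e.g. memory-freeing paths */
        return 0;
}
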
3750 should_reclaim_retry(gfp_t gfp_mask, unsigned order, in should_reclaim_retry() argument
3874 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, in __alloc_pages_slowpath() argument
3877 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM; in __alloc_pages_slowpath()
3893 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) == in __alloc_pages_slowpath()
3895 gfp_mask &= ~__GFP_ATOMIC; in __alloc_pages_slowpath()
3908 alloc_flags = gfp_to_alloc_flags(gfp_mask); in __alloc_pages_slowpath()
3921 if (gfp_mask & __GFP_KSWAPD_RECLAIM) in __alloc_pages_slowpath()
3928 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_slowpath()
3944 && !gfp_pfmemalloc_allowed(gfp_mask)) { in __alloc_pages_slowpath()
3945 page = __alloc_pages_direct_compact(gfp_mask, order, in __alloc_pages_slowpath()
3956 if (costly_order && (gfp_mask & __GFP_NORETRY)) { in __alloc_pages_slowpath()
3979 if (gfp_mask & __GFP_KSWAPD_RECLAIM) in __alloc_pages_slowpath()
3982 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask); in __alloc_pages_slowpath()
3997 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_slowpath()
4010 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
4016 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
4022 if (gfp_mask & __GFP_NORETRY) in __alloc_pages_slowpath()
4029 if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL)) in __alloc_pages_slowpath()
4032 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, in __alloc_pages_slowpath()
4054 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); in __alloc_pages_slowpath()
4061 (gfp_mask & __GFP_NOMEMALLOC))) in __alloc_pages_slowpath()
4079 if (gfp_mask & __GFP_NOFAIL) { in __alloc_pages_slowpath()
4108 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac); in __alloc_pages_slowpath()
4116 warn_alloc(gfp_mask, ac->nodemask, in __alloc_pages_slowpath()
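
Taken together, the slow-path hits show gfp_mask steering every stage: kswapd wakeup, reserve access, compaction, direct reclaim, retry policy, the OOM killer, and the final warning. A hypothetical side-by-side of common masks:

struct page *a = alloc_pages(GFP_ATOMIC, 0);    /* no direct reclaim; may tap
                                                 * the ALLOC_HARDER reserves,
                                                 * fails fast */
struct page *k = alloc_pages(GFP_KERNEL, 0);    /* may reclaim, compact and
                                                 * invoke the OOM killer */
struct page *n = alloc_pages(GFP_KERNEL | __GFP_NOFAIL, 0);
                                                /* loops until it succeeds */
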
4122 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, in prepare_alloc_pages() argument
4127 ac->high_zoneidx = gfp_zone(gfp_mask); in prepare_alloc_pages()
4128 ac->zonelist = node_zonelist(preferred_nid, gfp_mask); in prepare_alloc_pages()
4130 ac->migratetype = gfpflags_to_migratetype(gfp_mask); in prepare_alloc_pages()
4140 fs_reclaim_acquire(gfp_mask); in prepare_alloc_pages()
4141 fs_reclaim_release(gfp_mask); in prepare_alloc_pages()
4143 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM); in prepare_alloc_pages()
4145 if (should_fail_alloc_page(gfp_mask, order)) in prepare_alloc_pages()
4155 static inline void finalise_ac(gfp_t gfp_mask, in finalise_ac() argument
4159 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE); in finalise_ac()
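
Note the fs_reclaim_acquire()/release() pair and might_sleep_if() at lines 4140-4143: any mask carrying __GFP_DIRECT_RECLAIM is treated as potentially sleeping up front, so context bugs surface deterministically rather than only under memory pressure. A hypothetical bug this catches (some_lock stands in for any spinlock held by the caller):

struct page *page;

spin_lock(&some_lock);
page = alloc_page(GFP_KERNEL);  /* might_sleep() splats here under
                                 * CONFIG_DEBUG_ATOMIC_SLEEP, even when
                                 * no sleep would actually occur */
spin_unlock(&some_lock);
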
4174 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid, in __alloc_pages_nodemask() argument
4187 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN)); in __alloc_pages_nodemask()
4191 gfp_mask &= gfp_allowed_mask; in __alloc_pages_nodemask()
4192 alloc_mask = gfp_mask; in __alloc_pages_nodemask()
4193 if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags)) in __alloc_pages_nodemask()
4196 finalise_ac(gfp_mask, order, &ac); in __alloc_pages_nodemask()
4209 alloc_mask = current_gfp_context(gfp_mask); in __alloc_pages_nodemask()
4222 if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page && in __alloc_pages_nodemask()
4223 unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) { in __alloc_pages_nodemask()
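
__alloc_pages_nodemask() is the allocator's entry point: gfp_mask is first filtered through gfp_allowed_mask, the fast path runs with alloc_mask, and __GFP_ACCOUNT pages are charged to the current memcg (and freed again if the charge fails). A hypothetical accounted allocation:

/* GFP_KERNEL_ACCOUNT = GFP_KERNEL | __GFP_ACCOUNT: the page is charged
 * to the current task's memory cgroup via memcg_kmem_charge(). */
struct page *page = alloc_pages(GFP_KERNEL_ACCOUNT, 0);
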
4237 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) in __get_free_pages() argument
4245 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0); in __get_free_pages()
4247 page = alloc_pages(gfp_mask, order); in __get_free_pages()
4254 unsigned long get_zeroed_page(gfp_t gfp_mask) in get_zeroed_page() argument
4256 return __get_free_pages(gfp_mask | __GFP_ZERO, 0); in get_zeroed_page()
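
These wrappers return kernel virtual addresses rather than struct page pointers, which is why the VM_BUG_ON at line 4245 rejects __GFP_HIGHMEM (highmem pages may have no permanent mapping). Usage sketch:

unsigned long addr = __get_free_pages(GFP_KERNEL, 1);   /* 2 contiguous pages */
unsigned long zp = get_zeroed_page(GFP_KERNEL);         /* 1 zeroed page */
if (addr)
        free_pages(addr, 1);
if (zp)
        free_page(zp);
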
4294 gfp_t gfp_mask) in __page_frag_cache_refill() argument
4297 gfp_t gfp = gfp_mask; in __page_frag_cache_refill()
4300 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY | in __page_frag_cache_refill()
4302 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, in __page_frag_cache_refill()
4330 unsigned int fragsz, gfp_t gfp_mask) in page_frag_alloc() argument
4338 page = __page_frag_cache_refill(nc, gfp_mask); in page_frag_alloc()
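
The page-fragment allocator carves small buffers out of a cached, usually compound, page; the refill above ORs in __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY so a failed high-order refill quietly falls back to a single page. Usage sketch (real users typically keep the cache per-CPU):

static struct page_frag_cache nc;

void *frag = page_frag_alloc(&nc, 256, GFP_ATOMIC);
if (frag)
        page_frag_free(frag);   /* drops the backing page on last reference */
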
4424 void *alloc_pages_exact(size_t size, gfp_t gfp_mask) in alloc_pages_exact() argument
4429 addr = __get_free_pages(gfp_mask, order); in alloc_pages_exact()
4444 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) in alloc_pages_exact_nid() argument
4447 struct page *p = alloc_pages_node(nid, gfp_mask, order); in alloc_pages_exact_nid()
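
alloc_pages_exact() allocates the covering power-of-two block and frees the excess tail pages back, so sizes that are not a power-of-two number of pages waste nothing; alloc_pages_exact_nid() is the node-targeted variant. Usage sketch:

size_t sz = 3 * PAGE_SIZE;                      /* needs an order-2 block */
void *buf = alloc_pages_exact(sz, GFP_KERNEL);  /* fourth page is given back */
if (buf)
        free_pages_exact(buf, sz);
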
7544 unsigned migratetype, gfp_t gfp_mask) in alloc_contig_range() argument
7556 .gfp_mask = current_gfp_context(gfp_mask), in alloc_contig_range()
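
alloc_contig_range(), the backend behind CMA, migrates everything out of a PFN range; gfp_mask passes through current_gfp_context() so a caller's memalloc_nofs/noio scope applies to the internal compaction control. Usage sketch with hypothetical PFN bounds (the range must lie within a single zone):

int ret = alloc_contig_range(start_pfn, end_pfn, MIGRATE_CMA, GFP_KERNEL);
if (!ret)
        free_contig_range(start_pfn, end_pfn - start_pfn);
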