Lines matching references to 'ac' (struct alloc_context) in mm/page_alloc.c
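
Every reference below goes through a single struct alloc_context ('ac'), the per-request context that mm/page_alloc.c threads from the fast path through the slow path and into the reclaim/compaction helpers. As a reading aid, here is a minimal sketch of the structure, restricted to the fields these references actually touch; the authoritative definition lives in mm/internal.h and may carry further members depending on the kernel version:

struct alloc_context {
        struct zonelist *zonelist;         /* zones to scan, in preference order */
        nodemask_t *nodemask;              /* allowed nodes, NULL means "all" */
        struct zoneref *preferred_zoneref; /* first usable zone in the zonelist */
        int migratetype;                   /* derived from the gfp flags */
        enum zone_type high_zoneidx;       /* highest zone the gfp mask permits */
        bool spread_dirty_pages;           /* __GFP_WRITE: spread dirtying over nodes */
};
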
2556 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, in unreserve_highatomic_pageblock() argument
2559 struct zonelist *zonelist = ac->zonelist; in unreserve_highatomic_pageblock()
2567 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx, in unreserve_highatomic_pageblock()
2568 ac->nodemask) { in unreserve_highatomic_pageblock()
2614 set_pageblock_migratetype(page, ac->migratetype); in unreserve_highatomic_pageblock()
2615 ret = move_freepages_block(zone, page, ac->migratetype, in unreserve_highatomic_pageblock()
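
The unreserve_highatomic_pageblock() references above walk the request's zonelist and hand a pageblock that was reserved for high-order atomic allocations back to ac->migratetype. Below is a condensed sketch of that loop, assuming this kernel generation's free-list layout (zone->free_area[].free_list[MIGRATE_HIGHATOMIC]); the nr_reserved_highatomic accounting and tracing of the real function are omitted. In the listing, __alloc_pages_direct_reclaim() passes force=false while should_reclaim_retry() passes force=true.

static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
                                           bool force)
{
        struct zonelist *zonelist = ac->zonelist;
        unsigned long flags;
        struct zoneref *z;
        struct zone *zone;
        struct page *page;
        int order;

        for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
                                        ac->nodemask) {
                /* unless forced, only touch zones that still hold a sizeable reserve */
                if (!force && zone->nr_reserved_highatomic <= pageblock_nr_pages)
                        continue;

                spin_lock_irqsave(&zone->lock, flags);
                for (order = 0; order < MAX_ORDER; order++) {
                        struct free_area *area = &zone->free_area[order];

                        page = list_first_entry_or_null(
                                        &area->free_list[MIGRATE_HIGHATOMIC],
                                        struct page, lru);
                        if (!page)
                                continue;

                        /* hand the whole pageblock back to the caller's migratetype */
                        set_pageblock_migratetype(page, ac->migratetype);
                        if (move_freepages_block(zone, page, ac->migratetype,
                                                 NULL)) {
                                spin_unlock_irqrestore(&zone->lock, flags);
                                return true;
                        }
                }
                spin_unlock_irqrestore(&zone->lock, flags);
        }
        return false;
}
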
3582 const struct alloc_context *ac) in get_page_from_freelist() argument
3595 z = ac->preferred_zoneref; in get_page_from_freelist()
3596 for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, in get_page_from_freelist()
3597 ac->nodemask) { in get_page_from_freelist()
3624 if (ac->spread_dirty_pages) { in get_page_from_freelist()
3635 zone != ac->preferred_zoneref->zone) { in get_page_from_freelist()
3643 local_nid = zone_to_nid(ac->preferred_zoneref->zone); in get_page_from_freelist()
3652 ac_classzone_idx(ac), alloc_flags)) { in get_page_from_freelist()
3671 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone)) in get_page_from_freelist()
3685 ac_classzone_idx(ac), alloc_flags)) in get_page_from_freelist()
3693 page = rmqueue(ac->preferred_zoneref->zone, zone, order, in get_page_from_freelist()
3694 gfp_mask, alloc_flags, ac->migratetype); in get_page_from_freelist()
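
Taken together, the get_page_from_freelist() references trace the fast-path scan: start at ac->preferred_zoneref, walk the zonelist bounded by ac->high_zoneidx and ac->nodemask, skip zones that fail the dirty-throttling or watermark checks, and ask rmqueue() for a page of ac->migratetype from the first eligible zone. A condensed outline of that loop follows; helpers such as wmark_pages(), zone_watermark_fast(), node_dirty_ok() and prep_new_page() are assumed from this kernel generation, and the cpuset, node-reclaim and ALLOC_NOFRAGMENT retry logic is left out.

static struct page *
get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
                       const struct alloc_context *ac)
{
        struct zoneref *z = ac->preferred_zoneref;
        struct zone *zone;

        for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
                                        ac->nodemask) {
                struct page *page;
                unsigned long mark;

                /* keep dirty page production spread across allowed nodes */
                if (ac->spread_dirty_pages && !node_dirty_ok(zone->zone_pgdat))
                        continue;

                /* respect the watermark selected by the caller's alloc_flags */
                mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
                if (!zone_watermark_fast(zone, order, mark,
                                         ac_classzone_idx(ac), alloc_flags))
                        continue;

                page = rmqueue(ac->preferred_zoneref->zone, zone, order,
                               gfp_mask, alloc_flags, ac->migratetype);
                if (page) {
                        prep_new_page(page, order, gfp_mask, alloc_flags);
                        return page;
                }
        }
        return NULL;
}
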
3774 const struct alloc_context *ac) in __alloc_pages_cpuset_fallback() argument
3779 alloc_flags|ALLOC_CPUSET, ac); in __alloc_pages_cpuset_fallback()
3786 alloc_flags, ac); in __alloc_pages_cpuset_fallback()
3793 const struct alloc_context *ac, unsigned long *did_some_progress) in __alloc_pages_may_oom() argument
3796 .zonelist = ac->zonelist, in __alloc_pages_may_oom()
3797 .nodemask = ac->nodemask, in __alloc_pages_may_oom()
3825 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); in __alloc_pages_may_oom()
3844 if (ac->high_zoneidx < ZONE_NORMAL) in __alloc_pages_may_oom()
3872 ALLOC_NO_WATERMARKS, ac); in __alloc_pages_may_oom()
3889 unsigned int alloc_flags, const struct alloc_context *ac, in __alloc_pages_direct_compact() argument
3902 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, in __alloc_pages_direct_compact()
3920 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_direct_compact()
3943 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, in should_compact_retry() argument
3973 ret = compaction_zonelist_suitable(ac, order, alloc_flags); in should_compact_retry()
4022 unsigned int alloc_flags, const struct alloc_context *ac, in __alloc_pages_direct_compact() argument
4030 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, in should_compact_retry() argument
4047 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, in should_compact_retry()
4048 ac->nodemask) { in should_compact_retry()
4050 ac_classzone_idx(ac), alloc_flags)) in should_compact_retry()
4111 const struct alloc_context *ac) in __perform_reclaim() argument
4125 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, in __perform_reclaim()
4126 ac->nodemask); in __perform_reclaim()
4140 unsigned int alloc_flags, const struct alloc_context *ac, in __alloc_pages_direct_reclaim() argument
4146 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); in __alloc_pages_direct_reclaim()
4151 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_direct_reclaim()
4159 unreserve_highatomic_pageblock(ac, false); in __alloc_pages_direct_reclaim()
4169 const struct alloc_context *ac) in wake_all_kswapds() argument
4174 enum zone_type high_zoneidx = ac->high_zoneidx; in wake_all_kswapds()
4176 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, high_zoneidx, in wake_all_kswapds()
4177 ac->nodemask) { in wake_all_kswapds()
4279 struct alloc_context *ac, int alloc_flags, in should_reclaim_retry() argument
4302 return unreserve_highatomic_pageblock(ac, true); in should_reclaim_retry()
4311 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, in should_reclaim_retry()
4312 ac->nodemask) { in should_reclaim_retry()
4326 ac_classzone_idx(ac), alloc_flags, available); in should_reclaim_retry()
4369 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac) in check_retry_cpuset() argument
4382 if (cpusets_enabled() && ac->nodemask && in check_retry_cpuset()
4383 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) { in check_retry_cpuset()
4384 ac->nodemask = NULL; in check_retry_cpuset()
4403 struct alloc_context *ac) in __alloc_pages_slowpath() argument
4444 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, in __alloc_pages_slowpath()
4445 ac->high_zoneidx, ac->nodemask); in __alloc_pages_slowpath()
4446 if (!ac->preferred_zoneref->zone) in __alloc_pages_slowpath()
4450 wake_all_kswapds(order, gfp_mask, ac); in __alloc_pages_slowpath()
4456 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_slowpath()
4471 (order > 0 && ac->migratetype != MIGRATE_MOVABLE)) in __alloc_pages_slowpath()
4474 alloc_flags, ac, in __alloc_pages_slowpath()
4532 wake_all_kswapds(order, gfp_mask, ac); in __alloc_pages_slowpath()
4544 ac->nodemask = NULL; in __alloc_pages_slowpath()
4545 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, in __alloc_pages_slowpath()
4546 ac->high_zoneidx, ac->nodemask); in __alloc_pages_slowpath()
4550 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_slowpath()
4563 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
4569 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
4585 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, in __alloc_pages_slowpath()
4596 should_compact_retry(ac, order, alloc_flags, in __alloc_pages_slowpath()
4603 if (check_retry_cpuset(cpuset_mems_cookie, ac)) in __alloc_pages_slowpath()
4607 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); in __alloc_pages_slowpath()
4625 if (check_retry_cpuset(cpuset_mems_cookie, ac)) in __alloc_pages_slowpath()
4661 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac); in __alloc_pages_slowpath()
4669 warn_alloc(gfp_mask, ac->nodemask, in __alloc_pages_slowpath()
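
The __alloc_pages_slowpath() references outline the retry machinery: recompute ac->preferred_zoneref (the zonelist may differ from what the fast path saw), wake kswapd, retry the freelist, then fall back to direct reclaim, direct compaction and finally the OOM killer, with should_reclaim_retry(), should_compact_retry() and check_retry_cpuset() deciding whether to loop. The heavily condensed outline below keeps only the ac-related control flow; the alloc_flags recalculation, costly-order heuristics, __GFP_NOFAIL handling and cpuset fallback are omitted, and gfp_to_alloc_flags() and read_mems_allowed_begin() are assumed from this kernel generation.

static struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
                       struct alloc_context *ac)
{
        unsigned int alloc_flags = gfp_to_alloc_flags(gfp_mask);
        enum compact_priority compact_priority = DEF_COMPACT_PRIORITY;
        enum compact_result compact_result;
        int compaction_retries = 0, no_progress_loops = 0;
        int cpuset_mems_cookie = read_mems_allowed_begin();
        unsigned long did_some_progress = 0;
        struct page *page;

        /* the preferred zone may differ from the fast path's choice */
        ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
                                        ac->high_zoneidx, ac->nodemask);
        if (!ac->preferred_zoneref->zone)
                return NULL;

retry:
        /* the real code gates this on the gfp/alloc flags */
        wake_all_kswapds(order, gfp_mask, ac);

        page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
        if (page)
                return page;

        page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
                                            &did_some_progress);
        if (page)
                return page;

        page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
                                            compact_priority, &compact_result);
        if (page)
                return page;

        if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
                                 did_some_progress > 0, &no_progress_loops))
                goto retry;

        if (did_some_progress > 0 &&
            should_compact_retry(ac, order, alloc_flags, compact_result,
                                 &compact_priority, &compaction_retries))
                goto retry;

        /* a concurrent cpuset update may have invalidated ac->nodemask */
        if (check_retry_cpuset(cpuset_mems_cookie, ac))
                goto retry;

        page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
        if (page)
                return page;
        if (did_some_progress)
                goto retry;

        warn_alloc(gfp_mask, ac->nodemask,
                   "page allocation failure: order:%u", order);
        return NULL;
}
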
4677 struct alloc_context *ac, gfp_t *alloc_mask, in prepare_alloc_pages() argument
4680 ac->high_zoneidx = gfp_zone(gfp_mask); in prepare_alloc_pages()
4681 ac->zonelist = node_zonelist(preferred_nid, gfp_mask); in prepare_alloc_pages()
4682 ac->nodemask = nodemask; in prepare_alloc_pages()
4683 ac->migratetype = gfpflags_to_migratetype(gfp_mask); in prepare_alloc_pages()
4687 if (!ac->nodemask) in prepare_alloc_pages()
4688 ac->nodemask = &cpuset_current_mems_allowed; in prepare_alloc_pages()
4701 if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE) in prepare_alloc_pages()
4708 static inline void finalise_ac(gfp_t gfp_mask, struct alloc_context *ac) in finalise_ac() argument
4711 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE); in finalise_ac()
4718 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, in finalise_ac()
4719 ac->high_zoneidx, ac->nodemask); in finalise_ac()
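
prepare_alloc_pages() and finalise_ac() fill the context in before the first freelist pass: the gfp mask selects the highest usable zone and the migratetype, the preferred node selects the zonelist, and finalise_ac() resolves the first zone to try plus the dirty-spreading policy. A condensed sketch of that setup follows; the ALLOC_CPUSET, fs_reclaim and fault-injection handling of the real prepare_alloc_pages() is left out, and gfp_zone(), node_zonelist() and gfpflags_to_migratetype() are assumed from this kernel generation.

static bool
prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid,
                    nodemask_t *nodemask, struct alloc_context *ac,
                    gfp_t *alloc_mask, unsigned int *alloc_flags)
{
        ac->high_zoneidx = gfp_zone(gfp_mask);
        ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
        ac->nodemask = nodemask;
        ac->migratetype = gfpflags_to_migratetype(gfp_mask);

        /* with cpusets enabled, fall back to the task's allowed mems when
         * the caller did not pass an explicit nodemask */
        if (cpusets_enabled() && !ac->nodemask)
                ac->nodemask = &cpuset_current_mems_allowed;

        if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
                *alloc_flags |= ALLOC_CMA;

        /* the real function can also fail here (e.g. fault injection) */
        return true;
}

static inline void finalise_ac(gfp_t gfp_mask, struct alloc_context *ac)
{
        /* dirty-page spreading only applies to potentially dirtying writes */
        ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);

        /* the first zone the fast path will try; also the statistics target */
        ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
                                        ac->high_zoneidx, ac->nodemask);
}
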
4732 struct alloc_context ac = { }; in __alloc_pages_nodemask() local
4745 if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags)) in __alloc_pages_nodemask()
4748 finalise_ac(gfp_mask, &ac); in __alloc_pages_nodemask()
4754 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask); in __alloc_pages_nodemask()
4757 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac); in __alloc_pages_nodemask()
4768 ac.spread_dirty_pages = false; in __alloc_pages_nodemask()
4774 if (unlikely(ac.nodemask != nodemask)) in __alloc_pages_nodemask()
4775 ac.nodemask = nodemask; in __alloc_pages_nodemask()
4777 page = __alloc_pages_slowpath(alloc_mask, order, &ac); in __alloc_pages_nodemask()
4786 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype); in __alloc_pages_nodemask()
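
At the top level, __alloc_pages_nodemask() builds the context once, attempts the fast path, and only enters the slow path after disabling dirty-page spreading and restoring the caller's original nodemask. A condensed sketch of that flow; the gfp_allowed_mask filtering, MAX_ORDER check and memcg charging of the real function are omitted, and current_gfp_context() and ALLOC_WMARK_LOW are assumed from this kernel generation.

struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
                       nodemask_t *nodemask)
{
        struct alloc_context ac = { };
        gfp_t alloc_mask = gfp_mask;
        unsigned int alloc_flags = ALLOC_WMARK_LOW;
        struct page *page;

        if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask,
                                 &ac, &alloc_mask, &alloc_flags))
                return NULL;

        finalise_ac(gfp_mask, &ac);

        /* avoid mixing movable and unmovable allocations within a pageblock */
        alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask);

        /* fast path: first pass over the zonelist */
        page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
        if (page)
                goto out;

        /* slow path: stop spreading dirty pages, honour the caller's nodemask */
        alloc_mask = current_gfp_context(gfp_mask);
        ac.spread_dirty_pages = false;
        if (unlikely(ac.nodemask != nodemask))
                ac.nodemask = nodemask;

        page = __alloc_pages_slowpath(alloc_mask, order, &ac);

out:
        trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
        return page;
}
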