Lines matching "zone" (full-text search)
13 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
104 * shuffle the whole zone).
113 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
137 struct zone *zone; member
385 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
446 * prev_end_pfn static that contains the end of previous zone in defer_init()
592 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) in page_outside_zone_boundaries() argument
600 seq = zone_span_seqbegin(zone); in page_outside_zone_boundaries()
601 start_pfn = zone->zone_start_pfn; in page_outside_zone_boundaries()
602 sp = zone->spanned_pages; in page_outside_zone_boundaries()
603 if (!zone_spans_pfn(zone, pfn)) in page_outside_zone_boundaries()
605 } while (zone_span_seqretry(zone, seq)); in page_outside_zone_boundaries()
608 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n", in page_outside_zone_boundaries()
609 pfn, zone_to_nid(zone), zone->name, in page_outside_zone_boundaries()
615 static int page_is_consistent(struct zone *zone, struct page *page) in page_is_consistent() argument
619 if (zone != page_zone(page)) in page_is_consistent()
625 * Temporary debugging check for pages not lying within a given zone.
627 static int __maybe_unused bad_range(struct zone *zone, struct page *page) in bad_range() argument
629 if (page_outside_zone_boundaries(zone, page)) in bad_range()
631 if (!page_is_consistent(zone, page)) in bad_range()
637 static inline int __maybe_unused bad_range(struct zone *zone, struct page *page) in bad_range() argument
767 static inline bool set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
780 __mod_zone_freepage_state(zone, -(1 << order), migratetype); in set_page_guard()
785 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
795 __mod_zone_freepage_state(zone, (1 << order), migratetype); in clear_page_guard()
798 static inline bool set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
800 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
816 * (d) a page and its buddy are in the same zone.
819 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
833 * zone check is done late to avoid uselessly calculating in page_is_buddy()
834 * zone/node ids for pages that could never merge. in page_is_buddy()
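Read together, the fragments above give the conditions checked (under zone->lock) before a page and its buddy are merged. As a condensed, illustrative sketch only, not the kernel's literal page_is_buddy() body (which also copes with debug page guards), the test amounts to roughly:

	/* illustrative sketch; helper names follow ~v5.10 conventions */
	bool mergeable = PageBuddy(buddy) &&                         /* buddy is free and in the allocator */
			 buddy_order(buddy) == order &&              /* same order */
			 page_zone_id(page) == page_zone_id(buddy);  /* same zone, tested last as noted above */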
845 static inline struct capture_control *task_capc(struct zone *zone) in task_capc() argument
852 capc->cc->zone == zone ? capc : NULL; in task_capc()
881 static inline struct capture_control *task_capc(struct zone *zone) in task_capc() argument
895 static inline void add_to_free_list(struct page *page, struct zone *zone, in add_to_free_list() argument
898 struct free_area *area = &zone->free_area[order]; in add_to_free_list()
905 static inline void add_to_free_list_tail(struct page *page, struct zone *zone, in add_to_free_list_tail() argument
908 struct free_area *area = &zone->free_area[order]; in add_to_free_list_tail()
919 static inline void move_to_free_list(struct page *page, struct zone *zone, in move_to_free_list() argument
922 struct free_area *area = &zone->free_area[order]; in move_to_free_list()
927 static inline void del_page_from_free_list(struct page *page, struct zone *zone, in del_page_from_free_list() argument
937 zone->free_area[order].nr_free--; in del_page_from_free_list()
996 struct zone *zone, unsigned int order, in __free_one_page() argument
999 struct capture_control *capc = task_capc(zone); in __free_one_page()
1008 VM_BUG_ON(!zone_is_initialized(zone)); in __free_one_page()
1013 __mod_zone_freepage_state(zone, 1 << order, migratetype); in __free_one_page()
1016 VM_BUG_ON_PAGE(bad_range(zone, page), page); in __free_one_page()
1021 __mod_zone_freepage_state(zone, -(1 << order), in __free_one_page()
1037 clear_page_guard(zone, buddy, order, migratetype); in __free_one_page()
1039 del_page_from_free_list(buddy, zone, order); in __free_one_page()
1054 if (unlikely(has_isolate_pageblock(zone))) { in __free_one_page()
1081 add_to_free_list_tail(page, zone, order, migratetype); in __free_one_page()
1083 add_to_free_list(page, zone, order, migratetype); in __free_one_page()
1342 * Assumes all pages on list are in same zone, and of same order.
1345 * If the zone was previously in an "all pages pinned" state then look to
1348 * And clear the zone's pages_scanned counter, to hold off the "all pages are
1351 static void free_pcppages_bulk(struct zone *zone, int count, in free_pcppages_bulk() argument
1401 * under zone->lock. It is believed the overhead of in free_pcppages_bulk()
1412 spin_lock(&zone->lock); in free_pcppages_bulk()
1413 isolated_pageblocks = has_isolate_pageblock(zone); in free_pcppages_bulk()
1427 __free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE); in free_pcppages_bulk()
1430 spin_unlock(&zone->lock); in free_pcppages_bulk()
1433 static void free_one_page(struct zone *zone, in free_one_page() argument
1438 spin_lock(&zone->lock); in free_one_page()
1439 if (unlikely(has_isolate_pageblock(zone) || in free_one_page()
1443 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); in free_one_page()
1444 spin_unlock(&zone->lock); in free_one_page()
1448 unsigned long zone, int nid) in __init_single_page() argument
1451 set_page_links(page, zone, nid, pfn); in __init_single_page()
1460 if (!is_highmem_idx(zone)) in __init_single_page()
1478 struct zone *zone = &pgdat->node_zones[zid]; in init_reserved_page() local
1480 if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone)) in init_reserved_page()
1622 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1631 * belong to a single zone. We assume that a border between node0 and node1
1638 unsigned long end_pfn, struct zone *zone) in __pageblock_pfn_to_page() argument
1653 if (page_zone(start_page) != zone) in __pageblock_pfn_to_page()
1665 void set_zone_contiguous(struct zone *zone) in set_zone_contiguous() argument
1667 unsigned long block_start_pfn = zone->zone_start_pfn; in set_zone_contiguous()
1671 for (; block_start_pfn < zone_end_pfn(zone); in set_zone_contiguous()
1675 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone)); in set_zone_contiguous()
1678 block_end_pfn, zone)) in set_zone_contiguous()
1684 zone->contiguous = true; in set_zone_contiguous()
1687 void clear_zone_contiguous(struct zone *zone) in clear_zone_contiguous() argument
1689 zone->contiguous = false; in clear_zone_contiguous()
1778 static unsigned long __init deferred_init_pages(struct zone *zone, in deferred_init_pages() argument
1783 int nid = zone_to_nid(zone); in deferred_init_pages()
1785 int zid = zone_idx(zone); in deferred_init_pages()
1804 * This function is meant to pre-load the iterator for the zone init.
1810 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone, in deferred_init_mem_pfn_range_in_zone() argument
1817 * Start out by walking through the ranges in this zone that have in deferred_init_mem_pfn_range_in_zone()
1821 for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) { in deferred_init_mem_pfn_range_in_zone()
1844 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn, in deferred_init_maxorder() argument
1853 for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) { in deferred_init_maxorder()
1860 nr_pages += deferred_init_pages(zone, *start_pfn, t); in deferred_init_maxorder()
1871 for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) { in deferred_init_maxorder()
1892 struct zone *zone = arg; in deferred_init_memmap_chunk() local
1895 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn); in deferred_init_memmap_chunk()
1902 deferred_init_maxorder(&i, zone, &spfn, &epfn); in deferred_init_memmap_chunk()
1922 struct zone *zone; in deferred_init_memmap() local
1944 * Once we unlock here, the zone cannot be grown anymore, thus if an in deferred_init_memmap()
1945 * interrupt thread must allocate this early in boot, zone must be in deferred_init_memmap()
1950 /* Only the highest zone is deferred so find it */ in deferred_init_memmap()
1952 zone = pgdat->node_zones + zid; in deferred_init_memmap()
1953 if (first_init_pfn < zone_end_pfn(zone)) in deferred_init_memmap()
1957 /* If the zone is empty somebody else may have cleared out the zone */ in deferred_init_memmap()
1958 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, in deferred_init_memmap()
1968 .fn_arg = zone, in deferred_init_memmap()
1977 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, in deferred_init_memmap()
1981 /* Sanity check that the next zone really is unpopulated */ in deferred_init_memmap()
1982 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone)); in deferred_init_memmap()
1992 * If this zone has deferred pages, try to grow it by initializing enough
1998 * Return true when zone was grown, otherwise return false. We return true even
2007 deferred_grow_zone(struct zone *zone, unsigned int order) in deferred_grow_zone() argument
2010 pg_data_t *pgdat = zone->zone_pgdat; in deferred_grow_zone()
2016 /* Only the last zone may have deferred pages */ in deferred_grow_zone()
2017 if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat)) in deferred_grow_zone()
2023 * If someone grew this zone while we were waiting for spinlock, return in deferred_grow_zone()
2031 /* If the zone is empty somebody else may have cleared out the zone */ in deferred_grow_zone()
2032 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, in deferred_grow_zone()
2049 nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn); in deferred_grow_zone()
2074 _deferred_grow_zone(struct zone *zone, unsigned int order) in _deferred_grow_zone() argument
2076 return deferred_grow_zone(zone, order); in _deferred_grow_zone()
2083 struct zone *zone; in page_alloc_init_late() local
2102 for_each_populated_zone(zone) in page_alloc_init_late()
2103 zone_pcp_update(zone); in page_alloc_init_late()
2121 for_each_populated_zone(zone) in page_alloc_init_late()
2122 set_zone_contiguous(zone); in page_alloc_init_late()
2170 static inline void expand(struct zone *zone, struct page *page, in expand() argument
2178 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); in expand()
2186 if (set_page_guard(zone, &page[size], high, migratetype)) in expand()
2189 add_to_free_list(&page[size], zone, high, migratetype); in expand()
2317 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, in __rmqueue_smallest() argument
2326 area = &(zone->free_area[current_order]); in __rmqueue_smallest()
2330 del_page_from_free_list(page, zone, current_order); in __rmqueue_smallest()
2331 expand(zone, page, order, current_order, migratetype); in __rmqueue_smallest()
2357 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback() argument
2360 return __rmqueue_smallest(zone, order, MIGRATE_CMA); in __rmqueue_cma_fallback()
2363 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback() argument
2372 static int move_freepages(struct zone *zone, in move_freepages() argument
2402 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); in move_freepages()
2403 VM_BUG_ON_PAGE(page_zone(page) != zone, page); in move_freepages()
2406 move_to_free_list(page, zone, order, migratetype); in move_freepages()
2414 int move_freepages_block(struct zone *zone, struct page *page, in move_freepages_block() argument
2426 /* Do not cross zone boundaries */ in move_freepages_block()
2427 if (!zone_spans_pfn(zone, start_pfn)) in move_freepages_block()
2429 if (!zone_spans_pfn(zone, end_pfn)) in move_freepages_block()
2432 return move_freepages(zone, start_pfn, end_pfn, migratetype, in move_freepages_block()
2480 static inline bool boost_watermark(struct zone *zone) in boost_watermark() argument
2492 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) in boost_watermark()
2495 max_boost = mult_frac(zone->_watermark[WMARK_HIGH], in boost_watermark()
2511 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, in boost_watermark()
2525 static void steal_suitable_fallback(struct zone *zone, struct page *page, in steal_suitable_fallback() argument
2552 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD)) in steal_suitable_fallback()
2553 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); in steal_suitable_fallback()
2559 free_pages = move_freepages_block(zone, page, start_type, in steal_suitable_fallback()
2583 /* moving whole block can fail due to zone boundary conditions */ in steal_suitable_fallback()
2598 move_to_free_list(page, zone, current_order, start_type); in steal_suitable_fallback()
2642 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, in reserve_highatomic_pageblock() argument
2649 * Limit the number reserved to 1 pageblock or roughly 1% of a zone. in reserve_highatomic_pageblock()
2652 max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages; in reserve_highatomic_pageblock()
2653 if (zone->nr_reserved_highatomic >= max_managed) in reserve_highatomic_pageblock()
2656 spin_lock_irqsave(&zone->lock, flags); in reserve_highatomic_pageblock()
2659 if (zone->nr_reserved_highatomic >= max_managed) in reserve_highatomic_pageblock()
2666 zone->nr_reserved_highatomic += pageblock_nr_pages; in reserve_highatomic_pageblock()
2668 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); in reserve_highatomic_pageblock()
2672 spin_unlock_irqrestore(&zone->lock, flags); in reserve_highatomic_pageblock()
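A rough worked example of the cap computed above: assuming 4 KiB pages and pageblock_nr_pages = 512 (typical for x86-64 with 2 MiB pageblocks), a zone with 1,048,576 managed pages (4 GiB) gets max_managed = 1,048,576/100 + 512 = 10,997 pages, so the highatomic reserve is capped at roughly 43 MiB, and it only ever grows in whole pageblocks.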
2690 struct zone *zone; in unreserve_highatomic_pageblock() local
2695 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, in unreserve_highatomic_pageblock()
2701 if (!force && zone->nr_reserved_highatomic <= in unreserve_highatomic_pageblock()
2705 spin_lock_irqsave(&zone->lock, flags); in unreserve_highatomic_pageblock()
2707 struct free_area *area = &(zone->free_area[order]); in unreserve_highatomic_pageblock()
2728 zone->nr_reserved_highatomic -= min( in unreserve_highatomic_pageblock()
2730 zone->nr_reserved_highatomic); in unreserve_highatomic_pageblock()
2743 ret = move_freepages_block(zone, page, ac->migratetype, in unreserve_highatomic_pageblock()
2746 spin_unlock_irqrestore(&zone->lock, flags); in unreserve_highatomic_pageblock()
2750 spin_unlock_irqrestore(&zone->lock, flags); in unreserve_highatomic_pageblock()
2767 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, in __rmqueue_fallback() argument
2792 area = &(zone->free_area[current_order]); in __rmqueue_fallback()
2818 area = &(zone->free_area[current_order]); in __rmqueue_fallback()
2834 steal_suitable_fallback(zone, page, alloc_flags, start_migratetype, in __rmqueue_fallback()
2845 __rmqueue_with_cma_reuse(struct zone *zone, unsigned int order, in __rmqueue_with_cma_reuse() argument
2850 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue_with_cma_reuse()
2855 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue_with_cma_reuse()
2859 __rmqueue_fallback(zone, order, migratetype, alloc_flags)) in __rmqueue_with_cma_reuse()
2867 * Call me with the zone->lock already held.
2870 __rmqueue(struct zone *zone, unsigned int order, int migratetype, in __rmqueue() argument
2876 page = __rmqueue_with_cma_reuse(zone, order, migratetype, alloc_flags); in __rmqueue()
2883 * allocating from CMA when over half of the zone's free memory in __rmqueue()
2887 zone_page_state(zone, NR_FREE_CMA_PAGES) > in __rmqueue()
2888 zone_page_state(zone, NR_FREE_PAGES) / 2) { in __rmqueue()
2889 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
2895 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue()
2898 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
2900 if (!page && __rmqueue_fallback(zone, order, migratetype, in __rmqueue()
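A quick worked example of the CMA heuristic described in the comment above: if a zone currently has 1,000,000 free pages of which 600,000 sit in CMA pageblocks, NR_FREE_CMA_PAGES (600,000) exceeds NR_FREE_PAGES / 2 (500,000), so a movable request is steered to __rmqueue_cma_fallback() first to draw the CMA share of free memory back down.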
2915 static int rmqueue_bulk(struct zone *zone, unsigned int order, in rmqueue_bulk() argument
2921 spin_lock(&zone->lock); in rmqueue_bulk()
2923 struct page *page = __rmqueue(zone, order, migratetype, in rmqueue_bulk()
2944 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, in rmqueue_bulk()
2954 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); in rmqueue_bulk()
2955 spin_unlock(&zone->lock); in rmqueue_bulk()
2968 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) in drain_zone_pages() argument
2977 free_pcppages_bulk(zone, to_drain, pcp); in drain_zone_pages()
2983 * Drain pcplists of the indicated processor and zone.
2989 static void drain_pages_zone(unsigned int cpu, struct zone *zone) in drain_pages_zone() argument
2996 pset = per_cpu_ptr(zone->pageset, cpu); in drain_pages_zone()
3000 free_pcppages_bulk(zone, pcp->count, pcp); in drain_pages_zone()
3013 struct zone *zone; in drain_pages() local
3015 for_each_populated_zone(zone) { in drain_pages()
3016 drain_pages_zone(cpu, zone); in drain_pages()
3023 * The CPU has to be pinned. When zone parameter is non-NULL, spill just
3024 * the single zone's pages.
3026 void drain_local_pages(struct zone *zone) in drain_local_pages() argument
3030 if (zone) in drain_local_pages()
3031 drain_pages_zone(cpu, zone); in drain_local_pages()
3050 drain_local_pages(drain->zone); in drain_local_pages_wq()
3057 * When zone parameter is non-NULL, spill just the single zone's pages.
3061 void drain_all_pages(struct zone *zone) in drain_all_pages() argument
3080 * a zone. Such callers are primarily CMA and memory hotplug and need in drain_all_pages()
3084 if (!zone) in drain_all_pages()
3097 struct zone *z; in drain_all_pages()
3100 if (zone) { in drain_all_pages()
3101 pcp = per_cpu_ptr(zone->pageset, cpu); in drain_all_pages()
3123 drain->zone = zone; in drain_all_pages()
3140 void mark_free_pages(struct zone *zone) in mark_free_pages() argument
3147 if (zone_is_empty(zone)) in mark_free_pages()
3150 spin_lock_irqsave(&zone->lock, flags); in mark_free_pages()
3152 max_zone_pfn = zone_end_pfn(zone); in mark_free_pages()
3153 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) in mark_free_pages()
3162 if (page_zone(page) != zone) in mark_free_pages()
3171 &zone->free_area[order].free_list[t], lru) { in mark_free_pages()
3184 spin_unlock_irqrestore(&zone->lock, flags); in mark_free_pages()
3202 struct zone *zone = page_zone(page); in free_unref_page_commit() local
3218 free_one_page(zone, page, pfn, 0, migratetype, in free_unref_page_commit()
3225 pcp = &this_cpu_ptr(zone->pageset)->pcp; in free_unref_page_commit()
3230 free_pcppages_bulk(zone, batch, pcp); in free_unref_page_commit()
3313 struct zone *zone; in __isolate_free_page() local
3318 zone = page_zone(page); in __isolate_free_page()
3328 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); in __isolate_free_page()
3329 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) in __isolate_free_page()
3332 __mod_zone_freepage_state(zone, -(1UL << order), mt); in __isolate_free_page()
3337 del_page_from_free_list(page, zone, order); in __isolate_free_page()
3369 struct zone *zone = page_zone(page); in __putback_isolated_page() local
3371 /* zone lock should be held when this function is called */ in __putback_isolated_page()
3372 lockdep_assert_held(&zone->lock); in __putback_isolated_page()
3375 __free_one_page(page, page_to_pfn(page), zone, order, mt, in __putback_isolated_page()
3384 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z) in zone_statistics()
3407 static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype, in __rmqueue_pcplist() argument
3416 pcp->count += rmqueue_bulk(zone, 0, in __rmqueue_pcplist()
3432 static struct page *rmqueue_pcplist(struct zone *preferred_zone, in rmqueue_pcplist()
3433 struct zone *zone, gfp_t gfp_flags, in rmqueue_pcplist() argument
3442 pcp = &this_cpu_ptr(zone->pageset)->pcp; in rmqueue_pcplist()
3444 page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list); in rmqueue_pcplist()
3447 zone_statistics(preferred_zone, zone); in rmqueue_pcplist()
3454 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
3457 struct page *rmqueue(struct zone *preferred_zone, in rmqueue()
3458 struct zone *zone, unsigned int order, in rmqueue() argument
3473 page = rmqueue_pcplist(preferred_zone, zone, gfp_flags, in rmqueue()
3484 spin_lock_irqsave(&zone->lock, flags); in rmqueue()
3495 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); in rmqueue()
3500 page = __rmqueue(zone, order, migratetype, alloc_flags); in rmqueue()
3502 spin_unlock(&zone->lock); in rmqueue()
3505 __mod_zone_freepage_state(zone, -(1 << order), in rmqueue()
3509 zone_statistics(preferred_zone, zone); in rmqueue()
3514 if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) { in rmqueue()
3515 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); in rmqueue()
3516 wakeup_kswapd(zone, 0, 0, zone_idx(zone)); in rmqueue()
3519 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); in rmqueue()
3601 static inline long __zone_watermark_unusable_free(struct zone *z, in __zone_watermark_unusable_free()
3627 * one free page of a suitable size. Checking now avoids taking the zone lock
3630 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in __zone_watermark_ok()
3694 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in zone_watermark_ok()
3701 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, in zone_watermark_fast()
3745 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, in zone_watermark_ok_safe()
3758 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) in zone_allows_reclaim() argument
3760 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= in zone_allows_reclaim()
3764 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) in zone_allows_reclaim() argument
3771 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
3772 * fragmentation is subtle. If the preferred zone was HIGHMEM then
3773 * premature use of a lower zone may cause lowmem pressure problems that
3774 * are worse than fragmentation. If the next zone is ZONE_DMA then it is
3779 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) in alloc_flags_nofragment() argument
3790 if (!zone) in alloc_flags_nofragment()
3793 if (zone_idx(zone) != ZONE_NORMAL) in alloc_flags_nofragment()
3798 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume in alloc_flags_nofragment()
3802 if (nr_online_nodes > 1 && !populated_zone(--zone)) in alloc_flags_nofragment()
3833 struct zone *zone; in get_page_from_freelist() local
3839 * Scan zonelist, looking for a zone with enough free. in get_page_from_freelist()
3844 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, in get_page_from_freelist()
3851 !__cpuset_zone_allowed(zone, gfp_mask)) in get_page_from_freelist()
3873 if (last_pgdat_dirty_limit == zone->zone_pgdat) in get_page_from_freelist()
3876 if (!node_dirty_ok(zone->zone_pgdat)) { in get_page_from_freelist()
3877 last_pgdat_dirty_limit = zone->zone_pgdat; in get_page_from_freelist()
3883 zone != ac->preferred_zoneref->zone) { in get_page_from_freelist()
3891 local_nid = zone_to_nid(ac->preferred_zoneref->zone); in get_page_from_freelist()
3892 if (zone_to_nid(zone) != local_nid) { in get_page_from_freelist()
3898 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); in get_page_from_freelist()
3899 if (!zone_watermark_fast(zone, order, mark, in get_page_from_freelist()
3906 * Watermark failed for this zone, but see if we can in get_page_from_freelist()
3907 * grow this zone if it contains deferred pages. in get_page_from_freelist()
3910 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
3920 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone)) in get_page_from_freelist()
3923 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); in get_page_from_freelist()
3933 if (zone_watermark_ok(zone, order, mark, in get_page_from_freelist()
3942 page = rmqueue(ac->preferred_zoneref->zone, zone, order, in get_page_from_freelist()
3952 reserve_highatomic_pageblock(page, zone, order); in get_page_from_freelist()
3957 /* Try again if zone has deferred pages */ in get_page_from_freelist()
3959 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
4158 * At least in one zone compaction wasn't deferred or skipped, so let's in __alloc_pages_direct_compact()
4172 struct zone *zone = page_zone(page); in __alloc_pages_direct_compact() local
4174 zone->compact_blockskip_flush = false; in __alloc_pages_direct_compact()
4175 compaction_defer_reset(zone, order, true); in __alloc_pages_direct_compact()
4210 * compaction considers all the zone as desperately out of memory in should_compact_retry()
4284 struct zone *zone; in should_compact_retry() local
4296 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in should_compact_retry()
4298 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), in should_compact_retry()
4450 struct zone *zone; in wake_all_kswapds() local
4454 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, in wake_all_kswapds()
4456 if (last_pgdat != zone->zone_pgdat) in wake_all_kswapds()
4457 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); in wake_all_kswapds()
4458 last_pgdat = zone->zone_pgdat; in wake_all_kswapds()
4561 struct zone *zone; in should_reclaim_retry() local
4590 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in should_reclaim_retry()
4594 unsigned long min_wmark = min_wmark_pages(zone); in should_reclaim_retry()
4597 available = reclaimable = zone_reclaimable_pages(zone); in should_reclaim_retry()
4598 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); in should_reclaim_retry()
4604 wmark = __zone_watermark_ok(zone, order, min_wmark, in should_reclaim_retry()
4618 write_pending = zone_page_state_snapshot(zone, in should_reclaim_retry()
4730 if (!ac->preferred_zoneref->zone) in __alloc_pages_slowpath()
4996 /* Dirty zone balancing only done in the fast path */ in prepare_alloc_pages()
5000 * The preferred zone is used for statistics but crucially it is in prepare_alloc_pages()
5040 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask); in __alloc_pages_nodemask()
5375 * @offset: The zone index of the highest zone
5378 * high watermark within all zones at or below a given zone index. For each
5379 * zone, the number of pages is calculated as:
5388 struct zone *zone; in nr_free_zone_pages() local
5395 for_each_zone_zonelist(zone, z, zonelist, offset) { in nr_free_zone_pages()
5396 unsigned long size = zone_managed_pages(zone); in nr_free_zone_pages()
5397 unsigned long high = high_wmark_pages(zone); in nr_free_zone_pages()
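The arithmetic the doc comment above refers to is not part of the matched lines, but from the two values read here each zone contributes, in substance, max(zone_managed_pages(zone) - high_wmark_pages(zone), 0) to the returned sum, taken over every populated zone at or below @offset.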
5420 static inline void show_node(struct zone *zone) in show_node() argument
5423 printk("Node %d ", zone_to_nid(zone)); in show_node()
5433 struct zone *zone; in si_mem_available() local
5439 for_each_zone(zone) in si_mem_available()
5440 wmark_low += low_wmark_pages(zone); in si_mem_available()
5501 struct zone *zone = &pgdat->node_zones[zone_type]; in si_meminfo_node() local
5503 if (is_highmem(zone)) { in si_meminfo_node()
5504 managed_highpages += zone_managed_pages(zone); in si_meminfo_node()
5505 free_highpages += zone_page_state(zone, NR_FREE_PAGES); in si_meminfo_node()
5580 struct zone *zone; in show_free_areas() local
5583 for_each_populated_zone(zone) { in show_free_areas()
5584 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
5588 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; in show_free_areas()
5671 for_each_populated_zone(zone) { in show_free_areas()
5674 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
5679 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; in show_free_areas()
5681 show_node(zone); in show_free_areas()
5704 zone->name, in show_free_areas()
5705 K(zone_page_state(zone, NR_FREE_PAGES)), in show_free_areas()
5706 K(min_wmark_pages(zone)), in show_free_areas()
5707 K(low_wmark_pages(zone)), in show_free_areas()
5708 K(high_wmark_pages(zone)), in show_free_areas()
5709 K(zone->nr_reserved_highatomic), in show_free_areas()
5710 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)), in show_free_areas()
5711 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)), in show_free_areas()
5712 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)), in show_free_areas()
5713 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)), in show_free_areas()
5714 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)), in show_free_areas()
5715 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)), in show_free_areas()
5716 K(zone->present_pages), in show_free_areas()
5717 K(zone_managed_pages(zone)), in show_free_areas()
5718 K(zone_page_state(zone, NR_MLOCK)), in show_free_areas()
5719 K(zone_page_state(zone, NR_PAGETABLE)), in show_free_areas()
5720 K(zone_page_state(zone, NR_BOUNCE)), in show_free_areas()
5722 K(this_cpu_read(zone->pageset->pcp.count)), in show_free_areas()
5723 K(zone_page_state(zone, NR_FREE_CMA_PAGES))); in show_free_areas()
5726 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]); in show_free_areas()
5730 for_each_populated_zone(zone) { in show_free_areas()
5735 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
5737 show_node(zone); in show_free_areas()
5738 printk(KERN_CONT "%s: ", zone->name); in show_free_areas()
5740 spin_lock_irqsave(&zone->lock, flags); in show_free_areas()
5742 struct free_area *area = &zone->free_area[order]; in show_free_areas()
5754 spin_unlock_irqrestore(&zone->lock, flags); in show_free_areas()
5771 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) in zoneref_set_zone() argument
5773 zoneref->zone = zone; in zoneref_set_zone()
5774 zoneref->zone_idx = zone_idx(zone); in zoneref_set_zone()
5778 * Builds allocation fallback zone lists.
5784 struct zone *zone; in build_zonerefs_node() local
5790 zone = pgdat->node_zones + zone_type; in build_zonerefs_node()
5791 if (populated_zone(zone)) { in build_zonerefs_node()
5792 zoneref_set_zone(zone, &zonerefs[nr_zones++]); in build_zonerefs_node()
5896 * This results in maximum locality--normal zone overflows into local
5897 * DMA zone, if any--but risks exhausting DMA zone.
5915 zonerefs->zone = NULL; in build_zonelists_in_node_order()
5930 zonerefs->zone = NULL; in build_thisnode_zonelists()
5935 * Build zonelists ordered by zone and nodes within zones.
5936 * This results in conserving DMA zone[s] until all Normal memory is
5938 * may still exist in local DMA zone.
5976 * I.e., first node id of first zone in arg node's generic zonelist.
5987 return zone_to_nid(z->zone); in local_memory_node()
6028 zonerefs->zone = NULL; in build_zonelists()
6047 * Other parts of the kernel may not check if the zone is available.
6081 * i.e., the node of the first zone in the generic zonelist. in __build_all_zonelists()
6105 * each zone will be allocated later when the per cpu in build_all_zonelists_init()
6143 * more accurate, but expensive to check per-zone. This check is in build_all_zonelists()
6157 pr_info("Policy zone: %s\n", zone_names[policy_zone]); in build_all_zonelists()
6161 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
6163 overlap_memmap_init(unsigned long zone, unsigned long *pfn) in overlap_memmap_init() argument
6167 if (mirrored_kernelcore && zone == ZONE_MOVABLE) { in overlap_memmap_init()
6190 * zone stats (e.g., nr_isolate_pageblock) are touched.
6192 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, in memmap_init_zone() argument
6211 if (zone == ZONE_DEVICE) { in memmap_init_zone()
6227 if (overlap_memmap_init(zone, &pfn)) in memmap_init_zone()
6234 __init_single_page(page, pfn, zone, nid); in memmap_init_zone()
6252 void __ref memmap_init_zone_device(struct zone *zone, in memmap_init_zone_device() argument
6258 struct pglist_data *pgdat = zone->zone_pgdat; in memmap_init_zone_device()
6260 unsigned long zone_idx = zone_idx(zone); in memmap_init_zone_device()
6264 if (WARN_ON_ONCE(!pgmap || zone_idx(zone) != ZONE_DEVICE)) in memmap_init_zone_device()
6284 * phase for it to be fully associated with a zone. in memmap_init_zone_device()
6320 static void __meminit zone_init_free_lists(struct zone *zone) in zone_init_free_lists() argument
6324 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); in zone_init_free_lists()
6325 zone->free_area[order].nr_free = 0; in zone_init_free_lists()
6345 * - zone and node links point to zone and node that span the page if the
6346 * hole is in the middle of a zone
6347 * - zone and node links point to adjacent zone/node if the hole falls on
6348 * the zone boundary; the pages in such holes will be prepended to the
6349 * zone/node above the hole except for the trailing pages in the last
6350 * section that will be appended to the zone/node below.
6354 int zone, int node) in init_unavailable_range() argument
6365 __init_single_page(pfn_to_page(pfn), pfn, zone, node); in init_unavailable_range()
6371 pr_info("On node %d, zone %s: %lld pages in unavailable ranges", in init_unavailable_range()
6372 node, zone_names[zone], pgcnt); in init_unavailable_range()
6377 int zone, int node) in init_unavailable_range() argument
6382 static void __init memmap_init_zone_range(struct zone *zone, in memmap_init_zone_range() argument
6387 unsigned long zone_start_pfn = zone->zone_start_pfn; in memmap_init_zone_range()
6388 unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages; in memmap_init_zone_range()
6389 int nid = zone_to_nid(zone), zone_id = zone_idx(zone); in memmap_init_zone_range()
6416 struct zone *zone = node->node_zones + j; in memmap_init() local
6418 if (!populated_zone(zone)) in memmap_init()
6421 memmap_init_zone_range(zone, start_pfn, end_pfn, in memmap_init()
6431 * Append the pages in this hole to the highest zone in the last in memmap_init()
6445 unsigned long zone, in arch_memmap_init() argument
6450 static int zone_batchsize(struct zone *zone) in zone_batchsize() argument
6457 * size of the zone. in zone_batchsize()
6459 batch = zone_managed_pages(zone) / 1024; in zone_batchsize()
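As a rough worked example of the line above: a zone with 262,144 managed pages (1 GiB with 4 KiB pages) starts with batch = 256; the remainder of zone_batchsize() (not matched here) further clamps and rounds that value before it is used for the per-CPU page lists.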
6564 static void pageset_set_high_and_batch(struct zone *zone, in pageset_set_high_and_batch() argument
6569 (zone_managed_pages(zone) / in pageset_set_high_and_batch()
6572 pageset_set_batch(pcp, zone_batchsize(zone)); in pageset_set_high_and_batch()
6575 static void __meminit zone_pageset_init(struct zone *zone, int cpu) in zone_pageset_init() argument
6577 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu); in zone_pageset_init()
6580 pageset_set_high_and_batch(zone, pcp); in zone_pageset_init()
6583 void __meminit setup_zone_pageset(struct zone *zone) in setup_zone_pageset() argument
6586 zone->pageset = alloc_percpu(struct per_cpu_pageset); in setup_zone_pageset()
6588 zone_pageset_init(zone, cpu); in setup_zone_pageset()
6598 struct zone *zone; in setup_per_cpu_pageset() local
6601 for_each_populated_zone(zone) in setup_per_cpu_pageset()
6602 setup_zone_pageset(zone); in setup_per_cpu_pageset()
6623 static __meminit void zone_pcp_init(struct zone *zone) in zone_pcp_init() argument
6630 zone->pageset = &boot_pageset; in zone_pcp_init()
6632 if (populated_zone(zone)) in zone_pcp_init()
6633 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n", in zone_pcp_init()
6634 zone->name, zone->present_pages, in zone_pcp_init()
6635 zone_batchsize(zone)); in zone_pcp_init()
6638 void __meminit init_currently_empty_zone(struct zone *zone, in init_currently_empty_zone() argument
6642 struct pglist_data *pgdat = zone->zone_pgdat; in init_currently_empty_zone()
6643 int zone_idx = zone_idx(zone) + 1; in init_currently_empty_zone()
6648 zone->zone_start_pfn = zone_start_pfn; in init_currently_empty_zone()
6651 "Initialising map node %d zone %lu pfns %lu -> %lu\n", in init_currently_empty_zone()
6653 (unsigned long)zone_idx(zone), in init_currently_empty_zone()
6656 zone_init_free_lists(zone); in init_currently_empty_zone()
6657 zone->initialized = 1; in init_currently_empty_zone()
6690 * This finds a zone that can be used for ZONE_MOVABLE pages. The
6692 * increasing memory addresses so that the "highest" populated zone is used
6711 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
6715 * is distributed. This helper function adjusts the zone ranges
6717 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
6748 * Return the number of pages a zone spans in a node, including holes
6764 /* Get the start and end of the zone */ in zone_spanned_pages_in_node()
6771 /* Check that this node has pages within the zone's required range */ in zone_spanned_pages_in_node()
6775 /* Move the zone boundaries inside the node if necessary */ in zone_spanned_pages_in_node()
6816 /* Return the number of page frames in holes in a zone on a node */
6875 struct zone *zone = pgdat->node_zones + i; in calculate_node_totalpages() local
6893 zone->zone_start_pfn = zone_start_pfn; in calculate_node_totalpages()
6895 zone->zone_start_pfn = 0; in calculate_node_totalpages()
6896 zone->spanned_pages = size; in calculate_node_totalpages()
6897 zone->present_pages = real_size; in calculate_node_totalpages()
6911 * Calculate the size of the zone->blockflags rounded to an unsigned long
6931 struct zone *zone, in setup_usemap() argument
6936 zone->pageblock_flags = NULL; in setup_usemap()
6938 zone->pageblock_flags = in setup_usemap()
6941 if (!zone->pageblock_flags) in setup_usemap()
6942 panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n", in setup_usemap()
6943 usemapsize, zone->name, pgdat->node_id); in setup_usemap()
6947 static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone, in setup_usemap() argument
6995 * the zone and SPARSEMEM is in use. If there are holes within the in calc_memmap_size()
6996 * zone, each populated memory region may cost us one or two extra in calc_memmap_size()
7051 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid, in zone_init_internals() argument
7054 atomic_long_set(&zone->managed_pages, remaining_pages); in zone_init_internals()
7055 zone_set_nid(zone, nid); in zone_init_internals()
7056 zone->name = zone_names[idx]; in zone_init_internals()
7057 zone->zone_pgdat = NODE_DATA(nid); in zone_init_internals()
7058 spin_lock_init(&zone->lock); in zone_init_internals()
7059 zone_seqlock_init(zone); in zone_init_internals()
7060 zone_pcp_init(zone); in zone_init_internals()
7064 * Set up the zone data structures
7083 * Set up the zone data structures:
7100 struct zone *zone = pgdat->node_zones + j; in free_area_init_core() local
7102 unsigned long zone_start_pfn = zone->zone_start_pfn; in free_area_init_core()
7104 size = zone->spanned_pages; in free_area_init_core()
7105 freesize = zone->present_pages; in free_area_init_core()
7109 * is used by this zone for memmap. This affects the watermark in free_area_init_core()
7118 " %s zone: %lu pages used for memmap\n", in free_area_init_core()
7121 pr_warn(" %s zone: %lu pages exceeds freesize %lu\n", in free_area_init_core()
7128 printk(KERN_DEBUG " %s zone: %lu pages reserved\n", in free_area_init_core()
7144 zone_init_internals(zone, j, nid, freesize); in free_area_init_core()
7150 setup_usemap(pgdat, zone, zone_start_pfn, size); in free_area_init_core()
7151 init_currently_empty_zone(zone, zone_start_pfn, size); in free_area_init_core()
7174 * The zone's endpoints aren't required to be MAX_ORDER in alloc_node_mem_map()
7322 * Sum pages in active regions for movable zone.
7342 * Find the PFN the Movable zone begins in each node. Kernel memory
7567 struct zone *zone = &pgdat->node_zones[zone_type]; in check_for_memory() local
7568 if (populated_zone(zone)) { in check_for_memory()
7588 * free_area_init - Initialise all pg_data_t and zone data
7589 * @max_zone_pfn: an array of max PFNs for each zone
7593 * zone in each node and their holes is calculated. If the maximum PFN
7594 * between two adjacent zones match, it is assumed that the zone is empty.
7596 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
7603 int i, nid, zone; in free_area_init() local
7606 /* Record where the zone boundaries are */ in free_area_init()
7617 zone = MAX_NR_ZONES - i - 1; in free_area_init()
7619 zone = i; in free_area_init()
7621 if (zone == ZONE_MOVABLE) in free_area_init()
7624 end_pfn = max(max_zone_pfn[zone], start_pfn); in free_area_init()
7625 arch_zone_lowest_possible_pfn[zone] = start_pfn; in free_area_init()
7626 arch_zone_highest_possible_pfn[zone] = end_pfn; in free_area_init()
7635 /* Print out the zone ranges */ in free_area_init()
7636 pr_info("Zone ranges:\n"); in free_area_init()
7653 pr_info("Movable zone start for each node\n"); in free_area_init()
7852 * set_dma_reserve - set the specified number of pages reserved in the first zone
7855 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
7856 * In the DMA zone, a significant percentage may be consumed by kernel image
7859 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
7935 struct zone *zone = pgdat->node_zones + i; in calculate_totalreserve_pages() local
7937 unsigned long managed_pages = zone_managed_pages(zone); in calculate_totalreserve_pages()
7939 /* Find valid and maximum lowmem_reserve in the zone */ in calculate_totalreserve_pages()
7941 if (zone->lowmem_reserve[j] > max) in calculate_totalreserve_pages()
7942 max = zone->lowmem_reserve[j]; in calculate_totalreserve_pages()
7946 max += high_wmark_pages(zone); in calculate_totalreserve_pages()
7961 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
7963 * pages are left in the zone after a successful __alloc_pages().
7972 struct zone *zone = &pgdat->node_zones[i]; in setup_per_zone_lowmem_reserve() local
7974 bool clear = !ratio || !zone_managed_pages(zone); in setup_per_zone_lowmem_reserve()
7978 struct zone *upper_zone = &pgdat->node_zones[j]; in setup_per_zone_lowmem_reserve()
7983 zone->lowmem_reserve[j] = 0; in setup_per_zone_lowmem_reserve()
7985 zone->lowmem_reserve[j] = managed_pages / ratio; in setup_per_zone_lowmem_reserve()
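Worked example for the division above: managed_pages at this point has accumulated (in surrounding code not matched here) the managed pages of the zones above zone i, so with 1,048,576 pages of higher-zone memory and a ratio of 256 (the usual default for the DMA/DMA32 zones), lowmem_reserve[j] = 4,096 pages, i.e. about 16 MiB of the low zone is held back from allocations that could have been satisfied higher up.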
7998 struct zone *zone; in __setup_per_zone_wmarks() local
8002 for_each_zone(zone) { in __setup_per_zone_wmarks()
8003 if (!is_highmem(zone)) in __setup_per_zone_wmarks()
8004 lowmem_pages += zone_managed_pages(zone); in __setup_per_zone_wmarks()
8007 for_each_zone(zone) { in __setup_per_zone_wmarks()
8010 spin_lock_irqsave(&zone->lock, flags); in __setup_per_zone_wmarks()
8011 tmp = (u64)pages_min * zone_managed_pages(zone); in __setup_per_zone_wmarks()
8013 if (is_highmem(zone)) { in __setup_per_zone_wmarks()
8025 min_pages = zone_managed_pages(zone) / 1024; in __setup_per_zone_wmarks()
8027 zone->_watermark[WMARK_MIN] = min_pages; in __setup_per_zone_wmarks()
8030 * If it's a lowmem zone, reserve a number of pages in __setup_per_zone_wmarks()
8031 * proportionate to the zone's size. in __setup_per_zone_wmarks()
8033 zone->_watermark[WMARK_MIN] = tmp; in __setup_per_zone_wmarks()
8042 mult_frac(zone_managed_pages(zone), in __setup_per_zone_wmarks()
8045 zone->watermark_boost = 0; in __setup_per_zone_wmarks()
8046 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; in __setup_per_zone_wmarks()
8047 zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2; in __setup_per_zone_wmarks()
8049 spin_unlock_irqrestore(&zone->lock, flags); in __setup_per_zone_wmarks()
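A hedged worked example of the final watermark lines: by this point tmp has been recomputed (in code not matched above) as roughly max(WMARK_MIN/4, zone_managed_pages * watermark_scale_factor / 10000). With the default watermark_scale_factor of 10 and a 1,048,576-page zone that is about 1,048 pages, so WMARK_LOW lands roughly 4 MiB above WMARK_MIN and WMARK_HIGH roughly 8 MiB above it (4 KiB pages assumed).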
8060 * Ensures that the watermark[min,low,high] values for each zone are set
8169 struct zone *zone; in setup_min_unmapped_ratio() local
8174 for_each_zone(zone) in setup_min_unmapped_ratio()
8175 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) * in setup_min_unmapped_ratio()
8197 struct zone *zone; in setup_min_slab_ratio() local
8202 for_each_zone(zone) in setup_min_slab_ratio()
8203 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) * in setup_min_slab_ratio()
8229 * if in function of the boot time zone sizes.
8247 static void __zone_pcp_update(struct zone *zone) in __zone_pcp_update() argument
8252 pageset_set_high_and_batch(zone, in __zone_pcp_update()
8253 per_cpu_ptr(zone->pageset, cpu)); in __zone_pcp_update()
8257 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
8258 * cpu. It is the fraction of total pages in each zone that a hot per cpu
8264 struct zone *zone; in percpu_pagelist_fraction_sysctl_handler() local
8287 for_each_populated_zone(zone) in percpu_pagelist_fraction_sysctl_handler()
8288 __zone_pcp_update(zone); in percpu_pagelist_fraction_sysctl_handler()
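Worked example of the sysctl described above: writing 8 (the minimum accepted fraction) to vm.percpu_pagelist_fraction on a zone with 1,048,576 managed pages makes pageset_set_high_and_batch() set each CPU's pcp->high to about 131,072 pages (512 MiB), with the batch size derived from that; writing 0 reverts to the zone_batchsize() default shown earlier.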
8447 struct page *has_unmovable_pages(struct zone *zone, struct page *page, in has_unmovable_pages() argument
8482 * If the zone is movable and we have ruled out all reserved in has_unmovable_pages()
8486 if (zone_idx(zone) == ZONE_MOVABLE) in has_unmovable_pages()
8569 /* [start, end) must belong to a single zone. */
8579 .nid = zone_to_nid(cc->zone), in __alloc_contig_migrate_range()
8604 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, in __alloc_contig_migrate_range()
8629 * aligned. The PFN range must belong to a single zone.
8649 .zone = page_zone(pfn_to_page(start)), in alloc_contig_range()
8715 * We don't have to hold zone->lock here because the pages are in alloc_contig_range()
8781 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn, in pfn_range_valid_contig()
8807 static bool zone_spans_last_pfn(const struct zone *zone, in zone_spans_last_pfn() argument
8812 return zone_spans_pfn(zone, last_pfn); in zone_spans_last_pfn()
8841 struct zone *zone; in alloc_contig_pages() local
8845 for_each_zone_zonelist_nodemask(zone, z, zonelist, in alloc_contig_pages()
8847 spin_lock_irqsave(&zone->lock, flags); in alloc_contig_pages()
8849 pfn = ALIGN(zone->zone_start_pfn, nr_pages); in alloc_contig_pages()
8850 while (zone_spans_last_pfn(zone, pfn, nr_pages)) { in alloc_contig_pages()
8851 if (pfn_range_valid_contig(zone, pfn, nr_pages)) { in alloc_contig_pages()
8853 * We release the zone lock here because in alloc_contig_pages()
8854 * alloc_contig_range() will also lock the zone in alloc_contig_pages()
8859 spin_unlock_irqrestore(&zone->lock, flags); in alloc_contig_pages()
8864 spin_lock_irqsave(&zone->lock, flags); in alloc_contig_pages()
8868 spin_unlock_irqrestore(&zone->lock, flags); in alloc_contig_pages()
8889 * The zone indicated has a new number of managed_pages; batch sizes and percpu
8892 void __meminit zone_pcp_update(struct zone *zone) in zone_pcp_update() argument
8895 __zone_pcp_update(zone); in zone_pcp_update()
8899 void zone_pcp_reset(struct zone *zone) in zone_pcp_reset() argument
8907 if (zone->pageset != &boot_pageset) { in zone_pcp_reset()
8909 pset = per_cpu_ptr(zone->pageset, cpu); in zone_pcp_reset()
8910 drain_zonestat(zone, pset); in zone_pcp_reset()
8912 free_percpu(zone->pageset); in zone_pcp_reset()
8913 zone->pageset = &boot_pageset; in zone_pcp_reset()
8920 * All pages in the range must be in a single zone, must not contain holes,
8927 struct zone *zone; in __offline_isolated_pages() local
8932 zone = page_zone(pfn_to_page(pfn)); in __offline_isolated_pages()
8933 spin_lock_irqsave(&zone->lock, flags); in __offline_isolated_pages()
8958 del_page_from_free_list(page, zone, order); in __offline_isolated_pages()
8961 spin_unlock_irqrestore(&zone->lock, flags); in __offline_isolated_pages()
8967 struct zone *zone = page_zone(page); in is_free_buddy_page() local
8972 spin_lock_irqsave(&zone->lock, flags); in is_free_buddy_page()
8979 spin_unlock_irqrestore(&zone->lock, flags); in is_free_buddy_page()
8989 static void break_down_buddy_pages(struct zone *zone, struct page *page, in break_down_buddy_pages() argument
9008 if (set_page_guard(zone, current_buddy, high, migratetype)) in break_down_buddy_pages()
9012 add_to_free_list(current_buddy, zone, high, migratetype); in break_down_buddy_pages()
9024 struct zone *zone = page_zone(page); in take_page_off_buddy() local
9030 spin_lock_irqsave(&zone->lock, flags); in take_page_off_buddy()
9040 del_page_from_free_list(page_head, zone, page_order); in take_page_off_buddy()
9041 break_down_buddy_pages(zone, page_head, page, 0, in take_page_off_buddy()
9044 __mod_zone_freepage_state(zone, -1, migratetype); in take_page_off_buddy()
9051 spin_unlock_irqrestore(&zone->lock, flags); in take_page_off_buddy()
9062 struct zone *zone = &pgdat->node_zones[ZONE_DMA]; in has_managed_dma() local
9064 if (managed_zone(zone)) in has_managed_dma()