Lines Matching full:zone

13  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
104 * shuffle the whole zone).
113 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
137 struct zone *zone; member
385 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
446 * prev_end_pfn static that contains the end of previous zone in defer_init()
592 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) in page_outside_zone_boundaries() argument
600 seq = zone_span_seqbegin(zone); in page_outside_zone_boundaries()
601 start_pfn = zone->zone_start_pfn; in page_outside_zone_boundaries()
602 sp = zone->spanned_pages; in page_outside_zone_boundaries()
603 if (!zone_spans_pfn(zone, pfn)) in page_outside_zone_boundaries()
605 } while (zone_span_seqretry(zone, seq)); in page_outside_zone_boundaries()
608 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n", in page_outside_zone_boundaries()
609 pfn, zone_to_nid(zone), zone->name, in page_outside_zone_boundaries()
615 static int page_is_consistent(struct zone *zone, struct page *page) in page_is_consistent() argument
619 if (zone != page_zone(page)) in page_is_consistent()
625 * Temporary debugging check for pages not lying within a given zone.
627 static int __maybe_unused bad_range(struct zone *zone, struct page *page) in bad_range() argument
629 if (page_outside_zone_boundaries(zone, page)) in bad_range()
631 if (!page_is_consistent(zone, page)) in bad_range()
637 static inline int __maybe_unused bad_range(struct zone *zone, struct page *page) in bad_range() argument
767 static inline bool set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
780 __mod_zone_freepage_state(zone, -(1 << order), migratetype); in set_page_guard()
785 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
795 __mod_zone_freepage_state(zone, (1 << order), migratetype); in clear_page_guard()
798 static inline bool set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
800 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
816 * (d) a page and its buddy are in the same zone.
819 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
833 * zone check is done late to avoid uselessly calculating in page_is_buddy()
834 * zone/node ids for pages that could never merge. in page_is_buddy()
845 static inline struct capture_control *task_capc(struct zone *zone) in task_capc() argument
852 capc->cc->zone == zone ? capc : NULL; in task_capc()
881 static inline struct capture_control *task_capc(struct zone *zone) in task_capc() argument
895 static inline void add_to_free_list(struct page *page, struct zone *zone, in add_to_free_list() argument
898 struct free_area *area = &zone->free_area[order]; in add_to_free_list()
905 static inline void add_to_free_list_tail(struct page *page, struct zone *zone, in add_to_free_list_tail() argument
908 struct free_area *area = &zone->free_area[order]; in add_to_free_list_tail()
919 static inline void move_to_free_list(struct page *page, struct zone *zone, in move_to_free_list() argument
922 struct free_area *area = &zone->free_area[order]; in move_to_free_list()
927 static inline void del_page_from_free_list(struct page *page, struct zone *zone, in del_page_from_free_list() argument
937 zone->free_area[order].nr_free--; in del_page_from_free_list()
996 struct zone *zone, unsigned int order, in __free_one_page() argument
999 struct capture_control *capc = task_capc(zone); in __free_one_page()
1008 VM_BUG_ON(!zone_is_initialized(zone)); in __free_one_page()
1013 __mod_zone_freepage_state(zone, 1 << order, migratetype); in __free_one_page()
1016 VM_BUG_ON_PAGE(bad_range(zone, page), page); in __free_one_page()
1021 __mod_zone_freepage_state(zone, -(1 << order), in __free_one_page()
1037 clear_page_guard(zone, buddy, order, migratetype); in __free_one_page()
1039 del_page_from_free_list(buddy, zone, order); in __free_one_page()
1054 if (unlikely(has_isolate_pageblock(zone))) { in __free_one_page()
1081 add_to_free_list_tail(page, zone, order, migratetype); in __free_one_page()
1083 add_to_free_list(page, zone, order, migratetype); in __free_one_page()
1342 * Assumes all pages on list are in same zone, and of same order.
1345 * If the zone was previously in an "all pages pinned" state then look to
1348 * And clear the zone's pages_scanned counter, to hold off the "all pages are
1351 static void free_pcppages_bulk(struct zone *zone, int count, in free_pcppages_bulk() argument
1401 * under zone->lock. It is believed the overhead of in free_pcppages_bulk()
1412 spin_lock(&zone->lock); in free_pcppages_bulk()
1413 isolated_pageblocks = has_isolate_pageblock(zone); in free_pcppages_bulk()
1427 __free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE); in free_pcppages_bulk()
1430 spin_unlock(&zone->lock); in free_pcppages_bulk()
1433 static void free_one_page(struct zone *zone, in free_one_page() argument
1438 spin_lock(&zone->lock); in free_one_page()
1439 if (unlikely(has_isolate_pageblock(zone) || in free_one_page()
1443 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); in free_one_page()
1444 spin_unlock(&zone->lock); in free_one_page()
1448 unsigned long zone, int nid) in __init_single_page() argument
1451 set_page_links(page, zone, nid, pfn); in __init_single_page()
1460 if (!is_highmem_idx(zone)) in __init_single_page()
1478 struct zone *zone = &pgdat->node_zones[zid]; in init_reserved_page() local
1480 if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone)) in init_reserved_page()
1622 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1631 * belong to a single zone. We assume that a border between node0 and node1
1638 unsigned long end_pfn, struct zone *zone) in __pageblock_pfn_to_page() argument
1653 if (page_zone(start_page) != zone) in __pageblock_pfn_to_page()
1665 void set_zone_contiguous(struct zone *zone) in set_zone_contiguous() argument
1667 unsigned long block_start_pfn = zone->zone_start_pfn; in set_zone_contiguous()
1671 for (; block_start_pfn < zone_end_pfn(zone); in set_zone_contiguous()
1675 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone)); in set_zone_contiguous()
1678 block_end_pfn, zone)) in set_zone_contiguous()
1684 zone->contiguous = true; in set_zone_contiguous()
1687 void clear_zone_contiguous(struct zone *zone) in clear_zone_contiguous() argument
1689 zone->contiguous = false; in clear_zone_contiguous()
1778 static unsigned long __init deferred_init_pages(struct zone *zone, in deferred_init_pages() argument
1783 int nid = zone_to_nid(zone); in deferred_init_pages()
1785 int zid = zone_idx(zone); in deferred_init_pages()
1804 * This function is meant to pre-load the iterator for the zone init.
1810 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone, in deferred_init_mem_pfn_range_in_zone() argument
1817 * Start out by walking through the ranges in this zone that have in deferred_init_mem_pfn_range_in_zone()
1821 for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) { in deferred_init_mem_pfn_range_in_zone()
1844 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn, in deferred_init_maxorder() argument
1853 for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) { in deferred_init_maxorder()
1860 nr_pages += deferred_init_pages(zone, *start_pfn, t); in deferred_init_maxorder()
1871 for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) { in deferred_init_maxorder()
1892 struct zone *zone = arg; in deferred_init_memmap_chunk() local
1895 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn); in deferred_init_memmap_chunk()
1902 deferred_init_maxorder(&i, zone, &spfn, &epfn); in deferred_init_memmap_chunk()
1922 struct zone *zone; in deferred_init_memmap() local
1944 * Once we unlock here, the zone cannot be grown anymore, thus if an in deferred_init_memmap()
1945 * interrupt thread must allocate this early in boot, zone must be in deferred_init_memmap()
1950 /* Only the highest zone is deferred so find it */ in deferred_init_memmap()
1952 zone = pgdat->node_zones + zid; in deferred_init_memmap()
1953 if (first_init_pfn < zone_end_pfn(zone)) in deferred_init_memmap()
1957 /* If the zone is empty somebody else may have cleared out the zone */ in deferred_init_memmap()
1958 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, in deferred_init_memmap()
1968 .fn_arg = zone, in deferred_init_memmap()
1977 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, in deferred_init_memmap()
1981 /* Sanity check that the next zone really is unpopulated */ in deferred_init_memmap()
1982 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone)); in deferred_init_memmap()
1992 * If this zone has deferred pages, try to grow it by initializing enough
1998 * Return true when zone was grown, otherwise return false. We return true even
2007 deferred_grow_zone(struct zone *zone, unsigned int order) in deferred_grow_zone() argument
2010 pg_data_t *pgdat = zone->zone_pgdat; in deferred_grow_zone()
2016 /* Only the last zone may have deferred pages */ in deferred_grow_zone()
2017 if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat)) in deferred_grow_zone()
2023 * If someone grew this zone while we were waiting for spinlock, return in deferred_grow_zone()
2031 /* If the zone is empty somebody else may have cleared out the zone */ in deferred_grow_zone()
2032 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, in deferred_grow_zone()
2049 nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn); in deferred_grow_zone()
2074 _deferred_grow_zone(struct zone *zone, unsigned int order) in _deferred_grow_zone() argument
2076 return deferred_grow_zone(zone, order); in _deferred_grow_zone()
2083 struct zone *zone; in page_alloc_init_late() local
2102 for_each_populated_zone(zone) in page_alloc_init_late()
2103 zone_pcp_update(zone); in page_alloc_init_late()
2121 for_each_populated_zone(zone) in page_alloc_init_late()
2122 set_zone_contiguous(zone); in page_alloc_init_late()
2170 static inline void expand(struct zone *zone, struct page *page, in expand() argument
2178 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); in expand()
2186 if (set_page_guard(zone, &page[size], high, migratetype)) in expand()
2189 add_to_free_list(&page[size], zone, high, migratetype); in expand()
2317 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, in __rmqueue_smallest() argument
2326 area = &(zone->free_area[current_order]); in __rmqueue_smallest()
2330 del_page_from_free_list(page, zone, current_order); in __rmqueue_smallest()
2331 expand(zone, page, order, current_order, migratetype); in __rmqueue_smallest()
2357 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback() argument
2360 return __rmqueue_smallest(zone, order, MIGRATE_CMA); in __rmqueue_cma_fallback()
2363 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback() argument
2372 static int move_freepages(struct zone *zone, in move_freepages() argument
2402 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); in move_freepages()
2403 VM_BUG_ON_PAGE(page_zone(page) != zone, page); in move_freepages()
2406 move_to_free_list(page, zone, order, migratetype); in move_freepages()
2414 int move_freepages_block(struct zone *zone, struct page *page, in move_freepages_block() argument
2426 /* Do not cross zone boundaries */ in move_freepages_block()
2427 if (!zone_spans_pfn(zone, start_pfn)) in move_freepages_block()
2429 if (!zone_spans_pfn(zone, end_pfn)) in move_freepages_block()
2432 return move_freepages(zone, start_pfn, end_pfn, migratetype, in move_freepages_block()
2480 static inline bool boost_watermark(struct zone *zone) in boost_watermark() argument
2492 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) in boost_watermark()
2495 max_boost = mult_frac(zone->_watermark[WMARK_HIGH], in boost_watermark()
2511 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, in boost_watermark()
2525 static void steal_suitable_fallback(struct zone *zone, struct page *page, in steal_suitable_fallback() argument
2552 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD)) in steal_suitable_fallback()
2553 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); in steal_suitable_fallback()
2559 free_pages = move_freepages_block(zone, page, start_type, in steal_suitable_fallback()
2583 /* moving whole block can fail due to zone boundary conditions */ in steal_suitable_fallback()
2598 move_to_free_list(page, zone, current_order, start_type); in steal_suitable_fallback()
2642 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, in reserve_highatomic_pageblock() argument
2649 * Limit the number reserved to 1 pageblock or roughly 1% of a zone. in reserve_highatomic_pageblock()
2652 max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages; in reserve_highatomic_pageblock()
2653 if (zone->nr_reserved_highatomic >= max_managed) in reserve_highatomic_pageblock()
2656 spin_lock_irqsave(&zone->lock, flags); in reserve_highatomic_pageblock()
2659 if (zone->nr_reserved_highatomic >= max_managed) in reserve_highatomic_pageblock()
2666 zone->nr_reserved_highatomic += pageblock_nr_pages; in reserve_highatomic_pageblock()
2668 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); in reserve_highatomic_pageblock()
2672 spin_unlock_irqrestore(&zone->lock, flags); in reserve_highatomic_pageblock()
2690 struct zone *zone; in unreserve_highatomic_pageblock() local
2695 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, in unreserve_highatomic_pageblock()
2701 if (!force && zone->nr_reserved_highatomic <= in unreserve_highatomic_pageblock()
2705 spin_lock_irqsave(&zone->lock, flags); in unreserve_highatomic_pageblock()
2707 struct free_area *area = &(zone->free_area[order]); in unreserve_highatomic_pageblock()
2728 zone->nr_reserved_highatomic -= min( in unreserve_highatomic_pageblock()
2730 zone->nr_reserved_highatomic); in unreserve_highatomic_pageblock()
2743 ret = move_freepages_block(zone, page, ac->migratetype, in unreserve_highatomic_pageblock()
2746 spin_unlock_irqrestore(&zone->lock, flags); in unreserve_highatomic_pageblock()
2750 spin_unlock_irqrestore(&zone->lock, flags); in unreserve_highatomic_pageblock()
2767 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, in __rmqueue_fallback() argument
2792 area = &(zone->free_area[current_order]); in __rmqueue_fallback()
2818 area = &(zone->free_area[current_order]); in __rmqueue_fallback()
2834 steal_suitable_fallback(zone, page, alloc_flags, start_migratetype, in __rmqueue_fallback()
2845 __rmqueue_with_cma_reuse(struct zone *zone, unsigned int order, in __rmqueue_with_cma_reuse() argument
2850 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue_with_cma_reuse()
2855 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue_with_cma_reuse()
2859 __rmqueue_fallback(zone, order, migratetype, alloc_flags)) in __rmqueue_with_cma_reuse()
2867 * Call me with the zone->lock already held.
2870 __rmqueue(struct zone *zone, unsigned int order, int migratetype, in __rmqueue() argument
2876 page = __rmqueue_with_cma_reuse(zone, order, migratetype, alloc_flags); in __rmqueue()
2883 * allocating from CMA when over half of the zone's free memory in __rmqueue()
2887 zone_page_state(zone, NR_FREE_CMA_PAGES) > in __rmqueue()
2888 zone_page_state(zone, NR_FREE_PAGES) / 2) { in __rmqueue()
2889 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
2895 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue()
2898 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
2900 if (!page && __rmqueue_fallback(zone, order, migratetype, in __rmqueue()
2915 static int rmqueue_bulk(struct zone *zone, unsigned int order, in rmqueue_bulk() argument
2921 spin_lock(&zone->lock); in rmqueue_bulk()
2923 struct page *page = __rmqueue(zone, order, migratetype, in rmqueue_bulk()
2944 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, in rmqueue_bulk()
2954 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); in rmqueue_bulk()
2955 spin_unlock(&zone->lock); in rmqueue_bulk()
2968 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) in drain_zone_pages() argument
2977 free_pcppages_bulk(zone, to_drain, pcp); in drain_zone_pages()
2983 * Drain pcplists of the indicated processor and zone.
2989 static void drain_pages_zone(unsigned int cpu, struct zone *zone) in drain_pages_zone() argument
2996 pset = per_cpu_ptr(zone->pageset, cpu); in drain_pages_zone()
3000 free_pcppages_bulk(zone, pcp->count, pcp); in drain_pages_zone()
3013 struct zone *zone; in drain_pages() local
3015 for_each_populated_zone(zone) { in drain_pages()
3016 drain_pages_zone(cpu, zone); in drain_pages()
3023 * The CPU has to be pinned. When zone parameter is non-NULL, spill just
3024 * the single zone's pages.
3026 void drain_local_pages(struct zone *zone) in drain_local_pages() argument
3030 if (zone) in drain_local_pages()
3031 drain_pages_zone(cpu, zone); in drain_local_pages()
3050 drain_local_pages(drain->zone); in drain_local_pages_wq()
3057 * When zone parameter is non-NULL, spill just the single zone's pages.
3061 void drain_all_pages(struct zone *zone) in drain_all_pages() argument
3080 * a zone. Such callers are primarily CMA and memory hotplug and need in drain_all_pages()
3084 if (!zone) in drain_all_pages()
3097 struct zone *z; in drain_all_pages()
3100 if (zone) { in drain_all_pages()
3101 pcp = per_cpu_ptr(zone->pageset, cpu); in drain_all_pages()
3123 drain->zone = zone; in drain_all_pages()
3140 void mark_free_pages(struct zone *zone) in mark_free_pages() argument
3147 if (zone_is_empty(zone)) in mark_free_pages()
3150 spin_lock_irqsave(&zone->lock, flags); in mark_free_pages()
3152 max_zone_pfn = zone_end_pfn(zone); in mark_free_pages()
3153 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) in mark_free_pages()
3162 if (page_zone(page) != zone) in mark_free_pages()
3171 &zone->free_area[order].free_list[t], lru) { in mark_free_pages()
3184 spin_unlock_irqrestore(&zone->lock, flags); in mark_free_pages()
3202 struct zone *zone = page_zone(page); in free_unref_page_commit() local
3218 free_one_page(zone, page, pfn, 0, migratetype, in free_unref_page_commit()
3225 pcp = &this_cpu_ptr(zone->pageset)->pcp; in free_unref_page_commit()
3230 free_pcppages_bulk(zone, batch, pcp); in free_unref_page_commit()
3313 struct zone *zone; in __isolate_free_page() local
3318 zone = page_zone(page); in __isolate_free_page()
3328 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); in __isolate_free_page()
3329 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) in __isolate_free_page()
3332 __mod_zone_freepage_state(zone, -(1UL << order), mt); in __isolate_free_page()
3337 del_page_from_free_list(page, zone, order); in __isolate_free_page()
3369 struct zone *zone = page_zone(page); in __putback_isolated_page() local
3371 /* zone lock should be held when this function is called */ in __putback_isolated_page()
3372 lockdep_assert_held(&zone->lock); in __putback_isolated_page()
3375 __free_one_page(page, page_to_pfn(page), zone, order, mt, in __putback_isolated_page()
3384 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z) in zone_statistics()
3407 static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype, in __rmqueue_pcplist() argument
3416 pcp->count += rmqueue_bulk(zone, 0, in __rmqueue_pcplist()
3432 static struct page *rmqueue_pcplist(struct zone *preferred_zone, in rmqueue_pcplist()
3433 struct zone *zone, gfp_t gfp_flags, in rmqueue_pcplist() argument
3442 pcp = &this_cpu_ptr(zone->pageset)->pcp; in rmqueue_pcplist()
3444 page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list); in rmqueue_pcplist()
3447 zone_statistics(preferred_zone, zone); in rmqueue_pcplist()
3454 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
3457 struct page *rmqueue(struct zone *preferred_zone, in rmqueue()
3458 struct zone *zone, unsigned int order, in rmqueue() argument
3473 page = rmqueue_pcplist(preferred_zone, zone, gfp_flags, in rmqueue()
3484 spin_lock_irqsave(&zone->lock, flags); in rmqueue()
3495 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); in rmqueue()
3500 page = __rmqueue(zone, order, migratetype, alloc_flags); in rmqueue()
3502 spin_unlock(&zone->lock); in rmqueue()
3505 __mod_zone_freepage_state(zone, -(1 << order), in rmqueue()
3509 zone_statistics(preferred_zone, zone); in rmqueue()
3514 if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) { in rmqueue()
3515 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); in rmqueue()
3516 wakeup_kswapd(zone, 0, 0, zone_idx(zone)); in rmqueue()
3519 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); in rmqueue()
3601 static inline long __zone_watermark_unusable_free(struct zone *z, in __zone_watermark_unusable_free()
3627 * one free page of a suitable size. Checking now avoids taking the zone lock
3630 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in __zone_watermark_ok()
3694 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in zone_watermark_ok()
3701 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, in zone_watermark_fast()
3741 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, in zone_watermark_ok_safe()
3754 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) in zone_allows_reclaim() argument
3756 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= in zone_allows_reclaim()
3760 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) in zone_allows_reclaim() argument
3767 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
3768 * fragmentation is subtle. If the preferred zone was HIGHMEM then
3769 * premature use of a lower zone may cause lowmem pressure problems that
3770 * are worse than fragmentation. If the next zone is ZONE_DMA then it is
3775 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) in alloc_flags_nofragment() argument
3786 if (!zone) in alloc_flags_nofragment()
3789 if (zone_idx(zone) != ZONE_NORMAL) in alloc_flags_nofragment()
3794 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume in alloc_flags_nofragment()
3798 if (nr_online_nodes > 1 && !populated_zone(--zone)) in alloc_flags_nofragment()
3829 struct zone *zone; in get_page_from_freelist() local
3835 * Scan zonelist, looking for a zone with enough free. in get_page_from_freelist()
3840 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, in get_page_from_freelist()
3847 !__cpuset_zone_allowed(zone, gfp_mask)) in get_page_from_freelist()
3869 if (last_pgdat_dirty_limit == zone->zone_pgdat) in get_page_from_freelist()
3872 if (!node_dirty_ok(zone->zone_pgdat)) { in get_page_from_freelist()
3873 last_pgdat_dirty_limit = zone->zone_pgdat; in get_page_from_freelist()
3879 zone != ac->preferred_zoneref->zone) { in get_page_from_freelist()
3887 local_nid = zone_to_nid(ac->preferred_zoneref->zone); in get_page_from_freelist()
3888 if (zone_to_nid(zone) != local_nid) { in get_page_from_freelist()
3894 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); in get_page_from_freelist()
3895 if (!zone_watermark_fast(zone, order, mark, in get_page_from_freelist()
3902 * Watermark failed for this zone, but see if we can in get_page_from_freelist()
3903 * grow this zone if it contains deferred pages. in get_page_from_freelist()
3906 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
3916 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone)) in get_page_from_freelist()
3919 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); in get_page_from_freelist()
3929 if (zone_watermark_ok(zone, order, mark, in get_page_from_freelist()
3938 page = rmqueue(ac->preferred_zoneref->zone, zone, order, in get_page_from_freelist()
3948 reserve_highatomic_pageblock(page, zone, order); in get_page_from_freelist()
3953 /* Try again if zone has deferred pages */ in get_page_from_freelist()
3955 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
4154 * At least in one zone compaction wasn't deferred or skipped, so let's in __alloc_pages_direct_compact()
4168 struct zone *zone = page_zone(page); in __alloc_pages_direct_compact() local
4170 zone->compact_blockskip_flush = false; in __alloc_pages_direct_compact()
4171 compaction_defer_reset(zone, order, true); in __alloc_pages_direct_compact()
4206 * compaction considers all the zone as desperately out of memory in should_compact_retry()
4280 struct zone *zone; in should_compact_retry() local
4292 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in should_compact_retry()
4294 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), in should_compact_retry()
4422 struct zone *zone; in wake_all_kswapds() local
4426 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, in wake_all_kswapds()
4428 if (last_pgdat != zone->zone_pgdat) in wake_all_kswapds()
4429 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); in wake_all_kswapds()
4430 last_pgdat = zone->zone_pgdat; in wake_all_kswapds()
4533 struct zone *zone; in should_reclaim_retry() local
4562 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in should_reclaim_retry()
4566 unsigned long min_wmark = min_wmark_pages(zone); in should_reclaim_retry()
4569 available = reclaimable = zone_reclaimable_pages(zone); in should_reclaim_retry()
4570 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); in should_reclaim_retry()
4576 wmark = __zone_watermark_ok(zone, order, min_wmark, in should_reclaim_retry()
4590 write_pending = zone_page_state_snapshot(zone, in should_reclaim_retry()
4700 if (!ac->preferred_zoneref->zone) in __alloc_pages_slowpath()
4958 /* Dirty zone balancing only done in the fast path */ in prepare_alloc_pages()
4962 * The preferred zone is used for statistics but crucially it is in prepare_alloc_pages()
5002 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask); in __alloc_pages_nodemask()
5325 * @offset: The zone index of the highest zone
5328 * high watermark within all zones at or below a given zone index. For each
5329 * zone, the number of pages is calculated as:
5338 struct zone *zone; in nr_free_zone_pages() local
5345 for_each_zone_zonelist(zone, z, zonelist, offset) { in nr_free_zone_pages()
5346 unsigned long size = zone_managed_pages(zone); in nr_free_zone_pages()
5347 unsigned long high = high_wmark_pages(zone); in nr_free_zone_pages()
5370 static inline void show_node(struct zone *zone) in show_node() argument
5373 printk("Node %d ", zone_to_nid(zone)); in show_node()
5383 struct zone *zone; in si_mem_available() local
5389 for_each_zone(zone) in si_mem_available()
5390 wmark_low += low_wmark_pages(zone); in si_mem_available()
5451 struct zone *zone = &pgdat->node_zones[zone_type]; in si_meminfo_node() local
5453 if (is_highmem(zone)) { in si_meminfo_node()
5454 managed_highpages += zone_managed_pages(zone); in si_meminfo_node()
5455 free_highpages += zone_page_state(zone, NR_FREE_PAGES); in si_meminfo_node()
5530 struct zone *zone; in show_free_areas() local
5533 for_each_populated_zone(zone) { in show_free_areas()
5534 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
5538 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; in show_free_areas()
5621 for_each_populated_zone(zone) { in show_free_areas()
5624 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
5629 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; in show_free_areas()
5631 show_node(zone); in show_free_areas()
5654 zone->name, in show_free_areas()
5655 K(zone_page_state(zone, NR_FREE_PAGES)), in show_free_areas()
5656 K(min_wmark_pages(zone)), in show_free_areas()
5657 K(low_wmark_pages(zone)), in show_free_areas()
5658 K(high_wmark_pages(zone)), in show_free_areas()
5659 K(zone->nr_reserved_highatomic), in show_free_areas()
5660 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)), in show_free_areas()
5661 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)), in show_free_areas()
5662 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)), in show_free_areas()
5663 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)), in show_free_areas()
5664 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)), in show_free_areas()
5665 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)), in show_free_areas()
5666 K(zone->present_pages), in show_free_areas()
5667 K(zone_managed_pages(zone)), in show_free_areas()
5668 K(zone_page_state(zone, NR_MLOCK)), in show_free_areas()
5669 K(zone_page_state(zone, NR_PAGETABLE)), in show_free_areas()
5670 K(zone_page_state(zone, NR_BOUNCE)), in show_free_areas()
5672 K(this_cpu_read(zone->pageset->pcp.count)), in show_free_areas()
5673 K(zone_page_state(zone, NR_FREE_CMA_PAGES))); in show_free_areas()
5676 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]); in show_free_areas()
5680 for_each_populated_zone(zone) { in show_free_areas()
5685 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
5687 show_node(zone); in show_free_areas()
5688 printk(KERN_CONT "%s: ", zone->name); in show_free_areas()
5690 spin_lock_irqsave(&zone->lock, flags); in show_free_areas()
5692 struct free_area *area = &zone->free_area[order]; in show_free_areas()
5704 spin_unlock_irqrestore(&zone->lock, flags); in show_free_areas()
5721 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) in zoneref_set_zone() argument
5723 zoneref->zone = zone; in zoneref_set_zone()
5724 zoneref->zone_idx = zone_idx(zone); in zoneref_set_zone()
5728 * Builds allocation fallback zone lists.
5734 struct zone *zone; in build_zonerefs_node() local
5740 zone = pgdat->node_zones + zone_type; in build_zonerefs_node()
5741 if (managed_zone(zone)) { in build_zonerefs_node()
5742 zoneref_set_zone(zone, &zonerefs[nr_zones++]); in build_zonerefs_node()
5846 * This results in maximum locality--normal zone overflows into local
5847 * DMA zone, if any--but risks exhausting DMA zone.
5865 zonerefs->zone = NULL; in build_zonelists_in_node_order()
5880 zonerefs->zone = NULL; in build_thisnode_zonelists()
5885 * Build zonelists ordered by zone and nodes within zones.
5886 * This results in conserving DMA zone[s] until all Normal memory is
5888 * may still exist in local DMA zone.
5926 * I.e., first node id of first zone in arg node's generic zonelist.
5937 return zone_to_nid(z->zone); in local_memory_node()
5978 zonerefs->zone = NULL; in build_zonelists()
5997 * Other parts of the kernel may not check if the zone is available.
6032 * i.e., the node of the first zone in the generic zonelist. in __build_all_zonelists()
6056 * each zone will be allocated later when the per cpu in build_all_zonelists_init()
6094 * more accurate, but expensive to check per-zone. This check is in build_all_zonelists()
6108 pr_info("Policy zone: %s\n", zone_names[policy_zone]); in build_all_zonelists()
6112 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
6114 overlap_memmap_init(unsigned long zone, unsigned long *pfn) in overlap_memmap_init() argument
6118 if (mirrored_kernelcore && zone == ZONE_MOVABLE) { in overlap_memmap_init()
6141 * zone stats (e.g., nr_isolate_pageblock) are touched.
6143 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, in memmap_init_zone() argument
6162 if (zone == ZONE_DEVICE) { in memmap_init_zone()
6178 if (overlap_memmap_init(zone, &pfn)) in memmap_init_zone()
6185 __init_single_page(page, pfn, zone, nid); in memmap_init_zone()
6203 void __ref memmap_init_zone_device(struct zone *zone, in memmap_init_zone_device() argument
6209 struct pglist_data *pgdat = zone->zone_pgdat; in memmap_init_zone_device()
6211 unsigned long zone_idx = zone_idx(zone); in memmap_init_zone_device()
6215 if (WARN_ON_ONCE(!pgmap || zone_idx(zone) != ZONE_DEVICE)) in memmap_init_zone_device()
6235 * phase for it to be fully associated with a zone. in memmap_init_zone_device()
6271 static void __meminit zone_init_free_lists(struct zone *zone) in zone_init_free_lists() argument
6275 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); in zone_init_free_lists()
6276 zone->free_area[order].nr_free = 0; in zone_init_free_lists()
6296 * - zone and node links point to zone and node that span the page if the
6297 * hole is in the middle of a zone
6298 * - zone and node links point to adjacent zone/node if the hole falls on
6299 * the zone boundary; the pages in such holes will be prepended to the
6300 * zone/node above the hole except for the trailing pages in the last
6301 * section that will be appended to the zone/node below.
6305 int zone, int node) in init_unavailable_range() argument
6316 __init_single_page(pfn_to_page(pfn), pfn, zone, node); in init_unavailable_range()
6322 pr_info("On node %d, zone %s: %lld pages in unavailable ranges", in init_unavailable_range()
6323 node, zone_names[zone], pgcnt); in init_unavailable_range()
6328 int zone, int node) in init_unavailable_range() argument
6333 static void __init memmap_init_zone_range(struct zone *zone, in memmap_init_zone_range() argument
6338 unsigned long zone_start_pfn = zone->zone_start_pfn; in memmap_init_zone_range()
6339 unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages; in memmap_init_zone_range()
6340 int nid = zone_to_nid(zone), zone_id = zone_idx(zone); in memmap_init_zone_range()
6367 struct zone *zone = node->node_zones + j; in memmap_init() local
6369 if (!populated_zone(zone)) in memmap_init()
6372 memmap_init_zone_range(zone, start_pfn, end_pfn, in memmap_init()
6382 * Append the pages in this hole to the highest zone in the last in memmap_init()
6396 unsigned long zone, in arch_memmap_init() argument
6401 static int zone_batchsize(struct zone *zone) in zone_batchsize() argument
6408 * size of the zone. in zone_batchsize()
6410 batch = zone_managed_pages(zone) / 1024; in zone_batchsize()
6515 static void pageset_set_high_and_batch(struct zone *zone, in pageset_set_high_and_batch() argument
6520 (zone_managed_pages(zone) / in pageset_set_high_and_batch()
6523 pageset_set_batch(pcp, zone_batchsize(zone)); in pageset_set_high_and_batch()
6526 static void __meminit zone_pageset_init(struct zone *zone, int cpu) in zone_pageset_init() argument
6528 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu); in zone_pageset_init()
6531 pageset_set_high_and_batch(zone, pcp); in zone_pageset_init()
6534 void __meminit setup_zone_pageset(struct zone *zone) in setup_zone_pageset() argument
6537 zone->pageset = alloc_percpu(struct per_cpu_pageset); in setup_zone_pageset()
6539 zone_pageset_init(zone, cpu); in setup_zone_pageset()
6549 struct zone *zone; in setup_per_cpu_pageset() local
6552 for_each_populated_zone(zone) in setup_per_cpu_pageset()
6553 setup_zone_pageset(zone); in setup_per_cpu_pageset()
6574 static __meminit void zone_pcp_init(struct zone *zone) in zone_pcp_init() argument
6581 zone->pageset = &boot_pageset; in zone_pcp_init()
6583 if (populated_zone(zone)) in zone_pcp_init()
6584 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n", in zone_pcp_init()
6585 zone->name, zone->present_pages, in zone_pcp_init()
6586 zone_batchsize(zone)); in zone_pcp_init()
6589 void __meminit init_currently_empty_zone(struct zone *zone, in init_currently_empty_zone() argument
6593 struct pglist_data *pgdat = zone->zone_pgdat; in init_currently_empty_zone()
6594 int zone_idx = zone_idx(zone) + 1; in init_currently_empty_zone()
6599 zone->zone_start_pfn = zone_start_pfn; in init_currently_empty_zone()
6602 "Initialising map node %d zone %lu pfns %lu -> %lu\n", in init_currently_empty_zone()
6604 (unsigned long)zone_idx(zone), in init_currently_empty_zone()
6607 zone_init_free_lists(zone); in init_currently_empty_zone()
6608 zone->initialized = 1; in init_currently_empty_zone()
6641 * This finds a zone that can be used for ZONE_MOVABLE pages. The
6643 * increasing memory addresses so that the "highest" populated zone is used
6662 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
6666 * is distributed. This helper function adjusts the zone ranges
6668 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
6699 * Return the number of pages a zone spans in a node, including holes
6715 /* Get the start and end of the zone */ in zone_spanned_pages_in_node()
6722 /* Check that this node has pages within the zone's required range */ in zone_spanned_pages_in_node()
6726 /* Move the zone boundaries inside the node if necessary */ in zone_spanned_pages_in_node()
6767 /* Return the number of page frames in holes in a zone on a node */
6826 struct zone *zone = pgdat->node_zones + i; in calculate_node_totalpages() local
6844 zone->zone_start_pfn = zone_start_pfn; in calculate_node_totalpages()
6846 zone->zone_start_pfn = 0; in calculate_node_totalpages()
6847 zone->spanned_pages = size; in calculate_node_totalpages()
6848 zone->present_pages = real_size; in calculate_node_totalpages()
6862 * Calculate the size of the zone->blockflags rounded to an unsigned long
6882 struct zone *zone, in setup_usemap() argument
6887 zone->pageblock_flags = NULL; in setup_usemap()
6889 zone->pageblock_flags = in setup_usemap()
6892 if (!zone->pageblock_flags) in setup_usemap()
6893 panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n", in setup_usemap()
6894 usemapsize, zone->name, pgdat->node_id); in setup_usemap()
6898 static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone, in setup_usemap() argument
6946 * the zone and SPARSEMEM is in use. If there are holes within the in calc_memmap_size()
6947 * zone, each populated memory region may cost us one or two extra in calc_memmap_size()
7002 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid, in zone_init_internals() argument
7005 atomic_long_set(&zone->managed_pages, remaining_pages); in zone_init_internals()
7006 zone_set_nid(zone, nid); in zone_init_internals()
7007 zone->name = zone_names[idx]; in zone_init_internals()
7008 zone->zone_pgdat = NODE_DATA(nid); in zone_init_internals()
7009 spin_lock_init(&zone->lock); in zone_init_internals()
7010 zone_seqlock_init(zone); in zone_init_internals()
7011 zone_pcp_init(zone); in zone_init_internals()
7015 * Set up the zone data structures
7034 * Set up the zone data structures:
7051 struct zone *zone = pgdat->node_zones + j; in free_area_init_core() local
7053 unsigned long zone_start_pfn = zone->zone_start_pfn; in free_area_init_core()
7055 size = zone->spanned_pages; in free_area_init_core()
7056 freesize = zone->present_pages; in free_area_init_core()
7060 * is used by this zone for memmap. This affects the watermark in free_area_init_core()
7069 " %s zone: %lu pages used for memmap\n", in free_area_init_core()
7072 pr_warn(" %s zone: %lu pages exceeds freesize %lu\n", in free_area_init_core()
7079 printk(KERN_DEBUG " %s zone: %lu pages reserved\n", in free_area_init_core()
7095 zone_init_internals(zone, j, nid, freesize); in free_area_init_core()
7101 setup_usemap(pgdat, zone, zone_start_pfn, size); in free_area_init_core()
7102 init_currently_empty_zone(zone, zone_start_pfn, size); in free_area_init_core()
7125 * The zone's endpoints aren't required to be MAX_ORDER in alloc_node_mem_map()
7273 * Sum pages in active regions for movable zone.
7293 * Find the PFN the Movable zone begins in each node. Kernel memory
7511 struct zone *zone = &pgdat->node_zones[zone_type]; in check_for_memory() local
7512 if (populated_zone(zone)) { in check_for_memory()
7532 * free_area_init - Initialise all pg_data_t and zone data
7533 * @max_zone_pfn: an array of max PFNs for each zone
7537 * zone in each node and their holes is calculated. If the maximum PFN
7538 * between two adjacent zones match, it is assumed that the zone is empty.
7540 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
7547 int i, nid, zone; in free_area_init() local
7550 /* Record where the zone boundaries are */ in free_area_init()
7561 zone = MAX_NR_ZONES - i - 1; in free_area_init()
7563 zone = i; in free_area_init()
7565 if (zone == ZONE_MOVABLE) in free_area_init()
7568 end_pfn = max(max_zone_pfn[zone], start_pfn); in free_area_init()
7569 arch_zone_lowest_possible_pfn[zone] = start_pfn; in free_area_init()
7570 arch_zone_highest_possible_pfn[zone] = end_pfn; in free_area_init()
7579 /* Print out the zone ranges */ in free_area_init()
7580 pr_info("Zone ranges:\n"); in free_area_init()
7597 pr_info("Movable zone start for each node\n"); in free_area_init()
7796 * set_dma_reserve - set the specified number of pages reserved in the first zone
7799 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
7800 * In the DMA zone, a significant percentage may be consumed by kernel image
7803 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
7879 struct zone *zone = pgdat->node_zones + i; in calculate_totalreserve_pages() local
7881 unsigned long managed_pages = zone_managed_pages(zone); in calculate_totalreserve_pages()
7883 /* Find valid and maximum lowmem_reserve in the zone */ in calculate_totalreserve_pages()
7885 if (zone->lowmem_reserve[j] > max) in calculate_totalreserve_pages()
7886 max = zone->lowmem_reserve[j]; in calculate_totalreserve_pages()
7890 max += high_wmark_pages(zone); in calculate_totalreserve_pages()
7905 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
7907 * pages are left in the zone after a successful __alloc_pages().
7916 struct zone *zone = &pgdat->node_zones[i]; in setup_per_zone_lowmem_reserve() local
7918 bool clear = !ratio || !zone_managed_pages(zone); in setup_per_zone_lowmem_reserve()
7922 struct zone *upper_zone = &pgdat->node_zones[j]; in setup_per_zone_lowmem_reserve()
7927 zone->lowmem_reserve[j] = 0; in setup_per_zone_lowmem_reserve()
7929 zone->lowmem_reserve[j] = managed_pages / ratio; in setup_per_zone_lowmem_reserve()
7942 struct zone *zone; in __setup_per_zone_wmarks() local
7946 for_each_zone(zone) { in __setup_per_zone_wmarks()
7947 if (!is_highmem(zone)) in __setup_per_zone_wmarks()
7948 lowmem_pages += zone_managed_pages(zone); in __setup_per_zone_wmarks()
7951 for_each_zone(zone) { in __setup_per_zone_wmarks()
7954 spin_lock_irqsave(&zone->lock, flags); in __setup_per_zone_wmarks()
7955 tmp = (u64)pages_min * zone_managed_pages(zone); in __setup_per_zone_wmarks()
7957 if (is_highmem(zone)) { in __setup_per_zone_wmarks()
7969 min_pages = zone_managed_pages(zone) / 1024; in __setup_per_zone_wmarks()
7971 zone->_watermark[WMARK_MIN] = min_pages; in __setup_per_zone_wmarks()
7974 * If it's a lowmem zone, reserve a number of pages in __setup_per_zone_wmarks()
7975 * proportionate to the zone's size. in __setup_per_zone_wmarks()
7977 zone->_watermark[WMARK_MIN] = tmp; in __setup_per_zone_wmarks()
7986 mult_frac(zone_managed_pages(zone), in __setup_per_zone_wmarks()
7989 zone->watermark_boost = 0; in __setup_per_zone_wmarks()
7990 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; in __setup_per_zone_wmarks()
7991 zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2; in __setup_per_zone_wmarks()
7993 spin_unlock_irqrestore(&zone->lock, flags); in __setup_per_zone_wmarks()
8004 * Ensures that the watermark[min,low,high] values for each zone are set
8113 struct zone *zone; in setup_min_unmapped_ratio() local
8118 for_each_zone(zone) in setup_min_unmapped_ratio()
8119 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) * in setup_min_unmapped_ratio()
8141 struct zone *zone; in setup_min_slab_ratio() local
8146 for_each_zone(zone) in setup_min_slab_ratio()
8147 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) * in setup_min_slab_ratio()
8173 * if in function of the boot time zone sizes.
8191 static void __zone_pcp_update(struct zone *zone) in __zone_pcp_update() argument
8196 pageset_set_high_and_batch(zone, in __zone_pcp_update()
8197 per_cpu_ptr(zone->pageset, cpu)); in __zone_pcp_update()
8201 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
8202 * cpu. It is the fraction of total pages in each zone that a hot per cpu
8208 struct zone *zone; in percpu_pagelist_fraction_sysctl_handler() local
8231 for_each_populated_zone(zone) in percpu_pagelist_fraction_sysctl_handler()
8232 __zone_pcp_update(zone); in percpu_pagelist_fraction_sysctl_handler()
8391 struct page *has_unmovable_pages(struct zone *zone, struct page *page, in has_unmovable_pages() argument
8426 * If the zone is movable and we have ruled out all reserved in has_unmovable_pages()
8430 if (zone_idx(zone) == ZONE_MOVABLE) in has_unmovable_pages()
8513 /* [start, end) must belong to a single zone. */
8523 .nid = zone_to_nid(cc->zone), in __alloc_contig_migrate_range()
8548 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, in __alloc_contig_migrate_range()
8573 * aligned. The PFN range must belong to a single zone.
8593 .zone = page_zone(pfn_to_page(start)), in alloc_contig_range()
8659 * We don't have to hold zone->lock here because the pages are in alloc_contig_range()
8725 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn, in pfn_range_valid_contig()
8751 static bool zone_spans_last_pfn(const struct zone *zone, in zone_spans_last_pfn() argument
8756 return zone_spans_pfn(zone, last_pfn); in zone_spans_last_pfn()
8785 struct zone *zone; in alloc_contig_pages() local
8789 for_each_zone_zonelist_nodemask(zone, z, zonelist, in alloc_contig_pages()
8791 spin_lock_irqsave(&zone->lock, flags); in alloc_contig_pages()
8793 pfn = ALIGN(zone->zone_start_pfn, nr_pages); in alloc_contig_pages()
8794 while (zone_spans_last_pfn(zone, pfn, nr_pages)) { in alloc_contig_pages()
8795 if (pfn_range_valid_contig(zone, pfn, nr_pages)) { in alloc_contig_pages()
8797 * We release the zone lock here because in alloc_contig_pages()
8798 * alloc_contig_range() will also lock the zone in alloc_contig_pages()
8803 spin_unlock_irqrestore(&zone->lock, flags); in alloc_contig_pages()
8808 spin_lock_irqsave(&zone->lock, flags); in alloc_contig_pages()
8812 spin_unlock_irqrestore(&zone->lock, flags); in alloc_contig_pages()
8833 * The zone indicated has a new number of managed_pages; batch sizes and percpu
8836 void __meminit zone_pcp_update(struct zone *zone) in zone_pcp_update() argument
8839 __zone_pcp_update(zone); in zone_pcp_update()
8843 void zone_pcp_reset(struct zone *zone) in zone_pcp_reset() argument
8851 if (zone->pageset != &boot_pageset) { in zone_pcp_reset()
8853 pset = per_cpu_ptr(zone->pageset, cpu); in zone_pcp_reset()
8854 drain_zonestat(zone, pset); in zone_pcp_reset()
8856 free_percpu(zone->pageset); in zone_pcp_reset()
8857 zone->pageset = &boot_pageset; in zone_pcp_reset()
8864 * All pages in the range must be in a single zone, must not contain holes,
8871 struct zone *zone; in __offline_isolated_pages() local
8876 zone = page_zone(pfn_to_page(pfn)); in __offline_isolated_pages()
8877 spin_lock_irqsave(&zone->lock, flags); in __offline_isolated_pages()
8902 del_page_from_free_list(page, zone, order); in __offline_isolated_pages()
8905 spin_unlock_irqrestore(&zone->lock, flags); in __offline_isolated_pages()
8911 struct zone *zone = page_zone(page); in is_free_buddy_page() local
8916 spin_lock_irqsave(&zone->lock, flags); in is_free_buddy_page()
8923 spin_unlock_irqrestore(&zone->lock, flags); in is_free_buddy_page()
8933 static void break_down_buddy_pages(struct zone *zone, struct page *page, in break_down_buddy_pages() argument
8952 if (set_page_guard(zone, current_buddy, high, migratetype)) in break_down_buddy_pages()
8956 add_to_free_list(current_buddy, zone, high, migratetype); in break_down_buddy_pages()
8968 struct zone *zone = page_zone(page); in take_page_off_buddy() local
8974 spin_lock_irqsave(&zone->lock, flags); in take_page_off_buddy()
8984 del_page_from_free_list(page_head, zone, page_order); in take_page_off_buddy()
8985 break_down_buddy_pages(zone, page_head, page, 0, in take_page_off_buddy()
8988 __mod_zone_freepage_state(zone, -1, migratetype); in take_page_off_buddy()
8995 spin_unlock_irqrestore(&zone->lock, flags); in take_page_off_buddy()
9006 struct zone *zone = &pgdat->node_zones[ZONE_DMA]; in has_managed_dma() local
9008 if (managed_zone(zone)) in has_managed_dma()