Lines matching the whole word "zone" in mm/page_alloc.c

Each match is shown as: source line number, the matching source text, and the enclosing function (with "argument", "local", or "member" noting what the matched identifier is).

13  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
101 * shuffle the whole zone).
110 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
134 struct zone *zone; member
382 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
443 * prev_end_pfn static that contains the end of previous zone in defer_init()
589 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) in page_outside_zone_boundaries() argument
597 seq = zone_span_seqbegin(zone); in page_outside_zone_boundaries()
598 start_pfn = zone->zone_start_pfn; in page_outside_zone_boundaries()
599 sp = zone->spanned_pages; in page_outside_zone_boundaries()
600 if (!zone_spans_pfn(zone, pfn)) in page_outside_zone_boundaries()
602 } while (zone_span_seqretry(zone, seq)); in page_outside_zone_boundaries()
605 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n", in page_outside_zone_boundaries()
606 pfn, zone_to_nid(zone), zone->name, in page_outside_zone_boundaries()
612 static int page_is_consistent(struct zone *zone, struct page *page) in page_is_consistent() argument
616 if (zone != page_zone(page)) in page_is_consistent()
622 * Temporary debugging check for pages not lying within a given zone.
624 static int __maybe_unused bad_range(struct zone *zone, struct page *page) in bad_range() argument
626 if (page_outside_zone_boundaries(zone, page)) in bad_range()
628 if (!page_is_consistent(zone, page)) in bad_range()
634 static inline int __maybe_unused bad_range(struct zone *zone, struct page *page) in bad_range() argument
764 static inline bool set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
777 __mod_zone_freepage_state(zone, -(1 << order), migratetype); in set_page_guard()
782 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
792 __mod_zone_freepage_state(zone, (1 << order), migratetype); in clear_page_guard()
795 static inline bool set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
797 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
813 * (d) a page and its buddy are in the same zone.
816 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
830 * zone check is done late to avoid uselessly calculating in page_is_buddy()
831 * zone/node ids for pages that could never merge. in page_is_buddy()
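The conditions at lines 813-831 describe when two blocks may merge. The buddy's PFN is found by flipping the single bit that the order contributes; a minimal sketch of that relation (the XOR rule is the kernel's own, used by __find_buddy_pfn(); this standalone helper is only illustrative):

    /* The buddy of the block starting at page_pfn, at the given order,
     * is the block whose PFN differs only in bit 'order'. */
    static inline unsigned long buddy_pfn_of(unsigned long page_pfn,
                                             unsigned int order)
    {
            return page_pfn ^ (1UL << order);
    }
    /* e.g. pfn 8 at order 1 pairs with pfn 10, and the merged order-2
     * block starts at 8 & 10 = 8. */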
842 static inline struct capture_control *task_capc(struct zone *zone) in task_capc() argument
849 capc->cc->zone == zone ? capc : NULL; in task_capc()
878 static inline struct capture_control *task_capc(struct zone *zone) in task_capc() argument
892 static inline void add_to_free_list(struct page *page, struct zone *zone, in add_to_free_list() argument
895 struct free_area *area = &zone->free_area[order]; in add_to_free_list()
902 static inline void add_to_free_list_tail(struct page *page, struct zone *zone, in add_to_free_list_tail() argument
905 struct free_area *area = &zone->free_area[order]; in add_to_free_list_tail()
916 static inline void move_to_free_list(struct page *page, struct zone *zone, in move_to_free_list() argument
919 struct free_area *area = &zone->free_area[order]; in move_to_free_list()
924 static inline void del_page_from_free_list(struct page *page, struct zone *zone, in del_page_from_free_list() argument
934 zone->free_area[order].nr_free--; in del_page_from_free_list()
993 struct zone *zone, unsigned int order, in __free_one_page() argument
996 struct capture_control *capc = task_capc(zone); in __free_one_page()
1005 VM_BUG_ON(!zone_is_initialized(zone)); in __free_one_page()
1010 __mod_zone_freepage_state(zone, 1 << order, migratetype); in __free_one_page()
1013 VM_BUG_ON_PAGE(bad_range(zone, page), page); in __free_one_page()
1018 __mod_zone_freepage_state(zone, -(1 << order), in __free_one_page()
1034 clear_page_guard(zone, buddy, order, migratetype); in __free_one_page()
1036 del_page_from_free_list(buddy, zone, order); in __free_one_page()
1051 if (unlikely(has_isolate_pageblock(zone))) { in __free_one_page()
1078 add_to_free_list_tail(page, zone, order, migratetype); in __free_one_page()
1080 add_to_free_list(page, zone, order, migratetype); in __free_one_page()
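A heavily condensed sketch of the merge loop these __free_one_page() fragments belong to, with guard-page, isolation, and capture handling stripped out (helper names as in the matches above; set_buddy_order() naming varies across kernel versions):

    while (order < MAX_ORDER - 1) {
            unsigned long buddy_pfn = pfn ^ (1UL << order);
            struct page *buddy = page + (buddy_pfn - pfn);
            unsigned long combined_pfn;

            if (!page_is_buddy(page, buddy, order))
                    break;                  /* buddy in use: stop merging */
            del_page_from_free_list(buddy, zone, order);
            combined_pfn = buddy_pfn & pfn; /* start of the merged block */
            page += combined_pfn - pfn;
            pfn = combined_pfn;
            order++;
    }
    set_buddy_order(page, order);
    add_to_free_list(page, zone, order, migratetype);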
1339 * Assumes all pages on list are in same zone, and of same order.
1342 * If the zone was previously in an "all pages pinned" state then look to
1345 * And clear the zone's pages_scanned counter, to hold off the "all pages are
1348 static void free_pcppages_bulk(struct zone *zone, int count, in free_pcppages_bulk() argument
1398 * under zone->lock. It is believed the overhead of in free_pcppages_bulk()
1409 spin_lock(&zone->lock); in free_pcppages_bulk()
1410 isolated_pageblocks = has_isolate_pageblock(zone); in free_pcppages_bulk()
1424 __free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE); in free_pcppages_bulk()
1427 spin_unlock(&zone->lock); in free_pcppages_bulk()
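The skeleton the fragments above outline: pages are first detached from the per-cpu lists, then returned to the buddy lists under a single zone->lock acquisition (condensed sketch; the per-migratetype accounting and isolated-pageblock fixups are dropped):

    spin_lock(&zone->lock);
    isolated_pageblocks = has_isolate_pageblock(zone);
    list_for_each_entry_safe(page, tmp, &head, lru)
            __free_one_page(page, page_to_pfn(page), zone, 0,
                            get_pcppage_migratetype(page), FPI_NONE);
    spin_unlock(&zone->lock);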
1430 static void free_one_page(struct zone *zone, in free_one_page() argument
1435 spin_lock(&zone->lock); in free_one_page()
1436 if (unlikely(has_isolate_pageblock(zone) || in free_one_page()
1440 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); in free_one_page()
1441 spin_unlock(&zone->lock); in free_one_page()
1445 unsigned long zone, int nid) in __init_single_page() argument
1448 set_page_links(page, zone, nid, pfn); in __init_single_page()
1457 if (!is_highmem_idx(zone)) in __init_single_page()
1475 struct zone *zone = &pgdat->node_zones[zid]; in init_reserved_page() local
1477 if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone)) in init_reserved_page()
1619 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1628 * belong to a single zone. We assume that a border between node0 and node1
1635 unsigned long end_pfn, struct zone *zone) in __pageblock_pfn_to_page() argument
1650 if (page_zone(start_page) != zone) in __pageblock_pfn_to_page()
1662 void set_zone_contiguous(struct zone *zone) in set_zone_contiguous() argument
1664 unsigned long block_start_pfn = zone->zone_start_pfn; in set_zone_contiguous()
1668 for (; block_start_pfn < zone_end_pfn(zone); in set_zone_contiguous()
1672 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone)); in set_zone_contiguous()
1675 block_end_pfn, zone)) in set_zone_contiguous()
1681 zone->contiguous = true; in set_zone_contiguous()
1684 void clear_zone_contiguous(struct zone *zone) in clear_zone_contiguous() argument
1686 zone->contiguous = false; in clear_zone_contiguous()
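What the __pageblock_pfn_to_page() check shown at lines 1635-1650 amounts to, as a condensed sketch (the page_zone_id() comparison on the end page is how the real function avoids a second full zone lookup):

    if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn - 1))
            return NULL;                    /* hole inside the block */
    start_page = pfn_to_page(start_pfn);
    if (page_zone(start_page) != zone)
            return NULL;                    /* block starts in another zone */
    end_page = pfn_to_page(end_pfn - 1);
    if (page_zone_id(start_page) != page_zone_id(end_page))
            return NULL;                    /* block straddles a zone boundary */
    return start_page;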
1775 static unsigned long __init deferred_init_pages(struct zone *zone, in deferred_init_pages() argument
1780 int nid = zone_to_nid(zone); in deferred_init_pages()
1782 int zid = zone_idx(zone); in deferred_init_pages()
1801 * This function is meant to pre-load the iterator for the zone init.
1807 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone, in deferred_init_mem_pfn_range_in_zone() argument
1814 * Start out by walking through the ranges in this zone that have in deferred_init_mem_pfn_range_in_zone()
1818 for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) { in deferred_init_mem_pfn_range_in_zone()
1841 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn, in deferred_init_maxorder() argument
1850 for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) { in deferred_init_maxorder()
1857 nr_pages += deferred_init_pages(zone, *start_pfn, t); in deferred_init_maxorder()
1868 for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) { in deferred_init_maxorder()
1889 struct zone *zone = arg; in deferred_init_memmap_chunk() local
1892 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn); in deferred_init_memmap_chunk()
1899 deferred_init_maxorder(&i, zone, &spfn, &epfn); in deferred_init_memmap_chunk()
1919 struct zone *zone; in deferred_init_memmap() local
1941 * Once we unlock here, the zone cannot be grown anymore, thus if an in deferred_init_memmap()
1942 * interrupt thread must allocate this early in boot, zone must be in deferred_init_memmap()
1947 /* Only the highest zone is deferred so find it */ in deferred_init_memmap()
1949 zone = pgdat->node_zones + zid; in deferred_init_memmap()
1950 if (first_init_pfn < zone_end_pfn(zone)) in deferred_init_memmap()
1954 /* If the zone is empty somebody else may have cleared out the zone */ in deferred_init_memmap()
1955 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, in deferred_init_memmap()
1965 .fn_arg = zone, in deferred_init_memmap()
1974 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, in deferred_init_memmap()
1978 /* Sanity check that the next zone really is unpopulated */ in deferred_init_memmap()
1979 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone)); in deferred_init_memmap()
1989 * If this zone has deferred pages, try to grow it by initializing enough
1995 * Return true when zone was grown, otherwise return false. We return true even
2004 deferred_grow_zone(struct zone *zone, unsigned int order) in deferred_grow_zone() argument
2007 pg_data_t *pgdat = zone->zone_pgdat; in deferred_grow_zone()
2013 /* Only the last zone may have deferred pages */ in deferred_grow_zone()
2014 if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat)) in deferred_grow_zone()
2020 * If someone grew this zone while we were waiting for spinlock, return in deferred_grow_zone()
2028 /* If the zone is empty somebody else may have cleared out the zone */ in deferred_grow_zone()
2029 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, in deferred_grow_zone()
2046 nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn); in deferred_grow_zone()
2071 _deferred_grow_zone(struct zone *zone, unsigned int order) in _deferred_grow_zone() argument
2073 return deferred_grow_zone(zone, order); in _deferred_grow_zone()
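How the allocator fast path uses this: when a watermark check fails during early boot, it first tries to grow the zone by initializing more deferred pages, then retries. This is the same pattern as the get_page_from_freelist() fragments around line 3903 further down, condensed here:

    if (!zone_watermark_fast(zone, order, mark, ac->highest_zoneidx,
                             alloc_flags, gfp_mask)) {
            /* Watermark failed, but the zone may still hold deferred pages. */
            if (static_branch_unlikely(&deferred_pages) &&
                _deferred_grow_zone(zone, order))
                    goto try_this_zone;
            continue;                       /* otherwise try the next zone */
    }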
2080 struct zone *zone; in page_alloc_init_late() local
2099 for_each_populated_zone(zone) in page_alloc_init_late()
2100 zone_pcp_update(zone); in page_alloc_init_late()
2118 for_each_populated_zone(zone) in page_alloc_init_late()
2119 set_zone_contiguous(zone); in page_alloc_init_late()
2167 static inline void expand(struct zone *zone, struct page *page, in expand() argument
2175 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); in expand()
2183 if (set_page_guard(zone, &page[size], high, migratetype)) in expand()
2186 add_to_free_list(&page[size], zone, high, migratetype); in expand()
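A condensed sketch of the splitting these expand() fragments perform: a block of order 'high' is halved repeatedly until it reaches the requested order 'low', and each split-off upper half goes back on a free list (guard-page handling omitted):

    unsigned long size = 1UL << high;

    while (high > low) {
            high--;
            size >>= 1;                     /* page[size] is the upper half */
            add_to_free_list(&page[size], zone, high, migratetype);
            set_buddy_order(&page[size], high);
    }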
2314 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, in __rmqueue_smallest() argument
2323 area = &(zone->free_area[current_order]); in __rmqueue_smallest()
2327 del_page_from_free_list(page, zone, current_order); in __rmqueue_smallest()
2328 expand(zone, page, order, current_order, migratetype); in __rmqueue_smallest()
2354 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback() argument
2357 return __rmqueue_smallest(zone, order, MIGRATE_CMA); in __rmqueue_cma_fallback()
2360 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback() argument
2369 static int move_freepages(struct zone *zone, in move_freepages() argument
2399 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); in move_freepages()
2400 VM_BUG_ON_PAGE(page_zone(page) != zone, page); in move_freepages()
2403 move_to_free_list(page, zone, order, migratetype); in move_freepages()
2411 int move_freepages_block(struct zone *zone, struct page *page, in move_freepages_block() argument
2423 /* Do not cross zone boundaries */ in move_freepages_block()
2424 if (!zone_spans_pfn(zone, start_pfn)) in move_freepages_block()
2426 if (!zone_spans_pfn(zone, end_pfn)) in move_freepages_block()
2429 return move_freepages(zone, start_pfn, end_pfn, migratetype, in move_freepages_block()
2477 static inline bool boost_watermark(struct zone *zone) in boost_watermark() argument
2489 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) in boost_watermark()
2492 max_boost = mult_frac(zone->_watermark[WMARK_HIGH], in boost_watermark()
2508 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, in boost_watermark()
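The arithmetic above, spelled out: mult_frac(x, n, d) computes x * n / d without intermediate overflow, and watermark_boost_factor defaults to 15000, i.e. 150% of the high watermark. A worked example with purely illustrative numbers:

    /* high watermark = 4000 pages, watermark_boost_factor = 15000:
     *   max_boost = mult_frac(4000, 15000, 10000) = 6000 pages
     *   max_boost = max(pageblock_nr_pages, max_boost) = 6000
     * Each fallback event then adds one pageblock, clamped:
     *   watermark_boost = min(watermark_boost + pageblock_nr_pages, 6000);
     */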
2522 static void steal_suitable_fallback(struct zone *zone, struct page *page, in steal_suitable_fallback() argument
2549 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD)) in steal_suitable_fallback()
2550 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); in steal_suitable_fallback()
2556 free_pages = move_freepages_block(zone, page, start_type, in steal_suitable_fallback()
2580 /* moving whole block can fail due to zone boundary conditions */ in steal_suitable_fallback()
2595 move_to_free_list(page, zone, current_order, start_type); in steal_suitable_fallback()
2639 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, in reserve_highatomic_pageblock() argument
2646 * Limit the number reserved to 1 pageblock or roughly 1% of a zone. in reserve_highatomic_pageblock()
2649 max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages; in reserve_highatomic_pageblock()
2650 if (zone->nr_reserved_highatomic >= max_managed) in reserve_highatomic_pageblock()
2653 spin_lock_irqsave(&zone->lock, flags); in reserve_highatomic_pageblock()
2656 if (zone->nr_reserved_highatomic >= max_managed) in reserve_highatomic_pageblock()
2663 zone->nr_reserved_highatomic += pageblock_nr_pages; in reserve_highatomic_pageblock()
2665 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); in reserve_highatomic_pageblock()
2669 spin_unlock_irqrestore(&zone->lock, flags); in reserve_highatomic_pageblock()
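For a concrete feel of the cap computed at line 2649, with illustrative numbers: given 1,000,000 managed pages and 512-page pageblocks, max_managed = 1000000/100 + 512 = 10512 pages, so at most roughly 1% of the zone (about 20 pageblocks) can be held back for MIGRATE_HIGHATOMIC allocations.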
2687 struct zone *zone; in unreserve_highatomic_pageblock() local
2692 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, in unreserve_highatomic_pageblock()
2698 if (!force && zone->nr_reserved_highatomic <= in unreserve_highatomic_pageblock()
2702 spin_lock_irqsave(&zone->lock, flags); in unreserve_highatomic_pageblock()
2704 struct free_area *area = &(zone->free_area[order]); in unreserve_highatomic_pageblock()
2725 zone->nr_reserved_highatomic -= min( in unreserve_highatomic_pageblock()
2727 zone->nr_reserved_highatomic); in unreserve_highatomic_pageblock()
2740 ret = move_freepages_block(zone, page, ac->migratetype, in unreserve_highatomic_pageblock()
2743 spin_unlock_irqrestore(&zone->lock, flags); in unreserve_highatomic_pageblock()
2747 spin_unlock_irqrestore(&zone->lock, flags); in unreserve_highatomic_pageblock()
2764 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, in __rmqueue_fallback() argument
2789 area = &(zone->free_area[current_order]); in __rmqueue_fallback()
2815 area = &(zone->free_area[current_order]); in __rmqueue_fallback()
2831 steal_suitable_fallback(zone, page, alloc_flags, start_migratetype, in __rmqueue_fallback()
2842 __rmqueue_with_cma_reuse(struct zone *zone, unsigned int order, in __rmqueue_with_cma_reuse() argument
2847 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue_with_cma_reuse()
2852 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue_with_cma_reuse()
2856 __rmqueue_fallback(zone, order, migratetype, alloc_flags)) in __rmqueue_with_cma_reuse()
2864 * Call me with the zone->lock already held.
2867 __rmqueue(struct zone *zone, unsigned int order, int migratetype, in __rmqueue() argument
2873 page = __rmqueue_with_cma_reuse(zone, order, migratetype, alloc_flags); in __rmqueue()
2880 * allocating from CMA when over half of the zone's free memory in __rmqueue()
2884 zone_page_state(zone, NR_FREE_CMA_PAGES) > in __rmqueue()
2885 zone_page_state(zone, NR_FREE_PAGES) / 2) { in __rmqueue()
2886 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
2892 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue()
2895 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
2897 if (!page && __rmqueue_fallback(zone, order, migratetype, in __rmqueue()
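The heuristic the fragments at lines 2880-2897 implement, condensed (flag names as in the matches; retries and the vendor __rmqueue_with_cma_reuse() path are omitted):

    /* Balance movable allocations between regular and CMA areas:
     * drain CMA first once it holds over half of the zone's free pages. */
    if (IS_ENABLED(CONFIG_CMA) && (alloc_flags & ALLOC_CMA) &&
        zone_page_state(zone, NR_FREE_CMA_PAGES) >
        zone_page_state(zone, NR_FREE_PAGES) / 2)
            page = __rmqueue_cma_fallback(zone, order);

    if (!page)
            page = __rmqueue_smallest(zone, order, migratetype);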
2912 static int rmqueue_bulk(struct zone *zone, unsigned int order, in rmqueue_bulk() argument
2918 spin_lock(&zone->lock); in rmqueue_bulk()
2920 struct page *page = __rmqueue(zone, order, migratetype, in rmqueue_bulk()
2941 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, in rmqueue_bulk()
2951 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); in rmqueue_bulk()
2952 spin_unlock(&zone->lock); in rmqueue_bulk()
2965 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) in drain_zone_pages() argument
2974 free_pcppages_bulk(zone, to_drain, pcp); in drain_zone_pages()
2980 * Drain pcplists of the indicated processor and zone.
2986 static void drain_pages_zone(unsigned int cpu, struct zone *zone) in drain_pages_zone() argument
2993 pset = per_cpu_ptr(zone->pageset, cpu); in drain_pages_zone()
2997 free_pcppages_bulk(zone, pcp->count, pcp); in drain_pages_zone()
3010 struct zone *zone; in drain_pages() local
3012 for_each_populated_zone(zone) { in drain_pages()
3013 drain_pages_zone(cpu, zone); in drain_pages()
3020 * The CPU has to be pinned. When zone parameter is non-NULL, spill just
3021 * the single zone's pages.
3023 void drain_local_pages(struct zone *zone) in drain_local_pages() argument
3027 if (zone) in drain_local_pages()
3028 drain_pages_zone(cpu, zone); in drain_local_pages()
3047 drain_local_pages(drain->zone); in drain_local_pages_wq()
3054 * When zone parameter is non-NULL, spill just the single zone's pages.
3058 void drain_all_pages(struct zone *zone) in drain_all_pages() argument
3077 * a zone. Such callers are primarily CMA and memory hotplug and need in drain_all_pages()
3081 if (!zone) in drain_all_pages()
3094 struct zone *z; in drain_all_pages()
3097 if (zone) { in drain_all_pages()
3098 pcp = per_cpu_ptr(zone->pageset, cpu); in drain_all_pages()
3120 drain->zone = zone; in drain_all_pages()
3137 void mark_free_pages(struct zone *zone) in mark_free_pages() argument
3144 if (zone_is_empty(zone)) in mark_free_pages()
3147 spin_lock_irqsave(&zone->lock, flags); in mark_free_pages()
3149 max_zone_pfn = zone_end_pfn(zone); in mark_free_pages()
3150 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) in mark_free_pages()
3159 if (page_zone(page) != zone) in mark_free_pages()
3168 &zone->free_area[order].free_list[t], lru) { in mark_free_pages()
3181 spin_unlock_irqrestore(&zone->lock, flags); in mark_free_pages()
3199 struct zone *zone = page_zone(page); in free_unref_page_commit() local
3215 free_one_page(zone, page, pfn, 0, migratetype, in free_unref_page_commit()
3222 pcp = &this_cpu_ptr(zone->pageset)->pcp; in free_unref_page_commit()
3227 free_pcppages_bulk(zone, batch, pcp); in free_unref_page_commit()
3310 struct zone *zone; in __isolate_free_page() local
3315 zone = page_zone(page); in __isolate_free_page()
3325 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); in __isolate_free_page()
3326 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) in __isolate_free_page()
3329 __mod_zone_freepage_state(zone, -(1UL << order), mt); in __isolate_free_page()
3334 del_page_from_free_list(page, zone, order); in __isolate_free_page()
3366 struct zone *zone = page_zone(page); in __putback_isolated_page() local
3368 /* zone lock should be held when this function is called */ in __putback_isolated_page()
3369 lockdep_assert_held(&zone->lock); in __putback_isolated_page()
3372 __free_one_page(page, page_to_pfn(page), zone, order, mt, in __putback_isolated_page()
3381 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z) in zone_statistics()
3404 static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype, in __rmqueue_pcplist() argument
3413 pcp->count += rmqueue_bulk(zone, 0, in __rmqueue_pcplist()
3429 static struct page *rmqueue_pcplist(struct zone *preferred_zone, in rmqueue_pcplist()
3430 struct zone *zone, gfp_t gfp_flags, in rmqueue_pcplist() argument
3439 pcp = &this_cpu_ptr(zone->pageset)->pcp; in rmqueue_pcplist()
3441 page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list); in rmqueue_pcplist()
3444 zone_statistics(preferred_zone, zone); in rmqueue_pcplist()
3451 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
3454 struct page *rmqueue(struct zone *preferred_zone, in rmqueue()
3455 struct zone *zone, unsigned int order, in rmqueue() argument
3470 page = rmqueue_pcplist(preferred_zone, zone, gfp_flags, in rmqueue()
3481 spin_lock_irqsave(&zone->lock, flags); in rmqueue()
3492 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); in rmqueue()
3497 page = __rmqueue(zone, order, migratetype, alloc_flags); in rmqueue()
3499 spin_unlock(&zone->lock); in rmqueue()
3502 __mod_zone_freepage_state(zone, -(1 << order), in rmqueue()
3506 zone_statistics(preferred_zone, zone); in rmqueue()
3511 if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) { in rmqueue()
3512 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); in rmqueue()
3513 wakeup_kswapd(zone, 0, 0, zone_idx(zone)); in rmqueue()
3516 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); in rmqueue()
3598 static inline long __zone_watermark_unusable_free(struct zone *z, in __zone_watermark_unusable_free()
3624 * one free page of a suitable size. Checking now avoids taking the zone lock
3627 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in __zone_watermark_ok()
3691 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in zone_watermark_ok()
3698 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, in zone_watermark_fast()
3738 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, in zone_watermark_ok_safe()
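The core of the watermark test, heavily condensed (the real __zone_watermark_ok() also discounts unusable free pages, handles ALLOC_HARDER/ALLOC_CMA cases, and walks per-migratetype lists rather than just nr_free):

    static bool watermark_ok_sketch(struct zone *z, unsigned int order,
                                    unsigned long mark, int highest_zoneidx,
                                    long free_pages)
    {
            unsigned int o;

            /* Enough free pages left after the lowmem reserve? */
            if (free_pages <= (long)mark + z->lowmem_reserve[highest_zoneidx])
                    return false;
            if (!order)
                    return true;
            /* High-order request: is any big-enough block free? */
            for (o = order; o < MAX_ORDER; o++)
                    if (z->free_area[o].nr_free)
                            return true;
            return false;
    }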
3751 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) in zone_allows_reclaim() argument
3753 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= in zone_allows_reclaim()
3757 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) in zone_allows_reclaim() argument
3764 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
3765 * fragmentation is subtle. If the preferred zone was HIGHMEM then
3766 * premature use of a lower zone may cause lowmem pressure problems that
3767 * are worse than fragmentation. If the next zone is ZONE_DMA then it is
3772 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) in alloc_flags_nofragment() argument
3783 if (!zone) in alloc_flags_nofragment()
3786 if (zone_idx(zone) != ZONE_NORMAL) in alloc_flags_nofragment()
3791 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume in alloc_flags_nofragment()
3795 if (nr_online_nodes > 1 && !populated_zone(--zone)) in alloc_flags_nofragment()
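The decision the alloc_flags_nofragment() fragments trace, condensed: on machines where a populated ZONE_DMA32 sits just below the preferred ZONE_NORMAL, prefer keeping the allocation within local pageblocks rather than fragmenting a lower zone:

    if (zone_idx(zone) != ZONE_NORMAL)
            return alloc_flags;
    /* ZONE_DMA32 is assumed to sit immediately below ZONE_NORMAL in
     * node_zones[]; on NUMA, skip the flag if it is unpopulated. */
    if (nr_online_nodes > 1 && !populated_zone(--zone))
            return alloc_flags;
    return alloc_flags | ALLOC_NOFRAGMENT;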
3826 struct zone *zone; in get_page_from_freelist() local
3832 * Scan zonelist, looking for a zone with enough free. in get_page_from_freelist()
3837 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, in get_page_from_freelist()
3844 !__cpuset_zone_allowed(zone, gfp_mask)) in get_page_from_freelist()
3866 if (last_pgdat_dirty_limit == zone->zone_pgdat) in get_page_from_freelist()
3869 if (!node_dirty_ok(zone->zone_pgdat)) { in get_page_from_freelist()
3870 last_pgdat_dirty_limit = zone->zone_pgdat; in get_page_from_freelist()
3876 zone != ac->preferred_zoneref->zone) { in get_page_from_freelist()
3884 local_nid = zone_to_nid(ac->preferred_zoneref->zone); in get_page_from_freelist()
3885 if (zone_to_nid(zone) != local_nid) { in get_page_from_freelist()
3891 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); in get_page_from_freelist()
3892 if (!zone_watermark_fast(zone, order, mark, in get_page_from_freelist()
3899 * Watermark failed for this zone, but see if we can in get_page_from_freelist()
3900 * grow this zone if it contains deferred pages. in get_page_from_freelist()
3903 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
3913 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone)) in get_page_from_freelist()
3916 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); in get_page_from_freelist()
3926 if (zone_watermark_ok(zone, order, mark, in get_page_from_freelist()
3935 page = rmqueue(ac->preferred_zoneref->zone, zone, order, in get_page_from_freelist()
3945 reserve_highatomic_pageblock(page, zone, order); in get_page_from_freelist()
3950 /* Try again if zone has deferred pages */ in get_page_from_freelist()
3952 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
4149 * At least in one zone compaction wasn't deferred or skipped, so let's in __alloc_pages_direct_compact()
4163 struct zone *zone = page_zone(page); in __alloc_pages_direct_compact() local
4165 zone->compact_blockskip_flush = false; in __alloc_pages_direct_compact()
4166 compaction_defer_reset(zone, order, true); in __alloc_pages_direct_compact()
4201 * compaction considers all the zone as desperately out of memory in should_compact_retry()
4275 struct zone *zone; in should_compact_retry() local
4287 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in should_compact_retry()
4289 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), in should_compact_retry()
4411 struct zone *zone; in wake_all_kswapds() local
4415 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, in wake_all_kswapds()
4417 if (last_pgdat != zone->zone_pgdat) in wake_all_kswapds()
4418 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); in wake_all_kswapds()
4419 last_pgdat = zone->zone_pgdat; in wake_all_kswapds()
4522 struct zone *zone; in should_reclaim_retry() local
4551 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in should_reclaim_retry()
4555 unsigned long min_wmark = min_wmark_pages(zone); in should_reclaim_retry()
4558 available = reclaimable = zone_reclaimable_pages(zone); in should_reclaim_retry()
4559 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); in should_reclaim_retry()
4565 wmark = __zone_watermark_ok(zone, order, min_wmark, in should_reclaim_retry()
4579 write_pending = zone_page_state_snapshot(zone, in should_reclaim_retry()
4686 if (!ac->preferred_zoneref->zone) in __alloc_pages_slowpath()
4938 /* Dirty zone balancing only done in the fast path */ in prepare_alloc_pages()
4942 * The preferred zone is used for statistics but crucially it is in prepare_alloc_pages()
4982 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask); in __alloc_pages_nodemask()
5272 * @offset: The zone index of the highest zone
5275 * high watermark within all zones at or below a given zone index. For each
5276 * zone, the number of pages is calculated as:
5285 struct zone *zone; in nr_free_zone_pages() local
5292 for_each_zone_zonelist(zone, z, zonelist, offset) { in nr_free_zone_pages()
5293 unsigned long size = zone_managed_pages(zone); in nr_free_zone_pages()
5294 unsigned long high = high_wmark_pages(zone); in nr_free_zone_pages()
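The summation itself contains no whole-word "zone" and so is absent from the matches; reconstructed from the surrounding code, the loop at lines 5292-5294 counts, per zone, the pages above the high watermark:

    for_each_zone_zonelist(zone, z, zonelist, offset) {
            unsigned long size = zone_managed_pages(zone);
            unsigned long high = high_wmark_pages(zone);

            if (size > high)
                    sum += size - high;     /* pages above the high watermark */
    }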
5317 static inline void show_node(struct zone *zone) in show_node() argument
5320 printk("Node %d ", zone_to_nid(zone)); in show_node()
5330 struct zone *zone; in si_mem_available() local
5336 for_each_zone(zone) in si_mem_available()
5337 wmark_low += low_wmark_pages(zone); in si_mem_available()
5398 struct zone *zone = &pgdat->node_zones[zone_type]; in si_meminfo_node() local
5400 if (is_highmem(zone)) { in si_meminfo_node()
5401 managed_highpages += zone_managed_pages(zone); in si_meminfo_node()
5402 free_highpages += zone_page_state(zone, NR_FREE_PAGES); in si_meminfo_node()
5477 struct zone *zone; in show_free_areas() local
5480 for_each_populated_zone(zone) { in show_free_areas()
5481 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
5485 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; in show_free_areas()
5568 for_each_populated_zone(zone) { in show_free_areas()
5571 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
5576 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; in show_free_areas()
5578 show_node(zone); in show_free_areas()
5601 zone->name, in show_free_areas()
5602 K(zone_page_state(zone, NR_FREE_PAGES)), in show_free_areas()
5603 K(min_wmark_pages(zone)), in show_free_areas()
5604 K(low_wmark_pages(zone)), in show_free_areas()
5605 K(high_wmark_pages(zone)), in show_free_areas()
5606 K(zone->nr_reserved_highatomic), in show_free_areas()
5607 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)), in show_free_areas()
5608 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)), in show_free_areas()
5609 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)), in show_free_areas()
5610 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)), in show_free_areas()
5611 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)), in show_free_areas()
5612 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)), in show_free_areas()
5613 K(zone->present_pages), in show_free_areas()
5614 K(zone_managed_pages(zone)), in show_free_areas()
5615 K(zone_page_state(zone, NR_MLOCK)), in show_free_areas()
5616 K(zone_page_state(zone, NR_PAGETABLE)), in show_free_areas()
5617 K(zone_page_state(zone, NR_BOUNCE)), in show_free_areas()
5619 K(this_cpu_read(zone->pageset->pcp.count)), in show_free_areas()
5620 K(zone_page_state(zone, NR_FREE_CMA_PAGES))); in show_free_areas()
5623 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]); in show_free_areas()
5627 for_each_populated_zone(zone) { in show_free_areas()
5632 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
5634 show_node(zone); in show_free_areas()
5635 printk(KERN_CONT "%s: ", zone->name); in show_free_areas()
5637 spin_lock_irqsave(&zone->lock, flags); in show_free_areas()
5639 struct free_area *area = &zone->free_area[order]; in show_free_areas()
5651 spin_unlock_irqrestore(&zone->lock, flags); in show_free_areas()
5668 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) in zoneref_set_zone() argument
5670 zoneref->zone = zone; in zoneref_set_zone()
5671 zoneref->zone_idx = zone_idx(zone); in zoneref_set_zone()
5675 * Builds allocation fallback zone lists.
5681 struct zone *zone; in build_zonerefs_node() local
5687 zone = pgdat->node_zones + zone_type; in build_zonerefs_node()
5688 if (managed_zone(zone)) { in build_zonerefs_node()
5689 zoneref_set_zone(zone, &zonerefs[nr_zones++]); in build_zonerefs_node()
5793 * This results in maximum locality--normal zone overflows into local
5794 * DMA zone, if any--but risks exhausting DMA zone.
5812 zonerefs->zone = NULL; in build_zonelists_in_node_order()
5827 zonerefs->zone = NULL; in build_thisnode_zonelists()
5832 * Build zonelists ordered by zone and nodes within zones.
5833 * This results in conserving DMA zone[s] until all Normal memory is
5835 * may still exist in local DMA zone.
5873 * I.e., first node id of first zone in arg node's generic zonelist.
5884 return zone_to_nid(z->zone); in local_memory_node()
5925 zonerefs->zone = NULL; in build_zonelists()
5944 * Other parts of the kernel may not check if the zone is available.
5979 * i.e., the node of the first zone in the generic zonelist. in __build_all_zonelists()
6003 * each zone will be allocated later when the per cpu in build_all_zonelists_init()
6041 * more accurate, but expensive to check per-zone. This check is in build_all_zonelists()
6055 pr_info("Policy zone: %s\n", zone_names[policy_zone]); in build_all_zonelists()
6059 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
6061 overlap_memmap_init(unsigned long zone, unsigned long *pfn) in overlap_memmap_init() argument
6065 if (mirrored_kernelcore && zone == ZONE_MOVABLE) { in overlap_memmap_init()
6088 * zone stats (e.g., nr_isolate_pageblock) are touched.
6090 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, in memmap_init_zone() argument
6109 if (zone == ZONE_DEVICE) { in memmap_init_zone()
6125 if (overlap_memmap_init(zone, &pfn)) in memmap_init_zone()
6132 __init_single_page(page, pfn, zone, nid); in memmap_init_zone()
6150 void __ref memmap_init_zone_device(struct zone *zone, in memmap_init_zone_device() argument
6156 struct pglist_data *pgdat = zone->zone_pgdat; in memmap_init_zone_device()
6158 unsigned long zone_idx = zone_idx(zone); in memmap_init_zone_device()
6162 if (WARN_ON_ONCE(!pgmap || zone_idx(zone) != ZONE_DEVICE)) in memmap_init_zone_device()
6182 * phase for it to be fully associated with a zone. in memmap_init_zone_device()
6218 static void __meminit zone_init_free_lists(struct zone *zone) in zone_init_free_lists() argument
6222 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); in zone_init_free_lists()
6223 zone->free_area[order].nr_free = 0; in zone_init_free_lists()
6243 * - zone and node links point to zone and node that span the page if the
6244 * hole is in the middle of a zone
6245 * - zone and node links point to adjacent zone/node if the hole falls on
6246 * the zone boundary; the pages in such holes will be prepended to the
6247 * zone/node above the hole except for the trailing pages in the last
6248 * section that will be appended to the zone/node below.
6252 int zone, int node) in init_unavailable_range() argument
6263 __init_single_page(pfn_to_page(pfn), pfn, zone, node); in init_unavailable_range()
6269 pr_info("On node %d, zone %s: %lld pages in unavailable ranges", in init_unavailable_range()
6270 node, zone_names[zone], pgcnt); in init_unavailable_range()
6275 int zone, int node) in init_unavailable_range() argument
6280 static void __init memmap_init_zone_range(struct zone *zone, in memmap_init_zone_range() argument
6285 unsigned long zone_start_pfn = zone->zone_start_pfn; in memmap_init_zone_range()
6286 unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages; in memmap_init_zone_range()
6287 int nid = zone_to_nid(zone), zone_id = zone_idx(zone); in memmap_init_zone_range()
6314 struct zone *zone = node->node_zones + j; in memmap_init() local
6316 if (!populated_zone(zone)) in memmap_init()
6319 memmap_init_zone_range(zone, start_pfn, end_pfn, in memmap_init()
6329 * Append the pages in this hole to the highest zone in the last in memmap_init()
6343 unsigned long zone, in arch_memmap_init() argument
6348 static int zone_batchsize(struct zone *zone) in zone_batchsize() argument
6355 * size of the zone. in zone_batchsize()
6357 batch = zone_managed_pages(zone) / 1024; in zone_batchsize()
6462 static void pageset_set_high_and_batch(struct zone *zone, in pageset_set_high_and_batch() argument
6467 (zone_managed_pages(zone) / in pageset_set_high_and_batch()
6470 pageset_set_batch(pcp, zone_batchsize(zone)); in pageset_set_high_and_batch()
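Worked through for an illustrative 4 GiB zone with 4 KiB pages (1,048,576 managed pages), using the clamping steps of zone_batchsize() that do not match "zone" and are therefore not shown above: 1048576/1024 = 1024 pages, capped at 1 MiB worth (256 pages), divided by 4 to 64, then rounded via rounddown_pow_of_two(64 + 64/2) - 1 = 63 — the familiar pcp batch of 63. With percpu_pagelist_fraction set to, say, 8, pcp->high would instead be 1048576/8 = 131072 pages per zone per cpu.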
6473 static void __meminit zone_pageset_init(struct zone *zone, int cpu) in zone_pageset_init() argument
6475 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu); in zone_pageset_init()
6478 pageset_set_high_and_batch(zone, pcp); in zone_pageset_init()
6481 void __meminit setup_zone_pageset(struct zone *zone) in setup_zone_pageset() argument
6484 zone->pageset = alloc_percpu(struct per_cpu_pageset); in setup_zone_pageset()
6486 zone_pageset_init(zone, cpu); in setup_zone_pageset()
6496 struct zone *zone; in setup_per_cpu_pageset() local
6499 for_each_populated_zone(zone) in setup_per_cpu_pageset()
6500 setup_zone_pageset(zone); in setup_per_cpu_pageset()
6521 static __meminit void zone_pcp_init(struct zone *zone) in zone_pcp_init() argument
6528 zone->pageset = &boot_pageset; in zone_pcp_init()
6530 if (populated_zone(zone)) in zone_pcp_init()
6531 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n", in zone_pcp_init()
6532 zone->name, zone->present_pages, in zone_pcp_init()
6533 zone_batchsize(zone)); in zone_pcp_init()
6536 void __meminit init_currently_empty_zone(struct zone *zone, in init_currently_empty_zone() argument
6540 struct pglist_data *pgdat = zone->zone_pgdat; in init_currently_empty_zone()
6541 int zone_idx = zone_idx(zone) + 1; in init_currently_empty_zone()
6546 zone->zone_start_pfn = zone_start_pfn; in init_currently_empty_zone()
6549 "Initialising map node %d zone %lu pfns %lu -> %lu\n", in init_currently_empty_zone()
6551 (unsigned long)zone_idx(zone), in init_currently_empty_zone()
6554 zone_init_free_lists(zone); in init_currently_empty_zone()
6555 zone->initialized = 1; in init_currently_empty_zone()
6588 * This finds a zone that can be used for ZONE_MOVABLE pages. The
6590 * increasing memory addresses so that the "highest" populated zone is used
6609 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
6613 * is distributed. This helper function adjusts the zone ranges
6615 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
6646 * Return the number of pages a zone spans in a node, including holes
6662 /* Get the start and end of the zone */ in zone_spanned_pages_in_node()
6669 /* Check that this node has pages within the zone's required range */ in zone_spanned_pages_in_node()
6673 /* Move the zone boundaries inside the node if necessary */ in zone_spanned_pages_in_node()
6714 /* Return the number of page frames in holes in a zone on a node */
6773 struct zone *zone = pgdat->node_zones + i; in calculate_node_totalpages() local
6791 zone->zone_start_pfn = zone_start_pfn; in calculate_node_totalpages()
6793 zone->zone_start_pfn = 0; in calculate_node_totalpages()
6794 zone->spanned_pages = size; in calculate_node_totalpages()
6795 zone->present_pages = real_size; in calculate_node_totalpages()
6809 * Calculate the size of the zone->blockflags rounded to an unsigned long
6829 struct zone *zone, in setup_usemap() argument
6834 zone->pageblock_flags = NULL; in setup_usemap()
6836 zone->pageblock_flags = in setup_usemap()
6839 if (!zone->pageblock_flags) in setup_usemap()
6840 panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n", in setup_usemap()
6841 usemapsize, zone->name, pgdat->node_id); in setup_usemap()
6845 static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone, in setup_usemap() argument
6893 * the zone and SPARSEMEM is in use. If there are holes within the in calc_memmap_size()
6894 * zone, each populated memory region may cost us one or two extra in calc_memmap_size()
6949 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid, in zone_init_internals() argument
6952 atomic_long_set(&zone->managed_pages, remaining_pages); in zone_init_internals()
6953 zone_set_nid(zone, nid); in zone_init_internals()
6954 zone->name = zone_names[idx]; in zone_init_internals()
6955 zone->zone_pgdat = NODE_DATA(nid); in zone_init_internals()
6956 spin_lock_init(&zone->lock); in zone_init_internals()
6957 zone_seqlock_init(zone); in zone_init_internals()
6958 zone_pcp_init(zone); in zone_init_internals()
6962 * Set up the zone data structures
6981 * Set up the zone data structures:
6998 struct zone *zone = pgdat->node_zones + j; in free_area_init_core() local
7000 unsigned long zone_start_pfn = zone->zone_start_pfn; in free_area_init_core()
7002 size = zone->spanned_pages; in free_area_init_core()
7003 freesize = zone->present_pages; in free_area_init_core()
7007 * is used by this zone for memmap. This affects the watermark in free_area_init_core()
7016 " %s zone: %lu pages used for memmap\n", in free_area_init_core()
7019 pr_warn(" %s zone: %lu pages exceeds freesize %lu\n", in free_area_init_core()
7026 printk(KERN_DEBUG " %s zone: %lu pages reserved\n", in free_area_init_core()
7042 zone_init_internals(zone, j, nid, freesize); in free_area_init_core()
7048 setup_usemap(pgdat, zone, zone_start_pfn, size); in free_area_init_core()
7049 init_currently_empty_zone(zone, zone_start_pfn, size); in free_area_init_core()
7072 * The zone's endpoints aren't required to be MAX_ORDER in alloc_node_mem_map()
7220 * Sum pages in active regions for movable zone.
7240 * Find the PFN the Movable zone begins in each node. Kernel memory
7458 struct zone *zone = &pgdat->node_zones[zone_type]; in check_for_memory() local
7459 if (populated_zone(zone)) { in check_for_memory()
7479 * free_area_init - Initialise all pg_data_t and zone data
7480 * @max_zone_pfn: an array of max PFNs for each zone
7484 * zone in each node and their holes is calculated. If the maximum PFN
7485 * between two adjacent zones match, it is assumed that the zone is empty.
7487 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
7494 int i, nid, zone; in free_area_init() local
7497 /* Record where the zone boundaries are */ in free_area_init()
7508 zone = MAX_NR_ZONES - i - 1; in free_area_init()
7510 zone = i; in free_area_init()
7512 if (zone == ZONE_MOVABLE) in free_area_init()
7515 end_pfn = max(max_zone_pfn[zone], start_pfn); in free_area_init()
7516 arch_zone_lowest_possible_pfn[zone] = start_pfn; in free_area_init()
7517 arch_zone_highest_possible_pfn[zone] = end_pfn; in free_area_init()
7526 /* Print out the zone ranges */ in free_area_init()
7527 pr_info("Zone ranges:\n"); in free_area_init()
7544 pr_info("Movable zone start for each node\n"); in free_area_init()
7743 * set_dma_reserve - set the specified number of pages reserved in the first zone
7746 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
7747 * In the DMA zone, a significant percentage may be consumed by kernel image
7750 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
7826 struct zone *zone = pgdat->node_zones + i; in calculate_totalreserve_pages() local
7828 unsigned long managed_pages = zone_managed_pages(zone); in calculate_totalreserve_pages()
7830 /* Find valid and maximum lowmem_reserve in the zone */ in calculate_totalreserve_pages()
7832 if (zone->lowmem_reserve[j] > max) in calculate_totalreserve_pages()
7833 max = zone->lowmem_reserve[j]; in calculate_totalreserve_pages()
7837 max += high_wmark_pages(zone); in calculate_totalreserve_pages()
7852 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
7854 * pages are left in the zone after a successful __alloc_pages().
7863 struct zone *zone = &pgdat->node_zones[i]; in setup_per_zone_lowmem_reserve() local
7865 bool clear = !ratio || !zone_managed_pages(zone); in setup_per_zone_lowmem_reserve()
7869 struct zone *upper_zone = &pgdat->node_zones[j]; in setup_per_zone_lowmem_reserve()
7874 zone->lowmem_reserve[j] = 0; in setup_per_zone_lowmem_reserve()
7876 zone->lowmem_reserve[j] = managed_pages / ratio; in setup_per_zone_lowmem_reserve()
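Illustrative numbers for the division at line 7876: if the zones above a given zone together hold 4,000,000 managed pages (the managed_pages accumulator walks upward through node_zones[]) and sysctl_lowmem_reserve_ratio for that zone is 256 (the default for ZONE_DMA32), it keeps 4000000/256 = 15625 pages off-limits to allocations that could have been placed in the higher zones.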
7889 struct zone *zone; in __setup_per_zone_wmarks() local
7893 for_each_zone(zone) { in __setup_per_zone_wmarks()
7894 if (!is_highmem(zone)) in __setup_per_zone_wmarks()
7895 lowmem_pages += zone_managed_pages(zone); in __setup_per_zone_wmarks()
7898 for_each_zone(zone) { in __setup_per_zone_wmarks()
7901 spin_lock_irqsave(&zone->lock, flags); in __setup_per_zone_wmarks()
7902 tmp = (u64)pages_min * zone_managed_pages(zone); in __setup_per_zone_wmarks()
7904 if (is_highmem(zone)) { in __setup_per_zone_wmarks()
7916 min_pages = zone_managed_pages(zone) / 1024; in __setup_per_zone_wmarks()
7918 zone->_watermark[WMARK_MIN] = min_pages; in __setup_per_zone_wmarks()
7921 * If it's a lowmem zone, reserve a number of pages in __setup_per_zone_wmarks()
7922 * proportionate to the zone's size. in __setup_per_zone_wmarks()
7924 zone->_watermark[WMARK_MIN] = tmp; in __setup_per_zone_wmarks()
7933 mult_frac(zone_managed_pages(zone), in __setup_per_zone_wmarks()
7936 zone->watermark_boost = 0; in __setup_per_zone_wmarks()
7937 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; in __setup_per_zone_wmarks()
7938 zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2; in __setup_per_zone_wmarks()
7940 spin_unlock_irqrestore(&zone->lock, flags); in __setup_per_zone_wmarks()
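The per-zone arithmetic these fragments trace, condensed for a !highmem zone ('pages_min' is min_free_kbytes expressed in pages, 'lowmem_pages' the total computed at lines 7893-7895; names as in the matches):

    u64 tmp = (u64)pages_min * zone_managed_pages(zone);
    do_div(tmp, lowmem_pages);              /* the zone's share of pages_min */
    zone->_watermark[WMARK_MIN] = tmp;

    /* The low/high gap is the larger of min/4 and a managed-pages share
     * controlled by watermark_scale_factor (default 10, i.e. 0.1%). */
    tmp = max_t(u64, tmp >> 2,
                mult_frac(zone_managed_pages(zone),
                          watermark_scale_factor, 10000));
    zone->watermark_boost = 0;
    zone->_watermark[WMARK_LOW]  = min_wmark_pages(zone) + tmp;
    zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;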
7951 * Ensures that the watermark[min,low,high] values for each zone are set
8060 struct zone *zone; in setup_min_unmapped_ratio() local
8065 for_each_zone(zone) in setup_min_unmapped_ratio()
8066 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) * in setup_min_unmapped_ratio()
8088 struct zone *zone; in setup_min_slab_ratio() local
8093 for_each_zone(zone) in setup_min_slab_ratio()
8094 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) * in setup_min_slab_ratio()
8120 * if in function of the boot time zone sizes.
8138 static void __zone_pcp_update(struct zone *zone) in __zone_pcp_update() argument
8143 pageset_set_high_and_batch(zone, in __zone_pcp_update()
8144 per_cpu_ptr(zone->pageset, cpu)); in __zone_pcp_update()
8148 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
8149 * cpu. It is the fraction of total pages in each zone that a hot per cpu
8155 struct zone *zone; in percpu_pagelist_fraction_sysctl_handler() local
8178 for_each_populated_zone(zone) in percpu_pagelist_fraction_sysctl_handler()
8179 __zone_pcp_update(zone); in percpu_pagelist_fraction_sysctl_handler()
8338 struct page *has_unmovable_pages(struct zone *zone, struct page *page, in has_unmovable_pages() argument
8373 * If the zone is movable and we have ruled out all reserved in has_unmovable_pages()
8377 if (zone_idx(zone) == ZONE_MOVABLE) in has_unmovable_pages()
8460 /* [start, end) must belong to a single zone. */
8470 .nid = zone_to_nid(cc->zone), in __alloc_contig_migrate_range()
8495 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, in __alloc_contig_migrate_range()
8520 * aligned. The PFN range must belong to a single zone.
8540 .zone = page_zone(pfn_to_page(start)), in alloc_contig_range()
8606 * We don't have to hold zone->lock here because the pages are in alloc_contig_range()
8672 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn, in pfn_range_valid_contig()
8698 static bool zone_spans_last_pfn(const struct zone *zone, in zone_spans_last_pfn() argument
8703 return zone_spans_pfn(zone, last_pfn); in zone_spans_last_pfn()
8732 struct zone *zone; in alloc_contig_pages() local
8736 for_each_zone_zonelist_nodemask(zone, z, zonelist, in alloc_contig_pages()
8738 spin_lock_irqsave(&zone->lock, flags); in alloc_contig_pages()
8740 pfn = ALIGN(zone->zone_start_pfn, nr_pages); in alloc_contig_pages()
8741 while (zone_spans_last_pfn(zone, pfn, nr_pages)) { in alloc_contig_pages()
8742 if (pfn_range_valid_contig(zone, pfn, nr_pages)) { in alloc_contig_pages()
8744 * We release the zone lock here because in alloc_contig_pages()
8745 * alloc_contig_range() will also lock the zone in alloc_contig_pages()
8750 spin_unlock_irqrestore(&zone->lock, flags); in alloc_contig_pages()
8755 spin_lock_irqsave(&zone->lock, flags); in alloc_contig_pages()
8759 spin_unlock_irqrestore(&zone->lock, flags); in alloc_contig_pages()
8780 * The zone indicated has a new number of managed_pages; batch sizes and percpu
8783 void __meminit zone_pcp_update(struct zone *zone) in zone_pcp_update() argument
8786 __zone_pcp_update(zone); in zone_pcp_update()
8790 void zone_pcp_reset(struct zone *zone) in zone_pcp_reset() argument
8798 if (zone->pageset != &boot_pageset) { in zone_pcp_reset()
8800 pset = per_cpu_ptr(zone->pageset, cpu); in zone_pcp_reset()
8801 drain_zonestat(zone, pset); in zone_pcp_reset()
8803 free_percpu(zone->pageset); in zone_pcp_reset()
8804 zone->pageset = &boot_pageset; in zone_pcp_reset()
8811 * All pages in the range must be in a single zone, must not contain holes,
8818 struct zone *zone; in __offline_isolated_pages() local
8823 zone = page_zone(pfn_to_page(pfn)); in __offline_isolated_pages()
8824 spin_lock_irqsave(&zone->lock, flags); in __offline_isolated_pages()
8849 del_page_from_free_list(page, zone, order); in __offline_isolated_pages()
8852 spin_unlock_irqrestore(&zone->lock, flags); in __offline_isolated_pages()
8858 struct zone *zone = page_zone(page); in is_free_buddy_page() local
8863 spin_lock_irqsave(&zone->lock, flags); in is_free_buddy_page()
8870 spin_unlock_irqrestore(&zone->lock, flags); in is_free_buddy_page()
8880 static void break_down_buddy_pages(struct zone *zone, struct page *page, in break_down_buddy_pages() argument
8899 if (set_page_guard(zone, current_buddy, high, migratetype)) in break_down_buddy_pages()
8903 add_to_free_list(current_buddy, zone, high, migratetype); in break_down_buddy_pages()
8915 struct zone *zone = page_zone(page); in take_page_off_buddy() local
8921 spin_lock_irqsave(&zone->lock, flags); in take_page_off_buddy()
8931 del_page_from_free_list(page_head, zone, page_order); in take_page_off_buddy()
8932 break_down_buddy_pages(zone, page_head, page, 0, in take_page_off_buddy()
8935 __mod_zone_freepage_state(zone, -1, migratetype); in take_page_off_buddy()
8942 spin_unlock_irqrestore(&zone->lock, flags); in take_page_off_buddy()