Lines Matching refs:zone

103 	struct zone *zone;  member
573 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) in page_outside_zone_boundaries() argument
581 seq = zone_span_seqbegin(zone); in page_outside_zone_boundaries()
582 start_pfn = zone->zone_start_pfn; in page_outside_zone_boundaries()
583 sp = zone->spanned_pages; in page_outside_zone_boundaries()
584 if (!zone_spans_pfn(zone, pfn)) in page_outside_zone_boundaries()
586 } while (zone_span_seqretry(zone, seq)); in page_outside_zone_boundaries()
590 pfn, zone_to_nid(zone), zone->name, in page_outside_zone_boundaries()
596 static int page_is_consistent(struct zone *zone, struct page *page) in page_is_consistent() argument
600 if (zone != page_zone(page)) in page_is_consistent()
608 static int __maybe_unused bad_range(struct zone *zone, struct page *page) in bad_range() argument
610 if (page_outside_zone_boundaries(zone, page)) in bad_range()
612 if (!page_is_consistent(zone, page)) in bad_range()
618 static inline int __maybe_unused bad_range(struct zone *zone, struct page *page) in bad_range() argument
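
Every hit above in page_outside_zone_boundaries(), page_is_consistent(), and bad_range() boils down to one test: does the page's PFN fall inside the half-open span [zone_start_pfn, zone_start_pfn + spanned_pages)? A minimal userspace sketch of that check, assuming a hypothetical zone_span struct in place of the kernel's struct zone (the zone_span_seqbegin/seqretry loop that guards against concurrent resizing is omitted):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the span fields of the kernel's struct zone. */
    struct zone_span {
        unsigned long zone_start_pfn;
        unsigned long spanned_pages;
    };

    /* Same idea as zone_spans_pfn(): is the PFN inside the half-open span? */
    static bool span_contains_pfn(const struct zone_span *z, unsigned long pfn)
    {
        return pfn >= z->zone_start_pfn &&
               pfn < z->zone_start_pfn + z->spanned_pages;
    }

    int main(void)
    {
        struct zone_span z = { .zone_start_pfn = 0x1000, .spanned_pages = 0x4000 };
        printf("0x2000 in span: %d\n", span_contains_pfn(&z, 0x2000)); /* 1 */
        printf("0x6000 in span: %d\n", span_contains_pfn(&z, 0x6000)); /* 0 */
        return 0;
    }
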
750 static inline bool set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
763 __mod_zone_freepage_state(zone, -(1 << order), migratetype); in set_page_guard()
768 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
778 __mod_zone_freepage_state(zone, (1 << order), migratetype); in clear_page_guard()
781 static inline bool set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
783 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
835 static inline struct capture_control *task_capc(struct zone *zone) in task_capc() argument
842 capc->cc->zone == zone && in task_capc()
872 static inline struct capture_control *task_capc(struct zone *zone) in task_capc() argument
911 struct zone *zone, unsigned int order, in __free_one_page() argument
918 struct capture_control *capc = task_capc(zone); in __free_one_page()
922 VM_BUG_ON(!zone_is_initialized(zone)); in __free_one_page()
927 __mod_zone_freepage_state(zone, 1 << order, migratetype); in __free_one_page()
930 VM_BUG_ON_PAGE(bad_range(zone, page), page); in __free_one_page()
935 __mod_zone_freepage_state(zone, -(1 << order), in __free_one_page()
951 clear_page_guard(zone, buddy, order, migratetype); in __free_one_page()
953 del_page_from_free_area(buddy, &zone->free_area[order]); in __free_one_page()
968 if (unlikely(has_isolate_pageblock(zone))) { in __free_one_page()
1004 add_to_free_area_tail(page, &zone->free_area[order], in __free_one_page()
1011 add_to_free_area_random(page, &zone->free_area[order], in __free_one_page()
1014 add_to_free_area(page, &zone->free_area[order], migratetype); in __free_one_page()
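
The __free_one_page() hits above implement buddy merging: a freed 2^order block is coalesced with its equal-sized neighbour as long as that buddy is also free, doubling the order each round. The buddy's PFN is the block's PFN with bit 'order' flipped; a sketch of just that arithmetic, leaving out the free_area bookkeeping the listing shows:

    #include <stdio.h>

    /* Buddy arithmetic: the buddy of a 2^order-aligned block lives at the
     * PFN with bit 'order' flipped, so the pairing is symmetric. */
    static unsigned long find_buddy_pfn(unsigned long pfn, unsigned int order)
    {
        return pfn ^ (1UL << order);
    }

    int main(void)
    {
        /* For an order-3 block at PFN 0x10 the buddy is 0x18, and vice versa. */
        printf("buddy of 0x10 at order 3: 0x%lx\n", find_buddy_pfn(0x10, 3));
        printf("buddy of 0x18 at order 3: 0x%lx\n", find_buddy_pfn(0x18, 3));
        return 0;
    }
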
1261 static void free_pcppages_bulk(struct zone *zone, int count, in free_pcppages_bulk() argument
1317 spin_lock(&zone->lock); in free_pcppages_bulk()
1318 isolated_pageblocks = has_isolate_pageblock(zone); in free_pcppages_bulk()
1332 __free_one_page(page, page_to_pfn(page), zone, 0, mt); in free_pcppages_bulk()
1335 spin_unlock(&zone->lock); in free_pcppages_bulk()
1338 static void free_one_page(struct zone *zone, in free_one_page() argument
1343 spin_lock(&zone->lock); in free_one_page()
1344 if (unlikely(has_isolate_pageblock(zone) || in free_one_page()
1348 __free_one_page(page, pfn, zone, order, migratetype); in free_one_page()
1349 spin_unlock(&zone->lock); in free_one_page()
1353 unsigned long zone, int nid) in __init_single_page() argument
1356 set_page_links(page, zone, nid, pfn); in __init_single_page()
1365 if (!is_highmem_idx(zone)) in __init_single_page()
1383 struct zone *zone = &pgdat->node_zones[zid]; in init_reserved_page() local
1385 if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone)) in init_reserved_page()
1528 unsigned long end_pfn, struct zone *zone) in __pageblock_pfn_to_page() argument
1543 if (page_zone(start_page) != zone) in __pageblock_pfn_to_page()
1555 void set_zone_contiguous(struct zone *zone) in set_zone_contiguous() argument
1557 unsigned long block_start_pfn = zone->zone_start_pfn; in set_zone_contiguous()
1561 for (; block_start_pfn < zone_end_pfn(zone); in set_zone_contiguous()
1565 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone)); in set_zone_contiguous()
1568 block_end_pfn, zone)) in set_zone_contiguous()
1573 zone->contiguous = true; in set_zone_contiguous()
1576 void clear_zone_contiguous(struct zone *zone) in clear_zone_contiguous() argument
1578 zone->contiguous = false; in clear_zone_contiguous()
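
set_zone_contiguous() above scans the zone span in pageblock-sized windows, clamps the last window to zone_end_pfn(), and bails on the first window that fails validation; only a fully clean scan sets zone->contiguous. A simplified sketch of that loop shape, where PAGEBLOCK_NR_PAGES and block_is_valid() are assumptions standing in for pageblock_nr_pages and __pageblock_pfn_to_page():

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGEBLOCK_NR_PAGES 512UL  /* assumption: order-9 pageblocks */

    /* Hypothetical validity check in place of __pageblock_pfn_to_page(). */
    static bool block_is_valid(unsigned long start_pfn, unsigned long end_pfn)
    {
        (void)start_pfn; (void)end_pfn;
        return true; /* pretend every block checks out */
    }

    static bool zone_is_contiguous(unsigned long zone_start_pfn,
                                   unsigned long zone_end_pfn)
    {
        unsigned long block = zone_start_pfn & ~(PAGEBLOCK_NR_PAGES - 1);

        for (; block < zone_end_pfn; block += PAGEBLOCK_NR_PAGES) {
            unsigned long block_end = block + PAGEBLOCK_NR_PAGES;
            if (block_end > zone_end_pfn)
                block_end = zone_end_pfn;   /* clamp the final window */
            if (!block_is_valid(block, block_end))
                return false;
        }
        return true;
    }

    int main(void)
    {
        printf("contiguous: %d\n", zone_is_contiguous(0x1000, 0x9000));
        return 0;
    }
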
1668 static unsigned long __init deferred_init_pages(struct zone *zone, in deferred_init_pages() argument
1673 int nid = zone_to_nid(zone); in deferred_init_pages()
1675 int zid = zone_idx(zone); in deferred_init_pages()
1701 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone, in deferred_init_mem_pfn_range_in_zone() argument
1712 for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) { in deferred_init_mem_pfn_range_in_zone()
1735 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn, in deferred_init_maxorder() argument
1744 for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) { in deferred_init_maxorder()
1751 nr_pages += deferred_init_pages(zone, *start_pfn, t); in deferred_init_maxorder()
1762 for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) { in deferred_init_maxorder()
1786 struct zone *zone; in deferred_init_memmap() local
1809 zone = pgdat->node_zones + zid; in deferred_init_memmap()
1810 if (first_init_pfn < zone_end_pfn(zone)) in deferred_init_memmap()
1815 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, in deferred_init_memmap()
1825 nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn); in deferred_init_memmap()
1830 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone)); in deferred_init_memmap()
1855 deferred_grow_zone(struct zone *zone, unsigned int order) in deferred_grow_zone() argument
1858 pg_data_t *pgdat = zone->zone_pgdat; in deferred_grow_zone()
1865 if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat)) in deferred_grow_zone()
1891 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, in deferred_grow_zone()
1908 nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn); in deferred_grow_zone()
1932 _deferred_grow_zone(struct zone *zone, unsigned int order) in _deferred_grow_zone() argument
1934 return deferred_grow_zone(zone, order); in _deferred_grow_zone()
1941 struct zone *zone; in page_alloc_init_late() local
1960 for_each_populated_zone(zone) in page_alloc_init_late()
1961 zone_pcp_update(zone); in page_alloc_init_late()
1979 for_each_populated_zone(zone) in page_alloc_init_late()
1980 set_zone_contiguous(zone); in page_alloc_init_late()
2028 static inline void expand(struct zone *zone, struct page *page, in expand() argument
2038 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); in expand()
2046 if (set_page_guard(zone, &page[size], high, migratetype)) in expand()
2194 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, in __rmqueue_smallest() argument
2203 area = &(zone->free_area[current_order]); in __rmqueue_smallest()
2208 expand(zone, page, order, current_order, area, migratetype); in __rmqueue_smallest()
2234 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback() argument
2237 return __rmqueue_smallest(zone, order, MIGRATE_CMA); in __rmqueue_cma_fallback()
2240 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback() argument
2249 static int move_freepages(struct zone *zone, in move_freepages() argument
2278 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); in move_freepages()
2279 VM_BUG_ON_PAGE(page_zone(page) != zone, page); in move_freepages()
2282 move_to_free_area(page, &zone->free_area[order], migratetype); in move_freepages()
2290 int move_freepages_block(struct zone *zone, struct page *page, in move_freepages_block() argument
2306 if (!zone_spans_pfn(zone, start_pfn)) in move_freepages_block()
2308 if (!zone_spans_pfn(zone, end_pfn)) in move_freepages_block()
2311 return move_freepages(zone, start_page, end_page, migratetype, in move_freepages_block()
2359 static inline void boost_watermark(struct zone *zone) in boost_watermark() argument
2366 max_boost = mult_frac(zone->_watermark[WMARK_HIGH], in boost_watermark()
2382 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, in boost_watermark()
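
The boost_watermark() hits above cap the boost at mult_frac(high watermark, watermark_boost_factor, 10000), i.e. a percentage of the high watermark computed without overflowing the intermediate product, then grow watermark_boost one pageblock at a time up to that cap. A sketch of the mult_frac() split; the 15000 figure is the kernel's default boost factor (150%), the watermark value is made up:

    #include <stdio.h>

    /* x * num / den with the quotient/remainder split the kernel's
     * mult_frac() macro uses to keep the intermediate product small. */
    static unsigned long mult_frac(unsigned long x, unsigned long num,
                                   unsigned long den)
    {
        return (x / den) * num + (x % den) * num / den;
    }

    int main(void)
    {
        unsigned long high_wmark = 128000;  /* pages; made-up figure */
        unsigned long boost_factor = 15000; /* kernel default: 150% */
        printf("max boost: %lu pages\n",
               mult_frac(high_wmark, boost_factor, 10000)); /* 192000 */
        return 0;
    }
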
2394 static void steal_suitable_fallback(struct zone *zone, struct page *page, in steal_suitable_fallback() argument
2422 boost_watermark(zone); in steal_suitable_fallback()
2424 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); in steal_suitable_fallback()
2430 free_pages = move_freepages_block(zone, page, start_type, in steal_suitable_fallback()
2469 area = &zone->free_area[current_order]; in steal_suitable_fallback()
2514 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, in reserve_highatomic_pageblock() argument
2524 max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages; in reserve_highatomic_pageblock()
2525 if (zone->nr_reserved_highatomic >= max_managed) in reserve_highatomic_pageblock()
2528 spin_lock_irqsave(&zone->lock, flags); in reserve_highatomic_pageblock()
2531 if (zone->nr_reserved_highatomic >= max_managed) in reserve_highatomic_pageblock()
2538 zone->nr_reserved_highatomic += pageblock_nr_pages; in reserve_highatomic_pageblock()
2540 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); in reserve_highatomic_pageblock()
2544 spin_unlock_irqrestore(&zone->lock, flags); in reserve_highatomic_pageblock()
2562 struct zone *zone; in unreserve_highatomic_pageblock() local
2567 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx, in unreserve_highatomic_pageblock()
2573 if (!force && zone->nr_reserved_highatomic <= in unreserve_highatomic_pageblock()
2577 spin_lock_irqsave(&zone->lock, flags); in unreserve_highatomic_pageblock()
2579 struct free_area *area = &(zone->free_area[order]); in unreserve_highatomic_pageblock()
2600 zone->nr_reserved_highatomic -= min( in unreserve_highatomic_pageblock()
2602 zone->nr_reserved_highatomic); in unreserve_highatomic_pageblock()
2615 ret = move_freepages_block(zone, page, ac->migratetype, in unreserve_highatomic_pageblock()
2618 spin_unlock_irqrestore(&zone->lock, flags); in unreserve_highatomic_pageblock()
2622 spin_unlock_irqrestore(&zone->lock, flags); in unreserve_highatomic_pageblock()
2639 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, in __rmqueue_fallback() argument
2664 area = &(zone->free_area[current_order]); in __rmqueue_fallback()
2690 area = &(zone->free_area[current_order]); in __rmqueue_fallback()
2706 steal_suitable_fallback(zone, page, alloc_flags, start_migratetype, in __rmqueue_fallback()
2721 __rmqueue(struct zone *zone, unsigned int order, int migratetype, in __rmqueue() argument
2727 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue()
2730 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
2732 if (!page && __rmqueue_fallback(zone, order, migratetype, in __rmqueue()
2746 static int rmqueue_bulk(struct zone *zone, unsigned int order, in rmqueue_bulk() argument
2752 spin_lock(&zone->lock); in rmqueue_bulk()
2754 struct page *page = __rmqueue(zone, order, migratetype, in rmqueue_bulk()
2775 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, in rmqueue_bulk()
2785 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); in rmqueue_bulk()
2786 spin_unlock(&zone->lock); in rmqueue_bulk()
2799 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) in drain_zone_pages() argument
2808 free_pcppages_bulk(zone, to_drain, pcp); in drain_zone_pages()
2820 static void drain_pages_zone(unsigned int cpu, struct zone *zone) in drain_pages_zone() argument
2827 pset = per_cpu_ptr(zone->pageset, cpu); in drain_pages_zone()
2831 free_pcppages_bulk(zone, pcp->count, pcp); in drain_pages_zone()
2844 struct zone *zone; in drain_pages() local
2846 for_each_populated_zone(zone) { in drain_pages()
2847 drain_pages_zone(cpu, zone); in drain_pages()
2857 void drain_local_pages(struct zone *zone) in drain_local_pages() argument
2861 if (zone) in drain_local_pages()
2862 drain_pages_zone(cpu, zone); in drain_local_pages()
2881 drain_local_pages(drain->zone); in drain_local_pages_wq()
2892 void drain_all_pages(struct zone *zone) in drain_all_pages() argument
2915 if (!zone) in drain_all_pages()
2928 struct zone *z; in drain_all_pages()
2931 if (zone) { in drain_all_pages()
2932 pcp = per_cpu_ptr(zone->pageset, cpu); in drain_all_pages()
2954 drain->zone = zone; in drain_all_pages()
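
Per the hits above, drain_all_pages() does not blindly flush every CPU: when a specific zone is given it reads that zone's pcp count per CPU and only queues drain work where something is actually cached. A toy sketch of that selection step, with a made-up per-CPU count array in place of zone->pageset:

    #include <stdio.h>

    #define NR_CPUS 4

    /* Made-up per-CPU cached-page counts for one zone. */
    static int pcp_count[NR_CPUS] = { 3, 0, 17, 0 };

    int main(void)
    {
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            if (pcp_count[cpu] == 0)
                continue;       /* nothing cached here: skip the work item */
            printf("queue drain work on cpu %d (%d pages)\n",
                   cpu, pcp_count[cpu]);
            pcp_count[cpu] = 0; /* pretend the worker freed them back */
        }
        return 0;
    }
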
2971 void mark_free_pages(struct zone *zone) in mark_free_pages() argument
2978 if (zone_is_empty(zone)) in mark_free_pages()
2981 spin_lock_irqsave(&zone->lock, flags); in mark_free_pages()
2983 max_zone_pfn = zone_end_pfn(zone); in mark_free_pages()
2984 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) in mark_free_pages()
2993 if (page_zone(page) != zone) in mark_free_pages()
3002 &zone->free_area[order].free_list[t], lru) { in mark_free_pages()
3015 spin_unlock_irqrestore(&zone->lock, flags); in mark_free_pages()
3033 struct zone *zone = page_zone(page); in free_unref_page_commit() local
3049 free_one_page(zone, page, pfn, 0, migratetype); in free_unref_page_commit()
3055 pcp = &this_cpu_ptr(zone->pageset)->pcp; in free_unref_page_commit()
3060 free_pcppages_bulk(zone, batch, pcp); in free_unref_page_commit()
3143 struct zone *zone; in __isolate_free_page() local
3148 zone = page_zone(page); in __isolate_free_page()
3158 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); in __isolate_free_page()
3159 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) in __isolate_free_page()
3162 __mod_zone_freepage_state(zone, -(1UL << order), mt); in __isolate_free_page()
3193 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z) in zone_statistics()
3216 static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype, in __rmqueue_pcplist() argument
3225 pcp->count += rmqueue_bulk(zone, 0, in __rmqueue_pcplist()
3241 static struct page *rmqueue_pcplist(struct zone *preferred_zone, in rmqueue_pcplist()
3242 struct zone *zone, gfp_t gfp_flags, in rmqueue_pcplist() argument
3251 pcp = &this_cpu_ptr(zone->pageset)->pcp; in rmqueue_pcplist()
3253 page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list); in rmqueue_pcplist()
3256 zone_statistics(preferred_zone, zone); in rmqueue_pcplist()
3266 struct page *rmqueue(struct zone *preferred_zone, in rmqueue()
3267 struct zone *zone, unsigned int order, in rmqueue() argument
3275 page = rmqueue_pcplist(preferred_zone, zone, gfp_flags, in rmqueue()
3285 spin_lock_irqsave(&zone->lock, flags); in rmqueue()
3290 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); in rmqueue()
3295 page = __rmqueue(zone, order, migratetype, alloc_flags); in rmqueue()
3297 spin_unlock(&zone->lock); in rmqueue()
3300 __mod_zone_freepage_state(zone, -(1 << order), in rmqueue()
3304 zone_statistics(preferred_zone, zone); in rmqueue()
3309 if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) { in rmqueue()
3310 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); in rmqueue()
3311 wakeup_kswapd(zone, 0, 0, zone_idx(zone)); in rmqueue()
3314 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); in rmqueue()
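
rmqueue() above routes order-0 requests through rmqueue_pcplist(), which pops from a per-CPU list without taking zone->lock and refills from the buddy lists in batches (via rmqueue_bulk()) only when the list runs dry; higher orders fall through to the locked path. A toy single-CPU sketch of that shape, with a hypothetical refill helper in place of rmqueue_bulk():

    #include <stdio.h>

    #define PCP_BATCH 8

    /* Toy per-CPU free list of order-0 pages, tracked by PFN only. */
    static unsigned long pcp_list[64];
    static int pcp_count;

    /* Hypothetical slow path: pretend to pull a batch from the buddy lists. */
    static void refill_from_buddy(void)
    {
        static unsigned long next_pfn = 0x1000;
        for (int i = 0; i < PCP_BATCH; i++)
            pcp_list[pcp_count++] = next_pfn++;
    }

    /* Shape of __rmqueue_pcplist(): most order-0 allocations are a pop. */
    static unsigned long alloc_order0(void)
    {
        if (pcp_count == 0)
            refill_from_buddy();
        return pcp_list[--pcp_count];
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++)
            printf("got pfn 0x%lx\n", alloc_order0());
        return 0;
    }
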
3402 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in __zone_watermark_ok()
3481 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in zone_watermark_ok()
3488 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, in zone_watermark_fast()
3514 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, in zone_watermark_ok_safe()
3527 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) in zone_allows_reclaim() argument
3529 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= in zone_allows_reclaim()
3533 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) in zone_allows_reclaim() argument
3548 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) in alloc_flags_nofragment() argument
3556 if (!zone) in alloc_flags_nofragment()
3559 if (zone_idx(zone) != ZONE_NORMAL) in alloc_flags_nofragment()
3568 if (nr_online_nodes > 1 && !populated_zone(--zone)) in alloc_flags_nofragment()
3585 struct zone *zone; in get_page_from_freelist() local
3596 for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, in get_page_from_freelist()
3603 !__cpuset_zone_allowed(zone, gfp_mask)) in get_page_from_freelist()
3625 if (last_pgdat_dirty_limit == zone->zone_pgdat) in get_page_from_freelist()
3628 if (!node_dirty_ok(zone->zone_pgdat)) { in get_page_from_freelist()
3629 last_pgdat_dirty_limit = zone->zone_pgdat; in get_page_from_freelist()
3635 zone != ac->preferred_zoneref->zone) { in get_page_from_freelist()
3643 local_nid = zone_to_nid(ac->preferred_zoneref->zone); in get_page_from_freelist()
3644 if (zone_to_nid(zone) != local_nid) { in get_page_from_freelist()
3650 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); in get_page_from_freelist()
3651 if (!zone_watermark_fast(zone, order, mark, in get_page_from_freelist()
3661 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
3671 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone)) in get_page_from_freelist()
3674 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); in get_page_from_freelist()
3684 if (zone_watermark_ok(zone, order, mark, in get_page_from_freelist()
3693 page = rmqueue(ac->preferred_zoneref->zone, zone, order, in get_page_from_freelist()
3703 reserve_highatomic_pageblock(page, zone, order); in get_page_from_freelist()
3710 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
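
get_page_from_freelist() above walks the zonelist in preference order and allocates from the first zone whose free pages still clear the required watermark, entering the slow path if none qualifies. A stripped-down sketch of that walk with made-up zones and figures; the cpuset, dirty-limit, fragmentation, and node-reclaim checks the listing shows are all omitted:

    #include <stdio.h>

    struct zone_stub {
        const char *name;
        long free_pages;
        long low_wmark;
    };

    /* Hypothetical zonelist, most preferred zone first. */
    static struct zone_stub zonelist[] = {
        { "Normal", 100, 500 },  /* below its mark: skipped */
        { "DMA32", 9000, 800 },  /* passes: allocation comes from here */
    };

    int main(void)
    {
        for (unsigned i = 0; i < sizeof(zonelist) / sizeof(zonelist[0]); i++) {
            struct zone_stub *z = &zonelist[i];
            if (z->free_pages < z->low_wmark)
                continue;        /* would push the zone below its watermark */
            printf("allocating from %s\n", z->name);
            return 0;
        }
        printf("no zone eligible: enter slow path\n");
        return 0;
    }
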
3923 struct zone *zone = page_zone(page); in __alloc_pages_direct_compact() local
3925 zone->compact_blockskip_flush = false; in __alloc_pages_direct_compact()
3926 compaction_defer_reset(zone, order, true); in __alloc_pages_direct_compact()
4035 struct zone *zone; in should_compact_retry() local
4047 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, in should_compact_retry()
4049 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), in should_compact_retry()
4172 struct zone *zone; in wake_all_kswapds() local
4176 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, high_zoneidx, in wake_all_kswapds()
4178 if (last_pgdat != zone->zone_pgdat) in wake_all_kswapds()
4179 wakeup_kswapd(zone, gfp_mask, order, high_zoneidx); in wake_all_kswapds()
4180 last_pgdat = zone->zone_pgdat; in wake_all_kswapds()
4282 struct zone *zone; in should_reclaim_retry() local
4311 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, in should_reclaim_retry()
4315 unsigned long min_wmark = min_wmark_pages(zone); in should_reclaim_retry()
4318 available = reclaimable = zone_reclaimable_pages(zone); in should_reclaim_retry()
4319 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); in should_reclaim_retry()
4325 wmark = __zone_watermark_ok(zone, order, min_wmark, in should_reclaim_retry()
4339 write_pending = zone_page_state_snapshot(zone, in should_reclaim_retry()
4446 if (!ac->preferred_zoneref->zone) in __alloc_pages_slowpath()
4754 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask); in __alloc_pages_nodemask()
5050 struct zone *zone; in nr_free_zone_pages() local
5057 for_each_zone_zonelist(zone, z, zonelist, offset) { in nr_free_zone_pages()
5058 unsigned long size = zone_managed_pages(zone); in nr_free_zone_pages()
5059 unsigned long high = high_wmark_pages(zone); in nr_free_zone_pages()
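
nr_free_zone_pages() counts, per zone in the zonelist, only the managed pages above the high watermark; a zone already at or below its mark contributes nothing. A worked example with made-up sizes:

    #include <stdio.h>

    int main(void)
    {
        unsigned long managed[] = { 4096, 262144 }; /* made-up zone sizes */
        unsigned long high[]    = { 8192, 4096 };   /* made-up high marks */
        unsigned long sum = 0;

        for (int i = 0; i < 2; i++)
            if (managed[i] > high[i])       /* first zone adds nothing */
                sum += managed[i] - high[i];
        printf("freeable estimate: %lu pages\n", sum); /* 258048 */
        return 0;
    }
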
5095 static inline void show_node(struct zone *zone) in show_node() argument
5098 printk("Node %d ", zone_to_nid(zone)); in show_node()
5108 struct zone *zone; in si_mem_available() local
5114 for_each_zone(zone) in si_mem_available()
5115 wmark_low += low_wmark_pages(zone); in si_mem_available()
5176 struct zone *zone = &pgdat->node_zones[zone_type]; in si_meminfo_node() local
5178 if (is_highmem(zone)) { in si_meminfo_node()
5179 managed_highpages += zone_managed_pages(zone); in si_meminfo_node()
5180 free_highpages += zone_page_state(zone, NR_FREE_PAGES); in si_meminfo_node()
5255 struct zone *zone; in show_free_areas() local
5258 for_each_populated_zone(zone) { in show_free_areas()
5259 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
5263 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; in show_free_areas()
5341 for_each_populated_zone(zone) { in show_free_areas()
5344 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
5349 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; in show_free_areas()
5351 show_node(zone); in show_free_areas()
5377 zone->name, in show_free_areas()
5378 K(zone_page_state(zone, NR_FREE_PAGES)), in show_free_areas()
5379 K(min_wmark_pages(zone)), in show_free_areas()
5380 K(low_wmark_pages(zone)), in show_free_areas()
5381 K(high_wmark_pages(zone)), in show_free_areas()
5382 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)), in show_free_areas()
5383 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)), in show_free_areas()
5384 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)), in show_free_areas()
5385 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)), in show_free_areas()
5386 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)), in show_free_areas()
5387 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)), in show_free_areas()
5388 K(zone->present_pages), in show_free_areas()
5389 K(zone_managed_pages(zone)), in show_free_areas()
5390 K(zone_page_state(zone, NR_MLOCK)), in show_free_areas()
5391 zone_page_state(zone, NR_KERNEL_STACK_KB), in show_free_areas()
5393 zone_page_state(zone, NR_KERNEL_SCS_BYTES) / 1024, in show_free_areas()
5395 K(zone_page_state(zone, NR_PAGETABLE)), in show_free_areas()
5396 K(zone_page_state(zone, NR_BOUNCE)), in show_free_areas()
5398 K(this_cpu_read(zone->pageset->pcp.count)), in show_free_areas()
5399 K(zone_page_state(zone, NR_FREE_CMA_PAGES))); in show_free_areas()
5402 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]); in show_free_areas()
5406 for_each_populated_zone(zone) { in show_free_areas()
5411 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
5413 show_node(zone); in show_free_areas()
5414 printk(KERN_CONT "%s: ", zone->name); in show_free_areas()
5416 spin_lock_irqsave(&zone->lock, flags); in show_free_areas()
5418 struct free_area *area = &zone->free_area[order]; in show_free_areas()
5430 spin_unlock_irqrestore(&zone->lock, flags); in show_free_areas()
5447 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) in zoneref_set_zone() argument
5449 zoneref->zone = zone; in zoneref_set_zone()
5450 zoneref->zone_idx = zone_idx(zone); in zoneref_set_zone()
5460 struct zone *zone; in build_zonerefs_node() local
5466 zone = pgdat->node_zones + zone_type; in build_zonerefs_node()
5467 if (managed_zone(zone)) { in build_zonerefs_node()
5468 zoneref_set_zone(zone, &zonerefs[nr_zones++]); in build_zonerefs_node()
5612 zonerefs->zone = NULL; in build_zonelists_in_node_order()
5627 zonerefs->zone = NULL; in build_thisnode_zonelists()
5685 return zone_to_nid(z->zone); in local_memory_node()
5726 zonerefs->zone = NULL; in build_zonelists()
5859 overlap_memmap_init(unsigned long zone, unsigned long *pfn) in overlap_memmap_init() argument
5864 if (mirrored_kernelcore && zone == ZONE_MOVABLE) { in overlap_memmap_init()
5886 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, in memmap_init_zone() argument
5904 if (zone == ZONE_DEVICE) { in memmap_init_zone()
5924 if (overlap_memmap_init(zone, &pfn)) in memmap_init_zone()
5931 __init_single_page(page, pfn, zone, nid); in memmap_init_zone()
5955 void __ref memmap_init_zone_device(struct zone *zone, in memmap_init_zone_device() argument
5961 struct pglist_data *pgdat = zone->zone_pgdat; in memmap_init_zone_device()
5963 unsigned long zone_idx = zone_idx(zone); in memmap_init_zone_device()
5967 if (WARN_ON_ONCE(!pgmap || zone_idx(zone) != ZONE_DEVICE)) in memmap_init_zone_device()
6028 static void __meminit zone_init_free_lists(struct zone *zone) in zone_init_free_lists() argument
6032 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); in zone_init_free_lists()
6033 zone->free_area[order].nr_free = 0; in zone_init_free_lists()
6038 unsigned long zone, unsigned long start_pfn) in memmap_init() argument
6040 memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY, NULL); in memmap_init()
6043 static int zone_batchsize(struct zone *zone) in zone_batchsize() argument
6052 batch = zone_managed_pages(zone) / 1024; in zone_batchsize()
6157 static void pageset_set_high_and_batch(struct zone *zone, in pageset_set_high_and_batch() argument
6162 (zone_managed_pages(zone) / in pageset_set_high_and_batch()
6165 pageset_set_batch(pcp, zone_batchsize(zone)); in pageset_set_high_and_batch()
6168 static void __meminit zone_pageset_init(struct zone *zone, int cpu) in zone_pageset_init() argument
6170 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu); in zone_pageset_init()
6173 pageset_set_high_and_batch(zone, pcp); in zone_pageset_init()
6176 void __meminit setup_zone_pageset(struct zone *zone) in setup_zone_pageset() argument
6179 zone->pageset = alloc_percpu(struct per_cpu_pageset); in setup_zone_pageset()
6181 zone_pageset_init(zone, cpu); in setup_zone_pageset()
6191 struct zone *zone; in setup_per_cpu_pageset() local
6193 for_each_populated_zone(zone) in setup_per_cpu_pageset()
6194 setup_zone_pageset(zone); in setup_per_cpu_pageset()
6201 static __meminit void zone_pcp_init(struct zone *zone) in zone_pcp_init() argument
6208 zone->pageset = &boot_pageset; in zone_pcp_init()
6210 if (populated_zone(zone)) in zone_pcp_init()
6212 zone->name, zone->present_pages, in zone_pcp_init()
6213 zone_batchsize(zone)); in zone_pcp_init()
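
The zone_batchsize() heuristic referenced above sizes the per-CPU batch at roughly 0.1% of the zone's managed pages, caps it at 1MB worth of pages, scales it down by four, and rounds it to a 2^n - 1 value. A standalone sketch under those rules (round_batch() is our stand-in for the rounddown_pow_of_two() step); a ~1GiB zone with 4K pages comes out at the familiar batch of 63:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Stand-in for rounddown_pow_of_two(batch + batch/2) - 1. */
    static unsigned long round_batch(unsigned long batch)
    {
        unsigned long p = 1;
        batch += batch / 2;
        while (p * 2 <= batch)
            p *= 2;
        return p - 1;
    }

    static unsigned long zone_batchsize(unsigned long managed_pages)
    {
        unsigned long batch = managed_pages / 1024;   /* ~0.1% of the zone */
        if (batch * PAGE_SIZE > 1024 * 1024)
            batch = (1024 * 1024) / PAGE_SIZE;        /* cap at 1MB worth */
        batch /= 4;
        if (batch < 1)
            batch = 1;
        return round_batch(batch);
    }

    int main(void)
    {
        printf("batch for a 1GiB zone: %lu\n", zone_batchsize(262144)); /* 63 */
        return 0;
    }
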
6216 void __meminit init_currently_empty_zone(struct zone *zone, in init_currently_empty_zone() argument
6220 struct pglist_data *pgdat = zone->zone_pgdat; in init_currently_empty_zone()
6221 int zone_idx = zone_idx(zone) + 1; in init_currently_empty_zone()
6226 zone->zone_start_pfn = zone_start_pfn; in init_currently_empty_zone()
6231 (unsigned long)zone_idx(zone), in init_currently_empty_zone()
6234 zone_init_free_lists(zone); in init_currently_empty_zone()
6235 zone->initialized = 1; in init_currently_empty_zone()
6523 unsigned int zone; in zone_spanned_pages_in_node() local
6526 for (zone = 0; zone < zone_type; zone++) in zone_spanned_pages_in_node()
6527 *zone_start_pfn += zones_size[zone]; in zone_spanned_pages_in_node()
6558 struct zone *zone = pgdat->node_zones + i; in calculate_node_totalpages() local
6572 zone->zone_start_pfn = zone_start_pfn; in calculate_node_totalpages()
6574 zone->zone_start_pfn = 0; in calculate_node_totalpages()
6575 zone->spanned_pages = size; in calculate_node_totalpages()
6576 zone->present_pages = real_size; in calculate_node_totalpages()
6610 struct zone *zone, in setup_usemap() argument
6615 zone->pageblock_flags = NULL; in setup_usemap()
6617 zone->pageblock_flags = in setup_usemap()
6620 if (!zone->pageblock_flags) in setup_usemap()
6622 usemapsize, zone->name, pgdat->node_id); in setup_usemap()
6626 static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone, in setup_usemap() argument
6724 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid, in zone_init_internals() argument
6727 atomic_long_set(&zone->managed_pages, remaining_pages); in zone_init_internals()
6728 zone_set_nid(zone, nid); in zone_init_internals()
6729 zone->name = zone_names[idx]; in zone_init_internals()
6730 zone->zone_pgdat = NODE_DATA(nid); in zone_init_internals()
6731 spin_lock_init(&zone->lock); in zone_init_internals()
6732 zone_seqlock_init(zone); in zone_init_internals()
6733 zone_pcp_init(zone); in zone_init_internals()
6773 struct zone *zone = pgdat->node_zones + j; in free_area_init_core() local
6775 unsigned long zone_start_pfn = zone->zone_start_pfn; in free_area_init_core()
6777 size = zone->spanned_pages; in free_area_init_core()
6778 freesize = zone->present_pages; in free_area_init_core()
6817 zone_init_internals(zone, j, nid, freesize); in free_area_init_core()
6823 setup_usemap(pgdat, zone, zone_start_pfn, size); in free_area_init_core()
6824 init_currently_empty_zone(zone, zone_start_pfn, size); in free_area_init_core()
7317 struct zone *zone = &pgdat->node_zones[zone_type]; in check_for_memory() local
7318 if (populated_zone(zone)) { in check_for_memory()
7678 struct zone *zone = pgdat->node_zones + i; in calculate_totalreserve_pages() local
7680 unsigned long managed_pages = zone_managed_pages(zone); in calculate_totalreserve_pages()
7684 if (zone->lowmem_reserve[j] > max) in calculate_totalreserve_pages()
7685 max = zone->lowmem_reserve[j]; in calculate_totalreserve_pages()
7689 max += high_wmark_pages(zone); in calculate_totalreserve_pages()
7715 struct zone *zone = pgdat->node_zones + j; in setup_per_zone_lowmem_reserve() local
7716 unsigned long managed_pages = zone_managed_pages(zone); in setup_per_zone_lowmem_reserve()
7718 zone->lowmem_reserve[j] = 0; in setup_per_zone_lowmem_reserve()
7722 struct zone *lower_zone; in setup_per_zone_lowmem_reserve()
7748 struct zone *zone; in __setup_per_zone_wmarks() local
7752 for_each_zone(zone) { in __setup_per_zone_wmarks()
7753 if (!is_highmem(zone)) in __setup_per_zone_wmarks()
7754 lowmem_pages += zone_managed_pages(zone); in __setup_per_zone_wmarks()
7757 for_each_zone(zone) { in __setup_per_zone_wmarks()
7760 spin_lock_irqsave(&zone->lock, flags); in __setup_per_zone_wmarks()
7761 tmp = (u64)pages_min * zone_managed_pages(zone); in __setup_per_zone_wmarks()
7763 low = (u64)pages_low * zone_managed_pages(zone); in __setup_per_zone_wmarks()
7765 if (is_highmem(zone)) { in __setup_per_zone_wmarks()
7777 min_pages = zone_managed_pages(zone) / 1024; in __setup_per_zone_wmarks()
7779 zone->_watermark[WMARK_MIN] = min_pages; in __setup_per_zone_wmarks()
7785 zone->_watermark[WMARK_MIN] = tmp; in __setup_per_zone_wmarks()
7794 mult_frac(zone_managed_pages(zone), in __setup_per_zone_wmarks()
7797 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + in __setup_per_zone_wmarks()
7799 zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + in __setup_per_zone_wmarks()
7801 zone->watermark_boost = 0; in __setup_per_zone_wmarks()
7803 spin_unlock_irqrestore(&zone->lock, flags); in __setup_per_zone_wmarks()
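
Per the __setup_per_zone_wmarks() hits above, each zone receives a share of the global minimum proportional to its managed size, and the low and high marks sit one and two steps above min, where the step is the larger of min/4 and watermark_scale_factor ten-thousandths of the zone. A worked sketch with made-up figures; 10 is the kernel's default watermark_scale_factor (0.1%):

    #include <stdio.h>

    int main(void)
    {
        unsigned long pages_min    = 16384;   /* global minimum, in pages */
        unsigned long lowmem_pages = 4000000; /* total managed !highmem pages */
        unsigned long zone_managed = 1000000; /* this zone's managed pages */
        unsigned long scale_factor = 10;      /* watermark_scale_factor default */

        unsigned long min = pages_min * zone_managed / lowmem_pages;
        unsigned long step = zone_managed * scale_factor / 10000;
        if (step < min / 4)
            step = min / 4;

        /* prints: min=4096 low=5120 high=6144 */
        printf("min=%lu low=%lu high=%lu\n", min, min + step, min + 2 * step);
        return 0;
    }
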
7933 struct zone *zone; in setup_min_unmapped_ratio() local
7938 for_each_zone(zone) in setup_min_unmapped_ratio()
7939 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) * in setup_min_unmapped_ratio()
7961 struct zone *zone; in setup_min_slab_ratio() local
7966 for_each_zone(zone) in setup_min_slab_ratio()
7967 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) * in setup_min_slab_ratio()
8011 struct zone *zone; in percpu_pagelist_fraction_sysctl_handler() local
8034 for_each_populated_zone(zone) { in percpu_pagelist_fraction_sysctl_handler()
8038 pageset_set_high_and_batch(zone, in percpu_pagelist_fraction_sysctl_handler()
8039 per_cpu_ptr(zone->pageset, cpu)); in percpu_pagelist_fraction_sysctl_handler()
8195 bool has_unmovable_pages(struct zone *zone, struct page *page, int count, in has_unmovable_pages() argument
8240 if (zone_idx(zone) == ZONE_MOVABLE) in has_unmovable_pages()
8302 WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE); in has_unmovable_pages()
8352 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, in __alloc_contig_migrate_range()
8397 .zone = page_zone(pfn_to_page(start)), in alloc_contig_range()
8536 void __meminit zone_pcp_update(struct zone *zone) in zone_pcp_update() argument
8541 pageset_set_high_and_batch(zone, in zone_pcp_update()
8542 per_cpu_ptr(zone->pageset, cpu)); in zone_pcp_update()
8546 void zone_pcp_reset(struct zone *zone) in zone_pcp_reset() argument
8554 if (zone->pageset != &boot_pageset) { in zone_pcp_reset()
8556 pset = per_cpu_ptr(zone->pageset, cpu); in zone_pcp_reset()
8557 drain_zonestat(zone, pset); in zone_pcp_reset()
8559 free_percpu(zone->pageset); in zone_pcp_reset()
8560 zone->pageset = &boot_pageset; in zone_pcp_reset()
8574 struct zone *zone; in __offline_isolated_pages() local
8588 zone = page_zone(pfn_to_page(pfn)); in __offline_isolated_pages()
8589 spin_lock_irqsave(&zone->lock, flags); in __offline_isolated_pages()
8616 del_page_from_free_area(page, &zone->free_area[order]); in __offline_isolated_pages()
8621 spin_unlock_irqrestore(&zone->lock, flags); in __offline_isolated_pages()
8629 struct zone *zone = page_zone(page); in is_free_buddy_page() local
8634 spin_lock_irqsave(&zone->lock, flags); in is_free_buddy_page()
8641 spin_unlock_irqrestore(&zone->lock, flags); in is_free_buddy_page()
8654 struct zone *zone = page_zone(page); in set_hwpoison_free_buddy_page() local
8660 spin_lock_irqsave(&zone->lock, flags); in set_hwpoison_free_buddy_page()
8670 spin_unlock_irqrestore(&zone->lock, flags); in set_hwpoison_free_buddy_page()