Lines Matching refs:zone

483 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) in page_outside_zone_boundaries() argument
491 seq = zone_span_seqbegin(zone); in page_outside_zone_boundaries()
492 start_pfn = zone->zone_start_pfn; in page_outside_zone_boundaries()
493 sp = zone->spanned_pages; in page_outside_zone_boundaries()
494 if (!zone_spans_pfn(zone, pfn)) in page_outside_zone_boundaries()
496 } while (zone_span_seqretry(zone, seq)); in page_outside_zone_boundaries()
500 pfn, zone_to_nid(zone), zone->name, in page_outside_zone_boundaries()
506 static int page_is_consistent(struct zone *zone, struct page *page) in page_is_consistent() argument
510 if (zone != page_zone(page)) in page_is_consistent()
518 static int __maybe_unused bad_range(struct zone *zone, struct page *page) in bad_range() argument
520 if (page_outside_zone_boundaries(zone, page)) in bad_range()
522 if (!page_is_consistent(zone, page)) in bad_range()
528 static inline int __maybe_unused bad_range(struct zone *zone, struct page *page) in bad_range() argument
672 static inline bool set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
692 __mod_zone_freepage_state(zone, -(1 << order), migratetype); in set_page_guard()
697 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
713 __mod_zone_freepage_state(zone, (1 << order), migratetype); in clear_page_guard()
717 static inline bool set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
719 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
802 struct zone *zone, unsigned int order, in __free_one_page() argument
812 VM_BUG_ON(!zone_is_initialized(zone)); in __free_one_page()
817 __mod_zone_freepage_state(zone, 1 << order, migratetype); in __free_one_page()
820 VM_BUG_ON_PAGE(bad_range(zone, page), page); in __free_one_page()
836 clear_page_guard(zone, buddy, order, migratetype); in __free_one_page()
839 zone->free_area[order].nr_free--; in __free_one_page()
856 if (unlikely(has_isolate_pageblock(zone))) { in __free_one_page()
892 &zone->free_area[order].free_list[migratetype]); in __free_one_page()
897 list_add(&page->lru, &zone->free_area[order].free_list[migratetype]); in __free_one_page()
899 zone->free_area[order].nr_free++; in __free_one_page()
1109 static void free_pcppages_bulk(struct zone *zone, int count, in free_pcppages_bulk() argument
1170 spin_lock(&zone->lock); in free_pcppages_bulk()
1171 isolated_pageblocks = has_isolate_pageblock(zone); in free_pcppages_bulk()
1185 __free_one_page(page, page_to_pfn(page), zone, 0, mt); in free_pcppages_bulk()
1188 spin_unlock(&zone->lock); in free_pcppages_bulk()
1191 static void free_one_page(struct zone *zone, in free_one_page() argument
1196 spin_lock(&zone->lock); in free_one_page()
1197 if (unlikely(has_isolate_pageblock(zone) || in free_one_page()
1201 __free_one_page(page, pfn, zone, order, migratetype); in free_one_page()
1202 spin_unlock(&zone->lock); in free_one_page()
1206 unsigned long zone, int nid) in __init_single_page() argument
1209 set_page_links(page, zone, nid, pfn); in __init_single_page()
1217 if (!is_highmem_idx(zone)) in __init_single_page()
1235 struct zone *zone = &pgdat->node_zones[zid]; in init_reserved_page() local
1237 if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone)) in init_reserved_page()
1389 unsigned long end_pfn, struct zone *zone) in __pageblock_pfn_to_page() argument
1404 if (page_zone(start_page) != zone) in __pageblock_pfn_to_page()
1416 void set_zone_contiguous(struct zone *zone) in set_zone_contiguous() argument
1418 unsigned long block_start_pfn = zone->zone_start_pfn; in set_zone_contiguous()
1422 for (; block_start_pfn < zone_end_pfn(zone); in set_zone_contiguous()
1426 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone)); in set_zone_contiguous()
1429 block_end_pfn, zone)) in set_zone_contiguous()
1435 zone->contiguous = true; in set_zone_contiguous()
1438 void clear_zone_contiguous(struct zone *zone) in clear_zone_contiguous() argument
1440 zone->contiguous = false; in clear_zone_contiguous()
1574 struct zone *zone; in deferred_init_memmap() local
1604 zone = pgdat->node_zones + zid; in deferred_init_memmap()
1605 if (first_init_pfn < zone_end_pfn(zone)) in deferred_init_memmap()
1608 first_init_pfn = max(zone->zone_start_pfn, first_init_pfn); in deferred_init_memmap()
1618 epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa)); in deferred_init_memmap()
1623 epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa)); in deferred_init_memmap()
1628 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone)); in deferred_init_memmap()
1653 deferred_grow_zone(struct zone *zone, unsigned int order) in deferred_grow_zone() argument
1655 int zid = zone_idx(zone); in deferred_grow_zone()
1656 int nid = zone_to_nid(zone); in deferred_grow_zone()
1666 if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat)) in deferred_grow_zone()
1680 first_init_pfn = max(zone->zone_start_pfn, first_deferred_pfn); in deferred_grow_zone()
1689 epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa)); in deferred_grow_zone()
1724 _deferred_grow_zone(struct zone *zone, unsigned int order) in _deferred_grow_zone() argument
1726 return deferred_grow_zone(zone, order); in _deferred_grow_zone()
1733 struct zone *zone; in page_alloc_init_late() local
1752 for_each_populated_zone(zone) in page_alloc_init_late()
1753 zone_pcp_update(zone); in page_alloc_init_late()
1769 for_each_populated_zone(zone) in page_alloc_init_late()
1770 set_zone_contiguous(zone); in page_alloc_init_late()
1818 static inline void expand(struct zone *zone, struct page *page, in expand() argument
1828 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); in expand()
1836 if (set_page_guard(zone, &page[size], high, migratetype)) in expand()
1971 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, in __rmqueue_smallest() argument
1980 area = &(zone->free_area[current_order]); in __rmqueue_smallest()
1988 expand(zone, page, order, current_order, area, migratetype); in __rmqueue_smallest()
2014 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback() argument
2017 return __rmqueue_smallest(zone, order, MIGRATE_CMA); in __rmqueue_cma_fallback()
2020 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback() argument
2029 static int move_freepages(struct zone *zone, in move_freepages() argument
2060 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); in move_freepages()
2078 &zone->free_area[order].free_list[migratetype]); in move_freepages()
2086 int move_freepages_block(struct zone *zone, struct page *page, in move_freepages_block() argument
2099 if (!zone_spans_pfn(zone, start_pfn)) in move_freepages_block()
2101 if (!zone_spans_pfn(zone, end_pfn)) in move_freepages_block()
2104 return move_freepages(zone, start_page, end_page, migratetype, in move_freepages_block()
2160 static void steal_suitable_fallback(struct zone *zone, struct page *page, in steal_suitable_fallback() argument
2187 free_pages = move_freepages_block(zone, page, start_type, in steal_suitable_fallback()
2226 area = &zone->free_area[current_order]; in steal_suitable_fallback()
2271 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, in reserve_highatomic_pageblock() argument
2281 max_managed = (zone->managed_pages / 100) + pageblock_nr_pages; in reserve_highatomic_pageblock()
2282 if (zone->nr_reserved_highatomic >= max_managed) in reserve_highatomic_pageblock()
2285 spin_lock_irqsave(&zone->lock, flags); in reserve_highatomic_pageblock()
2288 if (zone->nr_reserved_highatomic >= max_managed) in reserve_highatomic_pageblock()
2295 zone->nr_reserved_highatomic += pageblock_nr_pages; in reserve_highatomic_pageblock()
2297 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); in reserve_highatomic_pageblock()
2301 spin_unlock_irqrestore(&zone->lock, flags); in reserve_highatomic_pageblock()
2319 struct zone *zone; in unreserve_highatomic_pageblock() local
2324 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx, in unreserve_highatomic_pageblock()
2330 if (!force && zone->nr_reserved_highatomic <= in unreserve_highatomic_pageblock()
2334 spin_lock_irqsave(&zone->lock, flags); in unreserve_highatomic_pageblock()
2336 struct free_area *area = &(zone->free_area[order]); in unreserve_highatomic_pageblock()
2359 zone->nr_reserved_highatomic -= min( in unreserve_highatomic_pageblock()
2361 zone->nr_reserved_highatomic); in unreserve_highatomic_pageblock()
2374 ret = move_freepages_block(zone, page, ac->migratetype, in unreserve_highatomic_pageblock()
2377 spin_unlock_irqrestore(&zone->lock, flags); in unreserve_highatomic_pageblock()
2381 spin_unlock_irqrestore(&zone->lock, flags); in unreserve_highatomic_pageblock()
2398 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype) in __rmqueue_fallback() argument
2413 area = &(zone->free_area[current_order]); in __rmqueue_fallback()
2439 area = &(zone->free_area[current_order]); in __rmqueue_fallback()
2456 steal_suitable_fallback(zone, page, start_migratetype, can_steal); in __rmqueue_fallback()
2470 __rmqueue(struct zone *zone, unsigned int order, int migratetype) in __rmqueue() argument
2475 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue()
2478 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
2480 if (!page && __rmqueue_fallback(zone, order, migratetype)) in __rmqueue()
2493 static int rmqueue_bulk(struct zone *zone, unsigned int order, in rmqueue_bulk() argument
2499 spin_lock(&zone->lock); in rmqueue_bulk()
2501 struct page *page = __rmqueue(zone, order, migratetype); in rmqueue_bulk()
2521 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, in rmqueue_bulk()
2531 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); in rmqueue_bulk()
2532 spin_unlock(&zone->lock); in rmqueue_bulk()
2545 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) in drain_zone_pages() argument
2554 free_pcppages_bulk(zone, to_drain, pcp); in drain_zone_pages()
2566 static void drain_pages_zone(unsigned int cpu, struct zone *zone) in drain_pages_zone() argument
2573 pset = per_cpu_ptr(zone->pageset, cpu); in drain_pages_zone()
2577 free_pcppages_bulk(zone, pcp->count, pcp); in drain_pages_zone()
2590 struct zone *zone; in drain_pages() local
2592 for_each_populated_zone(zone) { in drain_pages()
2593 drain_pages_zone(cpu, zone); in drain_pages()
2603 void drain_local_pages(struct zone *zone) in drain_local_pages() argument
2607 if (zone) in drain_local_pages()
2608 drain_pages_zone(cpu, zone); in drain_local_pages()
2634 void drain_all_pages(struct zone *zone) in drain_all_pages() argument
2657 if (!zone) in drain_all_pages()
2670 struct zone *z; in drain_all_pages()
2673 if (zone) { in drain_all_pages()
2674 pcp = per_cpu_ptr(zone->pageset, cpu); in drain_all_pages()
2711 void mark_free_pages(struct zone *zone) in mark_free_pages() argument
2718 if (zone_is_empty(zone)) in mark_free_pages()
2721 spin_lock_irqsave(&zone->lock, flags); in mark_free_pages()
2723 max_zone_pfn = zone_end_pfn(zone); in mark_free_pages()
2724 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) in mark_free_pages()
2733 if (page_zone(page) != zone) in mark_free_pages()
2742 &zone->free_area[order].free_list[t], lru) { in mark_free_pages()
2755 spin_unlock_irqrestore(&zone->lock, flags); in mark_free_pages()
2773 struct zone *zone = page_zone(page); in free_unref_page_commit() local
2789 free_one_page(zone, page, pfn, 0, migratetype); in free_unref_page_commit()
2795 pcp = &this_cpu_ptr(zone->pageset)->pcp; in free_unref_page_commit()
2800 free_pcppages_bulk(zone, batch, pcp); in free_unref_page_commit()
2882 struct zone *zone; in __isolate_free_page() local
2887 zone = page_zone(page); in __isolate_free_page()
2897 watermark = min_wmark_pages(zone) + (1UL << order); in __isolate_free_page()
2898 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) in __isolate_free_page()
2901 __mod_zone_freepage_state(zone, -(1UL << order), mt); in __isolate_free_page()
2906 zone->free_area[order].nr_free--; in __isolate_free_page()
2933 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z) in zone_statistics()
2956 static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype, in __rmqueue_pcplist() argument
2964 pcp->count += rmqueue_bulk(zone, 0, in __rmqueue_pcplist()
2980 static struct page *rmqueue_pcplist(struct zone *preferred_zone, in rmqueue_pcplist()
2981 struct zone *zone, unsigned int order, in rmqueue_pcplist() argument
2990 pcp = &this_cpu_ptr(zone->pageset)->pcp; in rmqueue_pcplist()
2992 page = __rmqueue_pcplist(zone, migratetype, pcp, list); in rmqueue_pcplist()
2995 zone_statistics(preferred_zone, zone); in rmqueue_pcplist()
3005 struct page *rmqueue(struct zone *preferred_zone, in rmqueue()
3006 struct zone *zone, unsigned int order, in rmqueue() argument
3014 page = rmqueue_pcplist(preferred_zone, zone, order, in rmqueue()
3024 spin_lock_irqsave(&zone->lock, flags); in rmqueue()
3029 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); in rmqueue()
3034 page = __rmqueue(zone, order, migratetype); in rmqueue()
3036 spin_unlock(&zone->lock); in rmqueue()
3039 __mod_zone_freepage_state(zone, -(1 << order), in rmqueue()
3043 zone_statistics(preferred_zone, zone); in rmqueue()
3047 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); in rmqueue()
3139 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in __zone_watermark_ok()
3218 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in zone_watermark_ok()
3225 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, in zone_watermark_fast()
3251 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, in zone_watermark_ok_safe()
3264 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) in zone_allows_reclaim() argument
3266 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= in zone_allows_reclaim()
3270 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) in zone_allows_reclaim() argument
3285 struct zone *zone; in get_page_from_freelist() local
3292 for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, in get_page_from_freelist()
3299 !__cpuset_zone_allowed(zone, gfp_mask)) in get_page_from_freelist()
3321 if (last_pgdat_dirty_limit == zone->zone_pgdat) in get_page_from_freelist()
3324 if (!node_dirty_ok(zone->zone_pgdat)) { in get_page_from_freelist()
3325 last_pgdat_dirty_limit = zone->zone_pgdat; in get_page_from_freelist()
3330 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK]; in get_page_from_freelist()
3331 if (!zone_watermark_fast(zone, order, mark, in get_page_from_freelist()
3341 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
3351 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone)) in get_page_from_freelist()
3354 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); in get_page_from_freelist()
3364 if (zone_watermark_ok(zone, order, mark, in get_page_from_freelist()
3373 page = rmqueue(ac->preferred_zoneref->zone, zone, order, in get_page_from_freelist()
3383 reserve_highatomic_pageblock(page, zone, order); in get_page_from_freelist()
3390 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
3605 struct zone *zone = page_zone(page); in __alloc_pages_direct_compact() local
3607 zone->compact_blockskip_flush = false; in __alloc_pages_direct_compact()
3608 compaction_defer_reset(zone, order, true); in __alloc_pages_direct_compact()
3709 struct zone *zone; in should_compact_retry() local
3721 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, in should_compact_retry()
3723 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), in should_compact_retry()
3847 struct zone *zone; in wake_all_kswapds() local
3851 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, high_zoneidx, in wake_all_kswapds()
3853 if (last_pgdat != zone->zone_pgdat) in wake_all_kswapds()
3854 wakeup_kswapd(zone, gfp_mask, order, high_zoneidx); in wake_all_kswapds()
3855 last_pgdat = zone->zone_pgdat; in wake_all_kswapds()
3954 struct zone *zone; in should_reclaim_retry() local
3982 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, in should_reclaim_retry()
3986 unsigned long min_wmark = min_wmark_pages(zone); in should_reclaim_retry()
3989 available = reclaimable = zone_reclaimable_pages(zone); in should_reclaim_retry()
3990 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); in should_reclaim_retry()
3996 wmark = __zone_watermark_ok(zone, order, min_wmark, in should_reclaim_retry()
4010 write_pending = zone_page_state_snapshot(zone, in should_reclaim_retry()
4118 if (!ac->preferred_zoneref->zone) in __alloc_pages_slowpath()
4678 struct zone *zone; in nr_free_zone_pages() local
4685 for_each_zone_zonelist(zone, z, zonelist, offset) { in nr_free_zone_pages()
4686 unsigned long size = zone->managed_pages; in nr_free_zone_pages()
4687 unsigned long high = high_wmark_pages(zone); in nr_free_zone_pages()
4718 static inline void show_node(struct zone *zone) in show_node() argument
4721 printk("Node %d ", zone_to_nid(zone)); in show_node()
4730 struct zone *zone; in si_mem_available() local
4736 for_each_zone(zone) in si_mem_available()
4737 wmark_low += zone->watermark[WMARK_LOW]; in si_mem_available()
4804 struct zone *zone = &pgdat->node_zones[zone_type]; in si_meminfo_node() local
4806 if (is_highmem(zone)) { in si_meminfo_node()
4807 managed_highpages += zone->managed_pages; in si_meminfo_node()
4808 free_highpages += zone_page_state(zone, NR_FREE_PAGES); in si_meminfo_node()
4883 struct zone *zone; in show_free_areas() local
4886 for_each_populated_zone(zone) { in show_free_areas()
4887 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
4891 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; in show_free_areas()
4969 for_each_populated_zone(zone) { in show_free_areas()
4972 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
4977 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; in show_free_areas()
4979 show_node(zone); in show_free_areas()
5002 zone->name, in show_free_areas()
5003 K(zone_page_state(zone, NR_FREE_PAGES)), in show_free_areas()
5004 K(min_wmark_pages(zone)), in show_free_areas()
5005 K(low_wmark_pages(zone)), in show_free_areas()
5006 K(high_wmark_pages(zone)), in show_free_areas()
5007 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)), in show_free_areas()
5008 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)), in show_free_areas()
5009 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)), in show_free_areas()
5010 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)), in show_free_areas()
5011 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)), in show_free_areas()
5012 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)), in show_free_areas()
5013 K(zone->present_pages), in show_free_areas()
5014 K(zone->managed_pages), in show_free_areas()
5015 K(zone_page_state(zone, NR_MLOCK)), in show_free_areas()
5016 zone_page_state(zone, NR_KERNEL_STACK_KB), in show_free_areas()
5017 K(zone_page_state(zone, NR_PAGETABLE)), in show_free_areas()
5018 K(zone_page_state(zone, NR_BOUNCE)), in show_free_areas()
5020 K(this_cpu_read(zone->pageset->pcp.count)), in show_free_areas()
5021 K(zone_page_state(zone, NR_FREE_CMA_PAGES))); in show_free_areas()
5024 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]); in show_free_areas()
5028 for_each_populated_zone(zone) { in show_free_areas()
5033 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
5035 show_node(zone); in show_free_areas()
5036 printk(KERN_CONT "%s: ", zone->name); in show_free_areas()
5038 spin_lock_irqsave(&zone->lock, flags); in show_free_areas()
5040 struct free_area *area = &zone->free_area[order]; in show_free_areas()
5052 spin_unlock_irqrestore(&zone->lock, flags); in show_free_areas()
5069 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) in zoneref_set_zone() argument
5071 zoneref->zone = zone; in zoneref_set_zone()
5072 zoneref->zone_idx = zone_idx(zone); in zoneref_set_zone()
5082 struct zone *zone; in build_zonerefs_node() local
5088 zone = pgdat->node_zones + zone_type; in build_zonerefs_node()
5089 if (managed_zone(zone)) { in build_zonerefs_node()
5090 zoneref_set_zone(zone, &zonerefs[nr_zones++]); in build_zonerefs_node()
5233 zonerefs->zone = NULL; in build_zonelists_in_node_order()
5248 zonerefs->zone = NULL; in build_thisnode_zonelists()
5306 return zone_to_nid(z->zone); in local_memory_node()
5347 zonerefs->zone = NULL; in build_zonelists()
5483 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, in memmap_init_zone() argument
5527 if (mirrored_kernelcore && zone == ZONE_MOVABLE) { in memmap_init_zone()
5545 __init_single_page(page, pfn, zone, nid); in memmap_init_zone()
5571 static void __meminit zone_init_free_lists(struct zone *zone) in zone_init_free_lists() argument
5575 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); in zone_init_free_lists()
5576 zone->free_area[order].nr_free = 0; in zone_init_free_lists()
5581 #define memmap_init(size, nid, zone, start_pfn) \ argument
5582 memmap_init_zone((size), (nid), (zone), (start_pfn), \
5586 static int zone_batchsize(struct zone *zone) in zone_batchsize() argument
5595 batch = zone->managed_pages / 1024; in zone_batchsize()
5701 static void pageset_set_high_and_batch(struct zone *zone, in pageset_set_high_and_batch() argument
5706 (zone->managed_pages / in pageset_set_high_and_batch()
5709 pageset_set_batch(pcp, zone_batchsize(zone)); in pageset_set_high_and_batch()
5712 static void __meminit zone_pageset_init(struct zone *zone, int cpu) in zone_pageset_init() argument
5714 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu); in zone_pageset_init()
5717 pageset_set_high_and_batch(zone, pcp); in zone_pageset_init()
5720 void __meminit setup_zone_pageset(struct zone *zone) in setup_zone_pageset() argument
5723 zone->pageset = alloc_percpu(struct per_cpu_pageset); in setup_zone_pageset()
5725 zone_pageset_init(zone, cpu); in setup_zone_pageset()
5735 struct zone *zone; in setup_per_cpu_pageset() local
5737 for_each_populated_zone(zone) in setup_per_cpu_pageset()
5738 setup_zone_pageset(zone); in setup_per_cpu_pageset()
5745 static __meminit void zone_pcp_init(struct zone *zone) in zone_pcp_init() argument
5752 zone->pageset = &boot_pageset; in zone_pcp_init()
5754 if (populated_zone(zone)) in zone_pcp_init()
5756 zone->name, zone->present_pages, in zone_pcp_init()
5757 zone_batchsize(zone)); in zone_pcp_init()
5760 void __meminit init_currently_empty_zone(struct zone *zone, in init_currently_empty_zone() argument
5764 struct pglist_data *pgdat = zone->zone_pgdat; in init_currently_empty_zone()
5765 int zone_idx = zone_idx(zone) + 1; in init_currently_empty_zone()
5770 zone->zone_start_pfn = zone_start_pfn; in init_currently_empty_zone()
5775 (unsigned long)zone_idx(zone), in init_currently_empty_zone()
5778 zone_init_free_lists(zone); in init_currently_empty_zone()
5779 zone->initialized = 1; in init_currently_empty_zone()
6067 unsigned int zone; in zone_spanned_pages_in_node() local
6070 for (zone = 0; zone < zone_type; zone++) in zone_spanned_pages_in_node()
6071 *zone_start_pfn += zones_size[zone]; in zone_spanned_pages_in_node()
6102 struct zone *zone = pgdat->node_zones + i; in calculate_node_totalpages() local
6116 zone->zone_start_pfn = zone_start_pfn; in calculate_node_totalpages()
6118 zone->zone_start_pfn = 0; in calculate_node_totalpages()
6119 zone->spanned_pages = size; in calculate_node_totalpages()
6120 zone->present_pages = real_size; in calculate_node_totalpages()
6154 struct zone *zone, in setup_usemap() argument
6159 zone->pageblock_flags = NULL; in setup_usemap()
6161 zone->pageblock_flags = in setup_usemap()
6166 static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone, in setup_usemap() argument
6262 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid, in zone_init_internals() argument
6265 zone->managed_pages = remaining_pages; in zone_init_internals()
6266 zone_set_nid(zone, nid); in zone_init_internals()
6267 zone->name = zone_names[idx]; in zone_init_internals()
6268 zone->zone_pgdat = NODE_DATA(nid); in zone_init_internals()
6269 spin_lock_init(&zone->lock); in zone_init_internals()
6270 zone_seqlock_init(zone); in zone_init_internals()
6271 zone_pcp_init(zone); in zone_init_internals()
6311 struct zone *zone = pgdat->node_zones + j; in free_area_init_core() local
6313 unsigned long zone_start_pfn = zone->zone_start_pfn; in free_area_init_core()
6315 size = zone->spanned_pages; in free_area_init_core()
6316 freesize = zone->present_pages; in free_area_init_core()
6355 zone_init_internals(zone, j, nid, freesize); in free_area_init_core()
6361 setup_usemap(pgdat, zone, zone_start_pfn, size); in free_area_init_core()
6362 init_currently_empty_zone(zone, zone_start_pfn, size); in free_area_init_core()
6871 struct zone *zone = &pgdat->node_zones[zone_type]; in check_for_memory() local
6872 if (populated_zone(zone)) { in check_for_memory()
7211 struct zone *zone = pgdat->node_zones + i; in calculate_totalreserve_pages() local
7216 if (zone->lowmem_reserve[j] > max) in calculate_totalreserve_pages()
7217 max = zone->lowmem_reserve[j]; in calculate_totalreserve_pages()
7221 max += high_wmark_pages(zone); in calculate_totalreserve_pages()
7223 if (max > zone->managed_pages) in calculate_totalreserve_pages()
7224 max = zone->managed_pages; in calculate_totalreserve_pages()
7247 struct zone *zone = pgdat->node_zones + j; in setup_per_zone_lowmem_reserve() local
7248 unsigned long managed_pages = zone->managed_pages; in setup_per_zone_lowmem_reserve()
7250 zone->lowmem_reserve[j] = 0; in setup_per_zone_lowmem_reserve()
7254 struct zone *lower_zone; in setup_per_zone_lowmem_reserve()
7279 struct zone *zone; in __setup_per_zone_wmarks() local
7283 for_each_zone(zone) { in __setup_per_zone_wmarks()
7284 if (!is_highmem(zone)) in __setup_per_zone_wmarks()
7285 lowmem_pages += zone->managed_pages; in __setup_per_zone_wmarks()
7288 for_each_zone(zone) { in __setup_per_zone_wmarks()
7291 spin_lock_irqsave(&zone->lock, flags); in __setup_per_zone_wmarks()
7292 tmp = (u64)pages_min * zone->managed_pages; in __setup_per_zone_wmarks()
7294 if (is_highmem(zone)) { in __setup_per_zone_wmarks()
7306 min_pages = zone->managed_pages / 1024; in __setup_per_zone_wmarks()
7308 zone->watermark[WMARK_MIN] = min_pages; in __setup_per_zone_wmarks()
7314 zone->watermark[WMARK_MIN] = tmp; in __setup_per_zone_wmarks()
7323 mult_frac(zone->managed_pages, in __setup_per_zone_wmarks()
7326 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; in __setup_per_zone_wmarks()
7327 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2; in __setup_per_zone_wmarks()
7329 spin_unlock_irqrestore(&zone->lock, flags); in __setup_per_zone_wmarks()
7449 struct zone *zone; in setup_min_unmapped_ratio() local
7454 for_each_zone(zone) in setup_min_unmapped_ratio()
7455 zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages * in setup_min_unmapped_ratio()
7477 struct zone *zone; in setup_min_slab_ratio() local
7482 for_each_zone(zone) in setup_min_slab_ratio()
7483 zone->zone_pgdat->min_slab_pages += (zone->managed_pages * in setup_min_slab_ratio()
7527 struct zone *zone; in percpu_pagelist_fraction_sysctl_handler() local
7550 for_each_populated_zone(zone) { in percpu_pagelist_fraction_sysctl_handler()
7554 pageset_set_high_and_batch(zone, in percpu_pagelist_fraction_sysctl_handler()
7555 per_cpu_ptr(zone->pageset, cpu)); in percpu_pagelist_fraction_sysctl_handler()
7721 bool has_unmovable_pages(struct zone *zone, struct page *page, int count, in has_unmovable_pages() argument
7761 if (zone_idx(zone) == ZONE_MOVABLE) in has_unmovable_pages()
7823 WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE); in has_unmovable_pages()
7872 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, in __alloc_contig_migrate_range()
7917 .zone = page_zone(pfn_to_page(start)), in alloc_contig_range()
7988 drain_all_pages(cc.zone); in alloc_contig_range()
8058 void __meminit zone_pcp_update(struct zone *zone) in zone_pcp_update() argument
8063 pageset_set_high_and_batch(zone, in zone_pcp_update()
8064 per_cpu_ptr(zone->pageset, cpu)); in zone_pcp_update()
8068 void zone_pcp_reset(struct zone *zone) in zone_pcp_reset() argument
8076 if (zone->pageset != &boot_pageset) { in zone_pcp_reset()
8078 pset = per_cpu_ptr(zone->pageset, cpu); in zone_pcp_reset()
8079 drain_zonestat(zone, pset); in zone_pcp_reset()
8081 free_percpu(zone->pageset); in zone_pcp_reset()
8082 zone->pageset = &boot_pageset; in zone_pcp_reset()
8096 struct zone *zone; in __offline_isolated_pages() local
8107 zone = page_zone(pfn_to_page(pfn)); in __offline_isolated_pages()
8108 spin_lock_irqsave(&zone->lock, flags); in __offline_isolated_pages()
8135 zone->free_area[order].nr_free--; in __offline_isolated_pages()
8140 spin_unlock_irqrestore(&zone->lock, flags); in __offline_isolated_pages()
8146 struct zone *zone = page_zone(page); in is_free_buddy_page() local
8151 spin_lock_irqsave(&zone->lock, flags); in is_free_buddy_page()
8158 spin_unlock_irqrestore(&zone->lock, flags); in is_free_buddy_page()
8171 struct zone *zone = page_zone(page); in set_hwpoison_free_buddy_page() local
8177 spin_lock_irqsave(&zone->lock, flags); in set_hwpoison_free_buddy_page()
8187 spin_unlock_irqrestore(&zone->lock, flags); in set_hwpoison_free_buddy_page()