
Lines matching refs:zone in mm/page_alloc.c. Each entry gives the source line number, the matching code, and the enclosing function; the trailing argument/local tag is the cross-referencer's note on how `zone` is bound at that point.

262 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)  in page_outside_zone_boundaries()  argument
270 seq = zone_span_seqbegin(zone); in page_outside_zone_boundaries()
271 start_pfn = zone->zone_start_pfn; in page_outside_zone_boundaries()
272 sp = zone->spanned_pages; in page_outside_zone_boundaries()
273 if (!zone_spans_pfn(zone, pfn)) in page_outside_zone_boundaries()
275 } while (zone_span_seqretry(zone, seq)); in page_outside_zone_boundaries()
279 pfn, zone_to_nid(zone), zone->name, in page_outside_zone_boundaries()
285 static int page_is_consistent(struct zone *zone, struct page *page) in page_is_consistent() argument
289 if (zone != page_zone(page)) in page_is_consistent()
297 static int bad_range(struct zone *zone, struct page *page) in bad_range() argument
299 if (page_outside_zone_boundaries(zone, page)) in bad_range()
301 if (!page_is_consistent(zone, page)) in bad_range()
307 static inline int bad_range(struct zone *zone, struct page *page) in bad_range() argument
556 struct zone *zone, unsigned int order, in __free_one_page() argument
567 VM_BUG_ON(!zone_is_initialized(zone)); in __free_one_page()
575 __mod_zone_freepage_state(zone, 1 << order, migratetype); in __free_one_page()
580 VM_BUG_ON_PAGE(bad_range(zone, page), page); in __free_one_page()
596 __mod_zone_freepage_state(zone, 1 << order, in __free_one_page()
601 zone->free_area[order].nr_free--; in __free_one_page()
618 if (unlikely(has_isolate_pageblock(zone))) { in __free_one_page()
653 &zone->free_area[order].free_list[migratetype]); in __free_one_page()
658 list_add(&page->lru, &zone->free_area[order].free_list[migratetype]); in __free_one_page()
660 zone->free_area[order].nr_free++; in __free_one_page()
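Note: the __free_one_page() lines above are the buddy-merge loop. A block's
buddy at a given order differs from it only in the single bit that selects
that order, so finding and merging buddies is pure index arithmetic, as the
kernel's __find_buddy_index() does. A minimal user-space sketch of that
arithmetic (standalone C, not kernel code; the starting index is made up):

    #include <stdio.h>

    /* buddy of a block at `order`: flip the order bit */
    static unsigned long find_buddy_index(unsigned long page_idx,
                                          unsigned int order)
    {
        return page_idx ^ (1UL << order);
    }

    int main(void)
    {
        unsigned long page_idx = 12;   /* hypothetical free block */
        unsigned int order;

        for (order = 0; order < 4; order++) {
            unsigned long buddy = find_buddy_index(page_idx, order);
            /* if the buddy is free too, the pair merges into the lower
             * index and the scan continues one order up */
            printf("order %u: buddy of %lu is %lu, merged index %lu\n",
                   order, page_idx, buddy, buddy & page_idx);
            page_idx &= buddy;
        }
        return 0;
    }
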
701 static void free_pcppages_bulk(struct zone *zone, int count, in free_pcppages_bulk() argument
709 spin_lock(&zone->lock); in free_pcppages_bulk()
710 nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED); in free_pcppages_bulk()
712 __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned); in free_pcppages_bulk()
743 if (unlikely(has_isolate_pageblock(zone))) in free_pcppages_bulk()
747 __free_one_page(page, page_to_pfn(page), zone, 0, mt); in free_pcppages_bulk()
751 spin_unlock(&zone->lock); in free_pcppages_bulk()
754 static void free_one_page(struct zone *zone, in free_one_page() argument
760 spin_lock(&zone->lock); in free_one_page()
761 nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED); in free_one_page()
763 __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned); in free_one_page()
765 if (unlikely(has_isolate_pageblock(zone) || in free_one_page()
769 __free_one_page(page, pfn, zone, order, migratetype); in free_one_page()
770 spin_unlock(&zone->lock); in free_one_page()
883 static inline void expand(struct zone *zone, struct page *page, in expand() argument
893 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); in expand()
907 __mod_zone_freepage_state(zone, -(1 << high), in expand()
975 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, in __rmqueue_smallest() argument
984 area = &(zone->free_area[current_order]); in __rmqueue_smallest()
993 expand(zone, page, order, current_order, area, migratetype); in __rmqueue_smallest()
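Note: __rmqueue_smallest() above scans upward from the requested order and
calls expand() to split the first block it finds. Each halving step parks
the upper half on the next-lower free list. A sketch of that split
(standalone C; the orders are made up):

    #include <stdio.h>

    int main(void)
    {
        unsigned int high = 5, low = 2;    /* found order / wanted order */
        unsigned long size = 1UL << high;  /* pages in the found block */

        while (high > low) {
            high--;
            size >>= 1;
            /* the trailing half becomes a free block of order `high` */
            printf("park %lu pages on the order-%u free list\n",
                   size, high);
        }
        printf("hand %lu pages (order %u) to the caller\n",
               1UL << low, low);
        return 0;
    }
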
1026 int move_freepages(struct zone *zone, in move_freepages() argument
1052 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); in move_freepages()
1061 &zone->free_area[order].free_list[migratetype]); in move_freepages()
1070 int move_freepages_block(struct zone *zone, struct page *page, in move_freepages_block() argument
1083 if (!zone_spans_pfn(zone, start_pfn)) in move_freepages_block()
1085 if (!zone_spans_pfn(zone, end_pfn)) in move_freepages_block()
1088 return move_freepages(zone, start_page, end_page, migratetype); in move_freepages_block()
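Note: move_freepages_block() above first rounds the page's PFN down to its
pageblock, then verifies both ends with zone_spans_pfn() before moving the
whole block between free lists. The alignment step in isolation (standalone
C; the pageblock size and PFN are illustrative, order 9 being typical for
x86 with 4K pages):

    #include <stdio.h>

    #define PAGEBLOCK_NR_PAGES 512UL   /* 1UL << pageblock_order */

    int main(void)
    {
        unsigned long pfn = 70000;     /* hypothetical page frame number */
        unsigned long start_pfn = pfn & ~(PAGEBLOCK_NR_PAGES - 1);
        unsigned long end_pfn = start_pfn + PAGEBLOCK_NR_PAGES - 1;

        printf("pfn %lu lies in pageblock [%lu, %lu]\n",
               pfn, start_pfn, end_pfn);
        return 0;
    }
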
1117 static void try_to_steal_freepages(struct zone *zone, struct page *page, in try_to_steal_freepages() argument
1134 pages = move_freepages_block(zone, page, start_type); in try_to_steal_freepages()
1145 __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype) in __rmqueue_fallback() argument
1164 area = &(zone->free_area[current_order]); in __rmqueue_fallback()
1173 try_to_steal_freepages(zone, page, in __rmqueue_fallback()
1190 expand(zone, page, order, current_order, area, in __rmqueue_fallback()
1217 static struct page *__rmqueue(struct zone *zone, unsigned int order, in __rmqueue() argument
1223 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue()
1226 page = __rmqueue_fallback(zone, order, migratetype); in __rmqueue()
1248 static int rmqueue_bulk(struct zone *zone, unsigned int order, in rmqueue_bulk() argument
1254 spin_lock(&zone->lock); in rmqueue_bulk()
1256 struct page *page = __rmqueue(zone, order, migratetype); in rmqueue_bulk()
1275 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, in rmqueue_bulk()
1278 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); in rmqueue_bulk()
1279 spin_unlock(&zone->lock); in rmqueue_bulk()
1292 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) in drain_zone_pages() argument
1301 free_pcppages_bulk(zone, to_drain, pcp); in drain_zone_pages()
1318 struct zone *zone; in drain_pages() local
1320 for_each_populated_zone(zone) { in drain_pages()
1325 pset = per_cpu_ptr(zone->pageset, cpu); in drain_pages()
1329 free_pcppages_bulk(zone, pcp->count, pcp); in drain_pages()
1357 struct zone *zone; in drain_all_pages() local
1373 for_each_populated_zone(zone) { in drain_all_pages()
1374 pcp = per_cpu_ptr(zone->pageset, cpu); in drain_all_pages()
1390 void mark_free_pages(struct zone *zone) in mark_free_pages() argument
1397 if (zone_is_empty(zone)) in mark_free_pages()
1400 spin_lock_irqsave(&zone->lock, flags); in mark_free_pages()
1402 max_zone_pfn = zone_end_pfn(zone); in mark_free_pages()
1403 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) in mark_free_pages()
1412 list_for_each(curr, &zone->free_area[order].free_list[t]) { in mark_free_pages()
1420 spin_unlock_irqrestore(&zone->lock, flags); in mark_free_pages()
1430 struct zone *zone = page_zone(page); in free_hot_cold_page() local
1453 free_one_page(zone, page, pfn, 0, migratetype); in free_hot_cold_page()
1459 pcp = &this_cpu_ptr(zone->pageset)->pcp; in free_hot_cold_page()
1467 free_pcppages_bulk(zone, batch, pcp); in free_hot_cold_page()
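Note: free_hot_cold_page() above is the per-cpu fast path: the page goes
onto the cpu's list and, once pcp->count reaches pcp->high, a batch is
handed back to the buddy lists in one zone->lock section via
free_pcppages_bulk(). A toy model of that high/batch hysteresis
(standalone C; the tuning values are typical assumptions, not read from a
system):

    #include <stdio.h>

    int main(void)
    {
        int high = 186, batch = 31;    /* assumed pcp tuning values */
        int count = 0, i;

        for (i = 0; i < 400; i++) {    /* simulate 400 order-0 frees */
            count++;                   /* page parked on the pcp list */
            if (count >= high) {
                count -= batch;        /* free_pcppages_bulk(zone, batch) */
                printf("free %3d: drained %d pages, %d left on pcp\n",
                       i, batch, count);
            }
        }
        return 0;
    }
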
1520 struct zone *zone; in __isolate_free_page() local
1525 zone = page_zone(page); in __isolate_free_page()
1530 watermark = low_wmark_pages(zone) + (1 << order); in __isolate_free_page()
1531 if (!zone_watermark_ok(zone, 0, watermark, 0, 0)) in __isolate_free_page()
1534 __mod_zone_freepage_state(zone, -(1UL << order), mt); in __isolate_free_page()
1539 zone->free_area[order].nr_free--; in __isolate_free_page()
1589 struct page *buffered_rmqueue(struct zone *preferred_zone, in buffered_rmqueue()
1590 struct zone *zone, unsigned int order, in buffered_rmqueue() argument
1603 pcp = &this_cpu_ptr(zone->pageset)->pcp; in buffered_rmqueue()
1606 pcp->count += rmqueue_bulk(zone, 0, in buffered_rmqueue()
1634 spin_lock_irqsave(&zone->lock, flags); in buffered_rmqueue()
1635 page = __rmqueue(zone, order, migratetype); in buffered_rmqueue()
1636 spin_unlock(&zone->lock); in buffered_rmqueue()
1639 __mod_zone_freepage_state(zone, -(1 << order), in buffered_rmqueue()
1643 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); in buffered_rmqueue()
1644 if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 && in buffered_rmqueue()
1645 !test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) in buffered_rmqueue()
1646 set_bit(ZONE_FAIR_DEPLETED, &zone->flags); in buffered_rmqueue()
1648 __count_zone_vm_events(PGALLOC, zone, 1 << order); in buffered_rmqueue()
1649 zone_statistics(preferred_zone, zone, gfp_flags); in buffered_rmqueue()
1652 VM_BUG_ON_PAGE(bad_range(zone, page), page); in buffered_rmqueue()
1743 static bool __zone_watermark_ok(struct zone *z, unsigned int order, in __zone_watermark_ok()
1778 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in zone_watermark_ok()
1785 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, in zone_watermark_ok_safe()
1914 static bool zone_local(struct zone *local_zone, struct zone *zone) in zone_local() argument
1916 return local_zone->node == zone->node; in zone_local()
1919 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) in zone_allows_reclaim() argument
1921 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) < in zone_allows_reclaim()
1946 static bool zone_local(struct zone *local_zone, struct zone *zone) in zone_local() argument
1951 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) in zone_allows_reclaim() argument
1958 static void reset_alloc_batches(struct zone *preferred_zone) in reset_alloc_batches()
1960 struct zone *zone = preferred_zone->zone_pgdat->node_zones; in reset_alloc_batches() local
1963 mod_zone_page_state(zone, NR_ALLOC_BATCH, in reset_alloc_batches()
1964 high_wmark_pages(zone) - low_wmark_pages(zone) - in reset_alloc_batches()
1965 atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH])); in reset_alloc_batches()
1966 clear_bit(ZONE_FAIR_DEPLETED, &zone->flags); in reset_alloc_batches()
1967 } while (zone++ != preferred_zone); in reset_alloc_batches()
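Note: reset_alloc_batches() above tops the fair-allocation counter back up
by adding (high wmark - low wmark - current value), which leaves
NR_ALLOC_BATCH equal to the watermark gap no matter how far it had been
driven negative. The arithmetic in isolation (standalone C; numbers are
illustrative):

    #include <stdio.h>

    int main(void)
    {
        long high_wmark = 1250, low_wmark = 1000;  /* pages, assumed */
        long batch = -37;                          /* depleted counter */

        batch += high_wmark - low_wmark - batch;   /* the reset step */
        printf("NR_ALLOC_BATCH after reset: %ld\n", batch);  /* 250 */
        return 0;
    }
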
1977 struct zone *preferred_zone, int classzone_idx, int migratetype) in get_page_from_freelist()
1981 struct zone *zone; in get_page_from_freelist() local
1997 for_each_zone_zonelist_nodemask(zone, z, zonelist, in get_page_from_freelist()
2006 !cpuset_zone_allowed_softwall(zone, gfp_mask)) in get_page_from_freelist()
2015 if (!zone_local(preferred_zone, zone)) in get_page_from_freelist()
2017 if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) { in get_page_from_freelist()
2048 if (consider_zone_dirty && !zone_dirty_ok(zone)) in get_page_from_freelist()
2051 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK]; in get_page_from_freelist()
2052 if (!zone_watermark_ok(zone, order, mark, in get_page_from_freelist()
2074 !zone_allows_reclaim(preferred_zone, zone)) in get_page_from_freelist()
2085 ret = zone_reclaim(zone, gfp_mask, order); in get_page_from_freelist()
2095 if (zone_watermark_ok(zone, order, mark, in get_page_from_freelist()
2117 page = buffered_rmqueue(preferred_zone, zone, order, in get_page_from_freelist()
2273 nodemask_t *nodemask, struct zone *preferred_zone, in __alloc_pages_may_oom()
2334 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, in __alloc_pages_direct_compact()
2338 struct zone *last_compact_zone = NULL; in __alloc_pages_direct_compact()
2378 struct zone *zone = page_zone(page); in __alloc_pages_direct_compact() local
2380 zone->compact_blockskip_flush = false; in __alloc_pages_direct_compact()
2381 compaction_defer_reset(zone, order, true); in __alloc_pages_direct_compact()
2408 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, in __alloc_pages_direct_compact()
2448 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, in __alloc_pages_direct_reclaim()
2490 nodemask_t *nodemask, struct zone *preferred_zone, in __alloc_pages_high_priority()
2510 struct zone *preferred_zone, in wake_all_kswapds()
2514 struct zone *zone; in wake_all_kswapds() local
2516 for_each_zone_zonelist_nodemask(zone, z, zonelist, in wake_all_kswapds()
2518 wakeup_kswapd(zone, order, zone_idx(preferred_zone)); in wake_all_kswapds()
2578 nodemask_t *nodemask, struct zone *preferred_zone, in __alloc_pages_slowpath()
2825 struct zone *preferred_zone; in __alloc_pages_nodemask()
2847 if (unlikely(!zonelist->_zonerefs->zone)) in __alloc_pages_nodemask()
3084 struct zone *zone; in nr_free_zone_pages() local
3091 for_each_zone_zonelist(zone, z, zonelist, offset) { in nr_free_zone_pages()
3092 unsigned long size = zone->managed_pages; in nr_free_zone_pages()
3093 unsigned long high = high_wmark_pages(zone); in nr_free_zone_pages()
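Note: nr_free_zone_pages() above walks the zonelist and credits each zone
with its managed pages above the high watermark; only the two reads are
visible in the listing, so the subtraction below is reconstructed from the
same function. A sketch (standalone C; zone sizes are made up):

    #include <stdio.h>

    int main(void)
    {
        /* hypothetical zones: { managed_pages, high_wmark_pages } */
        unsigned long zones[][2] = {
            { 4096,   128  },
            { 229376, 3000 },
            { 786432, 9000 },
        };
        unsigned long sum = 0;
        unsigned int i;

        for (i = 0; i < sizeof(zones) / sizeof(zones[0]); i++) {
            unsigned long size = zones[i][0], high = zones[i][1];
            if (size > high)
                sum += size - high;
        }
        printf("nr_free_zone_pages ~ %lu pages\n", sum);
        return 0;
    }
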
3124 static inline void show_node(struct zone *zone) in show_node() argument
3127 printk("Node %d ", zone_to_nid(zone)); in show_node()
3226 struct zone *zone; in show_free_areas() local
3228 for_each_populated_zone(zone) { in show_free_areas()
3229 if (skip_free_areas_node(filter, zone_to_nid(zone))) in show_free_areas()
3231 show_node(zone); in show_free_areas()
3232 printk("%s per-cpu:\n", zone->name); in show_free_areas()
3237 pageset = per_cpu_ptr(zone->pageset, cpu); in show_free_areas()
3271 for_each_populated_zone(zone) { in show_free_areas()
3274 if (skip_free_areas_node(filter, zone_to_nid(zone))) in show_free_areas()
3276 show_node(zone); in show_free_areas()
3307 zone->name, in show_free_areas()
3308 K(zone_page_state(zone, NR_FREE_PAGES)), in show_free_areas()
3309 K(min_wmark_pages(zone)), in show_free_areas()
3310 K(low_wmark_pages(zone)), in show_free_areas()
3311 K(high_wmark_pages(zone)), in show_free_areas()
3312 K(zone_page_state(zone, NR_ACTIVE_ANON)), in show_free_areas()
3313 K(zone_page_state(zone, NR_INACTIVE_ANON)), in show_free_areas()
3314 K(zone_page_state(zone, NR_ACTIVE_FILE)), in show_free_areas()
3315 K(zone_page_state(zone, NR_INACTIVE_FILE)), in show_free_areas()
3316 K(zone_page_state(zone, NR_UNEVICTABLE)), in show_free_areas()
3317 K(zone_page_state(zone, NR_ISOLATED_ANON)), in show_free_areas()
3318 K(zone_page_state(zone, NR_ISOLATED_FILE)), in show_free_areas()
3319 K(zone->present_pages), in show_free_areas()
3320 K(zone->managed_pages), in show_free_areas()
3321 K(zone_page_state(zone, NR_MLOCK)), in show_free_areas()
3322 K(zone_page_state(zone, NR_FILE_DIRTY)), in show_free_areas()
3323 K(zone_page_state(zone, NR_WRITEBACK)), in show_free_areas()
3324 K(zone_page_state(zone, NR_FILE_MAPPED)), in show_free_areas()
3325 K(zone_page_state(zone, NR_SHMEM)), in show_free_areas()
3326 K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)), in show_free_areas()
3327 K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)), in show_free_areas()
3328 zone_page_state(zone, NR_KERNEL_STACK) * in show_free_areas()
3330 K(zone_page_state(zone, NR_PAGETABLE)), in show_free_areas()
3331 K(zone_page_state(zone, NR_UNSTABLE_NFS)), in show_free_areas()
3332 K(zone_page_state(zone, NR_BOUNCE)), in show_free_areas()
3333 K(zone_page_state(zone, NR_FREE_CMA_PAGES)), in show_free_areas()
3334 K(zone_page_state(zone, NR_WRITEBACK_TEMP)), in show_free_areas()
3335 K(zone_page_state(zone, NR_PAGES_SCANNED)), in show_free_areas()
3336 (!zone_reclaimable(zone) ? "yes" : "no") in show_free_areas()
3340 printk(" %ld", zone->lowmem_reserve[i]); in show_free_areas()
3344 for_each_populated_zone(zone) { in show_free_areas()
3349 if (skip_free_areas_node(filter, zone_to_nid(zone))) in show_free_areas()
3351 show_node(zone); in show_free_areas()
3352 printk("%s: ", zone->name); in show_free_areas()
3354 spin_lock_irqsave(&zone->lock, flags); in show_free_areas()
3356 struct free_area *area = &zone->free_area[order]; in show_free_areas()
3368 spin_unlock_irqrestore(&zone->lock, flags); in show_free_areas()
3384 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) in zoneref_set_zone() argument
3386 zoneref->zone = zone; in zoneref_set_zone()
3387 zoneref->zone_idx = zone_idx(zone); in zoneref_set_zone()
3398 struct zone *zone; in build_zonelists_node() local
3403 zone = pgdat->node_zones + zone_type; in build_zonelists_node()
3404 if (populated_zone(zone)) { in build_zonelists_node()
3405 zoneref_set_zone(zone, in build_zonelists_node()
3602 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++) in build_zonelists_in_node_order()
3605 zonelist->_zonerefs[j].zone = NULL; in build_zonelists_in_node_order()
3619 zonelist->_zonerefs[j].zone = NULL; in build_thisnode_zonelists()
3635 struct zone *z; in build_zonelists_in_zone_order()
3651 zonelist->_zonerefs[pos].zone = NULL; in build_zonelists_in_zone_order()
3700 zonelist->_zonerefs[0].zone = NULL; in build_zonelists()
3749 for (z = zonelist->_zonerefs; z->zone; z++) in build_zonelist_cache()
3762 struct zone *zone; in local_memory_node() local
3767 &zone); in local_memory_node()
3768 return zone->node; in local_memory_node()
3809 zonelist->_zonerefs[j].zone = NULL; in build_zonelists()
3838 static void setup_zone_pageset(struct zone *zone);
3906 void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone) in build_all_zonelists() argument
3916 if (zone) in build_all_zonelists()
3917 setup_zone_pageset(zone); in build_all_zonelists()
4035 static void setup_zone_migrate_reserve(struct zone *zone) in setup_zone_migrate_reserve() argument
4049 start_pfn = zone->zone_start_pfn; in setup_zone_migrate_reserve()
4050 end_pfn = zone_end_pfn(zone); in setup_zone_migrate_reserve()
4052 reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >> in setup_zone_migrate_reserve()
4063 old_reserve = zone->nr_migrate_reserve_block; in setup_zone_migrate_reserve()
4068 zone->nr_migrate_reserve_block = reserve; in setup_zone_migrate_reserve()
4076 if (page_to_nid(page) != zone_to_nid(zone)) in setup_zone_migrate_reserve()
4101 move_freepages_block(zone, page, in setup_zone_migrate_reserve()
4120 move_freepages_block(zone, page, MIGRATE_MOVABLE); in setup_zone_migrate_reserve()
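Note: the reserve sizing at line 4052 above rounds the min watermark up to
whole pageblocks and shifts it down to a pageblock count; the shift amount
(pageblock_order) is the natural continuation of the truncated line. A
worked example (standalone C; constants are illustrative):

    #include <stdio.h>

    #define PAGEBLOCK_ORDER    9UL
    #define PAGEBLOCK_NR_PAGES (1UL << PAGEBLOCK_ORDER)

    static unsigned long roundup_pages(unsigned long x, unsigned long to)
    {
        return ((x + to - 1) / to) * to;
    }

    int main(void)
    {
        unsigned long min_wmark = 1000;    /* hypothetical, in pages */
        unsigned long reserve =
            roundup_pages(min_wmark, PAGEBLOCK_NR_PAGES) >> PAGEBLOCK_ORDER;

        printf("MIGRATE_RESERVE spans %lu pageblock(s)\n", reserve);
        return 0;
    }
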
4130 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, in memmap_init_zone() argument
4136 struct zone *z; in memmap_init_zone()
4141 z = &NODE_DATA(nid)->node_zones[zone]; in memmap_init_zone()
4155 set_page_links(page, zone, nid, pfn); in memmap_init_zone()
4156 mminit_verify_page_links(page, zone, nid, pfn); in memmap_init_zone()
4183 if (!is_highmem_idx(zone)) in memmap_init_zone()
4189 static void __meminit zone_init_free_lists(struct zone *zone) in zone_init_free_lists() argument
4193 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); in zone_init_free_lists()
4194 zone->free_area[order].nr_free = 0; in zone_init_free_lists()
4199 #define memmap_init(size, nid, zone, start_pfn) \ argument
4200 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
4203 static int zone_batchsize(struct zone *zone) in zone_batchsize() argument
4214 batch = zone->managed_pages / 1024; in zone_batchsize()
4319 static void pageset_set_high_and_batch(struct zone *zone, in pageset_set_high_and_batch() argument
4324 (zone->managed_pages / in pageset_set_high_and_batch()
4327 pageset_set_batch(pcp, zone_batchsize(zone)); in pageset_set_high_and_batch()
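Note: only the first step of zone_batchsize() (line 4214) is visible above;
the rest of the calculation below is reconstructed from the same function
in this kernel era: roughly 0.1% of the zone, capped at 512KB worth of
pages, quartered, then rounded to (2^n - 1). pageset_set_high_and_batch()
then derives pcp->high as six times the batch when percpu_pagelist_fraction
is unset. A worked example (standalone C; the zone size is assumed):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    static unsigned long rounddown_pow_of_two(unsigned long n)
    {
        while (n & (n - 1))
            n &= n - 1;                /* clear the lowest set bit */
        return n;
    }

    int main(void)
    {
        unsigned long managed_pages = 1048576;   /* a 4GB zone, assumed */
        unsigned long batch;

        batch = managed_pages / 1024;
        if (batch * PAGE_SIZE > 512 * 1024)
            batch = (512 * 1024) / PAGE_SIZE;
        batch /= 4;
        if (batch < 1)
            batch = 1;
        batch = rounddown_pow_of_two(batch + batch / 2) - 1;

        /* prints batch = 31, high = 186 for this zone size */
        printf("pcp batch = %lu, high = %lu\n", batch, 6 * batch);
        return 0;
    }
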
4330 static void __meminit zone_pageset_init(struct zone *zone, int cpu) in zone_pageset_init() argument
4332 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu); in zone_pageset_init()
4335 pageset_set_high_and_batch(zone, pcp); in zone_pageset_init()
4338 static void __meminit setup_zone_pageset(struct zone *zone) in setup_zone_pageset() argument
4341 zone->pageset = alloc_percpu(struct per_cpu_pageset); in setup_zone_pageset()
4343 zone_pageset_init(zone, cpu); in setup_zone_pageset()
4352 struct zone *zone; in setup_per_cpu_pageset() local
4354 for_each_populated_zone(zone) in setup_per_cpu_pageset()
4355 setup_zone_pageset(zone); in setup_per_cpu_pageset()
4359 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) in zone_wait_table_init() argument
4368 zone->wait_table_hash_nr_entries = in zone_wait_table_init()
4370 zone->wait_table_bits = in zone_wait_table_init()
4371 wait_table_bits(zone->wait_table_hash_nr_entries); in zone_wait_table_init()
4372 alloc_size = zone->wait_table_hash_nr_entries in zone_wait_table_init()
4376 zone->wait_table = (wait_queue_head_t *) in zone_wait_table_init()
4378 alloc_size, zone->zone_pgdat->node_id); in zone_wait_table_init()
4390 zone->wait_table = vmalloc(alloc_size); in zone_wait_table_init()
4392 if (!zone->wait_table) in zone_wait_table_init()
4395 for (i = 0; i < zone->wait_table_hash_nr_entries; ++i) in zone_wait_table_init()
4396 init_waitqueue_head(zone->wait_table + i); in zone_wait_table_init()
4401 static __meminit void zone_pcp_init(struct zone *zone) in zone_pcp_init() argument
4408 zone->pageset = &boot_pageset; in zone_pcp_init()
4410 if (populated_zone(zone)) in zone_pcp_init()
4412 zone->name, zone->present_pages, in zone_pcp_init()
4413 zone_batchsize(zone)); in zone_pcp_init()
4416 int __meminit init_currently_empty_zone(struct zone *zone, in init_currently_empty_zone() argument
4421 struct pglist_data *pgdat = zone->zone_pgdat; in init_currently_empty_zone()
4423 ret = zone_wait_table_init(zone, size); in init_currently_empty_zone()
4426 pgdat->nr_zones = zone_idx(zone) + 1; in init_currently_empty_zone()
4428 zone->zone_start_pfn = zone_start_pfn; in init_currently_empty_zone()
4433 (unsigned long)zone_idx(zone), in init_currently_empty_zone()
4436 zone_init_free_lists(zone); in init_currently_empty_zone()
4778 struct zone *zone, in setup_usemap() argument
4783 zone->pageblock_flags = NULL; in setup_usemap()
4785 zone->pageblock_flags = in setup_usemap()
4790 static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone, in setup_usemap() argument
4879 struct zone *zone = pgdat->node_zones + j; in free_area_init_core() local
4920 zone->spanned_pages = size; in free_area_init_core()
4921 zone->present_pages = realsize; in free_area_init_core()
4927 zone->managed_pages = is_highmem_idx(j) ? realsize : freesize; in free_area_init_core()
4929 zone->node = nid; in free_area_init_core()
4930 zone->min_unmapped_pages = (freesize*sysctl_min_unmapped_ratio) in free_area_init_core()
4932 zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100; in free_area_init_core()
4934 zone->name = zone_names[j]; in free_area_init_core()
4935 spin_lock_init(&zone->lock); in free_area_init_core()
4936 spin_lock_init(&zone->lru_lock); in free_area_init_core()
4937 zone_seqlock_init(zone); in free_area_init_core()
4938 zone->zone_pgdat = pgdat; in free_area_init_core()
4939 zone_pcp_init(zone); in free_area_init_core()
4942 mod_zone_page_state(zone, NR_ALLOC_BATCH, zone->managed_pages); in free_area_init_core()
4944 lruvec_init(&zone->lruvec); in free_area_init_core()
4949 setup_usemap(pgdat, zone, zone_start_pfn, size); in free_area_init_core()
4950 ret = init_currently_empty_zone(zone, zone_start_pfn, in free_area_init_core()
5326 struct zone *zone = &pgdat->node_zones[zone_type]; in check_for_memory() local
5327 if (populated_zone(zone)) { in check_for_memory()
5629 struct zone *zone = pgdat->node_zones + i; in calculate_totalreserve_pages() local
5634 if (zone->lowmem_reserve[j] > max) in calculate_totalreserve_pages()
5635 max = zone->lowmem_reserve[j]; in calculate_totalreserve_pages()
5639 max += high_wmark_pages(zone); in calculate_totalreserve_pages()
5641 if (max > zone->managed_pages) in calculate_totalreserve_pages()
5642 max = zone->managed_pages; in calculate_totalreserve_pages()
5653 zone->dirty_balance_reserve = max; in calculate_totalreserve_pages()
5673 struct zone *zone = pgdat->node_zones + j; in setup_per_zone_lowmem_reserve() local
5674 unsigned long managed_pages = zone->managed_pages; in setup_per_zone_lowmem_reserve()
5676 zone->lowmem_reserve[j] = 0; in setup_per_zone_lowmem_reserve()
5680 struct zone *lower_zone; in setup_per_zone_lowmem_reserve()
5704 struct zone *zone; in __setup_per_zone_wmarks() local
5708 for_each_zone(zone) { in __setup_per_zone_wmarks()
5709 if (!is_highmem(zone)) in __setup_per_zone_wmarks()
5710 lowmem_pages += zone->managed_pages; in __setup_per_zone_wmarks()
5713 for_each_zone(zone) { in __setup_per_zone_wmarks()
5716 spin_lock_irqsave(&zone->lock, flags); in __setup_per_zone_wmarks()
5717 min = (u64)pages_min * zone->managed_pages; in __setup_per_zone_wmarks()
5719 low = (u64)pages_low * zone->managed_pages; in __setup_per_zone_wmarks()
5722 if (is_highmem(zone)) { in __setup_per_zone_wmarks()
5734 min_pages = zone->managed_pages / 1024; in __setup_per_zone_wmarks()
5736 zone->watermark[WMARK_MIN] = min_pages; in __setup_per_zone_wmarks()
5742 zone->watermark[WMARK_MIN] = min; in __setup_per_zone_wmarks()
5745 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + in __setup_per_zone_wmarks()
5747 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + in __setup_per_zone_wmarks()
5750 __mod_zone_page_state(zone, NR_ALLOC_BATCH, in __setup_per_zone_wmarks()
5751 high_wmark_pages(zone) - low_wmark_pages(zone) - in __setup_per_zone_wmarks()
5752 atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH])); in __setup_per_zone_wmarks()
5754 setup_zone_migrate_reserve(zone); in __setup_per_zone_wmarks()
5755 spin_unlock_irqrestore(&zone->lock, flags); in __setup_per_zone_wmarks()
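Note: __setup_per_zone_wmarks() above splits pages_min across zones in
proportion to managed_pages, then places the low and high marks 25% and
50% above the min mark. (This tree also computes a separate pages_low
product at line 5719, apparently an out-of-tree extra-free-pages knob; it
is left out of the sketch below.) The proportional split (standalone C;
values are illustrative):

    #include <stdio.h>

    int main(void)
    {
        unsigned long long pages_min = 16384;      /* from min_free_kbytes */
        unsigned long long lowmem_pages = 1048576; /* all non-highmem pages */
        unsigned long long managed_pages = 262144; /* this zone's share */

        unsigned long long min = pages_min * managed_pages / lowmem_pages;
        unsigned long long low = min + (min >> 2);
        unsigned long long high = min + (min >> 1);

        printf("WMARK_MIN=%llu WMARK_LOW=%llu WMARK_HIGH=%llu\n",
               min, low, high);
        return 0;
    }
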
5797 static void __meminit calculate_zone_inactive_ratio(struct zone *zone) in calculate_zone_inactive_ratio() argument
5802 gb = zone->managed_pages >> (30 - PAGE_SHIFT); in calculate_zone_inactive_ratio()
5808 zone->inactive_ratio = ratio; in calculate_zone_inactive_ratio()
5813 struct zone *zone; in setup_per_zone_inactive_ratio() local
5815 for_each_zone(zone) in setup_per_zone_inactive_ratio()
5816 calculate_zone_inactive_ratio(zone); in setup_per_zone_inactive_ratio()
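Note: calculate_zone_inactive_ratio() above sizes the zone in whole
gigabytes (line 5802) and, per the same function, sets the target
active:inactive anon ratio to int_sqrt(10 * gb), with a floor of 1. So a
1GB zone aims for 3:1 and a 64GB zone for roughly 25:1. A sketch using
floating-point sqrt in place of the kernel's int_sqrt (standalone C, link
with -lm):

    #include <stdio.h>
    #include <math.h>

    int main(void)
    {
        unsigned long gbs[] = { 1, 4, 16, 64, 256 };
        unsigned int i;

        for (i = 0; i < sizeof(gbs) / sizeof(gbs[0]); i++) {
            unsigned int ratio = (unsigned int)sqrt(10.0 * gbs[i]);
            if (!ratio)
                ratio = 1;
            printf("%3lu GB zone -> inactive_ratio %u\n", gbs[i], ratio);
        }
        return 0;
    }
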
5894 struct zone *zone; in sysctl_min_unmapped_ratio_sysctl_handler() local
5901 for_each_zone(zone) in sysctl_min_unmapped_ratio_sysctl_handler()
5902 zone->min_unmapped_pages = (zone->managed_pages * in sysctl_min_unmapped_ratio_sysctl_handler()
5910 struct zone *zone; in sysctl_min_slab_ratio_sysctl_handler() local
5917 for_each_zone(zone) in sysctl_min_slab_ratio_sysctl_handler()
5918 zone->min_slab_pages = (zone->managed_pages * in sysctl_min_slab_ratio_sysctl_handler()
5949 struct zone *zone; in percpu_pagelist_fraction_sysctl_handler() local
5972 for_each_populated_zone(zone) { in percpu_pagelist_fraction_sysctl_handler()
5976 pageset_set_high_and_batch(zone, in percpu_pagelist_fraction_sysctl_handler()
5977 per_cpu_ptr(zone->pageset, cpu)); in percpu_pagelist_fraction_sysctl_handler()
6096 static inline unsigned long *get_pageblock_bitmap(struct zone *zone, in get_pageblock_bitmap() argument
6102 return zone->pageblock_flags; in get_pageblock_bitmap()
6106 static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn) in pfn_to_bitidx() argument
6112 pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages); in pfn_to_bitidx()
6130 struct zone *zone; in get_pfnblock_flags_mask() local
6135 zone = page_zone(page); in get_pfnblock_flags_mask()
6136 bitmap = get_pageblock_bitmap(zone, pfn); in get_pfnblock_flags_mask()
6137 bitidx = pfn_to_bitidx(zone, pfn); in get_pfnblock_flags_mask()
6159 struct zone *zone; in set_pfnblock_flags_mask() local
6166 zone = page_zone(page); in set_pfnblock_flags_mask()
6167 bitmap = get_pageblock_bitmap(zone, pfn); in set_pfnblock_flags_mask()
6168 bitidx = pfn_to_bitidx(zone, pfn); in set_pfnblock_flags_mask()
6172 VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page); in set_pfnblock_flags_mask()
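Note: the pageblock-flags helpers above locate a pageblock's bits by making
the PFN relative to the zone's pageblock-aligned start (line 6112) and then
scaling by the per-block bit count; the scaling step is reconstructed from
the same function, with 4 bits per block (migratetype plus the compaction
skip bit) in this era. A sketch (standalone C; PFNs are made up):

    #include <stdio.h>

    #define PAGEBLOCK_ORDER    9UL
    #define PAGEBLOCK_NR_PAGES (1UL << PAGEBLOCK_ORDER)
    #define NR_PAGEBLOCK_BITS  4UL

    int main(void)
    {
        unsigned long zone_start_pfn = 70000;   /* hypothetical */
        unsigned long pfn = 91234;              /* hypothetical */

        unsigned long base = zone_start_pfn & ~(PAGEBLOCK_NR_PAGES - 1);
        unsigned long bitidx =
            ((pfn - base) >> PAGEBLOCK_ORDER) * NR_PAGEBLOCK_BITS;

        printf("pfn %lu -> bit index %lu in zone->pageblock_flags\n",
               pfn, bitidx);
        return 0;
    }
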
6195 bool has_unmovable_pages(struct zone *zone, struct page *page, int count, in has_unmovable_pages() argument
6205 if (zone_idx(zone) == ZONE_MOVABLE) in has_unmovable_pages()
6272 struct zone *zone; in is_pageblock_removable_nolock() local
6285 zone = page_zone(page); in is_pageblock_removable_nolock()
6287 if (!zone_spans_pfn(zone, pfn)) in is_pageblock_removable_nolock()
6290 return !has_unmovable_pages(zone, page, 0, true); in is_pageblock_removable_nolock()
6338 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, in __alloc_contig_migrate_range()
6382 .zone = page_zone(pfn_to_page(start)), in alloc_contig_range()
6498 void __meminit zone_pcp_update(struct zone *zone) in zone_pcp_update() argument
6503 pageset_set_high_and_batch(zone, in zone_pcp_update()
6504 per_cpu_ptr(zone->pageset, cpu)); in zone_pcp_update()
6509 void zone_pcp_reset(struct zone *zone) in zone_pcp_reset() argument
6517 if (zone->pageset != &boot_pageset) { in zone_pcp_reset()
6519 pset = per_cpu_ptr(zone->pageset, cpu); in zone_pcp_reset()
6520 drain_zonestat(zone, pset); in zone_pcp_reset()
6522 free_percpu(zone->pageset); in zone_pcp_reset()
6523 zone->pageset = &boot_pageset; in zone_pcp_reset()
6536 struct zone *zone; in __offline_isolated_pages() local
6546 zone = page_zone(pfn_to_page(pfn)); in __offline_isolated_pages()
6547 spin_lock_irqsave(&zone->lock, flags); in __offline_isolated_pages()
6574 zone->free_area[order].nr_free--; in __offline_isolated_pages()
6579 spin_unlock_irqrestore(&zone->lock, flags); in __offline_isolated_pages()
6586 struct zone *zone = page_zone(page); in is_free_buddy_page() local
6591 spin_lock_irqsave(&zone->lock, flags); in is_free_buddy_page()
6598 spin_unlock_irqrestore(&zone->lock, flags); in is_free_buddy_page()