
Searched refs:zone (Results 1 – 25 of 28) sorted by relevance

/mm/
compaction.c
88 unsigned long end_pfn, struct zone *zone) in pageblock_pfn_to_page() argument
101 if (page_zone(start_page) != zone) in pageblock_pfn_to_page()
129 static void __reset_isolation_suitable(struct zone *zone) in __reset_isolation_suitable() argument
131 unsigned long start_pfn = zone->zone_start_pfn; in __reset_isolation_suitable()
132 unsigned long end_pfn = zone_end_pfn(zone); in __reset_isolation_suitable()
135 zone->compact_cached_migrate_pfn[0] = start_pfn; in __reset_isolation_suitable()
136 zone->compact_cached_migrate_pfn[1] = start_pfn; in __reset_isolation_suitable()
137 zone->compact_cached_free_pfn = end_pfn; in __reset_isolation_suitable()
138 zone->compact_blockskip_flush = false; in __reset_isolation_suitable()
150 if (zone != page_zone(page)) in __reset_isolation_suitable()
[all …]
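
The hits above give enough of __reset_isolation_suitable() to see the pattern: when compaction's pageblock-skip state is stale, the cached migrate-scanner positions rewind to the bottom of the zone and the cached free-scanner position rewinds to the top. A minimal userspace sketch of that reset; only the fields named in the hits are modeled, and the values in main() are made up for the demo:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's struct zone, reduced to the fields the
 * compaction.c hits above actually touch. */
struct zone {
	unsigned long zone_start_pfn;
	unsigned long spanned_pages;
	unsigned long compact_cached_migrate_pfn[2]; /* one slot per scan mode */
	unsigned long compact_cached_free_pfn;
	bool compact_blockskip_flush;
};

/* In the kernel, zone_end_pfn() is zone_start_pfn + spanned_pages. */
static unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

/* Model of __reset_isolation_suitable(): both migrate scanners restart
 * at the zone's first pfn, the free scanner restarts at its last. */
static void reset_isolation_suitable(struct zone *zone)
{
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);

	zone->compact_cached_migrate_pfn[0] = start_pfn;
	zone->compact_cached_migrate_pfn[1] = start_pfn;
	zone->compact_cached_free_pfn = end_pfn;
	zone->compact_blockskip_flush = false;
}

int main(void)
{
	struct zone z = { .zone_start_pfn = 0x1000, .spanned_pages = 0x8000 };

	reset_isolation_suitable(&z);
	printf("migrate scanners at %#lx, free scanner at %#lx\n",
	       z.compact_cached_migrate_pfn[0], z.compact_cached_free_pfn);
	return 0;
}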

vmstat.c
89 int calculate_pressure_threshold(struct zone *zone) in calculate_pressure_threshold() argument
102 watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone); in calculate_pressure_threshold()
113 int calculate_normal_threshold(struct zone *zone) in calculate_normal_threshold() argument
148 mem = zone->managed_pages >> (27 - PAGE_SHIFT); in calculate_normal_threshold()
165 struct zone *zone; in refresh_zone_stat_thresholds() local
169 for_each_populated_zone(zone) { in refresh_zone_stat_thresholds()
172 threshold = calculate_normal_threshold(zone); in refresh_zone_stat_thresholds()
175 per_cpu_ptr(zone->pageset, cpu)->stat_threshold in refresh_zone_stat_thresholds()
183 tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone); in refresh_zone_stat_thresholds()
186 zone->percpu_drift_mark = high_wmark_pages(zone) + in refresh_zone_stat_thresholds()
[all …]
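
One detail worth unpacking from the calculate_normal_threshold() hit: mem = zone->managed_pages >> (27 - PAGE_SHIFT) expresses the zone size in 2^27-byte (128 MB) units, since a page is 2^PAGE_SHIFT bytes. A worked example under the common assumption of 4 KB pages (PAGE_SHIFT = 12):

#include <stdio.h>

#define PAGE_SHIFT 12 /* assumption: 4 KB pages */

int main(void)
{
	/* 1 GB worth of 4 KB pages. */
	unsigned long managed_pages = (1UL << 30) >> PAGE_SHIFT;

	/* Same conversion as the vmstat.c hit: pages -> 128 MB units.
	 * 2^27 bytes per unit / 2^12 bytes per page = 2^15 pages per unit. */
	unsigned long mem = managed_pages >> (27 - PAGE_SHIFT);

	/* Prints: 262144 pages = 8 x 128 MB */
	printf("%lu pages = %lu x 128 MB\n", managed_pages, mem);
	return 0;
}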

page_alloc.c
262 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) in page_outside_zone_boundaries() argument
270 seq = zone_span_seqbegin(zone); in page_outside_zone_boundaries()
271 start_pfn = zone->zone_start_pfn; in page_outside_zone_boundaries()
272 sp = zone->spanned_pages; in page_outside_zone_boundaries()
273 if (!zone_spans_pfn(zone, pfn)) in page_outside_zone_boundaries()
275 } while (zone_span_seqretry(zone, seq)); in page_outside_zone_boundaries()
279 pfn, zone_to_nid(zone), zone->name, in page_outside_zone_boundaries()
285 static int page_is_consistent(struct zone *zone, struct page *page) in page_is_consistent() argument
289 if (zone != page_zone(page)) in page_is_consistent()
297 static int bad_range(struct zone *zone, struct page *page) in bad_range() argument
[all …]
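
page_outside_zone_boundaries() reads the zone span inside a zone_span_seqbegin()/zone_span_seqretry() loop so that a concurrent span resize (memory hotplug) forces a re-read. The kernel's version is a real SMP-safe seqlock; the single-threaded sketch below keeps only the control flow, with a plain counter standing in for the seqcount:

#include <stdio.h>

struct zone {
	unsigned long zone_start_pfn;
	unsigned long spanned_pages;
	unsigned int span_seq; /* bumped around every span update */
};

/* Reader side of the seqlock pattern: remember the counter... */
static unsigned int zone_span_seqbegin(const struct zone *zone)
{
	return zone->span_seq;
}

/* ...and retry if a writer bumped it while we were reading. */
static int zone_span_seqretry(const struct zone *zone, unsigned int seq)
{
	return zone->span_seq != seq;
}

/* Model of page_outside_zone_boundaries(): re-read the span until it
 * is known not to have changed mid-check. */
static int pfn_outside_zone(struct zone *zone, unsigned long pfn)
{
	int ret;
	unsigned int seq;
	unsigned long start_pfn, sp;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		ret = !(pfn >= start_pfn && pfn < start_pfn + sp);
	} while (zone_span_seqretry(zone, seq));

	return ret;
}

int main(void)
{
	struct zone z = { .zone_start_pfn = 100, .spanned_pages = 50 };

	printf("pfn 120 outside? %d\n", pfn_outside_zone(&z, 120)); /* 0 */
	printf("pfn 200 outside? %d\n", pfn_outside_zone(&z, 200)); /* 1 */
	return 0;
}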

memory_hotplug.c
250 struct zone *zone; in register_page_bootmem_info_node() local
258 zone = &pgdat->node_zones[0]; in register_page_bootmem_info_node()
259 for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) { in register_page_bootmem_info_node()
260 if (zone_is_initialized(zone)) { in register_page_bootmem_info_node()
261 nr_pages = zone->wait_table_hash_nr_entries in register_page_bootmem_info_node()
264 page = virt_to_page(zone->wait_table); in register_page_bootmem_info_node()
288 static void __meminit grow_zone_span(struct zone *zone, unsigned long start_pfn, in grow_zone_span() argument
293 zone_span_writelock(zone); in grow_zone_span()
295 old_zone_end_pfn = zone_end_pfn(zone); in grow_zone_span()
296 if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn) in grow_zone_span()
[all …]

vmscan.c
162 static unsigned long zone_reclaimable_pages(struct zone *zone) in zone_reclaimable_pages() argument
166 nr = zone_page_state(zone, NR_ACTIVE_FILE) + in zone_reclaimable_pages()
167 zone_page_state(zone, NR_INACTIVE_FILE); in zone_reclaimable_pages()
170 nr += zone_page_state(zone, NR_ACTIVE_ANON) + in zone_reclaimable_pages()
171 zone_page_state(zone, NR_INACTIVE_ANON); in zone_reclaimable_pages()
176 bool zone_reclaimable(struct zone *zone) in zone_reclaimable() argument
178 return zone_page_state(zone, NR_PAGES_SCANNED) < in zone_reclaimable()
179 zone_reclaimable_pages(zone) * 6; in zone_reclaimable()
846 struct zone *zone, in shrink_page_list() argument
884 VM_BUG_ON_PAGE(page_zone(page) != zone, page); in shrink_page_list()
[all …]
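
The vmscan.c hits contain both halves of the zone-reclaimability test: zone_reclaimable_pages() sums the file LRU counters, adding the anon ones only when they can actually be swapped, and zone_reclaimable() treats the zone as a lost cause once six times that many pages have been scanned. A compact model; the swap-availability check is an assumption standing in for the kernel's get_nr_swap_pages() test:

#include <stdbool.h>
#include <stdio.h>

enum zone_stat { NR_ACTIVE_FILE, NR_INACTIVE_FILE, NR_ACTIVE_ANON,
		 NR_INACTIVE_ANON, NR_PAGES_SCANNED, NR_STATS };

struct zone {
	unsigned long stats[NR_STATS]; /* stand-in for zone_page_state() */
};

static bool swap_available = true; /* assumption: swap space exists */

static unsigned long zone_reclaimable_pages(const struct zone *zone)
{
	unsigned long nr;

	nr = zone->stats[NR_ACTIVE_FILE] + zone->stats[NR_INACTIVE_FILE];
	if (swap_available) /* anon pages only help if swappable */
		nr += zone->stats[NR_ACTIVE_ANON] + zone->stats[NR_INACTIVE_ANON];
	return nr;
}

/* A zone counts as reclaimable until 6x its reclaimable pages have
 * been scanned without enough progress. */
static bool zone_reclaimable(const struct zone *zone)
{
	return zone->stats[NR_PAGES_SCANNED] < zone_reclaimable_pages(zone) * 6;
}

int main(void)
{
	struct zone z = { .stats = { [NR_ACTIVE_FILE] = 100,
				     [NR_INACTIVE_FILE] = 100,
				     [NR_PAGES_SCANNED] = 1500 } };

	swap_available = false;
	/* 1500 scanned >= 200 * 6, so the zone reads as unreclaimable: 0 */
	printf("reclaimable? %d\n", zone_reclaimable(&z));
	return 0;
}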

mmzone.c
29 struct zone *next_zone(struct zone *zone) in next_zone() argument
31 pg_data_t *pgdat = zone->zone_pgdat; in next_zone()
33 if (zone < pgdat->node_zones + MAX_NR_ZONES - 1) in next_zone()
34 zone++; in next_zone()
38 zone = pgdat->node_zones; in next_zone()
40 zone = NULL; in next_zone()
42 return zone; in next_zone()
58 struct zone **zone) in next_zones_zonelist() argument
69 (z->zone && !zref_in_nodemask(z, nodes))) in next_zones_zonelist()
72 *zone = zonelist_zone(z); in next_zones_zonelist()
[all …]
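
next_zone() in mmzone.c is the helper behind for_each_zone(): it walks the zones of one node, then hops to the first zone of the next node, and returns NULL when both run out. A userspace model; next_online_pgdat() is not shown in the hits and is assumed here to be a plain array walk:

#include <stdio.h>

#define MAX_NR_ZONES 4
#define NR_NODES 2

struct zone {
	const char *name;
	struct pglist_data *zone_pgdat; /* back-pointer to owning node */
};

typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	int node_id;
} pg_data_t;

static pg_data_t nodes[NR_NODES];

/* Assumption: the kernel walks online nodes; here it is an array. */
static pg_data_t *next_online_pgdat(pg_data_t *pgdat)
{
	int next = pgdat->node_id + 1;
	return next < NR_NODES ? &nodes[next] : NULL;
}

/* Model of next_zone(): advance within the node's zone array, then
 * move to the first zone of the next node, then stop. */
static struct zone *next_zone(struct zone *zone)
{
	pg_data_t *pgdat = zone->zone_pgdat;

	if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
		return zone + 1;

	pgdat = next_online_pgdat(pgdat);
	return pgdat ? pgdat->node_zones : NULL;
}

int main(void)
{
	const char *names[MAX_NR_ZONES] = { "DMA", "DMA32", "Normal", "Movable" };

	for (int n = 0; n < NR_NODES; n++) {
		nodes[n].node_id = n;
		for (int z = 0; z < MAX_NR_ZONES; z++) {
			nodes[n].node_zones[z].name = names[z];
			nodes[n].node_zones[z].zone_pgdat = &nodes[n];
		}
	}

	/* for_each_zone, in effect: visits all 8 zones across both nodes */
	for (struct zone *z = nodes[0].node_zones; z; z = next_zone(z))
		printf("node %d zone %s\n", z->zone_pgdat->node_id, z->name);
	return 0;
}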

swap.c
54 struct zone *zone = page_zone(page); in __page_cache_release() local
58 spin_lock_irqsave(&zone->lru_lock, flags); in __page_cache_release()
59 lruvec = mem_cgroup_page_lruvec(page, zone); in __page_cache_release()
63 spin_unlock_irqrestore(&zone->lru_lock, flags); in __page_cache_release()
415 struct zone *zone = NULL; in pagevec_lru_move_fn() local
421 struct zone *pagezone = page_zone(page); in pagevec_lru_move_fn()
423 if (pagezone != zone) { in pagevec_lru_move_fn()
424 if (zone) in pagevec_lru_move_fn()
425 spin_unlock_irqrestore(&zone->lru_lock, flags); in pagevec_lru_move_fn()
426 zone = pagezone; in pagevec_lru_move_fn()
[all …]
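
The pagevec_lru_move_fn() hits show the LRU lock-batching idiom: while walking a batch of pages, the zone's lru_lock is only dropped and re-taken when the next page belongs to a different zone, so runs of same-zone pages pay for one lock round trip. A sketch of that idiom; pages and zones are simplified, and a pthread mutex stands in for the irq-saving spinlock:

#include <pthread.h>
#include <stdio.h>

struct zone {
	const char *name;
	pthread_mutex_t lru_lock;
};

struct page {
	struct zone *zone; /* stand-in for page_zone(page) */
};

/* Model of pagevec_lru_move_fn(): hold one zone's lru_lock across
 * consecutive pages from that zone, switching only at zone changes. */
static void pagevec_move(struct page **pages, int nr)
{
	struct zone *zone = NULL;

	for (int i = 0; i < nr; i++) {
		struct zone *pagezone = pages[i]->zone;

		if (pagezone != zone) {
			if (zone)
				pthread_mutex_unlock(&zone->lru_lock);
			zone = pagezone;
			pthread_mutex_lock(&zone->lru_lock);
		}
		printf("moving page %d on zone %s\n", i, zone->name);
	}
	if (zone)
		pthread_mutex_unlock(&zone->lru_lock);
}

int main(void)
{
	struct zone a = { "Normal", PTHREAD_MUTEX_INITIALIZER };
	struct zone b = { "HighMem", PTHREAD_MUTEX_INITIALIZER };
	struct page p[4] = { { &a }, { &a }, { &b }, { &a } };
	struct page *batch[4] = { &p[0], &p[1], &p[2], &p[3] };

	pagevec_move(batch, 4); /* locks switch a -> b -> a, not per page */
	return 0;
}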

page_isolation.c
14 struct zone *zone; in set_migratetype_isolate() local
20 zone = page_zone(page); in set_migratetype_isolate()
22 spin_lock_irqsave(&zone->lock, flags); in set_migratetype_isolate()
48 if (!has_unmovable_pages(zone, page, arg.pages_found, in set_migratetype_isolate()
63 zone->nr_isolate_pageblock++; in set_migratetype_isolate()
64 nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE); in set_migratetype_isolate()
66 __mod_zone_freepage_state(zone, -nr_pages, migratetype); in set_migratetype_isolate()
69 spin_unlock_irqrestore(&zone->lock, flags); in set_migratetype_isolate()
77 struct zone *zone; in unset_migratetype_isolate() local
84 zone = page_zone(page); in unset_migratetype_isolate()
[all …]

workingset.c
155 static void *pack_shadow(unsigned long eviction, struct zone *zone) in pack_shadow() argument
157 eviction = (eviction << NODES_SHIFT) | zone_to_nid(zone); in pack_shadow()
158 eviction = (eviction << ZONES_SHIFT) | zone_idx(zone); in pack_shadow()
165 struct zone **zone, in unpack_shadow() argument
181 *zone = NODE_DATA(nid)->node_zones + zid; in unpack_shadow()
183 refault = atomic_long_read(&(*zone)->inactive_age); in unpack_shadow()
215 struct zone *zone = page_zone(page); in workingset_eviction() local
218 eviction = atomic_long_inc_return(&zone->inactive_age); in workingset_eviction()
219 return pack_shadow(eviction, zone); in workingset_eviction()
234 struct zone *zone; in workingset_refault() local
[all …]
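
workingset.c packs an eviction counter together with the evicted page's node and zone index into a single unsigned long shadow entry, then unpacks it on refault to find the zone again. That bit-packing round-trips cleanly in a standalone sketch; the NODES_SHIFT and ZONES_SHIFT values are assumptions, since the kernel sizes them from the config:

#include <stdio.h>

#define NODES_SHIFT 6 /* assumption: up to 64 nodes */
#define ZONES_SHIFT 2 /* assumption: up to 4 zones per node */

/* Model of pack_shadow(): push nid and zid into the low bits, below
 * the eviction counter. */
static unsigned long pack_shadow(unsigned long eviction, int nid, int zid)
{
	eviction = (eviction << NODES_SHIFT) | nid;
	eviction = (eviction << ZONES_SHIFT) | zid;
	return eviction;
}

/* Model of unpack_shadow(): peel the fields off in reverse order. */
static void unpack_shadow(unsigned long shadow, unsigned long *eviction,
			  int *nid, int *zid)
{
	*zid = shadow & ((1UL << ZONES_SHIFT) - 1);
	shadow >>= ZONES_SHIFT;
	*nid = shadow & ((1UL << NODES_SHIFT) - 1);
	*eviction = shadow >> NODES_SHIFT;
}

int main(void)
{
	unsigned long shadow = pack_shadow(12345, 3, 2);
	unsigned long eviction;
	int nid, zid;

	unpack_shadow(shadow, &eviction, &nid, &zid);
	printf("eviction=%lu nid=%d zid=%d\n", eviction, nid, zid);
	return 0;
}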

mm_init.c
33 struct zone *zone; in mminit_verify_zonelist() local
45 zone = &pgdat->node_zones[zoneid]; in mminit_verify_zonelist()
46 if (!populated_zone(zone)) in mminit_verify_zonelist()
52 zone->name); in mminit_verify_zonelist()
55 for_each_zone_zonelist(zone, z, zonelist, zoneid) { in mminit_verify_zonelist()
58 zone->node, zone->name); in mminit_verify_zonelist()
60 printk(KERN_CONT "0:%s ", zone->name); in mminit_verify_zonelist()
133 void __meminit mminit_verify_page_links(struct page *page, enum zone_type zone, in mminit_verify_page_links() argument
137 BUG_ON(page_zonenum(page) != zone); in mminit_verify_page_links()

oom_kill.c
202 struct zone *zone; in constrained_alloc() local
234 for_each_zone_zonelist_nodemask(zone, z, zonelist, in constrained_alloc()
236 if (!cpuset_zone_allowed_softwall(zone, gfp_mask)) in constrained_alloc()
580 struct zone *zone; in oom_zonelist_trylock() local
584 for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) in oom_zonelist_trylock()
585 if (test_bit(ZONE_OOM_LOCKED, &zone->flags)) { in oom_zonelist_trylock()
594 for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) in oom_zonelist_trylock()
595 set_bit(ZONE_OOM_LOCKED, &zone->flags); in oom_zonelist_trylock()
610 struct zone *zone; in oom_zonelist_unlock() local
613 for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) in oom_zonelist_unlock()
[all …]
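
oom_zonelist_trylock()/oom_zonelist_unlock() implement an all-or-nothing bit protocol over the zonelist: first scan every zone for ZONE_OOM_LOCKED and bail if any is already set, then set the bit on all of them; unlock clears them again. A sketch with plain bit flags; the serialization the kernel gets from a spinlock around this sequence is omitted:

#include <stdbool.h>
#include <stdio.h>

#define ZONE_OOM_LOCKED (1UL << 0)

struct zone { unsigned long flags; };

/* Model of oom_zonelist_trylock(): either every zone in the list gets
 * the OOM bit, or none is touched. */
static bool oom_trylock(struct zone **zones, int nr)
{
	for (int i = 0; i < nr; i++)
		if (zones[i]->flags & ZONE_OOM_LOCKED)
			return false; /* someone else is OOM-killing here */

	for (int i = 0; i < nr; i++)
		zones[i]->flags |= ZONE_OOM_LOCKED;
	return true;
}

/* Model of oom_zonelist_unlock(). */
static void oom_unlock(struct zone **zones, int nr)
{
	for (int i = 0; i < nr; i++)
		zones[i]->flags &= ~ZONE_OOM_LOCKED;
}

int main(void)
{
	struct zone a = { 0 }, b = { 0 };
	struct zone *list[] = { &a, &b };

	printf("first trylock: %d\n", oom_trylock(list, 2));  /* 1 */
	printf("second trylock: %d\n", oom_trylock(list, 2)); /* 0 */
	oom_unlock(list, 2);
	return 0;
}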

mlock.c
176 struct zone *zone = page_zone(page); in munlock_vma_page() local
186 spin_lock_irq(&zone->lru_lock); in munlock_vma_page()
192 __mod_zone_page_state(zone, NR_MLOCK, -nr_pages); in munlock_vma_page()
195 spin_unlock_irq(&zone->lru_lock); in munlock_vma_page()
202 spin_unlock_irq(&zone->lru_lock); in munlock_vma_page()
332 static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone) in __munlock_pagevec() argument
343 spin_lock_irq(&zone->lru_lock); in __munlock_pagevec()
369 __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked); in __munlock_pagevec()
370 spin_unlock_irq(&zone->lru_lock); in __munlock_pagevec()
489 struct zone *zone; in munlock_vma_pages_range() local
[all …]

internal.h
101 extern bool zone_reclaimable(struct zone *zone);
172 struct zone *zone; member
343 enum zone_type zone, unsigned long nid, unsigned long pfn);
358 enum zone_type zone, unsigned long nid, unsigned long pfn) in mminit_verify_page_links() argument
397 unsigned long reclaim_clean_pages_from_list(struct zone *zone,

memcontrol.c
670 mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone) in mem_cgroup_zone_zoneinfo() argument
672 int nid = zone_to_nid(zone); in mem_cgroup_zone_zoneinfo()
673 int zid = zone_idx(zone); in mem_cgroup_zone_zoneinfo()
1228 mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone); in mem_cgroup_iter()
1324 struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone, in mem_cgroup_zone_lruvec() argument
1331 lruvec = &zone->lruvec; in mem_cgroup_zone_lruvec()
1335 mz = mem_cgroup_zone_zoneinfo(memcg, zone); in mem_cgroup_zone_lruvec()
1343 if (unlikely(lruvec->zone != zone)) in mem_cgroup_zone_lruvec()
1344 lruvec->zone = zone; in mem_cgroup_zone_lruvec()
1353 struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone) in mem_cgroup_page_lruvec() argument
[all …]

page-writeback.c
183 static unsigned long zone_dirtyable_memory(struct zone *zone) in zone_dirtyable_memory() argument
187 nr_pages = zone_page_state(zone, NR_FREE_PAGES); in zone_dirtyable_memory()
188 nr_pages -= min(nr_pages, zone->dirty_balance_reserve); in zone_dirtyable_memory()
190 nr_pages += zone_page_state(zone, NR_INACTIVE_FILE); in zone_dirtyable_memory()
191 nr_pages += zone_page_state(zone, NR_ACTIVE_FILE); in zone_dirtyable_memory()
203 struct zone *z = &NODE_DATA(node)->node_zones[ZONE_HIGHMEM]; in highmem_dirtyable_memory()
298 static unsigned long zone_dirty_limit(struct zone *zone) in zone_dirty_limit() argument
300 unsigned long zone_memory = zone_dirtyable_memory(zone); in zone_dirty_limit()
323 bool zone_dirty_ok(struct zone *zone) in zone_dirty_ok() argument
325 unsigned long limit = zone_dirty_limit(zone); in zone_dirty_ok()
[all …]
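
The page-writeback.c hits spell out zone_dirtyable_memory(): free pages minus the zone's dirty_balance_reserve, clamped with min() so the subtraction cannot wrap below zero, plus the file LRU pages. A small model of that arithmetic:

#include <stdio.h>

struct zone {
	unsigned long nr_free_pages;    /* stand-in for NR_FREE_PAGES */
	unsigned long nr_inactive_file; /* NR_INACTIVE_FILE */
	unsigned long nr_active_file;   /* NR_ACTIVE_FILE */
	unsigned long dirty_balance_reserve;
};

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

/* Model of zone_dirtyable_memory(): what could, in principle, become
 * dirty page cache in this zone. */
static unsigned long zone_dirtyable_memory(const struct zone *zone)
{
	unsigned long nr_pages;

	nr_pages = zone->nr_free_pages;
	/* min() keeps the reserve subtraction from underflowing */
	nr_pages -= min_ul(nr_pages, zone->dirty_balance_reserve);

	nr_pages += zone->nr_inactive_file;
	nr_pages += zone->nr_active_file;
	return nr_pages;
}

int main(void)
{
	struct zone z = { .nr_free_pages = 1000, .nr_inactive_file = 400,
			  .nr_active_file = 600, .dirty_balance_reserve = 1500 };

	/* reserve exceeds free pages: free contributes 0, not negative,
	 * so this prints 1000 (the file LRU pages alone) */
	printf("dirtyable = %lu\n", zone_dirtyable_memory(&z));
	return 0;
}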

sparse.c
693 int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn) in sparse_add_one_section() argument
696 struct pglist_data *pgdat = zone->zone_pgdat; in sparse_add_one_section()
791 void sparse_remove_one_section(struct zone *zone, struct mem_section *ms) in sparse_remove_one_section() argument
795 struct pglist_data *pgdat = zone->zone_pgdat; in sparse_remove_one_section()

cma.c
108 struct zone *zone; in cma_activate_area() local
116 zone = page_zone(pfn_to_page(pfn)); in cma_activate_area()
130 if (page_zone(pfn_to_page(pfn)) != zone) in cma_activate_area()

mempolicy.c
1656 static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone) in apply_policy_zone() argument
1673 return zone >= dynamic_policy_zone; in apply_policy_zone()
1764 struct zone *zone; in mempolicy_slab_node() local
1769 &zone); in mempolicy_slab_node()
1770 return zone ? zone->node : node; in mempolicy_slab_node()
2268 struct zone *zone; in mpol_misplaced() local
2311 &pol->v.nodes, &zone); in mpol_misplaced()
2312 polnid = zone->node; in mpol_misplaced()

migrate.c
1560 struct zone *zone = pgdat->node_zones + z; in migrate_balanced_pgdat() local
1562 if (!populated_zone(zone)) in migrate_balanced_pgdat()
1565 if (!zone_reclaimable(zone)) in migrate_balanced_pgdat()
1569 if (!zone_watermark_ok(zone, 0, in migrate_balanced_pgdat()
1570 high_wmark_pages(zone) + in migrate_balanced_pgdat()

backing-dev.c
619 long wait_iff_congested(struct zone *zone, int sync, long timeout) in wait_iff_congested() argument
632 !test_bit(ZONE_CONGESTED, &zone->flags)) { in wait_iff_congested()

huge_memory.c
108 struct zone *zone; in set_recommended_min_free_kbytes() local
115 for_each_populated_zone(zone) in set_recommended_min_free_kbytes()
1670 struct zone *zone = page_zone(page); in __split_huge_page_refcount() local
1675 spin_lock_irq(&zone->lru_lock); in __split_huge_page_refcount()
1676 lruvec = mem_cgroup_page_lruvec(page, zone); in __split_huge_page_refcount()
1760 __mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1); in __split_huge_page_refcount()
1764 spin_unlock_irq(&zone->lru_lock); in __split_huge_page_refcount()

quicklist.c
30 struct zone *zones = NODE_DATA(node)->node_zones; in max_pages()

hugetlb.c
561 struct zone *zone; in dequeue_huge_page_vma() local
583 for_each_zone_zonelist_nodemask(zone, z, zonelist, in dequeue_huge_page_vma()
585 if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask(h))) { in dequeue_huge_page_vma()
586 page = dequeue_huge_page_node(h, zone_to_nid(zone)); in dequeue_huge_page_vma()
737 static bool zone_spans_last_pfn(const struct zone *zone, in zone_spans_last_pfn() argument
741 return zone_spans_pfn(zone, last_pfn); in zone_spans_last_pfn()
748 struct zone *z; in alloc_gigantic_page()

filemap.c
675 const struct zone *zone = page_zone(page); in page_waitqueue() local
677 return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)]; in page_waitqueue()
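
page_waitqueue() is a nice two-liner: each zone keeps a hash table of wait queue heads, and a page's waiters park on the slot its pointer hashes to; collisions are harmless because waiters re-check the page bit after waking. A sketch of the lookup; the table size and the multiplicative hash below are assumptions standing in for the kernel's boot-time-sized table and hash_ptr():

#include <stdio.h>
#include <stdint.h>

#define WAIT_TABLE_BITS 8 /* assumption: 256 buckets */

struct wait_queue_head { int dummy; };

struct zone {
	struct wait_queue_head wait_table[1 << WAIT_TABLE_BITS];
	unsigned int wait_table_bits;
};

struct page { struct zone *zone; };

/* Assumption: a multiplicative hash in the spirit of hash_ptr(). */
static unsigned long hash_ptr(const void *ptr, unsigned int bits)
{
	uint64_t val = (uint64_t)(uintptr_t)ptr * 0x9e3779b97f4a7c15ULL;
	return (unsigned long)(val >> (64 - bits));
}

/* Model of page_waitqueue(): many pages may share each queue. */
static struct wait_queue_head *page_waitqueue(const struct page *page)
{
	struct zone *zone = page->zone;
	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}

int main(void)
{
	static struct zone z = { .wait_table_bits = WAIT_TABLE_BITS };
	struct page p = { &z };

	printf("page hashes to bucket %ld\n",
	       (long)(page_waitqueue(&p) - z.wait_table));
	return 0;
}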

nobootmem.c
150 struct zone *z; in reset_node_managed_pages()
