
Searched refs:zone (Results 1 – 25 of 30) sorted by relevance


/mm/

compaction.c
92 unsigned long end_pfn, struct zone *zone) in pageblock_pfn_to_page() argument
105 if (page_zone(start_page) != zone) in pageblock_pfn_to_page()
127 void defer_compaction(struct zone *zone, int order) in defer_compaction() argument
129 zone->compact_considered = 0; in defer_compaction()
130 zone->compact_defer_shift++; in defer_compaction()
132 if (order < zone->compact_order_failed) in defer_compaction()
133 zone->compact_order_failed = order; in defer_compaction()
135 if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT) in defer_compaction()
136 zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT; in defer_compaction()
138 trace_mm_compaction_defer_compaction(zone, order); in defer_compaction()
[all …]
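
The defer_compaction() hits above are one half of an exponential-backoff scheme: each failure at a given order resets the counter and widens the deferral window (capped by COMPACT_MAX_DEFER_SHIFT), and a counterpart check skips attempts until 1 << compact_defer_shift of them have been considered. A minimal userspace sketch of that backoff; the toy struct and the check function's exact behaviour are assumptions mirroring the kernel fields, not the kernel code itself:

#include <stdbool.h>
#include <stdio.h>

#define COMPACT_MAX_DEFER_SHIFT 6

/* Toy stand-ins for the zone fields used by the deferral logic. */
struct toy_zone {
        unsigned int compact_considered;
        unsigned int compact_defer_shift;
        int compact_order_failed;
};

/* A failed compaction resets the counter and doubles the window. */
static void defer_compaction(struct toy_zone *zone, int order)
{
        zone->compact_considered = 0;
        zone->compact_defer_shift++;
        if (order < zone->compact_order_failed)
                zone->compact_order_failed = order;
        if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
                zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* Skip attempts until 1 << compact_defer_shift have been considered. */
static bool compaction_deferred(struct toy_zone *zone, int order)
{
        unsigned int defer_limit = 1U << zone->compact_defer_shift;

        if (order < zone->compact_order_failed)
                return false;
        if (++zone->compact_considered >= defer_limit)
                return false;
        return true;
}

int main(void)
{
        struct toy_zone z = { 0, 0, 10 };
        int i;

        defer_compaction(&z, 3);        /* one failure at order 3 */
        for (i = 0; i < 4; i++)
                printf("attempt %d deferred: %d\n", i,
                       compaction_deferred(&z, 3));
        return 0;
}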

page_alloc.c
391 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) in page_outside_zone_boundaries() argument
399 seq = zone_span_seqbegin(zone); in page_outside_zone_boundaries()
400 start_pfn = zone->zone_start_pfn; in page_outside_zone_boundaries()
401 sp = zone->spanned_pages; in page_outside_zone_boundaries()
402 if (!zone_spans_pfn(zone, pfn)) in page_outside_zone_boundaries()
404 } while (zone_span_seqretry(zone, seq)); in page_outside_zone_boundaries()
408 pfn, zone_to_nid(zone), zone->name, in page_outside_zone_boundaries()
414 static int page_is_consistent(struct zone *zone, struct page *page) in page_is_consistent() argument
418 if (zone != page_zone(page)) in page_is_consistent()
426 static int bad_range(struct zone *zone, struct page *page) in bad_range() argument
[all …]
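
page_outside_zone_boundaries() reads the zone span under a sequence counter so a concurrent resize (memory hotplug) cannot hand it a torn start/size pair. A toy single-file sketch of that read-retry pattern; the seqcount implementation and memory ordering here are simplified assumptions, where the kernel uses zone_span_seqbegin()/zone_span_seqretry():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy seqcount: a writer makes it odd during an update, even after. */
struct toy_zone {
        atomic_uint span_seq;
        unsigned long zone_start_pfn;
        unsigned long spanned_pages;
};

static unsigned int span_seqbegin(struct toy_zone *z)
{
        unsigned int seq;

        while ((seq = atomic_load(&z->span_seq)) & 1)
                ;       /* writer in progress: spin */
        return seq;
}

static bool span_seqretry(struct toy_zone *z, unsigned int seq)
{
        return atomic_load(&z->span_seq) != seq;
}

/* Same retry shape as page_outside_zone_boundaries() above. */
static bool pfn_outside_zone(struct toy_zone *z, unsigned long pfn)
{
        unsigned long start, end;
        unsigned int seq;

        do {
                seq = span_seqbegin(z);
                start = z->zone_start_pfn;
                end = start + z->spanned_pages;
        } while (span_seqretry(z, seq));

        return pfn < start || pfn >= end;
}

int main(void)
{
        struct toy_zone z = { 0, 0x1000, 0x800 };

        printf("0x1400 outside: %d\n", pfn_outside_zone(&z, 0x1400));
        printf("0x2000 outside: %d\n", pfn_outside_zone(&z, 0x2000));
        return 0;
}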

vmstat.c
94 int calculate_pressure_threshold(struct zone *zone) in calculate_pressure_threshold() argument
107 watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone); in calculate_pressure_threshold()
118 int calculate_normal_threshold(struct zone *zone) in calculate_normal_threshold() argument
153 mem = zone->managed_pages >> (27 - PAGE_SHIFT); in calculate_normal_threshold()
170 struct zone *zone; in refresh_zone_stat_thresholds() local
174 for_each_populated_zone(zone) { in refresh_zone_stat_thresholds()
177 threshold = calculate_normal_threshold(zone); in refresh_zone_stat_thresholds()
180 per_cpu_ptr(zone->pageset, cpu)->stat_threshold in refresh_zone_stat_thresholds()
188 tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone); in refresh_zone_stat_thresholds()
191 zone->percpu_drift_mark = high_wmark_pages(zone) + in refresh_zone_stat_thresholds()
[all …]

memory_hotplug.c
251 struct zone *zone; in register_page_bootmem_info_node() local
259 zone = &pgdat->node_zones[0]; in register_page_bootmem_info_node()
260 for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) { in register_page_bootmem_info_node()
261 if (zone_is_initialized(zone)) { in register_page_bootmem_info_node()
262 nr_pages = zone->wait_table_hash_nr_entries in register_page_bootmem_info_node()
265 page = virt_to_page(zone->wait_table); in register_page_bootmem_info_node()
289 static void __meminit grow_zone_span(struct zone *zone, unsigned long start_pfn, in grow_zone_span() argument
294 zone_span_writelock(zone); in grow_zone_span()
296 old_zone_end_pfn = zone_end_pfn(zone); in grow_zone_span()
297 if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn) in grow_zone_span()
[all …]
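
grow_zone_span() widens a zone's [start_pfn, end_pfn) range when memory is hot-added, taking the new range outright for an empty zone. A toy sketch of the span arithmetic under those assumptions, with locking elided and illustrative field names:

#include <stdio.h>

struct toy_zone {
        unsigned long start_pfn;
        unsigned long spanned_pages;    /* 0 means the zone is empty */
};

static unsigned long zone_end_pfn(const struct toy_zone *z)
{
        return z->start_pfn + z->spanned_pages;
}

/* Widen the span so it also covers [start_pfn, end_pfn). */
static void grow_zone_span(struct toy_zone *z, unsigned long start_pfn,
                           unsigned long end_pfn)
{
        unsigned long old_end = zone_end_pfn(z);

        if (!z->spanned_pages || start_pfn < z->start_pfn)
                z->start_pfn = start_pfn;
        if (!z->spanned_pages || end_pfn > old_end)
                old_end = end_pfn;
        z->spanned_pages = old_end - z->start_pfn;
}

int main(void)
{
        struct toy_zone z = { 0x1000, 0x400 };

        grow_zone_span(&z, 0x1400, 0x1800);     /* hot-added block above */
        printf("start=%#lx pages=%#lx\n", z.start_pfn, z.spanned_pages);
        return 0;
}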

vmscan.c
195 static unsigned long zone_reclaimable_pages(struct zone *zone) in zone_reclaimable_pages() argument
199 nr = zone_page_state(zone, NR_ACTIVE_FILE) + in zone_reclaimable_pages()
200 zone_page_state(zone, NR_INACTIVE_FILE); in zone_reclaimable_pages()
203 nr += zone_page_state(zone, NR_ACTIVE_ANON) + in zone_reclaimable_pages()
204 zone_page_state(zone, NR_INACTIVE_ANON); in zone_reclaimable_pages()
209 bool zone_reclaimable(struct zone *zone) in zone_reclaimable() argument
211 return zone_page_state(zone, NR_PAGES_SCANNED) < in zone_reclaimable()
212 zone_reclaimable_pages(zone) * 6; in zone_reclaimable()
892 struct zone *zone, in shrink_page_list() argument
930 VM_BUG_ON_PAGE(page_zone(page) != zone, page); in shrink_page_list()
[all …]
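
The vmscan.c hits show the give-up heuristic: a zone counts as reclaimable only while the pages scanned since the last successful reclaim stay under six times the zone's reclaimable (LRU) pages, and anonymous pages count only when there is swap to send them to. A toy rendering of that arithmetic, with a plain bool standing in for the kernel's swap-space check:

#include <stdbool.h>
#include <stdio.h>

/* Toy counters standing in for the zone vmstat fields used above. */
struct toy_zone {
        unsigned long nr_active_file, nr_inactive_file;
        unsigned long nr_active_anon, nr_inactive_anon;
        unsigned long nr_pages_scanned;
        bool swap_available;
};

static unsigned long zone_reclaimable_pages(const struct toy_zone *z)
{
        unsigned long nr = z->nr_active_file + z->nr_inactive_file;

        /* Anon pages only count when swap can take them. */
        if (z->swap_available)
                nr += z->nr_active_anon + z->nr_inactive_anon;
        return nr;
}

/* Give up once we have scanned six times the reclaimable set. */
static bool zone_reclaimable(const struct toy_zone *z)
{
        return z->nr_pages_scanned < zone_reclaimable_pages(z) * 6;
}

int main(void)
{
        struct toy_zone z = { 100, 100, 50, 50, 900, true };

        printf("reclaimable: %d\n", zone_reclaimable(&z));  /* 900 < 1800 */
        return 0;
}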

page_isolation.c
15 struct zone *zone; in set_migratetype_isolate() local
21 zone = page_zone(page); in set_migratetype_isolate()
23 spin_lock_irqsave(&zone->lock, flags); in set_migratetype_isolate()
49 if (!has_unmovable_pages(zone, page, arg.pages_found, in set_migratetype_isolate()
64 zone->nr_isolate_pageblock++; in set_migratetype_isolate()
65 nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE); in set_migratetype_isolate()
67 __mod_zone_freepage_state(zone, -nr_pages, migratetype); in set_migratetype_isolate()
70 spin_unlock_irqrestore(&zone->lock, flags); in set_migratetype_isolate()
72 drain_all_pages(zone); in set_migratetype_isolate()
78 struct zone *zone; in unset_migratetype_isolate() local
[all …]

swap.c
56 struct zone *zone = page_zone(page); in __page_cache_release() local
60 spin_lock_irqsave(&zone->lru_lock, flags); in __page_cache_release()
61 lruvec = mem_cgroup_page_lruvec(page, zone); in __page_cache_release()
65 spin_unlock_irqrestore(&zone->lru_lock, flags); in __page_cache_release()
423 struct zone *zone = NULL; in pagevec_lru_move_fn() local
429 struct zone *pagezone = page_zone(page); in pagevec_lru_move_fn()
431 if (pagezone != zone) { in pagevec_lru_move_fn()
432 if (zone) in pagevec_lru_move_fn()
433 spin_unlock_irqrestore(&zone->lru_lock, flags); in pagevec_lru_move_fn()
434 zone = pagezone; in pagevec_lru_move_fn()
[all …]
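
pagevec_lru_move_fn() batches LRU work: walking the pagevec, it holds one zone's lru_lock at a time and only drops and retakes when the next page belongs to a different zone, so runs of same-zone pages cost a single lock acquisition. A compact userspace model of that pattern, with pthread mutexes standing in for the spinlock and all names illustrative:

#include <pthread.h>
#include <stdio.h>

struct toy_zone {
        pthread_mutex_t lru_lock;
        int id;
};

struct toy_page {
        struct toy_zone *zone;
        int idx;
};

static void move_fn(struct toy_page *page)
{
        printf("page %d handled under zone %d lock\n",
               page->idx, page->zone->id);
}

static void pagevec_move(struct toy_page *pages, int nr)
{
        struct toy_zone *locked = NULL;
        int i;

        for (i = 0; i < nr; i++) {
                struct toy_zone *pagezone = pages[i].zone;

                /* Switch locks only when the zone changes. */
                if (pagezone != locked) {
                        if (locked)
                                pthread_mutex_unlock(&locked->lru_lock);
                        locked = pagezone;
                        pthread_mutex_lock(&locked->lru_lock);
                }
                move_fn(&pages[i]);
        }
        if (locked)
                pthread_mutex_unlock(&locked->lru_lock);
}

int main(void)
{
        struct toy_zone za = { PTHREAD_MUTEX_INITIALIZER, 0 };
        struct toy_zone zb = { PTHREAD_MUTEX_INITIALIZER, 1 };
        struct toy_page pages[] = { { &za, 0 }, { &za, 1 }, { &zb, 2 } };

        pagevec_move(pages, 3);
        return 0;
}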

workingset.c
155 static void *pack_shadow(unsigned long eviction, struct zone *zone) in pack_shadow() argument
157 eviction = (eviction << NODES_SHIFT) | zone_to_nid(zone); in pack_shadow()
158 eviction = (eviction << ZONES_SHIFT) | zone_idx(zone); in pack_shadow()
165 struct zone **zone, in unpack_shadow() argument
181 *zone = NODE_DATA(nid)->node_zones + zid; in unpack_shadow()
183 refault = atomic_long_read(&(*zone)->inactive_age); in unpack_shadow()
215 struct zone *zone = page_zone(page); in workingset_eviction() local
218 eviction = atomic_long_inc_return(&zone->inactive_age); in workingset_eviction()
219 return pack_shadow(eviction, zone); in workingset_eviction()
234 struct zone *zone; in workingset_refault() local
[all …]
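
pack_shadow() encodes an eviction "timestamp" plus the node and zone of the evicted page into a single word by shifting and OR-ing bit fields; unpack_shadow() reverses it. A self-contained sketch with made-up field widths, where the kernel sizes them from NODES_SHIFT and ZONES_SHIFT:

#include <stdio.h>

#define TOY_NODES_SHIFT 6
#define TOY_ZONES_SHIFT 2

static unsigned long pack_shadow(unsigned long eviction, int nid, int zid)
{
        eviction = (eviction << TOY_NODES_SHIFT) | nid;
        eviction = (eviction << TOY_ZONES_SHIFT) | zid;
        return eviction;
}

static void unpack_shadow(unsigned long shadow, unsigned long *eviction,
                          int *nid, int *zid)
{
        *zid = shadow & ((1 << TOY_ZONES_SHIFT) - 1);
        shadow >>= TOY_ZONES_SHIFT;
        *nid = shadow & ((1 << TOY_NODES_SHIFT) - 1);
        *eviction = shadow >> TOY_NODES_SHIFT;
}

int main(void)
{
        unsigned long shadow = pack_shadow(12345, 3, 2);
        unsigned long ev;
        int nid, zid;

        unpack_shadow(shadow, &ev, &nid, &zid);
        printf("eviction=%lu nid=%d zid=%d\n", ev, nid, zid);
        return 0;
}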

mmzone.c
29 struct zone *next_zone(struct zone *zone) in next_zone() argument
31 pg_data_t *pgdat = zone->zone_pgdat; in next_zone()
33 if (zone < pgdat->node_zones + MAX_NR_ZONES - 1) in next_zone()
34 zone++; in next_zone()
38 zone = pgdat->node_zones; in next_zone()
40 zone = NULL; in next_zone()
42 return zone; in next_zone()
68 (z->zone && !zref_in_nodemask(z, nodes))) in next_zones_zonelist()
76 struct page *page, struct zone *zone) in memmap_valid_within() argument
81 if (page_zone(page) != zone) in memmap_valid_within()
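
next_zone() is the iterator behind for_each_zone(): advance within the current node's node_zones array, fall over to the first zone of the next online node, and return NULL when the nodes run out. A toy two-node model of the same traversal, with all struct names illustrative:

#include <stddef.h>
#include <stdio.h>

#define TOY_MAX_NR_ZONES 3

struct toy_pgdat;

struct toy_zone {
        struct toy_pgdat *pgdat;
        const char *name;
};

struct toy_pgdat {
        struct toy_zone zones[TOY_MAX_NR_ZONES];
        struct toy_pgdat *next;         /* next online node, or NULL */
};

/* Next zone in this node, else first zone of the next node, else NULL. */
static struct toy_zone *next_zone(struct toy_zone *zone)
{
        struct toy_pgdat *pgdat = zone->pgdat;

        if (zone < pgdat->zones + TOY_MAX_NR_ZONES - 1)
                return zone + 1;
        if (pgdat->next)
                return pgdat->next->zones;
        return NULL;
}

int main(void)
{
        struct toy_pgdat n1, n0 = { .next = &n1 };
        const char *names[] = { "DMA", "Normal", "HighMem" };
        struct toy_zone *z;
        int i;

        n1.next = NULL;
        for (i = 0; i < TOY_MAX_NR_ZONES; i++) {
                n0.zones[i] = (struct toy_zone){ &n0, names[i] };
                n1.zones[i] = (struct toy_zone){ &n1, names[i] };
        }

        for (z = &n0.zones[0]; z; z = next_zone(z))
                printf("%s\n", z->name);
        return 0;
}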

page_owner.c
228 static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone) in init_pages_in_zone() argument
232 unsigned long pfn = zone->zone_start_pfn, block_end_pfn; in init_pages_in_zone()
233 unsigned long end_pfn = pfn + zone->spanned_pages; in init_pages_in_zone()
237 pfn = zone->zone_start_pfn; in init_pages_in_zone()
288 pgdat->node_id, zone->name, count); in init_pages_in_zone()
293 struct zone *zone; in init_zones_in_node() local
294 struct zone *node_zones = pgdat->node_zones; in init_zones_in_node()
297 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) { in init_zones_in_node()
298 if (!populated_zone(zone)) in init_zones_in_node()
301 spin_lock_irqsave(&zone->lock, flags); in init_zones_in_node()
[all …]

mm_init.c
34 struct zone *zone; in mminit_verify_zonelist() local
46 zone = &pgdat->node_zones[zoneid]; in mminit_verify_zonelist()
47 if (!populated_zone(zone)) in mminit_verify_zonelist()
53 zone->name); in mminit_verify_zonelist()
56 for_each_zone_zonelist(zone, z, zonelist, zoneid) { in mminit_verify_zonelist()
59 zone->node, zone->name); in mminit_verify_zonelist()
61 printk(KERN_CONT "0:%s ", zone->name); in mminit_verify_zonelist()

mlock.c
176 struct zone *zone = page_zone(page); in munlock_vma_page() local
186 spin_lock_irq(&zone->lru_lock); in munlock_vma_page()
192 __mod_zone_page_state(zone, NR_MLOCK, -nr_pages); in munlock_vma_page()
195 spin_unlock_irq(&zone->lru_lock); in munlock_vma_page()
202 spin_unlock_irq(&zone->lru_lock); in munlock_vma_page()
276 static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone) in __munlock_pagevec() argument
287 spin_lock_irq(&zone->lru_lock); in __munlock_pagevec()
313 __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked); in __munlock_pagevec()
314 spin_unlock_irq(&zone->lru_lock); in __munlock_pagevec()
433 struct zone *zone; in munlock_vma_pages_range() local
[all …]

internal.h
145 extern bool zone_reclaimable(struct zone *zone);
172 struct zone *preferred_zone;
237 struct zone *zone; member
457 unsigned long reclaim_clean_pages_from_list(struct zone *zone,

page_idle.c
33 struct zone *zone; in page_idle_get_page() local
43 zone = page_zone(page); in page_idle_get_page()
44 spin_lock_irq(&zone->lru_lock); in page_idle_get_page()
49 spin_unlock_irq(&zone->lru_lock); in page_idle_get_page()

memcontrol.c
391 mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone) in mem_cgroup_zone_zoneinfo() argument
393 int nid = zone_to_nid(zone); in mem_cgroup_zone_zoneinfo()
394 int zid = zone_idx(zone); in mem_cgroup_zone_zoneinfo()
887 mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone); in mem_cgroup_iter()
1056 struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone, in mem_cgroup_zone_lruvec() argument
1063 lruvec = &zone->lruvec; in mem_cgroup_zone_lruvec()
1067 mz = mem_cgroup_zone_zoneinfo(memcg, zone); in mem_cgroup_zone_lruvec()
1075 if (unlikely(lruvec->zone != zone)) in mem_cgroup_zone_lruvec()
1076 lruvec->zone = zone; in mem_cgroup_zone_lruvec()
1089 struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone) in mem_cgroup_page_lruvec() argument
[all …]

page-writeback.c
276 static unsigned long zone_dirtyable_memory(struct zone *zone) in zone_dirtyable_memory() argument
280 nr_pages = zone_page_state(zone, NR_FREE_PAGES); in zone_dirtyable_memory()
281 nr_pages -= min(nr_pages, zone->dirty_balance_reserve); in zone_dirtyable_memory()
283 nr_pages += zone_page_state(zone, NR_INACTIVE_FILE); in zone_dirtyable_memory()
284 nr_pages += zone_page_state(zone, NR_ACTIVE_FILE); in zone_dirtyable_memory()
296 struct zone *z = &NODE_DATA(node)->node_zones[ZONE_HIGHMEM]; in highmem_dirtyable_memory()
440 static unsigned long zone_dirty_limit(struct zone *zone) in zone_dirty_limit() argument
442 unsigned long zone_memory = zone_dirtyable_memory(zone); in zone_dirty_limit()
465 bool zone_dirty_ok(struct zone *zone) in zone_dirty_ok() argument
467 unsigned long limit = zone_dirty_limit(zone); in zone_dirty_ok()
[all …]
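
zone_dirtyable_memory() counts what could be dirtied in a zone (free pages plus file LRU pages, less a reserve), and zone_dirty_limit() applies the dirty ratio to that base. A toy sketch of the arithmetic; the percentage handling is a simplifying assumption, as the kernel also honours vm_dirty_bytes and per-task weighting:

#include <stdio.h>

/* Illustrative field names, not the kernel's vmstat counters. */
struct toy_zone {
        unsigned long nr_free;
        unsigned long nr_inactive_file;
        unsigned long nr_active_file;
        unsigned long dirty_balance_reserve;
};

static unsigned long zone_dirtyable_memory(const struct toy_zone *z)
{
        unsigned long nr = z->nr_free;

        /* Subtract the reserve, clamping at zero. */
        nr -= (nr < z->dirty_balance_reserve) ? nr : z->dirty_balance_reserve;
        nr += z->nr_inactive_file;
        nr += z->nr_active_file;
        return nr;
}

static unsigned long zone_dirty_limit(const struct toy_zone *z,
                                      unsigned int dirty_ratio)
{
        return zone_dirtyable_memory(z) * dirty_ratio / 100;
}

int main(void)
{
        struct toy_zone z = { 10000, 3000, 2000, 500 };

        printf("dirty limit: %lu pages\n", zone_dirty_limit(&z, 20));
        return 0;
}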

sparse.c
693 int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn) in sparse_add_one_section() argument
696 struct pglist_data *pgdat = zone->zone_pgdat; in sparse_add_one_section()
791 void sparse_remove_one_section(struct zone *zone, struct mem_section *ms) in sparse_remove_one_section() argument
795 struct pglist_data *pgdat = zone->zone_pgdat; in sparse_remove_one_section()

cma.c
99 struct zone *zone; in cma_activate_area() local
109 zone = page_zone(pfn_to_page(pfn)); in cma_activate_area()
123 if (page_zone(pfn_to_page(pfn)) != zone) in cma_activate_area()

oom_kill.c
211 struct zone *zone; in constrained_alloc() local
244 for_each_zone_zonelist_nodemask(zone, z, oc->zonelist, in constrained_alloc()
246 if (!cpuset_zone_allowed(zone, oc->gfp_mask)) in constrained_alloc()

migrate.c
318 struct zone *oldzone, *newzone; in migrate_page_move_mapping()
1568 struct zone *zone = pgdat->node_zones + z; in migrate_balanced_pgdat() local
1570 if (!populated_zone(zone)) in migrate_balanced_pgdat()
1573 if (!zone_reclaimable(zone)) in migrate_balanced_pgdat()
1577 if (!zone_watermark_ok(zone, 0, in migrate_balanced_pgdat()
1578 high_wmark_pages(zone) + in migrate_balanced_pgdat()

mempolicy.c
1628 static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone) in apply_policy_zone() argument
1645 return zone >= dynamic_policy_zone; in apply_policy_zone()
1736 struct zone *zone; in mempolicy_slab_node() local
1741 &zone); in mempolicy_slab_node()
1742 return zone ? zone->node : node; in mempolicy_slab_node()
2298 struct zone *zone; in mpol_misplaced() local
2341 &pol->v.nodes, &zone); in mpol_misplaced()
2342 polnid = zone->node; in mpol_misplaced()

huge_memory.c
112 struct zone *zone; in set_recommended_min_free_kbytes() local
116 for_each_populated_zone(zone) in set_recommended_min_free_kbytes()
1749 struct zone *zone = page_zone(page); in __split_huge_page_refcount() local
1754 spin_lock_irq(&zone->lru_lock); in __split_huge_page_refcount()
1755 lruvec = mem_cgroup_page_lruvec(page, zone); in __split_huge_page_refcount()
1838 __mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1); in __split_huge_page_refcount()
1842 spin_unlock_irq(&zone->lru_lock); in __split_huge_page_refcount()

quicklist.c
30 struct zone *zones = NODE_DATA(node)->node_zones; in max_pages()

backing-dev.c
1001 long wait_iff_congested(struct zone *zone, int sync, long timeout) in wait_iff_congested() argument
1014 !test_bit(ZONE_CONGESTED, &zone->flags)) { in wait_iff_congested()

hugetlb.c
901 struct zone *zone; in dequeue_huge_page_vma() local
923 for_each_zone_zonelist_nodemask(zone, z, zonelist, in dequeue_huge_page_vma()
925 if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) { in dequeue_huge_page_vma()
926 page = dequeue_huge_page_node(h, zone_to_nid(zone)); in dequeue_huge_page_vma()
1076 static bool zone_spans_last_pfn(const struct zone *zone, in zone_spans_last_pfn() argument
1080 return zone_spans_pfn(zone, last_pfn); in zone_spans_last_pfn()
1087 struct zone *z; in alloc_gigantic_page()
