Searched refs:zone (Results 1 – 25 of 30) sorted by relevance

/mm/
page_alloc.c 658 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) in page_outside_zone_boundaries() argument
666 seq = zone_span_seqbegin(zone); in page_outside_zone_boundaries()
667 start_pfn = zone->zone_start_pfn; in page_outside_zone_boundaries()
668 sp = zone->spanned_pages; in page_outside_zone_boundaries()
669 if (!zone_spans_pfn(zone, pfn)) in page_outside_zone_boundaries()
671 } while (zone_span_seqretry(zone, seq)); in page_outside_zone_boundaries()
675 pfn, zone_to_nid(zone), zone->name, in page_outside_zone_boundaries()
681 static int page_is_consistent(struct zone *zone, struct page *page) in page_is_consistent() argument
683 if (zone != page_zone(page)) in page_is_consistent()
691 static int __maybe_unused bad_range(struct zone *zone, struct page *page) in bad_range() argument
[all …]
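The do/while visible above is the zone-span seqcount pattern: readers retry whenever a concurrent memory hotplug operation resizes the zone mid-read. A minimal sketch of the same pattern, assuming kernel context (the helper name is hypothetical):

    #include <linux/mmzone.h>
    #include <linux/memory_hotplug.h>

    /* Hypothetical helper: answer "does this pfn lie in the zone's span?"
     * consistently, retrying if hotplug resized the zone during the read. */
    static bool pfn_in_zone_span(struct zone *zone, unsigned long pfn)
    {
        unsigned int seq;
        bool ret;

        do {
            seq = zone_span_seqbegin(zone);
            ret = zone_spans_pfn(zone, pfn);
        } while (zone_span_seqretry(zone, seq));

        return ret;
    }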
vmstat.c 38 static void zero_zone_numa_counters(struct zone *zone) in zero_zone_numa_counters() argument
43 atomic_long_set(&zone->vm_numa_event[item], 0); in zero_zone_numa_counters()
45 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_numa_event[item] in zero_zone_numa_counters()
54 struct zone *zone; in zero_zones_numa_counters() local
56 for_each_populated_zone(zone) in zero_zones_numa_counters()
57 zero_zone_numa_counters(zone); in zero_zones_numa_counters()
170 int calculate_pressure_threshold(struct zone *zone) in calculate_pressure_threshold() argument
183 watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone); in calculate_pressure_threshold()
194 int calculate_normal_threshold(struct zone *zone) in calculate_normal_threshold() argument
229 mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT); in calculate_normal_threshold()
[all …]
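In calculate_normal_threshold() above, the shift >> (27 - PAGE_SHIFT) converts a page count into 128 MiB units (2^27 bytes). A sketch of just that conversion, with a hypothetical helper name:

    #include <linux/mm.h>

    /* Hypothetical helper: managed zone size in 128 MiB (2^27 byte) units.
     * Each page is 2^PAGE_SHIFT bytes, so pages >> (27 - PAGE_SHIFT)
     * equals bytes >> 27. */
    static unsigned long zone_managed_128mb_units(struct zone *zone)
    {
        return zone_managed_pages(zone) >> (27 - PAGE_SHIFT);
    }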
compaction.c 161 static void defer_compaction(struct zone *zone, int order) in defer_compaction() argument
163 zone->compact_considered = 0; in defer_compaction()
164 zone->compact_defer_shift++; in defer_compaction()
166 if (order < zone->compact_order_failed) in defer_compaction()
167 zone->compact_order_failed = order; in defer_compaction()
169 if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT) in defer_compaction()
170 zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT; in defer_compaction()
172 trace_mm_compaction_defer_compaction(zone, order); in defer_compaction()
176 static bool compaction_deferred(struct zone *zone, int order) in compaction_deferred() argument
178 unsigned long defer_limit = 1UL << zone->compact_defer_shift; in compaction_deferred()
[all …]
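defer_compaction() and compaction_deferred() above form an exponential backoff: each failure at a given order widens the defer window (1UL << compact_defer_shift, capped at COMPACT_MAX_DEFER_SHIFT), and attempts are skipped until enough have been considered. A condensed sketch of the check side:

    #include <linux/mmzone.h>

    /* Condensed sketch of compaction_deferred(): skip compaction until
     * compact_considered catches up with the current defer limit. */
    static bool zone_compaction_backed_off(struct zone *zone, int order)
    {
        unsigned long defer_limit = 1UL << zone->compact_defer_shift;

        /* Orders below the one that last failed are still worth trying. */
        if (order < zone->compact_order_failed)
            return false;

        return ++zone->compact_considered < defer_limit;
    }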
memory_hotplug.c 348 static unsigned long find_smallest_section_pfn(int nid, struct zone *zone, in find_smallest_section_pfn() argument
359 if (zone != page_zone(pfn_to_page(start_pfn))) in find_smallest_section_pfn()
369 static unsigned long find_biggest_section_pfn(int nid, struct zone *zone, in find_biggest_section_pfn() argument
384 if (zone != page_zone(pfn_to_page(pfn))) in find_biggest_section_pfn()
393 static void shrink_zone_span(struct zone *zone, unsigned long start_pfn, in shrink_zone_span() argument
397 int nid = zone_to_nid(zone); in shrink_zone_span()
399 if (zone->zone_start_pfn == start_pfn) { in shrink_zone_span()
406 pfn = find_smallest_section_pfn(nid, zone, end_pfn, in shrink_zone_span()
407 zone_end_pfn(zone)); in shrink_zone_span()
409 zone->spanned_pages = zone_end_pfn(zone) - pfn; in shrink_zone_span()
[all …]
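shrink_zone_span() leans on the invariant that a zone spans one contiguous pfn interval: the exclusive end is the start plus spanned_pages, which is why line 409 above can recompute spanned_pages from the still-unchanged end before the new start is stored. The invariant, as a sketch:

    #include <linux/mmzone.h>

    /* The span invariant used above: zone_end_pfn() (exclusive end) is
     * zone_start_pfn + spanned_pages. */
    static unsigned long zone_end_pfn_sketch(const struct zone *zone)
    {
        return zone->zone_start_pfn + zone->spanned_pages;
    }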
page_isolation.c 21 struct zone *zone = page_zone(page); in set_migratetype_isolate() local
25 spin_lock_irqsave(&zone->lock, flags); in set_migratetype_isolate()
33 spin_unlock_irqrestore(&zone->lock, flags); in set_migratetype_isolate()
41 unmovable = has_unmovable_pages(zone, page, migratetype, isol_flags); in set_migratetype_isolate()
47 zone->nr_isolate_pageblock++; in set_migratetype_isolate()
48 nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE, in set_migratetype_isolate()
51 __mod_zone_freepage_state(zone, -nr_pages, mt); in set_migratetype_isolate()
52 spin_unlock_irqrestore(&zone->lock, flags); in set_migratetype_isolate()
56 spin_unlock_irqrestore(&zone->lock, flags); in set_migratetype_isolate()
70 struct zone *zone; in unset_migratetype_isolate() local
[all …]
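set_migratetype_isolate() shows the standard discipline for per-zone free-list state: migratetype bits, nr_isolate_pageblock and the free lists themselves are only touched under zone->lock with interrupts saved. A minimal sketch of that locking shape (helper name hypothetical, body elided):

    #include <linux/mmzone.h>
    #include <linux/spinlock.h>

    /* Hypothetical helper showing only the locking shape used above. */
    static void zone_freelist_op_sketch(struct zone *zone)
    {
        unsigned long flags;

        spin_lock_irqsave(&zone->lock, flags);
        /* ... examine or move free pages, update isolation counts ... */
        spin_unlock_irqrestore(&zone->lock, flags);
    }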
page_reporting.c 115 page_reporting_cycle(struct page_reporting_dev_info *prdev, struct zone *zone, in page_reporting_cycle() argument
119 struct free_area *area = &zone->free_area[order]; in page_reporting_cycle()
133 spin_lock_irq(&zone->lock); in page_reporting_cycle()
195 spin_unlock_irq(&zone->lock); in page_reporting_cycle()
207 spin_lock_irq(&zone->lock); in page_reporting_cycle()
227 spin_unlock_irq(&zone->lock); in page_reporting_cycle()
234 struct scatterlist *sgl, struct zone *zone) in page_reporting_process_zone() argument
241 watermark = low_wmark_pages(zone) + in page_reporting_process_zone()
248 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) in page_reporting_process_zone()
258 err = page_reporting_cycle(prdev, zone, order, mt, in page_reporting_process_zone()
[all …]
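page_reporting_process_zone() refuses to run unless the zone sits comfortably above its low watermark, so reporting free pages never competes with reclaim. A sketch of that guard (the slack parameter is hypothetical; ALLOC_CMA comes from mm/internal.h):

    #include <linux/mmzone.h>
    #include "internal.h"   /* ALLOC_CMA */

    /* Sketch: only report when free pages clear the low watermark plus
     * some slack, counting CMA pages as usable. */
    static bool zone_ok_to_report(struct zone *zone, unsigned long slack)
    {
        unsigned long watermark = low_wmark_pages(zone) + slack;

        return zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA);
    }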
internal.h 185 unsigned long end_pfn, struct zone *zone);
188 unsigned long end_pfn, struct zone *zone) in pageblock_pfn_to_page() argument
190 if (zone->contiguous) in pageblock_pfn_to_page()
193 return __pageblock_pfn_to_page(start_pfn, end_pfn, zone); in pageblock_pfn_to_page()
210 extern void zone_pcp_update(struct zone *zone, int cpu_online);
211 extern void zone_pcp_reset(struct zone *zone);
212 extern void zone_pcp_disable(struct zone *zone);
213 extern void zone_pcp_enable(struct zone *zone);
245 struct zone *zone; member
579 unsigned int reclaim_clean_pages_from_list(struct zone *zone,
[all …]
mmzone.c 31 struct zone *next_zone(struct zone *zone) in next_zone() argument
33 pg_data_t *pgdat = zone->zone_pgdat; in next_zone()
35 if (zone < pgdat->node_zones + MAX_NR_ZONES - 1) in next_zone()
36 zone++; in next_zone()
40 zone = pgdat->node_zones; in next_zone()
42 zone = NULL; in next_zone()
44 return zone; in next_zone()
70 (z->zone && !zref_in_nodemask(z, nodes))) in __next_zones_zonelist()
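next_zone() above is the step function behind for_each_zone() and for_each_populated_zone(): it walks one node's node_zones array, then hops to the next online node's pgdat. A small usage sketch (helper name hypothetical):

    #include <linux/mmzone.h>
    #include <linux/vmstat.h>

    /* Hypothetical helper: total free pages across every populated zone,
     * visited in the order next_zone() produces them. */
    static unsigned long total_free_pages_sketch(void)
    {
        struct zone *zone;
        unsigned long free = 0;

        for_each_populated_zone(zone)
            free += zone_page_state(zone, NR_FREE_PAGES);

        return free;
    }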
vmscan.c 600 unsigned long zone_reclaimable_pages(struct zone *zone) in zone_reclaimable_pages() argument
604 nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) + in zone_reclaimable_pages()
605 zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE); in zone_reclaimable_pages()
606 if (can_reclaim_anon_pages(NULL, zone_to_nid(zone), NULL)) in zone_reclaimable_pages()
607 nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) + in zone_reclaimable_pages()
608 zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON); in zone_reclaimable_pages()
626 struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid]; in lruvec_lru_size() local
628 if (!managed_zone(zone)) in lruvec_lru_size()
634 size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru); in lruvec_lru_size()
1866 unsigned int reclaim_clean_pages_from_list(struct zone *zone, in reclaim_clean_pages_from_list() argument
[all …]
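zone_reclaimable_pages() reads the _snapshot variants of the zone counters, which fold the per-cpu deltas into the global value instead of returning the possibly stale global counter alone. The distinction, sketched for the file LRU (helper name hypothetical):

    #include <linux/vmstat.h>

    /* Hypothetical helper: file-backed LRU pages in a zone. The _snapshot
     * readers sum per-cpu deltas for accuracy at extra cost; plain
     * zone_page_state() reads only the global counter. */
    static unsigned long zone_file_lru_sketch(struct zone *zone)
    {
        return zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
               zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
    }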
page_owner.c 282 pg_data_t *pgdat, struct zone *zone) in pagetypeinfo_showmixedcount_print() argument
288 unsigned long end_pfn = zone_end_pfn(zone); in pagetypeinfo_showmixedcount_print()
294 pfn = zone->zone_start_pfn; in pagetypeinfo_showmixedcount_print()
317 if (page_zone(page) != zone) in pagetypeinfo_showmixedcount_print()
358 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name); in pagetypeinfo_showmixedcount_print()
580 static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone) in init_pages_in_zone() argument
582 unsigned long pfn = zone->zone_start_pfn; in init_pages_in_zone()
583 unsigned long end_pfn = zone_end_pfn(zone); in init_pages_in_zone()
606 if (page_zone(page) != zone) in init_pages_in_zone()
646 pgdat->node_id, zone->name, count); in init_pages_in_zone()
[all …]
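The pfn walk in init_pages_in_zone() recurs throughout these results (kmemleak.c and huge_memory.c below use the same shape): zone pfn ranges may interleave, so every page is re-checked with page_zone() before use. The canonical walk, as a sketch:

    #include <linux/mm.h>

    /* Sketch of the canonical zone pfn walk: validate the pfn, then
     * confirm the page really belongs to this zone before touching it. */
    static void walk_zone_pfns_sketch(struct zone *zone)
    {
        unsigned long pfn, end_pfn = zone_end_pfn(zone);

        for (pfn = zone->zone_start_pfn; pfn < end_pfn; pfn++) {
            struct page *page;

            if (!pfn_valid(pfn))
                continue;
            page = pfn_to_page(pfn);
            if (page_zone(page) != zone)
                continue;
            /* ... inspect page ... */
        }
    }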
mm_init.c 32 struct zone *zone; in mminit_verify_zonelist() local
44 zone = &pgdat->node_zones[zoneid]; in mminit_verify_zonelist()
45 if (!populated_zone(zone)) in mminit_verify_zonelist()
51 zone->name); in mminit_verify_zonelist()
54 for_each_zone_zonelist(zone, z, zonelist, zoneid) in mminit_verify_zonelist()
55 pr_cont("%d:%s ", zone_to_nid(zone), zone->name); in mminit_verify_zonelist()
mlock.c 261 static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone) in __munlock_pagevec() argument
301 __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked); in __munlock_pagevec()
304 mod_zone_page_state(zone, NR_MLOCK, delta_munlocked); in __munlock_pagevec()
352 struct vm_area_struct *vma, struct zone *zone, in __munlock_pagevec_fill() argument
381 if (!page || page_zone(page) != zone) in __munlock_pagevec_fill()
432 struct zone *zone; in munlock_vma_pages_range() local
466 zone = page_zone(page); in munlock_vma_pages_range()
475 zone, start, end); in munlock_vma_pages_range()
476 __munlock_pagevec(&pvec, zone); in munlock_vma_pages_range()
shuffle.c 37 static struct page * __meminit shuffle_valid_page(struct zone *zone, in shuffle_valid_page() argument
52 if (page_zone(page) != zone) in shuffle_valid_page()
81 void __meminit __shuffle_zone(struct zone *z) in __shuffle_zone()
156 struct zone *z; in __shuffle_free_memory()
shuffle.h 20 extern void __shuffle_zone(struct zone *z);
21 static inline void __meminit shuffle_zone(struct zone *z) in shuffle_zone()
44 static inline void shuffle_zone(struct zone *z) in shuffle_zone()
highmem.c 109 struct zone *zone; in __nr_free_highpages() local
112 for_each_populated_zone(zone) { in __nr_free_highpages()
113 if (is_highmem(zone)) in __nr_free_highpages()
114 pages += zone_page_state(zone, NR_FREE_PAGES); in __nr_free_highpages()
memremap.c 255 struct zone *zone; in pagemap_range() local
257 zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE]; in pagemap_range()
258 move_pfn_range_to_zone(zone, PHYS_PFN(range->start), in pagemap_range()
cma.c 107 struct zone *zone; in cma_activate_area() local
119 zone = page_zone(pfn_to_page(base_pfn)); in cma_activate_area()
122 if (page_zone(pfn_to_page(pfn)) != zone) in cma_activate_area()
memblock.c 1281 __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone, in __next_mem_pfn_range_in_zone() argument
1284 int zone_nid = zone_to_nid(zone); in __next_mem_pfn_range_in_zone()
1300 if (zone->zone_start_pfn < epfn && spfn < epfn) { in __next_mem_pfn_range_in_zone()
1302 if (zone_end_pfn(zone) <= spfn) { in __next_mem_pfn_range_in_zone()
1308 *out_spfn = max(zone->zone_start_pfn, spfn); in __next_mem_pfn_range_in_zone()
1310 *out_epfn = min(zone_end_pfn(zone), epfn); in __next_mem_pfn_range_in_zone()
2088 struct zone *z; in reset_node_managed_pages()
oom_kill.c 260 struct zone *zone; in constrained_alloc() local
301 for_each_zone_zonelist_nodemask(zone, z, oc->zonelist, in constrained_alloc()
303 if (!cpuset_zone_allowed(zone, oc->gfp_mask)) in constrained_alloc()
kmemleak.c 1418 struct zone *zone; in kmemleak_scan() local
1459 for_each_populated_zone(zone) { in kmemleak_scan()
1460 unsigned long start_pfn = zone->zone_start_pfn; in kmemleak_scan()
1461 unsigned long end_pfn = zone_end_pfn(zone); in kmemleak_scan()
1471 if (page_zone(page) != zone) in kmemleak_scan()
swap.c 768 void lru_add_drain_cpu_zone(struct zone *zone) in lru_add_drain_cpu_zone() argument
772 drain_local_pages(zone); in lru_add_drain_cpu_zone()
page-writeback.c 277 struct zone *zone = pgdat->node_zones + z; in node_dirtyable_memory() local
279 if (!populated_zone(zone)) in node_dirtyable_memory()
282 nr_pages += zone_page_state(zone, NR_FREE_PAGES); in node_dirtyable_memory()
307 struct zone *z; in highmem_dirtyable_memory()
migrate.c 385 struct zone *oldzone, *newzone; in migrate_page_move_mapping()
2077 struct zone *zone = pgdat->node_zones + z; in migrate_balanced_pgdat() local
2079 if (!populated_zone(zone)) in migrate_balanced_pgdat()
2083 if (!zone_watermark_ok(zone, 0, in migrate_balanced_pgdat()
2084 high_wmark_pages(zone) + in migrate_balanced_pgdat()
huge_memory.c 2882 struct zone *zone; in split_huge_pages_all() local
2888 for_each_zone(zone) { in split_huge_pages_all()
2889 if (!managed_zone(zone)) in split_huge_pages_all()
2891 max_zone_pfn = zone_end_pfn(zone); in split_huge_pages_all()
2892 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) { in split_huge_pages_all()
2899 if (zone != page_zone(page)) in split_huge_pages_all()
mempolicy.c 1740 static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone) in apply_policy_zone() argument
1757 return zone >= dynamic_policy_zone; in apply_policy_zone()
1852 return z->zone ? zone_to_nid(z->zone) : node; in mempolicy_slab_node()
2427 polnid = zone_to_nid(z->zone); in mpol_misplaced()
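mempolicy_slab_node() and mpol_misplaced() above both guard the zoneref dereference with z->zone, because a zoneref past the end of a zonelist carries a NULL zone pointer. A usage sketch built from the same accessors (helper name hypothetical):

    #include <linux/gfp.h>
    #include <linux/mmzone.h>

    /* Hypothetical helper: nid of the first usable zone in a node's
     * zonelist for this gfp mask, falling back to the node itself. */
    static int preferred_nid_sketch(gfp_t gfp, int nid)
    {
        struct zoneref *z;

        z = first_zones_zonelist(node_zonelist(nid, gfp),
                                 gfp_zone(gfp), NULL);
        return z->zone ? zone_to_nid(z->zone) : nid;
    }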
