
Searched refs:zone (Results 1 – 25 of 30) sorted by relevance


/mm/
page_alloc.c
147 struct zone *zone; member
609 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) in page_outside_zone_boundaries() argument
617 seq = zone_span_seqbegin(zone); in page_outside_zone_boundaries()
618 start_pfn = zone->zone_start_pfn; in page_outside_zone_boundaries()
619 sp = zone->spanned_pages; in page_outside_zone_boundaries()
620 if (!zone_spans_pfn(zone, pfn)) in page_outside_zone_boundaries()
622 } while (zone_span_seqretry(zone, seq)); in page_outside_zone_boundaries()
626 pfn, zone_to_nid(zone), zone->name, in page_outside_zone_boundaries()
632 static int page_is_consistent(struct zone *zone, struct page *page) in page_is_consistent() argument
636 if (zone != page_zone(page)) in page_is_consistent()
[all …]
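The page_outside_zone_boundaries() excerpt above samples zone_start_pfn and spanned_pages inside a zone_span_seqbegin()/zone_span_seqretry() loop, so a concurrent span update (e.g. memory hotplug) forces a re-read. A minimal stand-alone sketch of that read-retry pattern follows; the struct and helpers are made-up stand-ins, and a real seqlock read side also needs memory barriers.

/*
 * Illustrative read-retry loop modelled on the excerpt; not kernel code.
 */
#include <stdbool.h>

struct span {
	unsigned int seq;            /* odd while a writer is updating */
	unsigned long start_pfn;
	unsigned long spanned_pages;
};

static bool pfn_outside_span(const struct span *s, unsigned long pfn)
{
	unsigned long start_pfn, end_pfn;
	unsigned int seq;

	do {
		seq = s->seq;                         /* zone_span_seqbegin() */
		start_pfn = s->start_pfn;
		end_pfn = start_pfn + s->spanned_pages;
	} while ((seq & 1) || seq != s->seq);         /* zone_span_seqretry() */

	return pfn < start_pfn || pfn >= end_pfn;
}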
compaction.c
165 void defer_compaction(struct zone *zone, int order) in defer_compaction() argument
167 zone->compact_considered = 0; in defer_compaction()
168 zone->compact_defer_shift++; in defer_compaction()
170 if (order < zone->compact_order_failed) in defer_compaction()
171 zone->compact_order_failed = order; in defer_compaction()
173 if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT) in defer_compaction()
174 zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT; in defer_compaction()
176 trace_mm_compaction_defer_compaction(zone, order); in defer_compaction()
180 bool compaction_deferred(struct zone *zone, int order) in compaction_deferred() argument
182 unsigned long defer_limit = 1UL << zone->compact_defer_shift; in compaction_deferred()
[all …]
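defer_compaction() and compaction_deferred() above implement an exponential backoff: each failed compaction doubles the number of later requests that will be skipped, capped at 2^COMPACT_MAX_DEFER_SHIFT. A simplified sketch of that backoff logic, using a stand-in struct rather than the kernel's struct zone and omitting tracepoints:

/*
 * Deferral backoff sketch; struct zone_defer stands in for the relevant
 * struct zone fields.
 */
#include <stdbool.h>

#define COMPACT_MAX_DEFER_SHIFT 6

struct zone_defer {
	unsigned int compact_considered;   /* requests skipped since last failure */
	unsigned int compact_defer_shift;  /* backoff exponent, capped below */
	int compact_order_failed;          /* lowest order seen to fail */
};

/* On failure: restart the skip counter and double the deferral window. */
static void defer_compaction(struct zone_defer *z, int order)
{
	z->compact_considered = 0;
	z->compact_defer_shift++;

	if (order < z->compact_order_failed)
		z->compact_order_failed = order;

	if (z->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		z->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* True while fewer than 2^compact_defer_shift requests have been skipped
 * for an order no smaller than the one that last failed. */
static bool compaction_deferred(struct zone_defer *z, int order)
{
	unsigned long defer_limit = 1UL << z->compact_defer_shift;

	if (order < z->compact_order_failed)
		return false;

	return ++z->compact_considered < defer_limit;
}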
vmstat.c
40 static void zero_zone_numa_counters(struct zone *zone) in zero_zone_numa_counters() argument
45 atomic_long_set(&zone->vm_numa_stat[item], 0); in zero_zone_numa_counters()
47 per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item] in zero_zone_numa_counters()
55 struct zone *zone; in zero_zones_numa_counters() local
57 for_each_populated_zone(zone) in zero_zones_numa_counters()
58 zero_zone_numa_counters(zone); in zero_zones_numa_counters()
172 int calculate_pressure_threshold(struct zone *zone) in calculate_pressure_threshold() argument
185 watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone); in calculate_pressure_threshold()
196 int calculate_normal_threshold(struct zone *zone) in calculate_normal_threshold() argument
231 mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT); in calculate_normal_threshold()
[all …]
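In the calculate_normal_threshold() excerpt, zone_managed_pages(zone) >> (27 - PAGE_SHIFT) expresses the zone's managed memory in 128 MB (2^27 byte) units, since a page is 2^PAGE_SHIFT bytes. A small worked example, assuming 4 KB pages (PAGE_SHIFT = 12 is an assumption for the illustration):

/*
 * pages >> (27 - PAGE_SHIFT) gives the zone size in 128 MB chunks.
 */
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long managed_pages = 1UL << 20;              /* 2^20 pages = 4 GB */
	unsigned long mem = managed_pages >> (27 - PAGE_SHIFT);

	printf("%lu units of 128 MB\n", mem);                 /* prints 32 */
	return 0;
}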
memory_hotplug.c
354 static unsigned long find_smallest_section_pfn(int nid, struct zone *zone, in find_smallest_section_pfn() argument
365 if (zone != page_zone(pfn_to_page(start_pfn))) in find_smallest_section_pfn()
375 static unsigned long find_biggest_section_pfn(int nid, struct zone *zone, in find_biggest_section_pfn() argument
390 if (zone != page_zone(pfn_to_page(pfn))) in find_biggest_section_pfn()
399 static void shrink_zone_span(struct zone *zone, unsigned long start_pfn, in shrink_zone_span() argument
403 int nid = zone_to_nid(zone); in shrink_zone_span()
405 zone_span_writelock(zone); in shrink_zone_span()
406 if (zone->zone_start_pfn == start_pfn) { in shrink_zone_span()
413 pfn = find_smallest_section_pfn(nid, zone, end_pfn, in shrink_zone_span()
414 zone_end_pfn(zone)); in shrink_zone_span()
[all …]
page_isolation.c
20 struct zone *zone = page_zone(page); in set_migratetype_isolate() local
24 spin_lock_irqsave(&zone->lock, flags); in set_migratetype_isolate()
32 spin_unlock_irqrestore(&zone->lock, flags); in set_migratetype_isolate()
40 unmovable = has_unmovable_pages(zone, page, migratetype, isol_flags); in set_migratetype_isolate()
46 zone->nr_isolate_pageblock++; in set_migratetype_isolate()
47 nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE, in set_migratetype_isolate()
50 __mod_zone_freepage_state(zone, -nr_pages, mt); in set_migratetype_isolate()
51 spin_unlock_irqrestore(&zone->lock, flags); in set_migratetype_isolate()
55 spin_unlock_irqrestore(&zone->lock, flags); in set_migratetype_isolate()
69 struct zone *zone; in unset_migratetype_isolate() local
[all …]
page_reporting.c
109 page_reporting_cycle(struct page_reporting_dev_info *prdev, struct zone *zone, in page_reporting_cycle() argument
113 struct free_area *area = &zone->free_area[order]; in page_reporting_cycle()
127 spin_lock_irq(&zone->lock); in page_reporting_cycle()
185 spin_unlock_irq(&zone->lock); in page_reporting_cycle()
197 spin_lock_irq(&zone->lock); in page_reporting_cycle()
217 spin_unlock_irq(&zone->lock); in page_reporting_cycle()
224 struct scatterlist *sgl, struct zone *zone) in page_reporting_process_zone() argument
231 watermark = low_wmark_pages(zone) + in page_reporting_process_zone()
238 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) in page_reporting_process_zone()
248 err = page_reporting_cycle(prdev, zone, order, mt, in page_reporting_process_zone()
[all …]
vmscan.c
323 unsigned long zone_reclaimable_pages(struct zone *zone) in zone_reclaimable_pages() argument
327 nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) + in zone_reclaimable_pages()
328 zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE); in zone_reclaimable_pages()
330 nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) + in zone_reclaimable_pages()
331 zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON); in zone_reclaimable_pages()
348 struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid]; in lruvec_lru_size() local
350 if (!managed_zone(zone)) in lruvec_lru_size()
356 size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru); in lruvec_lru_size()
1562 unsigned int reclaim_clean_pages_from_list(struct zone *zone, in reclaim_clean_pages_from_list() argument
1583 nr_reclaimed = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc, in reclaim_clean_pages_from_list()
[all …]
mmzone.c
31 struct zone *next_zone(struct zone *zone) in next_zone() argument
33 pg_data_t *pgdat = zone->zone_pgdat; in next_zone()
35 if (zone < pgdat->node_zones + MAX_NR_ZONES - 1) in next_zone()
36 zone++; in next_zone()
40 zone = pgdat->node_zones; in next_zone()
42 zone = NULL; in next_zone()
44 return zone; in next_zone()
70 (z->zone && !zref_in_nodemask(z, nodes))) in __next_zones_zonelist()
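next_zone() above advances through the current node's node_zones[] array and, when it runs out, moves on to the first zone of the next online node, returning NULL at the end. A toy version of that walk, with stand-in types in place of pg_data_t and struct zone and a plain next_online pointer instead of next_online_pgdat():

/*
 * Toy zone iterator modelled on the mmzone.c excerpt; not kernel code.
 */
#include <stddef.h>

#define TOY_MAX_ZONES 4

struct toy_pgdat;

struct toy_zone {
	struct toy_pgdat *pgdat;                  /* owning node */
};

struct toy_pgdat {
	struct toy_zone zones[TOY_MAX_ZONES];     /* node_zones[] stand-in */
	struct toy_pgdat *next_online;            /* next online node, or NULL */
};

static struct toy_zone *toy_next_zone(struct toy_zone *zone)
{
	struct toy_pgdat *pgdat = zone->pgdat;

	if (zone < pgdat->zones + TOY_MAX_ZONES - 1)
		zone++;                               /* next zone on this node */
	else if (pgdat->next_online)
		zone = pgdat->next_online->zones;     /* first zone of next node */
	else
		zone = NULL;                          /* iteration finished */

	return zone;
}

The kernel's for_each_zone() and for_each_populated_zone() iterators, which appear repeatedly in the results above, are loops built around this helper.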
page_owner.c
301 pg_data_t *pgdat, struct zone *zone) in pagetypeinfo_showmixedcount_print() argument
306 unsigned long pfn = zone->zone_start_pfn, block_end_pfn; in pagetypeinfo_showmixedcount_print()
307 unsigned long end_pfn = pfn + zone->spanned_pages; in pagetypeinfo_showmixedcount_print()
313 pfn = zone->zone_start_pfn; in pagetypeinfo_showmixedcount_print()
339 if (page_zone(page) != zone) in pagetypeinfo_showmixedcount_print()
380 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name); in pagetypeinfo_showmixedcount_print()
611 static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone) in init_pages_in_zone() argument
613 unsigned long pfn = zone->zone_start_pfn; in init_pages_in_zone()
614 unsigned long end_pfn = zone_end_pfn(zone); in init_pages_in_zone()
642 if (page_zone(page) != zone) in init_pages_in_zone()
[all …]
internal.h
200 unsigned long end_pfn, struct zone *zone);
203 unsigned long end_pfn, struct zone *zone) in pageblock_pfn_to_page() argument
205 if (zone->contiguous) in pageblock_pfn_to_page()
208 return __pageblock_pfn_to_page(start_pfn, end_pfn, zone); in pageblock_pfn_to_page()
222 extern void zone_pcp_update(struct zone *zone);
223 extern void zone_pcp_reset(struct zone *zone);
245 struct zone *zone; member
585 unsigned int reclaim_clean_pages_from_list(struct zone *zone,
658 void setup_zone_pageset(struct zone *zone);
mm_init.c
36 struct zone *zone; in mminit_verify_zonelist() local
48 zone = &pgdat->node_zones[zoneid]; in mminit_verify_zonelist()
49 if (!populated_zone(zone)) in mminit_verify_zonelist()
55 zone->name); in mminit_verify_zonelist()
58 for_each_zone_zonelist(zone, z, zonelist, zoneid) in mminit_verify_zonelist()
59 pr_cont("%d:%s ", zone_to_nid(zone), zone->name); in mminit_verify_zonelist()
mlock.c
296 static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone) in __munlock_pagevec() argument
307 spin_lock_irq(&zone->zone_pgdat->lru_lock); in __munlock_pagevec()
333 __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked); in __munlock_pagevec()
334 spin_unlock_irq(&zone->zone_pgdat->lru_lock); in __munlock_pagevec()
381 struct vm_area_struct *vma, struct zone *zone, in __munlock_pagevec_fill() argument
410 if (!page || page_zone(page) != zone) in __munlock_pagevec_fill()
463 struct zone *zone; in munlock_vma_pages_range() local
504 zone = page_zone(page); in munlock_vma_pages_range()
513 zone, start, end); in munlock_vma_pages_range()
514 __munlock_pagevec(&pvec, zone); in munlock_vma_pages_range()
shuffle.c
37 static struct page * __meminit shuffle_valid_page(struct zone *zone, in shuffle_valid_page() argument
52 if (page_zone(page) != zone) in shuffle_valid_page()
81 void __meminit __shuffle_zone(struct zone *z) in __shuffle_zone()
156 struct zone *z; in __shuffle_free_memory()
shuffle.h
20 extern void __shuffle_zone(struct zone *z);
21 static inline void shuffle_zone(struct zone *z) in shuffle_zone()
44 static inline void shuffle_zone(struct zone *z) in shuffle_zone()
highmem.c
115 struct zone *zone; in nr_free_highpages() local
118 for_each_populated_zone(zone) { in nr_free_highpages()
119 if (is_highmem(zone)) in nr_free_highpages()
120 pages += zone_page_state(zone, NR_FREE_PAGES); in nr_free_highpages()
memremap.c
276 struct zone *zone; in pagemap_range() local
278 zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE]; in pagemap_range()
279 move_pfn_range_to_zone(zone, PHYS_PFN(range->start), in pagemap_range()
cma.c
109 struct zone *zone; in cma_activate_area() local
121 zone = page_zone(pfn_to_page(base_pfn)); in cma_activate_area()
124 if (page_zone(pfn_to_page(pfn)) != zone) in cma_activate_area()
memblock.c
1254 __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone, in __next_mem_pfn_range_in_zone() argument
1257 int zone_nid = zone_to_nid(zone); in __next_mem_pfn_range_in_zone()
1273 if (zone->zone_start_pfn < epfn && spfn < epfn) { in __next_mem_pfn_range_in_zone()
1275 if (zone_end_pfn(zone) <= spfn) { in __next_mem_pfn_range_in_zone()
1281 *out_spfn = max(zone->zone_start_pfn, spfn); in __next_mem_pfn_range_in_zone()
1283 *out_epfn = min(zone_end_pfn(zone), epfn); in __next_mem_pfn_range_in_zone()
1977 struct zone *z; in reset_node_managed_pages()
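__next_mem_pfn_range_in_zone() above clamps each memblock range to the zone's span: ranges that end before the zone starts or begin after it ends are skipped, otherwise the result is max(zone_start_pfn, spfn) and min(zone_end_pfn, epfn). A stand-alone sketch of that intersection, with the zone bounds passed in explicitly (this is not the kernel function):

/*
 * Intersect a memblock PFN range [spfn, epfn) with a zone span; sketch only.
 */
#include <stdbool.h>

static bool clamp_range_to_zone(unsigned long zone_start, unsigned long zone_end,
				unsigned long spfn, unsigned long epfn,
				unsigned long *out_spfn, unsigned long *out_epfn)
{
	/* No overlap: the range ends before the zone or starts after it. */
	if (epfn <= zone_start || zone_end <= spfn)
		return false;

	*out_spfn = spfn > zone_start ? spfn : zone_start;   /* max() */
	*out_epfn = epfn < zone_end ? epfn : zone_end;       /* min() */
	return true;
}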
kmemleak.c
1410 struct zone *zone; in kmemleak_scan() local
1451 for_each_populated_zone(zone) { in kmemleak_scan()
1452 unsigned long start_pfn = zone->zone_start_pfn; in kmemleak_scan()
1453 unsigned long end_pfn = zone_end_pfn(zone); in kmemleak_scan()
1463 if (page_zone(page) != zone) in kmemleak_scan()
oom_kill.c
258 struct zone *zone; in constrained_alloc() local
299 for_each_zone_zonelist_nodemask(zone, z, oc->zonelist, in constrained_alloc()
301 if (!cpuset_zone_allowed(zone, oc->gfp_mask)) in constrained_alloc()
madvise.c
913 struct zone *zone; in madvise_inject_error() local
952 for_each_populated_zone(zone) in madvise_inject_error()
953 drain_all_pages(zone); in madvise_inject_error()
swap.c
814 void lru_add_drain_cpu_zone(struct zone *zone) in lru_add_drain_cpu_zone() argument
818 drain_local_pages(zone); in lru_add_drain_cpu_zone()
page-writeback.c
286 struct zone *zone = pgdat->node_zones + z; in node_dirtyable_memory() local
288 if (!populated_zone(zone)) in node_dirtyable_memory()
291 nr_pages += zone_page_state(zone, NR_FREE_PAGES); in node_dirtyable_memory()
316 struct zone *z; in highmem_dirtyable_memory()
migrate.c
386 struct zone *oldzone, *newzone; in migrate_page_move_mapping()
1983 struct zone *zone = pgdat->node_zones + z; in migrate_balanced_pgdat() local
1985 if (!populated_zone(zone)) in migrate_balanced_pgdat()
1989 if (!zone_watermark_ok(zone, 0, in migrate_balanced_pgdat()
1990 high_wmark_pages(zone) + in migrate_balanced_pgdat()
khugepaged.c
2347 struct zone *zone; in set_recommended_min_free_kbytes() local
2351 for_each_populated_zone(zone) { in set_recommended_min_free_kbytes()
2356 if (zone_idx(zone) > gfp_zone(GFP_USER)) in set_recommended_min_free_kbytes()
