Searched refs:z (references to the identifier z), results 1 – 15 of 15, sorted by relevance

/mm/
mmzone.c:55 struct zoneref *next_zones_zonelist(struct zoneref *z, in next_zones_zonelist() argument
65 while (zonelist_zone_idx(z) > highest_zoneidx) in next_zones_zonelist()
66 z++; in next_zones_zonelist()
68 while (zonelist_zone_idx(z) > highest_zoneidx || in next_zones_zonelist()
69 (z->zone && !zref_in_nodemask(z, nodes))) in next_zones_zonelist()
70 z++; in next_zones_zonelist()
72 *zone = zonelist_zone(z); in next_zones_zonelist()
73 return z; in next_zones_zonelist()
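
A note on the mmzone.c hits: next_zones_zonelist() is the helper behind the kernel's for_each_zone_zonelist*() iteration macros seen in most of the other results. A minimal user-space sketch of the pattern, with simplified stand-in types and the nodemask filtering omitted:

    #include <stddef.h>

    /* Stand-ins for the kernel types: a zonelist is an array of
     * zonerefs terminated by an entry whose zone pointer is NULL. */
    struct zone { int node; };

    struct zoneref {
        struct zone *zone;  /* NULL marks the end of the list */
        int zone_idx;       /* cached zone index, e.g. ZONE_NORMAL */
    };

    /* Skip entries whose zone index exceeds the allowed ceiling,
     * mirroring the nodemask-free fast path shown above. */
    static struct zoneref *next_zones_zonelist(struct zoneref *z,
                                               int highest_zoneidx)
    {
        while (z->zone && z->zone_idx > highest_zoneidx)
            z++;
        return z;
    }

    /* for_each_zone_zonelist() is a macro over this helper; the
     * open-coded equivalent of the loops in the results below: */
    static void walk_zonelist(struct zoneref *zrefs, int highest_zoneidx)
    {
        struct zoneref *z = next_zones_zonelist(zrefs, highest_zoneidx);
        struct zone *zone;

        for (; (zone = z->zone) != NULL;
             z = next_zones_zonelist(z + 1, highest_zoneidx))
            (void)zone;     /* per-zone work goes here */
    }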
oom_kill.c:203 struct zoneref *z; in constrained_alloc() local
234 for_each_zone_zonelist_nodemask(zone, z, zonelist, in constrained_alloc()
579 struct zoneref *z; in oom_zonelist_trylock() local
584 for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) in oom_zonelist_trylock()
594 for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) in oom_zonelist_trylock()
609 struct zoneref *z; in oom_zonelist_unlock() local
613 for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) in oom_zonelist_unlock()
nobootmem.c:150 struct zone *z; in reset_node_managed_pages() local
152 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) in reset_node_managed_pages()
153 z->managed_pages = 0; in reset_node_managed_pages()
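
The nobootmem.c hit (and the identical bootmem.c one below) shows the other common role of z in /mm/: a plain cursor over a node's fixed node_zones[] array. A self-contained sketch with stand-in types:

    #define MAX_NR_ZONES 4  /* illustrative; the real value is config-dependent */

    struct zone {
        unsigned long managed_pages;
    };

    struct pglist_data {
        struct zone node_zones[MAX_NR_ZONES];
    };

    /* node_zones is embedded in pglist_data, so a pointer walk from
     * the first zone to node_zones + MAX_NR_ZONES visits every zone
     * of the node, populated or not. */
    static void reset_node_managed_pages(struct pglist_data *pgdat)
    {
        struct zone *z;

        for (z = pgdat->node_zones;
             z < pgdat->node_zones + MAX_NR_ZONES; z++)
            z->managed_pages = 0;
    }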
vmstat.c:321 long o, n, t, z; in mod_state() local
324 z = 0; /* overflow to zone counters */ in mod_state()
345 z = n + os; in mod_state()
350 if (z) in mod_state()
351 zone_page_state_add(z, zone, item); in mod_state()
577 void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags) in zone_statistics() argument
579 if (z->zone_pgdat == preferred_zone->zone_pgdat) { in zone_statistics()
580 __inc_zone_state(z, NUMA_HIT); in zone_statistics()
582 __inc_zone_state(z, NUMA_MISS); in zone_statistics()
585 if (z->node == ((flags & __GFP_OTHER_NODE) ? in zone_statistics()
[all …]
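
In the vmstat.c hits, z is not a zone but the overflow folded into the zone-wide counter: mod_state() accumulates small deltas in a per-CPU diff (o = old, n = new, t = threshold) and only touches the shared counter once the diff oversteps the threshold. A user-space model of that fold, with the per-CPU slot replaced by one C11 atomic and the kernel's overstep bias omitted:

    #include <stdatomic.h>

    static _Atomic long cpu_diff;      /* stand-in for pcp->vm_stat_diff */
    static _Atomic long zone_counter;  /* stand-in for the zone-wide counter */
    #define THRESHOLD 32               /* stand-in for pcp->stat_threshold */

    static void mod_state(long delta)
    {
        long o, n, z;

        do {
            z = 0;                     /* overflow to fold, if any */
            o = atomic_load(&cpu_diff);
            n = delta + o;
            if (n > THRESHOLD || n < -THRESHOLD) {
                z = n;                 /* fold the whole diff... */
                n = 0;                 /* ...and reset the per-CPU slot */
            }
        } while (!atomic_compare_exchange_weak(&cpu_diff, &o, n));

        if (z)
            atomic_fetch_add(&zone_counter, z);
    }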
page_alloc.c:1743 static bool __zone_watermark_ok(struct zone *z, unsigned int order, in __zone_watermark_ok() argument
1760 free_cma = zone_page_state(z, NR_FREE_CMA_PAGES); in __zone_watermark_ok()
1763 if (free_pages - free_cma <= min + z->lowmem_reserve[classzone_idx]) in __zone_watermark_ok()
1767 free_pages -= z->free_area[o].nr_free << o; in __zone_watermark_ok()
1778 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in zone_watermark_ok() argument
1781 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, in zone_watermark_ok()
1782 zone_page_state(z, NR_FREE_PAGES)); in zone_watermark_ok()
1785 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, in zone_watermark_ok_safe() argument
1788 long free_pages = zone_page_state(z, NR_FREE_PAGES); in zone_watermark_ok_safe()
1790 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark) in zone_watermark_ok_safe()
[all …]
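
The page_alloc.c hits are the zone watermark check. Stripped of the CMA and alloc_flags adjustments, the logic is: order-0 free pages must clear the mark plus the lowmem reserve of the allocation's class zone, and for a higher-order request, blocks too small to help are discarded while the mark is halved per order. A simplified model with stand-in types:

    #include <stdbool.h>

    #define MAX_ORDER 11
    #define MAX_NR_ZONES 4

    /* Only the fields the check reads; sizes are illustrative. */
    struct zone {
        long lowmem_reserve[MAX_NR_ZONES];
        struct { unsigned long nr_free; } free_area[MAX_ORDER];
    };

    static bool zone_watermark_ok(struct zone *z, unsigned int order,
                                  long mark, int classzone_idx,
                                  long free_pages)
    {
        long min = mark;
        unsigned int o;

        /* Order-0 test: enough free pages over mark + reserve? */
        if (free_pages <= min + z->lowmem_reserve[classzone_idx])
            return false;

        for (o = 0; o < order; o++) {
            /* Blocks of order o cannot satisfy this request. */
            free_pages -= z->free_area[o].nr_free << o;
            min >>= 1;      /* demand less of the larger blocks */
            if (free_pages <= min)
                return false;
        }
        return true;
    }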
bootmem.c:249 struct zone *z; in reset_node_managed_pages() local
251 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) in reset_node_managed_pages()
252 z->managed_pages = 0; in reset_node_managed_pages()
mm_init.c:34 struct zoneref *z; in mminit_verify_zonelist() local
55 for_each_zone_zonelist(zone, z, zonelist, zoneid) { in mminit_verify_zonelist()
memory_hotplug.c:579 unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */ in shrink_zone_span() local
580 unsigned long zone_end_pfn = z; in shrink_zone_span()
1072 struct zone *z; in reset_node_present_pages() local
1074 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) in reset_node_present_pages()
1075 z->present_pages = 0; in reset_node_present_pages()
hugetlb.c:562 struct zoneref *z; in dequeue_huge_page_vma() local
583 for_each_zone_zonelist_nodemask(zone, z, zonelist, in dequeue_huge_page_vma()
748 struct zone *z; in alloc_gigantic_page() local
750 z = NODE_DATA(nid)->node_zones; in alloc_gigantic_page()
751 for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) { in alloc_gigantic_page()
752 spin_lock_irqsave(&z->lock, flags); in alloc_gigantic_page()
754 pfn = ALIGN(z->zone_start_pfn, nr_pages); in alloc_gigantic_page()
755 while (zone_spans_last_pfn(z, pfn, nr_pages)) { in alloc_gigantic_page()
764 spin_unlock_irqrestore(&z->lock, flags); in alloc_gigantic_page()
768 spin_lock_irqsave(&z->lock, flags); in alloc_gigantic_page()
[all …]
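
The hugetlb.c hits use z both ways: as a zoneref cursor in dequeue_huge_page_vma() and as a plain zone cursor in alloc_gigantic_page(), which probes each zone of a node for a naturally aligned run of free page frames. A sketch of that scan, with the zone locking dropped and a stub standing in for the kernel's contiguous-range allocator:

    #include <stdbool.h>

    #define MAX_NR_ZONES 4
    /* a must be a power of two, as nr_pages is for a gigantic page */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    struct zone {
        unsigned long zone_start_pfn;
        unsigned long spanned_pages;
    };

    /* Does [pfn, pfn + nr_pages) still fall inside the zone span? */
    static bool zone_spans_last_pfn(const struct zone *z,
                                    unsigned long pfn, unsigned long nr_pages)
    {
        return pfn + nr_pages <= z->zone_start_pfn + z->spanned_pages;
    }

    /* Stub for the kernel's alloc_contig_range()-based helper. */
    static bool try_alloc_range(unsigned long pfn, unsigned long nr_pages)
    {
        (void)pfn; (void)nr_pages;
        return false;
    }

    /* Probe nr_pages-aligned windows in every zone of the node. */
    static unsigned long find_gigantic_range(struct zone *zones,
                                             unsigned long nr_pages)
    {
        struct zone *z;

        for (z = zones; z < zones + MAX_NR_ZONES; z++) {
            unsigned long pfn = ALIGN(z->zone_start_pfn, nr_pages);

            while (zone_spans_last_pfn(z, pfn, nr_pages)) {
                if (try_alloc_range(pfn, nr_pages))
                    return pfn;
                pfn += nr_pages;    /* next aligned window */
            }
        }
        return 0;   /* nothing found */
    }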
migrate.c:1558 int z; in migrate_balanced_pgdat() local
1559 for (z = pgdat->nr_zones - 1; z >= 0; z--) { in migrate_balanced_pgdat()
1560 struct zone *zone = pgdat->node_zones + z; in migrate_balanced_pgdat()
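
migrate_balanced_pgdat() walks the target node's zones top-down, from the highest populated index to zero, because migration prefers the highest zone with headroom. The shape of that loop with stand-in types (the real balance test uses the watermark check sketched earlier; the test here is illustrative only):

    #include <stddef.h>

    #define MAX_NR_ZONES 4

    struct zone { unsigned long free_pages; };

    struct pglist_data {
        int nr_zones;   /* highest populated zone index + 1 */
        struct zone node_zones[MAX_NR_ZONES];
    };

    static struct zone *highest_zone_with_room(struct pglist_data *pgdat,
                                               unsigned long wanted)
    {
        int z;

        for (z = pgdat->nr_zones - 1; z >= 0; z--) {
            struct zone *zone = pgdat->node_zones + z;

            if (zone->free_pages >= wanted) /* illustrative test only */
                return zone;
        }
        return NULL;    /* node has no zone with room */
    }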
vmscan.c:2414 struct zoneref *z; in shrink_zones() local
2438 for_each_zone_zonelist_nodemask(zone, z, zonelist, in shrink_zones()
2468 zonelist_zone_idx(z) <= requested_highidx && in shrink_zones()
2646 struct zoneref *z; in throttle_direct_reclaim() local
2681 for_each_zone_zonelist_nodemask(zone, z, zonelist, in throttle_direct_reclaim()
compaction.c:1332 struct zoneref *z; in try_to_compact_pages() local
1349 for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx, in try_to_compact_pages()
page-writeback.c:203 struct zone *z = &NODE_DATA(node)->node_zones[ZONE_HIGHMEM]; in highmem_dirtyable_memory() local
205 x += zone_dirtyable_memory(z); in highmem_dirtyable_memory()
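
In page-writeback.c, z is the single ZONE_HIGHMEM entry of each node's node_zones[], summed over all nodes. A sketch of the highmem_dirtyable_memory() loop; the node iteration (the kernel visits only N_HIGH_MEMORY nodes) and the dirtyable calculation are simplified stand-ins:

    #define MAX_NUMNODES 4      /* illustrative */
    #define MAX_NR_ZONES 4
    #define ZONE_HIGHMEM 2      /* illustrative index into node_zones[] */

    struct zone { unsigned long dirtyable; };

    struct pglist_data { struct zone node_zones[MAX_NR_ZONES]; };

    static struct pglist_data node_data[MAX_NUMNODES];
    #define NODE_DATA(nid) (&node_data[nid])

    static unsigned long highmem_dirtyable_memory(void)
    {
        unsigned long x = 0;
        int node;

        /* Plain walk over all nodes stands in for the kernel's
         * for_each_node_state(node, N_HIGH_MEMORY). */
        for (node = 0; node < MAX_NUMNODES; node++) {
            struct zone *z = &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];

            x += z->dirtyable;  /* stand-in for zone_dirtyable_memory(z) */
        }
        return x;
    }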
slab.c:2748 #define cache_free_debugcheck(x,objp,z) (objp) argument
2998 struct zoneref *z; in fallback_alloc() local
3019 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { in fallback_alloc()
slub.c:1666 struct zoneref *z; in get_any_partial() local
1697 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { in get_any_partial()
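
Both the slab.c and slub.c hits gate their zonelist walk with gfp_zone(flags), which maps allocation flags to the highest zone index the walk may visit. A deliberately simplified model of that mapping (the kernel derives it from a packed bit table; the flag values here are illustrative):

    enum zone_type { ZONE_DMA, ZONE_NORMAL, ZONE_HIGHMEM, ZONE_MOVABLE };

    #define __GFP_DMA       0x01u
    #define __GFP_HIGHMEM   0x02u
    #define __GFP_MOVABLE   0x08u

    static enum zone_type gfp_zone(unsigned int flags)
    {
        if (flags & __GFP_DMA)
            return ZONE_DMA;        /* constrained to the lowest zone */
        if (flags & __GFP_MOVABLE)
            return ZONE_MOVABLE;
        if (flags & __GFP_HIGHMEM)
            return ZONE_HIGHMEM;
        return ZONE_NORMAL;         /* default ceiling */
    }

The returned ceiling is exactly the highest_zoneidx that next_zones_zonelist() enforces in the first sketch above.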