
Searched refs:zid (Results 1 – 10 of 10) sorted by relevance

/kernel/linux/linux-5.10/include/linux/
mm_inline.h
  29  enum lru_list lru, enum zone_type zid,  in __update_lru_size() (argument)
  35  __mod_zone_page_state(&pgdat->node_zones[zid],  in __update_lru_size()
  40  enum lru_list lru, enum zone_type zid,  in update_lru_size() (argument)
  43  __update_lru_size(lruvec, lru, zid, nr_pages);  in update_lru_size()
  45  mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);  in update_lru_size()
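
The mm_inline.h hits show the two-level bookkeeping around zid: __update_lru_size() adjusts the per-zone page-state counter, and update_lru_size() additionally forwards the same delta to the memcg side. Below is a minimal userspace sketch of that pattern; the enums, counter arrays, and the memcg_enabled flag are simplified stand-ins, not the kernel's types.

#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-ins for kernel types; illustration only. */
enum zone_type { ZONE_DMA, ZONE_NORMAL, ZONE_MOVABLE, MAX_NR_ZONES };
enum lru_list  { LRU_INACTIVE_FILE, LRU_ACTIVE_FILE, NR_LRU_LISTS };

static long zone_nr_lru_pages[MAX_NR_ZONES];             /* per-zone page-state counter */
static long memcg_lru_size[MAX_NR_ZONES][NR_LRU_LISTS];  /* memcg-side counter */
static bool memcg_enabled = true;

/* Mirrors the shape of __update_lru_size(): adjust the zone counter. */
static void __update_lru_size(enum lru_list lru, enum zone_type zid, long nr_pages)
{
    (void)lru;
    zone_nr_lru_pages[zid] += nr_pages;
}

/* Mirrors update_lru_size(): always update the zone counter, and also the
 * memcg counter when memory cgroups are in use. */
static void update_lru_size(enum lru_list lru, enum zone_type zid, long nr_pages)
{
    __update_lru_size(lru, zid, nr_pages);
    if (memcg_enabled)
        memcg_lru_size[zid][lru] += nr_pages;
}

int main(void)
{
    update_lru_size(LRU_INACTIVE_FILE, ZONE_NORMAL, 32);   /* pages added    */
    update_lru_size(LRU_INACTIVE_FILE, ZONE_NORMAL, -8);   /* pages isolated */
    printf("zone pages: %ld, memcg pages: %ld\n",
           zone_nr_lru_pages[ZONE_NORMAL],
           memcg_lru_size[ZONE_NORMAL][LRU_INACTIVE_FILE]);
    return 0;
}
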
vmstat.h
  134  #define __count_zid_vm_events(item, zid, delta) \  (argument)
  135      __count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
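
The __count_zid_vm_events() macro relies on the per-zone event items being laid out contiguously in zone order, so that item##_NORMAL - ZONE_NORMAL + zid lands on the entry for zone zid. A small worked sketch of that offset arithmetic, using simplified enums rather than the kernel's real item list:

#include <stdio.h>

/* Simplified zone ids; illustration only. */
enum zone_type { ZONE_DMA, ZONE_NORMAL, ZONE_MOVABLE, MAX_NR_ZONES };

/* Per-zone variants of one event, laid out contiguously in zone order. */
enum vm_event { PGALLOC_DMA, PGALLOC_NORMAL, PGALLOC_MOVABLE, NR_VM_EVENTS };

static unsigned long vm_events[NR_VM_EVENTS];

/* Same arithmetic as __count_zid_vm_events(): start from the _NORMAL entry
 * and shift by (zid - ZONE_NORMAL) to reach the zone's own entry. */
#define count_zid_vm_events(item, zid, delta) \
    (vm_events[item##_NORMAL - ZONE_NORMAL + (zid)] += (delta))

int main(void)
{
    count_zid_vm_events(PGALLOC, ZONE_DMA, 4);
    count_zid_vm_events(PGALLOC, ZONE_MOVABLE, 2);
    printf("PGALLOC_DMA=%lu PGALLOC_MOVABLE=%lu\n",
           vm_events[PGALLOC_DMA], vm_events[PGALLOC_MOVABLE]);
    return 0;
}
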
memcontrol.h
  649  int zid, int nr_pages);
/kernel/linux/linux-5.10/include/trace/events/
vmscan.h
  54  TP_PROTO(int nid, int zid, int order),
  56  TP_ARGS(nid, zid, order),
  60  __field( int, zid )
  66  __entry->zid = zid;
  77  TP_PROTO(int nid, int zid, int order, gfp_t gfp_flags),
  79  TP_ARGS(nid, zid, order, gfp_flags),
  83  __field( int, zid )
  90  __entry->zid = zid;
/kernel/linux/linux-5.10/mm/
vmscan.c
  229   int zid;  in lruvec_lru_size() (local)
  233   for (zid = 0; zid <= zone_idx && zid < MAX_NR_ZONES; zid++) {  in lruvec_lru_size()
  234   struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];  in lruvec_lru_size()
  245   for (zid = 0; zid <= zone_idx && zid < MAX_NR_ZONES; zid++) {  in lruvec_lru_size()
  246   struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];  in lruvec_lru_size()
  252   size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);  in lruvec_lru_size()
  1563  int zid;  in update_lru_sizes() (local)
  1565  for (zid = 0; zid < MAX_NR_ZONES; zid++) {  in update_lru_sizes()
  1566  if (!nr_zone_taken[zid])  in update_lru_sizes()
  1569  update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);  in update_lru_sizes()
  [all …]
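
Both vmscan.c hits follow the same zone-loop shape: lruvec_lru_size() sums per-zone LRU counts only for zones with zid <= zone_idx, and update_lru_sizes() walks all zones to subtract the pages just isolated. A compact sketch of the bounded loop, with made-up sizes standing in for real zone state:

#include <stdio.h>

enum { MAX_NR_ZONES = 4 };  /* simplified; the real value is config-dependent */

/* Pretend per-zone LRU sizes for one LRU list. */
static long zone_lru_size[MAX_NR_ZONES] = { 100, 400, 250, 0 };

/* Mirrors the loop shape in lruvec_lru_size(): only zones whose index is
 * <= zone_idx are eligible for the current reclaim context. */
static long lruvec_lru_size(int zone_idx)
{
    long size = 0;
    for (int zid = 0; zid <= zone_idx && zid < MAX_NR_ZONES; zid++)
        size += zone_lru_size[zid];
    return size;
}

int main(void)
{
    /* A request constrained to zones 0..1 ignores higher zones entirely. */
    printf("eligible LRU pages up to zid 1: %ld\n", lruvec_lru_size(1));
    printf("eligible LRU pages up to zid 3: %ld\n", lruvec_lru_size(3));
    return 0;
}
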
memory_hotplug.c
  733  int zid;  in default_kernel_zone_for_pfn() (local)
  735  for (zid = 0; zid <= ZONE_NORMAL; zid++) {  in default_kernel_zone_for_pfn()
  736  struct zone *zone = &pgdat->node_zones[zid];  in default_kernel_zone_for_pfn()
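
default_kernel_zone_for_pfn() scans the kernel zones in zid order (0 up to ZONE_NORMAL) and picks the first one that covers the pfn. The sketch below illustrates that scan under the simplifying assumption that a zone is just a pfn span; the zone layout and the containment check are invented for illustration.

#include <stdio.h>

enum zone_type { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_MOVABLE, MAX_NR_ZONES };

/* Simplified zone descriptor: just a pfn span. */
struct zone { unsigned long start_pfn, end_pfn; };

/* Hypothetical node layout for illustration. */
static struct zone node_zones[MAX_NR_ZONES] = {
    [ZONE_DMA]    = { 0,       4096    },
    [ZONE_DMA32]  = { 4096,    1048576 },
    [ZONE_NORMAL] = { 1048576, 4194304 },
};

/* Mirrors the loop in default_kernel_zone_for_pfn(): scan kernel zones
 * (zid 0..ZONE_NORMAL) and return the first whose span contains the pfn;
 * fall back to ZONE_NORMAL otherwise. */
static enum zone_type default_kernel_zone_for_pfn(unsigned long pfn)
{
    for (int zid = 0; zid <= ZONE_NORMAL; zid++) {
        struct zone *zone = &node_zones[zid];
        if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
            return (enum zone_type)zid;
    }
    return ZONE_NORMAL;
}

int main(void)
{
    printf("pfn 1000    -> zid %d\n", default_kernel_zone_for_pfn(1000));
    printf("pfn 2000000 -> zid %d\n", default_kernel_zone_for_pfn(2000000));
    return 0;
}
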
vmstat.c
  1635  int zid;  in is_zone_first_populated() (local)
  1637  for (zid = 0; zid < MAX_NR_ZONES; zid++) {  in is_zone_first_populated()
  1638  struct zone *compare = &pgdat->node_zones[zid];  in is_zone_first_populated()
page_alloc.c
  1469  int nid, zid;  in init_reserved_page() (local)
  1477  for (zid = 0; zid < MAX_NR_ZONES; zid++) {  in init_reserved_page()
  1478  struct zone *zone = &pgdat->node_zones[zid];  in init_reserved_page()
  1483  __init_single_page(pfn_to_page(pfn), pfn, zid, nid);  in init_reserved_page()
  1785  int zid = zone_idx(zone);  in deferred_init_pages() (local)
  1797  __init_single_page(page, pfn, zid, nid);  in deferred_init_pages()
  1923  int zid, max_threads;  in deferred_init_memmap() (local)
  1951  for (zid = 0; zid < MAX_NR_ZONES; zid++) {  in deferred_init_memmap()
  1952  zone = pgdat->node_zones + zid;  in deferred_init_memmap()
  1982  WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));  in deferred_init_memmap()
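
The page_alloc.c hits all revolve around stamping each struct page with the zid of the zone that owns its pfn during (deferred) memmap init. A minimal sketch of that "find the owning zone, then record it in the page" pattern, with heavily simplified zone and page structures:

#include <stdio.h>

enum { MAX_NR_ZONES = 3 };

/* Simplified stand-ins: a zone is a pfn span, a page records its zone/node. */
struct zone { unsigned long start_pfn, end_pfn; };
struct page { int zid, nid; };

static struct zone node_zones[MAX_NR_ZONES] = {
    { 0, 1024 }, { 1024, 8192 }, { 8192, 16384 },
};
static struct page pages[16384];

/* Mirrors the shape of __init_single_page(): record which zone and node
 * the page belongs to at init time. */
static void init_single_page(struct page *page, int zid, int nid)
{
    page->zid = zid;
    page->nid = nid;
}

/* Mirrors init_reserved_page(): find the zone whose span covers pfn, then
 * initialize the page with that zone index. Assumes the pfn falls inside
 * one of the spans above. */
static void init_reserved_page(unsigned long pfn, int nid)
{
    int zid;
    for (zid = 0; zid < MAX_NR_ZONES; zid++) {
        struct zone *zone = &node_zones[zid];
        if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
            break;
    }
    init_single_page(&pages[pfn], zid, nid);
}

int main(void)
{
    init_reserved_page(2000, 0);
    printf("pfn 2000 -> zid %d nid %d\n", pages[2000].zid, pages[2000].nid);
    return 0;
}
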
memcontrol.c
  1411  int zid, int nr_pages)  in mem_cgroup_update_lru_size() (argument)
  1425  lru_size = &mz->lru_zone_size[zid][lru];  in mem_cgroup_update_lru_size()
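
The memcontrol.c hit shows where the memcg copy of these counters lives: a two-dimensional array indexed first by zone id and then by LRU list, which mem_cgroup_get_zone_lru_size() reads back in the lruvec_lru_size() loop above. A tiny sketch of that layout, with assumed array sizes and simplified helpers:

#include <stdio.h>

enum { MAX_NR_ZONES = 4, NR_LRU_LISTS = 5 };  /* simplified sizes */

/* Per-memcg, per-node state: counts split by (zone id, LRU list),
 * mirroring mz->lru_zone_size[zid][lru] in mem_cgroup_update_lru_size(). */
struct mem_cgroup_per_node {
    long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
};

/* Apply a signed delta to the counter selected by (zid, lru). */
static void update_lru_size(struct mem_cgroup_per_node *mz,
                            int lru, int zid, long nr_pages)
{
    long *lru_size = &mz->lru_zone_size[zid][lru];
    *lru_size += nr_pages;
}

/* Read one (zid, lru) counter back, as reclaim does per eligible zone. */
static long get_zone_lru_size(struct mem_cgroup_per_node *mz, int lru, int zid)
{
    return mz->lru_zone_size[zid][lru];
}

int main(void)
{
    struct mem_cgroup_per_node mz = { 0 };
    update_lru_size(&mz, 2 /* lru */, 1 /* zid */, 64);
    printf("lru 2 in zone 1: %ld\n", get_zone_lru_size(&mz, 2, 1));
    return 0;
}
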
/kernel/linux/patches/linux-5.10/yangfan_patch/
others.patch
  649  - __init_single_page(pfn_to_page(pfn), pfn, zid, nid);
  650  + __init_single_page(pfn_to_page(pfn), pfn, zid, nid, true);
  658  - __init_single_page(page, pfn, zid, nid);
  659  + __init_single_page(page, pfn, zid, nid, true);