
Lines Matching +full:atomic +full:- +full:threshold +full:- +full:us

1 // SPDX-License-Identifier: GPL-2.0-only
11 * Copyright (C) 2008-2014 Christoph Lameter
34 #define NUMA_STATS_THRESHOLD (U16_MAX - 2)
45 atomic_long_set(&zone->vm_numa_stat[item], 0); in zero_zone_numa_counters()
47 per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item] in zero_zone_numa_counters()
122 ret[i] += this->event[i]; in sum_vm_events()
128 * The result is unavoidably approximate - it can change
151 count_vm_events(i, fold_state->event[i]); in vm_events_fold_cpu()
152 fold_state->event[i] = 0; in vm_events_fold_cpu()
174 int threshold; in calculate_pressure_threshold() local
181 * value looks fine. The pressure threshold is a reduced value such in calculate_pressure_threshold()
185 watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone); in calculate_pressure_threshold()
186 threshold = max(1, (int)(watermark_distance / num_online_cpus())); in calculate_pressure_threshold()
189 * Maximum threshold is 125 in calculate_pressure_threshold()
191 threshold = min(125, threshold); in calculate_pressure_threshold()
193 return threshold; in calculate_pressure_threshold()
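The pressure threshold computed above is simply the low-to-min watermark gap divided across the online CPUs, clamped to at least 1 and at most 125. A minimal standalone sketch of that arithmetic (an illustrative helper, not the kernel function):

/* Illustrative sketch of the calculation in calculate_pressure_threshold(). */
static int pressure_threshold_sketch(unsigned long low_wmark,
                                     unsigned long min_wmark,
                                     unsigned int online_cpus)
{
        unsigned long watermark_distance = low_wmark - min_wmark;
        int threshold = watermark_distance / online_cpus;

        if (threshold < 1)
                threshold = 1;
        if (threshold > 125)            /* maximum threshold is 125 */
                threshold = 125;
        return threshold;
}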
198 int threshold; in calculate_normal_threshold() local
202 * The threshold scales with the number of processors and the amount in calculate_normal_threshold()
209 * Threshold    Processors      (fls)   Zonesize        fls(mem+1) in calculate_normal_threshold()
210 * ------------------------------------------------------------------ in calculate_normal_threshold()
211 * 8            1               1       0.9-1 GB        4 in calculate_normal_threshold()
212 * 16           2               2       0.9-1 GB        4 in calculate_normal_threshold()
213 * 20           2               2       1-2 GB          5 in calculate_normal_threshold()
214 * 24           2               2       2-4 GB          6 in calculate_normal_threshold()
215 * 28           2               2       4-8 GB          7 in calculate_normal_threshold()
216 * 32           2               2       8-16 GB         8 in calculate_normal_threshold()
218 * 30           4               3       2-4 GB          5 in calculate_normal_threshold()
219 * 48           4               3       8-16 GB         8 in calculate_normal_threshold()
220 * 32           8               4       1-2 GB          4 in calculate_normal_threshold()
221 * 32           8               4       0.9-1 GB        4 in calculate_normal_threshold()
224 * 70           64              7       2-4 GB          5 in calculate_normal_threshold()
225 * 84           64              7       4-8 GB          6 in calculate_normal_threshold()
226 * 108          512             9       4-8 GB          6 in calculate_normal_threshold()
227 * 125          1024            10      8-16 GB         8 in calculate_normal_threshold()
228 * 125          1024            10      16-32 GB        9 in calculate_normal_threshold()
231 mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT); in calculate_normal_threshold()
233 threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem)); in calculate_normal_threshold()
236 * Maximum threshold is 125 in calculate_normal_threshold()
238 threshold = min(125, threshold); in calculate_normal_threshold()
240 return threshold; in calculate_normal_threshold()
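The normal threshold scales as 2 * fls(online CPUs) * (1 + fls(zone size in 128 MB units)), capped at 125, which is what produces the sample table above. A self-contained sketch with one worked number (illustrative names; fls_sketch() stands in for the kernel's fls()):

/* fls_sketch(): position of the highest set bit, 1-based; 0 for x == 0. */
static int fls_sketch(unsigned long x)
{
        int r = 0;

        while (x) {
                x >>= 1;
                r++;
        }
        return r;
}

/* Illustrative sketch of the calculation in calculate_normal_threshold(). */
static int normal_threshold_sketch(unsigned long managed_pages,
                                   unsigned int online_cpus,
                                   unsigned int page_shift)
{
        /* zone size expressed in 128 MB units, as on line 231 above */
        unsigned long mem = managed_pages >> (27 - page_shift);
        int threshold = 2 * fls_sketch(online_cpus) * (1 + fls_sketch(mem));

        return threshold > 125 ? 125 : threshold;
}

/*
 * Example: 2 CPUs and a 1.5 GB zone with 4 KB pages:
 *   managed_pages = 393216, mem = 393216 >> 15 = 12,
 *   threshold = 2 * fls(2) * (1 + fls(12)) = 2 * 2 * 5 = 20,
 * matching the "20 ... 1-2 GB" row in the table above.
 */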
251 int threshold; in refresh_zone_stat_thresholds() local
256 per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0; in refresh_zone_stat_thresholds()
261 struct pglist_data *pgdat = zone->zone_pgdat; in refresh_zone_stat_thresholds()
264 threshold = calculate_normal_threshold(zone); in refresh_zone_stat_thresholds()
269 per_cpu_ptr(zone->pageset, cpu)->stat_threshold in refresh_zone_stat_thresholds()
270 = threshold; in refresh_zone_stat_thresholds()
272 /* Base nodestat threshold on the largest populated zone. */ in refresh_zone_stat_thresholds()
273 pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold; in refresh_zone_stat_thresholds()
274 per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold in refresh_zone_stat_thresholds()
275 = max(threshold, pgdat_threshold); in refresh_zone_stat_thresholds()
283 tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone); in refresh_zone_stat_thresholds()
284 max_drift = num_online_cpus() * threshold; in refresh_zone_stat_thresholds()
286 zone->percpu_drift_mark = high_wmark_pages(zone) + in refresh_zone_stat_thresholds()
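The drift mark being set just above (the assignment continues on an unmatched line of the file) bounds how far the per-cpu diffs can collectively lag the global counter. As a worked figure: with 64 online CPUs and a threshold of 125, max_drift = 64 * 125 = 8000 pages, roughly 31 MB with 4 KB pages, so watermark checks that close to the limit fall back to an exact per-cpu-summed snapshot of the counter rather than trusting the cached global value.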
296 int threshold; in set_pgdat_percpu_threshold() local
299 for (i = 0; i < pgdat->nr_zones; i++) { in set_pgdat_percpu_threshold()
300 zone = &pgdat->node_zones[i]; in set_pgdat_percpu_threshold()
301 if (!zone->percpu_drift_mark) in set_pgdat_percpu_threshold()
304 threshold = (*calculate_pressure)(zone); in set_pgdat_percpu_threshold()
306 per_cpu_ptr(zone->pageset, cpu)->stat_threshold in set_pgdat_percpu_threshold()
307 = threshold; in set_pgdat_percpu_threshold()
319 struct per_cpu_pageset __percpu *pcp = zone->pageset; in __mod_zone_page_state()
320 s8 __percpu *p = pcp->vm_stat_diff + item; in __mod_zone_page_state()
326 t = __this_cpu_read(pcp->stat_threshold); in __mod_zone_page_state()
339 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats; in __mod_node_page_state()
340 s8 __percpu *p = pcp->vm_node_stat_diff + item; in __mod_node_page_state()
345 VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1)); in __mod_node_page_state()
351 t = __this_cpu_read(pcp->stat_threshold); in __mod_node_page_state()
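In both __mod_zone_page_state() and __mod_node_page_state(), the unmatched lines surrounding these fragments fold the per-cpu diff into the shared counter once it passes stat_threshold. A simplified, single-threaded sketch of that pattern (made-up struct, not the kernel types):

/* Simplified sketch of the fold-on-threshold pattern used above. */
struct counter_sketch {
        long global;            /* stands in for the atomic_long_t counter */
        signed char diff;       /* stands in for a vm_stat_diff[] slot */
        signed char threshold;  /* stands in for pcp->stat_threshold */
};

static void mod_state_sketch(struct counter_sketch *c, long delta)
{
        long x = c->diff + delta;

        if (x > c->threshold || x < -c->threshold) {
                c->global += x;         /* fold into the shared counter */
                x = 0;
        }
        c->diff = x;
}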
377 * Some processors have inc/dec instructions that are atomic vs an interrupt.
386 struct per_cpu_pageset __percpu *pcp = zone->pageset; in __inc_zone_state()
387 s8 __percpu *p = pcp->vm_stat_diff + item; in __inc_zone_state()
391 t = __this_cpu_read(pcp->stat_threshold); in __inc_zone_state()
396 __this_cpu_write(*p, -overstep); in __inc_zone_state()
402 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats; in __inc_node_state()
403 s8 __percpu *p = pcp->vm_node_stat_diff + item; in __inc_node_state()
409 t = __this_cpu_read(pcp->stat_threshold); in __inc_node_state()
414 __this_cpu_write(*p, -overstep); in __inc_node_state()
432 struct per_cpu_pageset __percpu *pcp = zone->pageset; in __dec_zone_state()
433 s8 __percpu *p = pcp->vm_stat_diff + item; in __dec_zone_state()
437 t = __this_cpu_read(pcp->stat_threshold); in __dec_zone_state()
438 if (unlikely(v < - t)) { in __dec_zone_state()
441 zone_page_state_add(v - overstep, zone, item); in __dec_zone_state()
448 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats; in __dec_node_state()
449 s8 __percpu *p = pcp->vm_node_stat_diff + item; in __dec_node_state()
455 t = __this_cpu_read(pcp->stat_threshold); in __dec_node_state()
456 if (unlikely(v < - t)) { in __dec_node_state()
459 node_page_state_add(v - overstep, pgdat, item); in __dec_node_state()
481 * mod_state() modifies the zone counter state through atomic per cpu
486 * 1 Overstepping half of threshold
487 * -1 Overstepping minus half of threshold
492 struct per_cpu_pageset __percpu *pcp = zone->pageset; in mod_zone_state()
493 s8 __percpu *p = pcp->vm_stat_diff + item; in mod_zone_state()
501 * a counter threshold to the wrong cpu if we get in mod_zone_state()
503 * counter update will apply the threshold again and in mod_zone_state()
504 * therefore bring the counter under the threshold again. in mod_zone_state()
509 t = this_cpu_read(pcp->stat_threshold); in mod_zone_state()
519 n = -os; in mod_zone_state()
542 mod_zone_state(page_zone(page), item, -1, -1); in dec_zone_page_state()
549 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats; in mod_node_state()
550 s8 __percpu *p = pcp->vm_node_stat_diff + item; in mod_node_state()
554 VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1)); in mod_node_state()
563 * a counter threshold to the wrong cpu if we get in mod_node_state()
565 * counter update will apply the threshold again and in mod_node_state()
566 * therefore bring the counter under the threshold again. in mod_node_state()
571 t = this_cpu_read(pcp->stat_threshold); in mod_node_state()
581 n = -os; in mod_node_state()
609 mod_node_state(page_pgdat(page), item, -1, -1); in dec_node_page_state()
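mod_zone_state() and mod_node_state() do the same folding, but through a cmpxchg retry loop so the update stays safe against interrupts without disabling them; overstep_mode pushes the remainder half a threshold past zero, as the comment block above describes. A schematic sketch using GCC/Clang atomic builtins in place of this_cpu_cmpxchg() (illustrative only, not the kernel per-cpu machinery):

/* Schematic sketch of the cmpxchg loop in mod_zone_state()/mod_node_state(). */
static void mod_state_cmpxchg_sketch(long *global, signed char *diff,
                                     signed char threshold,
                                     long delta, int overstep_mode)
{
        signed char o, n;
        long z;

        do {
                z = 0;
                o = __atomic_load_n(diff, __ATOMIC_RELAXED);
                n = o + delta;
                if (n > threshold || n < -threshold) {
                        long os = overstep_mode * (threshold / 2);

                        /* the overflow is accounted to the shared counter */
                        z = n + os;
                        n = -os;
                }
        } while (!__atomic_compare_exchange_n(diff, &o, n, 0,
                                              __ATOMIC_RELAXED,
                                              __ATOMIC_RELAXED));

        if (z)
                __atomic_fetch_add(global, z, __ATOMIC_RELAXED);
}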
772 struct per_cpu_pageset __percpu *p = zone->pageset; in refresh_cpu_vm_stats()
777 v = this_cpu_xchg(p->vm_stat_diff[i], 0); in refresh_cpu_vm_stats()
780 atomic_long_add(v, &zone->vm_stat[i]); in refresh_cpu_vm_stats()
784 __this_cpu_write(p->expire, 3); in refresh_cpu_vm_stats()
792 v = this_cpu_xchg(p->vm_numa_stat_diff[i], 0); in refresh_cpu_vm_stats()
795 atomic_long_add(v, &zone->vm_numa_stat[i]); in refresh_cpu_vm_stats()
797 __this_cpu_write(p->expire, 3); in refresh_cpu_vm_stats()
810 if (!__this_cpu_read(p->expire) || in refresh_cpu_vm_stats()
811 !__this_cpu_read(p->pcp.count)) in refresh_cpu_vm_stats()
818 __this_cpu_write(p->expire, 0); in refresh_cpu_vm_stats()
822 if (__this_cpu_dec_return(p->expire)) in refresh_cpu_vm_stats()
825 if (__this_cpu_read(p->pcp.count)) { in refresh_cpu_vm_stats()
826 drain_zone_pages(zone, this_cpu_ptr(&p->pcp)); in refresh_cpu_vm_stats()
834 struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats; in refresh_cpu_vm_stats()
839 v = this_cpu_xchg(p->vm_node_stat_diff[i], 0); in refresh_cpu_vm_stats()
841 atomic_long_add(v, &pgdat->vm_stat[i]); in refresh_cpu_vm_stats()
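refresh_cpu_vm_stats() drains the calling CPU's pending diffs with an atomic exchange so concurrent local updates are not lost, and re-arms the three-tick "expire" countdown whenever it finds work; once the countdown reaches zero, leftover per-cpu pages are handed back via drain_zone_pages(). A minimal sketch of the exchange-and-fold step (builtin atomics stand in for this_cpu_xchg()):

/* Minimal sketch of the xchg-and-fold step in refresh_cpu_vm_stats(). */
static void fold_diff_sketch(long *global, signed char *diff)
{
        signed char v = __atomic_exchange_n(diff, 0, __ATOMIC_RELAXED);

        if (v)
                __atomic_fetch_add(global, v, __ATOMIC_RELAXED);
}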
875 p = per_cpu_ptr(zone->pageset, cpu); in cpu_vm_stats_fold()
878 if (p->vm_stat_diff[i]) { in cpu_vm_stats_fold()
881 v = p->vm_stat_diff[i]; in cpu_vm_stats_fold()
882 p->vm_stat_diff[i] = 0; in cpu_vm_stats_fold()
883 atomic_long_add(v, &zone->vm_stat[i]); in cpu_vm_stats_fold()
889 if (p->vm_numa_stat_diff[i]) { in cpu_vm_stats_fold()
892 v = p->vm_numa_stat_diff[i]; in cpu_vm_stats_fold()
893 p->vm_numa_stat_diff[i] = 0; in cpu_vm_stats_fold()
894 atomic_long_add(v, &zone->vm_numa_stat[i]); in cpu_vm_stats_fold()
903 p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu); in cpu_vm_stats_fold()
906 if (p->vm_node_stat_diff[i]) { in cpu_vm_stats_fold()
909 v = p->vm_node_stat_diff[i]; in cpu_vm_stats_fold()
910 p->vm_node_stat_diff[i] = 0; in cpu_vm_stats_fold()
911 atomic_long_add(v, &pgdat->vm_stat[i]); in cpu_vm_stats_fold()
925 * pset->vm_stat_diff[] exist.
932 if (pset->vm_stat_diff[i]) { in drain_zonestat()
933 int v = pset->vm_stat_diff[i]; in drain_zonestat()
934 pset->vm_stat_diff[i] = 0; in drain_zonestat()
935 atomic_long_add(v, &zone->vm_stat[i]); in drain_zonestat()
941 if (pset->vm_numa_stat_diff[i]) { in drain_zonestat()
942 int v = pset->vm_numa_stat_diff[i]; in drain_zonestat()
944 pset->vm_numa_stat_diff[i] = 0; in drain_zonestat()
945 atomic_long_add(v, &zone->vm_numa_stat[i]); in drain_zonestat()
956 struct per_cpu_pageset __percpu *pcp = zone->pageset; in __inc_numa_state()
957 u16 __percpu *p = pcp->vm_numa_stat_diff + item; in __inc_numa_state()
976 struct zone *zones = NODE_DATA(node)->node_zones; in sum_zone_node_page_state()
993 struct zone *zones = NODE_DATA(node)->node_zones; in sum_zone_numa_state()
1009 long x = atomic_long_read(&pgdat->vm_stat[item]); in node_page_state_pages()
1048 info->free_pages = 0; in fill_contig_page_info()
1049 info->free_blocks_total = 0; in fill_contig_page_info()
1050 info->free_blocks_suitable = 0; in fill_contig_page_info()
1056 blocks = zone->free_area[order].nr_free; in fill_contig_page_info()
1057 info->free_blocks_total += blocks; in fill_contig_page_info()
1060 info->free_pages += blocks << order; in fill_contig_page_info()
1064 info->free_blocks_suitable += blocks << in fill_contig_page_info()
1065 (order - suitable_order); in fill_contig_page_info()
1083 if (!info->free_blocks_total) in __fragmentation_index()
1087 if (info->free_blocks_suitable) in __fragmentation_index()
1088 return -1000; in __fragmentation_index()
1096 return 1000 - div_u64( (1000+(div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_total); in __fragmentation_index()
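Worked example of the index returned above: asking for an order-3 block (requested = 8 pages) from a zone with 800 free pages spread over 400 free blocks, none of them order 3 or larger, gives 1000 - (1000 + 800 * 1000 / 8) / 400 = 1000 - 252 = 748, i.e. an index of roughly 0.75, so the failure would be mostly due to fragmentation rather than lack of memory.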
1112 return div_u64((info.free_pages - in extfrag_for_order()
1396 --node; in frag_start()
1422 struct zone *node_zones = pgdat->node_zones; in walk_zones_in_node()
1425 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) { in walk_zones_in_node()
1430 spin_lock_irqsave(&zone->lock, flags); in walk_zones_in_node()
1433 spin_unlock_irqrestore(&zone->lock, flags); in walk_zones_in_node()
1444 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name); in frag_show_print()
1446 seq_printf(m, "%6lu ", zone->free_area[order].nr_free); in frag_show_print()
1467 pgdat->node_id, in pagetypeinfo_showfree_print()
1468 zone->name, in pagetypeinfo_showfree_print()
1476 area = &(zone->free_area[order]); in pagetypeinfo_showfree_print()
1478 list_for_each(curr, &area->free_list[mtype]) { in pagetypeinfo_showfree_print()
1494 spin_unlock_irq(&zone->lock); in pagetypeinfo_showfree_print()
1496 spin_lock_irq(&zone->lock); in pagetypeinfo_showfree_print()
1509 seq_printf(m, "%-43s ", "Free pages count per migrate type at order"); in pagetypeinfo_showfree()
1524 unsigned long start_pfn = zone->zone_start_pfn; in pagetypeinfo_showblockcount_print()
1545 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name); in pagetypeinfo_showblockcount_print()
1557 seq_printf(m, "\n%-23s", "Number of blocks type "); in pagetypeinfo_showblockcount()
1583 seq_printf(m, "\n%-23s", "Number of mixed blocks "); in pagetypeinfo_showmixedcount()
1602 if (!node_state(pgdat->node_id, N_MEMORY)) in pagetypeinfo_show()
1634 struct zone *compare = &pgdat->node_zones[zid]; in is_zone_first_populated()
1647 seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name); in zoneinfo_show_print()
1649 seq_printf(m, "\n per-node stats"); in zoneinfo_show_print()
1651 seq_printf(m, "\n %-12s %lu", node_stat_name(i), in zoneinfo_show_print()
1667 zone->spanned_pages, in zoneinfo_show_print()
1668 zone->present_pages, in zoneinfo_show_print()
1673 zone->lowmem_reserve[0]); in zoneinfo_show_print()
1674 for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++) in zoneinfo_show_print()
1675 seq_printf(m, ", %ld", zone->lowmem_reserve[i]); in zoneinfo_show_print()
1685 seq_printf(m, "\n %-12s %lu", zone_stat_name(i), in zoneinfo_show_print()
1690 seq_printf(m, "\n %-12s %lu", numa_stat_name(i), in zoneinfo_show_print()
1698 pageset = per_cpu_ptr(zone->pageset, i); in zoneinfo_show_print()
1705 pageset->pcp.count, in zoneinfo_show_print()
1706 pageset->pcp.high, in zoneinfo_show_print()
1707 pageset->pcp.batch); in zoneinfo_show_print()
1709 seq_printf(m, "\n vm stats threshold: %d", in zoneinfo_show_print()
1710 pageset->stat_threshold); in zoneinfo_show_print()
1716 pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES, in zoneinfo_show_print()
1717 zone->zone_start_pfn); in zoneinfo_show_print()
1759 m->private = v; in vmstat_start()
1761 return ERR_PTR(-ENOMEM); in vmstat_start()
1782 v[PGPGIN] /= 2; /* sectors -> kbytes */ in vmstat_start()
1785 return (unsigned long *)m->private + *pos; in vmstat_start()
1793 return (unsigned long *)m->private + *pos; in vmstat_next()
1799 unsigned long off = l - (unsigned long *)m->private; in vmstat_show()
1805 if (off == NR_VMSTAT_ITEMS - 1) { in vmstat_show()
1807 * We've come to the end - add any deprecated counters to avoid in vmstat_show()
1817 kfree(m->private); in vmstat_stop()
1818 m->private = NULL; in vmstat_stop()
1866 err = -EINVAL; in vmstat_refresh()
1875 err = -EINVAL; in vmstat_refresh()
1917 struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu); in need_update()
1919 BUILD_BUG_ON(sizeof(p->vm_stat_diff[0]) != 1); in need_update()
1921 BUILD_BUG_ON(sizeof(p->vm_numa_stat_diff[0]) != 2); in need_update()
1927 if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS * in need_update()
1928 sizeof(p->vm_stat_diff[0]))) in need_update()
1931 if (memchr_inv(p->vm_numa_stat_diff, 0, NR_VM_NUMA_STAT_ITEMS * in need_update()
1932 sizeof(p->vm_numa_stat_diff[0]))) in need_update()
1959 * vmstat_shepherd will take care of that for us. in quiet_vmstat()
2090 if (info->free_pages == 0) in unusable_free_index()
2100 return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages); in unusable_free_index()
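Worked example of the unusable-free index above: for an order-3 request in a zone with 1000 free pages, of which 50 free blocks are order 3 or larger (50 << 3 = 400 usable pages), the index is (1000 - 400) * 1000 / 1000 = 600, meaning 60% of the free memory is unusable for that allocation size.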
2112 pgdat->node_id, in unusable_show_print()
2113 zone->name); in unusable_show_print()
2137 if (!node_state(pgdat->node_id, N_MEMORY)) in unusable_show()
2164 pgdat->node_id, in extfrag_show_print()
2165 zone->name); in extfrag_show_print()