Searched refs:delta in /mm/ (Results 1 – 10 of 10) sorted by relevance
page_counter.c
    propagate_protected_usage():
        21   long delta;                                              (local)
        30   delta = protected - old_protected;
        31   if (delta)
        32   atomic_long_add(delta, &c->parent->children_min_usage);
        39   delta = protected - old_protected;
        40   if (delta)
        41   atomic_long_add(delta, &c->parent->children_low_usage);
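propagate_protected_usage() pushes only the signed change between the new and old protected value up to the parent counter, so the parent aggregate stays consistent without ever recounting the children. A minimal userspace sketch of that delta-propagation idea, using C11 atomics in place of the kernel's atomic_long_t (struct and function names here are illustrative, not the kernel's):

#include <stdatomic.h>
#include <stdio.h>

struct counter {
        atomic_long children_min_usage; /* parent-side aggregate */
        long min_usage;                 /* this node's last published value */
};

/* Publish a new protected value by adding only the change to the
 * parent aggregate, mirroring propagate_protected_usage(). */
static void propagate(struct counter *parent, struct counter *c, long protected_now)
{
        long delta = protected_now - c->min_usage;

        if (delta)
                atomic_fetch_add(&parent->children_min_usage, delta);
        c->min_usage = protected_now;
}

int main(void)
{
        struct counter parent = { .children_min_usage = 0 };
        struct counter child = { .min_usage = 0 };

        propagate(&parent, &child, 100);        /* +100 */
        propagate(&parent, &child, 60);         /* -40 */
        printf("aggregate = %ld\n", atomic_load(&parent.children_min_usage));   /* 60 */
        return 0;
}

Publishing the difference rather than the absolute value is what lets many children update one parent concurrently with a single atomic add each.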
vmstat.c
    __mod_zone_page_state():
        317  long delta)                                              (argument)
        324  x = delta + __this_cpu_read(*p);
    __mod_node_page_state():
        337  long delta)                                              (argument)
        345  VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
        346  delta >>= PAGE_SHIFT;
        349  x = delta + __this_cpu_read(*p);
    mod_zone_state():
        490  enum zone_stat_item item, long delta, int overstep_mode) (argument)
        512  n = delta + o;
    mod_zone_page_state():
        528  long delta)                                              (argument)
        530  mod_zone_state(zone, item, delta, 0);
    [all …]
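These helpers batch statistics updates in a cheap per-CPU differential and fold it into the shared zone or node counter only once it crosses a threshold. A userspace model of that scheme, with thread-local storage standing in for __this_cpu_read() and a fold threshold invented for the example (the kernel tunes it per zone):

#include <stdatomic.h>
#include <stdio.h>

#define STAT_THRESHOLD 64               /* fold point; invented for the example */

static atomic_long global_count;        /* the shared zone-wide counter */
static _Thread_local long pcpu_diff;    /* stands in for the per-CPU differential */

/* Model of __mod_zone_page_state(): accumulate delta locally and touch
 * the shared counter only when the local value drifts past the threshold. */
static void mod_state(long delta)
{
        long x = delta + pcpu_diff;

        if (x > STAT_THRESHOLD || x < -STAT_THRESHOLD) {
                atomic_fetch_add(&global_count, x);
                x = 0;
        }
        pcpu_diff = x;
}

int main(void)
{
        for (int i = 0; i < 100; i++)
                mod_state(1);
        atomic_fetch_add(&global_count, pcpu_diff);     /* drain the remainder */
        printf("global = %ld\n", atomic_load(&global_count));   /* 100 */
        return 0;
}

The global counter is therefore only approximately current between folds, which is the price paid for keeping the hot path free of shared-cacheline traffic.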
hugetlb.c
    100  static int hugetlb_acct_memory(struct hstate *h, long delta);
    hugepage_subpool_get_pages():
        160  long delta)                                              (argument)
        162  long ret = delta;
        170  if ((spool->used_hpages + delta) <= spool->max_hpages)
        171  spool->used_hpages += delta;
        180  if (delta > spool->rsv_hpages) {
        185  ret = delta - spool->rsv_hpages;
        189  spool->rsv_hpages -= delta;
    hugepage_subpool_put_pages():
        205  long delta)                                              (argument)
        207  long ret = delta;
    [all …]
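hugepage_subpool_get_pages() charges delta pages against a per-mount subpool and returns how much of the request still has to be charged globally, i.e. the part not covered by the subpool's reserve. A condensed model of that accounting (locking and the kernel's "-1 means no limit" cases omitted):

#include <stdio.h>

/* Simplified subpool, after hugetlbfs' struct hugepage_subpool. */
struct subpool {
        long max_hpages;        /* cap on pages drawn through this pool */
        long used_hpages;
        long rsv_hpages;        /* pages already reserved against the global pool */
};

/* Condensed hugepage_subpool_get_pages(): charge delta pages and return
 * how many still need global accounting; -1 on failure (kernel: -ENOMEM). */
static long subpool_get_pages(struct subpool *spool, long delta)
{
        long ret = delta;

        if (spool->used_hpages + delta > spool->max_hpages)
                return -1;
        spool->used_hpages += delta;

        if (delta > spool->rsv_hpages) {
                ret = delta - spool->rsv_hpages;        /* reserve covers only part */
                spool->rsv_hpages = 0;
        } else {
                ret = 0;                                /* fully covered by reserve */
                spool->rsv_hpages -= delta;
        }
        return ret;
}

int main(void)
{
        struct subpool sp = { .max_hpages = 100, .used_hpages = 0, .rsv_hpages = 10 };

        printf("uncovered = %ld\n", subpool_get_pages(&sp, 25));        /* 15 */
        return 0;
}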
slob.c
    slob_page_alloc():
        241  int delta = 0, units = SLOB_UNITS(size);                 (local)
        258  delta = aligned - cur;
        260  if (avail >= units + delta) { /* room enough? */
        263  if (delta) { /* need to fragment head to align? */
        265  set_slob(aligned, avail - delta, next);
        266  set_slob(cur, delta, aligned);
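In slob_page_alloc(), delta is the gap between the free-list cursor and the next properly aligned address; a block is taken only if it can absorb both the request and that gap, and a nonzero gap is split off as its own free fragment. A small sketch of the alignment computation (align_up() is an illustrative stand-in for the kernel's ALIGN()):

#include <stdint.h>
#include <stdio.h>

/* Round addr up to the next multiple of align (a power of two); the
 * same computation SLOB's ALIGN() does to find 'aligned'. */
static uintptr_t align_up(uintptr_t addr, uintptr_t align)
{
        return (addr + align - 1) & ~(align - 1);
}

int main(void)
{
        uintptr_t cur = 0x1003;                 /* free-list cursor */
        uintptr_t aligned = align_up(cur, 16);
        uintptr_t delta = aligned - cur;        /* head gap needed for alignment */

        printf("cur=%#lx aligned=%#lx delta=%lu\n",
               (unsigned long)cur, (unsigned long)aligned, (unsigned long)delta);
        /* SLOB takes this block only if avail >= units + delta, and a
         * nonzero head [cur, aligned) is split into its own fragment. */
        return 0;
}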
page_pinner.c
    check_longterm_pin():
        135  s64 now, delta = 0;                                      (local)
        144  delta = now - page_pinner->ts_usec;
        146  if (delta <= threshold_usec)
        150  record.elapsed = delta;
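check_longterm_pin() computes the pin's age as a microsecond delta against a stored timestamp and records it only once it exceeds a threshold. A userspace equivalent using CLOCK_MONOTONIC (the 2-second threshold is invented for the example):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define THRESHOLD_USEC 2000000LL        /* invented 2s threshold */

static int64_t now_usec(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

/* Model of check_longterm_pin(): report only pins whose age exceeds
 * the threshold; short-term pins fall through silently. */
static void check_longterm(int64_t pinned_ts_usec)
{
        int64_t delta = now_usec() - pinned_ts_usec;

        if (delta <= THRESHOLD_USEC)
                return;
        printf("long-term pin, elapsed %lld usec\n", (long long)delta);
}

int main(void)
{
        check_longterm(now_usec() - 3000000);   /* simulate a 3s-old pin */
        return 0;
}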
vmscan.c
    do_shrink_slab():
        445  unsigned long long delta;                                (local)
        471  delta = freeable >> priority;
        472  delta *= 4;
        473  do_div(delta, shrinker->seeks);
        480  delta = freeable / 2;
        483  total_scan += delta;
        504  if (delta < freeable / 4)
        516  freeable, delta, total_scan, priority);
    node_pagecache_reclaimable():
        4295 unsigned long delta = 0;                                 (local)
        4310 delta += node_page_state(pgdat, NR_FILE_DIRTY);
    [all …]
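do_shrink_slab() sizes a shrinker pass as delta = (freeable >> priority) * 4 / seeks: each priority level halves the target, while a higher seeks value (objects that are expensive to recreate) shrinks it further. A condensed model of just that arithmetic:

#include <stdio.h>

#define DEFAULT_SEEKS 2         /* kernel default for shrinker->seeks */

/* Condensed do_shrink_slab() scan-target math: halve the freeable count
 * per priority level, then scale by 4/seeks so caches whose objects are
 * expensive to recreate (higher seeks) are scanned more gently. */
static unsigned long long scan_delta(unsigned long long freeable,
                                     int priority, int seeks)
{
        unsigned long long delta;

        if (seeks) {
                delta = freeable >> priority;   /* priority 12: 1/4096 of it */
                delta *= 4;
                delta /= seeks;                 /* kernel uses do_div() here */
        } else {
                delta = freeable / 2;           /* seeks == 0: scan half outright */
        }
        return delta;
}

int main(void)
{
        printf("%llu\n", scan_delta(1ULL << 20, 12, DEFAULT_SEEKS));    /* 512 */
        return 0;
}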
mremap.c
    vma_expandable():
        858  static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)  (argument)
        860  unsigned long end = vma->vm_end + delta;
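vma_expandable() starts by computing the would-be end of the grown mapping; since the addition is unsigned, a wrapped result signals overflow. A sketch of that check (the real function goes on to verify nothing is mapped in the gap):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* First step of vma_expandable(): growing by delta must not wrap. */
static bool range_expandable(unsigned long vm_end, unsigned long delta)
{
        unsigned long end = vm_end + delta;

        if (end < vm_end)       /* unsigned wraparound: new end overflowed */
                return false;
        return true;
}

int main(void)
{
        printf("%d\n", range_expandable(ULONG_MAX - 4096, 8192));       /* 0 */
        return 0;
}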
compaction.c
    count_compact_events():
        34   static inline void count_compact_events(enum vm_event_item item, long delta)  (argument)
        36   count_vm_events(item, delta);
    40   #define count_compact_events(item, delta) do { } while (0)   (macro argument)
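This is the usual compile-out idiom: a real static inline when the feature is configured in, and an empty do { } while (0) macro otherwise, which stays safe in any statement position (for instance under an unbraced if). A standalone illustration, with EVENT_STATS as a made-up stand-in for the kernel's config switch:

#include <stdio.h>

/* EVENT_STATS is a made-up switch standing in for the kernel config. */
#ifdef EVENT_STATS
static inline void count_events(const char *item, long delta)
{
        printf("%s += %ld\n", item, delta);
}
#else
/* Empty statement that is still safe under an unbraced if/else. */
#define count_events(item, delta) do { } while (0)
#endif

int main(void)
{
        if (1)
                count_events("compact_stall", 1);       /* compiles either way */
        return 0;
}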
percpu.c
    setup_per_cpu_areas():
        3099 unsigned long delta;                                     (local)
        3113 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
        3115 __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
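setup_per_cpu_areas() records, for every CPU, the distance from the linker-placed per-CPU template section to that CPU's private copy; adding the offset to a template address then yields the CPU-local variable. A userspace sketch of the same pointer arithmetic (heap memory stands in for the allocated per-CPU chunk; the out-of-object arithmetic is the modeled trick, not portable C):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS   4
#define UNIT_SIZE 4096          /* illustrative per-CPU unit size */

static char template_section[UNIT_SIZE];        /* stands in for __per_cpu_start */
static uintptr_t per_cpu_offset[NR_CPUS];

int main(void)
{
        char *base = malloc((size_t)NR_CPUS * UNIT_SIZE);       /* pcpu_base_addr */
        uintptr_t delta = (uintptr_t)base - (uintptr_t)template_section;

        /* Same shape as setup_per_cpu_areas(): offset = base delta
         * plus each CPU's unit offset inside the allocated chunk. */
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                per_cpu_offset[cpu] = delta + (uintptr_t)cpu * UNIT_SIZE;

        /* A "per-CPU" variable at template offset 16, as seen by CPU 2: */
        char *cpu2_copy = template_section + 16 + per_cpu_offset[2];
        printf("cpu2 copy at %p (chunk base %p)\n", (void *)cpu2_copy, (void *)base);

        free(base);
        return 0;
}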
page-writeback.c
    wb_update_write_bandwidth():
        1122 long delta = avg - wb->avg_write_bandwidth;              (local)
        1123 WARN_ON_ONCE(atomic_long_add_return(delta,
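wb_update_write_bandwidth() keeps a global sum of per-device write bandwidths consistent by adding only the change in one device's average, warning if the sum ever goes non-positive. A small model of that bookkeeping (fprintf stands in for WARN_ON_ONCE; units are arbitrary):

#include <stdatomic.h>
#include <stdio.h>

static atomic_long total_write_bandwidth;       /* sum over all devices */

struct wb {
        long avg_write_bandwidth;
};

/* When one device's average moves, add only the difference so the
 * global sum always equals the sum of the per-device averages. */
static void update_avg(struct wb *wb, long avg)
{
        long delta = avg - wb->avg_write_bandwidth;
        long sum = atomic_fetch_add(&total_write_bandwidth, delta) + delta;

        if (sum <= 0)   /* kernel: WARN_ON_ONCE(atomic_long_add_return(...) <= 0) */
                fprintf(stderr, "global bandwidth sum went non-positive\n");
        wb->avg_write_bandwidth = avg;
}

int main(void)
{
        struct wb wb = { .avg_write_bandwidth = 0 };

        update_avg(&wb, 100);
        update_avg(&wb, 80);    /* global sum drops by 20 */
        printf("total = %ld\n", atomic_load(&total_write_bandwidth));   /* 80 */
        return 0;
}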