/mm/
readahead.c
    257  static unsigned long get_init_ra_size(unsigned long size, unsigned long max)  in get_init_ra_size() argument
    261  if (newsize <= max / 32)  in get_init_ra_size()
    263  else if (newsize <= max / 4)  in get_init_ra_size()
    266  newsize = max;  in get_init_ra_size()
    276  unsigned long max)  in get_next_ra_size() argument
    280  if (cur < max / 16)  in get_next_ra_size()
    282  if (cur <= max / 2)  in get_next_ra_size()
    284  return max;  in get_next_ra_size()
    333  pgoff_t offset, unsigned long max)  in count_history_pages() argument
    338  head = page_cache_prev_miss(mapping, offset - 1, max);  in count_history_pages()
    [all …]

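The readahead.c hits sketch the window-sizing heuristics: get_init_ra_size() picks an initial window from the request size, and get_next_ra_size() ramps it up as sequential access continues. Below is a minimal userspace sketch of that ramp-up, assuming the elided parts of both functions follow the pattern the visible lines suggest; roundup_pow2() stands in for the kernel's roundup_pow_of_two().

    #include <stdio.h>

    /* Round up to the next power of two; stands in for roundup_pow_of_two(). */
    static unsigned long roundup_pow2(unsigned long n)
    {
        unsigned long p = 1;

        while (p < n)
            p <<= 1;
        return p;
    }

    /* First window: scale the rounded request against the device maximum. */
    static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
    {
        unsigned long newsize = roundup_pow2(size);

        if (newsize <= max / 32)
            newsize = newsize * 4;
        else if (newsize <= max / 4)
            newsize = newsize * 2;
        else
            newsize = max;
        return newsize;
    }

    /* Later windows: grow fast while small, double mid-range, cap at max. */
    static unsigned long get_next_ra_size(unsigned long cur, unsigned long max)
    {
        if (cur < max / 16)
            return 4 * cur;
        if (cur <= max / 2)
            return 2 * cur;
        return max;
    }

    int main(void)
    {
        unsigned long ra = get_init_ra_size(3, 256);

        while (ra < 256) {
            printf("window: %lu pages\n", ra);
            ra = get_next_ra_size(ra, 256);
        }
        printf("window: %lu pages (capped)\n", ra);
        return 0;
    }
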
page_counter.c
    122  if (new > c->max) {  in page_counter_try_charge()
    195  old = xchg(&counter->max, nr_pages);  in page_counter_set_max()
    200  counter->max = old;  in page_counter_set_max()
    248  int page_counter_memparse(const char *buf, const char *max,  in page_counter_memparse() argument
    254  if (!strcmp(buf, max)) {  in page_counter_memparse()

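The page_counter_set_max() lines show a lockless limit update: publish the new limit with xchg(), then re-check usage and roll the old limit back if a concurrent charge raced past it. A sketch of that idiom with C11 atomics; the charge path is simplified and the function names here are illustrative, not the kernel's exact API.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct page_counter {
        atomic_long  usage;
        atomic_ulong max;
    };

    /* Charge pages, undoing the charge when the limit would be exceeded. */
    static bool counter_try_charge(struct page_counter *c, long nr)
    {
        long new = atomic_fetch_add(&c->usage, nr) + nr;

        if ((unsigned long)new > atomic_load(&c->max)) {
            atomic_fetch_sub(&c->usage, nr);   /* undo the speculative charge */
            return false;
        }
        return true;
    }

    /*
     * Lower (or raise) the limit: publish it first with an exchange, then
     * re-check usage. If a concurrent charge slipped past, restore the old
     * limit and retry, mirroring the xchg()/rollback visible above.
     */
    static int counter_set_max(struct page_counter *c, unsigned long nr)
    {
        for (;;) {
            long usage = atomic_load(&c->usage);
            unsigned long old;

            if (usage > (long)nr)
                return -1;                     /* -EBUSY in the kernel */

            old = atomic_exchange(&c->max, nr);

            if (atomic_load(&c->usage) <= usage)
                return 0;

            atomic_store(&c->max, old);        /* raced with a charge */
        }
    }

    int main(void)
    {
        struct page_counter c = { .usage = 0, .max = 100 };

        printf("charge 80: %s\n", counter_try_charge(&c, 80) ? "ok" : "fail");
        printf("set max 50: %s\n", counter_set_max(&c, 50) ? "busy" : "ok");
        return 0;
    }
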
memblock.c
    117  .memory.max = INIT_MEMBLOCK_REGIONS,
    122  .reserved.max = INIT_MEMBLOCK_RESERVED_REGIONS,
    128  .physmem.max = INIT_PHYSMEM_REGIONS,
    285  end = max(start, end);  in memblock_find_in_range_node()
    296  bottom_up_start = max(start, kernel_end);  in memblock_find_in_range_node()
    385  memblock.reserved.max);  in memblock_discard()
    392  memblock.memory.max);  in memblock_discard()
    430  old_size = type->max * sizeof(struct memblock_region);  in memblock_double_array()
    466  type->name, type->max, type->max * 2);  in memblock_double_array()
    472  type->name, type->max * 2, &addr, &new_end);  in memblock_double_array()
    [all …]

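memblock_double_array() is the growth path for the fixed region tables whose capacities the .max initializers above set: when a table fills, a buffer twice the size replaces it. A userspace sketch of just the doubling step, with memblock's real placement constraints (it must carve the new array out of memblock itself) left out; the names are illustrative.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct region {
        unsigned long base;
        unsigned long size;
    };

    struct region_array {
        struct region *regions;
        unsigned long cnt;   /* regions in use */
        unsigned long max;   /* allocated slots */
    };

    /* Double the backing array when it fills, as memblock_double_array() does. */
    static int region_array_grow(struct region_array *type)
    {
        unsigned long old_size = type->max * sizeof(struct region);
        struct region *new = malloc(old_size * 2);

        if (!new)
            return -1;
        memcpy(new, type->regions, old_size);
        free(type->regions);
        type->regions = new;
        type->max *= 2;
        return 0;
    }

    static int region_array_add(struct region_array *type,
                                unsigned long base, unsigned long size)
    {
        if (type->cnt == type->max && region_array_grow(type) < 0)
            return -1;
        type->regions[type->cnt++] = (struct region){ base, size };
        return 0;
    }

    int main(void)
    {
        struct region_array a = {
            .regions = malloc(2 * sizeof(struct region)),
            .cnt = 0,
            .max = 2,
        };

        for (unsigned long i = 0; i < 5; i++)
            region_array_add(&a, i * 0x1000, 0x1000);
        printf("cnt=%lu max=%lu\n", a.cnt, a.max);   /* cnt=5 max=8 */
        return 0;
    }
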
percpu-internal.h
    153  max(pcpu_stats.nr_max_alloc, pcpu_stats.nr_cur_alloc);  in pcpu_stats_area_alloc()
    157  max(pcpu_stats.max_alloc_size, size);  in pcpu_stats_area_alloc()
    160  chunk->max_alloc_size = max(chunk->max_alloc_size, size);  in pcpu_stats_area_alloc()
    190  max(pcpu_stats.nr_max_chunks, pcpu_stats.nr_chunks);  in pcpu_stats_chunk_alloc()

memcontrol.c
    1319  limit = READ_ONCE(memcg->memory.max);  in mem_cgroup_margin()
    1325  limit = READ_ONCE(memcg->memsw.max);  in mem_cgroup_margin()
    1526  K((u64)memcg->memory.max), memcg->memory.failcnt);  in mem_cgroup_print_oom_meminfo()
    1530  K((u64)memcg->swap.max), memcg->swap.failcnt);  in mem_cgroup_print_oom_meminfo()
    1534  K((u64)memcg->memsw.max), memcg->memsw.failcnt);  in mem_cgroup_print_oom_meminfo()
    1537  K((u64)memcg->kmem.max), memcg->kmem.failcnt);  in mem_cgroup_print_oom_meminfo()
    1555  unsigned long max;  in mem_cgroup_get_max() local
    1557  max = memcg->memory.max;  in mem_cgroup_get_max()
    1562  memsw_max = memcg->memsw.max;  in mem_cgroup_get_max()
    1563  swap_max = memcg->swap.max;  in mem_cgroup_get_max()
    [all …]

mmu_gather.c
    36   batch->max = MAX_GATHER_BATCH;  in tlb_next_batch()
    82   if (batch->nr == batch->max) {  in __tlb_remove_page_size()
    87   VM_BUG_ON_PAGE(batch->nr > batch->max, page);  in __tlb_remove_page_size()
    218  tlb->local.max = ARRAY_SIZE(tlb->__pages);  in tlb_gather_mmu()

page-writeback.c
    48    #define MAX_PAUSE max(HZ/5, 1)
    59    #define BANDWIDTH_INTERVAL max(HZ/5, 1)
    195   unsigned long long max = wb->bdi->max_ratio;  in wb_min_max_ratio() local
    206   if (max < 100) {  in wb_min_max_ratio()
    207   max *= this_bw;  in wb_min_max_ratio()
    208   max = div64_ul(max, tot_bw);  in wb_min_max_ratio()
    213   *maxp = max;  in wb_min_max_ratio()
    725   return max(thresh, dom->dirty_limit);  in hard_dirty_limit()
    1042  wb_thresh = max(wb_thresh, (limit - dtc->dirty) / 8);  in wb_position_ratio()
    1123  avg = max(avg, 1LU);  in wb_update_write_bandwidth()
    [all …]

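In wb_min_max_ratio(), a device's configured max_ratio is scaled down to one writeback context's share of the device's total write bandwidth, so cgroup writers sharing a bdi split its dirty-limit allowance proportionally. A sketch of just that arithmetic, assuming the ratio is in percent; the min-ratio half of the function and its rounding details are omitted.

    #include <stdio.h>

    /*
     * Scale a bdi-wide max dirty ratio to one writeback's share,
     * proportional to its fraction of the device's total write bandwidth,
     * mirroring the "max *= this_bw; max /= tot_bw" lines above.
     */
    static unsigned long long wb_share_of_max(unsigned long long max_ratio,
                                              unsigned long long this_bw,
                                              unsigned long long tot_bw)
    {
        unsigned long long max = max_ratio;

        if (max < 100 && tot_bw)       /* a 100% ratio is left uncapped */
            max = max * this_bw / tot_bw;
        return max;
    }

    int main(void)
    {
        /* a writeback doing 30 of the device's 120 MB/s, bdi max_ratio 40% */
        printf("per-wb max: %llu%%\n", wb_share_of_max(40, 30, 120));
        return 0;
    }
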
swap_cgroup.c
    43  unsigned long idx, max;  in swap_cgroup_prepare() local
    58  max = idx;  in swap_cgroup_prepare()
    59  for (idx = 0; idx < max; idx++)  in swap_cgroup_prepare()

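The swap_cgroup_prepare() hits show the usual partial-allocation unwind: on failure, idx marks how far the allocation loop got, and exactly that many pages are freed. The same shape in userspace (names and page size are illustrative, not the kernel's):

    #include <stdlib.h>

    /* Allocate a table of n pages; on failure, free only what we got. */
    static void **table_alloc(unsigned long n)
    {
        void **pages = calloc(n, sizeof(*pages));
        unsigned long idx, max;

        if (!pages)
            return NULL;
        for (idx = 0; idx < n; idx++) {
            pages[idx] = malloc(4096);
            if (!pages[idx])
                goto not_enough_page;
        }
        return pages;

    not_enough_page:
        max = idx;                      /* free exactly what was allocated */
        for (idx = 0; idx < max; idx++)
            free(pages[idx]);
        free(pages);
        return NULL;
    }

    int main(void)
    {
        void **t = table_alloc(8);

        if (t) {
            for (unsigned long i = 0; i < 8; i++)
                free(t[i]);
            free(t);
        }
        return 0;
    }
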
swapfile.c
    605   unsigned long tmp, max;  in scan_swap_map_try_ssd_cluster() local
    633   max = min_t(unsigned long, si->max,  in scan_swap_map_try_ssd_cluster()
    635   if (tmp >= max) {  in scan_swap_map_try_ssd_cluster()
    640   while (tmp < max) {  in scan_swap_map_try_ssd_cluster()
    684   si->lowest_bit = si->max;  in swap_range_alloc()
    1117  if (offset >= p->max)  in __swap_info_get()
    1264  if (offset >= si->max)  in get_swap_device()
    1625  map_swapcount = max(map_swapcount, mapcount + swapcount);  in page_trans_huge_map_swapcount()
    2104  for (i = prev + 1; i < si->max; i++) {  in find_next_to_unuse()
    2113  if (i == si->max)  in find_next_to_unuse()
    [all …]

percpu-stats.c
    41   max_nr_alloc = max(max_nr_alloc, chunk->nr_alloc);  in find_max_nr_alloc()
    113  max_frag = max(max_frag, -1 * (*p));  in chunk_map_stats()

page_alloc.c
    2380  max_boost = max(pageblock_nr_pages, max_boost);  in boost_watermark()
    6122  pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));  in pageset_set_batch()
    6150  unsigned long batch = max(1UL, high / 4);  in pageset_set_high()
    6327  *end_pfn = max(*end_pfn, this_end_pfn);  in get_pfn_range_for_nid()
    6423  *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);  in zone_spanned_pages_in_node()
    7199  required_kernelcore = max(required_kernelcore, corepages);  in find_zone_movable_pfns_for_nodes()
    7237  start_pfn = max(start_pfn, zone_movable_pfn[nid]);  in find_zone_movable_pfns_for_nodes()
    7358  end_pfn = max(max_zone_pfn[i], start_pfn);  in free_area_init_nodes()
    7679  long max = 0;  in calculate_totalreserve_pages() local
    7684  if (zone->lowmem_reserve[j] > max)  in calculate_totalreserve_pages()
    [all …]

slub.c
    3256  for (order = max(min_order, (unsigned int)get_order(min_objects * size));  in slab_order()
    3607  if (oo_objects(s->oo) > oo_objects(s->max))  in calculate_sizes()
    3608  s->max = s->oo;  in calculate_sizes()
    4284  s->object_size = max(s->object_size, size);  in __kmem_cache_alias()
    4285  s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));  in __kmem_cache_alias()
    4289  c->inuse = max(c->inuse, ALIGN(size, sizeof(void *)));  in __kmem_cache_alias()
    4459  unsigned long *map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);  in validate_slab_cache()
    4488  unsigned long max;  member
    4495  if (t->max)  in free_loc_track()
    4497  get_order(sizeof(struct location) * t->max));  in free_loc_track()
    [all …]

page_io.c
    174  page_no < sis->max) {  in generic_swapfile_activate()
    230  sis->max = page_no;  in generic_swapfile_activate()

mmap.c
    312   unsigned long max = vma_compute_gap(vma), subtree_gap;  in vma_compute_subtree_gap() local
    316   if (subtree_gap > max)  in vma_compute_subtree_gap()
    317   max = subtree_gap;  in vma_compute_subtree_gap()
    322   if (subtree_gap > max)  in vma_compute_subtree_gap()
    323   max = subtree_gap;  in vma_compute_subtree_gap()
    325   return max;  in vma_compute_subtree_gap()
    574   max(addr, vma->vm_start)) >> PAGE_SHIFT;  in count_vma_pages_range()
    2180  info.low_limit = max(PAGE_SIZE, mmap_min_addr);  in arch_get_unmapped_area_topdown()
    2982  max(tmp->vm_start, start),  in SYSCALL_DEFINE5()

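vma_compute_subtree_gap() is the classic augmented-tree update: each VMA's rbtree node caches the largest free gap anywhere in its subtree, so an unmapped-area search can skip whole subtrees with no gap big enough. A toy version over a plain binary tree; the gap values in main() are invented for the example.

    #include <stdio.h>

    struct node {
        unsigned long gap;           /* gap immediately before this node */
        unsigned long subtree_gap;   /* largest gap in this subtree */
        struct node *left, *right;
    };

    /* Recompute a node's cached maximum from itself and its children. */
    static unsigned long compute_subtree_gap(struct node *n)
    {
        unsigned long max = n->gap;

        if (n->left && n->left->subtree_gap > max)
            max = n->left->subtree_gap;
        if (n->right && n->right->subtree_gap > max)
            max = n->right->subtree_gap;
        return max;
    }

    int main(void)
    {
        struct node l = { .gap = 8, .subtree_gap = 8 };
        struct node r = { .gap = 3, .subtree_gap = 3 };
        struct node root = { .gap = 5, .left = &l, .right = &r };

        root.subtree_gap = compute_subtree_gap(&root);
        printf("largest gap below root: %lu\n", root.subtree_gap);   /* 8 */
        return 0;
    }
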
vmscan.c
    2501  cgroup_size = max(cgroup_size, protection);  in get_scan_count()
    2511  scan = max(scan, SWAP_CLUSTER_MAX);  in get_scan_count()
    3357  .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),  in try_to_free_mem_cgroup_pages()
    3540  sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);  in kswapd_shrink_node()
    3840  pgdat->kswapd_order = max(pgdat->kswapd_order, reclaim_order);  in kswapd_try_to_sleep()
    3988  pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx,  in wakeup_kswapd()
    3990  pgdat->kswapd_order = max(pgdat->kswapd_order, order);  in wakeup_kswapd()
    4213  .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),  in __node_reclaim()

util.c
    96   char *kstrndup(const char *s, size_t max, gfp_t gfp)  in kstrndup() argument
    104  len = strnlen(s, max);  in kstrndup()

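kstrndup() copies at most max bytes of a string and always NUL-terminates, using strnlen() so an unterminated source is never over-read. A userspace equivalent (POSIX strndup() already does this; the sketch just mirrors the kernel's shape):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Duplicate at most max characters of s, always NUL-terminating. */
    static char *my_strndup(const char *s, size_t max)
    {
        size_t len;
        char *buf;

        if (!s)
            return NULL;

        len = strnlen(s, max);   /* never reads past max bytes */
        buf = malloc(len + 1);
        if (buf) {
            memcpy(buf, s, len);
            buf[len] = '\0';
        }
        return buf;
    }

    int main(void)
    {
        char *s = my_strndup("a rather long string", 8);

        printf("%s\n", s);   /* "a rather" */
        free(s);
        return 0;
    }
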
swap_state.c
    570  if (end_offset >= si->max)  in swap_cluster_readahead()
    571  end_offset = si->max - 1;  in swap_cluster_readahead()

cma_debug.c
    62  maxchunk = max(end - start, maxchunk);  in cma_maxchunk_get()

page_vma_mapped.c
    271  pvmw.address = max(start, vma->vm_start);  in page_mapped_in_vma()

slab.c
    155   #define REDZONE_ALIGN max(BYTES_PER_WORD, __alignof__(unsigned long long))
    575   struct array_cache *from, unsigned int max)  in transfer_objects() argument
    578   int nr = min3(from->avail, max, to->limit - to->avail);  in transfer_objects()
    3385  int max = shared_array->limit - shared_array->avail;  in cache_flusharray() local
    3386  if (max) {  in cache_flusharray()
    3387  if (batchcount > max)  in cache_flusharray()
    3388  batchcount = max;  in cache_flusharray()

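transfer_objects() moves object pointers between array caches; the min3() clamps the batch to what the source holds, what the caller asked for, and the room left in the destination. A compact model of that transfer, assuming the rest of the function matches the visible lines; the fixed-size struct below is a stand-in for the kernel's struct array_cache.

    #include <stdio.h>
    #include <string.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Fixed-size stack of object pointers, modelling struct array_cache. */
    struct array_cache {
        unsigned int avail;   /* entries currently stacked */
        unsigned int limit;   /* capacity */
        void *entry[16];
    };

    /* Move up to max objects from the top of one cache onto another. */
    static int transfer_objects(struct array_cache *to,
                                struct array_cache *from, unsigned int max)
    {
        int nr = MIN(MIN(from->avail, max), to->limit - to->avail);

        if (!nr)
            return 0;

        memcpy(to->entry + to->avail, from->entry + from->avail - nr,
               sizeof(void *) * nr);
        from->avail -= nr;
        to->avail += nr;
        return nr;
    }

    int main(void)
    {
        struct array_cache a = { .avail = 5, .limit = 16 };
        struct array_cache b = { .avail = 14, .limit = 16 };

        /* destination only has room for 2, so min3 clamps the batch to 2 */
        printf("moved %d objects\n", transfer_objects(&b, &a, 4));
        return 0;
    }
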
frontswap.c
    367  bitmap_zero(sis->frontswap_map, sis->max);  in __frontswap_invalidate_area()

process_vm_access.c
    178  nr_pages = max(nr_pages, nr_pages_iov);  in process_vm_rw_core()

internal.h
    362  return max(start, vma->vm_start);  in vma_address()

hugetlb_cgroup.c
    276  return (u64)counter->max * PAGE_SIZE;  in hugetlb_cgroup_read_u64()

/mm/kasan/
quarantine.c
    247  WRITE_ONCE(quarantine_batch_size, max((size_t)QUARANTINE_PERCPU_SIZE,  in quarantine_reduce()