/mm/
readahead.c
    240  static unsigned long get_init_ra_size(unsigned long size, unsigned long max)  in get_init_ra_size() argument
    244  if (newsize <= max / 32)  in get_init_ra_size()
    246  else if (newsize <= max / 4)  in get_init_ra_size()
    249  newsize = max;  in get_init_ra_size()
    259  unsigned long max)  in get_next_ra_size() argument
    264  if (cur < max / 16)  in get_next_ra_size()
    269  return min(newsize, max);  in get_next_ra_size()
    318  pgoff_t offset, unsigned long max)  in count_history_pages() argument
    323  head = page_cache_prev_hole(mapping, offset - 1, max);  in count_history_pages()
    336  unsigned long max)  in try_context_readahead() argument
    [all …]
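The get_init_ra_size()/get_next_ra_size() hits above spell out the readahead window-sizing heuristic: round the initial request up to a power of two, grow it aggressively while it is still small relative to the per-device maximum, and clamp every later window to that maximum. A minimal user-space sketch of that heuristic; roundup_pow2(), init_ra_size() and next_ra_size() are local stand-ins reconstructed from the visible fragments, not the exact kernel code:

    #include <stdio.h>

    /* Stand-in for the kernel's roundup_pow_of_two(). */
    static unsigned long roundup_pow2(unsigned long n)
    {
            unsigned long p = 1;

            while (p < n)
                    p <<= 1;
            return p;
    }

    /* Initial window: quadruple tiny requests, double mid-sized
     * ones, and fall straight to max for anything close to it. */
    static unsigned long init_ra_size(unsigned long size, unsigned long max)
    {
            unsigned long newsize = roundup_pow2(size);

            if (newsize <= max / 32)
                    newsize = newsize * 4;
            else if (newsize <= max / 4)
                    newsize = newsize * 2;
            else
                    newsize = max;
            return newsize;
    }

    /* Next window: 4x while small, 2x after that, never above
     * max - the min(newsize, max) visible at line 269. */
    static unsigned long next_ra_size(unsigned long cur, unsigned long max)
    {
            unsigned long newsize = cur < max / 16 ? 4 * cur : 2 * cur;

            return newsize < max ? newsize : max;
    }

    int main(void)
    {
            printf("init(4, 128)  = %lu\n", init_ra_size(4, 128));   /* 16 */
            printf("next(16, 128) = %lu\n", next_ra_size(16, 128)); /* 32 */
            return 0;
    }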
quicklist.c
    28  unsigned long node_free_pages, max;  in max_pages() local
    42  max = node_free_pages / FRACTION_OF_NODE_MEM;  in max_pages()
    45  max /= num_cpus_on_node;  in max_pages()
    47  return max(max, min_pages);  in max_pages()
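The max_pages() hits show the per-CPU quicklist cap: take a fixed fraction of the node's free pages, split it across the CPUs on that node, and never go below a floor. A sketch assuming FRACTION_OF_NODE_MEM is 16 (the actual value is not visible in the hits above):

    #include <stdio.h>

    #define FRACTION_OF_NODE_MEM 16  /* assumed; not shown in the hits */

    /* Per-CPU page cap: a fraction of the node's free pages divided
     * among its CPUs, floored at min_pages as in max_pages() above. */
    static unsigned long quicklist_cap(unsigned long node_free_pages,
                                       unsigned int num_cpus_on_node,
                                       unsigned long min_pages)
    {
            unsigned long max = node_free_pages / FRACTION_OF_NODE_MEM;

            max /= num_cpus_on_node;
            return max > min_pages ? max : min_pages;
    }

    int main(void)
    {
            /* 1M free pages on the node, 8 CPUs, floor of 25 -> 8192 */
            printf("%lu\n", quicklist_cap(1UL << 20, 8, 25));
            return 0;
    }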
bootmem.c
    364  unsigned long max;  in mark_bootmem() local
    372  max = min(bdata->node_low_pfn, end);  in mark_bootmem()
    374  err = mark_bootmem_node(bdata, pos, max, reserve, flags);  in mark_bootmem()
    380  if (max == end)  in mark_bootmem()
    502  unsigned long min, max, start, sidx, midx, step;  in alloc_bootmem_bdata() local
    516  max = bdata->node_low_pfn;  in alloc_bootmem_bdata()
    521  if (limit && max > limit)  in alloc_bootmem_bdata()
    522  max = limit;  in alloc_bootmem_bdata()
    523  if (max <= min)  in alloc_bootmem_bdata()
    526  step = max(align >> PAGE_SHIFT, 1UL);  in alloc_bootmem_bdata()
    [all …]
memblock.c
    37   .memory.max = INIT_MEMBLOCK_REGIONS,
    41   .reserved.max = INIT_MEMBLOCK_REGIONS,
    46   .physmem.max = INIT_PHYSMEM_REGIONS,
    212  end = max(start, end);  in memblock_find_in_range_node()
    223  bottom_up_start = max(start, kernel_end);  in memblock_find_in_range_node()
    310  memblock.reserved.max);  in memblock_discard()
    317  memblock.memory.max);  in memblock_discard()
    355  old_size = type->max * sizeof(struct memblock_region);  in memblock_double_array()
    401  memblock_type_name(type), type->max, type->max * 2);  in memblock_double_array()
    406  memblock_type_name(type), type->max * 2, (u64)addr,  in memblock_double_array()
    [all …]
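memblock starts with statically sized region arrays (.max = INIT_MEMBLOCK_REGIONS) and memblock_double_array() doubles type->max whenever an array fills up. A user-space sketch of that grow-by-doubling step, with realloc() standing in for memblock's own early allocator and the struct names simplified:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct region {
            unsigned long long base;
            unsigned long long size;
    };

    struct region_type {
            struct region *regions;
            unsigned long cnt;  /* slots in use */
            unsigned long max;  /* current capacity */
    };

    /* Double the region array once it is full, reusing the
     * old_size = max * sizeof(region) arithmetic visible in
     * memblock_double_array() above. */
    static int double_array(struct region_type *type)
    {
            size_t old_size = type->max * sizeof(struct region);
            struct region *grown = realloc(type->regions, old_size * 2);

            if (!grown)
                    return -1;
            /* Zero the new half so unused slots are well-defined. */
            memset(grown + type->max, 0, old_size);
            type->regions = grown;
            type->max *= 2;
            return 0;
    }

    int main(void)
    {
            struct region_type type = { .cnt = 0, .max = 4 };

            type.regions = calloc(type.max, sizeof(struct region));
            if (!type.regions || double_array(&type))
                    return 1;
            printf("capacity now %lu regions\n", type.max);  /* 8 */
            free(type.regions);
            return 0;
    }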
swap_cgroup.c
    42  unsigned long idx, max;  in swap_cgroup_prepare() local
    57  max = idx;  in swap_cgroup_prepare()
    58  for (idx = 0; idx < max; idx++)  in swap_cgroup_prepare()
page-writeback.c
    47    #define MAX_PAUSE max(HZ/5, 1)
    58    #define BANDWIDTH_INTERVAL max(HZ/5, 1)
    194   unsigned long long max = wb->bdi->max_ratio;  in wb_min_max_ratio() local
    205   if (max < 100) {  in wb_min_max_ratio()
    206   max *= this_bw;  in wb_min_max_ratio()
    207   do_div(max, tot_bw);  in wb_min_max_ratio()
    212   *maxp = max;  in wb_min_max_ratio()
    726   return max(thresh, dom->dirty_limit);  in hard_dirty_limit()
    1043  wb_thresh = max(wb_thresh, (limit - dtc->dirty) / 8);  in wb_position_ratio()
    1124  avg = max(avg, 1LU);  in wb_update_write_bandwidth()
    [all …]
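The wb_min_max_ratio() fragment shows how a device's dirty-limit max_ratio (a percentage) is pro-rated by the writeback context's share of total write bandwidth; a ratio of 100 is left untouched. A small sketch of that scaling, with a plain 64-bit divide standing in for do_div() and scaled_max_ratio() as a local name:

    #include <stdio.h>

    /* Pro-rate a percentage max_ratio by this context's share of the
     * total write bandwidth, mirroring the max < 100 branch of
     * wb_min_max_ratio() above. */
    static unsigned long long scaled_max_ratio(unsigned long long max_ratio,
                                               unsigned long long this_bw,
                                               unsigned long long tot_bw)
    {
            unsigned long long max = max_ratio;

            if (max < 100 && tot_bw) {
                    max *= this_bw;
                    max /= tot_bw;  /* do_div() in the kernel */
            }
            return max;
    }

    int main(void)
    {
            /* device allows 40%, context does 1/4 of the I/O -> 10% */
            printf("%llu\n", scaled_max_ratio(40, 25, 100));
            return 0;
    }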
page_counter.c
    175  int page_counter_memparse(const char *buf, const char *max,  in page_counter_memparse() argument
    181  if (!strcmp(buf, max)) {  in page_counter_memparse()
percpu.c
    213   return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);  in __pcpu_size_to_slot()
    577   max_contig = max(this_size, max_contig);  in pcpu_alloc_area()
    592   max_contig = max(*p - p[-1], max_contig);  in pcpu_alloc_area()
    620   max_contig = max(head, max_contig);  in pcpu_alloc_area()
    624   max_contig = max(tail, max_contig);  in pcpu_alloc_area()
    635   chunk->contig_hint = max(chunk->contig_hint,  in pcpu_alloc_area()
    715   chunk->contig_hint = max(chunk->map[i + 1] - chunk->map[i] - 1, chunk->contig_hint);  in pcpu_free_area()
    1458  apl = rounddown_pow_of_two(max(60 / width, 1));  in pcpu_dump_alloc_info()
    1840  nr_groups = max(nr_groups, group + 1);  in pcpu_build_alloc_info()
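The pcpu_alloc_area()/pcpu_free_area() hits all maintain chunk->contig_hint, a running maximum of the free-segment sizes in a chunk's offset map. A toy recomputation of that hint over a sorted offset map, assuming the low bit of each offset marks the block as in use (roughly how the old per-cpu area map encoded state; the encoding here is purely illustrative):

    #include <stdio.h>

    /* Largest contiguous free span in a toy offset map: map[i] is
     * the start offset of block i with its low bit set when the
     * block is allocated, and map[nr-1] marks the end of the area.
     * Keeping the running max is what the contig_hint updates
     * above do. */
    static long contig_hint(const long *map, int nr)
    {
            long hint = 0;
            int i;

            for (i = 0; i + 1 < nr; i++) {
                    long size = (map[i + 1] & ~1L) - (map[i] & ~1L);

                    if (map[i] & 1)  /* block in use, skip */
                            continue;
                    if (size > hint)
                            hint = size;
            }
            return hint;
    }

    int main(void)
    {
            /* [0,64) free, [64,96) used, [96,256) free -> hint 160 */
            long map[] = { 0, 64 | 1, 96, 256 };

            printf("%ld\n", contig_hint(map, 4));
            return 0;
    }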
page_alloc.c
    304   max_pgcnt = max(2UL << (30 - PAGE_SHIFT),  in reset_deferred_meminit()
    1566  first_init_pfn = max(end_pfn, first_init_pfn);  in deferred_init_memmap()
    5222  pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));  in pageset_set_batch()
    5251  unsigned long batch = max(1UL, high / 4);  in pageset_set_high()
    5428  *end_pfn = max(*end_pfn, this_end_pfn);  in get_pfn_range_for_nid()
    5522  *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);  in zone_spanned_pages_in_node()
    6168  required_kernelcore = max(required_kernelcore, corepages);  in find_zone_movable_pfns_for_nodes()
    6206  start_pfn = max(start_pfn, zone_movable_pfn[nid]);  in find_zone_movable_pfns_for_nodes()
    6330  end_pfn = max(max_zone_pfn[i], start_pfn);  in free_area_init_nodes()
    6603  long max = 0;  in calculate_totalreserve_pages() local
    [all …]
swapfile.c
    479   while (tmp < si->max && tmp < (cluster_next(&cluster->index) + 1) *  in scan_swap_map_try_ssd_cluster()
    596   si->lowest_bit = si->max;  in scan_swap_map()
    748   if (offset >= p->max)  in swap_info_get()
    1343  unsigned int max = si->max;  in find_next_to_unuse() local
    1354  if (++i >= max) {  in find_next_to_unuse()
    1363  max = prev + 1;  in find_next_to_unuse()
    1780  ret = add_swap_extent(sis, 0, sis->max, 0);  in setup_swap_extents()
    1789  ret = add_swap_extent(sis, 0, sis->max, 0);  in setup_swap_extents()
    1961  p->max = 0;  in SYSCALL_DEFINE1()
    2346  p->max = maxpages;  in setup_swap_map_and_extents()
    [all …]
mmap.c
    256  unsigned long max, prev_end, subtree_gap;  in vma_compute_subtree_gap() local
    264  max = vm_start_gap(vma);  in vma_compute_subtree_gap()
    267  if (max > prev_end)  in vma_compute_subtree_gap()
    268  max -= prev_end;  in vma_compute_subtree_gap()
    270  max = 0;  in vma_compute_subtree_gap()
    275  if (subtree_gap > max)  in vma_compute_subtree_gap()
    276  max = subtree_gap;  in vma_compute_subtree_gap()
    281  if (subtree_gap > max)  in vma_compute_subtree_gap()
    282  max = subtree_gap;  in vma_compute_subtree_gap()
    284  return max;  in vma_compute_subtree_gap()
    [all …]
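vma_compute_subtree_gap() is the augmentation callback of the VMA red-black tree: each node caches the largest unmapped gap anywhere in its subtree, so the unmapped-area search can find a large-enough hole in O(log n). A sketch over a plain binary tree, with explicit pointers replacing rb_entry() and a simplified vm_start_gap():

    #include <stdio.h>

    /* Toy augmented VMA node: subtree_gap caches the largest gap
     * below any VMA in this subtree, as in the hits above. */
    struct vma {
            unsigned long vm_start, vm_end;
            struct vma *prev;           /* previous VMA by address */
            struct vma *left, *right;   /* tree children */
            unsigned long subtree_gap;  /* cached subtree maximum */
    };

    static unsigned long compute_subtree_gap(const struct vma *vma)
    {
            unsigned long max, prev_end;

            /* Gap between this VMA and its address-space predecessor
             * (the real vm_start_gap() also accounts for stack guard
             * pages). */
            max = vma->vm_start;
            if (vma->prev) {
                    prev_end = vma->prev->vm_end;
                    if (max > prev_end)
                            max -= prev_end;
                    else
                            max = 0;
            }
            /* Fold in the cached maxima of both children. */
            if (vma->left && vma->left->subtree_gap > max)
                    max = vma->left->subtree_gap;
            if (vma->right && vma->right->subtree_gap > max)
                    max = vma->right->subtree_gap;
            return max;
    }

    int main(void)
    {
            struct vma prev = { .vm_start = 0x1000, .vm_end = 0x3000 };
            struct vma node = { .vm_start = 0x8000, .vm_end = 0x9000,
                                .prev = &prev };

            printf("gap = 0x%lx\n", compute_subtree_gap(&node)); /* 0x5000 */
            return 0;
    }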
page_io.c
    165  page_no < sis->max) {  in generic_swapfile_activate()
    221  sis->max = page_no;  in generic_swapfile_activate()
memcontrol.c
    1879  unsigned int batch = max(CHARGE_BATCH, nr_pages);  in try_charge()
    5113  unsigned long max = READ_ONCE(memcg->memory.limit);  in memory_max_show() local
    5115  if (max == PAGE_COUNTER_MAX)  in memory_max_show()
    5118  seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);  in memory_max_show()
    5129  unsigned long max;  in memory_max_write() local
    5133  err = page_counter_memparse(buf, "max", &max);  in memory_max_write()
    5137  xchg(&memcg->memory.limit, max);  in memory_max_write()
    5142  if (nr_pages <= max)  in memory_max_write()
    5157  if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,  in memory_max_write()
    6036  unsigned long max = READ_ONCE(memcg->swap.limit);  in swap_max_show() local
    [all …]
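The memory_max_write() hits outline the cgroup-v2 memory.max write path: parse the buffer (where the literal string "max" means unlimited), install the new limit first, then reclaim the excess down toward it. A much-simplified sketch of that shape; the real function retries a bounded number of times, drains pages and finally invokes the OOM killer, none of which is modelled here:

    #include <stdio.h>
    #include <string.h>

    #define PAGE_COUNTER_MAX (~0UL)

    struct memcg {
            unsigned long usage;  /* pages charged */
            unsigned long limit;  /* memory.max, in pages */
    };

    /* Toy reclaim: frees up to nr pages, returns how many it freed. */
    static unsigned long reclaim_pages(struct memcg *mg, unsigned long nr)
    {
            unsigned long freed = nr < mg->usage ? nr : mg->usage;

            mg->usage -= freed;
            return freed;
    }

    /* Shape of memory_max_write(): "max" lifts the limit entirely,
     * otherwise the limit is set before reclaim starts so concurrent
     * charges already see it. */
    static int memory_max_write(struct memcg *mg, const char *buf)
    {
            unsigned long max;

            if (!strcmp(buf, "max"))
                    max = PAGE_COUNTER_MAX;
            else if (sscanf(buf, "%lu", &max) != 1)
                    return -1;

            mg->limit = max;  /* xchg() in the kernel */
            while (mg->usage > max) {
                    if (!reclaim_pages(mg, mg->usage - max))
                            break;  /* nothing reclaimable */
            }
            return 0;
    }

    int main(void)
    {
            struct memcg mg = { .usage = 1000, .limit = PAGE_COUNTER_MAX };

            memory_max_write(&mg, "600");
            printf("usage=%lu limit=%lu\n", mg.usage, mg.limit); /* 600 600 */
            return 0;
    }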
slub.c
    3213  for (order = max(min_order, get_order(min_objects * size + reserved));  in slab_order()
    3526  if (oo_objects(s->oo) > oo_objects(s->max))  in calculate_sizes()
    3527  s->max = s->oo;  in calculate_sizes()
    4181  s->object_size = max(s->object_size, (int)size);  in __kmem_cache_alias()
    4356  unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *  in validate_slab_cache()
    4387  unsigned long max;  member
    4394  if (t->max)  in free_loc_track()
    4396  get_order(sizeof(struct location) * t->max));  in free_loc_track()
    4399  static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)  in alloc_loc_track() argument
    4404  order = get_order(sizeof(struct location) * max);  in alloc_loc_track()
    [all …]
util.c
    86  char *kstrndup(const char *s, size_t max, gfp_t gfp)  in kstrndup() argument
    94  len = strnlen(s, max);  in kstrndup()
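kstrndup() duplicates at most max bytes of a string and always NUL-terminates the copy; the strnlen() at line 94 is what keeps it from running past max on an unterminated source. A user-space analog with malloc() in place of kmalloc_track_caller() and the gfp argument dropped:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* User-space analog of kstrndup(): copy at most max bytes of s
     * and always NUL-terminate the result. */
    static char *strndup_sketch(const char *s, size_t max)
    {
            size_t len;
            char *buf;

            if (!s)
                    return NULL;

            len = strnlen(s, max);  /* stops at max, as at line 94 */
            buf = malloc(len + 1);
            if (buf) {
                    memcpy(buf, s, len);
                    buf[len] = '\0';
            }
            return buf;
    }

    int main(void)
    {
            char *s = strndup_sketch("readahead", 4);

            printf("%s\n", s);  /* "read" */
            free(s);
            return 0;
    }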
vmscan.c
    3048  .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),  in try_to_free_mem_cgroup_pages()
    3184  sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);  in kswapd_shrink_node()
    3381  pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx, classzone_idx);  in kswapd_try_to_sleep()
    3382  pgdat->kswapd_order = max(pgdat->kswapd_order, reclaim_order);  in kswapd_try_to_sleep()
    3530  pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx, classzone_idx);  in wakeup_kswapd()
    3531  pgdat->kswapd_order = max(pgdat->kswapd_order, order);  in wakeup_kswapd()
    3753  .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),  in __node_reclaim()
cma_debug.c
    63  maxchunk = max(end - start, maxchunk);  in cma_maxchunk_get()
slab.c
    154   #define REDZONE_ALIGN max(BYTES_PER_WORD, __alignof__(unsigned long long))
    620   struct array_cache *from, unsigned int max)  in transfer_objects() argument
    623   int nr = min3(from->avail, max, to->limit - to->avail);  in transfer_objects()
    3471  int max = shared_array->limit - shared_array->avail;  in cache_flusharray() local
    3472  if (max) {  in cache_flusharray()
    3473  if (batchcount > max)  in cache_flusharray()
    3474  batchcount = max;  in cache_flusharray()
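transfer_objects() moves cached object pointers between two array caches, and the min3() at line 623 is the whole trick: never take more than the donor has, more than the caller asked for, or more than the recipient has room for. A self-contained sketch of that clamp, with min3u() as a local helper for the kernel's min3():

    #include <stdio.h>
    #include <string.h>

    /* Toy array cache: a stack of cached object pointers. */
    struct array_cache {
            unsigned int avail;  /* entries currently cached */
            unsigned int limit;  /* capacity */
            void *entry[64];
    };

    static unsigned int min3u(unsigned int a, unsigned int b, unsigned int c)
    {
            unsigned int m = a < b ? a : b;

            return m < c ? m : c;
    }

    /* Move up to max objects from one cache to another, clamped by
     * donor supply, request size and recipient space - the min3()
     * from transfer_objects() above. */
    static int transfer_objects(struct array_cache *to,
                                struct array_cache *from, unsigned int max)
    {
            unsigned int nr = min3u(from->avail, max, to->limit - to->avail);

            if (!nr)
                    return 0;
            memcpy(to->entry + to->avail, from->entry + from->avail - nr,
                   nr * sizeof(void *));
            from->avail -= nr;
            to->avail += nr;
            return nr;
    }

    int main(void)
    {
            struct array_cache from = { .avail = 10, .limit = 64 };
            struct array_cache to = { .avail = 60, .limit = 64 };

            /* donor has 10, we ask for 8, recipient has room for 4 -> 4 */
            printf("%d\n", transfer_objects(&to, &from, 8));
            return 0;
    }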
frontswap.c
    368  bitmap_zero(sis->frontswap_map, sis->max);  in __frontswap_invalidate_area()
process_vm_access.c
    176  nr_pages = max(nr_pages, nr_pages_iov);  in process_vm_rw_core()
cma.c
    267  alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<  in cma_declare_contiguous()
memory.c
    207   batch->max = MAX_GATHER_BATCH;  in tlb_next_batch()
    229   tlb->local.max = ARRAY_SIZE(tlb->__pages);  in tlb_gather_mmu()
    312   if (batch->nr == batch->max) {  in __tlb_remove_page_size()
    317   VM_BUG_ON_PAGE(batch->nr > batch->max, page);  in __tlb_remove_page_size()
    1311  unsigned long start = max(vma->vm_start, start_addr);  in unmap_single_vma()
    3108  fe->address = max(address & mask, fe->vma->vm_start);  in do_fault_around()
vmstat.c
    110  threshold = max(1, (int)(watermark_distance / num_online_cpus()));  in calculate_pressure_threshold()
    199  = max(threshold, pgdat_threshold);  in refresh_zone_stat_thresholds()
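calculate_pressure_threshold() picks the per-CPU counter drift allowed under memory pressure: divide the distance between watermarks across the online CPUs, so that even if every CPU's counter drifts to its limit the sum cannot silently cross a watermark, with a floor of 1 to keep the threshold meaningful. A direct sketch of the hit at line 110:

    #include <stdio.h>

    /* Per-CPU stat threshold under pressure: the watermark distance
     * split across CPUs, floored at 1. */
    static int pressure_threshold(unsigned long watermark_distance,
                                  unsigned int num_online_cpus)
    {
            int threshold = (int)(watermark_distance / num_online_cpus);

            return threshold > 1 ? threshold : 1;
    }

    int main(void)
    {
            /* 512 pages between watermarks, 16 CPUs -> 32 pages of drift */
            printf("%d\n", pressure_threshold(512, 16));
            return 0;
    }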
vmalloc.c
    544   vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);  in __free_vmap_area()
    1050  vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));  in vb_free()
    1098  end = max(e, end);  in vm_unmap_aliases()
/mm/kasan/
quarantine.c
    246  WRITE_ONCE(quarantine_batch_size, max((size_t)QUARANTINE_PERCPU_SIZE,  in quarantine_reduce()