
Searched refs:max (Results 1 – 25 of 44) sorted by relevance

/mm/
readahead.c
321 static unsigned long get_init_ra_size(unsigned long size, unsigned long max) in get_init_ra_size() argument
325 if (newsize <= max / 32) in get_init_ra_size()
327 else if (newsize <= max / 4) in get_init_ra_size()
330 newsize = max; in get_init_ra_size()
340 unsigned long max) in get_next_ra_size() argument
344 if (cur < max / 16) in get_next_ra_size()
346 if (cur <= max / 2) in get_next_ra_size()
348 return max; in get_next_ra_size()
397 pgoff_t index, unsigned long max) in count_history_pages() argument
402 head = page_cache_prev_miss(mapping, index - 1, max); in count_history_pages()
[all …]
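
The readahead.c hits trace the window ramp-up heuristic: the window grows quickly while it is small relative to max and is clamped once it gets close. Below is a minimal standalone sketch of that shape; only the max/32, max/4, max/16 and max/2 thresholds and the final clamp to max appear in the hits, so the 4x/2x growth factors and the power-of-two round-up are assumptions.

#include <stdio.h>

/* Sketch of the ramp-up suggested by the hits above. The threshold
 * tests come from the search results; the growth factors are assumed. */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
    unsigned long newsize = size;

    /* round up to a power of two (stand-in for roundup_pow_of_two(),
     * which the hits do not show) */
    while (newsize & (newsize - 1))
        newsize += newsize & -newsize;

    if (newsize <= max / 32)
        newsize = newsize * 4;  /* tiny request: grow fast */
    else if (newsize <= max / 4)
        newsize = newsize * 2;  /* medium request: grow */
    else
        newsize = max;          /* large request: clamp to max */
    return newsize;
}

static unsigned long get_next_ra_size(unsigned long cur, unsigned long max)
{
    if (cur < max / 16)
        return 4 * cur;         /* small window: quadruple */
    if (cur <= max / 2)
        return 2 * cur;         /* mid-size window: double */
    return max;                 /* otherwise saturate at max */
}

int main(void)
{
    unsigned long max = 256, win = get_init_ra_size(4, max);

    while (win < max) {
        printf("window = %lu pages\n", win);
        win = get_next_ra_size(win, max);
    }
    printf("window = %lu pages (saturated)\n", win);
    return 0;
}
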
page_counter.c
117 if (new > c->max) { in page_counter_try_charge()
191 old = xchg(&counter->max, nr_pages); in page_counter_set_max()
196 counter->max = old; in page_counter_set_max()
244 int page_counter_memparse(const char *buf, const char *max, in page_counter_memparse() argument
250 if (!strcmp(buf, max)) { in page_counter_memparse()
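
The page_counter.c hits pair a limit check in the charge path (new > c->max) with an atomic exchange when the limit is replaced (xchg(&counter->max, nr_pages)). Here is a minimal userspace sketch of that pattern using C11 atomics, simplified to a single counter; the field names follow the hits, everything else is an assumption.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct counter {
    atomic_ulong usage;
    atomic_ulong max;
};

/* Charge nr_pages unless it would push usage past max, mirroring the
 * page_counter_try_charge() hit above; on failure, undo the charge. */
static bool try_charge(struct counter *c, unsigned long nr_pages)
{
    unsigned long new = atomic_fetch_add(&c->usage, nr_pages) + nr_pages;

    if (new > atomic_load(&c->max)) {
        atomic_fetch_sub(&c->usage, nr_pages);
        return false;
    }
    return true;
}

/* Swap in a new limit and return the old one, mirroring the
 * xchg(&counter->max, nr_pages) hit above. */
static unsigned long set_max(struct counter *c, unsigned long nr_pages)
{
    return atomic_exchange(&c->max, nr_pages);
}

int main(void)
{
    struct counter c = { .usage = 0, .max = 4 };

    printf("charge 3: %s\n", try_charge(&c, 3) ? "ok" : "over limit");
    printf("charge 3: %s\n", try_charge(&c, 3) ? "ok" : "over limit");
    printf("old max was %lu\n", set_max(&c, 16));
    return 0;
}
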
memblock.c
114 .memory.max = INIT_MEMBLOCK_REGIONS,
119 .reserved.max = INIT_MEMBLOCK_RESERVED_REGIONS,
130 .max = INIT_PHYSMEM_REGIONS,
296 end = max(start, end); in memblock_find_in_range_node()
369 memblock.reserved.max); in memblock_discard()
379 memblock.memory.max); in memblock_discard()
422 old_size = type->max * sizeof(struct memblock_region); in memblock_double_array()
458 type->name, type->max, type->max * 2); in memblock_double_array()
464 type->name, type->max * 2, &addr, &new_end); in memblock_double_array()
472 memset(new_array + type->max, 0, old_size); in memblock_double_array()
[all …]
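
memblock.c treats max as the capacity of a region array and doubles it when the array fills up, zeroing the new upper half exactly as the memset(new_array + type->max, 0, old_size) hit shows. A hedged sketch of that doubling step in plain C, with malloc standing in for the kernel's allocators:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct region { unsigned long base, size; };

struct region_array {
    struct region *regions;
    unsigned long cnt;  /* regions in use */
    unsigned long max;  /* capacity, as in memblock's type->max */
};

/* Double the capacity, as memblock_double_array() does above:
 * allocate 2*max entries, copy the old ones, zero the new half. */
static int double_array(struct region_array *type)
{
    size_t old_size = type->max * sizeof(struct region);
    struct region *new_array = malloc(old_size * 2);

    if (!new_array)
        return -1;
    memcpy(new_array, type->regions, old_size);
    memset(new_array + type->max, 0, old_size);
    free(type->regions);
    type->regions = new_array;
    type->max *= 2;
    return 0;
}

int main(void)
{
    struct region_array a = { calloc(4, sizeof(struct region)), 4, 4 };

    if (!a.regions)
        return 1;
    if (double_array(&a) == 0)
        printf("capacity grew from 4 to %lu regions\n", a.max);
    free(a.regions);
    return 0;
}
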
percpu-internal.h
206 max(pcpu_stats.nr_max_alloc, pcpu_stats.nr_cur_alloc); in pcpu_stats_area_alloc()
210 max(pcpu_stats.max_alloc_size, size); in pcpu_stats_area_alloc()
213 chunk->max_alloc_size = max(chunk->max_alloc_size, size); in pcpu_stats_area_alloc()
243 max(pcpu_stats.nr_max_chunks, pcpu_stats.nr_chunks); in pcpu_stats_chunk_alloc()
mmu_gather.c
36 batch->max = MAX_GATHER_BATCH; in tlb_next_batch()
82 if (batch->nr == batch->max) { in __tlb_remove_page_size()
87 VM_BUG_ON_PAGE(batch->nr > batch->max, page); in __tlb_remove_page_size()
274 tlb->local.max = ARRAY_SIZE(tlb->__pages); in tlb_gather_mmu()
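
mmu_gather.c uses max as the capacity of a page batch: pages are queued until batch->nr reaches batch->max, at which point the batch is flushed. A standalone sketch of that accumulate-and-flush pattern; the batch size and the flush body here are stand-ins, not the kernel's.

#include <stdio.h>

#define BATCH_CAPACITY 8  /* assumption; MAX_GATHER_BATCH differs */

struct batch {
    unsigned int nr;   /* pages queued so far */
    unsigned int max;  /* capacity, as in batch->max above */
    void *pages[BATCH_CAPACITY];
};

static void flush_batch(struct batch *b)
{
    /* stand-in for the TLB flush and page freeing the kernel does */
    printf("flushing %u pages\n", b->nr);
    b->nr = 0;
}

/* Queue one page, flushing first if the batch is full, mirroring the
 * "if (batch->nr == batch->max)" hit above. */
static void remove_page(struct batch *b, void *page)
{
    if (b->nr == b->max)
        flush_batch(b);
    b->pages[b->nr++] = page;
}

int main(void)
{
    struct batch b = { .nr = 0, .max = BATCH_CAPACITY };
    int dummy[20];

    for (int i = 0; i < 20; i++)
        remove_page(&b, &dummy[i]);
    flush_batch(&b);  /* drain the tail */
    return 0;
}
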
hugetlb_cgroup.c
440 return (u64)counter->max * PAGE_SIZE; in hugetlb_cgroup_read_u64()
442 return (u64)rsvd_counter->max * PAGE_SIZE; in hugetlb_cgroup_read_u64()
483 val = (u64)counter->max; in hugetlb_cgroup_read_u64_max()
500 const char *max) in hugetlb_cgroup_write() argument
511 ret = page_counter_memparse(buf, max, &nr_pages); in hugetlb_cgroup_write()
592 long max; in __hugetlb_events_show() local
599 max = atomic_long_read(&h_cg->events_local[idx][HUGETLB_MAX]); in __hugetlb_events_show()
601 max = atomic_long_read(&h_cg->events[idx][HUGETLB_MAX]); in __hugetlb_events_show()
603 seq_printf(seq, "max %lu\n", max); in __hugetlb_events_show()
memcontrol.c
1463 limit = READ_ONCE(memcg->memory.max); in mem_cgroup_margin()
1469 limit = READ_ONCE(memcg->memsw.max); in mem_cgroup_margin()
1695 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt); in mem_cgroup_print_oom_meminfo()
1699 K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt); in mem_cgroup_print_oom_meminfo()
1703 K((u64)memcg->memsw.max), memcg->memsw.failcnt); in mem_cgroup_print_oom_meminfo()
1706 K((u64)memcg->kmem.max), memcg->kmem.failcnt); in mem_cgroup_print_oom_meminfo()
1724 unsigned long max = READ_ONCE(memcg->memory.max); in mem_cgroup_get_max() local
1728 max += min(READ_ONCE(memcg->swap.max), in mem_cgroup_get_max()
1733 unsigned long swap = READ_ONCE(memcg->memsw.max) - max; in mem_cgroup_get_max()
1735 max += min(swap, (unsigned long)total_swap_pages); in mem_cgroup_get_max()
[all …]
page-writeback.c
51 #define MAX_PAUSE max(HZ/5, 1)
62 #define BANDWIDTH_INTERVAL max(HZ/5, 1)
198 unsigned long long max = wb->bdi->max_ratio; in wb_min_max_ratio() local
209 if (max < 100) { in wb_min_max_ratio()
210 max *= this_bw; in wb_min_max_ratio()
211 max = div64_ul(max, tot_bw); in wb_min_max_ratio()
216 *maxp = max; in wb_min_max_ratio()
722 return max(thresh, dom->dirty_limit); in hard_dirty_limit()
1039 wb_thresh = max(wb_thresh, (limit - dtc->dirty) / 8); in wb_position_ratio()
1120 avg = max(avg, 1LU); in wb_update_write_bandwidth()
[all …]
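
The wb_min_max_ratio() hits show a per-device max_ratio below 100 being scaled by that device's share of the total writeback bandwidth (max *= this_bw; max = div64_ul(max, tot_bw)). A hedged sketch of that proportioning, assuming a nonzero tot_bw and using plain division in place of div64_ul():

#include <stdio.h>

/* Scale a percentage limit by a bandwidth share, following the
 * wb_min_max_ratio() hits above: a device whose max_ratio is below
 * 100 only receives that ratio in proportion to this_bw / tot_bw. */
static unsigned long long scale_max_ratio(unsigned long long max_ratio,
                                          unsigned long long this_bw,
                                          unsigned long long tot_bw)
{
    unsigned long long max = max_ratio;

    if (max < 100) {
        max *= this_bw;
        max /= tot_bw;  /* assumes tot_bw != 0 */
    }
    return max;
}

int main(void)
{
    /* a device capped at 40% that carries half the total bandwidth */
    printf("effective max ratio: %llu%%\n", scale_max_ratio(40, 50, 100));
    return 0;
}
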
swap_cgroup.c
43 unsigned long idx, max; in swap_cgroup_prepare() local
58 max = idx; in swap_cgroup_prepare()
59 for (idx = 0; idx < max; idx++) in swap_cgroup_prepare()
mapping_dirty_helpers.c
45 wpwalk->tlbflush_end = max(wpwalk->tlbflush_end, in wp_pte()
103 wpwalk->tlbflush_end = max(wpwalk->tlbflush_end, in clean_record_pte()
108 cwalk->end = max(cwalk->end, pgoff + 1); in clean_record_pte()
swapfile.c
618 unsigned long tmp, max; in scan_swap_map_try_ssd_cluster() local
646 max = min_t(unsigned long, si->max, in scan_swap_map_try_ssd_cluster()
648 if (tmp < max) { in scan_swap_map_try_ssd_cluster()
650 while (tmp < max) { in scan_swap_map_try_ssd_cluster()
657 if (tmp >= max) { in scan_swap_map_try_ssd_cluster()
700 si->lowest_bit = si->max; in swap_range_alloc()
1195 if (offset >= p->max) in __swap_info_get()
1344 if (offset >= si->max) in get_swap_device()
1707 map_swapcount = max(map_swapcount, mapcount + swapcount); in page_trans_huge_map_swapcount()
2179 for (i = prev + 1; i < si->max; i++) { in find_next_to_unuse()
[all …]
percpu-stats.c
44 max_nr_alloc = max(max_nr_alloc, in find_max_nr_alloc()
117 max_frag = max(max_frag, -1 * (*p)); in chunk_map_stats()
page_alloc.c
2614 max_boost = max(pageblock_nr_pages, max_boost); in boost_watermark()
6667 pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch)); in pageset_set_batch()
6695 unsigned long batch = max(1UL, high / 4); in pageset_set_high()
6820 *end_pfn = max(*end_pfn, this_end_pfn); in get_pfn_range_for_nid()
6915 *zone_start_pfn = max(*zone_start_pfn, node_start_pfn); in zone_spanned_pages_in_node()
7574 required_kernelcore = max(required_kernelcore, corepages); in find_zone_movable_pfns_for_nodes()
7612 start_pfn = max(start_pfn, zone_movable_pfn[nid]); in find_zone_movable_pfns_for_nodes()
7756 end_pfn = max(max_zone_pfn[zone], start_pfn); in free_area_init()
8073 long max = 0; in calculate_totalreserve_pages() local
8078 if (zone->lowmem_reserve[j] > max) in calculate_totalreserve_pages()
[all …]
page_io.c
125 page_no < sis->max) { in generic_swapfile_activate()
184 sis->max = page_no; in generic_swapfile_activate()
slab_common.c
155 align = max(align, ralign); in calculate_alignment()
158 align = max(align, arch_slab_minalign()); in calculate_alignment()
571 align = max(align, size); in create_boot_cache()
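
The slab_common.c hits resolve competing alignment constraints by folding them with max(): the caller's requested alignment, the redzone alignment and the architecture minimum. A minimal sketch of that folding; the stand-in value for the architecture minimum is an assumption.

#include <stddef.h>
#include <stdio.h>

#define ARCH_MINALIGN sizeof(unsigned long long)  /* assumed stand-in */

static size_t max_sz(size_t a, size_t b) { return a > b ? a : b; }

/* Fold the requested alignment, the redzone alignment and the
 * architecture minimum into one value, as the calculate_alignment()
 * hits above do. */
static size_t calculate_alignment(size_t align, size_t ralign)
{
    align = max_sz(align, ralign);
    align = max_sz(align, ARCH_MINALIGN);
    return align;
}

int main(void)
{
    printf("alignment: %zu\n", calculate_alignment(4, 16));
    return 0;
}
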
slub.c
3484 for (order = max(min_order, (unsigned int)get_order(min_objects * size)); in slab_order()
3849 if (oo_objects(s->oo) > oo_objects(s->max)) in calculate_sizes()
3850 s->max = s->oo; in calculate_sizes()
4496 s->object_size = max(s->object_size, size); in __kmem_cache_alias()
4497 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *))); in __kmem_cache_alias()
4689 unsigned long max; member
4699 if (t->max) in free_loc_track()
4701 get_order(sizeof(struct location) * t->max)); in free_loc_track()
4704 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags) in alloc_loc_track() argument
4709 order = get_order(sizeof(struct location) * max); in alloc_loc_track()
[all …]
mmap.c
332 unsigned long max = vma_compute_gap(vma), subtree_gap; in vma_compute_subtree_gap() local
336 if (subtree_gap > max) in vma_compute_subtree_gap()
337 max = subtree_gap; in vma_compute_subtree_gap()
342 if (subtree_gap > max) in vma_compute_subtree_gap()
343 max = subtree_gap; in vma_compute_subtree_gap()
345 return max; in vma_compute_subtree_gap()
654 max(addr, vma->vm_start)) >> PAGE_SHIFT; in count_vma_pages_range()
2307 info.low_limit = max(PAGE_SIZE, mmap_min_addr); in arch_get_unmapped_area_topdown()
3175 max(tmp->vm_start, start), in SYSCALL_DEFINE5()
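
The vma_compute_subtree_gap() hits are the augmented-rbtree callback: a node's subtree gap is the maximum of its own gap and the cached subtree gaps of its two children. A hedged sketch of the same computation on a plain binary tree, with vma_compute_gap() replaced by a stored field:

#include <stdio.h>

struct node {
    unsigned long gap;          /* this node's own gap */
    unsigned long subtree_gap;  /* cached max over the subtree */
    struct node *left, *right;
};

/* Max of the node's own gap and the children's cached subtree gaps,
 * mirroring vma_compute_subtree_gap() in the hits above. */
static unsigned long compute_subtree_gap(struct node *n)
{
    unsigned long max = n->gap;

    if (n->left && n->left->subtree_gap > max)
        max = n->left->subtree_gap;
    if (n->right && n->right->subtree_gap > max)
        max = n->right->subtree_gap;
    return max;
}

int main(void)
{
    struct node l = { 16, 16, NULL, NULL };
    struct node r = { 4, 4, NULL, NULL };
    struct node root = { 8, 0, &l, &r };

    root.subtree_gap = compute_subtree_gap(&root);
    printf("largest gap in subtree: %lu\n", root.subtree_gap);
    return 0;
}
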
compaction.c
295 block_pfn = max(block_pfn, zone->zone_start_pfn); in __reset_isolation_pfn()
1292 start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn); in fast_isolate_around()
1391 highest = max(pageblock_start_pfn(pfn), in fast_isolate_freepages()
1972 wmark_low = max(100U - sysctl_compaction_proactiveness, 5U); in fragmentation_score_wmark()
2537 rc = max(status, rc); in try_to_compact_pages()
cma_debug.c
62 maxchunk = max(end - start, maxchunk); in cma_maxchunk_get()
util.c
101 char *kstrndup(const char *s, size_t max, gfp_t gfp) in kstrndup() argument
109 len = strnlen(s, max); in kstrndup()
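
util.c's kstrndup() bounds the copy with strnlen(s, max), so at most max bytes of the source are examined. A userspace equivalent, dropping the kernel-only gfp_t argument:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace sketch of kstrndup(): duplicate at most max bytes of s,
 * always NUL-terminating the copy. */
static char *kstrndup(const char *s, size_t max)
{
    size_t len = strnlen(s, max);  /* never reads past max bytes */
    char *buf = malloc(len + 1);

    if (buf) {
        memcpy(buf, s, len);
        buf[len] = '\0';
    }
    return buf;
}

int main(void)
{
    char *s = kstrndup("readahead window", 9);

    if (s) {
        printf("%s\n", s);  /* prints "readahead" */
        free(s);
    }
    return 0;
}
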
process_vm_access.c
178 nr_pages = max(nr_pages, nr_pages_iov); in process_vm_rw_core()
slab.c
156 #define REDZONE_ALIGN max(BYTES_PER_WORD, __alignof__(unsigned long long))
576 struct array_cache *from, unsigned int max) in transfer_objects() argument
579 int nr = min3(from->avail, max, to->limit - to->avail); in transfer_objects()
3387 int max = shared_array->limit - shared_array->avail; in cache_flusharray() local
3388 if (max) { in cache_flusharray()
3389 if (batchcount > max) in cache_flusharray()
3390 batchcount = max; in cache_flusharray()
swap_state.c
685 if (end_offset >= si->max) in swap_cluster_readahead()
686 end_offset = si->max - 1; in swap_cluster_readahead()
/mm/damon/
paddr.c
204 max_nr_accesses = max(r->nr_accesses, max_nr_accesses); in damon_pa_check_accesses()
/mm/kasan/
quarantine.c
259 WRITE_ONCE(quarantine_batch_size, max((size_t)QUARANTINE_PERCPU_SIZE, in kasan_quarantine_reduce()
