/mm/
readahead.c
    324  static unsigned long get_init_ra_size(unsigned long size, unsigned long max)  in get_init_ra_size() argument
    328  if (newsize <= max / 32)  in get_init_ra_size()
    330  else if (newsize <= max / 4)  in get_init_ra_size()
    333  newsize = max;  in get_init_ra_size()
    343  unsigned long max)  in get_next_ra_size() argument
    347  if (cur < max / 16)  in get_next_ra_size()
    349  if (cur <= max / 2)  in get_next_ra_size()
    351  return max;  in get_next_ra_size()
    400  pgoff_t index, unsigned long max)  in count_history_pages() argument
    405  head = page_cache_prev_miss(mapping, index - 1, max);  in count_history_pages()
    [all …]
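
The readahead.c hits trace a window-sizing heuristic: get_init_ra_size() picks an initial window from the request size and get_next_ra_size() grows it toward the per-file maximum. Below is a minimal userspace sketch of that growth shape, assuming illustrative x4/x2 growth factors and a 256-page cap; roundup_pow2() and the function names are stand-ins, not the kernel code itself.

#include <stdio.h>

/* Stand-in for the kernel's roundup_pow_of_two(): smallest power of two >= v. */
static unsigned long roundup_pow2(unsigned long v)
{
    unsigned long p = 1;

    while (p < v)
        p <<= 1;
    return p;
}

/* Initial window: grow aggressively for small requests, clamp at max. */
static unsigned long init_window(unsigned long size, unsigned long max)
{
    unsigned long newsize = roundup_pow2(size);

    if (newsize <= max / 32)
        newsize = newsize * 4;      /* assumed growth factor */
    else if (newsize <= max / 4)
        newsize = newsize * 2;      /* assumed growth factor */
    else
        newsize = max;
    return newsize;
}

/* Next window: quadruple while small, double while moderate, then saturate. */
static unsigned long next_window(unsigned long cur, unsigned long max)
{
    if (cur < max / 16)
        return 4 * cur;
    if (cur <= max / 2)
        return 2 * cur;
    return max;
}

int main(void)
{
    unsigned long max = 256;        /* assumed cap, in pages */
    unsigned long win = init_window(4, max);

    while (win < max) {
        printf("window = %lu pages\n", win);
        win = next_window(win, max);
    }
    printf("window = %lu pages (saturated)\n", win);
    return 0;
}
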
page_counter.c
    123  if (new > c->max) {  in page_counter_try_charge()
    198  old = xchg(&counter->max, nr_pages);  in page_counter_set_max()
    203  counter->max = old;  in page_counter_set_max()
    251  int page_counter_memparse(const char *buf, const char *max,  in page_counter_memparse() argument
    257  if (!strcmp(buf, max)) {  in page_counter_memparse()
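
The page_counter.c hits show a limit being swapped in with xchg() and rolled back if usage already exceeds it, plus a charge test against ->max. A hedged sketch of that pattern in C11 atomics follows; the counter struct and function names are illustrative, and the real page_counter API (hierarchical charging, retry loop, failcnt accounting) is not reproduced here.

#include <stdatomic.h>
#include <stdbool.h>

struct counter {
    atomic_ulong usage;
    atomic_ulong max;
};

/* Swap in the new limit; if current usage already exceeds it, restore
 * the old limit and report failure (cf. the hits at lines 198/203). */
static bool counter_set_max(struct counter *c, unsigned long nr_pages)
{
    unsigned long old = atomic_exchange(&c->max, nr_pages);

    if (atomic_load(&c->usage) > nr_pages) {
        atomic_store(&c->max, old);
        return false;
    }
    return true;
}

/* Optimistically charge, then undo if the new total crosses the limit
 * (cf. the check at line 123). */
static bool counter_try_charge(struct counter *c, unsigned long nr_pages)
{
    unsigned long new = atomic_fetch_add(&c->usage, nr_pages) + nr_pages;

    if (new > atomic_load(&c->max)) {
        atomic_fetch_sub(&c->usage, nr_pages);
        return false;
    }
    return true;
}
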
percpu-internal.h
    160  max(pcpu_stats.nr_max_alloc, pcpu_stats.nr_cur_alloc);  in pcpu_stats_area_alloc()
    164  max(pcpu_stats.max_alloc_size, size);  in pcpu_stats_area_alloc()
    167  chunk->max_alloc_size = max(chunk->max_alloc_size, size);  in pcpu_stats_area_alloc()
    197  max(pcpu_stats.nr_max_chunks, pcpu_stats.nr_chunks);  in pcpu_stats_chunk_alloc()
memblock.c
    114  .memory.max = INIT_MEMBLOCK_REGIONS,
    119  .reserved.max = INIT_MEMBLOCK_RESERVED_REGIONS,
    130  .max = INIT_PHYSMEM_REGIONS,
    295  end = max(start, end);  in memblock_find_in_range_node()
    368  memblock.reserved.max);  in memblock_discard()
    378  memblock.memory.max);  in memblock_discard()
    421  old_size = type->max * sizeof(struct memblock_region);  in memblock_double_array()
    457  type->name, type->max, type->max * 2);  in memblock_double_array()
    463  type->name, type->max * 2, &addr, &new_end);  in memblock_double_array()
    471  memset(new_array + type->max, 0, old_size);  in memblock_double_array()
    [all …]
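
The memblock_double_array() hits outline a grow-by-doubling step for the region arrays: compute the old size from ->max, allocate twice as much, copy the live entries, and zero the new half. Here is a self-contained sketch of that pattern, assuming malloc() in place of the early-boot allocators and a stripped-down region type.

#include <stdlib.h>
#include <string.h>

struct region {
    unsigned long base;
    unsigned long size;
};

struct region_array {
    struct region *regions;
    unsigned long cnt;
    unsigned long max;    /* current capacity */
};

static int double_array(struct region_array *type)
{
    size_t old_size = type->max * sizeof(struct region);
    struct region *new_array = malloc(old_size * 2);

    if (!new_array)
        return -1;

    memcpy(new_array, type->regions, old_size);
    memset(new_array + type->max, 0, old_size);    /* clear the new half */

    free(type->regions);
    type->regions = new_array;
    type->max *= 2;
    return 0;
}
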
hugetlb_cgroup.c
    440  return (u64)counter->max * PAGE_SIZE;  in hugetlb_cgroup_read_u64()
    442  return (u64)rsvd_counter->max * PAGE_SIZE;  in hugetlb_cgroup_read_u64()
    483  val = (u64)counter->max;  in hugetlb_cgroup_read_u64_max()
    500  const char *max)  in hugetlb_cgroup_write() argument
    511  ret = page_counter_memparse(buf, max, &nr_pages);  in hugetlb_cgroup_write()
    592  long max;  in __hugetlb_events_show() local
    599  max = atomic_long_read(&h_cg->events_local[idx][HUGETLB_MAX]);  in __hugetlb_events_show()
    601  max = atomic_long_read(&h_cg->events[idx][HUGETLB_MAX]);  in __hugetlb_events_show()
    603  seq_printf(seq, "max %lu\n", max);  in __hugetlb_events_show()
mmu_gather.c
    36  batch->max = MAX_GATHER_BATCH;  in tlb_next_batch()
    82  if (batch->nr == batch->max) {  in __tlb_remove_page_size()
    87  VM_BUG_ON_PAGE(batch->nr > batch->max, page);  in __tlb_remove_page_size()
    260  tlb->local.max = ARRAY_SIZE(tlb->__pages);  in __tlb_gather_mmu()
memcontrol.c
    1345  limit = READ_ONCE(memcg->memory.max);  in mem_cgroup_margin()
    1351  limit = READ_ONCE(memcg->memsw.max);  in mem_cgroup_margin()
    1584  K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);  in mem_cgroup_print_oom_meminfo()
    1588  K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);  in mem_cgroup_print_oom_meminfo()
    1592  K((u64)memcg->memsw.max), memcg->memsw.failcnt);  in mem_cgroup_print_oom_meminfo()
    1595  K((u64)memcg->kmem.max), memcg->kmem.failcnt);  in mem_cgroup_print_oom_meminfo()
    1613  unsigned long max = READ_ONCE(memcg->memory.max);  in mem_cgroup_get_max() local
    1617  max += min(READ_ONCE(memcg->swap.max),  in mem_cgroup_get_max()
    1622  unsigned long swap = READ_ONCE(memcg->memsw.max) - max;  in mem_cgroup_get_max()
    1624  max += min(swap, (unsigned long)total_swap_pages);  in mem_cgroup_get_max()
    [all …]
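
The mem_cgroup_get_max() hits (1613-1624) add up the largest footprint a cgroup could reach: its memory limit plus whatever swap it could still consume, capped by total_swap_pages. The function below redoes that arithmetic standalone; the parameter names and the separate-swap flag are simplifications of the memcg structures, not the kernel interface.

#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
    return a < b ? a : b;
}

static unsigned long cgroup_get_max(unsigned long memory_max,
                                    unsigned long swap_max,
                                    unsigned long memsw_max,
                                    unsigned long total_swap_pages,
                                    int swap_accounted_separately)
{
    unsigned long max = memory_max;

    if (swap_accounted_separately) {
        /* independent memory and swap limits (v2-style) */
        max += min_ul(swap_max, total_swap_pages);
    } else {
        /* one memsw limit bounding memory+swap together (v1-style) */
        unsigned long swap = memsw_max - max;

        max += min_ul(swap, total_swap_pages);
    }
    return max;
}

int main(void)
{
    /* e.g. 1 GiB memory limit and a 512 MiB swap limit, in 4 KiB pages */
    printf("%lu pages\n",
           cgroup_get_max(262144, 131072, 0, 1048576, 1));
    return 0;
}
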
page-writeback.c
    47  #define MAX_PAUSE max(HZ/5, 1)
    58  #define BANDWIDTH_INTERVAL max(HZ/5, 1)
    189  unsigned long long max = wb->bdi->max_ratio;  in wb_min_max_ratio() local
    200  if (max < 100) {  in wb_min_max_ratio()
    201  max *= this_bw;  in wb_min_max_ratio()
    202  max = div64_ul(max, tot_bw);  in wb_min_max_ratio()
    207  *maxp = max;  in wb_min_max_ratio()
    713  return max(thresh, dom->dirty_limit);  in hard_dirty_limit()
    1030  wb_thresh = max(wb_thresh, (limit - dtc->dirty) / 8);  in wb_position_ratio()
    1111  avg = max(avg, 1LU);  in wb_update_write_bandwidth()
    [all …]
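
The wb_min_max_ratio() hits suggest that a per-device max_ratio below 100% is scaled by that writeback context's share of the total write bandwidth. A small sketch of that proportional scaling follows; this_bw, tot_bw and the helper name are assumptions for illustration, and plain division stands in for div64_ul().

#include <stdio.h>
#include <stdint.h>

static unsigned long scaled_max_ratio(unsigned long max_ratio,
                                      uint64_t this_bw, uint64_t tot_bw)
{
    uint64_t max = max_ratio;

    if (max < 100 && tot_bw) {      /* tot_bw check only to avoid /0 here */
        max *= this_bw;
        max /= tot_bw;
    }
    return (unsigned long)max;
}

int main(void)
{
    /* A device allowed 60% of the dirty limit but producing a quarter
     * of the total bandwidth ends up with a 15% effective share. */
    printf("%lu%%\n", scaled_max_ratio(60, 25, 100));
    return 0;
}
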
swap_cgroup.c
    43  unsigned long idx, max;  in swap_cgroup_prepare() local
    58  max = idx;  in swap_cgroup_prepare()
    59  for (idx = 0; idx < max; idx++)  in swap_cgroup_prepare()
mapping_dirty_helpers.c
    46  wpwalk->tlbflush_end = max(wpwalk->tlbflush_end,  in wp_pte()
    105  wpwalk->tlbflush_end = max(wpwalk->tlbflush_end,  in clean_record_pte()
    110  cwalk->end = max(cwalk->end, pgoff + 1);  in clean_record_pte()
swapfile.c
    623  unsigned long tmp, max;  in scan_swap_map_try_ssd_cluster() local
    651  max = min_t(unsigned long, si->max,  in scan_swap_map_try_ssd_cluster()
    653  if (tmp < max) {  in scan_swap_map_try_ssd_cluster()
    655  while (tmp < max) {  in scan_swap_map_try_ssd_cluster()
    662  if (tmp >= max) {  in scan_swap_map_try_ssd_cluster()
    699  si->lowest_bit = si->max;  in swap_range_alloc()
    1139  if (offset >= p->max)  in __swap_info_get()
    1288  if (offset >= si->max)  in get_swap_device()
    1650  map_swapcount = max(map_swapcount, mapcount + swapcount);  in page_trans_huge_map_swapcount()
    2140  for (i = prev + 1; i < si->max; i++) {  in find_next_to_unuse()
    [all …]
percpu-stats.c
    41  max_nr_alloc = max(max_nr_alloc, chunk->nr_alloc);  in find_max_nr_alloc()
    113  max_frag = max(max_frag, -1 * (*p));  in chunk_map_stats()
filemap.c
    490  pgoff_t max = end_byte >> PAGE_SHIFT;  in filemap_range_has_page() local
    497  page = xas_find(&xas, max);  in filemap_range_has_page()
    663  pgoff_t max = end_byte >> PAGE_SHIFT;  in filemap_range_needs_writeback() local
    675  xas_for_each(&xas, page, max) {  in filemap_range_needs_writeback()
    1979  static inline struct page *find_get_entry(struct xa_state *xas, pgoff_t max,  in find_get_entry() argument
    1986  page = xas_find(xas, max);  in find_get_entry()
    1988  page = xas_find_marked(xas, max, mark);  in find_get_entry()
    2343  pgoff_t index, pgoff_t max, struct pagevec *pvec)  in filemap_get_read_batch() argument
    2352  if (xas.xa_index > max || xa_is_value(head))  in filemap_get_read_batch()
    2370  if (xas.xa_index - 1 >= max)  in filemap_get_read_batch()
    [all …]
page_alloc.c
    2812  max_boost = max(pageblock_nr_pages, max_boost);  in boost_watermark()
    3282  batch = max(batch >> order, 2);  in get_populated_pcp_list()
    7200  high = max(high, batch << 2);  in zone_highsize()
    7273  new_batch = max(1, zone_batchsize(zone));  in zone_set_pageset_high_and_batch()
    7400  *end_pfn = max(*end_pfn, this_end_pfn);  in get_pfn_range_for_nid()
    7495  *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);  in zone_spanned_pages_in_node()
    8149  required_kernelcore = max(required_kernelcore, corepages);  in find_zone_movable_pfns_for_nodes()
    8187  start_pfn = max(start_pfn, zone_movable_pfn[nid]);  in find_zone_movable_pfns_for_nodes()
    8331  end_pfn = max(max_zone_pfn[zone], start_pfn);  in free_area_init()
    8652  long max = 0;  in calculate_totalreserve_pages() local
    [all …]
page_io.c
    105  page_no < sis->max) {  in generic_swapfile_activate()
    164  sis->max = page_no;  in generic_swapfile_activate()
compaction.c
    291  block_pfn = max(block_pfn, zone->zone_start_pfn);  in __reset_isolation_pfn()
    1404  start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn);  in fast_isolate_around()
    1441  unsigned int limit = max(1U, freelist_scan_limit(cc) >> 1);  in fast_isolate_freepages()
    1503  highest = max(pageblock_start_pfn(pfn),  in fast_isolate_freepages()
    1557  limit = max(1U, limit >> 1);  in fast_isolate_freepages()
    2088  wmark_low = max(100U - sysctl_compaction_proactiveness, 5U);  in fragmentation_score_wmark()
    2667  rc = max(status, rc);  in try_to_compact_pages()
slub.c
    3800  for (order = max(min_order, (unsigned int)get_order(min_objects * size));  in slab_order()
    4179  if (oo_objects(s->oo) > oo_objects(s->max))  in calculate_sizes()
    4180  s->max = s->oo;  in calculate_sizes()
    4892  s->object_size = max(s->object_size, size);  in __kmem_cache_alias()
    4893  s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));  in __kmem_cache_alias()
    5101  unsigned long max;  member
    5111  if (t->max)  in free_loc_track()
    5113  get_order(sizeof(struct location) * t->max));  in free_loc_track()
    5116  static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)  in alloc_loc_track() argument
    5121  order = get_order(sizeof(struct location) * max);  in alloc_loc_track()
    [all …]
mmap.c
    318  unsigned long max = vma_compute_gap(vma), subtree_gap;  in vma_compute_subtree_gap() local
    322  if (subtree_gap > max)  in vma_compute_subtree_gap()
    323  max = subtree_gap;  in vma_compute_subtree_gap()
    328  if (subtree_gap > max)  in vma_compute_subtree_gap()
    329  max = subtree_gap;  in vma_compute_subtree_gap()
    331  return max;  in vma_compute_subtree_gap()
    623  max(addr, vma->vm_start)) >> PAGE_SHIFT;  in count_vma_pages_range()
    2217  info.low_limit = max(PAGE_SIZE, mmap_min_addr);  in arch_get_unmapped_area_topdown()
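
The vma_compute_subtree_gap() hits show the augmented-tree pattern: each node caches the largest gap found anywhere in its subtree, computed from its own gap and its children's cached values. The sketch below expresses that over a generic binary tree; the node layout is illustrative, not the kernel's rb_node/VMA layout.

struct node {
    unsigned long gap;          /* this node's own gap */
    unsigned long subtree_gap;  /* cached max over the whole subtree */
    struct node *left, *right;
};

/* Recompute the cached maximum for one node from its own gap and the
 * already-cached values of its children, as the 318-331 hits suggest. */
static unsigned long compute_subtree_gap(const struct node *n)
{
    unsigned long max = n->gap, subtree_gap;

    if (n->left) {
        subtree_gap = n->left->subtree_gap;
        if (subtree_gap > max)
            max = subtree_gap;
    }
    if (n->right) {
        subtree_gap = n->right->subtree_gap;
        if (subtree_gap > max)
            max = subtree_gap;
    }
    return max;
}
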
vmscan.c
    2851  cgroup_size = max(cgroup_size, protection);  in get_scan_count()
    2861  scan = max(scan, SWAP_CLUSTER_MAX);  in get_scan_count()
    3608  *vm_start = max(start, args->vma->vm_start);  in get_next_vma()
    3964  walk->next_addr = max(end, args->vma->vm_start);  in walk_pud_range()
    4109  min_seq[LRU_GEN_FILE] = max(min_seq[LRU_GEN_ANON], lrugen->min_seq[LRU_GEN_FILE]);  in try_to_inc_min_seq()
    4248  total += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);  in lruvec_is_sizable()
    4355  start = max(pvmw->address & PMD_MASK, pvmw->vma->vm_start);  in lru_gen_look_around()
    4604  if (!--remaining || max(isolated, skipped) >= MIN_LRU_BATCH)  in scan_pages()
    4841  size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);  in should_run_aging()
    4908  return max(sc->nr_to_reclaim, compact_gap(sc->order));  in get_nr_to_reclaim()
    [all …]
cma_debug.c
    62  maxchunk = max(end - start, maxchunk);  in cma_maxchunk_get()
swap_state.c
    637  if (end_offset >= si->max)  in swap_cluster_readahead()
    638  end_offset = si->max - 1;  in swap_cluster_readahead()
util.c
    101  char *kstrndup(const char *s, size_t max, gfp_t gfp)  in kstrndup() argument
    109  len = strnlen(s, max);  in kstrndup()
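
The kstrndup() hits pair a bounded strnlen() with a copy of at most max bytes, always NUL-terminating the result. A userspace equivalent, assuming malloc() instead of the kernel allocator and an illustrative function name, looks roughly like this.

#define _POSIX_C_SOURCE 200809L   /* for strnlen() */
#include <stdlib.h>
#include <string.h>

static char *strndup_sketch(const char *s, size_t max)
{
    size_t len;
    char *buf;

    if (!s)
        return NULL;

    len = strnlen(s, max);        /* never read past max bytes */
    buf = malloc(len + 1);
    if (buf) {
        memcpy(buf, s, len);
        buf[len] = '\0';
    }
    return buf;
}
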
process_vm_access.c
    177  nr_pages = max(nr_pages, nr_pages_iov);  in process_vm_rw_core()
slab.c
    156  #define REDZONE_ALIGN max(BYTES_PER_WORD, __alignof__(unsigned long long))
    576  struct array_cache *from, unsigned int max)  in transfer_objects() argument
    579  int nr = min3(from->avail, max, to->limit - to->avail);  in transfer_objects()
    3389  int max = shared_array->limit - shared_array->avail;  in cache_flusharray() local
    3390  if (max) {  in cache_flusharray()
    3391  if (batchcount > max)  in cache_flusharray()
    3392  batchcount = max;  in cache_flusharray()
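
The transfer_objects() hits show a three-way clamp: the number of objects moved between per-CPU array caches is bounded by what the source holds, what the caller asked for, and the room left in the destination. Below is a sketch under a simplified array_cache layout; the fixed 64-entry capacity and helper name are assumptions for illustration.

#include <string.h>

struct array_cache {
    unsigned int avail;   /* objects currently cached */
    unsigned int limit;   /* capacity */
    void *entry[64];      /* illustrative fixed capacity */
};

static unsigned int min3u(unsigned int a, unsigned int b, unsigned int c)
{
    unsigned int m = a < b ? a : b;

    return m < c ? m : c;
}

static int transfer_objects(struct array_cache *to,
                            struct array_cache *from, unsigned int max)
{
    int nr = min3u(from->avail, max, to->limit - to->avail);

    if (!nr)
        return 0;

    /* Move the last nr pointers from 'from' to the tail of 'to'. */
    memcpy(to->entry + to->avail, from->entry + from->avail - nr,
           sizeof(void *) * nr);
    from->avail -= nr;
    to->avail += nr;
    return nr;
}
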
/mm/damon/
paddr.c
    204  max_nr_accesses = max(r->nr_accesses, max_nr_accesses);  in damon_pa_check_accesses()