/mm/
page-writeback.c
    194  unsigned long long min = wb->bdi->min_ratio;  in wb_min_max_ratio() [local]
    202  if (min) {  in wb_min_max_ratio()
    203  min *= this_bw;  in wb_min_max_ratio()
    204  min = div64_ul(min, tot_bw);  in wb_min_max_ratio()
    212  *minp = min;  in wb_min_max_ratio()
    296  nr_pages -= min(nr_pages, pgdat->totalreserve_pages);  in node_dirtyable_memory()
    325  nr_pages -= min(nr_pages, high_wmark_pages(z));  in highmem_dirtyable_memory()
    350  return min(x, total);  in highmem_dirtyable_memory()
    372  x -= min(x, totalreserve_pages);  in global_dirtyable_memory()
    418  ratio = min(DIV_ROUND_UP(bytes, global_avail),  in domain_dirty_limits()
    [all …]
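The wb_min_max_ratio() hits above share one idiom: the per-device min_ratio is weighted by that device's share of the total writeback bandwidth before it is reported back. A minimal standalone sketch of that arithmetic, with plain 64-bit division standing in for div64_ul() and purely illustrative names:

    /* Sketch only: proportional scaling of a per-device minimum ratio. */
    #include <inttypes.h>
    #include <stdio.h>

    static uint64_t scaled_min_ratio(uint64_t min_ratio, uint64_t this_bw,
                                     uint64_t tot_bw)
    {
        uint64_t min = min_ratio;

        if (min && tot_bw) {
            min *= this_bw;     /* weight by this device's bandwidth... */
            min /= tot_bw;      /* ...as a fraction of the total        */
        }
        return min;
    }

    int main(void)
    {
        /* A device doing 1/4 of the writeback keeps 1/4 of its ratio: 5. */
        printf("%" PRIu64 "\n", scaled_min_ratio(20, 25, 100));
        return 0;
    }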
page_alloc.c
    335  pgdat->static_init_pgcnt = min(max_pgcnt, pgdat->node_spanned_pages);  in reset_deferred_meminit()
   1416  block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));  in set_zone_contiguous()
   1508  end_pfn = min(walk_end, zone_end_pfn(zone));  in deferred_init_memmap()
   2202  zone->nr_reserved_highatomic -= min(  in unreserve_highatomic_pageblock()
   2398  to_drain = min(pcp->count, batch);  in drain_zone_pages()
   2959  long min = mark;  in __zone_watermark_ok() [local]
   2967  min -= min / 2;  in __zone_watermark_ok()
   2984  min -= min / 2;  in __zone_watermark_ok()
   2986  min -= min / 4;  in __zone_watermark_ok()
   3001  if (free_pages <= min + z->lowmem_reserve[classzone_idx])  in __zone_watermark_ok()
   [all …]
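The __zone_watermark_ok() lines are the heart of the allocator's watermark test: the requested watermark is progressively relaxed for privileged allocation requests, and the request only passes if free pages still exceed the relaxed minimum plus the lowmem reserve. The sketch below captures only that shape; the flag names are invented for the example, not the kernel's alloc flags:

    #include <stdbool.h>

    #define HINT_HIGH_PRIORITY 0x1      /* illustrative, not a kernel flag */
    #define HINT_TRY_HARDER    0x2      /* illustrative, not a kernel flag */

    static bool watermark_ok(long free_pages, long mark, long reserve,
                             unsigned int hints)
    {
        long min = mark;

        if (hints & HINT_HIGH_PRIORITY)
            min -= min / 2;             /* accept half the usual watermark */
        if (hints & HINT_TRY_HARDER)
            min -= min / 4;             /* shave off another quarter       */

        return free_pages > min + reserve;
    }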
bootmem.c
    373  max = min(bdata->node_low_pfn, end);  in mark_bootmem()
    503  unsigned long min, max, start, sidx, midx, step;  in alloc_bootmem_bdata() [local]
    516  min = bdata->node_min_pfn;  in alloc_bootmem_bdata()
    524  if (max <= min)  in alloc_bootmem_bdata()
    529  if (goal && min < goal && goal < max)  in alloc_bootmem_bdata()
    532  start = ALIGN(min, step);  in alloc_bootmem_bdata()
readahead.c
    224  nr_to_read = min(nr_to_read, max_pages);  in force_page_cache_readahead()
    278  return min(newsize, max);  in get_next_ra_size()
    366  ra->size = min(size + req_size, max);  in try_context_readahead()
    391  max_pages = min(req_size, bdi->io_pages);  in ondemand_readahead()
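Every readahead hit above clamps a computed window size with min() against the backing device's limit before using it. A small sketch of the idiom, with a made-up doubling rule standing in for the actual growth policy:

    /* Grow the readahead window, but never past the device limit. */
    static unsigned long next_ra_size(unsigned long cur, unsigned long max)
    {
        unsigned long newsize = cur * 2;        /* illustrative growth rule */

        return newsize < max ? newsize : max;   /* i.e. min(newsize, max)   */
    }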
truncate.c
    293  min(end - index, (pgoff_t)PAGEVEC_SIZE),  in truncate_inode_pages_range()
    368  min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {  in truncate_inode_pages_range()
    509  min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,  in invalidate_mapping_pages()
    640  min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,  in invalidate_inode_pages2_range()
mincore.c
    208  end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));  in do_mincore()
    278  retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);  in SYSCALL_DEFINE3()
pagewalk.c
    311  next = min(end, vma->vm_start);  in walk_page_range()
    314  next = min(end, vma->vm_end);  in walk_page_range()
percpu.c
    624  block->first_free = min(block->first_free, start);  in pcpu_block_update()
    715  s_block->left_free = min(s_block->left_free, s_off);  in pcpu_block_update_hint_alloc()
    864  if ((ALIGN_DOWN(end, min(PCPU_BITS_PER_PAGE, PCPU_BITMAP_BLOCK_BITS)) >  in pcpu_block_update_hint_free()
    865  ALIGN(start, min(PCPU_BITS_PER_PAGE, PCPU_BITMAP_BLOCK_BITS))) ||  in pcpu_block_update_hint_free()
   1047  chunk->first_bit = min(chunk->first_bit, bit_off);  in pcpu_free_area()
   1660  int nr = min(re - rs, nr_to_pop);  in pcpu_balance_workfn()
   2471  base = min(ptr, base);  in pcpu_embed_first_chunk()
slub.c
   1577  if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))  in allocate_slab()
   1582  oo = s->min;  in allocate_slab()
   2399  oo_order(s->min));  in slab_out_of_memory()
   2401  if (oo_order(s->min) > get_order(s->object_size))  in slab_out_of_memory()
   3273  min_objects = min(min_objects, max_objects);  in calculate_order()
   3428  static void set_min_partial(struct kmem_cache *s, unsigned long min)  in set_min_partial() [argument]
   3430  if (min < MIN_PARTIAL)  in set_min_partial()
   3431  min = MIN_PARTIAL;  in set_min_partial()
   3432  else if (min > MAX_PARTIAL)  in set_min_partial()
   3433  min = MAX_PARTIAL;  in set_min_partial()
   [all …]
memblock.c
     70  return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);  in memblock_cap_size()
    382  min(new_area_start, memblock.current_limit),  in memblock_double_array()
    560  base = min(rend, end);  in memblock_add_range()
    941  *out_end = min(m_end, r_end);  in __next_mem_range()
   1056  *out_end = min(m_end, r_end);  in __next_mem_range_rev()
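memblock_cap_size() uses min() as an overflow guard: a region's size is capped so that base + size stays representable in the address type. A standalone illustration, assuming 64-bit physical addresses:

    #include <stdint.h>

    static uint64_t cap_region_size(uint64_t base, uint64_t size)
    {
        uint64_t room = UINT64_MAX - base;      /* bytes left before the type wraps */

        return size < room ? size : room;       /* min(size, room) */
    }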
vmscan.c
    268  lru_size -= min(size, lru_size);  in lruvec_lru_size()
    366  total_scan = min(total_scan, freeable / 2);  in do_shrink_slab()
    398  unsigned long nr_to_scan = min(batch_size, total_scan);  in do_shrink_slab()
   2364  scan = min(size, SWAP_CLUSTER_MAX);  in get_scan_count()
   2444  nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);  in shrink_node_memcg()
   2499  nr[lru] -= min(nr[lru], nr_scanned);  in shrink_node_memcg()
   2504  nr[lru] -= min(nr[lru], nr_scanned);  in shrink_node_memcg()
   2955  pgdat->kswapd_classzone_idx = min(pgdat->kswapd_classzone_idx,  in allow_direct_reclaim()
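The do_shrink_slab() hits show the batching idiom: a total scan target is consumed in min(batch_size, remaining) sized chunks. A self-contained sketch, with shrink_batch() as a made-up stand-in for the real per-batch work:

    #include <stdio.h>

    static void shrink_batch(unsigned long nr)
    {
        printf("scanning %lu objects\n", nr);   /* placeholder for real work */
    }

    static void shrink_in_batches(unsigned long total_scan, unsigned long batch)
    {
        while (total_scan) {
            unsigned long nr = total_scan < batch ? total_scan : batch;

            shrink_batch(nr);
            total_scan -= nr;
        }
    }

    int main(void)
    {
        shrink_in_batches(1000, 128);   /* seven full batches, then 104 */
        return 0;
    }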
quicklist.c
     58  return min(pages_to_free, max_free);  in min_pages_to_free()
msync.c
     84  fend = fstart + (min(end, vma->vm_end) - start) - 1;  in SYSCALL_DEFINE3()
page_counter.c
    191  *nr_pages = min(bytes / PAGE_SIZE, (u64)PAGE_COUNTER_MAX);  in page_counter_memparse()
kmemleak.c
    599  min_addr = min(min_addr, ptr);  in create_object()
   1373  next = min(start + MAX_SCAN_SIZE, end);  in scan_large_block()
   1406  next = min(start + MAX_SCAN_SIZE, end);  in scan_object()
   1833  buf_size = min(size, (sizeof(buf) - 1));  in kmemleak_write()
compaction.c
    586  block_end_pfn = min(block_end_pfn, end_pfn);  in isolate_freepages_range()
    596  block_end_pfn = min(block_end_pfn, end_pfn);  in isolate_freepages_range()
    965  block_end_pfn = min(block_end_pfn, end_pfn);  in isolate_migratepages_range()
   1066  block_end_pfn = min(block_start_pfn + pageblock_nr_pages,  in isolate_freepages()
page_owner.c
    283  block_end_pfn = min(block_end_pfn, end_pfn);  in pagetypeinfo_showmixedcount_print()
    553  block_end_pfn = min(block_end_pfn, end_pfn);  in init_pages_in_zone()
percpu-internal.h
    148  min(pcpu_stats.min_alloc_size, size);  in pcpu_stats_area_alloc()
nobootmem.c
    104  order = min(MAX_ORDER - 1UL, __ffs(start));  in __free_pages_memory()
process_vm_access.c
    103  int pages = min(nr_pages, max_pages_per_loop);  in process_vm_rw_single_vec()
memcontrol.c
   1093  margin = min(margin, limit - count);  in mem_cgroup_margin()
   1255  swap_limit = min(swap_limit, (unsigned long)total_swap_pages);  in mem_cgroup_get_limit()
   1256  limit = min(limit + swap_limit, memsw_limit);  in mem_cgroup_get_limit()
   3237  memory = min(memory, mi->memory.limit);  in memcg_stat_show()
   3238  memsw = min(memsw, mi->memsw.limit);  in memcg_stat_show()
   3743  unsigned long ceiling = min(memcg->memory.limit, memcg->high);  in mem_cgroup_wb_stats()
   3746  *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));  in mem_cgroup_wb_stats()
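The memcontrol.c hits compute headroom under a limit as limit minus usage, with the usage first clamped by min() so the subtraction cannot underflow; when both the memory and memory+swap limits apply, the smaller headroom wins. A sketch with illustrative helper names, not the kernel's:

    static unsigned long headroom(unsigned long limit, unsigned long used)
    {
        unsigned long capped = used < limit ? used : limit;     /* min(limit, used) */

        return limit - capped;
    }

    static unsigned long margin(unsigned long mem_limit, unsigned long mem_used,
                                unsigned long memsw_limit, unsigned long memsw_used)
    {
        unsigned long a = headroom(mem_limit, mem_used);
        unsigned long b = headroom(memsw_limit, memsw_used);

        return a < b ? a : b;           /* the tighter limit governs */
    }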
mmap.c
    530  nr_pages = (min(end, vma->vm_end) -  in count_vma_pages_range()
    540  overlap_len = min(end, vma->vm_end) - vma->vm_start;  in count_vma_pages_range()
   2866  min(tmp->vm_end, start + size));  in SYSCALL_DEFINE5()
   3590  sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);  in init_user_reserve()
   3611  sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);  in init_admin_reserve()
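init_user_reserve() and init_admin_reserve() pick a reserve of free_kbytes / 32 (roughly 3%) capped at a fixed ceiling: 1UL << 17 kB (128 MiB) for the user reserve and 1UL << 13 kB (8 MiB) for the admin reserve. A standalone sketch of that rule:

    static unsigned long capped_reserve_kbytes(unsigned long free_kbytes,
                                               unsigned long ceiling_kbytes)
    {
        unsigned long frac = free_kbytes / 32;  /* about 3% of free memory */

        return frac < ceiling_kbytes ? frac : ceiling_kbytes;
    }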
khugepaged.c
   1353  int n = min(iter.index, end) - index;  in collapse_shmem()
   1373  for (; index < min(iter.index, end); index++) {  in collapse_shmem()
   1923  recommended_min = min(recommended_min,  in set_recommended_min_free_kbytes()
hmm.c
    151  npages = (min(range->end, end) - addr) >> PAGE_SHIFT;  in hmm_invalidate_range()
   1005  addr = min((unsigned long)iomem_resource.end,  in hmm_devmem_add()
vmalloc.c
   1063  vb->dirty_min = min(vb->dirty_min, offset);  in vb_free()
   1113  start = min(s, start);  in vm_unmap_aliases()
   2464  addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end);  in pvm_determine_end()