Search hits for the symbol "min" (source line, code, enclosing function; hits tagged "local" or "argument" are definitions; "[all …]" marks a truncated hit list):

/mm/

page_counter.c
      20  unsigned long low, min;  in propagate_protected_usage() local
      26  min = READ_ONCE(c->min);  in propagate_protected_usage()
      27  if (min || atomic_long_read(&c->min_usage)) {  in propagate_protected_usage()
      28  protected = min(usage, min);  in propagate_protected_usage()
      37  protected = min(usage, low);  in propagate_protected_usage()
     212  WRITE_ONCE(counter->min, nr_pages);  in page_counter_set_min()
     259  *nr_pages = min(bytes / PAGE_SIZE, (u64)PAGE_COUNTER_MAX);  in page_counter_memparse()

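The propagate_protected_usage() hits share one pattern: the protection a counter can claim is clamped to its actual usage, protected = min(usage, min). A minimal standalone sketch of that clamp, assuming nothing beyond the lines above (plain C; unlike the kernel's type-checked min(), the naive MIN here evaluates its arguments twice):

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* A group's effective protection can never exceed what it actually uses. */
    static unsigned long effective_protection(unsigned long usage,
                                              unsigned long min_prot)
    {
            return MIN(usage, min_prot);
    }

    int main(void)
    {
            printf("%lu\n", effective_protection(300, 512)); /* 300: capped by usage */
            printf("%lu\n", effective_protection(800, 512)); /* 512: capped by min   */
            return 0;
    }
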
page-writeback.c
     197  unsigned long long min = wb->bdi->min_ratio;  in wb_min_max_ratio() local
     205  if (min) {  in wb_min_max_ratio()
     206  min *= this_bw;  in wb_min_max_ratio()
     207  min = div64_ul(min, tot_bw);  in wb_min_max_ratio()
     215  *minp = min;  in wb_min_max_ratio()
     299  nr_pages -= min(nr_pages, pgdat->totalreserve_pages);  in node_dirtyable_memory()
     328  nr_pages -= min(nr_pages, high_wmark_pages(z));  in highmem_dirtyable_memory()
     353  return min(x, total);  in highmem_dirtyable_memory()
     375  x -= min(x, totalreserve_pages);  in global_dirtyable_memory()
     420  ratio = min(DIV_ROUND_UP(bytes, global_avail),  in domain_dirty_limits()
    [all …]

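In wb_min_max_ratio() the device's min_ratio is weighted by its share of total writeback bandwidth (min *= this_bw; min = div64_ul(min, tot_bw)). A standalone sketch of that arithmetic, with ordinary 64-bit division standing in for div64_ul() and a tot_bw guard added for safety:

    #include <stdint.h>

    /* Weight a device's min ratio by its share of total writeback bandwidth. */
    uint64_t scale_min_ratio(uint64_t min_ratio, uint64_t this_bw,
                             uint64_t tot_bw)
    {
            if (min_ratio && tot_bw) {
                    min_ratio *= this_bw;   /* weight by this device's bandwidth */
                    min_ratio /= tot_bw;    /* ... as a fraction of the total    */
            }
            return min_ratio;
    }
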
mapping_dirty_helpers.c
      44  wpwalk->tlbflush_start = min(wpwalk->tlbflush_start, addr);  in wp_pte()
     102  wpwalk->tlbflush_start = min(wpwalk->tlbflush_start, addr);  in clean_record_pte()
     107  cwalk->start = min(cwalk->start, pgoff);  in clean_record_pte()

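wp_pte() and clean_record_pte() pull the start of a pending TLB-flush range down with min() on every touched address (the matching max() for the range end naturally does not appear in a search for "min"). A standalone sketch of that accumulate-then-flush-once bookkeeping:

    struct flush_range {
            unsigned long start;    /* lowest address touched so far  */
            unsigned long end;      /* highest address touched so far */
    };

    /* Widen the pending flush range to cover one more touched page. */
    void flush_range_note(struct flush_range *r, unsigned long addr,
                          unsigned long next)
    {
            if (addr < r->start)    /* start = min(start, addr) */
                    r->start = addr;
            if (next > r->end)      /* end = max(end, next)     */
                    r->end = next;
    }
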
compaction.c
     304  block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);  in __reset_isolation_pfn()
     723  block_end_pfn = min(block_end_pfn, end_pfn);  in isolate_freepages_range()
     733  block_end_pfn = min(block_end_pfn, end_pfn);  in isolate_freepages_range()
    1157  block_end_pfn = min(block_end_pfn, end_pfn);  in isolate_migratepages_range()
    1229  return (COMPACT_CLUSTER_MAX >> min(shift, cc->fast_search_fail)) + 1;  in freelist_scan_limit()
    1293  end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone));  in fast_isolate_around()
    1329  unsigned int limit = min(1U, freelist_scan_limit(cc) >> 1);  in fast_isolate_freepages()
    1445  limit = min(1U, limit >> 1);  in fast_isolate_freepages()
    1462  min(pageblock_end_pfn(min_pfn),  in fast_isolate_freepages()
    1518  block_end_pfn = min(block_start_pfn + pageblock_nr_pages,  in isolate_freepages()
    [all …]

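Most compaction hits are the same guard, block_end_pfn = min(block_end_pfn, end_pfn): a pageblock-aligned scan end is clamped so the walk never leaves the zone or the caller's range. A standalone sketch of the block-by-block walk; BLOCK_NR and scan_block() are hypothetical stand-ins for pageblock_nr_pages and the per-block isolation work:

    #define MIN(a, b) ((a) < (b) ? (a) : (b))
    #define BLOCK_NR 512UL          /* hypothetical stand-in for pageblock_nr_pages */

    /* Hypothetical stand-in for the per-block isolation work. */
    static void scan_block(unsigned long start_pfn, unsigned long end_pfn)
    {
            (void)start_pfn;
            (void)end_pfn;
    }

    /* Visit [start_pfn, end_pfn) one aligned block at a time, never
     * scanning past the caller's end. */
    void walk_blocks(unsigned long start_pfn, unsigned long end_pfn)
    {
            unsigned long pfn = start_pfn, block_end;

            while (pfn < end_pfn) {
                    /* round up to the next block boundary ... */
                    block_end = (pfn + BLOCK_NR) & ~(BLOCK_NR - 1);
                    /* ... but clamp: block_end_pfn = min(block_end_pfn, end_pfn) */
                    block_end = MIN(block_end, end_pfn);
                    scan_block(pfn, block_end);
                    pfn = block_end;
            }
    }
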
truncate.c
     327  min(end - index, (pgoff_t)PAGEVEC_SIZE),  in truncate_inode_pages_range()
     414  min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {  in truncate_inode_pages_range()
     540  min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,  in __invalidate_mapping_pages()
     720  min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,  in invalidate_inode_pages2_range()

page_alloc.c
    1449  count = min(pcp->count, count);  in free_pcppages_bulk()
    1764  block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));  in set_zone_contiguous()
    1948  t = min(mo_pfn, *end_pfn);  in deferred_init_maxorder()
    1966  t = min(mo_pfn, epfn);  in deferred_init_maxorder()
    2616  zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,  in boost_watermark()
    2839  zone->nr_reserved_highatomic -= min(  in unreserve_highatomic_pageblock()
    3091  to_drain = min(pcp->count, batch);  in drain_zone_pages()
    3773  long min = mark;  in __zone_watermark_ok() local
    3781  min -= min / 2;  in __zone_watermark_ok()
    3791  min -= min / 2;  in __zone_watermark_ok()
    [all …]

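The pair of min -= min / 2 hits in __zone_watermark_ok() is progressive watermark relaxation: each privilege the allocation context holds halves the reserve it must leave intact. A standalone sketch; the two boolean flags are illustrative stand-ins for the kernel's alloc_flags tests, not its exact policy:

    #include <stdio.h>

    static long relaxed_watermark(long mark, int high_priority, int atomic)
    {
            long min = mark;

            if (high_priority)
                    min -= min / 2;         /* may dip to 1/2 of the watermark */
            if (atomic)
                    min -= min / 2;         /* ... and to 1/4 overall */
            return min;
    }

    int main(void)
    {
            printf("%ld %ld %ld\n",
                   relaxed_watermark(1024, 0, 0),   /* 1024 */
                   relaxed_watermark(1024, 1, 0),   /* 512  */
                   relaxed_watermark(1024, 1, 1));  /* 256  */
            return 0;
    }
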
mincore.c
     193  end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));  in do_mincore()
     264  retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);  in SYSCALL_DEFINE3()

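The SYSCALL_DEFINE3() hit shows mincore()'s batching: the kernel fills a one-page temporary buffer, so each pass handles at most min(pages, PAGE_SIZE) vector entries before copying out and advancing. A standalone sketch of that bounded-batch loop, assuming a 4096-byte page; status_batch() is a hypothetical stand-in for do_mincore():

    #define PAGE_SIZE 4096UL
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Hypothetical stand-in for do_mincore(): fill vec[0..n) with per-page
     * status and report how many entries were produced. */
    static long status_batch(unsigned long start, unsigned long n,
                             unsigned char *vec)
    {
            unsigned long i;

            (void)start;
            for (i = 0; i < n; i++)
                    vec[i] = 1;
            return (long)n;
    }

    /* Process `pages` vector entries, one temporary page's worth per pass. */
    long walk_in_batches(unsigned long start, unsigned long pages,
                         unsigned char *out)
    {
            static unsigned char tmp[PAGE_SIZE];
            long done;

            while (pages) {
                    done = status_batch(start, MIN(pages, PAGE_SIZE), tmp);
                    if (done <= 0)
                            return done;
                    for (long i = 0; i < done; i++)
                            *out++ = tmp[i];
                    start += (unsigned long)done * PAGE_SIZE;
                    pages -= (unsigned long)done;
            }
            return 0;
    }
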
vmscan.c
     505  total_scan = min(total_scan, freeable / 2);  in do_shrink_slab()
     536  unsigned long nr_to_scan = min(batch_size, total_scan);  in do_shrink_slab()
    2426  unsigned long low, min;  in get_scan_count() local
    2431  &min, &low);  in get_scan_count()
    2433  if (min || low) {  in get_scan_count()
    2467  if (!sc->memcg_low_reclaim && low > min) {  in get_scan_count()
    2471  protection = min;  in get_scan_count()
    2497  scan = min(lruvec_size, SWAP_CLUSTER_MAX);  in get_scan_count()
    2569  nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);  in shrink_lruvec()
    2624  nr[lru] -= min(nr[lru], nr_scanned);  in shrink_lruvec()
    [all …]

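shrink_lruvec() takes work off each list in bites of nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX), then subtracts progress with another min() so the remaining count cannot underflow. A standalone sketch of that loop shape; scan_batch() is a hypothetical stand-in for the real scan, and 32 is the kernel's SWAP_CLUSTER_MAX:

    #define SWAP_CLUSTER_MAX 32UL
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Hypothetical stand-in for one bounded reclaim pass; reports pages
     * scanned (here: all of them, so the loop always terminates). */
    static unsigned long scan_batch(unsigned long nr_to_scan)
    {
            return nr_to_scan;
    }

    /* Drain a list in SWAP_CLUSTER_MAX-sized bites, never underflowing nr. */
    void drain_list(unsigned long nr)
    {
            while (nr) {
                    unsigned long nr_scanned = scan_batch(MIN(nr, SWAP_CLUSTER_MAX));

                    nr -= MIN(nr, nr_scanned);  /* nr[lru] -= min(nr[lru], nr_scanned) */
            }
    }
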
memblock.c
     169  return *size = min(*size, PHYS_ADDR_MAX - base);  in memblock_cap_size()
     451  min(new_area_start, memblock.current_limit),  in memblock_double_array()
     630  base = min(rend, end);  in memblock_add_range()
    1053  *out_end = min(m_end, r_end);  in __next_mem_range()
    1156  *out_end = min(m_end, r_end);  in __next_mem_range_rev()
    1283  *out_epfn = min(zone_end_pfn(zone), epfn);  in __next_mem_pfn_range_in_zone()
    1924  order = min(MAX_ORDER - 1UL, __ffs(start));  in __free_pages_memory()

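memblock_cap_size() clamps a region's size to PHYS_ADDR_MAX - base so that base + size cannot wrap around the physical address space. A standalone sketch with uint64_t standing in for phys_addr_t:

    #include <stdint.h>

    #define PHYS_ADDR_MAX UINT64_MAX        /* top of the sketch's address space */

    /* Clamp size so base + size cannot wrap past the top of the address space. */
    uint64_t cap_size(uint64_t base, uint64_t size)
    {
            uint64_t room = PHYS_ADDR_MAX - base;

            return size < room ? size : room;       /* min(size, room) */
    }
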
pagewalk.c
     409  next = min(end, vma->vm_start);  in walk_page_range()
     414  next = min(end, vma->vm_end);  in walk_page_range()
     541  cea = min(cea, vea);  in walk_page_mapping()

slub.c
    1814  if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))  in allocate_slab()
    1819  oo = s->min;  in allocate_slab()
    2601  oo_order(s->min));  in slab_out_of_memory()
    2603  if (oo_order(s->min) > get_order(s->object_size))  in slab_out_of_memory()
    3517  min_objects = min(min_objects, max_objects);  in calculate_order()
    3673  static void set_min_partial(struct kmem_cache *s, unsigned long min)  in set_min_partial() argument
    3675  if (min < MIN_PARTIAL)  in set_min_partial()
    3676  min = MIN_PARTIAL;  in set_min_partial()
    3677  else if (min > MAX_PARTIAL)  in set_min_partial()
    3678  min = MAX_PARTIAL;  in set_min_partial()
    [all …]

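set_min_partial() is a plain clamp: the requested per-node partial-list target is forced into [MIN_PARTIAL, MAX_PARTIAL] before being stored, exactly as the 3675-3678 hits show. A standalone sketch; 5 and 10 are slub.c's values for the two bounds:

    #define MIN_PARTIAL 5UL
    #define MAX_PARTIAL 10UL

    /* Clamp the requested partial-slab target into its allowed window. */
    unsigned long clamp_min_partial(unsigned long min)
    {
            if (min < MIN_PARTIAL)
                    min = MIN_PARTIAL;
            else if (min > MAX_PARTIAL)
                    min = MAX_PARTIAL;
            return min;
    }
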
readahead.c
     436  ra->size = min(size + req_size, max);  in try_context_readahead()
     460  max_pages = min(req_size, bdi->io_pages);  in ondemand_readahead()

msync.c
      86  fend = fstart + (min(end, vma->vm_end) - start) - 1;  in SYSCALL_DEFINE3()

memcontrol.c
    1471  margin = min(margin, limit - count);  in mem_cgroup_margin()
    1728  max += min(READ_ONCE(memcg->swap.max),  in mem_cgroup_get_max()
    1735  max += min(swap, (unsigned long)total_swap_pages);  in mem_cgroup_get_max()
    2679  penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);  in mem_cgroup_handle_over_high()
    4150  memory = min(memory, READ_ONCE(mi->memory.max));  in memcg_stat_show()
    4151  memsw = min(memsw, READ_ONCE(mi->memsw.max));  in memcg_stat_show()
    4658  unsigned long ceiling = min(READ_ONCE(memcg->memory.max),  in mem_cgroup_wb_stats()
    4662  *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));  in mem_cgroup_wb_stats()
    6316  READ_ONCE(mem_cgroup_from_seq(m)->memory.min));  in memory_min_show()
    6323  unsigned long min;  in memory_min_write() local
    [all …]

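mem_cgroup_margin() keeps the tightest headroom seen while walking up the hierarchy, margin = min(margin, limit - count), since the most constrained ancestor bounds any new charge. A standalone sketch over an array standing in for the ancestor chain:

    #include <stdio.h>

    struct counter { unsigned long count, limit; };

    /* The tightest headroom anywhere on the ancestor chain bounds a charge. */
    static unsigned long chain_margin(const struct counter *c, int depth)
    {
            unsigned long margin = (unsigned long)-1;
            int i;

            for (i = 0; i < depth; i++) {
                    unsigned long room = c[i].limit > c[i].count ?
                                         c[i].limit - c[i].count : 0;

                    if (room < margin)      /* margin = min(margin, room) */
                            margin = room;
            }
            return margin;
    }

    int main(void)
    {
            struct counter chain[] = { { 10, 100 }, { 90, 120 } };

            printf("%lu\n", chain_margin(chain, 2)); /* 30: the parent is tighter */
            return 0;
    }
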
kmemleak.c
     624  min_addr = min(min_addr, untagged_ptr);  in create_object()
    1314  next = min(start + MAX_SCAN_SIZE, end);  in scan_large_block()
    1348  next = min(start + MAX_SCAN_SIZE, end);  in scan_object()
    1783  buf_size = min(size, (sizeof(buf) - 1));  in kmemleak_write()

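scan_large_block() and scan_object() walk memory in bounded chunks, next = min(start + MAX_SCAN_SIZE, end), so the final chunk never runs past the end and the scanner can yield between chunks. A standalone sketch; scan_chunk() is a hypothetical stand-in for the real pointer scan, and 4096 mirrors kmemleak's MAX_SCAN_SIZE:

    #define MAX_SCAN_SIZE 4096UL

    /* Hypothetical stand-in for the real pointer scan of one chunk. */
    static void scan_chunk(const char *start, const char *end)
    {
            (void)start;
            (void)end;
    }

    /* Scan [start, end) one bounded chunk at a time so the caller could
     * reschedule between chunks. */
    void scan_large_range(const char *start, const char *end)
    {
            const char *next;

            while (start < end) {
                    next = start + MAX_SCAN_SIZE;
                    if (next > end)     /* next = min(start + MAX_SCAN_SIZE, end) */
                            next = end;
                    scan_chunk(start, next);
                    start = next;
            }
    }
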
percpu-internal.h
     208  min(pcpu_stats.min_alloc_size, size);  in pcpu_stats_area_alloc()

page_owner.c
     328  block_end_pfn = min(block_end_pfn, end_pfn);  in pagetypeinfo_showmixedcount_print()
     631  block_end_pfn = min(block_end_pfn, end_pfn);  in init_pages_in_zone()

process_vm_access.c
      96  int pinned_pages = min(nr_pages, max_pages_per_loop);  in process_vm_rw_single_vec()

memory_hotplug.c
     342  cur_nr_pages = min(end_pfn - pfn,  in __add_pages()
     484  min(end_pfn - pfn, SECTION_ALIGN_UP(pfn + 1) - pfn);  in remove_pfn_range_from_zone()
     547  cur_nr_pages = min(end_pfn - pfn,  in __remove_pages()

percpu.c
     590  block->first_free = min(block->first_free, start);  in pcpu_block_update()
     820  s_block->left_free = min(s_block->left_free, s_off);  in pcpu_block_update_hint_alloc()
    1251  chunk_md->first_free = min(chunk_md->first_free, bit_off);  in pcpu_free_area()
    2883  base = min(ptr, base);  in pcpu_embed_first_chunk()

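The percpu hits maintain allocation hints: freeing at a lower offset pulls the cached first_free down with min(), so the next search can start at the lowest slot known to be free. A standalone sketch of that hint update:

    struct alloc_hint {
            unsigned long first_free;       /* lowest offset that might be free */
    };

    /* Freeing at `off` can only move the search hint down. */
    void note_free(struct alloc_hint *h, unsigned long off)
    {
            if (off < h->first_free)        /* first_free = min(first_free, off) */
                    h->first_free = off;
    }
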
mmap.c
     653  nr_pages = (min(end, vma->vm_end) -  in count_vma_pages_range()
     663  overlap_len = min(end, vma->vm_end) - vma->vm_start;  in count_vma_pages_range()
    3176  min(tmp->vm_end, start + size));  in SYSCALL_DEFINE5()
    3900  sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);  in init_user_reserve()
    3921  sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);  in init_admin_reserve()

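init_user_reserve() and init_admin_reserve() size their reserves as 1/32 of free memory, capped by min(): 1UL << 17 kB (128 MiB) for the user reserve, 1UL << 13 kB (8 MiB) for the admin reserve. A standalone sketch of that arithmetic:

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* 1/32 of free memory, capped at 128 MiB (user) and 8 MiB (admin), in kB. */
    static unsigned long user_reserve_kbytes(unsigned long free_kbytes)
    {
            return MIN(free_kbytes / 32, 1UL << 17);
    }

    static unsigned long admin_reserve_kbytes(unsigned long free_kbytes)
    {
            return MIN(free_kbytes / 32, 1UL << 13);
    }

    int main(void)
    {
            unsigned long free_kbytes = 16UL << 20; /* 16 GiB free */

            printf("%lu %lu\n", user_reserve_kbytes(free_kbytes),
                   admin_reserve_kbytes(free_kbytes)); /* 131072 8192: both capped */
            return 0;
    }
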
vmalloc.c
    1728  vb->dirty_min = min(vb->dirty_min, offset);  in vb_free()
    1763  start = min(s, start);  in _vm_unmap_aliases()
    2232  start = min(addr, start);  in vm_remove_mappings()
    3165  addr = min((*va)->va_end & ~(align - 1), vmalloc_end);  in pvm_determine_end_from_reverse()

nommu.c
    1838  sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);  in init_user_reserve()
    1859  sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);  in init_admin_reserve()

/mm/kfence/

report.c
     153  end = (const u8 *)(address < meta->addr ? min(show_until_addr, meta->addr)  in print_diff_canary()
     154  : min(show_until_addr, PAGE_ALIGN(address)));  in print_diff_canary()

kfence_test.c
      53  strscpy(observed.lines[0], buf, min(len + 1, sizeof(observed.lines[0])));  in probe_console()
      56  strscpy(observed.lines[nlines++], buf, min(len + 1, sizeof(observed.lines[0])));  in probe_console()

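probe_console() copies min(len + 1, sizeof(observed.lines[0])) bytes, bounding the copy by both the message length and the destination buffer while leaving room for the terminator. A standalone sketch using snprintf() in place of the kernel's strscpy():

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static char line[256];

    /* Keep at most one stored line: bounded by the message length and by
     * the buffer, always NUL-terminated. */
    static void store_line(const char *msg, size_t len)
    {
            snprintf(line, MIN(len + 1, sizeof(line)), "%s", msg);
    }

    int main(void)
    {
            store_line("BUG: KFENCE: use-after-free", 27);
            puts(line);
            return 0;
    }
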