/mm/ |
D | Kconfig |
      105 # Architecture platforms which require a two level mem_section in SPARSEMEM
      121 SPARSEMEM_VMEMMAP uses a virtually mapped memmap to optimise
      192 # SPARC32 allocates multiple pte tables within a single page, and therefore
      193 # a per-page lock leads to problems when multiple tables need to be locked
      222 used within a guest, thus imposing performance penalties associated
      241 disable this option unless there really is a strong reason for
      253 those pages to another entity, such as a hypervisor, so that the
      268 pages as migration can relocate pages to satisfy a huge page
      314 the many instances by a single page with that content, so
      318 until a program has madvised that an area is MADV_MERGEABLE, and
      [all …]
|
D | Kconfig.debug |
      18 Depending on runtime enablement, this results in a small or large
      32 incorrect warnings of memory corruption after a resume because free
      35 By default this option will have a small overhead, e.g. by not
      56 This keeps track of what call chain is the owner of a page, may
      60 a fair amount of memory if enabled. See tools/vm/page_owner_sort.c
      73 This keeps track of what call chain is the pinner of a page, may
      77 a fair amount of memory if enabled.
      87 have a potential performance impact if enabled with the
      104 This is a feature to add tracepoint for tracking down page reference
      115 This option enables a testcase for the setting rodata read-only.
      [all …]
|
D | frontswap.c |
      115 DECLARE_BITMAP(a, MAX_SWAPFILES); in frontswap_register_ops()
      120 bitmap_zero(a, MAX_SWAPFILES); in frontswap_register_ops()
      126 set_bit(si->type, a); in frontswap_register_ops()
      131 for_each_set_bit(i, a, MAX_SWAPFILES) in frontswap_register_ops()
      158 if (unlikely(!bitmap_equal(a, b, MAX_SWAPFILES))) { in frontswap_register_ops()
      160 if (!test_bit(i, a) && test_bit(i, b)) in frontswap_register_ops()
      162 else if (test_bit(i, a) && !test_bit(i, b)) in frontswap_register_ops()
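The frontswap_register_ops() hits above revolve around snapshotting which swap devices are enabled in a bitmap before and after registration, then diffing the two snapshots. Below is a minimal userspace sketch of that diff; the bitmap helpers and the MAX_SWAPFILES value are illustrative stand-ins, not the kernel's DECLARE_BITMAP()/bitmap_equal() API.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MAX_SWAPFILES 32                    /* illustrative, not the kernel value */
#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BITMAP_LONGS  ((MAX_SWAPFILES + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void set_bit_in(unsigned long *map, unsigned int bit)
{
    map[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

static bool test_bit_in(const unsigned long *map, unsigned int bit)
{
    return map[bit / BITS_PER_LONG] & (1UL << (bit % BITS_PER_LONG));
}

int main(void)
{
    unsigned long a[BITMAP_LONGS] = { 0 };  /* snapshot taken before */
    unsigned long b[BITMAP_LONGS] = { 0 };  /* snapshot taken after  */

    set_bit_in(a, 1);
    set_bit_in(b, 1);
    set_bit_in(b, 3);                       /* device 3 appeared in between */

    if (memcmp(a, b, sizeof(a)) != 0) {
        for (unsigned int i = 0; i < MAX_SWAPFILES; i++) {
            if (!test_bit_in(a, i) && test_bit_in(b, i))
                printf("swap device %u came online during registration\n", i);
            else if (test_bit_in(a, i) && !test_bit_in(b, i))
                printf("swap device %u went away during registration\n", i);
        }
    }
    return 0;
}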
|
D | nommu.c |
      1338 struct mmap_arg_struct a; in SYSCALL_DEFINE1() local
      1340 if (copy_from_user(&a, arg, sizeof(a))) in SYSCALL_DEFINE1()
      1342 if (offset_in_page(a.offset)) in SYSCALL_DEFINE1()
      1345 return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, in SYSCALL_DEFINE1()
      1346 a.offset >> PAGE_SHIFT); in SYSCALL_DEFINE1()
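This hit set (and the matching one in mmap.c below) is the legacy one-argument mmap entry point: copy a struct mmap_arg_struct from userspace, reject an offset that is not page aligned, and forward the page-shifted offset. A hedged userspace sketch of just the alignment check and shift; the PAGE_SHIFT/PAGE_SIZE values assume 4 KiB pages, and check_and_shift_offset() is a hypothetical helper.

#include <errno.h>
#include <stdio.h>

#define PAGE_SHIFT 12                       /* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Mirrors the offset_in_page() rejection seen in the listing above. */
static long check_and_shift_offset(unsigned long offset, unsigned long *pgoff)
{
    if (offset & (PAGE_SIZE - 1))           /* not page aligned */
        return -EINVAL;
    *pgoff = offset >> PAGE_SHIFT;
    return 0;
}

int main(void)
{
    unsigned long pgoff;

    printf("aligned:   %ld\n", check_and_shift_offset(0x3000, &pgoff)); /* 0 */
    printf("unaligned: %ld\n", check_and_shift_offset(0x3001, &pgoff)); /* -EINVAL */
    return 0;
}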
|
D | page_poison.c |
      41 static bool single_bit_flip(unsigned char a, unsigned char b) in single_bit_flip() argument
      43 unsigned char error = a ^ b; in single_bit_flip()
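single_bit_flip() classifies a poison-check mismatch: XORing the expected and observed bytes leaves only the differing bits, and a nonzero power-of-two result means exactly one bit flipped. A standalone sketch of that test (the main() driver values are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* True if bytes a and b differ in exactly one bit position. */
static bool single_bit_flip(unsigned char a, unsigned char b)
{
    unsigned char error = a ^ b;            /* bits that differ */

    return error && !(error & (error - 1)); /* power of two => one bit set */
}

int main(void)
{
    printf("%d\n", single_bit_flip(0xAA, 0xAB)); /* 1: only bit 0 differs */
    printf("%d\n", single_bit_flip(0xAA, 0x55)); /* 0: all eight bits differ */
    return 0;
}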
|
D | mmap.c |
      1326 static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b) in anon_vma_compatible() argument
      1328 return a->vm_end == b->vm_start && in anon_vma_compatible()
      1329 mpol_equal(vma_policy(a), vma_policy(b)) && in anon_vma_compatible()
      1330 a->vm_file == b->vm_file && in anon_vma_compatible()
      1331 !((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) && in anon_vma_compatible()
      1332 b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT); in anon_vma_compatible()
      1357 static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, str… in reusable_anon_vma() argument
      1359 if (anon_vma_compatible(a, b)) { in reusable_anon_vma()
      1726 struct mmap_arg_struct a; in SYSCALL_DEFINE1() local
      1728 if (copy_from_user(&a, arg, sizeof(a))) in SYSCALL_DEFINE1()
      [all …]
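anon_vma_compatible() decides whether two neighbouring VMAs may share an anon_vma: b must begin exactly where a ends, the backing file and memory policy must match, the flags must agree outside the access and soft-dirty bits, and the file offsets must be contiguous. The sketch below is a heavily simplified, hypothetical userspace rendition of those checks; the struct, the flag masks, and the PAGE_SHIFT value are stand-ins, and the memory-policy comparison is omitted.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT      12          /* assumed */
#define VM_ACCESS_FLAGS 0x7UL       /* stand-in for the read/write/exec bits */
#define VM_SOFTDIRTY    0x8UL       /* stand-in */

struct vma {                        /* hypothetical, trimmed-down VMA */
    unsigned long start, end;
    unsigned long flags;
    unsigned long pgoff;            /* offset into the file, in pages */
    const void *file;               /* backing file, NULL for anonymous */
};

static bool vma_compatible(const struct vma *a, const struct vma *b)
{
    return a->end == b->start &&                                 /* adjacent */
           a->file == b->file &&                                 /* same backing file */
           !((a->flags ^ b->flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
           b->pgoff == a->pgoff + ((b->start - a->start) >> PAGE_SHIFT);
}

int main(void)
{
    struct vma a = { 0x1000, 0x3000, 0x3, 0, NULL };
    struct vma b = { 0x3000, 0x5000, 0x3, 2, NULL };

    printf("compatible: %d\n", vma_compatible(&a, &b));          /* 1 */
    return 0;
}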
|
D | percpu-stats.c |
      25 static int cmpint(const void *a, const void *b) in cmpint() argument
      27 return *(int *)a - *(int *)b; in cmpint()
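cmpint() is a plain qsort()-style comparator that subtracts the two ints. A self-contained usage sketch follows; note that the subtraction form can overflow for operands of opposite extreme sign, which is why the (a > b) - (a < b) idiom is sometimes preferred. The data being sorted is illustrative.

#include <stdio.h>
#include <stdlib.h>

static int cmpint(const void *a, const void *b)
{
    return *(const int *)a - *(const int *)b;  /* fine while the values stay small */
}

int main(void)
{
    int sizes[] = { 48, 16, 32, 8 };

    qsort(sizes, 4, sizeof(sizes[0]), cmpint);
    for (int i = 0; i < 4; i++)
        printf("%d ", sizes[i]);               /* 8 16 32 48 */
    printf("\n");
    return 0;
}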
|
D | mempolicy.c |
      2336 bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) in __mpol_equal() argument
      2338 if (!a || !b) in __mpol_equal()
      2340 if (a->mode != b->mode) in __mpol_equal()
      2342 if (a->flags != b->flags) in __mpol_equal()
      2344 if (mpol_store_user_nodemask(a)) in __mpol_equal()
      2345 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask)) in __mpol_equal()
      2348 switch (a->mode) { in __mpol_equal()
      2351 return !!nodes_equal(a->v.nodes, b->v.nodes); in __mpol_equal()
      2354 if (a->flags & MPOL_F_LOCAL) in __mpol_equal()
      2356 return a->v.preferred_node == b->v.preferred_node; in __mpol_equal()
|
D | zsmalloc.c | 129 #define MAX(a, b) ((a) >= (b) ? (a) : (b)) argument
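The MAX() hit is the classic function-like macro; because each argument is substituted textually, an argument with side effects may be evaluated twice. A small sketch showing the macro and that pitfall (the driver values are illustrative):

#include <stdio.h>

#define MAX(a, b) ((a) >= (b) ? (a) : (b))

int main(void)
{
    int x = 3, y = 5;

    printf("%d\n", MAX(x, y));      /* 5 */

    /* Pitfall: after substitution x++ appears twice, so when the first
     * operand wins the comparison it is incremented twice. */
    printf("%d\n", MAX(x++, 1));    /* prints 4 */
    printf("x is now %d\n", x);     /* 5, not 4 */
    return 0;
}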
|
D | slab.c |
      2418 static void swap_free_obj(struct page *page, unsigned int a, unsigned int b) in swap_free_obj() argument
      2420 swap(((freelist_idx_t *)page->freelist)[a], in swap_free_obj()
      3026 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp) argument
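swap_free_obj() swaps two entries of a page's freelist index array, the core step when the freelist order is shuffled. The sketch below uses the same swap inside a Fisher-Yates-style shuffle over a plain array; the freelist_idx_t typedef, the array size, and the use of rand() are illustrative assumptions, not the kernel's randomization code.

#include <stdio.h>
#include <stdlib.h>

typedef unsigned char freelist_idx_t;       /* illustrative element type */

static void swap_idx(freelist_idx_t *list, unsigned int a, unsigned int b)
{
    freelist_idx_t tmp = list[a];

    list[a] = list[b];
    list[b] = tmp;
}

int main(void)
{
    freelist_idx_t freelist[8];

    for (unsigned int i = 0; i < 8; i++)
        freelist[i] = (freelist_idx_t)i;

    /* Fisher-Yates: swap each slot with a random earlier-or-equal slot. */
    for (unsigned int i = 7; i > 0; i--)
        swap_idx(freelist, i, (unsigned int)rand() % (i + 1));

    for (unsigned int i = 0; i < 8; i++)
        printf("%u ", (unsigned int)freelist[i]);
    printf("\n");
    return 0;
}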
|
D | percpu.c |
      571 static inline bool pcpu_region_overlap(int a, int b, int x, int y) in pcpu_region_overlap() argument
      573 return (a < y) && (x < b); in pcpu_region_overlap()
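pcpu_region_overlap() is the standard half-open interval test: [a, b) and [x, y) overlap exactly when a < y and x < b. A tiny standalone check (the sample ranges are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* True if half-open ranges [a, b) and [x, y) share at least one point. */
static bool region_overlap(int a, int b, int x, int y)
{
    return (a < y) && (x < b);
}

int main(void)
{
    printf("%d\n", region_overlap(0, 10, 5, 15));   /* 1: overlapping */
    printf("%d\n", region_overlap(0, 10, 10, 20));  /* 0: merely adjacent */
    return 0;
}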
|
D | memcontrol.c |
      4290 static int compare_thresholds(const void *a, const void *b) in compare_thresholds() argument
      4292 const struct mem_cgroup_threshold *_a = a; in compare_thresholds()
|
/mm/damon/ |
D | reclaim.c |
      227 struct damon_reclaim_ram_walk_arg *a = arg; in walk_system_ram() local
      229 if (a->end - a->start < res->end - res->start) { in walk_system_ram()
      230 a->start = res->start; in walk_system_ram()
      231 a->end = res->end; in walk_system_ram()
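walk_system_ram() is a resource-walk callback that remembers the largest System RAM region seen so far. A hedged userspace sketch of the same keep-the-widest-range logic over a fixed array; the struct range type and the region values are illustrative.

#include <stdio.h>

struct range {
    unsigned long start, end;
};

int main(void)
{
    struct range regions[] = {
        { 0x00001000, 0x0009ffff },
        { 0x00100000, 0x7fffffff },          /* the widest region */
        { 0x80000000, 0x8fffffff },
    };
    struct range biggest = { 0, 0 };

    for (unsigned int i = 0; i < 3; i++) {
        /* Same comparison as in the listing: keep the widest span. */
        if (biggest.end - biggest.start < regions[i].end - regions[i].start)
            biggest = regions[i];
    }

    printf("largest region: %#lx-%#lx\n", biggest.start, biggest.end);
    return 0;
}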
|
D | Kconfig |
      8 This builds a framework that allows kernel subsystems to monitor
      81 that not accessed for a long time (cold) using DAMON and reclaim
      84 This is suggested to be used as a proactive and lightweight
|
D | dbgfs.c |
      70 unsigned long s, a, r, minr, maxr; in dbgfs_attrs_write() local
      79 &s, &a, &r, &minr, &maxr) != 5) { in dbgfs_attrs_write()
      90 ret = damon_set_attrs(ctx, s, a, r, minr, maxr); in dbgfs_attrs_write()
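The dbgfs_attrs_write() hits show the debugfs parsing pattern: sscanf() the user buffer into five unsigned longs and reject the write unless exactly five values were converted. A userspace sketch of that validation; the buffer contents are illustrative.

#include <stdio.h>

int main(void)
{
    const char *buf = "5000 100000 1000000 10 1000";
    unsigned long s, a, r, minr, maxr;

    /* Require exactly five values, mirroring the != 5 check above. */
    if (sscanf(buf, "%lu %lu %lu %lu %lu", &s, &a, &r, &minr, &maxr) != 5) {
        fprintf(stderr, "invalid input\n");
        return 1;
    }

    printf("parsed: %lu %lu %lu %lu %lu\n", s, a, r, minr, maxr);
    return 0;
}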
|