/mm/ |
D | slub.c |
    129  static inline bool kmem_cache_debug(struct kmem_cache *s)  in kmem_cache_debug() argument
    131  return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);  in kmem_cache_debug()
    134  void *fixup_red_left(struct kmem_cache *s, void *p)  in fixup_red_left() argument
    136  if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))  in fixup_red_left()
    137  p += s->red_left_pad;  in fixup_red_left()
    142  static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)  in kmem_cache_has_cpu_partial() argument
    145  return !kmem_cache_debug(s);  in kmem_cache_has_cpu_partial()
    210  static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }  in sysfs_slab_add() argument
    211  static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)  in sysfs_slab_alias() argument
    218  static inline void debugfs_slab_add(struct kmem_cache *s) { }  in debugfs_slab_add() argument
    [all …]
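
The fixup_red_left() lines above show how SLUB skips the left red zone when SLAB_RED_ZONE debugging is enabled: the raw slot address points at debug padding, so the cache hands out the address just past it. A minimal userspace sketch of that pointer adjustment, with a hypothetical struct standing in for struct kmem_cache (the field names are illustrative, not the kernel layout):

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>

    /* Hypothetical stand-in for struct kmem_cache: only what the sketch needs. */
    struct cache {
        bool red_zone;          /* roughly: SLAB_RED_ZONE set in s->flags */
        size_t red_left_pad;    /* bytes of padding placed before each object */
    };

    /* Mirrors the idea of fixup_red_left(): debug caches return the address
     * after the left red zone instead of the raw slot address. */
    static void *fixup_red_left(const struct cache *s, void *p)
    {
        if (s->red_zone)
            p = (char *)p + s->red_left_pad;
        return p;
    }

    int main(void)
    {
        char slot[64];
        struct cache dbg   = { .red_zone = true,  .red_left_pad = 16 };
        struct cache plain = { .red_zone = false, .red_left_pad = 0  };

        assert(fixup_red_left(&dbg, slot) == slot + 16);
        assert(fixup_red_left(&plain, slot) == slot);
        return 0;
    }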
|
D | slab_common.c |
    84  unsigned int kmem_cache_size(struct kmem_cache *s)  in kmem_cache_size() argument
    86  return s->object_size;  in kmem_cache_size()
    108  void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)  in __kmem_cache_free_bulk() argument
    113  if (s)  in __kmem_cache_free_bulk()
    114  kmem_cache_free(s, p[i]);  in __kmem_cache_free_bulk()
    120  int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,  in __kmem_cache_alloc_bulk() argument
    126  void *x = p[i] = kmem_cache_alloc(s, flags);  in __kmem_cache_alloc_bulk()
    128  __kmem_cache_free_bulk(s, i, p);  in __kmem_cache_alloc_bulk()
    166  int slab_unmergeable(struct kmem_cache *s)  in slab_unmergeable() argument
    168  if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))  in slab_unmergeable()
    [all …]
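
The __kmem_cache_alloc_bulk()/__kmem_cache_free_bulk() lines are the generic all-or-nothing fallback: objects are allocated one at a time, and if any allocation fails everything already obtained is freed again. A hedged userspace analogue of that pattern (function names are made up for illustration):

    #include <stdio.h>
    #include <stdlib.h>

    /* Free the first nr pointers in p. */
    static void free_bulk(size_t nr, void **p)
    {
        for (size_t i = 0; i < nr; i++)
            free(p[i]);
    }

    /* All-or-nothing bulk allocation: returns nr on success, 0 on failure,
     * mirroring the fallback loop in __kmem_cache_alloc_bulk(). */
    static size_t alloc_bulk(size_t obj_size, size_t nr, void **p)
    {
        for (size_t i = 0; i < nr; i++) {
            p[i] = malloc(obj_size);
            if (!p[i]) {
                free_bulk(i, p);      /* undo partial progress */
                return 0;
            }
        }
        return nr;
    }

    int main(void)
    {
        void *objs[8];
        size_t got = alloc_bulk(128, 8, objs);

        printf("allocated %zu objects\n", got);
        if (got)
            free_bulk(got, objs);
        return 0;
    }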
|
D | slab.h |
    126  int slab_unmergeable(struct kmem_cache *s);
    213  void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
    214  void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
    227  static inline int cache_vmstat_idx(struct kmem_cache *s)  in cache_vmstat_idx() argument
    229  return (s->flags & SLAB_RECLAIM_ACCOUNT) ?  in cache_vmstat_idx()
    239  extern void print_tracking(struct kmem_cache *s, void *object);
    240  extern unsigned long get_each_object_track(struct kmem_cache *s,
    250  static inline void print_tracking(struct kmem_cache *s, void *object)  in print_tracking() argument
    258  static inline unsigned long get_each_object_track(struct kmem_cache *s,  in get_each_object_track() argument
    273  static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)  in kmem_cache_debug_flags() argument
    [all …]
|
D | slob.c |
    146  static void set_slob(slob_t *s, slobidx_t size, slob_t *next)  in set_slob() argument
    148  slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);  in set_slob()
    152  s[0].units = size;  in set_slob()
    153  s[1].units = offset;  in set_slob()
    155  s[0].units = -offset;  in set_slob()
    161  static slobidx_t slob_units(slob_t *s)  in slob_units() argument
    163  if (s->units > 0)  in slob_units()
    164  return s->units;  in slob_units()
    171  static slob_t *slob_next(slob_t *s)  in slob_next() argument
    173  slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);  in slob_next()
    [all …]
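
set_slob()/slob_units()/slob_next() encode the SLOB free list inside the free blocks themselves: a block larger than one unit stores its size in s[0] and the offset of the next free block in s[1], while a one-unit block stores the negated offset in s[0]. A userspace sketch of that decoding, assuming the arena base is passed explicitly instead of being derived via PAGE_MASK (types simplified from slob_t/slobidx_t):

    #include <assert.h>
    #include <stdint.h>

    typedef struct { int16_t units; } slob_unit;

    /* Size of a free block: one-unit blocks store a negative value, so report 1. */
    static int slob_units(const slob_unit *s)
    {
        return s->units > 0 ? s->units : 1;
    }

    /* Next free block, stored as an offset (in units) from the arena base. */
    static const slob_unit *slob_next(const slob_unit *base, const slob_unit *s)
    {
        if (s[0].units < 0)                 /* one-unit block: s[0] holds -offset */
            return base + (-s[0].units);
        return base + s[1].units;           /* larger block: s[1] holds offset */
    }

    int main(void)
    {
        slob_unit arena[32] = { { 0 } };

        /* A 4-unit free block at offset 2 whose successor lives at offset 10. */
        arena[2].units = 4;
        arena[3].units = 10;
        /* A 1-unit free block at offset 10 whose successor lives at offset 20. */
        arena[10].units = -20;

        assert(slob_units(&arena[2]) == 4);
        assert(slob_next(arena, &arena[2]) == &arena[10]);
        assert(slob_units(&arena[10]) == 1);
        assert(slob_next(arena, &arena[10]) == &arena[20]);
        return 0;
    }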
|
D | util.c |
    55  char *kstrdup(const char *s, gfp_t gfp)  in kstrdup() argument
    60  if (!s)  in kstrdup()
    63  len = strlen(s) + 1;  in kstrdup()
    66  memcpy(buf, s, len);  in kstrdup()
    82  const char *kstrdup_const(const char *s, gfp_t gfp)  in kstrdup_const() argument
    84  if (is_kernel_rodata((unsigned long)s))  in kstrdup_const()
    85  return s;  in kstrdup_const()
    87  return kstrdup(s, gfp);  in kstrdup_const()
    101  char *kstrndup(const char *s, size_t max, gfp_t gfp)  in kstrndup() argument
    106  if (!s)  in kstrndup()
    [all …]
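
kstrdup(), kstrdup_const() and kstrndup() are the kernel's string-duplication helpers; kstrdup_const() skips the copy when the source already lives in kernel .rodata, so the result must be released with kfree_const(). A minimal in-kernel usage sketch (the widget structure and function are hypothetical):

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    /* Hypothetical structure that keeps a private copy of a name. */
    struct widget {
        const char *name;
    };

    static int widget_set_name(struct widget *w, const char *name)
    {
        /* No copy is made if 'name' points into .rodata (e.g. a string literal). */
        const char *dup = kstrdup_const(name, GFP_KERNEL);

        if (!dup)
            return -ENOMEM;

        kfree_const(w->name);    /* safe on NULL and on .rodata pointers */
        w->name = dup;
        return 0;
    }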
|
D | failslab.c |
    17  bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags)  in __should_failslab() argument
    20  if (unlikely(s == kmem_cache))  in __should_failslab()
    30  if (failslab.cache_filter && !(s->flags & SLAB_FAILSLAB))  in __should_failslab()
    33  return should_fail(&failslab.attr, s->object_size);  in __should_failslab()
|
D | zswap.c |
    701  char *s = strstrip((char *)val);  in __zswap_param_set() local
    710  if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)  in __zswap_param_set()
    717  return param_set_charp(s, kp);  in __zswap_param_set()
    720  if (!zpool_has_pool(s)) {  in __zswap_param_set()
    721  pr_err("zpool %s not available\n", s);  in __zswap_param_set()
    724  type = s;  in __zswap_param_set()
    726  if (!crypto_has_comp(s, 0, 0)) {  in __zswap_param_set()
    727  pr_err("compressor %s not available\n", s);  in __zswap_param_set()
    730  compressor = s;  in __zswap_param_set()
    751  ret = param_set_charp(s, kp);  in __zswap_param_set()
    [all …]
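
__zswap_param_set() follows the validated-string module parameter pattern: strip the incoming value, reject unsupported names, then hand off to the stock charp handlers for storage. A hedged sketch of that pattern with a made-up parameter and a made-up is_supported() check:

    #include <linux/errno.h>
    #include <linux/moduleparam.h>
    #include <linux/printk.h>
    #include <linux/string.h>

    static char *my_backend = "default";

    /* Hypothetical validation; the real code asks zpool/crypto instead. */
    static bool is_supported(const char *name)
    {
        return sysfs_streq(name, "default") || sysfs_streq(name, "fast");
    }

    static int my_backend_set(const char *val, const struct kernel_param *kp)
    {
        char *s = strstrip((char *)val);

        if (!is_supported(s)) {
            pr_err("backend %s not available\n", s);
            return -ENOENT;
        }
        return param_set_charp(s, kp);    /* stores the string into *kp->arg */
    }

    static const struct kernel_param_ops my_backend_ops = {
        .set  = my_backend_set,
        .get  = param_get_charp,
        .free = param_free_charp,
    };
    module_param_cb(backend, &my_backend_ops, &my_backend, 0644);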
|
D | slab.c |
    458  static int __init noaliencache_setup(char *s)  in noaliencache_setup() argument
    2225  bool __kmem_cache_empty(struct kmem_cache *s)  in __kmem_cache_empty() argument
    2230  for_each_kmem_cache_node(s, node, n)  in __kmem_cache_empty()
    3516  cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags,  in cache_alloc_debugcheck_after_bulk() argument
    3522  p[i] = cache_alloc_debugcheck_after(s, flags, p[i], caller);  in cache_alloc_debugcheck_after_bulk()
    3525  int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,  in kmem_cache_alloc_bulk() argument
    3531  s = slab_pre_alloc_hook(s, &objcg, size, flags);  in kmem_cache_alloc_bulk()
    3532  if (!s)  in kmem_cache_alloc_bulk()
    3535  cache_alloc_debugcheck_before(s, flags);  in kmem_cache_alloc_bulk()
    3539  void *objp = kfence_alloc(s, s->object_size, flags) ?: __do_cache_alloc(s, flags);  in kmem_cache_alloc_bulk()
    [all …]
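
kmem_cache_alloc_bulk() fills an array of object pointers in a single call and returns the number of objects allocated (all of them or none); kmem_cache_free_bulk() releases such a batch. A hedged caller-side sketch (the cache and batch handling are illustrative):

    #include <linux/errno.h>
    #include <linux/slab.h>

    /* Hypothetical: batch-allocate nr objects from an existing cache. */
    static int fill_batch(struct kmem_cache *cache, void **objs, size_t nr)
    {
        /* Returns nr on success and 0 on failure; no partial batches. */
        if (!kmem_cache_alloc_bulk(cache, GFP_KERNEL, nr, objs))
            return -ENOMEM;

        /* ... use the objects ... */

        kmem_cache_free_bulk(cache, nr, objs);
        return 0;
    }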
|
D | memcontrol.c |
    1590  struct seq_buf s;  in memory_stat_format() local
    1593  seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);  in memory_stat_format()
    1594  if (!s.buffer)  in memory_stat_format()
    1613  seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size);  in memory_stat_format()
    1618  seq_buf_printf(&s, "slab %llu\n", size);  in memory_stat_format()
    1624  seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT),  in memory_stat_format()
    1626  seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT),  in memory_stat_format()
    1628  seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGREFILL),  in memory_stat_format()
    1630  seq_buf_printf(&s, "pgscan %lu\n",  in memory_stat_format()
    1633  seq_buf_printf(&s, "pgsteal %lu\n",  in memory_stat_format()
    [all …]
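
memory_stat_format() builds its report with the seq_buf API: point a struct seq_buf at a fixed-size buffer and let seq_buf_printf() track the write position and guard against overflow. A hedged sketch of the same pattern (the statistics and function name are made up):

    #include <linux/seq_buf.h>
    #include <linux/slab.h>

    /* Format a small report into a freshly allocated buffer; the caller
     * kfree()s it.  Returns NULL if the buffer allocation fails. */
    static char *format_stats(unsigned long pgfault, unsigned long pgmajfault)
    {
        struct seq_buf s;

        seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
        if (!s.buffer)
            return NULL;

        seq_buf_printf(&s, "pgfault %lu\n", pgfault);
        seq_buf_printf(&s, "pgmajfault %lu\n", pgmajfault);

        return s.buffer;
    }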
|
D | hugetlb_cgroup.c |
    57  struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)  in hugetlb_cgroup_from_css() argument
    59  return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;  in hugetlb_cgroup_from_css()
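
hugetlb_cgroup_from_css() is the standard container_of() idiom: given a pointer to an embedded member (the css), recover the enclosing structure, with a NULL check for the missing-css case. A userspace sketch of the same idiom with made-up types:

    #include <assert.h>
    #include <stddef.h>

    /* Userspace equivalent of the kernel's container_of() macro. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct css { int refcount; };          /* stands in for cgroup_subsys_state */

    struct hugetlb_group {                 /* stands in for hugetlb_cgroup */
        long usage;
        struct css css;                    /* embedded member */
    };

    static struct hugetlb_group *group_from_css(struct css *s)
    {
        return s ? container_of(s, struct hugetlb_group, css) : NULL;
    }

    int main(void)
    {
        struct hugetlb_group g = { .usage = 42 };

        assert(group_from_css(&g.css) == &g);
        assert(group_from_css(NULL) == NULL);
        return 0;
    }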
|
D | Kconfig |
    83  # Both the NUMA code and DISCONTIGMEM use arrays of pg_data_t's
    190  # ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
    191  # PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
    312  of an application's address space that an app has advised may be
    452  for clean pages that the kernel's pageframe replacement algorithm
    541  soft-dirty bit on pte-s. This bit it set when someone writes
    853  part of the process's memory space. This type of page fault is named
|
D | zsmalloc.c |
    586  static int zs_stats_size_show(struct seq_file *s, void *v)  in zs_stats_size_show() argument
    589  struct zs_pool *pool = s->private;  in zs_stats_size_show()
    598  seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s %8s\n",  in zs_stats_size_show()
    621  seq_printf(s, " %5u %5u %11lu %12lu %13lu"  in zs_stats_size_show()
    635  seq_puts(s, "\n");  in zs_stats_size_show()
    636  seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu %16s %8lu\n",  in zs_stats_size_show()
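
zs_stats_size_show() is a seq_file show routine: the pool pointer arrives through s->private and the rows are emitted with seq_printf()/seq_puts(). A hedged sketch of wiring such a show routine to a debugfs file (the pool type and file name are made up):

    #include <linux/debugfs.h>
    #include <linux/seq_file.h>

    /* Hypothetical per-pool statistics. */
    struct mypool { unsigned long pages_used; };

    static int mypool_stats_show(struct seq_file *s, void *v)
    {
        struct mypool *pool = s->private;   /* the data passed to debugfs below */

        seq_printf(s, "%10s %10s\n", "metric", "value");
        seq_printf(s, "%10s %10lu\n", "pages", pool->pages_used);
        return 0;
    }
    DEFINE_SHOW_ATTRIBUTE(mypool_stats);    /* generates mypool_stats_fops */

    static void mypool_debugfs_init(struct mypool *pool, struct dentry *parent)
    {
        debugfs_create_file("stats", 0444, parent, pool, &mypool_stats_fops);
    }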
|
D | hugetlb.c |
    3280  static int __init hugepages_setup(char *s)  in hugepages_setup() argument
    3286  pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);  in hugepages_setup()
    3303  …HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);  in hugepages_setup()
    3307  if (sscanf(s, "%lu", mhp) <= 0)  in hugepages_setup()
    3331  static int __init hugepagesz_setup(char *s)  in hugepagesz_setup() argument
    3337  size = (unsigned long)memparse(s, NULL);  in hugepagesz_setup()
    3340  pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);  in hugepagesz_setup()
    3355  pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);  in hugepagesz_setup()
    3379  static int __init default_hugepagesz_setup(char *s)  in default_hugepagesz_setup() argument
    3385  pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);  in default_hugepagesz_setup()
    [all …]
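
hugepages_setup(), hugepagesz_setup() and default_hugepagesz_setup() are early __setup() handlers that parse boot-command-line values with sscanf()/memparse(). A hedged sketch of that general pattern for a hypothetical parameter (the name and variable are invented for illustration):

    #include <linux/init.h>
    #include <linux/kernel.h>

    static unsigned long my_feature_size __initdata;

    /* Parses "my_feature_size=<size>" from the kernel command line.
     * Returning 1 tells the setup code the option was consumed. */
    static int __init my_feature_size_setup(char *s)
    {
        my_feature_size = memparse(s, NULL);   /* accepts suffixes like K, M, G */
        if (!my_feature_size) {
            pr_err("my_feature_size: invalid value '%s'\n", s);
            return 0;
        }
        return 1;
    }
    __setup("my_feature_size=", my_feature_size_setup);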
|
D | Kconfig.debug |
    57  help to find bare alloc_page(s) leaks. Even if you include this
    145  once the kernel has booted up - it's a one time check.
|
D | mempolicy.c |
    1160  int s,d;  in do_migrate_pages() local
    1164  for_each_node_mask(s, tmp) {  in do_migrate_pages()
    1182  (node_isset(s, *to)))  in do_migrate_pages()
    1185  d = node_remap(s, *from, *to);  in do_migrate_pages()
    1186  if (s == d)  in do_migrate_pages()
    1189  source = s; /* Node moved. Memorize */  in do_migrate_pages()
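
do_migrate_pages() walks the source nodemask and uses node_remap() to pick each node's destination: node s maps to the node occupying the same ordinal position in *to as s occupies in *from, wrapping around when the target mask has fewer nodes. A userspace sketch of that remapping with 64-bit words standing in for nodemask_t:

    #include <assert.h>
    #include <stdint.h>

    /* Number of set bits strictly below 'bit' (the ordinal of 'bit' in 'mask'). */
    static int ordinal(uint64_t mask, int bit)
    {
        return __builtin_popcountll(mask & ((1ULL << bit) - 1));
    }

    /* Position of the n-th set bit, with n taken modulo the number of set bits. */
    static int nth_set_bit(uint64_t mask, int n)
    {
        n %= __builtin_popcountll(mask);
        for (int bit = 0; bit < 64; bit++)
            if (((mask >> bit) & 1) && n-- == 0)
                return bit;
        return -1;    /* unreachable for a non-empty mask */
    }

    static int node_remap(int s, uint64_t from, uint64_t to)
    {
        if (!((from >> s) & 1) || to == 0)
            return s;    /* identity map, as bitmap_bitremap() does */
        return nth_set_bit(to, ordinal(from, s));
    }

    int main(void)
    {
        uint64_t from = 0x0b;    /* source nodes 0, 1, 3 */
        uint64_t to   = 0x30;    /* target nodes 4, 5    */

        assert(node_remap(0, from, to) == 4);    /* 1st source -> 1st target */
        assert(node_remap(1, from, to) == 5);    /* 2nd source -> 2nd target */
        assert(node_remap(3, from, to) == 4);    /* 3rd source wraps to 1st  */
        return 0;
    }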
|
D | page_alloc.c |
    5931  static int __parse_numa_zonelist_order(char *s)  in __parse_numa_zonelist_order() argument
    5939  if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {  in __parse_numa_zonelist_order()
    5940  pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s);  in __parse_numa_zonelist_order()
    7888  unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)  in free_reserved_area() argument
    7918  if (pages && s)  in free_reserved_area()
    7920  s, pages << (PAGE_SHIFT - 10));  in free_reserved_area()
|
D | z3fold.c |
    228  static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)  in slots_to_pool() argument
    230  return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);  in slots_to_pool()
|
D | vmalloc.c |
    1758  unsigned long s, e;  in _vm_unmap_aliases() local
    1760  s = va_start + (vb->dirty_min << PAGE_SHIFT);  in _vm_unmap_aliases()
    1763  start = min(s, start);  in _vm_unmap_aliases()
|
/mm/damon/ |
D | core.c |
    121  void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)  in damon_add_scheme() argument
    123  list_add_tail(&s->list, &ctx->schemes);  in damon_add_scheme()
    126  static void damon_del_scheme(struct damos *s)  in damon_del_scheme() argument
    128  list_del(&s->list);  in damon_del_scheme()
    131  static void damon_free_scheme(struct damos *s)  in damon_free_scheme() argument
    133  kfree(s);  in damon_free_scheme()
    136  void damon_destroy_scheme(struct damos *s)  in damon_destroy_scheme() argument
    138  damon_del_scheme(s);  in damon_destroy_scheme()
    139  damon_free_scheme(s);  in damon_destroy_scheme()
    239  struct damos *s, *next_s;  in damon_destroy_ctx() local
    [all …]
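
damon_add_scheme()/damon_del_scheme()/damon_destroy_scheme() follow the usual kernel linked-list lifecycle: list_add_tail() to register, list_del() plus kfree() to destroy, and a _safe iterator when tearing down the whole context. A hedged sketch of the same pattern with a made-up item type:

    #include <linux/list.h>
    #include <linux/slab.h>

    /* Hypothetical item kept on a per-context list, like struct damos. */
    struct item {
        unsigned long value;
        struct list_head list;
    };

    struct ctx {
        struct list_head items;
    };

    static void ctx_add_item(struct ctx *c, struct item *it)
    {
        list_add_tail(&it->list, &c->items);
    }

    static void ctx_destroy_item(struct item *it)
    {
        list_del(&it->list);
        kfree(it);
    }

    /* Tear down every item; the _safe variant allows deletion while iterating. */
    static void ctx_destroy(struct ctx *c)
    {
        struct item *it, *next;

        list_for_each_entry_safe(it, next, &c->items, list)
            ctx_destroy_item(it);
    }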
|
D | dbgfs.c |
    70  unsigned long s, a, r, minr, maxr;  in dbgfs_attrs_write() local
    79  &s, &a, &r, &minr, &maxr) != 5) {  in dbgfs_attrs_write()
    90  ret = damon_set_attrs(ctx, s, a, r, minr, maxr);  in dbgfs_attrs_write()
    102  struct damos *s;  in sprint_schemes() local
    106  damon_for_each_scheme(s, c) {  in sprint_schemes()
    109  s->min_sz_region, s->max_sz_region,  in sprint_schemes()
    110  s->min_nr_accesses, s->max_nr_accesses,  in sprint_schemes()
    111  s->min_age_region, s->max_age_region,  in sprint_schemes()
    112  s->action,  in sprint_schemes()
    113  s->quota.ms, s->quota.sz,  in sprint_schemes()
    [all …]
|
D | reclaim.c |
    391  struct damos *s;  in damon_reclaim_after_aggregation() local
    394  damon_for_each_scheme(s, c) {  in damon_reclaim_after_aggregation()
    395  nr_reclaim_tried_regions = s->stat.nr_tried;  in damon_reclaim_after_aggregation()
    396  bytes_reclaim_tried_regions = s->stat.sz_tried;  in damon_reclaim_after_aggregation()
    397  nr_reclaimed_regions = s->stat.nr_applied;  in damon_reclaim_after_aggregation()
    398  bytes_reclaimed_regions = s->stat.sz_applied;  in damon_reclaim_after_aggregation()
    399  nr_quota_exceeds = s->stat.qt_exceeds;  in damon_reclaim_after_aggregation()
|
D | prmtv-common.c |
    93  struct damos *s)  in damon_pageout_score() argument
    99  unsigned int freq_weight = s->quota.weight_nr_accesses;  in damon_pageout_score()
    100  unsigned int age_weight = s->quota.weight_age;  in damon_pageout_score()
|
D | prmtv-common.h |
    16  struct damos *s);
|
/mm/kfence/ |
D | core.c |
    676  void kfence_shutdown_cache(struct kmem_cache *s)  in kfence_shutdown_cache() argument
    694  if (READ_ONCE(meta->cache) != s ||  in kfence_shutdown_cache()
    699  in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED;  in kfence_shutdown_cache()
    725  if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED)  in kfence_shutdown_cache()
    729  if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)  in kfence_shutdown_cache()
    735  void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)  in __kfence_alloc() argument
    750  (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32)))  in __kfence_alloc()
    777  return kfence_guarded_alloc(s, size, flags);  in __kfence_alloc()
|
D | kfence_test.c |
    270  …struct kmem_cache *s = test_cache ?: kmalloc_caches[kmalloc_type(GFP_KERNEL)][kmalloc_index(size)];  in test_alloc() local
    277  KUNIT_EXPECT_EQ(test, obj_to_index(s, page, alloc), 0U);  in test_alloc()
    278  KUNIT_EXPECT_EQ(test, objs_per_slab_page(s, page), 1);  in test_alloc()
|