Searched refs:s (Results 1 – 19 of 19) sorted by relevance

/mm/
slub.c
119 static inline int kmem_cache_debug(struct kmem_cache *s) in kmem_cache_debug() argument
122 return unlikely(s->flags & SLAB_DEBUG_FLAGS); in kmem_cache_debug()
128 void *fixup_red_left(struct kmem_cache *s, void *p) in fixup_red_left() argument
130 if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) in fixup_red_left()
131 p += s->red_left_pad; in fixup_red_left()
136 static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s) in kmem_cache_has_cpu_partial() argument
139 return !kmem_cache_debug(s); in kmem_cache_has_cpu_partial()
219 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
220 static void sysfs_slab_remove(struct kmem_cache *s);
222 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } in sysfs_slab_add() argument
[all …]
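
The fixup_red_left() hits above are the whole of SLUB's left-red-zone adjustment: when a cache is built with SLAB_RED_ZONE debugging, the usable object begins red_left_pad bytes into its slot, and every raw slot pointer gets bumped past that guard. A minimal userspace sketch of the same pointer arithmetic; the toy_cache type and DEBUG_RED_ZONE flag are illustrative stand-ins, not kernel names:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEBUG_RED_ZONE 0x1u		/* stand-in for SLAB_RED_ZONE */

struct toy_cache {
	unsigned int flags;
	size_t red_left_pad;		/* guard bytes before each object */
	size_t object_size;
};

/* Mirror of fixup_red_left(): skip the left guard when debugging is on. */
static void *fixup_red_left(const struct toy_cache *c, void *p)
{
	if (c->flags & DEBUG_RED_ZONE)
		p = (char *)p + c->red_left_pad;
	return p;
}

int main(void)
{
	struct toy_cache c = { DEBUG_RED_ZONE, 16, 64 };
	char *slot = malloc(c.red_left_pad + c.object_size);
	char *obj = fixup_red_left(&c, slot);

	memset(slot, 0xbb, c.red_left_pad);	/* poison the left red zone */
	assert(obj - slot == 16);
	printf("object starts %td bytes into its slot\n", obj - slot);
	free(slot);
	return 0;
}
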
slab_common.c
79 unsigned int kmem_cache_size(struct kmem_cache *s) in kmem_cache_size() argument
81 return s->object_size; in kmem_cache_size()
104 void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p) in __kmem_cache_free_bulk() argument
109 if (s) in __kmem_cache_free_bulk()
110 kmem_cache_free(s, p[i]); in __kmem_cache_free_bulk()
116 int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr, in __kmem_cache_alloc_bulk() argument
122 void *x = p[i] = kmem_cache_alloc(s, flags); in __kmem_cache_alloc_bulk()
124 __kmem_cache_free_bulk(s, i, p); in __kmem_cache_alloc_bulk()
138 void slab_init_memcg_params(struct kmem_cache *s) in slab_init_memcg_params() argument
140 s->memcg_params.root_cache = NULL; in slab_init_memcg_params()
[all …]
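
__kmem_cache_free_bulk() and __kmem_cache_alloc_bulk() above are the generic fallbacks behind the bulk API: allocate one object at a time and, on the first failure, free the i objects already obtained before reporting 0. A userspace analogue of that unwind pattern, with malloc/free standing in for the per-cache hooks:

#include <stdlib.h>

/* Free the first nr pointers; mirrors __kmem_cache_free_bulk(). */
static void free_bulk(size_t nr, void **p)
{
	for (size_t i = 0; i < nr; i++)
		free(p[i]);
}

/*
 * Allocate nr objects into p[].  On the first failure, unwind the i
 * allocations that succeeded, as __kmem_cache_alloc_bulk() does, and
 * report 0; otherwise report nr.
 */
static size_t alloc_bulk(size_t object_size, size_t nr, void **p)
{
	for (size_t i = 0; i < nr; i++) {
		void *x = p[i] = malloc(object_size);

		if (!x) {
			free_bulk(i, p);	/* only what we already hold */
			return 0;
		}
	}
	return nr;
}

int main(void)
{
	void *objs[8];

	if (alloc_bulk(64, 8, objs) != 8)
		return 1;
	free_bulk(8, objs);
	return 0;
}
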
slab.h
166 int slab_unmergeable(struct kmem_cache *s);
237 void __kmemcg_cache_deactivate(struct kmem_cache *s);
238 void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s);
240 void kmem_cache_shrink_all(struct kmem_cache *s);
258 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
259 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
272 static inline int cache_vmstat_idx(struct kmem_cache *s) in cache_vmstat_idx() argument
274 return (s->flags & SLAB_RECLAIM_ACCOUNT) ? in cache_vmstat_idx()
292 static inline bool is_root_cache(struct kmem_cache *s) in is_root_cache() argument
294 return !s->memcg_params.root_cache; in is_root_cache()
[all …]
slob.c
146 static void set_slob(slob_t *s, slobidx_t size, slob_t *next) in set_slob() argument
148 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK); in set_slob()
152 s[0].units = size; in set_slob()
153 s[1].units = offset; in set_slob()
155 s[0].units = -offset; in set_slob()
161 static slobidx_t slob_units(slob_t *s) in slob_units() argument
163 if (s->units > 0) in slob_units()
164 return s->units; in slob_units()
171 static slob_t *slob_next(slob_t *s) in slob_next() argument
173 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK); in slob_next()
[all …]
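
set_slob(), slob_units() and slob_next() keep SLOB's free list inside the free space itself: a free block of two or more units stores [size, next-offset] in its first two slots, while a one-unit block stores the negated offset in its only slot, so the sign of the first unit distinguishes the two layouts. A self-contained sketch of that encoding; here base is passed explicitly instead of being derived from the pointer via PAGE_MASK, and slobidx_t is narrowed to int:

#include <assert.h>
#include <stdio.h>

typedef int slobidx_t;
typedef struct { slobidx_t units; } slob_t;

/* Encode a free block at s: size in units, next as an offset from base. */
static void set_slob(slob_t *s, slob_t *base, slobidx_t size, slob_t *next)
{
	slobidx_t offset = (slobidx_t)(next - base);

	if (size > 1) {
		s[0].units = size;
		s[1].units = offset;
	} else {
		s[0].units = -offset;	/* one-unit block: negated offset */
	}
}

static slobidx_t slob_units(slob_t *s)
{
	return s->units > 0 ? s->units : 1;
}

static slob_t *slob_next(slob_t *s, slob_t *base)
{
	slobidx_t next = s[0].units > 0 ? s[1].units : -s[0].units;

	return base + next;
}

int main(void)
{
	slob_t page[64];

	set_slob(&page[0], page, 4, &page[10]);		/* 4-unit block */
	set_slob(&page[10], page, 1, &page[20]);	/* 1-unit block */

	assert(slob_units(&page[0]) == 4 && slob_next(&page[0], page) == &page[10]);
	assert(slob_units(&page[10]) == 1 && slob_next(&page[10], page) == &page[20]);
	puts("SLOB free-list encoding round-trips");
	return 0;
}
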
util.c
51 char *kstrdup(const char *s, gfp_t gfp) in kstrdup() argument
56 if (!s) in kstrdup()
59 len = strlen(s) + 1; in kstrdup()
62 memcpy(buf, s, len); in kstrdup()
77 const char *kstrdup_const(const char *s, gfp_t gfp) in kstrdup_const() argument
79 if (is_kernel_rodata((unsigned long)s)) in kstrdup_const()
80 return s; in kstrdup_const()
82 return kstrdup(s, gfp); in kstrdup_const()
96 char *kstrndup(const char *s, size_t max, gfp_t gfp) in kstrndup() argument
101 if (!s) in kstrndup()
[all …]
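
kstrdup() is the kernel's strdup(): NULL-tolerant, length = strlen + 1, allocate, memcpy. kstrdup_const() adds one twist worth noting in the hits above: strings already living in kernel rodata are returned as-is rather than copied (the matching kfree_const() knows not to free them). The plain copy path, rendered for userspace:

#include <stdlib.h>
#include <string.h>

/* Userspace rendering of kstrdup(): tolerate NULL, copy strlen+1 bytes. */
static char *xstrdup(const char *s)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = malloc(len);
	if (buf)
		memcpy(buf, s, len);	/* includes the terminating NUL */
	return buf;
}

int main(void)
{
	char *copy = xstrdup("slab");
	free(copy);
	return 0;
}
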
failslab.c
17 bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags) in __should_failslab() argument
20 if (unlikely(s == kmem_cache)) in __should_failslab()
30 if (failslab.cache_filter && !(s->flags & SLAB_FAILSLAB)) in __should_failslab()
33 return should_fail(&failslab.attr, s->object_size); in __should_failslab()
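
__should_failslab() gates allocation fault injection: never inject into kmem_cache itself (the cache of caches), honor the per-cache SLAB_FAILSLAB opt-in when cache_filter is set, then defer to the generic should_fail() throttle. A toy interval-based stand-in for that gate; the kernel's real fault_attr machinery is considerably richer than this:

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

struct fail_attr {
	unsigned long interval;		/* fail every Nth eligible call */
	unsigned long calls;
};

static struct fail_attr failslab = { .interval = 4 };

static bool should_fail(struct fail_attr *attr)
{
	if (!attr->interval)
		return false;
	return ++attr->calls % attr->interval == 0;
}

/* Every allocation funnels through the injector, as in __should_failslab(). */
static void *checked_alloc(size_t size)
{
	if (should_fail(&failslab))
		return NULL;		/* injected failure */
	return malloc(size);
}

int main(void)
{
	int failures = 0;

	for (int i = 0; i < 16; i++) {
		void *p = checked_alloc(32);

		if (!p)
			failures++;
		else
			free(p);
	}
	return failures == 4 ? 0 : 1;	/* 16 calls at interval 4 */
}
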
zswap.c
670 char *s = strstrip((char *)val); in __zswap_param_set() local
679 if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool) in __zswap_param_set()
686 return param_set_charp(s, kp); in __zswap_param_set()
689 if (!zpool_has_pool(s)) { in __zswap_param_set()
690 pr_err("zpool %s not available\n", s); in __zswap_param_set()
693 type = s; in __zswap_param_set()
695 if (!crypto_has_comp(s, 0, 0)) { in __zswap_param_set()
696 pr_err("compressor %s not available\n", s); in __zswap_param_set()
699 compressor = s; in __zswap_param_set()
720 ret = param_set_charp(s, kp); in __zswap_param_set()
[all …]
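
__zswap_param_set() is the usual shape of a validating module-param setter: strstrip() the raw value, reject it unless the named backend actually exists, then hand the vetted string to the stock param_set_charp(). A sketch of that shape, assuming a kernel-module context; my_backend_exists() is a hypothetical validator standing in for zpool_has_pool()/crypto_has_comp():

#include <linux/errno.h>
#include <linux/moduleparam.h>
#include <linux/printk.h>
#include <linux/string.h>

/* Hypothetical validator standing in for zpool_has_pool(). */
static bool my_backend_exists(const char *s)
{
	return strcmp(s, "zbud") == 0;
}

static int my_param_set(const char *val, const struct kernel_param *kp)
{
	char *s = strstrip((char *)val);	/* trim, as zswap does */

	if (!my_backend_exists(s)) {
		pr_err("backend %s not available\n", s);
		return -ENOENT;
	}
	return param_set_charp(s, kp);		/* stock charp setter */
}
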
slab.c
457 static int __init noaliencache_setup(char *s) in noaliencache_setup() argument
2215 bool __kmem_cache_empty(struct kmem_cache *s) in __kmem_cache_empty() argument
2220 for_each_kmem_cache_node(s, node, n) in __kmem_cache_empty()
2251 void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s) in __kmemcg_cache_deactivate_after_rcu() argument
3493 cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags, in cache_alloc_debugcheck_after_bulk() argument
3499 p[i] = cache_alloc_debugcheck_after(s, flags, p[i], caller); in cache_alloc_debugcheck_after_bulk()
3502 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, in kmem_cache_alloc_bulk() argument
3507 s = slab_pre_alloc_hook(s, flags); in kmem_cache_alloc_bulk()
3508 if (!s) in kmem_cache_alloc_bulk()
3511 cache_alloc_debugcheck_before(s, flags); in kmem_cache_alloc_bulk()
[all …]
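
kmem_cache_alloc_bulk() here is SLAB's implementation of the bulk API whose generic fallback appears in the slab_common.c hits: it returns the full count on success and 0 on failure, so callers can treat a batch as all-or-nothing. A sketch of a caller, assuming a kernel-module context and a cache created elsewhere:

#include <linux/errno.h>
#include <linux/slab.h>

/*
 * Fill objs[] with 16 objects in one call.  kmem_cache_alloc_bulk()
 * returns 0 on failure and the requested count on success, so no
 * partial-batch cleanup is needed on this side.
 */
static int widget_fill(struct kmem_cache *cache, void **objs)
{
	if (!kmem_cache_alloc_bulk(cache, GFP_KERNEL, 16, objs))
		return -ENOMEM;

	/* ... use objs[0..15] ... */

	kmem_cache_free_bulk(cache, 16, objs);
	return 0;
}
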
memcontrol.c
1382 struct seq_buf s; in memory_stat_format() local
1385 seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE); in memory_stat_format()
1386 if (!s.buffer) in memory_stat_format()
1400 seq_buf_printf(&s, "anon %llu\n", in memory_stat_format()
1403 seq_buf_printf(&s, "file %llu\n", in memory_stat_format()
1406 seq_buf_printf(&s, "kernel_stack %llu\n", in memory_stat_format()
1409 seq_buf_printf(&s, "slab %llu\n", in memory_stat_format()
1413 seq_buf_printf(&s, "sock %llu\n", in memory_stat_format()
1417 seq_buf_printf(&s, "shmem %llu\n", in memory_stat_format()
1420 seq_buf_printf(&s, "file_mapped %llu\n", in memory_stat_format()
[all …]
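
memory_stat_format() shows the seq_buf idiom: kmalloc() one PAGE_SIZE buffer, wrap it with seq_buf_init(), and let seq_buf_printf() track the write position and clamp on overflow instead of hand-rolling snprintf offset arithmetic. The shape, assuming kernel context, with placeholder values where the real code reads memcg counters:

#include <linux/seq_buf.h>
#include <linux/slab.h>

static char *stat_format(void)
{
	struct seq_buf s;

	seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
	if (!s.buffer)
		return NULL;

	/* Each call appends at the tracked position; overflow is clamped,
	 * not overrun.  The real code prints memcg counters here. */
	seq_buf_printf(&s, "anon %llu\n", 0ULL);
	seq_buf_printf(&s, "file %llu\n", 0ULL);

	return s.buffer;	/* caller kfree()s */
}
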
hugetlb_cgroup.c
37 struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s) in hugetlb_cgroup_from_css() argument
39 return s ? container_of(s, struct hugetlb_cgroup, css) : NULL; in hugetlb_cgroup_from_css()
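
hugetlb_cgroup_from_css() is a NULL-guarded container_of() downcast: from a pointer to the embedded css member, recover the enclosing structure. A self-contained userspace version of the same pattern, with illustrative type names:

#include <assert.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct css { int refcount; };

struct hugetlb_cgroup_like {
	long counter;
	struct css css;		/* embedded member */
};

static struct hugetlb_cgroup_like *from_css(struct css *s)
{
	return s ? container_of(s, struct hugetlb_cgroup_like, css) : NULL;
}

int main(void)
{
	struct hugetlb_cgroup_like h = { .counter = 42 };

	assert(from_css(&h.css) == &h);
	assert(from_css(NULL) == NULL);
	return 0;
}
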
zsmalloc.c
590 static int zs_stats_size_show(struct seq_file *s, void *v) in zs_stats_size_show() argument
593 struct zs_pool *pool = s->private; in zs_stats_size_show()
602 seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s %8s\n", in zs_stats_size_show()
625 seq_printf(s, " %5u %5u %11lu %12lu %13lu" in zs_stats_size_show()
639 seq_puts(s, "\n"); in zs_stats_size_show()
640 seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu %16s %8lu\n", in zs_stats_size_show()
Kconfig
83 # Both the NUMA code and DISCONTIGMEM use arrays of pg_data_t's
188 # ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
189 # PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
294 of an application's address space that an app has advised may be
438 for clean pages that the kernel's pageframe replacement algorithm
519 soft-dirty bit on pte-s. This bit is set when someone writes
mempolicy.c
1106 int s,d; in do_migrate_pages() local
1110 for_each_node_mask(s, tmp) { in do_migrate_pages()
1128 (node_isset(s, *to))) in do_migrate_pages()
1131 d = node_remap(s, *from, *to); in do_migrate_pages()
1132 if (s == d) in do_migrate_pages()
1135 source = s; /* Node moved. Memorize */ in do_migrate_pages()
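
The do_migrate_pages() loop pairs each source node s with d = node_remap(s, *from, *to): s's ordinal position within the from mask picks the node at the same ordinal (wrapping on the destination mask's weight) in the to mask, and s == d pairs are skipped. A 64-bit-mask sketch of that remapping, using GCC/Clang popcount builtins:

#include <assert.h>
#include <stdint.h>

/* Number of set bits below 'node' in 'mask': the node's ordinal. */
static int ordinal(uint64_t mask, int node)
{
	return __builtin_popcountll(mask & ((1ull << node) - 1));
}

static int nth_bit(uint64_t mask, int n)
{
	for (int b = 0; b < 64; b++)
		if (mask & (1ull << b) && n-- == 0)
			return b;
	return -1;
}

/* Sketch of node_remap(): same ordinal, wrapped to the new weight. */
static int remap(int s, uint64_t from, uint64_t to)
{
	int n = ordinal(from, s) % __builtin_popcountll(to);

	return nth_bit(to, n);
}

int main(void)
{
	/* from = {0,1,2}, to = {3,4}: node 2 wraps back onto node 3. */
	assert(remap(0, 0x7, 0x18) == 3);
	assert(remap(1, 0x7, 0x18) == 4);
	assert(remap(2, 0x7, 0x18) == 3);
	return 0;
}
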
page_alloc.c
5478 static int __parse_numa_zonelist_order(char *s) in __parse_numa_zonelist_order() argument
5486 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) { in __parse_numa_zonelist_order()
5487 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s); in __parse_numa_zonelist_order()
5493 static __init int setup_numa_zonelist_order(char *s) in setup_numa_zonelist_order() argument
5495 if (!s) in setup_numa_zonelist_order()
5498 return __parse_numa_zonelist_order(s); in setup_numa_zonelist_order()
7492 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s) in free_reserved_area() argument
7517 if (pages && s) in free_reserved_area()
7519 s, pages << (PAGE_SHIFT - 10)); in free_reserved_area()
Kconfig.debug
57 help to find bare alloc_page(s) leaks. Even if you include this
hugetlb.c
3024 static int __init hugetlb_nrpages_setup(char *s) in hugetlb_nrpages_setup() argument
3031 "an unsupported hugepagesz, ignoring\n", s); in hugetlb_nrpages_setup()
3049 if (sscanf(s, "%lu", mhp) <= 0) in hugetlb_nrpages_setup()
3066 static int __init hugetlb_default_setup(char *s) in hugetlb_default_setup() argument
3068 default_hstate_size = memparse(s, &s); in hugetlb_default_setup()
z3fold.c
212 static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s) in slots_to_pool() argument
214 return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK); in slots_to_pool()
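
slots_to_pool() is the tagged-pointer trick: because the pool is allocated with sufficient alignment, the low bits of its address are guaranteed zero and can carry handle flags, masked off with ~HANDLE_FLAG_MASK on the way back out. A generic userspace illustration; the FLAG_MASK width here is illustrative, not z3fold's actual value:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define FLAG_MASK 0x3ul	/* low bits free in a >=4-byte-aligned pointer */

struct pool { int id; };

static unsigned long tag(struct pool *p, unsigned long flags)
{
	assert(((uintptr_t)p & FLAG_MASK) == 0);   /* alignment frees the bits */
	return (uintptr_t)p | (flags & FLAG_MASK);
}

/* Mirror of slots_to_pool(): mask the flag bits to recover the pointer. */
static struct pool *untag(unsigned long handle)
{
	return (struct pool *)(handle & ~FLAG_MASK);
}

int main(void)
{
	struct pool *p = malloc(sizeof(*p));
	unsigned long h = tag(p, 0x1);

	assert(untag(h) == p);
	free(p);
	return 0;
}
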
vmalloc.c
1687 unsigned long s, e; in _vm_unmap_aliases() local
1689 s = va_start + (vb->dirty_min << PAGE_SHIFT); in _vm_unmap_aliases()
1692 start = min(s, start); in _vm_unmap_aliases()
memory.c
123 static int __init disable_randmaps(char *s) in disable_randmaps() argument