Searched refs:s (Results 1 – 17 of 17) sorted by relevance

/mm/
slub.c
118 static inline int kmem_cache_debug(struct kmem_cache *s) in kmem_cache_debug() argument
121 return unlikely(s->flags & SLAB_DEBUG_FLAGS); in kmem_cache_debug()
127 static inline void *fixup_red_left(struct kmem_cache *s, void *p) in fixup_red_left() argument
129 if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) in fixup_red_left()
130 p += s->red_left_pad; in fixup_red_left()
135 static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s) in kmem_cache_has_cpu_partial() argument
138 return !kmem_cache_debug(s); in kmem_cache_has_cpu_partial()
212 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
214 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } in sysfs_slab_add() argument
215 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) in sysfs_slab_alias() argument
[all …]
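
Note: the slub.c hits above show how SLUB compensates for red zoning — fixup_red_left() shifts the object pointer past the left red-zone pad when SLAB_RED_ZONE is active. A minimal sketch of how a cache acquires that behaviour follows; demo_cache, struct demo_obj and demo_init() are hypothetical names, not part of slub.c.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

struct demo_obj {
	int a, b;
};

static struct kmem_cache *demo_cache;

static int __init demo_init(void)
{
	/*
	 * SLAB_RED_ZONE pads each object with guard bytes; with SLUB
	 * debugging enabled, fixup_red_left() (above) moves returned
	 * pointers past the left pad so callers see only their object.
	 */
	demo_cache = kmem_cache_create("demo_cache", sizeof(struct demo_obj),
				       0, SLAB_RED_ZONE, NULL);
	return demo_cache ? 0 : -ENOMEM;
}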
slab_common.c
63 unsigned int kmem_cache_size(struct kmem_cache *s) in kmem_cache_size() argument
65 return s->object_size; in kmem_cache_size()
72 struct kmem_cache *s = NULL; in kmem_cache_sanity_check() local
80 list_for_each_entry(s, &slab_caches, list) { in kmem_cache_sanity_check()
89 res = probe_kernel_address(s->name, tmp); in kmem_cache_sanity_check()
92 s->object_size); in kmem_cache_sanity_check()
107 void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p) in __kmem_cache_free_bulk() argument
112 kmem_cache_free(s, p[i]); in __kmem_cache_free_bulk()
115 int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr, in __kmem_cache_alloc_bulk() argument
121 void *x = p[i] = kmem_cache_alloc(s, flags); in __kmem_cache_alloc_bulk()
[all …]
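
Note: __kmem_cache_alloc_bulk()/__kmem_cache_free_bulk() above are the generic fallbacks behind the public bulk API, simply looping kmem_cache_alloc()/kmem_cache_free(). A sketch of a caller; demo_bulk() is a hypothetical name, and the return-value check assumes this kernel era's convention (number allocated on success, 0 on failure).

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static int demo_bulk(struct kmem_cache *cache)
{
	void *objs[8];

	/* Allocate a batch in one call; allocator-specific fast paths
	 * may override the generic loop shown above. */
	if (kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(objs), objs)
	    != ARRAY_SIZE(objs))
		return -ENOMEM;

	kmem_cache_free_bulk(cache, ARRAY_SIZE(objs), objs);
	return 0;
}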
slab.h
90 int slab_unmergeable(struct kmem_cache *s);
161 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
162 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
184 static inline bool is_root_cache(struct kmem_cache *s) in is_root_cache() argument
186 return s->memcg_params.is_root_cache; in is_root_cache()
189 static inline bool slab_equal_or_root(struct kmem_cache *s, in slab_equal_or_root() argument
192 return p == s || p == s->memcg_params.root_cache; in slab_equal_or_root()
200 static inline const char *cache_name(struct kmem_cache *s) in cache_name() argument
202 if (!is_root_cache(s)) in cache_name()
203 s = s->memcg_params.root_cache; in cache_name()
[all …]
util.c
44 char *kstrdup(const char *s, gfp_t gfp) in kstrdup() argument
49 if (!s) in kstrdup()
52 len = strlen(s) + 1; in kstrdup()
55 memcpy(buf, s, len); in kstrdup()
69 const char *kstrdup_const(const char *s, gfp_t gfp) in kstrdup_const() argument
71 if (is_kernel_rodata((unsigned long)s)) in kstrdup_const()
72 return s; in kstrdup_const()
74 return kstrdup(s, gfp); in kstrdup_const()
86 char *kstrndup(const char *s, size_t max, gfp_t gfp) in kstrndup() argument
91 if (!s) in kstrndup()
[all …]
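
Note: the util.c hits show kstrdup() and its kstrdup_const() variant, which skips the copy when the source string lives in kernel rodata. A usage sketch; demo_copy() is a hypothetical name. The key rule is that a kstrdup_const() result must be released with kfree_const(), never plain kfree().

#include <linux/slab.h>
#include <linux/string.h>

static char *demo_copy(const char *name)
{
	const char *label = kstrdup_const(name, GFP_KERNEL);	/* may alias rodata */
	char *editable = kstrdup(name, GFP_KERNEL);		/* always a fresh copy */

	kfree_const(label);	/* no-op if label still points into rodata */
	return editable;	/* caller frees with kfree() */
}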
slob.c
145 static void set_slob(slob_t *s, slobidx_t size, slob_t *next) in set_slob() argument
147 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK); in set_slob()
151 s[0].units = size; in set_slob()
152 s[1].units = offset; in set_slob()
154 s[0].units = -offset; in set_slob()
160 static slobidx_t slob_units(slob_t *s) in slob_units() argument
162 if (s->units > 0) in slob_units()
163 return s->units; in slob_units()
170 static slob_t *slob_next(slob_t *s) in slob_next() argument
172 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK); in slob_next()
[all …]
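
Note: set_slob()/slob_units() above imply SLOB's free-block header encoding: a block larger than one unit stores { size, next-offset } in its first two words, while a one-unit block has room for only one word and stores the negated next-offset there, so a negative first word means "size == 1". A decoding sketch under those assumptions; struct slob_unit and sketch_next() are hypothetical stand-ins for slob.c's internal types.

#include <linux/mm.h>
#include <linux/types.h>

typedef s16 demo_slobidx_t;	/* assumption: matches slob.c's default width */
struct slob_unit {
	demo_slobidx_t units;
};

static struct slob_unit *sketch_next(struct slob_unit *s)
{
	/* Free-list links are page-relative offsets, as in slob_next() above. */
	struct slob_unit *base = (struct slob_unit *)((unsigned long)s & PAGE_MASK);

	if (s[0].units > 0)		/* multi-unit block: { size, next } */
		return base + s[1].units;
	return base - s[0].units;	/* one-unit block: only -next stored */
}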
kmemcheck.c
59 void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object, in kmemcheck_slab_alloc() argument
70 if (s->flags & SLAB_NOTRACK) in kmemcheck_slab_alloc()
83 } else if (!s->ctor) { in kmemcheck_slab_alloc()
92 void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size) in kmemcheck_slab_free() argument
95 if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU)) in kmemcheck_slab_free()
zswap.c
715 char *s = strstrip((char *)val); in __zswap_param_set() local
724 if (!strcmp(s, *(char **)kp->arg)) in __zswap_param_set()
731 return param_set_charp(s, kp); in __zswap_param_set()
734 if (!zpool_has_pool(s)) { in __zswap_param_set()
735 pr_err("zpool %s not available\n", s); in __zswap_param_set()
738 type = s; in __zswap_param_set()
740 if (!crypto_has_comp(s, 0, 0)) { in __zswap_param_set()
741 pr_err("compressor %s not available\n", s); in __zswap_param_set()
744 compressor = s; in __zswap_param_set()
765 ret = param_set_charp(s, kp); in __zswap_param_set()
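
Note: __zswap_param_set() above follows the standard custom module-parameter pattern: validate the incoming string, then delegate storage to param_set_charp(). A minimal sketch of that pattern; demo_param, demo_set and the "demo" parameter name are hypothetical.

#include <linux/errno.h>
#include <linux/moduleparam.h>
#include <linux/string.h>

static char *demo_param = "default";

static int demo_set(const char *val, const struct kernel_param *kp)
{
	char *s = strstrip((char *)val);	/* trim trailing newline/space */

	if (!*s)
		return -EINVAL;			/* reject empty values */
	return param_set_charp(s, kp);		/* store the validated string */
}

static const struct kernel_param_ops demo_ops = {
	.set = demo_set,
	.get = param_get_charp,
};
module_param_cb(demo, &demo_ops, &demo_param, 0644);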
hugetlb_cgroup.c
37 struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s) in hugetlb_cgroup_from_css() argument
39 return s ? container_of(s, struct hugetlb_cgroup, css) : NULL; in hugetlb_cgroup_from_css()
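
Note: hugetlb_cgroup_from_css() above is the classic container_of() idiom — recovering the enclosing structure from a pointer to an embedded member. A self-contained sketch; struct demo_counter and demo_from_css() are hypothetical.

#include <linux/cgroup.h>
#include <linux/kernel.h>

struct demo_counter {
	unsigned long usage;
	struct cgroup_subsys_state css;	/* embedded member */
};

static struct demo_counter *demo_from_css(struct cgroup_subsys_state *s)
{
	/* container_of() subtracts the member offset to reach the wrapper;
	 * the NULL check mirrors hugetlb_cgroup_from_css() above. */
	return s ? container_of(s, struct demo_counter, css) : NULL;
}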
zsmalloc.c
498 static int zs_stats_size_show(struct seq_file *s, void *v) in zs_stats_size_show() argument
501 struct zs_pool *pool = s->private; in zs_stats_size_show()
509 seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s\n", in zs_stats_size_show()
532 seq_printf(s, " %5u %5u %11lu %12lu %13lu %10lu %10lu %16d\n", in zs_stats_size_show()
544 seq_puts(s, "\n"); in zs_stats_size_show()
545 seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu\n", in zs_stats_size_show()
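
Note: zs_stats_size_show() above is a seq_file ->show() callback: print a header row, then one formatted line per record, with per-instance state carried in s->private. A minimal sketch of the same pattern wired up via single_open(); demo_show, demo_open and demo_fops are hypothetical.

#include <linux/fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
	/* Column headers first, then rows, as zs_stats_size_show() does. */
	seq_printf(m, "%8s %8s\n", "class", "objects");
	seq_puts(m, "(no data)\n");
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, inode->i_private);
}

static const struct file_operations demo_fops = {
	.open    = demo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};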
page_alloc.c
4035 static int __parse_numa_zonelist_order(char *s) in __parse_numa_zonelist_order() argument
4037 if (*s == 'd' || *s == 'D') { in __parse_numa_zonelist_order()
4039 } else if (*s == 'n' || *s == 'N') { in __parse_numa_zonelist_order()
4041 } else if (*s == 'z' || *s == 'Z') { in __parse_numa_zonelist_order()
4045 "Ignoring invalid numa_zonelist_order value: %s\n", s); in __parse_numa_zonelist_order()
4051 static __init int setup_numa_zonelist_order(char *s) in setup_numa_zonelist_order() argument
4055 if (!s) in setup_numa_zonelist_order()
4058 ret = __parse_numa_zonelist_order(s); in setup_numa_zonelist_order()
4060 strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN); in setup_numa_zonelist_order()
5918 unsigned long free_reserved_area(void *start, void *end, int poison, char *s) in free_reserved_area() argument
[all …]
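
Note: setup_numa_zonelist_order() above is a boot-time command-line handler: it receives the raw option string and parses it in place. A minimal sketch of the generic pattern using the __setup() macro; demo_mode, demo_mode_setup() and the "demo_mode=" key are hypothetical.

#include <linux/init.h>

static int demo_mode __initdata;

static int __init demo_mode_setup(char *s)
{
	if (!s)
		return 0;
	if (*s == 'a' || *s == 'A')	/* e.g. accept "auto"/"Auto" */
		demo_mode = 1;
	return 1;			/* non-zero: option consumed */
}
__setup("demo_mode=", demo_mode_setup);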
Kconfig
81 # Both the NUMA code and DISCONTIGMEM use arrays of pg_data_t's
207 # ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
208 # PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
317 of an application's address space that an app has advised may be
444 for clean pages that the kernel's pageframe replacement algorithm
526 soft-dirty bit on pte-s. This bit is set when someone writes
mempolicy.c
1034 int s,d; in do_migrate_pages() local
1038 for_each_node_mask(s, tmp) { in do_migrate_pages()
1056 (node_isset(s, *to))) in do_migrate_pages()
1059 d = node_remap(s, *from, *to); in do_migrate_pages()
1060 if (s == d) in do_migrate_pages()
1063 source = s; /* Node moved. Memorize */ in do_migrate_pages()
slab.c
568 static int __init noaliencache_setup(char *s) in noaliencache_setup() argument
3438 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) in kmem_cache_free_bulk() argument
3440 __kmem_cache_free_bulk(s, size, p); in kmem_cache_free_bulk()
3444 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, in kmem_cache_alloc_bulk() argument
3447 return __kmem_cache_alloc_bulk(s, flags, size, p); in kmem_cache_alloc_bulk()
hugetlb.c
2801 static int __init hugetlb_nrpages_setup(char *s) in hugetlb_nrpages_setup() argument
2821 if (sscanf(s, "%lu", mhp) <= 0) in hugetlb_nrpages_setup()
2838 static int __init hugetlb_default_setup(char *s) in hugetlb_default_setup() argument
2840 default_hstate_size = memparse(s, &s); in hugetlb_default_setup()
vmalloc.c
1062 unsigned long s, e; in vm_unmap_aliases() local
1064 s = va_start + (vb->dirty_min << PAGE_SHIFT); in vm_unmap_aliases()
1067 start = min(s, start); in vm_unmap_aliases()
memcontrol.c
5871 static int __init enable_swap_account(char *s) in enable_swap_account() argument
5873 if (!strcmp(s, "1")) in enable_swap_account()
5875 else if (!strcmp(s, "0")) in enable_swap_account()
memory.c
112 static int __init disable_randmaps(char *s) in disable_randmaps() argument