
Searched refs:s (Results 1 – 17 of 17) sorted by relevance

/mm/
slub.c 117 static inline int kmem_cache_debug(struct kmem_cache *s) in kmem_cache_debug() argument
120 return unlikely(s->flags & SLAB_DEBUG_FLAGS); in kmem_cache_debug()
126 static inline void *fixup_red_left(struct kmem_cache *s, void *p) in fixup_red_left() argument
128 if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) in fixup_red_left()
129 p += s->red_left_pad; in fixup_red_left()
134 static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s) in kmem_cache_has_cpu_partial() argument
137 return !kmem_cache_debug(s); in kmem_cache_has_cpu_partial()
211 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
213 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } in sysfs_slab_add() argument
214 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) in sysfs_slab_alias() argument
[all …]
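
The SLUB debug helpers above only fire for caches that carry debug flags. As a rough, illustrative sketch (not from the tree; names are hypothetical, and SLAB_RED_ZONE is more commonly switched on via the slub_debug= boot option than passed at creation time), a cache that would take these paths might look like:

#include <linux/init.h>
#include <linux/slab.h>

struct demo_obj {
        int id;
        void *payload;
};

static struct kmem_cache *demo_cache;

static int __init demo_cache_init(void)
{
        /* With SLAB_RED_ZONE set, kmem_cache_debug() is true for this cache
         * and fixup_red_left() shifts returned pointers past the left red
         * zone, so callers never see the padding. */
        demo_cache = kmem_cache_create("demo_cache", sizeof(struct demo_obj),
                                       0, SLAB_RED_ZONE, NULL);
        return demo_cache ? 0 : -ENOMEM;
}
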
slab_common.c 64 unsigned int kmem_cache_size(struct kmem_cache *s) in kmem_cache_size() argument
66 return s->object_size; in kmem_cache_size()
73 struct kmem_cache *s = NULL; in kmem_cache_sanity_check() local
81 list_for_each_entry(s, &slab_caches, list) { in kmem_cache_sanity_check()
90 res = probe_kernel_address(s->name, tmp); in kmem_cache_sanity_check()
93 s->object_size); in kmem_cache_sanity_check()
110 struct kmem_cache *s, struct kmem_cache *root_cache) in memcg_alloc_cache_params() argument
123 s->memcg_params = kzalloc(size, GFP_KERNEL); in memcg_alloc_cache_params()
124 if (!s->memcg_params) in memcg_alloc_cache_params()
128 s->memcg_params->memcg = memcg; in memcg_alloc_cache_params()
[all …]
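
kmem_cache_size() simply reports s->object_size, i.e. the usable per-object size rather than the slab's internal footprint once alignment and debug metadata are added. A minimal, hypothetical caller might read it like this:

#include <linux/kernel.h>
#include <linux/slab.h>

/* Illustrative sketch, not from the tree. */
static void report_object_size(struct kmem_cache *cachep)
{
        pr_info("%u usable bytes per object\n", kmem_cache_size(cachep));
}
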
slab.h 91 int slab_unmergeable(struct kmem_cache *s);
162 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
163 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
168 static inline bool is_root_cache(struct kmem_cache *s) in is_root_cache() argument
170 return !s->memcg_params || s->memcg_params->is_root_cache; in is_root_cache()
173 static inline bool slab_equal_or_root(struct kmem_cache *s, in slab_equal_or_root() argument
176 return (p == s) || in slab_equal_or_root()
177 (s->memcg_params && (p == s->memcg_params->root_cache)); in slab_equal_or_root()
185 static inline const char *cache_name(struct kmem_cache *s) in cache_name() argument
187 if (!is_root_cache(s)) in cache_name()
[all …]
util.c 24 char *kstrdup(const char *s, gfp_t gfp) in kstrdup() argument
29 if (!s) in kstrdup()
32 len = strlen(s) + 1; in kstrdup()
35 memcpy(buf, s, len); in kstrdup()
46 char *kstrndup(const char *s, size_t max, gfp_t gfp) in kstrndup() argument
51 if (!s) in kstrndup()
54 len = strnlen(s, max); in kstrndup()
57 memcpy(buf, s, len); in kstrndup()
117 char *strndup_user(const char __user *s, long n) in strndup_user() argument
122 length = strnlen_user(s, n); in strndup_user()
[all …]
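
The mm/util.c string helpers all follow the same pattern: allocate with the caller's gfp flags, copy, and leave freeing to kfree(). A small illustrative sketch (hypothetical function, not from the tree) using strndup_user(), which additionally reports failure through ERR_PTR():

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static int demo_take_name(const char __user *uname)
{
        /* Bounded copy of a NUL-terminated string from user space. */
        char *name = strndup_user(uname, PAGE_SIZE);

        if (IS_ERR(name))
                return PTR_ERR(name);

        pr_info("user supplied \"%s\"\n", name);
        kfree(name);
        return 0;
}
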
slob.c 145 static void set_slob(slob_t *s, slobidx_t size, slob_t *next) in set_slob() argument
147 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK); in set_slob()
151 s[0].units = size; in set_slob()
152 s[1].units = offset; in set_slob()
154 s[0].units = -offset; in set_slob()
160 static slobidx_t slob_units(slob_t *s) in slob_units() argument
162 if (s->units > 0) in slob_units()
163 return s->units; in slob_units()
170 static slob_t *slob_next(slob_t *s) in slob_next() argument
172 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK); in slob_next()
[all …]
kmemcheck.c 60 void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object, in kmemcheck_slab_alloc() argument
71 if (s->flags & SLAB_NOTRACK) in kmemcheck_slab_alloc()
84 } else if (!s->ctor) { in kmemcheck_slab_alloc()
93 void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size) in kmemcheck_slab_free() argument
96 if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU)) in kmemcheck_slab_free()
hugetlb_cgroup.c 36 struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s) in hugetlb_cgroup_from_css() argument
38 return s ? container_of(s, struct hugetlb_cgroup, css) : NULL; in hugetlb_cgroup_from_css()
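
hugetlb_cgroup_from_css() is a thin wrapper around the container_of() idiom: given a pointer to an embedded member, recover the enclosing structure, guarding against NULL. A generic, hypothetical sketch of the same pattern:

#include <linux/kernel.h>
#include <linux/list.h>

/* Illustrative only; struct and function names are invented. */
struct widget_state {
        int refs;
        struct list_head node;  /* embedded member handed out to other code */
};

static struct widget_state *widget_from_node(struct list_head *node)
{
        return node ? container_of(node, struct widget_state, node) : NULL;
}
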
zsmalloc.c 484 static int zs_stats_size_show(struct seq_file *s, void *v) in zs_stats_size_show() argument
487 struct zs_pool *pool = s->private; in zs_stats_size_show()
495 seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s\n", in zs_stats_size_show()
518 seq_printf(s, " %5u %5u %11lu %12lu %13lu %10lu %10lu %16d\n", in zs_stats_size_show()
530 seq_puts(s, "\n"); in zs_stats_size_show()
531 seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu\n", in zs_stats_size_show()
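
zs_stats_size_show() is a standard seq_file show callback: its private data arrives via s->private and all output flows through seq_printf()/seq_puts(). A stripped-down, hypothetical callback in the same style:

#include <linux/seq_file.h>

struct demo_stats {
        unsigned long hits;
        unsigned long misses;
};

static int demo_stats_show(struct seq_file *s, void *v)
{
        struct demo_stats *st = s->private;

        seq_printf(s, "%10s %10s\n", "hits", "misses");
        seq_printf(s, "%10lu %10lu\n", st->hits, st->misses);
        seq_puts(s, "\n");
        return 0;
}

Such a callback would typically be wired up with single_open(file, demo_stats_show, stats) from the corresponding ->open handler.
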
page_alloc.c 3450 static int __parse_numa_zonelist_order(char *s) in __parse_numa_zonelist_order() argument
3452 if (*s == 'd' || *s == 'D') { in __parse_numa_zonelist_order()
3454 } else if (*s == 'n' || *s == 'N') { in __parse_numa_zonelist_order()
3456 } else if (*s == 'z' || *s == 'Z') { in __parse_numa_zonelist_order()
3461 "%s\n", s); in __parse_numa_zonelist_order()
3467 static __init int setup_numa_zonelist_order(char *s) in setup_numa_zonelist_order() argument
3471 if (!s) in setup_numa_zonelist_order()
3474 ret = __parse_numa_zonelist_order(s); in setup_numa_zonelist_order()
3476 strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN); in setup_numa_zonelist_order()
5476 unsigned long free_reserved_area(void *start, void *end, int poison, char *s) in free_reserved_area() argument
[all …]
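
setup_numa_zonelist_order() is a boot-time option parser: it validates the string and records the choice during early init. The usual shape of such a handler, sketched here with a made-up option name ("demo_mode=") rather than the real one:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>

static char demo_mode_cmdline[16] __initdata;

static int __init demo_mode_setup(char *s)
{
        if (!s)
                return 0;
        if (*s == 'a' || *s == 'A')
                strlcpy(demo_mode_cmdline, "auto", sizeof(demo_mode_cmdline));
        else if (*s == 'm' || *s == 'M')
                strlcpy(demo_mode_cmdline, "manual", sizeof(demo_mode_cmdline));
        else
                pr_warn("demo_mode: unknown value '%s'\n", s);
        return 1;       /* __setup() handlers return 1 once the option is consumed */
}
__setup("demo_mode=", demo_mode_setup);
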
Kconfig 81 # Both the NUMA code and DISCONTIGMEM use arrays of pg_data_t's
219 # ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
220 # PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
334 of an application's address space that an app has advised may be
460 for clean pages that the kernel's pageframe replacement algorithm
536 soft-dirty bit on pte-s. This bit it set when someone writes
mempolicy.c 1082 int s,d; in do_migrate_pages() local
1086 for_each_node_mask(s, tmp) { in do_migrate_pages()
1104 (node_isset(s, *to))) in do_migrate_pages()
1107 d = node_remap(s, *from, *to); in do_migrate_pages()
1108 if (s == d) in do_migrate_pages()
1111 source = s; /* Node moved. Memorize */ in do_migrate_pages()
memcontrol.c 490 struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s) in mem_cgroup_from_css() argument
492 return s ? container_of(s, struct mem_cgroup, css) : NULL; in mem_cgroup_from_css()
3026 int __memcg_cleanup_cache_params(struct kmem_cache *s) in __memcg_cleanup_cache_params() argument
3033 c = cache_from_memcg_idx(s, i); in __memcg_cleanup_cache_params()
3039 if (cache_from_memcg_idx(s, i)) in __memcg_cleanup_cache_params()
6214 static int __init enable_swap_account(char *s) in enable_swap_account() argument
6216 if (!strcmp(s, "1")) in enable_swap_account()
6218 else if (!strcmp(s, "0")) in enable_swap_account()
vmalloc.c 1032 unsigned long s, e; in vm_unmap_aliases() local
1038 s = vb->va->va_start + (i << PAGE_SHIFT); in vm_unmap_aliases()
1042 if (s < start) in vm_unmap_aliases()
1043 start = s; in vm_unmap_aliases()
hugetlb.c 2232 static int __init hugetlb_nrpages_setup(char *s) in hugetlb_nrpages_setup() argument
2252 if (sscanf(s, "%lu", mhp) <= 0) in hugetlb_nrpages_setup()
2269 static int __init hugetlb_default_setup(char *s) in hugetlb_default_setup() argument
2271 default_hstate_size = memparse(s, &s); in hugetlb_default_setup()
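
hugetlb_default_setup() relies on memparse(), which converts human-friendly size strings such as "2M" or "1G" into a byte count and advances the cursor past any suffix. A hypothetical handler using the same call (option name invented for illustration):

#include <linux/init.h>
#include <linux/kernel.h>

static unsigned long long demo_size __initdata;

static int __init demo_size_setup(char *s)
{
        demo_size = memparse(s, &s);    /* e.g. "demo_size=512M" -> 536870912 */
        return 1;
}
__setup("demo_size=", demo_size_setup);
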
vmscan.c 192 static int debug_shrinker_show(struct seq_file *s, void *unused) in debug_shrinker_show() argument
205 seq_printf(s, "%pf %d\n", shrinker->scan_objects, num_objs); in debug_shrinker_show()
memory.c 111 static int __init disable_randmaps(char *s) in disable_randmaps() argument
slab.c 583 static int __init noaliencache_setup(char *s) in noaliencache_setup() argument