Searched refs:c (Results 1 – 16 of 16) sorted by relevance

/mm/
page_counter.c:17 static void propagate_protected_usage(struct page_counter *c, in propagate_protected_usage() argument
24 if (!c->parent) in propagate_protected_usage()
27 min = READ_ONCE(c->min); in propagate_protected_usage()
28 if (min || atomic_long_read(&c->min_usage)) { in propagate_protected_usage()
30 old_protected = atomic_long_xchg(&c->min_usage, protected); in propagate_protected_usage()
33 atomic_long_add(delta, &c->parent->children_min_usage); in propagate_protected_usage()
36 low = READ_ONCE(c->low); in propagate_protected_usage()
37 if (low || atomic_long_read(&c->low_usage)) { in propagate_protected_usage()
39 old_protected = atomic_long_xchg(&c->low_usage, protected); in propagate_protected_usage()
42 atomic_long_add(delta, &c->parent->children_low_usage); in propagate_protected_usage()
[all …]
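
The hits above are the heart of page_counter's protection bookkeeping: a child publishes its currently protected usage with an atomic exchange and forwards only the resulting delta to its parent's children_min_usage/children_low_usage. A minimal userspace sketch of that pattern, using C11 atomics in place of the kernel's atomic_long_t (struct and function names here are illustrative, not kernel APIs):

#include <stdatomic.h>
#include <stdio.h>

struct counter {
	struct counter *parent;
	unsigned long min;              /* configured protection */
	atomic_long min_usage;          /* protected usage published so far */
	atomic_long children_min_usage; /* aggregate over children */
};

static void propagate_min_usage(struct counter *c, unsigned long usage)
{
	long prot, old, delta;

	if (!c->parent)
		return;

	/* protect no more than what is actually used */
	prot = usage < c->min ? usage : c->min;

	/* publish the new value, forward only the change upward */
	old = atomic_exchange(&c->min_usage, prot);
	delta = prot - old;
	if (delta)
		atomic_fetch_add(&c->parent->children_min_usage, delta);
}

int main(void)
{
	struct counter root = { 0 };
	struct counter child = { .parent = &root, .min = 100 };

	propagate_min_usage(&child, 40);   /* parent sees +40 */
	propagate_min_usage(&child, 150);  /* capped at min=100, parent sees +60 */
	printf("children_min_usage = %ld\n",
	       atomic_load(&root.children_min_usage));
	return 0;
}
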
slob.c:591 int __kmem_cache_create(struct kmem_cache *c, slab_flags_t flags) in __kmem_cache_create() argument
595 c->size += sizeof(struct slob_rcu); in __kmem_cache_create()
597 c->flags = flags; in __kmem_cache_create()
601 static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node) in slob_alloc_node() argument
609 if (c->size < PAGE_SIZE) { in slob_alloc_node()
610 b = slob_alloc(c->size, flags, c->align, node, 0); in slob_alloc_node()
611 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size, in slob_alloc_node()
612 SLOB_UNITS(c->size) * SLOB_UNIT, in slob_alloc_node()
615 b = slob_new_pages(flags, get_order(c->size), node); in slob_alloc_node()
616 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size, in slob_alloc_node()
[all …]
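
slob_alloc_node() above dispatches purely on size: sub-page objects are carved out of shared slob pages, anything PAGE_SIZE or larger goes straight to the page allocator. A hedged userspace sketch of that dispatch (alloc_small() and alloc_whole_pages() are hypothetical stand-ins for slob_alloc() and slob_new_pages()):

#include <stdlib.h>
#include <unistd.h>

static void *alloc_small(size_t size)        /* ~ slob_alloc(): carve from a shared page */
{
	return malloc(size);
}

static void *alloc_whole_pages(size_t size)  /* ~ slob_new_pages(): whole pages */
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	size_t rounded = (size + page - 1) / page * page;

	return aligned_alloc(page, rounded);
}

static void *cache_alloc(size_t object_size)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);

	return object_size < page ? alloc_small(object_size)
				  : alloc_whole_pages(object_size);
}
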
slub.c:2319 struct kmem_cache_cpu *c; in init_kmem_cache_cpus() local
2322 c = per_cpu_ptr(s->cpu_slab, cpu); in init_kmem_cache_cpus()
2323 local_lock_init(&c->lock); in init_kmem_cache_cpus()
2324 c->tid = init_tid(cpu); in init_kmem_cache_cpus()
2546 struct kmem_cache_cpu *c) in unfreeze_partials_cpu() argument
2550 partial_page = slub_percpu_partial(c); in unfreeze_partials_cpu()
2551 c->partial = NULL; in unfreeze_partials_cpu()
2612 struct kmem_cache_cpu *c) { } in unfreeze_partials_cpu() argument
2616 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) in flush_slab() argument
2624 page = c->page; in flush_slab()
[all …]
slab.h:637 static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c) in slab_want_init_on_alloc() argument
641 if (c->ctor) in slab_want_init_on_alloc()
643 if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) in slab_want_init_on_alloc()
650 static inline bool slab_want_init_on_free(struct kmem_cache *c) in slab_want_init_on_free() argument
654 return !(c->ctor || in slab_want_init_on_free()
655 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))); in slab_want_init_on_free()
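
The two slab.h predicates decide whether an object should be auto-initialised: a constructor, RCU type-safety or poisoning means the allocator must not (or need not) scribble over the object. A simplified userspace rendering of the logic visible in the snippet (the real helpers also consult the init_on_alloc/init_on_free static keys and GFP flags; the flag bit values below are made up):

#include <stdbool.h>

#define SLAB_TYPESAFE_BY_RCU	(1u << 0)	/* hypothetical bit positions */
#define SLAB_POISON		(1u << 1)

struct cache {
	unsigned int flags;
	void (*ctor)(void *obj);
};

static bool want_init_on_alloc(const struct cache *c)
{
	if (c->ctor)						/* ctor will fill the object */
		return false;
	if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))	/* contents managed elsewhere */
		return false;
	return true;						/* zero-fill on allocation */
}

static bool want_init_on_free(const struct cache *c)
{
	return !(c->ctor ||
		 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
}
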
slab.c:3787 struct kmem_cache *c; in kfree() local
3796 c = virt_to_cache(objp); in kfree()
3797 if (!c) { in kfree()
3801 debug_check_no_locks_freed(objp, c->object_size); in kfree()
3803 debug_check_no_obj_freed(objp, c->object_size); in kfree()
3804 __cache_free(c, (void *)objp, _RET_IP_); in kfree()
4240 struct kmem_cache *c; in __ksize() local
4247 c = virt_to_cache(objp); in __ksize()
4248 size = c ? c->object_size : 0; in __ksize()
gup.c:1817 volatile char c; in fault_in_readable() local
1822 if (unlikely(__get_user(c, uaddr) != 0)) in fault_in_readable()
1830 if (unlikely(__get_user(c, uaddr) != 0)) in fault_in_readable()
1836 (void)c; in fault_in_readable()
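
fault_in_readable() walks the user range one page at a time and reads a single byte per page with __get_user(), so the whole range is faulted in before the caller relies on it. A userspace analogue of that stride pattern (reading through a volatile char keeps the compiler from dropping the loads; unlike the kernel helper, this sketch cannot recover from an unmapped address):

#include <stddef.h>
#include <unistd.h>

static void fault_in_range(const char *start, size_t len)
{
	volatile char c;
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	const char *end = start + len;
	const char *p;

	if (!len)
		return;
	for (p = start; p < end; p += page)
		c = *p;		/* one read per page faults it in */
	c = end[-1];		/* make sure the final byte is resident too */
	(void)c;
}
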
Kconfig.debug:60 a fair amount of memory if enabled. See tools/vm/page_owner_sort.c
swapfile.c:291 unsigned int c) in cluster_set_count() argument
293 info->data = c; in cluster_set_count()
297 unsigned int c, unsigned int f) in cluster_set_count_flag() argument
300 info->data = c; in cluster_set_count_flag()
Kconfig:859 See tools/testing/selftests/vm/gup_test.c
/mm/damon/
core-test.h:52 struct damon_ctx *c = damon_new_ctx(); in damon_test_target() local
57 KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c)); in damon_test_target()
59 damon_add_target(c, t); in damon_test_target()
60 KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(c)); in damon_test_target()
63 KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c)); in damon_test_target()
65 damon_destroy_ctx(c); in damon_test_target()
121 struct damon_ctx *c = damon_new_ctx(); in damon_test_split_at() local
128 damon_split_region_at(c, t, r, 25); in damon_test_split_at()
137 damon_destroy_ctx(c); in damon_test_split_at()
214 struct damon_ctx *c = damon_new_ctx(); in damon_test_split_regions_of() local
[all …]
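
The core-test.h hits are KUnit tests: build a fresh damon_ctx, add and remove targets or split regions, and assert the resulting counts after each step. A minimal KUnit suite of the same shape (an illustrative skeleton only, not DAMON's actual test code):

#include <kunit/test.h>

static void example_counting_test(struct kunit *test)
{
	int nr_items = 0;

	KUNIT_EXPECT_EQ(test, 0, nr_items);	/* empty at first */
	nr_items++;				/* ~ adding a target */
	KUNIT_EXPECT_EQ(test, 1, nr_items);
	nr_items--;				/* ~ removing it again */
	KUNIT_EXPECT_EQ(test, 0, nr_items);
}

static struct kunit_case example_test_cases[] = {
	KUNIT_CASE(example_counting_test),
	{}
};

static struct kunit_suite example_test_suite = {
	.name = "example",
	.test_cases = example_test_cases,
};
kunit_test_suite(example_test_suite);
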
core.c:523 static void kdamond_reset_aggregated(struct damon_ctx *c) in kdamond_reset_aggregated() argument
528 damon_for_each_target(t, c) { in kdamond_reset_aggregated()
555 static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t, in damos_valid_target() argument
560 if (!ret || !s->quota.esz || !c->primitive.get_scheme_score) in damos_valid_target()
563 return c->primitive.get_scheme_score(c, t, r, s) >= s->quota.min_score; in damos_valid_target()
566 static void damon_do_apply_schemes(struct damon_ctx *c, in damon_do_apply_schemes() argument
572 damon_for_each_scheme(s, c) { in damon_do_apply_schemes()
608 damon_split_region_at(c, t, r, sz); in damon_do_apply_schemes()
616 if (!damos_valid_target(c, t, r, s)) in damon_do_apply_schemes()
620 if (c->primitive.apply_scheme) { in damon_do_apply_schemes()
[all …]
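
damos_valid_target() gates scheme application on a pluggable score: when a quota is in effect and the primitive provides get_scheme_score(), a region is acted on only if its score reaches the scheme's min_score. A hedged sketch of that gate with hypothetical names (ctx, scheme, region and get_score() are illustrative, not the DAMON structs):

#include <stdbool.h>

struct region;		/* opaque for the sketch */
struct scheme;
struct ctx {
	int (*get_score)(struct ctx *c, struct region *r, struct scheme *s);
};

struct scheme {
	unsigned long quota_sz;	/* ~ s->quota.esz */
	unsigned int min_score;	/* ~ s->quota.min_score */
};

static bool scheme_applies(struct ctx *c, struct region *r, struct scheme *s)
{
	if (!s->quota_sz || !c->get_score)	/* no quota or no scorer: apply unconditionally */
		return true;
	return c->get_score(c, r, s) >= (int)s->min_score;
}
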
prmtv-common.c:88 int damon_pageout_score(struct damon_ctx *c, struct damon_region *r, in damon_pageout_score() argument
99 max_nr_accesses = c->aggr_interval / c->sample_interval; in damon_pageout_score()
102 age_in_sec = (unsigned long)r->age * c->aggr_interval / 1000000; in damon_pageout_score()
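
To make the arithmetic above concrete: with DAMON's usual default intervals (sample_interval = 5000 us, aggr_interval = 100000 us), max_nr_accesses = 100000 / 5000 = 20, and a region whose age is 600 aggregation windows is 600 * 100000 / 1000000 = 60 seconds old. The resulting score lets the reclaim scheme rank regions by how cold they are.
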
dbgfs.c:100 static ssize_t sprint_schemes(struct damon_ctx *c, char *buf, ssize_t len) in sprint_schemes() argument
106 damon_for_each_scheme(s, c) { in sprint_schemes()
442 static ssize_t sprint_init_regions(struct damon_ctx *c, char *buf, ssize_t len) in sprint_init_regions() argument
449 damon_for_each_target(t, c) { in sprint_init_regions()
491 static int add_init_region(struct damon_ctx *c, in add_init_region() argument
502 damon_for_each_target(t, c) { in add_init_region()
504 if (targetid_is_pid(c)) in add_init_region()
524 static int set_init_regions(struct damon_ctx *c, const char *str, ssize_t len) in set_init_regions() argument
533 damon_for_each_target(t, c) { in set_init_regions()
543 err = add_init_region(c, target_id, &ar); in set_init_regions()
[all …]
reclaim.c:389 static int damon_reclaim_after_aggregation(struct damon_ctx *c) in damon_reclaim_after_aggregation() argument
394 damon_for_each_scheme(s, c) { in damon_reclaim_after_aggregation()
prmtv-common.h:15 int damon_pageout_score(struct damon_ctx *c, struct damon_region *r,
/mm/kasan/
shadow.c:42 void *memset(void *addr, int c, size_t len) in memset() argument
47 return __memset(addr, c, len); in memset()
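
KASAN's memset() here is an interceptor: validate the target range against shadow memory first, then delegate to the uninstrumented __memset(). A userspace analogue of that check-then-delegate shape (check_range() is a hypothetical stand-in for the shadow check):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

static bool check_range(const void *addr, size_t len)
{
	(void)addr;
	(void)len;
	return true;		/* kernel: consult shadow memory, report on failure */
}

static void *checked_memset(void *addr, int c, size_t len)
{
	if (!check_range(addr, len))	/* bad range: bail out without touching memory */
		return addr;
	return memset(addr, c, len);	/* kernel: __memset(), the real implementation */
}
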