/kernel/linux/linux-5.10/mm/ |
D | memcontrol.c |
    136  struct mem_cgroup *memcg;  [member]
    150  int (*register_event)(struct mem_cgroup *memcg,
    157  void (*unregister_event)(struct mem_cgroup *memcg,
    169  static void mem_cgroup_threshold(struct mem_cgroup *memcg);
    170  static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
    241  struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)  [in memcg_to_vmpressure(), argument]
    243  if (!memcg)  [in memcg_to_vmpressure()]
    244  memcg = root_mem_cgroup;  [in memcg_to_vmpressure()]
    245  return &memcg->vmpressure;  [in memcg_to_vmpressure()]
    259  struct mem_cgroup *memcg;  [in obj_cgroup_release(), local]
    [all …]
|
D | memcg_control.c |
    36   struct mem_cgroup *memcg = NULL;  [in get_next_memcg(), local]
    56   memcg = list_entry(pos->next,  [in get_next_memcg()]
    59   if (!css_tryget(&memcg->css))  [in get_next_memcg()]
    60   memcg = NULL;  [in get_next_memcg()]
    68   return memcg;  [in get_next_memcg()]
    71   void get_next_memcg_break(struct mem_cgroup *memcg)  [in get_next_memcg_break(), argument]
    73   if (memcg)  [in get_next_memcg_break()]
    74   css_put(&memcg->css);  [in get_next_memcg_break()]
    79   struct mem_cgroup *memcg = NULL;  [in get_prev_memcg(), local]
    99   memcg = list_entry(pos->prev,  [in get_prev_memcg()]
    [all …]
|
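The get_next_memcg()/get_next_memcg_break() pair above (used again by shrink_anon() in memcg_reclaim.c further down) implies a reference-counted iterator: each call returns the next memcg with its css reference held and drops the previous one, so a loop that exits early must release the reference it still holds. A minimal sketch of that traversal pattern, assuming the declarations live in memcg_policy.h as the matches below suggest; the loop body is illustrative:

    #include <linux/memcg_policy.h>   /* get_next_memcg(), get_next_memcg_break() */
    #include <linux/memcontrol.h>
    #include <linux/sched/signal.h>

    /* Walk every memcg on the global list; a css reference is held
     * for the memcg being visited on each iteration. */
    static void walk_all_memcgs(void)
    {
            struct mem_cgroup *memcg = NULL;

            while ((memcg = get_next_memcg(memcg))) {
                    /* ... operate on memcg ... */

                    if (fatal_signal_pending(current)) {
                            /* Early exit: drop the css reference still held. */
                            get_next_memcg_break(memcg);
                            break;
                    }
            }
    }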
D | zswapd.c |
    70   u64 memcg_data_size(struct mem_cgroup *memcg, int type)  [in memcg_data_size(), argument]
    77   size += gsdev->ops->group_data_size(memcg->id.id, type, gsdev->priv);  [in memcg_data_size()]
    83   u64 swapin_memcg(struct mem_cgroup *memcg, u64 req_size)  [in swapin_memcg(), argument]
    85   u64 swap_size = memcg_data_size(memcg, SWAP_SIZE);  [in swapin_memcg()]
    87   u64 ratio = atomic64_read(&memcg->memcg_reclaimed.ub_ufs2zram_ratio);  [in swapin_memcg()]
    94   read_size += gsdev->ops->group_read(memcg->id.id, req_size - read_size,  [in swapin_memcg()]
    104  static u64 swapout_memcg(struct mem_cgroup *memcg, u64 req_size)  [in swapout_memcg(), argument]
    106  u64 cache_size = memcg_data_size(memcg, CACHE_SIZE);  [in swapout_memcg()]
    107  u64 swap_size = memcg_data_size(memcg, SWAP_SIZE);  [in swapout_memcg()]
    110  u32 ratio = atomic_read(&memcg->memcg_reclaimed.ub_zram2ufs_ratio);  [in swapout_memcg()]
    [all …]
|
D | vmpressure.c |
    78   struct mem_cgroup *memcg = mem_cgroup_from_css(css);  [in vmpressure_parent(), local]
    80   memcg = parent_mem_cgroup(memcg);  [in vmpressure_parent()]
    81   if (!memcg)  [in vmpressure_parent()]
    83   return memcg_to_vmpressure(memcg);  [in vmpressure_parent()]
    240  void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,  [in vmpressure(), argument]
    243  struct vmpressure *vmpr = memcg_to_vmpressure(memcg);  [in vmpressure()]
    283  if (!memcg || mem_cgroup_is_root(memcg))  [in vmpressure()]
    307  memcg->socket_pressure = jiffies + HZ;  [in vmpressure()]
    323  void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)  [in vmpressure_prio(), argument]
    339  vmpressure(gfp, memcg, true, vmpressure_win, 0);  [in vmpressure_prio()]
    [all …]
|
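vmpressure() is the reporting side of the API: reclaim passes the number of pages scanned and reclaimed, and vmpressure_prio() (see the line 339 match, where it forwards a full window) raises a critical event once reclaim priority becomes desperate. A hedged sketch of a caller, using only the two signatures declared in vmpressure.h below; the counter values are placeholders:

    #include <linux/vmpressure.h>

    /* Report one reclaim round against @memcg. tree=true propagates the
     * event up the hierarchy so ancestors' listeners see it too. */
    static void report_reclaim_round(gfp_t gfp_mask, struct mem_cgroup *memcg,
                                     unsigned long scanned, unsigned long reclaimed,
                                     int priority)
    {
            vmpressure(gfp_mask, memcg, true, scanned, reclaimed);

            /* At the lowest priorities, signal critical pressure regardless
             * of the scanned/reclaimed ratio. */
            vmpressure_prio(gfp_mask, memcg, priority);
    }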
D | zswapd_control.c |
    315  struct mem_cgroup *memcg = NULL;  [in zswapd_memcgs_param_parse(), local]
    319  while ((memcg = get_next_memcg(memcg))) {  [in zswapd_memcgs_param_parse()]
    320  score = atomic64_read(&memcg->memcg_reclaimed.app_score);  [in zswapd_memcgs_param_parse()]
    326  atomic_set(&memcg->memcg_reclaimed.ub_mem2zram_ratio,  [in zswapd_memcgs_param_parse()]
    328  atomic_set(&memcg->memcg_reclaimed.ub_zram2ufs_ratio,  [in zswapd_memcgs_param_parse()]
    330  atomic_set(&memcg->memcg_reclaimed.refault_threshold,  [in zswapd_memcgs_param_parse()]
    409  struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));  [in zswapd_single_memcg_param_write(), local]
    424  atomic_set(&memcg->memcg_reclaimed.ub_mem2zram_ratio,  [in zswapd_single_memcg_param_write()]
    426  atomic_set(&memcg->memcg_reclaimed.ub_zram2ufs_ratio,  [in zswapd_single_memcg_param_write()]
    428  atomic_set(&memcg->memcg_reclaimed.refault_threshold,  [in zswapd_single_memcg_param_write()]
    [all …]
|
D | memcg_reclaim.c |
    187  struct mem_cgroup *memcg, struct scan_control *sc,  [in shrink_anon_memcg(), argument]
    190  struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);  [in shrink_anon_memcg()]
    238  struct mem_cgroup *memcg = NULL;  [in shrink_anon(), local]
    246  while ((memcg = get_next_memcg(memcg))) {  [in shrink_anon()]
    249  if (!memcg_is_child_of(memcg, target_memcg))  [in shrink_anon()]
    252  lruvec = mem_cgroup_lruvec(memcg, pgdat);  [in shrink_anon()]
    274  mem_cgroup_calculate_protection(target_memcg, memcg);  [in shrink_anon()]
    276  if (mem_cgroup_below_min(memcg)) {  [in shrink_anon()]
    282  } else if (mem_cgroup_below_low(memcg)) {  [in shrink_anon()]
    293  memcg_memory_event(memcg, MEMCG_LOW);  [in shrink_anon()]
    [all …]
|
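The shrink_anon() matches show the usual memory.min/memory.low protection sequence: compute effective protection relative to the reclaim target, never reclaim below min, and skip below-low cgroups unless reclaim has already decided to ignore low protection, in which case a MEMCG_LOW event records the breach. A sketch of that decision, assuming the helpers named in the matches; the low_ok flag stands in for scan_control's memcg_low_reclaim bit:

    #include <linux/memcontrol.h>

    /* May @memcg be reclaimed on behalf of @target this pass? */
    static bool may_reclaim_memcg(struct mem_cgroup *target,
                                  struct mem_cgroup *memcg, bool low_ok)
    {
            mem_cgroup_calculate_protection(target, memcg);

            if (mem_cgroup_below_min(memcg))
                    return false;           /* hard protection */

            if (mem_cgroup_below_low(memcg)) {
                    if (!low_ok)
                            return false;   /* soft protection */
                    /* Reclaiming below low anyway: record the event. */
                    memcg_memory_event(memcg, MEMCG_LOW);
            }

            return true;
    }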
D | list_lru.c |
    65   struct mem_cgroup *memcg = NULL;  [in list_lru_from_kmem(), local]
    70   memcg = mem_cgroup_from_obj(ptr);  [in list_lru_from_kmem()]
    71   if (!memcg)  [in list_lru_from_kmem()]
    74   l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));  [in list_lru_from_kmem()]
    77   *memcg_ptr = memcg;  [in list_lru_from_kmem()]
    119  struct mem_cgroup *memcg;  [in list_lru_add(), local]
    124  l = list_lru_from_kmem(nlru, item, &memcg);  [in list_lru_add()]
    128  memcg_set_shrinker_bit(memcg, nid,  [in list_lru_add()]
    175  int nid, struct mem_cgroup *memcg)  [in list_lru_count_one(), argument]
    182  l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));  [in list_lru_count_one()]
    [all …]
|
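list_lru keeps one list per NUMA node per memcg, and the shrink_control's nid/memcg pair selects the right sublist (see the list_lru.h wrappers further down, which forward sc->nid and sc->memcg). A sketch of the usual memcg-aware shrinker hookup; my_lru and the shrinker itself are hypothetical:

    #include <linux/list_lru.h>
    #include <linux/shrinker.h>

    static struct list_lru my_lru;      /* hypothetical object cache LRU */

    /* sc->nid and sc->memcg pick the per-node, per-memcg sublist. */
    static unsigned long my_shrink_count(struct shrinker *shrink,
                                         struct shrink_control *sc)
    {
            return list_lru_shrink_count(&my_lru, sc);
    }

    static struct shrinker my_shrinker = {
            .count_objects  = my_shrink_count,
            /* .scan_objects would walk entries via list_lru_shrink_walk() */
            .seeks          = DEFAULT_SEEKS,
            .flags          = SHRINKER_MEMCG_AWARE | SHRINKER_NUMA_AWARE,
    };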
D | workingset.c |
    299  struct mem_cgroup *memcg;  [in workingset_refault(), local]
    365  memcg = page_memcg(page);  [in workingset_refault()]
    366  lruvec = mem_cgroup_lruvec(memcg, pgdat);  [in workingset_refault()]
    400  if (mem_cgroup_get_nr_swap_pages(memcg) > 0) {  [in workingset_refault()]
    452  struct mem_cgroup *memcg;  [in workingset_activation(), local]
    463  memcg = page_memcg_rcu(page);  [in workingset_activation()]
    464  if (!mem_cgroup_disabled() && !memcg)  [in workingset_activation()]
    553  if (sc->memcg) {  [in count_shadow_nodes()]
    557  lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));  [in count_shadow_nodes()]
|
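Both the refault and activation paths resolve accounting state the same way: page_memcg() (or page_memcg_rcu() under RCU) finds the owning cgroup, then mem_cgroup_lruvec() returns that cgroup's per-node LRU state. A minimal sketch of the lookup, with a generic stat update standing in for the workingset bookkeeping:

    #include <linux/memcontrol.h>
    #include <linux/mm.h>

    /* Bump a per-lruvec counter on the lruvec that owns @page. */
    static void bump_page_lruvec_stat(struct page *page, enum node_stat_item idx)
    {
            struct pglist_data *pgdat = page_pgdat(page);
            struct mem_cgroup *memcg = page_memcg(page);
            struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);

            mod_lruvec_state(lruvec, idx, 1);
    }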
D | vmscan.c |
    474  struct mem_cgroup *memcg, int priority)  [in shrink_slab_memcg(), argument]
    480  if (!mem_cgroup_online(memcg))  [in shrink_slab_memcg()]
    486  map = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_map,  [in shrink_slab_memcg()]
    495  .memcg = memcg,  [in shrink_slab_memcg()]
    534  memcg_set_shrinker_bit(memcg, nid, i);  [in shrink_slab_memcg()]
    549  struct mem_cgroup *memcg, int priority)  [in shrink_slab_memcg(), argument]
    576  struct mem_cgroup *memcg,  [in shrink_slab(), argument]
    589  if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))  [in shrink_slab()]
    590  return shrink_slab_memcg(gfp_mask, nid, memcg, priority);  [in shrink_slab()]
    599  .memcg = memcg,  [in shrink_slab()]
    [all …]
|
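shrink_slab() dispatches non-root cgroups to shrink_slab_memcg() (lines 589-590), which consults the per-memcg shrinker_map bitmap so only shrinkers that actually hold objects charged to that memcg are invoked. On the caller side, node reclaim runs it once per memcg; a sketch of that loop, assuming the mainline mem_cgroup_iter() hierarchy walker:

    #include <linux/memcontrol.h>
    #include <linux/swap.h>     /* shrink_slab() declaration, per swap.h below */

    /* Shrink slab objects of every memcg under @target on @pgdat. */
    static void shrink_node_slabs(gfp_t gfp_mask, struct pglist_data *pgdat,
                                  struct mem_cgroup *target, int priority)
    {
            struct mem_cgroup *memcg = mem_cgroup_iter(target, NULL, NULL);

            do {
                    shrink_slab(gfp_mask, pgdat->node_id, memcg, priority);
            } while ((memcg = mem_cgroup_iter(target, memcg, NULL)));
    }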
D | zswapd_internal.h |
    38   u64 memcg_data_size(struct mem_cgroup *memcg, int type);
    39   u64 swapin_memcg(struct mem_cgroup *memcg, u64 req_size);
|
/kernel/linux/linux-5.10/include/linux/ |
D | memcontrol.h |
    133  struct mem_cgroup *memcg;  /* Back pointer, we cannot */  [member]
    203  struct mem_cgroup *memcg;  [member]
    367  static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)  [in mem_cgroup_is_root(), argument]
    369  return (memcg == root_mem_cgroup);  [in mem_cgroup_is_root()]
    378  struct mem_cgroup *memcg,  [in mem_cgroup_protection(), argument]
    420  if (root == memcg)  [in mem_cgroup_protection()]
    423  *min = READ_ONCE(memcg->memory.emin);  [in mem_cgroup_protection()]
    424  *low = READ_ONCE(memcg->memory.elow);  [in mem_cgroup_protection()]
    428  struct mem_cgroup *memcg);
    430  static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg)  [in mem_cgroup_supports_protection(), argument]
    [all …]
|
D | vmpressure.h |
    33   extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
    35   extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio);
    39   extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg);
    41   extern int vmpressure_register_event(struct mem_cgroup *memcg,
    44   extern void vmpressure_unregister_event(struct mem_cgroup *memcg,
    47   static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,  [in vmpressure(), argument]
    49   static inline void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg,  [in vmpressure_prio(), argument]
|
D | list_lru.h |
    117  int nid, struct mem_cgroup *memcg);
    123  return list_lru_count_one(lru, sc->nid, sc->memcg);  [in list_lru_shrink_count()]
    167  int nid, struct mem_cgroup *memcg,
    184  int nid, struct mem_cgroup *memcg,
    195  return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg,  [in list_lru_shrink_walk()]
    203  return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg,  [in list_lru_shrink_walk_irq()]
|
D | memcg_policy.h |
    22   struct mem_cgroup *memcg, struct scan_control *sc,
    29   void get_next_memcg_break(struct mem_cgroup *memcg);
|
D | swap.h |
    362  extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
    391  extern unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
    675  static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)  [in mem_cgroup_swappiness(), argument]
    682  if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))  [in mem_cgroup_swappiness()]
    685  return memcg->swappiness;  [in mem_cgroup_swappiness()]
    706  extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
    724  static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)  [in mem_cgroup_get_nr_swap_pages(), argument]
|
/kernel/linux/linux-5.10/tools/testing/selftests/cgroup/ |
D | test_memcontrol.c |
    161  char *memcg;  [in test_memcg_current(), local]
    163  memcg = cg_name(root, "memcg_test");  [in test_memcg_current()]
    164  if (!memcg)  [in test_memcg_current()]
    167  if (cg_create(memcg))  [in test_memcg_current()]
    170  current = cg_read_long(memcg, "memory.current");  [in test_memcg_current()]
    174  if (cg_run(memcg, alloc_anon_50M_check, NULL))  [in test_memcg_current()]
    177  if (cg_run(memcg, alloc_pagecache_50M_check, NULL))  [in test_memcg_current()]
    183  cg_destroy(memcg);  [in test_memcg_current()]
    184  free(memcg);  [in test_memcg_current()]
    590  char *memcg;  [in test_memcg_high(), local]
    [all …]
|
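The selftest matches follow one fixed shape: build a child cgroup name, create it, run workloads inside it with cg_run(), then destroy and free. A condensed skeleton of that shape, assuming the helpers from the selftests' cgroup_util.h; some_workload() is a placeholder for callbacks like alloc_anon_50M_check():

    /* Helpers cg_name()/cg_create()/cg_run()/cg_destroy() and the
     * KSFT_* result codes come from cgroup_util.h and kselftest.h. */
    static int some_workload(const char *cgroup, void *arg)
    {
            return 0;   /* placeholder: allocate memory, verify counters */
    }

    static int test_memcg_skeleton(const char *root)
    {
            int ret = KSFT_FAIL;
            char *memcg;

            memcg = cg_name(root, "memcg_test");
            if (!memcg)
                    return KSFT_FAIL;

            if (cg_create(memcg))
                    goto cleanup;

            if (cg_run(memcg, some_workload, NULL))
                    goto cleanup;

            ret = KSFT_PASS;
    cleanup:
            cg_destroy(memcg);
            free(memcg);
            return ret;
    }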
/kernel/linux/build/test/moduletest/runtest/bin/enhancedswap_t/testcases/bin/ |
D | enhancedswap05.sh |
    33   local memcg_100_stat=/dev/memcg/100/memory.stat
    34   local memcg_eswap=/dev/memcg/memory.eswap_info
    35   local avail_buffers=/dev/memcg/memory.avail_buffers
    36   local zswapd_s=/dev/memcg/memory.zswapd_pressure_show
    44   echo 30 > /dev/memcg/memory.zram_wm_ratio
    45   echo 60 10 50 > /dev/memcg/memory.zswapd_single_memcg_param
|
D | enhancedswap04.sh |
    33   local memcg_100_stat=/dev/memcg/100/memory.stat
    34   local memcg_stat=/dev/memcg/memory.stat
    35   avail_buffers=/dev/memcg/memory.avail_buffers
    36   local zswapd_s=/dev/memcg/memory.zswapd_pressure_show
|
D | enhancedswap02.sh |
    35   local memcg_100_zsmp=/dev/memcg/100/memory.zswapd_single_memcg_param
    36   local memcg_zsmp=/dev/memcg/memory.zswapd_single_memcg_param
|
/kernel/linux/build/test/moduletest/runtest/bin/mem_debug_t/testcases/bin/ |
D | mem_debug04.sh |
    29   avail_buffers=/dev/memcg/memory.avail_buffers
    31   local zswapd_s=/dev/memcg/memory.zswapd_pressure_show
    34   avail_buffers_def=$(cat /dev/memcg/memory.avail_buffers | awk '$1=="avail_buffers:"{print $2}')
    35   …min_avail_buffers_def=$(cat /dev/memcg/memory.avail_buffers | awk '$1=="min_avail_buffers:"{print …
    36   …high_avail_buffers_def=$(cat /dev/memcg/memory.avail_buffers | awk '$1=="high_avail_buffers:"{prin…
    37   …free_swap_threshold_def=$(cat /dev/memcg/memory.avail_buffers | awk '$1=="free_swap_threshold:"{pr…
|
D | mem_debug05.sh |
    29   avail_buffers=/dev/memcg/memory.avail_buffers
    31   local zswapd_s=/dev/memcg/memory.zswapd_pressure_show
    34   avail_buffers_def=$(cat /dev/memcg/memory.avail_buffers | awk '$1=="avail_buffers:"{print $2}')
    35   …min_avail_buffers_def=$(cat /dev/memcg/memory.avail_buffers | awk '$1=="min_avail_buffers:"{print …
    36   …high_avail_buffers_def=$(cat /dev/memcg/memory.avail_buffers | awk '$1=="high_avail_buffers:"{prin…
    37   …free_swap_threshold_def=$(cat /dev/memcg/memory.avail_buffers | awk '$1=="free_swap_threshold:"{pr…
|
/kernel/linux/linux-5.10/tools/cgroup/ |
D | memcg_slabinfo.py |
    42   memcg = container_of(css, 'struct mem_cgroup', 'css')
    43   MEMCGS[css.cgroup.kn.id.value_()] = memcg
    171  memcg = MEMCGS[cgroup_id]
    187  obj_cgroups.add(memcg.objcg.value_())
    189  memcg.objcg_list.address_of_(),
    221  memcg.kmem_caches.address_of_(),
|
/kernel/linux/linux-5.10/Documentation/admin-guide/cgroup-v1/ |
D | memcg_test.rst |
    9    Because VM is getting complex (one of reasons is memcg...), memcg's behavior
    10   is complex. This is a document for memcg's internal behavior.
    61   At commit(), the page is associated with the memcg.
    114  But brief explanation of the behavior of memcg around shmem will be
    136  Each memcg has its own private LRU. Now, its handling is under global
    138  Almost all routines around memcg's LRU is called by global LRU's
    142  memcg's private LRU and call __isolate_lru_page() to extract a page
    154  9.1 Small limit to memcg.
    157  When you do test to do racy case, it's good test to set memcg's limit
    167  Historically, memcg's shmem handling was poor and we saw some amount
    [all …]
|
/kernel/linux/linux-5.10/include/linux/sched/ |
D | mm.h |
    304  set_active_memcg(struct mem_cgroup *memcg)  [in set_active_memcg(), argument]
    310  this_cpu_write(int_active_memcg, memcg);  [in set_active_memcg()]
    313  current->active_memcg = memcg;  [in set_active_memcg()]
    320  set_active_memcg(struct mem_cgroup *memcg)  [in set_active_memcg(), argument]
|
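set_active_memcg() redirects memcg charging for subsequent kernel allocations: per-CPU in interrupt context, per-task otherwise, as the two matched assignments show. Callers save and restore the previous value around the override; a sketch of that pattern, assuming the mainline variant in which set_active_memcg() returns the old memcg:

    #include <linux/sched/mm.h>
    #include <linux/slab.h>

    /* Charge an allocation to @memcg rather than the current task's cgroup. */
    static void *alloc_charged_to(struct mem_cgroup *memcg, size_t size, gfp_t gfp)
    {
            struct mem_cgroup *old;
            void *p;

            old = set_active_memcg(memcg);
            p = kmalloc(size, gfp | __GFP_ACCOUNT);
            set_active_memcg(old);      /* always restore the previous value */

            return p;
    }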
/kernel/linux/build/test/moduletest/runtest/bin/memorycontrol_t/testcases/bin/ |
D | memory_control03.sh |
    34   echo "test_cgroup" > /dev/memcg/memory.name
    35   mem=$(cat /dev/memcg/memory.name)
|