
Searched refs:memcg (Results 1 – 17 of 17) sorted by relevance

/mm/
memcontrol.c
152 struct mem_cgroup *memcg; member
166 int (*register_event)(struct mem_cgroup *memcg,
173 void (*unregister_event)(struct mem_cgroup *memcg,
185 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
186 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
250 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg) in memcg_to_vmpressure() argument
252 if (!memcg) in memcg_to_vmpressure()
253 memcg = root_mem_cgroup; in memcg_to_vmpressure()
254 return &memcg->vmpressure; in memcg_to_vmpressure()
262 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) in mem_cgroup_is_root() argument
[all …]
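
The memcontrol.c hits above illustrate the "NULL means root" convention used throughout the file: memcg_to_vmpressure() substitutes root_mem_cgroup before touching embedded state (lines 250-254). A minimal sketch of that idiom, using only the fields visible in the hits; resolve_vmpressure() is a hypothetical name, not a kernel function:

    /* Sketch: resolve a possibly-NULL memcg before dereferencing it,
     * mirroring memcg_to_vmpressure() above.  A NULL memcg means
     * "global (non-cgroup) context" and maps to root_mem_cgroup.
     */
    static struct vmpressure *resolve_vmpressure(struct mem_cgroup *memcg)
    {
            if (!memcg)
                    memcg = root_mem_cgroup;
            return &memcg->vmpressure;
    }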
vmpressure.c
81 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in vmpressure_parent() local
83 memcg = parent_mem_cgroup(memcg); in vmpressure_parent()
84 if (!memcg) in vmpressure_parent()
86 return memcg_to_vmpressure(memcg); in vmpressure_parent()
219 void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, in vmpressure() argument
222 struct vmpressure *vmpr = memcg_to_vmpressure(memcg); in vmpressure()
271 void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio) in vmpressure_prio() argument
287 vmpressure(gfp, memcg, vmpressure_win, 0); in vmpressure_prio()
304 int vmpressure_register_event(struct mem_cgroup *memcg, in vmpressure_register_event() argument
307 struct vmpressure *vmpr = memcg_to_vmpressure(memcg); in vmpressure_register_event()
[all …]
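
The vmpressure.c hits show the two entry points reclaim uses: vmpressure() takes scanned/reclaimed counts after a scan cycle, and vmpressure_prio() is the priority-based shortcut that reports a full window with nothing reclaimed (line 287). A rough caller sketch under those assumptions; report_reclaim_cycle() and its placeholder counters are illustrative, not kernel code:

    /* Illustrative reclaim-side caller of the vmpressure API shown
     * above.  The real counters are accumulated by the LRU scan.
     */
    static void report_reclaim_cycle(gfp_t gfp, struct mem_cgroup *memcg,
                                     int prio,
                                     unsigned long scanned,
                                     unsigned long reclaimed)
    {
            /* Ratio-based report; internally ignored until roughly a
             * vmpressure_win worth of pages has been scanned.
             */
            vmpressure(gfp, memcg, scanned, reclaimed);

            /* Priority-based report; at low enough priority this ends
             * up signalling critical pressure (see line 287).
             */
            vmpressure_prio(gfp, memcg, prio);
    }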
oom_kill.c
132 struct mem_cgroup *memcg, const nodemask_t *nodemask) in oom_unkillable_task() argument
140 if (memcg && !task_in_mem_cgroup(p, memcg)) in oom_unkillable_task()
159 unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg, in oom_badness() argument
165 if (oom_unkillable_task(p, memcg, nodemask)) in oom_badness()
353 static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask) in dump_tasks() argument
361 if (oom_unkillable_task(p, memcg, nodemask)) in dump_tasks()
387 struct mem_cgroup *memcg) in dump_header() argument
394 if (memcg) in dump_header()
395 mem_cgroup_print_oom_info(memcg, p); in dump_header()
399 dump_tasks(memcg, oc->nodemask); in dump_header()
[all …]
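
In oom_kill.c the memcg acts as a victim-selection filter: oom_unkillable_task() rejects tasks outside the cgroup via task_in_mem_cgroup(), and both oom_badness() and dump_tasks() call it first. A hedged sketch of such a selection loop; pick_oom_victim() and the score callback are stand-ins (the trailing oom_badness() arguments are truncated in the hits, so they are not reproduced here):

    /* Sketch: keep the worst-scoring task inside 'memcg' (or globally
     * when memcg is NULL), mirroring the filtering done by
     * oom_unkillable_task() above.  'score' stands in for oom_badness().
     */
    static struct task_struct *
    pick_oom_victim(struct mem_cgroup *memcg, const nodemask_t *nodemask,
                    unsigned long (*score)(struct task_struct *p))
    {
            struct task_struct *p, *chosen = NULL;
            unsigned long points, chosen_points = 0;

            rcu_read_lock();
            for_each_process(p) {
                    /* Kernel threads and tasks outside the target
                     * memcg/nodemask are never OOM candidates.
                     */
                    if (oom_unkillable_task(p, memcg, nodemask))
                            continue;
                    points = score(p);
                    if (points > chosen_points) {
                            chosen_points = points;
                            chosen = p;
                    }
            }
            rcu_read_unlock();
            return chosen;
    }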
slab_common.c
139 struct mem_cgroup *memcg, struct kmem_cache *root_cache) in init_memcg_params() argument
143 if (memcg) { in init_memcg_params()
145 s->memcg_params.memcg = memcg; in init_memcg_params()
215 struct mem_cgroup *memcg, struct kmem_cache *root_cache) in init_memcg_params() argument
325 struct mem_cgroup *memcg, struct kmem_cache *root_cache) in create_cache() argument
341 err = init_memcg_params(s, memcg, root_cache); in create_cache()
495 void memcg_create_kmem_cache(struct mem_cgroup *memcg, in memcg_create_kmem_cache() argument
499 struct cgroup_subsys_state *css = &memcg->css; in memcg_create_kmem_cache()
514 if (!memcg_kmem_is_active(memcg)) in memcg_create_kmem_cache()
517 idx = memcg_cache_id(memcg); in memcg_create_kmem_cache()
[all …]
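
The slab_common.c hits are the per-memcg kmem-cache plumbing: init_memcg_params() records the owning memcg on a cache, and memcg_create_kmem_cache() builds the per-memcg clone of a root cache, bailing out unless the memcg's kmem accounting is active. A small caller sketch; the second memcg_create_kmem_cache() argument (the root cache) is assumed, since the prototype is truncated in the hits:

    /* Sketch: make sure a kmem-active memcg gets its own clone of a
     * root slab cache.  ensure_memcg_cache() is a hypothetical name.
     */
    static void ensure_memcg_cache(struct mem_cgroup *memcg,
                                   struct kmem_cache *root_cache)
    {
            /* Early-out only; the callee re-checks
             * memcg_kmem_is_active() and derives the per-memcg slot
             * via memcg_cache_id() (lines 514 and 517 above).
             */
            if (!memcg_kmem_is_active(memcg))
                    return;

            memcg_create_kmem_cache(memcg, root_cache);
    }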
vmscan.c
173 struct mem_cgroup *memcg = sc->target_mem_cgroup; in sane_reclaim() local
175 if (!memcg) in sane_reclaim()
418 struct mem_cgroup *memcg, in shrink_slab() argument
425 if (memcg && !memcg_kmem_is_active(memcg)) in shrink_slab()
446 .memcg = memcg, in shrink_slab()
449 if (memcg && !(shrinker->flags & SHRINKER_MEMCG_AWARE)) in shrink_slab()
469 struct mem_cgroup *memcg = NULL; in drop_slab_node() local
473 freed += shrink_slab(GFP_KERNEL, nid, memcg, in drop_slab_node()
475 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL); in drop_slab_node()
624 struct mem_cgroup *memcg; in __remove_mapping() local
[all …]
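
drop_slab_node() above demonstrates the standard way to cover every cgroup: start from NULL (which also stands for the root/global pass) and keep calling mem_cgroup_iter() until it returns NULL. A sketch of that walk with the per-memcg work factored into a callback, since the shrink_slab() call at line 473 is truncated here; for_each_memcg() and 'visit' are illustrative names:

    /* Sketch of the hierarchy walk used by drop_slab_node() above.
     * mem_cgroup_iter(NULL, prev, NULL) visits the whole memcg tree
     * and returns NULL after the last group.
     */
    static unsigned long for_each_memcg(int nid,
                                        unsigned long (*visit)(int nid,
                                                               struct mem_cgroup *memcg))
    {
            struct mem_cgroup *memcg = NULL;
            unsigned long total = 0;

            do {
                    total += visit(nid, memcg);
            } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);

            return total;
    }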
page-writeback.c
2417 struct mem_cgroup *memcg) in account_page_dirtied() argument
2429 mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_DIRTY); in account_page_dirtied()
2447 struct mem_cgroup *memcg, struct bdi_writeback *wb) in account_page_cleaned() argument
2450 mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_DIRTY); in account_page_cleaned()
2471 struct mem_cgroup *memcg; in __set_page_dirty_nobuffers() local
2473 memcg = mem_cgroup_begin_page_stat(page); in __set_page_dirty_nobuffers()
2479 mem_cgroup_end_page_stat(memcg); in __set_page_dirty_nobuffers()
2486 account_page_dirtied(page, mapping, memcg); in __set_page_dirty_nobuffers()
2490 mem_cgroup_end_page_stat(memcg); in __set_page_dirty_nobuffers()
2498 mem_cgroup_end_page_stat(memcg); in __set_page_dirty_nobuffers()
[all …]
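
The page-writeback.c hits show the begin/end page-stat bracket: mem_cgroup_begin_page_stat() pins the page's memcg so per-memcg counters can be updated consistently while the page state changes, and mem_cgroup_end_page_stat() releases it. A minimal sketch of that bracket using only the calls visible above; the surrounding function and the elided dirtying work are placeholders:

    /* Illustrative bracket around a per-memcg page statistic update,
     * following __set_page_dirty_nobuffers() above.
     */
    static void mark_page_dirty_stat(struct page *page)
    {
            struct mem_cgroup *memcg;

            memcg = mem_cgroup_begin_page_stat(page);

            /* ... mark the page and its mapping dirty here ... */

            mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);

            mem_cgroup_end_page_stat(memcg);
    }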
rmap.c
846 struct mem_cgroup *memcg; member
932 struct mem_cgroup *memcg = pra->memcg; in invalid_page_referenced_vma() local
934 if (!mm_match_cgroup(vma->vm_mm, memcg)) in invalid_page_referenced_vma()
952 struct mem_cgroup *memcg, in page_referenced() argument
959 .memcg = memcg, in page_referenced()
985 if (memcg) { in page_referenced()
1232 struct mem_cgroup *memcg; in page_add_file_rmap() local
1234 memcg = mem_cgroup_begin_page_stat(page); in page_add_file_rmap()
1237 mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED); in page_add_file_rmap()
1239 mem_cgroup_end_page_stat(memcg); in page_add_file_rmap()
[all …]
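
In rmap.c the memcg is again a filter: the walk argument carries a memcg member, and invalid_page_referenced_vma() uses mm_match_cgroup() to skip VMAs whose mm belongs to a different cgroup. A sketch of that filter shape; the argument struct here is simplified and skip_vma_for_memcg() is a hypothetical name (only mm_match_cgroup() and the memcg member come from the hits):

    /* Sketch of the per-VMA filter used by the rmap walk above:
     * skip any VMA whose mm is not in the target memcg.
     */
    struct referenced_arg {
            struct mem_cgroup *memcg;       /* NULL: no filtering */
    };

    static bool skip_vma_for_memcg(struct vm_area_struct *vma, void *arg)
    {
            struct referenced_arg *ra = arg;

            if (ra->memcg && !mm_match_cgroup(vma->vm_mm, ra->memcg))
                    return true;            /* different cgroup: skip */
            return false;
    }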
list_lru.c
75 struct mem_cgroup *memcg; in list_lru_from_kmem() local
80 memcg = mem_cgroup_from_kmem(ptr); in list_lru_from_kmem()
81 if (!memcg) in list_lru_from_kmem()
84 return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg)); in list_lru_from_kmem()
176 int nid, struct mem_cgroup *memcg) in list_lru_count_one() argument
178 return __list_lru_count_one(lru, nid, memcg_cache_id(memcg)); in list_lru_count_one()
253 list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg, in list_lru_walk_one() argument
257 return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg), in list_lru_walk_one()
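
list_lru.c shows that per-memcg LRU lists reuse the kmem-cache index: memcg_cache_id() turns a memcg into the slot consulted by list_lru_from_memcg_idx(), and list_lru_count_one()/list_lru_walk_one() take the memcg directly. A hedged sketch summing one memcg's objects over nodes; the node-mask choice and the helper name are illustrative:

    /* Sketch: total a list_lru for a single memcg across nodes,
     * using the list_lru_count_one() signature shown above.
     */
    static unsigned long lru_count_for_memcg(struct list_lru *lru,
                                             struct mem_cgroup *memcg)
    {
            unsigned long count = 0;
            int nid;

            for_each_node_state(nid, N_NORMAL_MEMORY)
                    count += list_lru_count_one(lru, nid, memcg);

            return count;
    }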
huge_memory.c
719 struct mem_cgroup *memcg; in __do_huge_pmd_anonymous_page() local
726 if (mem_cgroup_try_charge(page, mm, gfp, &memcg)) { in __do_huge_pmd_anonymous_page()
734 mem_cgroup_cancel_charge(page, memcg); in __do_huge_pmd_anonymous_page()
750 mem_cgroup_cancel_charge(page, memcg); in __do_huge_pmd_anonymous_page()
761 mem_cgroup_cancel_charge(page, memcg); in __do_huge_pmd_anonymous_page()
773 mem_cgroup_commit_charge(page, memcg, false); in __do_huge_pmd_anonymous_page()
1046 struct mem_cgroup *memcg; in do_huge_pmd_wp_page_fallback() local
1068 &memcg))) { in do_huge_pmd_wp_page_fallback()
1072 memcg = (void *)page_private(pages[i]); in do_huge_pmd_wp_page_fallback()
1074 mem_cgroup_cancel_charge(pages[i], memcg); in do_huge_pmd_wp_page_fallback()
[all …]
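
huge_memory.c and several later entries (userfaultfd.c, memory.c, shmem.c, swapfile.c) all follow the same two-phase charging protocol: mem_cgroup_try_charge() reserves the charge and hands back the memcg, mem_cgroup_commit_charge() finalizes it once the page is actually mapped, and mem_cgroup_cancel_charge() backs out on any failure in between. A condensed sketch of that protocol; charge_and_map() and the install_mapping callback are hypothetical, and the commit's third argument is the "page may already be on an LRU" flag that the swap-cache paths above pass as true:

    /* Sketch of the try/commit/cancel charge pattern common to the
     * memcg hits in the fault and swap paths listed here.
     */
    static int charge_and_map(struct page *page, struct mm_struct *mm,
                              gfp_t gfp, bool lrucare,
                              int (*install_mapping)(struct page *page))
    {
            struct mem_cgroup *memcg;
            int err;

            /* Reserve the charge; fails if the group is over its
             * limit and reclaim could not make room.
             */
            if (mem_cgroup_try_charge(page, mm, gfp, &memcg))
                    return -ENOMEM;

            err = install_mapping(page);
            if (err) {
                    /* Nothing was committed: give the charge back. */
                    mem_cgroup_cancel_charge(page, memcg);
                    return err;
            }

            /* The page is mapped: make the charge permanent. */
            mem_cgroup_commit_charge(page, memcg, lrucare);
            return 0;
    }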
userfaultfd.c
27 struct mem_cgroup *memcg; in mcopy_atomic_pte() local
66 if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg)) in mcopy_atomic_pte()
80 mem_cgroup_commit_charge(page, memcg, false); in mcopy_atomic_pte()
94 mem_cgroup_cancel_charge(page, memcg); in mcopy_atomic_pte()
filemap.c
232 struct mem_cgroup *memcg) in __delete_from_page_cache() argument
268 account_page_cleaned(page, mapping, memcg, in __delete_from_page_cache()
283 struct mem_cgroup *memcg; in delete_from_page_cache() local
292 memcg = mem_cgroup_begin_page_stat(page); in delete_from_page_cache()
294 __delete_from_page_cache(page, NULL, memcg); in delete_from_page_cache()
296 mem_cgroup_end_page_stat(memcg); in delete_from_page_cache()
579 struct mem_cgroup *memcg; in replace_page_cache_page() local
589 memcg = mem_cgroup_begin_page_stat(old); in replace_page_cache_page()
591 __delete_from_page_cache(old, NULL, memcg); in replace_page_cache_page()
603 mem_cgroup_end_page_stat(memcg); in replace_page_cache_page()
[all …]
truncate.c
517 struct mem_cgroup *memcg; in invalidate_complete_page2() local
526 memcg = mem_cgroup_begin_page_stat(page); in invalidate_complete_page2()
532 __delete_from_page_cache(page, NULL, memcg); in invalidate_complete_page2()
534 mem_cgroup_end_page_stat(memcg); in invalidate_complete_page2()
543 mem_cgroup_end_page_stat(memcg); in invalidate_complete_page2()
memory.c
2157 struct mem_cgroup *memcg; in wp_page_copy() local
2173 if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg)) in wp_page_copy()
2204 mem_cgroup_commit_charge(new_page, memcg, false); in wp_page_copy()
2243 mem_cgroup_cancel_charge(new_page, memcg); in wp_page_copy()
2539 struct mem_cgroup *memcg; in do_swap_page() local
2618 if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg)) { in do_swap_page()
2660 mem_cgroup_commit_charge(page, memcg, true); in do_swap_page()
2663 mem_cgroup_commit_charge(page, memcg, false); in do_swap_page()
2698 mem_cgroup_cancel_charge(page, memcg); in do_swap_page()
2720 struct mem_cgroup *memcg; in do_anonymous_page() local
[all …]
backing-dev.c
522 struct mem_cgroup *memcg; in cgwb_create() local
530 memcg = mem_cgroup_from_css(memcg_css); in cgwb_create()
533 memcg_cgwb_list = mem_cgroup_cgwb_list(memcg); in cgwb_create()
721 void wb_memcg_offline(struct mem_cgroup *memcg) in wb_memcg_offline() argument
724 struct list_head *memcg_cgwb_list = mem_cgroup_cgwb_list(memcg); in wb_memcg_offline()
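
The backing-dev.c hits tie memcg to cgroup writeback: cgwb_create() links a new writeback structure onto the list returned by mem_cgroup_cgwb_list(), and wb_memcg_offline() walks that same list when the memcg goes offline. A rough sketch of the offline side; only mem_cgroup_cgwb_list() comes from the hits, while the 'memcg_node' list member and the release_wb callback are assumptions about the surrounding structures:

    /* Sketch of what wb_memcg_offline() above does conceptually:
     * walk the per-memcg list of writeback structs and tear each
     * one down via a caller-supplied helper.
     */
    static void memcg_offline_writeback(struct mem_cgroup *memcg,
                                        void (*release_wb)(struct bdi_writeback *wb))
    {
            struct list_head *memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
            struct bdi_writeback *wb, *next;

            list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
                    release_wb(wb);
    }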
shmem.c
716 struct mem_cgroup *memcg; in shmem_unuse() local
731 error = mem_cgroup_try_charge(page, current->mm, GFP_KERNEL, &memcg); in shmem_unuse()
754 mem_cgroup_cancel_charge(page, memcg); in shmem_unuse()
756 mem_cgroup_commit_charge(page, memcg, true); in shmem_unuse()
1060 struct mem_cgroup *memcg; in shmem_getpage_gfp() local
1139 error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg); in shmem_getpage_gfp()
1156 mem_cgroup_cancel_charge(page, memcg); in shmem_getpage_gfp()
1163 mem_cgroup_commit_charge(page, memcg, true); in shmem_getpage_gfp()
1202 error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg); in shmem_getpage_gfp()
1212 mem_cgroup_cancel_charge(page, memcg); in shmem_getpage_gfp()
[all …]
slab.h
248 s->memcg_params.memcg); in memcg_charge_slab()
swapfile.c
1138 struct mem_cgroup *memcg; in unuse_pte() local
1148 if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg)) { in unuse_pte()
1155 mem_cgroup_cancel_charge(page, memcg); in unuse_pte()
1167 mem_cgroup_commit_charge(page, memcg, true); in unuse_pte()
1170 mem_cgroup_commit_charge(page, memcg, false); in unuse_pte()