mm/
memcontrol.c
    143   struct mem_cgroup *memcg;    member
    157   int (*register_event)(struct mem_cgroup *memcg,
    164   void (*unregister_event)(struct mem_cgroup *memcg,
    176   static void mem_cgroup_threshold(struct mem_cgroup *memcg);
    177   static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
    256   struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)    in memcg_to_vmpressure() argument
    258   if (!memcg)    in memcg_to_vmpressure()
    259   memcg = root_mem_cgroup;    in memcg_to_vmpressure()
    260   return &memcg->vmpressure;    in memcg_to_vmpressure()
    331   static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,    in memcg_expand_one_shrinker_map() argument
    [all …]
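Lines 256-260 happen to sample memcg_to_vmpressure() in full, and they show the fallback idiom this listing returns to repeatedly: a NULL memcg quietly means root_mem_cgroup. Reassembled from just those hits:

    struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
    {
            /* NULL means "global": fall back to the root group, so
             * every caller gets a valid vmpressure structure back */
            if (!memcg)
                    memcg = root_mem_cgroup;
            return &memcg->vmpressure;
    }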
slab.h
    83    struct mem_cgroup *memcg;    member
    338   return READ_ONCE(s->memcg_params.memcg);    in memcg_from_slab_page()
    351   struct mem_cgroup *memcg;    in memcg_charge_slab() local
    356   memcg = READ_ONCE(s->memcg_params.memcg);    in memcg_charge_slab()
    357   while (memcg && !css_tryget_online(&memcg->css))    in memcg_charge_slab()
    358   memcg = parent_mem_cgroup(memcg);    in memcg_charge_slab()
    361   if (unlikely(!memcg || mem_cgroup_is_root(memcg))) {    in memcg_charge_slab()
    368   ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);    in memcg_charge_slab()
    372   lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);    in memcg_charge_slab()
    377   css_put_many(&memcg->css, 1 << order);    in memcg_charge_slab()
    [all …]
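memcg_charge_slab() (lines 351-377) is the listing's clearest example of the css_tryget_online() walk: the cache's memcg may be mid-deletion, in which case the charge is redirected to the nearest live ancestor. A simplified sketch assembled from the sampled lines; the real function's stat bookkeeping and the css_put_many() reference transfer at line 377 are reduced to comments:

    static int charge_slab_sketch(struct page *page, gfp_t gfp, int order,
                                  struct kmem_cache *s)
    {
            struct mem_cgroup *memcg;
            struct lruvec *lruvec;
            int ret;

            rcu_read_lock();
            memcg = READ_ONCE(s->memcg_params.memcg);
            /* cgroup being deleted? charge its nearest live ancestor */
            while (memcg && !css_tryget_online(&memcg->css))
                    memcg = parent_mem_cgroup(memcg);
            rcu_read_unlock();

            /* no memcg, or root: only global accounting applies */
            if (unlikely(!memcg || mem_cgroup_is_root(memcg)))
                    return 0;

            ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
            if (!ret) {
                    /* the real code updates per-memcg slab stats here
                     * and transfers 1 << order page references onto the
                     * cache with css_put_many() (line 377) */
                    lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
                    mod_lruvec_state(lruvec, NR_SLAB_UNRECLAIMABLE,
                                     1 << order);
            }

            css_put(&memcg->css);   /* drop the tryget reference */
            return ret;
    }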
vmpressure.c
    78    struct mem_cgroup *memcg = mem_cgroup_from_css(css);    in vmpressure_parent() local
    80    memcg = parent_mem_cgroup(memcg);    in vmpressure_parent()
    81    if (!memcg)    in vmpressure_parent()
    83    return memcg_to_vmpressure(memcg);    in vmpressure_parent()
    240   void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,    in vmpressure() argument
    243   struct vmpressure *vmpr = memcg_to_vmpressure(memcg);    in vmpressure()
    283   if (!memcg || memcg == root_mem_cgroup)    in vmpressure()
    307   memcg->socket_pressure = jiffies + HZ;    in vmpressure()
    323   void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)    in vmpressure_prio() argument
    339   vmpressure(gfp, memcg, true, vmpressure_win, 0);    in vmpressure_prio()
    [all …]
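vmpressure_parent() (lines 78-83) is how a pressure event climbs the hierarchy when vmpressure() is called with tree == true (as vmpressure_prio() does at line 339): resolve the current level back to its memcg, step to the parent, and return the parent's vmpressure. A sketch of those four lines with the surrounding declarations filled in; the css is presumably obtained via vmpressure_to_css(), as in this kernel vintage:

    static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr)
    {
            struct cgroup_subsys_state *css = vmpressure_to_css(vmpr);
            struct mem_cgroup *memcg = mem_cgroup_from_css(css);

            memcg = parent_mem_cgroup(memcg);
            if (!memcg)
                    return NULL;    /* reached the root: stop walking */
            return memcg_to_vmpressure(memcg);
    }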
workingset.c
    226   struct mem_cgroup *memcg = page_memcg(page);    in workingset_eviction() local
    227   int memcgid = mem_cgroup_id(memcg);    in workingset_eviction()
    236   lruvec = mem_cgroup_lruvec(pgdat, memcg);    in workingset_eviction()
    254   struct mem_cgroup *memcg;    in workingset_refault() local
    280   memcg = mem_cgroup_from_id(memcgid);    in workingset_refault()
    281   if (!mem_cgroup_disabled() && !memcg)    in workingset_refault()
    283   lruvec = mem_cgroup_lruvec(pgdat, memcg);    in workingset_refault()
    334   struct mem_cgroup *memcg;    in workingset_activation() local
    345   memcg = page_memcg_rcu(page);    in workingset_activation()
    346   if (!mem_cgroup_disabled() && !memcg)    in workingset_activation()
    [all …]
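workingset.c never stores a mem_cgroup pointer in a shadow entry; it stores the id (line 227) and resolves it again at refault time (line 280), tolerating a NULL result because the cgroup may have been deleted while the page was evicted. A minimal sketch of the refault-side lookup; the helper name is hypothetical, and the calls follow the lines sampled above:

    /* caller holds rcu_read_lock(), as workingset_refault() does */
    static struct lruvec *shadow_to_lruvec(int memcgid, pg_data_t *pgdat)
    {
            struct mem_cgroup *memcg;

            memcg = mem_cgroup_from_id(memcgid);
            if (!mem_cgroup_disabled() && !memcg)
                    return NULL;    /* owner died; shadow entry useless */
            return mem_cgroup_lruvec(pgdat, memcg);
    }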
vmscan.c
    262   struct mem_cgroup *memcg = sc->target_mem_cgroup;    in sane_reclaim() local
    264   if (!memcg)    in sane_reclaim()
    274   struct mem_cgroup *memcg,    in set_memcg_congestion() argument
    279   if (!memcg)    in set_memcg_congestion()
    282   mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id);    in set_memcg_congestion()
    287   struct mem_cgroup *memcg)    in memcg_congested() argument
    291   mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id);    in memcg_congested()
    316   struct mem_cgroup *memcg, bool congested)    in set_memcg_congestion() argument
    321   struct mem_cgroup *memcg)    in memcg_congested() argument
    594   struct mem_cgroup *memcg, int priority)    in shrink_slab_memcg() argument
    [all …]
slab_common.c
    184   mem_cgroup_put(s->memcg_params.memcg);    in destroy_memcg_params()
    185   WRITE_ONCE(s->memcg_params.memcg, NULL);    in destroy_memcg_params()
    238   void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg)    in memcg_link_cache() argument
    243   css_get(&memcg->css);    in memcg_link_cache()
    244   s->memcg_params.memcg = memcg;    in memcg_link_cache()
    248   &s->memcg_params.memcg->kmem_caches);    in memcg_link_cache()
    383   struct mem_cgroup *memcg, struct kmem_cache *root_cache)    in create_cache() argument
    413   memcg_link_cache(s, memcg);    in create_cache()
    640   void memcg_create_kmem_cache(struct mem_cgroup *memcg,    in memcg_create_kmem_cache() argument
    644   struct cgroup_subsys_state *css = &memcg->css;    in memcg_create_kmem_cache()
    [all …]
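The memcg_link_cache() hits (238-248) show the cache-to-cgroup linkage: a child cache takes a css reference that destroy_memcg_params() (184-185) later drops through mem_cgroup_put(). A sketch of the non-root branch only; the list-node member name is inferred, while the list_add() target is line 248 verbatim:

    void link_cache_sketch(struct kmem_cache *s, struct mem_cgroup *memcg)
    {
            css_get(&memcg->css);           /* cache pins its cgroup */
            s->memcg_params.memcg = memcg;
            list_add(&s->memcg_params.kmem_caches_node,   /* name inferred */
                     &s->memcg_params.memcg->kmem_caches);
    }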
list_lru.c
    75    struct mem_cgroup *memcg = NULL;    in list_lru_from_kmem() local
    80    memcg = mem_cgroup_from_kmem(ptr);    in list_lru_from_kmem()
    81    if (!memcg)    in list_lru_from_kmem()
    84    l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));    in list_lru_from_kmem()
    87    *memcg_ptr = memcg;    in list_lru_from_kmem()
    129   struct mem_cgroup *memcg;    in list_lru_add() local
    134   l = list_lru_from_kmem(nlru, item, &memcg);    in list_lru_add()
    138   memcg_set_shrinker_bit(memcg, nid,    in list_lru_add()
    185   int nid, struct mem_cgroup *memcg)    in list_lru_count_one() argument
    192   l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));    in list_lru_count_one()
    [all …]
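list_lru_from_kmem() (lines 75-87) is the dispatch point that makes list_lrus memcg-aware: a kmem pointer is mapped back to its owning cgroup and then to that cgroup's private list, with the global list as fallback. A reassembled sketch; the nlru->memcg_lrus guard is this era's marker for a memcg-aware lru, and the structure follows the sampled lines:

    static inline struct list_lru_one *
    lru_from_kmem_sketch(struct list_lru_node *nlru, void *ptr,
                         struct mem_cgroup **memcg_ptr)
    {
            struct list_lru_one *l = &nlru->lru;    /* global fallback */
            struct mem_cgroup *memcg = NULL;

            if (!nlru->memcg_lrus)          /* not memcg-aware */
                    goto out;

            memcg = mem_cgroup_from_kmem(ptr);
            if (!memcg)
                    goto out;

            /* per-memcg list, indexed by the kmem cache id */
            l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
    out:
            if (memcg_ptr)
                    *memcg_ptr = memcg;
            return l;
    }

list_lru_add() (line 138) then uses the returned memcg to set the shrinker bit, which is what lets shrink_slab_memcg() (vmscan.c, line 594) skip cgroups that have nothing to shrink.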
huge_memory.c
    502   struct mem_cgroup *memcg = compound_head(page)->mem_cgroup;    in get_deferred_split_queue() local
    505   if (memcg)    in get_deferred_split_queue()
    506   return &memcg->deferred_split_queue;    in get_deferred_split_queue()
    587   struct mem_cgroup *memcg;    in __do_huge_pmd_anonymous_page() local
    594   if (mem_cgroup_try_charge_delay(page, vma->vm_mm, gfp, &memcg, true)) {    in __do_huge_pmd_anonymous_page()
    629   mem_cgroup_cancel_charge(page, memcg, true);    in __do_huge_pmd_anonymous_page()
    640   mem_cgroup_commit_charge(page, memcg, false, true);    in __do_huge_pmd_anonymous_page()
    648   count_memcg_events(memcg, THP_FAULT_ALLOC, 1);    in __do_huge_pmd_anonymous_page()
    657   mem_cgroup_cancel_charge(page, memcg, true);    in __do_huge_pmd_anonymous_page()
    1211  struct mem_cgroup *memcg;    in do_huge_pmd_wp_page_fallback() local
    [all …]
oom_kill.c
    69    return oc->memcg != NULL;    in is_memcg_oom()
    261   oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1;    in constrained_alloc()
    368   mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);    in select_bad_process()
    429   mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);    in dump_tasks()
    447   mem_cgroup_print_oom_context(oc->memcg, victim);    in dump_oom_summary()
    462   mem_cgroup_print_oom_meminfo(oc->memcg);    in dump_header()
    981   oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);    in oom_kill_process()
    1126  .memcg = NULL,    in pagefault_out_of_memory()
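oom_kill.c never branches on a separate global-vs-memcg flag; the mode is encoded entirely in whether oc->memcg is set (line 69). Both global entry points in this listing pass NULL: pagefault_out_of_memory() at 1126 and __alloc_pages_may_oom() in page_alloc.c at the bottom. A hedged sketch of the two shapes, using this era's struct oom_control fields with illustrative values:

    /* memcg OOM, roughly as cgroup limit enforcement constructs it */
    struct oom_control oc = {
            .zonelist = NULL,
            .nodemask = NULL,
            .memcg    = memcg,      /* is_memcg_oom(&oc) == true  */
            .gfp_mask = GFP_KERNEL,
            .order    = 0,
    };

With .memcg = NULL, is_memcg_oom() is false and victim selection falls back to scanning every process instead of mem_cgroup_scan_tasks(oc->memcg, ...) at line 368.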
khugepaged.c
    957   struct mem_cgroup *memcg;    in collapse_huge_page() local
    980   if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {    in collapse_huge_page()
    988   mem_cgroup_cancel_charge(new_page, memcg, true);    in collapse_huge_page()
    996   mem_cgroup_cancel_charge(new_page, memcg, true);    in collapse_huge_page()
    1007  mem_cgroup_cancel_charge(new_page, memcg, true);    in collapse_huge_page()
    1093  mem_cgroup_commit_charge(new_page, memcg, false, true);    in collapse_huge_page()
    1094  count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);    in collapse_huge_page()
    1111  mem_cgroup_cancel_charge(new_page, memcg, true);    in collapse_huge_page()
    1499  struct mem_cgroup *memcg;    in collapse_file() local
    1518  if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {    in collapse_file()
    [all …]
rmap.c
    749   struct mem_cgroup *memcg;    member
    819   struct mem_cgroup *memcg = pra->memcg;    in invalid_page_referenced_vma() local
    821   if (!mm_match_cgroup(vma->vm_mm, memcg))    in invalid_page_referenced_vma()
    839   struct mem_cgroup *memcg,    in page_referenced() argument
    845   .memcg = memcg,    in page_referenced()
    871   if (memcg) {    in page_referenced()
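rmap.c threads the memcg through a page_referenced_arg (member at 749, initialized at 845) and, only when one was supplied (line 871), installs an invalid_vma callback so the rmap walk skips mappings owned by other cgroups. The callback reconstructs almost entirely from the hits:

    static bool invalid_page_referenced_vma(struct vm_area_struct *vma,
                                            void *arg)
    {
            struct page_referenced_arg *pra = arg;
            struct mem_cgroup *memcg = pra->memcg;

            /* foreign cgroup: report the vma as "invalid" to skip it */
            if (!mm_match_cgroup(vma->vm_mm, memcg))
                    return true;

            return false;
    }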
userfaultfd.c
    28    struct mem_cgroup *memcg;    in mcopy_atomic_pte() local
    69    if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg, false))    in mcopy_atomic_pte()
    92    mem_cgroup_commit_charge(page, memcg, false, false);    in mcopy_atomic_pte()
    106   mem_cgroup_cancel_charge(page, memcg, false);    in mcopy_atomic_pte()
memory.c
    2348  struct mem_cgroup *memcg;    in wp_page_copy() local
    2367  if (mem_cgroup_try_charge_delay(new_page, mm, GFP_KERNEL, &memcg, false))    in wp_page_copy()
    2402  mem_cgroup_commit_charge(new_page, memcg, false, false);    in wp_page_copy()
    2441  mem_cgroup_cancel_charge(new_page, memcg, false);    in wp_page_copy()
    2774  struct mem_cgroup *memcg;    in do_swap_page() local
    2880  &memcg, false)) {    in do_swap_page()
    2927  mem_cgroup_commit_charge(page, memcg, false, false);    in do_swap_page()
    2931  mem_cgroup_commit_charge(page, memcg, true, false);    in do_swap_page()
    2967  mem_cgroup_cancel_charge(page, memcg, false);    in do_swap_page()
    2988  struct mem_cgroup *memcg;    in do_anonymous_page() local
    [all …]
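memory.c demonstrates the pre-cgroup-v2-rework charge protocol that most entries here repeat (huge_memory.c and khugepaged.c above with compound == true; shmem.c, migrate.c, swapfile.c, and filemap.c below): try_charge reserves against the cgroup before the page becomes visible, commit binds page to memcg once the mapping is in place, and cancel backs out on any intervening failure. A condensed, hedged sketch of the wp_page_copy() shape; pte_map_succeeded() is a hypothetical stand-in for the real "retake the PTL and recheck the pte" step:

    static vm_fault_t charge_pattern(struct vm_fault *vmf,
                                     struct page *new_page)
    {
            struct vm_area_struct *vma = vmf->vma;
            struct mem_cgroup *memcg;

            /* 1. Try: reserves against the limit; may reclaim or OOM
             * inside the cgroup, and can fail. */
            if (mem_cgroup_try_charge_delay(new_page, vma->vm_mm,
                                            GFP_KERNEL, &memcg, false))
                    return VM_FAULT_OOM;

            if (!pte_map_succeeded(vmf)) {          /* hypothetical */
                    /* 2b. Cancel: the page never became visible. */
                    mem_cgroup_cancel_charge(new_page, memcg, false);
                    return 0;                       /* retry the fault */
            }

            /* 2a. Commit: lrucare == false because a freshly allocated
             * page cannot be on any LRU list yet. */
            mem_cgroup_commit_charge(new_page, memcg, false, false);
            lru_cache_add_active_or_unevictable(new_page, vma);
            return 0;
    }

The _delay variant used here appears to additionally throttle the task when swap is congested, via mem_cgroup_throttle_swaprate(), which is visible in the swapfile.c entry below.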
backing-dev.c
    529   struct mem_cgroup *memcg;    in cgwb_create() local
    537   memcg = mem_cgroup_from_css(memcg_css);    in cgwb_create()
    540   memcg_cgwb_list = &memcg->cgwb_list;    in cgwb_create()
    743   void wb_memcg_offline(struct mem_cgroup *memcg)    in wb_memcg_offline() argument
    745   struct list_head *memcg_cgwb_list = &memcg->cgwb_list;    in wb_memcg_offline()
shmem.c
    1634  struct mem_cgroup *memcg;    in shmem_swapin_page() local
    1679  error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,    in shmem_swapin_page()
    1695  mem_cgroup_cancel_charge(page, memcg, false);    in shmem_swapin_page()
    1702  mem_cgroup_commit_charge(page, memcg, true, false);    in shmem_swapin_page()
    1749  struct mem_cgroup *memcg;    in shmem_getpage_gfp() local
    1871  error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,    in shmem_getpage_gfp()
    1878  mem_cgroup_cancel_charge(page, memcg,    in shmem_getpage_gfp()
    1882  mem_cgroup_commit_charge(page, memcg, false,    in shmem_getpage_gfp()
    2318  struct mem_cgroup *memcg;    in shmem_mfill_atomic_pte() local
    2368  ret = mem_cgroup_try_charge_delay(page, dst_mm, gfp, &memcg, false);    in shmem_mfill_atomic_pte()
    [all …]
migrate.c
    2690  struct mem_cgroup *memcg;    in migrate_vma_insert_page() local
    2737  if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false))    in migrate_vma_insert_page()
    2767  mem_cgroup_cancel_charge(page, memcg, false);    in migrate_vma_insert_page()
    2773  mem_cgroup_cancel_charge(page, memcg, false);    in migrate_vma_insert_page()
    2783  mem_cgroup_cancel_charge(page, memcg, false);    in migrate_vma_insert_page()
    2789  mem_cgroup_commit_charge(page, memcg, false, false);    in migrate_vma_insert_page()
swapfile.c
    1857  struct mem_cgroup *memcg;    in unuse_pte() local
    1868  &memcg, false)) {    in unuse_pte()
    1875  mem_cgroup_cancel_charge(page, memcg, false);    in unuse_pte()
    1887  mem_cgroup_commit_charge(page, memcg, true, false);    in unuse_pte()
    1890  mem_cgroup_commit_charge(page, memcg, false, false);    in unuse_pte()
    3741  void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node,    in mem_cgroup_throttle_swaprate() argument
    3745  if (!(gfp_mask & __GFP_IO) || !memcg)    in mem_cgroup_throttle_swaprate()
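unuse_pte() (lines 1887/1890) is the one entry where both values of commit's lrucare flag appear side by side: true when the page being re-inserted is the existing swapcache page, which may already sit on an LRU list, and false for a fresh copy that nothing else can see yet. The surrounding branch, reconstructed with hedged context; the rmap calls are how this era's unuse_pte() distinguishes the two cases:

    if (page == swapcache) {
            page_add_anon_rmap(page, vma, addr, false);
            /* page may already be on an LRU: lrucare == true */
            mem_cgroup_commit_charge(page, memcg, true, false);
    } else {        /* fresh copy, invisible until now */
            page_add_new_anon_rmap(page, vma, addr, false);
            mem_cgroup_commit_charge(page, memcg, false, false);
            lru_cache_add_active_or_unevictable(page, vma);
    }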
filemap.c
    857   struct mem_cgroup *memcg;    in __add_to_page_cache_locked() local
    867   gfp_mask, &memcg, false);    in __add_to_page_cache_locked()
    903   mem_cgroup_commit_charge(page, memcg, false, false);    in __add_to_page_cache_locked()
    910   mem_cgroup_cancel_charge(page, memcg, false);    in __add_to_page_cache_locked()
page-writeback.c
    2717  struct mem_cgroup *memcg;    in test_clear_page_writeback() local
    2721  memcg = lock_page_memcg(page);    in test_clear_page_writeback()
    2760  __unlock_page_memcg(memcg);    in test_clear_page_writeback()
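test_clear_page_writeback() brackets its stat updates with lock_page_memcg()/__unlock_page_memcg() (lines 2721/2760): the lock pins page->mem_cgroup so the page cannot migrate to another cgroup mid-update, and, as the hits show, it returns the memcg so the unlock does not have to re-derive it. A minimal usage sketch; the function name is hypothetical:

    static void end_writeback_accounting(struct page *page)
    {
            struct mem_cgroup *memcg;

            /* pins page->mem_cgroup against cgroup migration */
            memcg = lock_page_memcg(page);

            /* ... test-and-clear PG_writeback; update per-memcg and
             * per-node writeback stats while the binding is stable ... */

            __unlock_page_memcg(memcg);     /* unlock takes the memcg */
    }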
page_alloc.c
    3798  .memcg = NULL,    in __alloc_pages_may_oom()