Lines matching refs:memcg

Identifier cross-reference: each entry below shows the source line number, the matching line of code, the enclosing function, and how memcg is used at that site (struct member, function argument, or local variable). The functions listed are consistent with the Linux kernel's memory controller, mm/memcontrol.c.
143 struct mem_cgroup *memcg; member
157 int (*register_event)(struct mem_cgroup *memcg,
164 void (*unregister_event)(struct mem_cgroup *memcg,
176 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
177 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
256 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg) in memcg_to_vmpressure() argument
258 if (!memcg) in memcg_to_vmpressure()
259 memcg = root_mem_cgroup; in memcg_to_vmpressure()
260 return &memcg->vmpressure; in memcg_to_vmpressure()
331 static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg, in memcg_expand_one_shrinker_map() argument
341 mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true); in memcg_expand_one_shrinker_map()
354 rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new); in memcg_expand_one_shrinker_map()
361 static void memcg_free_shrinker_maps(struct mem_cgroup *memcg) in memcg_free_shrinker_maps() argument
367 if (mem_cgroup_is_root(memcg)) in memcg_free_shrinker_maps()
371 pn = mem_cgroup_nodeinfo(memcg, nid); in memcg_free_shrinker_maps()
379 static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg) in memcg_alloc_shrinker_maps() argument
384 if (mem_cgroup_is_root(memcg)) in memcg_alloc_shrinker_maps()
392 memcg_free_shrinker_maps(memcg); in memcg_alloc_shrinker_maps()
396 rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map); in memcg_alloc_shrinker_maps()
406 struct mem_cgroup *memcg; in memcg_expand_shrinker_maps() local
417 for_each_mem_cgroup(memcg) { in memcg_expand_shrinker_maps()
418 if (mem_cgroup_is_root(memcg)) in memcg_expand_shrinker_maps()
420 ret = memcg_expand_one_shrinker_map(memcg, size, old_size); in memcg_expand_shrinker_maps()
431 void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id) in memcg_set_shrinker_bit() argument
433 if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) { in memcg_set_shrinker_bit()
437 map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map); in memcg_set_shrinker_bit()
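
The references above, memcg_expand_one_shrinker_map() through memcg_set_shrinker_bit(), cover the per-memcg, per-node shrinker bitmap: the map is grown with old bits preserved when shrinker IDs outgrow it, published via rcu_assign_pointer(), and individual bits are set when a memcg gains objects for a shrinker. A minimal userspace sketch of the grow-and-set steps, with illustrative names and a plain pointer swap standing in for RCU:

/* Editor's sketch, not kernel code: growing a shrinker bitmap while
 * keeping already-set bits, as memcg_expand_one_shrinker_map() does.
 * free() stands in for the RCU-deferred free of the old map. */
#include <limits.h>
#include <stdlib.h>
#include <string.h>

struct shrinker_map {
    size_t size;             /* bytes of bitmap payload */
    unsigned long map[];     /* one bit per shrinker id */
};

static struct shrinker_map *expand_map(struct shrinker_map *old,
                                       size_t new_size)
{
    struct shrinker_map *new = calloc(1, sizeof(*new) + new_size);

    if (!new)
        return NULL;
    if (old) {
        memcpy(new->map, old->map, old->size);  /* keep set bits */
        free(old);     /* kernel defers this past an RCU grace period */
    }
    new->size = new_size;
    return new;
}

static void set_shrinker_bit(struct shrinker_map *m, int shrinker_id)
{
    const int bits = CHAR_BIT * sizeof(unsigned long);

    m->map[shrinker_id / bits] |= 1UL << (shrinker_id % bits);
}
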
458 struct mem_cgroup *memcg; in mem_cgroup_css_from_page() local
460 memcg = page->mem_cgroup; in mem_cgroup_css_from_page()
462 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) in mem_cgroup_css_from_page()
463 memcg = root_mem_cgroup; in mem_cgroup_css_from_page()
465 return &memcg->css; in mem_cgroup_css_from_page()
483 struct mem_cgroup *memcg; in page_cgroup_ino() local
488 memcg = memcg_from_slab_page(page); in page_cgroup_ino()
490 memcg = READ_ONCE(page->mem_cgroup); in page_cgroup_ino()
491 while (memcg && !(memcg->css.flags & CSS_ONLINE)) in page_cgroup_ino()
492 memcg = parent_mem_cgroup(memcg); in page_cgroup_ino()
493 if (memcg) in page_cgroup_ino()
494 ino = cgroup_ino(memcg->css.cgroup); in page_cgroup_ino()
500 mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page) in mem_cgroup_page_nodeinfo() argument
504 return memcg->nodeinfo[nid]; in mem_cgroup_page_nodeinfo()
584 static unsigned long soft_limit_excess(struct mem_cgroup *memcg) in soft_limit_excess() argument
586 unsigned long nr_pages = page_counter_read(&memcg->memory); in soft_limit_excess()
587 unsigned long soft_limit = READ_ONCE(memcg->soft_limit); in soft_limit_excess()
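
soft_limit_excess() above is a clamped difference, usage minus soft limit in pages; a one-line model with an illustrative name:

/* Editor's sketch: pages of usage above the soft limit, clamped at
 * zero -- the same arithmetic as soft_limit_excess() above. */
static unsigned long soft_limit_excess_model(unsigned long nr_pages,
                                             unsigned long soft_limit)
{
    return nr_pages > soft_limit ? nr_pages - soft_limit : 0;
}
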
596 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page) in mem_cgroup_update_tree() argument
609 for (; memcg; memcg = parent_mem_cgroup(memcg)) { in mem_cgroup_update_tree()
610 mz = mem_cgroup_page_nodeinfo(memcg, page); in mem_cgroup_update_tree()
611 excess = soft_limit_excess(memcg); in mem_cgroup_update_tree()
633 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg) in mem_cgroup_remove_from_trees() argument
640 mz = mem_cgroup_nodeinfo(memcg, nid); in mem_cgroup_remove_from_trees()
665 if (!soft_limit_excess(mz->memcg) || in __mem_cgroup_largest_soft_limit_node()
666 !css_tryget_online(&mz->memcg->css)) in __mem_cgroup_largest_soft_limit_node()
689 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val) in __mod_memcg_state() argument
696 x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]); in __mod_memcg_state()
704 __this_cpu_add(memcg->vmstats_local->stat[idx], x); in __mod_memcg_state()
705 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) in __mod_memcg_state()
709 __this_cpu_write(memcg->vmstats_percpu->stat[idx], x); in __mod_memcg_state()
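
__mod_memcg_state() accumulates small deltas in a per-cpu slot and, once the magnitude exceeds a batch size, folds the value into the atomic counters of the memcg and all of its ancestors. A userspace sketch of that batching (BATCH and the field names are illustrative; the kernel constant is MEMCG_CHARGE_BATCH):

/* Editor's sketch, not kernel code: batched stat update with
 * hierarchical flush. Small deltas stay in a cheap local slot
 * (per-cpu in the kernel); once |delta| crosses BATCH, the value is
 * added to the atomic counter of the group and every ancestor. */
#include <stdatomic.h>

#define BATCH 32   /* stands in for MEMCG_CHARGE_BATCH */

struct group {
    struct group *parent;
    long local_delta;        /* per-cpu slot in the kernel */
    atomic_long vmstat;      /* hierarchical counter */
};

static void mod_state(struct group *g, long val)
{
    long x = g->local_delta + val;

    if (x > BATCH || x < -BATCH) {
        struct group *mi;

        for (mi = g; mi; mi = mi->parent)
            atomic_fetch_add(&mi->vmstat, x);   /* flush upward */
        x = 0;
    }
    g->local_delta = x;      /* remainder stays local */
}
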
717 parent = parent_mem_cgroup(pn->memcg); in parent_nodeinfo()
738 struct mem_cgroup *memcg; in __mod_lruvec_state() local
748 memcg = pn->memcg; in __mod_lruvec_state()
751 __mod_memcg_state(memcg, idx, val); in __mod_lruvec_state()
771 struct mem_cgroup *memcg; in __mod_lruvec_slab_state() local
775 memcg = memcg_from_slab_page(page); in __mod_lruvec_slab_state()
778 if (!memcg || memcg == root_mem_cgroup) { in __mod_lruvec_slab_state()
781 lruvec = mem_cgroup_lruvec(pgdat, memcg); in __mod_lruvec_slab_state()
793 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, in __count_memcg_events() argument
801 x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]); in __count_memcg_events()
809 __this_cpu_add(memcg->vmstats_local->events[idx], x); in __count_memcg_events()
810 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) in __count_memcg_events()
814 __this_cpu_write(memcg->vmstats_percpu->events[idx], x); in __count_memcg_events()
817 static unsigned long memcg_events(struct mem_cgroup *memcg, int event) in memcg_events() argument
819 return atomic_long_read(&memcg->vmevents[event]); in memcg_events()
822 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event) in memcg_events_local() argument
828 x += per_cpu(memcg->vmstats_local->events[event], cpu); in memcg_events_local()
832 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, in mem_cgroup_charge_statistics() argument
841 __mod_memcg_state(memcg, MEMCG_RSS, nr_pages); in mem_cgroup_charge_statistics()
843 __mod_memcg_state(memcg, MEMCG_CACHE, nr_pages); in mem_cgroup_charge_statistics()
845 __mod_memcg_state(memcg, NR_SHMEM, nr_pages); in mem_cgroup_charge_statistics()
850 __mod_memcg_state(memcg, MEMCG_RSS_HUGE, nr_pages); in mem_cgroup_charge_statistics()
855 __count_memcg_events(memcg, PGPGIN, 1); in mem_cgroup_charge_statistics()
857 __count_memcg_events(memcg, PGPGOUT, 1); in mem_cgroup_charge_statistics()
861 __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages); in mem_cgroup_charge_statistics()
864 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg, in mem_cgroup_event_ratelimit() argument
869 val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events); in mem_cgroup_event_ratelimit()
870 next = __this_cpu_read(memcg->vmstats_percpu->targets[target]); in mem_cgroup_event_ratelimit()
886 __this_cpu_write(memcg->vmstats_percpu->targets[target], next); in mem_cgroup_event_ratelimit()
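
mem_cgroup_event_ratelimit() compares the per-cpu page-event count against a saved per-target value and fires only once that target has been passed, using signed subtraction so the test stays correct across counter wrap. A sketch of that check (interval selection simplified; the per-target constants are not shown in the lines above):

/* Editor's sketch: wrap-safe event rate limiting, in the spirit of
 * mem_cgroup_event_ratelimit(). 'val' counts page events; '*next'
 * remembers when this target should fire again. */
static int event_ratelimit(unsigned long val, unsigned long *next,
                           unsigned long interval)
{
    if ((long)(val - *next) > 0) {       /* signed diff survives wrap */
        *next = val + interval;          /* schedule the next firing */
        return 1;
    }
    return 0;
}
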
896 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page) in memcg_check_events() argument
899 if (unlikely(mem_cgroup_event_ratelimit(memcg, in memcg_check_events()
904 do_softlimit = mem_cgroup_event_ratelimit(memcg, in memcg_check_events()
907 do_numainfo = mem_cgroup_event_ratelimit(memcg, in memcg_check_events()
910 mem_cgroup_threshold(memcg); in memcg_check_events()
912 mem_cgroup_update_tree(memcg, page); in memcg_check_events()
915 atomic_inc(&memcg->numainfo_events); in memcg_check_events()
944 struct mem_cgroup *memcg; in get_mem_cgroup_from_mm() local
957 memcg = root_mem_cgroup; in get_mem_cgroup_from_mm()
959 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); in get_mem_cgroup_from_mm()
960 if (unlikely(!memcg)) in get_mem_cgroup_from_mm()
961 memcg = root_mem_cgroup; in get_mem_cgroup_from_mm()
963 } while (!css_tryget(&memcg->css)); in get_mem_cgroup_from_mm()
965 return memcg; in get_mem_cgroup_from_mm()
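
get_mem_cgroup_from_mm() resolves mm->owner under RCU and loops until css_tryget() succeeds, falling back to root_mem_cgroup when there is no owner memcg. A sketch of the underlying tryget-or-retry pattern, with a C11 atomic standing in for the css refcount:

/* Editor's sketch, not kernel code: a reference may only be taken
 * while the count is still positive; on failure the caller
 * re-resolves the object and tries again, e.g.
 *     do { o = lookup(); } while (!tryget(o));              */
#include <stdatomic.h>
#include <stdbool.h>

struct obj {
    atomic_int refcnt;       /* 0: object is on its way out */
};

static bool tryget(struct obj *o)
{
    int old = atomic_load(&o->refcnt);

    while (old > 0)
        if (atomic_compare_exchange_weak(&o->refcnt, &old, old + 1))
            return true;     /* got a reference */
    return false;            /* lost the race; look the object up again */
}
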
978 struct mem_cgroup *memcg = page->mem_cgroup; in get_mem_cgroup_from_page() local
984 if (!memcg || !css_tryget_online(&memcg->css)) in get_mem_cgroup_from_page()
985 memcg = root_mem_cgroup; in get_mem_cgroup_from_page()
987 return memcg; in get_mem_cgroup_from_page()
997 struct mem_cgroup *memcg = root_mem_cgroup; in get_mem_cgroup_from_current() local
1001 memcg = current->active_memcg; in get_mem_cgroup_from_current()
1003 return memcg; in get_mem_cgroup_from_current()
1031 struct mem_cgroup *memcg = NULL; in mem_cgroup_iter() local
1098 memcg = mem_cgroup_from_css(css); in mem_cgroup_iter()
1106 memcg = NULL; in mem_cgroup_iter()
1115 (void)cmpxchg(&iter->position, pos, memcg); in mem_cgroup_iter()
1120 if (!memcg) in mem_cgroup_iter()
1132 return memcg; in mem_cgroup_iter()
1169 struct mem_cgroup *memcg = dead_memcg; in invalidate_reclaim_iterators() local
1173 __invalidate_reclaim_iterators(memcg, dead_memcg); in invalidate_reclaim_iterators()
1174 last = memcg; in invalidate_reclaim_iterators()
1175 } while ((memcg = parent_mem_cgroup(memcg))); in invalidate_reclaim_iterators()
1201 int mem_cgroup_scan_tasks(struct mem_cgroup *memcg, in mem_cgroup_scan_tasks() argument
1207 BUG_ON(memcg == root_mem_cgroup); in mem_cgroup_scan_tasks()
1209 for_each_mem_cgroup_tree(iter, memcg) { in mem_cgroup_scan_tasks()
1218 mem_cgroup_iter_break(memcg, iter); in mem_cgroup_scan_tasks()
1237 struct mem_cgroup *memcg; in mem_cgroup_page_lruvec() local
1245 memcg = page->mem_cgroup; in mem_cgroup_page_lruvec()
1250 if (!memcg) in mem_cgroup_page_lruvec()
1251 memcg = root_mem_cgroup; in mem_cgroup_page_lruvec()
1253 mz = mem_cgroup_page_nodeinfo(memcg, page); in mem_cgroup_page_lruvec()
1312 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg) in mem_cgroup_margin() argument
1318 count = page_counter_read(&memcg->memory); in mem_cgroup_margin()
1319 limit = READ_ONCE(memcg->memory.max); in mem_cgroup_margin()
1324 count = page_counter_read(&memcg->memsw); in mem_cgroup_margin()
1325 limit = READ_ONCE(memcg->memsw.max); in mem_cgroup_margin()
1342 static bool mem_cgroup_under_move(struct mem_cgroup *memcg) in mem_cgroup_under_move() argument
1357 ret = mem_cgroup_is_descendant(from, memcg) || in mem_cgroup_under_move()
1358 mem_cgroup_is_descendant(to, memcg); in mem_cgroup_under_move()
1364 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg) in mem_cgroup_wait_acct_move() argument
1367 if (mem_cgroup_under_move(memcg)) { in mem_cgroup_wait_acct_move()
1380 static char *memory_stat_format(struct mem_cgroup *memcg) in memory_stat_format() argument
1401 (u64)memcg_page_state(memcg, MEMCG_RSS) * in memory_stat_format()
1404 (u64)memcg_page_state(memcg, MEMCG_CACHE) * in memory_stat_format()
1407 (u64)memcg_page_state(memcg, MEMCG_KERNEL_STACK_KB) * in memory_stat_format()
1410 (u64)(memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) + in memory_stat_format()
1411 memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE)) * in memory_stat_format()
1414 (u64)memcg_page_state(memcg, MEMCG_SOCK) * in memory_stat_format()
1418 (u64)memcg_page_state(memcg, NR_SHMEM) * in memory_stat_format()
1421 (u64)memcg_page_state(memcg, NR_FILE_MAPPED) * in memory_stat_format()
1424 (u64)memcg_page_state(memcg, NR_FILE_DIRTY) * in memory_stat_format()
1427 (u64)memcg_page_state(memcg, NR_WRITEBACK) * in memory_stat_format()
1437 (u64)memcg_page_state(memcg, MEMCG_RSS_HUGE) * in memory_stat_format()
1442 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) * in memory_stat_format()
1446 (u64)memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) * in memory_stat_format()
1449 (u64)memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE) * in memory_stat_format()
1454 seq_buf_printf(&s, "pgfault %lu\n", memcg_events(memcg, PGFAULT)); in memory_stat_format()
1455 seq_buf_printf(&s, "pgmajfault %lu\n", memcg_events(memcg, PGMAJFAULT)); in memory_stat_format()
1458 memcg_page_state(memcg, WORKINGSET_REFAULT)); in memory_stat_format()
1460 memcg_page_state(memcg, WORKINGSET_ACTIVATE)); in memory_stat_format()
1462 memcg_page_state(memcg, WORKINGSET_NODERECLAIM)); in memory_stat_format()
1464 seq_buf_printf(&s, "pgrefill %lu\n", memcg_events(memcg, PGREFILL)); in memory_stat_format()
1466 memcg_events(memcg, PGSCAN_KSWAPD) + in memory_stat_format()
1467 memcg_events(memcg, PGSCAN_DIRECT)); in memory_stat_format()
1469 memcg_events(memcg, PGSTEAL_KSWAPD) + in memory_stat_format()
1470 memcg_events(memcg, PGSTEAL_DIRECT)); in memory_stat_format()
1471 seq_buf_printf(&s, "pgactivate %lu\n", memcg_events(memcg, PGACTIVATE)); in memory_stat_format()
1472 seq_buf_printf(&s, "pgdeactivate %lu\n", memcg_events(memcg, PGDEACTIVATE)); in memory_stat_format()
1473 seq_buf_printf(&s, "pglazyfree %lu\n", memcg_events(memcg, PGLAZYFREE)); in memory_stat_format()
1474 seq_buf_printf(&s, "pglazyfreed %lu\n", memcg_events(memcg, PGLAZYFREED)); in memory_stat_format()
1478 memcg_events(memcg, THP_FAULT_ALLOC)); in memory_stat_format()
1480 memcg_events(memcg, THP_COLLAPSE_ALLOC)); in memory_stat_format()
1499 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p) in mem_cgroup_print_oom_context() argument
1503 if (memcg) { in mem_cgroup_print_oom_context()
1505 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_context()
1520 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg) in mem_cgroup_print_oom_meminfo() argument
1525 K((u64)page_counter_read(&memcg->memory)), in mem_cgroup_print_oom_meminfo()
1526 K((u64)memcg->memory.max), memcg->memory.failcnt); in mem_cgroup_print_oom_meminfo()
1529 K((u64)page_counter_read(&memcg->swap)), in mem_cgroup_print_oom_meminfo()
1530 K((u64)memcg->swap.max), memcg->swap.failcnt); in mem_cgroup_print_oom_meminfo()
1533 K((u64)page_counter_read(&memcg->memsw)), in mem_cgroup_print_oom_meminfo()
1534 K((u64)memcg->memsw.max), memcg->memsw.failcnt); in mem_cgroup_print_oom_meminfo()
1536 K((u64)page_counter_read(&memcg->kmem)), in mem_cgroup_print_oom_meminfo()
1537 K((u64)memcg->kmem.max), memcg->kmem.failcnt); in mem_cgroup_print_oom_meminfo()
1541 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_meminfo()
1543 buf = memory_stat_format(memcg); in mem_cgroup_print_oom_meminfo()
1553 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg) in mem_cgroup_get_max() argument
1557 max = memcg->memory.max; in mem_cgroup_get_max()
1558 if (mem_cgroup_swappiness(memcg)) { in mem_cgroup_get_max()
1562 memsw_max = memcg->memsw.max; in mem_cgroup_get_max()
1563 swap_max = memcg->swap.max; in mem_cgroup_get_max()
1570 unsigned long mem_cgroup_size(struct mem_cgroup *memcg) in mem_cgroup_size() argument
1572 return page_counter_read(&memcg->memory); in mem_cgroup_size()
1575 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, in mem_cgroup_out_of_memory() argument
1581 .memcg = memcg, in mem_cgroup_out_of_memory()
1610 static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg, in test_mem_cgroup_node_reclaimable() argument
1613 struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg); in test_mem_cgroup_node_reclaimable()
1633 static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg) in mem_cgroup_may_update_nodemask() argument
1640 if (!atomic_read(&memcg->numainfo_events)) in mem_cgroup_may_update_nodemask()
1642 if (atomic_inc_return(&memcg->numainfo_updating) > 1) in mem_cgroup_may_update_nodemask()
1646 memcg->scan_nodes = node_states[N_MEMORY]; in mem_cgroup_may_update_nodemask()
1650 if (!test_mem_cgroup_node_reclaimable(memcg, nid, false)) in mem_cgroup_may_update_nodemask()
1651 node_clear(nid, memcg->scan_nodes); in mem_cgroup_may_update_nodemask()
1654 atomic_set(&memcg->numainfo_events, 0); in mem_cgroup_may_update_nodemask()
1655 atomic_set(&memcg->numainfo_updating, 0); in mem_cgroup_may_update_nodemask()
1670 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg) in mem_cgroup_select_victim_node() argument
1674 mem_cgroup_may_update_nodemask(memcg); in mem_cgroup_select_victim_node()
1675 node = memcg->last_scanned_node; in mem_cgroup_select_victim_node()
1677 node = next_node_in(node, memcg->scan_nodes); in mem_cgroup_select_victim_node()
1686 memcg->last_scanned_node = node; in mem_cgroup_select_victim_node()
1690 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg) in mem_cgroup_select_victim_node() argument
1759 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg) in mem_cgroup_oom_trylock() argument
1765 for_each_mem_cgroup_tree(iter, memcg) { in mem_cgroup_oom_trylock()
1772 mem_cgroup_iter_break(memcg, iter); in mem_cgroup_oom_trylock()
1783 for_each_mem_cgroup_tree(iter, memcg) { in mem_cgroup_oom_trylock()
1785 mem_cgroup_iter_break(memcg, iter); in mem_cgroup_oom_trylock()
1798 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg) in mem_cgroup_oom_unlock() argument
1804 for_each_mem_cgroup_tree(iter, memcg) in mem_cgroup_oom_unlock()
1809 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg) in mem_cgroup_mark_under_oom() argument
1814 for_each_mem_cgroup_tree(iter, memcg) in mem_cgroup_mark_under_oom()
1819 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg) in mem_cgroup_unmark_under_oom() argument
1828 for_each_mem_cgroup_tree(iter, memcg) in mem_cgroup_unmark_under_oom()
1837 struct mem_cgroup *memcg; member
1849 oom_wait_memcg = oom_wait_info->memcg; in memcg_oom_wake_function()
1857 static void memcg_oom_recover(struct mem_cgroup *memcg) in memcg_oom_recover() argument
1867 if (memcg && memcg->under_oom) in memcg_oom_recover()
1868 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg); in memcg_oom_recover()
1878 static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) in mem_cgroup_oom() argument
1886 memcg_memory_event(memcg, MEMCG_OOM); in mem_cgroup_oom()
1906 if (memcg->oom_kill_disable) { in mem_cgroup_oom()
1909 css_get(&memcg->css); in mem_cgroup_oom()
1910 current->memcg_in_oom = memcg; in mem_cgroup_oom()
1917 mem_cgroup_mark_under_oom(memcg); in mem_cgroup_oom()
1919 locked = mem_cgroup_oom_trylock(memcg); in mem_cgroup_oom()
1922 mem_cgroup_oom_notify(memcg); in mem_cgroup_oom()
1924 mem_cgroup_unmark_under_oom(memcg); in mem_cgroup_oom()
1925 if (mem_cgroup_out_of_memory(memcg, mask, order)) in mem_cgroup_oom()
1931 mem_cgroup_oom_unlock(memcg); in mem_cgroup_oom()
1955 struct mem_cgroup *memcg = current->memcg_in_oom; in mem_cgroup_oom_synchronize() local
1960 if (!memcg) in mem_cgroup_oom_synchronize()
1966 owait.memcg = memcg; in mem_cgroup_oom_synchronize()
1973 mem_cgroup_mark_under_oom(memcg); in mem_cgroup_oom_synchronize()
1975 locked = mem_cgroup_oom_trylock(memcg); in mem_cgroup_oom_synchronize()
1978 mem_cgroup_oom_notify(memcg); in mem_cgroup_oom_synchronize()
1980 if (locked && !memcg->oom_kill_disable) { in mem_cgroup_oom_synchronize()
1981 mem_cgroup_unmark_under_oom(memcg); in mem_cgroup_oom_synchronize()
1983 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask, in mem_cgroup_oom_synchronize()
1987 mem_cgroup_unmark_under_oom(memcg); in mem_cgroup_oom_synchronize()
1992 mem_cgroup_oom_unlock(memcg); in mem_cgroup_oom_synchronize()
1998 memcg_oom_recover(memcg); in mem_cgroup_oom_synchronize()
2002 css_put(&memcg->css); in mem_cgroup_oom_synchronize()
2020 struct mem_cgroup *memcg; in mem_cgroup_get_oom_group() local
2030 memcg = mem_cgroup_from_task(victim); in mem_cgroup_get_oom_group()
2031 if (memcg == root_mem_cgroup) in mem_cgroup_get_oom_group()
2039 for (; memcg; memcg = parent_mem_cgroup(memcg)) { in mem_cgroup_get_oom_group()
2040 if (memcg->oom_group) in mem_cgroup_get_oom_group()
2041 oom_group = memcg; in mem_cgroup_get_oom_group()
2043 if (memcg == oom_domain) in mem_cgroup_get_oom_group()
2055 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg) in mem_cgroup_print_oom_group() argument
2058 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_group()
2075 struct mem_cgroup *memcg; in lock_page_memcg() local
2094 memcg = page->mem_cgroup; in lock_page_memcg()
2095 if (unlikely(!memcg)) in lock_page_memcg()
2098 if (atomic_read(&memcg->moving_account) <= 0) in lock_page_memcg()
2099 return memcg; in lock_page_memcg()
2101 spin_lock_irqsave(&memcg->move_lock, flags); in lock_page_memcg()
2102 if (memcg != page->mem_cgroup) { in lock_page_memcg()
2103 spin_unlock_irqrestore(&memcg->move_lock, flags); in lock_page_memcg()
2112 memcg->move_lock_task = current; in lock_page_memcg()
2113 memcg->move_lock_flags = flags; in lock_page_memcg()
2115 return memcg; in lock_page_memcg()
2125 void __unlock_page_memcg(struct mem_cgroup *memcg) in __unlock_page_memcg() argument
2127 if (memcg && memcg->move_lock_task == current) { in __unlock_page_memcg()
2128 unsigned long flags = memcg->move_lock_flags; in __unlock_page_memcg()
2130 memcg->move_lock_task = NULL; in __unlock_page_memcg()
2131 memcg->move_lock_flags = 0; in __unlock_page_memcg()
2133 spin_unlock_irqrestore(&memcg->move_lock, flags); in __unlock_page_memcg()
2170 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) in consume_stock() argument
2182 if (memcg == stock->cached && stock->nr_pages >= nr_pages) { in consume_stock()
2231 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) in refill_stock() argument
2239 if (stock->cached != memcg) { /* reset if necessary */ in refill_stock()
2241 stock->cached = memcg; in refill_stock()
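
consume_stock() and refill_stock() maintain a small per-cpu cache of pre-charged pages for a single memcg, so the hot charge path can skip the shared page counters. A single-slot sketch of that cache (the kernel's drain of an evicted owner is reduced to a comment):

/* Editor's sketch, not kernel code: one-slot charge cache modeling
 * the per-cpu stock used by consume_stock()/refill_stock(). */
#include <stdbool.h>

struct stock {
    void *cached;             /* memcg the cached pages are charged to */
    unsigned int nr_pages;    /* pre-charged pages on hand */
};

static bool consume_stock(struct stock *s, void *memcg,
                          unsigned int nr_pages)
{
    if (memcg == s->cached && s->nr_pages >= nr_pages) {
        s->nr_pages -= nr_pages;   /* served without touching counters */
        return true;
    }
    return false;                  /* slow path charges the counters */
}

static void refill_stock(struct stock *s, void *memcg,
                         unsigned int nr_pages)
{
    if (s->cached != memcg) {      /* reset if necessary */
        s->nr_pages = 0;           /* kernel drains the old pages back */
        s->cached = memcg;
    }
    s->nr_pages += nr_pages;
}
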
2271 struct mem_cgroup *memcg; in drain_all_stock() local
2275 memcg = stock->cached; in drain_all_stock()
2276 if (memcg && stock->nr_pages && in drain_all_stock()
2277 mem_cgroup_is_descendant(memcg, root_memcg)) in drain_all_stock()
2296 struct mem_cgroup *memcg, *mi; in memcg_hotplug_cpu_dead() local
2301 for_each_mem_cgroup(memcg) { in memcg_hotplug_cpu_dead()
2308 x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0); in memcg_hotplug_cpu_dead()
2310 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) in memcg_hotplug_cpu_dead()
2311 atomic_long_add(x, &memcg->vmstats[i]); in memcg_hotplug_cpu_dead()
2319 pn = mem_cgroup_nodeinfo(memcg, nid); in memcg_hotplug_cpu_dead()
2331 x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0); in memcg_hotplug_cpu_dead()
2333 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) in memcg_hotplug_cpu_dead()
2334 atomic_long_add(x, &memcg->vmevents[i]); in memcg_hotplug_cpu_dead()
2341 static void reclaim_high(struct mem_cgroup *memcg, in reclaim_high() argument
2346 if (page_counter_read(&memcg->memory) <= memcg->high) in reclaim_high()
2348 memcg_memory_event(memcg, MEMCG_HIGH); in reclaim_high()
2349 try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true); in reclaim_high()
2350 } while ((memcg = parent_mem_cgroup(memcg))); in reclaim_high()
2355 struct mem_cgroup *memcg; in high_work_func() local
2357 memcg = container_of(work, struct mem_cgroup, high_work); in high_work_func()
2358 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL); in high_work_func()
2424 struct mem_cgroup *memcg; in mem_cgroup_handle_over_high() local
2429 memcg = get_mem_cgroup_from_mm(current->mm); in mem_cgroup_handle_over_high()
2430 reclaim_high(memcg, nr_pages, GFP_KERNEL); in mem_cgroup_handle_over_high()
2445 usage = page_counter_read(&memcg->memory); in mem_cgroup_handle_over_high()
2446 high = READ_ONCE(memcg->high); in mem_cgroup_handle_over_high()
2499 css_put(&memcg->css); in mem_cgroup_handle_over_high()
2502 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, in try_charge() argument
2514 if (mem_cgroup_is_root(memcg)) in try_charge()
2517 if (consume_stock(memcg, nr_pages)) in try_charge()
2521 page_counter_try_charge(&memcg->memsw, batch, &counter)) { in try_charge()
2522 if (page_counter_try_charge(&memcg->memory, batch, &counter)) in try_charge()
2525 page_counter_uncharge(&memcg->memsw, batch); in try_charge()
2641 page_counter_charge(&memcg->memory, nr_pages); in try_charge()
2643 page_counter_charge(&memcg->memsw, nr_pages); in try_charge()
2644 css_get_many(&memcg->css, nr_pages); in try_charge()
2649 css_get_many(&memcg->css, batch); in try_charge()
2651 refill_stock(memcg, batch - nr_pages); in try_charge()
2663 if (page_counter_read(&memcg->memory) > memcg->high) { in try_charge()
2666 schedule_work(&memcg->high_work); in try_charge()
2673 } while ((memcg = parent_mem_cgroup(memcg))); in try_charge()
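
The try_charge() references above show the charge sequence: try the stock first, then page_counter_try_charge() on memory (and memsw for v1, uncharging memsw again if the memory charge fails), refill the stock with the unused part of the batch, and schedule high-limit reclaim when usage exceeds memcg->high. The page counter itself charges every level up to the root and rolls back on the first level over its max; a sketch of that all-or-nothing walk (names illustrative):

/* Editor's sketch, not kernel code: hierarchical try-charge with
 * rollback, modeling page_counter_try_charge(). Every ancestor is
 * charged; the first level that would exceed its max aborts the
 * whole charge and the already-charged levels are undone. */
#include <stdatomic.h>
#include <stdbool.h>

struct counter {
    struct counter *parent;
    atomic_ulong usage;
    unsigned long max;
};

static bool try_charge_model(struct counter *c, unsigned long n,
                             struct counter **fail)
{
    struct counter *i;

    for (i = c; i; i = i->parent) {
        unsigned long new = atomic_fetch_add(&i->usage, n) + n;

        if (new > i->max) {
            atomic_fetch_sub(&i->usage, n);   /* undo this level */
            *fail = i;                        /* report the limiter */
            goto rollback;
        }
    }
    return true;

rollback:
    for (; c != i; c = c->parent)             /* undo lower levels */
        atomic_fetch_sub(&c->usage, n);
    return false;
}
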
2678 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) in cancel_charge() argument
2680 if (mem_cgroup_is_root(memcg)) in cancel_charge()
2683 page_counter_uncharge(&memcg->memory, nr_pages); in cancel_charge()
2685 page_counter_uncharge(&memcg->memsw, nr_pages); in cancel_charge()
2687 css_put_many(&memcg->css, nr_pages); in cancel_charge()
2721 static void commit_charge(struct page *page, struct mem_cgroup *memcg, in commit_charge() argument
2749 page->mem_cgroup = memcg; in commit_charge()
2802 struct mem_cgroup *memcg; member
2811 struct mem_cgroup *memcg = cw->memcg; in memcg_kmem_cache_create_func() local
2814 memcg_create_kmem_cache(memcg, cachep); in memcg_kmem_cache_create_func()
2816 css_put(&memcg->css); in memcg_kmem_cache_create_func()
2823 static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, in memcg_schedule_kmem_cache_create() argument
2828 if (!css_tryget_online(&memcg->css)) in memcg_schedule_kmem_cache_create()
2835 cw->memcg = memcg; in memcg_schedule_kmem_cache_create()
2867 struct mem_cgroup *memcg; in memcg_kmem_get_cache() local
2880 memcg = current->active_memcg; in memcg_kmem_get_cache()
2882 memcg = mem_cgroup_from_task(current); in memcg_kmem_get_cache()
2884 if (!memcg || memcg == root_mem_cgroup) in memcg_kmem_get_cache()
2887 kmemcg_id = READ_ONCE(memcg->kmemcg_id); in memcg_kmem_get_cache()
2920 memcg_schedule_kmem_cache_create(memcg, cachep); in memcg_kmem_get_cache()
2948 struct mem_cgroup *memcg) in __memcg_kmem_charge_memcg() argument
2954 ret = try_charge(memcg, gfp, nr_pages); in __memcg_kmem_charge_memcg()
2959 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) { in __memcg_kmem_charge_memcg()
2967 page_counter_charge(&memcg->kmem, nr_pages); in __memcg_kmem_charge_memcg()
2970 cancel_charge(memcg, nr_pages); in __memcg_kmem_charge_memcg()
2986 struct mem_cgroup *memcg; in __memcg_kmem_charge() local
2992 memcg = get_mem_cgroup_from_current(); in __memcg_kmem_charge()
2993 if (!mem_cgroup_is_root(memcg)) { in __memcg_kmem_charge()
2994 ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg); in __memcg_kmem_charge()
2996 page->mem_cgroup = memcg; in __memcg_kmem_charge()
3000 css_put(&memcg->css); in __memcg_kmem_charge()
3009 void __memcg_kmem_uncharge_memcg(struct mem_cgroup *memcg, in __memcg_kmem_uncharge_memcg() argument
3013 page_counter_uncharge(&memcg->kmem, nr_pages); in __memcg_kmem_uncharge_memcg()
3015 page_counter_uncharge(&memcg->memory, nr_pages); in __memcg_kmem_uncharge_memcg()
3017 page_counter_uncharge(&memcg->memsw, nr_pages); in __memcg_kmem_uncharge_memcg()
3026 struct mem_cgroup *memcg = page->mem_cgroup; in __memcg_kmem_uncharge() local
3029 if (!memcg) in __memcg_kmem_uncharge()
3032 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); in __memcg_kmem_uncharge()
3033 __memcg_kmem_uncharge_memcg(memcg, nr_pages); in __memcg_kmem_uncharge()
3040 css_put_many(&memcg->css, nr_pages); in __memcg_kmem_uncharge()
3104 static int mem_cgroup_resize_max(struct mem_cgroup *memcg, in mem_cgroup_resize_max() argument
3111 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory; in mem_cgroup_resize_max()
3124 limits_invariant = memsw ? max >= memcg->memory.max : in mem_cgroup_resize_max()
3125 max <= memcg->memsw.max; in mem_cgroup_resize_max()
3140 drain_all_stock(memcg); in mem_cgroup_resize_max()
3145 if (!try_to_free_mem_cgroup_pages(memcg, 1, in mem_cgroup_resize_max()
3153 memcg_oom_recover(memcg); in mem_cgroup_resize_max()
3197 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat, in mem_cgroup_soft_limit_reclaim()
3212 excess = soft_limit_excess(mz->memcg); in mem_cgroup_soft_limit_reclaim()
3224 css_put(&mz->memcg->css); in mem_cgroup_soft_limit_reclaim()
3237 css_put(&next_mz->memcg->css); in mem_cgroup_soft_limit_reclaim()
3247 static inline bool memcg_has_children(struct mem_cgroup *memcg) in memcg_has_children() argument
3252 ret = css_next_child(NULL, &memcg->css); in memcg_has_children()
3262 static int mem_cgroup_force_empty(struct mem_cgroup *memcg) in mem_cgroup_force_empty() argument
3269 drain_all_stock(memcg); in mem_cgroup_force_empty()
3272 while (nr_retries && page_counter_read(&memcg->memory)) { in mem_cgroup_force_empty()
3278 progress = try_to_free_mem_cgroup_pages(memcg, 1, in mem_cgroup_force_empty()
3295 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in mem_cgroup_force_empty_write() local
3297 if (mem_cgroup_is_root(memcg)) in mem_cgroup_force_empty_write()
3299 return mem_cgroup_force_empty(memcg) ?: nbytes; in mem_cgroup_force_empty_write()
3312 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_hierarchy_write() local
3313 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent); in mem_cgroup_hierarchy_write()
3315 if (memcg->use_hierarchy == val) in mem_cgroup_hierarchy_write()
3328 if (!memcg_has_children(memcg)) in mem_cgroup_hierarchy_write()
3329 memcg->use_hierarchy = val; in mem_cgroup_hierarchy_write()
3338 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) in mem_cgroup_usage() argument
3342 if (mem_cgroup_is_root(memcg)) { in mem_cgroup_usage()
3343 val = memcg_page_state(memcg, MEMCG_CACHE) + in mem_cgroup_usage()
3344 memcg_page_state(memcg, MEMCG_RSS); in mem_cgroup_usage()
3346 val += memcg_page_state(memcg, MEMCG_SWAP); in mem_cgroup_usage()
3349 val = page_counter_read(&memcg->memory); in mem_cgroup_usage()
3351 val = page_counter_read(&memcg->memsw); in mem_cgroup_usage()
3367 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_read_u64() local
3372 counter = &memcg->memory; in mem_cgroup_read_u64()
3375 counter = &memcg->memsw; in mem_cgroup_read_u64()
3378 counter = &memcg->kmem; in mem_cgroup_read_u64()
3381 counter = &memcg->tcpmem; in mem_cgroup_read_u64()
3389 if (counter == &memcg->memory) in mem_cgroup_read_u64()
3390 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE; in mem_cgroup_read_u64()
3391 if (counter == &memcg->memsw) in mem_cgroup_read_u64()
3392 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE; in mem_cgroup_read_u64()
3401 return (u64)memcg->soft_limit * PAGE_SIZE; in mem_cgroup_read_u64()
3407 static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg) in memcg_flush_percpu_vmstats() argument
3415 stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu); in memcg_flush_percpu_vmstats()
3417 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) in memcg_flush_percpu_vmstats()
3422 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; in memcg_flush_percpu_vmstats()
3439 static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg) in memcg_flush_percpu_vmevents() argument
3450 events[i] += per_cpu(memcg->vmstats_percpu->events[i], in memcg_flush_percpu_vmevents()
3453 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) in memcg_flush_percpu_vmevents()
3459 static int memcg_online_kmem(struct mem_cgroup *memcg) in memcg_online_kmem() argument
3466 BUG_ON(memcg->kmemcg_id >= 0); in memcg_online_kmem()
3467 BUG_ON(memcg->kmem_state); in memcg_online_kmem()
3480 memcg->kmemcg_id = memcg_id; in memcg_online_kmem()
3481 memcg->kmem_state = KMEM_ONLINE; in memcg_online_kmem()
3482 INIT_LIST_HEAD(&memcg->kmem_caches); in memcg_online_kmem()
3487 static void memcg_offline_kmem(struct mem_cgroup *memcg) in memcg_offline_kmem() argument
3493 if (memcg->kmem_state != KMEM_ONLINE) in memcg_offline_kmem()
3501 memcg->kmem_state = KMEM_ALLOCATED; in memcg_offline_kmem()
3503 parent = parent_mem_cgroup(memcg); in memcg_offline_kmem()
3510 memcg_deactivate_kmem_caches(memcg, parent); in memcg_offline_kmem()
3512 kmemcg_id = memcg->kmemcg_id; in memcg_offline_kmem()
3524 css_for_each_descendant_pre(css, &memcg->css) { in memcg_offline_kmem()
3528 if (!memcg->use_hierarchy) in memcg_offline_kmem()
3538 static void memcg_free_kmem(struct mem_cgroup *memcg) in memcg_free_kmem() argument
3541 if (unlikely(memcg->kmem_state == KMEM_ONLINE)) in memcg_free_kmem()
3542 memcg_offline_kmem(memcg); in memcg_free_kmem()
3544 if (memcg->kmem_state == KMEM_ALLOCATED) { in memcg_free_kmem()
3545 WARN_ON(!list_empty(&memcg->kmem_caches)); in memcg_free_kmem()
3550 static int memcg_online_kmem(struct mem_cgroup *memcg) in memcg_online_kmem() argument
3554 static void memcg_offline_kmem(struct mem_cgroup *memcg) in memcg_offline_kmem() argument
3557 static void memcg_free_kmem(struct mem_cgroup *memcg) in memcg_free_kmem() argument
3562 static int memcg_update_kmem_max(struct mem_cgroup *memcg, in memcg_update_kmem_max() argument
3568 ret = page_counter_set_max(&memcg->kmem, max); in memcg_update_kmem_max()
3573 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max) in memcg_update_tcp_max() argument
3579 ret = page_counter_set_max(&memcg->tcpmem, max); in memcg_update_tcp_max()
3583 if (!memcg->tcpmem_active) { in memcg_update_tcp_max()
3601 memcg->tcpmem_active = true; in memcg_update_tcp_max()
3615 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in mem_cgroup_write() local
3626 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ in mem_cgroup_write()
3632 ret = mem_cgroup_resize_max(memcg, nr_pages, false); in mem_cgroup_write()
3635 ret = mem_cgroup_resize_max(memcg, nr_pages, true); in mem_cgroup_write()
3641 ret = memcg_update_kmem_max(memcg, nr_pages); in mem_cgroup_write()
3644 ret = memcg_update_tcp_max(memcg, nr_pages); in mem_cgroup_write()
3649 memcg->soft_limit = nr_pages; in mem_cgroup_write()
3659 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in mem_cgroup_reset() local
3664 counter = &memcg->memory; in mem_cgroup_reset()
3667 counter = &memcg->memsw; in mem_cgroup_reset()
3670 counter = &memcg->kmem; in mem_cgroup_reset()
3673 counter = &memcg->tcpmem; in mem_cgroup_reset()
3703 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_move_charge_write() local
3714 memcg->move_charge_at_immigrate = val; in mem_cgroup_move_charge_write()
3731 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, in mem_cgroup_node_nr_lru_pages() argument
3734 struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg); in mem_cgroup_node_nr_lru_pages()
3748 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg, in mem_cgroup_nr_lru_pages() argument
3757 nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru); in mem_cgroup_nr_lru_pages()
3778 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); in memcg_numa_stat_show() local
3781 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask); in memcg_numa_stat_show()
3784 nr = mem_cgroup_node_nr_lru_pages(memcg, nid, in memcg_numa_stat_show()
3795 for_each_mem_cgroup_tree(iter, memcg) in memcg_numa_stat_show()
3800 for_each_mem_cgroup_tree(iter, memcg) in memcg_numa_stat_show()
3851 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); in memcg_stat_show() local
3863 memcg_page_state_local(memcg, memcg1_stats[i]) * in memcg_stat_show()
3869 memcg_events_local(memcg, memcg1_events[i])); in memcg_stat_show()
3873 memcg_page_state_local(memcg, NR_LRU_BASE + i) * in memcg_stat_show()
3878 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) { in memcg_stat_show()
3892 (u64)memcg_page_state(memcg, memcg1_stats[i]) * in memcg_stat_show()
3898 (u64)memcg_events(memcg, memcg1_events[i])); in memcg_stat_show()
3902 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) * in memcg_stat_show()
3914 mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id); in memcg_stat_show()
3935 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_swappiness_read() local
3937 return mem_cgroup_swappiness(memcg); in mem_cgroup_swappiness_read()
3943 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_swappiness_write() local
3949 memcg->swappiness = val; in mem_cgroup_swappiness_write()
3956 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) in __mem_cgroup_threshold() argument
3964 t = rcu_dereference(memcg->thresholds.primary); in __mem_cgroup_threshold()
3966 t = rcu_dereference(memcg->memsw_thresholds.primary); in __mem_cgroup_threshold()
3971 usage = mem_cgroup_usage(memcg, swap); in __mem_cgroup_threshold()
4007 static void mem_cgroup_threshold(struct mem_cgroup *memcg) in mem_cgroup_threshold() argument
4009 while (memcg) { in mem_cgroup_threshold()
4010 __mem_cgroup_threshold(memcg, false); in mem_cgroup_threshold()
4012 __mem_cgroup_threshold(memcg, true); in mem_cgroup_threshold()
4014 memcg = parent_mem_cgroup(memcg); in mem_cgroup_threshold()
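
mem_cgroup_threshold() re-evaluates the eventfd thresholds at every level of the hierarchy, as the parent walk above shows. Within one level the thresholds are kept sorted and a cached index is walked down and then up, so only thresholds actually crossed since the last check fire. A sketch of that scan from my reading of __mem_cgroup_threshold(), with fire() standing in for eventfd_signal():

/* Editor's sketch, not kernel code: sorted-threshold scan. '*cur'
 * caches the index of the highest threshold at or below the
 * previous usage (-1 if none). */
static void check_thresholds(const unsigned long *thr, int n, int *cur,
                             unsigned long usage,
                             void (*fire)(unsigned long))
{
    int i;

    /* usage fell: walk down, firing thresholds now above usage */
    for (i = *cur; i >= 0 && thr[i] > usage; i--)
        fire(thr[i]);

    /* usage rose: walk up, firing thresholds now at or below usage */
    for (i++; i < n && thr[i] <= usage; i++)
        fire(thr[i]);

    *cur = i - 1;    /* highest threshold <= usage, or -1 */
}
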
4032 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) in mem_cgroup_oom_notify_cb() argument
4038 list_for_each_entry(ev, &memcg->oom_notify, list) in mem_cgroup_oom_notify_cb()
4045 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) in mem_cgroup_oom_notify() argument
4049 for_each_mem_cgroup_tree(iter, memcg) in mem_cgroup_oom_notify()
4053 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, in __mem_cgroup_usage_register_event() argument
4066 mutex_lock(&memcg->thresholds_lock); in __mem_cgroup_usage_register_event()
4069 thresholds = &memcg->thresholds; in __mem_cgroup_usage_register_event()
4070 usage = mem_cgroup_usage(memcg, false); in __mem_cgroup_usage_register_event()
4072 thresholds = &memcg->memsw_thresholds; in __mem_cgroup_usage_register_event()
4073 usage = mem_cgroup_usage(memcg, true); in __mem_cgroup_usage_register_event()
4079 __mem_cgroup_threshold(memcg, type == _MEMSWAP); in __mem_cgroup_usage_register_event()
4129 mutex_unlock(&memcg->thresholds_lock); in __mem_cgroup_usage_register_event()
4134 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg, in mem_cgroup_usage_register_event() argument
4137 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM); in mem_cgroup_usage_register_event()
4140 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg, in memsw_cgroup_usage_register_event() argument
4143 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP); in memsw_cgroup_usage_register_event()
4146 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, in __mem_cgroup_usage_unregister_event() argument
4154 mutex_lock(&memcg->thresholds_lock); in __mem_cgroup_usage_unregister_event()
4157 thresholds = &memcg->thresholds; in __mem_cgroup_usage_unregister_event()
4158 usage = mem_cgroup_usage(memcg, false); in __mem_cgroup_usage_unregister_event()
4160 thresholds = &memcg->memsw_thresholds; in __mem_cgroup_usage_unregister_event()
4161 usage = mem_cgroup_usage(memcg, true); in __mem_cgroup_usage_unregister_event()
4169 __mem_cgroup_threshold(memcg, type == _MEMSWAP); in __mem_cgroup_usage_unregister_event()
4222 mutex_unlock(&memcg->thresholds_lock); in __mem_cgroup_usage_unregister_event()
4225 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, in mem_cgroup_usage_unregister_event() argument
4228 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM); in mem_cgroup_usage_unregister_event()
4231 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg, in memsw_cgroup_usage_unregister_event() argument
4234 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP); in memsw_cgroup_usage_unregister_event()
4237 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg, in mem_cgroup_oom_register_event() argument
4249 list_add(&event->list, &memcg->oom_notify); in mem_cgroup_oom_register_event()
4252 if (memcg->under_oom) in mem_cgroup_oom_register_event()
4259 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, in mem_cgroup_oom_unregister_event() argument
4266 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { in mem_cgroup_oom_unregister_event()
4278 struct mem_cgroup *memcg = mem_cgroup_from_seq(sf); in mem_cgroup_oom_control_read() local
4280 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); in mem_cgroup_oom_control_read()
4281 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); in mem_cgroup_oom_control_read()
4283 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); in mem_cgroup_oom_control_read()
4290 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_oom_control_write() local
4296 memcg->oom_kill_disable = val; in mem_cgroup_oom_control_write()
4298 memcg_oom_recover(memcg); in mem_cgroup_oom_control_write()
4307 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) in memcg_wb_domain_init() argument
4309 return wb_domain_init(&memcg->cgwb_domain, gfp); in memcg_wb_domain_init()
4312 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) in memcg_wb_domain_exit() argument
4314 wb_domain_exit(&memcg->cgwb_domain); in memcg_wb_domain_exit()
4317 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) in memcg_wb_domain_size_changed() argument
4319 wb_domain_size_changed(&memcg->cgwb_domain); in memcg_wb_domain_size_changed()
4324 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_wb_domain() local
4326 if (!memcg->css.parent) in mem_cgroup_wb_domain()
4329 return &memcg->cgwb_domain; in mem_cgroup_wb_domain()
4336 static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx) in memcg_exact_page_state() argument
4338 long x = atomic_long_read(&memcg->vmstats[idx]); in memcg_exact_page_state()
4342 x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx]; in memcg_exact_page_state()
4370 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_wb_stats() local
4373 *pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY); in mem_cgroup_wb_stats()
4376 *pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK); in mem_cgroup_wb_stats()
4377 *pfilepages = memcg_exact_page_state(memcg, NR_INACTIVE_FILE) + in mem_cgroup_wb_stats()
4378 memcg_exact_page_state(memcg, NR_ACTIVE_FILE); in mem_cgroup_wb_stats()
4381 while ((parent = parent_mem_cgroup(memcg))) { in mem_cgroup_wb_stats()
4382 unsigned long ceiling = min(memcg->memory.max, memcg->high); in mem_cgroup_wb_stats()
4383 unsigned long used = page_counter_read(&memcg->memory); in mem_cgroup_wb_stats()
4386 memcg = parent; in mem_cgroup_wb_stats()
4437 struct mem_cgroup *memcg = page->mem_cgroup; in mem_cgroup_track_foreign_dirty_slowpath() local
4452 frn = &memcg->cgwb_frn[i]; in mem_cgroup_track_foreign_dirty_slowpath()
4479 frn = &memcg->cgwb_frn[oldest]; in mem_cgroup_track_foreign_dirty_slowpath()
4489 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_flush_foreign() local
4495 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i]; in mem_cgroup_flush_foreign()
4516 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) in memcg_wb_domain_init() argument
4521 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) in memcg_wb_domain_exit() argument
4525 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) in memcg_wb_domain_size_changed() argument
4553 struct mem_cgroup *memcg = event->memcg; in memcg_event_remove() local
4557 event->unregister_event(memcg, event->eventfd); in memcg_event_remove()
4564 css_put(&memcg->css); in memcg_event_remove()
4577 struct mem_cgroup *memcg = event->memcg; in memcg_event_wake() local
4590 spin_lock(&memcg->event_list_lock); in memcg_event_wake()
4599 spin_unlock(&memcg->event_list_lock); in memcg_event_wake()
4627 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in memcg_write_event_control() local
4653 event->memcg = memcg; in memcg_write_event_control()
4725 ret = event->register_event(memcg, event->eventfd, buf); in memcg_write_event_control()
4731 spin_lock(&memcg->event_list_lock); in memcg_write_event_control()
4732 list_add(&event->list, &memcg->event_list); in memcg_write_event_control()
4733 spin_unlock(&memcg->event_list_lock); in memcg_write_event_control()
4911 static void mem_cgroup_id_remove(struct mem_cgroup *memcg) in mem_cgroup_id_remove() argument
4913 if (memcg->id.id > 0) { in mem_cgroup_id_remove()
4914 idr_remove(&mem_cgroup_idr, memcg->id.id); in mem_cgroup_id_remove()
4915 memcg->id.id = 0; in mem_cgroup_id_remove()
4919 static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n) in mem_cgroup_id_get_many() argument
4921 refcount_add(n, &memcg->id.ref); in mem_cgroup_id_get_many()
4924 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) in mem_cgroup_id_put_many() argument
4926 if (refcount_sub_and_test(n, &memcg->id.ref)) { in mem_cgroup_id_put_many()
4927 mem_cgroup_id_remove(memcg); in mem_cgroup_id_put_many()
4930 css_put(&memcg->css); in mem_cgroup_id_put_many()
4934 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) in mem_cgroup_id_put() argument
4936 mem_cgroup_id_put_many(memcg, 1); in mem_cgroup_id_put()
4951 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) in alloc_mem_cgroup_per_node_info() argument
4985 pn->memcg = memcg; in alloc_mem_cgroup_per_node_info()
4987 memcg->nodeinfo[node] = pn; in alloc_mem_cgroup_per_node_info()
4991 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) in free_mem_cgroup_per_node_info() argument
4993 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; in free_mem_cgroup_per_node_info()
5003 static void __mem_cgroup_free(struct mem_cgroup *memcg) in __mem_cgroup_free() argument
5008 free_mem_cgroup_per_node_info(memcg, node); in __mem_cgroup_free()
5009 free_percpu(memcg->vmstats_percpu); in __mem_cgroup_free()
5010 free_percpu(memcg->vmstats_local); in __mem_cgroup_free()
5011 kfree(memcg); in __mem_cgroup_free()
5014 static void mem_cgroup_free(struct mem_cgroup *memcg) in mem_cgroup_free() argument
5016 memcg_wb_domain_exit(memcg); in mem_cgroup_free()
5021 memcg_flush_percpu_vmstats(memcg); in mem_cgroup_free()
5022 memcg_flush_percpu_vmevents(memcg); in mem_cgroup_free()
5023 __mem_cgroup_free(memcg); in mem_cgroup_free()
5028 struct mem_cgroup *memcg; in mem_cgroup_alloc() local
5036 memcg = kzalloc(size, GFP_KERNEL); in mem_cgroup_alloc()
5037 if (!memcg) in mem_cgroup_alloc()
5040 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, in mem_cgroup_alloc()
5043 if (memcg->id.id < 0) in mem_cgroup_alloc()
5046 memcg->vmstats_local = alloc_percpu(struct memcg_vmstats_percpu); in mem_cgroup_alloc()
5047 if (!memcg->vmstats_local) in mem_cgroup_alloc()
5050 memcg->vmstats_percpu = alloc_percpu(struct memcg_vmstats_percpu); in mem_cgroup_alloc()
5051 if (!memcg->vmstats_percpu) in mem_cgroup_alloc()
5055 if (alloc_mem_cgroup_per_node_info(memcg, node)) in mem_cgroup_alloc()
5058 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) in mem_cgroup_alloc()
5061 INIT_WORK(&memcg->high_work, high_work_func); in mem_cgroup_alloc()
5062 memcg->last_scanned_node = MAX_NUMNODES; in mem_cgroup_alloc()
5063 INIT_LIST_HEAD(&memcg->oom_notify); in mem_cgroup_alloc()
5064 mutex_init(&memcg->thresholds_lock); in mem_cgroup_alloc()
5065 spin_lock_init(&memcg->move_lock); in mem_cgroup_alloc()
5066 vmpressure_init(&memcg->vmpressure); in mem_cgroup_alloc()
5067 INIT_LIST_HEAD(&memcg->event_list); in mem_cgroup_alloc()
5068 spin_lock_init(&memcg->event_list_lock); in mem_cgroup_alloc()
5069 memcg->socket_pressure = jiffies; in mem_cgroup_alloc()
5071 memcg->kmemcg_id = -1; in mem_cgroup_alloc()
5074 INIT_LIST_HEAD(&memcg->cgwb_list); in mem_cgroup_alloc()
5076 memcg->cgwb_frn[i].done = in mem_cgroup_alloc()
5080 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock); in mem_cgroup_alloc()
5081 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue); in mem_cgroup_alloc()
5082 memcg->deferred_split_queue.split_queue_len = 0; in mem_cgroup_alloc()
5084 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); in mem_cgroup_alloc()
5085 return memcg; in mem_cgroup_alloc()
5087 mem_cgroup_id_remove(memcg); in mem_cgroup_alloc()
5088 __mem_cgroup_free(memcg); in mem_cgroup_alloc()
5096 struct mem_cgroup *memcg; in mem_cgroup_css_alloc() local
5099 memcg = mem_cgroup_alloc(); in mem_cgroup_css_alloc()
5100 if (!memcg) in mem_cgroup_css_alloc()
5103 memcg->high = PAGE_COUNTER_MAX; in mem_cgroup_css_alloc()
5104 memcg->soft_limit = PAGE_COUNTER_MAX; in mem_cgroup_css_alloc()
5106 memcg->swappiness = mem_cgroup_swappiness(parent); in mem_cgroup_css_alloc()
5107 memcg->oom_kill_disable = parent->oom_kill_disable; in mem_cgroup_css_alloc()
5110 memcg->use_hierarchy = true; in mem_cgroup_css_alloc()
5111 page_counter_init(&memcg->memory, &parent->memory); in mem_cgroup_css_alloc()
5112 page_counter_init(&memcg->swap, &parent->swap); in mem_cgroup_css_alloc()
5113 page_counter_init(&memcg->memsw, &parent->memsw); in mem_cgroup_css_alloc()
5114 page_counter_init(&memcg->kmem, &parent->kmem); in mem_cgroup_css_alloc()
5115 page_counter_init(&memcg->tcpmem, &parent->tcpmem); in mem_cgroup_css_alloc()
5117 page_counter_init(&memcg->memory, NULL); in mem_cgroup_css_alloc()
5118 page_counter_init(&memcg->swap, NULL); in mem_cgroup_css_alloc()
5119 page_counter_init(&memcg->memsw, NULL); in mem_cgroup_css_alloc()
5120 page_counter_init(&memcg->kmem, NULL); in mem_cgroup_css_alloc()
5121 page_counter_init(&memcg->tcpmem, NULL); in mem_cgroup_css_alloc()
5134 INIT_LIST_HEAD(&memcg->kmem_caches); in mem_cgroup_css_alloc()
5136 root_mem_cgroup = memcg; in mem_cgroup_css_alloc()
5137 return &memcg->css; in mem_cgroup_css_alloc()
5140 error = memcg_online_kmem(memcg); in mem_cgroup_css_alloc()
5147 return &memcg->css; in mem_cgroup_css_alloc()
5149 mem_cgroup_id_remove(memcg); in mem_cgroup_css_alloc()
5150 mem_cgroup_free(memcg); in mem_cgroup_css_alloc()
5156 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_css_online() local
5163 if (memcg_alloc_shrinker_maps(memcg)) { in mem_cgroup_css_online()
5164 mem_cgroup_id_remove(memcg); in mem_cgroup_css_online()
5169 refcount_set(&memcg->id.ref, 1); in mem_cgroup_css_online()
5176 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_css_offline() local
5184 spin_lock(&memcg->event_list_lock); in mem_cgroup_css_offline()
5185 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { in mem_cgroup_css_offline()
5189 spin_unlock(&memcg->event_list_lock); in mem_cgroup_css_offline()
5191 page_counter_set_min(&memcg->memory, 0); in mem_cgroup_css_offline()
5192 page_counter_set_low(&memcg->memory, 0); in mem_cgroup_css_offline()
5194 memcg_offline_kmem(memcg); in mem_cgroup_css_offline()
5195 wb_memcg_offline(memcg); in mem_cgroup_css_offline()
5197 drain_all_stock(memcg); in mem_cgroup_css_offline()
5199 mem_cgroup_id_put(memcg); in mem_cgroup_css_offline()
5204 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_css_released() local
5206 invalidate_reclaim_iterators(memcg); in mem_cgroup_css_released()
5211 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_css_free() local
5216 wb_wait_for_completion(&memcg->cgwb_frn[i].done); in mem_cgroup_css_free()
5221 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active) in mem_cgroup_css_free()
5224 vmpressure_cleanup(&memcg->vmpressure); in mem_cgroup_css_free()
5225 cancel_work_sync(&memcg->high_work); in mem_cgroup_css_free()
5226 mem_cgroup_remove_from_trees(memcg); in mem_cgroup_css_free()
5227 memcg_free_shrinker_maps(memcg); in mem_cgroup_css_free()
5228 memcg_free_kmem(memcg); in mem_cgroup_css_free()
5229 mem_cgroup_free(memcg); in mem_cgroup_css_free()
5247 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_css_reset() local
5249 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5250 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5251 page_counter_set_max(&memcg->memsw, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5252 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5253 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5254 page_counter_set_min(&memcg->memory, 0); in mem_cgroup_css_reset()
5255 page_counter_set_low(&memcg->memory, 0); in mem_cgroup_css_reset()
5256 memcg->high = PAGE_COUNTER_MAX; in mem_cgroup_css_reset()
5257 memcg->soft_limit = PAGE_COUNTER_MAX; in mem_cgroup_css_reset()
5258 memcg_wb_domain_size_changed(memcg); in mem_cgroup_css_reset()
5746 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */ in mem_cgroup_can_attach() local
5767 memcg = mem_cgroup_from_css(css); in mem_cgroup_can_attach()
5777 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); in mem_cgroup_can_attach()
5783 VM_BUG_ON(from == memcg); in mem_cgroup_can_attach()
5799 mc.to = memcg; in mem_cgroup_can_attach()
6020 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in memory_current_read() local
6022 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; in memory_current_read()
6034 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in memory_min_write() local
6043 page_counter_set_min(&memcg->memory, min); in memory_min_write()
6057 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in memory_low_write() local
6066 page_counter_set_low(&memcg->memory, low); in memory_low_write()
6079 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in memory_high_write() local
6089 memcg->high = high; in memory_high_write()
6091 nr_pages = page_counter_read(&memcg->memory); in memory_high_write()
6093 try_to_free_mem_cgroup_pages(memcg, nr_pages - high, in memory_high_write()
6096 memcg_wb_domain_size_changed(memcg); in memory_high_write()
6109 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in memory_max_write() local
6120 xchg(&memcg->memory.max, max); in memory_max_write()
6123 unsigned long nr_pages = page_counter_read(&memcg->memory); in memory_max_write()
6134 drain_all_stock(memcg); in memory_max_write()
6140 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, in memory_max_write()
6146 memcg_memory_event(memcg, MEMCG_OOM); in memory_max_write()
6147 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0)) in memory_max_write()
6151 memcg_wb_domain_size_changed(memcg); in memory_max_write()
6167 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); in memory_events_show() local
6169 __memory_events_show(m, memcg->memory_events); in memory_events_show()
6175 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); in memory_events_local_show() local
6177 __memory_events_show(m, memcg->memory_events_local); in memory_events_local_show()
6183 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); in memory_stat_show() local
6186 buf = memory_stat_format(memcg); in memory_stat_show()
6196 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); in memory_oom_group_show() local
6198 seq_printf(m, "%d\n", memcg->oom_group); in memory_oom_group_show()
6206 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in memory_oom_group_write() local
6220 memcg->oom_group = oom_group; in memory_oom_group_write()
6368 struct mem_cgroup *memcg) in mem_cgroup_protected() argument
6380 if (memcg == root) in mem_cgroup_protected()
6383 usage = page_counter_read(&memcg->memory); in mem_cgroup_protected()
6387 emin = memcg->memory.min; in mem_cgroup_protected()
6388 elow = memcg->memory.low; in mem_cgroup_protected()
6390 parent = parent_mem_cgroup(memcg); in mem_cgroup_protected()
6403 min_usage = min(usage, memcg->memory.min); in mem_cgroup_protected()
6417 low_usage = min(usage, memcg->memory.low); in mem_cgroup_protected()
6427 memcg->memory.emin = emin; in mem_cgroup_protected()
6428 memcg->memory.elow = elow; in mem_cgroup_protected()
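
mem_cgroup_protected(), whose counter reads appear above, computes effective protection per level: never more than the memcg's own min/low setting, never more than the parent's effective protection, and scaled down proportionally when siblings together claim more than the parent passes through. A sketch of one level of that arithmetic (field names illustrative; the proportional term follows my reading of this kernel version):

/* Editor's sketch, not kernel code: one level of the effective
 * min/low calculation. 'siblings_protected' is the sum of
 * min(usage, setting) over the parent's children -- the amount the
 * parent's effective protection is divided among. */
static unsigned long min2(unsigned long a, unsigned long b)
{
    return a < b ? a : b;
}

static unsigned long effective_protection(unsigned long usage,
                                          unsigned long setting,
                                          unsigned long parent_effective,
                                          unsigned long siblings_protected)
{
    unsigned long prot = min2(usage, setting);   /* what we can claim */
    unsigned long e = min2(setting, parent_effective);

    if (e && prot && siblings_protected)
        e = min2(e, parent_effective * prot / siblings_protected);
    return e;
}
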
6460 struct mem_cgroup *memcg = NULL; in mem_cgroup_try_charge() local
6484 memcg = mem_cgroup_from_id(id); in mem_cgroup_try_charge()
6485 if (memcg && !css_tryget_online(&memcg->css)) in mem_cgroup_try_charge()
6486 memcg = NULL; in mem_cgroup_try_charge()
6491 if (!memcg) in mem_cgroup_try_charge()
6492 memcg = get_mem_cgroup_from_mm(mm); in mem_cgroup_try_charge()
6494 ret = try_charge(memcg, gfp_mask, nr_pages); in mem_cgroup_try_charge()
6496 css_put(&memcg->css); in mem_cgroup_try_charge()
6498 *memcgp = memcg; in mem_cgroup_try_charge()
6506 struct mem_cgroup *memcg; in mem_cgroup_try_charge_delay() local
6510 memcg = *memcgp; in mem_cgroup_try_charge_delay()
6511 mem_cgroup_throttle_swaprate(memcg, page_to_nid(page), gfp_mask); in mem_cgroup_try_charge_delay()
6532 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg, in mem_cgroup_commit_charge() argument
6547 if (!memcg) in mem_cgroup_commit_charge()
6550 commit_charge(page, memcg, lrucare); in mem_cgroup_commit_charge()
6553 mem_cgroup_charge_statistics(memcg, page, compound, nr_pages); in mem_cgroup_commit_charge()
6554 memcg_check_events(memcg, page); in mem_cgroup_commit_charge()
6576 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg, in mem_cgroup_cancel_charge() argument
6588 if (!memcg) in mem_cgroup_cancel_charge()
6591 cancel_charge(memcg, nr_pages); in mem_cgroup_cancel_charge()
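mem_cgroup_try_charge(), mem_cgroup_commit_charge() and mem_cgroup_cancel_charge() form a two-phase protocol: reserve the charge first, then either attach it to the page or refund it if the caller's operation failed in between. A user-space model of that reserve/commit/cancel pattern on a simple counter (the names and helpers below are illustrative, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

static unsigned long charged, limit = 100;

static bool try_charge(unsigned long nr)        /* phase 1: reserve */
{
        if (charged + nr > limit)
                return false;
        charged += nr;
        return true;
}

static void commit_charge(unsigned long nr)     /* phase 2a: keep it */
{
        printf("committed %lu pages (total %lu)\n", nr, charged);
}

static void cancel_charge(unsigned long nr)     /* phase 2b: refund */
{
        charged -= nr;
}

int main(void)
{
        if (try_charge(10)) {
                bool insert_failed = false;     /* e.g. page table insertion */

                if (insert_failed)
                        cancel_charge(10);
                else
                        commit_charge(10);
        }
        return 0;
}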
6595 struct mem_cgroup *memcg; member
6615 if (!mem_cgroup_is_root(ug->memcg)) { in uncharge_batch()
6616 page_counter_uncharge(&ug->memcg->memory, nr_pages); in uncharge_batch()
6618 page_counter_uncharge(&ug->memcg->memsw, nr_pages); in uncharge_batch()
6620 page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem); in uncharge_batch()
6621 memcg_oom_recover(ug->memcg); in uncharge_batch()
6625 __mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon); in uncharge_batch()
6626 __mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file); in uncharge_batch()
6627 __mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge); in uncharge_batch()
6628 __mod_memcg_state(ug->memcg, NR_SHMEM, -ug->nr_shmem); in uncharge_batch()
6629 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); in uncharge_batch()
6630 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, nr_pages); in uncharge_batch()
6631 memcg_check_events(ug->memcg, ug->dummy_page); in uncharge_batch()
6634 if (!mem_cgroup_is_root(ug->memcg)) in uncharge_batch()
6635 css_put_many(&ug->memcg->css, nr_pages); in uncharge_batch()
6653 if (ug->memcg != page->mem_cgroup) { in uncharge_page()
6654 if (ug->memcg) { in uncharge_page()
6658 ug->memcg = page->mem_cgroup; in uncharge_page()
6706 if (ug.memcg) in uncharge_list()
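The uncharge_gather fragments show the batching idea: uncharge_page() accumulates totals while consecutive pages belong to the same memcg, and uncharge_batch() flushes the accumulated counts in one set of counter operations when the memcg changes, or once more at the end of the list. A user-space model, assuming a simple (page, group) stream:

#include <stdio.h>

struct gather {
        int group;                  /* models ug->memcg */
        unsigned long nr_pages;     /* accumulated pages to uncharge */
};

static void flush(struct gather *ug)            /* models uncharge_batch() */
{
        if (ug->nr_pages)
                printf("uncharge %lu pages from group %d\n",
                       ug->nr_pages, ug->group);
        ug->nr_pages = 0;
}

int main(void)
{
        int page_group[] = { 1, 1, 1, 2, 2, 1 };  /* owning group per page */
        struct gather ug = { .group = -1 };

        for (int i = 0; i < 6; i++) {
                if (ug.group != page_group[i]) {  /* as in uncharge_page() */
                        flush(&ug);
                        ug.group = page_group[i];
                }
                ug.nr_pages++;
        }
        flush(&ug);                               /* as in uncharge_list() */
        return 0;
}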
6761 struct mem_cgroup *memcg; in mem_cgroup_migrate() local
6780 memcg = oldpage->mem_cgroup; in mem_cgroup_migrate()
6781 if (!memcg) in mem_cgroup_migrate()
6788 page_counter_charge(&memcg->memory, nr_pages); in mem_cgroup_migrate()
6790 page_counter_charge(&memcg->memsw, nr_pages); in mem_cgroup_migrate()
6791 css_get_many(&memcg->css, nr_pages); in mem_cgroup_migrate()
6793 commit_charge(newpage, memcg, false); in mem_cgroup_migrate()
6796 mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages); in mem_cgroup_migrate()
6797 memcg_check_events(memcg, newpage); in mem_cgroup_migrate()
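mem_cgroup_migrate() keeps accounting stable across page migration: the charge is taken up front for the new page (memory, plus memsw when swap accounting applies), the new page is committed to the old page's memcg, and the old page keeps its own charge until the normal uncharge path releases it. A compact model of that overlap, with made-up structures standing in for page->mem_cgroup and the page counters:

#include <stdio.h>

struct page_model { int group; };   /* models page->mem_cgroup */

static unsigned long usage[2];      /* per-group page counter */

int main(void)
{
        struct page_model oldpage = { .group = 1 }, newpage = { .group = 0 };
        unsigned long nr_pages = 1;

        usage[oldpage.group] = nr_pages;    /* oldpage's existing charge */

        /* As in mem_cgroup_migrate(): charge the new page up front and
         * commit it to the old page's group; both pages are briefly
         * accounted at once. */
        usage[oldpage.group] += nr_pages;   /* page_counter_charge() */
        newpage.group = oldpage.group;      /* commit_charge(newpage, ...) */

        usage[oldpage.group] -= nr_pages;   /* later: oldpage's uncharge */
        printf("group %d usage: %lu page(s)\n",
               newpage.group, usage[newpage.group]);
        return 0;
}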
6806 struct mem_cgroup *memcg; in mem_cgroup_sk_alloc() local
6826 memcg = mem_cgroup_from_task(current); in mem_cgroup_sk_alloc()
6827 if (memcg == root_mem_cgroup) in mem_cgroup_sk_alloc()
6829 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active) in mem_cgroup_sk_alloc()
6831 if (css_tryget_online(&memcg->css)) in mem_cgroup_sk_alloc()
6832 sk->sk_memcg = memcg; in mem_cgroup_sk_alloc()
6851 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) in mem_cgroup_charge_skmem() argument
6858 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { in mem_cgroup_charge_skmem()
6859 memcg->tcpmem_pressure = 0; in mem_cgroup_charge_skmem()
6862 page_counter_charge(&memcg->tcpmem, nr_pages); in mem_cgroup_charge_skmem()
6863 memcg->tcpmem_pressure = 1; in mem_cgroup_charge_skmem()
6871 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages); in mem_cgroup_charge_skmem()
6873 if (try_charge(memcg, gfp_mask, nr_pages) == 0) in mem_cgroup_charge_skmem()
6876 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages); in mem_cgroup_charge_skmem()
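mem_cgroup_charge_skmem() shows the legacy tcpmem pattern: try a strict charge first; if that fails, force the charge anyway (network buffers cannot simply be dropped here), set the tcpmem_pressure flag, and return false so the caller can throttle. A user-space model with a hypothetical bounded counter:

#include <stdbool.h>
#include <stdio.h>

static unsigned long tcpmem, tcpmem_max = 100;
static int tcpmem_pressure;

/* Models page_counter_try_charge(): all-or-nothing under the limit. */
static bool try_charge(unsigned long nr)
{
        if (tcpmem + nr > tcpmem_max)
                return false;
        tcpmem += nr;
        return true;
}

static bool charge_skmem(unsigned long nr_pages)
{
        if (try_charge(nr_pages)) {
                tcpmem_pressure = 0;
                return true;
        }
        /* Over limit: charge anyway, flag pressure, report failure so
         * the socket layer can back off, as in mem_cgroup_charge_skmem(). */
        tcpmem += nr_pages;
        tcpmem_pressure = 1;
        return false;
}

int main(void)
{
        printf("%d\n", charge_skmem(80));   /* 1: fits under the limit */
        printf("%d\n", charge_skmem(40));   /* 0: forced, pressure set */
        printf("tcpmem=%lu pressure=%d\n", tcpmem, tcpmem_pressure);
        return 0;
}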
6885 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) in mem_cgroup_uncharge_skmem() argument
6888 page_counter_uncharge(&memcg->tcpmem, nr_pages); in mem_cgroup_uncharge_skmem()
6892 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); in mem_cgroup_uncharge_skmem()
6894 refill_stock(memcg, nr_pages); in mem_cgroup_uncharge_skmem()
6960 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) in mem_cgroup_id_get_online() argument
6962 while (!refcount_inc_not_zero(&memcg->id.ref)) { in mem_cgroup_id_get_online()
6967 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) { in mem_cgroup_id_get_online()
6971 memcg = parent_mem_cgroup(memcg); in mem_cgroup_id_get_online()
6972 if (!memcg) in mem_cgroup_id_get_online()
6973 memcg = root_mem_cgroup; in mem_cgroup_id_get_online()
6975 return memcg; in mem_cgroup_id_get_online()
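mem_cgroup_id_get_online() climbs the hierarchy until it finds a memcg whose ID refcount can still be taken, falling back to the root, whose refcount is never zero (hence the WARN_ON_ONCE in the fragment). A small model of that walk over a parent array, with plain integers standing in for refcount_inc_not_zero():

#include <stdio.h>

#define ROOT 0

static int parent[] = { ROOT, ROOT, 1, 2 };  /* group -> parent group */
static int id_ref[] = { 1, 1, 0, 0 };        /* 0 means the id is dead */

/* Models refcount_inc_not_zero(): only bump a still-live refcount. */
static int inc_not_zero(int g)
{
        if (id_ref[g] == 0)
                return 0;
        id_ref[g]++;
        return 1;
}

static int id_get_online(int g)
{
        while (!inc_not_zero(g))
                g = parent[g];   /* offline: retry on the parent */
        return g;
}

int main(void)
{
        /* Groups 3 and 2 are offline, so the walk lands on group 1. */
        printf("online ancestor: %d\n", id_get_online(3));
        return 0;
}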
6987 struct mem_cgroup *memcg, *swap_memcg; in mem_cgroup_swapout() local
6997 memcg = page->mem_cgroup; in mem_cgroup_swapout()
7000 if (!memcg) in mem_cgroup_swapout()
7008 swap_memcg = mem_cgroup_id_get_online(memcg); in mem_cgroup_swapout()
7020 if (!mem_cgroup_is_root(memcg)) in mem_cgroup_swapout()
7021 page_counter_uncharge(&memcg->memory, nr_entries); in mem_cgroup_swapout()
7023 if (memcg != swap_memcg) { in mem_cgroup_swapout()
7026 page_counter_uncharge(&memcg->memsw, nr_entries); in mem_cgroup_swapout()
7036 mem_cgroup_charge_statistics(memcg, page, PageTransHuge(page), in mem_cgroup_swapout()
7038 memcg_check_events(memcg, page); in mem_cgroup_swapout()
7040 if (!mem_cgroup_is_root(memcg)) in mem_cgroup_swapout()
7041 css_put_many(&memcg->css, nr_entries); in mem_cgroup_swapout()
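mem_cgroup_swapout() moves a page's charge from the memory counter into swap accounting: the owning ID is recorded in the swap map, memcg->memory is uncharged, and when the online ancestor taking the swap charge differs from the page's memcg, memsw is fixed up on both sides so the combined counter stays consistent. A user-space model of that transfer between counters:

#include <stdio.h>

struct counters { unsigned long memory, memsw; };

int main(void)
{
        struct counters memcg = { .memory = 10, .memsw = 10 };
        struct counters swap_memcg = { 0 };  /* online ancestor */
        unsigned long nr_entries = 1;

        /* As in mem_cgroup_swapout(): the page leaves the memory counter
         * but stays accounted in memsw, possibly in an ancestor. */
        memcg.memory -= nr_entries;
        swap_memcg.memsw += nr_entries;      /* page_counter_charge() */
        memcg.memsw -= nr_entries;           /* page_counter_uncharge() */

        printf("memcg: mem=%lu memsw=%lu; ancestor memsw=%lu\n",
               memcg.memory, memcg.memsw, swap_memcg.memsw);
        return 0;
}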
7057 struct mem_cgroup *memcg; in mem_cgroup_try_charge_swap() local
7063 memcg = page->mem_cgroup; in mem_cgroup_try_charge_swap()
7066 if (!memcg) in mem_cgroup_try_charge_swap()
7070 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); in mem_cgroup_try_charge_swap()
7074 memcg = mem_cgroup_id_get_online(memcg); in mem_cgroup_try_charge_swap()
7076 if (!mem_cgroup_is_root(memcg) && in mem_cgroup_try_charge_swap()
7077 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { in mem_cgroup_try_charge_swap()
7078 memcg_memory_event(memcg, MEMCG_SWAP_MAX); in mem_cgroup_try_charge_swap()
7079 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); in mem_cgroup_try_charge_swap()
7080 mem_cgroup_id_put(memcg); in mem_cgroup_try_charge_swap()
7086 mem_cgroup_id_get_many(memcg, nr_pages - 1); in mem_cgroup_try_charge_swap()
7087 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages); in mem_cgroup_try_charge_swap()
7089 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages); in mem_cgroup_try_charge_swap()
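mem_cgroup_try_charge_swap() charges the swap counter of the page's online memcg, recording both MEMCG_SWAP_MAX and MEMCG_SWAP_FAIL events when the limit blocks the charge, and takes one ID reference per swapped page (nr_pages - 1 extra, since mem_cgroup_id_get_online() already took one). A small model of the try-charge-with-events step:

#include <stdbool.h>
#include <stdio.h>

static unsigned long swap_usage, swap_max = 50;
static unsigned long ev_swap_max, ev_swap_fail;   /* memory.swap.events */

/* Models the page_counter_try_charge() call on memcg->swap. */
static bool try_charge_swap(unsigned long nr_pages)
{
        if (swap_usage + nr_pages > swap_max) {
                ev_swap_max++;      /* memcg_memory_event(MEMCG_SWAP_MAX) */
                ev_swap_fail++;     /* memcg_memory_event(MEMCG_SWAP_FAIL) */
                return false;
        }
        swap_usage += nr_pages;
        return true;
}

int main(void)
{
        bool first = try_charge_swap(40);   /* fits: 40 <= 50 */
        bool second = try_charge_swap(20);  /* blocked: 60 > 50 */

        printf("first: %d, second: %d\n", first, second);
        printf("max events: %lu, fail events: %lu\n",
               ev_swap_max, ev_swap_fail);
        return 0;
}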
7101 struct mem_cgroup *memcg; in mem_cgroup_uncharge_swap() local
7109 memcg = mem_cgroup_from_id(id); in mem_cgroup_uncharge_swap()
7110 if (memcg) { in mem_cgroup_uncharge_swap()
7111 if (!mem_cgroup_is_root(memcg)) { in mem_cgroup_uncharge_swap()
7113 page_counter_uncharge(&memcg->swap, nr_pages); in mem_cgroup_uncharge_swap()
7115 page_counter_uncharge(&memcg->memsw, nr_pages); in mem_cgroup_uncharge_swap()
7117 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); in mem_cgroup_uncharge_swap()
7118 mem_cgroup_id_put_many(memcg, nr_pages); in mem_cgroup_uncharge_swap()
7123 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) in mem_cgroup_get_nr_swap_pages() argument
7129 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) in mem_cgroup_get_nr_swap_pages()
7131 READ_ONCE(memcg->swap.max) - in mem_cgroup_get_nr_swap_pages()
7132 page_counter_read(&memcg->swap)); in mem_cgroup_get_nr_swap_pages()
7138 struct mem_cgroup *memcg; in mem_cgroup_swap_full() local
7147 memcg = page->mem_cgroup; in mem_cgroup_swap_full()
7148 if (!memcg) in mem_cgroup_swap_full()
7151 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) in mem_cgroup_swap_full()
7152 if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.max) in mem_cgroup_swap_full()
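Both mem_cgroup_get_nr_swap_pages() and mem_cgroup_swap_full() walk from the memcg toward the root: the first takes the minimum headroom (swap.max minus current swap usage) over all ancestors, the second declares swap "full" as soon as any ancestor has used half of its swap limit. A user-space model of the two ancestor walks over a small parent array:

#include <stdbool.h>
#include <stdio.h>

#define ROOT 0

static int parent[]               = { ROOT, ROOT, 1 };
static unsigned long swap_usage[] = { 0, 30, 5 };
static unsigned long swap_max[]   = { 0, 100, 40 };  /* root unchecked */

/* Models mem_cgroup_get_nr_swap_pages(): minimum headroom on the path. */
static unsigned long nr_swap_pages(int g, unsigned long global_free)
{
        unsigned long nr = global_free;

        for (; g != ROOT; g = parent[g])
                if (swap_max[g] - swap_usage[g] < nr)
                        nr = swap_max[g] - swap_usage[g];
        return nr;
}

/* Models mem_cgroup_swap_full(): any ancestor at half its limit. */
static bool swap_full(int g)
{
        for (; g != ROOT; g = parent[g])
                if (swap_usage[g] * 2 >= swap_max[g])
                        return true;
        return false;
}

int main(void)
{
        /* Group 2: own headroom 35, parent's 70 -> 35 wins. */
        printf("headroom: %lu\n", nr_swap_pages(2, 1000));
        printf("full: %d\n", swap_full(2));   /* 0: nobody at half yet */
        return 0;
}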
7178 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in swap_current_read() local
7180 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE; in swap_current_read()
7192 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in swap_max_write() local
7201 xchg(&memcg->swap.max, max); in swap_max_write()
7208 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); in swap_events_show() local
7211 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); in swap_events_show()
7213 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL])); in swap_events_show()