Lines Matching +full:charge +full:- +full:current +full:- +full:limit +full:- +full:mapping
1 /* memcontrol.c - Memory Controller
18 * Charge lifetime sanitation
43 #include <linux/page-flags.h>
44 #include <linux/backing-dev.h>
116 * Cgroups above their limits are maintained in a RB-Tree, independent of
208 * limit reclaim to prevent infinite loops, if they ever occur.
221 /* for encoding cft->private value on file */
253 return tsk_is_oom_victim(current) || fatal_signal_pending(current) || in should_force_charge()
254 (current->flags & PF_EXITING); in should_force_charge()
262 return &memcg->vmpressure; in memcg_to_vmpressure()
267 return &container_of(vmpr, struct mem_cgroup, vmpressure)->css; in vmpressure_to_css()
272 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
275 * but only a few kmem-limited. Or also, if we have, for instance, 200
276 * memcgs, and none but the 200th is kmem-limited, we'd have to have a
279 * The current size of the caches array is stored in memcg_nr_cache_ids. It
300 * the alloc/free process all the time. In a small machine, 4 kmem-limited
342 mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true); in memcg_expand_one_shrinker_map()
349 return -ENOMEM; in memcg_expand_one_shrinker_map()
352 memset(new->map, (int)0xff, old_size); in memcg_expand_one_shrinker_map()
353 memset((void *)new->map + old_size, 0, size - old_size); in memcg_expand_one_shrinker_map()
355 rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new); in memcg_expand_one_shrinker_map()
356 call_rcu(&old->rcu, memcg_free_shrinker_map_rcu); in memcg_expand_one_shrinker_map()
373 map = rcu_dereference_protected(pn->shrinker_map, true); in memcg_free_shrinker_maps()
376 rcu_assign_pointer(pn->shrinker_map, NULL); in memcg_free_shrinker_maps()
394 ret = -ENOMEM; in memcg_alloc_shrinker_maps()
397 rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map); in memcg_alloc_shrinker_maps()
440 map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map); in memcg_set_shrinker_bit()
443 set_bit(shrinker_id, map->map); in memcg_set_shrinker_bit()
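
The fragments from memcg_expand_one_shrinker_map() through memcg_set_shrinker_bit() above grow a per-node shrinker bitmap and publish it with rcu_assign_pointer(): the old portion of the new map is conservatively filled with ones and only the new tail is zeroed, and the old map is freed later via call_rcu(). A minimal userspace sketch of that grow-and-publish pattern, without the RCU machinery the kernel needs for lockless readers (all names here are illustrative, not taken from the source):

#include <stdlib.h>
#include <string.h>

/* Illustrative only: a flat bitmap blob that replaces a smaller one. */
struct bitmap_blob {
	size_t size;          /* bytes currently allocated for bits[] */
	unsigned char bits[]; /* the bitmap itself */
};

static struct bitmap_blob *expand_bitmap(struct bitmap_blob *old, size_t new_size)
{
	size_t old_size = old ? old->size : 0;
	struct bitmap_blob *new = malloc(sizeof(*new) + new_size);

	if (!new)
		return NULL;
	/* Conservatively mark every pre-existing bit as set, as the listing
	 * does with memset(new->map, 0xff, old_size), and clear only the tail. */
	memset(new->bits, 0xff, old_size);
	memset(new->bits + old_size, 0, new_size - old_size);
	new->size = new_size;
	free(old); /* the kernel defers freeing with call_rcu() instead */
	return new;
}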
457 * mem_cgroup_css_from_page - css of the memcg associated with a page
471 memcg = page->mem_cgroup; in mem_cgroup_css_from_page()
476 return &memcg->css; in mem_cgroup_css_from_page()
480 * page_cgroup_ino - return inode number of the memcg a page is charged to
498 memcg = READ_ONCE(page->mem_cgroup); in page_cgroup_ino()
499 while (memcg && !(memcg->css.flags & CSS_ONLINE)) in page_cgroup_ino()
502 ino = cgroup_ino(memcg->css.cgroup); in page_cgroup_ino()
512 return memcg->nodeinfo[nid]; in mem_cgroup_page_nodeinfo()
533 struct rb_node **p = &mctz->rb_root.rb_node; in __mem_cgroup_insert_exceeded()
538 if (mz->on_tree) in __mem_cgroup_insert_exceeded()
541 mz->usage_in_excess = new_usage_in_excess; in __mem_cgroup_insert_exceeded()
542 if (!mz->usage_in_excess) in __mem_cgroup_insert_exceeded()
548 if (mz->usage_in_excess < mz_node->usage_in_excess) { in __mem_cgroup_insert_exceeded()
549 p = &(*p)->rb_left; in __mem_cgroup_insert_exceeded()
555 * limit by the same amount in __mem_cgroup_insert_exceeded()
557 else if (mz->usage_in_excess >= mz_node->usage_in_excess) in __mem_cgroup_insert_exceeded()
558 p = &(*p)->rb_right; in __mem_cgroup_insert_exceeded()
562 mctz->rb_rightmost = &mz->tree_node; in __mem_cgroup_insert_exceeded()
564 rb_link_node(&mz->tree_node, parent, p); in __mem_cgroup_insert_exceeded()
565 rb_insert_color(&mz->tree_node, &mctz->rb_root); in __mem_cgroup_insert_exceeded()
566 mz->on_tree = true; in __mem_cgroup_insert_exceeded()
572 if (!mz->on_tree) in __mem_cgroup_remove_exceeded()
575 if (&mz->tree_node == mctz->rb_rightmost) in __mem_cgroup_remove_exceeded()
576 mctz->rb_rightmost = rb_prev(&mz->tree_node); in __mem_cgroup_remove_exceeded()
578 rb_erase(&mz->tree_node, &mctz->rb_root); in __mem_cgroup_remove_exceeded()
579 mz->on_tree = false; in __mem_cgroup_remove_exceeded()
587 spin_lock_irqsave(&mctz->lock, flags); in mem_cgroup_remove_exceeded()
589 spin_unlock_irqrestore(&mctz->lock, flags); in mem_cgroup_remove_exceeded()
594 unsigned long nr_pages = page_counter_read(&memcg->memory); in soft_limit_excess()
595 unsigned long soft_limit = READ_ONCE(memcg->soft_limit); in soft_limit_excess()
599 excess = nr_pages - soft_limit; in soft_limit_excess()
621 * We have to update the tree if mz is on RB-tree or in mem_cgroup_update_tree()
624 if (excess || mz->on_tree) { in mem_cgroup_update_tree()
627 spin_lock_irqsave(&mctz->lock, flags); in mem_cgroup_update_tree()
628 /* if on-tree, remove it */ in mem_cgroup_update_tree()
629 if (mz->on_tree) in mem_cgroup_update_tree()
632 * Insert again. mz->usage_in_excess will be updated. in mem_cgroup_update_tree()
636 spin_unlock_irqrestore(&mctz->lock, flags); in mem_cgroup_update_tree()
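
soft_limit_excess() above is simple saturating arithmetic, and mem_cgroup_update_tree() only takes the tree lock when the result can matter. A compact restatement with hypothetical names:

#include <stdbool.h>

/* Pages charged beyond the soft limit; 0 when at or below it. */
static unsigned long soft_limit_excess(unsigned long usage, unsigned long soft_limit)
{
	return usage > soft_limit ? usage - soft_limit : 0;
}

/* Touch the sorted tree only when it can matter: the group now exceeds its
 * soft limit, or it is already queued and its key must be refreshed/removed. */
static bool needs_tree_update(unsigned long excess, bool on_tree)
{
	return excess || on_tree;
}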
662 if (!mctz->rb_rightmost) in __mem_cgroup_largest_soft_limit_node()
665 mz = rb_entry(mctz->rb_rightmost, in __mem_cgroup_largest_soft_limit_node()
673 if (!soft_limit_excess(mz->memcg) || in __mem_cgroup_largest_soft_limit_node()
674 !css_tryget_online(&mz->memcg->css)) in __mem_cgroup_largest_soft_limit_node()
685 spin_lock_irq(&mctz->lock); in mem_cgroup_largest_soft_limit_node()
687 spin_unlock_irq(&mctz->lock); in mem_cgroup_largest_soft_limit_node()
694 return atomic_long_read(&memcg->events[event]); in memcg_sum_events()
723 nr_pages = -nr_pages; /* for event */ in mem_cgroup_charge_statistics()
726 __this_cpu_add(memcg->stat_cpu->nr_page_events, nr_pages); in mem_cgroup_charge_statistics()
762 val = __this_cpu_read(memcg->stat_cpu->nr_page_events); in mem_cgroup_event_ratelimit()
763 next = __this_cpu_read(memcg->stat_cpu->targets[target]); in mem_cgroup_event_ratelimit()
765 if ((long)(next - val) < 0) { in mem_cgroup_event_ratelimit()
779 __this_cpu_write(memcg->stat_cpu->targets[target], next); in mem_cgroup_event_ratelimit()
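
The (long)(next - val) < 0 test at line 765 is the usual wraparound-safe check that an unsigned event counter has passed its target. A standalone illustration; the target spacing constant is an assumption, not the kernel's value:

#include <stdbool.h>

#define EVENT_TARGET_GAP 1024UL /* assumed spacing between targets */

/* True when the running event counter has passed the target, even if the
 * unsigned counter wrapped around in the meantime; re-arms the next target. */
static bool event_due(unsigned long val, unsigned long *next)
{
	if ((long)(*next - val) < 0) {
		*next = val + EVENT_TARGET_GAP;
		return true;
	}
	return false;
}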
791 /* threshold event is triggered in finer grain than soft limit */ in memcg_check_events()
808 atomic_inc(&memcg->numainfo_events); in memcg_check_events()
816 * mm_update_next_owner() may clear mm->owner to NULL in mem_cgroup_from_task()
831 * Obtain a reference on mm->memcg and returns it if successful. Otherwise
852 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); in get_mem_cgroup_from_mm()
856 } while (!css_tryget(&memcg->css)); in get_mem_cgroup_from_mm()
866 * Obtain a reference on page->memcg and returns it if successful. Otherwise
871 struct mem_cgroup *memcg = page->mem_cgroup; in get_mem_cgroup_from_page()
877 if (!memcg || !css_tryget_online(&memcg->css)) in get_mem_cgroup_from_page()
885 * If current->active_memcg is non-NULL, do not fallback to current->mm->memcg.
889 if (unlikely(current->active_memcg)) { in get_mem_cgroup_from_current()
893 if (css_tryget_online(&current->active_memcg->css)) in get_mem_cgroup_from_current()
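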
894 memcg = current->active_memcg; in get_mem_cgroup_from_current()
898 return get_mem_cgroup_from_mm(current->mm); in get_mem_cgroup_from_current()
902 * mem_cgroup_iter - iterate over memory cgroup hierarchy
908 * @root itself, or %NULL after a full round-trip.
912 * to cancel a hierarchy walk before the round-trip is complete.
936 if (!root->use_hierarchy && root != root_mem_cgroup) { in mem_cgroup_iter()
947 mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id); in mem_cgroup_iter()
948 iter = &mz->iter[reclaim->priority]; in mem_cgroup_iter()
950 if (prev && reclaim->generation != iter->generation) in mem_cgroup_iter()
954 pos = READ_ONCE(iter->position); in mem_cgroup_iter()
955 if (!pos || css_tryget(&pos->css)) in mem_cgroup_iter()
958 * css reference reached zero, so iter->position will in mem_cgroup_iter()
959 * be cleared by ->css_released. However, we should not in mem_cgroup_iter()
960 * rely on this happening soon, because ->css_released in mem_cgroup_iter()
961 * is called from a work queue, and by busy-waiting we in mem_cgroup_iter()
962 * might block it. So we clear iter->position right in mem_cgroup_iter()
965 (void)cmpxchg(&iter->position, pos, NULL); in mem_cgroup_iter()
970 css = &pos->css; in mem_cgroup_iter()
973 css = css_next_descendant_pre(css, &root->css); in mem_cgroup_iter()
978 * the hierarchy - make sure they see at least in mem_cgroup_iter()
993 if (css == &root->css) in mem_cgroup_iter()
1008 (void)cmpxchg(&iter->position, pos, memcg); in mem_cgroup_iter()
1011 css_put(&pos->css); in mem_cgroup_iter()
1014 iter->generation++; in mem_cgroup_iter()
1016 reclaim->generation = iter->generation; in mem_cgroup_iter()
1023 css_put(&prev->css); in mem_cgroup_iter()
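
mem_cgroup_iter() walks the hierarchy pre-order while caching the last position per reclaim priority so that concurrent reclaimers share the walk, and it clears a dying position with cmpxchg() rather than waiting for ->css_released. The refcounting cannot be reproduced in a few lines, but the shared-cursor idea can be sketched in plain C (the array-of-nodes model and all names are illustrative):

#include <stddef.h>

struct node { int live; };           /* stand-in for a memcg */

struct shared_iter {
	struct node *position;        /* last node handed out */
	unsigned int generation;      /* bumped on each full round trip */
};

/* Hand out the next live node after the cached position, returning NULL
 * (i.e. "back at the root") once the whole set has been visited. */
static struct node *iter_next(struct shared_iter *it, struct node *nodes, size_t n)
{
	size_t start = it->position ? (size_t)(it->position - nodes) + 1 : 0;

	for (size_t i = start; i < n; i++) {
		if (nodes[i].live) {
			it->position = &nodes[i];
			return it->position;
		}
	}
	it->position = NULL;          /* full round trip: restart next time */
	it->generation++;
	return NULL;
}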
1029 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1039 css_put(&prev->css); in mem_cgroup_iter_break()
1053 iter = &mz->iter[i]; in __invalidate_reclaim_iterators()
1054 cmpxchg(&iter->position, in __invalidate_reclaim_iterators()
1071 * When cgroup1 non-hierarchy mode is used, in invalidate_reclaim_iterators()
1082 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1088 * descendants and calls @fn for each task. If @fn returns a non-zero
1106 css_task_iter_start(&iter->css, 0, &it); in mem_cgroup_scan_tasks()
1119 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
1134 lruvec = &pgdat->lruvec; in mem_cgroup_page_lruvec()
1138 memcg = page->mem_cgroup; in mem_cgroup_page_lruvec()
1140 * Swapcache readahead pages are added to the LRU - and in mem_cgroup_page_lruvec()
1141 * possibly migrated - before they are charged. in mem_cgroup_page_lruvec()
1147 lruvec = &mz->lruvec; in mem_cgroup_page_lruvec()
1151 * we have to be prepared to initialize lruvec->zone here; in mem_cgroup_page_lruvec()
1154 if (unlikely(lruvec->pgdat != pgdat)) in mem_cgroup_page_lruvec()
1155 lruvec->pgdat = pgdat; in mem_cgroup_page_lruvec()
1160 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1181 lru_size = &mz->lru_zone_size[zid][lru]; in mem_cgroup_update_lru_size()
1206 task_memcg = get_mem_cgroup_from_mm(p->mm); in task_in_mem_cgroup()
1216 css_get(&task_memcg->css); in task_in_mem_cgroup()
1220 css_put(&task_memcg->css); in task_in_mem_cgroup()
1225 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1235 unsigned long limit; in mem_cgroup_margin() local
1237 count = page_counter_read(&memcg->memory); in mem_cgroup_margin()
1238 limit = READ_ONCE(memcg->memory.max); in mem_cgroup_margin()
1239 if (count < limit) in mem_cgroup_margin()
1240 margin = limit - count; in mem_cgroup_margin()
1243 count = page_counter_read(&memcg->memsw); in mem_cgroup_margin()
1244 limit = READ_ONCE(memcg->memsw.max); in mem_cgroup_margin()
1245 if (count <= limit) in mem_cgroup_margin()
1246 margin = min(margin, limit - count); in mem_cgroup_margin()
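
mem_cgroup_margin() reduces to counter arithmetic: the chargeable headroom is limit minus usage, further clamped by the memory+swap counter when swap accounting is active. Restated directly from the lines above:

/* Chargeable space: pages that may still be charged before hitting the
 * tighter of the memory and memory+swap limits. */
static unsigned long margin(unsigned long mem_count, unsigned long mem_limit,
			    unsigned long memsw_count, unsigned long memsw_limit,
			    int do_memsw_account)
{
	unsigned long m = 0;

	if (mem_count < mem_limit)
		m = mem_limit - mem_count;
	if (do_memsw_account && memsw_count <= memsw_limit) {
		unsigned long msw = memsw_limit - memsw_count;

		m = m < msw ? m : msw;
	}
	return m;
}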
1258 * moving cgroups. This is for waiting at high-memory pressure
1285 if (mc.moving_task && current != mc.moving_task) { in mem_cgroup_wait_acct_move()
1289 /* moving charge context might have finished. */ in mem_cgroup_wait_acct_move()
1321 #define K(x) ((x) << (PAGE_SHIFT-10))
1324 * @memcg: The memory cgroup that went over limit
1340 pr_cont(" killed as a result of limit of "); in mem_cgroup_print_oom_info()
1342 pr_info("Memory limit reached of cgroup "); in mem_cgroup_print_oom_info()
1345 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_info()
1350 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n", in mem_cgroup_print_oom_info()
1351 K((u64)page_counter_read(&memcg->memory)), in mem_cgroup_print_oom_info()
1352 K((u64)memcg->memory.max), memcg->memory.failcnt); in mem_cgroup_print_oom_info()
1353 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n", in mem_cgroup_print_oom_info()
1354 K((u64)page_counter_read(&memcg->memsw)), in mem_cgroup_print_oom_info()
1355 K((u64)memcg->memsw.max), memcg->memsw.failcnt); in mem_cgroup_print_oom_info()
1356 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n", in mem_cgroup_print_oom_info()
1357 K((u64)page_counter_read(&memcg->kmem)), in mem_cgroup_print_oom_info()
1358 K((u64)memcg->kmem.max), memcg->kmem.failcnt); in mem_cgroup_print_oom_info()
1362 pr_cont_cgroup_path(iter->css.cgroup); in mem_cgroup_print_oom_info()
1381 * Return the memory (and swap, if configured) limit for a memcg.
1387 max = memcg->memory.max; in mem_cgroup_get_max()
1392 memsw_max = memcg->memsw.max; in mem_cgroup_get_max()
1393 swap_max = memcg->swap.max; in mem_cgroup_get_max()
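
mem_cgroup_get_max() reads three ceilings: memory.max, memsw.max and swap.max. One plausible way they combine, mirroring the reads above, is memory plus usable swap capped by memory+swap; the kernel's exact clamping (for example against total system swap) may differ, so treat this only as a sketch:

static unsigned long combined_max(unsigned long memory_max,
				  unsigned long memsw_max,
				  unsigned long swap_max,
				  unsigned long total_swap_pages)
{
	/* Swap that could actually be used. */
	unsigned long swap = swap_max < total_swap_pages ? swap_max : total_swap_pages;
	unsigned long max = memory_max + swap;

	/* Never report more than the combined memory+swap limit allows. */
	return max < memsw_max ? max : memsw_max;
}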
1449 * Always updating the nodemask is not very good - even if we have an empty
1461 if (!atomic_read(&memcg->numainfo_events)) in mem_cgroup_may_update_nodemask()
1463 if (atomic_inc_return(&memcg->numainfo_updating) > 1) in mem_cgroup_may_update_nodemask()
1467 memcg->scan_nodes = node_states[N_MEMORY]; in mem_cgroup_may_update_nodemask()
1472 node_clear(nid, memcg->scan_nodes); in mem_cgroup_may_update_nodemask()
1475 atomic_set(&memcg->numainfo_events, 0); in mem_cgroup_may_update_nodemask()
1476 atomic_set(&memcg->numainfo_updating, 0); in mem_cgroup_may_update_nodemask()
1482 * memory reclaim from current node, there are pros. and cons.
1484 * Freeing memory from current node means freeing memory from a node which
1489 * Now, we use round-robin. Better algorithm is welcomed.
1496 node = memcg->last_scanned_node; in mem_cgroup_select_victim_node()
1498 node = next_node_in(node, memcg->scan_nodes); in mem_cgroup_select_victim_node()
1502 * Fallback to the current node in that case for simplicity. in mem_cgroup_select_victim_node()
1507 memcg->last_scanned_node = node; in mem_cgroup_select_victim_node()
1577 * Check OOM-Killer is already running under our hierarchy.
1587 if (iter->oom_lock) { in mem_cgroup_oom_trylock()
1596 iter->oom_lock = true; in mem_cgroup_oom_trylock()
1609 iter->oom_lock = false; in mem_cgroup_oom_trylock()
1626 iter->oom_lock = false; in mem_cgroup_oom_unlock()
1636 iter->under_oom++; in mem_cgroup_mark_under_oom()
1650 if (iter->under_oom > 0) in mem_cgroup_unmark_under_oom()
1651 iter->under_oom--; in mem_cgroup_unmark_under_oom()
1670 oom_wait_memcg = oom_wait_info->memcg; in memcg_oom_wake_function()
1681 * For the following lockless ->under_oom test, the only required in memcg_oom_recover()
1688 if (memcg && memcg->under_oom) in memcg_oom_recover()
1708 * We are in the middle of the charge context here, so we in mem_cgroup_oom()
1713 * handling until the charge can succeed; remember the context and put in mem_cgroup_oom()
1717 * On the other hand, in-kernel OOM killer allows for an async victim in mem_cgroup_oom()
1723 * victim and then we have to bail out from the charge path. in mem_cgroup_oom()
1725 if (memcg->oom_kill_disable) { in mem_cgroup_oom()
1726 if (!current->in_user_fault) in mem_cgroup_oom()
1728 css_get(&memcg->css); in mem_cgroup_oom()
1729 current->memcg_in_oom = memcg; in mem_cgroup_oom()
1730 current->memcg_oom_gfp_mask = mask; in mem_cgroup_oom()
1731 current->memcg_oom_order = order; in mem_cgroup_oom()
1756 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1764 * situation. Sleeping directly in the charge context with all kinds
1774 struct mem_cgroup *memcg = current->memcg_in_oom; in mem_cgroup_oom_synchronize()
1788 owait.wait.private = current; in mem_cgroup_oom_synchronize()
1799 if (locked && !memcg->oom_kill_disable) { in mem_cgroup_oom_synchronize()
1802 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask, in mem_cgroup_oom_synchronize()
1803 current->memcg_oom_order); in mem_cgroup_oom_synchronize()
1813 * There is no guarantee that an OOM-lock contender in mem_cgroup_oom_synchronize()
1820 current->memcg_in_oom = NULL; in mem_cgroup_oom_synchronize()
1821 css_put(&memcg->css); in mem_cgroup_oom_synchronize()
1826 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
1828 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
1831 * by killing all belonging OOM-killable tasks.
1833 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
1856 * highest-level memory cgroup with oom.group set. in mem_cgroup_get_oom_group()
1859 if (memcg->oom_group) in mem_cgroup_get_oom_group()
1867 css_get(&oom_group->css); in mem_cgroup_get_oom_group()
1877 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_group()
1882 * lock_page_memcg - lock a page->mem_cgroup binding
1899 * path can get away without acquiring the memcg->move_lock in lock_page_memcg()
1913 memcg = page->mem_cgroup; in lock_page_memcg()
1917 if (atomic_read(&memcg->moving_account) <= 0) in lock_page_memcg()
1920 spin_lock_irqsave(&memcg->move_lock, flags); in lock_page_memcg()
1921 if (memcg != page->mem_cgroup) { in lock_page_memcg()
1922 spin_unlock_irqrestore(&memcg->move_lock, flags); in lock_page_memcg()
1927 * When charge migration first begins, we can have locked and in lock_page_memcg()
1931 memcg->move_lock_task = current; in lock_page_memcg()
1932 memcg->move_lock_flags = flags; in lock_page_memcg()
1939 * __unlock_page_memcg - unlock and unpin a memcg
1946 if (memcg && memcg->move_lock_task == current) { in __unlock_page_memcg()
1947 unsigned long flags = memcg->move_lock_flags; in __unlock_page_memcg()
1949 memcg->move_lock_task = NULL; in __unlock_page_memcg()
1950 memcg->move_lock_flags = 0; in __unlock_page_memcg()
1952 spin_unlock_irqrestore(&memcg->move_lock, flags); in __unlock_page_memcg()
1959 * unlock_page_memcg - unlock a page->mem_cgroup binding
1964 __unlock_page_memcg(page->mem_cgroup); in unlock_page_memcg()
1979 * consume_stock: Try to consume stocked charge on this cpu.
1981 * @nr_pages: how many pages to charge.
1983 * The charges will only happen if @memcg matches the current cpu's memcg
2001 if (memcg == stock->cached && stock->nr_pages >= nr_pages) { in consume_stock()
2002 stock->nr_pages -= nr_pages; in consume_stock()
2016 struct mem_cgroup *old = stock->cached; in drain_stock()
2018 if (stock->nr_pages) { in drain_stock()
2019 page_counter_uncharge(&old->memory, stock->nr_pages); in drain_stock()
2021 page_counter_uncharge(&old->memsw, stock->nr_pages); in drain_stock()
2022 css_put_many(&old->css, stock->nr_pages); in drain_stock()
2023 stock->nr_pages = 0; in drain_stock()
2025 stock->cached = NULL; in drain_stock()
2041 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); in drain_local_stock()
2058 if (stock->cached != memcg) { /* reset if necessary */ in refill_stock()
2060 stock->cached = memcg; in refill_stock()
2062 stock->nr_pages += nr_pages; in refill_stock()
2064 if (stock->nr_pages > MEMCG_CHARGE_BATCH) in refill_stock()
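
consume_stock(), drain_stock() and refill_stock() above implement a small per-CPU cache of pre-charged pages: a charge is satisfied locally when the stock belongs to the same memcg, leftovers from a batched charge are parked in it, and the stock is flushed back to the page counters once it grows past the batch size. A single-threaded model of that bookkeeping (per-CPU placement and IRQ handling omitted; all names are illustrative):

#include <stdbool.h>
#include <stddef.h>

#define CHARGE_BATCH 64UL /* stand-in for MEMCG_CHARGE_BATCH */

struct stock {
	void *cached;              /* which group the stock belongs to */
	unsigned long nr_pages;    /* pre-charged pages held locally */
};

static void drain_stock(struct stock *s, void (*uncharge)(void *, unsigned long))
{
	if (s->nr_pages) {
		uncharge(s->cached, s->nr_pages); /* return pages to the counters */
		s->nr_pages = 0;
	}
	s->cached = NULL;
}

static bool consume_stock(struct stock *s, void *group, unsigned long nr_pages)
{
	if (nr_pages > CHARGE_BATCH)
		return false;                     /* too big for the fast path */
	if (s->cached == group && s->nr_pages >= nr_pages) {
		s->nr_pages -= nr_pages;
		return true;
	}
	return false;
}

static void refill_stock(struct stock *s, void *group, unsigned long nr_pages,
			 void (*uncharge)(void *, unsigned long))
{
	if (s->cached != group) {                 /* reset if it belongs elsewhere */
		drain_stock(s, uncharge);
		s->cached = group;
	}
	s->nr_pages += nr_pages;
	if (s->nr_pages > CHARGE_BATCH)           /* don't hoard too much */
		drain_stock(s, uncharge);
}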
2071 * Drains all per-CPU charge caches for given root_memcg resp. subtree
2082 * Notify other cpus that system-wide "drain" is running in drain_all_stock()
2085 * per-cpu data. CPU up doesn't touch memcg_stock at all. in drain_all_stock()
2092 memcg = stock->cached; in drain_all_stock()
2093 if (!memcg || !stock->nr_pages || !css_tryget(&memcg->css)) in drain_all_stock()
2096 css_put(&memcg->css); in drain_all_stock()
2099 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { in drain_all_stock()
2101 drain_local_stock(&stock->work); in drain_all_stock()
2103 schedule_work_on(cpu, &stock->work); in drain_all_stock()
2105 css_put(&memcg->css); in drain_all_stock()
2126 x = this_cpu_xchg(memcg->stat_cpu->count[i], 0); in memcg_hotplug_cpu_dead()
2128 atomic_long_add(x, &memcg->stat[i]); in memcg_hotplug_cpu_dead()
2137 x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0); in memcg_hotplug_cpu_dead()
2139 atomic_long_add(x, &pn->lruvec_stat[i]); in memcg_hotplug_cpu_dead()
2146 x = this_cpu_xchg(memcg->stat_cpu->events[i], 0); in memcg_hotplug_cpu_dead()
2148 atomic_long_add(x, &memcg->events[i]); in memcg_hotplug_cpu_dead()
2160 if (page_counter_read(&memcg->memory) <= memcg->high) in reclaim_high()
2177 * and reclaims memory over the high limit.
2181 unsigned int nr_pages = current->memcg_nr_pages_over_high; in mem_cgroup_handle_over_high()
2187 memcg = get_mem_cgroup_from_mm(current->mm); in mem_cgroup_handle_over_high()
2189 css_put(&memcg->css); in mem_cgroup_handle_over_high()
2190 current->memcg_nr_pages_over_high = 0; in mem_cgroup_handle_over_high()
2213 page_counter_try_charge(&memcg->memsw, batch, &counter)) { in try_charge()
2214 if (page_counter_try_charge(&memcg->memory, batch, &counter)) in try_charge()
2217 page_counter_uncharge(&memcg->memsw, batch); in try_charge()
2240 * memory shortage. Allow dying and OOM-killed tasks to in try_charge()
2251 * under the limit over triggering OOM kills in these cases. in try_charge()
2253 if (unlikely(current->flags & PF_MEMALLOC)) in try_charge()
2256 if (unlikely(task_in_memcg_oom(current))) in try_charge()
2279 * Even though the limit is exceeded at this point, reclaim in try_charge()
2280 * may have been able to free some pages. Retry the charge in try_charge()
2284 * unlikely to succeed so close to the limit, and we fall back in try_charge()
2290 * At task move, charge accounts can be doubly counted. So, it's in try_charge()
2296 if (nr_retries--) in try_charge()
2305 if (fatal_signal_pending(current)) in try_charge()
2312 * a forward progress or bypass the charge if the oom killer in try_charge()
2329 return -ENOMEM; in try_charge()
2333 * being freed very soon. Allow memory usage go over the limit in try_charge()
2336 page_counter_charge(&memcg->memory, nr_pages); in try_charge()
2338 page_counter_charge(&memcg->memsw, nr_pages); in try_charge()
2339 css_get_many(&memcg->css, nr_pages); in try_charge()
2344 css_get_many(&memcg->css, batch); in try_charge()
2346 refill_stock(memcg, batch - nr_pages); in try_charge()
2353 * not recorded as it most likely matches current's and won't in try_charge()
2354 * change in the meantime. As high limit is checked again before in try_charge()
2358 if (page_counter_read(&memcg->memory) > memcg->high) { in try_charge()
2361 schedule_work(&memcg->high_work); in try_charge()
2364 current->memcg_nr_pages_over_high += batch; in try_charge()
2365 set_notify_resume(current); in try_charge()
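
The try_charge() fragments above outline the slow path: try the page counters with a batch, fall back to direct reclaim with a bounded retry budget, let dying tasks force the charge, and finally record any overage above the high limit for deferred reclaim. A heavily simplified model of that control flow; the retry budget and the counter/reclaim stubs are assumptions, not kernel code:

#include <stdbool.h>
#include <errno.h>

#define MAX_RECLAIM_RETRIES 5 /* assumed retry budget */

/* Stubs standing in for page_counter_try_charge() and direct reclaim. */
static bool counter_try_charge(unsigned long *usage, unsigned long limit,
			       unsigned long nr_pages)
{
	if (*usage + nr_pages > limit)
		return false;
	*usage += nr_pages;
	return true;
}
static unsigned long reclaim_some(unsigned long nr_pages) { return nr_pages / 2; }

static int try_charge_model(unsigned long *usage, unsigned long limit,
			    unsigned long nr_pages, bool task_is_dying)
{
	int retries = MAX_RECLAIM_RETRIES;

	while (!counter_try_charge(usage, limit, nr_pages)) {
		if (task_is_dying) {        /* force the charge so it can exit */
			*usage += nr_pages;
			return 0;
		}
		if (!reclaim_some(nr_pages) || !retries--)
			return -ENOMEM;     /* the kernel would try OOM here */
	}
	return 0;
}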
2378 page_counter_uncharge(&memcg->memory, nr_pages); in cancel_charge()
2380 page_counter_uncharge(&memcg->memsw, nr_pages); in cancel_charge()
2382 css_put_many(&memcg->css, nr_pages); in cancel_charge()
2393 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat); in lock_page_lru()
2408 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat); in unlock_page_lru()
2421 VM_BUG_ON_PAGE(page->mem_cgroup, page); in commit_charge()
2424 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page in commit_charge()
2432 * page->mem_cgroup at this point: in commit_charge()
2434 * - the page is uncharged in commit_charge()
2436 * - the page is off-LRU in commit_charge()
2438 * - an anonymous fault has exclusive page access, except for in commit_charge()
2441 * - a page cache insertion, a swapin fault, or a migration in commit_charge()
2444 page->mem_cgroup = memcg; in commit_charge()
2506 struct mem_cgroup *memcg = cw->memcg; in memcg_kmem_cache_create_func()
2507 struct kmem_cache *cachep = cw->cachep; in memcg_kmem_cache_create_func()
2511 css_put(&memcg->css); in memcg_kmem_cache_create_func()
2516 * Enqueue the creation of a per-memcg kmem_cache.
2527 css_get(&memcg->css); in __memcg_schedule_kmem_cache_create()
2529 cw->memcg = memcg; in __memcg_schedule_kmem_cache_create()
2530 cw->cachep = cachep; in __memcg_schedule_kmem_cache_create()
2531 INIT_WORK(&cw->work, memcg_kmem_cache_create_func); in __memcg_schedule_kmem_cache_create()
2533 queue_work(memcg_kmem_cache_wq, &cw->work); in __memcg_schedule_kmem_cache_create()
2550 current->memcg_kmem_skip_account = 1; in memcg_schedule_kmem_cache_create()
2552 current->memcg_kmem_skip_account = 0; in memcg_schedule_kmem_cache_create()
2557 if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD)) in memcg_kmem_bypass()
2563 * memcg_kmem_get_cache: select the correct per-memcg cache for allocation
2567 * We try to use the current memcg's version of the cache.
2570 * create it asynchronously in a workqueue and let the current allocation
2589 if (current->memcg_kmem_skip_account) in memcg_kmem_get_cache()
2593 kmemcg_id = READ_ONCE(memcg->kmemcg_id); in memcg_kmem_get_cache()
2615 css_put(&memcg->css); in memcg_kmem_get_cache()
2626 css_put(&cachep->memcg_params.memcg->css); in memcg_kmem_put_cache()
2630 * memcg_kmem_charge_memcg: charge a kmem page
2631 * @page: page to charge
2634 * @memcg: memory cgroup to charge
2650 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) { in memcg_kmem_charge_memcg()
2658 page_counter_charge(&memcg->kmem, nr_pages); in memcg_kmem_charge_memcg()
2662 return -ENOMEM; in memcg_kmem_charge_memcg()
2665 page->mem_cgroup = memcg; in memcg_kmem_charge_memcg()
2671 * memcg_kmem_charge: charge a kmem page to the current memory cgroup
2672 * @page: page to charge
2692 css_put(&memcg->css); in memcg_kmem_charge()
2702 struct mem_cgroup *memcg = page->mem_cgroup; in memcg_kmem_uncharge()
2711 page_counter_uncharge(&memcg->kmem, nr_pages); in memcg_kmem_uncharge()
2713 page_counter_uncharge(&memcg->memory, nr_pages); in memcg_kmem_uncharge()
2715 page_counter_uncharge(&memcg->memsw, nr_pages); in memcg_kmem_uncharge()
2717 page->mem_cgroup = NULL; in memcg_kmem_uncharge()
2723 css_put_many(&memcg->css, nr_pages); in memcg_kmem_uncharge()
2741 head[i].mem_cgroup = head->mem_cgroup; in mem_cgroup_split_huge_fixup()
2743 __mod_memcg_state(head->mem_cgroup, MEMCG_RSS_HUGE, -HPAGE_PMD_NR); in mem_cgroup_split_huge_fixup()
2749 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2757 * Returns 0 on success, -EINVAL on failure.
2771 mod_memcg_state(from, MEMCG_SWAP, -1); in mem_cgroup_move_swap_account()
2775 return -EINVAL; in mem_cgroup_move_swap_account()
2781 return -EINVAL; in mem_cgroup_move_swap_account()
2794 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory; in mem_cgroup_resize_max()
2797 if (signal_pending(current)) { in mem_cgroup_resize_max()
2798 ret = -EINTR; in mem_cgroup_resize_max()
2804 * Make sure that the new limit (memsw or memory limit) doesn't in mem_cgroup_resize_max()
2807 limits_invariant = memsw ? max >= memcg->memory.max : in mem_cgroup_resize_max()
2808 max <= memcg->memsw.max; in mem_cgroup_resize_max()
2811 ret = -EINVAL; in mem_cgroup_resize_max()
2814 if (max > counter->max) in mem_cgroup_resize_max()
2830 ret = -EBUSY; in mem_cgroup_resize_max()
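
The limits_invariant check above encodes a single rule: the memory+swap limit can never be set below the plain memory limit, and conversely the memory limit can never exceed memory+swap. As a tiny predicate (names illustrative):

#include <stdbool.h>

/* When resizing memsw, the new value must be >= the current memory limit;
 * when resizing memory, the new value must be <= the current memsw limit. */
static bool limits_invariant(bool resizing_memsw, unsigned long new_max,
			     unsigned long memory_max, unsigned long memsw_max)
{
	return resizing_memsw ? new_max >= memory_max : new_max <= memsw_max;
}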
2856 mctz = soft_limit_tree_node(pgdat->node_id); in mem_cgroup_soft_limit_reclaim()
2861 * are acceptable as soft limit is best effort anyway. in mem_cgroup_soft_limit_reclaim()
2863 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root)) in mem_cgroup_soft_limit_reclaim()
2868 * keep exceeding their soft limit and putting the system under in mem_cgroup_soft_limit_reclaim()
2880 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat, in mem_cgroup_soft_limit_reclaim()
2884 spin_lock_irq(&mctz->lock); in mem_cgroup_soft_limit_reclaim()
2895 excess = soft_limit_excess(mz->memcg); in mem_cgroup_soft_limit_reclaim()
2906 spin_unlock_irq(&mctz->lock); in mem_cgroup_soft_limit_reclaim()
2907 css_put(&mz->memcg->css); in mem_cgroup_soft_limit_reclaim()
2920 css_put(&next_mz->memcg->css); in mem_cgroup_soft_limit_reclaim()
2935 ret = css_next_child(NULL, &memcg->css); in memcg_has_children()
2949 /* we call try-to-free pages to make this cgroup empty */ in mem_cgroup_force_empty()
2955 while (nr_retries && page_counter_read(&memcg->memory)) { in mem_cgroup_force_empty()
2958 if (signal_pending(current)) in mem_cgroup_force_empty()
2959 return -EINTR; in mem_cgroup_force_empty()
2964 nr_retries--; in mem_cgroup_force_empty()
2981 return -EINVAL; in mem_cgroup_force_empty_write()
2988 return mem_cgroup_from_css(css)->use_hierarchy; in mem_cgroup_hierarchy_read()
2996 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent); in mem_cgroup_hierarchy_write()
2998 if (memcg->use_hierarchy == val) in mem_cgroup_hierarchy_write()
3004 * occur, provided the current cgroup has no children. in mem_cgroup_hierarchy_write()
3009 if ((!parent_memcg || !parent_memcg->use_hierarchy) && in mem_cgroup_hierarchy_write()
3012 memcg->use_hierarchy = val; in mem_cgroup_hierarchy_write()
3014 retval = -EBUSY; in mem_cgroup_hierarchy_write()
3016 retval = -EINVAL; in mem_cgroup_hierarchy_write()
3038 for (i = 0; i < acc->stats_size; i++) in accumulate_memcg_tree()
3039 acc->stat[i] += memcg_page_state(mi, in accumulate_memcg_tree()
3040 acc->stats_array ? acc->stats_array[i] : i); in accumulate_memcg_tree()
3042 for (i = 0; i < acc->events_size; i++) in accumulate_memcg_tree()
3043 acc->events[i] += memcg_sum_events(mi, in accumulate_memcg_tree()
3044 acc->events_array ? acc->events_array[i] : i); in accumulate_memcg_tree()
3047 acc->lru_pages[i] += in accumulate_memcg_tree()
3067 val = page_counter_read(&memcg->memory); in mem_cgroup_usage()
3069 val = page_counter_read(&memcg->memsw); in mem_cgroup_usage()
3088 switch (MEMFILE_TYPE(cft->private)) { in mem_cgroup_read_u64()
3090 counter = &memcg->memory; in mem_cgroup_read_u64()
3093 counter = &memcg->memsw; in mem_cgroup_read_u64()
3096 counter = &memcg->kmem; in mem_cgroup_read_u64()
3099 counter = &memcg->tcpmem; in mem_cgroup_read_u64()
3105 switch (MEMFILE_ATTR(cft->private)) { in mem_cgroup_read_u64()
3107 if (counter == &memcg->memory) in mem_cgroup_read_u64()
3109 if (counter == &memcg->memsw) in mem_cgroup_read_u64()
3113 return (u64)counter->max * PAGE_SIZE; in mem_cgroup_read_u64()
3115 return (u64)counter->watermark * PAGE_SIZE; in mem_cgroup_read_u64()
3117 return counter->failcnt; in mem_cgroup_read_u64()
3119 return (u64)memcg->soft_limit * PAGE_SIZE; in mem_cgroup_read_u64()
3133 BUG_ON(memcg->kmemcg_id >= 0); in memcg_online_kmem()
3134 BUG_ON(memcg->kmem_state); in memcg_online_kmem()
3142 * A memory cgroup is considered kmem-online as soon as it gets in memcg_online_kmem()
3147 memcg->kmemcg_id = memcg_id; in memcg_online_kmem()
3148 memcg->kmem_state = KMEM_ONLINE; in memcg_online_kmem()
3149 INIT_LIST_HEAD(&memcg->kmem_caches); in memcg_online_kmem()
3160 if (memcg->kmem_state != KMEM_ONLINE) in memcg_offline_kmem()
3168 memcg->kmem_state = KMEM_ALLOCATED; in memcg_offline_kmem()
3172 kmemcg_id = memcg->kmemcg_id; in memcg_offline_kmem()
3184 * ordering is imposed by list_lru_node->lock taken by in memcg_offline_kmem()
3188 css_for_each_descendant_pre(css, &memcg->css) { in memcg_offline_kmem()
3190 BUG_ON(child->kmemcg_id != kmemcg_id); in memcg_offline_kmem()
3191 child->kmemcg_id = parent->kmemcg_id; in memcg_offline_kmem()
3192 if (!memcg->use_hierarchy) in memcg_offline_kmem()
3205 if (unlikely(memcg->kmem_state == KMEM_ONLINE)) in memcg_free_kmem()
3208 if (memcg->kmem_state == KMEM_ALLOCATED) { in memcg_free_kmem()
3211 WARN_ON(page_counter_read(&memcg->kmem)); in memcg_free_kmem()
3233 ret = page_counter_set_max(&memcg->kmem, max); in memcg_update_kmem_max()
3244 ret = page_counter_set_max(&memcg->tcpmem, max); in memcg_update_tcp_max()
3248 if (!memcg->tcpmem_active) { in memcg_update_tcp_max()
3266 memcg->tcpmem_active = true; in memcg_update_tcp_max()
3285 ret = page_counter_memparse(buf, "-1", &nr_pages); in mem_cgroup_write()
3289 switch (MEMFILE_ATTR(of_cft(of)->private)) { in mem_cgroup_write()
3291 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ in mem_cgroup_write()
3292 ret = -EINVAL; in mem_cgroup_write()
3295 switch (MEMFILE_TYPE(of_cft(of)->private)) { in mem_cgroup_write()
3311 memcg->soft_limit = nr_pages; in mem_cgroup_write()
3324 switch (MEMFILE_TYPE(of_cft(of)->private)) { in mem_cgroup_reset()
3326 counter = &memcg->memory; in mem_cgroup_reset()
3329 counter = &memcg->memsw; in mem_cgroup_reset()
3332 counter = &memcg->kmem; in mem_cgroup_reset()
3335 counter = &memcg->tcpmem; in mem_cgroup_reset()
3341 switch (MEMFILE_ATTR(of_cft(of)->private)) { in mem_cgroup_reset()
3346 counter->failcnt = 0; in mem_cgroup_reset()
3358 return mem_cgroup_from_css(css)->move_charge_at_immigrate; in mem_cgroup_move_charge_read()
3368 return -EINVAL; in mem_cgroup_move_charge_write()
3371 * No kind of locking is needed in here, because ->can_attach() will in mem_cgroup_move_charge_write()
3376 memcg->move_charge_at_immigrate = val; in mem_cgroup_move_charge_write()
3383 return -ENOSYS; in mem_cgroup_move_charge_write()
3407 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask); in memcg_numa_stat_show()
3408 seq_printf(m, "%s=%lu", stat->name, nr); in memcg_numa_stat_show()
3411 stat->lru_mask); in memcg_numa_stat_show()
3422 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask); in memcg_numa_stat_show()
3423 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr); in memcg_numa_stat_show()
3428 iter, nid, stat->lru_mask); in memcg_numa_stat_show()
3483 memory = min(memory, mi->memory.max); in memcg_stat_show()
3484 memsw = min(memsw, mi->memsw.max); in memcg_stat_show()
3523 mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id); in memcg_stat_show()
3524 rstat = &mz->lruvec.reclaim_stat; in memcg_stat_show()
3526 recent_rotated[0] += rstat->recent_rotated[0]; in memcg_stat_show()
3527 recent_rotated[1] += rstat->recent_rotated[1]; in memcg_stat_show()
3528 recent_scanned[0] += rstat->recent_scanned[0]; in memcg_stat_show()
3529 recent_scanned[1] += rstat->recent_scanned[1]; in memcg_stat_show()
3555 return -EINVAL; in mem_cgroup_swappiness_write()
3557 if (css->parent) in mem_cgroup_swappiness_write()
3558 memcg->swappiness = val; in mem_cgroup_swappiness_write()
3573 t = rcu_dereference(memcg->thresholds.primary); in __mem_cgroup_threshold()
3575 t = rcu_dereference(memcg->memsw_thresholds.primary); in __mem_cgroup_threshold()
3587 i = t->current_threshold; in __mem_cgroup_threshold()
3595 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) in __mem_cgroup_threshold()
3596 eventfd_signal(t->entries[i].eventfd, 1); in __mem_cgroup_threshold()
3607 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) in __mem_cgroup_threshold()
3608 eventfd_signal(t->entries[i].eventfd, 1); in __mem_cgroup_threshold()
3611 t->current_threshold = i - 1; in __mem_cgroup_threshold()
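
__mem_cgroup_threshold() walks a sorted threshold array in both directions from the cached current_threshold, signaling every eventfd whose threshold was crossed since the last check, then re-caches the index of the last threshold at or below the current usage. The same walk over a plain array, with eventfd_signal() replaced by a callback (names illustrative):

struct threshold {
	unsigned long value;
	void (*notify)(void);  /* stands in for eventfd_signal() */
};

static void check_thresholds(struct threshold *t, int size,
			     int *current_threshold, unsigned long usage)
{
	int i = *current_threshold;

	/* Thresholds we have dropped back below since the last check. */
	for (; i >= 0 && t[i].value > usage; i--)
		t[i].notify();
	i++;
	/* Thresholds we have climbed past since the last check. */
	for (; i < size && t[i].value <= usage; i++)
		t[i].notify();
	/* Cache the last threshold at or below usage (-1 if none). */
	*current_threshold = i - 1;
}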
3632 if (_a->threshold > _b->threshold) in compare_thresholds()
3635 if (_a->threshold < _b->threshold) in compare_thresholds()
3636 return -1; in compare_thresholds()
3647 list_for_each_entry(ev, &memcg->oom_notify, list) in mem_cgroup_oom_notify_cb()
3648 eventfd_signal(ev->eventfd, 1); in mem_cgroup_oom_notify_cb()
3671 ret = page_counter_memparse(args, "-1", &threshold); in __mem_cgroup_usage_register_event()
3675 mutex_lock(&memcg->thresholds_lock); in __mem_cgroup_usage_register_event()
3678 thresholds = &memcg->thresholds; in __mem_cgroup_usage_register_event()
3681 thresholds = &memcg->memsw_thresholds; in __mem_cgroup_usage_register_event()
3687 if (thresholds->primary) in __mem_cgroup_usage_register_event()
3690 size = thresholds->primary ? thresholds->primary->size + 1 : 1; in __mem_cgroup_usage_register_event()
3696 ret = -ENOMEM; in __mem_cgroup_usage_register_event()
3699 new->size = size; in __mem_cgroup_usage_register_event()
3702 if (thresholds->primary) { in __mem_cgroup_usage_register_event()
3703 memcpy(new->entries, thresholds->primary->entries, (size - 1) * in __mem_cgroup_usage_register_event()
3708 new->entries[size - 1].eventfd = eventfd; in __mem_cgroup_usage_register_event()
3709 new->entries[size - 1].threshold = threshold; in __mem_cgroup_usage_register_event()
3711 /* Sort thresholds. Registering of new threshold isn't time-critical */ in __mem_cgroup_usage_register_event()
3712 sort(new->entries, size, sizeof(struct mem_cgroup_threshold), in __mem_cgroup_usage_register_event()
3715 /* Find current threshold */ in __mem_cgroup_usage_register_event()
3716 new->current_threshold = -1; in __mem_cgroup_usage_register_event()
3718 if (new->entries[i].threshold <= usage) { in __mem_cgroup_usage_register_event()
3720 * new->current_threshold will not be used until in __mem_cgroup_usage_register_event()
3724 ++new->current_threshold; in __mem_cgroup_usage_register_event()
3730 kfree(thresholds->spare); in __mem_cgroup_usage_register_event()
3731 thresholds->spare = thresholds->primary; in __mem_cgroup_usage_register_event()
3733 rcu_assign_pointer(thresholds->primary, new); in __mem_cgroup_usage_register_event()
3739 mutex_unlock(&memcg->thresholds_lock); in __mem_cgroup_usage_register_event()
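
__mem_cgroup_usage_register_event() never edits the live array: it builds a copy with one extra slot, sorts it, recomputes current_threshold for the present usage, and swaps it in while keeping the old array as a spare for RCU readers and the later unregister path. A sketch of the copy, append, sort, and recompute step on a plain array (allocation failure handling and the RCU swap are simplified away; names illustrative):

#include <stdlib.h>
#include <string.h>

struct thr { unsigned long value; };

static int cmp_thr(const void *a, const void *b)
{
	unsigned long x = ((const struct thr *)a)->value;
	unsigned long y = ((const struct thr *)b)->value;

	return (x > y) - (x < y);
}

static struct thr *register_threshold(const struct thr *old, int old_size,
				      unsigned long value, unsigned long usage,
				      int *current_threshold, int *new_size)
{
	int size = old_size + 1;
	struct thr *new = calloc(size, sizeof(*new));

	if (!new)
		return NULL;
	if (old_size)
		memcpy(new, old, old_size * sizeof(*new));
	new[size - 1].value = value;
	qsort(new, size, sizeof(*new), cmp_thr);

	*current_threshold = -1;
	for (int i = 0; i < size && new[i].value <= usage; i++)
		(*current_threshold)++;
	*new_size = size;
	return new;   /* caller publishes this and keeps "old" as the spare */
}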
3764 mutex_lock(&memcg->thresholds_lock); in __mem_cgroup_usage_unregister_event()
3767 thresholds = &memcg->thresholds; in __mem_cgroup_usage_unregister_event()
3770 thresholds = &memcg->memsw_thresholds; in __mem_cgroup_usage_unregister_event()
3775 if (!thresholds->primary) in __mem_cgroup_usage_unregister_event()
3783 for (i = 0; i < thresholds->primary->size; i++) { in __mem_cgroup_usage_unregister_event()
3784 if (thresholds->primary->entries[i].eventfd != eventfd) in __mem_cgroup_usage_unregister_event()
3790 new = thresholds->spare; in __mem_cgroup_usage_unregister_event()
3803 new->size = size; in __mem_cgroup_usage_unregister_event()
3805 /* Copy thresholds and find current threshold */ in __mem_cgroup_usage_unregister_event()
3806 new->current_threshold = -1; in __mem_cgroup_usage_unregister_event()
3807 for (i = 0, j = 0; i < thresholds->primary->size; i++) { in __mem_cgroup_usage_unregister_event()
3808 if (thresholds->primary->entries[i].eventfd == eventfd) in __mem_cgroup_usage_unregister_event()
3811 new->entries[j] = thresholds->primary->entries[i]; in __mem_cgroup_usage_unregister_event()
3812 if (new->entries[j].threshold <= usage) { in __mem_cgroup_usage_unregister_event()
3814 * new->current_threshold will not be used in __mem_cgroup_usage_unregister_event()
3818 ++new->current_threshold; in __mem_cgroup_usage_unregister_event()
3825 thresholds->spare = thresholds->primary; in __mem_cgroup_usage_unregister_event()
3827 rcu_assign_pointer(thresholds->primary, new); in __mem_cgroup_usage_unregister_event()
3834 kfree(thresholds->spare); in __mem_cgroup_usage_unregister_event()
3835 thresholds->spare = NULL; in __mem_cgroup_usage_unregister_event()
3838 mutex_unlock(&memcg->thresholds_lock); in __mem_cgroup_usage_unregister_event()
3860 return -ENOMEM; in mem_cgroup_oom_register_event()
3864 event->eventfd = eventfd; in mem_cgroup_oom_register_event()
3865 list_add(&event->list, &memcg->oom_notify); in mem_cgroup_oom_register_event()
3868 if (memcg->under_oom) in mem_cgroup_oom_register_event()
3882 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { in mem_cgroup_oom_unregister_event()
3883 if (ev->eventfd == eventfd) { in mem_cgroup_oom_unregister_event()
3884 list_del(&ev->list); in mem_cgroup_oom_unregister_event()
3896 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); in mem_cgroup_oom_control_read()
3897 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); in mem_cgroup_oom_control_read()
3899 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); in mem_cgroup_oom_control_read()
3909 if (!css->parent || !((val == 0) || (val == 1))) in mem_cgroup_oom_control_write()
3910 return -EINVAL; in mem_cgroup_oom_control_write()
3912 memcg->oom_kill_disable = val; in mem_cgroup_oom_control_write()
3923 return wb_domain_init(&memcg->cgwb_domain, gfp); in memcg_wb_domain_init()
3928 wb_domain_exit(&memcg->cgwb_domain); in memcg_wb_domain_exit()
3933 wb_domain_size_changed(&memcg->cgwb_domain); in memcg_wb_domain_size_changed()
3938 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_wb_domain()
3940 if (!memcg->css.parent) in mem_cgroup_wb_domain()
3943 return &memcg->cgwb_domain; in mem_cgroup_wb_domain()
3952 long x = atomic_long_read(&memcg->stat[idx]); in memcg_exact_page_state()
3956 x += per_cpu_ptr(memcg->stat_cpu, cpu)->count[idx]; in memcg_exact_page_state()
3963 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3971 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
3974 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
3984 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_wb_stats()
3996 unsigned long ceiling = min(memcg->memory.max, memcg->high); in mem_cgroup_wb_stats()
3997 unsigned long used = page_counter_read(&memcg->memory); in mem_cgroup_wb_stats()
3999 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); in mem_cgroup_wb_stats()
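
The headroom above is min(max, high) minus usage, minimized across every ancestor so writeback never sees more room than the most constrained level allows. Folding one level into the running estimate looks like this:

/* headroom = min(headroom, min(max, high) - used), never negative. */
static unsigned long fold_headroom(unsigned long headroom, unsigned long max,
				   unsigned long high, unsigned long used)
{
	unsigned long ceiling = max < high ? max : high;
	unsigned long room = ceiling - (used < ceiling ? used : ceiling);

	return headroom < room ? headroom : room;
}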
4026 * This is way over-engineered. It tries to support fully configurable
4043 struct mem_cgroup *memcg = event->memcg; in memcg_event_remove()
4045 remove_wait_queue(event->wqh, &event->wait); in memcg_event_remove()
4047 event->unregister_event(memcg, event->eventfd); in memcg_event_remove()
4050 eventfd_signal(event->eventfd, 1); in memcg_event_remove()
4052 eventfd_ctx_put(event->eventfd); in memcg_event_remove()
4054 css_put(&memcg->css); in memcg_event_remove()
4060 * Called with wqh->lock held and interrupts disabled.
4067 struct mem_cgroup *memcg = event->memcg; in memcg_event_wake()
4077 * side will require wqh->lock via remove_wait_queue(), in memcg_event_wake()
4080 spin_lock(&memcg->event_list_lock); in memcg_event_wake()
4081 if (!list_empty(&event->list)) { in memcg_event_wake()
4082 list_del_init(&event->list); in memcg_event_wake()
4087 schedule_work(&event->remove); in memcg_event_wake()
4089 spin_unlock(&memcg->event_list_lock); in memcg_event_wake()
4101 event->wqh = wqh; in memcg_event_ptable_queue_proc()
4102 add_wait_queue(wqh, &event->wait); in memcg_event_ptable_queue_proc()
4131 return -EINVAL; in memcg_write_event_control()
4136 return -EINVAL; in memcg_write_event_control()
4141 return -ENOMEM; in memcg_write_event_control()
4143 event->memcg = memcg; in memcg_write_event_control()
4144 INIT_LIST_HEAD(&event->list); in memcg_write_event_control()
4145 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); in memcg_write_event_control()
4146 init_waitqueue_func_entry(&event->wait, memcg_event_wake); in memcg_write_event_control()
4147 INIT_WORK(&event->remove, memcg_event_remove); in memcg_write_event_control()
4151 ret = -EBADF; in memcg_write_event_control()
4155 event->eventfd = eventfd_ctx_fileget(efile.file); in memcg_write_event_control()
4156 if (IS_ERR(event->eventfd)) { in memcg_write_event_control()
4157 ret = PTR_ERR(event->eventfd); in memcg_write_event_control()
4163 ret = -EBADF; in memcg_write_event_control()
4181 name = cfile.file->f_path.dentry->d_name.name; in memcg_write_event_control()
4184 event->register_event = mem_cgroup_usage_register_event; in memcg_write_event_control()
4185 event->unregister_event = mem_cgroup_usage_unregister_event; in memcg_write_event_control()
4187 event->register_event = mem_cgroup_oom_register_event; in memcg_write_event_control()
4188 event->unregister_event = mem_cgroup_oom_unregister_event; in memcg_write_event_control()
4190 event->register_event = vmpressure_register_event; in memcg_write_event_control()
4191 event->unregister_event = vmpressure_unregister_event; in memcg_write_event_control()
4193 event->register_event = memsw_cgroup_usage_register_event; in memcg_write_event_control()
4194 event->unregister_event = memsw_cgroup_usage_unregister_event; in memcg_write_event_control()
4196 ret = -EINVAL; in memcg_write_event_control()
4205 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent, in memcg_write_event_control()
4207 ret = -EINVAL; in memcg_write_event_control()
4215 ret = event->register_event(memcg, event->eventfd, buf); in memcg_write_event_control()
4219 vfs_poll(efile.file, &event->pt); in memcg_write_event_control()
4221 spin_lock(&memcg->event_list_lock); in memcg_write_event_control()
4222 list_add(&event->list, &memcg->event_list); in memcg_write_event_control()
4223 spin_unlock(&memcg->event_list_lock); in memcg_write_event_control()
4235 eventfd_ctx_put(event->eventfd); in memcg_write_event_control()
4378 * Swap-out records and page cache shadow entries need to store memcg
4381 * memory-controlled cgroups to 64k.
4388 * even when there are much fewer than 64k cgroups - possibly none.
4390 * Maintain a private 16-bit ID space for memcg, and allow the ID to
4403 if (memcg->id.id > 0) { in mem_cgroup_id_remove()
4404 idr_remove(&mem_cgroup_idr, memcg->id.id); in mem_cgroup_id_remove()
4405 memcg->id.id = 0; in mem_cgroup_id_remove()
4411 VM_BUG_ON(atomic_read(&memcg->id.ref) <= 0); in mem_cgroup_id_get_many()
4412 atomic_add(n, &memcg->id.ref); in mem_cgroup_id_get_many()
4417 VM_BUG_ON(atomic_read(&memcg->id.ref) < n); in mem_cgroup_id_put_many()
4418 if (atomic_sub_and_test(n, &memcg->id.ref)) { in mem_cgroup_id_put_many()
4422 css_put(&memcg->css); in mem_cgroup_id_put_many()
4437 * mem_cgroup_from_id - look up a memcg from a memcg id
4461 tmp = -1; in alloc_mem_cgroup_per_node_info()
4466 pn->lruvec_stat_cpu = alloc_percpu(struct lruvec_stat); in alloc_mem_cgroup_per_node_info()
4467 if (!pn->lruvec_stat_cpu) { in alloc_mem_cgroup_per_node_info()
4472 lruvec_init(&pn->lruvec); in alloc_mem_cgroup_per_node_info()
4473 pn->usage_in_excess = 0; in alloc_mem_cgroup_per_node_info()
4474 pn->on_tree = false; in alloc_mem_cgroup_per_node_info()
4475 pn->memcg = memcg; in alloc_mem_cgroup_per_node_info()
4477 memcg->nodeinfo[node] = pn; in alloc_mem_cgroup_per_node_info()
4483 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; in free_mem_cgroup_per_node_info()
4488 free_percpu(pn->lruvec_stat_cpu); in free_mem_cgroup_per_node_info()
4498 free_percpu(memcg->stat_cpu); in __mem_cgroup_free()
4521 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, in mem_cgroup_alloc()
4524 if (memcg->id.id < 0) in mem_cgroup_alloc()
4527 memcg->stat_cpu = alloc_percpu(struct mem_cgroup_stat_cpu); in mem_cgroup_alloc()
4528 if (!memcg->stat_cpu) in mem_cgroup_alloc()
4538 INIT_WORK(&memcg->high_work, high_work_func); in mem_cgroup_alloc()
4539 memcg->last_scanned_node = MAX_NUMNODES; in mem_cgroup_alloc()
4540 INIT_LIST_HEAD(&memcg->oom_notify); in mem_cgroup_alloc()
4541 mutex_init(&memcg->thresholds_lock); in mem_cgroup_alloc()
4542 spin_lock_init(&memcg->move_lock); in mem_cgroup_alloc()
4543 vmpressure_init(&memcg->vmpressure); in mem_cgroup_alloc()
4544 INIT_LIST_HEAD(&memcg->event_list); in mem_cgroup_alloc()
4545 spin_lock_init(&memcg->event_list_lock); in mem_cgroup_alloc()
4546 memcg->socket_pressure = jiffies; in mem_cgroup_alloc()
4548 memcg->kmemcg_id = -1; in mem_cgroup_alloc()
4551 INIT_LIST_HEAD(&memcg->cgwb_list); in mem_cgroup_alloc()
4553 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); in mem_cgroup_alloc()
4566 long error = -ENOMEM; in mem_cgroup_css_alloc()
4572 memcg->high = PAGE_COUNTER_MAX; in mem_cgroup_css_alloc()
4573 memcg->soft_limit = PAGE_COUNTER_MAX; in mem_cgroup_css_alloc()
4575 memcg->swappiness = mem_cgroup_swappiness(parent); in mem_cgroup_css_alloc()
4576 memcg->oom_kill_disable = parent->oom_kill_disable; in mem_cgroup_css_alloc()
4578 if (parent && parent->use_hierarchy) { in mem_cgroup_css_alloc()
4579 memcg->use_hierarchy = true; in mem_cgroup_css_alloc()
4580 page_counter_init(&memcg->memory, &parent->memory); in mem_cgroup_css_alloc()
4581 page_counter_init(&memcg->swap, &parent->swap); in mem_cgroup_css_alloc()
4582 page_counter_init(&memcg->memsw, &parent->memsw); in mem_cgroup_css_alloc()
4583 page_counter_init(&memcg->kmem, &parent->kmem); in mem_cgroup_css_alloc()
4584 page_counter_init(&memcg->tcpmem, &parent->tcpmem); in mem_cgroup_css_alloc()
4586 page_counter_init(&memcg->memory, NULL); in mem_cgroup_css_alloc()
4587 page_counter_init(&memcg->swap, NULL); in mem_cgroup_css_alloc()
4588 page_counter_init(&memcg->memsw, NULL); in mem_cgroup_css_alloc()
4589 page_counter_init(&memcg->kmem, NULL); in mem_cgroup_css_alloc()
4590 page_counter_init(&memcg->tcpmem, NULL); in mem_cgroup_css_alloc()
4603 return &memcg->css; in mem_cgroup_css_alloc()
4613 return &memcg->css; in mem_cgroup_css_alloc()
4617 return ERR_PTR(-ENOMEM); in mem_cgroup_css_alloc()
4631 return -ENOMEM; in mem_cgroup_css_online()
4635 atomic_set(&memcg->id.ref, 1); in mem_cgroup_css_online()
4650 spin_lock(&memcg->event_list_lock); in mem_cgroup_css_offline()
4651 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { in mem_cgroup_css_offline()
4652 list_del_init(&event->list); in mem_cgroup_css_offline()
4653 schedule_work(&event->remove); in mem_cgroup_css_offline()
4655 spin_unlock(&memcg->event_list_lock); in mem_cgroup_css_offline()
4657 page_counter_set_min(&memcg->memory, 0); in mem_cgroup_css_offline()
4658 page_counter_set_low(&memcg->memory, 0); in mem_cgroup_css_offline()
4680 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active) in mem_cgroup_css_free()
4683 vmpressure_cleanup(&memcg->vmpressure); in mem_cgroup_css_free()
4684 cancel_work_sync(&memcg->high_work); in mem_cgroup_css_free()
4692 * mem_cgroup_css_reset - reset the states of a mem_cgroup
4701 * The current implementation only resets the essential configurations.
4708 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
4709 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
4710 page_counter_set_max(&memcg->memsw, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
4711 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
4712 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
4713 page_counter_set_min(&memcg->memory, 0); in mem_cgroup_css_reset()
4714 page_counter_set_low(&memcg->memory, 0); in mem_cgroup_css_reset()
4715 memcg->high = PAGE_COUNTER_MAX; in mem_cgroup_css_reset()
4716 memcg->soft_limit = PAGE_COUNTER_MAX; in mem_cgroup_css_reset()
4721 /* Handlers for move charge at task migration. */
4726 /* Try a single bulk charge without reclaim first, kswapd may wake */ in mem_cgroup_do_precharge()
4734 while (count--) { in mem_cgroup_do_precharge()
4811 entry->val = ent.val; in mc_handle_swap_pte()
4827 struct address_space *mapping; in mc_handle_file_pte() local
4830 if (!vma->vm_file) /* anonymous vma */ in mc_handle_file_pte()
4835 mapping = vma->vm_file->f_mapping; in mc_handle_file_pte()
4838 /* page is moved even if it's not RSS of this task(page-faulted). */ in mc_handle_file_pte()
4841 if (shmem_mapping(mapping)) { in mc_handle_file_pte()
4842 page = find_get_entry(mapping, pgoff); in mc_handle_file_pte()
4851 page = find_get_page(mapping, pgoff); in mc_handle_file_pte()
4853 page = find_get_page(mapping, pgoff); in mc_handle_file_pte()
4859 * mem_cgroup_move_account - move account of the page
4861 * @compound: charge the page as compound or small page
4867 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
4886 * page->mem_cgroup of its source page while we change it. in mem_cgroup_move_account()
4888 ret = -EBUSY; in mem_cgroup_move_account()
4892 ret = -EINVAL; in mem_cgroup_move_account()
4893 if (page->mem_cgroup != from) in mem_cgroup_move_account()
4898 spin_lock_irqsave(&from->move_lock, flags); in mem_cgroup_move_account()
4901 __mod_memcg_state(from, NR_FILE_MAPPED, -nr_pages); in mem_cgroup_move_account()
4906 * move_lock grabbed above and caller set from->moving_account, so in mem_cgroup_move_account()
4908 * So mapping should be stable for dirty pages. in mem_cgroup_move_account()
4911 struct address_space *mapping = page_mapping(page); in mem_cgroup_move_account() local
4913 if (mapping_cap_account_dirty(mapping)) { in mem_cgroup_move_account()
4914 __mod_memcg_state(from, NR_FILE_DIRTY, -nr_pages); in mem_cgroup_move_account()
4920 __mod_memcg_state(from, NR_WRITEBACK, -nr_pages); in mem_cgroup_move_account()
4925 * It is safe to change page->mem_cgroup here because the page in mem_cgroup_move_account()
4926 * is referenced, charged, and isolated - we can't race with in mem_cgroup_move_account()
4931 page->mem_cgroup = to; in mem_cgroup_move_account()
4932 spin_unlock_irqrestore(&from->move_lock, flags); in mem_cgroup_move_account()
4939 mem_cgroup_charge_statistics(from, page, compound, -nr_pages); in mem_cgroup_move_account()
4949 * get_mctgt_type - get target type of moving charge
4956 * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
4958 * move charge. if @target is not NULL, the page is stored in target->page
4961 * target for charge migration. if @target is not NULL, the entry is stored
4962 * in target->ent.
4965 * For now such a page is charged like a regular page would be, as for all in get_mctgt_type()
4996 if (page->mem_cgroup == mc.from) { in get_mctgt_type()
5002 target->page = page; in get_mctgt_type()
5009 * But we cannot move a tail-page in a THP. in get_mctgt_type()
5015 target->ent = ent; in get_mctgt_type()
5041 if (page->mem_cgroup == mc.from) { in get_mctgt_type_thp()
5045 target->page = page; in get_mctgt_type_thp()
5062 struct vm_area_struct *vma = walk->vma; in mem_cgroup_count_precharge_pte_range()
5081 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in mem_cgroup_count_precharge_pte_range()
5085 pte_unmap_unlock(pte - 1, ptl); in mem_cgroup_count_precharge_pte_range()
5099 down_read(&mm->mmap_sem); in mem_cgroup_count_precharge()
5100 walk_page_range(0, mm->highest_vm_end, in mem_cgroup_count_precharge()
5102 up_read(&mm->mmap_sem); in mem_cgroup_count_precharge()
5115 mc.moving_task = current; in mem_cgroup_precharge_mc()
5142 page_counter_uncharge(&mc.from->memsw, mc.moved_swap); in __mem_cgroup_clear_mc()
5147 * we charged both to->memory and to->memsw, so we in __mem_cgroup_clear_mc()
5148 * should uncharge to->memory. in __mem_cgroup_clear_mc()
5151 page_counter_uncharge(&mc.to->memory, mc.moved_swap); in __mem_cgroup_clear_mc()
5153 css_put_many(&mc.to->css, mc.moved_swap); in __mem_cgroup_clear_mc()
5191 /* charge immigration isn't supported on the default hierarchy */ in mem_cgroup_can_attach()
5196 * Multi-process migrations only happen on the default hierarchy in mem_cgroup_can_attach()
5197 * where charge immigration is not used. Perform charge in mem_cgroup_can_attach()
5212 * tunable will only affect upcoming migrations, not the current one. in mem_cgroup_can_attach()
5215 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); in mem_cgroup_can_attach()
5227 if (mm->owner == p) { in mem_cgroup_can_attach()
5262 struct vm_area_struct *vma = walk->vma; in mem_cgroup_move_charge_pte_range()
5281 mc.precharge -= HPAGE_PMD_NR; in mem_cgroup_move_charge_pte_range()
5291 mc.precharge -= HPAGE_PMD_NR; in mem_cgroup_move_charge_pte_range()
5303 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in mem_cgroup_move_charge_pte_range()
5322 * memcg. There should be somebody mapping the head. in mem_cgroup_move_charge_pte_range()
5330 mc.precharge--; in mem_cgroup_move_charge_pte_range()
5342 mc.precharge--; in mem_cgroup_move_charge_pte_range()
5352 pte_unmap_unlock(pte - 1, ptl); in mem_cgroup_move_charge_pte_range()
5358 * We try to charge one by one, but don't do any additional in mem_cgroup_move_charge_pte_range()
5359 * charges to mc.to if we have failed in charge once in attach() in mem_cgroup_move_charge_pte_range()
5381 * for already started RCU-only updates to finish. in mem_cgroup_move_charge()
5383 atomic_inc(&mc.from->moving_account); in mem_cgroup_move_charge()
5386 if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) { in mem_cgroup_move_charge()
5391 * to move enough charges, but moving charge is a best-effort in mem_cgroup_move_charge()
5400 * additional charge, the page walk just aborts. in mem_cgroup_move_charge()
5402 walk_page_range(0, mc.mm->highest_vm_end, &mem_cgroup_move_charge_walk); in mem_cgroup_move_charge()
5404 up_read(&mc.mm->mmap_sem); in mem_cgroup_move_charge()
5405 atomic_dec(&mc.from->moving_account); in mem_cgroup_move_charge()
5441 root_mem_cgroup->use_hierarchy = true; in mem_cgroup_bind()
5443 root_mem_cgroup->use_hierarchy = false; in mem_cgroup_bind()
5451 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; in memory_current_read()
5457 unsigned long min = READ_ONCE(memcg->memory.min); in memory_min_show()
5479 page_counter_set_min(&memcg->memory, min); in memory_min_write()
5487 unsigned long low = READ_ONCE(memcg->memory.low); in memory_low_show()
5509 page_counter_set_low(&memcg->memory, low); in memory_low_write()
5517 unsigned long high = READ_ONCE(memcg->high); in memory_high_show()
5540 memcg->high = high; in memory_high_write()
5542 nr_pages = page_counter_read(&memcg->memory); in memory_high_write()
5544 try_to_free_mem_cgroup_pages(memcg, nr_pages - high, in memory_high_write()
5554 unsigned long max = READ_ONCE(memcg->memory.max); in memory_max_show()
5578 xchg(&memcg->memory.max, max); in memory_max_write()
5581 unsigned long nr_pages = page_counter_read(&memcg->memory); in memory_max_write()
5586 if (signal_pending(current)) { in memory_max_write()
5587 err = -EINTR; in memory_max_write()
5598 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, in memory_max_write()
5600 nr_reclaims--; in memory_max_write()
5618 atomic_long_read(&memcg->memory_events[MEMCG_LOW])); in memory_events_show()
5620 atomic_long_read(&memcg->memory_events[MEMCG_HIGH])); in memory_events_show()
5622 atomic_long_read(&memcg->memory_events[MEMCG_MAX])); in memory_events_show()
5624 atomic_long_read(&memcg->memory_events[MEMCG_OOM])); in memory_events_show()
5626 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); in memory_events_show()
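
Editor's note: memory_events_show() above emits one "name count" pair per line (low, high, max, oom, oom_kill). A minimal reader, again plain userspace C with an assumed cgroup path, could look like this:

/* Hypothetical userspace sketch: parse memory.events for one cgroup.
 * File format per the seq_printf()-style output above: "<name> <count>\n". */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/fs/cgroup/example/memory.events", "r"); /* assumed path */
	char name[32];
	unsigned long count;

	if (!f) {
		perror("memory.events");
		return 1;
	}
	while (fscanf(f, "%31s %lu", name, &count) == 2)
		printf("%-8s -> %lu\n", name, count);
	fclose(f);
	return 0;
}
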
5642 * 1) generic big picture -> specifics and details in memory_stat_show()
5643 * 2) reflecting userspace activity -> reflecting kernel heuristics in memory_stat_show()
5645 * Current memory state: in memory_stat_show()
5712 seq_printf(m, "%d\n", memcg->oom_group); in memory_oom_group_show()
5725 return -EINVAL; in memory_oom_group_write()
5732 return -EINVAL; in memory_oom_group_write()
5734 memcg->oom_group = oom_group; in memory_oom_group_write()
5741 .name = "current",
5806 * mem_cgroup_protected - check if memory consumption is in the normal range
5807 * @root: the top ancestor of the sub-tree being checked
5811 * of a top-down tree iteration, not for isolated queries.
5827 * top-level memory cgroups), these two values are equal.
5834 	 * elow = min( memory.low, parent->elow * low_usage / siblings_low_usage ),
5837 	 *   where low_usage = memory.current if memory.current < memory.low, and 0 otherwise
5850 	 *   A (parent of B, C, D and E):   A/memory.low = 2G, A/memory.current = 6G
5852 	 *                                  B/memory.low = 3G  B/memory.current = 2G
5853 * C/memory.low = 1G C/memory.current = 2G
5854 * D/memory.low = 0 D/memory.current = 2G
5855 * E/memory.low = 10G E/memory.current = 0
5860 * A/memory.current = 2G
5862 * B/memory.current = 1.3G
5863 * C/memory.current = 0.6G
5864 * D/memory.current = 0
5865 * E/memory.current = 0
5870 * path for each memory cgroup top-down from the reclaim,
5873 * as memory.low is a best-effort mechanism.
5891 usage = page_counter_read(&memcg->memory); in mem_cgroup_protected()
5895 emin = memcg->memory.min; in mem_cgroup_protected()
5896 elow = memcg->memory.low; in mem_cgroup_protected()
5899 /* No parent means a non-hierarchical mode on v1 memcg */ in mem_cgroup_protected()
5906 parent_emin = READ_ONCE(parent->memory.emin); in mem_cgroup_protected()
5911 min_usage = min(usage, memcg->memory.min); in mem_cgroup_protected()
5913 &parent->memory.children_min_usage); in mem_cgroup_protected()
5920 parent_elow = READ_ONCE(parent->memory.elow); in mem_cgroup_protected()
5925 low_usage = min(usage, memcg->memory.low); in mem_cgroup_protected()
5927 &parent->memory.children_low_usage); in mem_cgroup_protected()
5935 memcg->memory.emin = emin; in mem_cgroup_protected()
5936 memcg->memory.elow = elow; in mem_cgroup_protected()
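
Editor's note: to make the proportional formula above concrete, here is a worked sketch (ordinary userspace C, not kernel code) that reproduces the documented example. With low_usage taken as min(usage, memory.low), as in the code lines quoted above, B and C end up protected to roughly 1.3G and 0.6G, matching the expected distribution in the comment.

/* Worked sketch of the effective-low calculation (values in GiB). */
#include <stdio.h>

static double min_d(double a, double b)
{
	return a < b ? a : b;
}

static double elow(double low, double usage,
		   double parent_elow, double siblings_low_usage)
{
	double low_usage = min_d(usage, low);	/* as in mem_cgroup_protected() */

	if (parent_elow == 0 || siblings_low_usage == 0)
		return min_d(low, parent_elow);
	return min_d(low, parent_elow * low_usage / siblings_low_usage);
}

int main(void)
{
	/* A has no protected parent, so its effective low is its own 2G. */
	double elow_A = 2.0;
	/* Sum of the children's low_usage: min(2,3) + min(2,1) + 0 + 0 = 3G. */
	double siblings = 3.0;

	printf("B: %.2fG\n", elow(3.0, 2.0, elow_A, siblings));	/* ~1.33G */
	printf("C: %.2fG\n", elow(1.0, 2.0, elow_A, siblings));	/* ~0.67G */
	printf("D: %.2fG\n", elow(0.0, 2.0, elow_A, siblings));	/* 0 */
	printf("E: %.2fG\n", elow(10.0, 0.0, elow_A, siblings));	/* 0 */
	return 0;
}
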
5947 * mem_cgroup_try_charge - try charging a page
5948 * @page: page to charge
5952 * @compound: charge the page as compound or small page
5954 * Try to charge @page to the memcg that @mm belongs to, reclaiming
5960 * After page->mapping has been set up, the caller must finalize the
5961 * charge with mem_cgroup_commit_charge(). Or abort the transaction
5977 * Every swap fault against a single page tries to charge the in mem_cgroup_try_charge()
5984 if (compound_head(page)->mem_cgroup) in mem_cgroup_try_charge()
5993 if (memcg && !css_tryget_online(&memcg->css)) in mem_cgroup_try_charge()
6004 css_put(&memcg->css); in mem_cgroup_try_charge()
6024 * mem_cgroup_commit_charge - commit a page charge
6025 * @page: page to charge
6026 * @memcg: memcg to charge the page to
6028 * @compound: charge the page as compound or small page
6030 * Finalize a charge transaction started by mem_cgroup_try_charge(),
6031 * after page->mapping has been set up. This must happen atomically
6045 VM_BUG_ON_PAGE(!page->mapping, page); in mem_cgroup_commit_charge()
6051 * Swap faults will attempt to charge the same page multiple in mem_cgroup_commit_charge()
6070 * memory+swap charge, drop the swap entry duplicate. in mem_cgroup_commit_charge()
6077 * mem_cgroup_cancel_charge - cancel a page charge
6078 * @page: page to charge
6079 * @memcg: memcg to charge the page to
6080 * @compound: charge the page as compound or small page
6082 * Cancel a charge transaction started by mem_cgroup_try_charge().
6092 * Swap faults will attempt to charge the same page multiple in mem_cgroup_cancel_charge()
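
Editor's note: the three kerneldoc blocks above describe one transaction: try the charge, publish the page (set up page->mapping), then either commit or cancel. Below is a hedged sketch of a caller, not taken from any real call site; install_page_somewhere() is a stand-in for whatever step publishes the page (adding it to the page cache, mapping it, etc.).

/* Illustrative caller of the charge transaction protocol documented above.
 * install_page_somewhere() is hypothetical; the memcontrol calls use the
 * interfaces declared in <linux/memcontrol.h> for this kernel generation. */
static int example_charge_page(struct page *page, struct mm_struct *mm,
			       struct address_space *mapping, pgoff_t index)
{
	struct mem_cgroup *memcg;
	int err;

	err = mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false);
	if (err)
		return err;

	err = install_page_somewhere(page, mapping, index); /* hypothetical */
	if (err) {
		/* page->mapping never got set up: abort the transaction. */
		mem_cgroup_cancel_charge(page, memcg, false);
		return err;
	}

	/* page->mapping is now set up: finalize the charge. */
	mem_cgroup_commit_charge(page, memcg, false, false);
	return 0;
}
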
6120 unsigned long nr_pages = ug->nr_anon + ug->nr_file + ug->nr_kmem; in uncharge_batch()
6123 if (!mem_cgroup_is_root(ug->memcg)) { in uncharge_batch()
6124 page_counter_uncharge(&ug->memcg->memory, nr_pages); in uncharge_batch()
6126 page_counter_uncharge(&ug->memcg->memsw, nr_pages); in uncharge_batch()
6127 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem) in uncharge_batch()
6128 page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem); in uncharge_batch()
6129 memcg_oom_recover(ug->memcg); in uncharge_batch()
6133 __mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon); in uncharge_batch()
6134 __mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file); in uncharge_batch()
6135 __mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge); in uncharge_batch()
6136 __mod_memcg_state(ug->memcg, NR_SHMEM, -ug->nr_shmem); in uncharge_batch()
6137 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); in uncharge_batch()
6138 __this_cpu_add(ug->memcg->stat_cpu->nr_page_events, nr_pages); in uncharge_batch()
6139 memcg_check_events(ug->memcg, ug->dummy_page); in uncharge_batch()
6142 if (!mem_cgroup_is_root(ug->memcg)) in uncharge_batch()
6143 css_put_many(&ug->memcg->css, nr_pages); in uncharge_batch()
6152 if (!page->mem_cgroup) in uncharge_page()
6157 * page->mem_cgroup at this point, we have fully in uncharge_page()
6161 if (ug->memcg != page->mem_cgroup) { in uncharge_page()
6162 if (ug->memcg) { in uncharge_page()
6166 ug->memcg = page->mem_cgroup; in uncharge_page()
6174 ug->nr_huge += nr_pages; in uncharge_page()
6177 ug->nr_anon += nr_pages; in uncharge_page()
6179 ug->nr_file += nr_pages; in uncharge_page()
6181 ug->nr_shmem += nr_pages; in uncharge_page()
6183 ug->pgpgout++; in uncharge_page()
6185 ug->nr_kmem += 1 << compound_order(page); in uncharge_page()
6189 ug->dummy_page = page; in uncharge_page()
6190 page->mem_cgroup = NULL; in uncharge_page()
6201 * Note that the list can be a single page->lru; hence the in uncharge_list()
6202 * do-while loop instead of a simple list_for_each_entry(). in uncharge_list()
6204 next = page_list->next; in uncharge_list()
6209 next = page->lru.next; in uncharge_list()
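
Editor's note: uncharge_page()/uncharge_list() above batch work by owner: pages are gathered into one struct as long as they belong to the same memcg, the batch is flushed whenever the owner changes, and once more at the end, so the page counters are touched once per run instead of once per page. A standalone toy version of that batching idea (plain C, nothing memcg-specific):

/* Toy illustration of the same-owner batching used by uncharge_list(). */
#include <stdio.h>

struct gather {
	int owner;		/* stands in for ug->memcg */
	unsigned long nr;	/* pages accumulated for that owner */
};

static void flush(struct gather *g)
{
	if (g->nr)
		printf("uncharge %lu pages from owner %d\n", g->nr, g->owner);
	g->nr = 0;
}

static void gather_page(struct gather *g, int owner, unsigned long nr)
{
	/* A page from a different owner ends the current batch. */
	if (g->nr && g->owner != owner)
		flush(g);
	g->owner = owner;
	g->nr += nr;
}

int main(void)
{
	struct gather g = { 0, 0 };
	int owners[] = { 1, 1, 1, 2, 2, 1 };

	for (unsigned int i = 0; i < sizeof(owners) / sizeof(owners[0]); i++)
		gather_page(&g, owners[i], 1);
	flush(&g);	/* final flush, as uncharge_list() does */
	return 0;
}
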
6219 * mem_cgroup_uncharge - uncharge a page
6232 /* Don't touch page->lru of any random page, pre-check: */ in mem_cgroup_uncharge()
6233 if (!page->mem_cgroup) in mem_cgroup_uncharge()
6242  * mem_cgroup_uncharge_list - uncharge a list of pages
6258 * mem_cgroup_migrate - charge a page's replacement
6262 * Charge @newpage as a replacement page for @oldpage. @oldpage will
6265 * Both pages must be locked, @newpage->mapping must be set up.
6284 if (newpage->mem_cgroup) in mem_cgroup_migrate()
6288 memcg = oldpage->mem_cgroup; in mem_cgroup_migrate()
6292 /* Force-charge the new page. The old one will be freed soon */ in mem_cgroup_migrate()
6296 page_counter_charge(&memcg->memory, nr_pages); in mem_cgroup_migrate()
6298 page_counter_charge(&memcg->memsw, nr_pages); in mem_cgroup_migrate()
6299 css_get_many(&memcg->css, nr_pages); in mem_cgroup_migrate()
6324 memcg = mem_cgroup_from_task(current); in mem_cgroup_sk_alloc()
6327 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active) in mem_cgroup_sk_alloc()
6329 if (css_tryget_online(&memcg->css)) in mem_cgroup_sk_alloc()
6330 sk->sk_memcg = memcg; in mem_cgroup_sk_alloc()
6337 if (sk->sk_memcg) in mem_cgroup_sk_free()
6338 css_put(&sk->sk_memcg->css); in mem_cgroup_sk_free()
6342 * mem_cgroup_charge_skmem - charge socket memory
6343 * @memcg: memcg to charge
6344 * @nr_pages: number of pages to charge
6346 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
6347 * @memcg's configured limit, %false if the charge had to be forced.
6356 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { in mem_cgroup_charge_skmem()
6357 memcg->tcpmem_pressure = 0; in mem_cgroup_charge_skmem()
6360 page_counter_charge(&memcg->tcpmem, nr_pages); in mem_cgroup_charge_skmem()
6361 memcg->tcpmem_pressure = 1; in mem_cgroup_charge_skmem()
6379 * mem_cgroup_uncharge_skmem - uncharge socket memory
6386 page_counter_uncharge(&memcg->tcpmem, nr_pages); in mem_cgroup_uncharge_skmem()
6390 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); in mem_cgroup_uncharge_skmem()
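
Editor's note: mem_cgroup_charge_skmem() returns true when the charge fit under the limit and false when it had to be forced, so callers charge first and back off (uncharging again) when told they are over the limit. A hedged sketch of that pattern, loosely modelled on the socket-memory handling in the networking core and not a verbatim caller:

/* Sketch only: how a caller might react to a forced socket-memory charge.
 * The surrounding function and its policy are assumptions. */
static bool example_charge_socket(struct sock *sk, unsigned int nr_pages)
{
	if (!sk->sk_memcg)
		return true;	/* no memcg accounting for this socket */

	if (mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages))
		return true;	/* charge fit within the configured limit */

	/* The charge was forced over the limit: give it back and let the
	 * caller enter its memory-pressure handling. */
	mem_cgroup_uncharge_skmem(sk->sk_memcg, nr_pages);
	return false;
}
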
6415 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
6438 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, in mem_cgroup_init()
6447 rtpn->rb_root = RB_ROOT; in mem_cgroup_init()
6448 rtpn->rb_rightmost = NULL; in mem_cgroup_init()
6449 spin_lock_init(&rtpn->lock); in mem_cgroup_init()
6460 while (!atomic_inc_not_zero(&memcg->id.ref)) { in mem_cgroup_id_get_online()
6477 * mem_cgroup_swapout - transfer a memsw charge to swap
6478 * @page: page whose memsw charge to transfer
6479 * @entry: swap entry to move the charge to
6481 * Transfer the memsw charge of @page to @entry.
6495 memcg = page->mem_cgroup; in mem_cgroup_swapout()
6503 * have an ID allocated to it anymore, charge the closest online in mem_cgroup_swapout()
6504 * ancestor for the swap instead and transfer the memory+swap charge. in mem_cgroup_swapout()
6510 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); in mem_cgroup_swapout()
6516 page->mem_cgroup = NULL; in mem_cgroup_swapout()
6519 page_counter_uncharge(&memcg->memory, nr_entries); in mem_cgroup_swapout()
6523 page_counter_charge(&swap_memcg->memsw, nr_entries); in mem_cgroup_swapout()
6524 page_counter_uncharge(&memcg->memsw, nr_entries); in mem_cgroup_swapout()
6529 * i_pages lock which is taken with interrupts-off. It is in mem_cgroup_swapout()
6531 	 * the only synchronisation we have for updating the per-CPU variables. in mem_cgroup_swapout()
6535 -nr_entries); in mem_cgroup_swapout()
6539 css_put_many(&memcg->css, nr_entries); in mem_cgroup_swapout()
6543 * mem_cgroup_try_charge_swap - try charging swap space for a page
6545 * @entry: swap entry to charge
6547 * Try to charge @page's memcg for the swap space at @entry.
6549 * Returns 0 on success, -ENOMEM on failure.
6561 memcg = page->mem_cgroup; in mem_cgroup_try_charge_swap()
6575 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { in mem_cgroup_try_charge_swap()
6579 return -ENOMEM; in mem_cgroup_try_charge_swap()
6584 mem_cgroup_id_get_many(memcg, nr_pages - 1); in mem_cgroup_try_charge_swap()
6593 * mem_cgroup_uncharge_swap - uncharge swap space
6611 page_counter_uncharge(&memcg->swap, nr_pages); in mem_cgroup_uncharge_swap()
6613 page_counter_uncharge(&memcg->memsw, nr_pages); in mem_cgroup_uncharge_swap()
6615 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); in mem_cgroup_uncharge_swap()
6629 READ_ONCE(memcg->swap.max) - in mem_cgroup_get_nr_swap_pages()
6630 page_counter_read(&memcg->swap)); in mem_cgroup_get_nr_swap_pages()
6645 memcg = page->mem_cgroup; in mem_cgroup_swap_full()
6650 if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.max) in mem_cgroup_swap_full()
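
Editor's note: the two helpers above reduce to simple arithmetic: the remaining swap allowance is swap.max minus the current swap usage (taking the minimum across the hierarchy), and a cgroup is treated as swap-full once usage reaches half of swap.max. A tiny standalone illustration with made-up numbers:

/* Standalone illustration of the swap headroom / swap-full checks above.
 * Numbers are invented; units are pages. */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned long swap_max = 1024;
	unsigned long swap_cur = 600;

	unsigned long remaining = swap_max > swap_cur ? swap_max - swap_cur : 0;
	bool full = swap_cur * 2 >= swap_max;

	printf("remaining swap: %lu pages, considered full: %s\n",
	       remaining, full ? "yes" : "no");
	return 0;
}
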
6678 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE; in swap_current_read()
6684 unsigned long max = READ_ONCE(memcg->swap.max); in swap_max_show()
6706 xchg(&memcg->swap.max, max); in swap_max_write()
6716 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); in swap_events_show()
6718 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL])); in swap_events_show()
6725 .name = "swap.current",