Lines Matching +full:charge +full:- +full:current +full:- +full:limit +full:- +full:mapping

1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
19 * Charge lifetime sanitation
35 #include <linux/page-flags.h>
36 #include <linux/backing-dev.h>
107 * Cgroups above their limits are maintained in an RB-tree, independent of
199 * limit reclaim to prevent infinite loops, if they ever occur.
204 /* for encoding cft->private value on file */
236 return tsk_is_oom_victim(current) || fatal_signal_pending(current) || in task_is_dying()
237 (current->flags & PF_EXITING); in task_is_dying()
245 return &memcg->vmpressure; in memcg_to_vmpressure()
250 return &container_of(vmpr, struct mem_cgroup, vmpressure)->css; in vmpressure_to_css()
266 * objcg->nr_charged_bytes can't have an arbitrary byte value. in obj_cgroup_release()
270 * 1) CPU0: objcg == stock->cached_objcg in obj_cgroup_release()
275 * objcg->nr_charged_bytes = PAGE_SIZE - 92 in obj_cgroup_release()
277 * 92 bytes are added to stock->nr_bytes in obj_cgroup_release()
279 * 92 bytes are added to objcg->nr_charged_bytes in obj_cgroup_release()
284 nr_bytes = atomic_read(&objcg->nr_charged_bytes); in obj_cgroup_release()
285 WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1)); in obj_cgroup_release()
292 list_del(&objcg->list); in obj_cgroup_release()
309 ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0, in obj_cgroup_alloc()
315 INIT_LIST_HEAD(&objcg->list); in obj_cgroup_alloc()
324 objcg = rcu_replace_pointer(memcg->objcg, NULL, true); in memcg_reparent_objcgs()
329 xchg(&objcg->memcg, parent); in memcg_reparent_objcgs()
330 css_get(&parent->css); in memcg_reparent_objcgs()
331 list_add(&objcg->list, &parent->objcg_list); in memcg_reparent_objcgs()
334 list_for_each_entry(iter, &memcg->objcg_list, list) { in memcg_reparent_objcgs()
335 css_get(&parent->css); in memcg_reparent_objcgs()
336 xchg(&iter->memcg, parent); in memcg_reparent_objcgs()
337 css_put(&memcg->css); in memcg_reparent_objcgs()
339 list_splice(&memcg->objcg_list, &parent->objcg_list); in memcg_reparent_objcgs()
343 percpu_ref_kill(&objcg->refcnt); in memcg_reparent_objcgs()
350 * but only a few kmem-limited. Or also, if we have, for instance, 200
351 * memcgs, and none but the 200th is kmem-limited, we'd have to have a
354 * The current size of the caches array is stored in memcg_nr_cache_ids. It
375 * the alloc/free process all the time. In a small machine, 4 kmem-limited
416 mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true); in memcg_expand_one_shrinker_map()
423 return -ENOMEM; in memcg_expand_one_shrinker_map()
426 memset(new->map, (int)0xff, old_size); in memcg_expand_one_shrinker_map()
427 memset((void *)new->map + old_size, 0, size - old_size); in memcg_expand_one_shrinker_map()
429 rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new); in memcg_expand_one_shrinker_map()
430 call_rcu(&old->rcu, memcg_free_shrinker_map_rcu); in memcg_expand_one_shrinker_map()
447 map = rcu_dereference_protected(pn->shrinker_map, true); in memcg_free_shrinker_maps()
450 rcu_assign_pointer(pn->shrinker_map, NULL); in memcg_free_shrinker_maps()
468 ret = -ENOMEM; in memcg_alloc_shrinker_maps()
471 rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map); in memcg_alloc_shrinker_maps()
514 map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map); in memcg_set_shrinker_bit()
517 set_bit(shrinker_id, map->map); in memcg_set_shrinker_bit()
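
The shrinker-map lines above (memcg_expand_one_shrinker_map through memcg_set_shrinker_bit) follow a grow-and-republish pattern: allocate a larger bitmap, conservatively set every pre-existing shrinker bit, zero the new tail, publish the new map, and free the old one once readers are done. Below is a single-threaded sketch of that pattern; the struct layout, names, and malloc/free calls are illustrative stand-ins for the kernel's kvmalloc and RCU-deferred free, not the real API.

#include <stdlib.h>
#include <string.h>

struct shrinker_map {
    size_t size;          /* bytes currently in map[] */
    unsigned long map[];  /* one bit per registered shrinker */
};

/* Grow the bitmap when more shrinkers get registered than map[] can hold. */
static struct shrinker_map *expand_map(struct shrinker_map *old, size_t new_size)
{
    size_t old_size = old ? old->size : 0;
    struct shrinker_map *new = malloc(sizeof(*new) + new_size);

    if (!new)
        return old;                 /* keep the old map on failure */

    new->size = new_size;
    /* Old shrinker slots: conservatively assume they may have objects. */
    memset(new->map, 0xff, old_size);
    /* Newly added slots start out clear. */
    memset((char *)new->map + old_size, 0, new_size - old_size);

    free(old);  /* kernel: rcu_assign_pointer() now, call_rcu() to free later */
    return new;
}

int main(void)
{
    struct shrinker_map *map = expand_map(NULL, 8);   /* room for 64 shrinker bits */

    map = expand_map(map, 16);  /* a new shrinker pushed the id range past 64 */
    free(map);
    return 0;
}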
523 * mem_cgroup_css_from_page - css of the memcg associated with a page
537 memcg = page->mem_cgroup; in mem_cgroup_css_from_page()
542 return &memcg->css; in mem_cgroup_css_from_page()
546 * page_cgroup_ino - return inode number of the memcg a page is charged to
564 memcg = page->mem_cgroup; in page_cgroup_ino()
575 while (memcg && !(memcg->css.flags & CSS_ONLINE)) in page_cgroup_ino()
578 ino = cgroup_ino(memcg->css.cgroup); in page_cgroup_ino()
588 return memcg->nodeinfo[nid]; in mem_cgroup_page_nodeinfo()
609 struct rb_node **p = &mctz->rb_root.rb_node; in __mem_cgroup_insert_exceeded()
614 if (mz->on_tree) in __mem_cgroup_insert_exceeded()
617 mz->usage_in_excess = new_usage_in_excess; in __mem_cgroup_insert_exceeded()
618 if (!mz->usage_in_excess) in __mem_cgroup_insert_exceeded()
624 if (mz->usage_in_excess < mz_node->usage_in_excess) { in __mem_cgroup_insert_exceeded()
625 p = &(*p)->rb_left; in __mem_cgroup_insert_exceeded()
631 * limit by the same amount in __mem_cgroup_insert_exceeded()
633 else if (mz->usage_in_excess >= mz_node->usage_in_excess) in __mem_cgroup_insert_exceeded()
634 p = &(*p)->rb_right; in __mem_cgroup_insert_exceeded()
638 mctz->rb_rightmost = &mz->tree_node; in __mem_cgroup_insert_exceeded()
640 rb_link_node(&mz->tree_node, parent, p); in __mem_cgroup_insert_exceeded()
641 rb_insert_color(&mz->tree_node, &mctz->rb_root); in __mem_cgroup_insert_exceeded()
642 mz->on_tree = true; in __mem_cgroup_insert_exceeded()
648 if (!mz->on_tree) in __mem_cgroup_remove_exceeded()
651 if (&mz->tree_node == mctz->rb_rightmost) in __mem_cgroup_remove_exceeded()
652 mctz->rb_rightmost = rb_prev(&mz->tree_node); in __mem_cgroup_remove_exceeded()
654 rb_erase(&mz->tree_node, &mctz->rb_root); in __mem_cgroup_remove_exceeded()
655 mz->on_tree = false; in __mem_cgroup_remove_exceeded()
663 spin_lock_irqsave(&mctz->lock, flags); in mem_cgroup_remove_exceeded()
665 spin_unlock_irqrestore(&mctz->lock, flags); in mem_cgroup_remove_exceeded()
672 struct lruvec *lruvec = &mz->lruvec; in soft_limit_excess()
677 unsigned long nr_pages = page_counter_read(&memcg->memory); in soft_limit_excess()
679 unsigned long soft_limit = READ_ONCE(memcg->soft_limit); in soft_limit_excess()
683 excess = nr_pages - soft_limit; in soft_limit_excess()
705 * We have to update the tree if mz is on RB-tree or in mem_cgroup_update_tree()
708 if (excess || mz->on_tree) { in mem_cgroup_update_tree()
711 spin_lock_irqsave(&mctz->lock, flags); in mem_cgroup_update_tree()
712 /* if on-tree, remove it */ in mem_cgroup_update_tree()
713 if (mz->on_tree) in mem_cgroup_update_tree()
716 * Insert again. mz->usage_in_excess will be updated. in mem_cgroup_update_tree()
720 spin_unlock_irqrestore(&mctz->lock, flags); in mem_cgroup_update_tree()
746 if (!mctz->rb_rightmost) in __mem_cgroup_largest_soft_limit_node()
749 mz = rb_entry(mctz->rb_rightmost, in __mem_cgroup_largest_soft_limit_node()
757 if (!soft_limit_excess(mz->memcg) || in __mem_cgroup_largest_soft_limit_node()
758 !css_tryget(&mz->memcg->css)) in __mem_cgroup_largest_soft_limit_node()
769 spin_lock_irq(&mctz->lock); in mem_cgroup_largest_soft_limit_node()
771 spin_unlock_irq(&mctz->lock); in mem_cgroup_largest_soft_limit_node()
776 * __mod_memcg_state - update cgroup memory statistics
778 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
791 x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]); in __mod_memcg_state()
799 __this_cpu_add(memcg->vmstats_local->stat[idx], x); in __mod_memcg_state()
801 atomic_long_add(x, &mi->vmstats[idx]); in __mod_memcg_state()
804 __this_cpu_write(memcg->vmstats_percpu->stat[idx], x); in __mod_memcg_state()
812 parent = parent_mem_cgroup(pn->memcg); in parent_nodeinfo()
826 memcg = pn->memcg; in __mod_memcg_lruvec_state()
832 __this_cpu_add(pn->lruvec_stat_local->count[idx], val); in __mod_memcg_lruvec_state()
837 x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]); in __mod_memcg_lruvec_state()
842 for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id)) in __mod_memcg_lruvec_state()
843 atomic_long_add(x, &pi->lruvec_stat[idx]); in __mod_memcg_lruvec_state()
846 __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x); in __mod_memcg_lruvec_state()
850 * __mod_lruvec_state - update lruvec memory statistics
857 * change of state at this level: per-node, per-cgroup, per-lruvec.
887 * when we free the slab object, we need to update the per-memcg in __mod_lruvec_slab_state()
911 * __count_memcg_events - account VM events in a cgroup
928 x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]); in __count_memcg_events()
936 __this_cpu_add(memcg->vmstats_local->events[idx], x); in __count_memcg_events()
938 atomic_long_add(x, &mi->vmevents[idx]); in __count_memcg_events()
941 __this_cpu_write(memcg->vmstats_percpu->events[idx], x); in __count_memcg_events()
946 return atomic_long_read(&memcg->vmevents[event]); in memcg_events()
955 x += per_cpu(memcg->vmstats_local->events[event], cpu); in memcg_events_local()
968 nr_pages = -nr_pages; /* for event */ in mem_cgroup_charge_statistics()
971 __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages); in mem_cgroup_charge_statistics()
979 val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events); in mem_cgroup_event_ratelimit()
980 next = __this_cpu_read(memcg->vmstats_percpu->targets[target]); in mem_cgroup_event_ratelimit()
982 if ((long)(next - val) < 0) { in mem_cgroup_event_ratelimit()
993 __this_cpu_write(memcg->vmstats_percpu->targets[target], next); in mem_cgroup_event_ratelimit()
1005 /* threshold event is triggered in finer grain than soft limit */ in memcg_check_events()
1021 * mm_update_next_owner() may clear mm->owner to NULL in mem_cgroup_from_task()
1036 * Obtain a reference on mm->memcg and returns it if successful. Otherwise
1057 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); in get_mem_cgroup_from_mm()
1061 } while (!css_tryget(&memcg->css)); in get_mem_cgroup_from_mm()
1071 * Obtain a reference on page->memcg and returns it if successful. Otherwise
1076 struct mem_cgroup *memcg = page->mem_cgroup; in get_mem_cgroup_from_page()
1083 if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css))) in get_mem_cgroup_from_page()
1095 return current->active_memcg; in active_memcg()
1105 if (memcg && WARN_ON_ONCE(!css_tryget(&memcg->css))) in get_active_memcg()
1118 /* Memcg to charge can't be determined. */ in memcg_kmem_bypass()
1119 if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD)) in memcg_kmem_bypass()
1126 * If active memcg is set, do not fall back to current->mm->memcg.
1136 return get_mem_cgroup_from_mm(current->mm); in get_mem_cgroup_from_current()
1140 * mem_cgroup_iter - iterate over memory cgroup hierarchy
1146 * @root itself, or %NULL after a full round-trip.
1150 * to cancel a hierarchy walk before the round-trip is complete.
1174 if (!root->use_hierarchy && root != root_mem_cgroup) { in mem_cgroup_iter()
1185 mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id); in mem_cgroup_iter()
1186 iter = &mz->iter; in mem_cgroup_iter()
1188 if (prev && reclaim->generation != iter->generation) in mem_cgroup_iter()
1192 pos = READ_ONCE(iter->position); in mem_cgroup_iter()
1193 if (!pos || css_tryget(&pos->css)) in mem_cgroup_iter()
1196 * css reference reached zero, so iter->position will in mem_cgroup_iter()
1197 * be cleared by ->css_released. However, we should not in mem_cgroup_iter()
1198 * rely on this happening soon, because ->css_released in mem_cgroup_iter()
1199 * is called from a work queue, and by busy-waiting we in mem_cgroup_iter()
1200 * might block it. So we clear iter->position right in mem_cgroup_iter()
1203 (void)cmpxchg(&iter->position, pos, NULL); in mem_cgroup_iter()
1208 css = &pos->css; in mem_cgroup_iter()
1211 css = css_next_descendant_pre(css, &root->css); in mem_cgroup_iter()
1216 * the hierarchy - make sure they see at least in mem_cgroup_iter()
1231 if (css == &root->css) in mem_cgroup_iter()
1246 (void)cmpxchg(&iter->position, pos, memcg); in mem_cgroup_iter()
1249 css_put(&pos->css); in mem_cgroup_iter()
1252 iter->generation++; in mem_cgroup_iter()
1254 reclaim->generation = iter->generation; in mem_cgroup_iter()
1261 css_put(&prev->css); in mem_cgroup_iter()
1267 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1277 css_put(&prev->css); in mem_cgroup_iter_break()
1289 iter = &mz->iter; in __invalidate_reclaim_iterators()
1290 cmpxchg(&iter->position, dead_memcg, NULL); in __invalidate_reclaim_iterators()
1305 * When cgroup1 non-hierarchy mode is used, in invalidate_reclaim_iterators()
1316 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1322 * descendants and calls @fn for each task. If @fn returns a non-zero
1341 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it); in mem_cgroup_scan_tasks()
1358 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
1362 * This function relies on page->mem_cgroup being stable - see the
1372 lruvec = &pgdat->__lruvec; in mem_cgroup_page_lruvec()
1383 memcg = page->mem_cgroup; in mem_cgroup_page_lruvec()
1385 * Swapcache readahead pages are added to the LRU - and in mem_cgroup_page_lruvec()
1386 * possibly migrated - before they are charged. in mem_cgroup_page_lruvec()
1392 lruvec = &mz->lruvec; in mem_cgroup_page_lruvec()
1396 * we have to be prepared to initialize lruvec->zone here; in mem_cgroup_page_lruvec()
1399 if (unlikely(lruvec->pgdat != pgdat)) in mem_cgroup_page_lruvec()
1400 lruvec->pgdat = pgdat; in mem_cgroup_page_lruvec()
1405 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1430 lru_size = &mz->lru_zone_size[zid][lru]; in mem_cgroup_update_lru_size()
1448 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1458 unsigned long limit; in mem_cgroup_margin() local
1460 count = page_counter_read(&memcg->memory); in mem_cgroup_margin()
1461 limit = READ_ONCE(memcg->memory.max); in mem_cgroup_margin()
1462 if (count < limit) in mem_cgroup_margin()
1463 margin = limit - count; in mem_cgroup_margin()
1466 count = page_counter_read(&memcg->memsw); in mem_cgroup_margin()
1467 limit = READ_ONCE(memcg->memsw.max); in mem_cgroup_margin()
1468 if (count < limit) in mem_cgroup_margin()
1469 margin = min(margin, limit - count); in mem_cgroup_margin()
1481 * moving cgroups. This is for waiting at high-memory pressure
1508 if (mc.moving_task && current != mc.moving_task) { in mem_cgroup_wait_acct_move()
1512 /* moving charge context might have finished. */ in mem_cgroup_wait_acct_move()
1600 * 1) generic big picture -> specifics and details in memory_stat_format()
1601 * 2) reflecting userspace activity -> reflecting kernel heuristics in memory_stat_format()
1603 * Current memory state: in memory_stat_format()
1656 #define K(x) ((x) << (PAGE_SHIFT-10))
1660 * @memcg: The memory cgroup that went over limit
1672 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_context()
1685 * @memcg: The memory cgroup that went over limit
1691 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n", in mem_cgroup_print_oom_meminfo()
1692 K((u64)page_counter_read(&memcg->memory)), in mem_cgroup_print_oom_meminfo()
1693 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt); in mem_cgroup_print_oom_meminfo()
1695 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n", in mem_cgroup_print_oom_meminfo()
1696 K((u64)page_counter_read(&memcg->swap)), in mem_cgroup_print_oom_meminfo()
1697 K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt); in mem_cgroup_print_oom_meminfo()
1699 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n", in mem_cgroup_print_oom_meminfo()
1700 K((u64)page_counter_read(&memcg->memsw)), in mem_cgroup_print_oom_meminfo()
1701 K((u64)memcg->memsw.max), memcg->memsw.failcnt); in mem_cgroup_print_oom_meminfo()
1702 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n", in mem_cgroup_print_oom_meminfo()
1703 K((u64)page_counter_read(&memcg->kmem)), in mem_cgroup_print_oom_meminfo()
1704 K((u64)memcg->kmem.max), memcg->kmem.failcnt); in mem_cgroup_print_oom_meminfo()
1708 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_meminfo()
1718 * Return the memory (and swap, if configured) limit for a memcg.
1722 unsigned long max = READ_ONCE(memcg->memory.max); in mem_cgroup_get_max()
1726 max += min(READ_ONCE(memcg->swap.max), in mem_cgroup_get_max()
1730 /* Calculate swap excess capacity from memsw limit */ in mem_cgroup_get_max()
1731 unsigned long swap = READ_ONCE(memcg->memsw.max) - max; in mem_cgroup_get_max()
1741 return page_counter_read(&memcg->memory); in mem_cgroup_size()
1832 * Check OOM-Killer is already running under our hierarchy.
1842 if (iter->oom_lock) { in mem_cgroup_oom_trylock()
1851 iter->oom_lock = true; in mem_cgroup_oom_trylock()
1864 iter->oom_lock = false; in mem_cgroup_oom_trylock()
1881 iter->oom_lock = false; in mem_cgroup_oom_unlock()
1891 iter->under_oom++; in mem_cgroup_mark_under_oom()
1905 if (iter->under_oom > 0) in mem_cgroup_unmark_under_oom()
1906 iter->under_oom--; in mem_cgroup_unmark_under_oom()
1925 oom_wait_memcg = oom_wait_info->memcg; in memcg_oom_wake_function()
1936 * For the following lockless ->under_oom test, the only required in memcg_oom_recover()
1943 if (memcg && memcg->under_oom) in memcg_oom_recover()
1965 * We are in the middle of the charge context here, so we in mem_cgroup_oom()
1970 * handling until the charge can succeed; remember the context and put in mem_cgroup_oom()
1974 * On the other hand, in-kernel OOM killer allows for an async victim in mem_cgroup_oom()
1980 * victim and then we have to bail out from the charge path. in mem_cgroup_oom()
1982 if (memcg->oom_kill_disable) { in mem_cgroup_oom()
1983 if (!current->in_user_fault) in mem_cgroup_oom()
1985 css_get(&memcg->css); in mem_cgroup_oom()
1986 current->memcg_in_oom = memcg; in mem_cgroup_oom()
1987 current->memcg_oom_gfp_mask = mask; in mem_cgroup_oom()
1988 current->memcg_oom_order = order; in mem_cgroup_oom()
2013 * mem_cgroup_oom_synchronize - complete memcg OOM handling
2021 * situation. Sleeping directly in the charge context with all kinds
2031 struct mem_cgroup *memcg = current->memcg_in_oom; in mem_cgroup_oom_synchronize()
2045 owait.wait.private = current; in mem_cgroup_oom_synchronize()
2056 if (locked && !memcg->oom_kill_disable) { in mem_cgroup_oom_synchronize()
2059 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask, in mem_cgroup_oom_synchronize()
2060 current->memcg_oom_order); in mem_cgroup_oom_synchronize()
2070 * There is no guarantee that an OOM-lock contender in mem_cgroup_oom_synchronize()
2077 current->memcg_in_oom = NULL; in mem_cgroup_oom_synchronize()
2078 css_put(&memcg->css); in mem_cgroup_oom_synchronize()
2083 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
2085 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
2088 * by killing all belonging OOM-killable tasks.
2090 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
2121 * highest-level memory cgroup with oom.group set. in mem_cgroup_get_oom_group()
2124 if (memcg->oom_group) in mem_cgroup_get_oom_group()
2132 css_get(&oom_group->css); in mem_cgroup_get_oom_group()
2142 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_group()
2147 * lock_page_memcg - lock a page->mem_cgroup binding
2165 * path can get away without acquiring the memcg->move_lock in lock_page_memcg()
2179 memcg = head->mem_cgroup; in lock_page_memcg()
2183 if (atomic_read(&memcg->moving_account) <= 0) in lock_page_memcg()
2186 spin_lock_irqsave(&memcg->move_lock, flags); in lock_page_memcg()
2187 if (memcg != head->mem_cgroup) { in lock_page_memcg()
2188 spin_unlock_irqrestore(&memcg->move_lock, flags); in lock_page_memcg()
2193 * When charge migration first begins, we can have locked and in lock_page_memcg()
2197 memcg->move_lock_task = current; in lock_page_memcg()
2198 memcg->move_lock_flags = flags; in lock_page_memcg()
2205 * __unlock_page_memcg - unlock and unpin a memcg
2212 if (memcg && memcg->move_lock_task == current) { in __unlock_page_memcg()
2213 unsigned long flags = memcg->move_lock_flags; in __unlock_page_memcg()
2215 memcg->move_lock_task = NULL; in __unlock_page_memcg()
2216 memcg->move_lock_flags = 0; in __unlock_page_memcg()
2218 spin_unlock_irqrestore(&memcg->move_lock, flags); in __unlock_page_memcg()
2225 * unlock_page_memcg - unlock a page->mem_cgroup binding
2232 __unlock_page_memcg(head->mem_cgroup); in unlock_page_memcg()
2269 * consume_stock: Try to consume stocked charge on this cpu.
2271 * @nr_pages: how many pages to charge.
2273 * The charges will only happen if @memcg matches the current cpu's memcg
2291 if (memcg == stock->cached && stock->nr_pages >= nr_pages) { in consume_stock()
2292 stock->nr_pages -= nr_pages; in consume_stock()
2306 struct mem_cgroup *old = stock->cached; in drain_stock()
2311 if (stock->nr_pages) { in drain_stock()
2312 page_counter_uncharge(&old->memory, stock->nr_pages); in drain_stock()
2314 page_counter_uncharge(&old->memsw, stock->nr_pages); in drain_stock()
2315 stock->nr_pages = 0; in drain_stock()
2318 css_put(&old->css); in drain_stock()
2319 stock->cached = NULL; in drain_stock()
2336 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); in drain_local_stock()
2353 if (stock->cached != memcg) { /* reset if necessary */ in refill_stock()
2355 css_get(&memcg->css); in refill_stock()
2356 stock->cached = memcg; in refill_stock()
2358 stock->nr_pages += nr_pages; in refill_stock()
2360 if (stock->nr_pages > MEMCG_CHARGE_BATCH) in refill_stock()
2367 * Drains all per-CPU charge caches for given root_memcg resp. subtree
2378 * Notify other cpus that system-wide "drain" is running in drain_all_stock()
2381 * per-cpu data. CPU up doesn't touch memcg_stock at all. in drain_all_stock()
2390 memcg = stock->cached; in drain_all_stock()
2391 if (memcg && stock->nr_pages && in drain_all_stock()
2399 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { in drain_all_stock()
2401 drain_local_stock(&stock->work); in drain_all_stock()
2403 schedule_work_on(cpu, &stock->work); in drain_all_stock()
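
The consume_stock()/refill_stock()/drain_stock() lines above implement per-CPU charge batching: each CPU keeps a small stash of pre-charged pages so that most charges never touch the shared page counters. Here is a rough user-space model of that idea, using a thread-local stash and an atomic counter in place of the per-CPU stock and the memcg page_counter; the batch size and names are made up for the example and do not reflect MEMCG_CHARGE_BATCH.

#include <stdatomic.h>
#include <stdio.h>

#define BATCH 64UL                          /* illustrative batch size */

static atomic_ulong shared_usage;           /* stand-in for the page_counter */
static _Thread_local unsigned long stock;   /* pre-charged pages on this thread */

static void charge(unsigned long nr_pages)
{
    if (stock >= nr_pages) {                /* consume_stock(): fast path */
        stock -= nr_pages;
        return;
    }
    /* Charge a whole batch up front and keep the surplus in the stock. */
    unsigned long batch = nr_pages > BATCH ? nr_pages : BATCH;

    atomic_fetch_add(&shared_usage, batch);
    stock += batch - nr_pages;              /* refill_stock() */
}

static void drain_stock(void)               /* e.g. before the thread goes idle */
{
    atomic_fetch_sub(&shared_usage, stock);
    stock = 0;
}

int main(void)
{
    for (int i = 0; i < 10; i++)
        charge(1);                          /* only the first charge hits shared_usage */
    printf("usage before drain: %lu\n", atomic_load(&shared_usage));
    drain_stock();
    printf("usage after drain:  %lu\n", atomic_load(&shared_usage));
    return 0;
}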
2425 x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0); in memcg_hotplug_cpu_dead()
2428 atomic_long_add(x, &memcg->vmstats[i]); in memcg_hotplug_cpu_dead()
2437 x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0); in memcg_hotplug_cpu_dead()
2440 atomic_long_add(x, &pn->lruvec_stat[i]); in memcg_hotplug_cpu_dead()
2448 x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0); in memcg_hotplug_cpu_dead()
2451 atomic_long_add(x, &memcg->vmevents[i]); in memcg_hotplug_cpu_dead()
2467 if (page_counter_read(&memcg->memory) <= in reclaim_high()
2468 READ_ONCE(memcg->memory.high)) in reclaim_high()
2503 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2505 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2510 * reasonable delay curve compared to precision-adjusted overage, not
2512 * limit penalises misbehaviour cgroups by slowing them down exponentially. For
2515 * +-------+------------------------+
2517 * +-------+------------------------+
2539 * +-------+------------------------+
2557 overage = usage - high; in calculate_overage()
2567 overage = calculate_overage(page_counter_read(&memcg->memory), in mem_find_max_overage()
2568 READ_ONCE(memcg->memory.high)); in mem_find_max_overage()
2581 overage = calculate_overage(page_counter_read(&memcg->swap), in swap_find_max_overage()
2582 READ_ONCE(memcg->swap.high)); in swap_find_max_overage()
2619 * N-sized allocations are throttled approximately the same as one in calculate_high_delay()
2620 * 4N-sized allocation. in calculate_high_delay()
2623 * larger the current charge batch is than that. in calculate_high_delay()
2630 * and reclaims memory over the high limit.
2637 unsigned int nr_pages = current->memcg_nr_pages_over_high; in mem_cgroup_handle_over_high()
2645 memcg = get_mem_cgroup_from_mm(current->mm); in mem_cgroup_handle_over_high()
2646 current->memcg_nr_pages_over_high = 0; in mem_cgroup_handle_over_high()
2693 if (nr_reclaimed || nr_retries--) { in mem_cgroup_handle_over_high()
2701 * need to account for any ill-begotten jiffies to pay them off later. in mem_cgroup_handle_over_high()
2708 css_put(&memcg->css); in mem_cgroup_handle_over_high()
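
The calculate_overage()/calculate_high_delay() fragments above describe how tasks that push a cgroup past memory.high are slowed down: the overage is expressed as a fixed-point fraction of the high limit and then squared, so small excursions cost little while large ones are penalised steeply, and the result is further scaled by the size of the current charge batch and clamped before the task sleeps in mem_cgroup_handle_over_high(). The sketch below models only the core curve; the two shift constants and the HZ value are assumptions chosen to match my reading of the source, not values taken from this listing.

#include <stdint.h>
#include <stdio.h>

#define DELAY_PRECISION_SHIFT 20    /* fixed-point precision (assumed) */
#define DELAY_SCALING_SHIFT   14    /* flattens the curve (assumed) */
#define HZ                    1000  /* so printed jiffies read as milliseconds */

/* Overage as a fixed-point fraction of the high limit. */
static uint64_t overage(uint64_t usage, uint64_t high)
{
    if (usage <= high)
        return 0;
    return ((usage - high) << DELAY_PRECISION_SHIFT) / (high ? high : 1);
}

/* Quadratic penalty: lenient near the limit, harsh far above it. */
static uint64_t penalty_jiffies(uint64_t ov)
{
    uint64_t p = ov * ov * HZ;

    p >>= DELAY_PRECISION_SHIFT;
    p >>= DELAY_SCALING_SHIFT;
    return p;
}

int main(void)
{
    const uint64_t high = 100000;   /* the memory.high setting, in pages */

    for (int pct = 100; pct <= 110; pct += 2) {
        uint64_t usage = high * pct / 100;

        printf("usage=%3d%% of high -> %llu jiffies\n", pct,
               (unsigned long long)penalty_jiffies(overage(usage, high)));
    }
    return 0;
}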
2732 page_counter_try_charge(&memcg->memsw, batch, &counter)) { in try_charge()
2733 if (page_counter_try_charge(&memcg->memory, batch, &counter)) in try_charge()
2736 page_counter_uncharge(&memcg->memsw, batch); in try_charge()
2761 * under the limit over triggering OOM kills in these cases. in try_charge()
2763 if (unlikely(current->flags & PF_MEMALLOC)) in try_charge()
2766 if (unlikely(task_in_memcg_oom(current))) in try_charge()
2791 * Even though the limit is exceeded at this point, reclaim in try_charge()
2792 * may have been able to free some pages. Retry the charge in try_charge()
2796 * unlikely to succeed so close to the limit, and we fall back in try_charge()
2802 * At task move, charge accounts can be doubly counted. So, it's in try_charge()
2808 if (nr_retries--) in try_charge()
2823 * a forward progress or bypass the charge if the oom killer in try_charge()
2835 return -ENOMEM; in try_charge()
2839 * being freed very soon. Allow memory usage go over the limit in try_charge()
2842 page_counter_charge(&memcg->memory, nr_pages); in try_charge()
2844 page_counter_charge(&memcg->memsw, nr_pages); in try_charge()
2850 refill_stock(memcg, batch - nr_pages); in try_charge()
2857 * not recorded as it most likely matches current's and won't in try_charge()
2858 * change in the meantime. As high limit is checked again before in try_charge()
2864 mem_high = page_counter_read(&memcg->memory) > in try_charge()
2865 READ_ONCE(memcg->memory.high); in try_charge()
2866 swap_high = page_counter_read(&memcg->swap) > in try_charge()
2867 READ_ONCE(memcg->swap.high); in try_charge()
2872 schedule_work(&memcg->high_work); in try_charge()
2884 * Target some best-effort fairness between the tasks, in try_charge()
2888 current->memcg_nr_pages_over_high += batch; in try_charge()
2889 set_notify_resume(current); in try_charge()
2903 page_counter_uncharge(&memcg->memory, nr_pages); in cancel_charge()
2905 page_counter_uncharge(&memcg->memsw, nr_pages); in cancel_charge()
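
try_charge() above leans on the page_counter protocol: a charge is applied to the cgroup's counter and every ancestor, and if any level would exceed its max the partial charges are rolled back and the failing counter is reported (which is why the memsw charge is undone when the memory charge fails). A minimal single-threaded model of that walk-up-and-roll-back protocol, with plain longs instead of the kernel's atomic page_counter and invented struct/function names:

#include <stdbool.h>
#include <stdio.h>

struct counter {
    long usage;
    long max;
    struct counter *parent;
};

/* Charge every level from c to the root, undoing everything on failure. */
static bool try_charge(struct counter *c, long nr, struct counter **failed)
{
    struct counter *it;

    for (it = c; it; it = it->parent) {
        if (it->usage + nr > it->max) {
            *failed = it;
            goto rollback;
        }
        it->usage += nr;
    }
    return true;

rollback:
    for (struct counter *undo = c; undo != it; undo = undo->parent)
        undo->usage -= nr;      /* uncharge the levels already charged */
    return false;
}

int main(void)
{
    struct counter root  = { .usage = 0, .max = 100, .parent = NULL };
    struct counter child = { .usage = 0, .max = 1000, .parent = &root };
    struct counter *failed = NULL;

    printf("charge 50: %s\n", try_charge(&child, 50, &failed) ? "ok" : "fail");

    bool ok = try_charge(&child, 80, &failed);  /* root would exceed its max */
    printf("charge 80: %s (root usage stays %ld)\n", ok ? "ok" : "fail", root.usage);
    return 0;
}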
2911 VM_BUG_ON_PAGE(page->mem_cgroup, page); in commit_charge()
2913 * Any of the following ensures page->mem_cgroup stability: in commit_charge()
2915 * - the page lock in commit_charge()
2916 * - LRU isolation in commit_charge()
2917 * - lock_page_memcg() in commit_charge()
2918 * - exclusive reference in commit_charge()
2920 page->mem_cgroup = memcg; in commit_charge()
2942 return -ENOMEM; in memcg_alloc_page_obj_cgroups()
2944 if (cmpxchg(&page->obj_cgroups, NULL, in memcg_alloc_page_obj_cgroups()
2969 * If page->mem_cgroup is set, it's either a simple mem_cgroup pointer in mem_cgroup_from_obj()
2972 * The page->mem_cgroup pointer can be asynchronously changed in mem_cgroup_from_obj()
2976 if (!page->mem_cgroup) in mem_cgroup_from_obj()
2980 * Slab objects are accounted individually, not per-page. in mem_cgroup_from_obj()
2982 * the page->obj_cgroups. in mem_cgroup_from_obj()
2988 off = obj_to_index(page->slab_cache, page, p); in mem_cgroup_from_obj()
2996 /* All other pages use page->mem_cgroup */ in mem_cgroup_from_obj()
2997 return page->mem_cgroup; in mem_cgroup_from_obj()
3012 memcg = mem_cgroup_from_task(current); in get_obj_cgroup_from_current()
3015 objcg = rcu_dereference(memcg->objcg); in get_obj_cgroup_from_current()
3069 * __memcg_kmem_charge: charge a number of kernel pages to a memcg
3070 * @memcg: memory cgroup to charge
3072 * @nr_pages: number of pages to charge
3087 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) { in __memcg_kmem_charge()
3095 page_counter_charge(&memcg->kmem, nr_pages); in __memcg_kmem_charge()
3099 return -ENOMEM; in __memcg_kmem_charge()
3112 page_counter_uncharge(&memcg->kmem, nr_pages); in __memcg_kmem_uncharge()
3118 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
3119 * @page: page to charge
3134 page->mem_cgroup = memcg; in __memcg_kmem_charge_page()
3138 css_put(&memcg->css); in __memcg_kmem_charge_page()
3150 struct mem_cgroup *memcg = page->mem_cgroup; in __memcg_kmem_uncharge_page()
3158 page->mem_cgroup = NULL; in __memcg_kmem_uncharge_page()
3159 css_put(&memcg->css); in __memcg_kmem_uncharge_page()
3175 if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) { in consume_obj_stock()
3176 stock->nr_bytes -= nr_bytes; in consume_obj_stock()
3187 struct obj_cgroup *old = stock->cached_objcg; in drain_obj_stock()
3192 if (stock->nr_bytes) { in drain_obj_stock()
3193 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT; in drain_obj_stock()
3194 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1); in drain_obj_stock()
3202 if (unlikely(!css_tryget(&memcg->css))) in drain_obj_stock()
3207 css_put(&memcg->css); in drain_obj_stock()
3211 * The leftover is flushed to the centralized per-memcg value. in drain_obj_stock()
3213 * to a per-cpu stock (probably, on an other CPU), see in drain_obj_stock()
3216 * How often it's flushed is a trade-off between the memory in drain_obj_stock()
3217 * limit enforcement accuracy and potential CPU contention, in drain_obj_stock()
3220 atomic_add(nr_bytes, &old->nr_charged_bytes); in drain_obj_stock()
3221 stock->nr_bytes = 0; in drain_obj_stock()
3225 stock->cached_objcg = NULL; in drain_obj_stock()
3233 if (stock->cached_objcg) { in obj_stock_flush_required()
3234 memcg = obj_cgroup_memcg(stock->cached_objcg); in obj_stock_flush_required()
3250 if (stock->cached_objcg != objcg) { /* reset if necessary */ in refill_obj_stock()
3253 stock->cached_objcg = objcg; in refill_obj_stock()
3254 stock->nr_bytes = atomic_xchg(&objcg->nr_charged_bytes, 0); in refill_obj_stock()
3256 stock->nr_bytes += nr_bytes; in refill_obj_stock()
3258 if (stock->nr_bytes > PAGE_SIZE) in refill_obj_stock()
3274 * In theory, memcg->nr_charged_bytes can have enough in obj_cgroup_charge()
3275 * pre-charged bytes to satisfy the allocation. However, in obj_cgroup_charge()
3276 * flushing memcg->nr_charged_bytes requires two atomic in obj_cgroup_charge()
3277 * operations, and memcg->nr_charged_bytes can't be big, in obj_cgroup_charge()
3279 * memcg->nr_charged_bytes will be flushed in in obj_cgroup_charge()
3286 if (unlikely(!css_tryget(&memcg->css))) in obj_cgroup_charge()
3291 nr_bytes = size & (PAGE_SIZE - 1); in obj_cgroup_charge()
3298 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes); in obj_cgroup_charge()
3300 css_put(&memcg->css); in obj_cgroup_charge()
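
obj_cgroup_charge() above shows how sub-page slab allocations are accounted: the requested size is rounded up to whole pages, the pages are charged, and the unused tail of the last page is parked in a per-CPU object stock (refill_obj_stock) so the next small allocation can be served byte-for-byte without touching the page counters. Below is a thread-local, single-counter sketch of that flow; the names and the stand-in counter are illustrative, and the kernel's locking, gfp handling and objcg lookup are omitted.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static _Thread_local unsigned long obj_stock_bytes; /* consume/refill_obj_stock */
static unsigned long charged_pages;                 /* stand-in for the page charge */

static void charge_object(unsigned long size)
{
    if (obj_stock_bytes >= size) {      /* served from the per-"CPU" stock */
        obj_stock_bytes -= size;
        return;
    }

    unsigned long nr_pages = size >> PAGE_SHIFT;
    unsigned long nr_bytes = size & (PAGE_SIZE - 1);

    if (nr_bytes) {
        nr_pages++;                               /* round up to whole pages */
        obj_stock_bytes += PAGE_SIZE - nr_bytes;  /* park the unused tail */
    }
    charged_pages += nr_pages;
}

int main(void)
{
    charge_object(192);     /* charges 1 page, leaves 3904 bytes in the stock */
    charge_object(256);     /* served entirely from the stock */
    printf("pages charged: %lu, bytes left in stock: %lu\n",
           charged_pages, obj_stock_bytes);
    return 0;
}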
3312 * Because head->mem_cgroup is not set on tails, set it now.
3316 struct mem_cgroup *memcg = head->mem_cgroup; in split_page_memcg()
3328 css_get_many(&memcg->css, nr - 1); in split_page_memcg()
3333 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3341 * Returns 0 on success, -EINVAL on failure.
3355 mod_memcg_state(from, MEMCG_SWAP, -1); in mem_cgroup_move_swap_account()
3359 return -EINVAL; in mem_cgroup_move_swap_account()
3365 return -EINVAL; in mem_cgroup_move_swap_account()
3378 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory; in mem_cgroup_resize_max()
3381 if (signal_pending(current)) { in mem_cgroup_resize_max()
3382 ret = -EINTR; in mem_cgroup_resize_max()
3388 * Make sure that the new limit (memsw or memory limit) doesn't in mem_cgroup_resize_max()
3391 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) : in mem_cgroup_resize_max()
3392 max <= memcg->memsw.max; in mem_cgroup_resize_max()
3395 ret = -EINVAL; in mem_cgroup_resize_max()
3398 if (max > counter->max) in mem_cgroup_resize_max()
3414 ret = -EBUSY; in mem_cgroup_resize_max()
3440 mctz = soft_limit_tree_node(pgdat->node_id); in mem_cgroup_soft_limit_reclaim()
3445 * are acceptable as soft limit is best effort anyway. in mem_cgroup_soft_limit_reclaim()
3447 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root)) in mem_cgroup_soft_limit_reclaim()
3452 * keep exceeding their soft limit and putting the system under in mem_cgroup_soft_limit_reclaim()
3464 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat, in mem_cgroup_soft_limit_reclaim()
3468 spin_lock_irq(&mctz->lock); in mem_cgroup_soft_limit_reclaim()
3479 excess = soft_limit_excess(mz->memcg); in mem_cgroup_soft_limit_reclaim()
3490 spin_unlock_irq(&mctz->lock); in mem_cgroup_soft_limit_reclaim()
3491 css_put(&mz->memcg->css); in mem_cgroup_soft_limit_reclaim()
3504 css_put(&next_mz->memcg->css); in mem_cgroup_soft_limit_reclaim()
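
mem_cgroup_soft_limit_reclaim() above only has work to do once groups actually exceed their soft limit; on cgroup v1 that limit is set through memory.soft_limit_in_bytes. A small example of configuring it, assuming a v1 memory controller mounted at /sys/fs/cgroup/memory and an already-created group called demo (both paths are assumptions for illustration):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const char *path = "/sys/fs/cgroup/memory/demo/memory.soft_limit_in_bytes";
    FILE *f = fopen(path, "w");

    if (!f) {
        perror("fopen");
        return EXIT_FAILURE;
    }
    /* 256 MiB: under global memory pressure, reclaim pushes this group
     * back toward its soft limit before squeezing well-behaved groups. */
    fprintf(f, "%llu\n", 256ULL << 20);
    fclose(f);
    return 0;
}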
3519 ret = css_next_child(NULL, &memcg->css); in memcg_has_children()
3533 /* we call try-to-free pages to make this cgroup empty */ in mem_cgroup_force_empty()
3539 while (nr_retries && page_counter_read(&memcg->memory)) { in mem_cgroup_force_empty()
3542 if (signal_pending(current)) in mem_cgroup_force_empty()
3543 return -EINTR; in mem_cgroup_force_empty()
3548 nr_retries--; in mem_cgroup_force_empty()
3565 return -EINVAL; in mem_cgroup_force_empty_write()
3572 return mem_cgroup_from_css(css)->use_hierarchy; in mem_cgroup_hierarchy_read()
3580 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent); in mem_cgroup_hierarchy_write()
3582 if (memcg->use_hierarchy == val) in mem_cgroup_hierarchy_write()
3588 * occur, provided the current cgroup has no children. in mem_cgroup_hierarchy_write()
3593 if ((!parent_memcg || !parent_memcg->use_hierarchy) && in mem_cgroup_hierarchy_write()
3596 memcg->use_hierarchy = val; in mem_cgroup_hierarchy_write()
3598 retval = -EBUSY; in mem_cgroup_hierarchy_write()
3600 retval = -EINVAL; in mem_cgroup_hierarchy_write()
3616 val = page_counter_read(&memcg->memory); in mem_cgroup_usage()
3618 val = page_counter_read(&memcg->memsw); in mem_cgroup_usage()
3637 switch (MEMFILE_TYPE(cft->private)) { in mem_cgroup_read_u64()
3639 counter = &memcg->memory; in mem_cgroup_read_u64()
3642 counter = &memcg->memsw; in mem_cgroup_read_u64()
3645 counter = &memcg->kmem; in mem_cgroup_read_u64()
3648 counter = &memcg->tcpmem; in mem_cgroup_read_u64()
3654 switch (MEMFILE_ATTR(cft->private)) { in mem_cgroup_read_u64()
3656 if (counter == &memcg->memory) in mem_cgroup_read_u64()
3658 if (counter == &memcg->memsw) in mem_cgroup_read_u64()
3662 return (u64)counter->max * PAGE_SIZE; in mem_cgroup_read_u64()
3664 return (u64)counter->watermark * PAGE_SIZE; in mem_cgroup_read_u64()
3666 return counter->failcnt; in mem_cgroup_read_u64()
3668 return (u64)memcg->soft_limit * PAGE_SIZE; in mem_cgroup_read_u64()
3682 stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu); in memcg_flush_percpu_vmstats()
3686 atomic_long_add(stat[i], &mi->vmstats[i]); in memcg_flush_percpu_vmstats()
3689 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; in memcg_flush_percpu_vmstats()
3698 pn->lruvec_stat_cpu->count[i], cpu); in memcg_flush_percpu_vmstats()
3702 atomic_long_add(stat[i], &pi->lruvec_stat[i]); in memcg_flush_percpu_vmstats()
3717 events[i] += per_cpu(memcg->vmstats_percpu->events[i], in memcg_flush_percpu_vmevents()
3722 atomic_long_add(events[i], &mi->vmevents[i]); in memcg_flush_percpu_vmevents()
3734 BUG_ON(memcg->kmemcg_id >= 0); in memcg_online_kmem()
3735 BUG_ON(memcg->kmem_state); in memcg_online_kmem()
3744 return -ENOMEM; in memcg_online_kmem()
3746 objcg->memcg = memcg; in memcg_online_kmem()
3747 rcu_assign_pointer(memcg->objcg, objcg); in memcg_online_kmem()
3752 * A memory cgroup is considered kmem-online as soon as it gets in memcg_online_kmem()
3757 memcg->kmemcg_id = memcg_id; in memcg_online_kmem()
3758 memcg->kmem_state = KMEM_ONLINE; in memcg_online_kmem()
3769 if (memcg->kmem_state != KMEM_ONLINE) in memcg_offline_kmem()
3772 memcg->kmem_state = KMEM_ALLOCATED; in memcg_offline_kmem()
3780 kmemcg_id = memcg->kmemcg_id; in memcg_offline_kmem()
3788 * ordering is imposed by list_lru_node->lock taken by in memcg_offline_kmem()
3792 css_for_each_descendant_pre(css, &memcg->css) { in memcg_offline_kmem()
3794 BUG_ON(child->kmemcg_id != kmemcg_id); in memcg_offline_kmem()
3795 child->kmemcg_id = parent->kmemcg_id; in memcg_offline_kmem()
3796 if (!memcg->use_hierarchy) in memcg_offline_kmem()
3809 if (unlikely(memcg->kmem_state == KMEM_ONLINE)) in memcg_free_kmem()
3831 ret = page_counter_set_max(&memcg->kmem, max); in memcg_update_kmem_max()
3842 ret = page_counter_set_max(&memcg->tcpmem, max); in memcg_update_tcp_max()
3846 if (!memcg->tcpmem_active) { in memcg_update_tcp_max()
3864 memcg->tcpmem_active = true; in memcg_update_tcp_max()
3883 ret = page_counter_memparse(buf, "-1", &nr_pages); in mem_cgroup_write()
3887 switch (MEMFILE_ATTR(of_cft(of)->private)) { in mem_cgroup_write()
3889 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ in mem_cgroup_write()
3890 ret = -EINVAL; in mem_cgroup_write()
3893 switch (MEMFILE_TYPE(of_cft(of)->private)) { in mem_cgroup_write()
3902 "Please report your usecase to linux-mm@kvack.org if you " in mem_cgroup_write()
3912 memcg->soft_limit = nr_pages; in mem_cgroup_write()
3925 switch (MEMFILE_TYPE(of_cft(of)->private)) { in mem_cgroup_reset()
3927 counter = &memcg->memory; in mem_cgroup_reset()
3930 counter = &memcg->memsw; in mem_cgroup_reset()
3933 counter = &memcg->kmem; in mem_cgroup_reset()
3936 counter = &memcg->tcpmem; in mem_cgroup_reset()
3942 switch (MEMFILE_ATTR(of_cft(of)->private)) { in mem_cgroup_reset()
3947 counter->failcnt = 0; in mem_cgroup_reset()
3959 return mem_cgroup_from_css(css)->move_charge_at_immigrate; in mem_cgroup_move_charge_read()
3969 "Please report your usecase to linux-mm@kvack.org if you " in mem_cgroup_move_charge_write()
3973 return -EINVAL; in mem_cgroup_move_charge_write()
3976 * No kind of locking is needed in here, because ->can_attach() will in mem_cgroup_move_charge_write()
3981 memcg->move_charge_at_immigrate = val; in mem_cgroup_move_charge_write()
3988 return -ENOSYS; in mem_cgroup_move_charge_write()
3996 #define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
4054 seq_printf(m, "%s=%lu", stat->name, in memcg_numa_stat_show()
4055 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, in memcg_numa_stat_show()
4060 stat->lru_mask, false)); in memcg_numa_stat_show()
4066 seq_printf(m, "hierarchical_%s=%lu", stat->name, in memcg_numa_stat_show()
4067 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, in memcg_numa_stat_show()
4072 stat->lru_mask, true)); in memcg_numa_stat_show()
4153 memory = min(memory, READ_ONCE(mi->memory.max)); in memcg_stat_show()
4154 memsw = min(memsw, READ_ONCE(mi->memsw.max)); in memcg_stat_show()
4199 mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id); in memcg_stat_show()
4201 anon_cost += mz->lruvec.anon_cost; in memcg_stat_show()
4202 file_cost += mz->lruvec.file_cost; in memcg_stat_show()
4229 return -EINVAL; in mem_cgroup_swappiness_write()
4231 if (css->parent) in mem_cgroup_swappiness_write()
4232 memcg->swappiness = val; in mem_cgroup_swappiness_write()
4247 t = rcu_dereference(memcg->thresholds.primary); in __mem_cgroup_threshold()
4249 t = rcu_dereference(memcg->memsw_thresholds.primary); in __mem_cgroup_threshold()
4261 i = t->current_threshold; in __mem_cgroup_threshold()
4269 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) in __mem_cgroup_threshold()
4270 eventfd_signal(t->entries[i].eventfd, 1); in __mem_cgroup_threshold()
4281 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) in __mem_cgroup_threshold()
4282 eventfd_signal(t->entries[i].eventfd, 1); in __mem_cgroup_threshold()
4285 t->current_threshold = i - 1; in __mem_cgroup_threshold()
4306 if (_a->threshold > _b->threshold) in compare_thresholds()
4309 if (_a->threshold < _b->threshold) in compare_thresholds()
4310 return -1; in compare_thresholds()
4321 list_for_each_entry(ev, &memcg->oom_notify, list) in mem_cgroup_oom_notify_cb()
4322 eventfd_signal(ev->eventfd, 1); in mem_cgroup_oom_notify_cb()
4345 ret = page_counter_memparse(args, "-1", &threshold); in __mem_cgroup_usage_register_event()
4349 mutex_lock(&memcg->thresholds_lock); in __mem_cgroup_usage_register_event()
4352 thresholds = &memcg->thresholds; in __mem_cgroup_usage_register_event()
4355 thresholds = &memcg->memsw_thresholds; in __mem_cgroup_usage_register_event()
4361 if (thresholds->primary) in __mem_cgroup_usage_register_event()
4364 size = thresholds->primary ? thresholds->primary->size + 1 : 1; in __mem_cgroup_usage_register_event()
4369 ret = -ENOMEM; in __mem_cgroup_usage_register_event()
4372 new->size = size; in __mem_cgroup_usage_register_event()
4375 if (thresholds->primary) in __mem_cgroup_usage_register_event()
4376 memcpy(new->entries, thresholds->primary->entries, in __mem_cgroup_usage_register_event()
4377 flex_array_size(new, entries, size - 1)); in __mem_cgroup_usage_register_event()
4380 new->entries[size - 1].eventfd = eventfd; in __mem_cgroup_usage_register_event()
4381 new->entries[size - 1].threshold = threshold; in __mem_cgroup_usage_register_event()
4383 /* Sort thresholds. Registering of new threshold isn't time-critical */ in __mem_cgroup_usage_register_event()
4384 sort(new->entries, size, sizeof(*new->entries), in __mem_cgroup_usage_register_event()
4387 /* Find current threshold */ in __mem_cgroup_usage_register_event()
4388 new->current_threshold = -1; in __mem_cgroup_usage_register_event()
4390 if (new->entries[i].threshold <= usage) { in __mem_cgroup_usage_register_event()
4392 * new->current_threshold will not be used until in __mem_cgroup_usage_register_event()
4396 ++new->current_threshold; in __mem_cgroup_usage_register_event()
4402 kfree(thresholds->spare); in __mem_cgroup_usage_register_event()
4403 thresholds->spare = thresholds->primary; in __mem_cgroup_usage_register_event()
4405 rcu_assign_pointer(thresholds->primary, new); in __mem_cgroup_usage_register_event()
4411 mutex_unlock(&memcg->thresholds_lock); in __mem_cgroup_usage_register_event()
4436 mutex_lock(&memcg->thresholds_lock); in __mem_cgroup_usage_unregister_event()
4439 thresholds = &memcg->thresholds; in __mem_cgroup_usage_unregister_event()
4442 thresholds = &memcg->memsw_thresholds; in __mem_cgroup_usage_unregister_event()
4447 if (!thresholds->primary) in __mem_cgroup_usage_unregister_event()
4455 for (i = 0; i < thresholds->primary->size; i++) { in __mem_cgroup_usage_unregister_event()
4456 if (thresholds->primary->entries[i].eventfd != eventfd) in __mem_cgroup_usage_unregister_event()
4462 new = thresholds->spare; in __mem_cgroup_usage_unregister_event()
4475 new->size = size; in __mem_cgroup_usage_unregister_event()
4477 /* Copy thresholds and find current threshold */ in __mem_cgroup_usage_unregister_event()
4478 new->current_threshold = -1; in __mem_cgroup_usage_unregister_event()
4479 for (i = 0, j = 0; i < thresholds->primary->size; i++) { in __mem_cgroup_usage_unregister_event()
4480 if (thresholds->primary->entries[i].eventfd == eventfd) in __mem_cgroup_usage_unregister_event()
4483 new->entries[j] = thresholds->primary->entries[i]; in __mem_cgroup_usage_unregister_event()
4484 if (new->entries[j].threshold <= usage) { in __mem_cgroup_usage_unregister_event()
4486 * new->current_threshold will not be used in __mem_cgroup_usage_unregister_event()
4490 ++new->current_threshold; in __mem_cgroup_usage_unregister_event()
4497 thresholds->spare = thresholds->primary; in __mem_cgroup_usage_unregister_event()
4499 rcu_assign_pointer(thresholds->primary, new); in __mem_cgroup_usage_unregister_event()
4506 kfree(thresholds->spare); in __mem_cgroup_usage_unregister_event()
4507 thresholds->spare = NULL; in __mem_cgroup_usage_unregister_event()
4510 mutex_unlock(&memcg->thresholds_lock); in __mem_cgroup_usage_unregister_event()
4532 return -ENOMEM; in mem_cgroup_oom_register_event()
4536 event->eventfd = eventfd; in mem_cgroup_oom_register_event()
4537 list_add(&event->list, &memcg->oom_notify); in mem_cgroup_oom_register_event()
4540 if (memcg->under_oom) in mem_cgroup_oom_register_event()
4554 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { in mem_cgroup_oom_unregister_event()
4555 if (ev->eventfd == eventfd) { in mem_cgroup_oom_unregister_event()
4556 list_del(&ev->list); in mem_cgroup_oom_unregister_event()
4568 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); in mem_cgroup_oom_control_read()
4569 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); in mem_cgroup_oom_control_read()
4571 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); in mem_cgroup_oom_control_read()
4581 if (!css->parent || !((val == 0) || (val == 1))) in mem_cgroup_oom_control_write()
4582 return -EINVAL; in mem_cgroup_oom_control_write()
4584 memcg->oom_kill_disable = val; in mem_cgroup_oom_control_write()
4597 return wb_domain_init(&memcg->cgwb_domain, gfp); in memcg_wb_domain_init()
4602 wb_domain_exit(&memcg->cgwb_domain); in memcg_wb_domain_exit()
4607 wb_domain_size_changed(&memcg->cgwb_domain); in memcg_wb_domain_size_changed()
4612 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_wb_domain()
4614 if (!memcg->css.parent) in mem_cgroup_wb_domain()
4617 return &memcg->cgwb_domain; in mem_cgroup_wb_domain()
4626 long x = atomic_long_read(&memcg->vmstats[idx]); in memcg_exact_page_state()
4630 x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx]; in memcg_exact_page_state()
4637 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4645 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
4648 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
4658 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_wb_stats()
4669 unsigned long ceiling = min(READ_ONCE(memcg->memory.max), in mem_cgroup_wb_stats()
4670 READ_ONCE(memcg->memory.high)); in mem_cgroup_wb_stats()
4671 unsigned long used = page_counter_read(&memcg->memory); in mem_cgroup_wb_stats()
4673 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); in mem_cgroup_wb_stats()
4682 * tracks ownership per-page while the latter per-inode. This was a
4683 * deliberate design decision because honoring per-page ownership in the
4685 * and deemed unnecessary given that write-sharing an inode across
4686 * different cgroups isn't a common use-case.
4688 * Combined with inode majority-writer ownership switching, this works well
4709 * page - a page whose memcg and writeback ownerships don't match - is
4715 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4725 struct mem_cgroup *memcg = page->mem_cgroup; in mem_cgroup_track_foreign_dirty_slowpath()
4729 int oldest = -1; in mem_cgroup_track_foreign_dirty_slowpath()
4740 frn = &memcg->cgwb_frn[i]; in mem_cgroup_track_foreign_dirty_slowpath()
4741 if (frn->bdi_id == wb->bdi->id && in mem_cgroup_track_foreign_dirty_slowpath()
4742 frn->memcg_id == wb->memcg_css->id) in mem_cgroup_track_foreign_dirty_slowpath()
4744 if (time_before64(frn->at, oldest_at) && in mem_cgroup_track_foreign_dirty_slowpath()
4745 atomic_read(&frn->done.cnt) == 1) { in mem_cgroup_track_foreign_dirty_slowpath()
4747 oldest_at = frn->at; in mem_cgroup_track_foreign_dirty_slowpath()
4753 * Re-using an existing one. Update timestamp lazily to in mem_cgroup_track_foreign_dirty_slowpath()
4755 * reasonably up-to-date and significantly shorter than in mem_cgroup_track_foreign_dirty_slowpath()
4763 if (time_before64(frn->at, now - update_intv)) in mem_cgroup_track_foreign_dirty_slowpath()
4764 frn->at = now; in mem_cgroup_track_foreign_dirty_slowpath()
4767 frn = &memcg->cgwb_frn[oldest]; in mem_cgroup_track_foreign_dirty_slowpath()
4768 frn->bdi_id = wb->bdi->id; in mem_cgroup_track_foreign_dirty_slowpath()
4769 frn->memcg_id = wb->memcg_css->id; in mem_cgroup_track_foreign_dirty_slowpath()
4770 frn->at = now; in mem_cgroup_track_foreign_dirty_slowpath()
4777 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_flush_foreign()
4783 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i]; in mem_cgroup_flush_foreign()
4791 if (time_after64(frn->at, now - intv) && in mem_cgroup_flush_foreign()
4792 atomic_read(&frn->done.cnt) == 1) { in mem_cgroup_flush_foreign()
4793 frn->at = 0; in mem_cgroup_flush_foreign()
4794 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id); in mem_cgroup_flush_foreign()
4795 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 0, in mem_cgroup_flush_foreign()
4797 &frn->done); in mem_cgroup_flush_foreign()
4824 * This is way over-engineered. It tries to support fully configurable
4841 struct mem_cgroup *memcg = event->memcg; in memcg_event_remove()
4843 remove_wait_queue(event->wqh, &event->wait); in memcg_event_remove()
4845 event->unregister_event(memcg, event->eventfd); in memcg_event_remove()
4848 eventfd_signal(event->eventfd, 1); in memcg_event_remove()
4850 eventfd_ctx_put(event->eventfd); in memcg_event_remove()
4852 css_put(&memcg->css); in memcg_event_remove()
4858 * Called with wqh->lock held and interrupts disabled.
4865 struct mem_cgroup *memcg = event->memcg; in memcg_event_wake()
4875 * side will require wqh->lock via remove_wait_queue(), in memcg_event_wake()
4878 spin_lock(&memcg->event_list_lock); in memcg_event_wake()
4879 if (!list_empty(&event->list)) { in memcg_event_wake()
4880 list_del_init(&event->list); in memcg_event_wake()
4885 schedule_work(&event->remove); in memcg_event_wake()
4887 spin_unlock(&memcg->event_list_lock); in memcg_event_wake()
4899 event->wqh = wqh; in memcg_event_ptable_queue_proc()
4900 add_wait_queue(wqh, &event->wait); in memcg_event_ptable_queue_proc()
4930 return -EINVAL; in memcg_write_event_control()
4939 return -EINVAL; in memcg_write_event_control()
4943 return -ENOMEM; in memcg_write_event_control()
4945 event->memcg = memcg; in memcg_write_event_control()
4946 INIT_LIST_HEAD(&event->list); in memcg_write_event_control()
4947 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); in memcg_write_event_control()
4948 init_waitqueue_func_entry(&event->wait, memcg_event_wake); in memcg_write_event_control()
4949 INIT_WORK(&event->remove, memcg_event_remove); in memcg_write_event_control()
4953 ret = -EBADF; in memcg_write_event_control()
4957 event->eventfd = eventfd_ctx_fileget(efile.file); in memcg_write_event_control()
4958 if (IS_ERR(event->eventfd)) { in memcg_write_event_control()
4959 ret = PTR_ERR(event->eventfd); in memcg_write_event_control()
4965 ret = -EBADF; in memcg_write_event_control()
4979 cdentry = cfile.file->f_path.dentry; in memcg_write_event_control()
4980 if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) { in memcg_write_event_control()
4981 ret = -EINVAL; in memcg_write_event_control()
4993 name = cdentry->d_name.name; in memcg_write_event_control()
4996 event->register_event = mem_cgroup_usage_register_event; in memcg_write_event_control()
4997 event->unregister_event = mem_cgroup_usage_unregister_event; in memcg_write_event_control()
4999 event->register_event = mem_cgroup_oom_register_event; in memcg_write_event_control()
5000 event->unregister_event = mem_cgroup_oom_unregister_event; in memcg_write_event_control()
5002 event->register_event = vmpressure_register_event; in memcg_write_event_control()
5003 event->unregister_event = vmpressure_unregister_event; in memcg_write_event_control()
5005 event->register_event = memsw_cgroup_usage_register_event; in memcg_write_event_control()
5006 event->unregister_event = memsw_cgroup_usage_unregister_event; in memcg_write_event_control()
5008 ret = -EINVAL; in memcg_write_event_control()
5017 cfile_css = css_tryget_online_from_dir(cdentry->d_parent, in memcg_write_event_control()
5019 ret = -EINVAL; in memcg_write_event_control()
5027 ret = event->register_event(memcg, event->eventfd, buf); in memcg_write_event_control()
5031 vfs_poll(efile.file, &event->pt); in memcg_write_event_control()
5033 spin_lock(&memcg->event_list_lock); in memcg_write_event_control()
5034 list_add(&event->list, &memcg->event_list); in memcg_write_event_control()
5035 spin_unlock(&memcg->event_list_lock); in memcg_write_event_control()
5047 eventfd_ctx_put(event->eventfd); in memcg_write_event_control()
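
memcg_write_event_control() above parses the cgroup v1 event interface: userspace writes "<eventfd> <fd of a control file> <args>" to cgroup.event_control and is woken through the eventfd when the event fires. A usage-threshold example, assuming the same hypothetical /sys/fs/cgroup/memory/demo group used earlier:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
    const char *grp = "/sys/fs/cgroup/memory/demo";
    char buf[256];
    uint64_t ticks;

    int efd = eventfd(0, 0);
    snprintf(buf, sizeof(buf), "%s/memory.usage_in_bytes", grp);
    int usage_fd = open(buf, O_RDONLY);
    snprintf(buf, sizeof(buf), "%s/cgroup.event_control", grp);
    int ctrl_fd = open(buf, O_WRONLY);

    if (efd < 0 || usage_fd < 0 || ctrl_fd < 0) {
        perror("setup");
        return EXIT_FAILURE;
    }

    /* Ask for a notification when usage crosses 128 MiB. */
    snprintf(buf, sizeof(buf), "%d %d %llu", efd, usage_fd, 128ULL << 20);
    if (write(ctrl_fd, buf, strlen(buf)) < 0) {
        perror("event_control");
        return EXIT_FAILURE;
    }

    /* Blocks until the threshold code signals the eventfd. */
    if (read(efd, &ticks, sizeof(ticks)) == (ssize_t)sizeof(ticks))
        printf("threshold crossed, %llu notification(s)\n",
               (unsigned long long)ticks);
    return 0;
}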
5188 * Swap-out records and page cache shadow entries need to store memcg
5191 * memory-controlled cgroups to 64k.
5198 * even when there are much fewer than 64k cgroups - possibly none.
5200 * Maintain a private 16-bit ID space for memcg, and allow the ID to
5227 if (memcg->id.id > 0) { in mem_cgroup_id_remove()
5229 idr_remove(&mem_cgroup_idr, memcg->id.id); in mem_cgroup_id_remove()
5232 memcg->id.id = 0; in mem_cgroup_id_remove()
5239 refcount_add(n, &memcg->id.ref); in mem_cgroup_id_get_many()
5244 if (refcount_sub_and_test(n, &memcg->id.ref)) { in mem_cgroup_id_put_many()
5248 css_put(&memcg->css); in mem_cgroup_id_put_many()
5258 * mem_cgroup_from_id - look up a memcg from a memcg id
5267 if (id == -1) in mem_cgroup_from_id()
5286 tmp = -1; in alloc_mem_cgroup_per_node_info()
5291 pn->lruvec_stat_local = alloc_percpu_gfp(struct lruvec_stat, in alloc_mem_cgroup_per_node_info()
5293 if (!pn->lruvec_stat_local) { in alloc_mem_cgroup_per_node_info()
5298 pn->lruvec_stat_cpu = alloc_percpu_gfp(struct lruvec_stat, in alloc_mem_cgroup_per_node_info()
5300 if (!pn->lruvec_stat_cpu) { in alloc_mem_cgroup_per_node_info()
5301 free_percpu(pn->lruvec_stat_local); in alloc_mem_cgroup_per_node_info()
5306 lruvec_init(&pn->lruvec); in alloc_mem_cgroup_per_node_info()
5307 pn->usage_in_excess = 0; in alloc_mem_cgroup_per_node_info()
5308 pn->lruvec.pgdat = NODE_DATA(node); in alloc_mem_cgroup_per_node_info()
5309 pn->on_tree = false; in alloc_mem_cgroup_per_node_info()
5310 pn->memcg = memcg; in alloc_mem_cgroup_per_node_info()
5312 memcg->nodeinfo[node] = pn; in alloc_mem_cgroup_per_node_info()
5318 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; in free_mem_cgroup_per_node_info()
5323 free_percpu(pn->lruvec_stat_cpu); in free_mem_cgroup_per_node_info()
5324 free_percpu(pn->lruvec_stat_local); in free_mem_cgroup_per_node_info()
5334 free_percpu(memcg->vmstats_percpu); in __mem_cgroup_free()
5335 free_percpu(memcg->vmstats_local); in __mem_cgroup_free()
5357 long error = -ENOMEM; in mem_cgroup_alloc()
5366 memcg->id.id = mem_cgroup_alloc_id(); in mem_cgroup_alloc()
5367 if (memcg->id.id < 0) { in mem_cgroup_alloc()
5368 error = memcg->id.id; in mem_cgroup_alloc()
5372 memcg->vmstats_local = alloc_percpu_gfp(struct memcg_vmstats_percpu, in mem_cgroup_alloc()
5374 if (!memcg->vmstats_local) in mem_cgroup_alloc()
5377 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu, in mem_cgroup_alloc()
5379 if (!memcg->vmstats_percpu) in mem_cgroup_alloc()
5389 INIT_WORK(&memcg->high_work, high_work_func); in mem_cgroup_alloc()
5390 INIT_LIST_HEAD(&memcg->oom_notify); in mem_cgroup_alloc()
5391 mutex_init(&memcg->thresholds_lock); in mem_cgroup_alloc()
5392 spin_lock_init(&memcg->move_lock); in mem_cgroup_alloc()
5393 vmpressure_init(&memcg->vmpressure); in mem_cgroup_alloc()
5394 INIT_LIST_HEAD(&memcg->event_list); in mem_cgroup_alloc()
5395 spin_lock_init(&memcg->event_list_lock); in mem_cgroup_alloc()
5396 memcg->socket_pressure = jiffies; in mem_cgroup_alloc()
5398 memcg->kmemcg_id = -1; in mem_cgroup_alloc()
5399 INIT_LIST_HEAD(&memcg->objcg_list); in mem_cgroup_alloc()
5402 INIT_LIST_HEAD(&memcg->cgwb_list); in mem_cgroup_alloc()
5404 memcg->cgwb_frn[i].done = in mem_cgroup_alloc()
5408 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock); in mem_cgroup_alloc()
5409 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue); in mem_cgroup_alloc()
5410 memcg->deferred_split_queue.split_queue_len = 0; in mem_cgroup_alloc()
5421 INIT_LIST_HEAD(&memcg->score_node); in mem_cgroup_alloc()
5423 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); in mem_cgroup_alloc()
5436 long error = -ENOMEM; in mem_cgroup_css_alloc()
5445 atomic64_set(&memcg->memcg_reclaimed.app_score, 300); in mem_cgroup_css_alloc()
5448 atomic_set(&memcg->memcg_reclaimed.ub_zram2ufs_ratio, 10); in mem_cgroup_css_alloc()
5449 atomic_set(&memcg->memcg_reclaimed.ub_mem2zram_ratio, 60); in mem_cgroup_css_alloc()
5450 atomic_set(&memcg->memcg_reclaimed.refault_threshold, 50); in mem_cgroup_css_alloc()
5452 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); in mem_cgroup_css_alloc()
5453 memcg->soft_limit = PAGE_COUNTER_MAX; in mem_cgroup_css_alloc()
5454 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_alloc()
5456 memcg->swappiness = mem_cgroup_swappiness(parent); in mem_cgroup_css_alloc()
5457 memcg->oom_kill_disable = parent->oom_kill_disable; in mem_cgroup_css_alloc()
5460 page_counter_init(&memcg->memory, NULL); in mem_cgroup_css_alloc()
5461 page_counter_init(&memcg->swap, NULL); in mem_cgroup_css_alloc()
5462 page_counter_init(&memcg->kmem, NULL); in mem_cgroup_css_alloc()
5463 page_counter_init(&memcg->tcpmem, NULL); in mem_cgroup_css_alloc()
5464 } else if (parent->use_hierarchy) { in mem_cgroup_css_alloc()
5465 memcg->use_hierarchy = true; in mem_cgroup_css_alloc()
5466 page_counter_init(&memcg->memory, &parent->memory); in mem_cgroup_css_alloc()
5467 page_counter_init(&memcg->swap, &parent->swap); in mem_cgroup_css_alloc()
5468 page_counter_init(&memcg->kmem, &parent->kmem); in mem_cgroup_css_alloc()
5469 page_counter_init(&memcg->tcpmem, &parent->tcpmem); in mem_cgroup_css_alloc()
5471 page_counter_init(&memcg->memory, &root_mem_cgroup->memory); in mem_cgroup_css_alloc()
5472 page_counter_init(&memcg->swap, &root_mem_cgroup->swap); in mem_cgroup_css_alloc()
5473 page_counter_init(&memcg->kmem, &root_mem_cgroup->kmem); in mem_cgroup_css_alloc()
5474 page_counter_init(&memcg->tcpmem, &root_mem_cgroup->tcpmem); in mem_cgroup_css_alloc()
5487 return &memcg->css; in mem_cgroup_css_alloc()
5497 return &memcg->css; in mem_cgroup_css_alloc()
5515 return -ENOMEM; in mem_cgroup_css_online()
5524 refcount_set(&memcg->id.ref, 1); in mem_cgroup_css_online()
5538 list_del_init(&memcg->score_node); in mem_cgroup_css_offline()
5548 spin_lock(&memcg->event_list_lock); in mem_cgroup_css_offline()
5549 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { in mem_cgroup_css_offline()
5550 list_del_init(&event->list); in mem_cgroup_css_offline()
5551 schedule_work(&event->remove); in mem_cgroup_css_offline()
5553 spin_unlock(&memcg->event_list_lock); in mem_cgroup_css_offline()
5555 page_counter_set_min(&memcg->memory, 0); in mem_cgroup_css_offline()
5556 page_counter_set_low(&memcg->memory, 0); in mem_cgroup_css_offline()
5580 wb_wait_for_completion(&memcg->cgwb_frn[i].done); in mem_cgroup_css_free()
5585 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active) in mem_cgroup_css_free()
5588 vmpressure_cleanup(&memcg->vmpressure); in mem_cgroup_css_free()
5589 cancel_work_sync(&memcg->high_work); in mem_cgroup_css_free()
5597 * mem_cgroup_css_reset - reset the states of a mem_cgroup
5606 * The current implementation only resets the essential configurations.
5613 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5614 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5615 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5616 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5617 page_counter_set_min(&memcg->memory, 0); in mem_cgroup_css_reset()
5618 page_counter_set_low(&memcg->memory, 0); in mem_cgroup_css_reset()
5619 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5620 memcg->soft_limit = PAGE_COUNTER_MAX; in mem_cgroup_css_reset()
5621 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5626 /* Handlers for move charge at task migration. */
5631 /* Try a single bulk charge without reclaim first, kswapd may wake */ in mem_cgroup_do_precharge()
5639 while (count--) { in mem_cgroup_do_precharge()
5715 entry->val = ent.val; in mc_handle_swap_pte()
5730 if (!vma->vm_file) /* anonymous vma */ in mc_handle_file_pte()
5735		/* page is moved even if it's not RSS of this task (page-faulted). */ in mc_handle_file_pte()
5737 return find_get_incore_page(vma->vm_file->f_mapping, in mc_handle_file_pte()
5742 * mem_cgroup_move_account - move account of the page
5744 * @compound: charge the page as compound or small page
5750  * This function doesn't do "charge" to the new cgroup and doesn't do "uncharge"
5769 * page->mem_cgroup of its source page while we change it. in mem_cgroup_move_account()
5771 ret = -EBUSY; in mem_cgroup_move_account()
5775 ret = -EINVAL; in mem_cgroup_move_account()
5776 if (page->mem_cgroup != from) in mem_cgroup_move_account()
5787 __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages); in mem_cgroup_move_account()
5796 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages); in mem_cgroup_move_account()
5800 __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages); in mem_cgroup_move_account()
5805 __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages); in mem_cgroup_move_account()
5810 struct address_space *mapping = page_mapping(page); in mem_cgroup_move_account() local
5812 if (mapping_can_writeback(mapping)) { in mem_cgroup_move_account()
5814 -nr_pages); in mem_cgroup_move_account()
5822 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages); in mem_cgroup_move_account()
5829 * It is safe to change page->mem_cgroup here because the page in mem_cgroup_move_account()
5832 * that would rely on a stable page->mem_cgroup. in mem_cgroup_move_account()
5835 * to save space. As soon as we switch page->mem_cgroup to a in mem_cgroup_move_account()
5841 css_get(&to->css); in mem_cgroup_move_account()
5842 css_put(&from->css); in mem_cgroup_move_account()
5844 page->mem_cgroup = to; in mem_cgroup_move_account()
5853 mem_cgroup_charge_statistics(from, page, -nr_pages); in mem_cgroup_move_account()
5863 * get_mctgt_type - get target type of moving charge
5870  *   0 (MC_TARGET_NONE): if the pte is not a target for move charge.
5872  *     move charge. If @target is not NULL, the page is stored in target->page
5875  *     target for charge migration. If @target is not NULL, the entry is stored
5876 * in target->ent.
5879  *     For now such a page is charged like a regular page would be, as for all
5910 if (page->mem_cgroup == mc.from) { in get_mctgt_type()
5915 target->page = page; in get_mctgt_type()
5922 * But we cannot move a tail-page in a THP. in get_mctgt_type()
5928 target->ent = ent; in get_mctgt_type()
5954 if (page->mem_cgroup == mc.from) { in get_mctgt_type_thp()
5958 target->page = page; in get_mctgt_type_thp()
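Because only scattered lines of the move-charge walk are matched in this listing, a condensed consumer of get_mctgt_type() may help orient the MC_TARGET_* values documented above. This is a hedged illustration only: demo_* is hypothetical, it assumes memcontrol.c's file-local union mc_target and enum mc_target_type (including MC_TARGET_DEVICE), and the locking and mc.precharge bookkeeping of the real walk in mem_cgroup_move_charge_pte_range() are elided.

/* Hypothetical, simplified consumer of get_mctgt_type(); not the real walk. */
static void demo_handle_pte_target(struct vm_area_struct *vma,
				   unsigned long addr, pte_t ptent)
{
	union mc_target target;

	switch (get_mctgt_type(vma, addr, ptent, &target)) {
	case MC_TARGET_DEVICE:		/* device-private page, handled like a page */
	case MC_TARGET_PAGE:
		/*
		 * target.page was returned with an extra reference; the real
		 * walk moves its charge with mem_cgroup_move_account() and
		 * then drops that reference.
		 */
		put_page(target.page);
		break;
	case MC_TARGET_SWAP:
		/* target.ent names a swap entry charged to mc.from. */
		break;
	case MC_TARGET_NONE:
		break;
	}
}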
5975 struct vm_area_struct *vma = walk->vma; in mem_cgroup_count_precharge_pte_range()
5994 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in mem_cgroup_count_precharge_pte_range()
5998 pte_unmap_unlock(pte - 1, ptl); in mem_cgroup_count_precharge_pte_range()
6013 walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL); in mem_cgroup_count_precharge()
6027 mc.moving_task = current; in mem_cgroup_precharge_mc()
6054 page_counter_uncharge(&mc.from->memsw, mc.moved_swap); in __mem_cgroup_clear_mc()
6059 * we charged both to->memory and to->memsw, so we in __mem_cgroup_clear_mc()
6060 * should uncharge to->memory. in __mem_cgroup_clear_mc()
6063 page_counter_uncharge(&mc.to->memory, mc.moved_swap); in __mem_cgroup_clear_mc()
6101 /* charge immigration isn't supported on the default hierarchy */ in mem_cgroup_can_attach()
6106 * Multi-process migrations only happen on the default hierarchy in mem_cgroup_can_attach()
6107 * where charge immigration is not used. Perform charge in mem_cgroup_can_attach()
6122 * tunable will only affect upcoming migrations, not the current one. in mem_cgroup_can_attach()
6125 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); in mem_cgroup_can_attach()
6137 if (mm->owner == p) { in mem_cgroup_can_attach()
6172 struct vm_area_struct *vma = walk->vma; in mem_cgroup_move_charge_pte_range()
6191 mc.precharge -= HPAGE_PMD_NR; in mem_cgroup_move_charge_pte_range()
6201 mc.precharge -= HPAGE_PMD_NR; in mem_cgroup_move_charge_pte_range()
6213 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in mem_cgroup_move_charge_pte_range()
6232 * memcg. There should be somebody mapping the head. in mem_cgroup_move_charge_pte_range()
6240 mc.precharge--; in mem_cgroup_move_charge_pte_range()
6252 mc.precharge--; in mem_cgroup_move_charge_pte_range()
6262 pte_unmap_unlock(pte - 1, ptl); in mem_cgroup_move_charge_pte_range()
6268		 * We try to charge one by one, but don't do any additional in mem_cgroup_move_charge_pte_range()
6269		 * charges to mc.to if we have failed to charge once in attach() in mem_cgroup_move_charge_pte_range()
6290 * for already started RCU-only updates to finish. in mem_cgroup_move_charge()
6292 atomic_inc(&mc.from->moving_account); in mem_cgroup_move_charge()
6300 * to move enough charges, but moving charge is a best-effort in mem_cgroup_move_charge()
6309 * additional charge, the page walk just aborts. in mem_cgroup_move_charge()
6311 walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops, in mem_cgroup_move_charge()
6315 atomic_dec(&mc.from->moving_account); in mem_cgroup_move_charge()
6351 root_mem_cgroup->use_hierarchy = true; in mem_cgroup_bind()
6353 root_mem_cgroup->use_hierarchy = false; in mem_cgroup_bind()
6371 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; in memory_current_read()
6377 READ_ONCE(mem_cgroup_from_seq(m)->memory.min)); in memory_min_show()
6392 page_counter_set_min(&memcg->memory, min); in memory_min_write()
6400 READ_ONCE(mem_cgroup_from_seq(m)->memory.low)); in memory_low_show()
6415 page_counter_set_low(&memcg->memory, low); in memory_low_write()
6423 READ_ONCE(mem_cgroup_from_seq(m)->memory.high)); in memory_high_show()
6440 page_counter_set_high(&memcg->memory, high); in memory_high_write()
6443 unsigned long nr_pages = page_counter_read(&memcg->memory); in memory_high_write()
6449 if (signal_pending(current)) in memory_high_write()
6458 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high, in memory_high_write()
6461 if (!reclaimed && !nr_retries--) in memory_high_write()
6472 READ_ONCE(mem_cgroup_from_seq(m)->memory.max)); in memory_max_show()
6489 xchg(&memcg->memory.max, max); in memory_max_write()
6492 unsigned long nr_pages = page_counter_read(&memcg->memory); in memory_max_write()
6497 if (signal_pending(current)) in memory_max_write()
6507 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, in memory_max_write()
6509 nr_reclaims--; in memory_max_write()
6536 __memory_events_show(m, memcg->memory_events); in memory_events_show()
6544 __memory_events_show(m, memcg->memory_events_local); in memory_events_local_show()
6555 return -ENOMEM; in memory_stat_show()
6594 seq_printf(m, "%d\n", memcg->oom_group); in memory_oom_group_show()
6607 return -EINVAL; in memory_oom_group_write()
6614 return -EINVAL; in memory_oom_group_write()
6616 memcg->oom_group = oom_group; in memory_oom_group_write()
6623 .name = "current",
6719 * This makes distribution proportional, but also work-conserving:
6730 * of the ancestor's claim to protection, any unutilized -
6731 * "floating" - protection from up the tree is distributed in
6757 * claimed protection in order to be work-conserving: claimed in effective_protection()
6795 * aren't read atomically - make sure the division is sane. in effective_protection()
6804 unclaimed = parent_effective - siblings_protected; in effective_protection()
6805 unclaimed *= usage - protected; in effective_protection()
6806 unclaimed /= parent_usage - siblings_protected; in effective_protection()
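As a concrete illustration of the division above: with hypothetical numbers, a parent with 100 pages of effective protection, of which its children explicitly claim 60, hands out the remaining 40 "floating" pages in proportion to each child's unprotected usage. The stand-alone sketch below is plain userspace C doing the same arithmetic as effective_protection(); it is not kernel code.

#include <stdio.h>

int main(void)
{
	/* Hypothetical values, all in pages. */
	unsigned long parent_effective   = 100; /* parent's effective protection */
	unsigned long siblings_protected =  60; /* protection claimed by all children */
	unsigned long parent_usage       = 140; /* parent's total usage */
	unsigned long usage              =  50; /* this child's usage */
	unsigned long protected          =  30; /* this child's claimed protection */

	/* Same proportional split as the unclaimed/"floating" share above. */
	unsigned long unclaimed = parent_effective - siblings_protected; /* 40 */
	unclaimed *= usage - protected;                                  /* 40 * 20 */
	unclaimed /= parent_usage - siblings_protected;                  /* 800 / 80 = 10 */

	printf("floating share: %lu, effective protection: %lu\n",
	       unclaimed, protected + unclaimed);
	return 0;
}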
6815 * mem_cgroup_protected - check if memory consumption is in the normal range
6816 * @root: the top ancestor of the sub-tree being checked
6820 * of a top-down tree iteration, not for isolated queries.
6844 usage = page_counter_read(&memcg->memory); in mem_cgroup_calculate_protection()
6849 /* No parent means a non-hierarchical mode on v1 memcg */ in mem_cgroup_calculate_protection()
6854 memcg->memory.emin = READ_ONCE(memcg->memory.min); in mem_cgroup_calculate_protection()
6855 memcg->memory.elow = READ_ONCE(memcg->memory.low); in mem_cgroup_calculate_protection()
6859 parent_usage = page_counter_read(&parent->memory); in mem_cgroup_calculate_protection()
6861 WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage, in mem_cgroup_calculate_protection()
6862 READ_ONCE(memcg->memory.min), in mem_cgroup_calculate_protection()
6863 READ_ONCE(parent->memory.emin), in mem_cgroup_calculate_protection()
6864 atomic_long_read(&parent->memory.children_min_usage))); in mem_cgroup_calculate_protection()
6866 WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage, in mem_cgroup_calculate_protection()
6867 READ_ONCE(memcg->memory.low), in mem_cgroup_calculate_protection()
6868 READ_ONCE(parent->memory.elow), in mem_cgroup_calculate_protection()
6869 atomic_long_read(&parent->memory.children_low_usage))); in mem_cgroup_calculate_protection()
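For orientation, here is a simplified sketch of how reclaim typically consumes the emin/elow values computed here, modelled on the shrink_node_memcgs() pattern and assuming the single-argument mem_cgroup_below_min()/mem_cgroup_below_low() helpers of this kernel generation; demo_* is hypothetical and the skipped-cgroup bookkeeping is elided.

#include <linux/memcontrol.h>

/* Hedged sketch: decide whether reclaim should skip @memcg for protection. */
static bool demo_skip_for_protection(struct mem_cgroup *target,
				     struct mem_cgroup *memcg,
				     bool memcg_low_reclaim)
{
	mem_cgroup_calculate_protection(target, memcg);

	if (mem_cgroup_below_min(memcg))
		return true;	/* hard protection: never reclaim */

	if (mem_cgroup_below_low(memcg) && !memcg_low_reclaim)
		return true;	/* soft protection: skip until low reclaim is allowed */

	return false;
}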
6873 * mem_cgroup_charge - charge a newly allocated page to a cgroup
6874 * @page: page to charge
6878 * Try to charge @page to the memcg that @mm belongs to, reclaiming
6897 * Every swap fault against a single page tries to charge the in mem_cgroup_charge()
6899 * already charged pages, too. page->mem_cgroup is protected in mem_cgroup_charge()
6904 if (compound_head(page)->mem_cgroup) in mem_cgroup_charge()
6910 if (memcg && !css_tryget_online(&memcg->css)) in mem_cgroup_charge()
6922 css_get(&memcg->css); in mem_cgroup_charge()
6938 * so this is a non-issue here. Memory and swap charge lifetimes in mem_cgroup_charge()
6939 * correspond 1:1 to page and swap slot lifetimes: we charge the in mem_cgroup_charge()
6947 * memory+swap charge, drop the swap entry duplicate. in mem_cgroup_charge()
6953 css_put(&memcg->css); in mem_cgroup_charge()
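A minimal caller-side sketch of the charge path excerpted above, assuming the page-based mem_cgroup_charge(page, mm, gfp) signature of this kernel generation; demo_* is hypothetical and freeing the page on failure is left to the caller.

#include <linux/mm.h>
#include <linux/memcontrol.h>

/* Hedged sketch: charge a freshly allocated page before it becomes visible. */
static int demo_charge_new_page(struct page *page, struct mm_struct *mm,
				gfp_t gfp)
{
	int err;

	err = mem_cgroup_charge(page, mm, gfp);	/* may reclaim or OOM internally */
	if (err)
		return err;	/* nothing was charged; caller frees the page */

	/*
	 * From here the charge follows the page: it is released through
	 * mem_cgroup_uncharge() when the page is finally freed.
	 */
	return 0;
}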
6975 if (!mem_cgroup_is_root(ug->memcg)) { in uncharge_batch()
6976 page_counter_uncharge(&ug->memcg->memory, ug->nr_pages); in uncharge_batch()
6978 page_counter_uncharge(&ug->memcg->memsw, ug->nr_pages); in uncharge_batch()
6979 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem) in uncharge_batch()
6980 page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem); in uncharge_batch()
6981 memcg_oom_recover(ug->memcg); in uncharge_batch()
6985 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); in uncharge_batch()
6986 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages); in uncharge_batch()
6987 memcg_check_events(ug->memcg, ug->dummy_page); in uncharge_batch()
6991 css_put(&ug->memcg->css); in uncharge_batch()
7000 if (!page->mem_cgroup) in uncharge_page()
7005 * page->mem_cgroup at this point, we have fully in uncharge_page()
7009 if (ug->memcg != page->mem_cgroup) { in uncharge_page()
7010 if (ug->memcg) { in uncharge_page()
7014 ug->memcg = page->mem_cgroup; in uncharge_page()
7017 css_get(&ug->memcg->css); in uncharge_page()
7021 ug->nr_pages += nr_pages; in uncharge_page()
7024 ug->pgpgout++; in uncharge_page()
7026 ug->nr_kmem += nr_pages; in uncharge_page()
7030 ug->dummy_page = page; in uncharge_page()
7031 page->mem_cgroup = NULL; in uncharge_page()
7032 css_put(&ug->memcg->css); in uncharge_page()
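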
7043 * Note that the list can be a single page->lru; hence the in uncharge_list()
7044 * do-while loop instead of a simple list_for_each_entry(). in uncharge_list()
7046 next = page_list->next; in uncharge_list()
7051 next = page->lru.next; in uncharge_list()
7061 * mem_cgroup_uncharge - uncharge a page
7073 /* Don't touch page->lru of any random page, pre-check: */ in mem_cgroup_uncharge()
7074 if (!page->mem_cgroup) in mem_cgroup_uncharge()
7083  * mem_cgroup_uncharge_list - uncharge a list of pages
7099 * mem_cgroup_migrate - charge a page's replacement
7103 * Charge @newpage as a replacement page for @oldpage. @oldpage will
7106 * Both pages must be locked, @newpage->mapping must be set up.
7124 if (newpage->mem_cgroup) in mem_cgroup_migrate()
7128 memcg = oldpage->mem_cgroup; in mem_cgroup_migrate()
7132 /* Force-charge the new page. The old one will be freed soon */ in mem_cgroup_migrate()
7135 page_counter_charge(&memcg->memory, nr_pages); in mem_cgroup_migrate()
7137 page_counter_charge(&memcg->memsw, nr_pages); in mem_cgroup_migrate()
7139 css_get(&memcg->css); in mem_cgroup_migrate()
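A hedged sketch of the caller contract spelled out above (both pages locked, @newpage->mapping already set up); demo_* is hypothetical and not a real migration helper.

#include <linux/mm.h>
#include <linux/memcontrol.h>

/* Hedged sketch: transfer the charge when @newpage replaces @oldpage. */
static void demo_replace_page_charge(struct page *oldpage, struct page *newpage)
{
	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	mem_cgroup_migrate(oldpage, newpage);
	/* @oldpage stays charged until it is freed and goes through the uncharge path. */
}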
7163 memcg = mem_cgroup_from_task(current); in mem_cgroup_sk_alloc()
7166 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active) in mem_cgroup_sk_alloc()
7168 if (css_tryget(&memcg->css)) in mem_cgroup_sk_alloc()
7169 sk->sk_memcg = memcg; in mem_cgroup_sk_alloc()
7176 if (sk->sk_memcg) in mem_cgroup_sk_free()
7177 css_put(&sk->sk_memcg->css); in mem_cgroup_sk_free()
7181 * mem_cgroup_charge_skmem - charge socket memory
7182 * @memcg: memcg to charge
7183 * @nr_pages: number of pages to charge
7185 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
7186 * @memcg's configured limit, %false if the charge had to be forced.
7195 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { in mem_cgroup_charge_skmem()
7196 memcg->tcpmem_pressure = 0; in mem_cgroup_charge_skmem()
7199 page_counter_charge(&memcg->tcpmem, nr_pages); in mem_cgroup_charge_skmem()
7200 memcg->tcpmem_pressure = 1; in mem_cgroup_charge_skmem()
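A hedged caller-side sketch of the charge/uncharge pairing for socket memory; demo_* is hypothetical, and the back-off mirrors (but is not) the suppress-allocation handling in the socket core, with the memcg hung off sk->sk_memcg as in mem_cgroup_sk_alloc() above.

#include <net/sock.h>
#include <linux/memcontrol.h>

/* Hedged sketch: account nr_pages of socket buffer memory to sk->sk_memcg. */
static bool demo_sk_account(struct sock *sk, unsigned int nr_pages)
{
	if (!sk->sk_memcg)
		return true;

	if (mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages))
		return true;	/* fit under the configured limit */

	/*
	 * The charge was forced past the limit; undo it and tell the caller
	 * to back off and treat the cgroup as under pressure.
	 */
	mem_cgroup_uncharge_skmem(sk->sk_memcg, nr_pages);
	return false;
}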
7218 * mem_cgroup_uncharge_skmem - uncharge socket memory
7225 page_counter_uncharge(&memcg->tcpmem, nr_pages); in mem_cgroup_uncharge_skmem()
7229 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); in mem_cgroup_uncharge_skmem()
7256 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
7268 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, in mem_cgroup_init()
7277 rtpn->rb_root = RB_ROOT; in mem_cgroup_init()
7278 rtpn->rb_rightmost = NULL; in mem_cgroup_init()
7279 spin_lock_init(&rtpn->lock); in mem_cgroup_init()
7290 while (!refcount_inc_not_zero(&memcg->id.ref)) { in mem_cgroup_id_get_online()
7307 * mem_cgroup_swapout - transfer a memsw charge to swap
7308 * @page: page whose memsw charge to transfer
7309 * @entry: swap entry to move the charge to
7311 * Transfer the memsw charge of @page to @entry.
7325 memcg = page->mem_cgroup; in mem_cgroup_swapout()
7333 * have an ID allocated to it anymore, charge the closest online in mem_cgroup_swapout()
7334 * ancestor for the swap instead and transfer the memory+swap charge. in mem_cgroup_swapout()
7340 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); in mem_cgroup_swapout()
7347 page->mem_cgroup = NULL; in mem_cgroup_swapout()
7350 page_counter_uncharge(&memcg->memory, nr_entries); in mem_cgroup_swapout()
7354 page_counter_charge(&swap_memcg->memsw, nr_entries); in mem_cgroup_swapout()
7355 page_counter_uncharge(&memcg->memsw, nr_entries); in mem_cgroup_swapout()
7360 * i_pages lock which is taken with interrupts-off. It is in mem_cgroup_swapout()
7362 * only synchronisation we have for updating the per-CPU variables. in mem_cgroup_swapout()
7365 mem_cgroup_charge_statistics(memcg, page, -nr_entries); in mem_cgroup_swapout()
7368 css_put(&memcg->css); in mem_cgroup_swapout()
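To ground the interrupts-off note above, a condensed, hedged sketch of the usual caller context (reclaim removing a page from the swap cache while holding the mapping's i_pages lock); demo_* is hypothetical, and the reference-count and shadow-entry handling of the real path is elided.

#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/memcontrol.h>

/* Hedged sketch: called with the i_pages (xarray) lock held, IRQs disabled. */
static void demo_swapout_under_xa_lock(struct page *page)
{
	swp_entry_t swap = { .val = page_private(page) };

	mem_cgroup_swapout(page, swap);		/* memsw charge moves to the entry */
	/* ... the page is then deleted from the swap cache and freed ... */
}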
7372 * mem_cgroup_try_charge_swap - try charging swap space for a page
7374 * @entry: swap entry to charge
7376 * Try to charge @page's memcg for the swap space at @entry.
7378 * Returns 0 on success, -ENOMEM on failure.
7390 memcg = page->mem_cgroup; in mem_cgroup_try_charge_swap()
7404 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { in mem_cgroup_try_charge_swap()
7408 return -ENOMEM; in mem_cgroup_try_charge_swap()
7413 mem_cgroup_id_get_many(memcg, nr_pages - 1); in mem_cgroup_try_charge_swap()
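A hedged sketch of the charge-or-back-out pattern for a freshly allocated swap slot; demo_* is hypothetical, the slot is assumed to have been handed out already, and put_swap_page() is assumed to be the slot-release helper of this kernel generation.

#include <linux/swap.h>
#include <linux/memcontrol.h>

/* Hedged sketch: charge the page's memcg for @entry, or return the slot. */
static int demo_charge_swap_slot(struct page *page, swp_entry_t entry)
{
	if (mem_cgroup_try_charge_swap(page, entry)) {
		/* swap.max exceeded (or no memcg IDs left): give the slot back. */
		put_swap_page(page, entry);
		return -ENOMEM;
	}
	return 0;
}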
7422 * mem_cgroup_uncharge_swap - uncharge swap space
7437 page_counter_uncharge(&memcg->swap, nr_pages); in mem_cgroup_uncharge_swap()
7439 page_counter_uncharge(&memcg->memsw, nr_pages); in mem_cgroup_uncharge_swap()
7441 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); in mem_cgroup_uncharge_swap()
7455 READ_ONCE(memcg->swap.max) - in mem_cgroup_get_nr_swap_pages()
7456 page_counter_read(&memcg->swap)); in mem_cgroup_get_nr_swap_pages()
7471 memcg = page->mem_cgroup; in mem_cgroup_swap_full()
7476 unsigned long usage = page_counter_read(&memcg->swap); in mem_cgroup_swap_full()
7478 if (usage * 2 >= READ_ONCE(memcg->swap.high) || in mem_cgroup_swap_full()
7479 usage * 2 >= READ_ONCE(memcg->swap.max)) in mem_cgroup_swap_full()
7501 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE; in swap_current_read()
7507 READ_ONCE(mem_cgroup_from_seq(m)->swap.high)); in swap_high_show()
7522 page_counter_set_high(&memcg->swap, high); in swap_high_write()
7530 READ_ONCE(mem_cgroup_from_seq(m)->swap.max)); in swap_max_show()
7545 xchg(&memcg->swap.max, max); in swap_max_write()
7555 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH])); in swap_events_show()
7557 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); in swap_events_show()
7559 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL])); in swap_events_show()
7566 .name = "swap.current",
7627 /* No memory control -> no swap control */ in mem_cgroup_swap_init()