Lines Matching +full:charge +full:- +full:current +full:- +full:limit +full:- +full:mapping
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
19 * Charge lifetime sanitation
38 #include <linux/page-flags.h>
39 #include <linux/backing-dev.h>
110 * Cgroups above their limits are maintained in a RB-Tree, independent of
202 * limit reclaim to prevent infinite loops, if they ever occur.
207 /* for encoding cft->private value on file */
236 return tsk_is_oom_victim(current) || fatal_signal_pending(current) || in task_is_dying()
237 (current->flags & PF_EXITING); in task_is_dying()
245 return &memcg->vmpressure; in memcg_to_vmpressure()
273 * objcg->nr_charged_bytes can't have an arbitrary byte value. in obj_cgroup_release()
277 * 1) CPU0: objcg == stock->cached_objcg in obj_cgroup_release()
282 * objcg->nr_charged_bytes = PAGE_SIZE - 92 in obj_cgroup_release()
284 * 92 bytes are added to stock->nr_bytes in obj_cgroup_release()
286 * 92 bytes are added to objcg->nr_charged_bytes in obj_cgroup_release()
291 nr_bytes = atomic_read(&objcg->nr_charged_bytes); in obj_cgroup_release()
292 WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1)); in obj_cgroup_release()
299 list_del(&objcg->list); in obj_cgroup_release()
315 ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0, in obj_cgroup_alloc()
321 INIT_LIST_HEAD(&objcg->list); in obj_cgroup_alloc()
330 objcg = rcu_replace_pointer(memcg->objcg, NULL, true); in memcg_reparent_objcgs()
335 list_add(&objcg->list, &memcg->objcg_list); in memcg_reparent_objcgs()
337 list_for_each_entry(iter, &memcg->objcg_list, list) in memcg_reparent_objcgs()
338 WRITE_ONCE(iter->memcg, parent); in memcg_reparent_objcgs()
340 list_splice(&memcg->objcg_list, &parent->objcg_list); in memcg_reparent_objcgs()
344 percpu_ref_kill(&objcg->refcnt); in memcg_reparent_objcgs()
361 * mem_cgroup_css_from_folio - css of the memcg associated with a folio
378 return &memcg->css; in mem_cgroup_css_from_folio()
382 * page_cgroup_ino - return inode number of the memcg a page is charged to
403 while (memcg && !(memcg->css.flags & CSS_ONLINE)) in page_cgroup_ino()
406 ino = cgroup_ino(memcg->css.cgroup); in page_cgroup_ino()
415 struct rb_node **p = &mctz->rb_root.rb_node; in __mem_cgroup_insert_exceeded()
420 if (mz->on_tree) in __mem_cgroup_insert_exceeded()
423 mz->usage_in_excess = new_usage_in_excess; in __mem_cgroup_insert_exceeded()
424 if (!mz->usage_in_excess) in __mem_cgroup_insert_exceeded()
430 if (mz->usage_in_excess < mz_node->usage_in_excess) { in __mem_cgroup_insert_exceeded()
431 p = &(*p)->rb_left; in __mem_cgroup_insert_exceeded()
434 p = &(*p)->rb_right; in __mem_cgroup_insert_exceeded()
439 mctz->rb_rightmost = &mz->tree_node; in __mem_cgroup_insert_exceeded()
441 rb_link_node(&mz->tree_node, parent, p); in __mem_cgroup_insert_exceeded()
442 rb_insert_color(&mz->tree_node, &mctz->rb_root); in __mem_cgroup_insert_exceeded()
443 mz->on_tree = true; in __mem_cgroup_insert_exceeded()
449 if (!mz->on_tree) in __mem_cgroup_remove_exceeded()
452 if (&mz->tree_node == mctz->rb_rightmost) in __mem_cgroup_remove_exceeded()
453 mctz->rb_rightmost = rb_prev(&mz->tree_node); in __mem_cgroup_remove_exceeded()
455 rb_erase(&mz->tree_node, &mctz->rb_root); in __mem_cgroup_remove_exceeded()
456 mz->on_tree = false; in __mem_cgroup_remove_exceeded()
464 spin_lock_irqsave(&mctz->lock, flags); in mem_cgroup_remove_exceeded()
466 spin_unlock_irqrestore(&mctz->lock, flags); in mem_cgroup_remove_exceeded()
473 struct lruvec *lruvec = &mz->lruvec; in soft_limit_excess()
478 unsigned long nr_pages = page_counter_read(&memcg->memory); in soft_limit_excess()
480 unsigned long soft_limit = READ_ONCE(memcg->soft_limit); in soft_limit_excess()
484 excess = nr_pages - soft_limit; in soft_limit_excess()
509 mz = memcg->nodeinfo[nid]; in mem_cgroup_update_tree()
512 * We have to update the tree if mz is on RB-tree or in mem_cgroup_update_tree()
515 if (excess || mz->on_tree) { in mem_cgroup_update_tree()
518 spin_lock_irqsave(&mctz->lock, flags); in mem_cgroup_update_tree()
519 /* if on-tree, remove it */ in mem_cgroup_update_tree()
520 if (mz->on_tree) in mem_cgroup_update_tree()
523 * Insert again. mz->usage_in_excess will be updated. in mem_cgroup_update_tree()
527 spin_unlock_irqrestore(&mctz->lock, flags); in mem_cgroup_update_tree()
539 mz = memcg->nodeinfo[nid]; in mem_cgroup_remove_from_trees()
553 if (!mctz->rb_rightmost) in __mem_cgroup_largest_soft_limit_node()
556 mz = rb_entry(mctz->rb_rightmost, in __mem_cgroup_largest_soft_limit_node()
564 if (!soft_limit_excess(mz->memcg) || in __mem_cgroup_largest_soft_limit_node()
565 !css_tryget(&mz->memcg->css)) in __mem_cgroup_largest_soft_limit_node()
576 spin_lock_irq(&mctz->lock); in mem_cgroup_largest_soft_limit_node()
578 spin_unlock_irq(&mctz->lock); in mem_cgroup_largest_soft_limit_node()
635 cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id()); in memcg_rstat_updated()
664 cgroup_rstat_flush(root_mem_cgroup->css.cgroup); in do_flush_stats()
685 * Always flush here so that flushing in latency-sensitive paths is in flush_memcg_stats_dwork()
732 return mem_cgroup_events_index[idx] - 1; in memcg_events_index()
754 /* Non-hierarchical (CPU aggregated) page state & events */
765 long x = READ_ONCE(memcg->vmstats->state[idx]); in memcg_page_state()
774 * __mod_memcg_state - update cgroup memory statistics
776 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
784 __this_cpu_add(memcg->vmstats_percpu->state[idx], val); in __mod_memcg_state()
791 long x = READ_ONCE(memcg->vmstats->state_local[idx]); in memcg_page_state_local()
807 memcg = pn->memcg; in __mod_memcg_lruvec_state()
811 * update their counter from in-interrupt context. For these two in __mod_memcg_lruvec_state()
831 __this_cpu_add(memcg->vmstats_percpu->state[idx], val); in __mod_memcg_lruvec_state()
834 __this_cpu_add(pn->lruvec_stats_percpu->state[idx], val); in __mod_memcg_lruvec_state()
841 * __mod_lruvec_state - update lruvec memory statistics
848 * change of state at this level: per-node, per-cgroup, per-lruvec.
908 * when we free the slab object, we need to update the per-memcg in __mod_lruvec_kmem_state()
921 * __count_memcg_events - account VM events in a cgroup
939 __this_cpu_add(memcg->vmstats_percpu->events[index], count); in __count_memcg_events()
950 return READ_ONCE(memcg->vmstats->events[index]); in memcg_events()
960 return READ_ONCE(memcg->vmstats->events_local[index]); in memcg_events_local()
971 nr_pages = -nr_pages; /* for event */ in mem_cgroup_charge_statistics()
974 __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages); in mem_cgroup_charge_statistics()
982 val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events); in mem_cgroup_event_ratelimit()
983 next = __this_cpu_read(memcg->vmstats_percpu->targets[target]); in mem_cgroup_event_ratelimit()
985 if ((long)(next - val) < 0) { in mem_cgroup_event_ratelimit()
996 __this_cpu_write(memcg->vmstats_percpu->targets[target], next); in mem_cgroup_event_ratelimit()
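The (long)(next - val) < 0 test above is the standard wrap-safe comparison of free-running unsigned counters (the same trick as time_after()): the subtraction is modular and the result is reinterpreted as signed, so the check keeps working when the counter overflows. A minimal userspace sketch of the idiom, with hypothetical names, assuming the usual two's-complement behaviour:

#include <stdio.h>

/* Wrap-safe "has val gone past next?" test, modeled on (long)(next - val) < 0. */
static int target_passed(unsigned long val, unsigned long next)
{
	return (long)(next - val) < 0;
}

int main(void)
{
	unsigned long val = (unsigned long)-2;	/* counter just below the wrap */
	unsigned long next = val + 10;		/* target lies past the wrap */

	printf("%d\n", target_passed(val, next));	/* 0: target not reached yet */
	val += 11;					/* counter wraps around */
	printf("%d\n", target_passed(val, next));	/* 1: target passed despite wrap */
	return 0;
}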
1011 /* threshold event is triggered in finer grain than soft limit */ in memcg_check_events()
1027 * mm_update_next_owner() may clear mm->owner to NULL in mem_cgroup_from_task()
1043 return current->active_memcg; in active_memcg()
1050 * Obtain a reference on mm->memcg and returns it if successful. If mm
1053 * 2) current->mm->memcg, if available
1077 css_get(&memcg->css); in get_mem_cgroup_from_mm()
1080 mm = current->mm; in get_mem_cgroup_from_mm()
1087 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); in get_mem_cgroup_from_mm()
1090 } while (!css_tryget(&memcg->css)); in get_mem_cgroup_from_mm()
1102 /* Memcg to charge can't be determined. */ in memcg_kmem_bypass()
1103 if (!in_task() || !current->mm || (current->flags & PF_KTHREAD)) in memcg_kmem_bypass()
1110 * mem_cgroup_iter - iterate over memory cgroup hierarchy
1116 * @root itself, or %NULL after a full round-trip.
1120 * to cancel a hierarchy walk before the round-trip is complete.
1146 mz = root->nodeinfo[reclaim->pgdat->node_id]; in mem_cgroup_iter()
1147 iter = &mz->iter; in mem_cgroup_iter()
1150 * On start, join the current reclaim iteration cycle. in mem_cgroup_iter()
1154 reclaim->generation = iter->generation; in mem_cgroup_iter()
1155 else if (reclaim->generation != iter->generation) in mem_cgroup_iter()
1159 pos = READ_ONCE(iter->position); in mem_cgroup_iter()
1160 if (!pos || css_tryget(&pos->css)) in mem_cgroup_iter()
1163 * css reference reached zero, so iter->position will in mem_cgroup_iter()
1164 * be cleared by ->css_released. However, we should not in mem_cgroup_iter()
1165 * rely on this happening soon, because ->css_released in mem_cgroup_iter()
1166 * is called from a work queue, and by busy-waiting we in mem_cgroup_iter()
1167 * might block it. So we clear iter->position right in mem_cgroup_iter()
1170 (void)cmpxchg(&iter->position, pos, NULL); in mem_cgroup_iter()
1177 css = &pos->css; in mem_cgroup_iter()
1180 css = css_next_descendant_pre(css, &root->css); in mem_cgroup_iter()
1185 * the hierarchy - make sure they see at least in mem_cgroup_iter()
1198 if (css == &root->css || css_tryget(css)) { in mem_cgroup_iter()
1210 (void)cmpxchg(&iter->position, pos, memcg); in mem_cgroup_iter()
1213 css_put(&pos->css); in mem_cgroup_iter()
1216 iter->generation++; in mem_cgroup_iter()
1222 css_put(&prev->css); in mem_cgroup_iter()
1228 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1238 css_put(&prev->css); in mem_cgroup_iter_break()
1249 mz = from->nodeinfo[nid]; in __invalidate_reclaim_iterators()
1250 iter = &mz->iter; in __invalidate_reclaim_iterators()
1251 cmpxchg(&iter->position, dead_memcg, NULL); in __invalidate_reclaim_iterators()
1266 * When cgroup1 non-hierarchy mode is used, in invalidate_reclaim_iterators()
1277 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1283 * descendants and calls @fn for each task. If @fn returns a non-zero
1301 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it); in mem_cgroup_scan_tasks()
1333 * folio_lruvec_lock - Lock the lruvec for a folio.
1337 * - folio locked
1338 * - folio_test_lru false
1339 * - folio_memcg_lock()
1340 * - folio frozen (refcount of 0)
1348 spin_lock(&lruvec->lru_lock); in folio_lruvec_lock()
1355 * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1359 * - folio locked
1360 * - folio_test_lru false
1361 * - folio_memcg_lock()
1362 * - folio frozen (refcount of 0)
1371 spin_lock_irq(&lruvec->lru_lock); in folio_lruvec_lock_irq()
1378 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1383 * - folio locked
1384 * - folio_test_lru false
1385 * - folio_memcg_lock()
1386 * - folio frozen (refcount of 0)
1396 spin_lock_irqsave(&lruvec->lru_lock, *flags); in folio_lruvec_lock_irqsave()
1403 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1428 lru_size = &mz->lru_zone_size[zid][lru]; in mem_cgroup_update_lru_size()
1446 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1456 unsigned long limit; in mem_cgroup_margin() local
1458 count = page_counter_read(&memcg->memory); in mem_cgroup_margin()
1459 limit = READ_ONCE(memcg->memory.max); in mem_cgroup_margin()
1460 if (count < limit) in mem_cgroup_margin()
1461 margin = limit - count; in mem_cgroup_margin()
1464 count = page_counter_read(&memcg->memsw); in mem_cgroup_margin()
1465 limit = READ_ONCE(memcg->memsw.max); in mem_cgroup_margin()
1466 if (count < limit) in mem_cgroup_margin()
1467 margin = min(margin, limit - count); in mem_cgroup_margin()
1479 * moving cgroups. This is for waiting at high-memory pressure
1506 if (mc.moving_task && current != mc.moving_task) { in mem_cgroup_wait_acct_move()
1510 /* moving charge context might have finished. */ in mem_cgroup_wait_acct_move()
1607 * 1) generic big picture -> specifics and details in memcg_stat_format()
1608 * 2) reflecting userspace activity -> reflecting kernel heuristics in memcg_stat_format()
1610 * Current memory state: in memcg_stat_format()
1665 * @memcg: The memory cgroup that went over limit
1677 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_context()
1690 * @memcg: The memory cgroup that went over limit
1700 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n", in mem_cgroup_print_oom_meminfo()
1701 K((u64)page_counter_read(&memcg->memory)), in mem_cgroup_print_oom_meminfo()
1702 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt); in mem_cgroup_print_oom_meminfo()
1704 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n", in mem_cgroup_print_oom_meminfo()
1705 K((u64)page_counter_read(&memcg->swap)), in mem_cgroup_print_oom_meminfo()
1706 K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt); in mem_cgroup_print_oom_meminfo()
1708 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n", in mem_cgroup_print_oom_meminfo()
1709 K((u64)page_counter_read(&memcg->memsw)), in mem_cgroup_print_oom_meminfo()
1710 K((u64)memcg->memsw.max), memcg->memsw.failcnt); in mem_cgroup_print_oom_meminfo()
1711 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n", in mem_cgroup_print_oom_meminfo()
1712 K((u64)page_counter_read(&memcg->kmem)), in mem_cgroup_print_oom_meminfo()
1713 K((u64)memcg->kmem.max), memcg->kmem.failcnt); in mem_cgroup_print_oom_meminfo()
1717 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_meminfo()
1725 * Return the memory (and swap, if configured) limit for a memcg.
1729 unsigned long max = READ_ONCE(memcg->memory.max); in mem_cgroup_get_max()
1733 /* Calculate swap excess capacity from memsw limit */ in mem_cgroup_get_max()
1734 unsigned long swap = READ_ONCE(memcg->memsw.max) - max; in mem_cgroup_get_max()
1740 max += min(READ_ONCE(memcg->swap.max), in mem_cgroup_get_max()
1748 return page_counter_read(&memcg->memory); in mem_cgroup_size()
1839 * Check whether the OOM-Killer is already running under our hierarchy.
1849 if (iter->oom_lock) { in mem_cgroup_oom_trylock()
1858 iter->oom_lock = true; in mem_cgroup_oom_trylock()
1871 iter->oom_lock = false; in mem_cgroup_oom_trylock()
1888 iter->oom_lock = false; in mem_cgroup_oom_unlock()
1898 iter->under_oom++; in mem_cgroup_mark_under_oom()
1912 if (iter->under_oom > 0) in mem_cgroup_unmark_under_oom()
1913 iter->under_oom--; in mem_cgroup_unmark_under_oom()
1932 oom_wait_memcg = oom_wait_info->memcg; in memcg_oom_wake_function()
1943 * For the following lockless ->under_oom test, the only required in memcg_oom_recover()
1950 if (memcg && memcg->under_oom) in memcg_oom_recover()
1968 * We are in the middle of the charge context here, so we in mem_cgroup_oom()
1973 * handling until the charge can succeed; remember the context and put in mem_cgroup_oom()
1977 * On the other hand, in-kernel OOM killer allows for an async victim in mem_cgroup_oom()
1983 * victim and then we have to bail out from the charge path. in mem_cgroup_oom()
1985 if (READ_ONCE(memcg->oom_kill_disable)) { in mem_cgroup_oom()
1986 if (current->in_user_fault) { in mem_cgroup_oom()
1987 css_get(&memcg->css); in mem_cgroup_oom()
1988 current->memcg_in_oom = memcg; in mem_cgroup_oom()
1989 current->memcg_oom_gfp_mask = mask; in mem_cgroup_oom()
1990 current->memcg_oom_order = order; in mem_cgroup_oom()
2012 * mem_cgroup_oom_synchronize - complete memcg OOM handling
2020 * situation. Sleeping directly in the charge context with all kinds
2030 struct mem_cgroup *memcg = current->memcg_in_oom; in mem_cgroup_oom_synchronize()
2044 owait.wait.private = current; in mem_cgroup_oom_synchronize()
2062 current->memcg_in_oom = NULL; in mem_cgroup_oom_synchronize()
2063 css_put(&memcg->css); in mem_cgroup_oom_synchronize()
2068 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
2070 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
2073 * by killing all belonging OOM-killable tasks.
2075 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
2106 * highest-level memory cgroup with oom.group set. in mem_cgroup_get_oom_group()
2109 if (READ_ONCE(memcg->oom_group)) in mem_cgroup_get_oom_group()
2117 css_get(&oom_group->css); in mem_cgroup_get_oom_group()
2127 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_group()
2132 * folio_memcg_lock - Bind a folio to its memcg.
2148 * path can get away without acquiring the memcg->move_lock in folio_memcg_lock()
2162 might_lock(&memcg->move_lock); in folio_memcg_lock()
2166 if (atomic_read(&memcg->moving_account) <= 0) in folio_memcg_lock()
2169 spin_lock_irqsave(&memcg->move_lock, flags); in folio_memcg_lock()
2171 spin_unlock_irqrestore(&memcg->move_lock, flags); in folio_memcg_lock()
2176 * When charge migration first begins, we can have multiple in folio_memcg_lock()
2177 * critical sections holding the fast-path RCU lock and one in folio_memcg_lock()
2181 memcg->move_lock_task = current; in folio_memcg_lock()
2182 memcg->move_lock_flags = flags; in folio_memcg_lock()
2187 if (memcg && memcg->move_lock_task == current) { in __folio_memcg_unlock()
2188 unsigned long flags = memcg->move_lock_flags; in __folio_memcg_unlock()
2190 memcg->move_lock_task = NULL; in __folio_memcg_unlock()
2191 memcg->move_lock_flags = 0; in __folio_memcg_unlock()
2193 spin_unlock_irqrestore(&memcg->move_lock, flags); in __folio_memcg_unlock()
2200 * folio_memcg_unlock - Release the binding between a folio and its memcg.
2256 * consume_stock: Try to consume stocked charge on this cpu.
2258 * @nr_pages: how many pages to charge.
2260 * The charges will only happen if @memcg matches the current cpu's memcg
2278 if (memcg == READ_ONCE(stock->cached) && stock->nr_pages >= nr_pages) { in consume_stock()
2279 stock->nr_pages -= nr_pages; in consume_stock()
2293 struct mem_cgroup *old = READ_ONCE(stock->cached); in drain_stock()
2298 if (stock->nr_pages) { in drain_stock()
2299 page_counter_uncharge(&old->memory, stock->nr_pages); in drain_stock()
2301 page_counter_uncharge(&old->memsw, stock->nr_pages); in drain_stock()
2302 stock->nr_pages = 0; in drain_stock()
2305 css_put(&old->css); in drain_stock()
2306 WRITE_ONCE(stock->cached, NULL); in drain_stock()
2325 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); in drain_local_stock()
2341 if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */ in __refill_stock()
2343 css_get(&memcg->css); in __refill_stock()
2344 WRITE_ONCE(stock->cached, memcg); in __refill_stock()
2346 stock->nr_pages += nr_pages; in __refill_stock()
2348 if (stock->nr_pages > MEMCG_CHARGE_BATCH) in __refill_stock()
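consume_stock() and __refill_stock() above implement a per-CPU cache of pre-charged pages so that most charges never touch the shared page counters: a miss charges a whole batch to the counter and stashes the surplus locally. A single-threaded userspace model of that fast path follows; the names and the 64-page batch are illustrative, and the reset/drain handling of a mismatched cached memcg is omitted for brevity:

#include <stdio.h>

#define CHARGE_BATCH 64UL		/* illustrative batch size */

struct counter { unsigned long usage; };	/* stands in for page_counter */
struct stock   { struct counter *cached; unsigned long nr_pages; };

/* Fast path: serve the charge from the local stock when it matches. */
static int consume_stock(struct stock *s, struct counter *c, unsigned long nr)
{
	if (s->cached == c && s->nr_pages >= nr) {
		s->nr_pages -= nr;
		return 1;
	}
	return 0;
}

/* Slow path: charge the shared counter once per batch, stash the surplus. */
static void charge_batch(struct stock *s, struct counter *c, unsigned long nr)
{
	unsigned long batch = nr > CHARGE_BATCH ? nr : CHARGE_BATCH;

	c->usage += batch;		/* one update of the shared counter */
	s->cached = c;
	s->nr_pages += batch - nr;	/* leftover feeds later fast paths */
}

int main(void)
{
	struct counter memcg = { 0 };
	struct stock cpu0 = { NULL, 0 };

	if (!consume_stock(&cpu0, &memcg, 1))
		charge_batch(&cpu0, &memcg, 1);
	consume_stock(&cpu0, &memcg, 1);	/* served locally, counter untouched */
	printf("usage=%lu stocked=%lu\n", memcg.usage, cpu0.nr_pages);
	return 0;
}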
2362 * Drains all per-CPU charge caches for given root_memcg resp. subtree
2373 * Notify other cpus that system-wide "drain" is running in drain_all_stock()
2376 * per-cpu data. CPU up doesn't touch memcg_stock at all. in drain_all_stock()
2386 memcg = READ_ONCE(stock->cached); in drain_all_stock()
2387 if (memcg && stock->nr_pages && in drain_all_stock()
2395 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { in drain_all_stock()
2397 drain_local_stock(&stock->work); in drain_all_stock()
2399 schedule_work_on(cpu, &stock->work); in drain_all_stock()
2425 if (page_counter_read(&memcg->memory) <= in reclaim_high()
2426 READ_ONCE(memcg->memory.high)) in reclaim_high()
2462 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2464 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2469 * reasonable delay curve compared to precision-adjusted overage, not
2471 * limit penalises misbehaviour cgroups by slowing them down exponentially. For
2474 * +-------+------------------------+
2476 * +-------+------------------------+
2498 * +-------+------------------------+
2516 overage = usage - high; in calculate_overage()
2526 overage = calculate_overage(page_counter_read(&memcg->memory), in mem_find_max_overage()
2527 READ_ONCE(memcg->memory.high)); in mem_find_max_overage()
2540 overage = calculate_overage(page_counter_read(&memcg->swap), in swap_find_max_overage()
2541 READ_ONCE(memcg->swap.high)); in swap_find_max_overage()
2578 * N-sized allocations are throttled approximately the same as one in calculate_high_delay()
2579 * 4N-sized allocation. in calculate_high_delay()
2582 * larger the current charge batch is than that. in calculate_high_delay()
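calculate_overage() and calculate_high_delay() turn "how far usage sits above memory.high" into a throttling sleep: the excess is expressed as a fixed-point ratio of the high limit, squared so the penalty grows super-linearly, and then scaled back down to jiffies. A hedged userspace sketch of that shape; the shift constants and HZ below are illustrative stand-ins rather than the kernel's definitions:

#include <stdio.h>
#include <stdint.h>

#define DELAY_PRECISION_SHIFT 20	/* extra fixed-point precision bits */
#define DELAY_SCALING_SHIFT   14	/* scale the squared ratio back down */
#define HZ 100

/* Fixed-point overage ratio: (usage - high) / high, scaled up for precision. */
static uint64_t calc_overage(unsigned long usage, unsigned long high)
{
	uint64_t overage;

	if (usage <= high)
		return 0;
	if (high == 0)
		high = 1;			/* avoid division by zero */

	overage = (uint64_t)(usage - high) << DELAY_PRECISION_SHIFT;
	return overage / high;
}

/* Square the overage so persistent offenders are slowed super-linearly. */
static uint64_t calc_delay(uint64_t overage)
{
	uint64_t penalty = overage * overage * HZ;

	penalty >>= DELAY_PRECISION_SHIFT;
	penalty >>= DELAY_SCALING_SHIFT;
	return penalty;				/* jiffies of throttling */
}

int main(void)
{
	unsigned long high = 1000, usage;

	for (usage = 1100; usage <= 2000; usage += 300)
		printf("usage=%lu -> %llu jiffies\n", usage,
		       (unsigned long long)calc_delay(calc_overage(usage, high)));
	return 0;
}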
2589 * and reclaims memory over the high limit.
2596 unsigned int nr_pages = current->memcg_nr_pages_over_high; in mem_cgroup_handle_over_high()
2604 memcg = get_mem_cgroup_from_mm(current->mm); in mem_cgroup_handle_over_high()
2605 current->memcg_nr_pages_over_high = 0; in mem_cgroup_handle_over_high()
2652 if (nr_reclaimed || nr_retries--) { in mem_cgroup_handle_over_high()
2660 * need to account for any ill-begotten jiffies to pay them off later. in mem_cgroup_handle_over_high()
2667 css_put(&memcg->css); in mem_cgroup_handle_over_high()
2689 page_counter_try_charge(&memcg->memsw, batch, &counter)) { in try_charge_memcg()
2690 if (page_counter_try_charge(&memcg->memory, batch, &counter)) in try_charge_memcg()
2693 page_counter_uncharge(&memcg->memsw, batch); in try_charge_memcg()
2709 * under the limit over triggering OOM kills in these cases. in try_charge_memcg()
2711 if (unlikely(current->flags & PF_MEMALLOC)) in try_charge_memcg()
2714 if (unlikely(task_in_memcg_oom(current))) in try_charge_memcg()
2740 * Even though the limit is exceeded at this point, reclaim in try_charge_memcg()
2741 * may have been able to free some pages. Retry the charge in try_charge_memcg()
2745 * unlikely to succeed so close to the limit, and we fall back in try_charge_memcg()
2751 * At task move, charge accounts can be doubly counted. So, it's in try_charge_memcg()
2757 if (nr_retries--) in try_charge_memcg()
2769 * a forward progress or bypass the charge if the oom killer in try_charge_memcg()
2786 return -ENOMEM; in try_charge_memcg()
2797 * being freed very soon. Allow memory usage to go over the limit in try_charge_memcg()
2800 page_counter_charge(&memcg->memory, nr_pages); in try_charge_memcg()
2802 page_counter_charge(&memcg->memsw, nr_pages); in try_charge_memcg()
2808 refill_stock(memcg, batch - nr_pages); in try_charge_memcg()
2815 * not recorded as it most likely matches current's and won't in try_charge_memcg()
2816 * change in the meantime. As high limit is checked again before in try_charge_memcg()
2822 mem_high = page_counter_read(&memcg->memory) > in try_charge_memcg()
2823 READ_ONCE(memcg->memory.high); in try_charge_memcg()
2824 swap_high = page_counter_read(&memcg->swap) > in try_charge_memcg()
2825 READ_ONCE(memcg->swap.high); in try_charge_memcg()
2830 schedule_work(&memcg->high_work); in try_charge_memcg()
2842 * Target some best-effort fairness between the tasks, in try_charge_memcg()
2846 current->memcg_nr_pages_over_high += batch; in try_charge_memcg()
2847 set_notify_resume(current); in try_charge_memcg()
2852 if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH && in try_charge_memcg()
2853 !(current->flags & PF_MEMALLOC) && in try_charge_memcg()
2874 page_counter_uncharge(&memcg->memory, nr_pages); in cancel_charge()
2876 page_counter_uncharge(&memcg->memsw, nr_pages); in cancel_charge()
2885 * - the page lock in commit_charge()
2886 * - LRU isolation in commit_charge()
2887 * - folio_memcg_lock() in commit_charge()
2888 * - exclusive reference in commit_charge()
2889 * - mem_cgroup_trylock_pages() in commit_charge()
2891 folio->memcg_data = (unsigned long)memcg; in commit_charge()
2932 return -ENOMEM; in memcg_alloc_slab_cgroups()
2941 slab->memcg_data = memcg_data; in memcg_alloc_slab_cgroups()
2942 } else if (cmpxchg(&slab->memcg_data, 0, memcg_data)) { in memcg_alloc_slab_cgroups()
2960 * Slab objects are accounted individually, not per-page. in mem_cgroup_from_obj_folio()
2962 * slab->memcg_data. in mem_cgroup_from_obj_folio()
2974 off = obj_to_index(slab->slab_cache, slab, p); in mem_cgroup_from_obj_folio()
2984 * slab->memcg_data has not been freed yet in mem_cgroup_from_obj_folio()
3043 objcg = rcu_dereference(memcg->objcg); in __get_obj_cgroup_from_memcg()
3063 memcg = mem_cgroup_from_task(current); in get_obj_cgroup_from_current()
3098 page_counter_charge(&memcg->kmem, nr_pages); in memcg_account_kmem()
3100 page_counter_uncharge(&memcg->kmem, -nr_pages); in memcg_account_kmem()
3117 memcg_account_kmem(memcg, -nr_pages); in obj_cgroup_uncharge_pages()
3120 css_put(&memcg->css); in obj_cgroup_uncharge_pages()
3124 * obj_cgroup_charge_pages: charge a number of kernel pages to a objcg
3125 * @objcg: object cgroup to charge
3127 * @nr_pages: number of pages to charge
3145 css_put(&memcg->css); in obj_cgroup_charge_pages()
3151 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
3152 * @page: page to charge
3167 page->memcg_data = (unsigned long)objcg | in __memcg_kmem_charge_page()
3192 folio->memcg_data = 0; in __memcg_kmem_uncharge_page()
3212 if (READ_ONCE(stock->cached_objcg) != objcg) { in mod_objcg_state()
3215 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) in mod_objcg_state()
3216 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; in mod_objcg_state()
3217 WRITE_ONCE(stock->cached_objcg, objcg); in mod_objcg_state()
3218 stock->cached_pgdat = pgdat; in mod_objcg_state()
3219 } else if (stock->cached_pgdat != pgdat) { in mod_objcg_state()
3221 struct pglist_data *oldpg = stock->cached_pgdat; in mod_objcg_state()
3223 if (stock->nr_slab_reclaimable_b) { in mod_objcg_state()
3225 stock->nr_slab_reclaimable_b); in mod_objcg_state()
3226 stock->nr_slab_reclaimable_b = 0; in mod_objcg_state()
3228 if (stock->nr_slab_unreclaimable_b) { in mod_objcg_state()
3230 stock->nr_slab_unreclaimable_b); in mod_objcg_state()
3231 stock->nr_slab_unreclaimable_b = 0; in mod_objcg_state()
3233 stock->cached_pgdat = pgdat; in mod_objcg_state()
3236 bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b in mod_objcg_state()
3237 : &stock->nr_slab_unreclaimable_b; in mod_objcg_state()
3271 if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) { in consume_obj_stock()
3272 stock->nr_bytes -= nr_bytes; in consume_obj_stock()
3283 struct obj_cgroup *old = READ_ONCE(stock->cached_objcg); in drain_obj_stock()
3288 if (stock->nr_bytes) { in drain_obj_stock()
3289 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT; in drain_obj_stock()
3290 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1); in drain_obj_stock()
3297 memcg_account_kmem(memcg, -nr_pages); in drain_obj_stock()
3300 css_put(&memcg->css); in drain_obj_stock()
3304 * The leftover is flushed to the centralized per-memcg value. in drain_obj_stock()
3306 * to a per-cpu stock (probably, on another CPU), see in drain_obj_stock()
3309 * How often it's flushed is a trade-off between the memory in drain_obj_stock()
3310 * limit enforcement accuracy and potential CPU contention, in drain_obj_stock()
3313 atomic_add(nr_bytes, &old->nr_charged_bytes); in drain_obj_stock()
3314 stock->nr_bytes = 0; in drain_obj_stock()
3318 * Flush the vmstat data in the current stock in drain_obj_stock()
3320 if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) { in drain_obj_stock()
3321 if (stock->nr_slab_reclaimable_b) { in drain_obj_stock()
3322 mod_objcg_mlstate(old, stock->cached_pgdat, in drain_obj_stock()
3324 stock->nr_slab_reclaimable_b); in drain_obj_stock()
3325 stock->nr_slab_reclaimable_b = 0; in drain_obj_stock()
3327 if (stock->nr_slab_unreclaimable_b) { in drain_obj_stock()
3328 mod_objcg_mlstate(old, stock->cached_pgdat, in drain_obj_stock()
3330 stock->nr_slab_unreclaimable_b); in drain_obj_stock()
3331 stock->nr_slab_unreclaimable_b = 0; in drain_obj_stock()
3333 stock->cached_pgdat = NULL; in drain_obj_stock()
3336 WRITE_ONCE(stock->cached_objcg, NULL); in drain_obj_stock()
3347 struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg); in obj_stock_flush_required()
3370 if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */ in refill_obj_stock()
3373 WRITE_ONCE(stock->cached_objcg, objcg); in refill_obj_stock()
3374 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) in refill_obj_stock()
3375 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; in refill_obj_stock()
3378 stock->nr_bytes += nr_bytes; in refill_obj_stock()
3380 if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) { in refill_obj_stock()
3381 nr_pages = stock->nr_bytes >> PAGE_SHIFT; in refill_obj_stock()
3382 stock->nr_bytes &= (PAGE_SIZE - 1); in refill_obj_stock()
3402 * In theory, objcg->nr_charged_bytes can have enough in obj_cgroup_charge()
3403 * pre-charged bytes to satisfy the allocation. However, in obj_cgroup_charge()
3404 * flushing objcg->nr_charged_bytes requires two atomic in obj_cgroup_charge()
3405 * operations, and objcg->nr_charged_bytes can't be big. in obj_cgroup_charge()
3406 * The shared objcg->nr_charged_bytes can also become a in obj_cgroup_charge()
3410 * objcg->nr_charged_bytes later on when objcg changes. in obj_cgroup_charge()
3412 * The stock's nr_bytes may contain enough pre-charged bytes in obj_cgroup_charge()
3414 * on the pre-charged bytes not being changed outside of in obj_cgroup_charge()
3416 * pre-charged bytes as well when charging pages. To avoid a in obj_cgroup_charge()
3417 * page uncharge right after a page charge, we set the in obj_cgroup_charge()
3419 * to temporarily allow the pre-charged bytes to exceed the page in obj_cgroup_charge()
3420 * size limit. The maximum reachable value of the pre-charged in obj_cgroup_charge()
3421 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data in obj_cgroup_charge()
3425 nr_bytes = size & (PAGE_SIZE - 1); in obj_cgroup_charge()
3432 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false); in obj_cgroup_charge()
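obj_cgroup_charge() above rounds a sub-page request up to whole pages, charges those pages, and immediately refunds the unused tail of the last page into the per-cpu byte stock so later small allocations can be served without another page charge. A small userspace sketch of that arithmetic, with hypothetical names and no concurrency:

#include <stdio.h>

#define PAGE_SIZE 4096UL

struct byte_stock { unsigned long nr_bytes; };	/* pre-charged leftover bytes */

/* Charge "size" bytes: try the stock, otherwise page-charge and keep the tail. */
static unsigned long charge_bytes(struct byte_stock *s, unsigned long size)
{
	unsigned long nr_pages, nr_bytes;

	if (s->nr_bytes >= size) {		/* served entirely from the stock */
		s->nr_bytes -= size;
		return 0;
	}

	nr_pages = size / PAGE_SIZE;
	nr_bytes = size & (PAGE_SIZE - 1);	/* tail of the last page */
	if (nr_bytes) {
		nr_pages++;
		s->nr_bytes += PAGE_SIZE - nr_bytes;	/* refund the unused tail */
	}
	return nr_pages;			/* pages to charge to the memcg */
}

int main(void)
{
	struct byte_stock stock = { 0 };
	unsigned long pages;

	pages = charge_bytes(&stock, 92);	/* first 92-byte object */
	printf("pages=%lu stock=%lu\n", pages, stock.nr_bytes);
	pages = charge_bytes(&stock, 92);	/* second one comes from the stock */
	printf("pages=%lu stock=%lu\n", pages, stock.nr_bytes);
	return 0;
}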
3457 folio_page(folio, i)->memcg_data = folio->memcg_data; in split_page_memcg()
3460 obj_cgroup_get_many(__folio_objcg(folio), nr - 1); in split_page_memcg()
3462 css_get_many(&memcg->css, nr - 1); in split_page_memcg()
3467 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3475 * Returns 0 on success, -EINVAL on failure.
3489 mod_memcg_state(from, MEMCG_SWAP, -1); in mem_cgroup_move_swap_account()
3493 return -EINVAL; in mem_cgroup_move_swap_account()
3499 return -EINVAL; in mem_cgroup_move_swap_account()
3512 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory; in mem_cgroup_resize_max()
3515 if (signal_pending(current)) { in mem_cgroup_resize_max()
3516 ret = -EINTR; in mem_cgroup_resize_max()
3522 * Make sure that the new limit (memsw or memory limit) doesn't in mem_cgroup_resize_max()
3525 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) : in mem_cgroup_resize_max()
3526 max <= memcg->memsw.max; in mem_cgroup_resize_max()
3529 ret = -EINVAL; in mem_cgroup_resize_max()
3532 if (max > counter->max) in mem_cgroup_resize_max()
3548 ret = -EBUSY; in mem_cgroup_resize_max()
3576 mctz = soft_limit_tree.rb_tree_per_node[pgdat->node_id]; in mem_cgroup_soft_limit_reclaim()
3581 * are acceptable as soft limit is best effort anyway. in mem_cgroup_soft_limit_reclaim()
3583 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root)) in mem_cgroup_soft_limit_reclaim()
3588 * keep exceeding their soft limit and putting the system under in mem_cgroup_soft_limit_reclaim()
3599 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat, in mem_cgroup_soft_limit_reclaim()
3602 spin_lock_irq(&mctz->lock); in mem_cgroup_soft_limit_reclaim()
3612 excess = soft_limit_excess(mz->memcg); in mem_cgroup_soft_limit_reclaim()
3623 spin_unlock_irq(&mctz->lock); in mem_cgroup_soft_limit_reclaim()
3624 css_put(&mz->memcg->css); in mem_cgroup_soft_limit_reclaim()
3637 css_put(&next_mz->memcg->css); in mem_cgroup_soft_limit_reclaim()
3650 /* we call try-to-free pages to make this cgroup empty */ in mem_cgroup_force_empty()
3656 while (nr_retries && page_counter_read(&memcg->memory)) { in mem_cgroup_force_empty()
3657 if (signal_pending(current)) in mem_cgroup_force_empty()
3658 return -EINTR; in mem_cgroup_force_empty()
3662 nr_retries--; in mem_cgroup_force_empty()
3675 return -EINVAL; in mem_cgroup_force_empty_write()
3691 pr_warn_once("Non-hierarchical mode is deprecated. " in mem_cgroup_hierarchy_write()
3692 "Please report your usecase to linux-mm@kvack.org if you " in mem_cgroup_hierarchy_write()
3695 return -EINVAL; in mem_cgroup_hierarchy_write()
3710 val += total_swap_pages - get_nr_swap_pages(); in mem_cgroup_usage()
3713 val = page_counter_read(&memcg->memory); in mem_cgroup_usage()
3715 val = page_counter_read(&memcg->memsw); in mem_cgroup_usage()
3734 switch (MEMFILE_TYPE(cft->private)) { in mem_cgroup_read_u64()
3736 counter = &memcg->memory; in mem_cgroup_read_u64()
3739 counter = &memcg->memsw; in mem_cgroup_read_u64()
3742 counter = &memcg->kmem; in mem_cgroup_read_u64()
3745 counter = &memcg->tcpmem; in mem_cgroup_read_u64()
3751 switch (MEMFILE_ATTR(cft->private)) { in mem_cgroup_read_u64()
3753 if (counter == &memcg->memory) in mem_cgroup_read_u64()
3755 if (counter == &memcg->memsw) in mem_cgroup_read_u64()
3759 return (u64)counter->max * PAGE_SIZE; in mem_cgroup_read_u64()
3761 return (u64)counter->watermark * PAGE_SIZE; in mem_cgroup_read_u64()
3763 return counter->failcnt; in mem_cgroup_read_u64()
3765 return (u64)READ_ONCE(memcg->soft_limit) * PAGE_SIZE; in mem_cgroup_read_u64()
3778 return -EINVAL; in mem_cgroup_dummy_seq_show()
3794 return -ENOMEM; in memcg_online_kmem()
3796 objcg->memcg = memcg; in memcg_online_kmem()
3797 rcu_assign_pointer(memcg->objcg, objcg); in memcg_online_kmem()
3801 memcg->kmemcg_id = memcg->id.id; in memcg_online_kmem()
3825 * The ordering is imposed by list_lru_node->lock taken by in memcg_offline_kmem()
3846 ret = page_counter_set_max(&memcg->tcpmem, max); in memcg_update_tcp_max()
3850 if (!memcg->tcpmem_active) { in memcg_update_tcp_max()
3868 memcg->tcpmem_active = true; in memcg_update_tcp_max()
3887 ret = page_counter_memparse(buf, "-1", &nr_pages); in mem_cgroup_write()
3891 switch (MEMFILE_ATTR(of_cft(of)->private)) { in mem_cgroup_write()
3893 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ in mem_cgroup_write()
3894 ret = -EINVAL; in mem_cgroup_write()
3897 switch (MEMFILE_TYPE(of_cft(of)->private)) { in mem_cgroup_write()
3907 "Please report your usecase to linux-mm@kvack.org if you " in mem_cgroup_write()
3918 ret = -EOPNOTSUPP; in mem_cgroup_write()
3920 WRITE_ONCE(memcg->soft_limit, nr_pages); in mem_cgroup_write()
3934 switch (MEMFILE_TYPE(of_cft(of)->private)) { in mem_cgroup_reset()
3936 counter = &memcg->memory; in mem_cgroup_reset()
3939 counter = &memcg->memsw; in mem_cgroup_reset()
3942 counter = &memcg->kmem; in mem_cgroup_reset()
3945 counter = &memcg->tcpmem; in mem_cgroup_reset()
3951 switch (MEMFILE_ATTR(of_cft(of)->private)) { in mem_cgroup_reset()
3956 counter->failcnt = 0; in mem_cgroup_reset()
3968 return mem_cgroup_from_css(css)->move_charge_at_immigrate; in mem_cgroup_move_charge_read()
3978 "Please report your usecase to linux-mm@kvack.org if you " in mem_cgroup_move_charge_write()
3982 return -EINVAL; in mem_cgroup_move_charge_write()
3985 * No kind of locking is needed in here, because ->can_attach() will in mem_cgroup_move_charge_write()
3990 memcg->move_charge_at_immigrate = val; in mem_cgroup_move_charge_write()
3997 return -ENOSYS; in mem_cgroup_move_charge_write()
4005 #define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
4065 seq_printf(m, "%s=%lu", stat->name, in memcg_numa_stat_show()
4066 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, in memcg_numa_stat_show()
4071 stat->lru_mask, false)); in memcg_numa_stat_show()
4077 seq_printf(m, "hierarchical_%s=%lu", stat->name, in memcg_numa_stat_show()
4078 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, in memcg_numa_stat_show()
4083 stat->lru_mask, true)); in memcg_numa_stat_show()
4161 memory = min(memory, READ_ONCE(mi->memory.max)); in memcg1_stat_format()
4162 memsw = min(memsw, READ_ONCE(mi->memsw.max)); in memcg1_stat_format()
4198 mz = memcg->nodeinfo[pgdat->node_id]; in memcg1_stat_format()
4200 anon_cost += mz->lruvec.anon_cost; in memcg1_stat_format()
4201 file_cost += mz->lruvec.file_cost; in memcg1_stat_format()
4223 return -EINVAL; in mem_cgroup_swappiness_write()
4226 WRITE_ONCE(memcg->swappiness, val); in mem_cgroup_swappiness_write()
4241 t = rcu_dereference(memcg->thresholds.primary); in __mem_cgroup_threshold()
4243 t = rcu_dereference(memcg->memsw_thresholds.primary); in __mem_cgroup_threshold()
4255 i = t->current_threshold; in __mem_cgroup_threshold()
4263 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) in __mem_cgroup_threshold()
4264 eventfd_signal(t->entries[i].eventfd, 1); in __mem_cgroup_threshold()
4275 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) in __mem_cgroup_threshold()
4276 eventfd_signal(t->entries[i].eventfd, 1); in __mem_cgroup_threshold()
4279 t->current_threshold = i - 1; in __mem_cgroup_threshold()
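__mem_cgroup_threshold() keeps the registered thresholds in an array sorted by value plus a current_threshold index pointing at the last entry at or below the usage seen on the previous check; a usage change therefore only walks the entries actually crossed since then, in either direction, signalling each once. A compact userspace model of that walk, with printf standing in for eventfd_signal() and all names hypothetical:

#include <stdio.h>

struct entry { unsigned long threshold; int id; };

static struct entry entries[] = {
	{ 100, 0 }, { 200, 1 }, { 300, 2 }, { 400, 3 },
};
static int nr_entries = 4;
static int current_threshold = -1;	/* index of the last entry <= usage */

/* Signal every threshold crossed since the previous call. */
static void check_thresholds(unsigned long usage)
{
	int i = current_threshold;

	/* usage dropped: walk down over entries that are now above usage */
	for (; i >= 0 && entries[i].threshold > usage; i--)
		printf("signal %d (fell below %lu)\n", entries[i].id,
		       entries[i].threshold);
	i++;
	/* usage grew: walk up over entries that are now at or below usage */
	for (; i < nr_entries && entries[i].threshold <= usage; i++)
		printf("signal %d (reached %lu)\n", entries[i].id,
		       entries[i].threshold);

	current_threshold = i - 1;
}

int main(void)
{
	check_thresholds(250);	/* crosses 100 and 200 on the way up */
	check_thresholds(150);	/* crosses 200 again on the way down */
	return 0;
}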
4300 if (_a->threshold > _b->threshold) in compare_thresholds()
4303 if (_a->threshold < _b->threshold) in compare_thresholds()
4304 return -1; in compare_thresholds()
4315 list_for_each_entry(ev, &memcg->oom_notify, list) in mem_cgroup_oom_notify_cb()
4316 eventfd_signal(ev->eventfd, 1); in mem_cgroup_oom_notify_cb()
4339 ret = page_counter_memparse(args, "-1", &threshold); in __mem_cgroup_usage_register_event()
4343 mutex_lock(&memcg->thresholds_lock); in __mem_cgroup_usage_register_event()
4346 thresholds = &memcg->thresholds; in __mem_cgroup_usage_register_event()
4349 thresholds = &memcg->memsw_thresholds; in __mem_cgroup_usage_register_event()
4355 if (thresholds->primary) in __mem_cgroup_usage_register_event()
4358 size = thresholds->primary ? thresholds->primary->size + 1 : 1; in __mem_cgroup_usage_register_event()
4363 ret = -ENOMEM; in __mem_cgroup_usage_register_event()
4366 new->size = size; in __mem_cgroup_usage_register_event()
4369 if (thresholds->primary) in __mem_cgroup_usage_register_event()
4370 memcpy(new->entries, thresholds->primary->entries, in __mem_cgroup_usage_register_event()
4371 flex_array_size(new, entries, size - 1)); in __mem_cgroup_usage_register_event()
4374 new->entries[size - 1].eventfd = eventfd; in __mem_cgroup_usage_register_event()
4375 new->entries[size - 1].threshold = threshold; in __mem_cgroup_usage_register_event()
4377 /* Sort thresholds. Registering a new threshold isn't time-critical */ in __mem_cgroup_usage_register_event()
4378 sort(new->entries, size, sizeof(*new->entries), in __mem_cgroup_usage_register_event()
4381 /* Find current threshold */ in __mem_cgroup_usage_register_event()
4382 new->current_threshold = -1; in __mem_cgroup_usage_register_event()
4384 if (new->entries[i].threshold <= usage) { in __mem_cgroup_usage_register_event()
4386 * new->current_threshold will not be used until in __mem_cgroup_usage_register_event()
4390 ++new->current_threshold; in __mem_cgroup_usage_register_event()
4396 kfree(thresholds->spare); in __mem_cgroup_usage_register_event()
4397 thresholds->spare = thresholds->primary; in __mem_cgroup_usage_register_event()
4399 rcu_assign_pointer(thresholds->primary, new); in __mem_cgroup_usage_register_event()
4405 mutex_unlock(&memcg->thresholds_lock); in __mem_cgroup_usage_register_event()
4430 mutex_lock(&memcg->thresholds_lock); in __mem_cgroup_usage_unregister_event()
4433 thresholds = &memcg->thresholds; in __mem_cgroup_usage_unregister_event()
4436 thresholds = &memcg->memsw_thresholds; in __mem_cgroup_usage_unregister_event()
4441 if (!thresholds->primary) in __mem_cgroup_usage_unregister_event()
4449 for (i = 0; i < thresholds->primary->size; i++) { in __mem_cgroup_usage_unregister_event()
4450 if (thresholds->primary->entries[i].eventfd != eventfd) in __mem_cgroup_usage_unregister_event()
4456 new = thresholds->spare; in __mem_cgroup_usage_unregister_event()
4469 new->size = size; in __mem_cgroup_usage_unregister_event()
4471 /* Copy thresholds and find current threshold */ in __mem_cgroup_usage_unregister_event()
4472 new->current_threshold = -1; in __mem_cgroup_usage_unregister_event()
4473 for (i = 0, j = 0; i < thresholds->primary->size; i++) { in __mem_cgroup_usage_unregister_event()
4474 if (thresholds->primary->entries[i].eventfd == eventfd) in __mem_cgroup_usage_unregister_event()
4477 new->entries[j] = thresholds->primary->entries[i]; in __mem_cgroup_usage_unregister_event()
4478 if (new->entries[j].threshold <= usage) { in __mem_cgroup_usage_unregister_event()
4480 * new->current_threshold will not be used in __mem_cgroup_usage_unregister_event()
4484 ++new->current_threshold; in __mem_cgroup_usage_unregister_event()
4491 thresholds->spare = thresholds->primary; in __mem_cgroup_usage_unregister_event()
4493 rcu_assign_pointer(thresholds->primary, new); in __mem_cgroup_usage_unregister_event()
4500 kfree(thresholds->spare); in __mem_cgroup_usage_unregister_event()
4501 thresholds->spare = NULL; in __mem_cgroup_usage_unregister_event()
4504 mutex_unlock(&memcg->thresholds_lock); in __mem_cgroup_usage_unregister_event()
4526 return -ENOMEM; in mem_cgroup_oom_register_event()
4530 event->eventfd = eventfd; in mem_cgroup_oom_register_event()
4531 list_add(&event->list, &memcg->oom_notify); in mem_cgroup_oom_register_event()
4534 if (memcg->under_oom) in mem_cgroup_oom_register_event()
4548 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { in mem_cgroup_oom_unregister_event()
4549 if (ev->eventfd == eventfd) { in mem_cgroup_oom_unregister_event()
4550 list_del(&ev->list); in mem_cgroup_oom_unregister_event()
4562 seq_printf(sf, "oom_kill_disable %d\n", READ_ONCE(memcg->oom_kill_disable)); in mem_cgroup_oom_control_read()
4563 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); in mem_cgroup_oom_control_read()
4565 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); in mem_cgroup_oom_control_read()
4576 return -EINVAL; in mem_cgroup_oom_control_write()
4578 WRITE_ONCE(memcg->oom_kill_disable, val); in mem_cgroup_oom_control_write()
4591 return wb_domain_init(&memcg->cgwb_domain, gfp); in memcg_wb_domain_init()
4596 wb_domain_exit(&memcg->cgwb_domain); in memcg_wb_domain_exit()
4601 wb_domain_size_changed(&memcg->cgwb_domain); in memcg_wb_domain_size_changed()
4606 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_wb_domain()
4608 if (!memcg->css.parent) in mem_cgroup_wb_domain()
4611 return &memcg->cgwb_domain; in mem_cgroup_wb_domain()
4615 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4623 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
4626 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
4636 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_wb_stats()
4648 unsigned long ceiling = min(READ_ONCE(memcg->memory.max), in mem_cgroup_wb_stats()
4649 READ_ONCE(memcg->memory.high)); in mem_cgroup_wb_stats()
4650 unsigned long used = page_counter_read(&memcg->memory); in mem_cgroup_wb_stats()
4652 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); in mem_cgroup_wb_stats()
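The headroom reported to the writeback code is min(max, high) - used, clamped at zero and taken as the minimum over the memcg and every ancestor, so the tightest limit anywhere on the path governs dirty throttling. A tiny userspace calculation over a hypothetical two-level path illustrates the per-level step shown in the fragment above:

#include <stdio.h>

struct level { unsigned long max, high, used; };	/* values in pages */

int main(void)
{
	/* illustrative path: leaf first, then its parent */
	struct level path[] = {
		{ .max = 262144,  .high = 131072, .used = 100000 },
		{ .max = 1048576, .high = 524288, .used = 400000 },
	};
	unsigned long headroom = ~0UL;
	int i;

	for (i = 0; i < 2; i++) {
		unsigned long ceiling = path[i].max < path[i].high ?
					path[i].max : path[i].high;
		unsigned long room = ceiling > path[i].used ?
				     ceiling - path[i].used : 0;
		if (room < headroom)
			headroom = room;	/* tightest level wins */
	}
	printf("headroom = %lu pages\n", headroom);	/* 31072: limited by the leaf */
	return 0;
}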
4661 * tracks ownership per-page while the latter per-inode. This was a
4662 * deliberate design decision because honoring per-page ownership in the
4664 * and deemed unnecessary given that write-sharing an inode across
4665 * different cgroups isn't a common use-case.
4667 * Combined with inode majority-writer ownership switching, this works well
4688 * page - a page whose memcg and writeback ownerships don't match - is
4694 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4708 int oldest = -1; in mem_cgroup_track_foreign_dirty_slowpath()
4719 frn = &memcg->cgwb_frn[i]; in mem_cgroup_track_foreign_dirty_slowpath()
4720 if (frn->bdi_id == wb->bdi->id && in mem_cgroup_track_foreign_dirty_slowpath()
4721 frn->memcg_id == wb->memcg_css->id) in mem_cgroup_track_foreign_dirty_slowpath()
4723 if (time_before64(frn->at, oldest_at) && in mem_cgroup_track_foreign_dirty_slowpath()
4724 atomic_read(&frn->done.cnt) == 1) { in mem_cgroup_track_foreign_dirty_slowpath()
4726 oldest_at = frn->at; in mem_cgroup_track_foreign_dirty_slowpath()
4732 * Re-using an existing one. Update timestamp lazily to in mem_cgroup_track_foreign_dirty_slowpath()
4734 * reasonably up-to-date and significantly shorter than in mem_cgroup_track_foreign_dirty_slowpath()
4742 if (time_before64(frn->at, now - update_intv)) in mem_cgroup_track_foreign_dirty_slowpath()
4743 frn->at = now; in mem_cgroup_track_foreign_dirty_slowpath()
4746 frn = &memcg->cgwb_frn[oldest]; in mem_cgroup_track_foreign_dirty_slowpath()
4747 frn->bdi_id = wb->bdi->id; in mem_cgroup_track_foreign_dirty_slowpath()
4748 frn->memcg_id = wb->memcg_css->id; in mem_cgroup_track_foreign_dirty_slowpath()
4749 frn->at = now; in mem_cgroup_track_foreign_dirty_slowpath()
4756 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_flush_foreign()
4762 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i]; in mem_cgroup_flush_foreign()
4770 if (time_after64(frn->at, now - intv) && in mem_cgroup_flush_foreign()
4771 atomic_read(&frn->done.cnt) == 1) { in mem_cgroup_flush_foreign()
4772 frn->at = 0; in mem_cgroup_flush_foreign()
4773 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id); in mem_cgroup_flush_foreign()
4774 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, in mem_cgroup_flush_foreign()
4776 &frn->done); in mem_cgroup_flush_foreign()
4803 * This is way over-engineered. It tries to support fully configurable
4820 struct mem_cgroup *memcg = event->memcg; in memcg_event_remove()
4822 remove_wait_queue(event->wqh, &event->wait); in memcg_event_remove()
4824 event->unregister_event(memcg, event->eventfd); in memcg_event_remove()
4827 eventfd_signal(event->eventfd, 1); in memcg_event_remove()
4829 eventfd_ctx_put(event->eventfd); in memcg_event_remove()
4831 css_put(&memcg->css); in memcg_event_remove()
4837 * Called with wqh->lock held and interrupts disabled.
4844 struct mem_cgroup *memcg = event->memcg; in memcg_event_wake()
4854 * side will require wqh->lock via remove_wait_queue(), in memcg_event_wake()
4857 spin_lock(&memcg->event_list_lock); in memcg_event_wake()
4858 if (!list_empty(&event->list)) { in memcg_event_wake()
4859 list_del_init(&event->list); in memcg_event_wake()
4864 schedule_work(&event->remove); in memcg_event_wake()
4866 spin_unlock(&memcg->event_list_lock); in memcg_event_wake()
4878 event->wqh = wqh; in memcg_event_ptable_queue_proc()
4879 add_wait_queue(wqh, &event->wait); in memcg_event_ptable_queue_proc()
4906 return -EOPNOTSUPP; in memcg_write_event_control()
4912 return -EINVAL; in memcg_write_event_control()
4921 return -EINVAL; in memcg_write_event_control()
4925 return -ENOMEM; in memcg_write_event_control()
4927 event->memcg = memcg; in memcg_write_event_control()
4928 INIT_LIST_HEAD(&event->list); in memcg_write_event_control()
4929 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); in memcg_write_event_control()
4930 init_waitqueue_func_entry(&event->wait, memcg_event_wake); in memcg_write_event_control()
4931 INIT_WORK(&event->remove, memcg_event_remove); in memcg_write_event_control()
4935 ret = -EBADF; in memcg_write_event_control()
4939 event->eventfd = eventfd_ctx_fileget(efile.file); in memcg_write_event_control()
4940 if (IS_ERR(event->eventfd)) { in memcg_write_event_control()
4941 ret = PTR_ERR(event->eventfd); in memcg_write_event_control()
4947 ret = -EBADF; in memcg_write_event_control()
4961 cdentry = cfile.file->f_path.dentry; in memcg_write_event_control()
4962 if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) { in memcg_write_event_control()
4963 ret = -EINVAL; in memcg_write_event_control()
4975 name = cdentry->d_name.name; in memcg_write_event_control()
4978 event->register_event = mem_cgroup_usage_register_event; in memcg_write_event_control()
4979 event->unregister_event = mem_cgroup_usage_unregister_event; in memcg_write_event_control()
4981 event->register_event = mem_cgroup_oom_register_event; in memcg_write_event_control()
4982 event->unregister_event = mem_cgroup_oom_unregister_event; in memcg_write_event_control()
4984 event->register_event = vmpressure_register_event; in memcg_write_event_control()
4985 event->unregister_event = vmpressure_unregister_event; in memcg_write_event_control()
4987 event->register_event = memsw_cgroup_usage_register_event; in memcg_write_event_control()
4988 event->unregister_event = memsw_cgroup_usage_unregister_event; in memcg_write_event_control()
4990 ret = -EINVAL; in memcg_write_event_control()
4999 cfile_css = css_tryget_online_from_dir(cdentry->d_parent, in memcg_write_event_control()
5001 ret = -EINVAL; in memcg_write_event_control()
5009 ret = event->register_event(memcg, event->eventfd, buf); in memcg_write_event_control()
5013 vfs_poll(efile.file, &event->pt); in memcg_write_event_control()
5015 spin_lock_irq(&memcg->event_list_lock); in memcg_write_event_control()
5016 list_add(&event->list, &memcg->event_list); in memcg_write_event_control()
5017 spin_unlock_irq(&memcg->event_list_lock); in memcg_write_event_control()
5029 eventfd_ctx_put(event->eventfd); in memcg_write_event_control()
5183 * Swap-out records and page cache shadow entries need to store memcg
5186 * memory-controlled cgroups to 64k.
5193 * even when there are much fewer than 64k cgroups - possibly none.
5195 * Maintain a private 16-bit ID space for memcg, and allow the ID to
5204 #define MEM_CGROUP_ID_MAX ((1UL << MEM_CGROUP_ID_SHIFT) - 1)
5223 if (memcg->id.id > 0) { in mem_cgroup_id_remove()
5225 idr_remove(&mem_cgroup_idr, memcg->id.id); in mem_cgroup_id_remove()
5228 memcg->id.id = 0; in mem_cgroup_id_remove()
5235 refcount_add(n, &memcg->id.ref); in mem_cgroup_id_get_many()
5240 if (refcount_sub_and_test(n, &memcg->id.ref)) { in mem_cgroup_id_put_many()
5244 css_put(&memcg->css); in mem_cgroup_id_put_many()
5254 * mem_cgroup_from_id - look up a memcg from a memcg id
5263 if (id == -1) in mem_cgroup_from_id()
5284 memcg = ERR_PTR(-ENOENT); in mem_cgroup_get_from_ino()
5300 pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu, in alloc_mem_cgroup_per_node_info()
5302 if (!pn->lruvec_stats_percpu) { in alloc_mem_cgroup_per_node_info()
5307 lruvec_init(&pn->lruvec); in alloc_mem_cgroup_per_node_info()
5309 pn->lruvec.pgdat = NODE_DATA(node); in alloc_mem_cgroup_per_node_info()
5311 pn->memcg = memcg; in alloc_mem_cgroup_per_node_info()
5313 memcg->nodeinfo[node] = pn; in alloc_mem_cgroup_per_node_info()
5319 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; in free_mem_cgroup_per_node_info()
5324 free_percpu(pn->lruvec_stats_percpu); in free_mem_cgroup_per_node_info()
5334 kfree(memcg->vmstats); in __mem_cgroup_free()
5335 free_percpu(memcg->vmstats_percpu); in __mem_cgroup_free()
5351 long error = -ENOMEM; in mem_cgroup_alloc()
5357 memcg->id.id = mem_cgroup_alloc_id(); in mem_cgroup_alloc()
5358 if (memcg->id.id < 0) { in mem_cgroup_alloc()
5359 error = memcg->id.id; in mem_cgroup_alloc()
5363 memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats), GFP_KERNEL); in mem_cgroup_alloc()
5364 if (!memcg->vmstats) in mem_cgroup_alloc()
5367 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu, in mem_cgroup_alloc()
5369 if (!memcg->vmstats_percpu) in mem_cgroup_alloc()
5379 INIT_WORK(&memcg->high_work, high_work_func); in mem_cgroup_alloc()
5380 INIT_LIST_HEAD(&memcg->oom_notify); in mem_cgroup_alloc()
5381 mutex_init(&memcg->thresholds_lock); in mem_cgroup_alloc()
5382 spin_lock_init(&memcg->move_lock); in mem_cgroup_alloc()
5383 vmpressure_init(&memcg->vmpressure); in mem_cgroup_alloc()
5384 INIT_LIST_HEAD(&memcg->event_list); in mem_cgroup_alloc()
5385 spin_lock_init(&memcg->event_list_lock); in mem_cgroup_alloc()
5386 memcg->socket_pressure = jiffies; in mem_cgroup_alloc()
5388 memcg->kmemcg_id = -1; in mem_cgroup_alloc()
5389 INIT_LIST_HEAD(&memcg->objcg_list); in mem_cgroup_alloc()
5392 INIT_LIST_HEAD(&memcg->cgwb_list); in mem_cgroup_alloc()
5394 memcg->cgwb_frn[i].done = in mem_cgroup_alloc()
5398 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock); in mem_cgroup_alloc()
5399 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue); in mem_cgroup_alloc()
5400 memcg->deferred_split_queue.split_queue_len = 0; in mem_cgroup_alloc()
5411 INIT_LIST_HEAD(&memcg->score_node); in mem_cgroup_alloc()
5435 atomic64_set(&memcg->memcg_reclaimed.app_score, 300); in mem_cgroup_css_alloc()
5438 atomic_set(&memcg->memcg_reclaimed.ub_zram2ufs_ratio, 10); in mem_cgroup_css_alloc()
5439 atomic_set(&memcg->memcg_reclaimed.ub_mem2zram_ratio, 60); in mem_cgroup_css_alloc()
5440 atomic_set(&memcg->memcg_reclaimed.refault_threshold, 50); in mem_cgroup_css_alloc()
5442 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); in mem_cgroup_css_alloc()
5443 WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX); in mem_cgroup_css_alloc()
5445 memcg->zswap_max = PAGE_COUNTER_MAX; in mem_cgroup_css_alloc()
5447 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_alloc()
5449 WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent)); in mem_cgroup_css_alloc()
5450 WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable)); in mem_cgroup_css_alloc()
5452 page_counter_init(&memcg->memory, &parent->memory); in mem_cgroup_css_alloc()
5453 page_counter_init(&memcg->swap, &parent->swap); in mem_cgroup_css_alloc()
5454 page_counter_init(&memcg->kmem, &parent->kmem); in mem_cgroup_css_alloc()
5455 page_counter_init(&memcg->tcpmem, &parent->tcpmem); in mem_cgroup_css_alloc()
5458 page_counter_init(&memcg->memory, NULL); in mem_cgroup_css_alloc()
5459 page_counter_init(&memcg->swap, NULL); in mem_cgroup_css_alloc()
5460 page_counter_init(&memcg->kmem, NULL); in mem_cgroup_css_alloc()
5461 page_counter_init(&memcg->tcpmem, NULL); in mem_cgroup_css_alloc()
5464 return &memcg->css; in mem_cgroup_css_alloc()
5475 return &memcg->css; in mem_cgroup_css_alloc()
5504 refcount_set(&memcg->id.ref, 1); in mem_cgroup_css_online()
5518 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); in mem_cgroup_css_online()
5526 return -ENOMEM; in mem_cgroup_css_online()
5538 list_del_init(&memcg->score_node); in mem_cgroup_css_offline()
5548 spin_lock_irq(&memcg->event_list_lock); in mem_cgroup_css_offline()
5549 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { in mem_cgroup_css_offline()
5550 list_del_init(&event->list); in mem_cgroup_css_offline()
5551 schedule_work(&event->remove); in mem_cgroup_css_offline()
5553 spin_unlock_irq(&memcg->event_list_lock); in mem_cgroup_css_offline()
5555 page_counter_set_min(&memcg->memory, 0); in mem_cgroup_css_offline()
5556 page_counter_set_low(&memcg->memory, 0); in mem_cgroup_css_offline()
5583 wb_wait_for_completion(&memcg->cgwb_frn[i].done); in mem_cgroup_css_free()
5588 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active) in mem_cgroup_css_free()
5596 vmpressure_cleanup(&memcg->vmpressure); in mem_cgroup_css_free()
5597 cancel_work_sync(&memcg->high_work); in mem_cgroup_css_free()
5604 * mem_cgroup_css_reset - reset the states of a mem_cgroup
5613 * The current implementation only resets the essential configurations.
5620 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5621 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5622 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5623 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5624 page_counter_set_min(&memcg->memory, 0); in mem_cgroup_css_reset()
5625 page_counter_set_low(&memcg->memory, 0); in mem_cgroup_css_reset()
5626 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5627 WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5628 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5640 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu); in mem_cgroup_css_rstat_flush()
5645 * below us. We're in a per-cpu loop here and this is in mem_cgroup_css_rstat_flush()
5648 delta = memcg->vmstats->state_pending[i]; in mem_cgroup_css_rstat_flush()
5650 memcg->vmstats->state_pending[i] = 0; in mem_cgroup_css_rstat_flush()
5654 v = READ_ONCE(statc->state[i]); in mem_cgroup_css_rstat_flush()
5655 if (v != statc->state_prev[i]) { in mem_cgroup_css_rstat_flush()
5656 delta_cpu = v - statc->state_prev[i]; in mem_cgroup_css_rstat_flush()
5658 statc->state_prev[i] = v; in mem_cgroup_css_rstat_flush()
5663 memcg->vmstats->state_local[i] += delta_cpu; in mem_cgroup_css_rstat_flush()
5666 memcg->vmstats->state[i] += delta; in mem_cgroup_css_rstat_flush()
5668 parent->vmstats->state_pending[i] += delta; in mem_cgroup_css_rstat_flush()
5673 delta = memcg->vmstats->events_pending[i]; in mem_cgroup_css_rstat_flush()
5675 memcg->vmstats->events_pending[i] = 0; in mem_cgroup_css_rstat_flush()
5678 v = READ_ONCE(statc->events[i]); in mem_cgroup_css_rstat_flush()
5679 if (v != statc->events_prev[i]) { in mem_cgroup_css_rstat_flush()
5680 delta_cpu = v - statc->events_prev[i]; in mem_cgroup_css_rstat_flush()
5682 statc->events_prev[i] = v; in mem_cgroup_css_rstat_flush()
5686 memcg->vmstats->events_local[i] += delta_cpu; in mem_cgroup_css_rstat_flush()
5689 memcg->vmstats->events[i] += delta; in mem_cgroup_css_rstat_flush()
5691 parent->vmstats->events_pending[i] += delta; in mem_cgroup_css_rstat_flush()
5696 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid]; in mem_cgroup_css_rstat_flush()
5701 ppn = parent->nodeinfo[nid]; in mem_cgroup_css_rstat_flush()
5703 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu); in mem_cgroup_css_rstat_flush()
5706 delta = pn->lruvec_stats.state_pending[i]; in mem_cgroup_css_rstat_flush()
5708 pn->lruvec_stats.state_pending[i] = 0; in mem_cgroup_css_rstat_flush()
5711 v = READ_ONCE(lstatc->state[i]); in mem_cgroup_css_rstat_flush()
5712 if (v != lstatc->state_prev[i]) { in mem_cgroup_css_rstat_flush()
5713 delta_cpu = v - lstatc->state_prev[i]; in mem_cgroup_css_rstat_flush()
5715 lstatc->state_prev[i] = v; in mem_cgroup_css_rstat_flush()
5719 pn->lruvec_stats.state_local[i] += delta_cpu; in mem_cgroup_css_rstat_flush()
5722 pn->lruvec_stats.state[i] += delta; in mem_cgroup_css_rstat_flush()
5724 ppn->lruvec_stats.state_pending[i] += delta; in mem_cgroup_css_rstat_flush()
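The flush loop above applies the same pattern to every counter: read the current per-CPU value, diff it against the previously recorded snapshot, fold the delta into the local and hierarchical totals, and queue the delta as "pending" for the parent to pick up on its own flush. A minimal user-space model of that pattern (all names here are illustrative, not kernel API):

	/* Illustrative model, not kernel code. */
	#define NR_STATS 4
	#define NR_CPUS  2

	struct node {
		struct node *parent;
		long state[NR_STATS];         /* hierarchical totals */
		long state_local[NR_STATS];   /* this node only */
		long state_pending[NR_STATS]; /* deltas not yet folded into the parent */
		long snap[NR_CPUS][NR_STATS]; /* last per-CPU value seen */
	};

	/* Fold one CPU's counters into @n and queue the delta for n->parent. */
	static void flush_cpu(struct node *n, const long cur[NR_STATS], int cpu)
	{
		for (int i = 0; i < NR_STATS; i++) {
			long delta = n->state_pending[i];    /* handed up by children */

			n->state_pending[i] = 0;

			long delta_cpu = cur[i] - n->snap[cpu][i];
			if (delta_cpu) {
				n->snap[cpu][i] = cur[i];
				n->state_local[i] += delta_cpu;
				delta += delta_cpu;
			}

			if (delta) {
				n->state[i] += delta;
				if (n->parent)
					n->parent->state_pending[i] += delta;
			}
		}
	}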
5731 /* Handlers for move charge at task migration. */
5736 /* Try a single bulk charge without reclaim first, kswapd may wake */ in mem_cgroup_do_precharge()
5744 while (count--) { in mem_cgroup_do_precharge()
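mem_cgroup_do_precharge() first attempts one bulk charge without reclaim and only falls back to charging page by page (allowing reclaim) when that fails. The shape of that fallback, with a toy try_charge() standing in for the real charge path:

	/* Illustrative model, not kernel code. */
	#include <stdbool.h>

	static long budget = 64;	/* toy counter standing in for the page counter */

	/* Toy stand-in for the charge path: fail once the budget is exhausted. */
	static int try_charge(long nr, bool may_reclaim)
	{
		(void)may_reclaim;
		if (budget < nr)
			return -1;
		budget -= nr;
		return 0;
	}

	static int precharge(unsigned long count)
	{
		/* One optimistic bulk charge first; cheapest if it succeeds. */
		if (!try_charge(count, false))
			return 0;

		/* Otherwise charge one unit at a time, allowing reclaim each try. */
		while (count--) {
			if (try_charge(1, true))
				return -1;
		}
		return 0;
	}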
5814 entry->val = ent.val; in mc_handle_swap_pte()
5832 if (!vma->vm_file) /* anonymous vma */ in mc_handle_file_pte()
5837 /* folio is moved even if it's not RSS of this task (page-faulted). */ in mc_handle_file_pte()
5840 folio = filemap_get_incore_folio(vma->vm_file->f_mapping, index); in mc_handle_file_pte()
5847 * mem_cgroup_move_account - move account of the page
5849 * @compound: charge the page as compound or small page
5855 * This function doesn't do "charge" to the new cgroup and doesn't do "uncharge"
5874 ret = -EINVAL; in mem_cgroup_move_account()
5886 __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages); in mem_cgroup_move_account()
5890 -nr_pages); in mem_cgroup_move_account()
5896 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages); in mem_cgroup_move_account()
5900 __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages); in mem_cgroup_move_account()
5905 __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages); in mem_cgroup_move_account()
5910 struct address_space *mapping = folio_mapping(folio); in mem_cgroup_move_account() local
5912 if (mapping_can_writeback(mapping)) { in mem_cgroup_move_account()
5914 -nr_pages); in mem_cgroup_move_account()
5923 __mod_lruvec_state(from_vec, NR_SWAPCACHE, -nr_pages); in mem_cgroup_move_account()
5928 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages); in mem_cgroup_move_account()
5947 css_get(&to->css); in mem_cgroup_move_account()
5948 css_put(&from->css); in mem_cgroup_move_account()
5950 /* Warning should never happen, so don't worry about refcount non-0 */ in mem_cgroup_move_account()
5952 folio->memcg_data = (unsigned long)to; in mem_cgroup_move_account()
5962 mem_cgroup_charge_statistics(from, -nr_pages); in mem_cgroup_move_account()
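Each of the decrements shown above is paired, in the full function, with a matching increment on the destination lruvec, so the two updates net to zero globally while the page's accounting moves from one cgroup to the other. The idiom in miniature, with plain arrays instead of lruvec state:

	/* Illustrative model, not kernel code. */
	enum { NR_ANON, NR_FILE, NR_SHMEM, NR_COUNTERS };

	struct group { long stat[NR_COUNTERS]; };

	/* Move @nr pages' worth of one statistic from @from to @to. */
	static void move_stat(struct group *from, struct group *to, int idx, long nr)
	{
		from->stat[idx] -= nr;
		to->stat[idx]   += nr;
	}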
5970 * get_mctgt_type - get target type of moving charge
5978 * * MC_TARGET_NONE - If the pte is not a target for move charge.
5979 * * MC_TARGET_PAGE - If the page corresponding to this pte is a target for
5980 * move charge. If @target is not NULL, the page is stored in target->page
5982 * * MC_TARGET_SWAP - If the swap entry corresponding to this pte is a
5983 * target for charge migration. If @target is not NULL, the entry is
5984 * stored in target->ent.
5985 * * MC_TARGET_DEVICE - Like MC_TARGET_PAGE but page is device memory and
6045 target->page = page; in get_mctgt_type()
6055 * But we cannot move a tail-page in a THP. in get_mctgt_type()
6061 target->ent = ent; in get_mctgt_type()
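Callers dispatch on the returned target type; the page walk in mem_cgroup_move_charge_pte_range() further below does exactly this. A condensed sketch of that dispatch (the enum values are the ones documented above; the handler bodies are placeholders):

	/* Illustrative sketch, not the kernel's walker. */
	enum mc_target_type {
		MC_TARGET_NONE = 0,
		MC_TARGET_PAGE,
		MC_TARGET_SWAP,
		MC_TARGET_DEVICE,
	};

	static void handle_target(enum mc_target_type type)
	{
		switch (type) {
		case MC_TARGET_PAGE:
		case MC_TARGET_DEVICE:	/* device memory is moved like a page */
			/* move the page's charge, consume one precharge */
			break;
		case MC_TARGET_SWAP:
			/* move the swap entry's charge instead */
			break;
		case MC_TARGET_NONE:
		default:
			break;		/* nothing to move for this pte */
		}
	}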
6095 target->page = page; in get_mctgt_type_thp()
6112 struct vm_area_struct *vma = walk->vma; in mem_cgroup_count_precharge_pte_range()
6129 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in mem_cgroup_count_precharge_pte_range()
6135 pte_unmap_unlock(pte - 1, ptl); in mem_cgroup_count_precharge_pte_range()
6165 mc.moving_task = current; in mem_cgroup_precharge_mc()
6192 page_counter_uncharge(&mc.from->memsw, mc.moved_swap); in __mem_cgroup_clear_mc()
6197 * we charged both to->memory and to->memsw, so we in __mem_cgroup_clear_mc()
6198 * should uncharge to->memory. in __mem_cgroup_clear_mc()
6201 page_counter_uncharge(&mc.to->memory, mc.moved_swap); in __mem_cgroup_clear_mc()
6239 /* charge immigration isn't supported on the default hierarchy */ in mem_cgroup_can_attach()
6244 * Multi-process migrations only happen on the default hierarchy in mem_cgroup_can_attach()
6245 * where charge immigration is not used. Perform charge in mem_cgroup_can_attach()
6260 * tunable will only affect upcoming migrations, not the current one. in mem_cgroup_can_attach()
6263 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); in mem_cgroup_can_attach()
6275 if (mm->owner == p) { in mem_cgroup_can_attach()
6310 struct vm_area_struct *vma = walk->vma; in mem_cgroup_move_charge_pte_range()
6335 if (!list_empty(&folio->_deferred_list)) { in mem_cgroup_move_charge_pte_range()
6354 mc.precharge -= HPAGE_PMD_NR; in mem_cgroup_move_charge_pte_range()
6365 mc.precharge -= HPAGE_PMD_NR; in mem_cgroup_move_charge_pte_range()
6376 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in mem_cgroup_move_charge_pte_range()
6397 * memcg. There should be somebody mapping the head. in mem_cgroup_move_charge_pte_range()
6405 mc.precharge--; in mem_cgroup_move_charge_pte_range()
6418 mc.precharge--; in mem_cgroup_move_charge_pte_range()
6428 pte_unmap_unlock(pte - 1, ptl); in mem_cgroup_move_charge_pte_range()
6434 * We try to charge one by one, but don't do any additional in mem_cgroup_move_charge_pte_range()
6435 * charges to mc.to if we have failed to charge once in attach() in mem_cgroup_move_charge_pte_range()
6457 * for already started RCU-only updates to finish. in mem_cgroup_move_charge()
6459 atomic_inc(&mc.from->moving_account); in mem_cgroup_move_charge()
6467 * to move enough charges, but moving charge is a best-effort in mem_cgroup_move_charge()
6476 * additional charge, the page walk just aborts. in mem_cgroup_move_charge()
6480 atomic_dec(&mc.from->moving_account); in mem_cgroup_move_charge()
6517 if (task->mm && READ_ONCE(task->mm->owner) == task) in mem_cgroup_attach()
6518 lru_gen_migrate_mm(task->mm); in mem_cgroup_attach()
6542 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; in memory_current_read()
6550 return (u64)memcg->memory.watermark * PAGE_SIZE; in memory_peak_read()
6556 READ_ONCE(mem_cgroup_from_seq(m)->memory.min)); in memory_min_show()
6571 page_counter_set_min(&memcg->memory, min); in memory_min_write()
6579 READ_ONCE(mem_cgroup_from_seq(m)->memory.low)); in memory_low_show()
6594 page_counter_set_low(&memcg->memory, low); in memory_low_write()
6602 READ_ONCE(mem_cgroup_from_seq(m)->memory.high)); in memory_high_show()
6619 page_counter_set_high(&memcg->memory, high); in memory_high_write()
6622 unsigned long nr_pages = page_counter_read(&memcg->memory); in memory_high_write()
6628 if (signal_pending(current)) in memory_high_write()
6637 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high, in memory_high_write()
6640 if (!reclaimed && !nr_retries--) in memory_high_write()
6651 READ_ONCE(mem_cgroup_from_seq(m)->memory.max)); in memory_max_show()
6668 xchg(&memcg->memory.max, max); in memory_max_write()
6671 unsigned long nr_pages = page_counter_read(&memcg->memory); in memory_max_write()
6676 if (signal_pending(current)) in memory_max_write()
6686 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, in memory_max_write()
6688 nr_reclaims--; in memory_max_write()
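Writes to memory.high and memory.max share the same retry shape: after lowering the limit, keep reclaiming the excess until usage fits or the retry budget runs out, bailing early on a pending signal. A compact model of that loop (reclaim_excess() and the page counts are toy stand-ins, not kernel functions):

	/* Illustrative model, not kernel code. */
	#include <stdbool.h>

	#define MAX_RECLAIM_RETRIES 16

	static long usage_pages = 100;

	/* Toy reclaim: pretend we can free at most 8 "pages" per pass. */
	static unsigned long reclaim_excess(unsigned long excess)
	{
		unsigned long freed = excess < 8 ? excess : 8;
		usage_pages -= freed;
		return freed;
	}

	/* Shrink usage down to @limit, giving up after a bounded number of
	 * unproductive passes (the kernel also checks for pending signals here). */
	static bool shrink_to_limit(unsigned long limit)
	{
		int retries = MAX_RECLAIM_RETRIES;

		while ((unsigned long)usage_pages > limit) {
			unsigned long excess = usage_pages - limit;

			if (!reclaim_excess(excess) && !retries--)
				return false;
		}
		return true;
	}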
6717 __memory_events_show(m, memcg->memory_events); in memory_events_show()
6725 __memory_events_show(m, memcg->memory_events_local); in memory_events_local_show()
6736 return -ENOMEM; in memory_stat_show()
6788 seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group)); in memory_oom_group_show()
6801 return -EINVAL; in memory_oom_group_write()
6808 return -EINVAL; in memory_oom_group_write()
6810 WRITE_ONCE(memcg->oom_group, oom_group); in memory_oom_group_write()
6833 if (signal_pending(current)) in memory_reclaim()
6834 return -EINTR; in memory_reclaim()
6845 min(nr_to_reclaim - nr_reclaimed, SWAP_CLUSTER_MAX), in memory_reclaim()
6848 if (!reclaimed && !nr_retries--) in memory_reclaim()
6849 return -EAGAIN; in memory_reclaim()
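memory.reclaim works toward an absolute amount rather than a limit: it asks for the remaining shortfall in SWAP_CLUSTER_MAX-sized chunks and returns -EAGAIN once a bounded number of passes make no progress, or -EINTR on a pending signal. Sketched below, building on the toy reclaim_excess() helper and retry budget from the previous sketch:

	/* Illustrative model, not kernel code. */
	#define SWAP_CLUSTER_MAX 32UL

	/* Reclaim @nr_to_reclaim pages in bounded chunks; 0 on success,
	 * -1 (standing in for -EAGAIN) when repeated passes make no progress. */
	static int proactive_reclaim(unsigned long nr_to_reclaim)
	{
		unsigned long nr_reclaimed = 0;
		int retries = MAX_RECLAIM_RETRIES;

		while (nr_reclaimed < nr_to_reclaim) {
			unsigned long want = nr_to_reclaim - nr_reclaimed;
			unsigned long chunk = want < SWAP_CLUSTER_MAX ? want : SWAP_CLUSTER_MAX;
			unsigned long got = reclaim_excess(chunk);

			if (!got && !retries--)
				return -1;
			nr_reclaimed += got;
		}
		return 0;
	}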
6859 .name = "current",
6966 * This makes distribution proportional, but also work-conserving:
6977 * of the ancestor's claim to protection, any unutilized -
6978 * "floating" - protection from up the tree is distributed in
7004 * claimed protection in order to be work-conserving: claimed in effective_protection()
7042 * aren't read atomically - make sure the division is sane. in effective_protection()
7051 unclaimed = parent_effective - siblings_protected; in effective_protection()
7052 unclaimed *= usage - protected; in effective_protection()
7053 unclaimed /= parent_usage - siblings_protected; in effective_protection()
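The three statements above implement a proportional split of the parent's leftover protection: a child using memory beyond its own protected amount may float on whatever its siblings have not claimed, in proportion to its share of the parent's unprotected usage. Written out as one formula, with made-up numbers purely for illustration:

	unclaimed = (parent_effective - siblings_protected)
	            * (usage - protected) / (parent_usage - siblings_protected)

	e.g. parent_effective = 100, siblings_protected = 60, parent_usage = 90,
	     usage = 30, protected = 10:
	     unclaimed = 40 * 20 / 30 = 26 pages of extra protection (integer division).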
7062 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
7063 * @root: the top ancestor of the sub-tree being checked
7067 * of a top-down tree iteration, not for isolated queries.
7091 usage = page_counter_read(&memcg->memory); in mem_cgroup_calculate_protection()
7098 memcg->memory.emin = READ_ONCE(memcg->memory.min); in mem_cgroup_calculate_protection()
7099 memcg->memory.elow = READ_ONCE(memcg->memory.low); in mem_cgroup_calculate_protection()
7103 parent_usage = page_counter_read(&parent->memory); in mem_cgroup_calculate_protection()
7105 WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage, in mem_cgroup_calculate_protection()
7106 READ_ONCE(memcg->memory.min), in mem_cgroup_calculate_protection()
7107 READ_ONCE(parent->memory.emin), in mem_cgroup_calculate_protection()
7108 atomic_long_read(&parent->memory.children_min_usage))); in mem_cgroup_calculate_protection()
7110 WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage, in mem_cgroup_calculate_protection()
7111 READ_ONCE(memcg->memory.low), in mem_cgroup_calculate_protection()
7112 READ_ONCE(parent->memory.elow), in mem_cgroup_calculate_protection()
7113 atomic_long_read(&parent->memory.children_low_usage))); in mem_cgroup_calculate_protection()
7126 css_get(&memcg->css); in charge_memcg()
7144 css_put(&memcg->css); in __mem_cgroup_charge()
7150 * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
7151 * @folio: folio to charge.
7174 if (!memcg || !css_tryget_online(&memcg->css)) in mem_cgroup_swapin_charge_folio()
7180 css_put(&memcg->css); in mem_cgroup_swapin_charge_folio()
7185 * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
7203 * so this is a non-issue here. Memory and swap charge lifetimes in mem_cgroup_swapin_uncharge_swap()
7204 * correspond 1:1 to page and swap slot lifetimes: we charge the in mem_cgroup_swapin_uncharge_swap()
7211 * memory+swap charge, drop the swap entry duplicate. in mem_cgroup_swapin_uncharge_swap()
7234 if (ug->nr_memory) { in uncharge_batch()
7235 page_counter_uncharge(&ug->memcg->memory, ug->nr_memory); in uncharge_batch()
7237 page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory); in uncharge_batch()
7238 if (ug->nr_kmem) in uncharge_batch()
7239 memcg_account_kmem(ug->memcg, -ug->nr_kmem); in uncharge_batch()
7240 memcg_oom_recover(ug->memcg); in uncharge_batch()
7244 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); in uncharge_batch()
7245 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory); in uncharge_batch()
7246 memcg_check_events(ug->memcg, ug->nid); in uncharge_batch()
7250 css_put(&ug->memcg->css); in uncharge_batch()
7280 if (ug->memcg != memcg) { in uncharge_folio()
7281 if (ug->memcg) { in uncharge_folio()
7285 ug->memcg = memcg; in uncharge_folio()
7286 ug->nid = folio_nid(folio); in uncharge_folio()
7289 css_get(&memcg->css); in uncharge_folio()
7295 ug->nr_memory += nr_pages; in uncharge_folio()
7296 ug->nr_kmem += nr_pages; in uncharge_folio()
7298 folio->memcg_data = 0; in uncharge_folio()
7303 ug->nr_memory += nr_pages; in uncharge_folio()
7304 ug->pgpgout++; in uncharge_folio()
7307 folio->memcg_data = 0; in uncharge_folio()
7310 css_put(&memcg->css); in uncharge_folio()
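uncharge_folio() accumulates pages into a per-call batch and only flushes (uncharging the counters, emitting events, dropping the css reference) when the owning memcg changes or the walk ends, which is what uncharge_batch() above does. The batching idiom in generic form, with a placeholder flush():

	/* Illustrative model, not kernel code. */
	#include <stddef.h>

	struct batch {
		const void *owner;	/* current batch key (the memcg in the kernel) */
		unsigned long nr;	/* accumulated pages */
	};

	static void flush(struct batch *b)
	{
		if (!b->owner)
			return;
		/* uncharge b->nr from b->owner, emit events, drop the reference... */
		b->owner = NULL;
		b->nr = 0;
	}

	static void account_one(struct batch *b, const void *owner, unsigned long nr)
	{
		if (b->owner != owner) {
			flush(b);	/* owner changed: flush what we have so far */
			b->owner = owner;
		}
		b->nr += nr;
	}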
7317 /* Don't touch folio->lru of any random page, pre-check: */ in __mem_cgroup_uncharge()
7327 * __mem_cgroup_uncharge_list - uncharge a list of pages

7346 * mem_cgroup_migrate - Charge a folio's replacement.
7350 * Charge @new as a replacement folio for @old. @old will
7353 * Both folios must be locked, @new->mapping must be set up.
7378 /* Force-charge the new page. The old one will be freed soon */ in mem_cgroup_migrate()
7380 page_counter_charge(&memcg->memory, nr_pages); in mem_cgroup_migrate()
7382 page_counter_charge(&memcg->memsw, nr_pages); in mem_cgroup_migrate()
7385 css_get(&memcg->css); in mem_cgroup_migrate()
7409 memcg = mem_cgroup_from_task(current); in mem_cgroup_sk_alloc()
7412 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active) in mem_cgroup_sk_alloc()
7414 if (css_tryget(&memcg->css)) in mem_cgroup_sk_alloc()
7415 sk->sk_memcg = memcg; in mem_cgroup_sk_alloc()
7422 if (sk->sk_memcg) in mem_cgroup_sk_free()
7423 css_put(&sk->sk_memcg->css); in mem_cgroup_sk_free()
7427 * mem_cgroup_charge_skmem - charge socket memory
7428 * @memcg: memcg to charge
7429 * @nr_pages: number of pages to charge
7432 * Charges @nr_pages to @memcg. Returns %true if the charge fits within
7433 * @memcg's configured limit, %false if it doesn't.
7441 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { in mem_cgroup_charge_skmem()
7442 memcg->tcpmem_pressure = 0; in mem_cgroup_charge_skmem()
7445 memcg->tcpmem_pressure = 1; in mem_cgroup_charge_skmem()
7447 page_counter_charge(&memcg->tcpmem, nr_pages); in mem_cgroup_charge_skmem()
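For the legacy tcpmem counter the function tries a precise charge first and, if the limit would be exceeded, records socket-memory pressure and only over-charges when the allocation must not fail. The same two-step shape on a toy counter (the struct layout and the `force` flag are illustrative, not the kernel's):

	/* Illustrative model, not kernel code. */
	#include <stdbool.h>

	struct counter { long usage, max; };

	static bool counter_try_charge(struct counter *c, long nr)
	{
		if (c->usage + nr > c->max)
			return false;
		c->usage += nr;
		return true;
	}

	/* Try within the limit; otherwise mark pressure and force the charge
	 * only if the caller says the allocation must not fail. */
	static bool charge_skmem_like(struct counter *c, long nr, bool force,
				      int *pressure)
	{
		if (counter_try_charge(c, nr)) {
			*pressure = 0;
			return true;
		}
		*pressure = 1;
		if (force) {
			c->usage += nr;	/* over the limit, but allowed when forced */
			return true;
		}
		return false;
	}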
7462 * mem_cgroup_uncharge_skmem - uncharge socket memory
7469 page_counter_uncharge(&memcg->tcpmem, nr_pages); in mem_cgroup_uncharge_skmem()
7473 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); in mem_cgroup_uncharge_skmem()
7502 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
7512 * used for per-memcg-per-cpu caching of per-node statistics. In order in mem_cgroup_init()
7522 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, in mem_cgroup_init()
7530 rtpn->rb_root = RB_ROOT; in mem_cgroup_init()
7531 rtpn->rb_rightmost = NULL; in mem_cgroup_init()
7532 spin_lock_init(&rtpn->lock); in mem_cgroup_init()
7543 while (!refcount_inc_not_zero(&memcg->id.ref)) { in mem_cgroup_id_get_online()
7560 * mem_cgroup_swapout - transfer a memsw charge to swap
7561 * @folio: folio whose memsw charge to transfer
7562 * @entry: swap entry to move the charge to
7564 * Transfer the memsw charge of @folio to @entry.
7589 * have an ID allocated to it anymore, charge the closest online in mem_cgroup_swapout()
7590 * ancestor for the swap instead and transfer the memory+swap charge. in mem_cgroup_swapout()
7596 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); in mem_cgroup_swapout()
7603 folio->memcg_data = 0; in mem_cgroup_swapout()
7606 page_counter_uncharge(&memcg->memory, nr_entries); in mem_cgroup_swapout()
7610 page_counter_charge(&swap_memcg->memsw, nr_entries); in mem_cgroup_swapout()
7611 page_counter_uncharge(&memcg->memsw, nr_entries); in mem_cgroup_swapout()
7616 * i_pages lock which is taken with interrupts-off. It is in mem_cgroup_swapout()
7618 * only synchronisation we have for updating the per-CPU variables. in mem_cgroup_swapout()
7621 mem_cgroup_charge_statistics(memcg, -nr_entries); in mem_cgroup_swapout()
7625 css_put(&memcg->css); in mem_cgroup_swapout()
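The counter operations above are the heart of the handover: the folio's memory charge is released, and the combined memory+swap charge moves to the cgroup that will own the swap entry, which is the closest online ancestor when the original cgroup is already gone. Reduced to toy counter arithmetic:

	/* Illustrative model, not kernel code. */
	struct counters { long memory, memsw; };

	/* Transfer @nr pages' worth of charge from the folio's owner to the
	 * cgroup that will hold the swap entry (possibly the same cgroup). */
	static void swapout_transfer(struct counters *owner,
				     struct counters *swap_owner, long nr)
	{
		owner->memory -= nr;		/* folio no longer charged as memory */
		if (swap_owner != owner) {
			swap_owner->memsw += nr; /* ancestor takes over memory+swap */
			owner->memsw -= nr;
		}
	}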
7629 * __mem_cgroup_try_charge_swap - try charging swap space for a folio
7631 * @entry: swap entry to charge
7633 * Try to charge @folio's memcg for the swap space at @entry.
7635 * Returns 0 on success, -ENOMEM on failure.
7661 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { in __mem_cgroup_try_charge_swap()
7665 return -ENOMEM; in __mem_cgroup_try_charge_swap()
7670 mem_cgroup_id_get_many(memcg, nr_pages - 1); in __mem_cgroup_try_charge_swap()
7679 * __mem_cgroup_uncharge_swap - uncharge swap space
7694 page_counter_uncharge(&memcg->memsw, nr_pages); in __mem_cgroup_uncharge_swap()
7696 page_counter_uncharge(&memcg->swap, nr_pages); in __mem_cgroup_uncharge_swap()
7698 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); in __mem_cgroup_uncharge_swap()
7712 READ_ONCE(memcg->swap.max) - in mem_cgroup_get_nr_swap_pages()
7713 page_counter_read(&memcg->swap)); in mem_cgroup_get_nr_swap_pages()
7733 unsigned long usage = page_counter_read(&memcg->swap); in mem_cgroup_swap_full()
7735 if (usage * 2 >= READ_ONCE(memcg->swap.high) || in mem_cgroup_swap_full()
7736 usage * 2 >= READ_ONCE(memcg->swap.max)) in mem_cgroup_swap_full()
7750 "Please report your usecase to linux-mm@kvack.org if you " in setup_swap_account()
7761 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE; in swap_current_read()
7769 return (u64)memcg->swap.watermark * PAGE_SIZE; in swap_peak_read()
7775 READ_ONCE(mem_cgroup_from_seq(m)->swap.high)); in swap_high_show()
7790 page_counter_set_high(&memcg->swap, high); in swap_high_write()
7798 READ_ONCE(mem_cgroup_from_seq(m)->swap.max)); in swap_max_show()
7813 xchg(&memcg->swap.max, max); in swap_max_write()
7823 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH])); in swap_events_show()
7825 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); in swap_events_show()
7827 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL])); in swap_events_show()
7834 .name = "swap.current",
7893 * obj_cgroup_may_zswap - check if this cgroup can zswap
7896 * Check if the hierarchical zswap limit has been reached.
7900 * once compression has occurred, and this optimistic pre-check avoids
7915 unsigned long max = READ_ONCE(memcg->zswap_max); in obj_cgroup_may_zswap()
7925 cgroup_rstat_flush(memcg->css.cgroup); in obj_cgroup_may_zswap()
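obj_cgroup_may_zswap() walks from the owning cgroup toward the root and refuses as soon as any ancestor's zswap footprint already meets its zswap_max, flushing rstat first so the comparison is not made against stale counters. A simplified hierarchical check (field names are illustrative):

	/* Illustrative model, not kernel code. */
	#include <stdbool.h>

	struct cg {
		struct cg *parent;
		unsigned long zswap_bytes;	/* current zswap footprint */
		unsigned long zswap_max;	/* per-cgroup ceiling */
	};

	/* May @cg store another compressed page? False if any ancestor is full. */
	static bool may_zswap(const struct cg *cg)
	{
		for (; cg; cg = cg->parent) {
			if (cg->zswap_bytes >= cg->zswap_max)
				return false;
		}
		return true;
	}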
7937 * obj_cgroup_charge_zswap - charge compression backend memory
7941 * This forces the charge after obj_cgroup_may_zswap() allowed
7951 VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC)); in obj_cgroup_charge_zswap()
7965 * obj_cgroup_uncharge_zswap - uncharge compression backend memory
7982 mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size); in obj_cgroup_uncharge_zswap()
7983 mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1); in obj_cgroup_uncharge_zswap()
7990 cgroup_rstat_flush(css->cgroup); in zswap_current_read()
7997 READ_ONCE(mem_cgroup_from_seq(m)->zswap_max)); in zswap_max_show()
8012 xchg(&memcg->zswap_max, max); in zswap_max_write()
8019 .name = "zswap.current",