Lines Matching +full:charge +full:- +full:current +full:- +full:limit +full:- +full:mapping in mm/memcontrol.c

1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
19 * Charge lifetime sanitation
38 #include <linux/page-flags.h>
39 #include <linux/backing-dev.h>
109 * Cgroups above their limits are maintained in a RB-Tree, independent of
201 * limit reclaim to prevent infinite loops, if they ever occur.
206 /* for encoding cft->private value on file */
235 return tsk_is_oom_victim(current) || fatal_signal_pending(current) || in task_is_dying()
236 (current->flags & PF_EXITING); in task_is_dying()
244 return &memcg->vmpressure; in memcg_to_vmpressure()
272 * objcg->nr_charged_bytes can't have an arbitrary byte value. in obj_cgroup_release()
276 * 1) CPU0: objcg == stock->cached_objcg in obj_cgroup_release()
281 * objcg->nr_charged_bytes = PAGE_SIZE - 92 in obj_cgroup_release()
283 * 92 bytes are added to stock->nr_bytes in obj_cgroup_release()
285 * 92 bytes are added to objcg->nr_charged_bytes in obj_cgroup_release()
290 nr_bytes = atomic_read(&objcg->nr_charged_bytes); in obj_cgroup_release()
291 WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1)); in obj_cgroup_release()
298 list_del(&objcg->list); in obj_cgroup_release()
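The race documented above (lines 272-291) ends with all sub-page leftovers summing to whole pages by release time, which is what the WARN_ON_ONCE checks. For orientation, the elided body of obj_cgroup_release() roughly does the following; this is a sketch reconstructed around the matched lines, and details such as the objcg_lock name and the percpu_ref/kfree_rcu teardown are assumptions that may differ between kernel versions:

        static void obj_cgroup_release(struct percpu_ref *ref)
        {
                struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
                unsigned int nr_bytes, nr_pages;
                unsigned long flags;

                /* By now all sub-page leftovers must add up to whole pages. */
                nr_bytes = atomic_read(&objcg->nr_charged_bytes);
                WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
                nr_pages = nr_bytes >> PAGE_SHIFT;

                if (nr_pages)
                        obj_cgroup_uncharge_pages(objcg, nr_pages);

                spin_lock_irqsave(&objcg_lock, flags);
                list_del(&objcg->list);
                spin_unlock_irqrestore(&objcg_lock, flags);

                percpu_ref_exit(ref);
                kfree_rcu(objcg, rcu);
        }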
314 ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0, in obj_cgroup_alloc()
320 INIT_LIST_HEAD(&objcg->list); in obj_cgroup_alloc()
329 objcg = rcu_replace_pointer(memcg->objcg, NULL, true); in memcg_reparent_objcgs()
334 list_add(&objcg->list, &memcg->objcg_list); in memcg_reparent_objcgs()
336 list_for_each_entry(iter, &memcg->objcg_list, list) in memcg_reparent_objcgs()
337 WRITE_ONCE(iter->memcg, parent); in memcg_reparent_objcgs()
339 list_splice(&memcg->objcg_list, &parent->objcg_list); in memcg_reparent_objcgs()
343 percpu_ref_kill(&objcg->refcnt); in memcg_reparent_objcgs()
360 * mem_cgroup_css_from_folio - css of the memcg associated with a folio
377 return &memcg->css; in mem_cgroup_css_from_folio()
381 * page_cgroup_ino - return inode number of the memcg a page is charged to
402 while (memcg && !(memcg->css.flags & CSS_ONLINE)) in page_cgroup_ino()
405 ino = cgroup_ino(memcg->css.cgroup); in page_cgroup_ino()
414 struct rb_node **p = &mctz->rb_root.rb_node; in __mem_cgroup_insert_exceeded()
419 if (mz->on_tree) in __mem_cgroup_insert_exceeded()
422 mz->usage_in_excess = new_usage_in_excess; in __mem_cgroup_insert_exceeded()
423 if (!mz->usage_in_excess) in __mem_cgroup_insert_exceeded()
429 if (mz->usage_in_excess < mz_node->usage_in_excess) { in __mem_cgroup_insert_exceeded()
430 p = &(*p)->rb_left; in __mem_cgroup_insert_exceeded()
433 p = &(*p)->rb_right; in __mem_cgroup_insert_exceeded()
438 mctz->rb_rightmost = &mz->tree_node; in __mem_cgroup_insert_exceeded()
440 rb_link_node(&mz->tree_node, parent, p); in __mem_cgroup_insert_exceeded()
441 rb_insert_color(&mz->tree_node, &mctz->rb_root); in __mem_cgroup_insert_exceeded()
442 mz->on_tree = true; in __mem_cgroup_insert_exceeded()
448 if (!mz->on_tree) in __mem_cgroup_remove_exceeded()
451 if (&mz->tree_node == mctz->rb_rightmost) in __mem_cgroup_remove_exceeded()
452 mctz->rb_rightmost = rb_prev(&mz->tree_node); in __mem_cgroup_remove_exceeded()
454 rb_erase(&mz->tree_node, &mctz->rb_root); in __mem_cgroup_remove_exceeded()
455 mz->on_tree = false; in __mem_cgroup_remove_exceeded()
463 spin_lock_irqsave(&mctz->lock, flags); in mem_cgroup_remove_exceeded()
465 spin_unlock_irqrestore(&mctz->lock, flags); in mem_cgroup_remove_exceeded()
470 unsigned long nr_pages = page_counter_read(&memcg->memory); in soft_limit_excess()
471 unsigned long soft_limit = READ_ONCE(memcg->soft_limit); in soft_limit_excess()
475 excess = nr_pages - soft_limit; in soft_limit_excess()
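Pieced together from the matched lines 470-475, the whole helper is just a clamped subtraction of the soft limit from current usage; a sketch, not necessarily the exact upstream body:

        static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
        {
                unsigned long nr_pages = page_counter_read(&memcg->memory);
                unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
                unsigned long excess = 0;

                if (nr_pages > soft_limit)
                        excess = nr_pages - soft_limit;

                return excess;
        }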
500 mz = memcg->nodeinfo[nid]; in mem_cgroup_update_tree()
503 * We have to update the tree if mz is on RB-tree or in mem_cgroup_update_tree()
506 if (excess || mz->on_tree) { in mem_cgroup_update_tree()
509 spin_lock_irqsave(&mctz->lock, flags); in mem_cgroup_update_tree()
510 /* if on-tree, remove it */ in mem_cgroup_update_tree()
511 if (mz->on_tree) in mem_cgroup_update_tree()
514 * Insert again. mz->usage_in_excess will be updated. in mem_cgroup_update_tree()
518 spin_unlock_irqrestore(&mctz->lock, flags); in mem_cgroup_update_tree()
530 mz = memcg->nodeinfo[nid]; in mem_cgroup_remove_from_trees()
544 if (!mctz->rb_rightmost) in __mem_cgroup_largest_soft_limit_node()
547 mz = rb_entry(mctz->rb_rightmost, in __mem_cgroup_largest_soft_limit_node()
555 if (!soft_limit_excess(mz->memcg) || in __mem_cgroup_largest_soft_limit_node()
556 !css_tryget(&mz->memcg->css)) in __mem_cgroup_largest_soft_limit_node()
567 spin_lock_irq(&mctz->lock); in mem_cgroup_largest_soft_limit_node()
569 spin_unlock_irq(&mctz->lock); in mem_cgroup_largest_soft_limit_node()
626 cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id()); in memcg_rstat_updated()
655 cgroup_rstat_flush(root_mem_cgroup->css.cgroup); in do_flush_stats()
676 * Always flush here so that flushing in latency-sensitive paths is in flush_memcg_stats_dwork()
723 return mem_cgroup_events_index[idx] - 1; in memcg_events_index()
745 /* Non-hierarchical (CPU aggregated) page state & events */
756 long x = READ_ONCE(memcg->vmstats->state[idx]); in memcg_page_state()
765 * __mod_memcg_state - update cgroup memory statistics
767 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
775 __this_cpu_add(memcg->vmstats_percpu->state[idx], val); in __mod_memcg_state()
782 long x = READ_ONCE(memcg->vmstats->state_local[idx]); in memcg_page_state_local()
798 memcg = pn->memcg; in __mod_memcg_lruvec_state()
802 * update their counter from in-interrupt context. For these two in __mod_memcg_lruvec_state()
822 __this_cpu_add(memcg->vmstats_percpu->state[idx], val); in __mod_memcg_lruvec_state()
825 __this_cpu_add(pn->lruvec_stats_percpu->state[idx], val); in __mod_memcg_lruvec_state()
832 * __mod_lruvec_state - update lruvec memory statistics
839 * change of state at this level: per-node, per-cgroup, per-lruvec.
887 * when we free the slab object, we need to update the per-memcg in __mod_lruvec_kmem_state()
900 * __count_memcg_events - account VM events in a cgroup
914 __this_cpu_add(memcg->vmstats_percpu->events[index], count); in __count_memcg_events()
925 return READ_ONCE(memcg->vmstats->events[index]); in memcg_events()
935 return READ_ONCE(memcg->vmstats->events_local[index]); in memcg_events_local()
946 nr_pages = -nr_pages; /* for event */ in mem_cgroup_charge_statistics()
949 __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages); in mem_cgroup_charge_statistics()
957 val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events); in mem_cgroup_event_ratelimit()
958 next = __this_cpu_read(memcg->vmstats_percpu->targets[target]); in mem_cgroup_event_ratelimit()
960 if ((long)(next - val) < 0) { in mem_cgroup_event_ratelimit()
971 __this_cpu_write(memcg->vmstats_percpu->targets[target], next); in mem_cgroup_event_ratelimit()
986 /* threshold event is triggered in finer grain than soft limit */ in memcg_check_events()
1002 * mm_update_next_owner() may clear mm->owner to NULL in mem_cgroup_from_task()
1018 return current->active_memcg; in active_memcg()
1025 * Obtain a reference on mm->memcg and returns it if successful. If mm
1028 * 2) current->mm->memcg, if available
1052 css_get(&memcg->css); in get_mem_cgroup_from_mm()
1055 mm = current->mm; in get_mem_cgroup_from_mm()
1062 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); in get_mem_cgroup_from_mm()
1065 } while (!css_tryget(&memcg->css)); in get_mem_cgroup_from_mm()
1077 /* Memcg to charge can't be determined. */ in memcg_kmem_bypass()
1078 if (!in_task() || !current->mm || (current->flags & PF_KTHREAD)) in memcg_kmem_bypass()
1085 * mem_cgroup_iter - iterate over memory cgroup hierarchy
1091 * @root itself, or %NULL after a full round-trip.
1095 * to cancel a hierarchy walk before the round-trip is complete.
1121 mz = root->nodeinfo[reclaim->pgdat->node_id]; in mem_cgroup_iter()
1122 iter = &mz->iter; in mem_cgroup_iter()
1125 * On start, join the current reclaim iteration cycle. in mem_cgroup_iter()
1129 reclaim->generation = iter->generation; in mem_cgroup_iter()
1130 else if (reclaim->generation != iter->generation) in mem_cgroup_iter()
1134 pos = READ_ONCE(iter->position); in mem_cgroup_iter()
1135 if (!pos || css_tryget(&pos->css)) in mem_cgroup_iter()
1138 * css reference reached zero, so iter->position will in mem_cgroup_iter()
1139 * be cleared by ->css_released. However, we should not in mem_cgroup_iter()
1140 * rely on this happening soon, because ->css_released in mem_cgroup_iter()
1141 * is called from a work queue, and by busy-waiting we in mem_cgroup_iter()
1142 * might block it. So we clear iter->position right in mem_cgroup_iter()
1145 (void)cmpxchg(&iter->position, pos, NULL); in mem_cgroup_iter()
1152 css = &pos->css; in mem_cgroup_iter()
1155 css = css_next_descendant_pre(css, &root->css); in mem_cgroup_iter()
1160 * the hierarchy - make sure they see at least in mem_cgroup_iter()
1173 if (css == &root->css || css_tryget(css)) { in mem_cgroup_iter()
1185 (void)cmpxchg(&iter->position, pos, memcg); in mem_cgroup_iter()
1188 css_put(&pos->css); in mem_cgroup_iter()
1191 iter->generation++; in mem_cgroup_iter()
1197 css_put(&prev->css); in mem_cgroup_iter()
1203 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1213 css_put(&prev->css); in mem_cgroup_iter_break()
1224 mz = from->nodeinfo[nid]; in __invalidate_reclaim_iterators()
1225 iter = &mz->iter; in __invalidate_reclaim_iterators()
1226 cmpxchg(&iter->position, dead_memcg, NULL); in __invalidate_reclaim_iterators()
1241 * When cgroup1 non-hierarchy mode is used, in invalidate_reclaim_iterators()
1252 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1258 * descendants and calls @fn for each task. If @fn returns a non-zero
1276 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it); in mem_cgroup_scan_tasks()
1305 * folio_lruvec_lock - Lock the lruvec for a folio.
1309 * - folio locked
1310 * - folio_test_lru false
1311 * - folio_memcg_lock()
1312 * - folio frozen (refcount of 0)
1320 spin_lock(&lruvec->lru_lock); in folio_lruvec_lock()
1327 * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1331 * - folio locked
1332 * - folio_test_lru false
1333 * - folio_memcg_lock()
1334 * - folio frozen (refcount of 0)
1343 spin_lock_irq(&lruvec->lru_lock); in folio_lruvec_lock_irq()
1350 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1355 * - folio locked
1356 * - folio_test_lru false
1357 * - folio_memcg_lock()
1358 * - folio frozen (refcount of 0)
1368 spin_lock_irqsave(&lruvec->lru_lock, *flags); in folio_lruvec_lock_irqsave()
1375 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1395 lru_size = &mz->lru_zone_size[zid][lru]; in mem_cgroup_update_lru_size()
1413 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1423 unsigned long limit; in mem_cgroup_margin() local
1425 count = page_counter_read(&memcg->memory); in mem_cgroup_margin()
1426 limit = READ_ONCE(memcg->memory.max); in mem_cgroup_margin()
1427 if (count < limit) in mem_cgroup_margin()
1428 margin = limit - count; in mem_cgroup_margin()
1431 count = page_counter_read(&memcg->memsw); in mem_cgroup_margin()
1432 limit = READ_ONCE(memcg->memsw.max); in mem_cgroup_margin()
1433 if (count < limit) in mem_cgroup_margin()
1434 margin = min(margin, limit - count); in mem_cgroup_margin()
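The matched lines 1423-1434 cover the whole margin calculation; assembled into one piece below. This is a sketch: the do_memsw_account() guard around the memsw half is an assumption based on how memsw accounting is gated elsewhere in this file.

        static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
        {
                unsigned long margin = 0;
                unsigned long count;
                unsigned long limit;

                count = page_counter_read(&memcg->memory);
                limit = READ_ONCE(memcg->memory.max);
                if (count < limit)
                        margin = limit - count;

                if (do_memsw_account()) {
                        count = page_counter_read(&memcg->memsw);
                        limit = READ_ONCE(memcg->memsw.max);
                        if (count < limit)
                                margin = min(margin, limit - count);
                        else
                                margin = 0;
                }

                return margin;
        }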
1446 * moving cgroups. This is for waiting at high-memory pressure
1473 if (mc.moving_task && current != mc.moving_task) { in mem_cgroup_wait_acct_move()
1477 /* moving charge context might have finished. */ in mem_cgroup_wait_acct_move()
1574 * 1) generic big picture -> specifics and details in memcg_stat_format()
1575 * 2) reflecting userspace activity -> reflecting kernel heuristics in memcg_stat_format()
1577 * Current memory state: in memcg_stat_format()
1632 * @memcg: The memory cgroup that went over limit
1644 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_context()
1657 * @memcg: The memory cgroup that went over limit
1667 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n", in mem_cgroup_print_oom_meminfo()
1668 K((u64)page_counter_read(&memcg->memory)), in mem_cgroup_print_oom_meminfo()
1669 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt); in mem_cgroup_print_oom_meminfo()
1671 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n", in mem_cgroup_print_oom_meminfo()
1672 K((u64)page_counter_read(&memcg->swap)), in mem_cgroup_print_oom_meminfo()
1673 K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt); in mem_cgroup_print_oom_meminfo()
1675 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n", in mem_cgroup_print_oom_meminfo()
1676 K((u64)page_counter_read(&memcg->memsw)), in mem_cgroup_print_oom_meminfo()
1677 K((u64)memcg->memsw.max), memcg->memsw.failcnt); in mem_cgroup_print_oom_meminfo()
1678 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n", in mem_cgroup_print_oom_meminfo()
1679 K((u64)page_counter_read(&memcg->kmem)), in mem_cgroup_print_oom_meminfo()
1680 K((u64)memcg->kmem.max), memcg->kmem.failcnt); in mem_cgroup_print_oom_meminfo()
1684 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_meminfo()
1692 * Return the memory (and swap, if configured) limit for a memcg.
1696 unsigned long max = READ_ONCE(memcg->memory.max); in mem_cgroup_get_max()
1700 /* Calculate swap excess capacity from memsw limit */ in mem_cgroup_get_max()
1701 unsigned long swap = READ_ONCE(memcg->memsw.max) - max; in mem_cgroup_get_max()
1707 max += min(READ_ONCE(memcg->swap.max), in mem_cgroup_get_max()
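Line 1707 is cut off by the match; the second argument of that min() is total_swap_pages (the same symbol that appears near line 3677 below). A sketch of the full helper, with mem_cgroup_swappiness() and do_memsw_account() as assumed guards:

        unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
        {
                unsigned long max = READ_ONCE(memcg->memory.max);

                if (do_memsw_account()) {
                        if (mem_cgroup_swappiness(memcg)) {
                                /* Calculate swap excess capacity from memsw limit */
                                unsigned long swap = READ_ONCE(memcg->memsw.max) - max;

                                max += min(swap, (unsigned long)total_swap_pages);
                        }
                } else {
                        if (mem_cgroup_swappiness(memcg))
                                max += min(READ_ONCE(memcg->swap.max),
                                           (unsigned long)total_swap_pages);
                }
                return max;
        }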
1715 return page_counter_read(&memcg->memory); in mem_cgroup_size()
1806 * Check OOM-Killer is already running under our hierarchy.
1816 if (iter->oom_lock) { in mem_cgroup_oom_trylock()
1825 iter->oom_lock = true; in mem_cgroup_oom_trylock()
1838 iter->oom_lock = false; in mem_cgroup_oom_trylock()
1855 iter->oom_lock = false; in mem_cgroup_oom_unlock()
1865 iter->under_oom++; in mem_cgroup_mark_under_oom()
1879 if (iter->under_oom > 0) in mem_cgroup_unmark_under_oom()
1880 iter->under_oom--; in mem_cgroup_unmark_under_oom()
1899 oom_wait_memcg = oom_wait_info->memcg; in memcg_oom_wake_function()
1910 * For the following lockless ->under_oom test, the only required in memcg_oom_recover()
1917 if (memcg && memcg->under_oom) in memcg_oom_recover()
1935 * We are in the middle of the charge context here, so we in mem_cgroup_oom()
1940 * handling until the charge can succeed; remember the context and put in mem_cgroup_oom()
1944 * On the other hand, in-kernel OOM killer allows for an async victim in mem_cgroup_oom()
1950 * victim and then we have to bail out from the charge path. in mem_cgroup_oom()
1952 if (READ_ONCE(memcg->oom_kill_disable)) { in mem_cgroup_oom()
1953 if (current->in_user_fault) { in mem_cgroup_oom()
1954 css_get(&memcg->css); in mem_cgroup_oom()
1955 current->memcg_in_oom = memcg; in mem_cgroup_oom()
1956 current->memcg_oom_gfp_mask = mask; in mem_cgroup_oom()
1957 current->memcg_oom_order = order; in mem_cgroup_oom()
1979 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1987 * situation. Sleeping directly in the charge context with all kinds
1997 struct mem_cgroup *memcg = current->memcg_in_oom; in mem_cgroup_oom_synchronize()
2011 owait.wait.private = current; in mem_cgroup_oom_synchronize()
2029 current->memcg_in_oom = NULL; in mem_cgroup_oom_synchronize()
2030 css_put(&memcg->css); in mem_cgroup_oom_synchronize()
2035 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
2037 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
2040 * by killing all belonging OOM-killable tasks.
2042 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
2073 * highest-level memory cgroup with oom.group set. in mem_cgroup_get_oom_group()
2076 if (READ_ONCE(memcg->oom_group)) in mem_cgroup_get_oom_group()
2084 css_get(&oom_group->css); in mem_cgroup_get_oom_group()
2094 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_group()
2099 * folio_memcg_lock - Bind a folio to its memcg.
2115 * path can get away without acquiring the memcg->move_lock in folio_memcg_lock()
2129 might_lock(&memcg->move_lock); in folio_memcg_lock()
2133 if (atomic_read(&memcg->moving_account) <= 0) in folio_memcg_lock()
2136 spin_lock_irqsave(&memcg->move_lock, flags); in folio_memcg_lock()
2138 spin_unlock_irqrestore(&memcg->move_lock, flags); in folio_memcg_lock()
2143 * When charge migration first begins, we can have multiple in folio_memcg_lock()
2144 * critical sections holding the fast-path RCU lock and one in folio_memcg_lock()
2148 memcg->move_lock_task = current; in folio_memcg_lock()
2149 memcg->move_lock_flags = flags; in folio_memcg_lock()
2154 if (memcg && memcg->move_lock_task == current) { in __folio_memcg_unlock()
2155 unsigned long flags = memcg->move_lock_flags; in __folio_memcg_unlock()
2157 memcg->move_lock_task = NULL; in __folio_memcg_unlock()
2158 memcg->move_lock_flags = 0; in __folio_memcg_unlock()
2160 spin_unlock_irqrestore(&memcg->move_lock, flags); in __folio_memcg_unlock()
2167 * folio_memcg_unlock - Release the binding between a folio and its memcg.
2223 * consume_stock: Try to consume stocked charge on this cpu.
2225 * @nr_pages: how many pages to charge.
2227 * The charges will only happen if @memcg matches the current cpu's memcg
2245 if (memcg == READ_ONCE(stock->cached) && stock->nr_pages >= nr_pages) { in consume_stock()
2246 stock->nr_pages -= nr_pages; in consume_stock()
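A sketch of consume_stock() assembled around the matched lines 2245-2246: a charge is served locally only if the per-cpu stock is cached for the same memcg and holds enough pages. The local_lock-based protection shown here is how recent kernels guard the per-cpu stock and may differ on older ones:

        static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
        {
                struct memcg_stock_pcp *stock;
                unsigned long flags;
                bool ret = false;

                if (nr_pages > MEMCG_CHARGE_BATCH)
                        return ret;

                local_lock_irqsave(&memcg_stock.stock_lock, flags);

                stock = this_cpu_ptr(&memcg_stock);
                if (memcg == READ_ONCE(stock->cached) && stock->nr_pages >= nr_pages) {
                        stock->nr_pages -= nr_pages;
                        ret = true;
                }

                local_unlock_irqrestore(&memcg_stock.stock_lock, flags);

                return ret;
        }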
2260 struct mem_cgroup *old = READ_ONCE(stock->cached); in drain_stock()
2265 if (stock->nr_pages) { in drain_stock()
2266 page_counter_uncharge(&old->memory, stock->nr_pages); in drain_stock()
2268 page_counter_uncharge(&old->memsw, stock->nr_pages); in drain_stock()
2269 stock->nr_pages = 0; in drain_stock()
2272 css_put(&old->css); in drain_stock()
2273 WRITE_ONCE(stock->cached, NULL); in drain_stock()
2292 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); in drain_local_stock()
2308 if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */ in __refill_stock()
2310 css_get(&memcg->css); in __refill_stock()
2311 WRITE_ONCE(stock->cached, memcg); in __refill_stock()
2313 stock->nr_pages += nr_pages; in __refill_stock()
2315 if (stock->nr_pages > MEMCG_CHARGE_BATCH) in __refill_stock()
2329 * Drains all per-CPU charge caches for given root_memcg resp. subtree
2340 * Notify other cpus that system-wide "drain" is running in drain_all_stock()
2343 * per-cpu data. CPU up doesn't touch memcg_stock at all. in drain_all_stock()
2353 memcg = READ_ONCE(stock->cached); in drain_all_stock()
2354 if (memcg && stock->nr_pages && in drain_all_stock()
2362 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { in drain_all_stock()
2364 drain_local_stock(&stock->work); in drain_all_stock()
2366 schedule_work_on(cpu, &stock->work); in drain_all_stock()
2392 if (page_counter_read(&memcg->memory) <= in reclaim_high()
2393 READ_ONCE(memcg->memory.high)) in reclaim_high()
2429 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2431 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2436 * reasonable delay curve compared to precision-adjusted overage, not
2438 * limit penalises misbehaviour cgroups by slowing them down exponentially. For
2441 * +-------+------------------------+
2443 * +-------+------------------------+
2465 * +-------+------------------------+
2483 overage = usage - high; in calculate_overage()
2493 overage = calculate_overage(page_counter_read(&memcg->memory), in mem_find_max_overage()
2494 READ_ONCE(memcg->memory.high)); in mem_find_max_overage()
2507 overage = calculate_overage(page_counter_read(&memcg->swap), in swap_find_max_overage()
2508 READ_ONCE(memcg->swap.high)); in swap_find_max_overage()
2545 * N-sized allocations are throttled approximately the same as one in calculate_high_delay()
2546 * 4N-sized allocation. in calculate_high_delay()
2549 * larger the current charge patch is than that. in calculate_high_delay()
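To make the precision/scaling comment above concrete, here is a sketch of how the overage over memory.high is computed and then turned into a quadratic penalty inside calculate_high_delay(); div64_u64() is the standard kernel 64-bit division helper, and the exact shape may vary slightly by version:

        static u64 calculate_overage(unsigned long usage, unsigned long high)
        {
                u64 overage;

                if (usage <= high)
                        return 0;

                /* Act as if the threshold were one page to avoid dividing by zero. */
                high = max(high, 1UL);

                overage = usage - high;
                overage <<= MEMCG_DELAY_PRECISION_SHIFT;
                return div64_u64(overage, high);
        }

        /* ...and, inside calculate_high_delay(), the worst overage is squared: */
        penalty_jiffies = max_overage * max_overage * HZ;
        penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
        penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;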
2556 * and reclaims memory over the high limit.
2563 unsigned int nr_pages = current->memcg_nr_pages_over_high; in mem_cgroup_handle_over_high()
2571 memcg = get_mem_cgroup_from_mm(current->mm); in mem_cgroup_handle_over_high()
2572 current->memcg_nr_pages_over_high = 0; in mem_cgroup_handle_over_high()
2619 if (nr_reclaimed || nr_retries--) { in mem_cgroup_handle_over_high()
2627 * need to account for any ill-begotten jiffies to pay them off later. in mem_cgroup_handle_over_high()
2634 css_put(&memcg->css); in mem_cgroup_handle_over_high()
2656 page_counter_try_charge(&memcg->memsw, batch, &counter)) { in try_charge_memcg()
2657 if (page_counter_try_charge(&memcg->memory, batch, &counter)) in try_charge_memcg()
2660 page_counter_uncharge(&memcg->memsw, batch); in try_charge_memcg()
2676 * under the limit over triggering OOM kills in these cases. in try_charge_memcg()
2678 if (unlikely(current->flags & PF_MEMALLOC)) in try_charge_memcg()
2681 if (unlikely(task_in_memcg_oom(current))) in try_charge_memcg()
2707 * Even though the limit is exceeded at this point, reclaim in try_charge_memcg()
2708 * may have been able to free some pages. Retry the charge in try_charge_memcg()
2712 * unlikely to succeed so close to the limit, and we fall back in try_charge_memcg()
2718 * At task move, charge accounts can be doubly counted. So, it's in try_charge_memcg()
2724 if (nr_retries--) in try_charge_memcg()
2736 * a forward progress or bypass the charge if the oom killer in try_charge_memcg()
2753 return -ENOMEM; in try_charge_memcg()
2764 * being freed very soon. Allow memory usage go over the limit in try_charge_memcg()
2767 page_counter_charge(&memcg->memory, nr_pages); in try_charge_memcg()
2769 page_counter_charge(&memcg->memsw, nr_pages); in try_charge_memcg()
2775 refill_stock(memcg, batch - nr_pages); in try_charge_memcg()
2782 * not recorded as it most likely matches current's and won't in try_charge_memcg()
2783 * change in the meantime. As high limit is checked again before in try_charge_memcg()
2789 mem_high = page_counter_read(&memcg->memory) > in try_charge_memcg()
2790 READ_ONCE(memcg->memory.high); in try_charge_memcg()
2791 swap_high = page_counter_read(&memcg->swap) > in try_charge_memcg()
2792 READ_ONCE(memcg->swap.high); in try_charge_memcg()
2797 schedule_work(&memcg->high_work); in try_charge_memcg()
2809 * Target some best-effort fairness between the tasks, in try_charge_memcg()
2813 current->memcg_nr_pages_over_high += batch; in try_charge_memcg()
2814 set_notify_resume(current); in try_charge_memcg()
2819 if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH && in try_charge_memcg()
2820 !(current->flags & PF_MEMALLOC) && in try_charge_memcg()
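To summarize the batching behaviour of try_charge_memcg() visible in the fragments above, here is a deliberately simplified sketch: try_charge_sketch is a made-up name, reclaim, retries and OOM handling are omitted, and MEMCG_CHARGE_BATCH is 64 pages on recent kernels:

        static int try_charge_sketch(struct mem_cgroup *memcg, unsigned int nr_pages)
        {
                unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
                struct page_counter *counter;

                if (consume_stock(memcg, nr_pages))
                        return 0;       /* served from the per-cpu stock */

                if (!page_counter_try_charge(&memcg->memory, batch, &counter))
                        return -ENOMEM; /* the real code reclaims and retries here */

                /* Park the overshoot so the next charges stay off the shared counter. */
                if (batch > nr_pages)
                        refill_stock(memcg, batch - nr_pages);
                return 0;
        }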
2841 page_counter_uncharge(&memcg->memory, nr_pages); in cancel_charge()
2843 page_counter_uncharge(&memcg->memsw, nr_pages); in cancel_charge()
2852 * - the page lock in commit_charge()
2853 * - LRU isolation in commit_charge()
2854 * - folio_memcg_lock() in commit_charge()
2855 * - exclusive reference in commit_charge()
2856 * - mem_cgroup_trylock_pages() in commit_charge()
2858 folio->memcg_data = (unsigned long)memcg; in commit_charge()
2899 return -ENOMEM; in memcg_alloc_slab_cgroups()
2908 slab->memcg_data = memcg_data; in memcg_alloc_slab_cgroups()
2909 } else if (cmpxchg(&slab->memcg_data, 0, memcg_data)) { in memcg_alloc_slab_cgroups()
2927 * Slab objects are accounted individually, not per-page. in mem_cgroup_from_obj_folio()
2929 * slab->memcg_data. in mem_cgroup_from_obj_folio()
2941 off = obj_to_index(slab->slab_cache, slab, p); in mem_cgroup_from_obj_folio()
2951 * slab->memcg_data has not been freed yet in mem_cgroup_from_obj_folio()
3010 objcg = rcu_dereference(memcg->objcg); in __get_obj_cgroup_from_memcg()
3030 memcg = mem_cgroup_from_task(current); in get_obj_cgroup_from_current()
3065 page_counter_charge(&memcg->kmem, nr_pages); in memcg_account_kmem()
3067 page_counter_uncharge(&memcg->kmem, -nr_pages); in memcg_account_kmem()
3084 memcg_account_kmem(memcg, -nr_pages); in obj_cgroup_uncharge_pages()
3087 css_put(&memcg->css); in obj_cgroup_uncharge_pages()
3091 * obj_cgroup_charge_pages: charge a number of kernel pages to a objcg
3092 * @objcg: object cgroup to charge
3094 * @nr_pages: number of pages to charge
3112 css_put(&memcg->css); in obj_cgroup_charge_pages()
3118 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
3119 * @page: page to charge
3134 page->memcg_data = (unsigned long)objcg | in __memcg_kmem_charge_page()
3159 folio->memcg_data = 0; in __memcg_kmem_uncharge_page()
3179 if (READ_ONCE(stock->cached_objcg) != objcg) { in mod_objcg_state()
3182 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) in mod_objcg_state()
3183 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; in mod_objcg_state()
3184 WRITE_ONCE(stock->cached_objcg, objcg); in mod_objcg_state()
3185 stock->cached_pgdat = pgdat; in mod_objcg_state()
3186 } else if (stock->cached_pgdat != pgdat) { in mod_objcg_state()
3188 struct pglist_data *oldpg = stock->cached_pgdat; in mod_objcg_state()
3190 if (stock->nr_slab_reclaimable_b) { in mod_objcg_state()
3192 stock->nr_slab_reclaimable_b); in mod_objcg_state()
3193 stock->nr_slab_reclaimable_b = 0; in mod_objcg_state()
3195 if (stock->nr_slab_unreclaimable_b) { in mod_objcg_state()
3197 stock->nr_slab_unreclaimable_b); in mod_objcg_state()
3198 stock->nr_slab_unreclaimable_b = 0; in mod_objcg_state()
3200 stock->cached_pgdat = pgdat; in mod_objcg_state()
3203 bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b in mod_objcg_state()
3204 : &stock->nr_slab_unreclaimable_b; in mod_objcg_state()
3238 if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) { in consume_obj_stock()
3239 stock->nr_bytes -= nr_bytes; in consume_obj_stock()
3250 struct obj_cgroup *old = READ_ONCE(stock->cached_objcg); in drain_obj_stock()
3255 if (stock->nr_bytes) { in drain_obj_stock()
3256 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT; in drain_obj_stock()
3257 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1); in drain_obj_stock()
3264 memcg_account_kmem(memcg, -nr_pages); in drain_obj_stock()
3267 css_put(&memcg->css); in drain_obj_stock()
3271 * The leftover is flushed to the centralized per-memcg value. in drain_obj_stock()
3273 * to a per-cpu stock (probably, on an other CPU), see in drain_obj_stock()
3276 * How often it's flushed is a trade-off between the memory in drain_obj_stock()
3277 * limit enforcement accuracy and potential CPU contention, in drain_obj_stock()
3280 atomic_add(nr_bytes, &old->nr_charged_bytes); in drain_obj_stock()
3281 stock->nr_bytes = 0; in drain_obj_stock()
3285 * Flush the vmstat data in current stock in drain_obj_stock()
3287 if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) { in drain_obj_stock()
3288 if (stock->nr_slab_reclaimable_b) { in drain_obj_stock()
3289 mod_objcg_mlstate(old, stock->cached_pgdat, in drain_obj_stock()
3291 stock->nr_slab_reclaimable_b); in drain_obj_stock()
3292 stock->nr_slab_reclaimable_b = 0; in drain_obj_stock()
3294 if (stock->nr_slab_unreclaimable_b) { in drain_obj_stock()
3295 mod_objcg_mlstate(old, stock->cached_pgdat, in drain_obj_stock()
3297 stock->nr_slab_unreclaimable_b); in drain_obj_stock()
3298 stock->nr_slab_unreclaimable_b = 0; in drain_obj_stock()
3300 stock->cached_pgdat = NULL; in drain_obj_stock()
3303 WRITE_ONCE(stock->cached_objcg, NULL); in drain_obj_stock()
3314 struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg); in obj_stock_flush_required()
3337 if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */ in refill_obj_stock()
3340 WRITE_ONCE(stock->cached_objcg, objcg); in refill_obj_stock()
3341 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) in refill_obj_stock()
3342 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; in refill_obj_stock()
3345 stock->nr_bytes += nr_bytes; in refill_obj_stock()
3347 if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) { in refill_obj_stock()
3348 nr_pages = stock->nr_bytes >> PAGE_SHIFT; in refill_obj_stock()
3349 stock->nr_bytes &= (PAGE_SIZE - 1); in refill_obj_stock()
3369 * In theory, objcg->nr_charged_bytes can have enough in obj_cgroup_charge()
3370 * pre-charged bytes to satisfy the allocation. However, in obj_cgroup_charge()
3371 * flushing objcg->nr_charged_bytes requires two atomic in obj_cgroup_charge()
3372 * operations, and objcg->nr_charged_bytes can't be big. in obj_cgroup_charge()
3373 * The shared objcg->nr_charged_bytes can also become a in obj_cgroup_charge()
3377 * objcg->nr_charged_bytes later on when objcg changes. in obj_cgroup_charge()
3379 * The stock's nr_bytes may contain enough pre-charged bytes in obj_cgroup_charge()
3381 * on the pre-charged bytes not being changed outside of in obj_cgroup_charge()
3383 * pre-charged bytes as well when charging pages. To avoid a in obj_cgroup_charge()
3384 * page uncharge right after a page charge, we set the in obj_cgroup_charge()
3386 * to temporarily allow the pre-charged bytes to exceed the page in obj_cgroup_charge()
3387 * size limit. The maximum reachable value of the pre-charged in obj_cgroup_charge()
3388 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data in obj_cgroup_charge()
3392 nr_bytes = size & (PAGE_SIZE - 1); in obj_cgroup_charge()
3399 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false); in obj_cgroup_charge()
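The matched line 3399 is the tail of the byte-splitting logic described in the comment above: the request is rounded up to whole pages, and the unused tail of the last page is parked in the per-cpu object stock. A sketch of obj_cgroup_charge() showing that split (reconstructed; minor details may differ by version):

        int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
        {
                unsigned int nr_pages, nr_bytes;
                int ret;

                if (consume_obj_stock(objcg, size))
                        return 0;

                /* Round up to whole pages, charge those, keep the remainder stocked. */
                nr_pages = size >> PAGE_SHIFT;
                nr_bytes = size & (PAGE_SIZE - 1);
                if (nr_bytes)
                        nr_pages += 1;

                ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
                if (!ret && nr_bytes)
                        refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);

                return ret;
        }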
3424 folio_page(folio, i)->memcg_data = folio->memcg_data; in split_page_memcg()
3427 obj_cgroup_get_many(__folio_objcg(folio), nr - 1); in split_page_memcg()
3429 css_get_many(&memcg->css, nr - 1); in split_page_memcg()
3434 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3442 * Returns 0 on success, -EINVAL on failure.
3456 mod_memcg_state(from, MEMCG_SWAP, -1); in mem_cgroup_move_swap_account()
3460 return -EINVAL; in mem_cgroup_move_swap_account()
3466 return -EINVAL; in mem_cgroup_move_swap_account()
3479 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory; in mem_cgroup_resize_max()
3482 if (signal_pending(current)) { in mem_cgroup_resize_max()
3483 ret = -EINTR; in mem_cgroup_resize_max()
3489 * Make sure that the new limit (memsw or memory limit) doesn't in mem_cgroup_resize_max()
3492 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) : in mem_cgroup_resize_max()
3493 max <= memcg->memsw.max; in mem_cgroup_resize_max()
3496 ret = -EINVAL; in mem_cgroup_resize_max()
3499 if (max > counter->max) in mem_cgroup_resize_max()
3515 ret = -EBUSY; in mem_cgroup_resize_max()
3543 mctz = soft_limit_tree.rb_tree_per_node[pgdat->node_id]; in mem_cgroup_soft_limit_reclaim()
3548 * are acceptable as soft limit is best effort anyway. in mem_cgroup_soft_limit_reclaim()
3550 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root)) in mem_cgroup_soft_limit_reclaim()
3555 * keep exceeding their soft limit and putting the system under in mem_cgroup_soft_limit_reclaim()
3566 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat, in mem_cgroup_soft_limit_reclaim()
3569 spin_lock_irq(&mctz->lock); in mem_cgroup_soft_limit_reclaim()
3579 excess = soft_limit_excess(mz->memcg); in mem_cgroup_soft_limit_reclaim()
3590 spin_unlock_irq(&mctz->lock); in mem_cgroup_soft_limit_reclaim()
3591 css_put(&mz->memcg->css); in mem_cgroup_soft_limit_reclaim()
3604 css_put(&next_mz->memcg->css); in mem_cgroup_soft_limit_reclaim()
3617 /* we call try-to-free pages for make this cgroup empty */ in mem_cgroup_force_empty()
3623 while (nr_retries && page_counter_read(&memcg->memory)) { in mem_cgroup_force_empty()
3624 if (signal_pending(current)) in mem_cgroup_force_empty()
3625 return -EINTR; in mem_cgroup_force_empty()
3629 nr_retries--; in mem_cgroup_force_empty()
3642 return -EINVAL; in mem_cgroup_force_empty_write()
3658 pr_warn_once("Non-hierarchical mode is deprecated. " in mem_cgroup_hierarchy_write()
3659 "Please report your usecase to linux-mm@kvack.org if you " in mem_cgroup_hierarchy_write()
3662 return -EINVAL; in mem_cgroup_hierarchy_write()
3677 val += total_swap_pages - get_nr_swap_pages(); in mem_cgroup_usage()
3680 val = page_counter_read(&memcg->memory); in mem_cgroup_usage()
3682 val = page_counter_read(&memcg->memsw); in mem_cgroup_usage()
3701 switch (MEMFILE_TYPE(cft->private)) { in mem_cgroup_read_u64()
3703 counter = &memcg->memory; in mem_cgroup_read_u64()
3706 counter = &memcg->memsw; in mem_cgroup_read_u64()
3709 counter = &memcg->kmem; in mem_cgroup_read_u64()
3712 counter = &memcg->tcpmem; in mem_cgroup_read_u64()
3718 switch (MEMFILE_ATTR(cft->private)) { in mem_cgroup_read_u64()
3720 if (counter == &memcg->memory) in mem_cgroup_read_u64()
3722 if (counter == &memcg->memsw) in mem_cgroup_read_u64()
3726 return (u64)counter->max * PAGE_SIZE; in mem_cgroup_read_u64()
3728 return (u64)counter->watermark * PAGE_SIZE; in mem_cgroup_read_u64()
3730 return counter->failcnt; in mem_cgroup_read_u64()
3732 return (u64)READ_ONCE(memcg->soft_limit) * PAGE_SIZE; in mem_cgroup_read_u64()
3745 return -EINVAL; in mem_cgroup_dummy_seq_show()
3761 return -ENOMEM; in memcg_online_kmem()
3763 objcg->memcg = memcg; in memcg_online_kmem()
3764 rcu_assign_pointer(memcg->objcg, objcg); in memcg_online_kmem()
3768 memcg->kmemcg_id = memcg->id.id; in memcg_online_kmem()
3792 * The ordering is imposed by list_lru_node->lock taken by in memcg_offline_kmem()
3813 ret = page_counter_set_max(&memcg->tcpmem, max); in memcg_update_tcp_max()
3817 if (!memcg->tcpmem_active) { in memcg_update_tcp_max()
3835 memcg->tcpmem_active = true; in memcg_update_tcp_max()
3854 ret = page_counter_memparse(buf, "-1", &nr_pages); in mem_cgroup_write()
3858 switch (MEMFILE_ATTR(of_cft(of)->private)) { in mem_cgroup_write()
3860 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ in mem_cgroup_write()
3861 ret = -EINVAL; in mem_cgroup_write()
3864 switch (MEMFILE_TYPE(of_cft(of)->private)) { in mem_cgroup_write()
3874 "Please report your usecase to linux-mm@kvack.org if you " in mem_cgroup_write()
3885 ret = -EOPNOTSUPP; in mem_cgroup_write()
3887 WRITE_ONCE(memcg->soft_limit, nr_pages); in mem_cgroup_write()
3901 switch (MEMFILE_TYPE(of_cft(of)->private)) { in mem_cgroup_reset()
3903 counter = &memcg->memory; in mem_cgroup_reset()
3906 counter = &memcg->memsw; in mem_cgroup_reset()
3909 counter = &memcg->kmem; in mem_cgroup_reset()
3912 counter = &memcg->tcpmem; in mem_cgroup_reset()
3918 switch (MEMFILE_ATTR(of_cft(of)->private)) { in mem_cgroup_reset()
3923 counter->failcnt = 0; in mem_cgroup_reset()
3935 return mem_cgroup_from_css(css)->move_charge_at_immigrate; in mem_cgroup_move_charge_read()
3945 "Please report your usecase to linux-mm@kvack.org if you " in mem_cgroup_move_charge_write()
3949 return -EINVAL; in mem_cgroup_move_charge_write()
3952 * No kind of locking is needed in here, because ->can_attach() will in mem_cgroup_move_charge_write()
3957 memcg->move_charge_at_immigrate = val; in mem_cgroup_move_charge_write()
3964 return -ENOSYS; in mem_cgroup_move_charge_write()
3972 #define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
4032 seq_printf(m, "%s=%lu", stat->name, in memcg_numa_stat_show()
4033 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, in memcg_numa_stat_show()
4038 stat->lru_mask, false)); in memcg_numa_stat_show()
4044 seq_printf(m, "hierarchical_%s=%lu", stat->name, in memcg_numa_stat_show()
4045 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, in memcg_numa_stat_show()
4050 stat->lru_mask, true)); in memcg_numa_stat_show()
4128 memory = min(memory, READ_ONCE(mi->memory.max)); in memcg1_stat_format()
4129 memsw = min(memsw, READ_ONCE(mi->memsw.max)); in memcg1_stat_format()
4165 mz = memcg->nodeinfo[pgdat->node_id]; in memcg1_stat_format()
4167 anon_cost += mz->lruvec.anon_cost; in memcg1_stat_format()
4168 file_cost += mz->lruvec.file_cost; in memcg1_stat_format()
4190 return -EINVAL; in mem_cgroup_swappiness_write()
4193 WRITE_ONCE(memcg->swappiness, val); in mem_cgroup_swappiness_write()
4208 t = rcu_dereference(memcg->thresholds.primary); in __mem_cgroup_threshold()
4210 t = rcu_dereference(memcg->memsw_thresholds.primary); in __mem_cgroup_threshold()
4222 i = t->current_threshold; in __mem_cgroup_threshold()
4230 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) in __mem_cgroup_threshold()
4231 eventfd_signal(t->entries[i].eventfd, 1); in __mem_cgroup_threshold()
4242 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) in __mem_cgroup_threshold()
4243 eventfd_signal(t->entries[i].eventfd, 1); in __mem_cgroup_threshold()
4246 t->current_threshold = i - 1; in __mem_cgroup_threshold()
4267 if (_a->threshold > _b->threshold) in compare_thresholds()
4270 if (_a->threshold < _b->threshold) in compare_thresholds()
4271 return -1; in compare_thresholds()
4282 list_for_each_entry(ev, &memcg->oom_notify, list) in mem_cgroup_oom_notify_cb()
4283 eventfd_signal(ev->eventfd, 1); in mem_cgroup_oom_notify_cb()
4306 ret = page_counter_memparse(args, "-1", &threshold); in __mem_cgroup_usage_register_event()
4310 mutex_lock(&memcg->thresholds_lock); in __mem_cgroup_usage_register_event()
4313 thresholds = &memcg->thresholds; in __mem_cgroup_usage_register_event()
4316 thresholds = &memcg->memsw_thresholds; in __mem_cgroup_usage_register_event()
4322 if (thresholds->primary) in __mem_cgroup_usage_register_event()
4325 size = thresholds->primary ? thresholds->primary->size + 1 : 1; in __mem_cgroup_usage_register_event()
4330 ret = -ENOMEM; in __mem_cgroup_usage_register_event()
4333 new->size = size; in __mem_cgroup_usage_register_event()
4336 if (thresholds->primary) in __mem_cgroup_usage_register_event()
4337 memcpy(new->entries, thresholds->primary->entries, in __mem_cgroup_usage_register_event()
4338 flex_array_size(new, entries, size - 1)); in __mem_cgroup_usage_register_event()
4341 new->entries[size - 1].eventfd = eventfd; in __mem_cgroup_usage_register_event()
4342 new->entries[size - 1].threshold = threshold; in __mem_cgroup_usage_register_event()
4344 /* Sort thresholds. Registering of new threshold isn't time-critical */ in __mem_cgroup_usage_register_event()
4345 sort(new->entries, size, sizeof(*new->entries), in __mem_cgroup_usage_register_event()
4348 /* Find current threshold */ in __mem_cgroup_usage_register_event()
4349 new->current_threshold = -1; in __mem_cgroup_usage_register_event()
4351 if (new->entries[i].threshold <= usage) { in __mem_cgroup_usage_register_event()
4353 * new->current_threshold will not be used until in __mem_cgroup_usage_register_event()
4357 ++new->current_threshold; in __mem_cgroup_usage_register_event()
4363 kfree(thresholds->spare); in __mem_cgroup_usage_register_event()
4364 thresholds->spare = thresholds->primary; in __mem_cgroup_usage_register_event()
4366 rcu_assign_pointer(thresholds->primary, new); in __mem_cgroup_usage_register_event()
4372 mutex_unlock(&memcg->thresholds_lock); in __mem_cgroup_usage_register_event()
4397 mutex_lock(&memcg->thresholds_lock); in __mem_cgroup_usage_unregister_event()
4400 thresholds = &memcg->thresholds; in __mem_cgroup_usage_unregister_event()
4403 thresholds = &memcg->memsw_thresholds; in __mem_cgroup_usage_unregister_event()
4408 if (!thresholds->primary) in __mem_cgroup_usage_unregister_event()
4416 for (i = 0; i < thresholds->primary->size; i++) { in __mem_cgroup_usage_unregister_event()
4417 if (thresholds->primary->entries[i].eventfd != eventfd) in __mem_cgroup_usage_unregister_event()
4423 new = thresholds->spare; in __mem_cgroup_usage_unregister_event()
4436 new->size = size; in __mem_cgroup_usage_unregister_event()
4438 /* Copy thresholds and find current threshold */ in __mem_cgroup_usage_unregister_event()
4439 new->current_threshold = -1; in __mem_cgroup_usage_unregister_event()
4440 for (i = 0, j = 0; i < thresholds->primary->size; i++) { in __mem_cgroup_usage_unregister_event()
4441 if (thresholds->primary->entries[i].eventfd == eventfd) in __mem_cgroup_usage_unregister_event()
4444 new->entries[j] = thresholds->primary->entries[i]; in __mem_cgroup_usage_unregister_event()
4445 if (new->entries[j].threshold <= usage) { in __mem_cgroup_usage_unregister_event()
4447 * new->current_threshold will not be used in __mem_cgroup_usage_unregister_event()
4451 ++new->current_threshold; in __mem_cgroup_usage_unregister_event()
4458 thresholds->spare = thresholds->primary; in __mem_cgroup_usage_unregister_event()
4460 rcu_assign_pointer(thresholds->primary, new); in __mem_cgroup_usage_unregister_event()
4467 kfree(thresholds->spare); in __mem_cgroup_usage_unregister_event()
4468 thresholds->spare = NULL; in __mem_cgroup_usage_unregister_event()
4471 mutex_unlock(&memcg->thresholds_lock); in __mem_cgroup_usage_unregister_event()
4493 return -ENOMEM; in mem_cgroup_oom_register_event()
4497 event->eventfd = eventfd; in mem_cgroup_oom_register_event()
4498 list_add(&event->list, &memcg->oom_notify); in mem_cgroup_oom_register_event()
4501 if (memcg->under_oom) in mem_cgroup_oom_register_event()
4515 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { in mem_cgroup_oom_unregister_event()
4516 if (ev->eventfd == eventfd) { in mem_cgroup_oom_unregister_event()
4517 list_del(&ev->list); in mem_cgroup_oom_unregister_event()
4529 seq_printf(sf, "oom_kill_disable %d\n", READ_ONCE(memcg->oom_kill_disable)); in mem_cgroup_oom_control_read()
4530 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); in mem_cgroup_oom_control_read()
4532 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); in mem_cgroup_oom_control_read()
4543 return -EINVAL; in mem_cgroup_oom_control_write()
4545 WRITE_ONCE(memcg->oom_kill_disable, val); in mem_cgroup_oom_control_write()
4558 return wb_domain_init(&memcg->cgwb_domain, gfp); in memcg_wb_domain_init()
4563 wb_domain_exit(&memcg->cgwb_domain); in memcg_wb_domain_exit()
4568 wb_domain_size_changed(&memcg->cgwb_domain); in memcg_wb_domain_size_changed()
4573 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_wb_domain()
4575 if (!memcg->css.parent) in mem_cgroup_wb_domain()
4578 return &memcg->cgwb_domain; in mem_cgroup_wb_domain()
4582 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4590 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
4593 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
4603 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_wb_stats()
4615 unsigned long ceiling = min(READ_ONCE(memcg->memory.max), in mem_cgroup_wb_stats()
4616 READ_ONCE(memcg->memory.high)); in mem_cgroup_wb_stats()
4617 unsigned long used = page_counter_read(&memcg->memory); in mem_cgroup_wb_stats()
4619 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); in mem_cgroup_wb_stats()
4628 * tracks ownership per-page while the latter per-inode. This was a
4629 * deliberate design decision because honoring per-page ownership in the
4631 * and deemed unnecessary given that write-sharing an inode across
4632 * different cgroups isn't a common use-case.
4634 * Combined with inode majority-writer ownership switching, this works well
4655 * page - a page whose memcg and writeback ownerships don't match - is
4661 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4675 int oldest = -1; in mem_cgroup_track_foreign_dirty_slowpath()
4686 frn = &memcg->cgwb_frn[i]; in mem_cgroup_track_foreign_dirty_slowpath()
4687 if (frn->bdi_id == wb->bdi->id && in mem_cgroup_track_foreign_dirty_slowpath()
4688 frn->memcg_id == wb->memcg_css->id) in mem_cgroup_track_foreign_dirty_slowpath()
4690 if (time_before64(frn->at, oldest_at) && in mem_cgroup_track_foreign_dirty_slowpath()
4691 atomic_read(&frn->done.cnt) == 1) { in mem_cgroup_track_foreign_dirty_slowpath()
4693 oldest_at = frn->at; in mem_cgroup_track_foreign_dirty_slowpath()
4699 * Re-using an existing one. Update timestamp lazily to in mem_cgroup_track_foreign_dirty_slowpath()
4701 * reasonably up-to-date and significantly shorter than in mem_cgroup_track_foreign_dirty_slowpath()
4709 if (time_before64(frn->at, now - update_intv)) in mem_cgroup_track_foreign_dirty_slowpath()
4710 frn->at = now; in mem_cgroup_track_foreign_dirty_slowpath()
4713 frn = &memcg->cgwb_frn[oldest]; in mem_cgroup_track_foreign_dirty_slowpath()
4714 frn->bdi_id = wb->bdi->id; in mem_cgroup_track_foreign_dirty_slowpath()
4715 frn->memcg_id = wb->memcg_css->id; in mem_cgroup_track_foreign_dirty_slowpath()
4716 frn->at = now; in mem_cgroup_track_foreign_dirty_slowpath()
4723 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_flush_foreign()
4729 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i]; in mem_cgroup_flush_foreign()
4737 if (time_after64(frn->at, now - intv) && in mem_cgroup_flush_foreign()
4738 atomic_read(&frn->done.cnt) == 1) { in mem_cgroup_flush_foreign()
4739 frn->at = 0; in mem_cgroup_flush_foreign()
4740 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id); in mem_cgroup_flush_foreign()
4741 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, in mem_cgroup_flush_foreign()
4743 &frn->done); in mem_cgroup_flush_foreign()
4770 * This is way over-engineered. It tries to support fully configurable
4787 struct mem_cgroup *memcg = event->memcg; in memcg_event_remove()
4789 remove_wait_queue(event->wqh, &event->wait); in memcg_event_remove()
4791 event->unregister_event(memcg, event->eventfd); in memcg_event_remove()
4794 eventfd_signal(event->eventfd, 1); in memcg_event_remove()
4796 eventfd_ctx_put(event->eventfd); in memcg_event_remove()
4798 css_put(&memcg->css); in memcg_event_remove()
4804 * Called with wqh->lock held and interrupts disabled.
4811 struct mem_cgroup *memcg = event->memcg; in memcg_event_wake()
4821 * side will require wqh->lock via remove_wait_queue(), in memcg_event_wake()
4824 spin_lock(&memcg->event_list_lock); in memcg_event_wake()
4825 if (!list_empty(&event->list)) { in memcg_event_wake()
4826 list_del_init(&event->list); in memcg_event_wake()
4831 schedule_work(&event->remove); in memcg_event_wake()
4833 spin_unlock(&memcg->event_list_lock); in memcg_event_wake()
4845 event->wqh = wqh; in memcg_event_ptable_queue_proc()
4846 add_wait_queue(wqh, &event->wait); in memcg_event_ptable_queue_proc()
4873 return -EOPNOTSUPP; in memcg_write_event_control()
4879 return -EINVAL; in memcg_write_event_control()
4884 return -EINVAL; in memcg_write_event_control()
4889 return -ENOMEM; in memcg_write_event_control()
4891 event->memcg = memcg; in memcg_write_event_control()
4892 INIT_LIST_HEAD(&event->list); in memcg_write_event_control()
4893 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); in memcg_write_event_control()
4894 init_waitqueue_func_entry(&event->wait, memcg_event_wake); in memcg_write_event_control()
4895 INIT_WORK(&event->remove, memcg_event_remove); in memcg_write_event_control()
4899 ret = -EBADF; in memcg_write_event_control()
4903 event->eventfd = eventfd_ctx_fileget(efile.file); in memcg_write_event_control()
4904 if (IS_ERR(event->eventfd)) { in memcg_write_event_control()
4905 ret = PTR_ERR(event->eventfd); in memcg_write_event_control()
4911 ret = -EBADF; in memcg_write_event_control()
4925 cdentry = cfile.file->f_path.dentry; in memcg_write_event_control()
4926 if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) { in memcg_write_event_control()
4927 ret = -EINVAL; in memcg_write_event_control()
4939 name = cdentry->d_name.name; in memcg_write_event_control()
4942 event->register_event = mem_cgroup_usage_register_event; in memcg_write_event_control()
4943 event->unregister_event = mem_cgroup_usage_unregister_event; in memcg_write_event_control()
4945 event->register_event = mem_cgroup_oom_register_event; in memcg_write_event_control()
4946 event->unregister_event = mem_cgroup_oom_unregister_event; in memcg_write_event_control()
4948 event->register_event = vmpressure_register_event; in memcg_write_event_control()
4949 event->unregister_event = vmpressure_unregister_event; in memcg_write_event_control()
4951 event->register_event = memsw_cgroup_usage_register_event; in memcg_write_event_control()
4952 event->unregister_event = memsw_cgroup_usage_unregister_event; in memcg_write_event_control()
4954 ret = -EINVAL; in memcg_write_event_control()
4963 cfile_css = css_tryget_online_from_dir(cdentry->d_parent, in memcg_write_event_control()
4965 ret = -EINVAL; in memcg_write_event_control()
4973 ret = event->register_event(memcg, event->eventfd, buf); in memcg_write_event_control()
4977 vfs_poll(efile.file, &event->pt); in memcg_write_event_control()
4979 spin_lock_irq(&memcg->event_list_lock); in memcg_write_event_control()
4980 list_add(&event->list, &memcg->event_list); in memcg_write_event_control()
4981 spin_unlock_irq(&memcg->event_list_lock); in memcg_write_event_control()
4993 eventfd_ctx_put(event->eventfd); in memcg_write_event_control()
5147 * Swap-out records and page cache shadow entries need to store memcg
5150 * memory-controlled cgroups to 64k.
5157 * even when there are much fewer than 64k cgroups - possibly none.
5159 * Maintain a private 16-bit ID space for memcg, and allow the ID to
5168 #define MEM_CGROUP_ID_MAX ((1UL << MEM_CGROUP_ID_SHIFT) - 1)
5173 if (memcg->id.id > 0) { in mem_cgroup_id_remove()
5174 idr_remove(&mem_cgroup_idr, memcg->id.id); in mem_cgroup_id_remove()
5175 memcg->id.id = 0; in mem_cgroup_id_remove()
5182 refcount_add(n, &memcg->id.ref); in mem_cgroup_id_get_many()
5187 if (refcount_sub_and_test(n, &memcg->id.ref)) { in mem_cgroup_id_put_many()
5191 css_put(&memcg->css); in mem_cgroup_id_put_many()
5201 * mem_cgroup_from_id - look up a memcg from a memcg id
5227 memcg = ERR_PTR(-ENOENT); in mem_cgroup_get_from_ino()
5243 pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu, in alloc_mem_cgroup_per_node_info()
5245 if (!pn->lruvec_stats_percpu) { in alloc_mem_cgroup_per_node_info()
5250 lruvec_init(&pn->lruvec); in alloc_mem_cgroup_per_node_info()
5251 pn->memcg = memcg; in alloc_mem_cgroup_per_node_info()
5253 memcg->nodeinfo[node] = pn; in alloc_mem_cgroup_per_node_info()
5259 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; in free_mem_cgroup_per_node_info()
5264 free_percpu(pn->lruvec_stats_percpu); in free_mem_cgroup_per_node_info()
5274 kfree(memcg->vmstats); in __mem_cgroup_free()
5275 free_percpu(memcg->vmstats_percpu); in __mem_cgroup_free()
5291 long error = -ENOMEM; in mem_cgroup_alloc()
5297 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, in mem_cgroup_alloc()
5299 if (memcg->id.id < 0) { in mem_cgroup_alloc()
5300 error = memcg->id.id; in mem_cgroup_alloc()
5304 memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats), GFP_KERNEL); in mem_cgroup_alloc()
5305 if (!memcg->vmstats) in mem_cgroup_alloc()
5308 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu, in mem_cgroup_alloc()
5310 if (!memcg->vmstats_percpu) in mem_cgroup_alloc()
5320 INIT_WORK(&memcg->high_work, high_work_func); in mem_cgroup_alloc()
5321 INIT_LIST_HEAD(&memcg->oom_notify); in mem_cgroup_alloc()
5322 mutex_init(&memcg->thresholds_lock); in mem_cgroup_alloc()
5323 spin_lock_init(&memcg->move_lock); in mem_cgroup_alloc()
5324 vmpressure_init(&memcg->vmpressure); in mem_cgroup_alloc()
5325 INIT_LIST_HEAD(&memcg->event_list); in mem_cgroup_alloc()
5326 spin_lock_init(&memcg->event_list_lock); in mem_cgroup_alloc()
5327 memcg->socket_pressure = jiffies; in mem_cgroup_alloc()
5329 memcg->kmemcg_id = -1; in mem_cgroup_alloc()
5330 INIT_LIST_HEAD(&memcg->objcg_list); in mem_cgroup_alloc()
5333 INIT_LIST_HEAD(&memcg->cgwb_list); in mem_cgroup_alloc()
5335 memcg->cgwb_frn[i].done = in mem_cgroup_alloc()
5339 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock); in mem_cgroup_alloc()
5340 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue); in mem_cgroup_alloc()
5341 memcg->deferred_split_queue.split_queue_len = 0; in mem_cgroup_alloc()
5363 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); in mem_cgroup_css_alloc()
5364 WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX); in mem_cgroup_css_alloc()
5366 memcg->zswap_max = PAGE_COUNTER_MAX; in mem_cgroup_css_alloc()
5368 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_alloc()
5370 WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent)); in mem_cgroup_css_alloc()
5371 WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable)); in mem_cgroup_css_alloc()
5373 page_counter_init(&memcg->memory, &parent->memory); in mem_cgroup_css_alloc()
5374 page_counter_init(&memcg->swap, &parent->swap); in mem_cgroup_css_alloc()
5375 page_counter_init(&memcg->kmem, &parent->kmem); in mem_cgroup_css_alloc()
5376 page_counter_init(&memcg->tcpmem, &parent->tcpmem); in mem_cgroup_css_alloc()
5379 page_counter_init(&memcg->memory, NULL); in mem_cgroup_css_alloc()
5380 page_counter_init(&memcg->swap, NULL); in mem_cgroup_css_alloc()
5381 page_counter_init(&memcg->kmem, NULL); in mem_cgroup_css_alloc()
5382 page_counter_init(&memcg->tcpmem, NULL); in mem_cgroup_css_alloc()
5385 return &memcg->css; in mem_cgroup_css_alloc()
5396 return &memcg->css; in mem_cgroup_css_alloc()
5420 refcount_set(&memcg->id.ref, 1); in mem_cgroup_css_online()
5433 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); in mem_cgroup_css_online()
5440 return -ENOMEM; in mem_cgroup_css_online()
5453 spin_lock_irq(&memcg->event_list_lock); in mem_cgroup_css_offline()
5454 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { in mem_cgroup_css_offline()
5455 list_del_init(&event->list); in mem_cgroup_css_offline()
5456 schedule_work(&event->remove); in mem_cgroup_css_offline()
5458 spin_unlock_irq(&memcg->event_list_lock); in mem_cgroup_css_offline()
5460 page_counter_set_min(&memcg->memory, 0); in mem_cgroup_css_offline()
5461 page_counter_set_low(&memcg->memory, 0); in mem_cgroup_css_offline()
5488 wb_wait_for_completion(&memcg->cgwb_frn[i].done); in mem_cgroup_css_free()
5493 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active) in mem_cgroup_css_free()
5501 vmpressure_cleanup(&memcg->vmpressure); in mem_cgroup_css_free()
5502 cancel_work_sync(&memcg->high_work); in mem_cgroup_css_free()
5509 * mem_cgroup_css_reset - reset the states of a mem_cgroup
5518 * The current implementation only resets the essential configurations.
5525 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5526 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5527 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5528 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5529 page_counter_set_min(&memcg->memory, 0); in mem_cgroup_css_reset()
5530 page_counter_set_low(&memcg->memory, 0); in mem_cgroup_css_reset()
5531 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5532 WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5533 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5545 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu); in mem_cgroup_css_rstat_flush()
5550 * below us. We're in a per-cpu loop here and this is in mem_cgroup_css_rstat_flush()
5553 delta = memcg->vmstats->state_pending[i]; in mem_cgroup_css_rstat_flush()
5555 memcg->vmstats->state_pending[i] = 0; in mem_cgroup_css_rstat_flush()
5559 v = READ_ONCE(statc->state[i]); in mem_cgroup_css_rstat_flush()
5560 if (v != statc->state_prev[i]) { in mem_cgroup_css_rstat_flush()
5561 delta_cpu = v - statc->state_prev[i]; in mem_cgroup_css_rstat_flush()
5563 statc->state_prev[i] = v; in mem_cgroup_css_rstat_flush()
5568 memcg->vmstats->state_local[i] += delta_cpu; in mem_cgroup_css_rstat_flush()
5571 memcg->vmstats->state[i] += delta; in mem_cgroup_css_rstat_flush()
5573 parent->vmstats->state_pending[i] += delta; in mem_cgroup_css_rstat_flush()
5578 delta = memcg->vmstats->events_pending[i]; in mem_cgroup_css_rstat_flush()
5580 memcg->vmstats->events_pending[i] = 0; in mem_cgroup_css_rstat_flush()
5583 v = READ_ONCE(statc->events[i]); in mem_cgroup_css_rstat_flush()
5584 if (v != statc->events_prev[i]) { in mem_cgroup_css_rstat_flush()
5585 delta_cpu = v - statc->events_prev[i]; in mem_cgroup_css_rstat_flush()
5587 statc->events_prev[i] = v; in mem_cgroup_css_rstat_flush()
5591 memcg->vmstats->events_local[i] += delta_cpu; in mem_cgroup_css_rstat_flush()
5594 memcg->vmstats->events[i] += delta; in mem_cgroup_css_rstat_flush()
5596 parent->vmstats->events_pending[i] += delta; in mem_cgroup_css_rstat_flush()
5601 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid]; in mem_cgroup_css_rstat_flush()
5606 ppn = parent->nodeinfo[nid]; in mem_cgroup_css_rstat_flush()
5608 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu); in mem_cgroup_css_rstat_flush()
5611 delta = pn->lruvec_stats.state_pending[i]; in mem_cgroup_css_rstat_flush()
5613 pn->lruvec_stats.state_pending[i] = 0; in mem_cgroup_css_rstat_flush()
5616 v = READ_ONCE(lstatc->state[i]); in mem_cgroup_css_rstat_flush()
5617 if (v != lstatc->state_prev[i]) { in mem_cgroup_css_rstat_flush()
5618 delta_cpu = v - lstatc->state_prev[i]; in mem_cgroup_css_rstat_flush()
5620 lstatc->state_prev[i] = v; in mem_cgroup_css_rstat_flush()
5624 pn->lruvec_stats.state_local[i] += delta_cpu; in mem_cgroup_css_rstat_flush()
5627 pn->lruvec_stats.state[i] += delta; in mem_cgroup_css_rstat_flush()
5629 ppn->lruvec_stats.state_pending[i] += delta; in mem_cgroup_css_rstat_flush()
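The flush fragments above all follow one propagation pattern: take each per-cpu counter's delta since the previous flush, fold it into this cgroup's local and hierarchical totals, and park it as pending for the parent to consume on its own flush. A standalone C model of that pattern over a two-level hierarchy, illustrative only (names and types are not the kernel's):

	#include <stdio.h>

	#define NR_STATS 2

	/* Illustrative model of the mem_cgroup_css_rstat_flush() propagation;
	 * not kernel code. */
	struct group {
		long state_prev[NR_STATS];    /* per-cpu value seen at the last flush */
		long state_local[NR_STATS];   /* this group's own activity */
		long state[NR_STATS];         /* hierarchical total (group + descendants) */
		long state_pending[NR_STATS]; /* deltas queued up by descendants */
		struct group *parent;
	};

	static void flush_group(struct group *g, const long percpu[NR_STATS])
	{
		for (int i = 0; i < NR_STATS; i++) {
			long delta = g->state_pending[i];          /* handed up by children */
			long delta_cpu = percpu[i] - g->state_prev[i];

			g->state_pending[i] = 0;
			g->state_prev[i] = percpu[i];

			g->state_local[i] += delta_cpu;            /* local excludes descendants */
			delta += delta_cpu;
			g->state[i] += delta;                      /* hierarchical includes them */
			if (g->parent)
				g->parent->state_pending[i] += delta;
		}
	}

	int main(void)
	{
		struct group parent = { .parent = NULL };
		struct group child = { .parent = &parent };
		long child_cpu[NR_STATS] = { 5, 7 };
		long parent_cpu[NR_STATS] = { 1, 0 };

		flush_group(&child, child_cpu);   /* child's delta lands in parent's pending */
		flush_group(&parent, parent_cpu); /* parent folds it into its own totals */
		printf("parent: local=%ld/%ld hierarchical=%ld/%ld\n",
		       parent.state_local[0], parent.state_local[1],
		       parent.state[0], parent.state[1]);
		return 0;
	}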
5636 /* Handlers for move charge at task migration. */
5641 /* Try a single bulk charge without reclaim first, kswapd may wake */ in mem_cgroup_do_precharge()
5649 while (count--) { in mem_cgroup_do_precharge()
5719 entry->val = ent.val; in mc_handle_swap_pte()
5737 if (!vma->vm_file) /* anonymous vma */ in mc_handle_file_pte()
5742 /* folio is moved even if it's not RSS of this task (page-faulted). */ in mc_handle_file_pte()
5745 folio = filemap_get_incore_folio(vma->vm_file->f_mapping, index); in mc_handle_file_pte()
5752 * mem_cgroup_move_account - move a page's memcg accounting to another cgroup
5754 * @compound: charge the page as compound or small page
5760 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
5779 ret = -EINVAL; in mem_cgroup_move_account()
5791 __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages); in mem_cgroup_move_account()
5795 -nr_pages); in mem_cgroup_move_account()
5801 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages); in mem_cgroup_move_account()
5805 __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages); in mem_cgroup_move_account()
5810 __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages); in mem_cgroup_move_account()
5815 struct address_space *mapping = folio_mapping(folio); in mem_cgroup_move_account() local
5817 if (mapping_can_writeback(mapping)) { in mem_cgroup_move_account()
5819 -nr_pages); in mem_cgroup_move_account()
5828 __mod_lruvec_state(from_vec, NR_SWAPCACHE, -nr_pages); in mem_cgroup_move_account()
5833 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages); in mem_cgroup_move_account()
5852 css_get(&to->css); in mem_cgroup_move_account()
5853 css_put(&from->css); in mem_cgroup_move_account()
5855 folio->memcg_data = (unsigned long)to; in mem_cgroup_move_account()
5865 mem_cgroup_charge_statistics(from, -nr_pages); in mem_cgroup_move_account()
5873 * get_mctgt_type - get target type of moving charge
5881 * * MC_TARGET_NONE - If the pte is not a target for move charge.
5882 * * MC_TARGET_PAGE - If the page corresponding to this pte is a target for
5883 * move charge. If @target is not NULL, the page is stored in target->page
5885 * * MC_TARGET_SWAP - If the swap entry corresponding to this pte is a
5886 * target for charge migration. If @target is not NULL, the entry is
5887 * stored in target->ent.
5888 * * MC_TARGET_DEVICE - Like MC_TARGET_PAGE but page is device memory and
5948 target->page = page; in get_mctgt_type()
5958 * But we cannot move a tail-page in a THP. in get_mctgt_type()
5964 target->ent = ent; in get_mctgt_type()
5998 target->page = page; in get_mctgt_type_thp()
6015 struct vm_area_struct *vma = walk->vma; in mem_cgroup_count_precharge_pte_range()
6032 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in mem_cgroup_count_precharge_pte_range()
6038 pte_unmap_unlock(pte - 1, ptl); in mem_cgroup_count_precharge_pte_range()
6068 mc.moving_task = current; in mem_cgroup_precharge_mc()
6095 page_counter_uncharge(&mc.from->memsw, mc.moved_swap); in __mem_cgroup_clear_mc()
6100 * we charged both to->memory and to->memsw, so we in __mem_cgroup_clear_mc()
6101 * should uncharge to->memory. in __mem_cgroup_clear_mc()
6104 page_counter_uncharge(&mc.to->memory, mc.moved_swap); in __mem_cgroup_clear_mc()
6142 /* charge immigration isn't supported on the default hierarchy */ in mem_cgroup_can_attach()
6147 * Multi-process migrations only happen on the default hierarchy in mem_cgroup_can_attach()
6148 * where charge immigration is not used. Perform charge in mem_cgroup_can_attach()
6163 * tunable will only affect upcoming migrations, not the current one. in mem_cgroup_can_attach()
6166 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); in mem_cgroup_can_attach()
6178 if (mm->owner == p) { in mem_cgroup_can_attach()
6213 struct vm_area_struct *vma = walk->vma; in mem_cgroup_move_charge_pte_range()
6232 mc.precharge -= HPAGE_PMD_NR; in mem_cgroup_move_charge_pte_range()
6243 mc.precharge -= HPAGE_PMD_NR; in mem_cgroup_move_charge_pte_range()
6254 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in mem_cgroup_move_charge_pte_range()
6275 * memcg. There should be somebody mapping the head. in mem_cgroup_move_charge_pte_range()
6283 mc.precharge--; in mem_cgroup_move_charge_pte_range()
6296 mc.precharge--; in mem_cgroup_move_charge_pte_range()
6306 pte_unmap_unlock(pte - 1, ptl); in mem_cgroup_move_charge_pte_range()
6312 * We try to charge one by one, but don't do any additional in mem_cgroup_move_charge_pte_range()
6313 * charges to mc.to if we have failed to charge once in attach() in mem_cgroup_move_charge_pte_range()
6335 * for already started RCU-only updates to finish. in mem_cgroup_move_charge()
6337 atomic_inc(&mc.from->moving_account); in mem_cgroup_move_charge()
6345 * to move enough charges, but moving charge is a best-effort in mem_cgroup_move_charge()
6354 * additional charge, the page walk just aborts. in mem_cgroup_move_charge()
6358 atomic_dec(&mc.from->moving_account); in mem_cgroup_move_charge()
6395 if (task->mm && READ_ONCE(task->mm->owner) == task) in mem_cgroup_attach()
6396 lru_gen_migrate_mm(task->mm); in mem_cgroup_attach()
6420 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; in memory_current_read()
6428 return (u64)memcg->memory.watermark * PAGE_SIZE; in memory_peak_read()
6434 READ_ONCE(mem_cgroup_from_seq(m)->memory.min)); in memory_min_show()
6449 page_counter_set_min(&memcg->memory, min); in memory_min_write()
6457 READ_ONCE(mem_cgroup_from_seq(m)->memory.low)); in memory_low_show()
6472 page_counter_set_low(&memcg->memory, low); in memory_low_write()
6480 READ_ONCE(mem_cgroup_from_seq(m)->memory.high)); in memory_high_show()
6497 page_counter_set_high(&memcg->memory, high); in memory_high_write()
6500 unsigned long nr_pages = page_counter_read(&memcg->memory); in memory_high_write()
6506 if (signal_pending(current)) in memory_high_write()
6515 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high, in memory_high_write()
6518 if (!reclaimed && !nr_retries--) in memory_high_write()
6529 READ_ONCE(mem_cgroup_from_seq(m)->memory.max)); in memory_max_show()
6546 xchg(&memcg->memory.max, max); in memory_max_write()
6549 unsigned long nr_pages = page_counter_read(&memcg->memory); in memory_max_write()
6554 if (signal_pending(current)) in memory_max_write()
6564 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, in memory_max_write()
6566 nr_reclaims--; in memory_max_write()
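The memory.high and memory.max write paths above share one control loop: store the new target, then keep reclaiming while usage still exceeds it, bail out on a pending signal, and stop after a bounded number of passes that make no progress (memory.max then falls back to OOM killing rather than returning). A compilable sketch of that loop with stand-in helpers; read_usage(), reclaim_some() and interrupted() are illustrative, not kernel functions:

	#include <errno.h>
	#include <stdio.h>

	/* Stand-ins for page_counter_read(), try_to_free_mem_cgroup_pages()
	 * and signal_pending(current); purely illustrative. */
	static unsigned long fake_usage = 1000;
	static unsigned long read_usage(void) { return fake_usage; }
	static unsigned long reclaim_some(unsigned long want)
	{
		unsigned long got = want > 50 ? 50 : want;

		fake_usage -= got;
		return got;
	}
	static int interrupted(void) { return 0; }

	/* Control loop shared by the memory.high and memory.max writers:
	 * reclaim until usage fits under the new target or the retry budget
	 * runs out. */
	static int shrink_to(unsigned long target, int nr_retries)
	{
		for (;;) {
			unsigned long usage = read_usage();

			if (usage <= target)
				return 0;
			if (interrupted())
				return -EINTR;
			if (!reclaim_some(usage - target) && !nr_retries--)
				return -EAGAIN;	/* memory.max falls back to OOM killing */
		}
	}

	int main(void)
	{
		printf("shrink_to(700): %d, usage now %lu\n",
		       shrink_to(700, 5), fake_usage);
		return 0;
	}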
6595 __memory_events_show(m, memcg->memory_events); in memory_events_show()
6603 __memory_events_show(m, memcg->memory_events_local); in memory_events_local_show()
6614 return -ENOMEM; in memory_stat_show()
6663 seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group)); in memory_oom_group_show()
6676 return -EINVAL; in memory_oom_group_write()
6683 return -EINVAL; in memory_oom_group_write()
6685 WRITE_ONCE(memcg->oom_group, oom_group); in memory_oom_group_write()
6708 if (signal_pending(current)) in memory_reclaim()
6709 return -EINTR; in memory_reclaim()
6720 min(nr_to_reclaim - nr_reclaimed, SWAP_CLUSTER_MAX), in memory_reclaim()
6723 if (!reclaimed && !nr_retries--) in memory_reclaim()
6724 return -EAGAIN; in memory_reclaim()
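memory_reclaim() above backs the cgroup v2 memory.reclaim file: userspace writes an amount to reclaim, and the kernel works toward it in SWAP_CLUSTER_MAX-sized passes until the request is satisfied, a signal is pending, or the retry budget runs out, in which case the write fails with EAGAIN. A hedged userspace example of driving the interface; the cgroup path is hypothetical, and the request is written as a plain byte count to avoid assuming any suffix parsing:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* Hypothetical cgroup; proactive reclaim request of 64 MiB in bytes. */
		const char *path = "/sys/fs/cgroup/example/memory.reclaim";
		const char *req = "67108864";
		int fd = open(path, O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* The write fails with EAGAIN if the kernel could not reclaim that much. */
		if (write(fd, req, strlen(req)) < 0)
			perror("write memory.reclaim");
		close(fd);
		return 0;
	}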
6734 .name = "current",
6841 * This makes distribution proportional, but also work-conserving:
6852 * of the ancestor's claim to protection, any unutilized -
6853 * "floating" - protection from up the tree is distributed in
6879 * claimed protection in order to be work-conserving: claimed in effective_protection()
6917 * aren't read atomically - make sure the division is sane. in effective_protection()
6926 unclaimed = parent_effective - siblings_protected; in effective_protection()
6927 unclaimed *= usage - protected; in effective_protection()
6928 unclaimed /= parent_usage - siblings_protected; in effective_protection()
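The unclaimed computation above distributes whatever effective protection the parent has left after its children's explicit claims, in proportion to each child's unprotected usage; that is what makes the scheme work-conserving. A standalone restatement of just that arithmetic follows; the real effective_protection() also clamps against the configured min/low values and the sibling sums, so treat this as a sketch:

	#include <stdio.h>

	/* Share of the parent's leftover ("floating") protection granted to one
	 * child, proportional to the child's unprotected usage. Illustrative only. */
	unsigned long floating_share(unsigned long usage, unsigned long protected,
				     unsigned long parent_effective,
				     unsigned long parent_usage,
				     unsigned long siblings_protected)
	{
		unsigned long unclaimed;

		/* The counters aren't read atomically in the kernel: keep the math sane. */
		if (usage <= protected ||
		    parent_usage <= siblings_protected ||
		    parent_effective <= siblings_protected)
			return 0;

		unclaimed = parent_effective - siblings_protected;
		unclaimed *= usage - protected;
		unclaimed /= parent_usage - siblings_protected;
		return unclaimed;
	}

	int main(void)
	{
		/* Child uses 60 pages, 10 protected; parent: 100 effective, 200 used,
		 * siblings claim 40 -> the child floats 60 * 50 / 160 = 18 extra pages. */
		printf("%lu\n", floating_share(60, 10, 100, 200, 40));
		return 0;
	}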
6937 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
6938 * @root: the top ancestor of the sub-tree being checked
6942 * of a top-down tree iteration, not for isolated queries.
6966 usage = page_counter_read(&memcg->memory); in mem_cgroup_calculate_protection()
6973 memcg->memory.emin = READ_ONCE(memcg->memory.min); in mem_cgroup_calculate_protection()
6974 memcg->memory.elow = READ_ONCE(memcg->memory.low); in mem_cgroup_calculate_protection()
6978 parent_usage = page_counter_read(&parent->memory); in mem_cgroup_calculate_protection()
6980 WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage, in mem_cgroup_calculate_protection()
6981 READ_ONCE(memcg->memory.min), in mem_cgroup_calculate_protection()
6982 READ_ONCE(parent->memory.emin), in mem_cgroup_calculate_protection()
6983 atomic_long_read(&parent->memory.children_min_usage))); in mem_cgroup_calculate_protection()
6985 WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage, in mem_cgroup_calculate_protection()
6986 READ_ONCE(memcg->memory.low), in mem_cgroup_calculate_protection()
6987 READ_ONCE(parent->memory.elow), in mem_cgroup_calculate_protection()
6988 atomic_long_read(&parent->memory.children_low_usage))); in mem_cgroup_calculate_protection()
7001 css_get(&memcg->css); in charge_memcg()
7019 css_put(&memcg->css); in __mem_cgroup_charge()
7025 * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
7026 * @folio: folio to charge.
7049 if (!memcg || !css_tryget_online(&memcg->css)) in mem_cgroup_swapin_charge_folio()
7055 css_put(&memcg->css); in mem_cgroup_swapin_charge_folio()
7060 * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
7078 * so this is a non-issue here. Memory and swap charge lifetimes in mem_cgroup_swapin_uncharge_swap()
7079 * correspond 1:1 to page and swap slot lifetimes: we charge the in mem_cgroup_swapin_uncharge_swap()
7086 * memory+swap charge, drop the swap entry duplicate. in mem_cgroup_swapin_uncharge_swap()
7109 if (ug->nr_memory) { in uncharge_batch()
7110 page_counter_uncharge(&ug->memcg->memory, ug->nr_memory); in uncharge_batch()
7112 page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory); in uncharge_batch()
7113 if (ug->nr_kmem) in uncharge_batch()
7114 memcg_account_kmem(ug->memcg, -ug->nr_kmem); in uncharge_batch()
7115 memcg_oom_recover(ug->memcg); in uncharge_batch()
7119 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); in uncharge_batch()
7120 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory); in uncharge_batch()
7121 memcg_check_events(ug->memcg, ug->nid); in uncharge_batch()
7125 css_put(&ug->memcg->css); in uncharge_batch()
7155 if (ug->memcg != memcg) { in uncharge_folio()
7156 if (ug->memcg) { in uncharge_folio()
7160 ug->memcg = memcg; in uncharge_folio()
7161 ug->nid = folio_nid(folio); in uncharge_folio()
7164 css_get(&memcg->css); in uncharge_folio()
7170 ug->nr_memory += nr_pages; in uncharge_folio()
7171 ug->nr_kmem += nr_pages; in uncharge_folio()
7173 folio->memcg_data = 0; in uncharge_folio()
7178 ug->nr_memory += nr_pages; in uncharge_folio()
7179 ug->pgpgout++; in uncharge_folio()
7181 folio->memcg_data = 0; in uncharge_folio()
7184 css_put(&memcg->css); in uncharge_folio()
7191 /* Don't touch folio->lru of any random page, pre-check: */ in __mem_cgroup_uncharge()
7201 * __mem_cgroup_uncharge_list - uncharge a list of pages
7220 * mem_cgroup_migrate - Charge a folio's replacement.
7224 * Charge @new as a replacement folio for @old. @old will
7227 * Both folios must be locked, @new->mapping must be set up.
7252 /* Force-charge the new page. The old one will be freed soon */ in mem_cgroup_migrate()
7254 page_counter_charge(&memcg->memory, nr_pages); in mem_cgroup_migrate()
7256 page_counter_charge(&memcg->memsw, nr_pages); in mem_cgroup_migrate()
7259 css_get(&memcg->css); in mem_cgroup_migrate()
7283 memcg = mem_cgroup_from_task(current); in mem_cgroup_sk_alloc()
7286 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active) in mem_cgroup_sk_alloc()
7288 if (css_tryget(&memcg->css)) in mem_cgroup_sk_alloc()
7289 sk->sk_memcg = memcg; in mem_cgroup_sk_alloc()
7296 if (sk->sk_memcg) in mem_cgroup_sk_free()
7297 css_put(&sk->sk_memcg->css); in mem_cgroup_sk_free()
7301 * mem_cgroup_charge_skmem - charge socket memory
7302 * @memcg: memcg to charge
7303 * @nr_pages: number of pages to charge
7306 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
7307 * @memcg's configured limit, %false if it doesn't.
7315 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { in mem_cgroup_charge_skmem()
7316 memcg->tcpmem_pressure = 0; in mem_cgroup_charge_skmem()
7319 memcg->tcpmem_pressure = 1; in mem_cgroup_charge_skmem()
7321 page_counter_charge(&memcg->tcpmem, nr_pages); in mem_cgroup_charge_skmem()
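On the legacy tcpmem path shown above, a successful page_counter_try_charge() clears the pressure flag and the charge is reported as fitting the limit; on failure the cgroup is marked under tcpmem pressure, the pages are force-charged anyway so the socket keeps working, and failure is reported to the caller. A small sketch of that fail-soft-but-signal-pressure pattern, with an illustrative counter type rather than the kernel's page_counter:

	#include <stdbool.h>

	/* Illustrative strict-then-forced charge, echoing the shape of
	 * mem_cgroup_charge_skmem()'s tcpmem path. */
	struct counter { unsigned long usage, max; };

	bool try_charge(struct counter *c, unsigned long nr)
	{
		if (c->usage + nr > c->max)
			return false;
		c->usage += nr;
		return true;
	}

	bool charge_sock_pages(struct counter *tcpmem, int *pressure,
			       unsigned long nr_pages)
	{
		if (try_charge(tcpmem, nr_pages)) {
			*pressure = 0;
			return true;          /* charge fit under the limit */
		}
		*pressure = 1;                /* tell the network stack to back off */
		tcpmem->usage += nr_pages;    /* force the charge regardless */
		return false;
	}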
7336 * mem_cgroup_uncharge_skmem - uncharge socket memory
7343 page_counter_uncharge(&memcg->tcpmem, nr_pages); in mem_cgroup_uncharge_skmem()
7347 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); in mem_cgroup_uncharge_skmem()
7374 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
7384 * used for per-memcg-per-cpu caching of per-node statistics. In order in mem_cgroup_init()
7394 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, in mem_cgroup_init()
7402 rtpn->rb_root = RB_ROOT; in mem_cgroup_init()
7403 rtpn->rb_rightmost = NULL; in mem_cgroup_init()
7404 spin_lock_init(&rtpn->lock); in mem_cgroup_init()
7415 while (!refcount_inc_not_zero(&memcg->id.ref)) { in mem_cgroup_id_get_online()
7432 * mem_cgroup_swapout - transfer a memsw charge to swap
7433 * @folio: folio whose memsw charge to transfer
7434 * @entry: swap entry to move the charge to
7436 * Transfer the memsw charge of @folio to @entry.
7461 * have an ID allocated to it anymore, charge the closest online in mem_cgroup_swapout()
7462 * ancestor for the swap instead and transfer the memory+swap charge. in mem_cgroup_swapout()
7468 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); in mem_cgroup_swapout()
7474 folio->memcg_data = 0; in mem_cgroup_swapout()
7477 page_counter_uncharge(&memcg->memory, nr_entries); in mem_cgroup_swapout()
7481 page_counter_charge(&swap_memcg->memsw, nr_entries); in mem_cgroup_swapout()
7482 page_counter_uncharge(&memcg->memsw, nr_entries); in mem_cgroup_swapout()
7487 * i_pages lock which is taken with interrupts-off. It is in mem_cgroup_swapout()
7489 * only synchronisation we have for updating the per-CPU variables. in mem_cgroup_swapout()
7492 mem_cgroup_charge_statistics(memcg, -nr_entries); in mem_cgroup_swapout()
7496 css_put(&memcg->css); in mem_cgroup_swapout()
7500 * __mem_cgroup_try_charge_swap - try charging swap space for a folio
7502 * @entry: swap entry to charge
7504 * Try to charge @folio's memcg for the swap space at @entry.
7506 * Returns 0 on success, -ENOMEM on failure.
7532 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { in __mem_cgroup_try_charge_swap()
7536 return -ENOMEM; in __mem_cgroup_try_charge_swap()
7541 mem_cgroup_id_get_many(memcg, nr_pages - 1); in __mem_cgroup_try_charge_swap()
7550 * __mem_cgroup_uncharge_swap - uncharge swap space
7565 page_counter_uncharge(&memcg->memsw, nr_pages); in __mem_cgroup_uncharge_swap()
7567 page_counter_uncharge(&memcg->swap, nr_pages); in __mem_cgroup_uncharge_swap()
7569 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); in __mem_cgroup_uncharge_swap()
7583 READ_ONCE(memcg->swap.max) - in mem_cgroup_get_nr_swap_pages()
7584 page_counter_read(&memcg->swap)); in mem_cgroup_get_nr_swap_pages()
7604 unsigned long usage = page_counter_read(&memcg->swap); in mem_cgroup_swap_full()
7606 if (usage * 2 >= READ_ONCE(memcg->swap.high) || in mem_cgroup_swap_full()
7607 usage * 2 >= READ_ONCE(memcg->swap.max)) in mem_cgroup_swap_full()
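The two fragments above encode the hierarchical swap heuristics: a cgroup's swap headroom is the smallest (swap.max - current swap usage) over itself and its ancestors, and swap counts as nearly full once usage crosses half of either swap.high or swap.max anywhere along that walk. A standalone sketch of both walks over a simple parent chain; the struct layout is illustrative, not the kernel's:

	#include <limits.h>
	#include <stdbool.h>

	/* Illustrative ancestor walks matching the shape of
	 * mem_cgroup_get_nr_swap_pages() and mem_cgroup_swap_full(). */
	struct swap_group {
		unsigned long usage, high, max;
		struct swap_group *parent;
	};

	unsigned long swap_headroom(const struct swap_group *g)
	{
		unsigned long room = ULONG_MAX;

		for (; g; g = g->parent) {
			unsigned long mine = g->max > g->usage ? g->max - g->usage : 0;

			if (mine < room)
				room = mine;   /* the tightest ancestor limit wins */
		}
		return room;
	}

	bool swap_nearly_full(const struct swap_group *g)
	{
		for (; g; g = g->parent)
			if (g->usage * 2 >= g->high || g->usage * 2 >= g->max)
				return true;   /* past half of swap.high or swap.max */
		return false;
	}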
7621 "Please report your usecase to linux-mm@kvack.org if you " in setup_swap_account()
7632 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE; in swap_current_read()
7640 return (u64)memcg->swap.watermark * PAGE_SIZE; in swap_peak_read()
7646 READ_ONCE(mem_cgroup_from_seq(m)->swap.high)); in swap_high_show()
7661 page_counter_set_high(&memcg->swap, high); in swap_high_write()
7669 READ_ONCE(mem_cgroup_from_seq(m)->swap.max)); in swap_max_show()
7684 xchg(&memcg->swap.max, max); in swap_max_write()
7694 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH])); in swap_events_show()
7696 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); in swap_events_show()
7698 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL])); in swap_events_show()
7705 .name = "swap.current",
7764 * obj_cgroup_may_zswap - check if this cgroup can zswap
7767 * Check if the hierarchical zswap limit has been reached.
7771 * once compression has occurred, and this optimistic pre-check avoids
7786 unsigned long max = READ_ONCE(memcg->zswap_max); in obj_cgroup_may_zswap()
7796 cgroup_rstat_flush(memcg->css.cgroup); in obj_cgroup_may_zswap()
7808 * obj_cgroup_charge_zswap - charge compression backend memory
7812 * This forces the charge after obj_cgroup_may_zswap() allowed
7822 VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC)); in obj_cgroup_charge_zswap()
7836 * obj_cgroup_uncharge_zswap - uncharge compression backend memory
7853 mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size); in obj_cgroup_uncharge_zswap()
7854 mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1); in obj_cgroup_uncharge_zswap()
7861 cgroup_rstat_flush(css->cgroup); in zswap_current_read()
7868 READ_ONCE(mem_cgroup_from_seq(m)->zswap_max)); in zswap_max_show()
7883 xchg(&memcg->zswap_max, max); in zswap_max_write()
7890 .name = "zswap.current",