Lines matching refs: nr_pages (all hits below are in mm/memcontrol.c)
586 unsigned long nr_pages = page_counter_read(&memcg->memory); in soft_limit_excess() local
590 if (nr_pages > soft_limit) in soft_limit_excess()
591 excess = nr_pages - soft_limit; in soft_limit_excess()
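
A compact restatement of what the soft_limit_excess() hits at lines 586-591 compute: current usage is read from the memory page counter and anything above the configured soft limit is reported as the excess, in pages. The sketch below is illustrative only and uses plain stand-in fields rather than the kernel's struct mem_cgroup and page_counter.

/* Illustrative model; 'usage' stands in for page_counter_read(&memcg->memory). */
struct memcg_model {
        unsigned long usage;            /* current charge, in pages */
        unsigned long soft_limit;       /* memcg->soft_limit, in pages */
};

static unsigned long soft_limit_excess_model(const struct memcg_model *memcg)
{
        unsigned long nr_pages = memcg->usage;
        unsigned long excess = 0;

        if (nr_pages > memcg->soft_limit)
                excess = nr_pages - memcg->soft_limit;

        return excess;                  /* 0 when at or below the soft limit */
}
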
834 bool compound, int nr_pages) in mem_cgroup_charge_statistics() argument
841 __mod_memcg_state(memcg, MEMCG_RSS, nr_pages); in mem_cgroup_charge_statistics()
843 __mod_memcg_state(memcg, MEMCG_CACHE, nr_pages); in mem_cgroup_charge_statistics()
845 __mod_memcg_state(memcg, NR_SHMEM, nr_pages); in mem_cgroup_charge_statistics()
850 __mod_memcg_state(memcg, MEMCG_RSS_HUGE, nr_pages); in mem_cgroup_charge_statistics()
854 if (nr_pages > 0) in mem_cgroup_charge_statistics()
858 nr_pages = -nr_pages; /* for event */ in mem_cgroup_charge_statistics()
861 __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages); in mem_cgroup_charge_statistics()
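
mem_cgroup_charge_statistics() takes a signed nr_pages: positive when pages are charged, negative when they are uncharged, so the same __mod_memcg_state() calls at lines 841-850 move the RSS/CACHE/SHMEM counters in either direction. The per-CPU event counter at line 861 only wants a magnitude, hence the sign flip at line 858. A minimal model of that convention, with plain counters standing in for the memcg state:

/* Illustrative: 'stat' stands in for one __mod_memcg_state() target and
 * 'nr_page_events' for the per-CPU event counter. */
static void charge_statistics_model(long *stat, unsigned long *nr_page_events,
                                    int nr_pages)
{
        *stat += nr_pages;              /* signed: charge adds, uncharge subtracts */

        if (nr_pages < 0)
                nr_pages = -nr_pages;   /* events only count the magnitude */
        *nr_page_events += nr_pages;
}
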
1278 int zid, int nr_pages) in mem_cgroup_update_lru_size() argument
1290 if (nr_pages < 0) in mem_cgroup_update_lru_size()
1291 *lru_size += nr_pages; in mem_cgroup_update_lru_size()
1296 __func__, lruvec, lru, nr_pages, size)) { in mem_cgroup_update_lru_size()
1301 if (nr_pages > 0) in mem_cgroup_update_lru_size()
1302 *lru_size += nr_pages; in mem_cgroup_update_lru_size()
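
mem_cgroup_update_lru_size() applies its signed delta in two halves around the sanity check at line 1296: a negative nr_pages (pages leaving the LRU) is applied before the check, a positive one (pages entering) only after it, so the value that gets checked and, on underflow, clamped is always the smaller intermediate size. Modelled below with a bare counter standing in for mz->lru_zone_size[zid][lru]:

/* Two-phase LRU size update; the kernel WARN_ONCEs where this clamps. */
static void update_lru_size_model(long *lru_size, int nr_pages)
{
        if (nr_pages < 0)               /* removal: shrink before the check */
                *lru_size += nr_pages;

        if (*lru_size < 0)              /* should never happen; keep it sane */
                *lru_size = 0;

        if (nr_pages > 0)               /* addition: grow after the check */
                *lru_size += nr_pages;
}
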
2151 unsigned int nr_pages; member
2170 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) in consume_stock() argument
2176 if (nr_pages > MEMCG_CHARGE_BATCH) in consume_stock()
2182 if (memcg == stock->cached && stock->nr_pages >= nr_pages) { in consume_stock()
2183 stock->nr_pages -= nr_pages; in consume_stock()
2199 if (stock->nr_pages) { in drain_stock()
2200 page_counter_uncharge(&old->memory, stock->nr_pages); in drain_stock()
2202 page_counter_uncharge(&old->memsw, stock->nr_pages); in drain_stock()
2203 css_put_many(&old->css, stock->nr_pages); in drain_stock()
2204 stock->nr_pages = 0; in drain_stock()
2231 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) in refill_stock() argument
2243 stock->nr_pages += nr_pages; in refill_stock()
2245 if (stock->nr_pages > MEMCG_CHARGE_BATCH) in refill_stock()
2276 if (memcg && stock->nr_pages && in drain_all_stock()
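
Lines 2151-2276 are the per-CPU charge cache ("stock"): each CPU keeps up to MEMCG_CHARGE_BATCH pre-charged pages for a single memcg, so small charges can be served locally without touching the shared page counters, and drain_stock()/drain_all_stock() hand the cached pages back via page_counter_uncharge() (lines 2199-2204). Below is a rough single-threaded model of consume/refill; the real code is per-CPU, runs with IRQs disabled, and actually uncharges the old cache in drain_stock() rather than just zeroing it. BATCH_MODEL is a stand-in for MEMCG_CHARGE_BATCH.

#define BATCH_MODEL 32U                 /* stand-in for MEMCG_CHARGE_BATCH */

struct stock_model {
        void *cached;                   /* memcg the cached pages were charged to */
        unsigned int nr_pages;          /* pre-charged pages held locally */
};

static int consume_stock_model(struct stock_model *stock, void *memcg,
                               unsigned int nr_pages)
{
        if (nr_pages > BATCH_MODEL)
                return 0;               /* oversized request: take the slow path */

        if (stock->cached == memcg && stock->nr_pages >= nr_pages) {
                stock->nr_pages -= nr_pages;
                return 1;               /* charge served from the cache */
        }
        return 0;
}

static void refill_stock_model(struct stock_model *stock, void *memcg,
                               unsigned int nr_pages)
{
        if (stock->cached != memcg) {
                stock->nr_pages = 0;    /* drain_stock() would uncharge these */
                stock->cached = memcg;
        }
        stock->nr_pages += nr_pages;

        if (stock->nr_pages > BATCH_MODEL)
                stock->nr_pages = 0;    /* don't hoard more than one batch */
}
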
2342 unsigned int nr_pages, in reclaim_high() argument
2349 try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true); in reclaim_high()
2423 unsigned int nr_pages = current->memcg_nr_pages_over_high; in mem_cgroup_handle_over_high() local
2426 if (likely(!nr_pages)) in mem_cgroup_handle_over_high()
2430 reclaim_high(memcg, nr_pages, GFP_KERNEL); in mem_cgroup_handle_over_high()
2471 penalty_jiffies = penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH; in mem_cgroup_handle_over_high()
2503 unsigned int nr_pages) in try_charge() argument
2505 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages); in try_charge()
2517 if (consume_stock(memcg, nr_pages)) in try_charge()
2532 if (batch > nr_pages) { in try_charge()
2533 batch = nr_pages; in try_charge()
2572 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, in try_charge()
2575 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) in try_charge()
2595 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER)) in try_charge()
2622 get_order(nr_pages * PAGE_SIZE)); in try_charge()
2641 page_counter_charge(&memcg->memory, nr_pages); in try_charge()
2643 page_counter_charge(&memcg->memsw, nr_pages); in try_charge()
2644 css_get_many(&memcg->css, nr_pages); in try_charge()
2650 if (batch > nr_pages) in try_charge()
2651 refill_stock(memcg, batch - nr_pages); in try_charge()
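
try_charge() is where that cache gets filled: it charges batch = max(MEMCG_CHARGE_BATCH, nr_pages) up front (line 2505) and, once the charge sticks, parks the surplus batch - nr_pages in the stock (line 2651) so the next small charges hit consume_stock() instead of the page counters. If the batched charge runs into a limit, it first retries with just nr_pages (lines 2532-2533) before reclaiming. The sketch below keeps only that batching arithmetic, reusing the model helpers above and a simple remaining-budget counter in place of page_counter_try_charge(); reclaim, OOM and the memory.high path are deliberately omitted.

static struct stock_model percpu_stock; /* one per CPU in the real code */

static int counter_try_charge_model(long *budget, unsigned int nr_pages)
{
        if (*budget < (long)nr_pages)
                return 0;               /* over limit */
        *budget -= nr_pages;
        return 1;
}

static int try_charge_model(void *memcg, long *budget, unsigned int nr_pages)
{
        unsigned int batch = nr_pages > BATCH_MODEL ? nr_pages : BATCH_MODEL;

        if (consume_stock_model(&percpu_stock, memcg, nr_pages))
                return 0;               /* served from the local cache */

        while (!counter_try_charge_model(budget, batch)) {
                if (batch > nr_pages) {
                        batch = nr_pages;       /* drop the slack and retry */
                        continue;
                }
                return -1;              /* the real code reclaims, then may OOM */
        }

        if (batch > nr_pages)           /* park the slack for later charges */
                refill_stock_model(&percpu_stock, memcg, batch - nr_pages);
        return 0;
}
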
2678 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) in cancel_charge() argument
2683 page_counter_uncharge(&memcg->memory, nr_pages); in cancel_charge()
2685 page_counter_uncharge(&memcg->memsw, nr_pages); in cancel_charge()
2687 css_put_many(&memcg->css, nr_pages); in cancel_charge()
2950 unsigned int nr_pages = 1 << order; in __memcg_kmem_charge_memcg() local
2954 ret = try_charge(memcg, gfp, nr_pages); in __memcg_kmem_charge_memcg()
2959 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) { in __memcg_kmem_charge_memcg()
2967 page_counter_charge(&memcg->kmem, nr_pages); in __memcg_kmem_charge_memcg()
2970 cancel_charge(memcg, nr_pages); in __memcg_kmem_charge_memcg()
3010 unsigned int nr_pages) in __memcg_kmem_uncharge_memcg() argument
3013 page_counter_uncharge(&memcg->kmem, nr_pages); in __memcg_kmem_uncharge_memcg()
3015 page_counter_uncharge(&memcg->memory, nr_pages); in __memcg_kmem_uncharge_memcg()
3017 page_counter_uncharge(&memcg->memsw, nr_pages); in __memcg_kmem_uncharge_memcg()
3027 unsigned int nr_pages = 1 << order; in __memcg_kmem_uncharge() local
3033 __memcg_kmem_uncharge_memcg(memcg, nr_pages); in __memcg_kmem_uncharge()
3040 css_put_many(&memcg->css, nr_pages); in __memcg_kmem_uncharge()
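
The kmem entry points convert an allocation order into pages (nr_pages = 1 << order at lines 2950 and 3027) and charge in two steps: try_charge() against the memory counter first, then the kmem counter, with cancel_charge() rolling back the first step if the kmem counter refuses (line 2970); line 2967 is the force-charge fallback for allocations that must not fail, and lines 3013-3017 reverse both steps on uncharge. A hedged sketch of that shape, reusing the model helpers above:

/* Two-step charge with rollback, roughly the shape of __memcg_kmem_charge_memcg();
 * the budgets stand in for the memory and kmem page counters. */
static int kmem_charge_model(void *memcg, long *memory_budget,
                             long *kmem_budget, int order)
{
        unsigned int nr_pages = 1U << order;    /* order 0 = 1 page, order 3 = 8 */

        if (try_charge_model(memcg, memory_budget, nr_pages))
                return -1;

        if (!counter_try_charge_model(kmem_budget, nr_pages)) {
                *memory_budget += nr_pages;     /* cancel_charge(): undo step one */
                return -1;
        }
        return 0;
}
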
3616 unsigned long nr_pages; in mem_cgroup_write() local
3620 ret = page_counter_memparse(buf, "-1", &nr_pages); in mem_cgroup_write()
3632 ret = mem_cgroup_resize_max(memcg, nr_pages, false); in mem_cgroup_write()
3635 ret = mem_cgroup_resize_max(memcg, nr_pages, true); in mem_cgroup_write()
3641 ret = memcg_update_kmem_max(memcg, nr_pages); in mem_cgroup_write()
3644 ret = memcg_update_tcp_max(memcg, nr_pages); in mem_cgroup_write()
3649 memcg->soft_limit = nr_pages; in mem_cgroup_write()
5416 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; in mem_cgroup_move_account() local
5445 __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages); in mem_cgroup_move_account()
5446 __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages); in mem_cgroup_move_account()
5458 __mod_lruvec_state(from_vec, NR_FILE_DIRTY, -nr_pages); in mem_cgroup_move_account()
5459 __mod_lruvec_state(to_vec, NR_FILE_DIRTY, nr_pages); in mem_cgroup_move_account()
5464 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages); in mem_cgroup_move_account()
5465 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages); in mem_cgroup_move_account()
5500 mem_cgroup_charge_statistics(to, page, compound, nr_pages); in mem_cgroup_move_account()
5502 mem_cgroup_charge_statistics(from, page, compound, -nr_pages); in mem_cgroup_move_account()
6080 unsigned long nr_pages; in memory_high_write() local
6091 nr_pages = page_counter_read(&memcg->memory); in memory_high_write()
6092 if (nr_pages > high) in memory_high_write()
6093 try_to_free_mem_cgroup_pages(memcg, nr_pages - high, in memory_high_write()
6123 unsigned long nr_pages = page_counter_read(&memcg->memory); in memory_max_write() local
6125 if (nr_pages <= max) in memory_max_write()
6140 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, in memory_max_write()
6461 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; in mem_cgroup_try_charge() local
6494 ret = try_charge(memcg, gfp_mask, nr_pages); in mem_cgroup_try_charge()
6535 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; in mem_cgroup_commit_charge() local
6553 mem_cgroup_charge_statistics(memcg, page, compound, nr_pages); in mem_cgroup_commit_charge()
6564 mem_cgroup_uncharge_swap(entry, nr_pages); in mem_cgroup_commit_charge()
6579 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; in mem_cgroup_cancel_charge() local
6591 cancel_charge(memcg, nr_pages); in mem_cgroup_cancel_charge()
6612 unsigned long nr_pages = ug->nr_anon + ug->nr_file + ug->nr_kmem; in uncharge_batch() local
6616 page_counter_uncharge(&ug->memcg->memory, nr_pages); in uncharge_batch()
6618 page_counter_uncharge(&ug->memcg->memsw, nr_pages); in uncharge_batch()
6630 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, nr_pages); in uncharge_batch()
6635 css_put_many(&ug->memcg->css, nr_pages); in uncharge_batch()
6662 unsigned int nr_pages = 1; in uncharge_page() local
6665 nr_pages = compound_nr(page); in uncharge_page()
6666 ug->nr_huge += nr_pages; in uncharge_page()
6669 ug->nr_anon += nr_pages; in uncharge_page()
6671 ug->nr_file += nr_pages; in uncharge_page()
6673 ug->nr_shmem += nr_pages; in uncharge_page()
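
The uncharge path batches in the other direction: uncharge_page() only tallies per-type page counts into an uncharge_gather (using compound_nr() for compound pages, line 6665), and uncharge_batch() then uncharges the whole sum nr_anon + nr_file + nr_kmem in one go (line 6612, lines 6616-6618) and feeds the same total to the event counter at line 6630. A simplified model of that gather-then-flush pattern, with a reduced field set:

/* Reduced uncharge_gather: only the fields that feed the page counters. */
struct uncharge_gather_model {
        unsigned long nr_anon;
        unsigned long nr_file;
        unsigned long nr_kmem;
};

static void uncharge_page_model(struct uncharge_gather_model *ug,
                                unsigned int nr_pages, int anon, int kmem)
{
        if (kmem)
                ug->nr_kmem += nr_pages;
        else if (anon)
                ug->nr_anon += nr_pages;
        else
                ug->nr_file += nr_pages;        /* shmem is counted here too */
}

static void uncharge_batch_model(const struct uncharge_gather_model *ug,
                                 long *budget)
{
        unsigned long nr_pages = ug->nr_anon + ug->nr_file + ug->nr_kmem;

        *budget += nr_pages;            /* one uncharge for the whole batch */
}
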
6762 unsigned int nr_pages; in mem_cgroup_migrate() local
6786 nr_pages = compound ? hpage_nr_pages(newpage) : 1; in mem_cgroup_migrate()
6788 page_counter_charge(&memcg->memory, nr_pages); in mem_cgroup_migrate()
6790 page_counter_charge(&memcg->memsw, nr_pages); in mem_cgroup_migrate()
6791 css_get_many(&memcg->css, nr_pages); in mem_cgroup_migrate()
6796 mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages); in mem_cgroup_migrate()
6851 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) in mem_cgroup_charge_skmem() argument
6858 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { in mem_cgroup_charge_skmem()
6862 page_counter_charge(&memcg->tcpmem, nr_pages); in mem_cgroup_charge_skmem()
6871 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages); in mem_cgroup_charge_skmem()
6873 if (try_charge(memcg, gfp_mask, nr_pages) == 0) in mem_cgroup_charge_skmem()
6876 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages); in mem_cgroup_charge_skmem()
6885 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) in mem_cgroup_uncharge_skmem() argument
6888 page_counter_uncharge(&memcg->tcpmem, nr_pages); in mem_cgroup_uncharge_skmem()
6892 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); in mem_cgroup_uncharge_skmem()
6894 refill_stock(memcg, nr_pages); in mem_cgroup_uncharge_skmem()
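
The socket-memory hooks never fail the caller outright: on the legacy tcpmem counter the charge is forced even when page_counter_try_charge() says no (line 6862), and otherwise a failed try_charge() is repeated with __GFP_NOFAIL (line 6876); in both cases it is the return value, not the charge itself, that signals pressure back to the network stack. The pattern in isolation, reusing the budget helper from the try_charge sketch above:

/* Charge that never fails but reports whether it fit under the limit,
 * mirroring the shape of mem_cgroup_charge_skmem(). */
static int charge_skmem_model(long *budget, unsigned int nr_pages)
{
        if (counter_try_charge_model(budget, nr_pages))
                return 1;               /* fits: no pressure */

        *budget -= nr_pages;            /* force the charge anyway */
        return 0;                       /* tell the socket layer to back off */
}
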
7055 unsigned int nr_pages = hpage_nr_pages(page); in mem_cgroup_try_charge_swap() local
7077 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { in mem_cgroup_try_charge_swap()
7085 if (nr_pages > 1) in mem_cgroup_try_charge_swap()
7086 mem_cgroup_id_get_many(memcg, nr_pages - 1); in mem_cgroup_try_charge_swap()
7087 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages); in mem_cgroup_try_charge_swap()
7089 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages); in mem_cgroup_try_charge_swap()
7099 void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) in mem_cgroup_uncharge_swap() argument
7107 id = swap_cgroup_record(entry, 0, nr_pages); in mem_cgroup_uncharge_swap()
7113 page_counter_uncharge(&memcg->swap, nr_pages); in mem_cgroup_uncharge_swap()
7115 page_counter_uncharge(&memcg->memsw, nr_pages); in mem_cgroup_uncharge_swap()
7117 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); in mem_cgroup_uncharge_swap()
7118 mem_cgroup_id_put_many(memcg, nr_pages); in mem_cgroup_uncharge_swap()