
Searched refs:swap (Results 1 – 12 of 12) sorted by relevance

/mm/
shmem.c
376 pgoff_t index, swp_entry_t swap) in shmem_confirm_swap() argument
378 return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap); in shmem_confirm_swap()
1298 swp_entry_t swap; in shmem_writepage() local
1356 swap = get_swap_page(page); in shmem_writepage()
1357 if (!swap.val) in shmem_writepage()
1372 if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) { in shmem_writepage()
1378 swap_shmem_alloc(swap); in shmem_writepage()
1379 shmem_delete_from_page_cache(page, swp_to_radix_entry(swap)); in shmem_writepage()
1388 put_swap_page(page, swap); in shmem_writepage()
1450 static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, in shmem_swapin() argument
[all …]
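The shmem.c hits above trace the tmpfs swap-out path: shmem_writepage() allocates a swap entry with get_swap_page(), adds the page to the swap cache, and replaces the page-cache slot with a swap radix entry that shmem_confirm_swap() later verifies; shmem_swapin() is the read-back side. As a rough userspace illustration (a minimal sketch, not part of the kernel sources, assuming a Linux/glibc system with memfd_create()), the program below creates shmem-backed memory whose dirty pages are exactly what this path would push to swap under memory pressure:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        size_t len = 64 << 20;  /* 64 MiB of shmem-backed memory */

        /* memfd_create() gives an anonymous tmpfs file; its pages are
         * managed by mm/shmem.c and, under memory pressure, written to
         * swap via the shmem_writepage() path shown above. */
        int fd = memfd_create("shmem-demo", 0);
        if (fd < 0 || ftruncate(fd, len) < 0) {
                perror("memfd_create/ftruncate");
                return 1;
        }

        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        memset(p, 0xab, len);   /* dirty the pages so reclaim must swap them */
        printf("dirtied %zu MiB of shmem; reclaim would route these pages "
               "through shmem_writepage() -> get_swap_page()\n", len >> 20);

        munmap(p, len);
        close(fd);
        return 0;
}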
memcontrol.c
1529 K((u64)page_counter_read(&memcg->swap)), in mem_cgroup_print_oom_meminfo()
1530 K((u64)memcg->swap.max), memcg->swap.failcnt); in mem_cgroup_print_oom_meminfo()
1563 swap_max = memcg->swap.max; in mem_cgroup_get_max()
3338 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) in mem_cgroup_usage() argument
3345 if (swap) in mem_cgroup_usage()
3348 if (!swap) in mem_cgroup_usage()
3956 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) in __mem_cgroup_threshold() argument
3963 if (!swap) in __mem_cgroup_threshold()
3971 usage = mem_cgroup_usage(memcg, swap); in __mem_cgroup_threshold()
5112 page_counter_init(&memcg->swap, &parent->swap); in mem_cgroup_css_alloc()
[all …]
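The memcontrol.c hits show the per-cgroup swap page_counter (memcg->swap), its max limit and failcnt, and how mem_cgroup_usage() and the threshold code read it. On a cgroup v2 hierarchy those counters surface as the memory.swap.current and memory.swap.max files; the sketch below (a hypothetical helper, assuming cgroup v2 mounted at /sys/fs/cgroup) simply reads them back for a given cgroup:

#include <stdio.h>

/* Print the swap counters memcontrol.c maintains per cgroup. The files
 * below correspond to page_counter_read(&memcg->swap) and memcg->swap.max
 * from the hits above (assumes cgroup v2 at /sys/fs/cgroup). */
static void print_swap_counter(const char *cgroup, const char *file)
{
        char path[512], buf[64];
        FILE *f;

        snprintf(path, sizeof(path), "/sys/fs/cgroup/%s/%s", cgroup, file);
        f = fopen(path, "r");
        if (!f) {
                perror(path);
                return;
        }
        if (fgets(buf, sizeof(buf), f))
                printf("%-20s %s", file, buf);
        fclose(f);
}

int main(int argc, char **argv)
{
        if (argc < 2) {
                fprintf(stderr, "usage: %s <cgroup-path-under-/sys/fs/cgroup>\n",
                        argv[0]);
                return 1;
        }
        print_swap_counter(argv[1], "memory.swap.current");
        print_swap_counter(argv[1], "memory.swap.max");
        return 0;
}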
Kconfig
417 XXX: For now, swap cluster backing transparent huge page
457 bool "Enable frontswap to cache swap pages if tmem is present"
461 of a "backing" store for a swap device. The data is stored into
465 a significant swap I/O reduction may be achieved. When none is
468 and swap data is stored as normal on the matching swap device.
526 bool "Compressed cache for swap pages (EXPERIMENTAL)"
531 A lightweight compressed cache for swap pages. It takes
534 This can result in a significant I/O reduction on swap device and,
535 in the case where decompressing from RAM is faster than swap device
swapfile.c
2709 static void *swap_start(struct seq_file *swap, loff_t *pos) in swap_start() argument
2730 static void *swap_next(struct seq_file *swap, void *v, loff_t *pos) in swap_next() argument
2750 static void swap_stop(struct seq_file *swap, void *v) in swap_stop() argument
2755 static int swap_show(struct seq_file *swap, void *v) in swap_show() argument
2762 seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n"); in swap_show()
2767 len = seq_file_path(swap, file, " \t\n\\"); in swap_show()
2768 seq_printf(swap, "%*s%s\t%u\t%u\t%d\n", in swap_show()
3502 swp_entry_t swap = { .val = page_private(page) }; in __page_file_index() local
3503 return swp_offset(swap); in __page_file_index()
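The swapfile.c hits are the seq_file iterator behind /proc/swaps: swap_start/next/stop walk the list of active swap devices and swap_show() prints one entry per line under the Filename/Type/Size/Used/Priority header. A small userspace sketch (illustrative only) that parses that output back:

#include <stdio.h>

/* Parse the table that swap_show() emits through /proc/swaps.
 * Columns follow the header printed by seq_puts():
 *   Filename  Type  Size  Used  Priority   (Size and Used are in KiB) */
int main(void)
{
        char header[512], name[256], type[32];
        unsigned long long size, used;
        int prio;
        FILE *f = fopen("/proc/swaps", "r");

        if (!f) {
                perror("/proc/swaps");
                return 1;
        }

        /* skip the header line written by seq_puts() in swap_show() */
        if (!fgets(header, sizeof(header), f)) {
                fclose(f);
                return 1;
        }

        while (fscanf(f, "%255s %31s %llu %llu %d",
                      name, type, &size, &used, &prio) == 5)
                printf("%s: %llu KiB used of %llu KiB, priority %d\n",
                       name, used, size, prio);

        fclose(f);
        return 0;
}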
madvise.c
228 swp_entry_t swap; in force_shm_swapin_readahead() local
239 swap = radix_to_swp_entry(page); in force_shm_swapin_readahead()
240 page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE, in force_shm_swapin_readahead()
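The madvise.c hit is force_shm_swapin_readahead(), which converts swap radix entries in a shmem mapping back into swp_entry_t values and starts read_swap_cache_async() for them; it is reached when userspace calls madvise(MADV_WILLNEED) on a shmem-backed range. A minimal sketch of that trigger (illustrative; MAP_SHARED | MAP_ANONYMOUS memory is shmem-backed):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 16 << 20;

        /* Shared anonymous memory is backed by shmem, so swapped-out pages
         * in this range are candidates for the readahead path above. */
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        memset(p, 0x5a, len);

        /* MADV_WILLNEED on a shmem mapping reaches
         * force_shm_swapin_readahead() -> read_swap_cache_async()
         * for any pages that have gone out to swap. */
        if (madvise(p, len, MADV_WILLNEED) != 0)
                perror("madvise(MADV_WILLNEED)");

        munmap(p, len);
        return 0;
}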
Makefile
40 readahead.o swap.o truncate.o vmscan.o shmem.o \
khugepaged.c
1820 int present, swap; in khugepaged_scan_file() local
1825 swap = 0; in khugepaged_scan_file()
1833 if (++swap > khugepaged_max_ptes_swap) { in khugepaged_scan_file()
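The khugepaged.c hit counts swapped-out PTEs while scanning a range and gives up on collapsing it into a huge page once the count exceeds khugepaged_max_ptes_swap. That limit is exposed as a sysfs tunable; the sketch below (illustrative, assuming the usual transparent_hugepage sysfs layout) reads the current value:

#include <stdio.h>

/* khugepaged_max_ptes_swap from the scan loop above is exported through
 * sysfs; this just reads it back. */
int main(void)
{
        unsigned long max_ptes_swap;
        FILE *f = fopen("/sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_swap",
                        "r");

        if (!f) {
                perror("max_ptes_swap");
                return 1;
        }
        if (fscanf(f, "%lu", &max_ptes_swap) == 1)
                printf("khugepaged stops collapsing a range after %lu swap PTEs\n",
                       max_ptes_swap);
        fclose(f);
        return 0;
}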
vmscan.c
946 swp_entry_t swap = { .val = page_private(page) }; in __remove_mapping() local
947 mem_cgroup_swapout(page, swap); in __remove_mapping()
948 __delete_from_swap_cache(page, swap); in __remove_mapping()
950 put_swap_page(page, swap); in __remove_mapping()
slab_common.c
1355 swap(list[i], list[rand]); in freelist_randomize()
mmap.c
756 swap(vma, next); in __vma_adjust()
slab.c
2423 swap(((freelist_idx_t *)page->freelist)[a], in swap_free_obj()
page_alloc.c
1760 swap(j, *i); in deferred_init_maxorder()
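The last four hits (slab_common.c, mmap.c, slab.c, page_alloc.c) are unrelated to swap-to-disk: they use the kernel's generic swap(a, b) helper, which exchanges two same-typed lvalues through a typeof() temporary. A standalone sketch of the same pattern (the definition below is illustrative, not copied from a specific kernel header):

#include <stdio.h>

/* Same shape as the kernel's generic swap() helper: exchange two lvalues
 * of the same type via a typeof() temporary (GCC/Clang extension). */
#define swap(a, b) \
        do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

int main(void)
{
        int i = 1, j = 2;
        const char *x = "first", *y = "second";

        swap(i, j);     /* integers, as in deferred_init_maxorder() */
        swap(x, y);     /* works for any assignable type, e.g. pointers */

        printf("i=%d j=%d x=%s y=%s\n", i, j, x, y);
        return 0;
}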