Home
last modified time | relevance | path

Searched refs:swap (Results 1 – 11 of 11) sorted by relevance

/mm/
Dshmem.c357 pgoff_t index, swp_entry_t swap) in shmem_confirm_swap() argument
364 return item == swp_to_radix_entry(swap); in shmem_confirm_swap()
1130 swp_entry_t swap, struct page **pagep) in shmem_unuse_inode() argument
1138 radswap = swp_to_radix_entry(swap); in shmem_unuse_inode()
1198 swap_free(swap); in shmem_unuse_inode()
1207 int shmem_unuse(swp_entry_t swap, struct page *page) in shmem_unuse() argument
1218 if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val)) in shmem_unuse()
1237 error = shmem_unuse_inode(info, swap, &page); in shmem_unuse()
1267 swp_entry_t swap; in shmem_writepage() local
1325 swap = get_swap_page(page); in shmem_writepage()
[all …]
DKconfig433 XXX: For now this only does clustered swap space allocation.
473 bool "Enable frontswap to cache swap pages if tmem is present"
478 of a "backing" store for a swap device. The data is stored into
482 a significant swap I/O reduction may be achieved. When none is
485 and swap data is stored as normal on the matching swap device.
543 bool "Compressed cache for swap pages (EXPERIMENTAL)"
549 A lightweight compressed cache for swap pages. It takes
552 This can result in a significant I/O reduction on swap device and,
553 in the case where decompressing from RAM is faster than swap device
Dmemcontrol.c1254 swap_limit = memcg->swap.limit; in mem_cgroup_get_limit()
2807 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) in mem_cgroup_usage() argument
2817 if (swap) in mem_cgroup_usage()
2821 if (!swap) in mem_cgroup_usage()
3324 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) in __mem_cgroup_threshold() argument
3331 if (!swap) in __mem_cgroup_threshold()
3339 usage = mem_cgroup_usage(memcg, swap); in __mem_cgroup_threshold()
4328 page_counter_init(&memcg->swap, &parent->swap); in mem_cgroup_css_alloc()
4334 page_counter_init(&memcg->swap, NULL); in mem_cgroup_css_alloc()
4444 page_counter_limit(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
[all …]
Dswapfile.c2709 static void *swap_start(struct seq_file *swap, loff_t *pos) in swap_start() argument
2732 static void *swap_next(struct seq_file *swap, void *v, loff_t *pos) in swap_next() argument
2754 static void swap_stop(struct seq_file *swap, void *v) in swap_stop() argument
2759 static int swap_show(struct seq_file *swap, void *v) in swap_show() argument
2766 seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n"); in swap_show()
2771 len = seq_file_path(swap, file, " \t\n\\"); in swap_show()
2772 seq_printf(swap, "%*s%s\t%u\t%u\t%d\n", in swap_show()
3477 swp_entry_t swap = { .val = page_private(page) }; in page_swap_info() local
3478 return swap_info[swp_type(swap)]; in page_swap_info()
3493 swp_entry_t swap = { .val = page_private(page) }; in __page_file_index() local
[all …]
Dmadvise.c248 swp_entry_t swap; in force_shm_swapin_readahead() local
259 swap = radix_to_swp_entry(page); in force_shm_swapin_readahead()
260 page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE, in force_shm_swapin_readahead()
DMakefile37 readahead.o swap.o truncate.o vmscan.o shmem.o \
Dkhugepaged.c1599 int present, swap; in khugepaged_scan_shmem() local
1604 swap = 0; in khugepaged_scan_shmem()
1618 if (++swap > khugepaged_max_ptes_swap) { in khugepaged_scan_shmem()
Dvmscan.c729 swp_entry_t swap = { .val = page_private(page) }; in __remove_mapping() local
730 mem_cgroup_swapout(page, swap); in __remove_mapping()
733 put_swap_page(page, swap); in __remove_mapping()
Dslab_common.c1159 swap(list[i], list[rand]); in freelist_randomize()
Dmmap.c713 swap(vma, next); in __vma_adjust()
Dslab.c2493 swap(((freelist_idx_t *)page->freelist)[a], in swap_free_obj()