Searched refs:swap (Results 1 – 11 of 11) sorted by relevance
/mm/ |
D | shmem.c | 357 pgoff_t index, swp_entry_t swap) in shmem_confirm_swap() argument 364 return item == swp_to_radix_entry(swap); in shmem_confirm_swap() 1130 swp_entry_t swap, struct page **pagep) in shmem_unuse_inode() argument 1138 radswap = swp_to_radix_entry(swap); in shmem_unuse_inode() 1198 swap_free(swap); in shmem_unuse_inode() 1207 int shmem_unuse(swp_entry_t swap, struct page *page) in shmem_unuse() argument 1218 if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val)) in shmem_unuse() 1237 error = shmem_unuse_inode(info, swap, &page); in shmem_unuse() 1267 swp_entry_t swap; in shmem_writepage() local 1325 swap = get_swap_page(page); in shmem_writepage() [all …]
|
D | Kconfig | 433 XXX: For now this only does clustered swap space allocation. 473 bool "Enable frontswap to cache swap pages if tmem is present" 478 of a "backing" store for a swap device. The data is stored into 482 a significant swap I/O reduction may be achieved. When none is 485 and swap data is stored as normal on the matching swap device. 543 bool "Compressed cache for swap pages (EXPERIMENTAL)" 549 A lightweight compressed cache for swap pages. It takes 552 This can result in a significant I/O reduction on swap device and, 553 in the case where decompressing from RAM is faster than swap device
|
D | memcontrol.c | 1254 swap_limit = memcg->swap.limit; in mem_cgroup_get_limit() 2807 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) in mem_cgroup_usage() argument 2817 if (swap) in mem_cgroup_usage() 2821 if (!swap) in mem_cgroup_usage() 3324 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) in __mem_cgroup_threshold() argument 3331 if (!swap) in __mem_cgroup_threshold() 3339 usage = mem_cgroup_usage(memcg, swap); in __mem_cgroup_threshold() 4328 page_counter_init(&memcg->swap, &parent->swap); in mem_cgroup_css_alloc() 4334 page_counter_init(&memcg->swap, NULL); in mem_cgroup_css_alloc() 4444 page_counter_limit(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_reset() [all …]
|
D | swapfile.c | 2709 static void *swap_start(struct seq_file *swap, loff_t *pos) in swap_start() argument 2732 static void *swap_next(struct seq_file *swap, void *v, loff_t *pos) in swap_next() argument 2754 static void swap_stop(struct seq_file *swap, void *v) in swap_stop() argument 2759 static int swap_show(struct seq_file *swap, void *v) in swap_show() argument 2766 seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n"); in swap_show() 2771 len = seq_file_path(swap, file, " \t\n\\"); in swap_show() 2772 seq_printf(swap, "%*s%s\t%u\t%u\t%d\n", in swap_show() 3477 swp_entry_t swap = { .val = page_private(page) }; in page_swap_info() local 3478 return swap_info[swp_type(swap)]; in page_swap_info() 3493 swp_entry_t swap = { .val = page_private(page) }; in __page_file_index() local [all …]
|
D | madvise.c | 248 swp_entry_t swap; in force_shm_swapin_readahead() local 259 swap = radix_to_swp_entry(page); in force_shm_swapin_readahead() 260 page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE, in force_shm_swapin_readahead()
|
D | Makefile | 37 readahead.o swap.o truncate.o vmscan.o shmem.o \
|
D | khugepaged.c | 1599 int present, swap; in khugepaged_scan_shmem() local 1604 swap = 0; in khugepaged_scan_shmem() 1618 if (++swap > khugepaged_max_ptes_swap) { in khugepaged_scan_shmem()
|
D | vmscan.c | 729 swp_entry_t swap = { .val = page_private(page) }; in __remove_mapping() local 730 mem_cgroup_swapout(page, swap); in __remove_mapping() 733 put_swap_page(page, swap); in __remove_mapping()
|
D | slab_common.c | 1159 swap(list[i], list[rand]); in freelist_randomize()
|
D | mmap.c | 713 swap(vma, next); in __vma_adjust()
|
D | slab.c | 2493 swap(((freelist_idx_t *)page->freelist)[a], in swap_free_obj()
|