/*
 * linux/mm/swap_state.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 *
 * Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/page_cgroup.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list, to make sync_page look nicer, and to allow
 * future use of radix_tree tags in the swap cache.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.sync_page	= block_sync_page,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.migratepage	= migrate_page,
};

static struct backing_dev_info swap_backing_dev_info = {
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
	.unplug_io_fn	= swap_unplug_io_fn,
};

struct address_space swapper_space = {
	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
	.tree_lock	= __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock),
	.a_ops		= &swap_aops,
	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
	.backing_dev_info = &swap_backing_dev_info,
};
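
/*
 * Illustrative sketch (not part of the original file): the "fiction" above
 * is what lets generic code treat swap-cache pages like file-backed ones.
 * For a page with PageSwapCache() set, page_mapping() resolves to
 * &swapper_space, and the page's index in swapper_space.page_tree is its
 * swap entry value.  Roughly:
 *
 *	struct address_space *mapping = page_mapping(page);
 *	if (PageSwapCache(page)) {
 *		swp_entry_t entry = { .val = page_private(page) };
 *		// mapping == &swapper_space,
 *		// entry.val is the radix-tree index used below
 *	}
 */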

#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages);
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap = %ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}
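
/*
 * Illustrative note (not in the original source): the "<< (PAGE_SHIFT - 10)"
 * above converts a count of pages into kilobytes.  With 4096-byte pages,
 * PAGE_SHIFT is 12, so the shift is by 2:
 *
 *	nr_swap_pages << (12 - 10)  ==  nr_swap_pages * 4	// 4 kB per page
 */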

/*
 * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(PageSwapCache(page));
	VM_BUG_ON(!PageSwapBacked(page));

	error = radix_tree_preload(gfp_mask);
	if (!error) {
		page_cache_get(page);
		SetPageSwapCache(page);
		set_page_private(page, entry.val);

		spin_lock_irq(&swapper_space.tree_lock);
		error = radix_tree_insert(&swapper_space.page_tree,
						entry.val, page);
		if (likely(!error)) {
			total_swapcache_pages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
			INC_CACHE_INFO(add_total);
		}
		spin_unlock_irq(&swapper_space.tree_lock);
		radix_tree_preload_end();

		if (unlikely(error)) {
			set_page_private(page, 0UL);
			ClearPageSwapCache(page);
			page_cache_release(page);
		}
	}
	return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	swp_entry_t ent = {.val = page_private(page)};

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageSwapCache(page));
	VM_BUG_ON(PageWriteback(page));

	radix_tree_delete(&swapper_space.page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	total_swapcache_pages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
	mem_cgroup_uncharge_swapcache(page, ent);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageUptodate(page));

	for (;;) {
		entry = get_swap_page();
		if (!entry.val)
			return 0;

		/*
		 * Radix-tree node allocations from PF_MEMALLOC contexts could
		 * completely exhaust the page allocator.  __GFP_NOMEMALLOC
		 * stops emergency reserves from being allocated.
		 *
		 * TODO: this could cause a theoretical memory reclaim
		 * deadlock in the swap out path.
		 */
		/*
		 * Add it to the swap cache and mark it dirty
		 */
		err = add_to_swap_cache(page, entry,
				__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

		switch (err) {
		case 0:				/* Success */
			SetPageDirty(page);
			return 1;
		case -EEXIST:
			/* Raced with "speculative" read_swap_cache_async */
			swap_free(entry);
			continue;
		default:
			/* -ENOMEM radix-tree allocation failure */
			swap_free(entry);
			return 0;
		}
	}
}
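
/*
 * Illustrative sketch (not part of this file): the typical caller is page
 * reclaim (shrink_page_list() in mm/vmscan.c), which, roughly speaking, does
 * something like the following for a locked anonymous page that is not yet
 * in the swap cache:
 *
 *	if (PageAnon(page) && !PageSwapCache(page)) {
 *		if (!add_to_swap(page))
 *			goto activate_locked;	// no swap slot: keep the page
 *	}
 *
 * On success the page is dirty and in the swap cache, so it can then be
 * unmapped and written out via swap_writepage().
 */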

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;

	entry.val = page_private(page);

	spin_lock_irq(&swapper_space.tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&swapper_space.tree_lock);

	swap_free(entry);
	page_cache_release(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 * - Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;

	lru_add_drain();
	while (nr) {
		int todo = min(nr, PAGEVEC_SIZE);
		int i;

		for (i = 0; i < todo; i++)
			free_swap_cache(pagep[i]);
		release_pages(pagep, todo, 0);
		pagep += todo;
		nr -= todo;
	}
}
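
/*
 * Illustrative sketch (not part of this file): the main user of
 * free_pages_and_swap_cache() is the mmu_gather code that tears down page
 * tables (see tlb_flush_mmu() in include/asm-generic/tlb.h), roughly:
 *
 *	free_pages_and_swap_cache(tlb->pages, tlb->nr);	// pages gathered
 *	tlb->nr = 0;					// while unmapping
 *
 * Draining the per-cpu LRU pagevecs first (lru_add_drain() above) drops the
 * references those pagevecs hold, so the release_pages() calls are more
 * likely to actually free the pages.
 */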

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock to keep page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(&swapper_space, entry.val);

	if (page)
		INC_CACHE_INFO(find_success);

	INC_CACHE_INFO(find_total);
	return page;
}
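
/*
 * Illustrative sketch (not part of this file): the page fault path
 * (do_swap_page() in mm/memory.c) pairs this lookup with readahead,
 * roughly:
 *
 *	page = lookup_swap_cache(entry);
 *	if (!page)
 *		page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
 *					vma, address);
 *
 * so the swap cache is consulted first and the disk is only read when the
 * entry is not already resident.
 */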

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(&swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		if (!swap_duplicate(entry))
			break;

		/*
		 * Associate the page with swap entry in the swap cache.
		 * May fail (-EEXIST) if there is already a page associated
		 * with this entry in the swap cache: added by a racing
		 * read_swap_cache_async, or add_to_swap or shmem_writepage
		 * re-using the just freed swap entry for an existing page.
		 * May fail (-ENOMEM) if radix-tree node allocation failed.
		 */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
		if (likely(!err)) {
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			swap_readpage(NULL, new_page);
			return new_page;
		}
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		swap_free(entry);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	int nr_pages;
	struct page *page;
	unsigned long offset;
	unsigned long end_offset;

	/*
	 * Get starting offset for readaround, and number of pages to read.
	 * Adjust starting address by readbehind (for NUMA interleave case)?
	 * No, it's very unlikely that swap layout would follow vma layout,
	 * more likely that neighbouring swap pages came from the same node:
	 * so use the same "addr" to choose the same node for each swap read.
	 */
	nr_pages = valid_swaphandles(entry, &offset);
	for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			break;
		page_cache_release(page);
	}
	lru_add_drain();	/* Push any new pages onto the LRU now */
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
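
/*
 * Illustrative worked example (not part of this file): with the usual
 * page_cluster value of 3, valid_swaphandles() is expected to return an
 * aligned window of 1 << 3 == 8 swap slots containing the target entry.
 * For a target at swap offset 21:
 *
 *	base = (21 >> 3) << 3 = 16;	// start of the aligned block
 *	end  = base + 8       = 24;	// one past the last slot
 *
 * so the loop above issues read_swap_cache_async() for offsets 16..23
 * (fewer if some of those slots are unused), and the final call reads the
 * original entry itself, which is already in flight or cached by then.
 */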