// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
#include <asm/pgtable.h>

static struct bio *get_swap_bio(gfp_t gfp_flags,
				struct page *page, bio_end_io_t end_io)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, 1);
	if (bio) {
		struct block_device *bdev;

		bio->bi_iter.bi_sector = map_swap_page(page, &bdev);
		bio_set_dev(bio, bdev);
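		/*
		 * map_swap_page() returns the swap offset in page-size
		 * units; shifting by PAGE_SHIFT - 9 converts it to
		 * 512-byte sectors (with 4K pages: << 3, 8 sectors/page).
		 */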
		bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
		bio->bi_end_io = end_io;

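		/* a THP goes in as one hpage_nr_pages()-sized segment */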
		bio_add_page(bio, page, PAGE_SIZE * hpage_nr_pages(page), 0);
	}
	return bio;
}

void end_swap_bio_write(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		SetPageError(page);
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid rotate_reclaimable_page()
		 */
		set_page_dirty(page);
		pr_alert("Write-error on swap-device (%u:%u:%llu)\n",
			 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
			 (unsigned long long)bio->bi_iter.bi_sector);
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
	bio_put(bio);
}

static void end_swap_bio_read(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);
	struct task_struct *waiter = bio->bi_private;

	if (bio->bi_status) {
		SetPageError(page);
		ClearPageUptodate(page);
		pr_alert("Read-error on swap-device (%u:%u:%llu)\n",
			 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
			 (unsigned long long)bio->bi_iter.bi_sector);
		goto out;
	}

	SetPageUptodate(page);
out:
	unlock_page(page);
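	/*
	 * Clearing bi_private tells a synchronous waiter polling in
	 * swap_readpage() that the read has completed; the waiter holds
	 * its own bio reference (bio_get() before submission), so the
	 * bio stays valid for it after the bio_put() below.
	 */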
	WRITE_ONCE(bio->bi_private, NULL);
	bio_put(bio);
	if (waiter) {
		blk_wake_io_task(waiter);
		put_task_struct(waiter);
	}
}

int generic_swapfile_activate(struct swap_info_struct *sis,
				struct file *swap_file,
				sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;
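	/* e.g. 4K pages on a 1K-block filesystem: blocks_per_page == 4 */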

	/*
	 * Map all the blocks into the extent tree.  This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = bmap(inode, probe_block);
		if (first_block == 0)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = bmap(inode, probe_block + block_in_page);
			if (block == 0)
				goto bad_bmap;
			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

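		/* convert from filesystem blocks to page-sized swap slots */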
		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret = 0;

	if (try_to_free_swap(page)) {
		unlock_page(page);
		goto out;
	}
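	/*
	 * A frontswap backend (e.g. zswap) may absorb the page entirely;
	 * if it does, run through the writeback protocol so the page
	 * ends up clean without touching the backing device.
	 */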
	if (frontswap_store(page) == 0) {
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
		goto out;
	}
	ret = __swap_writepage(page, wbc, end_swap_bio_write);
out:
	return ret;
}

static inline void count_swpout_vm_event(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (unlikely(PageTransHuge(page)))
		count_vm_event(THP_SWPOUT);
#endif
	count_vm_events(PSWPOUT, hpage_nr_pages(page));
}

int __swap_writepage(struct page *page, struct writeback_control *wbc,
		bio_end_io_t end_write_func)
{
	struct bio *bio;
	int ret;
	struct swap_info_struct *sis = page_swap_info(page);

	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
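	/*
	 * Swap-over-filesystem (e.g. swap over NFS): bypass the block
	 * layer and write the page with a synchronous direct_IO call on
	 * the swap file's mapping.
	 */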
	if (sis->flags & SWP_FS) {
		struct kiocb kiocb;
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;
		struct bio_vec bv = {
			.bv_page = page,
			.bv_len = PAGE_SIZE,
			.bv_offset = 0
		};
		struct iov_iter from;

		iov_iter_bvec(&from, WRITE, &bv, 1, PAGE_SIZE);
		init_sync_kiocb(&kiocb, swap_file);
		kiocb.ki_pos = page_file_offset(page);

		set_page_writeback(page);
		unlock_page(page);
		ret = mapping->a_ops->direct_IO(&kiocb, &from);
		if (ret == PAGE_SIZE) {
			count_vm_event(PSWPOUT);
			ret = 0;
		} else {
			/*
			 * In the case of swap-over-nfs, this can be a
			 * temporary failure if the system has limited
			 * memory for allocating transmit buffers.
			 * Mark the page dirty and avoid
			 * rotate_reclaimable_page, but rate-limit the
			 * messages.  Do not flag PageError as in the
			 * normal direct-to-bio case, since the failure
			 * may be temporary.
			 */
			set_page_dirty(page);
			ClearPageReclaim(page);
			pr_err_ratelimited("Write error on dio swapfile (%llu)\n",
					   page_file_offset(page));
		}
		end_page_writeback(page);
		return ret;
	}

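	/*
	 * Try the bdev ->rw_page() fast path first (implemented by e.g.
	 * zram, brd and pmem); it completes without allocating a bio.
	 */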
	ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc);
	if (!ret) {
		count_swpout_vm_event(page);
		return 0;
	}

	ret = 0;
	bio = get_swap_bio(GFP_NOIO, page, end_write_func);
	if (bio == NULL) {
		set_page_dirty(page);
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	bio->bi_opf = REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc);
	bio_associate_blkg_from_page(bio, page);
	count_swpout_vm_event(page);
	set_page_writeback(page);
	unlock_page(page);
	submit_bio(bio);
out:
	return ret;
}

int swap_readpage(struct page *page, bool synchronous)
{
	struct bio *bio;
	int ret = 0;
	struct swap_info_struct *sis = page_swap_info(page);
	blk_qc_t qc;
	struct gendisk *disk;
	unsigned long pflags;

	VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageUptodate(page), page);

	/*
	 * Count submission time as memory stall. When the device is congested,
	 * or the submitting cgroup IO-throttled, submission can be a
	 * significant part of overall IO time.
	 */
	psi_memstall_enter(&pflags);

	if (frontswap_load(page) == 0) {
		SetPageUptodate(page);
		unlock_page(page);
		goto out;
	}

	if (sis->flags & SWP_FS) {
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;

		ret = mapping->a_ops->readpage(swap_file, page);
		if (!ret)
			count_vm_event(PSWPIN);
		goto out;
	}

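	/* try the ->rw_page() fast path first, as on the write side */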
	ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
	if (!ret) {
		count_vm_event(PSWPIN);
		goto out;
	}

	ret = 0;
	bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
	if (bio == NULL) {
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	disk = bio->bi_disk;
	/*
	 * Keep this task valid during swap readpage because the oom killer may
	 * attempt to access it in the page fault retry time check.
	 */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);
	if (synchronous) {
		bio->bi_opf |= REQ_HIPRI;
		get_task_struct(current);
		bio->bi_private = current;
	}
	count_vm_event(PSWPIN);
	bio_get(bio);
	qc = submit_bio(bio);
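	/*
	 * Synchronous path: spin in blk_poll() for low-latency devices
	 * and sleep when nothing completes; end_swap_bio_read() clears
	 * bi_private to signal completion.
	 */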
	while (synchronous) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio->bi_private))
			break;

		if (!blk_poll(disk->queue, qc, true))
			io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	bio_put(bio);

out:
	psi_memstall_leave(&pflags);
	return ret;
}

int swap_set_page_dirty(struct page *page)
{
	struct swap_info_struct *sis = page_swap_info(page);

	if (sis->flags & SWP_FS) {
		struct address_space *mapping = sis->swap_file->f_mapping;

		VM_BUG_ON_PAGE(!PageSwapCache(page), page);
		return mapping->a_ops->set_page_dirty(page);
	} else {
		return __set_page_dirty_no_writeback(page);
	}
}