// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>

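/*
 * Completion handler for swap-out bios: ends writeback on the page and
 * drops the bio reference. On I/O error the page is redirtied so the
 * data is not lost to reclaim.
 */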
void end_swap_bio_write(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		SetPageError(page);
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid rotate_reclaimable_page()
		 */
		set_page_dirty(page);
		pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
	bio_put(bio);
}

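/*
 * Completion handler for swap-in bios. For synchronous reads,
 * swap_readpage() stashes the submitting task in ->bi_private and polls
 * until this handler clears it; the task reference taken at submission
 * is dropped here after waking the waiter.
 */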
static void end_swap_bio_read(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);
	struct task_struct *waiter = bio->bi_private;

	if (bio->bi_status) {
		SetPageError(page);
		ClearPageUptodate(page);
		pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
		goto out;
	}

	SetPageUptodate(page);
out:
	unlock_page(page);
	WRITE_ONCE(bio->bi_private, NULL);
	bio_put(bio);
	if (waiter) {
		blk_wake_io_task(waiter);
		put_task_struct(waiter);
	}
}

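/*
 * Build the swap extent tree for a filesystem-backed swapfile by
 * probing the file's blocks with bmap(). Only PAGE_SIZE-aligned runs of
 * contiguous blocks become extents; a hole anywhere in the file aborts
 * activation with -EINVAL. Returns the number of extents added.
 */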
int generic_swapfile_activate(struct swap_info_struct *sis,
				struct file *swap_file,
				sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent tree.  This code doesn't try
	 * to be very smart.
	 */
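	/*
	 * For example, with 4KiB pages on a filesystem using 1KiB blocks,
	 * blkbits = 10 and blocks_per_page = 4096 >> 10 = 4: each swap
	 * page needs four consecutive blocks, and the first of them must
	 * be page-aligned (first_block & 3 == 0) for the run to be used.
	 */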
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = probe_block;
		ret = bmap(inode, &first_block);
		if (ret || !first_block)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = probe_block + block_in_page;
			ret = bmap(inode, &block);
			if (ret || !block)
				goto bad_bmap;

			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
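/*
 * swap_writepage() is the writepage path for swap: it first tries to
 * free the swap slot if the page can simply be dropped, lets the
 * architecture preserve any out-of-band state (e.g. memory tags),
 * offers the page to frontswap, and only then falls through to block
 * I/O via __swap_writepage().
 */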
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret = 0;

	if (try_to_free_swap(page)) {
		unlock_page(page);
		goto out;
	}
	/*
	 * Arch code may have to preserve more data than just the page
	 * contents, e.g. memory tags.
	 */
	ret = arch_prepare_to_swap(page);
	if (ret) {
		set_page_dirty(page);
		unlock_page(page);
		goto out;
	}
	if (frontswap_store(page) == 0) {
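		/*
		 * frontswap stored the page synchronously, so start and
		 * immediately end writeback to keep the page's writeback
		 * state consistent without issuing any block I/O.
		 */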
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
		goto out;
	}
	ret = __swap_writepage(page, wbc, end_swap_bio_write);
out:
	return ret;
}

static inline void count_swpout_vm_event(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (unlikely(PageTransHuge(page)))
		count_vm_event(THP_SWPOUT);
#endif
	count_vm_events(PSWPOUT, thp_nr_pages(page));
}

#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
static void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *memcg;

	memcg = page_memcg(page);
	if (!memcg)
		return;

	rcu_read_lock();
	css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys);
	bio_associate_blkg_from_css(bio, css);
	rcu_read_unlock();
}
#else
#define bio_associate_blkg_from_page(bio, page)		do { } while (0)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */

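/*
 * Issue the actual swap-out I/O. Three paths, tried in order:
 * filesystem swapfiles with SWP_FS_OPS go through the filesystem's
 * ->direct_IO; block devices first try the synchronous bdev_write_page()
 * fast path; otherwise a bio is built and submitted asynchronously,
 * completing through end_write_func.
 */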
int __swap_writepage(struct page *page, struct writeback_control *wbc,
		bio_end_io_t end_write_func)
{
	struct bio *bio;
	int ret;
	struct swap_info_struct *sis = page_swap_info(page);

	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	if (data_race(sis->flags & SWP_FS_OPS)) {
		struct kiocb kiocb;
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;
		struct bio_vec bv = {
			.bv_page = page,
			.bv_len  = PAGE_SIZE,
			.bv_offset = 0
		};
		struct iov_iter from;

		iov_iter_bvec(&from, WRITE, &bv, 1, PAGE_SIZE);
		init_sync_kiocb(&kiocb, swap_file);
		kiocb.ki_pos = page_file_offset(page);

		set_page_writeback(page);
		unlock_page(page);
		ret = mapping->a_ops->direct_IO(&kiocb, &from);
		if (ret == PAGE_SIZE) {
			count_vm_event(PSWPOUT);
			ret = 0;
		} else {
			/*
			 * In the case of swap-over-nfs, this can be a
			 * temporary failure if the system has limited
			 * memory for allocating transmit buffers.
			 * Mark the page dirty and avoid
			 * rotate_reclaimable_page(), and rate-limit the
			 * messages, but do not flag PageError as in the
			 * normal direct-to-bio case, since the failure
			 * may be temporary.
			 */
			set_page_dirty(page);
			ClearPageReclaim(page);
			pr_err_ratelimited("Write error on dio swapfile (%llu)\n",
					   page_file_offset(page));
		}
		end_page_writeback(page);
		return ret;
	}

	ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc);
	if (!ret) {
		count_swpout_vm_event(page);
		return 0;
	}

	bio = bio_alloc(GFP_NOIO, 1);
	bio_set_dev(bio, sis->bdev);
	bio->bi_iter.bi_sector = swap_page_sector(page);
	bio->bi_opf = REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc);
	bio->bi_end_io = end_write_func;
	bio_add_page(bio, page, thp_size(page), 0);

	bio_associate_blkg_from_page(bio, page);
	count_swpout_vm_event(page);
	set_page_writeback(page);
	unlock_page(page);
	submit_bio(bio);

	return 0;
}

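/*
 * Read a page back in from swap. frontswap is consulted first; then
 * SWP_FS_OPS swapfiles use the filesystem's ->readpage; devices marked
 * SWP_SYNCHRONOUS_IO try the bdev_read_page() fast path; otherwise a
 * bio is submitted, optionally polled to completion when the caller
 * asked for a synchronous read.
 */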
int swap_readpage(struct page *page, bool synchronous)
{
	struct bio *bio;
	int ret = 0;
	struct swap_info_struct *sis = page_swap_info(page);
	blk_qc_t qc;
	struct gendisk *disk;
	unsigned long pflags;

	VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageUptodate(page), page);

	/*
	 * Count submission time as memory stall. When the device is congested,
	 * or the submitting cgroup is IO-throttled, submission can be a
	 * significant part of overall IO time.
	 */
	psi_memstall_enter(&pflags);

	if (frontswap_load(page) == 0) {
		SetPageUptodate(page);
		unlock_page(page);
		goto out;
	}

	if (data_race(sis->flags & SWP_FS_OPS)) {
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;

		ret = mapping->a_ops->readpage(swap_file, page);
		if (!ret)
			count_vm_event(PSWPIN);
		goto out;
	}

	if (sis->flags & SWP_SYNCHRONOUS_IO) {
		ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
		if (!ret) {
			count_vm_event(PSWPIN);
			goto out;
		}
	}

	ret = 0;
	bio = bio_alloc(GFP_KERNEL, 1);
	bio_set_dev(bio, sis->bdev);
	bio->bi_opf = REQ_OP_READ;
	bio->bi_iter.bi_sector = swap_page_sector(page);
	bio->bi_end_io = end_swap_bio_read;
	bio_add_page(bio, page, thp_size(page), 0);

	disk = bio->bi_bdev->bd_disk;
	/*
	 * Keep this task valid during swap readpage because the oom killer may
	 * attempt to access it in the page fault retry time check.
	 */
	if (synchronous) {
		bio->bi_opf |= REQ_HIPRI;
		get_task_struct(current);
		bio->bi_private = current;
	}
	count_vm_event(PSWPIN);
	bio_get(bio);
	qc = submit_bio(bio);
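	/*
	 * For a synchronous read, busy-poll the queue until
	 * end_swap_bio_read() clears ->bi_private; when a poll finds no
	 * completion, sleep in blk_io_schedule() instead of spinning.
	 */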
	while (synchronous) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio->bi_private))
			break;

		if (!blk_poll(disk->queue, qc, true))
			blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	bio_put(bio);

out:
	psi_memstall_leave(&pflags);
	return ret;
}

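/*
 * set_page_dirty() for swap-backed pages: filesystem swapfiles
 * (SWP_FS_OPS) must go through the filesystem's own address_space op,
 * while plain block-device swap only needs the dirty flag, with no
 * writeback accounting.
 */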
int swap_set_page_dirty(struct page *page)
{
	struct swap_info_struct *sis = page_swap_info(page);

	if (data_race(sis->flags & SWP_FS_OPS)) {
		struct address_space *mapping = sis->swap_file->f_mapping;

		VM_BUG_ON_PAGE(!PageSwapCache(page), page);
		return mapping->a_ops->set_page_dirty(page);
	} else {
		return __set_page_dirty_no_writeback(page);
	}
}