// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
#include <linux/delayacct.h>
#include <linux/zswap.h>
#include "swap.h"

#undef CREATE_TRACE_POINTS
#include <trace/hooks/mm.h>

static void __end_swap_bio_write(struct bio *bio)
{
	struct folio *folio = bio_first_folio_all(bio);

	if (bio->bi_status) {
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid folio_rotate_reclaimable()
		 */
		folio_mark_dirty(folio);
		pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
		folio_clear_reclaim(folio);
	}
	folio_end_writeback(folio);
}

static void end_swap_bio_write(struct bio *bio)
{
	__end_swap_bio_write(bio);
	bio_put(bio);
}

static void __end_swap_bio_read(struct bio *bio)
{
	struct folio *folio = bio_first_folio_all(bio);

	if (bio->bi_status) {
		pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
	} else {
		folio_mark_uptodate(folio);
	}
	folio_unlock(folio);
}

static void end_swap_bio_read(struct bio *bio)
{
	__end_swap_bio_read(bio);
	bio_put(bio);
}

int generic_swapfile_activate(struct swap_info_struct *sis,
				struct file *swap_file,
				sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent tree.  This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = probe_block;
		ret = bmap(inode, &first_block);
		if (ret || !first_block)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = probe_block + block_in_page;
			ret = bmap(inode, &block);
			if (ret || !block)
				goto bad_bmap;

			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}

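/*
 * Illustrative example (numbers hypothetical, not taken from this file):
 * with PAGE_SIZE 4096 and 1KiB filesystem blocks (blkbits == 10),
 * blocks_per_page is 4. If bmap() maps file blocks 0..3 to on-disk
 * blocks 40..43, the run is contiguous and 4-block aligned, so one
 * extent is added and probe_block advances by a whole page. If block 2
 * instead mapped to on-disk block 57, the discontiguity path would bump
 * probe_block by one and reprobe, which is why a fragmented swapfile
 * activates with many small extents.
 */
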
static bool is_folio_zero_filled(struct folio *folio)
{
	unsigned int pos, last_pos;
	unsigned long *data;
	unsigned int i;

	last_pos = PAGE_SIZE / sizeof(*data) - 1;
	for (i = 0; i < folio_nr_pages(folio); i++) {
		data = kmap_local_folio(folio, i * PAGE_SIZE);
		/*
		 * Check the last word first, in case the page is zero-filled
		 * at the start and has non-zero data at the end, which is
		 * common in real-world workloads.
		 */
		if (data[last_pos]) {
			kunmap_local(data);
			return false;
		}
		for (pos = 0; pos < last_pos; pos++) {
			if (data[pos]) {
				kunmap_local(data);
				return false;
			}
		}
		kunmap_local(data);
	}

	return true;
}

static void swap_zeromap_folio_set(struct folio *folio)
{
	struct obj_cgroup *objcg = get_obj_cgroup_from_folio(folio);
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	int nr_pages = folio_nr_pages(folio);
	swp_entry_t entry;
	unsigned int i;

	for (i = 0; i < folio_nr_pages(folio); i++) {
		entry = page_swap_entry(folio_page(folio, i));
		set_bit(swp_offset(entry), sis->zeromap);
	}

	count_vm_events(SWPOUT_ZERO, nr_pages);
	if (objcg) {
		count_objcg_events(objcg, SWPOUT_ZERO, nr_pages);
		obj_cgroup_put(objcg);
	}
}

static void swap_zeromap_folio_clear(struct folio *folio)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	swp_entry_t entry;
	unsigned int i;

	for (i = 0; i < folio_nr_pages(folio); i++) {
		entry = page_swap_entry(folio_page(folio, i));
		clear_bit(swp_offset(entry), sis->zeromap);
	}
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	int ret;

	if (folio_free_swap(folio)) {
		folio_unlock(folio);
		return 0;
	}
	/*
	 * Arch code may have to preserve more data than just the page
	 * contents, e.g. memory tags.
	 */
	ret = arch_prepare_to_swap(folio);
	if (ret) {
		folio_mark_dirty(folio);
		folio_unlock(folio);
		return ret;
	}

	/*
	 * Use a bitmap (zeromap) to avoid doing IO for zero-filled pages.
	 * The bits in zeromap are protected by the locked swapcache folio
	 * and atomic updates are used to protect against read-modify-write
	 * corruption due to other zero swap entries seeing concurrent updates.
	 */
	if (is_folio_zero_filled(folio)) {
		swap_zeromap_folio_set(folio);
		folio_unlock(folio);
		return 0;
	} else {
		/*
		 * Clear bits this folio occupies in the zeromap to prevent
		 * zero data being read in from any previous zero writes that
		 * occupied the same swap entries.
		 */
		swap_zeromap_folio_clear(folio);
	}
	if (zswap_store(folio)) {
		folio_unlock(folio);
		return 0;
	}
	if (!mem_cgroup_zswap_writeback_enabled(folio_memcg(folio))) {
		folio_mark_dirty(folio);
		return AOP_WRITEPAGE_ACTIVATE;
	}

	__swap_writepage(folio, wbc);
	return 0;
}

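/*
 * Zeromap round trip, as a sketch of the expected flow (assuming the
 * usual reclaim and fault paths): when reclaim writes a zero-filled
 * folio through swap_writepage(), swap_zeromap_folio_set() marks its
 * swap offsets in sis->zeromap and no bio is ever issued. A later fault
 * that reaches swap_read_folio() hits swap_read_folio_zeromap(), which
 * zero-fills the folio from the bitmap alone. Real IO for those entries
 * only resumes once a non-zero write clears the bits via
 * swap_zeromap_folio_clear() above.
 */
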
static inline void count_swpout_vm_event(struct folio *folio)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (unlikely(folio_test_pmd_mappable(folio))) {
		count_memcg_folio_events(folio, THP_SWPOUT, 1);
		count_vm_event(THP_SWPOUT);
	}
	count_mthp_stat(folio_order(folio), MTHP_STAT_SWPOUT);
#endif
	count_vm_events(PSWPOUT, folio_nr_pages(folio));
}

#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
static void bio_associate_blkg_from_page(struct bio *bio, struct folio *folio)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *memcg;

	memcg = folio_memcg(folio);
	if (!memcg)
		return;

	rcu_read_lock();
	css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys);
	bio_associate_blkg_from_css(bio, css);
	rcu_read_unlock();
}
#else
#define bio_associate_blkg_from_page(bio, folio)		do { } while (0)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */

struct swap_iocb {
	struct kiocb		iocb;
	struct bio_vec		bvec[SWAP_CLUSTER_MAX];
	int			pages;
	int			len;
};
static mempool_t *sio_pool;

int sio_pool_init(void)
{
	if (!sio_pool) {
		mempool_t *pool = mempool_create_kmalloc_pool(
			SWAP_CLUSTER_MAX, sizeof(struct swap_iocb));
		if (cmpxchg(&sio_pool, NULL, pool))
			mempool_destroy(pool);
	}
	if (!sio_pool)
		return -ENOMEM;
	return 0;
}

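/*
 * Note on the lockless initialisation above: if two callers race (say,
 * two concurrent activations of SWP_FS_OPS swapfiles, an assumed
 * scenario), both may allocate a pool, but cmpxchg() publishes exactly
 * one and the loser destroys its duplicate. Repeat calls see sio_pool
 * already set and return 0, so the function is idempotent and needs no
 * lock.
 */
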
static void sio_write_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	struct page *page = sio->bvec[0].bv_page;
	int p;

	if (ret != sio->len) {
		/*
		 * In the case of swap-over-nfs, this can be a temporary
		 * failure if the system has limited memory for allocating
		 * transmit buffers. Mark the pages dirty, clear PG_reclaim
		 * to avoid folio_rotate_reclaimable(), and rate-limit the
		 * error messages.
		 */
		pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n",
				   ret, swap_dev_pos(page_swap_entry(page)));
		for (p = 0; p < sio->pages; p++) {
			page = sio->bvec[p].bv_page;
			set_page_dirty(page);
			ClearPageReclaim(page);
		}
	}

	for (p = 0; p < sio->pages; p++)
		end_page_writeback(sio->bvec[p].bv_page);

	mempool_free(sio, sio_pool);
}

static void swap_writepage_fs(struct folio *folio, struct writeback_control *wbc)
{
	struct swap_iocb *sio = NULL;
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	struct file *swap_file = sis->swap_file;
	loff_t pos = swap_dev_pos(folio->swap);

	count_swpout_vm_event(folio);
	folio_start_writeback(folio);
	folio_unlock(folio);
	if (wbc->swap_plug)
		sio = *wbc->swap_plug;
	if (sio) {
		if (sio->iocb.ki_filp != swap_file ||
		    sio->iocb.ki_pos + sio->len != pos) {
			swap_write_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_NOIO);
		init_sync_kiocb(&sio->iocb, swap_file);
		sio->iocb.ki_complete = sio_write_complete;
		sio->iocb.ki_pos = pos;
		sio->pages = 0;
		sio->len = 0;
	}
	bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
	sio->len += folio_size(folio);
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !wbc->swap_plug) {
		swap_write_unplug(sio);
		sio = NULL;
	}
	if (wbc->swap_plug)
		*wbc->swap_plug = sio;
}

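/*
 * Plugging sketch (the caller shape is illustrative, not lifted from a
 * real call site): reclaim can batch writes to adjacent swapfile
 * offsets by threading a plug through the writeback_control:
 *
 *	struct swap_iocb *plug = NULL;
 *	struct writeback_control wbc = { .swap_plug = &plug, ... };
 *
 *	swap_writepage(&folio->page, &wbc);	(may buffer into *plug)
 *	... more swap_writepage() calls ...
 *	if (plug)
 *		swap_write_unplug(plug);	(flush the pending sio)
 *
 * A folio that is not contiguous with the pending sio (different file
 * or non-adjacent ki_pos) forces an unplug first, so each sio always
 * covers one contiguous range of at most SWAP_CLUSTER_MAX pages.
 */
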
static void swap_writepage_bdev_sync(struct folio *folio,
		struct writeback_control *wbc, struct swap_info_struct *sis)
{
	struct bio_vec bv;
	struct bio bio;

	bio_init(&bio, sis->bdev, &bv, 1,
		 REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc));
	bio.bi_iter.bi_sector = swap_folio_sector(folio);
	bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);

	bio_associate_blkg_from_page(&bio, folio);
	count_swpout_vm_event(folio);

	folio_start_writeback(folio);
	folio_unlock(folio);

	submit_bio_wait(&bio);
	__end_swap_bio_write(&bio);
}

static void swap_writepage_bdev_async(struct folio *folio,
		struct writeback_control *wbc, struct swap_info_struct *sis)
{
	struct bio *bio;

	bio = bio_alloc(sis->bdev, 1,
			REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc),
			GFP_NOIO);
	bio->bi_iter.bi_sector = swap_folio_sector(folio);
	bio->bi_end_io = end_swap_bio_write;
	bio_add_folio_nofail(bio, folio, folio_size(folio), 0);

	bio_associate_blkg_from_page(bio, folio);
	count_swpout_vm_event(folio);
	folio_start_writeback(folio);
	folio_unlock(folio);
	submit_bio(bio);
}

void __swap_writepage(struct folio *folio, struct writeback_control *wbc)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	unsigned long sis_flags = 0;

	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
	/*
	 * ->flags can be updated non-atomically (scan_swap_map_slots),
	 * but that will never affect SWP_FS_OPS, so the data_race
	 * is safe.
	 */
	sis_flags = data_race(sis->flags);
	trace_android_vh_swap_writepage(&sis_flags, &folio->page);
	if (sis_flags & SWP_FS_OPS)
		swap_writepage_fs(folio, wbc);
	/*
	 * ->flags can be updated non-atomically (scan_swap_map_slots),
	 * but that will never affect __SWP_WRITE_SYNCHRONOUS_IO, so the
	 * data_race is safe.
	 */
	else if (sis_flags & __SWP_WRITE_SYNCHRONOUS_IO)
		swap_writepage_bdev_sync(folio, wbc, sis);
	else
		swap_writepage_bdev_async(folio, wbc, sis);
}

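/*
 * To summarise the dispatch above: SWP_FS_OPS swapfiles go through the
 * filesystem's ->swap_rw via swap_writepage_fs(); devices flagged
 * __SWP_WRITE_SYNCHRONOUS_IO are written with an on-stack bio and
 * submit_bio_wait(); everything else takes the asynchronous bio path
 * and completes in end_swap_bio_write().
 */
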
void swap_write_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->pages, sio->len);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_write_complete(&sio->iocb, ret);
}

static void sio_read_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	int p;

	if (ret == sio->len) {
		for (p = 0; p < sio->pages; p++) {
			struct folio *folio = page_folio(sio->bvec[p].bv_page);

			folio_mark_uptodate(folio);
			folio_unlock(folio);
		}
		count_vm_events(PSWPIN, sio->pages);
	} else {
		for (p = 0; p < sio->pages; p++) {
			struct folio *folio = page_folio(sio->bvec[p].bv_page);

			folio_unlock(folio);
		}
		pr_alert_ratelimited("Read-error on swap-device\n");
	}
	mempool_free(sio, sio_pool);
}

static bool swap_read_folio_zeromap(struct folio *folio)
{
	int nr_pages = folio_nr_pages(folio);
	struct obj_cgroup *objcg;
	bool is_zeromap;

	/*
	 * Swapping in a large folio that is partially in the zeromap is not
	 * currently handled. Return true without marking the folio uptodate so
	 * that an IO error is emitted (e.g. do_swap_page() will sigbus).
	 */
	if (WARN_ON_ONCE(swap_zeromap_batch(folio->swap, nr_pages,
			&is_zeromap) != nr_pages))
		return true;

	if (!is_zeromap)
		return false;

	objcg = get_obj_cgroup_from_folio(folio);
	count_vm_events(SWPIN_ZERO, nr_pages);
	if (objcg) {
		count_objcg_events(objcg, SWPIN_ZERO, nr_pages);
		obj_cgroup_put(objcg);
	}

	folio_zero_range(folio, 0, folio_size(folio));
	folio_mark_uptodate(folio);
	return true;
}

static void swap_read_folio_fs(struct folio *folio, struct swap_iocb **plug)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	struct swap_iocb *sio = NULL;
	loff_t pos = swap_dev_pos(folio->swap);

	if (plug)
		sio = *plug;
	if (sio) {
		if (sio->iocb.ki_filp != sis->swap_file ||
		    sio->iocb.ki_pos + sio->len != pos) {
			swap_read_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_KERNEL);
		init_sync_kiocb(&sio->iocb, sis->swap_file);
		sio->iocb.ki_pos = pos;
		sio->iocb.ki_complete = sio_read_complete;
		sio->pages = 0;
		sio->len = 0;
	}
	bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
	sio->len += folio_size(folio);
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) {
		swap_read_unplug(sio);
		sio = NULL;
	}
	if (plug)
		*plug = sio;
}

static void swap_read_folio_bdev_sync(struct folio *folio,
		struct swap_info_struct *sis)
{
	struct bio_vec bv;
	struct bio bio;
	bool read = false;

	trace_android_rvh_swap_read_folio_bdev_sync(sis->bdev,
		swap_folio_sector(folio) + get_start_sect(sis->bdev),
		&folio->page, &read);
	if (read) {
		count_vm_events(PSWPIN, folio_nr_pages(folio));
		return;
	}

	bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = swap_folio_sector(folio);
	bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);
	/*
	 * Keep this task valid during swap readpage because the oom killer may
	 * attempt to access it in the page fault retry time check.
	 */
	get_task_struct(current);
	count_vm_events(PSWPIN, folio_nr_pages(folio));
	submit_bio_wait(&bio);
	trace_android_vh_swap_bio_charge(&bio);
	__end_swap_bio_read(&bio);
	put_task_struct(current);
}

static void swap_read_folio_bdev_async(struct folio *folio,
		struct swap_info_struct *sis)
{
	struct bio *bio;

	bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
	bio->bi_iter.bi_sector = swap_folio_sector(folio);
	bio->bi_end_io = end_swap_bio_read;
	bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
	count_vm_events(PSWPIN, folio_nr_pages(folio));
	submit_bio(bio);
}

void swap_read_folio(struct folio *folio, struct swap_iocb **plug)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	bool synchronous = sis->flags & SWP_SYNCHRONOUS_IO;
	bool workingset = folio_test_workingset(folio);
	unsigned long pflags;
	bool in_thrashing;

	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio) && !synchronous, folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_uptodate(folio), folio);

	/*
	 * Count submission time as memory stall and delay. When the device
	 * is congested, or the submitting cgroup IO-throttled, submission
	 * can be a significant part of overall IO time.
	 */
	if (workingset) {
		delayacct_thrashing_start(&in_thrashing);
		psi_memstall_enter(&pflags);
	}
	delayacct_swapin_start();

	if (swap_read_folio_zeromap(folio)) {
		folio_unlock(folio);
		goto finish;
	} else if (zswap_load(folio)) {
		folio_unlock(folio);
		goto finish;
	}

	/* We have to read from slower devices. Increase zswap protection. */
	zswap_folio_swapin(folio);

	if (data_race(sis->flags & SWP_FS_OPS)) {
		swap_read_folio_fs(folio, plug);
	} else if (synchronous) {
		swap_read_folio_bdev_sync(folio, sis);
	} else {
		swap_read_folio_bdev_async(folio, sis);
	}

finish:
	if (workingset) {
		delayacct_thrashing_end(&in_thrashing);
		psi_memstall_leave(&pflags);
	}
	delayacct_swapin_end();
}

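/*
 * Usage sketch (illustrative): a swapin path typically drives this with
 * a read plug so SWP_FS_OPS reads can be batched. The folio must be
 * locked and !uptodate, and in the swapcache unless the device is
 * SWP_SYNCHRONOUS_IO:
 *
 *	struct swap_iocb *splug = NULL;
 *
 *	swap_read_folio(folio, &splug);
 *	swap_read_unplug(splug);	(flush any pending sio)
 *
 * Block-device reads complete in end_swap_bio_read(), which marks the
 * folio uptodate and unlocks it; zeromap and zswap hits unlock the
 * folio here before any IO is issued.
 */
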
void __swap_read_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, ITER_DEST, sio->bvec, sio->pages, sio->len);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_read_complete(&sio->iocb, ret);
}
685