// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c
 *
 * The ext4_mpage_readpages() function here is intended to
 * replace mpage_readahead() in the general case, not just for
 * encrypted files.  It has some limitations (see below), where it
 * will fall back to block_read_full_folio(), but these limitations
 * should only be hit when page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>

#include "ext4.h"

#define NUM_PREALLOC_POST_READ_CTXS	128

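/*
 * Slab cache and mempool for bio_post_read_ctx allocations.  The mempool
 * keeps NUM_PREALLOC_POST_READ_CTXS contexts preallocated so that a context
 * can always be obtained without deadlocking under memory pressure.
 */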
static struct kmem_cache *bio_post_read_ctx_cache;
static mempool_t *bio_post_read_ctx_pool;

/* postprocessing steps for read bios */
enum bio_post_read_step {
	STEP_INITIAL = 0,
	STEP_DECRYPT,
	STEP_VERITY,
	STEP_MAX,
};

struct bio_post_read_ctx {
	struct bio *bio;
	struct work_struct work;
	unsigned int cur_step;
	unsigned int enabled_steps;
};

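/*
 * Complete the read for each page in the bio: mark it uptodate on success or
 * clear the uptodate flag on error, then unlock it.  Any attached post-read
 * context is returned to the mempool and the bio itself is released.
 */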
static void __read_end_io(struct bio *bio)
{
	struct page *page;
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, bio, iter_all) {
		page = bv->bv_page;

		if (bio->bi_status)
			ClearPageUptodate(page);
		else
			SetPageUptodate(page);
		unlock_page(page);
	}
	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx);

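/*
 * Workqueue function for fscrypt: decrypt the data in the bio's pages.  On
 * success, continue with the next post-read step; on failure, complete the
 * bio immediately.
 */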
static void decrypt_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;

	if (fscrypt_decrypt_bio(bio))
		bio_post_read_processing(ctx);
	else
		__read_end_io(bio);
}

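/*
 * Workqueue function for fs-verity: verify the data in the bio's pages
 * against the file's Merkle tree, then complete the read.
 */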
static void verity_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;

	/*
	 * fsverity_verify_bio() may call readahead() again, and although verity
	 * will be disabled for that, decryption may still be needed, causing
	 * another bio_post_read_ctx to be allocated.  So to guarantee that
	 * mempool_alloc() never deadlocks we must free the current ctx first.
	 * This is safe because verity is the last post-read step.
	 */
	BUILD_BUG_ON(STEP_VERITY + 1 != STEP_MAX);
	mempool_free(ctx, bio_post_read_ctx_pool);
	bio->bi_private = NULL;

	fsverity_verify_bio(bio);

	__read_end_io(bio);
}

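/*
 * Advance to the next enabled post-read step for this bio and queue the
 * corresponding work item.  If no further steps are enabled, complete the
 * read.
 */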
static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
	/*
	 * We use different work queues for decryption and for verity because
	 * verity may require reading metadata pages that need decryption, and
	 * we shouldn't recurse to the same workqueue.
	 */
	switch (++ctx->cur_step) {
	case STEP_DECRYPT:
		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
			INIT_WORK(&ctx->work, decrypt_work);
			fscrypt_enqueue_decrypt_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		fallthrough;
	case STEP_VERITY:
		if (ctx->enabled_steps & (1 << STEP_VERITY)) {
			INIT_WORK(&ctx->work, verity_work);
			fsverity_enqueue_verify_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		fallthrough;
	default:
		__read_end_io(ctx->bio);
	}
}

static bool bio_post_read_required(struct bio *bio)
{
	return bio->bi_private && !bio->bi_status;
}

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_folio().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	if (bio_post_read_required(bio)) {
		struct bio_post_read_ctx *ctx = bio->bi_private;

		ctx->cur_step = STEP_INITIAL;
		bio_post_read_processing(ctx);
		return;
	}
	__read_end_io(bio);
}

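/*
 * fs-verity verification covers only data pages, i.e. pages below i_size.
 * In ext4 the verity metadata is stored past EOF, so pages beyond i_size
 * must be readable without being verified.
 */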
static inline bool ext4_need_verity(const struct inode *inode, pgoff_t idx)
{
	return fsverity_active(inode) &&
	       idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

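/*
 * Decide which post-read steps (decryption, verity verification) the data
 * read by this bio will need.  If any are needed, allocate a
 * bio_post_read_ctx from the mempool and attach it via ->bi_private.
 */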
static void ext4_set_bio_post_read_ctx(struct bio *bio,
				       const struct inode *inode,
				       pgoff_t first_idx)
{
	unsigned int post_read_steps = 0;

	if (fscrypt_inode_uses_fs_layer_crypto(inode))
		post_read_steps |= 1 << STEP_DECRYPT;

	if (ext4_need_verity(inode, first_idx))
		post_read_steps |= 1 << STEP_VERITY;

	if (post_read_steps) {
		/* Due to the mempool, this never fails. */
		struct bio_post_read_ctx *ctx =
			mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);

		ctx->bio = bio;
		ctx->enabled_steps = post_read_steps;
		bio->bi_private = ctx;
	}
}

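/*
 * Return the offset up to which this file may be read.  For verity files
 * this is s_maxbytes rather than i_size, so that the verity metadata stored
 * beyond EOF can be read; otherwise reads are limited to i_size.
 */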
static inline loff_t ext4_readpage_limit(struct inode *inode)
{
	if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
		return inode->i_sb->s_maxbytes;

	return i_size_read(inode);
}

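/*
 * Read pages from the file: either the pages described by the readahead
 * control @rac, or the single locked @page if @rac is NULL.  Blocks are
 * mapped with ext4_map_blocks() and contiguous runs are packed into large
 * bios; pages that cannot be handled this way (buffers attached,
 * discontiguous blocks, data after a hole) fall back to
 * block_read_full_folio().
 */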
int ext4_mpage_readpages(struct inode *inode,
		struct readahead_control *rac, struct page *page)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;

	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t next_block;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	struct block_device *bdev = inode->i_sb->s_bdev;
	int length;
	unsigned relative_block = 0;
	struct ext4_map_blocks map;
	unsigned int nr_pages = rac ? readahead_count(rac) : 1;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

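	/*
	 * For each requested page, map its blocks (reusing the previous
	 * ext4_map_blocks() result where possible) and add the page to a bio.
	 */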
	for (; nr_pages; nr_pages--) {
		int fully_mapped = 1;
		unsigned first_hole = blocks_per_page;

		if (rac) {
			page = readahead_page(rac);
			prefetchw(&page->flags);
		}

		if (page_has_buffers(page))
			goto confused;

		block_in_file = next_block =
			(sector_t)page->index << (PAGE_SHIFT - blkbits);
		last_block = block_in_file + nr_pages * blocks_per_page;
		last_block_in_file = (ext4_readpage_limit(inode) +
				      blocksize - 1) >> blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;
		page_block = 0;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & EXT4_MAP_MAPPED) &&
		    block_in_file > map.m_lblk &&
		    block_in_file < (map.m_lblk + map.m_len)) {
			unsigned map_offset = block_in_file - map.m_lblk;
			unsigned last = map.m_len - map_offset;

			for (relative_block = 0; ; relative_block++) {
				if (relative_block == last) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				}
				if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk + map_offset +
					relative_block;
				page_block++;
				block_in_file++;
			}
		}

		/*
		 * Then do more ext4_map_blocks() calls until we are
		 * done with this page.
		 */
		while (page_block < blocks_per_page) {
			if (block_in_file < last_block) {
				map.m_lblk = block_in_file;
				map.m_len = last_block - block_in_file;

				if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
				set_error_page:
					SetPageError(page);
					zero_user_segment(page, 0,
							  PAGE_SIZE);
					unlock_page(page);
					goto next_page;
				}
			}
			if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
				fully_mapped = 0;
				if (first_hole == blocks_per_page)
					first_hole = page_block;
				page_block++;
				block_in_file++;
				continue;
			}
			if (first_hole != blocks_per_page)
				goto confused;		/* hole -> non-hole */

			/* Contiguous blocks? */
			if (page_block && blocks[page_block-1] != map.m_pblk-1)
				goto confused;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == map.m_len) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				} else if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk+relative_block;
				page_block++;
				block_in_file++;
			}
		}
		if (first_hole != blocks_per_page) {
			zero_user_segment(page, first_hole << blkbits,
					  PAGE_SIZE);
			if (first_hole == 0) {
				if (ext4_need_verity(inode, page->index) &&
				    !fsverity_verify_page(page))
					goto set_error_page;
				SetPageUptodate(page);
				unlock_page(page);
				goto next_page;
			}
		} else if (fully_mapped) {
			SetPageMappedToDisk(page);
		}
		if (fully_mapped && blocks_per_page == 1 &&
		    !PageUptodate(page) && cleancache_get_page(page) == 0) {
			SetPageUptodate(page);
			goto confused;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != blocks[0] - 1 ||
			    !fscrypt_mergeable_bio(bio, inode, next_block))) {
		submit_and_realloc:
			submit_bio(bio);
			bio = NULL;
		}
		if (bio == NULL) {
			/*
			 * bio_alloc will _always_ be able to allocate a bio if
			 * __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
			 */
			bio = bio_alloc(bdev, bio_max_segs(nr_pages),
					REQ_OP_READ, GFP_KERNEL);
			fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
						  GFP_KERNEL);
			ext4_set_bio_post_read_ctx(bio, inode, page->index);
			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
			bio->bi_end_io = mpage_end_io;
			if (rac)
				bio->bi_opf |= REQ_RAHEAD;
		}

		length = first_hole << blkbits;
		if (bio_add_page(bio, page, length, 0) < length)
			goto submit_and_realloc;

		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
		     (relative_block == map.m_len)) ||
		    (first_hole != blocks_per_page)) {
			submit_bio(bio);
			bio = NULL;
		} else
			last_block_in_bio = blocks[blocks_per_page - 1];
		goto next_page;
	confused:
		if (bio) {
			submit_bio(bio);
			bio = NULL;
		}
		if (!PageUptodate(page))
			block_read_full_folio(page_folio(page), ext4_get_block);
		else
			unlock_page(page);
	next_page:
		if (rac)
			put_page(page);
	}
	if (bio)
		submit_bio(bio);
	return 0;
}

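/*
 * Set up the slab cache and mempool used to allocate post-read contexts.
 */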
int __init ext4_init_post_read_processing(void)
{
	bio_post_read_ctx_cache =
		kmem_cache_create("ext4_bio_post_read_ctx",
				  sizeof(struct bio_post_read_ctx), 0, 0, NULL);
	if (!bio_post_read_ctx_cache)
		goto fail;
	bio_post_read_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
					 bio_post_read_ctx_cache);
	if (!bio_post_read_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
	return -ENOMEM;
}

void ext4_exit_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}