// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c
 *
 * The ext4_mpage_readpages() function here is intended to replace
 * mpage_readpages() in the general case, not just for encrypted files.
 * It has some limitations (see below), where it will fall back to
 * block_read_full_page(), but these limitations should only be hit
 * when page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 *
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>

#include "ext4.h"
#include <trace/events/android_fs.h>

#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static mempool_t *bio_post_read_ctx_pool;

/* postprocessing steps for read bios */
enum bio_post_read_step {
	STEP_INITIAL = 0,
	STEP_DECRYPT,
	STEP_VERITY,
	STEP_MAX,
};

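/*
 * Per-bio context describing which post-read steps (decryption and/or
 * fs-verity verification) still have to run before the pages in ->bio
 * may be marked uptodate.  Allocated from bio_post_read_ctx_pool and
 * attached to the bio via ->bi_private.
 */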
struct bio_post_read_ctx {
	struct bio *bio;
	struct work_struct work;
	unsigned int cur_step;
	unsigned int enabled_steps;
};

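/*
 * Final completion step: mark each page uptodate (or not, if the bio failed
 * or any post-read step flagged an error), unlock it, release the post-read
 * context if one was attached, and drop the bio.
 */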
static void __read_end_io(struct bio *bio)
{
	struct page *page;
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, bio, iter_all) {
		page = bv->bv_page;

		/* PG_error was set if any post_read step failed */
		if (bio->bi_status || PageError(page)) {
			ClearPageUptodate(page);
			/* will re-read again later */
			ClearPageError(page);
		} else {
			SetPageUptodate(page);
		}
		unlock_page(page);
	}
	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx);

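/* Decryption step, run from the fscrypt decryption workqueue. */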
static void decrypt_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);

	fscrypt_decrypt_bio(ctx->bio);

	bio_post_read_processing(ctx);
}

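/* fs-verity verification step, run from the fsverity workqueue. */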
static void verity_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;

	/*
	 * fsverity_verify_bio() may call readpages() again, and although verity
	 * will be disabled for that, decryption may still be needed, causing
	 * another bio_post_read_ctx to be allocated.  So to guarantee that
	 * mempool_alloc() never deadlocks we must free the current ctx first.
	 * This is safe because verity is the last post-read step.
	 */
	BUILD_BUG_ON(STEP_VERITY + 1 != STEP_MAX);
	mempool_free(ctx, bio_post_read_ctx_pool);
	bio->bi_private = NULL;

	fsverity_verify_bio(bio);

	__read_end_io(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
	/*
	 * We use different work queues for decryption and for verity because
	 * verity may require reading metadata pages that need decryption, and
	 * we shouldn't recurse to the same workqueue.
	 */
	switch (++ctx->cur_step) {
	case STEP_DECRYPT:
		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
			INIT_WORK(&ctx->work, decrypt_work);
			fscrypt_enqueue_decrypt_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		/* fall-through */
	case STEP_VERITY:
		if (ctx->enabled_steps & (1 << STEP_VERITY)) {
			INIT_WORK(&ctx->work, verity_work);
			fsverity_enqueue_verify_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		/* fall-through */
	default:
		__read_end_io(ctx->bio);
	}
}

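/*
 * Post-read processing is only needed if the bio completed successfully and
 * a bio_post_read_ctx was attached (i.e. decryption and/or verity is enabled
 * for these pages).
 */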
static bool bio_post_read_required(struct bio *bio)
{
	return bio->bi_private && !bio->bi_status;
}

static void
ext4_trace_read_completion(struct bio *bio)
{
	struct page *first_page = bio->bi_io_vec[0].bv_page;

	if (first_page != NULL)
		trace_android_fs_dataread_end(first_page->mapping->host,
					      page_offset(first_page),
					      bio->bi_iter.bi_size);
}

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this? If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	if (trace_android_fs_dataread_start_enabled())
		ext4_trace_read_completion(bio);

	if (bio_post_read_required(bio)) {
		struct bio_post_read_ctx *ctx = bio->bi_private;

		ctx->cur_step = STEP_INITIAL;
		bio_post_read_processing(ctx);
		return;
	}
	__read_end_io(bio);
}

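/*
 * fs-verity verification only applies to pages within i_size; pages lying
 * entirely beyond EOF are never verified here.
 */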
static inline bool ext4_need_verity(const struct inode *inode, pgoff_t idx)
{
	return fsverity_active(inode) &&
	       idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

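/*
 * Work out which post-read steps this bio needs (fs-layer decryption and/or
 * fs-verity verification).  If any are needed, allocate a context from the
 * mempool and attach it via bio->bi_private; otherwise return NULL.
 */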
static struct bio_post_read_ctx *get_bio_post_read_ctx(struct inode *inode,
							struct bio *bio,
							pgoff_t first_idx)
{
	unsigned int post_read_steps = 0;
	struct bio_post_read_ctx *ctx = NULL;

	if (fscrypt_inode_uses_fs_layer_crypto(inode))
		post_read_steps |= 1 << STEP_DECRYPT;

	if (ext4_need_verity(inode, first_idx))
		post_read_steps |= 1 << STEP_VERITY;

	if (post_read_steps) {
		ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
		if (!ctx)
			return ERR_PTR(-ENOMEM);
		ctx->bio = bio;
		ctx->enabled_steps = post_read_steps;
		bio->bi_private = ctx;
	}
	return ctx;
}

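/*
 * Reads normally stop at i_size, but for verity files (or while verity is
 * being enabled) reads up to s_maxbytes are allowed so that the Merkle tree
 * blocks stored past EOF can be read.
 */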
static inline loff_t ext4_readpage_limit(struct inode *inode)
{
	if (IS_ENABLED(CONFIG_FS_VERITY) &&
	    (IS_VERITY(inode) || ext4_verity_in_progress(inode)))
		return inode->i_sb->s_maxbytes;

	return i_size_read(inode);
}

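/*
 * Emit the android_fs dataread start tracepoint (if enabled), then submit
 * the read bio.
 */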
static void
ext4_submit_bio_read(struct bio *bio)
{
	if (trace_android_fs_dataread_start_enabled()) {
		struct page *first_page = bio->bi_io_vec[0].bv_page;

		if (first_page != NULL) {
			char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];

			path = android_fstrace_get_pathname(pathbuf,
						    MAX_TRACE_PATHBUF_LEN,
						    first_page->mapping->host);
			trace_android_fs_dataread_start(
				first_page->mapping->host,
				page_offset(first_page),
				bio->bi_iter.bi_size,
				current->pid,
				path,
				current->comm);
		}
	}
	submit_bio(bio);
}

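/*
 * Read a single page (@page, when @pages is NULL) or a readahead batch
 * (@nr_pages pages on the @pages list) by packing runs of physically
 * contiguous blocks into large read bios.  Pages that cannot be handled
 * that way (see the comment at the top of this file) are punted to
 * block_read_full_page() via the "confused" path.
 */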
int ext4_mpage_readpages(struct address_space *mapping,
			 struct list_head *pages, struct page *page,
			 unsigned nr_pages, bool is_readahead)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;

	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t next_block;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	struct block_device *bdev = inode->i_sb->s_bdev;
	int length;
	unsigned relative_block = 0;
	struct ext4_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

	for (; nr_pages; nr_pages--) {
		int fully_mapped = 1;
		unsigned first_hole = blocks_per_page;

		if (pages) {
			page = lru_to_page(pages);

			prefetchw(&page->flags);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping, page->index,
				  readahead_gfp_mask(mapping)))
				goto next_page;
		}

		if (page_has_buffers(page))
			goto confused;

		block_in_file = next_block =
			(sector_t)page->index << (PAGE_SHIFT - blkbits);
		last_block = block_in_file + nr_pages * blocks_per_page;
		last_block_in_file = (ext4_readpage_limit(inode) +
				      blocksize - 1) >> blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;
		page_block = 0;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & EXT4_MAP_MAPPED) &&
		    block_in_file > map.m_lblk &&
		    block_in_file < (map.m_lblk + map.m_len)) {
			unsigned map_offset = block_in_file - map.m_lblk;
			unsigned last = map.m_len - map_offset;

			for (relative_block = 0; ; relative_block++) {
				if (relative_block == last) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				}
				if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk + map_offset +
					relative_block;
				page_block++;
				block_in_file++;
			}
		}

		/*
		 * Then do more ext4_map_blocks() calls until we are
		 * done with this page.
		 */
		while (page_block < blocks_per_page) {
			if (block_in_file < last_block) {
				map.m_lblk = block_in_file;
				map.m_len = last_block - block_in_file;

				if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
				set_error_page:
					SetPageError(page);
					zero_user_segment(page, 0,
							  PAGE_SIZE);
					unlock_page(page);
					goto next_page;
				}
			}
			if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
				fully_mapped = 0;
				if (first_hole == blocks_per_page)
					first_hole = page_block;
				page_block++;
				block_in_file++;
				continue;
			}
			if (first_hole != blocks_per_page)
				goto confused;		/* hole -> non-hole */

			/* Contiguous blocks? */
			if (page_block && blocks[page_block-1] != map.m_pblk-1)
				goto confused;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == map.m_len) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				} else if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk+relative_block;
				page_block++;
				block_in_file++;
			}
		}
		if (first_hole != blocks_per_page) {
			zero_user_segment(page, first_hole << blkbits,
					  PAGE_SIZE);
			if (first_hole == 0) {
				if (ext4_need_verity(inode, page->index) &&
				    !fsverity_verify_page(page))
					goto set_error_page;
				SetPageUptodate(page);
				unlock_page(page);
				goto next_page;
			}
		} else if (fully_mapped) {
			SetPageMappedToDisk(page);
		}
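		/*
		 * If the entire page was found in cleancache it is already
		 * populated; take the "confused" path, which will just
		 * unlock the now-uptodate page.
		 */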
		if (fully_mapped && blocks_per_page == 1 &&
		    !PageUptodate(page) && cleancache_get_page(page) == 0) {
			SetPageUptodate(page);
			goto confused;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != blocks[0] - 1 ||
			    !fscrypt_mergeable_bio(bio, inode, next_block))) {
		submit_and_realloc:
			ext4_submit_bio_read(bio);
			bio = NULL;
		}
		if (bio == NULL) {
			struct bio_post_read_ctx *ctx;

			bio = bio_alloc(GFP_KERNEL,
					min_t(int, nr_pages, BIO_MAX_PAGES));
			if (!bio)
				goto set_error_page;
			fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
						  GFP_KERNEL);
			ctx = get_bio_post_read_ctx(inode, bio, page->index);
			if (IS_ERR(ctx)) {
				bio_put(bio);
				bio = NULL;
				goto set_error_page;
			}
			bio_set_dev(bio, bdev);
			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
			bio->bi_end_io = mpage_end_io;
			bio->bi_private = ctx;
			bio_set_op_attrs(bio, REQ_OP_READ,
					 is_readahead ? REQ_RAHEAD : 0);
		}

		length = first_hole << blkbits;
		if (bio_add_page(bio, page, length, 0) < length)
			goto submit_and_realloc;

		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
		     (relative_block == map.m_len)) ||
		    (first_hole != blocks_per_page)) {
			ext4_submit_bio_read(bio);
			bio = NULL;
		} else
			last_block_in_bio = blocks[blocks_per_page - 1];
		goto next_page;
	confused:
		if (bio) {
			ext4_submit_bio_read(bio);
			bio = NULL;
		}
		if (!PageUptodate(page))
			block_read_full_page(page, ext4_get_block);
		else
			unlock_page(page);
	next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		ext4_submit_bio_read(bio);
	return 0;
}

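/* Set up the slab cache and mempool used for bio post-read contexts. */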
int __init ext4_init_post_read_processing(void)
{
	bio_post_read_ctx_cache =
		kmem_cache_create("ext4_bio_post_read_ctx",
				  sizeof(struct bio_post_read_ctx), 0, 0, NULL);
	if (!bio_post_read_ctx_cache)
		goto fail;
	bio_post_read_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
					 bio_post_read_ctx_cache);
	if (!bio_post_read_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
	return -ENOMEM;
}

void ext4_exit_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}