/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c
 *
 * The ext4_mpage_readpages() function here is intended to replace
 * mpage_readpages() in the general case, not just for encrypted
 * files.  It has some limitations (see below), where it will fall
 * back to block_read_full_page(), but these limitations should only
 * be hit when page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
 *
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>

#include "ext4.h"
#include <trace/events/android_fs.h>

/*
 * Call ext4_decrypt on every single page, reusing the encryption
 * context.
 */
static void completion_pages(struct work_struct *work)
{
#ifdef CONFIG_EXT4_FS_ENCRYPTION
	struct ext4_crypto_ctx *ctx =
		container_of(work, struct ext4_crypto_ctx, r.work);
	struct bio	*bio	= ctx->r.bio;
	struct bio_vec	*bv;
	int		i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;

		int ret = ext4_decrypt(page);
		if (ret) {
			WARN_ON_ONCE(1);
			SetPageError(page);
		} else
			SetPageUptodate(page);
		unlock_page(page);
	}
	ext4_release_crypto_ctx(ctx);
	bio_put(bio);
#else
	BUG();
#endif
}

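/*
 * The submission path below stashes the crypto context in
 * bio->bi_private, so a non-NULL bi_private marks a bio whose pages
 * still need to be decrypted on completion.
 */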
static inline bool ext4_bio_encrypted(struct bio *bio)
{
#ifdef CONFIG_EXT4_FS_ENCRYPTION
	return unlikely(bio->bi_private != NULL);
#else
	return false;
#endif
}

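/*
 * Emit the Android fs "dataread end" tracepoint for a completed read
 * bio, identified by the inode and offset of its first page.
 */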
static void
ext4_trace_read_completion(struct bio *bio)
{
	struct page *first_page = bio->bi_io_vec[0].bv_page;

	if (first_page != NULL)
		trace_android_fs_dataread_end(first_page->mapping->host,
					      page_offset(first_page),
					      bio->bi_iter.bi_size);
}

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	struct bio_vec *bv;
	int i;

	if (trace_android_fs_dataread_start_enabled())
		ext4_trace_read_completion(bio);

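	/*
	 * Encrypted payloads are decrypted off the completion path: the
	 * bio is handed to ext4_read_workqueue, and completion_pages()
	 * unlocks each page once ext4_decrypt() has run on it.
	 */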
	if (ext4_bio_encrypted(bio)) {
		struct ext4_crypto_ctx *ctx = bio->bi_private;

		if (bio->bi_error) {
			ext4_release_crypto_ctx(ctx);
		} else {
			INIT_WORK(&ctx->r.work, completion_pages);
			ctx->r.bio = bio;
			queue_work(ext4_read_workqueue, &ctx->r.work);
			return;
		}
	}
	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;

		if (!bio->bi_error) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}

	bio_put(bio);
}

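/*
 * Emit the Android fs "dataread start" tracepoint (when enabled), then
 * submit the bio for reading.
 */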
static void
ext4_submit_bio_read(struct bio *bio)
{
	if (trace_android_fs_dataread_start_enabled()) {
		struct page *first_page = bio->bi_io_vec[0].bv_page;

		if (first_page != NULL) {
			char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];

			path = android_fstrace_get_pathname(pathbuf,
						    MAX_TRACE_PATHBUF_LEN,
						    first_page->mapping->host);
			trace_android_fs_dataread_start(
				first_page->mapping->host,
				page_offset(first_page),
				bio->bi_iter.bi_size,
				current->pid,
				path,
				current->comm);
		}
	}
	submit_bio(READ, bio);
}

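/*
 * Serves both the ->readpage path (a single locked @page, with @pages
 * == NULL) and the ->readpages path (@page == NULL, with @nr_pages
 * pages chained on the @pages list).  Each page is either added to a
 * read bio or handed to the buffer_head fallback, and is unlocked when
 * its read completes.
 */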
int ext4_mpage_readpages(struct address_space *mapping,
			 struct list_head *pages, struct page *page,
			 unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;

	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	struct block_device *bdev = inode->i_sb->s_bdev;
	int length;
	unsigned relative_block = 0;
	struct ext4_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {
		int fully_mapped = 1;
		unsigned first_hole = blocks_per_page;

		prefetchw(&page->flags);
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping, page->index,
				  mapping_gfp_constraint(mapping, GFP_KERNEL)))
				goto next_page;
		}

		if (page_has_buffers(page))
			goto confused;

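		/*
		 * Work out which blocks this page covers and clamp the
		 * range to the end of the file.
		 */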
		block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
		last_block = block_in_file + nr_pages * blocks_per_page;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;
		page_block = 0;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & EXT4_MAP_MAPPED) &&
		    block_in_file > map.m_lblk &&
		    block_in_file < (map.m_lblk + map.m_len)) {
			unsigned map_offset = block_in_file - map.m_lblk;
			unsigned last = map.m_len - map_offset;

			for (relative_block = 0; ; relative_block++) {
				if (relative_block == last) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				}
				if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk + map_offset +
					relative_block;
				page_block++;
				block_in_file++;
			}
		}

		/*
		 * Then do more ext4_map_blocks() calls until we are
		 * done with this page.
		 */
		while (page_block < blocks_per_page) {
			if (block_in_file < last_block) {
				map.m_lblk = block_in_file;
				map.m_len = last_block - block_in_file;

				if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
				set_error_page:
					SetPageError(page);
					zero_user_segment(page, 0,
							  PAGE_CACHE_SIZE);
					unlock_page(page);
					goto next_page;
				}
			}
			if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
				fully_mapped = 0;
				if (first_hole == blocks_per_page)
					first_hole = page_block;
				page_block++;
				block_in_file++;
				continue;
			}
			if (first_hole != blocks_per_page)
				goto confused;		/* hole -> non-hole */

			/* Contiguous blocks? */
			if (page_block && blocks[page_block-1] != map.m_pblk-1)
				goto confused;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == map.m_len) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				} else if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk+relative_block;
				page_block++;
				block_in_file++;
			}
		}
		if (first_hole != blocks_per_page) {
			zero_user_segment(page, first_hole << blkbits,
					  PAGE_CACHE_SIZE);
			if (first_hole == 0) {
				SetPageUptodate(page);
				unlock_page(page);
				goto next_page;
			}
		} else if (fully_mapped) {
			SetPageMappedToDisk(page);
		}
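		/*
		 * For a fully-mapped page covered by a single block, try
		 * cleancache first; on a hit the page is already populated,
		 * so mark it uptodate and take the no-I/O exit through the
		 * "confused" path, which will just unlock it.
		 */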
		if (fully_mapped && blocks_per_page == 1 &&
		    !PageUptodate(page) && cleancache_get_page(page) == 0) {
			SetPageUptodate(page);
			goto confused;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != blocks[0] - 1)) {
		submit_and_realloc:
			ext4_submit_bio_read(bio);
			bio = NULL;
		}
		if (bio == NULL) {
			struct ext4_crypto_ctx *ctx = NULL;

			if (ext4_encrypted_inode(inode) &&
			    S_ISREG(inode->i_mode)) {
				ctx = ext4_get_crypto_ctx(inode, GFP_NOFS);
				if (IS_ERR(ctx))
					goto set_error_page;
			}
			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, BIO_MAX_PAGES));
			if (!bio) {
				if (ctx)
					ext4_release_crypto_ctx(ctx);
				goto set_error_page;
			}
			bio->bi_bdev = bdev;
			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
			bio->bi_end_io = mpage_end_io;
			bio->bi_private = ctx;
		}

		length = first_hole << blkbits;
		if (bio_add_page(bio, page, length, 0) < length)
			goto submit_and_realloc;

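		/*
		 * Submit now if the mapping reported a boundary or the page
		 * ends in a hole; otherwise note where this bio left off so
		 * the next page may extend it.
		 */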
		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
		     (relative_block == map.m_len)) ||
		    (first_hole != blocks_per_page)) {
			ext4_submit_bio_read(bio);
			bio = NULL;
		} else
			last_block_in_bio = blocks[blocks_per_page - 1];
		goto next_page;
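		/*
		 * Fallback: flush any pending bio and let the buffer_head
		 * based block_read_full_page() handle this page.
		 */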
	confused:
		if (bio) {
			ext4_submit_bio_read(bio);
			bio = NULL;
		}
		if (!PageUptodate(page))
			block_read_full_page(page, ext4_get_block);
		else
			unlock_page(page);
	next_page:
		if (pages)
			page_cache_release(page);
	}
	BUG_ON(pages && !list_empty(pages));
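	/* Flush whatever is left in the last, still-open bio. */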
	if (bio)
		ext4_submit_bio_read(bio);
	return 0;
}