/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c
 *
 * The ext4_mpage_readpages() function here is intended to replace
 * mpage_readpages() in the general case, not just for encrypted files.
 * It has some limitations (see below), where it will fall back to
 * block_read_full_page(), but these limitations should only be hit
 * when page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 *
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>

#include "ext4.h"
#include <trace/events/android_fs.h>

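/*
 * An encrypted read carries its fscrypt context in bio->bi_private (set
 * up in ext4_mpage_readpages()), so a non-NULL bi_private marks the bio
 * as needing decryption on completion.
 */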
static inline bool ext4_bio_encrypted(struct bio *bio)
{
#ifdef CONFIG_EXT4_FS_ENCRYPTION
	return unlikely(bio->bi_private != NULL);
#else
	return false;
#endif
}

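/*
 * Emit the dataread_end tracepoint for the range covered by this bio,
 * keyed off the bio's first page.
 */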
static void
ext4_trace_read_completion(struct bio *bio)
{
	struct page *first_page = bio->bi_io_vec[0].bv_page;

	if (first_page != NULL)
		trace_android_fs_dataread_end(first_page->mapping->host,
					      page_offset(first_page),
					      bio->bi_iter.bi_size);
}

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	struct bio_vec *bv;
	int i;

	if (trace_android_fs_dataread_start_enabled())
		ext4_trace_read_completion(bio);

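	/*
	 * Successful encrypted reads are handed off to the fscrypt
	 * workqueue for decryption; those pages are unlocked only after
	 * the decryption work has completed.
	 */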
	if (ext4_bio_encrypted(bio)) {
		if (bio->bi_error) {
			fscrypt_release_ctx(bio->bi_private);
		} else {
			fscrypt_enqueue_decrypt_bio(bio->bi_private, bio);
			return;
		}
	}
	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;

		if (!bio->bi_error) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}

	bio_put(bio);
}

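/*
 * Emit the matching dataread_start tracepoint (with the file's path and
 * the submitting task) before handing the bio to the block layer.
 */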
static void
ext4_submit_bio_read(struct bio *bio)
{
	if (trace_android_fs_dataread_start_enabled()) {
		struct page *first_page = bio->bi_io_vec[0].bv_page;

		if (first_page != NULL) {
			char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];

			path = android_fstrace_get_pathname(pathbuf,
						    MAX_TRACE_PATHBUF_LEN,
						    first_page->mapping->host);
			trace_android_fs_dataread_start(
				first_page->mapping->host,
				page_offset(first_page),
				bio->bi_iter.bi_size,
				current->pid,
				path,
				current->comm);
		}
	}
	submit_bio(bio);
}

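/*
 * Read pages for the given mapping: either a single locked page
 * (->readpage, pages is NULL) or up to nr_pages pages taken from the
 * pages list (->readpages).  Pages are unlocked as their reads
 * complete, in mpage_end_io().
 */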
int ext4_mpage_readpages(struct address_space *mapping,
			 struct list_head *pages, struct page *page,
			 unsigned nr_pages)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;

	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	struct block_device *bdev = inode->i_sb->s_bdev;
	int length;
	unsigned relative_block = 0;
	struct ext4_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

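	/*
	 * map is carried across pages: one ext4_map_blocks() call may map
	 * more blocks than the current page needs, and the remainder
	 * seeds the next iteration (the "previous result" case below).
	 */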
	for (; nr_pages; nr_pages--) {
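		/*
		 * first_hole is the index of the first unmapped block in
		 * this page; blocks_per_page means no hole seen so far.
		 */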
		int fully_mapped = 1;
		unsigned first_hole = blocks_per_page;

		prefetchw(&page->flags);
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping, page->index,
				  readahead_gfp_mask(mapping)))
				goto next_page;
		}

		if (page_has_buffers(page))
			goto confused;

		block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
		last_block = block_in_file + nr_pages * blocks_per_page;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;
		page_block = 0;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & EXT4_MAP_MAPPED) &&
		    block_in_file > map.m_lblk &&
		    block_in_file < (map.m_lblk + map.m_len)) {
			unsigned map_offset = block_in_file - map.m_lblk;
			unsigned last = map.m_len - map_offset;

			for (relative_block = 0; ; relative_block++) {
				if (relative_block == last) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				}
				if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk + map_offset +
					relative_block;
				page_block++;
				block_in_file++;
			}
		}

		/*
		 * Then do more ext4_map_blocks() calls until we are
		 * done with this page.
		 */
		while (page_block < blocks_per_page) {
			if (block_in_file < last_block) {
				map.m_lblk = block_in_file;
				map.m_len = last_block - block_in_file;

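				/* Lookup only: flags == 0 never allocates blocks. */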
				if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
				set_error_page:
					SetPageError(page);
					zero_user_segment(page, 0,
							  PAGE_SIZE);
					unlock_page(page);
					goto next_page;
				}
			}
			if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
				fully_mapped = 0;
				if (first_hole == blocks_per_page)
					first_hole = page_block;
				page_block++;
				block_in_file++;
				continue;
			}
			if (first_hole != blocks_per_page)
				goto confused;		/* hole -> non-hole */

			/* Contiguous blocks? */
			if (page_block && blocks[page_block-1] != map.m_pblk-1)
				goto confused;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == map.m_len) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				} else if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk+relative_block;
				page_block++;
				block_in_file++;
			}
		}
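		/*
		 * Zero out a trailing hole; a page that is entirely a hole
		 * becomes uptodate with no I/O at all.
		 */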
		if (first_hole != blocks_per_page) {
			zero_user_segment(page, first_hole << blkbits,
					  PAGE_SIZE);
			if (first_hole == 0) {
				SetPageUptodate(page);
				unlock_page(page);
				goto next_page;
			}
		} else if (fully_mapped) {
			SetPageMappedToDisk(page);
		}
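		/*
		 * If cleancache can supply the page there is nothing to
		 * read; jumping to "confused" flushes any pending bio and
		 * just unlocks the now-uptodate page.
		 */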
		if (fully_mapped && blocks_per_page == 1 &&
		    !PageUptodate(page) && cleancache_get_page(page) == 0) {
			SetPageUptodate(page);
			goto confused;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != blocks[0] - 1)) {
		submit_and_realloc:
			ext4_submit_bio_read(bio);
			bio = NULL;
		}
		if (bio == NULL) {
			struct fscrypt_ctx *ctx = NULL;

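			/*
			 * For encrypted regular files, allocate the fscrypt
			 * context now; it rides in bio->bi_private and is
			 * what ext4_bio_encrypted() tests at completion.
			 */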
			if (ext4_encrypted_inode(inode) &&
			    S_ISREG(inode->i_mode)) {
				ctx = fscrypt_get_ctx(inode, GFP_NOFS);
				if (IS_ERR(ctx))
					goto set_error_page;
			}
			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, BIO_MAX_PAGES));
			if (!bio) {
				if (ctx)
					fscrypt_release_ctx(ctx);
				goto set_error_page;
			}
			bio->bi_bdev = bdev;
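			/* Convert the first fs block to a 512-byte sector. */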
			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
			bio->bi_end_io = mpage_end_io;
			bio->bi_private = ctx;
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
		}

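		/*
		 * Add only the mapped prefix of the page (up to the first
		 * hole) to the bio.
		 */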
		length = first_hole << blkbits;
		if (bio_add_page(bio, page, length, 0) < length)
			goto submit_and_realloc;

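		/*
		 * Submit now if the mapping ended on a boundary (the next
		 * block is likely to be discontiguous) or if the page ends
		 * in a hole; otherwise keep filling the bio.
		 */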
		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
		     (relative_block == map.m_len)) ||
		    (first_hole != blocks_per_page)) {
			ext4_submit_bio_read(bio);
			bio = NULL;
		} else
			last_block_in_bio = blocks[blocks_per_page - 1];
		goto next_page;
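	/*
	 * Anything unusual (see the comment at the top of this file) ends
	 * up here and falls back to the buffer_head-based read path.
	 */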
	confused:
		if (bio) {
			ext4_submit_bio_read(bio);
			bio = NULL;
		}
		if (!PageUptodate(page))
			block_read_full_page(page, ext4_get_block);
		else
			unlock_page(page);
	next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		ext4_submit_bio_read(bio);
	return 0;
}