// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "internal.h"
#include <linux/prefetch.h>

#include <trace/events/erofs.h>

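/* bio completion handler: propagate per-page I/O status, then unlock each page */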
static void erofs_readendio(struct bio *bio)
{
	struct bio_vec *bvec;
	blk_status_t err = bio->bi_status;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;

		/* page is already locked */
		DBG_BUGON(PageUptodate(page));

		if (err)
			SetPageError(page);
		else
			SetPageUptodate(page);

		unlock_page(page);
		/* page could be reclaimed now */
	}
	bio_put(bio);
}

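/*
 * Get a locked, up-to-date metadata page from the block device's page
 * cache; the caller is responsible for unlocking and releasing it.
 */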
struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr)
{
	struct address_space *const mapping = sb->s_bdev->bd_inode->i_mapping;
	struct page *page;

	page = read_cache_page_gfp(mapping, blkaddr,
				   mapping_gfp_constraint(mapping, ~__GFP_FS));
	/* should already be PageUptodate */
	if (!IS_ERR(page))
		lock_page(page);
	return page;
}

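/*
 * Translate the logical offset in map->m_la of a flat (uncompressed)
 * inode into the physical address and length of the extent containing
 * it, handling the tail-packed inline tail as a special case.
 */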
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	int err = 0;
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = EROFS_I(inode);
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	trace_erofs_map_blocks_flatmode_enter(inode, map, flags);

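	/* with tail-packing, the last block is stored inline in the inode area */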
	nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
	lastblk = nblocks - tailendpacking;

	if (offset >= inode->i_size) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;

	if (offset < blknr_to_addr(lastblk)) {
		map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
		map->m_plen = blknr_to_addr(lastblk) - offset;
	} else if (tailendpacking) {
		/* 2 - inode inline B: inode, [xattrs], inline last blk... */
		struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

		map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in one meta block */
		if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
			erofs_err(inode->i_sb,
				  "inline data cross block boundary @ nid %llu",
				  vi->nid);
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto err_out;
		}

		map->m_flags |= EROFS_MAP_META;
	} else {
		erofs_err(inode->i_sb,
			  "internal error @ nid: %llu (size %llu), m_la 0x%llx",
			  vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		err = -EIO;
		goto err_out;
	}

out:
	map->m_llen = map->m_plen;

err_out:
	trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
	return err;
}

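/*
 * Read one uncompressed page, merging physically contiguous blocks into
 * the pending bio where possible; returns the still-open bio (or NULL,
 * or an ERR_PTR) so the caller can pass it back in for the next page.
 */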
static inline struct bio *erofs_read_raw_page(struct bio *bio,
					      struct address_space *mapping,
					      struct page *page,
					      erofs_off_t *last_block,
					      unsigned int nblocks,
					      bool ra)
{
	struct inode *const inode = mapping->host;
	struct super_block *const sb = inode->i_sb;
	erofs_off_t current_block = (erofs_off_t)page->index;
	int err;

	DBG_BUGON(!nblocks);

	if (PageUptodate(page)) {
		err = 0;
		goto has_updated;
	}

	/* note that bio is also NULL for the readpage case */
	if (bio &&
	    /* not continuous */
	    *last_block + 1 != current_block) {
submit_bio_retry:
		submit_bio(bio);
		bio = NULL;
	}

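	/* no pending bio: map the current block to find its physical extent */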
	if (!bio) {
		struct erofs_map_blocks map = {
			.m_la = blknr_to_addr(current_block),
		};
		erofs_blk_t blknr;
		unsigned int blkoff;

		err = erofs_map_blocks_flatmode(inode, &map, EROFS_GET_BLOCKS_RAW);
		if (err)
			goto err_out;

		/* zero out the holed page */
		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
			zero_user_segment(page, 0, PAGE_SIZE);
			SetPageUptodate(page);

			/* imply err = 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* for RAW access mode, m_plen must be equal to m_llen */
		DBG_BUGON(map.m_plen != map.m_llen);

		blknr = erofs_blknr(map.m_pa);
		blkoff = erofs_blkoff(map.m_pa);

		/* deal with inline page */
		if (map.m_flags & EROFS_MAP_META) {
			void *vsrc, *vto;
			struct page *ipage;

			DBG_BUGON(map.m_plen > PAGE_SIZE);

			ipage = erofs_get_meta_page(inode->i_sb, blknr);

			if (IS_ERR(ipage)) {
				err = PTR_ERR(ipage);
				goto err_out;
			}

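			/* copy the inline data and zero the remainder of the page */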
			vsrc = kmap_atomic(ipage);
			vto = kmap_atomic(page);
			memcpy(vto, vsrc + blkoff, map.m_plen);
			memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen);
			kunmap_atomic(vto);
			kunmap_atomic(vsrc);
			flush_dcache_page(page);

			SetPageUptodate(page);
			/* TODO: could we unlock the page earlier? */
			unlock_page(ipage);
			put_page(ipage);

			/* imply err = 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* pa must be block-aligned for raw reading */
		DBG_BUGON(erofs_blkoff(map.m_pa));

		/* max # of continuous pages */
		if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
			nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);
		if (nblocks > BIO_MAX_PAGES)
			nblocks = BIO_MAX_PAGES;

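		/* allocation backed by fs_bio_set with GFP_NOIO cannot fail */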
		bio = bio_alloc(GFP_NOIO, nblocks);

		bio->bi_end_io = erofs_readendio;
		bio_set_dev(bio, sb->s_bdev);
		bio->bi_iter.bi_sector = (sector_t)blknr <<
			LOG_SECTORS_PER_BLOCK;
		bio->bi_opf = REQ_OP_READ | (ra ? REQ_RAHEAD : 0);
	}

	err = bio_add_page(bio, page, PAGE_SIZE, 0);
	/* out of the extent or bio is full */
	if (err < PAGE_SIZE)
		goto submit_bio_retry;

	*last_block = current_block;

	/* submit the bio in advance in case it is followed by too many gaps */
	if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) {
		/* reset err to 0 before submitting */
		err = 0;
		goto submit_bio_out;
	}

	return bio;

err_out:
	/* for sync reading, set page error immediately */
	if (!ra) {
		SetPageError(page);
		ClearPageUptodate(page);
	}
has_updated:
	unlock_page(page);

	/* if updated manually, continuous pages have a gap */
	if (bio)
submit_bio_out:
		submit_bio(bio);
	return err ? ERR_PTR(err) : NULL;
}

/*
 * Since we don't have write or truncate flows, no inode
 * locking needs to be held at the moment.
 */
static int erofs_raw_access_readpage(struct file *file, struct page *page)
{
	erofs_off_t last_block;
	struct bio *bio;

	trace_erofs_readpage(page, true);

	bio = erofs_read_raw_page(NULL, page->mapping,
				  page, &last_block, 1, false);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	DBG_BUGON(bio);	/* since we have only one bio -- must be NULL */
	return 0;
}

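/* batch physically contiguous readahead pages into shared bios */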
static void erofs_raw_access_readahead(struct readahead_control *rac)
{
	erofs_off_t last_block;
	struct bio *bio = NULL;
	struct page *page;

	trace_erofs_readpages(rac->mapping->host, readahead_index(rac),
			      readahead_count(rac), true);

	while ((page = readahead_page(rac))) {
		prefetchw(&page->flags);

		bio = erofs_read_raw_page(bio, rac->mapping, page, &last_block,
					  readahead_count(rac), true);

		/* all the page errors are ignored during readahead */
		if (IS_ERR(bio)) {
			pr_err("%s, readahead error at page %lu of nid %llu\n",
			       __func__, page->index,
			       EROFS_I(rac->mapping->host)->nid);

			bio = NULL;
		}

		put_page(page);
	}

	/* the rare case (end in gaps) */
	if (bio)
		submit_bio(bio);
}

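/* ->bmap for FIBMAP: the tail-packed part of an inline inode cannot be mapped */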
static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct erofs_map_blocks map = {
		.m_la = blknr_to_addr(block),
	};

	if (EROFS_I(inode)->datalayout == EROFS_INODE_FLAT_INLINE) {
		erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE;

		if (block >> LOG_SECTORS_PER_BLOCK >= blks)
			return 0;
	}

	if (!erofs_map_blocks_flatmode(inode, &map, EROFS_GET_BLOCKS_RAW))
		return erofs_blknr(map.m_pa);

	return 0;
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.readpage = erofs_raw_access_readpage,
	.readahead = erofs_raw_access_readahead,
	.bmap = erofs_bmap,
};