// SPDX-License-Identifier: GPL-2.0+ OR Apache-2.0
/*
 * (a large amount of code was adapted from Linux kernel. )
 *
 * Copyright (C) 2018-2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 * Modified by Huang Jianan <huangjianan@oppo.com>
 */
#include "erofs/io.h"
#include "erofs/print.h"

static int z_erofs_do_map_blocks(struct erofs_inode *vi,
				 struct erofs_map_blocks *map,
				 int flags);

int z_erofs_fill_inode(struct erofs_inode *vi)
{
	struct erofs_sb_info *sbi = vi->sbi;

	if (!erofs_sb_has_big_pcluster(sbi) &&
	    !erofs_sb_has_ztailpacking(sbi) && !erofs_sb_has_fragments(sbi) &&
	    vi->datalayout == EROFS_INODE_COMPRESSED_FULL) {
		vi->z_advise = 0;
		vi->z_algorithmtype[0] = 0;
		vi->z_algorithmtype[1] = 0;
		vi->z_logical_clusterbits = sbi->blkszbits;

		vi->flags |= EROFS_I_Z_INITED;
	}
	return 0;
}

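/*
 * Parse the on-disk z_erofs map header on first use and fill in the
 * per-inode compression fields (z_advise, algorithm types, logical
 * cluster bits, tail-packing/fragment details).
 */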
static int z_erofs_fill_inode_lazy(struct erofs_inode *vi)
{
	int ret;
	erofs_off_t pos;
	struct z_erofs_map_header *h;
	char buf[sizeof(struct z_erofs_map_header)];
	struct erofs_sb_info *sbi = vi->sbi;

	if (vi->flags & EROFS_I_Z_INITED)
		return 0;

	pos = round_up(erofs_iloc(vi) + vi->inode_isize + vi->xattr_isize, 8);
	ret = dev_read(sbi, 0, buf, pos, sizeof(buf));
	if (ret < 0)
		return -EIO;

	h = (struct z_erofs_map_header *)buf;
	/*
	 * If the highest bit of the 8-byte map header is set, the whole file
	 * is stored in the packed inode. The remaining bits keep z_fragmentoff.
	 */
	if (h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT) {
		vi->z_advise = Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
		vi->fragmentoff = le64_to_cpu(*(__le64 *)h) ^ (1ULL << 63);
		vi->z_tailextent_headlcn = 0;
		goto out;
	}

	vi->z_advise = le16_to_cpu(h->h_advise);
	vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
	vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;

	if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX) {
		erofs_err("unknown compression format %u for nid %llu",
			  vi->z_algorithmtype[0], (unsigned long long)vi->nid);
		return -EOPNOTSUPP;
	}

	vi->z_logical_clusterbits = sbi->blkszbits + (h->h_clusterbits & 7);
	if (vi->datalayout == EROFS_INODE_COMPRESSED_COMPACT &&
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
		erofs_err("big pcluster head1/2 of compact indexes should be consistent for nid %llu",
			  vi->nid * 1ULL);
		return -EFSCORRUPTED;
	}

	if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER) {
		struct erofs_map_blocks map = { .index = UINT_MAX };

		vi->idata_size = le16_to_cpu(h->h_idata_size);
		ret = z_erofs_do_map_blocks(vi, &map,
					    EROFS_GET_BLOCKS_FINDTAIL);
		if (!map.m_plen ||
		    erofs_blkoff(sbi, map.m_pa) + map.m_plen > erofs_blksiz(sbi)) {
			erofs_err("invalid tail-packing pclustersize %llu",
				  map.m_plen | 0ULL);
			return -EFSCORRUPTED;
		}
		if (ret < 0)
			return ret;
	}
	if (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER &&
	    !(h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT)) {
		struct erofs_map_blocks map = { .index = UINT_MAX };

		vi->fragmentoff = le32_to_cpu(h->h_fragmentoff);
		ret = z_erofs_do_map_blocks(vi, &map,
					    EROFS_GET_BLOCKS_FINDTAIL);
		if (ret < 0)
			return ret;
	}
out:
	vi->flags |= EROFS_I_Z_INITED;
	return 0;
}

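/* per-lookup state gathered while walking the logical cluster (lcluster) indexes */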
struct z_erofs_maprecorder {
	struct erofs_inode *inode;
	struct erofs_map_blocks *map;
	void *kaddr;

	unsigned long lcn;
	/* compression extent information gathered */
	u8  type, headtype;
	u16 clusterofs;
	u16 delta[2];
	erofs_blk_t pblk, compressedblks;
	erofs_off_t nextpackoff;
	bool partialref;
};

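/* read the index block @eblk into map->mpage unless it is already cached there */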
static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m,
				  erofs_blk_t eblk)
{
	int ret;
	struct erofs_map_blocks *const map = m->map;
	char *mpage = map->mpage;

	if (map->index == eblk)
		return 0;

	ret = blk_read(m->inode->sbi, 0, mpage, eblk, 1);
	if (ret < 0)
		return -EIO;

	map->index = eblk;

	return 0;
}

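/* decode a full (non-compact) on-disk lcluster index for the given lcn */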
static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					 unsigned long lcn)
{
	struct erofs_inode *const vi = m->inode;
	struct erofs_sb_info *sbi = vi->sbi;
	const erofs_off_t ibase = erofs_iloc(vi);
	const erofs_off_t pos = Z_EROFS_FULL_INDEX_ALIGN(ibase +
			vi->inode_isize + vi->xattr_isize) +
		lcn * sizeof(struct z_erofs_lcluster_index);
	struct z_erofs_lcluster_index *di;
	unsigned int advise, type;
	int err;

	err = z_erofs_reload_indexes(m, erofs_blknr(sbi, pos));
	if (err)
		return err;

	m->nextpackoff = pos + sizeof(struct z_erofs_lcluster_index);
	m->lcn = lcn;
	di = m->kaddr + erofs_blkoff(sbi, pos);

	advise = le16_to_cpu(di->di_advise);
	type = (advise >> Z_EROFS_LI_LCLUSTER_TYPE_BIT) &
		((1 << Z_EROFS_LI_LCLUSTER_TYPE_BITS) - 1);
	switch (type) {
	case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
		m->clusterofs = 1 << vi->z_logical_clusterbits;
		m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
		if (m->delta[0] & Z_EROFS_LI_D0_CBLKCNT) {
			if (!(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) {
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			m->compressedblks = m->delta[0] &
				~Z_EROFS_LI_D0_CBLKCNT;
			m->delta[0] = 1;
		}
		m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
		break;
	case Z_EROFS_LCLUSTER_TYPE_PLAIN:
	case Z_EROFS_LCLUSTER_TYPE_HEAD1:
		if (advise & Z_EROFS_LI_PARTIAL_REF)
			m->partialref = true;
		m->clusterofs = le16_to_cpu(di->di_clusterofs);
		m->pblk = le32_to_cpu(di->di_u.blkaddr);
		break;
	default:
		DBG_BUGON(1);
		return -EOPNOTSUPP;
	}
	m->type = type;
	return 0;
}

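/* extract the low bits and the 2-bit lcluster type at bit position @pos */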
static unsigned int decode_compactedbits(unsigned int lobits,
					 unsigned int lomask,
					 u8 *in, unsigned int pos, u8 *type)
{
	const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
	const unsigned int lo = v & lomask;

	*type = (v >> lobits) & 3;
	return lo;
}

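/* count the lookahead distance (in lclusters) up to the next HEAD lcluster */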
static int get_compacted_la_distance(unsigned int lclusterbits,
				     unsigned int encodebits,
				     unsigned int vcnt, u8 *in, int i)
{
	const unsigned int lomask = (1 << lclusterbits) - 1;
	unsigned int lo, d1 = 0;
	u8 type;

	DBG_BUGON(i >= vcnt);

	do {
		lo = decode_compactedbits(lclusterbits, lomask,
					  in, encodebits * i, &type);

		if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
			return d1;
		++d1;
	} while (++i < vcnt);

	/* vcnt - 1 (Z_EROFS_LCLUSTER_TYPE_NONHEAD) item */
	if (!(lo & Z_EROFS_LI_D0_CBLKCNT))
		d1 += lo - 1;
	return d1;
}

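/*
 * Decode one compacted index pack (2B or 4B amortized per lcluster) and
 * fill in the maprecorder fields for the lcluster located at @pos.
 */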
static int unpack_compacted_index(struct z_erofs_maprecorder *m,
				  unsigned int amortizedshift,
				  erofs_off_t pos, bool lookahead)
{
	struct erofs_inode *const vi = m->inode;
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	const unsigned int lomask = (1 << lclusterbits) - 1;
	unsigned int vcnt, base, lo, encodebits, nblk, eofs;
	int i;
	u8 *in, type;
	bool big_pcluster;

	if (1 << amortizedshift == 4 && lclusterbits <= 14)
		vcnt = 2;
	else if (1 << amortizedshift == 2 && lclusterbits == 12)
		vcnt = 16;
	else
		return -EOPNOTSUPP;

	/* it doesn't equal round_up(..) */
	m->nextpackoff = round_down(pos, vcnt << amortizedshift) +
			 (vcnt << amortizedshift);
	big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
	encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
	eofs = erofs_blkoff(vi->sbi, pos);
	base = round_down(eofs, vcnt << amortizedshift);
	in = m->kaddr + base;

	i = (eofs - base) >> amortizedshift;

	lo = decode_compactedbits(lclusterbits, lomask,
				  in, encodebits * i, &type);
	m->type = type;
	if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
		m->clusterofs = 1 << lclusterbits;

		/* figure out lookahead_distance: delta[1] if needed */
		if (lookahead)
			m->delta[1] = get_compacted_la_distance(lclusterbits,
						encodebits, vcnt, in, i);
		if (lo & Z_EROFS_LI_D0_CBLKCNT) {
			if (!big_pcluster) {
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			m->compressedblks = lo & ~Z_EROFS_LI_D0_CBLKCNT;
			m->delta[0] = 1;
			return 0;
		} else if (i + 1 != (int)vcnt) {
			m->delta[0] = lo;
			return 0;
		}
		/*
		 * The last lcluster in the pack is special: its lo field
		 * stores delta[1] rather than delta[0]. Hence, derive
		 * delta[0] from the previous lcluster instead.
		 */
		lo = decode_compactedbits(lclusterbits, lomask,
					  in, encodebits * (i - 1), &type);
		if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
			lo = 0;
		else if (lo & Z_EROFS_LI_D0_CBLKCNT)
			lo = 1;
		m->delta[0] = lo + 1;
		return 0;
	}
	m->clusterofs = lo;
	m->delta[0] = 0;
	/* figure out blkaddr (pblk) for HEAD lclusters */
	if (!big_pcluster) {
		nblk = 1;
		while (i > 0) {
			--i;
			lo = decode_compactedbits(lclusterbits, lomask,
						  in, encodebits * i, &type);
			if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD)
				i -= lo;

			if (i >= 0)
				++nblk;
		}
	} else {
		nblk = 0;
		while (i > 0) {
			--i;
			lo = decode_compactedbits(lclusterbits, lomask,
						  in, encodebits * i, &type);
			if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
				if (lo & Z_EROFS_LI_D0_CBLKCNT) {
					--i;
					nblk += lo & ~Z_EROFS_LI_D0_CBLKCNT;
					continue;
				}
				if (lo <= 1) {
					DBG_BUGON(1);
					/* --i; ++nblk;	continue; */
					return -EFSCORRUPTED;
				}
				i -= lo - 2;
				continue;
			}
			++nblk;
		}
	}
	in += (vcnt << amortizedshift) - sizeof(__le32);
	m->pblk = le32_to_cpu(*(__le32 *)in) + nblk;
	return 0;
}

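/* locate the compacted index pack that holds @lcn and unpack it */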
static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					    unsigned long lcn, bool lookahead)
{
	struct erofs_inode *const vi = m->inode;
	struct erofs_sb_info *sbi = vi->sbi;
	const erofs_off_t ebase = round_up(erofs_iloc(vi) + vi->inode_isize +
					   vi->xattr_isize, 8) +
		sizeof(struct z_erofs_map_header);
	const unsigned int totalidx = BLK_ROUND_UP(sbi, vi->i_size);
	unsigned int compacted_4b_initial, compacted_2b;
	unsigned int amortizedshift;
	erofs_off_t pos;
	int err;

	if (lcn >= totalidx)
		return -EINVAL;

	m->lcn = lcn;
	/* used to align to 32-byte (compacted_2b) alignment */
	compacted_4b_initial = (32 - ebase % 32) / 4;
	if (compacted_4b_initial == 32 / 4)
		compacted_4b_initial = 0;

	if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
	    compacted_4b_initial < totalidx)
		compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
	else
		compacted_2b = 0;

	pos = ebase;
	if (lcn < compacted_4b_initial) {
		amortizedshift = 2;
		goto out;
	}
	pos += compacted_4b_initial * 4;
	lcn -= compacted_4b_initial;

	if (lcn < compacted_2b) {
		amortizedshift = 1;
		goto out;
	}
	pos += compacted_2b * 2;
	lcn -= compacted_2b;
	amortizedshift = 2;
out:
	pos += lcn * (1 << amortizedshift);
	err = z_erofs_reload_indexes(m, erofs_blknr(sbi, pos));
	if (err)
		return err;
	return unpack_compacted_index(m, amortizedshift, pos, lookahead);
}

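/* dispatch to the full or compact index decoder based on the inode data layout */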
static int z_erofs_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					  unsigned int lcn, bool lookahead)
{
	const unsigned int datamode = m->inode->datalayout;

	if (datamode == EROFS_INODE_COMPRESSED_FULL)
		return legacy_load_cluster_from_disk(m, lcn);

	if (datamode == EROFS_INODE_COMPRESSED_COMPACT)
		return compacted_load_cluster_from_disk(m, lcn, lookahead);

	return -EINVAL;
}

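/* walk backwards across NONHEAD lclusters to find the extent head lcluster */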
static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
				   unsigned int lookback_distance)
{
	struct erofs_inode *const vi = m->inode;
	struct erofs_map_blocks *const map = m->map;
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	unsigned long lcn = m->lcn;
	int err;

	if (lcn < lookback_distance) {
		erofs_err("bogus lookback distance @ nid %llu",
			  (unsigned long long)vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	/* load extent head logical cluster if needed */
	lcn -= lookback_distance;
	err = z_erofs_load_cluster_from_disk(m, lcn, false);
	if (err)
		return err;

	switch (m->type) {
	case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
		if (!m->delta[0]) {
			erofs_err("invalid lookback distance 0 @ nid %llu",
				  (unsigned long long)vi->nid);
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		return z_erofs_extent_lookback(m, m->delta[0]);
	case Z_EROFS_LCLUSTER_TYPE_PLAIN:
	case Z_EROFS_LCLUSTER_TYPE_HEAD1:
		m->headtype = m->type;
		map->m_la = (lcn << lclusterbits) | m->clusterofs;
		break;
	default:
		erofs_err("unknown type %u @ lcn %lu of nid %llu",
			  m->type, lcn, (unsigned long long)vi->nid);
		DBG_BUGON(1);
		return -EOPNOTSUPP;
	}
	return 0;
}

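/* figure out the compressed (physical) length of the current pcluster */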
static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
					    unsigned int initial_lcn)
{
	struct erofs_inode *const vi = m->inode;
	struct erofs_sb_info *sbi = vi->sbi;
	struct erofs_map_blocks *const map = m->map;
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	unsigned long lcn;
	int err;

	DBG_BUGON(m->type != Z_EROFS_LCLUSTER_TYPE_PLAIN &&
		  m->type != Z_EROFS_LCLUSTER_TYPE_HEAD1);

	if (m->headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) {
		map->m_plen = 1 << lclusterbits;
		return 0;
	}

	lcn = m->lcn + 1;
	if (m->compressedblks)
		goto out;

	err = z_erofs_load_cluster_from_disk(m, lcn, false);
	if (err)
		return err;

	/*
	 * If the 1st NONHEAD lcluster has already been handled initially
	 * without valid compressedblks, it at least mustn't be CBLKCNT, or
	 * an internal implementation error has been detected.
	 *
	 * The following code can also handle it properly anyway, but let's
	 * BUG_ON in the debugging mode only for developers to notice that.
	 */
	DBG_BUGON(lcn == initial_lcn &&
		  m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD);

	switch (m->type) {
	case Z_EROFS_LCLUSTER_TYPE_PLAIN:
	case Z_EROFS_LCLUSTER_TYPE_HEAD1:
		/*
		 * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
		 * rather than CBLKCNT, it's a 1 lcluster-sized pcluster.
		 */
		m->compressedblks = 1 << (lclusterbits - sbi->blkszbits);
		break;
	case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
		if (m->delta[0] != 1)
			goto err_bonus_cblkcnt;
		if (m->compressedblks)
			break;
		/* fallthrough */
	default:
		erofs_err("cannot find CBLKCNT @ lcn %lu of nid %llu",
			  lcn, vi->nid | 0ULL);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}
out:
	map->m_plen = m->compressedblks << sbi->blkszbits;
	return 0;
err_bonus_cblkcnt:
	erofs_err("bogus CBLKCNT @ lcn %lu of nid %llu",
		  lcn, vi->nid | 0ULL);
	DBG_BUGON(1);
	return -EFSCORRUPTED;
}

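/* extend m_llen up to the next HEAD lcluster (or EOF) to cover the full extent */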
static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
{
	struct erofs_inode *const vi = m->inode;
	struct erofs_map_blocks *map = m->map;
	unsigned int lclusterbits = vi->z_logical_clusterbits;
	u64 lcn = m->lcn, headlcn = map->m_la >> lclusterbits;
	int err;

	do {
		/* handle the last EOF pcluster (no next HEAD lcluster) */
		if ((lcn << lclusterbits) >= vi->i_size) {
			map->m_llen = vi->i_size - map->m_la;
			return 0;
		}

		err = z_erofs_load_cluster_from_disk(m, lcn, true);
		if (err)
			return err;

		if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
			DBG_BUGON(!m->delta[1] &&
				  m->clusterofs != 1 << lclusterbits);
		} else if (m->type == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
			   m->type == Z_EROFS_LCLUSTER_TYPE_HEAD1) {
			/* go on until the next HEAD lcluster */
			if (lcn != headlcn)
				break;
			m->delta[1] = 1;
		} else {
			erofs_err("unknown type %u @ lcn %llu of nid %llu",
				  m->type, lcn | 0ULL,
				  (unsigned long long)vi->nid);
			DBG_BUGON(1);
			return -EOPNOTSUPP;
		}
		lcn += m->delta[1];
	} while (m->delta[1]);

	map->m_llen = (lcn << lclusterbits) + m->clusterofs - map->m_la;
	return 0;
}

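/* map a logical offset to its compressed extent (physical address and lengths) */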
static int z_erofs_do_map_blocks(struct erofs_inode *vi,
				 struct erofs_map_blocks *map,
				 int flags)
{
	struct erofs_sb_info *sbi = vi->sbi;
	bool ztailpacking = vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER;
	bool fragment = vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
	struct z_erofs_maprecorder m = {
		.inode = vi,
		.map = map,
		.kaddr = map->mpage,
	};
	int err = 0;
	unsigned int lclusterbits, endoff;
	unsigned long initial_lcn;
	unsigned long long ofs, end;

	lclusterbits = vi->z_logical_clusterbits;
	ofs = flags & EROFS_GET_BLOCKS_FINDTAIL ? vi->i_size - 1 : map->m_la;
	initial_lcn = ofs >> lclusterbits;
	endoff = ofs & ((1 << lclusterbits) - 1);

	err = z_erofs_load_cluster_from_disk(&m, initial_lcn, false);
	if (err)
		goto out;

	if (ztailpacking && (flags & EROFS_GET_BLOCKS_FINDTAIL))
		vi->z_idataoff = m.nextpackoff;

	map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_ENCODED;
	end = (m.lcn + 1ULL) << lclusterbits;
	switch (m.type) {
	case Z_EROFS_LCLUSTER_TYPE_PLAIN:
	case Z_EROFS_LCLUSTER_TYPE_HEAD1:
		if (endoff >= m.clusterofs) {
			m.headtype = m.type;
			map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
			/*
			 * For ztailpacking files, in order to inline data more
			 * effectively, special EOF lclusters are now supported
			 * which can have three parts at most.
			 */
			if (ztailpacking && end > vi->i_size)
				end = vi->i_size;
			break;
		}
		/* m.lcn should be >= 1 if endoff < m.clusterofs */
		if (!m.lcn) {
			erofs_err("invalid logical cluster 0 at nid %llu",
				  (unsigned long long)vi->nid);
			err = -EFSCORRUPTED;
			goto out;
		}
		end = (m.lcn << lclusterbits) | m.clusterofs;
		map->m_flags |= EROFS_MAP_FULL_MAPPED;
		m.delta[0] = 1;
		/* fallthrough */
	case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
		/* get the corresponding first chunk */
		err = z_erofs_extent_lookback(&m, m.delta[0]);
		if (err)
			goto out;
		break;
	default:
		erofs_err("unknown type %u @ offset %llu of nid %llu",
			  m.type, ofs, (unsigned long long)vi->nid);
		err = -EOPNOTSUPP;
		goto out;
	}
	if (m.partialref)
		map->m_flags |= EROFS_MAP_PARTIAL_REF;
	map->m_llen = end - map->m_la;
	if (flags & EROFS_GET_BLOCKS_FINDTAIL) {
		vi->z_tailextent_headlcn = m.lcn;
		/* for non-compact indexes, fragmentoff is 64 bits */
		if (fragment && vi->datalayout == EROFS_INODE_COMPRESSED_FULL)
			vi->fragmentoff |= (u64)m.pblk << 32;
	}
	if (ztailpacking && m.lcn == vi->z_tailextent_headlcn) {
		map->m_flags |= EROFS_MAP_META;
		map->m_pa = vi->z_idataoff;
		map->m_plen = vi->z_idata_size;
	} else if (fragment && m.lcn == vi->z_tailextent_headlcn) {
		map->m_flags |= EROFS_MAP_FRAGMENT;
	} else {
		map->m_pa = erofs_pos(sbi, m.pblk);
		err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
		if (err)
			goto out;
	}

	if (m.headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN) {
		if (map->m_llen > map->m_plen) {
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto out;
		}
		if (vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER)
			map->m_algorithmformat =
				Z_EROFS_COMPRESSION_INTERLACED;
		else
			map->m_algorithmformat =
				Z_EROFS_COMPRESSION_SHIFTED;
	} else {
		map->m_algorithmformat = vi->z_algorithmtype[0];
	}

	if (flags & EROFS_GET_BLOCKS_FIEMAP) {
		err = z_erofs_get_extent_decompressedlen(&m);
		if (!err)
			map->m_flags |= EROFS_MAP_FULL_MAPPED;
	}

out:
	erofs_dbg("m_la %" PRIu64 " m_pa %" PRIu64 " m_llen %" PRIu64 " m_plen %" PRIu64 " m_flags 0%o",
		  map->m_la, map->m_pa,
		  map->m_llen, map->m_plen, map->m_flags);
	return err;
}

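/* external mapping entry point: handle post-EOF and whole-file-fragment cases first */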
int z_erofs_map_blocks_iter(struct erofs_inode *vi,
			    struct erofs_map_blocks *map,
			    int flags)
{
	int err = 0;

	/* when trying to read beyond EOF, leave it unmapped */
	if (map->m_la >= vi->i_size) {
		map->m_llen = map->m_la + 1 - vi->i_size;
		map->m_la = vi->i_size;
		map->m_flags = 0;
		goto out;
	}

	err = z_erofs_fill_inode_lazy(vi);
	if (err)
		goto out;

	if ((vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER) &&
	    !vi->z_tailextent_headlcn) {
		map->m_la = 0;
		map->m_llen = vi->i_size;
		map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_FULL_MAPPED |
				EROFS_MAP_FRAGMENT;
		goto out;
	}

	err = z_erofs_do_map_blocks(vi, map, flags);
out:
	DBG_BUGON(err < 0 && err != -ENOMEM);
	return err;
}
718