// SPDX-License-Identifier: GPL-2.0+
/*
 * (a large amount of code was adapted from the Linux kernel.)
 *
 * Copyright (C) 2018-2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 * Modified by Huang Jianan <huangjianan@oppo.com>
 */
#include "erofs/io.h"
#include "erofs/print.h"

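/*
 * Set up default per-inode compression info for legacy (non-big-pcluster)
 * compressed inodes, which carry no on-disk z_erofs_map_header.
 */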
int z_erofs_fill_inode(struct erofs_inode *vi)
{
	if (!erofs_sb_has_big_pcluster() &&
	    vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY) {
		vi->z_advise = 0;
		vi->z_algorithmtype[0] = 0;
		vi->z_algorithmtype[1] = 0;
		vi->z_logical_clusterbits = LOG_BLOCK_SIZE;

		vi->flags |= EROFS_I_Z_INITED;
	}
	return 0;
}

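/*
 * Lazily parse the on-disk z_erofs_map_header (located just after the inode
 * base and xattrs, 8-byte aligned) and cache the compression configuration
 * in the in-memory inode on the first mapping request.
 */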
static int z_erofs_fill_inode_lazy(struct erofs_inode *vi)
{
	int ret;
	erofs_off_t pos;
	struct z_erofs_map_header *h;
	char buf[sizeof(struct z_erofs_map_header)];

	if (vi->flags & EROFS_I_Z_INITED)
		return 0;

	DBG_BUGON(!erofs_sb_has_big_pcluster() &&
		  vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY);
	pos = round_up(iloc(vi->nid) + vi->inode_isize + vi->xattr_isize, 8);

	ret = dev_read(0, buf, pos, sizeof(buf));
	if (ret < 0)
		return -EIO;

	h = (struct z_erofs_map_header *)buf;
	vi->z_advise = le16_to_cpu(h->h_advise);
	vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
	vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;

	if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX) {
		erofs_err("unknown compression format %u for nid %llu",
			  vi->z_algorithmtype[0], (unsigned long long)vi->nid);
		return -EOPNOTSUPP;
	}

	vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7);
	if (vi->datalayout == EROFS_INODE_FLAT_COMPRESSION &&
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
		erofs_err("big pcluster head1/2 of compact indexes should be consistent for nid %llu",
			  vi->nid * 1ULL);
		return -EFSCORRUPTED;
	}
	vi->flags |= EROFS_I_Z_INITED;
	return 0;
}

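/* per-query context used while walking the compression (lcluster) indexes */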
struct z_erofs_maprecorder {
	struct erofs_inode *inode;
	struct erofs_map_blocks *map;
	void *kaddr;

	unsigned long lcn;
	/* compression extent information gathered */
	u8 type, headtype;
	u16 clusterofs;
	u16 delta[2];
	erofs_blk_t pblk, compressedlcs;
};

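/* read the metadata block @eblk into map->mpage unless it is already cached */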
static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m,
				  erofs_blk_t eblk)
{
	int ret;
	struct erofs_map_blocks *const map = m->map;
	char *mpage = map->mpage;

	if (map->index == eblk)
		return 0;

	ret = blk_read(0, mpage, eblk, 1);
	if (ret < 0)
		return -EIO;

	map->index = eblk;

	return 0;
}

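/* decode a single full-sized (legacy) lcluster index for logical cluster @lcn */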
static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					 unsigned long lcn)
{
	struct erofs_inode *const vi = m->inode;
	const erofs_off_t ibase = iloc(vi->nid);
	const erofs_off_t pos =
		Z_EROFS_VLE_LEGACY_INDEX_ALIGN(ibase + vi->inode_isize +
					       vi->xattr_isize) +
		lcn * sizeof(struct z_erofs_vle_decompressed_index);
	struct z_erofs_vle_decompressed_index *di;
	unsigned int advise, type;
	int err;

	err = z_erofs_reload_indexes(m, erofs_blknr(pos));
	if (err)
		return err;

	m->lcn = lcn;
	di = m->kaddr + erofs_blkoff(pos);

	advise = le16_to_cpu(di->di_advise);
	type = (advise >> Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT) &
		((1 << Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) - 1);
	switch (type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		m->clusterofs = 1 << vi->z_logical_clusterbits;
		m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
		if (m->delta[0] & Z_EROFS_VLE_DI_D0_CBLKCNT) {
			if (!(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) {
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			m->compressedlcs = m->delta[0] &
				~Z_EROFS_VLE_DI_D0_CBLKCNT;
			m->delta[0] = 1;
		}
		m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
		break;
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
		m->clusterofs = le16_to_cpu(di->di_clusterofs);
		m->pblk = le32_to_cpu(di->di_u.blkaddr);
		break;
	default:
		DBG_BUGON(1);
		return -EOPNOTSUPP;
	}
	m->type = type;
	return 0;
}

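/*
 * Extract one compacted entry at bit offset @pos: return the low @lobits
 * field and store the 2-bit cluster type in @type.
 */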
static unsigned int decode_compactedbits(unsigned int lobits,
					 unsigned int lomask,
					 u8 *in, unsigned int pos, u8 *type)
{
	const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
	const unsigned int lo = v & lomask;

	*type = (v >> lobits) & 3;
	return lo;
}

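/*
 * Figure out the lookahead distance (delta[1]) of NONHEAD lcluster @i by
 * scanning forward within the pack; if the NONHEAD run reaches the end of
 * the pack, the last entry's lo field already stores its delta[1].
 */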
static int get_compacted_la_distance(unsigned int lclusterbits,
				     unsigned int encodebits,
				     unsigned int vcnt, u8 *in, int i)
{
	const unsigned int lomask = (1 << lclusterbits) - 1;
	unsigned int lo, d1 = 0;
	u8 type;

	DBG_BUGON(i >= vcnt);

	do {
		lo = decode_compactedbits(lclusterbits, lomask,
					  in, encodebits * i, &type);

		if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
			return d1;
		++d1;
	} while (++i < vcnt);

	/* the last NONHEAD lcluster (vcnt - 1) stores delta[1] in its lo field */
	if (!(lo & Z_EROFS_VLE_DI_D0_CBLKCNT))
		d1 += lo - 1;
	return d1;
}

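/*
 * Decode the compacted index pack containing byte offset @eofs and fill in
 * type/clusterofs/delta/pblk for the lcluster recorded in @m.
 */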
static int unpack_compacted_index(struct z_erofs_maprecorder *m,
				  unsigned int amortizedshift,
				  unsigned int eofs, bool lookahead)
{
	struct erofs_inode *const vi = m->inode;
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	const unsigned int lomask = (1 << lclusterbits) - 1;
	unsigned int vcnt, base, lo, encodebits, nblk;
	int i;
	u8 *in, type;
	bool big_pcluster;

	if (1 << amortizedshift == 4)
		vcnt = 2;
	else if (1 << amortizedshift == 2 && lclusterbits == 12)
		vcnt = 16;
	else
		return -EOPNOTSUPP;

	big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
	encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
	base = round_down(eofs, vcnt << amortizedshift);
	in = m->kaddr + base;

	i = (eofs - base) >> amortizedshift;

	lo = decode_compactedbits(lclusterbits, lomask,
				  in, encodebits * i, &type);
	m->type = type;
	if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
		m->clusterofs = 1 << lclusterbits;

		/* figure out lookahead_distance: delta[1] if needed */
		if (lookahead)
			m->delta[1] = get_compacted_la_distance(lclusterbits,
						encodebits, vcnt, in, i);
		if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT) {
			if (!big_pcluster) {
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			m->compressedlcs = lo & ~Z_EROFS_VLE_DI_D0_CBLKCNT;
			m->delta[0] = 1;
			return 0;
		} else if (i + 1 != (int)vcnt) {
			m->delta[0] = lo;
			return 0;
		}
		/*
		 * The last lcluster in the pack is special: its lo field
		 * stores delta[1] rather than delta[0], so derive delta[0]
		 * from the previous lcluster instead.
		 */
		lo = decode_compactedbits(lclusterbits, lomask,
					  in, encodebits * (i - 1), &type);
		if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
			lo = 0;
		else if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT)
			lo = 1;
		m->delta[0] = lo + 1;
		return 0;
	}
	m->clusterofs = lo;
	m->delta[0] = 0;
	/* figure out blkaddr (pblk) for HEAD lclusters */
	if (!big_pcluster) {
		nblk = 1;
		while (i > 0) {
			--i;
			lo = decode_compactedbits(lclusterbits, lomask,
						  in, encodebits * i, &type);
			if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
				i -= lo;

			if (i >= 0)
				++nblk;
		}
	} else {
		nblk = 0;
		while (i > 0) {
			--i;
			lo = decode_compactedbits(lclusterbits, lomask,
						  in, encodebits * i, &type);
			if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
				if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT) {
					--i;
					nblk += lo & ~Z_EROFS_VLE_DI_D0_CBLKCNT;
					continue;
				}
				if (lo == 1) {
					DBG_BUGON(1);
					/* --i; ++nblk; continue; */
					return -EFSCORRUPTED;
				}
				i -= lo - 2;
				continue;
			}
			++nblk;
		}
	}
	in += (vcnt << amortizedshift) - sizeof(__le32);
	m->pblk = le32_to_cpu(*(__le32 *)in) + nblk;
	return 0;
}

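/*
 * Locate the compacted index of logical cluster @lcn: the index area starts
 * with compacted_4b_initial entries (to reach 32-byte alignment), optionally
 * followed by compacted_2b packs, then compacted_4b entries again.
 */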
static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					    unsigned long lcn, bool lookahead)
{
	struct erofs_inode *const vi = m->inode;
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	const erofs_off_t ebase = round_up(iloc(vi->nid) + vi->inode_isize +
					   vi->xattr_isize, 8) +
		sizeof(struct z_erofs_map_header);
	const unsigned int totalidx = DIV_ROUND_UP(vi->i_size, EROFS_BLKSIZ);
	unsigned int compacted_4b_initial, compacted_2b;
	unsigned int amortizedshift;
	erofs_off_t pos;
	int err;

	if (lclusterbits != 12)
		return -EOPNOTSUPP;

	if (lcn >= totalidx)
		return -EINVAL;

	m->lcn = lcn;
	/* initial 4B entries are used to align to 32-byte (compacted_2b) boundaries */
	compacted_4b_initial = (32 - ebase % 32) / 4;
	if (compacted_4b_initial == 32 / 4)
		compacted_4b_initial = 0;

	if (vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B)
		compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
	else
		compacted_2b = 0;

	pos = ebase;
	if (lcn < compacted_4b_initial) {
		amortizedshift = 2;
		goto out;
	}
	pos += compacted_4b_initial * 4;
	lcn -= compacted_4b_initial;

	if (lcn < compacted_2b) {
		amortizedshift = 1;
		goto out;
	}
	pos += compacted_2b * 2;
	lcn -= compacted_2b;
	amortizedshift = 2;
out:
	pos += lcn * (1 << amortizedshift);
	err = z_erofs_reload_indexes(m, erofs_blknr(pos));
	if (err)
		return err;
	return unpack_compacted_index(m, amortizedshift, erofs_blkoff(pos),
				      lookahead);
}

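/* dispatch to the legacy or compacted index parser based on the data layout */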
static int z_erofs_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					  unsigned int lcn, bool lookahead)
{
	const unsigned int datamode = m->inode->datalayout;

	if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY)
		return legacy_load_cluster_from_disk(m, lcn);

	if (datamode == EROFS_INODE_FLAT_COMPRESSION)
		return compacted_load_cluster_from_disk(m, lcn, lookahead);

	return -EINVAL;
}

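/*
 * Walk back @lookback_distance lclusters (recursively, following delta[0])
 * until the HEAD lcluster of the current extent is found.
 */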
static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
				   unsigned int lookback_distance)
{
	struct erofs_inode *const vi = m->inode;
	struct erofs_map_blocks *const map = m->map;
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	unsigned long lcn = m->lcn;
	int err;

	if (lcn < lookback_distance) {
		erofs_err("bogus lookback distance @ nid %llu",
			  (unsigned long long)vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	/* load extent head logical cluster if needed */
	lcn -= lookback_distance;
	err = z_erofs_load_cluster_from_disk(m, lcn, false);
	if (err)
		return err;

	switch (m->type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		if (!m->delta[0]) {
			erofs_err("invalid lookback distance 0 @ nid %llu",
				  (unsigned long long)vi->nid);
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		return z_erofs_extent_lookback(m, m->delta[0]);
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
		m->headtype = m->type;
		map->m_la = (lcn << lclusterbits) | m->clusterofs;
		break;
	default:
		erofs_err("unknown type %u @ lcn %lu of nid %llu",
			  m->type, lcn, (unsigned long long)vi->nid);
		DBG_BUGON(1);
		return -EOPNOTSUPP;
	}
	return 0;
}

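/*
 * Determine the compressed length (m_plen) of the extent: a single lcluster
 * unless big pclusters are enabled and the head is not PLAIN, in which case
 * the CBLKCNT recorded in the following NONHEAD lcluster gives the pcluster
 * size in lclusters.
 */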
static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
					    unsigned int initial_lcn)
{
	struct erofs_inode *const vi = m->inode;
	struct erofs_map_blocks *const map = m->map;
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	unsigned long lcn;
	int err;

	DBG_BUGON(m->type != Z_EROFS_VLE_CLUSTER_TYPE_PLAIN &&
		  m->type != Z_EROFS_VLE_CLUSTER_TYPE_HEAD);
	if (m->headtype == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN ||
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) {
		map->m_plen = 1 << lclusterbits;
		return 0;
	}

	lcn = m->lcn + 1;
	if (m->compressedlcs)
		goto out;

	err = z_erofs_load_cluster_from_disk(m, lcn, false);
	if (err)
		return err;

	/*
	 * If the 1st NONHEAD lcluster has already been handled initially
	 * without a valid compressedlcs, it cannot be a CBLKCNT lcluster;
	 * otherwise an internal implementation error has been detected.
	 *
	 * The following code can handle it properly anyway, but let's
	 * BUG_ON in the debugging mode only for developers to notice that.
	 */
	DBG_BUGON(lcn == initial_lcn &&
		  m->type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD);

	switch (m->type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
		/*
		 * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
		 * rather than CBLKCNT, it's a 1 lcluster-sized pcluster.
		 */
		m->compressedlcs = 1;
		break;
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		if (m->delta[0] != 1)
			goto err_bonus_cblkcnt;
		if (m->compressedlcs)
			break;
		/* fallthrough */
	default:
		erofs_err("cannot find CBLKCNT @ lcn %lu of nid %llu",
			  lcn, vi->nid | 0ULL);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}
out:
	map->m_plen = m->compressedlcs << lclusterbits;
	return 0;
err_bonus_cblkcnt:
	erofs_err("bogus CBLKCNT @ lcn %lu of nid %llu",
		  lcn, vi->nid | 0ULL);
	DBG_BUGON(1);
	return -EFSCORRUPTED;
}

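/*
 * Extend the decompressed length (m_llen) forward to the next HEAD lcluster
 * (or EOF); only needed for FIEMAP-style full extent queries.
 */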
static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
{
	struct erofs_inode *const vi = m->inode;
	struct erofs_map_blocks *map = m->map;
	unsigned int lclusterbits = vi->z_logical_clusterbits;
	u64 lcn = m->lcn, headlcn = map->m_la >> lclusterbits;
	int err;

	do {
		/* handle the last EOF pcluster (no next HEAD lcluster) */
		if ((lcn << lclusterbits) >= vi->i_size) {
			map->m_llen = vi->i_size - map->m_la;
			return 0;
		}

		err = z_erofs_load_cluster_from_disk(m, lcn, true);
		if (err)
			return err;

		if (m->type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
			DBG_BUGON(!m->delta[1] &&
				  m->clusterofs != 1 << lclusterbits);
		} else if (m->type == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN ||
			   m->type == Z_EROFS_VLE_CLUSTER_TYPE_HEAD) {
			/* go on until the next HEAD lcluster */
			if (lcn != headlcn)
				break;
			m->delta[1] = 1;
		} else {
			erofs_err("unknown type %u @ lcn %llu of nid %llu",
				  m->type, lcn | 0ULL,
				  (unsigned long long)vi->nid);
			DBG_BUGON(1);
			return -EOPNOTSUPP;
		}
		lcn += m->delta[1];
	} while (m->delta[1]);

	map->m_llen = (lcn << lclusterbits) + m->clusterofs - map->m_la;
	return 0;
}

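/*
 * Map the logical offset map->m_la of a compressed inode to its physical
 * extent (m_pa/m_plen) and logical extent (m_la/m_llen), plus mapping flags.
 */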
int z_erofs_map_blocks_iter(struct erofs_inode *vi,
			    struct erofs_map_blocks *map,
			    int flags)
{
	struct z_erofs_maprecorder m = {
		.inode = vi,
		.map = map,
		.kaddr = map->mpage,
	};
	int err = 0;
	unsigned int lclusterbits, endoff;
	unsigned long initial_lcn;
	unsigned long long ofs, end;

	/* when trying to read beyond EOF, leave it unmapped */
	if (map->m_la >= vi->i_size) {
		map->m_llen = map->m_la + 1 - vi->i_size;
		map->m_la = vi->i_size;
		map->m_flags = 0;
		goto out;
	}

	err = z_erofs_fill_inode_lazy(vi);
	if (err)
		goto out;

	lclusterbits = vi->z_logical_clusterbits;
	ofs = map->m_la;
	initial_lcn = ofs >> lclusterbits;
	endoff = ofs & ((1 << lclusterbits) - 1);

	err = z_erofs_load_cluster_from_disk(&m, initial_lcn, false);
	if (err)
		goto out;

	map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_ENCODED;
	end = (m.lcn + 1ULL) << lclusterbits;
	switch (m.type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
		if (endoff >= m.clusterofs) {
			m.headtype = m.type;
			map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
			break;
		}
		/* m.lcn should be >= 1 if endoff < m.clusterofs */
		if (!m.lcn) {
			erofs_err("invalid logical cluster 0 at nid %llu",
				  (unsigned long long)vi->nid);
			err = -EFSCORRUPTED;
			goto out;
		}
		end = (m.lcn << lclusterbits) | m.clusterofs;
		map->m_flags |= EROFS_MAP_FULL_MAPPED;
		m.delta[0] = 1;
		/* fallthrough */
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		/* get the corresponding first chunk */
		err = z_erofs_extent_lookback(&m, m.delta[0]);
		if (err)
			goto out;
		break;
	default:
		erofs_err("unknown type %u @ offset %llu of nid %llu",
			  m.type, ofs, (unsigned long long)vi->nid);
		err = -EOPNOTSUPP;
		goto out;
	}

	map->m_llen = end - map->m_la;
	map->m_pa = blknr_to_addr(m.pblk);

	err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
	if (err)
		goto out;

	if (m.headtype == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN)
		map->m_algorithmformat = Z_EROFS_COMPRESSION_SHIFTED;
	else
		map->m_algorithmformat = vi->z_algorithmtype[0];

	if (flags & EROFS_GET_BLOCKS_FIEMAP) {
		err = z_erofs_get_extent_decompressedlen(&m);
		if (!err)
			map->m_flags |= EROFS_MAP_FULL_MAPPED;
	}

out:
	erofs_dbg("m_la %" PRIu64 " m_pa %" PRIu64 " m_llen %" PRIu64 " m_plen %" PRIu64 " m_flags 0%o",
		  map->m_la, map->m_pa,
		  map->m_llen, map->m_plen, map->m_flags);

	DBG_BUGON(err < 0 && err != -ENOMEM);
	return err;
}