1 // SPDX-License-Identifier: GPL-2.0+ OR Apache-2.0
2 /*
3 * Copyright (C) 2018-2019 HUAWEI, Inc.
4 * http://www.huawei.com/
5 * Created by Miao Xie <miaoxie@huawei.com>
6 * with heavy changes by Gao Xiang <gaoxiang25@huawei.com>
7 */
8 #ifndef _LARGEFILE64_SOURCE
9 #define _LARGEFILE64_SOURCE
10 #endif
11 #include <string.h>
12 #include <stdlib.h>
13 #include <unistd.h>
14 #include "erofs/print.h"
15 #include "erofs/io.h"
16 #include "erofs/cache.h"
17 #include "erofs/compress.h"
18 #include "erofs/dedupe.h"
19 #include "compressor.h"
20 #include "erofs/block_list.h"
21 #include "erofs/compress_hints.h"
22 #include "erofs/fragments.h"
23
24 /* compressing configuration specified by users */
25 struct erofs_compress_cfg {
26 struct erofs_compress handle;
27 unsigned int algorithmtype;
28 bool enable;
29 } erofs_ccfg[EROFS_MAX_COMPR_CFGS];
30
31 struct z_erofs_vle_compress_ctx {
32 u8 queue[EROFS_CONFIG_COMPR_MAX_SZ * 2];
33 struct z_erofs_inmem_extent e; /* (lookahead) extent */
34
35 struct erofs_inode *inode;
36 struct erofs_compress_cfg *ccfg;
37
38 u8 *metacur;
39 unsigned int head, tail;
40 erofs_off_t remaining;
41 unsigned int pclustersize;
42 erofs_blk_t blkaddr; /* pointing to the next blkaddr */
43 u16 clusterofs;
44
45 u32 tof_chksum;
46 bool fix_dedupedfrag;
47 bool fragemitted;
48 };
49
50 #define Z_EROFS_LEGACY_MAP_HEADER_SIZE Z_EROFS_FULL_INDEX_ALIGN(0)
51
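/*
 * Append the trailing lcluster index: if the last extent stops in the
 * middle of a block (non-zero clusterofs), one extra PLAIN index is
 * emitted so that the tail lcluster can still be mapped.
 */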
52 static void z_erofs_write_indexes_final(struct z_erofs_vle_compress_ctx *ctx)
53 {
54 const unsigned int type = Z_EROFS_LCLUSTER_TYPE_PLAIN;
55 struct z_erofs_lcluster_index di;
56
57 if (!ctx->clusterofs)
58 return;
59
60 di.di_clusterofs = cpu_to_le16(ctx->clusterofs);
61 di.di_u.blkaddr = 0;
62 di.di_advise = cpu_to_le16(type << Z_EROFS_LI_LCLUSTER_TYPE_BIT);
63
64 memcpy(ctx->metacur, &di, sizeof(di));
65 ctx->metacur += sizeof(di);
66 }
67
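/*
 * Flush the current in-memory extent (ctx->e) as full lcluster indices:
 * the first lcluster gets a PLAIN/HEAD1 index and any following
 * lclusters get NONHEAD indices carrying delta[0]/delta[1] (delta[0]
 * holds CBLKCNT for big pclusters).
 */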
68 static void z_erofs_write_indexes(struct z_erofs_vle_compress_ctx *ctx)
69 {
70 struct erofs_inode *inode = ctx->inode;
71 struct erofs_sb_info *sbi = inode->sbi;
72 unsigned int clusterofs = ctx->clusterofs;
73 unsigned int count = ctx->e.length;
74 unsigned int d0 = 0, d1 = (clusterofs + count) / erofs_blksiz(sbi);
75 struct z_erofs_lcluster_index di;
76 unsigned int type, advise;
77
78 if (!count)
79 return;
80
81 ctx->e.length = 0; /* mark as written first */
82 di.di_clusterofs = cpu_to_le16(ctx->clusterofs);
83
84 /* whether this is the tail-end (un)compressed block or not */
85 if (!d1) {
86 /*
87 * An lcluster cannot consist of three parts with a
88 * well-compressed middle part for !ztailpacking cases.
89 */
90 DBG_BUGON(!ctx->e.raw && !cfg.c_ztailpacking && !cfg.c_fragments);
91 DBG_BUGON(ctx->e.partial);
92 type = ctx->e.raw ? Z_EROFS_LCLUSTER_TYPE_PLAIN :
93 Z_EROFS_LCLUSTER_TYPE_HEAD1;
94 advise = type << Z_EROFS_LI_LCLUSTER_TYPE_BIT;
95 di.di_advise = cpu_to_le16(advise);
96
97 if (inode->datalayout == EROFS_INODE_COMPRESSED_FULL &&
98 !ctx->e.compressedblks)
99 di.di_u.blkaddr = cpu_to_le32(inode->fragmentoff >> 32);
100 else
101 di.di_u.blkaddr = cpu_to_le32(ctx->e.blkaddr);
102 memcpy(ctx->metacur, &di, sizeof(di));
103 ctx->metacur += sizeof(di);
104
105 /* don't add the final index if the tail-end block exists */
106 ctx->clusterofs = 0;
107 return;
108 }
109
110 do {
111 advise = 0;
112 /* XXX: big pcluster feature should be per-inode */
113 if (d0 == 1 && erofs_sb_has_big_pcluster(sbi)) {
114 type = Z_EROFS_LCLUSTER_TYPE_NONHEAD;
115 di.di_u.delta[0] = cpu_to_le16(ctx->e.compressedblks |
116 Z_EROFS_LI_D0_CBLKCNT);
117 di.di_u.delta[1] = cpu_to_le16(d1);
118 } else if (d0) {
119 type = Z_EROFS_LCLUSTER_TYPE_NONHEAD;
120
121 /*
122 * If the |Z_EROFS_LI_D0_CBLKCNT| bit is set, the parser
123 * will interpret |delta[0]| as the pcluster size rather
124 * than the distance to the last head cluster. Normally
125 * this isn't a problem, since uncompressed extent sizes
126 * are below Z_EROFS_LI_D0_CBLKCNT * BLOCK_SIZE = 8MB.
127 * But with big pcluster it's possible to go over this
128 * limit, resulting in corrupted compressed indices.
129 * To solve this, we replace d0 with
130 * Z_EROFS_LI_D0_CBLKCNT - 1.
131 */
132 if (d0 >= Z_EROFS_LI_D0_CBLKCNT)
133 di.di_u.delta[0] = cpu_to_le16(
134 Z_EROFS_LI_D0_CBLKCNT - 1);
135 else
136 di.di_u.delta[0] = cpu_to_le16(d0);
137 di.di_u.delta[1] = cpu_to_le16(d1);
138 } else {
139 type = ctx->e.raw ? Z_EROFS_LCLUSTER_TYPE_PLAIN :
140 Z_EROFS_LCLUSTER_TYPE_HEAD1;
141
142 if (inode->datalayout == EROFS_INODE_COMPRESSED_FULL &&
143 !ctx->e.compressedblks)
144 di.di_u.blkaddr = cpu_to_le32(inode->fragmentoff >> 32);
145 else
146 di.di_u.blkaddr = cpu_to_le32(ctx->e.blkaddr);
147
148 if (ctx->e.partial) {
149 DBG_BUGON(ctx->e.raw);
150 advise |= Z_EROFS_LI_PARTIAL_REF;
151 }
152 }
153 advise |= type << Z_EROFS_LI_LCLUSTER_TYPE_BIT;
154 di.di_advise = cpu_to_le16(advise);
155
156 memcpy(ctx->metacur, &di, sizeof(di));
157 ctx->metacur += sizeof(di);
158
159 count -= erofs_blksiz(sbi) - clusterofs;
160 clusterofs = 0;
161
162 ++d0;
163 --d1;
164 } while (clusterofs + count >= erofs_blksiz(sbi));
165
166 ctx->clusterofs = clusterofs + count;
167 }
168
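/*
 * Try to deduplicate the data queued at ctx->head against extents that
 * were written earlier.  On a match, the previous extent may be
 * shortened (marked partial), the matched extent is reused and the
 * inode falls back to full (non-compact) indexes; -EAGAIN is returned
 * once the queue has been shifted and compression needs to restart.
 */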
169 static int z_erofs_compress_dedupe(struct z_erofs_vle_compress_ctx *ctx,
170 unsigned int *len)
171 {
172 struct erofs_inode *inode = ctx->inode;
173 const unsigned int lclustermask = (1 << inode->z_logical_clusterbits) - 1;
174 struct erofs_sb_info *sbi = inode->sbi;
175 int ret = 0;
176
177 /*
178 * No need to dedupe the packed inode since it is composed of
179 * fragments which have already been deduplicated.
180 */
181 if (erofs_is_packed_inode(inode))
182 goto out;
183
184 do {
185 struct z_erofs_dedupe_ctx dctx = {
186 .start = ctx->queue + ctx->head - ({ int rc;
187 if (ctx->e.length <= erofs_blksiz(sbi))
188 rc = 0;
189 else if (ctx->e.length - erofs_blksiz(sbi) >= ctx->head)
190 rc = ctx->head;
191 else
192 rc = ctx->e.length - erofs_blksiz(sbi);
193 rc; }),
194 .end = ctx->queue + ctx->head + *len,
195 .cur = ctx->queue + ctx->head,
196 };
197 int delta;
198
199 if (z_erofs_dedupe_match(&dctx))
200 break;
201
202 delta = ctx->queue + ctx->head - dctx.cur;
203 /*
204 * For big pcluster dedupe, leave at least two indices to store
205 * CBLKCNT as the first step. Even later, a one-block
206 * decompression could be done as another try in practice.
207 */
208 if (dctx.e.compressedblks > 1 &&
209 ((ctx->clusterofs + ctx->e.length - delta) & lclustermask) +
210 dctx.e.length < 2 * (lclustermask + 1))
211 break;
212
213 if (delta) {
214 DBG_BUGON(delta < 0);
215 DBG_BUGON(!ctx->e.length);
216
217 /*
218 * For big pcluster dedupe, if we decide to shorten the
219 * previous big pcluster, make sure that the previous
220 * CBLKCNT is still kept.
221 */
222 if (ctx->e.compressedblks > 1 &&
223 (ctx->clusterofs & lclustermask) + ctx->e.length
224 - delta < 2 * (lclustermask + 1))
225 break;
226 ctx->e.partial = true;
227 ctx->e.length -= delta;
228 }
229
230 /* fall back to noncompact indexes for deduplication */
231 inode->z_advise &= ~Z_EROFS_ADVISE_COMPACTED_2B;
232 inode->datalayout = EROFS_INODE_COMPRESSED_FULL;
233 erofs_sb_set_dedupe(sbi);
234
235 sbi->saved_by_deduplication +=
236 dctx.e.compressedblks * erofs_blksiz(sbi);
237 erofs_dbg("Dedupe %u %scompressed data (delta %d) to %u of %u blocks",
238 dctx.e.length, dctx.e.raw ? "un" : "",
239 delta, dctx.e.blkaddr, dctx.e.compressedblks);
240 z_erofs_write_indexes(ctx);
241 ctx->e = dctx.e;
242 ctx->head += dctx.e.length - delta;
243 DBG_BUGON(*len < dctx.e.length - delta);
244 *len -= dctx.e.length - delta;
245
246 if (ctx->head >= EROFS_CONFIG_COMPR_MAX_SZ) {
247 const unsigned int qh_aligned =
248 round_down(ctx->head, erofs_blksiz(sbi));
249 const unsigned int qh_after = ctx->head - qh_aligned;
250
251 memmove(ctx->queue, ctx->queue + qh_aligned,
252 *len + qh_after);
253 ctx->head = qh_after;
254 ctx->tail = qh_after + *len;
255 ret = -EAGAIN;
256 break;
257 }
258 } while (*len);
259
260 out:
261 z_erofs_write_indexes(ctx);
262 return ret;
263 }
264
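/*
 * Store one block of uncompressed data; for interlaced pclusters the
 * data is rotated by clusterofs within the block.  Returns the number
 * of bytes (at most one block) actually consumed.
 */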
265 static int write_uncompressed_extent(struct z_erofs_vle_compress_ctx *ctx,
266 unsigned int *len, char *dst)
267 {
268 int ret;
269 struct erofs_sb_info *sbi = ctx->inode->sbi;
270 unsigned int count, interlaced_offset, rightpart;
271
272 /* reset clusterofs to 0 if permitted */
273 if (!erofs_sb_has_lz4_0padding(sbi) && ctx->clusterofs &&
274 ctx->head >= ctx->clusterofs) {
275 ctx->head -= ctx->clusterofs;
276 *len += ctx->clusterofs;
277 ctx->clusterofs = 0;
278 }
279
280 count = min(erofs_blksiz(sbi), *len);
281
282 /* write interlaced uncompressed data if needed */
283 if (ctx->inode->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER)
284 interlaced_offset = ctx->clusterofs;
285 else
286 interlaced_offset = 0;
287 rightpart = min(erofs_blksiz(sbi) - interlaced_offset, count);
288
289 memset(dst, 0, erofs_blksiz(sbi));
290
291 memcpy(dst + interlaced_offset, ctx->queue + ctx->head, rightpart);
292 memcpy(dst, ctx->queue + ctx->head + rightpart, count - rightpart);
293
294 erofs_dbg("Writing %u uncompressed data to block %u",
295 count, ctx->blkaddr);
296 ret = blk_write(sbi, dst, ctx->blkaddr, 1);
297 if (ret)
298 return ret;
299 return count;
300 }
301
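/*
 * Decide the maximum physical cluster size (in bytes) for this inode,
 * considering the packed inode setting, random testing, compress hints
 * and the global default, in that order.
 */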
302 static unsigned int z_erofs_get_max_pclustersize(struct erofs_inode *inode)
303 {
304 unsigned int pclusterblks;
305
306 if (erofs_is_packed_inode(inode))
307 pclusterblks = cfg.c_pclusterblks_packed;
308 #ifndef NDEBUG
309 else if (cfg.c_random_pclusterblks)
310 pclusterblks = 1 + rand() % cfg.c_pclusterblks_max;
311 #endif
312 else if (cfg.c_compress_hints_file) {
313 z_erofs_apply_compress_hints(inode);
314 DBG_BUGON(!inode->z_physical_clusterblks);
315 pclusterblks = inode->z_physical_clusterblks;
316 } else {
317 pclusterblks = cfg.c_pclusterblks_def;
318 }
319 return pclusterblks * erofs_blksiz(inode->sbi);
320 }
321
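/*
 * Record tail data to be inlined into the on-disk inode (ztailpacking);
 * the actual write is performed later together with the inode itself.
 */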
322 static int z_erofs_fill_inline_data(struct erofs_inode *inode, void *data,
323 unsigned int len, bool raw)
324 {
325 inode->z_advise |= Z_EROFS_ADVISE_INLINE_PCLUSTER;
326 inode->idata_size = len;
327 inode->compressed_idata = !raw;
328
329 inode->idata = malloc(inode->idata_size);
330 if (!inode->idata)
331 return -ENOMEM;
332 erofs_dbg("Recording %u %scompressed inline data",
333 inode->idata_size, raw ? "un" : "");
334 memcpy(inode->idata, data, inode->idata_size);
335 return len;
336 }
337
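/*
 * If the compressed result does not end on a block boundary, try
 * recompressing slightly less input into a block-aligned size; the
 * original output is kept unless this actually saves space.
 */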
338 static void tryrecompress_trailing(struct z_erofs_vle_compress_ctx *ctx,
339 struct erofs_compress *ec,
340 void *in, unsigned int *insize,
341 void *out, int *compressedsize)
342 {
343 struct erofs_sb_info *sbi = ctx->inode->sbi;
344 static char tmp[Z_EROFS_PCLUSTER_MAX_SIZE];
345 unsigned int count;
346 int ret = *compressedsize;
347
348 /* no need to recompress */
349 if (!(ret & (erofs_blksiz(sbi) - 1)))
350 return;
351
352 count = *insize;
353 ret = erofs_compress_destsize(ec, in, &count, (void *)tmp,
354 rounddown(ret, erofs_blksiz(sbi)), false);
355 if (ret <= 0 || ret + (*insize - count) >=
356 roundup(*compressedsize, erofs_blksiz(sbi)))
357 return;
358
359 /* replace the original compressed data if any gain */
360 memcpy(out, tmp, ret);
361 *insize = count;
362 *compressedsize = ret;
363 }
364
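/*
 * Shrink the deduplicated fragment so that it only covers data which
 * has not been emitted as regular extents; returns false if the
 * remaining data no longer fits and another round is needed.
 */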
365 static bool z_erofs_fixup_deduped_fragment(struct z_erofs_vle_compress_ctx *ctx,
366 unsigned int len)
367 {
368 struct erofs_inode *inode = ctx->inode;
369 struct erofs_sb_info *sbi = inode->sbi;
370 const unsigned int newsize = ctx->remaining + len;
371
372 DBG_BUGON(!inode->fragment_size);
373
374 /* try to fix again if it gets larger (should be rare) */
375 if (inode->fragment_size < newsize) {
376 ctx->pclustersize = min(z_erofs_get_max_pclustersize(inode),
377 roundup(newsize - inode->fragment_size,
378 erofs_blksiz(sbi)));
379 return false;
380 }
381
382 inode->fragmentoff += inode->fragment_size - newsize;
383 inode->fragment_size = newsize;
384
385 erofs_dbg("Reducing fragment size to %u at %llu",
386 inode->fragment_size, inode->fragmentoff | 0ULL);
387
388 /* it's the end */
389 DBG_BUGON(ctx->tail - ctx->head + ctx->remaining != newsize);
390 ctx->head = ctx->tail;
391 ctx->remaining = 0;
392 return true;
393 }
394
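/*
 * The main per-queue compression loop: each round tries deduplication
 * first and then either compresses one pcluster, falls back to an
 * uncompressed block, inlines the tail (ztailpacking) or packs it into
 * the fragment area of the packed inode.
 */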
395 static int vle_compress_one(struct z_erofs_vle_compress_ctx *ctx)
396 {
397 static char dstbuf[EROFS_CONFIG_COMPR_MAX_SZ + EROFS_MAX_BLOCK_SIZE];
398 struct erofs_inode *inode = ctx->inode;
399 struct erofs_sb_info *sbi = inode->sbi;
400 char *const dst = dstbuf + erofs_blksiz(sbi);
401 struct erofs_compress *const h = &ctx->ccfg->handle;
402 unsigned int len = ctx->tail - ctx->head;
403 bool is_packed_inode = erofs_is_packed_inode(inode);
404 bool final = !ctx->remaining;
405 int ret;
406
407 while (len) {
408 bool may_packing = (cfg.c_fragments && final &&
409 !is_packed_inode);
410 bool may_inline = (cfg.c_ztailpacking && final &&
411 !may_packing);
412 bool fix_dedupedfrag = ctx->fix_dedupedfrag;
413
414 if (z_erofs_compress_dedupe(ctx, &len) && !final)
415 break;
416
417 if (len <= ctx->pclustersize) {
418 if (!final || !len)
419 break;
420 if (may_packing) {
421 if (inode->fragment_size && !fix_dedupedfrag) {
422 ctx->pclustersize =
423 roundup(len, erofs_blksiz(sbi));
424 goto fix_dedupedfrag;
425 }
426 ctx->e.length = len;
427 goto frag_packing;
428 }
429 if (!may_inline && len <= erofs_blksiz(sbi))
430 goto nocompression;
431 }
432
433 ctx->e.length = min(len,
434 cfg.c_max_decompressed_extent_bytes);
435 ret = erofs_compress_destsize(h, ctx->queue + ctx->head,
436 &ctx->e.length, dst, ctx->pclustersize,
437 !(final && len == ctx->e.length));
438 if (ret <= 0) {
439 if (ret != -EAGAIN) {
440 erofs_err("failed to compress %s: %s",
441 inode->i_srcpath,
442 erofs_strerror(ret));
443 }
444
445 if (may_inline && len < erofs_blksiz(sbi)) {
446 ret = z_erofs_fill_inline_data(inode,
447 ctx->queue + ctx->head,
448 len, true);
449 } else {
450 may_inline = false;
451 may_packing = false;
452 nocompression:
453 ret = write_uncompressed_extent(ctx, &len, dst);
454 }
455
456 if (ret < 0)
457 return ret;
458 ctx->e.length = ret;
459
460 /*
461 * XXX: For now, we have to leave `ctx->e.compressedblks
462 * = 1' since there is no way to generate compressed
463 * indexes once ztailpacking has been decided.
464 */
465 ctx->e.compressedblks = 1;
466 ctx->e.raw = true;
467 } else if (may_packing && len == ctx->e.length &&
468 ret < ctx->pclustersize &&
469 (!inode->fragment_size || fix_dedupedfrag)) {
470 frag_packing:
471 ret = z_erofs_pack_fragments(inode,
472 ctx->queue + ctx->head,
473 len, ctx->tof_chksum);
474 if (ret < 0)
475 return ret;
476 ctx->e.compressedblks = 0; /* indicate a fragment */
477 ctx->e.raw = false;
478 ctx->fragemitted = true;
479 fix_dedupedfrag = false;
480 /* tailpcluster should be less than 1 block */
481 } else if (may_inline && len == ctx->e.length &&
482 ret < erofs_blksiz(sbi)) {
483 if (ctx->clusterofs + len <= erofs_blksiz(sbi)) {
484 inode->eof_tailraw = malloc(len);
485 if (!inode->eof_tailraw)
486 return -ENOMEM;
487
488 memcpy(inode->eof_tailraw,
489 ctx->queue + ctx->head, len);
490 inode->eof_tailrawsize = len;
491 }
492
493 ret = z_erofs_fill_inline_data(inode, dst, ret, false);
494 if (ret < 0)
495 return ret;
496 ctx->e.compressedblks = 1;
497 ctx->e.raw = false;
498 } else {
499 unsigned int tailused, padding;
500
501 /*
502 * If there's space left for the last round when
503 * deduping fragments, try to read the fragment and
504 * recompress a little more to check whether it can be
505 * filled up. Fix up the fragment if this succeeds.
506 * Otherwise, just drop it and go to packing.
507 */
508 if (may_packing && len == ctx->e.length &&
509 (ret & (erofs_blksiz(sbi) - 1)) &&
510 ctx->tail < sizeof(ctx->queue)) {
511 ctx->pclustersize = BLK_ROUND_UP(sbi, ret) *
512 erofs_blksiz(sbi);
513 goto fix_dedupedfrag;
514 }
515
516 if (may_inline && len == ctx->e.length)
517 tryrecompress_trailing(ctx, h,
518 ctx->queue + ctx->head,
519 &ctx->e.length, dst, &ret);
520
521 tailused = ret & (erofs_blksiz(sbi) - 1);
522 padding = 0;
523 ctx->e.compressedblks = BLK_ROUND_UP(sbi, ret);
524 DBG_BUGON(ctx->e.compressedblks * erofs_blksiz(sbi) >=
525 ctx->e.length);
526
527 /* zero out garbage trailing data for non-0padding */
528 if (!erofs_sb_has_lz4_0padding(sbi))
529 memset(dst + ret, 0,
530 roundup(ret, erofs_blksiz(sbi)) - ret);
531 else if (tailused)
532 padding = erofs_blksiz(sbi) - tailused;
533
534 /* write compressed data */
535 erofs_dbg("Writing %u compressed data to %u of %u blocks",
536 ctx->e.length, ctx->blkaddr,
537 ctx->e.compressedblks);
538
539 ret = blk_write(sbi, dst - padding, ctx->blkaddr,
540 ctx->e.compressedblks);
541 if (ret)
542 return ret;
543 ctx->e.raw = false;
544 may_inline = false;
545 may_packing = false;
546 }
547 ctx->e.partial = false;
548 ctx->e.blkaddr = ctx->blkaddr;
549 if (!may_inline && !may_packing && !is_packed_inode)
550 (void)z_erofs_dedupe_insert(&ctx->e,
551 ctx->queue + ctx->head);
552 ctx->blkaddr += ctx->e.compressedblks;
553 ctx->head += ctx->e.length;
554 len -= ctx->e.length;
555
556 if (fix_dedupedfrag &&
557 z_erofs_fixup_deduped_fragment(ctx, len))
558 break;
559
560 if (!final && ctx->head >= EROFS_CONFIG_COMPR_MAX_SZ) {
561 const unsigned int qh_aligned =
562 round_down(ctx->head, erofs_blksiz(sbi));
563 const unsigned int qh_after = ctx->head - qh_aligned;
564
565 memmove(ctx->queue, ctx->queue + qh_aligned,
566 len + qh_after);
567 ctx->head = qh_after;
568 ctx->tail = qh_after + len;
569 break;
570 }
571 }
572 return 0;
573
574 fix_dedupedfrag:
575 DBG_BUGON(!inode->fragment_size);
576 ctx->remaining += inode->fragment_size;
577 ctx->e.length = 0;
578 ctx->fix_dedupedfrag = true;
579 return 0;
580 }
581
582 struct z_erofs_compressindex_vec {
583 union {
584 erofs_blk_t blkaddr;
585 u16 delta[2];
586 } u;
587 u16 clusterofs;
588 u8 clustertype;
589 };
590
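/*
 * Decode @nr on-disk full lcluster indices into the in-memory vector
 * used for conversion to the compact format.
 */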
591 static void *parse_legacy_indexes(struct z_erofs_compressindex_vec *cv,
592 unsigned int nr, void *metacur)
593 {
594 struct z_erofs_lcluster_index *const db = metacur;
595 unsigned int i;
596
597 for (i = 0; i < nr; ++i, ++cv) {
598 struct z_erofs_lcluster_index *const di = db + i;
599 const unsigned int advise = le16_to_cpu(di->di_advise);
600
601 cv->clustertype = (advise >> Z_EROFS_LI_LCLUSTER_TYPE_BIT) &
602 ((1 << Z_EROFS_LI_LCLUSTER_TYPE_BITS) - 1);
603 cv->clusterofs = le16_to_cpu(di->di_clusterofs);
604
605 if (cv->clustertype == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
606 cv->u.delta[0] = le16_to_cpu(di->di_u.delta[0]);
607 cv->u.delta[1] = le16_to_cpu(di->di_u.delta[1]);
608 } else {
609 cv->u.blkaddr = le32_to_cpu(di->di_u.blkaddr);
610 }
611 }
612 return db + nr;
613 }
614
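/*
 * Encode one pack of lcluster indices in the compact on-disk layout:
 * vcnt entries of encodebits bits each, followed by the 32-bit base
 * blkaddr at the end of the pack.
 */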
615 static void *write_compacted_indexes(u8 *out,
616 struct z_erofs_compressindex_vec *cv,
617 erofs_blk_t *blkaddr_ret,
618 unsigned int destsize,
619 unsigned int logical_clusterbits,
620 bool final, bool *dummy_head,
621 bool update_blkaddr)
622 {
623 unsigned int vcnt, encodebits, pos, i, cblks;
624 erofs_blk_t blkaddr;
625
626 if (destsize == 4)
627 vcnt = 2;
628 else if (destsize == 2 && logical_clusterbits == 12)
629 vcnt = 16;
630 else
631 return ERR_PTR(-EINVAL);
632 encodebits = (vcnt * destsize * 8 - 32) / vcnt;
633 blkaddr = *blkaddr_ret;
634
635 pos = 0;
636 for (i = 0; i < vcnt; ++i) {
637 unsigned int offset, v;
638 u8 ch, rem;
639
640 if (cv[i].clustertype == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
641 if (cv[i].u.delta[0] & Z_EROFS_LI_D0_CBLKCNT) {
642 cblks = cv[i].u.delta[0] & ~Z_EROFS_LI_D0_CBLKCNT;
643 offset = cv[i].u.delta[0];
644 blkaddr += cblks;
645 *dummy_head = false;
646 } else if (i + 1 == vcnt) {
647 offset = min_t(u16, cv[i].u.delta[1],
648 (1 << logical_clusterbits) - 1);
649 } else {
650 offset = cv[i].u.delta[0];
651 }
652 } else {
653 offset = cv[i].clusterofs;
654 if (*dummy_head) {
655 ++blkaddr;
656 if (update_blkaddr)
657 *blkaddr_ret = blkaddr;
658 }
659 *dummy_head = true;
660 update_blkaddr = false;
661
662 if (cv[i].u.blkaddr != blkaddr) {
663 if (i + 1 != vcnt)
664 DBG_BUGON(!final);
665 DBG_BUGON(cv[i].u.blkaddr);
666 }
667 }
668 v = (cv[i].clustertype << logical_clusterbits) | offset;
669 rem = pos & 7;
670 ch = out[pos / 8] & ((1 << rem) - 1);
671 out[pos / 8] = (v << rem) | ch;
672 out[pos / 8 + 1] = v >> (8 - rem);
673 out[pos / 8 + 2] = v >> (16 - rem);
674 pos += encodebits;
675 }
676 DBG_BUGON(destsize * vcnt * 8 != pos + 32);
677 *(__le32 *)(out + destsize * vcnt - 4) = cpu_to_le32(*blkaddr_ret);
678 *blkaddr_ret = blkaddr;
679 return out + destsize * vcnt;
680 }
681
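/*
 * Convert the temporary full (legacy) index array into the compact
 * format in place: an optional unaligned leading run of 4B packs,
 * 2B packs if COMPACTED_2B is advised, and trailing 4B packs.
 */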
682 int z_erofs_convert_to_compacted_format(struct erofs_inode *inode,
683 erofs_blk_t blkaddr,
684 unsigned int legacymetasize,
685 void *compressmeta)
686 {
687 const unsigned int mpos = roundup(inode->inode_isize +
688 inode->xattr_isize, 8) +
689 sizeof(struct z_erofs_map_header);
690 const unsigned int totalidx = (legacymetasize -
691 Z_EROFS_LEGACY_MAP_HEADER_SIZE) /
692 sizeof(struct z_erofs_lcluster_index);
693 const unsigned int logical_clusterbits = inode->z_logical_clusterbits;
694 u8 *out, *in;
695 struct z_erofs_compressindex_vec cv[16];
696 struct erofs_sb_info *sbi = inode->sbi;
697 /* # of 8-byte units so that it can be aligned with 32 bytes */
698 unsigned int compacted_4b_initial, compacted_4b_end;
699 unsigned int compacted_2b;
700 bool dummy_head;
701 bool big_pcluster = erofs_sb_has_big_pcluster(sbi);
702
703 if (logical_clusterbits < sbi->blkszbits || sbi->blkszbits < 12)
704 return -EINVAL;
705 if (logical_clusterbits > 14) {
706 erofs_err("compact format is unsupported for lcluster size %u",
707 1 << logical_clusterbits);
708 return -EOPNOTSUPP;
709 }
710
711 if (inode->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) {
712 if (logical_clusterbits != 12) {
713 erofs_err("compact 2B is unsupported for lcluster size %u",
714 1 << logical_clusterbits);
715 return -EINVAL;
716 }
717
718 compacted_4b_initial = (32 - mpos % 32) / 4;
719 if (compacted_4b_initial == 32 / 4)
720 compacted_4b_initial = 0;
721
722 if (compacted_4b_initial > totalidx) {
723 compacted_4b_initial = compacted_2b = 0;
724 compacted_4b_end = totalidx;
725 } else {
726 compacted_2b = rounddown(totalidx -
727 compacted_4b_initial, 16);
728 compacted_4b_end = totalidx - compacted_4b_initial -
729 compacted_2b;
730 }
731 } else {
732 compacted_2b = compacted_4b_initial = 0;
733 compacted_4b_end = totalidx;
734 }
735
736 out = in = compressmeta;
737
738 out += sizeof(struct z_erofs_map_header);
739 in += Z_EROFS_LEGACY_MAP_HEADER_SIZE;
740
741 dummy_head = false;
742 /* prior to big pcluster, blkaddr was bumped up once upon entering HEAD */
743 if (!big_pcluster) {
744 --blkaddr;
745 dummy_head = true;
746 }
747
748 /* generate compacted_4b_initial */
749 while (compacted_4b_initial) {
750 in = parse_legacy_indexes(cv, 2, in);
751 out = write_compacted_indexes(out, cv, &blkaddr,
752 4, logical_clusterbits, false,
753 &dummy_head, big_pcluster);
754 compacted_4b_initial -= 2;
755 }
756 DBG_BUGON(compacted_4b_initial);
757
758 /* generate compacted_2b */
759 while (compacted_2b) {
760 in = parse_legacy_indexes(cv, 16, in);
761 out = write_compacted_indexes(out, cv, &blkaddr,
762 2, logical_clusterbits, false,
763 &dummy_head, big_pcluster);
764 compacted_2b -= 16;
765 }
766 DBG_BUGON(compacted_2b);
767
768 /* generate compacted_4b_end */
769 while (compacted_4b_end > 1) {
770 in = parse_legacy_indexes(cv, 2, in);
771 out = write_compacted_indexes(out, cv, &blkaddr,
772 4, logical_clusterbits, false,
773 &dummy_head, big_pcluster);
774 compacted_4b_end -= 2;
775 }
776
777 /* generate final compacted_4b_end if needed */
778 if (compacted_4b_end) {
779 memset(cv, 0, sizeof(cv));
780 in = parse_legacy_indexes(cv, 1, in);
781 out = write_compacted_indexes(out, cv, &blkaddr,
782 4, logical_clusterbits, true,
783 &dummy_head, big_pcluster);
784 }
785 inode->extent_isize = out - (u8 *)compressmeta;
786 return 0;
787 }
788
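/*
 * Fill in the z_erofs_map_header in front of the compression metadata;
 * it records the advise flags, the algorithms, the lcluster bits and
 * either the fragment offset or the inline (ztailpacking) data size.
 */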
789 static void z_erofs_write_mapheader(struct erofs_inode *inode,
790 void *compressmeta)
791 {
792 struct erofs_sb_info *sbi = inode->sbi;
793 struct z_erofs_map_header h = {
794 .h_advise = cpu_to_le16(inode->z_advise),
795 .h_algorithmtype = inode->z_algorithmtype[1] << 4 |
796 inode->z_algorithmtype[0],
797 /* lclustersize */
798 .h_clusterbits = inode->z_logical_clusterbits - sbi->blkszbits,
799 };
800
801 if (inode->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER)
802 h.h_fragmentoff = cpu_to_le32(inode->fragmentoff);
803 else
804 h.h_idata_size = cpu_to_le16(inode->idata_size);
805
806 memset(compressmeta, 0, Z_EROFS_LEGACY_MAP_HEADER_SIZE);
807 /* write out map header */
808 memcpy(compressmeta, &h, sizeof(struct z_erofs_map_header));
809 }
810
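/*
 * Give up a prepared inline (ztailpacking) pcluster: clear the advise
 * flag, patch the EOF lcluster back to the uncompressed type and
 * switch idata to the raw tail data saved earlier.
 */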
811 void z_erofs_drop_inline_pcluster(struct erofs_inode *inode)
812 {
813 struct erofs_sb_info *sbi = inode->sbi;
814 const unsigned int type = Z_EROFS_LCLUSTER_TYPE_PLAIN;
815 struct z_erofs_map_header *h = inode->compressmeta;
816
817 h->h_advise = cpu_to_le16(le16_to_cpu(h->h_advise) &
818 ~Z_EROFS_ADVISE_INLINE_PCLUSTER);
819 h->h_idata_size = 0;
820 if (!inode->eof_tailraw)
821 return;
822 DBG_BUGON(inode->compressed_idata != true);
823
824 /* patch the EOF lcluster to uncompressed type first */
825 if (inode->datalayout == EROFS_INODE_COMPRESSED_FULL) {
826 struct z_erofs_lcluster_index *di =
827 (inode->compressmeta + inode->extent_isize) -
828 sizeof(struct z_erofs_lcluster_index);
829 __le16 advise =
830 cpu_to_le16(type << Z_EROFS_LI_LCLUSTER_TYPE_BIT);
831
832 di->di_advise = advise;
833 } else if (inode->datalayout == EROFS_INODE_COMPRESSED_COMPACT) {
834 /* handle the last compacted 4B pack */
835 unsigned int eofs, base, pos, v, lo;
836 u8 *out;
837
838 eofs = inode->extent_isize -
839 (4 << (BLK_ROUND_UP(sbi, inode->i_size) & 1));
840 base = round_down(eofs, 8);
841 pos = 16 /* encodebits */ * ((eofs - base) / 4);
842 out = inode->compressmeta + base;
843 lo = erofs_blkoff(sbi, get_unaligned_le32(out + pos / 8));
844 v = (type << sbi->blkszbits) | lo;
845 out[pos / 8] = v & 0xff;
846 out[pos / 8 + 1] = v >> 8;
847 } else {
848 DBG_BUGON(1);
849 return;
850 }
851 free(inode->idata);
852 /* replace idata with prepared uncompressed data */
853 inode->idata = inode->eof_tailraw;
854 inode->idata_size = inode->eof_tailrawsize;
855 inode->compressed_idata = false;
856 inode->eof_tailraw = NULL;
857 }
858
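/*
 * Compress a whole regular file read from @fd: feed it through the
 * compression queue, write out the compressed blocks, generate the
 * lcluster indices and convert them to the compact format if possible.
 */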
859 int erofs_write_compressed_file(struct erofs_inode *inode, int fd)
860 {
861 struct erofs_buffer_head *bh;
862 static struct z_erofs_vle_compress_ctx ctx;
863 erofs_blk_t blkaddr, compressed_blocks;
864 unsigned int legacymetasize;
865 int ret;
866 struct erofs_sb_info *sbi = inode->sbi;
867 u8 *compressmeta = malloc(BLK_ROUND_UP(sbi, inode->i_size) *
868 sizeof(struct z_erofs_lcluster_index) +
869 Z_EROFS_LEGACY_MAP_HEADER_SIZE);
870
871 if (!compressmeta)
872 return -ENOMEM;
873
874 /* allocate main data buffer */
875 bh = erofs_balloc(DATA, 0, 0, 0);
876 if (IS_ERR(bh)) {
877 ret = PTR_ERR(bh);
878 goto err_free_meta;
879 }
880
881 /* initialize per-file compression setting */
882 inode->z_advise = 0;
883 inode->z_logical_clusterbits = sbi->blkszbits;
884 if (!cfg.c_legacy_compress && inode->z_logical_clusterbits <= 14) {
885 if (inode->z_logical_clusterbits <= 12)
886 inode->z_advise |= Z_EROFS_ADVISE_COMPACTED_2B;
887 inode->datalayout = EROFS_INODE_COMPRESSED_COMPACT;
888 } else {
889 inode->datalayout = EROFS_INODE_COMPRESSED_FULL;
890 }
891
892 if (erofs_sb_has_big_pcluster(sbi)) {
893 inode->z_advise |= Z_EROFS_ADVISE_BIG_PCLUSTER_1;
894 if (inode->datalayout == EROFS_INODE_COMPRESSED_COMPACT)
895 inode->z_advise |= Z_EROFS_ADVISE_BIG_PCLUSTER_2;
896 }
897 if (cfg.c_fragments && !cfg.c_dedupe)
898 inode->z_advise |= Z_EROFS_ADVISE_INTERLACED_PCLUSTER;
899
900 #ifndef NDEBUG
901 if (cfg.c_random_algorithms) {
902 while (1) {
903 inode->z_algorithmtype[0] =
904 rand() % EROFS_MAX_COMPR_CFGS;
905 if (erofs_ccfg[inode->z_algorithmtype[0]].enable)
906 break;
907 }
908 }
909 #endif
910 ctx.ccfg = &erofs_ccfg[inode->z_algorithmtype[0]];
911 inode->z_algorithmtype[0] = ctx.ccfg[0].algorithmtype;
912 inode->z_algorithmtype[1] = 0;
913
914 inode->idata_size = 0;
915 inode->fragment_size = 0;
916
917 /*
918 * Handle tails in advance to avoid writing duplicated
919 * parts into the packed inode.
920 */
921 if (cfg.c_fragments && !erofs_is_packed_inode(inode)) {
922 ret = z_erofs_fragments_dedupe(inode, fd, &ctx.tof_chksum);
923 if (ret < 0)
924 goto err_bdrop;
925 }
926
927 blkaddr = erofs_mapbh(bh->block); /* start_blkaddr */
928 ctx.inode = inode;
929 ctx.pclustersize = z_erofs_get_max_pclustersize(inode);
930 ctx.blkaddr = blkaddr;
931 ctx.metacur = compressmeta + Z_EROFS_LEGACY_MAP_HEADER_SIZE;
932 ctx.head = ctx.tail = 0;
933 ctx.clusterofs = 0;
934 ctx.e.length = 0;
935 ctx.remaining = inode->i_size - inode->fragment_size;
936 ctx.fix_dedupedfrag = false;
937 ctx.fragemitted = false;
938 if (cfg.c_all_fragments && !erofs_is_packed_inode(inode) &&
939 !inode->fragment_size) {
940 ret = z_erofs_pack_file_from_fd(inode, fd, ctx.tof_chksum);
941 if (ret)
942 goto err_free_idata;
943 } else {
944 while (ctx.remaining) {
945 const u64 rx = min_t(u64, ctx.remaining,
946 sizeof(ctx.queue) - ctx.tail);
947
948 ret = read(fd, ctx.queue + ctx.tail, rx);
949 if (ret != rx) {
950 ret = -errno;
951 goto err_bdrop;
952 }
953 ctx.remaining -= rx;
954 ctx.tail += rx;
955
956 ret = vle_compress_one(&ctx);
957 if (ret)
958 goto err_free_idata;
959 }
960 }
961 DBG_BUGON(ctx.head != ctx.tail);
962
963 /* fall back to no compression mode */
964 compressed_blocks = ctx.blkaddr - blkaddr;
965 DBG_BUGON(compressed_blocks < !!inode->idata_size);
966 compressed_blocks -= !!inode->idata_size;
967
968 /* generate an extent for the deduplicated fragment */
969 if (inode->fragment_size && !ctx.fragemitted) {
970 z_erofs_write_indexes(&ctx);
971 ctx.e.length = inode->fragment_size;
972 ctx.e.compressedblks = 0;
973 ctx.e.raw = false;
974 ctx.e.partial = false;
975 ctx.e.blkaddr = ctx.blkaddr;
976 }
977 z_erofs_fragments_commit(inode);
978
979 z_erofs_write_indexes(&ctx);
980 z_erofs_write_indexes_final(&ctx);
981 legacymetasize = ctx.metacur - compressmeta;
982 /* estimate if data compression saves space or not */
983 if (!inode->fragment_size &&
984 compressed_blocks * erofs_blksiz(sbi) + inode->idata_size +
985 legacymetasize >= inode->i_size) {
986 z_erofs_dedupe_commit(true);
987 ret = -ENOSPC;
988 goto err_free_idata;
989 }
990 z_erofs_dedupe_commit(false);
991 z_erofs_write_mapheader(inode, compressmeta);
992
993 if (!ctx.fragemitted)
994 sbi->saved_by_deduplication += inode->fragment_size;
995
996 /* if the entire file is a fragment, a simplified form is used. */
997 if (inode->i_size == inode->fragment_size) {
998 DBG_BUGON(inode->fragmentoff >> 63);
999 *(__le64 *)compressmeta =
1000 cpu_to_le64(inode->fragmentoff | 1ULL << 63);
1001 inode->datalayout = EROFS_INODE_COMPRESSED_FULL;
1002 legacymetasize = Z_EROFS_LEGACY_MAP_HEADER_SIZE;
1003 }
1004
1005 if (compressed_blocks) {
1006 ret = erofs_bh_balloon(bh, erofs_pos(sbi, compressed_blocks));
1007 DBG_BUGON(ret != erofs_blksiz(sbi));
1008 } else {
1009 if (!cfg.c_fragments && !cfg.c_dedupe)
1010 DBG_BUGON(!inode->idata_size);
1011 }
1012
1013 erofs_info("compressed %s (%llu bytes) into %u blocks",
1014 inode->i_srcpath, (unsigned long long)inode->i_size,
1015 compressed_blocks);
1016
1017 if (inode->idata_size) {
1018 bh->op = &erofs_skip_write_bhops;
1019 inode->bh_data = bh;
1020 } else {
1021 erofs_bdrop(bh, false);
1022 }
1023
1024 inode->u.i_blocks = compressed_blocks;
1025
1026 if (inode->datalayout == EROFS_INODE_COMPRESSED_FULL) {
1027 inode->extent_isize = legacymetasize;
1028 } else {
1029 ret = z_erofs_convert_to_compacted_format(inode, blkaddr,
1030 legacymetasize,
1031 compressmeta);
1032 DBG_BUGON(ret);
1033 }
1034 inode->compressmeta = compressmeta;
1035 if (!erofs_is_packed_inode(inode))
1036 erofs_droid_blocklist_write(inode, blkaddr, compressed_blocks);
1037 return 0;
1038
1039 err_free_idata:
1040 if (inode->idata) {
1041 free(inode->idata);
1042 inode->idata = NULL;
1043 }
1044 err_bdrop:
1045 erofs_bdrop(bh, true); /* revoke buffer */
1046 err_free_meta:
1047 free(compressmeta);
1048 return ret;
1049 }
1050
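/*
 * Emit the on-disk compression configurations (LZ4, LZMA and DEFLATE,
 * as available) into META buffers following the superblock.
 */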
1051 static int z_erofs_build_compr_cfgs(struct erofs_sb_info *sbi,
1052 struct erofs_buffer_head *sb_bh)
1053 {
1054 struct erofs_buffer_head *bh = sb_bh;
1055 int ret = 0;
1056
1057 if (sbi->available_compr_algs & (1 << Z_EROFS_COMPRESSION_LZ4)) {
1058 struct {
1059 __le16 size;
1060 struct z_erofs_lz4_cfgs lz4;
1061 } __packed lz4alg = {
1062 .size = cpu_to_le16(sizeof(struct z_erofs_lz4_cfgs)),
1063 .lz4 = {
1064 .max_distance =
1065 cpu_to_le16(sbi->lz4_max_distance),
1066 .max_pclusterblks = cfg.c_pclusterblks_max,
1067 }
1068 };
1069
1070 bh = erofs_battach(bh, META, sizeof(lz4alg));
1071 if (IS_ERR(bh)) {
1072 DBG_BUGON(1);
1073 return PTR_ERR(bh);
1074 }
1075 erofs_mapbh(bh->block);
1076 ret = dev_write(sbi, &lz4alg, erofs_btell(bh, false),
1077 sizeof(lz4alg));
1078 bh->op = &erofs_drop_directly_bhops;
1079 }
1080 #ifdef HAVE_LIBLZMA
1081 if (sbi->available_compr_algs & (1 << Z_EROFS_COMPRESSION_LZMA)) {
1082 struct {
1083 __le16 size;
1084 struct z_erofs_lzma_cfgs lzma;
1085 } __packed lzmaalg = {
1086 .size = cpu_to_le16(sizeof(struct z_erofs_lzma_cfgs)),
1087 .lzma = {
1088 .dict_size = cpu_to_le32(cfg.c_dict_size),
1089 }
1090 };
1091
1092 bh = erofs_battach(bh, META, sizeof(lzmaalg));
1093 if (IS_ERR(bh)) {
1094 DBG_BUGON(1);
1095 return PTR_ERR(bh);
1096 }
1097 erofs_mapbh(bh->block);
1098 ret = dev_write(sbi, &lzmaalg, erofs_btell(bh, false),
1099 sizeof(lzmaalg));
1100 bh->op = &erofs_drop_directly_bhops;
1101 }
1102 #endif
1103 if (sbi->available_compr_algs & (1 << Z_EROFS_COMPRESSION_DEFLATE)) {
1104 struct {
1105 __le16 size;
1106 struct z_erofs_deflate_cfgs z;
1107 } __packed zalg = {
1108 .size = cpu_to_le16(sizeof(struct z_erofs_deflate_cfgs)),
1109 .z = {
1110 .windowbits =
1111 cpu_to_le32(ilog2(cfg.c_dict_size)),
1112 }
1113 };
1114
1115 bh = erofs_battach(bh, META, sizeof(zalg));
1116 if (IS_ERR(bh)) {
1117 DBG_BUGON(1);
1118 return PTR_ERR(bh);
1119 }
1120 erofs_mapbh(bh->block);
1121 ret = dev_write(sbi, &zalg, erofs_btell(bh, false),
1122 sizeof(zalg));
1123 bh->op = &erofs_drop_directly_bhops;
1124 }
1125 return ret;
1126 }
1127
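/*
 * Initialize every configured compressor, record the available
 * algorithms in the superblock and validate the pcluster settings.
 */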
1128 int z_erofs_compress_init(struct erofs_sb_info *sbi, struct erofs_buffer_head *sb_bh)
1129 {
1130 int i, ret;
1131
1132 for (i = 0; cfg.c_compr_alg[i]; ++i) {
1133 struct erofs_compress *c = &erofs_ccfg[i].handle;
1134
1135 ret = erofs_compressor_init(sbi, c, cfg.c_compr_alg[i]);
1136 if (ret)
1137 return ret;
1138
1139 ret = erofs_compressor_setlevel(c, cfg.c_compr_level[i]);
1140 if (ret)
1141 return ret;
1142
1143 erofs_ccfg[i].algorithmtype =
1144 z_erofs_get_compress_algorithm_id(c);
1145 erofs_ccfg[i].enable = true;
1146 sbi->available_compr_algs |= 1 << erofs_ccfg[i].algorithmtype;
1147 if (erofs_ccfg[i].algorithmtype != Z_EROFS_COMPRESSION_LZ4)
1148 erofs_sb_set_compr_cfgs(sbi);
1149 }
1150
1151 /*
1152 * if the primary algorithm is empty (e.g. compression is off),
1153 * clear the 0PADDING feature for old kernel compatibility.
1154 */
1155 if (!cfg.c_compr_alg[0] ||
1156 (cfg.c_legacy_compress && !strncmp(cfg.c_compr_alg[0], "lz4", 3)))
1157 erofs_sb_clear_lz4_0padding(sbi);
1158
1159 if (!cfg.c_compr_alg[0])
1160 return 0;
1161
1162 /*
1163 * if big pcluster is enabled, an extra CBLKCNT lcluster index needs
1164 * to be loaded in order to get those compressed block counts.
1165 */
1166 if (cfg.c_pclusterblks_max > 1) {
1167 if (cfg.c_pclusterblks_max >
1168 Z_EROFS_PCLUSTER_MAX_SIZE / erofs_blksiz(sbi)) {
1169 erofs_err("unsupported clusterblks %u (too large)",
1170 cfg.c_pclusterblks_max);
1171 return -EINVAL;
1172 }
1173 erofs_sb_set_big_pcluster(sbi);
1174 }
1175 if (cfg.c_pclusterblks_packed > cfg.c_pclusterblks_max) {
1176 erofs_err("invalid physical cluster size for the packed file");
1177 return -EINVAL;
1178 }
1179
1180 if (erofs_sb_has_compr_cfgs(sbi))
1181 return z_erofs_build_compr_cfgs(sbi, sb_bh);
1182 return 0;
1183 }
1184
1185 int z_erofs_compress_exit(void)
1186 {
1187 int i, ret;
1188
1189 for (i = 0; cfg.c_compr_alg[i]; ++i) {
1190 ret = erofs_compressor_exit(&erofs_ccfg[i].handle);
1191 if (ret)
1192 return ret;
1193 }
1194 return 0;
1195 }
1196