1 // SPDX-License-Identifier: GPL-2.0+ OR Apache-2.0
2 /*
3 * Copyright (C) 2018-2019 HUAWEI, Inc.
4 * http://www.huawei.com/
5 * Created by Miao Xie <miaoxie@huawei.com>
6 * with heavy changes by Gao Xiang <xiang@kernel.org>
7 */
8 #ifndef _LARGEFILE64_SOURCE
9 #define _LARGEFILE64_SOURCE
10 #endif
11 #ifdef EROFS_MT_ENABLED
12 #include <pthread.h>
13 #endif
14 #include <string.h>
15 #include <stdlib.h>
16 #include <unistd.h>
17 #include "erofs/print.h"
18 #include "erofs/cache.h"
19 #include "erofs/compress.h"
20 #include "erofs/dedupe.h"
21 #include "compressor.h"
22 #include "erofs/block_list.h"
23 #include "erofs/compress_hints.h"
24 #include "erofs/fragments.h"
25 #ifdef EROFS_MT_ENABLED
26 #include "erofs/workqueue.h"
27 #endif
28
29 /* compression configuration specified by users */
30 struct erofs_compress_cfg {
31 struct erofs_compress handle;
32 unsigned int algorithmtype;
33 bool enable;
34 } erofs_ccfg[EROFS_MAX_COMPR_CFGS];
35
36 struct z_erofs_extent_item {
37 struct list_head list;
38 struct z_erofs_inmem_extent e;
39 };
40
41 struct z_erofs_compress_ictx { /* inode context */
42 struct erofs_inode *inode;
43 struct erofs_compress_cfg *ccfg;
44 int fd;
45 u64 fpos;
46
47 u32 tof_chksum;
48 bool fix_dedupedfrag;
49 bool fragemitted;
50
51 /* fields for write indexes */
52 u8 *metacur;
53 struct list_head extents;
54 u16 clusterofs;
55
56 int seg_num;
57
58 #ifdef EROFS_MT_ENABLED
59 pthread_mutex_t mutex;
60 pthread_cond_t cond;
61 int nfini;
62
63 struct erofs_compress_work *mtworks;
64 #endif
65 };
66
67 struct z_erofs_compress_sctx { /* segment context */
68 struct z_erofs_compress_ictx *ictx;
69
70 u8 *queue;
71 struct list_head extents;
72 struct z_erofs_extent_item *pivot;
73
74 struct erofs_compress *chandle;
75 char *destbuf;
76
77 erofs_off_t remaining;
78 unsigned int head, tail;
79
80 unsigned int pclustersize;
81 erofs_blk_t blkaddr; /* pointing to the next blkaddr */
82 u16 clusterofs;
83
84 int seg_idx;
85
86 void *membuf;
87 erofs_off_t memoff;
88 };
89
90 #ifdef EROFS_MT_ENABLED
91 struct erofs_compress_wq_tls {
92 u8 *queue;
93 char *destbuf;
94 struct erofs_compress_cfg *ccfg;
95 };
96
97 struct erofs_compress_work {
98 /* Note: struct erofs_work must be the first member */
99 struct erofs_work work;
100 struct z_erofs_compress_sctx ctx;
101 struct erofs_compress_work *next;
102
103 unsigned int alg_id;
104 char *alg_name;
105 unsigned int comp_level;
106 unsigned int dict_size;
107
108 int errcode;
109 };
110
111 static struct {
112 struct erofs_workqueue wq;
113 struct erofs_compress_work *idle;
114 pthread_mutex_t mutex;
115 } z_erofs_mt_ctrl;
116 #endif
117
118 static bool z_erofs_mt_enabled;
119
120 #define Z_EROFS_LEGACY_MAP_HEADER_SIZE Z_EROFS_FULL_INDEX_ALIGN(0)
121
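/*
 * Emit a trailing PLAIN lcluster index if the last extent did not end
 * exactly on a block boundary, so the final partial lcluster is still
 * covered by an index (its blkaddr is left as 0).
 */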
122 static void z_erofs_write_indexes_final(struct z_erofs_compress_ictx *ctx)
123 {
124 const unsigned int type = Z_EROFS_LCLUSTER_TYPE_PLAIN;
125 struct z_erofs_lcluster_index di;
126
127 if (!ctx->clusterofs)
128 return;
129
130 di.di_clusterofs = cpu_to_le16(ctx->clusterofs);
131 di.di_u.blkaddr = 0;
132 di.di_advise = cpu_to_le16(type);
133
134 memcpy(ctx->metacur, &di, sizeof(di));
135 ctx->metacur += sizeof(di);
136 }
137
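/*
 * Convert one in-memory extent into on-disk lcluster indexes: the first
 * lcluster of the extent becomes a HEAD (or PLAIN for uncompressed data)
 * index, and each following lcluster becomes a NONHEAD index carrying
 * delta[0]/delta[1] distances, with CBLKCNT encoded in the second index
 * for big pclusters.
 */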
138 static void z_erofs_write_extent(struct z_erofs_compress_ictx *ctx,
139 struct z_erofs_inmem_extent *e)
140 {
141 struct erofs_inode *inode = ctx->inode;
142 struct erofs_sb_info *sbi = inode->sbi;
143 unsigned int clusterofs = ctx->clusterofs;
144 unsigned int count = e->length;
145 unsigned int d0 = 0, d1 = (clusterofs + count) / erofs_blksiz(sbi);
146 struct z_erofs_lcluster_index di;
147 unsigned int type, advise;
148
149 DBG_BUGON(!count);
150 di.di_clusterofs = cpu_to_le16(ctx->clusterofs);
151
152 /* whether this is the tail-end (un)compressed block or not */
153 if (!d1) {
154 /*
155 * An lcluster cannot have three parts with a well-compressed
156 * middle one for !ztailpacking cases.
157 */
158 DBG_BUGON(!e->raw && !cfg.c_ztailpacking && !cfg.c_fragments);
159 DBG_BUGON(e->partial);
160 type = e->raw ? Z_EROFS_LCLUSTER_TYPE_PLAIN :
161 Z_EROFS_LCLUSTER_TYPE_HEAD1;
162 di.di_advise = cpu_to_le16(type);
163
164 if (inode->datalayout == EROFS_INODE_COMPRESSED_FULL &&
165 !e->compressedblks)
166 di.di_u.blkaddr = cpu_to_le32(inode->fragmentoff >> 32);
167 else
168 di.di_u.blkaddr = cpu_to_le32(e->blkaddr);
169 memcpy(ctx->metacur, &di, sizeof(di));
170 ctx->metacur += sizeof(di);
171
172 /* don't add the final index if the tail-end block exists */
173 ctx->clusterofs = 0;
174 return;
175 }
176
177 do {
178 advise = 0;
179 /* XXX: big pcluster feature should be per-inode */
180 if (d0 == 1 && erofs_sb_has_big_pcluster(sbi)) {
181 type = Z_EROFS_LCLUSTER_TYPE_NONHEAD;
182 di.di_u.delta[0] = cpu_to_le16(e->compressedblks |
183 Z_EROFS_LI_D0_CBLKCNT);
184 di.di_u.delta[1] = cpu_to_le16(d1);
185 } else if (d0) {
186 type = Z_EROFS_LCLUSTER_TYPE_NONHEAD;
187
188 /*
189 * If the |Z_EROFS_LI_D0_CBLKCNT| bit is set, the parser will
190 * interpret |delta[0]| as the size of the pcluster rather
191 * than the distance to the last head cluster. Normally this
192 * isn't a problem, because uncompressed extent sizes are
193 * below Z_EROFS_LI_D0_CBLKCNT * BLOCK_SIZE = 8MB.
194 * But with big pclusters it's possible to exceed this limit,
195 * resulting in corrupted compressed indices.
196 * To solve this, we replace d0 with
197 * Z_EROFS_LI_D0_CBLKCNT - 1.
198 */
199 if (d0 >= Z_EROFS_LI_D0_CBLKCNT)
200 di.di_u.delta[0] = cpu_to_le16(
201 Z_EROFS_LI_D0_CBLKCNT - 1);
202 else
203 di.di_u.delta[0] = cpu_to_le16(d0);
204 di.di_u.delta[1] = cpu_to_le16(d1);
205 } else {
206 type = e->raw ? Z_EROFS_LCLUSTER_TYPE_PLAIN :
207 Z_EROFS_LCLUSTER_TYPE_HEAD1;
208
209 if (inode->datalayout == EROFS_INODE_COMPRESSED_FULL &&
210 !e->compressedblks)
211 di.di_u.blkaddr = cpu_to_le32(inode->fragmentoff >> 32);
212 else
213 di.di_u.blkaddr = cpu_to_le32(e->blkaddr);
214
215 if (e->partial) {
216 DBG_BUGON(e->raw);
217 advise |= Z_EROFS_LI_PARTIAL_REF;
218 }
219 }
220 di.di_advise = cpu_to_le16(advise | type);
221
222 memcpy(ctx->metacur, &di, sizeof(di));
223 ctx->metacur += sizeof(di);
224
225 count -= erofs_blksiz(sbi) - clusterofs;
226 clusterofs = 0;
227
228 ++d0;
229 --d1;
230 } while (clusterofs + count >= erofs_blksiz(sbi));
231
232 ctx->clusterofs = clusterofs + count;
233 }
234
235 static void z_erofs_write_indexes(struct z_erofs_compress_ictx *ctx)
236 {
237 struct z_erofs_extent_item *ei, *n;
238
239 ctx->clusterofs = 0;
240 list_for_each_entry_safe(ei, n, &ctx->extents, list) {
241 z_erofs_write_extent(ctx, &ei->e);
242
243 list_del(&ei->list);
244 free(ei);
245 }
246 z_erofs_write_indexes_final(ctx);
247 }
248
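/*
 * Slide the compression queue: once the consumed part (head) grows beyond
 * EROFS_CONFIG_COMPR_MAX_SZ and more input remains, move the unconsumed
 * bytes (keeping the head block-aligned) to the front so the queue can be
 * refilled from the file.
 */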
249 static bool z_erofs_need_refill(struct z_erofs_compress_sctx *ctx)
250 {
251 const bool final = !ctx->remaining;
252 unsigned int qh_aligned, qh_after;
253 struct erofs_inode *inode = ctx->ictx->inode;
254
255 if (final || ctx->head < EROFS_CONFIG_COMPR_MAX_SZ)
256 return false;
257
258 qh_aligned = round_down(ctx->head, erofs_blksiz(inode->sbi));
259 qh_after = ctx->head - qh_aligned;
260 memmove(ctx->queue, ctx->queue + qh_aligned, ctx->tail - qh_aligned);
261 ctx->tail -= qh_aligned;
262 ctx->head = qh_after;
263 return true;
264 }
265
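/*
 * Zero-length sentinel used as the initial pivot of a compression
 * context; z_erofs_commit_extent() simply ignores it.
 */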
266 static struct z_erofs_extent_item dummy_pivot = {
267 .e.length = 0
268 };
269
270 static void z_erofs_commit_extent(struct z_erofs_compress_sctx *ctx,
271 struct z_erofs_extent_item *ei)
272 {
273 if (ei == &dummy_pivot)
274 return;
275
276 list_add_tail(&ei->list, &ctx->extents);
277 ctx->clusterofs = (ctx->clusterofs + ei->e.length) &
278 (erofs_blksiz(ctx->ictx->inode->sbi) - 1);
279 }
280
281 static int z_erofs_compress_dedupe(struct z_erofs_compress_sctx *ctx,
282 unsigned int *len)
283 {
284 struct erofs_inode *inode = ctx->ictx->inode;
285 const unsigned int lclustermask = (1 << inode->z_logical_clusterbits) - 1;
286 struct erofs_sb_info *sbi = inode->sbi;
287 struct z_erofs_extent_item *ei = ctx->pivot;
288
289 if (!ei)
290 return 0;
291
292 /*
293 * No need to dedupe for the packed inode since it is composed
294 * of fragments which have already been deduplicated.
295 */
296 if (erofs_is_packed_inode(inode))
297 goto out;
298
299 do {
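/*
 * Dedupe window: matching may start inside the still-uncommitted
 * pivot extent (everything except its last block, and never before
 * the queue start), so that the pivot can later be shortened by
 * `delta' bytes if a match reaches into it.
 */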
300 struct z_erofs_dedupe_ctx dctx = {
301 .start = ctx->queue + ctx->head - ({ int rc;
302 if (ei->e.length <= erofs_blksiz(sbi))
303 rc = 0;
304 else if (ei->e.length - erofs_blksiz(sbi) >= ctx->head)
305 rc = ctx->head;
306 else
307 rc = ei->e.length - erofs_blksiz(sbi);
308 rc; }),
309 .end = ctx->queue + ctx->head + *len,
310 .cur = ctx->queue + ctx->head,
311 };
312 int delta;
313
314 if (z_erofs_dedupe_match(&dctx))
315 break;
316
317 DBG_BUGON(dctx.e.inlined);
318 delta = ctx->queue + ctx->head - dctx.cur;
319 /*
320 * For big pcluster dedupe, leave at least two indices to store
321 * CBLKCNT as the first step. Even later, a one-block
322 * decompression could be done as another try in practice.
323 */
324 if (dctx.e.compressedblks > 1 &&
325 ((ctx->clusterofs + ei->e.length - delta) & lclustermask) +
326 dctx.e.length < 2 * (lclustermask + 1))
327 break;
328
329 ctx->pivot = malloc(sizeof(struct z_erofs_extent_item));
330 if (!ctx->pivot) {
331 z_erofs_commit_extent(ctx, ei);
332 return -ENOMEM;
333 }
334
335 if (delta) {
336 DBG_BUGON(delta < 0);
337 DBG_BUGON(!ei->e.length);
338
339 /*
340 * For big pcluster dedupe, if we decide to shorten the
341 * previous big pcluster, make sure that the previous
342 * CBLKCNT is still kept.
343 */
344 if (ei->e.compressedblks > 1 &&
345 (ctx->clusterofs & lclustermask) + ei->e.length
346 - delta < 2 * (lclustermask + 1))
347 break;
348 ei->e.partial = true;
349 ei->e.length -= delta;
350 }
351
352 /* fall back to noncompact indexes for deduplication */
353 inode->z_advise &= ~Z_EROFS_ADVISE_COMPACTED_2B;
354 inode->datalayout = EROFS_INODE_COMPRESSED_FULL;
355 erofs_sb_set_dedupe(sbi);
356
357 sbi->saved_by_deduplication +=
358 dctx.e.compressedblks * erofs_blksiz(sbi);
359 erofs_dbg("Dedupe %u %scompressed data (delta %d) to %u of %u blocks",
360 dctx.e.length, dctx.e.raw ? "un" : "",
361 delta, dctx.e.blkaddr, dctx.e.compressedblks);
362
363 z_erofs_commit_extent(ctx, ei);
364 ei = ctx->pivot;
365 init_list_head(&ei->list);
366 ei->e = dctx.e;
367
368 ctx->head += dctx.e.length - delta;
369 DBG_BUGON(*len < dctx.e.length - delta);
370 *len -= dctx.e.length - delta;
371
372 if (z_erofs_need_refill(ctx))
373 return 1;
374 } while (*len);
375 out:
376 z_erofs_commit_extent(ctx, ei);
377 ctx->pivot = NULL;
378 return 0;
379 }
380
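/*
 * Write one block of uncompressed data, rotated by clusterofs when the
 * interlaced pcluster advise is set; the block goes either into the
 * per-segment membuf (multi-threaded mode) or straight to the device.
 */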
381 static int write_uncompressed_extent(struct z_erofs_compress_sctx *ctx,
382 unsigned int len, char *dst)
383 {
384 struct erofs_inode *inode = ctx->ictx->inode;
385 struct erofs_sb_info *sbi = inode->sbi;
386 unsigned int count = min(erofs_blksiz(sbi), len);
387 unsigned int interlaced_offset, rightpart;
388 int ret;
389
390 /* write interlaced uncompressed data if needed */
391 if (inode->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER)
392 interlaced_offset = ctx->clusterofs;
393 else
394 interlaced_offset = 0;
395 rightpart = min(erofs_blksiz(sbi) - interlaced_offset, count);
396
397 memset(dst, 0, erofs_blksiz(sbi));
398
399 memcpy(dst + interlaced_offset, ctx->queue + ctx->head, rightpart);
400 memcpy(dst, ctx->queue + ctx->head + rightpart, count - rightpart);
401
402 if (ctx->membuf) {
403 erofs_dbg("Writing %u uncompressed data of %s", count,
404 inode->i_srcpath);
405 memcpy(ctx->membuf + ctx->memoff, dst, erofs_blksiz(sbi));
406 ctx->memoff += erofs_blksiz(sbi);
407 } else {
408 erofs_dbg("Writing %u uncompressed data to block %u", count,
409 ctx->blkaddr);
410 ret = erofs_blk_write(sbi, dst, ctx->blkaddr, 1);
411 if (ret)
412 return ret;
413 }
414 return count;
415 }
416
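/*
 * Maximum physical cluster size (in bytes) for this inode: the packed
 * inode uses its own limit, compress-hints may override it per file,
 * debug builds can randomize it; otherwise the global default is used.
 */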
417 static unsigned int z_erofs_get_max_pclustersize(struct erofs_inode *inode)
418 {
419 if (erofs_is_packed_inode(inode)) {
420 return cfg.c_mkfs_pclustersize_packed;
421 #ifndef NDEBUG
422 } else if (cfg.c_random_pclusterblks) {
423 unsigned int pclusterblks =
424 cfg.c_mkfs_pclustersize_max >> inode->sbi->blkszbits;
425
426 return (1 + rand() % pclusterblks) << inode->sbi->blkszbits;
427 #endif
428 } else if (cfg.c_compress_hints_file) {
429 z_erofs_apply_compress_hints(inode);
430 DBG_BUGON(!inode->z_physical_clusterblks);
431 return inode->z_physical_clusterblks << inode->sbi->blkszbits;
432 }
433 return cfg.c_mkfs_pclustersize_def;
434 }
435
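/*
 * Stash the (raw or compressed) tail in inode->idata so it can later be
 * inlined right after the on-disk inode (ztailpacking); returns the
 * number of bytes recorded.
 */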
436 static int z_erofs_fill_inline_data(struct erofs_inode *inode, void *data,
437 unsigned int len, bool raw)
438 {
439 inode->z_advise |= Z_EROFS_ADVISE_INLINE_PCLUSTER;
440 inode->idata_size = len;
441 inode->compressed_idata = !raw;
442
443 inode->idata = malloc(inode->idata_size);
444 if (!inode->idata)
445 return -ENOMEM;
446 erofs_dbg("Recording %u %scompressed inline data",
447 inode->idata_size, raw ? "un" : "");
448 memcpy(inode->idata, data, inode->idata_size);
449 return len;
450 }
451
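/*
 * If the compressed result does not end on a block boundary, try
 * recompressing with a block-aligned target size so the leftover input
 * can be inlined instead; the original output is kept unless this
 * actually saves space.
 */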
452 static int tryrecompress_trailing(struct z_erofs_compress_sctx *ctx,
453 struct erofs_compress *ec,
454 void *in, unsigned int *insize,
455 void *out, unsigned int *compressedsize)
456 {
457 struct erofs_sb_info *sbi = ctx->ictx->inode->sbi;
458 char *tmp;
459 unsigned int count;
460 int ret = *compressedsize;
461
462 /* no need to recompress */
463 if (!(ret & (erofs_blksiz(sbi) - 1)))
464 return 0;
465
466 tmp = malloc(Z_EROFS_PCLUSTER_MAX_SIZE);
467 if (!tmp)
468 return -ENOMEM;
469
470 count = *insize;
471 ret = erofs_compress_destsize(ec, in, &count, (void *)tmp,
472 rounddown(ret, erofs_blksiz(sbi)));
473 if (ret <= 0 || ret + (*insize - count) >=
474 roundup(*compressedsize, erofs_blksiz(sbi)))
475 goto out;
476
477 /* replace the original compressed data if there is any gain */
478 memcpy(out, tmp, ret);
479 *insize = count;
480 *compressedsize = ret;
481
482 out:
483 free(tmp);
484 return 0;
485 }
486
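/*
 * Shrink the deduplicated tail fragment so that it only covers the bytes
 * which really remain at the end of the file; if the remaining data got
 * larger than the recorded fragment, enlarge pclustersize and let the
 * caller compress a bit more first (returns false in that case).
 */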
487 static bool z_erofs_fixup_deduped_fragment(struct z_erofs_compress_sctx *ctx,
488 unsigned int len)
489 {
490 struct z_erofs_compress_ictx *ictx = ctx->ictx;
491 struct erofs_inode *inode = ictx->inode;
492 struct erofs_sb_info *sbi = inode->sbi;
493 const unsigned int newsize = ctx->remaining + len;
494
495 DBG_BUGON(!inode->fragment_size);
496
497 /* try to fix again if it gets larger (should be rare) */
498 if (inode->fragment_size < newsize) {
499 ctx->pclustersize = min_t(erofs_off_t,
500 z_erofs_get_max_pclustersize(inode),
501 roundup(newsize - inode->fragment_size,
502 erofs_blksiz(sbi)));
503 return false;
504 }
505
506 inode->fragmentoff += inode->fragment_size - newsize;
507 inode->fragment_size = newsize;
508
509 erofs_dbg("Reducing fragment size to %llu at %llu",
510 inode->fragment_size | 0ULL, inode->fragmentoff | 0ULL);
511
512 /* it's the end */
513 DBG_BUGON(ctx->tail - ctx->head + ctx->remaining != newsize);
514 ctx->head = ctx->tail;
515 ctx->remaining = 0;
516 return true;
517 }
518
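/*
 * Compress one extent from the queue and decide how to encode it: an
 * uncompressed (raw) block, a ztailpacking inline tail, a fragment in
 * the packed inode, or a regular (possibly big) pcluster; the result is
 * written to disk directly or staged in membuf for multi-threaded builds.
 * Returns 0 on success and 1 if more data needs to be queued first.
 */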
519 static int __z_erofs_compress_one(struct z_erofs_compress_sctx *ctx,
520 struct z_erofs_inmem_extent *e)
521 {
522 static char g_dstbuf[EROFS_CONFIG_COMPR_MAX_SZ + EROFS_MAX_BLOCK_SIZE];
523 char *dstbuf = ctx->destbuf ?: g_dstbuf;
524 struct z_erofs_compress_ictx *ictx = ctx->ictx;
525 struct erofs_inode *inode = ictx->inode;
526 struct erofs_sb_info *sbi = inode->sbi;
527 unsigned int blksz = erofs_blksiz(sbi);
528 char *const dst = dstbuf + blksz;
529 struct erofs_compress *const h = ctx->chandle;
530 unsigned int len = ctx->tail - ctx->head;
531 bool is_packed_inode = erofs_is_packed_inode(inode);
532 bool tsg = (ctx->seg_idx + 1 >= ictx->seg_num), final = !ctx->remaining;
533 bool may_packing = (cfg.c_fragments && tsg && final &&
534 !is_packed_inode && !z_erofs_mt_enabled);
535 bool may_inline = (cfg.c_ztailpacking && tsg && final && !may_packing);
536 unsigned int compressedsize;
537 int ret;
538
539 *e = (struct z_erofs_inmem_extent){};
540 if (len <= ctx->pclustersize) {
541 if (!final || !len)
542 return 1;
543 if (inode->fragment_size && !ictx->fix_dedupedfrag) {
544 ctx->pclustersize = roundup(len, blksz);
545 goto fix_dedupedfrag;
546 }
547 if (may_packing) {
548 e->length = len;
549 goto frag_packing;
550 }
551 if (!may_inline && len <= blksz)
552 goto nocompression;
553 }
554
555 e->length = min(len, cfg.c_max_decompressed_extent_bytes);
556 ret = erofs_compress_destsize(h, ctx->queue + ctx->head,
557 &e->length, dst, ctx->pclustersize);
558 if (ret <= 0) {
559 erofs_err("failed to compress %s: %s", inode->i_srcpath,
560 erofs_strerror(ret));
561 return ret;
562 }
563
564 compressedsize = ret;
565 /* even if the compressed size is smaller, there is no real gain */
566 if (!(may_inline && e->length == len && ret < blksz))
567 ret = roundup(ret, blksz);
568
569 /* check if there is enough gain to keep the compressed data */
570 if (ret * h->compress_threshold / 100 >= e->length) {
571 if (may_inline && len < blksz) {
572 ret = z_erofs_fill_inline_data(inode,
573 ctx->queue + ctx->head, len, true);
574 if (ret < 0)
575 return ret;
576 e->inlined = true;
577 } else {
578 may_inline = false;
579 may_packing = false;
580 nocompression:
581 /* TODO: reset clusterofs to 0 if permitted */
582 ret = write_uncompressed_extent(ctx, len, dst);
583 if (ret < 0)
584 return ret;
585 }
586 e->length = ret;
587
588 /*
589 * XXX: For now, we have to leave `e->compressedblks = 1'
590 * since there is no way to generate compressed indexes after
591 * the time that ztailpacking is decided.
592 */
593 e->compressedblks = 1;
594 e->raw = true;
595 } else if (may_packing && len == e->length &&
596 compressedsize < ctx->pclustersize &&
597 (!inode->fragment_size || ictx->fix_dedupedfrag)) {
598 frag_packing:
599 ret = z_erofs_pack_fragments(inode, ctx->queue + ctx->head,
600 len, ictx->tof_chksum);
601 if (ret < 0)
602 return ret;
603 e->compressedblks = 0; /* indicate a fragment */
604 e->raw = false;
605 ictx->fragemitted = true;
606 /* tailpcluster should be less than 1 block */
607 } else if (may_inline && len == e->length && compressedsize < blksz) {
608 if (ctx->clusterofs + len <= blksz) {
609 inode->eof_tailraw = malloc(len);
610 if (!inode->eof_tailraw)
611 return -ENOMEM;
612
613 memcpy(inode->eof_tailraw, ctx->queue + ctx->head, len);
614 inode->eof_tailrawsize = len;
615 }
616
617 ret = z_erofs_fill_inline_data(inode, dst,
618 compressedsize, false);
619 if (ret < 0)
620 return ret;
621 e->inlined = true;
622 e->compressedblks = 1;
623 e->raw = false;
624 } else {
625 unsigned int tailused, padding;
626
627 /*
628 * If there's space left for the last round when deduping
629 * fragments, try to read the fragment and recompress a little
630 * more to check whether it can be filled up. Fix the fragment
631 * if it succeeds. Otherwise, just drop it and go on packing.
632 */
633 if (may_packing && len == e->length &&
634 (compressedsize & (blksz - 1)) &&
635 ctx->tail < Z_EROFS_COMPR_QUEUE_SZ) {
636 ctx->pclustersize = roundup(compressedsize, blksz);
637 goto fix_dedupedfrag;
638 }
639
640 if (may_inline && len == e->length) {
641 ret = tryrecompress_trailing(ctx, h,
642 ctx->queue + ctx->head,
643 &e->length, dst,
644 &compressedsize);
645 if (ret)
646 return ret;
647 }
648
649 e->compressedblks = BLK_ROUND_UP(sbi, compressedsize);
650 DBG_BUGON(e->compressedblks * blksz >= e->length);
651
652 padding = 0;
653 tailused = compressedsize & (blksz - 1);
654 if (tailused)
655 padding = blksz - tailused;
656
657 /* zero out garbage trailing data for non-0padding */
658 if (!erofs_sb_has_lz4_0padding(sbi)) {
659 memset(dst + compressedsize, 0, padding);
660 padding = 0;
661 }
662
663 /* write compressed data */
664 if (ctx->membuf) {
665 erofs_dbg("Writing %u compressed data of %u blocks of %s",
666 e->length, e->compressedblks, inode->i_srcpath);
667
668 memcpy(ctx->membuf + ctx->memoff, dst - padding,
669 e->compressedblks * blksz);
670 ctx->memoff += e->compressedblks * blksz;
671 } else {
672 erofs_dbg("Writing %u compressed data to %u of %u blocks",
673 e->length, ctx->blkaddr, e->compressedblks);
674
675 ret = erofs_blk_write(sbi, dst - padding, ctx->blkaddr,
676 e->compressedblks);
677 if (ret)
678 return ret;
679 }
680 e->raw = false;
681 may_inline = false;
682 may_packing = false;
683 }
684 e->partial = false;
685 e->blkaddr = ctx->blkaddr;
686 if (ctx->blkaddr != EROFS_NULL_ADDR)
687 ctx->blkaddr += e->compressedblks;
688 if (!may_inline && !may_packing && !is_packed_inode)
689 (void)z_erofs_dedupe_insert(e, ctx->queue + ctx->head);
690 ctx->head += e->length;
691 return 0;
692
693 fix_dedupedfrag:
694 DBG_BUGON(!inode->fragment_size);
695 ctx->remaining += inode->fragment_size;
696 ictx->fix_dedupedfrag = true;
697 return 1;
698 }
699
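/*
 * Consume the queued data extent by extent: first try to deduplicate
 * against previously written data, then compress a fresh extent; stop
 * when the queue needs a refill or the deduplicated tail fragment has
 * been fixed up.
 */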
700 static int z_erofs_compress_one(struct z_erofs_compress_sctx *ctx)
701 {
702 struct z_erofs_compress_ictx *ictx = ctx->ictx;
703 unsigned int len = ctx->tail - ctx->head;
704 struct z_erofs_extent_item *ei;
705
706 while (len) {
707 int ret = z_erofs_compress_dedupe(ctx, &len);
708
709 if (ret > 0)
710 break;
711 else if (ret < 0)
712 return ret;
713
714 DBG_BUGON(ctx->pivot);
715 ei = malloc(sizeof(*ei));
716 if (!ei)
717 return -ENOMEM;
718
719 init_list_head(&ei->list);
720 ret = __z_erofs_compress_one(ctx, &ei->e);
721 if (ret) {
722 free(ei);
723 if (ret > 0)
724 break; /* need more data */
725 return ret;
726 }
727
728 len -= ei->e.length;
729 ctx->pivot = ei;
730 if (ictx->fix_dedupedfrag && !ictx->fragemitted &&
731 z_erofs_fixup_deduped_fragment(ctx, len))
732 break;
733
734 if (z_erofs_need_refill(ctx))
735 break;
736 }
737 return 0;
738 }
739
740 struct z_erofs_compressindex_vec {
741 union {
742 erofs_blk_t blkaddr;
743 u16 delta[2];
744 } u;
745 u16 clusterofs;
746 u8 clustertype;
747 };
748
749 static void *parse_legacy_indexes(struct z_erofs_compressindex_vec *cv,
750 unsigned int nr, void *metacur)
751 {
752 struct z_erofs_lcluster_index *const db = metacur;
753 unsigned int i;
754
755 for (i = 0; i < nr; ++i, ++cv) {
756 struct z_erofs_lcluster_index *const di = db + i;
757 const unsigned int advise = le16_to_cpu(di->di_advise);
758
759 cv->clustertype = advise & Z_EROFS_LI_LCLUSTER_TYPE_MASK;
760 cv->clusterofs = le16_to_cpu(di->di_clusterofs);
761
762 if (cv->clustertype == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
763 cv->u.delta[0] = le16_to_cpu(di->di_u.delta[0]);
764 cv->u.delta[1] = le16_to_cpu(di->di_u.delta[1]);
765 } else {
766 cv->u.blkaddr = le32_to_cpu(di->di_u.blkaddr);
767 }
768 }
769 return db + nr;
770 }
771
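/*
 * Encode one compacted pack: `vcnt' indexes of `encodebits' bits each
 * ((clustertype << lobits) | clusterofs-or-delta), followed by a shared
 * 32-bit blkaddr at the end of the pack (8 bytes per compacted 4B pack,
 * 32 bytes per compacted 2B pack).
 */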
772 static void *write_compacted_indexes(u8 *out,
773 struct z_erofs_compressindex_vec *cv,
774 erofs_blk_t *blkaddr_ret,
775 unsigned int destsize,
776 unsigned int lclusterbits,
777 bool final, bool *dummy_head,
778 bool update_blkaddr)
779 {
780 unsigned int vcnt, lobits, encodebits, pos, i, cblks;
781 erofs_blk_t blkaddr;
782
783 if (destsize == 4)
784 vcnt = 2;
785 else if (destsize == 2 && lclusterbits <= 12)
786 vcnt = 16;
787 else
788 return ERR_PTR(-EINVAL);
789 lobits = max(lclusterbits, ilog2(Z_EROFS_LI_D0_CBLKCNT) + 1U);
790 encodebits = (vcnt * destsize * 8 - 32) / vcnt;
791 blkaddr = *blkaddr_ret;
792
793 pos = 0;
794 for (i = 0; i < vcnt; ++i) {
795 unsigned int offset, v;
796 u8 ch, rem;
797
798 if (cv[i].clustertype == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
799 if (cv[i].u.delta[0] & Z_EROFS_LI_D0_CBLKCNT) {
800 cblks = cv[i].u.delta[0] & ~Z_EROFS_LI_D0_CBLKCNT;
801 offset = cv[i].u.delta[0];
802 blkaddr += cblks;
803 *dummy_head = false;
804 } else if (i + 1 == vcnt) {
805 offset = min_t(u16, cv[i].u.delta[1],
806 (1 << lobits) - 1);
807 } else {
808 offset = cv[i].u.delta[0];
809 }
810 } else {
811 offset = cv[i].clusterofs;
812 if (*dummy_head) {
813 ++blkaddr;
814 if (update_blkaddr)
815 *blkaddr_ret = blkaddr;
816 }
817 *dummy_head = true;
818 update_blkaddr = false;
819
820 if (cv[i].u.blkaddr != blkaddr) {
821 if (i + 1 != vcnt)
822 DBG_BUGON(!final);
823 DBG_BUGON(cv[i].u.blkaddr);
824 }
825 }
826 v = (cv[i].clustertype << lobits) | offset;
827 rem = pos & 7;
828 ch = out[pos / 8] & ((1 << rem) - 1);
829 out[pos / 8] = (v << rem) | ch;
830 out[pos / 8 + 1] = v >> (8 - rem);
831 out[pos / 8 + 2] = v >> (16 - rem);
832 pos += encodebits;
833 }
834 DBG_BUGON(destsize * vcnt * 8 != pos + 32);
835 *(__le32 *)(out + destsize * vcnt - 4) = cpu_to_le32(*blkaddr_ret);
836 *blkaddr_ret = blkaddr;
837 return out + destsize * vcnt;
838 }
839
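/*
 * Convert the legacy (full) lcluster indexes in `compressmeta' into the
 * compacted on-disk layout in place: a few compacted-4B packs first to
 * reach 32-byte alignment, then compacted-2B packs if COMPACTED_2B is
 * enabled, then the remaining compacted-4B packs.
 */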
840 int z_erofs_convert_to_compacted_format(struct erofs_inode *inode,
841 erofs_blk_t blkaddr,
842 unsigned int legacymetasize,
843 void *compressmeta)
844 {
845 const unsigned int mpos = roundup(inode->inode_isize +
846 inode->xattr_isize, 8) +
847 sizeof(struct z_erofs_map_header);
848 const unsigned int totalidx = (legacymetasize -
849 Z_EROFS_LEGACY_MAP_HEADER_SIZE) /
850 sizeof(struct z_erofs_lcluster_index);
851 const unsigned int logical_clusterbits = inode->z_logical_clusterbits;
852 u8 *out, *in;
853 struct z_erofs_compressindex_vec cv[16];
854 struct erofs_sb_info *sbi = inode->sbi;
855 /* # of leading 4-byte indexes so that the following packs are 32-byte aligned */
856 unsigned int compacted_4b_initial, compacted_4b_end;
857 unsigned int compacted_2b;
858 bool dummy_head;
859 bool big_pcluster = erofs_sb_has_big_pcluster(sbi);
860
861 if (logical_clusterbits < sbi->blkszbits)
862 return -EINVAL;
863 if (logical_clusterbits > 14) {
864 erofs_err("compact format is unsupported for lcluster size %u",
865 1 << logical_clusterbits);
866 return -EOPNOTSUPP;
867 }
868
869 if (inode->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) {
870 if (logical_clusterbits > 12) {
871 erofs_err("compact 2B is unsupported for lcluster size %u",
872 1 << logical_clusterbits);
873 return -EINVAL;
874 }
875
876 compacted_4b_initial = (32 - mpos % 32) / 4;
877 if (compacted_4b_initial == 32 / 4)
878 compacted_4b_initial = 0;
879
880 if (compacted_4b_initial > totalidx) {
881 compacted_4b_initial = compacted_2b = 0;
882 compacted_4b_end = totalidx;
883 } else {
884 compacted_2b = rounddown(totalidx -
885 compacted_4b_initial, 16);
886 compacted_4b_end = totalidx - compacted_4b_initial -
887 compacted_2b;
888 }
889 } else {
890 compacted_2b = compacted_4b_initial = 0;
891 compacted_4b_end = totalidx;
892 }
893
894 out = in = compressmeta;
895
896 out += sizeof(struct z_erofs_map_header);
897 in += Z_EROFS_LEGACY_MAP_HEADER_SIZE;
898
899 dummy_head = false;
900 /* prior to bigpcluster, blkaddr was bumped up once coming into HEAD */
901 if (!big_pcluster) {
902 --blkaddr;
903 dummy_head = true;
904 }
905
906 /* generate compacted_4b_initial */
907 while (compacted_4b_initial) {
908 in = parse_legacy_indexes(cv, 2, in);
909 out = write_compacted_indexes(out, cv, &blkaddr,
910 4, logical_clusterbits, false,
911 &dummy_head, big_pcluster);
912 compacted_4b_initial -= 2;
913 }
914 DBG_BUGON(compacted_4b_initial);
915
916 /* generate compacted_2b */
917 while (compacted_2b) {
918 in = parse_legacy_indexes(cv, 16, in);
919 out = write_compacted_indexes(out, cv, &blkaddr,
920 2, logical_clusterbits, false,
921 &dummy_head, big_pcluster);
922 compacted_2b -= 16;
923 }
924 DBG_BUGON(compacted_2b);
925
926 /* generate compacted_4b_end */
927 while (compacted_4b_end > 1) {
928 in = parse_legacy_indexes(cv, 2, in);
929 out = write_compacted_indexes(out, cv, &blkaddr,
930 4, logical_clusterbits, false,
931 &dummy_head, big_pcluster);
932 compacted_4b_end -= 2;
933 }
934
935 /* generate final compacted_4b_end if needed */
936 if (compacted_4b_end) {
937 memset(cv, 0, sizeof(cv));
938 in = parse_legacy_indexes(cv, 1, in);
939 out = write_compacted_indexes(out, cv, &blkaddr,
940 4, logical_clusterbits, true,
941 &dummy_head, big_pcluster);
942 }
943 inode->extent_isize = out - (u8 *)compressmeta;
944 return 0;
945 }
946
947 static void z_erofs_write_mapheader(struct erofs_inode *inode,
948 void *compressmeta)
949 {
950 struct erofs_sb_info *sbi = inode->sbi;
951 struct z_erofs_map_header h = {
952 .h_advise = cpu_to_le16(inode->z_advise),
953 .h_algorithmtype = inode->z_algorithmtype[1] << 4 |
954 inode->z_algorithmtype[0],
955 /* lclustersize */
956 .h_clusterbits = inode->z_logical_clusterbits - sbi->blkszbits,
957 };
958
959 if (inode->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER)
960 h.h_fragmentoff = cpu_to_le32(inode->fragmentoff);
961 else
962 h.h_idata_size = cpu_to_le16(inode->idata_size);
963
964 memset(compressmeta, 0, Z_EROFS_LEGACY_MAP_HEADER_SIZE);
965 /* write out map header */
966 memcpy(compressmeta, &h, sizeof(struct z_erofs_map_header));
967 }
968
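/*
 * Revert a previously made ztailpacking decision: clear the inline
 * advise, patch the EOF lcluster back to an uncompressed (PLAIN) one in
 * either the full or compact index format, and replace idata with the
 * saved raw tail.
 */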
969 void z_erofs_drop_inline_pcluster(struct erofs_inode *inode)
970 {
971 struct erofs_sb_info *sbi = inode->sbi;
972 const unsigned int type = Z_EROFS_LCLUSTER_TYPE_PLAIN;
973 struct z_erofs_map_header *h = inode->compressmeta;
974
975 h->h_advise = cpu_to_le16(le16_to_cpu(h->h_advise) &
976 ~Z_EROFS_ADVISE_INLINE_PCLUSTER);
977 h->h_idata_size = 0;
978 if (!inode->eof_tailraw)
979 return;
980 DBG_BUGON(inode->compressed_idata != true);
981
982 /* patch the EOF lcluster to uncompressed type first */
983 if (inode->datalayout == EROFS_INODE_COMPRESSED_FULL) {
984 struct z_erofs_lcluster_index *di =
985 (inode->compressmeta + inode->extent_isize) -
986 sizeof(struct z_erofs_lcluster_index);
987
988 di->di_advise = cpu_to_le16(type);
989 } else if (inode->datalayout == EROFS_INODE_COMPRESSED_COMPACT) {
990 /* handle the last compacted 4B pack */
991 unsigned int eofs, base, pos, v, lo;
992 u8 *out;
993
994 eofs = inode->extent_isize -
995 (4 << (BLK_ROUND_UP(sbi, inode->i_size) & 1));
996 base = round_down(eofs, 8);
997 pos = 16 /* encodebits */ * ((eofs - base) / 4);
998 out = inode->compressmeta + base;
999 lo = erofs_blkoff(sbi, get_unaligned_le32(out + pos / 8));
1000 v = (type << sbi->blkszbits) | lo;
1001 out[pos / 8] = v & 0xff;
1002 out[pos / 8 + 1] = v >> 8;
1003 } else {
1004 DBG_BUGON(1);
1005 return;
1006 }
1007 free(inode->idata);
1008 /* replace idata with prepared uncompressed data */
1009 inode->idata = inode->eof_tailraw;
1010 inode->idata_size = inode->eof_tailrawsize;
1011 inode->compressed_idata = false;
1012 inode->eof_tailraw = NULL;
1013 }
1014
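/*
 * Compress one segment of the input file: repeatedly refill the queue
 * (sequentially when offset == -1, otherwise via pread() at the given
 * offset) and compress it; finally emit an extra zero-block extent for
 * a deduplicated tail fragment if needed.
 */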
1015 int z_erofs_compress_segment(struct z_erofs_compress_sctx *ctx,
1016 u64 offset, erofs_blk_t blkaddr)
1017 {
1018 struct z_erofs_compress_ictx *ictx = ctx->ictx;
1019 int fd = ictx->fd;
1020
1021 ctx->blkaddr = blkaddr;
1022 while (ctx->remaining) {
1023 const u64 rx = min_t(u64, ctx->remaining,
1024 Z_EROFS_COMPR_QUEUE_SZ - ctx->tail);
1025 int ret;
1026
1027 ret = (offset == -1 ?
1028 read(fd, ctx->queue + ctx->tail, rx) :
1029 pread(fd, ctx->queue + ctx->tail, rx,
1030 ictx->fpos + offset));
1031 if (ret != rx)
1032 return -errno;
1033
1034 ctx->remaining -= rx;
1035 ctx->tail += rx;
1036 if (offset != -1)
1037 offset += rx;
1038
1039 ret = z_erofs_compress_one(ctx);
1040 if (ret)
1041 return ret;
1042 }
1043 DBG_BUGON(ctx->head != ctx->tail);
1044
1045 if (ctx->pivot) {
1046 z_erofs_commit_extent(ctx, ctx->pivot);
1047 ctx->pivot = NULL;
1048 }
1049
1050 /* generate an extra extent for the deduplicated fragment */
1051 if (ctx->seg_idx >= ictx->seg_num - 1 &&
1052 ictx->inode->fragment_size && !ictx->fragemitted) {
1053 struct z_erofs_extent_item *ei;
1054
1055 ei = malloc(sizeof(*ei));
1056 if (!ei)
1057 return -ENOMEM;
1058
1059 ei->e = (struct z_erofs_inmem_extent) {
1060 .length = ictx->inode->fragment_size,
1061 .compressedblks = 0,
1062 .raw = false,
1063 .partial = false,
1064 .blkaddr = ctx->blkaddr,
1065 };
1066 init_list_head(&ei->list);
1067 z_erofs_commit_extent(ctx, ei);
1068 }
1069 return 0;
1070 }
1071
1072 int erofs_commit_compressed_file(struct z_erofs_compress_ictx *ictx,
1073 struct erofs_buffer_head *bh,
1074 erofs_blk_t blkaddr,
1075 erofs_blk_t compressed_blocks)
1076 {
1077 struct erofs_inode *inode = ictx->inode;
1078 struct erofs_sb_info *sbi = inode->sbi;
1079 unsigned int legacymetasize;
1080 u8 *compressmeta;
1081 int ret;
1082
1083 z_erofs_fragments_commit(inode);
1084
1085 /* fall back to no compression mode */
1086 DBG_BUGON(compressed_blocks < !!inode->idata_size);
1087 compressed_blocks -= !!inode->idata_size;
1088
1089 compressmeta = malloc(BLK_ROUND_UP(sbi, inode->i_size) *
1090 sizeof(struct z_erofs_lcluster_index) +
1091 Z_EROFS_LEGACY_MAP_HEADER_SIZE);
1092 if (!compressmeta) {
1093 ret = -ENOMEM;
1094 goto err_free_idata;
1095 }
1096 ictx->metacur = compressmeta + Z_EROFS_LEGACY_MAP_HEADER_SIZE;
1097 z_erofs_write_indexes(ictx);
1098
1099 legacymetasize = ictx->metacur - compressmeta;
1100 /* estimate if data compression saves space or not */
1101 if (!inode->fragment_size &&
1102 compressed_blocks * erofs_blksiz(sbi) + inode->idata_size +
1103 legacymetasize >= inode->i_size) {
1104 z_erofs_dedupe_commit(true);
1105 ret = -ENOSPC;
1106 goto err_free_meta;
1107 }
1108 z_erofs_dedupe_commit(false);
1109 z_erofs_write_mapheader(inode, compressmeta);
1110
1111 if (!ictx->fragemitted)
1112 sbi->saved_by_deduplication += inode->fragment_size;
1113
1114 /* if the entire file is a fragment, a simplified form is used. */
1115 if (inode->i_size <= inode->fragment_size) {
1116 DBG_BUGON(inode->i_size < inode->fragment_size);
1117 DBG_BUGON(inode->fragmentoff >> 63);
1118 *(__le64 *)compressmeta =
1119 cpu_to_le64(inode->fragmentoff | 1ULL << 63);
1120 inode->datalayout = EROFS_INODE_COMPRESSED_FULL;
1121 legacymetasize = Z_EROFS_LEGACY_MAP_HEADER_SIZE;
1122 }
1123
1124 if (compressed_blocks) {
1125 ret = erofs_bh_balloon(bh, erofs_pos(sbi, compressed_blocks));
1126 DBG_BUGON(ret != erofs_blksiz(sbi));
1127 } else {
1128 if (!cfg.c_fragments && !cfg.c_dedupe)
1129 DBG_BUGON(!inode->idata_size);
1130 }
1131
1132 erofs_info("compressed %s (%llu bytes) into %u blocks",
1133 inode->i_srcpath, (unsigned long long)inode->i_size,
1134 compressed_blocks);
1135
1136 if (inode->idata_size) {
1137 bh->op = &erofs_skip_write_bhops;
1138 inode->bh_data = bh;
1139 } else {
1140 erofs_bdrop(bh, false);
1141 }
1142
1143 inode->u.i_blocks = compressed_blocks;
1144
1145 if (inode->datalayout == EROFS_INODE_COMPRESSED_FULL) {
1146 inode->extent_isize = legacymetasize;
1147 } else {
1148 ret = z_erofs_convert_to_compacted_format(inode, blkaddr,
1149 legacymetasize,
1150 compressmeta);
1151 DBG_BUGON(ret);
1152 }
1153 inode->compressmeta = compressmeta;
1154 if (!erofs_is_packed_inode(inode))
1155 erofs_droid_blocklist_write(inode, blkaddr, compressed_blocks);
1156 return 0;
1157
1158 err_free_meta:
1159 free(compressmeta);
1160 inode->compressmeta = NULL;
1161 err_free_idata:
1162 erofs_bdrop(bh, true); /* revoke buffer */
1163 if (inode->idata) {
1164 free(inode->idata);
1165 inode->idata = NULL;
1166 }
1167 return ret;
1168 }
1169
1170 #ifdef EROFS_MT_ENABLED
1171 void *z_erofs_mt_wq_tls_alloc(struct erofs_workqueue *wq, void *ptr)
1172 {
1173 struct erofs_compress_wq_tls *tls;
1174
1175 tls = calloc(1, sizeof(*tls));
1176 if (!tls)
1177 return NULL;
1178
1179 tls->queue = malloc(Z_EROFS_COMPR_QUEUE_SZ);
1180 if (!tls->queue)
1181 goto err_free_priv;
1182
1183 tls->destbuf = calloc(1, EROFS_CONFIG_COMPR_MAX_SZ +
1184 EROFS_MAX_BLOCK_SIZE);
1185 if (!tls->destbuf)
1186 goto err_free_queue;
1187
1188 tls->ccfg = calloc(EROFS_MAX_COMPR_CFGS, sizeof(*tls->ccfg));
1189 if (!tls->ccfg)
1190 goto err_free_destbuf;
1191 return tls;
1192
1193 err_free_destbuf:
1194 free(tls->destbuf);
1195 err_free_queue:
1196 free(tls->queue);
1197 err_free_priv:
1198 free(tls);
1199 return NULL;
1200 }
1201
1202 int z_erofs_mt_wq_tls_init_compr(struct erofs_sb_info *sbi,
1203 struct erofs_compress_wq_tls *tls,
1204 unsigned int alg_id, char *alg_name,
1205 unsigned int comp_level,
1206 unsigned int dict_size)
1207 {
1208 struct erofs_compress_cfg *lc = &tls->ccfg[alg_id];
1209 int ret;
1210
1211 if (__erofs_likely(lc->enable))
1212 return 0;
1213
1214 ret = erofs_compressor_init(sbi, &lc->handle, alg_name,
1215 comp_level, dict_size);
1216 if (ret)
1217 return ret;
1218 lc->algorithmtype = alg_id;
1219 lc->enable = true;
1220 return 0;
1221 }
1222
1223 void *z_erofs_mt_wq_tls_free(struct erofs_workqueue *wq, void *priv)
1224 {
1225 struct erofs_compress_wq_tls *tls = priv;
1226 int i;
1227
1228 for (i = 0; i < EROFS_MAX_COMPR_CFGS; i++)
1229 if (tls->ccfg[i].enable)
1230 erofs_compressor_exit(&tls->ccfg[i].handle);
1231
1232 free(tls->ccfg);
1233 free(tls->destbuf);
1234 free(tls->queue);
1235 free(tls);
1236 return NULL;
1237 }
1238
1239 void z_erofs_mt_workfn(struct erofs_work *work, void *tlsp)
1240 {
1241 struct erofs_compress_work *cwork = (struct erofs_compress_work *)work;
1242 struct erofs_compress_wq_tls *tls = tlsp;
1243 struct z_erofs_compress_sctx *sctx = &cwork->ctx;
1244 struct z_erofs_compress_ictx *ictx = sctx->ictx;
1245 struct erofs_inode *inode = ictx->inode;
1246 struct erofs_sb_info *sbi = inode->sbi;
1247 int ret = 0;
1248
1249 ret = z_erofs_mt_wq_tls_init_compr(sbi, tls, cwork->alg_id,
1250 cwork->alg_name, cwork->comp_level,
1251 cwork->dict_size);
1252 if (ret)
1253 goto out;
1254
1255 sctx->pclustersize = z_erofs_get_max_pclustersize(inode);
1256 sctx->queue = tls->queue;
1257 sctx->destbuf = tls->destbuf;
1258 sctx->chandle = &tls->ccfg[cwork->alg_id].handle;
1259 erofs_compressor_reset(sctx->chandle);
1260 sctx->membuf = malloc(round_up(sctx->remaining, erofs_blksiz(sbi)));
1261 if (!sctx->membuf) {
1262 ret = -ENOMEM;
1263 goto out;
1264 }
1265 sctx->memoff = 0;
1266
1267 ret = z_erofs_compress_segment(sctx, sctx->seg_idx * cfg.c_mkfs_segment_size,
1268 EROFS_NULL_ADDR);
1269
1270 out:
1271 cwork->errcode = ret;
1272 pthread_mutex_lock(&ictx->mutex);
1273 if (++ictx->nfini >= ictx->seg_num) {
1274 DBG_BUGON(ictx->nfini > ictx->seg_num);
1275 pthread_cond_signal(&ictx->cond);
1276 }
1277 pthread_mutex_unlock(&ictx->mutex);
1278 }
1279
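/*
 * Runs in the main thread once a worker segment is done: move its extents
 * to the inode-wide list, assign final block addresses, and flush the
 * staged membuf to disk (already-deduplicated extents keep their blkaddr).
 */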
1280 int z_erofs_merge_segment(struct z_erofs_compress_ictx *ictx,
1281 struct z_erofs_compress_sctx *sctx)
1282 {
1283 struct z_erofs_extent_item *ei, *n;
1284 struct erofs_sb_info *sbi = ictx->inode->sbi;
1285 erofs_blk_t blkoff = 0;
1286 int ret = 0, ret2;
1287
1288 list_for_each_entry_safe(ei, n, &sctx->extents, list) {
1289 list_del(&ei->list);
1290 list_add_tail(&ei->list, &ictx->extents);
1291
1292 if (ei->e.blkaddr != EROFS_NULL_ADDR) /* deduped extents */
1293 continue;
1294
1295 ei->e.blkaddr = sctx->blkaddr;
1296 sctx->blkaddr += ei->e.compressedblks;
1297
1298 /* skip writing data but leave blkaddr for the inline fallback */
1299 if (ei->e.inlined || !ei->e.compressedblks)
1300 continue;
1301 ret2 = erofs_blk_write(sbi, sctx->membuf + blkoff * erofs_blksiz(sbi),
1302 ei->e.blkaddr, ei->e.compressedblks);
1303 blkoff += ei->e.compressedblks;
1304 if (ret2) {
1305 ret = ret2;
1306 continue;
1307 }
1308 }
1309 free(sctx->membuf);
1310 return ret;
1311 }
1312
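/*
 * Split the file into cfg.c_mkfs_segment_size-sized segments and queue
 * one compression work item per segment, reusing idle work items from
 * the global pool when possible.
 */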
1313 int z_erofs_mt_compress(struct z_erofs_compress_ictx *ictx)
1314 {
1315 struct erofs_compress_work *cur, *head = NULL, **last = &head;
1316 struct erofs_compress_cfg *ccfg = ictx->ccfg;
1317 struct erofs_inode *inode = ictx->inode;
1318 int nsegs = DIV_ROUND_UP(inode->i_size, cfg.c_mkfs_segment_size);
1319 int i;
1320
1321 ictx->seg_num = nsegs;
1322 ictx->nfini = 0;
1323 pthread_mutex_init(&ictx->mutex, NULL);
1324 pthread_cond_init(&ictx->cond, NULL);
1325
1326 for (i = 0; i < nsegs; i++) {
1327 pthread_mutex_lock(&z_erofs_mt_ctrl.mutex);
1328 cur = z_erofs_mt_ctrl.idle;
1329 if (cur) {
1330 z_erofs_mt_ctrl.idle = cur->next;
1331 cur->next = NULL;
1332 }
1333 pthread_mutex_unlock(&z_erofs_mt_ctrl.mutex);
1334 if (!cur) {
1335 cur = calloc(1, sizeof(*cur));
1336 if (!cur)
1337 return -ENOMEM;
1338 }
1339 *last = cur;
1340 last = &cur->next;
1341
1342 cur->ctx = (struct z_erofs_compress_sctx) {
1343 .ictx = ictx,
1344 .seg_idx = i,
1345 .pivot = &dummy_pivot,
1346 };
1347 init_list_head(&cur->ctx.extents);
1348
1349 if (i == nsegs - 1)
1350 cur->ctx.remaining = inode->i_size -
1351 inode->fragment_size -
1352 i * cfg.c_mkfs_segment_size;
1353 else
1354 cur->ctx.remaining = cfg.c_mkfs_segment_size;
1355
1356 cur->alg_id = ccfg->handle.alg->id;
1357 cur->alg_name = ccfg->handle.alg->name;
1358 cur->comp_level = ccfg->handle.compression_level;
1359 cur->dict_size = ccfg->handle.dict_size;
1360
1361 cur->work.fn = z_erofs_mt_workfn;
1362 erofs_queue_work(&z_erofs_mt_ctrl.wq, &cur->work);
1363 }
1364 ictx->mtworks = head;
1365 return 0;
1366 }
1367
1368 int erofs_mt_write_compressed_file(struct z_erofs_compress_ictx *ictx)
1369 {
1370 struct erofs_sb_info *sbi = ictx->inode->sbi;
1371 struct erofs_buffer_head *bh = NULL;
1372 struct erofs_compress_work *head = ictx->mtworks, *cur;
1373 erofs_blk_t blkaddr, compressed_blocks = 0;
1374 int ret;
1375
1376 pthread_mutex_lock(&ictx->mutex);
1377 while (ictx->nfini < ictx->seg_num)
1378 pthread_cond_wait(&ictx->cond, &ictx->mutex);
1379 pthread_mutex_unlock(&ictx->mutex);
1380
1381 bh = erofs_balloc(sbi->bmgr, DATA, 0, 0, 0);
1382 if (IS_ERR(bh)) {
1383 ret = PTR_ERR(bh);
1384 goto out;
1385 }
1386
1387 DBG_BUGON(!head);
1388 blkaddr = erofs_mapbh(NULL, bh->block);
1389
1390 ret = 0;
1391 do {
1392 cur = head;
1393 head = cur->next;
1394
1395 if (cur->errcode) {
1396 ret = cur->errcode;
1397 } else {
1398 int ret2;
1399
1400 cur->ctx.blkaddr = blkaddr;
1401 ret2 = z_erofs_merge_segment(ictx, &cur->ctx);
1402 if (ret2)
1403 ret = ret2;
1404
1405 compressed_blocks += cur->ctx.blkaddr - blkaddr;
1406 blkaddr = cur->ctx.blkaddr;
1407 }
1408
1409 pthread_mutex_lock(&z_erofs_mt_ctrl.mutex);
1410 cur->next = z_erofs_mt_ctrl.idle;
1411 z_erofs_mt_ctrl.idle = cur;
1412 pthread_mutex_unlock(&z_erofs_mt_ctrl.mutex);
1413 } while (head);
1414
1415 if (ret)
1416 goto out;
1417 ret = erofs_commit_compressed_file(ictx, bh,
1418 blkaddr - compressed_blocks, compressed_blocks);
1419
1420 out:
1421 close(ictx->fd);
1422 free(ictx);
1423 return ret;
1424 }
1425 #endif
1426
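/*
 * Shared inode context for the serialized (non-multi-threaded) path;
 * its mutex/cond ensure only one such file is processed at a time when
 * multi-threading is otherwise enabled.
 */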
1427 static struct z_erofs_compress_ictx g_ictx;
1428
1429 void *erofs_begin_compressed_file(struct erofs_inode *inode, int fd, u64 fpos)
1430 {
1431 struct erofs_sb_info *sbi = inode->sbi;
1432 struct z_erofs_compress_ictx *ictx;
1433 int ret;
1434
1435 /* initialize per-file compression setting */
1436 inode->z_advise = 0;
1437 inode->z_logical_clusterbits = sbi->blkszbits;
1438 if (!cfg.c_legacy_compress && inode->z_logical_clusterbits <= 14) {
1439 if (inode->z_logical_clusterbits <= 12)
1440 inode->z_advise |= Z_EROFS_ADVISE_COMPACTED_2B;
1441 inode->datalayout = EROFS_INODE_COMPRESSED_COMPACT;
1442 } else {
1443 inode->datalayout = EROFS_INODE_COMPRESSED_FULL;
1444 }
1445
1446 if (erofs_sb_has_big_pcluster(sbi)) {
1447 inode->z_advise |= Z_EROFS_ADVISE_BIG_PCLUSTER_1;
1448 if (inode->datalayout == EROFS_INODE_COMPRESSED_COMPACT)
1449 inode->z_advise |= Z_EROFS_ADVISE_BIG_PCLUSTER_2;
1450 }
1451 if (cfg.c_fragments && !cfg.c_dedupe)
1452 inode->z_advise |= Z_EROFS_ADVISE_INTERLACED_PCLUSTER;
1453
1454 #ifndef NDEBUG
1455 if (cfg.c_random_algorithms) {
1456 while (1) {
1457 inode->z_algorithmtype[0] =
1458 rand() % EROFS_MAX_COMPR_CFGS;
1459 if (erofs_ccfg[inode->z_algorithmtype[0]].enable)
1460 break;
1461 }
1462 }
1463 #endif
1464 inode->idata_size = 0;
1465 inode->fragment_size = 0;
1466
1467 if (!z_erofs_mt_enabled ||
1468 (cfg.c_all_fragments && !erofs_is_packed_inode(inode))) {
1469 #ifdef EROFS_MT_ENABLED
1470 pthread_mutex_lock(&g_ictx.mutex);
1471 if (g_ictx.seg_num)
1472 pthread_cond_wait(&g_ictx.cond, &g_ictx.mutex);
1473 g_ictx.seg_num = 1;
1474 pthread_mutex_unlock(&g_ictx.mutex);
1475 #endif
1476 ictx = &g_ictx;
1477 ictx->fd = fd;
1478 } else {
1479 ictx = malloc(sizeof(*ictx));
1480 if (!ictx)
1481 return ERR_PTR(-ENOMEM);
1482 ictx->fd = dup(fd);
1483 }
1484
1485 ictx->ccfg = &erofs_ccfg[inode->z_algorithmtype[0]];
1486 inode->z_algorithmtype[0] = ictx->ccfg->algorithmtype;
1487 inode->z_algorithmtype[1] = 0;
1488
1489 /*
1490 * Handle tails in advance to avoid writing duplicated
1491 * parts into the packed inode.
1492 */
1493 if (cfg.c_fragments && !erofs_is_packed_inode(inode)) {
1494 ret = z_erofs_fragments_dedupe(inode, fd, &ictx->tof_chksum);
1495 if (ret < 0)
1496 goto err_free_ictx;
1497 }
1498
1499 ictx->inode = inode;
1500 ictx->fpos = fpos;
1501 init_list_head(&ictx->extents);
1502 ictx->fix_dedupedfrag = false;
1503 ictx->fragemitted = false;
1504
1505 if (cfg.c_all_fragments && !erofs_is_packed_inode(inode) &&
1506 !inode->fragment_size) {
1507 ret = z_erofs_pack_file_from_fd(inode, fd, ictx->tof_chksum);
1508 if (ret)
1509 goto err_free_idata;
1510 }
1511 #ifdef EROFS_MT_ENABLED
1512 if (ictx != &g_ictx) {
1513 ret = z_erofs_mt_compress(ictx);
1514 if (ret)
1515 goto err_free_idata;
1516 }
1517 #endif
1518 return ictx;
1519
1520 err_free_idata:
1521 if (inode->idata) {
1522 free(inode->idata);
1523 inode->idata = NULL;
1524 }
1525 err_free_ictx:
1526 if (ictx != &g_ictx)
1527 free(ictx);
1528 return ERR_PTR(ret);
1529 }
1530
1531 int erofs_write_compressed_file(struct z_erofs_compress_ictx *ictx)
1532 {
1533 static u8 g_queue[Z_EROFS_COMPR_QUEUE_SZ];
1534 struct erofs_buffer_head *bh;
1535 static struct z_erofs_compress_sctx sctx;
1536 struct erofs_compress_cfg *ccfg = ictx->ccfg;
1537 struct erofs_inode *inode = ictx->inode;
1538 erofs_blk_t blkaddr;
1539 int ret;
1540
1541 #ifdef EROFS_MT_ENABLED
1542 if (ictx != &g_ictx)
1543 return erofs_mt_write_compressed_file(ictx);
1544 #endif
1545
1546 /* allocate main data buffer */
1547 bh = erofs_balloc(inode->sbi->bmgr, DATA, 0, 0, 0);
1548 if (IS_ERR(bh)) {
1549 ret = PTR_ERR(bh);
1550 goto err_free_idata;
1551 }
1552 blkaddr = erofs_mapbh(NULL, bh->block); /* start_blkaddr */
1553
1554 ictx->seg_num = 1;
1555 sctx = (struct z_erofs_compress_sctx) {
1556 .ictx = ictx,
1557 .queue = g_queue,
1558 .chandle = &ccfg->handle,
1559 .remaining = inode->i_size - inode->fragment_size,
1560 .seg_idx = 0,
1561 .pivot = &dummy_pivot,
1562 .pclustersize = z_erofs_get_max_pclustersize(inode),
1563 };
1564 init_list_head(&sctx.extents);
1565
1566 ret = z_erofs_compress_segment(&sctx, -1, blkaddr);
1567 if (ret)
1568 goto err_free_idata;
1569
1570 list_splice_tail(&sctx.extents, &ictx->extents);
1571 ret = erofs_commit_compressed_file(ictx, bh, blkaddr,
1572 sctx.blkaddr - blkaddr);
1573 goto out;
1574
1575 err_free_idata:
1576 erofs_bdrop(bh, true); /* revoke buffer */
1577 if (inode->idata) {
1578 free(inode->idata);
1579 inode->idata = NULL;
1580 }
1581 out:
1582 #ifdef EROFS_MT_ENABLED
1583 pthread_mutex_lock(&ictx->mutex);
1584 ictx->seg_num = 0;
1585 pthread_cond_signal(&ictx->cond);
1586 pthread_mutex_unlock(&ictx->mutex);
1587 #endif
1588 return ret;
1589 }
1590
1591 static int z_erofs_build_compr_cfgs(struct erofs_sb_info *sbi,
1592 struct erofs_buffer_head *sb_bh,
1593 u32 *max_dict_size)
1594 {
1595 struct erofs_buffer_head *bh = sb_bh;
1596 int ret = 0;
1597
1598 if (sbi->available_compr_algs & (1 << Z_EROFS_COMPRESSION_LZ4)) {
1599 struct {
1600 __le16 size;
1601 struct z_erofs_lz4_cfgs lz4;
1602 } __packed lz4alg = {
1603 .size = cpu_to_le16(sizeof(struct z_erofs_lz4_cfgs)),
1604 .lz4 = {
1605 .max_distance =
1606 cpu_to_le16(sbi->lz4.max_distance),
1607 .max_pclusterblks =
1608 cfg.c_mkfs_pclustersize_max >> sbi->blkszbits,
1609 }
1610 };
1611
1612 bh = erofs_battach(bh, META, sizeof(lz4alg));
1613 if (IS_ERR(bh)) {
1614 DBG_BUGON(1);
1615 return PTR_ERR(bh);
1616 }
1617 erofs_mapbh(NULL, bh->block);
1618 ret = erofs_dev_write(sbi, &lz4alg, erofs_btell(bh, false),
1619 sizeof(lz4alg));
1620 bh->op = &erofs_drop_directly_bhops;
1621 }
1622 #ifdef HAVE_LIBLZMA
1623 if (sbi->available_compr_algs & (1 << Z_EROFS_COMPRESSION_LZMA)) {
1624 struct {
1625 __le16 size;
1626 struct z_erofs_lzma_cfgs lzma;
1627 } __packed lzmaalg = {
1628 .size = cpu_to_le16(sizeof(struct z_erofs_lzma_cfgs)),
1629 .lzma = {
1630 .dict_size = cpu_to_le32(
1631 max_dict_size
1632 [Z_EROFS_COMPRESSION_LZMA]),
1633 }
1634 };
1635
1636 bh = erofs_battach(bh, META, sizeof(lzmaalg));
1637 if (IS_ERR(bh)) {
1638 DBG_BUGON(1);
1639 return PTR_ERR(bh);
1640 }
1641 erofs_mapbh(NULL, bh->block);
1642 ret = erofs_dev_write(sbi, &lzmaalg, erofs_btell(bh, false),
1643 sizeof(lzmaalg));
1644 bh->op = &erofs_drop_directly_bhops;
1645 }
1646 #endif
1647 if (sbi->available_compr_algs & (1 << Z_EROFS_COMPRESSION_DEFLATE)) {
1648 struct {
1649 __le16 size;
1650 struct z_erofs_deflate_cfgs z;
1651 } __packed zalg = {
1652 .size = cpu_to_le16(sizeof(struct z_erofs_deflate_cfgs)),
1653 .z = {
1654 .windowbits = cpu_to_le32(ilog2(
1655 max_dict_size
1656 [Z_EROFS_COMPRESSION_DEFLATE])),
1657 }
1658 };
1659
1660 bh = erofs_battach(bh, META, sizeof(zalg));
1661 if (IS_ERR(bh)) {
1662 DBG_BUGON(1);
1663 return PTR_ERR(bh);
1664 }
1665 erofs_mapbh(NULL, bh->block);
1666 ret = erofs_dev_write(sbi, &zalg, erofs_btell(bh, false),
1667 sizeof(zalg));
1668 bh->op = &erofs_drop_directly_bhops;
1669 }
1670 #ifdef HAVE_LIBZSTD
1671 if (sbi->available_compr_algs & (1 << Z_EROFS_COMPRESSION_ZSTD)) {
1672 struct {
1673 __le16 size;
1674 struct z_erofs_zstd_cfgs z;
1675 } __packed zalg = {
1676 .size = cpu_to_le16(sizeof(struct z_erofs_zstd_cfgs)),
1677 .z = {
1678 .windowlog =
1679 ilog2(max_dict_size[Z_EROFS_COMPRESSION_ZSTD]) - 10,
1680 }
1681 };
1682
1683 bh = erofs_battach(bh, META, sizeof(zalg));
1684 if (IS_ERR(bh)) {
1685 DBG_BUGON(1);
1686 return PTR_ERR(bh);
1687 }
1688 erofs_mapbh(NULL, bh->block);
1689 ret = erofs_dev_write(sbi, &zalg, erofs_btell(bh, false),
1690 sizeof(zalg));
1691 bh->op = &erofs_drop_directly_bhops;
1692 }
1693 #endif
1694 return ret;
1695 }
1696
1697 int z_erofs_compress_init(struct erofs_sb_info *sbi, struct erofs_buffer_head *sb_bh)
1698 {
1699 int i, ret, id;
1700 u32 max_dict_size[Z_EROFS_COMPRESSION_MAX] = {};
1701 u32 available_compr_algs = 0;
1702
1703 for (i = 0; cfg.c_compr_opts[i].alg; ++i) {
1704 struct erofs_compress *c = &erofs_ccfg[i].handle;
1705
1706 ret = erofs_compressor_init(sbi, c, cfg.c_compr_opts[i].alg,
1707 cfg.c_compr_opts[i].level,
1708 cfg.c_compr_opts[i].dict_size);
1709 if (ret)
1710 return ret;
1711
1712 id = z_erofs_get_compress_algorithm_id(c);
1713 erofs_ccfg[i].algorithmtype = id;
1714 erofs_ccfg[i].enable = true;
1715 available_compr_algs |= 1 << erofs_ccfg[i].algorithmtype;
1716 if (erofs_ccfg[i].algorithmtype != Z_EROFS_COMPRESSION_LZ4)
1717 erofs_sb_set_compr_cfgs(sbi);
1718 if (c->dict_size > max_dict_size[id])
1719 max_dict_size[id] = c->dict_size;
1720 }
1721
1722 /*
1723 * if the primary algorithm is empty (e.g. compression off),
1724 * clear the 0PADDING feature for old kernel compatibility.
1725 */
1726 if (!available_compr_algs ||
1727 (cfg.c_legacy_compress && available_compr_algs == 1))
1728 erofs_sb_clear_lz4_0padding(sbi);
1729
1730 if (!available_compr_algs)
1731 return 0;
1732
1733 if (!sb_bh) {
1734 u32 dalg = available_compr_algs & (~sbi->available_compr_algs);
1735
1736 if (dalg) {
1737 erofs_err("unavailable algorithms 0x%x on incremental builds",
1738 dalg);
1739 return -EOPNOTSUPP;
1740 }
1741 if (available_compr_algs & (1 << Z_EROFS_COMPRESSION_LZ4) &&
1742 sbi->lz4.max_pclusterblks << sbi->blkszbits <
1743 cfg.c_mkfs_pclustersize_max) {
1744 erofs_err("pclustersize %u is too large on incremental builds",
1745 cfg.c_mkfs_pclustersize_max);
1746 return -EOPNOTSUPP;
1747 }
1748 } else {
1749 sbi->available_compr_algs = available_compr_algs;
1750 }
1751
1752 /*
1753 * if big pcluster is enabled, an extra CBLKCNT lcluster index needs
1754 * to be loaded in order to get those compressed block counts.
1755 */
1756 if (cfg.c_mkfs_pclustersize_max > erofs_blksiz(sbi)) {
1757 if (cfg.c_mkfs_pclustersize_max > Z_EROFS_PCLUSTER_MAX_SIZE) {
1758 erofs_err("unsupported pclustersize %u (too large)",
1759 cfg.c_mkfs_pclustersize_max);
1760 return -EINVAL;
1761 }
1762 erofs_sb_set_big_pcluster(sbi);
1763 }
1764 if (cfg.c_mkfs_pclustersize_packed > cfg.c_mkfs_pclustersize_max) {
1765 erofs_err("invalid pclustersize for the packed file %u",
1766 cfg.c_mkfs_pclustersize_packed);
1767 return -EINVAL;
1768 }
1769
1770 if (sb_bh && erofs_sb_has_compr_cfgs(sbi)) {
1771 ret = z_erofs_build_compr_cfgs(sbi, sb_bh, max_dict_size);
1772 if (ret)
1773 return ret;
1774 }
1775
1776 z_erofs_mt_enabled = false;
1777 #ifdef EROFS_MT_ENABLED
1778 if (cfg.c_mt_workers >= 1 && (cfg.c_dedupe ||
1779 (cfg.c_fragments && !cfg.c_all_fragments))) {
1780 if (cfg.c_dedupe)
1781 erofs_warn("multi-threaded dedupe is NOT implemented for now");
1782 if (cfg.c_fragments)
1783 erofs_warn("multi-threaded fragments is NOT implemented for now");
1784 cfg.c_mt_workers = 0;
1785 }
1786
1787 if (cfg.c_mt_workers >= 1) {
1788 ret = erofs_alloc_workqueue(&z_erofs_mt_ctrl.wq,
1789 cfg.c_mt_workers,
1790 cfg.c_mt_workers << 2,
1791 z_erofs_mt_wq_tls_alloc,
1792 z_erofs_mt_wq_tls_free);
1793 if (ret)
1794 return ret;
1795 z_erofs_mt_enabled = true;
1796 }
1797 pthread_mutex_init(&g_ictx.mutex, NULL);
1798 pthread_cond_init(&g_ictx.cond, NULL);
1799 #endif
1800 return 0;
1801 }
1802
1803 int z_erofs_compress_exit(void)
1804 {
1805 int i, ret;
1806
1807 for (i = 0; cfg.c_compr_opts[i].alg; ++i) {
1808 ret = erofs_compressor_exit(&erofs_ccfg[i].handle);
1809 if (ret)
1810 return ret;
1811 }
1812
1813 if (z_erofs_mt_enabled) {
1814 #ifdef EROFS_MT_ENABLED
1815 ret = erofs_destroy_workqueue(&z_erofs_mt_ctrl.wq);
1816 if (ret)
1817 return ret;
1818 while (z_erofs_mt_ctrl.idle) {
1819 struct erofs_compress_work *tmp =
1820 z_erofs_mt_ctrl.idle->next;
1821 free(z_erofs_mt_ctrl.idle);
1822 z_erofs_mt_ctrl.idle = tmp;
1823 }
1824 #endif
1825 }
1826 return 0;
1827 }
1828