// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018-2019 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Miao Xie <miaoxie@huawei.com>
 * with heavy changes by Gao Xiang <gaoxiang25@huawei.com>
 */
#ifndef _LARGEFILE64_SOURCE
#define _LARGEFILE64_SOURCE
#endif
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include "erofs/print.h"
#include "erofs/io.h"
#include "erofs/cache.h"
#include "erofs/compress.h"
#include "compressor.h"
#include "erofs/block_list.h"
#include "erofs/compress_hints.h"

static struct erofs_compress compresshandle;
static unsigned int algorithmtype[2];

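/*
 * Per-file compression context.  @queue is a double-sized staging buffer:
 * raw data is read in at @tail and consumed from @head, and the remaining
 * window is slid back to the front once a non-final round has advanced far
 * enough (see vle_compress_one()).  @metacur walks the in-memory legacy
 * index area, @blkaddr tracks the next physical block to write and
 * @clusterofs is the logical offset within the current lcluster.
 */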
struct z_erofs_vle_compress_ctx {
	u8 *metacur;

	u8 queue[EROFS_CONFIG_COMPR_MAX_SZ * 2];
	unsigned int head, tail;
	unsigned int compressedblks;
	erofs_blk_t blkaddr;		/* pointing to the next blkaddr */
	u16 clusterofs;
};

#define Z_EROFS_LEGACY_MAP_HEADER_SIZE	\
	(sizeof(struct z_erofs_map_header) + Z_EROFS_VLE_LEGACY_HEADER_PADDING)

static unsigned int vle_compressmeta_capacity(erofs_off_t filesize)
{
	const unsigned int indexsize = BLK_ROUND_UP(filesize) *
		sizeof(struct z_erofs_vle_decompressed_index);

	return Z_EROFS_LEGACY_MAP_HEADER_SIZE + indexsize;
}

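/*
 * The index for an lcluster is only emitted once the extent covering it is
 * known.  If the last extent ends in the middle of an lcluster
 * (ctx->clusterofs != 0), that trailing lcluster still lacks an index, so
 * emit a final PLAIN one for it here.
 */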
static void vle_write_indexes_final(struct z_erofs_vle_compress_ctx *ctx)
{
	const unsigned int type = Z_EROFS_VLE_CLUSTER_TYPE_PLAIN;
	struct z_erofs_vle_decompressed_index di;

	if (!ctx->clusterofs)
		return;

	di.di_clusterofs = cpu_to_le16(ctx->clusterofs);
	di.di_u.blkaddr = 0;
	di.di_advise = cpu_to_le16(type << Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT);

	memcpy(ctx->metacur, &di, sizeof(di));
	ctx->metacur += sizeof(di);
}

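/*
 * Write one legacy index per lcluster covered by the extent just produced.
 * d1 counts how many lcluster boundaries the extent crosses; the first index
 * is the HEAD (or PLAIN if the data is stored uncompressed), the following
 * ones are NONHEAD with delta[0]/delta[1] giving the distance in lclusters
 * back to the head and forward to the next head.  With big pcluster, the
 * first NONHEAD instead carries the compressed block count tagged with
 * Z_EROFS_VLE_DI_D0_CBLKCNT.
 */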
static void vle_write_indexes(struct z_erofs_vle_compress_ctx *ctx,
			      unsigned int count, bool raw)
{
	unsigned int clusterofs = ctx->clusterofs;
	unsigned int d0 = 0, d1 = (clusterofs + count) / EROFS_BLKSIZ;
	struct z_erofs_vle_decompressed_index di;
	unsigned int type;
	__le16 advise;

	di.di_clusterofs = cpu_to_le16(ctx->clusterofs);

	/* whether the tail-end uncompressed block or not */
	if (!d1) {
		/* TODO: tail-packing inline compressed data */
		DBG_BUGON(!raw);
		type = Z_EROFS_VLE_CLUSTER_TYPE_PLAIN;
		advise = cpu_to_le16(type << Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT);

		di.di_advise = advise;
		di.di_u.blkaddr = cpu_to_le32(ctx->blkaddr);
		memcpy(ctx->metacur, &di, sizeof(di));
		ctx->metacur += sizeof(di);

		/* don't add the final index if the tail-end block exists */
		ctx->clusterofs = 0;
		return;
	}

	do {
		/* XXX: big pcluster feature should be per-inode */
		if (d0 == 1 && erofs_sb_has_big_pcluster()) {
			type = Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD;
			di.di_u.delta[0] = cpu_to_le16(ctx->compressedblks |
						       Z_EROFS_VLE_DI_D0_CBLKCNT);
			di.di_u.delta[1] = cpu_to_le16(d1);
		} else if (d0) {
			type = Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD;

			di.di_u.delta[0] = cpu_to_le16(d0);
			di.di_u.delta[1] = cpu_to_le16(d1);
		} else {
			type = raw ? Z_EROFS_VLE_CLUSTER_TYPE_PLAIN :
				Z_EROFS_VLE_CLUSTER_TYPE_HEAD;
			di.di_u.blkaddr = cpu_to_le32(ctx->blkaddr);
		}
		advise = cpu_to_le16(type << Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT);
		di.di_advise = advise;

		memcpy(ctx->metacur, &di, sizeof(di));
		ctx->metacur += sizeof(di);

		count -= EROFS_BLKSIZ - clusterofs;
		clusterofs = 0;

		++d0;
		--d1;
	} while (clusterofs + count >= EROFS_BLKSIZ);

	ctx->clusterofs = clusterofs + count;
}

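/*
 * Store one block of data verbatim.  Without the 0PADDING feature, the
 * extent is first pulled back to an lcluster boundary where possible
 * (ctx->head is moved back by ctx->clusterofs) so that the uncompressed
 * block starts aligned; the block is then zero-padded up to EROFS_BLKSIZ
 * and written out.  Returns the number of queue bytes consumed or a
 * negative errno.
 */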
static int write_uncompressed_extent(struct z_erofs_vle_compress_ctx *ctx,
				     unsigned int *len, char *dst)
{
	int ret;
	unsigned int count;

	/* reset clusterofs to 0 if permitted */
	if (!erofs_sb_has_lz4_0padding() && ctx->clusterofs &&
	    ctx->head >= ctx->clusterofs) {
		ctx->head -= ctx->clusterofs;
		*len += ctx->clusterofs;
		ctx->clusterofs = 0;
	}

	/* write uncompressed data */
	count = min(EROFS_BLKSIZ, *len);

	memcpy(dst, ctx->queue + ctx->head, count);
	memset(dst + count, 0, EROFS_BLKSIZ - count);

	erofs_dbg("Writing %u uncompressed data to block %u",
		  count, ctx->blkaddr);
	ret = blk_write(dst, ctx->blkaddr, 1);
	if (ret)
		return ret;
	return count;
}

static unsigned int z_erofs_get_max_pclusterblks(struct erofs_inode *inode)
{
#ifndef NDEBUG
	if (cfg.c_random_pclusterblks)
		return 1 + rand() % cfg.c_pclusterblks_max;
#endif
	if (cfg.c_compress_hints_file) {
		z_erofs_apply_compress_hints(inode);
		DBG_BUGON(!inode->z_physical_clusterblks);
		return inode->z_physical_clusterblks;
	}
	return cfg.c_pclusterblks_def;
}

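/*
 * One compression round over the staging queue.  For each step, pick the
 * physical cluster size, try to fill it with erofs_compress_destsize() and
 * fall back to an uncompressed block on failure (or for short tails of the
 * final round).  After writing the data and its indexes, slide the queue
 * window back to the front whenever a non-final round has consumed more
 * than EROFS_CONFIG_COMPR_MAX_SZ bytes.
 */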
static int vle_compress_one(struct erofs_inode *inode,
			    struct z_erofs_vle_compress_ctx *ctx,
			    bool final)
{
	struct erofs_compress *const h = &compresshandle;
	unsigned int len = ctx->tail - ctx->head;
	unsigned int count;
	int ret;
	static char dstbuf[EROFS_CONFIG_COMPR_MAX_SZ + EROFS_BLKSIZ];
	char *const dst = dstbuf + EROFS_BLKSIZ;

	while (len) {
		const unsigned int pclustersize =
			z_erofs_get_max_pclusterblks(inode) * EROFS_BLKSIZ;
		bool raw;

		if (len <= pclustersize) {
			if (final) {
				if (len <= EROFS_BLKSIZ)
					goto nocompression;
			} else {
				break;
			}
		}

		count = min(len, cfg.c_max_decompressed_extent_bytes);
		ret = erofs_compress_destsize(h, ctx->queue + ctx->head,
					      &count, dst, pclustersize);
		if (ret <= 0) {
			if (ret != -EAGAIN) {
				erofs_err("failed to compress %s: %s",
					  inode->i_srcpath,
					  erofs_strerror(ret));
			}
nocompression:
			ret = write_uncompressed_extent(ctx, &len, dst);
			if (ret < 0)
				return ret;
			count = ret;
			ctx->compressedblks = 1;
			raw = true;
		} else {
			const unsigned int tailused = ret & (EROFS_BLKSIZ - 1);
			const unsigned int padding =
				erofs_sb_has_lz4_0padding() && tailused ?
					EROFS_BLKSIZ - tailused : 0;

			ctx->compressedblks = DIV_ROUND_UP(ret, EROFS_BLKSIZ);
			DBG_BUGON(ctx->compressedblks * EROFS_BLKSIZ >= count);

			/* zero out garbage trailing data for non-0padding */
			if (!erofs_sb_has_lz4_0padding())
				memset(dst + ret, 0,
				       roundup(ret, EROFS_BLKSIZ) - ret);

			/* write compressed data */
			erofs_dbg("Writing %u compressed data to %u of %u blocks",
				  count, ctx->blkaddr, ctx->compressedblks);

			ret = blk_write(dst - padding, ctx->blkaddr,
					ctx->compressedblks);
			if (ret)
				return ret;
			raw = false;
		}

		ctx->head += count;
		/* write compression indexes for this pcluster */
		vle_write_indexes(ctx, count, raw);

		ctx->blkaddr += ctx->compressedblks;
		len -= count;

		if (!final && ctx->head >= EROFS_CONFIG_COMPR_MAX_SZ) {
			const unsigned int qh_aligned =
				round_down(ctx->head, EROFS_BLKSIZ);
			const unsigned int qh_after = ctx->head - qh_aligned;

			memmove(ctx->queue, ctx->queue + qh_aligned,
				len + qh_after);
			ctx->head = qh_after;
			ctx->tail = qh_after + len;
			break;
		}
	}
	return 0;
}

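/* in-memory form of a single decoded (legacy) compression index */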
struct z_erofs_compressindex_vec {
	union {
		erofs_blk_t blkaddr;
		u16 delta[2];
	} u;
	u16 clusterofs;
	u8 clustertype;
};

static void *parse_legacy_indexes(struct z_erofs_compressindex_vec *cv,
				  unsigned int nr, void *metacur)
{
	struct z_erofs_vle_decompressed_index *const db = metacur;
	unsigned int i;

	for (i = 0; i < nr; ++i, ++cv) {
		struct z_erofs_vle_decompressed_index *const di = db + i;
		const unsigned int advise = le16_to_cpu(di->di_advise);

		cv->clustertype = (advise >> Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT) &
			((1 << Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) - 1);
		cv->clusterofs = le16_to_cpu(di->di_clusterofs);

		if (cv->clustertype == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
			cv->u.delta[0] = le16_to_cpu(di->di_u.delta[0]);
			cv->u.delta[1] = le16_to_cpu(di->di_u.delta[1]);
		} else {
			cv->u.blkaddr = le32_to_cpu(di->di_u.blkaddr);
		}
	}
	return db + nr;
}

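/*
 * Pack @vcnt decoded indexes into one compacted unit of @destsize bytes each.
 * Every index is reduced to (clustertype << lclusterbits | offset) and each
 * unit stores a 32-bit base blkaddr in its last 4 bytes, so:
 *   destsize 4 -> vcnt 2,  encodebits = (2 * 32 - 32) / 2   = 16 bits/index
 *   destsize 2 -> vcnt 16, encodebits = (16 * 16 - 32) / 16  = 14 bits/index
 * (the latter only works with 4KiB lclusters, i.e. lclusterbits == 12).
 */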
static void *write_compacted_indexes(u8 *out,
				     struct z_erofs_compressindex_vec *cv,
				     erofs_blk_t *blkaddr_ret,
				     unsigned int destsize,
				     unsigned int logical_clusterbits,
				     bool final, bool *dummy_head)
{
	unsigned int vcnt, encodebits, pos, i, cblks;
	bool update_blkaddr;
	erofs_blk_t blkaddr;

	if (destsize == 4)
		vcnt = 2;
	else if (destsize == 2 && logical_clusterbits == 12)
		vcnt = 16;
	else
		return ERR_PTR(-EINVAL);
	encodebits = (vcnt * destsize * 8 - 32) / vcnt;
	blkaddr = *blkaddr_ret;
	update_blkaddr = erofs_sb_has_big_pcluster();

	pos = 0;
	for (i = 0; i < vcnt; ++i) {
		unsigned int offset, v;
		u8 ch, rem;

		if (cv[i].clustertype == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
			if (cv[i].u.delta[0] & Z_EROFS_VLE_DI_D0_CBLKCNT) {
				cblks = cv[i].u.delta[0] & ~Z_EROFS_VLE_DI_D0_CBLKCNT;
				offset = cv[i].u.delta[0];
				blkaddr += cblks;
				*dummy_head = false;
			} else if (i + 1 == vcnt) {
				offset = cv[i].u.delta[1];
			} else {
				offset = cv[i].u.delta[0];
			}
		} else {
			offset = cv[i].clusterofs;
			if (*dummy_head) {
				++blkaddr;
				if (update_blkaddr)
					*blkaddr_ret = blkaddr;
			}
			*dummy_head = true;
			update_blkaddr = false;

			if (cv[i].u.blkaddr != blkaddr) {
				if (i + 1 != vcnt)
					DBG_BUGON(!final);
				DBG_BUGON(cv[i].u.blkaddr);
			}
		}
		v = (cv[i].clustertype << logical_clusterbits) | offset;
		rem = pos & 7;
		ch = out[pos / 8] & ((1 << rem) - 1);
		out[pos / 8] = (v << rem) | ch;
		out[pos / 8 + 1] = v >> (8 - rem);
		out[pos / 8 + 2] = v >> (16 - rem);
		pos += encodebits;
	}
	DBG_BUGON(destsize * vcnt * 8 != pos + 32);
	*(__le32 *)(out + destsize * vcnt - 4) = cpu_to_le32(*blkaddr_ret);
	*blkaddr_ret = blkaddr;
	return out + destsize * vcnt;
}

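/*
 * Convert the in-memory legacy indexes (8 bytes per lcluster) into the
 * on-disk compacted layout.  The indexes are split into three runs so that
 * the 2B-packed part stays 32-byte aligned relative to the map header: an
 * optional compacted_4b_initial run to reach alignment, a compacted_2b run
 * (multiples of 16 indexes, only for 4KiB lclusters), and a compacted_4b_end
 * run for whatever remains.
 */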
int z_erofs_convert_to_compacted_format(struct erofs_inode *inode,
					erofs_blk_t blkaddr,
					unsigned int legacymetasize,
					void *compressmeta)
{
	const unsigned int mpos = Z_EROFS_VLE_EXTENT_ALIGN(inode->inode_isize +
							   inode->xattr_isize) +
				  sizeof(struct z_erofs_map_header);
	const unsigned int totalidx = (legacymetasize -
				       Z_EROFS_LEGACY_MAP_HEADER_SIZE) / 8;
	const unsigned int logical_clusterbits = inode->z_logical_clusterbits;
	u8 *out, *in;
	struct z_erofs_compressindex_vec cv[16];
	/* # of 8-byte units so that it can be aligned with 32 bytes */
	unsigned int compacted_4b_initial, compacted_4b_end;
	unsigned int compacted_2b;
	bool dummy_head;

	if (logical_clusterbits < LOG_BLOCK_SIZE || LOG_BLOCK_SIZE < 12)
		return -EINVAL;
	if (logical_clusterbits > 14)	/* currently not supported */
		return -ENOTSUP;
	if (logical_clusterbits == 12) {
		compacted_4b_initial = (32 - mpos % 32) / 4;
		if (compacted_4b_initial == 32 / 4)
			compacted_4b_initial = 0;

		if (compacted_4b_initial > totalidx) {
			compacted_4b_initial = compacted_2b = 0;
			compacted_4b_end = totalidx;
		} else {
			compacted_2b = rounddown(totalidx -
						 compacted_4b_initial, 16);
			compacted_4b_end = totalidx - compacted_4b_initial -
					   compacted_2b;
		}
	} else {
		compacted_2b = compacted_4b_initial = 0;
		compacted_4b_end = totalidx;
	}

	out = in = compressmeta;

	out += sizeof(struct z_erofs_map_header);
	in += Z_EROFS_LEGACY_MAP_HEADER_SIZE;

	dummy_head = false;
	/* prior to bigpcluster, blkaddr was bumped up once coming into HEAD */
	if (!erofs_sb_has_big_pcluster()) {
		--blkaddr;
		dummy_head = true;
	}

	/* generate compacted_4b_initial */
	while (compacted_4b_initial) {
		in = parse_legacy_indexes(cv, 2, in);
		out = write_compacted_indexes(out, cv, &blkaddr,
					      4, logical_clusterbits, false,
					      &dummy_head);
		compacted_4b_initial -= 2;
	}
	DBG_BUGON(compacted_4b_initial);

	/* generate compacted_2b */
	while (compacted_2b) {
		in = parse_legacy_indexes(cv, 16, in);
		out = write_compacted_indexes(out, cv, &blkaddr,
					      2, logical_clusterbits, false,
					      &dummy_head);
		compacted_2b -= 16;
	}
	DBG_BUGON(compacted_2b);

	/* generate compacted_4b_end */
	while (compacted_4b_end > 1) {
		in = parse_legacy_indexes(cv, 2, in);
		out = write_compacted_indexes(out, cv, &blkaddr,
					      4, logical_clusterbits, false,
					      &dummy_head);
		compacted_4b_end -= 2;
	}

	/* generate final compacted_4b_end if needed */
	if (compacted_4b_end) {
		memset(cv, 0, sizeof(cv));
		in = parse_legacy_indexes(cv, 1, in);
		out = write_compacted_indexes(out, cv, &blkaddr,
					      4, logical_clusterbits, true,
					      &dummy_head);
	}
	inode->extent_isize = out - (u8 *)compressmeta;
	return 0;
}

static void z_erofs_write_mapheader(struct erofs_inode *inode,
				    void *compressmeta)
{
	struct z_erofs_map_header h = {
		.h_advise = cpu_to_le16(inode->z_advise),
		.h_algorithmtype = inode->z_algorithmtype[1] << 4 |
				   inode->z_algorithmtype[0],
		/* lclustersize */
		.h_clusterbits = inode->z_logical_clusterbits - 12,
	};

	memset(compressmeta, 0, Z_EROFS_LEGACY_MAP_HEADER_SIZE);
	/* write out map header */
	memcpy(compressmeta, &h, sizeof(struct z_erofs_map_header));
}

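/*
 * Compress a regular file: reserve the metadata buffer, feed the source
 * through vle_compress_one() round by round, and finally attach the legacy
 * or compacted indexes to the inode.  If compression doesn't actually save
 * any blocks, -ENOSPC is returned so that the caller can fall back to
 * storing the file uncompressed.
 */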
int erofs_write_compressed_file(struct erofs_inode *inode)
{
	struct erofs_buffer_head *bh;
	struct z_erofs_vle_compress_ctx ctx;
	erofs_off_t remaining;
	erofs_blk_t blkaddr, compressed_blocks;
	unsigned int legacymetasize;
	int ret, fd;
	u8 *compressmeta = malloc(vle_compressmeta_capacity(inode->i_size));

	if (!compressmeta)
		return -ENOMEM;

	fd = open(inode->i_srcpath, O_RDONLY | O_BINARY);
	if (fd < 0) {
		ret = -errno;
		goto err_free;
	}

	/* allocate main data buffer */
	bh = erofs_balloc(DATA, 0, 0, 0);
	if (IS_ERR(bh)) {
		ret = PTR_ERR(bh);
		goto err_close;
	}

	/* initialize per-file compression setting */
	inode->z_advise = 0;
	if (!cfg.c_legacy_compress) {
		inode->z_advise |= Z_EROFS_ADVISE_COMPACTED_2B;
		inode->datalayout = EROFS_INODE_FLAT_COMPRESSION;
	} else {
		inode->datalayout = EROFS_INODE_FLAT_COMPRESSION_LEGACY;
	}

	if (erofs_sb_has_big_pcluster()) {
		inode->z_advise |= Z_EROFS_ADVISE_BIG_PCLUSTER_1;
		if (inode->datalayout == EROFS_INODE_FLAT_COMPRESSION)
			inode->z_advise |= Z_EROFS_ADVISE_BIG_PCLUSTER_2;
	}
	inode->z_algorithmtype[0] = algorithmtype[0];
	inode->z_algorithmtype[1] = algorithmtype[1];
	inode->z_logical_clusterbits = LOG_BLOCK_SIZE;

	z_erofs_write_mapheader(inode, compressmeta);

	blkaddr = erofs_mapbh(bh->block);	/* start_blkaddr */
	ctx.blkaddr = blkaddr;
	ctx.metacur = compressmeta + Z_EROFS_LEGACY_MAP_HEADER_SIZE;
	ctx.head = ctx.tail = 0;
	ctx.clusterofs = 0;
	remaining = inode->i_size;

	while (remaining) {
		const u64 readcount = min_t(u64, remaining,
					    sizeof(ctx.queue) - ctx.tail);

		ret = read(fd, ctx.queue + ctx.tail, readcount);
		if (ret != readcount) {
			ret = -errno;
			goto err_bdrop;
		}
		remaining -= readcount;
		ctx.tail += readcount;

		/* do one compress round */
		ret = vle_compress_one(inode, &ctx, false);
		if (ret)
			goto err_bdrop;
	}

	/* do the final round */
	ret = vle_compress_one(inode, &ctx, true);
	if (ret)
		goto err_bdrop;

	/* fall back to no compression mode */
	compressed_blocks = ctx.blkaddr - blkaddr;
	if (compressed_blocks >= BLK_ROUND_UP(inode->i_size)) {
		ret = -ENOSPC;
		goto err_bdrop;
	}

	vle_write_indexes_final(&ctx);

	close(fd);
	DBG_BUGON(!compressed_blocks);
	ret = erofs_bh_balloon(bh, blknr_to_addr(compressed_blocks));
	DBG_BUGON(ret != EROFS_BLKSIZ);

	erofs_info("compressed %s (%llu bytes) into %u blocks",
		   inode->i_srcpath, (unsigned long long)inode->i_size,
		   compressed_blocks);

	/*
	 * TODO: need to move erofs_bdrop to erofs_write_tail_end
	 *       when both mkfs & kernel support compression inline.
	 */
	erofs_bdrop(bh, false);
	inode->idata_size = 0;
	inode->u.i_blocks = compressed_blocks;

	legacymetasize = ctx.metacur - compressmeta;
	if (inode->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY) {
		inode->extent_isize = legacymetasize;
	} else {
		ret = z_erofs_convert_to_compacted_format(inode, blkaddr,
							  legacymetasize,
							  compressmeta);
		DBG_BUGON(ret);
	}
	inode->compressmeta = compressmeta;
	erofs_droid_blocklist_write(inode, blkaddr, compressed_blocks);
	return 0;

err_bdrop:
	erofs_bdrop(bh, true);	/* revoke buffer */
err_close:
	close(fd);
err_free:
	free(compressmeta);
	return ret;
}

static int erofs_get_compress_algorithm_id(const char *name)
{
	if (!strcmp(name, "lz4") || !strcmp(name, "lz4hc"))
		return Z_EROFS_COMPRESSION_LZ4;
	if (!strcmp(name, "lzma"))
		return Z_EROFS_COMPRESSION_LZMA;
	return -ENOTSUP;
}

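/*
 * Emit the per-algorithm on-disk compression configurations when
 * COMPR_CFGS is enabled, attached to meta blocks following the superblock
 * buffer.  Each record is a little-endian 2-byte size followed by the
 * algorithm-specific config payload (LZ4, and LZMA when built with liblzma
 * support).
 */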
int z_erofs_build_compr_cfgs(struct erofs_buffer_head *sb_bh)
{
	struct erofs_buffer_head *bh = sb_bh;
	int ret = 0;

	if (sbi.available_compr_algs & (1 << Z_EROFS_COMPRESSION_LZ4)) {
		struct {
			__le16 size;
			struct z_erofs_lz4_cfgs lz4;
		} __packed lz4alg = {
			.size = cpu_to_le16(sizeof(struct z_erofs_lz4_cfgs)),
			.lz4 = {
				.max_distance =
					cpu_to_le16(sbi.lz4_max_distance),
				.max_pclusterblks = cfg.c_pclusterblks_max,
			}
		};

		bh = erofs_battach(bh, META, sizeof(lz4alg));
		if (IS_ERR(bh)) {
			DBG_BUGON(1);
			return PTR_ERR(bh);
		}
		erofs_mapbh(bh->block);
		ret = dev_write(&lz4alg, erofs_btell(bh, false),
				sizeof(lz4alg));
		bh->op = &erofs_drop_directly_bhops;
	}
#ifdef HAVE_LIBLZMA
	if (sbi.available_compr_algs & (1 << Z_EROFS_COMPRESSION_LZMA)) {
		struct {
			__le16 size;
			struct z_erofs_lzma_cfgs lzma;
		} __packed lzmaalg = {
			.size = cpu_to_le16(sizeof(struct z_erofs_lzma_cfgs)),
			.lzma = {
				.dict_size = cpu_to_le32(cfg.c_dict_size),
			}
		};

		bh = erofs_battach(bh, META, sizeof(lzmaalg));
		if (IS_ERR(bh)) {
			DBG_BUGON(1);
			return PTR_ERR(bh);
		}
		erofs_mapbh(bh->block);
		ret = dev_write(&lzmaalg, erofs_btell(bh, false),
				sizeof(lzmaalg));
		bh->op = &erofs_drop_directly_bhops;
	}
#endif
	return ret;
}

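/*
 * Global compression setup at mkfs time: bring up the primary compressor,
 * drop 0PADDING for configurations that old kernels must still read,
 * validate the maximum pcluster size and, when needed, record the enabled
 * algorithms so that the on-disk compression configs get written.
 */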
int z_erofs_compress_init(struct erofs_buffer_head *sb_bh)
{
	/* initialize for primary compression algorithm */
	int ret = erofs_compressor_init(&compresshandle,
					cfg.c_compr_alg_master);

	if (ret)
		return ret;

	/*
	 * if primary algorithm is empty (e.g. compression off),
	 * clear 0PADDING feature for old kernel compatibility.
	 */
	if (!cfg.c_compr_alg_master ||
	    (cfg.c_legacy_compress && !strcmp(cfg.c_compr_alg_master, "lz4")))
		erofs_sb_clear_lz4_0padding();

	if (!cfg.c_compr_alg_master)
		return 0;

	ret = erofs_compressor_setlevel(&compresshandle,
					cfg.c_compr_level_master);
	if (ret)
		return ret;

	/* figure out primary algorithm */
	ret = erofs_get_compress_algorithm_id(cfg.c_compr_alg_master);
	if (ret < 0)
		return ret;

	algorithmtype[0] = ret;	/* primary algorithm (head 0) */
	algorithmtype[1] = 0;	/* secondary algorithm (head 1) */
	/*
	 * if big pcluster is enabled, an extra CBLKCNT lcluster index needs
	 * to be loaded in order to get those compressed block counts.
	 */
	if (cfg.c_pclusterblks_max > 1) {
		if (cfg.c_pclusterblks_max >
		    Z_EROFS_PCLUSTER_MAX_SIZE / EROFS_BLKSIZ) {
			erofs_err("unsupported clusterblks %u (too large)",
				  cfg.c_pclusterblks_max);
			return -EINVAL;
		}
		erofs_sb_set_big_pcluster();
		erofs_warn("EXPERIMENTAL big pcluster feature in use. Use at your own risk!");
	}

	if (ret != Z_EROFS_COMPRESSION_LZ4)
		erofs_sb_set_compr_cfgs();

	if (erofs_sb_has_compr_cfgs()) {
		sbi.available_compr_algs |= 1 << ret;
		return z_erofs_build_compr_cfgs(sb_bh);
	}
	return 0;
}

int z_erofs_compress_exit(void)
{
	return erofs_compressor_exit(&compresshandle);
}