// SPDX-License-Identifier: GPL-2.0+ OR Apache-2.0
/*
 * Copyright (C) 2018-2019 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Miao Xie <miaoxie@huawei.com>
 * with heavy changes by Gao Xiang <gaoxiang25@huawei.com>
 */
#ifndef _LARGEFILE64_SOURCE
#define _LARGEFILE64_SOURCE
#endif
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include "erofs/print.h"
#include "erofs/io.h"
#include "erofs/cache.h"
#include "erofs/compress.h"
#include "compressor.h"
#include "erofs/block_list.h"
#include "erofs/compress_hints.h"

static struct erofs_compress compresshandle;
static unsigned int algorithmtype[2];

struct z_erofs_vle_compress_ctx {
	u8 *metacur;

	u8 queue[EROFS_CONFIG_COMPR_MAX_SZ * 2];
	unsigned int head, tail;
	unsigned int compressedblks;
	erofs_blk_t blkaddr;		/* pointing to the next blkaddr */
	u16 clusterofs;
};

#define Z_EROFS_LEGACY_MAP_HEADER_SIZE	\
	(sizeof(struct z_erofs_map_header) + Z_EROFS_VLE_LEGACY_HEADER_PADDING)

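/*
 * Worst-case size of the legacy (non-compacted) compression metadata for a
 * file: one full z_erofs_vle_decompressed_index per logical block plus the
 * map header area.
 */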
static unsigned int vle_compressmeta_capacity(erofs_off_t filesize)
{
	const unsigned int indexsize = BLK_ROUND_UP(filesize) *
		sizeof(struct z_erofs_vle_decompressed_index);

	return Z_EROFS_LEGACY_MAP_HEADER_SIZE + indexsize;
}

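/*
 * If the last pcluster doesn't end exactly on a block boundary (non-zero
 * clusterofs), emit one trailing PLAIN lcluster index to close the file.
 */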
static void vle_write_indexes_final(struct z_erofs_vle_compress_ctx *ctx)
{
	const unsigned int type = Z_EROFS_VLE_CLUSTER_TYPE_PLAIN;
	struct z_erofs_vle_decompressed_index di;

	if (!ctx->clusterofs)
		return;

	di.di_clusterofs = cpu_to_le16(ctx->clusterofs);
	di.di_u.blkaddr = 0;
	di.di_advise = cpu_to_le16(type << Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT);

	memcpy(ctx->metacur, &di, sizeof(di));
	ctx->metacur += sizeof(di);
}

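/*
 * Write the legacy compression indexes for one pcluster covering `count'
 * bytes of decompressed data: a leading HEAD (or PLAIN, if stored raw)
 * lcluster followed by NONHEAD lclusters carrying the delta[] distances,
 * or the compressed block count when big pcluster is in use.
 */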
static void vle_write_indexes(struct z_erofs_vle_compress_ctx *ctx,
			      unsigned int count, bool raw)
{
	unsigned int clusterofs = ctx->clusterofs;
	unsigned int d0 = 0, d1 = (clusterofs + count) / EROFS_BLKSIZ;
	struct z_erofs_vle_decompressed_index di;
	unsigned int type;
	__le16 advise;

	di.di_clusterofs = cpu_to_le16(ctx->clusterofs);

	/* whether this is the tail-end (un)compressed block or not */
	if (!d1) {
		/*
		 * An lcluster cannot have three parts with the middle one
		 * well-compressed for !ztailpacking cases.
		 */
		DBG_BUGON(!raw && !cfg.c_ztailpacking);
		type = raw ? Z_EROFS_VLE_CLUSTER_TYPE_PLAIN :
			Z_EROFS_VLE_CLUSTER_TYPE_HEAD;
		advise = cpu_to_le16(type << Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT);

		di.di_advise = advise;
		di.di_u.blkaddr = cpu_to_le32(ctx->blkaddr);
		memcpy(ctx->metacur, &di, sizeof(di));
		ctx->metacur += sizeof(di);

		/* don't add the final index if the tail-end block exists */
		ctx->clusterofs = 0;
		return;
	}

	do {
		/* XXX: big pcluster feature should be per-inode */
		if (d0 == 1 && erofs_sb_has_big_pcluster()) {
			type = Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD;
			di.di_u.delta[0] = cpu_to_le16(ctx->compressedblks |
						       Z_EROFS_VLE_DI_D0_CBLKCNT);
			di.di_u.delta[1] = cpu_to_le16(d1);
		} else if (d0) {
			type = Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD;

			/*
			 * If the |Z_EROFS_VLE_DI_D0_CBLKCNT| bit is set, the
			 * parser will interpret |delta[0]| as the size of the
			 * pcluster rather than the distance to the last head
			 * cluster. Normally this isn't a problem, because
			 * uncompressed extent sizes are below
			 * Z_EROFS_VLE_DI_D0_CBLKCNT * BLOCK_SIZE = 8MB.
			 * But with a large pcluster it's possible to go over
			 * this number, resulting in corrupted compressed
			 * indices. To solve this, we replace d0 with
			 * Z_EROFS_VLE_DI_D0_CBLKCNT-1.
			 */
			if (d0 >= Z_EROFS_VLE_DI_D0_CBLKCNT)
				di.di_u.delta[0] = cpu_to_le16(
						Z_EROFS_VLE_DI_D0_CBLKCNT - 1);
			else
				di.di_u.delta[0] = cpu_to_le16(d0);
			di.di_u.delta[1] = cpu_to_le16(d1);
		} else {
			type = raw ? Z_EROFS_VLE_CLUSTER_TYPE_PLAIN :
				Z_EROFS_VLE_CLUSTER_TYPE_HEAD;
			di.di_u.blkaddr = cpu_to_le32(ctx->blkaddr);
		}
		advise = cpu_to_le16(type << Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT);
		di.di_advise = advise;

		memcpy(ctx->metacur, &di, sizeof(di));
		ctx->metacur += sizeof(di);

		count -= EROFS_BLKSIZ - clusterofs;
		clusterofs = 0;

		++d0;
		--d1;
	} while (clusterofs + count >= EROFS_BLKSIZ);

	ctx->clusterofs = clusterofs + count;
}

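/*
 * Store up to one block of data uncompressed. `dst' is just a bounce
 * buffer used to zero-pad the tail of the block before writing; the
 * number of bytes consumed from the queue is returned.
 */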
static int write_uncompressed_extent(struct z_erofs_vle_compress_ctx *ctx,
				     unsigned int *len, char *dst)
{
	int ret;
	unsigned int count;

	/* reset clusterofs to 0 if permitted */
	if (!erofs_sb_has_lz4_0padding() && ctx->clusterofs &&
	    ctx->head >= ctx->clusterofs) {
		ctx->head -= ctx->clusterofs;
		*len += ctx->clusterofs;
		ctx->clusterofs = 0;
	}

	/* write uncompressed data */
	count = min(EROFS_BLKSIZ, *len);

	memcpy(dst, ctx->queue + ctx->head, count);
	memset(dst + count, 0, EROFS_BLKSIZ - count);

	erofs_dbg("Writing %u uncompressed data to block %u",
		  count, ctx->blkaddr);
	ret = blk_write(dst, ctx->blkaddr, 1);
	if (ret)
		return ret;
	return count;
}

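/*
 * Pick the physical cluster size (in blocks) for this inode: a random
 * value in debug builds if requested, the per-file compress hint if a
 * hints file is configured, or the global default otherwise.
 */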
static unsigned int z_erofs_get_max_pclusterblks(struct erofs_inode *inode)
{
#ifndef NDEBUG
	if (cfg.c_random_pclusterblks)
		return 1 + rand() % cfg.c_pclusterblks_max;
#endif
	if (cfg.c_compress_hints_file) {
		z_erofs_apply_compress_hints(inode);
		DBG_BUGON(!inode->z_physical_clusterblks);
		return inode->z_physical_clusterblks;
	}
	return cfg.c_pclusterblks_def;
}

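/*
 * Keep a copy of the (un)compressed tail in memory so it can later be
 * packed inline next to the on-disk inode (ztailpacking).
 */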
static int z_erofs_fill_inline_data(struct erofs_inode *inode, void *data,
				    unsigned int len, bool raw)
{
	inode->z_advise |= Z_EROFS_ADVISE_INLINE_PCLUSTER;
	inode->idata_size = len;
	inode->compressed_idata = !raw;

	inode->idata = malloc(inode->idata_size);
	if (!inode->idata)
		return -ENOMEM;
	erofs_dbg("Recording %u %scompressed inline data",
		  inode->idata_size, raw ? "un" : "");
	memcpy(inode->idata, data, inode->idata_size);
	return len;
}

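/*
 * Best-effort optimization for ztailpacking: try recompressing the input
 * into one less full block so the leftover raw bytes can be inlined, and
 * only keep the result if there is a net gain.
 */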
static void tryrecompress_trailing(void *in, unsigned int *insize,
				   void *out, int *compressedsize)
{
	static char tmp[Z_EROFS_PCLUSTER_MAX_SIZE];
	unsigned int count;
	int ret = *compressedsize;

	/* no need to recompress */
	if (!(ret & (EROFS_BLKSIZ - 1)))
		return;

	count = *insize;
	ret = erofs_compress_destsize(&compresshandle,
				      in, &count, (void *)tmp,
				      rounddown(ret, EROFS_BLKSIZ), false);
	if (ret <= 0 || ret + (*insize - count) >=
			roundup(*compressedsize, EROFS_BLKSIZ))
		return;

	/* replace the original compressed data if any gain */
	memcpy(out, tmp, ret);
	*insize = count;
	*compressedsize = ret;
}

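/*
 * Compress the queued data one pcluster at a time.  With `final' set, the
 * remaining tail may be inlined (ztailpacking) or stored as an
 * uncompressed extent when compression doesn't pay off.
 */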
static int vle_compress_one(struct erofs_inode *inode,
			    struct z_erofs_vle_compress_ctx *ctx,
			    bool final)
{
	struct erofs_compress *const h = &compresshandle;
	unsigned int len = ctx->tail - ctx->head;
	unsigned int count;
	int ret;
	static char dstbuf[EROFS_CONFIG_COMPR_MAX_SZ + EROFS_BLKSIZ];
	char *const dst = dstbuf + EROFS_BLKSIZ;

	while (len) {
		unsigned int pclustersize =
			z_erofs_get_max_pclusterblks(inode) * EROFS_BLKSIZ;
		bool may_inline = (cfg.c_ztailpacking && final);
		bool raw;

		if (len <= pclustersize) {
			if (!final)
				break;
			if (!may_inline && len <= EROFS_BLKSIZ)
				goto nocompression;
		}

		count = min(len, cfg.c_max_decompressed_extent_bytes);
		ret = erofs_compress_destsize(h, ctx->queue + ctx->head,
					      &count, dst, pclustersize,
					      !(final && len == count));
		if (ret <= 0) {
			if (ret != -EAGAIN) {
				erofs_err("failed to compress %s: %s",
					  inode->i_srcpath,
					  erofs_strerror(ret));
			}

			if (may_inline && len < EROFS_BLKSIZ)
				ret = z_erofs_fill_inline_data(inode,
						ctx->queue + ctx->head,
						len, true);
			else
nocompression:
				ret = write_uncompressed_extent(ctx, &len, dst);

			if (ret < 0)
				return ret;
			count = ret;

			/*
			 * XXX: For now, we have to leave `ctx->compressedblks
			 * = 1' since there is no way to generate compressed
			 * indexes after the time that ztailpacking is decided.
			 */
			ctx->compressedblks = 1;
			raw = true;
		/* tailpcluster should be less than 1 block */
		} else if (may_inline && len == count &&
			   ret < EROFS_BLKSIZ) {
			if (ctx->clusterofs + len <= EROFS_BLKSIZ) {
				inode->eof_tailraw = malloc(len);
				if (!inode->eof_tailraw)
					return -ENOMEM;

				memcpy(inode->eof_tailraw,
				       ctx->queue + ctx->head, len);
				inode->eof_tailrawsize = len;
			}

			ret = z_erofs_fill_inline_data(inode, dst, ret, false);
			if (ret < 0)
				return ret;
			ctx->compressedblks = 1;
			raw = false;
		} else {
			unsigned int tailused, padding;

			if (may_inline && len == count)
				tryrecompress_trailing(ctx->queue + ctx->head,
						       &count, dst, &ret);

			tailused = ret & (EROFS_BLKSIZ - 1);
			padding = 0;
			ctx->compressedblks = DIV_ROUND_UP(ret, EROFS_BLKSIZ);
			DBG_BUGON(ctx->compressedblks * EROFS_BLKSIZ >= count);

			/* zero out garbage trailing data for non-0padding */
			if (!erofs_sb_has_lz4_0padding())
				memset(dst + ret, 0,
				       roundup(ret, EROFS_BLKSIZ) - ret);
			else if (tailused)
				padding = EROFS_BLKSIZ - tailused;

			/* write compressed data */
			erofs_dbg("Writing %u compressed data to %u of %u blocks",
				  count, ctx->blkaddr, ctx->compressedblks);

			ret = blk_write(dst - padding, ctx->blkaddr,
					ctx->compressedblks);
			if (ret)
				return ret;
			raw = false;
		}

		ctx->head += count;
		/* write compression indexes for this pcluster */
		vle_write_indexes(ctx, count, raw);

		ctx->blkaddr += ctx->compressedblks;
		len -= count;

		if (!final && ctx->head >= EROFS_CONFIG_COMPR_MAX_SZ) {
			const unsigned int qh_aligned =
				round_down(ctx->head, EROFS_BLKSIZ);
			const unsigned int qh_after = ctx->head - qh_aligned;

			memmove(ctx->queue, ctx->queue + qh_aligned,
				len + qh_after);
			ctx->head = qh_after;
			ctx->tail = qh_after + len;
			break;
		}
	}
	return 0;
}

struct z_erofs_compressindex_vec {
	union {
		erofs_blk_t blkaddr;
		u16 delta[2];
	} u;
	u16 clusterofs;
	u8 clustertype;
};

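/*
 * Decode `nr' legacy on-disk indexes starting at `metacur' into in-memory
 * z_erofs_compressindex_vec entries; returns the next input position.
 */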
static void *parse_legacy_indexes(struct z_erofs_compressindex_vec *cv,
				  unsigned int nr, void *metacur)
{
	struct z_erofs_vle_decompressed_index *const db = metacur;
	unsigned int i;

	for (i = 0; i < nr; ++i, ++cv) {
		struct z_erofs_vle_decompressed_index *const di = db + i;
		const unsigned int advise = le16_to_cpu(di->di_advise);

		cv->clustertype = (advise >> Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT) &
			((1 << Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) - 1);
		cv->clusterofs = le16_to_cpu(di->di_clusterofs);

		if (cv->clustertype == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
			cv->u.delta[0] = le16_to_cpu(di->di_u.delta[0]);
			cv->u.delta[1] = le16_to_cpu(di->di_u.delta[1]);
		} else {
			cv->u.blkaddr = le32_to_cpu(di->di_u.blkaddr);
		}
	}
	return db + nr;
}

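/*
 * Pack `vcnt' index vectors into a single compacted unit: each lcluster
 * gets `encodebits' bits of (clustertype << logical_clusterbits | offset)
 * and the base blkaddr of the pack is stored in its trailing 32 bits.
 */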
static void *write_compacted_indexes(u8 *out,
				     struct z_erofs_compressindex_vec *cv,
				     erofs_blk_t *blkaddr_ret,
				     unsigned int destsize,
				     unsigned int logical_clusterbits,
				     bool final, bool *dummy_head)
{
	unsigned int vcnt, encodebits, pos, i, cblks;
	bool update_blkaddr;
	erofs_blk_t blkaddr;

	if (destsize == 4)
		vcnt = 2;
	else if (destsize == 2 && logical_clusterbits == 12)
		vcnt = 16;
	else
		return ERR_PTR(-EINVAL);
	encodebits = (vcnt * destsize * 8 - 32) / vcnt;
	blkaddr = *blkaddr_ret;
	update_blkaddr = erofs_sb_has_big_pcluster();

	pos = 0;
	for (i = 0; i < vcnt; ++i) {
		unsigned int offset, v;
		u8 ch, rem;

		if (cv[i].clustertype == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
			if (cv[i].u.delta[0] & Z_EROFS_VLE_DI_D0_CBLKCNT) {
				cblks = cv[i].u.delta[0] & ~Z_EROFS_VLE_DI_D0_CBLKCNT;
				offset = cv[i].u.delta[0];
				blkaddr += cblks;
				*dummy_head = false;
			} else if (i + 1 == vcnt) {
				offset = cv[i].u.delta[1];
			} else {
				offset = cv[i].u.delta[0];
			}
		} else {
			offset = cv[i].clusterofs;
			if (*dummy_head) {
				++blkaddr;
				if (update_blkaddr)
					*blkaddr_ret = blkaddr;
			}
			*dummy_head = true;
			update_blkaddr = false;

			if (cv[i].u.blkaddr != blkaddr) {
				if (i + 1 != vcnt)
					DBG_BUGON(!final);
				DBG_BUGON(cv[i].u.blkaddr);
			}
		}
		v = (cv[i].clustertype << logical_clusterbits) | offset;
		rem = pos & 7;
		ch = out[pos / 8] & ((1 << rem) - 1);
		out[pos / 8] = (v << rem) | ch;
		out[pos / 8 + 1] = v >> (8 - rem);
		out[pos / 8 + 2] = v >> (16 - rem);
		pos += encodebits;
	}
	DBG_BUGON(destsize * vcnt * 8 != pos + 32);
	*(__le32 *)(out + destsize * vcnt - 4) = cpu_to_le32(*blkaddr_ret);
	*blkaddr_ret = blkaddr;
	return out + destsize * vcnt;
}

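/*
 * Convert the legacy full-size indexes produced during compression into
 * the compacted 2B/4B on-disk layout, compacting in place within the same
 * compressmeta buffer and updating inode->extent_isize.
 */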
int z_erofs_convert_to_compacted_format(struct erofs_inode *inode,
					erofs_blk_t blkaddr,
					unsigned int legacymetasize,
					void *compressmeta)
{
	const unsigned int mpos = Z_EROFS_VLE_EXTENT_ALIGN(inode->inode_isize +
							   inode->xattr_isize) +
				  sizeof(struct z_erofs_map_header);
	const unsigned int totalidx = (legacymetasize -
				       Z_EROFS_LEGACY_MAP_HEADER_SIZE) /
			sizeof(struct z_erofs_vle_decompressed_index);
	const unsigned int logical_clusterbits = inode->z_logical_clusterbits;
	u8 *out, *in;
	struct z_erofs_compressindex_vec cv[16];
	/* # of 8-byte units so that it can be aligned with 32 bytes */
	unsigned int compacted_4b_initial, compacted_4b_end;
	unsigned int compacted_2b;
	bool dummy_head;

	if (logical_clusterbits < LOG_BLOCK_SIZE || LOG_BLOCK_SIZE < 12)
		return -EINVAL;
	if (logical_clusterbits > 14)	/* currently not supported */
		return -ENOTSUP;
	if (logical_clusterbits == 12) {
		compacted_4b_initial = (32 - mpos % 32) / 4;
		if (compacted_4b_initial == 32 / 4)
			compacted_4b_initial = 0;

		if (compacted_4b_initial > totalidx) {
			compacted_4b_initial = compacted_2b = 0;
			compacted_4b_end = totalidx;
		} else {
			compacted_2b = rounddown(totalidx -
						 compacted_4b_initial, 16);
			compacted_4b_end = totalidx - compacted_4b_initial -
					   compacted_2b;
		}
	} else {
		compacted_2b = compacted_4b_initial = 0;
		compacted_4b_end = totalidx;
	}

	out = in = compressmeta;

	out += sizeof(struct z_erofs_map_header);
	in += Z_EROFS_LEGACY_MAP_HEADER_SIZE;

	dummy_head = false;
	/* prior to bigpcluster, blkaddr was bumped up once coming into HEAD */
	if (!erofs_sb_has_big_pcluster()) {
		--blkaddr;
		dummy_head = true;
	}

	/* generate compacted_4b_initial */
	while (compacted_4b_initial) {
		in = parse_legacy_indexes(cv, 2, in);
		out = write_compacted_indexes(out, cv, &blkaddr,
					      4, logical_clusterbits, false,
					      &dummy_head);
		compacted_4b_initial -= 2;
	}
	DBG_BUGON(compacted_4b_initial);

	/* generate compacted_2b */
	while (compacted_2b) {
		in = parse_legacy_indexes(cv, 16, in);
		out = write_compacted_indexes(out, cv, &blkaddr,
					      2, logical_clusterbits, false,
					      &dummy_head);
		compacted_2b -= 16;
	}
	DBG_BUGON(compacted_2b);

	/* generate compacted_4b_end */
	while (compacted_4b_end > 1) {
		in = parse_legacy_indexes(cv, 2, in);
		out = write_compacted_indexes(out, cv, &blkaddr,
					      4, logical_clusterbits, false,
					      &dummy_head);
		compacted_4b_end -= 2;
	}

	/* generate final compacted_4b_end if needed */
	if (compacted_4b_end) {
		memset(cv, 0, sizeof(cv));
		in = parse_legacy_indexes(cv, 1, in);
		out = write_compacted_indexes(out, cv, &blkaddr,
					      4, logical_clusterbits, true,
					      &dummy_head);
	}
	inode->extent_isize = out - (u8 *)compressmeta;
	return 0;
}

static void z_erofs_write_mapheader(struct erofs_inode *inode,
				    void *compressmeta)
{
	struct z_erofs_map_header h = {
		.h_advise = cpu_to_le16(inode->z_advise),
		.h_idata_size = cpu_to_le16(inode->idata_size),
		.h_algorithmtype = inode->z_algorithmtype[1] << 4 |
				   inode->z_algorithmtype[0],
		/* lclustersize */
		.h_clusterbits = inode->z_logical_clusterbits - 12,
	};

	memset(compressmeta, 0, Z_EROFS_LEGACY_MAP_HEADER_SIZE);
	/* write out map header */
	memcpy(compressmeta, &h, sizeof(struct z_erofs_map_header));
}

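/*
 * Give up packing the tail pcluster inline: clear the INLINE advise bit,
 * patch the EOF lcluster back to an uncompressed (PLAIN) type and replace
 * idata with the previously preserved raw tail.
 */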
void z_erofs_drop_inline_pcluster(struct erofs_inode *inode)
{
	const unsigned int type = Z_EROFS_VLE_CLUSTER_TYPE_PLAIN;
	struct z_erofs_map_header *h = inode->compressmeta;

	h->h_advise = cpu_to_le16(le16_to_cpu(h->h_advise) &
				  ~Z_EROFS_ADVISE_INLINE_PCLUSTER);
	if (!inode->eof_tailraw)
		return;
	DBG_BUGON(inode->compressed_idata != true);

	/* patch the EOF lcluster to uncompressed type first */
	if (inode->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY) {
		struct z_erofs_vle_decompressed_index *di =
			(inode->compressmeta + inode->extent_isize) -
			sizeof(struct z_erofs_vle_decompressed_index);
		__le16 advise =
			cpu_to_le16(type << Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT);

		di->di_advise = advise;
	} else if (inode->datalayout == EROFS_INODE_FLAT_COMPRESSION) {
		/* handle the last compacted 4B pack */
		unsigned int eofs, base, pos, v, lo;
		u8 *out;

		eofs = inode->extent_isize -
			(4 << (DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ) & 1));
		base = round_down(eofs, 8);
		pos = 16 /* encodebits */ * ((eofs - base) / 4);
		out = inode->compressmeta + base;
		lo = get_unaligned_le32(out + pos / 8) & (EROFS_BLKSIZ - 1);
		v = (type << LOG_BLOCK_SIZE) | lo;
		out[pos / 8] = v & 0xff;
		out[pos / 8 + 1] = v >> 8;
	} else {
		DBG_BUGON(1);
		return;
	}
	free(inode->idata);
	/* replace idata with prepared uncompressed data */
	inode->idata = inode->eof_tailraw;
	inode->idata_size = inode->eof_tailrawsize;
	inode->compressed_idata = false;
	inode->eof_tailraw = NULL;
}

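/*
 * Compress a whole regular file: stream it through the staging queue,
 * emitting pclusters and legacy indexes as it goes, and return -ENOSPC
 * when compression wouldn't actually save space so that no-compression
 * mode can be used instead.
 */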
int erofs_write_compressed_file(struct erofs_inode *inode)
{
	struct erofs_buffer_head *bh;
	static struct z_erofs_vle_compress_ctx ctx;
	erofs_off_t remaining;
	erofs_blk_t blkaddr, compressed_blocks;
	unsigned int legacymetasize;
	int ret, fd;
	u8 *compressmeta = malloc(vle_compressmeta_capacity(inode->i_size));

	if (!compressmeta)
		return -ENOMEM;

	fd = open(inode->i_srcpath, O_RDONLY | O_BINARY);
	if (fd < 0) {
		ret = -errno;
		goto err_free_meta;
	}

	/* allocate main data buffer */
	bh = erofs_balloc(DATA, 0, 0, 0);
	if (IS_ERR(bh)) {
		ret = PTR_ERR(bh);
		goto err_close;
	}

	/* initialize per-file compression setting */
	inode->z_advise = 0;
	if (!cfg.c_legacy_compress) {
		inode->z_advise |= Z_EROFS_ADVISE_COMPACTED_2B;
		inode->datalayout = EROFS_INODE_FLAT_COMPRESSION;
	} else {
		inode->datalayout = EROFS_INODE_FLAT_COMPRESSION_LEGACY;
	}

	if (erofs_sb_has_big_pcluster()) {
		inode->z_advise |= Z_EROFS_ADVISE_BIG_PCLUSTER_1;
		if (inode->datalayout == EROFS_INODE_FLAT_COMPRESSION)
			inode->z_advise |= Z_EROFS_ADVISE_BIG_PCLUSTER_2;
	}
	inode->z_algorithmtype[0] = algorithmtype[0];
	inode->z_algorithmtype[1] = algorithmtype[1];
	inode->z_logical_clusterbits = LOG_BLOCK_SIZE;

	blkaddr = erofs_mapbh(bh->block);	/* start_blkaddr */
	ctx.blkaddr = blkaddr;
	ctx.metacur = compressmeta + Z_EROFS_LEGACY_MAP_HEADER_SIZE;
	ctx.head = ctx.tail = 0;
	ctx.clusterofs = 0;
	remaining = inode->i_size;

	while (remaining) {
		const u64 readcount = min_t(u64, remaining,
					    sizeof(ctx.queue) - ctx.tail);

		ret = read(fd, ctx.queue + ctx.tail, readcount);
		if (ret != readcount) {
			ret = -errno;
			goto err_bdrop;
		}
		remaining -= readcount;
		ctx.tail += readcount;

		ret = vle_compress_one(inode, &ctx, !remaining);
		if (ret)
			goto err_free_idata;
	}
	DBG_BUGON(ctx.head != ctx.tail);

	/* fall back to no compression mode */
	compressed_blocks = ctx.blkaddr - blkaddr;
	DBG_BUGON(compressed_blocks < !!inode->idata_size);
	compressed_blocks -= !!inode->idata_size;

	vle_write_indexes_final(&ctx);
	legacymetasize = ctx.metacur - compressmeta;
	/* estimate if data compression saves space or not */
	if (compressed_blocks * EROFS_BLKSIZ + inode->idata_size +
	    legacymetasize >= inode->i_size) {
		ret = -ENOSPC;
		goto err_free_idata;
	}
	z_erofs_write_mapheader(inode, compressmeta);

	close(fd);
	if (compressed_blocks) {
		ret = erofs_bh_balloon(bh, blknr_to_addr(compressed_blocks));
		DBG_BUGON(ret != EROFS_BLKSIZ);
	} else {
		DBG_BUGON(!inode->idata_size);
	}

	erofs_info("compressed %s (%llu bytes) into %u blocks",
		   inode->i_srcpath, (unsigned long long)inode->i_size,
		   compressed_blocks);

	if (inode->idata_size)
		inode->bh_data = bh;
	else
		erofs_bdrop(bh, false);

	inode->u.i_blocks = compressed_blocks;

	if (inode->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY) {
		inode->extent_isize = legacymetasize;
	} else {
		ret = z_erofs_convert_to_compacted_format(inode, blkaddr,
							  legacymetasize,
							  compressmeta);
		DBG_BUGON(ret);
	}
	inode->compressmeta = compressmeta;
	erofs_droid_blocklist_write(inode, blkaddr, compressed_blocks);
	return 0;

err_free_idata:
	if (inode->idata) {
		free(inode->idata);
		inode->idata = NULL;
	}
err_bdrop:
	erofs_bdrop(bh, true);	/* revoke buffer */
err_close:
	close(fd);
err_free_meta:
	free(compressmeta);
	return ret;
}

static int erofs_get_compress_algorithm_id(const char *name)
{
	if (!strcmp(name, "lz4") || !strcmp(name, "lz4hc"))
		return Z_EROFS_COMPRESSION_LZ4;
	if (!strcmp(name, "lzma"))
		return Z_EROFS_COMPRESSION_LZMA;
	return -ENOTSUP;
}

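/*
 * Write the on-disk compression configuration records (lz4 and, when
 * built with liblzma, lzma) into META buffers attached after the
 * superblock buffer head.
 */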
int z_erofs_build_compr_cfgs(struct erofs_buffer_head *sb_bh)
{
	struct erofs_buffer_head *bh = sb_bh;
	int ret = 0;

	if (sbi.available_compr_algs & (1 << Z_EROFS_COMPRESSION_LZ4)) {
		struct {
			__le16 size;
			struct z_erofs_lz4_cfgs lz4;
		} __packed lz4alg = {
			.size = cpu_to_le16(sizeof(struct z_erofs_lz4_cfgs)),
			.lz4 = {
				.max_distance =
					cpu_to_le16(sbi.lz4_max_distance),
				.max_pclusterblks = cfg.c_pclusterblks_max,
			}
		};

		bh = erofs_battach(bh, META, sizeof(lz4alg));
		if (IS_ERR(bh)) {
			DBG_BUGON(1);
			return PTR_ERR(bh);
		}
		erofs_mapbh(bh->block);
		ret = dev_write(&lz4alg, erofs_btell(bh, false),
				sizeof(lz4alg));
		bh->op = &erofs_drop_directly_bhops;
	}
#ifdef HAVE_LIBLZMA
	if (sbi.available_compr_algs & (1 << Z_EROFS_COMPRESSION_LZMA)) {
		struct {
			__le16 size;
			struct z_erofs_lzma_cfgs lzma;
		} __packed lzmaalg = {
			.size = cpu_to_le16(sizeof(struct z_erofs_lzma_cfgs)),
			.lzma = {
				.dict_size = cpu_to_le32(cfg.c_dict_size),
			}
		};

		bh = erofs_battach(bh, META, sizeof(lzmaalg));
		if (IS_ERR(bh)) {
			DBG_BUGON(1);
			return PTR_ERR(bh);
		}
		erofs_mapbh(bh->block);
		ret = dev_write(&lzmaalg, erofs_btell(bh, false),
				sizeof(lzmaalg));
		bh->op = &erofs_drop_directly_bhops;
	}
#endif
	return ret;
}

int z_erofs_compress_init(struct erofs_buffer_head *sb_bh)
{
	/* initialize for primary compression algorithm */
	int ret = erofs_compressor_init(&compresshandle,
					cfg.c_compr_alg_master);

	if (ret)
		return ret;

	/*
	 * if primary algorithm is empty (e.g. compression off),
	 * clear 0PADDING feature for old kernel compatibility.
	 */
	if (!cfg.c_compr_alg_master ||
	    (cfg.c_legacy_compress && !strcmp(cfg.c_compr_alg_master, "lz4")))
		erofs_sb_clear_lz4_0padding();

	if (!cfg.c_compr_alg_master)
		return 0;

	ret = erofs_compressor_setlevel(&compresshandle,
					cfg.c_compr_level_master);
	if (ret)
		return ret;

	/* figure out primary algorithm */
	ret = erofs_get_compress_algorithm_id(cfg.c_compr_alg_master);
	if (ret < 0)
		return ret;

	algorithmtype[0] = ret;	/* primary algorithm (head 0) */
	algorithmtype[1] = 0;	/* secondary algorithm (head 1) */
	/*
	 * if big pcluster is enabled, an extra CBLKCNT lcluster index needs
	 * to be loaded in order to get those compressed block counts.
	 */
	if (cfg.c_pclusterblks_max > 1) {
		if (cfg.c_pclusterblks_max >
		    Z_EROFS_PCLUSTER_MAX_SIZE / EROFS_BLKSIZ) {
			erofs_err("unsupported clusterblks %u (too large)",
				  cfg.c_pclusterblks_max);
			return -EINVAL;
		}
		erofs_sb_set_big_pcluster();
	}

	if (ret != Z_EROFS_COMPRESSION_LZ4)
		erofs_sb_set_compr_cfgs();

	if (erofs_sb_has_compr_cfgs()) {
		sbi.available_compr_algs |= 1 << ret;
		return z_erofs_build_compr_cfgs(sb_bh);
	}
	return 0;
}

int z_erofs_compress_exit(void)
{
	return erofs_compressor_exit(&compresshandle);
}