// SPDX-License-Identifier: GPL-2.0+ OR Apache-2.0
/*
 * erofs-utils/lib/blobchunk.c
 *
 * Copyright (C) 2021, Alibaba Cloud
 */
#define _GNU_SOURCE
#include "erofs/hashmap.h"
#include "erofs/blobchunk.h"
#include "erofs/block_list.h"
#include "erofs/cache.h"
#include "sha256.h"
#include <unistd.h>

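/*
 * In-memory representation of a data chunk stored in the blob: deduplicated
 * chunks are tracked by their SHA-256 digest in blob_hashmap, while unhashed
 * (tar/zero) chunks are kept on the unhashed_blobchunks list instead.
 */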
struct erofs_blobchunk {
	union {
		struct hashmap_entry ent;
		struct list_head list;
	};
	char sha256[32];
	unsigned int device_id;
	union {
		erofs_off_t chunksize;
		erofs_off_t sourceoffset;
	};
	erofs_blk_t blkaddr;
};

static struct hashmap blob_hashmap;
static FILE *blobfile;
static erofs_blk_t remapped_base;
static erofs_off_t datablob_size;
static bool multidev;
static struct erofs_buffer_head *bh_devt;
struct erofs_blobchunk erofs_holechunk = {
	.blkaddr = EROFS_NULL_ADDR,
};
static LIST_HEAD(unhashed_blobchunks);

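/*
 * Allocate a chunk that bypasses content deduplication and record it on the
 * unhashed_blobchunks list so that erofs_blob_exit() can free it later.
 */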
struct erofs_blobchunk *erofs_get_unhashed_chunk(unsigned int device_id,
		erofs_blk_t blkaddr, erofs_off_t sourceoffset)
{
	struct erofs_blobchunk *chunk;

	chunk = calloc(1, sizeof(struct erofs_blobchunk));
	if (!chunk)
		return ERR_PTR(-ENOMEM);

	chunk->device_id = device_id;
	chunk->blkaddr = blkaddr;
	chunk->sourceoffset = sourceoffset;
	list_add_tail(&chunk->list, &unhashed_blobchunks);
	return chunk;
}

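/*
 * Hash the chunk payload and either reuse an identical chunk already present
 * in blob_hashmap (accounting the bytes saved by deduplication) or append
 * the data to blobfile, padded up to the block size, and index the new chunk.
 */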
static struct erofs_blobchunk *erofs_blob_getchunk(struct erofs_sb_info *sbi,
						   u8 *buf, erofs_off_t chunksize)
{
	static u8 zeroed[EROFS_MAX_BLOCK_SIZE];
	struct erofs_blobchunk *chunk;
	unsigned int hash, padding;
	u8 sha256[32];
	erofs_off_t blkpos;
	int ret;

	erofs_sha256(buf, chunksize, sha256);
	hash = memhash(sha256, sizeof(sha256));
	chunk = hashmap_get_from_hash(&blob_hashmap, hash, sha256);
	if (chunk) {
		DBG_BUGON(chunksize != chunk->chunksize);

		sbi->saved_by_deduplication += chunksize;
		if (chunk->blkaddr == erofs_holechunk.blkaddr) {
			chunk = &erofs_holechunk;
			erofs_dbg("Found duplicated hole chunk");
		} else {
			erofs_dbg("Found duplicated chunk at %u",
				  chunk->blkaddr);
		}
		return chunk;
	}

	chunk = malloc(sizeof(struct erofs_blobchunk));
	if (!chunk)
		return ERR_PTR(-ENOMEM);

	chunk->chunksize = chunksize;
	memcpy(chunk->sha256, sha256, sizeof(sha256));
	blkpos = ftell(blobfile);
	DBG_BUGON(erofs_blkoff(sbi, blkpos));

	if (sbi->extra_devices)
		chunk->device_id = 1;
	else
		chunk->device_id = 0;
	chunk->blkaddr = erofs_blknr(sbi, blkpos);

	erofs_dbg("Writing chunk (%llu bytes) to %u", chunksize | 0ULL,
		  chunk->blkaddr);
	ret = fwrite(buf, chunksize, 1, blobfile);
	if (ret == 1) {
		padding = erofs_blkoff(sbi, chunksize);
		if (padding) {
			padding = erofs_blksiz(sbi) - padding;
			ret = fwrite(zeroed, padding, 1, blobfile);
		}
	}

	if (ret < 1) {
		free(chunk);
		return ERR_PTR(-ENOSPC);
	}

	hashmap_entry_init(&chunk->ent, hash);
	hashmap_add(&blob_hashmap, chunk);
	return chunk;
}

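/*
 * Hashmap comparison callback: two chunks are considered equal when their
 * SHA-256 digests match (the lookup key, if supplied, is a raw digest).
 */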
static int erofs_blob_hashmap_cmp(const void *a, const void *b,
				  const void *key)
{
	const struct erofs_blobchunk *ec1 =
		container_of((struct hashmap_entry *)a,
			     struct erofs_blobchunk, ent);
	const struct erofs_blobchunk *ec2 =
		container_of((struct hashmap_entry *)b,
			     struct erofs_blobchunk, ent);

	return memcmp(ec1->sha256, key ? key : ec2->sha256,
		      sizeof(ec1->sha256));
}

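/*
 * Convert the in-memory chunk pointers stashed in inode->chunkindexes into
 * on-disk chunk indexes (or block-map entries), remapping blob-local block
 * addresses by remapped_base, emit the resulting extents to the tar and
 * Android block lists, and finally write the index array at offset @off.
 */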
int erofs_blob_write_chunk_indexes(struct erofs_inode *inode,
				   erofs_off_t off)
{
	struct erofs_sb_info *sbi = inode->sbi;
	erofs_blk_t remaining_blks = BLK_ROUND_UP(sbi, inode->i_size);
	struct erofs_inode_chunk_index idx = {0};
	erofs_blk_t extent_start = EROFS_NULL_ADDR;
	erofs_blk_t extent_end, chunkblks;
	erofs_off_t source_offset;
	unsigned int dst, src, unit, zeroedlen;
	bool first_extent = true;

	if (inode->u.chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(struct erofs_inode_chunk_index);
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;

	chunkblks = 1U << (inode->u.chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
	for (dst = src = 0; dst < inode->extent_isize;
	     src += sizeof(void *), dst += unit) {
		struct erofs_blobchunk *chunk;

		chunk = *(void **)(inode->chunkindexes + src);

		if (chunk->blkaddr == EROFS_NULL_ADDR) {
			idx.blkaddr = EROFS_NULL_ADDR;
		} else if (chunk->device_id) {
			DBG_BUGON(!(inode->u.chunkformat & EROFS_CHUNK_FORMAT_INDEXES));
			idx.blkaddr = chunk->blkaddr;
			extent_start = EROFS_NULL_ADDR;
		} else {
			idx.blkaddr = remapped_base + chunk->blkaddr;
		}

		if (extent_start == EROFS_NULL_ADDR ||
		    idx.blkaddr != extent_end) {
			if (extent_start != EROFS_NULL_ADDR) {
				remaining_blks -= extent_end - extent_start;
				tarerofs_blocklist_write(extent_start,
						extent_end - extent_start,
						source_offset, 0);
				erofs_droid_blocklist_write_extent(inode,
					extent_start,
					extent_end - extent_start,
					first_extent, false);
				first_extent = false;
			}
			extent_start = idx.blkaddr;
			source_offset = chunk->sourceoffset;
		}
		extent_end = idx.blkaddr + chunkblks;
		idx.device_id = cpu_to_le16(chunk->device_id);
		idx.blkaddr = cpu_to_le32(idx.blkaddr);

		if (unit == EROFS_BLOCK_MAP_ENTRY_SIZE)
			memcpy(inode->chunkindexes + dst, &idx.blkaddr, unit);
		else
			memcpy(inode->chunkindexes + dst, &idx, sizeof(idx));
	}
	off = roundup(off, unit);
	extent_end = min(extent_end, extent_start + remaining_blks);
	if (extent_start != EROFS_NULL_ADDR) {
		zeroedlen = inode->i_size & (erofs_blksiz(sbi) - 1);
		if (zeroedlen)
			zeroedlen = erofs_blksiz(sbi) - zeroedlen;
		tarerofs_blocklist_write(extent_start, extent_end - extent_start,
					 source_offset, zeroedlen);
	}
	erofs_droid_blocklist_write_extent(inode, extent_start,
					   extent_start == EROFS_NULL_ADDR ?
						0 : extent_end - extent_start,
					   first_extent, true);

	return erofs_dev_write(inode->sbi, inode->chunkindexes, off,
			       inode->extent_isize);
}

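/*
 * Fold consecutive chunk slots together so the inode can use the larger
 * chunk size @new_chunkbits (capped by the on-disk BLKBITS mask), shrinking
 * extent_isize accordingly and updating the recorded chunk format.
 */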
int erofs_blob_mergechunks(struct erofs_inode *inode, unsigned int chunkbits,
			   unsigned int new_chunkbits)
{
	struct erofs_sb_info *sbi = inode->sbi;
	unsigned int dst, src, unit, count;

	if (new_chunkbits - sbi->blkszbits > EROFS_CHUNK_FORMAT_BLKBITS_MASK)
		new_chunkbits = EROFS_CHUNK_FORMAT_BLKBITS_MASK + sbi->blkszbits;
	if (chunkbits >= new_chunkbits)		/* no need to merge */
		goto out;

	if (inode->u.chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(struct erofs_inode_chunk_index);
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;

	count = round_up(inode->i_size, 1ULL << new_chunkbits) >> new_chunkbits;
	for (dst = src = 0; dst < count; ++dst) {
		*((void **)inode->chunkindexes + dst) =
			*((void **)inode->chunkindexes + src);
		src += 1U << (new_chunkbits - chunkbits);
	}

	DBG_BUGON(count * unit >= inode->extent_isize);
	inode->extent_isize = count * unit;
	chunkbits = new_chunkbits;
out:
	inode->u.chunkformat = (chunkbits - sbi->blkszbits) |
		(inode->u.chunkformat & ~EROFS_CHUNK_FORMAT_BLKBITS_MASK);
	return 0;
}

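/*
 * Record the largest power-of-two block count that still divides the
 * [start, end) interval, keeping the minimum across all intervals; this
 * later caps how far erofs_blob_mergechunks() may enlarge the chunk size.
 */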
static void erofs_update_minextblks(struct erofs_sb_info *sbi,
			erofs_off_t start, erofs_off_t end, erofs_blk_t *minextblks)
{
	erofs_blk_t lb;

	lb = lowbit((end - start) >> sbi->blkszbits);
	if (lb && lb < *minextblks)
		*minextblks = lb;
}
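
/*
 * Two chunks can share one extent when they are both holes or when they are
 * physically contiguous on the same blob device; a NULL previous chunk
 * always allows merging (first chunk of the file).
 */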
static bool erofs_blob_can_merge(struct erofs_sb_info *sbi,
				 struct erofs_blobchunk *lastch,
				 struct erofs_blobchunk *chunk)
{
	if (!lastch)
		return true;
	if (lastch == &erofs_holechunk && chunk == &erofs_holechunk)
		return true;
	if (lastch->device_id == chunk->device_id &&
	    erofs_pos(sbi, lastch->blkaddr) + lastch->chunksize ==
	    erofs_pos(sbi, chunk->blkaddr))
		return true;

	return false;
}
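
/*
 * Split a regular file into fixed-size chunks: holes found with SEEK_DATA
 * become references to erofs_holechunk, data chunks are deduplicated via
 * erofs_blob_getchunk(), and the chunk size is then enlarged as far as the
 * observed extent alignment (minextblks) allows.
 */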
int erofs_blob_write_chunked_file(struct erofs_inode *inode, int fd,
				  erofs_off_t startoff)
{
	struct erofs_sb_info *sbi = inode->sbi;
	unsigned int chunkbits = cfg.c_chunkbits;
	unsigned int count, unit;
	struct erofs_blobchunk *chunk, *lastch;
	struct erofs_inode_chunk_index *idx;
	erofs_off_t pos, len, chunksize, interval_start;
	erofs_blk_t minextblks;
	u8 *chunkdata;
	int ret;

#ifdef SEEK_DATA
	/* if the file is fully sparse, use one big chunk instead */
	if (lseek(fd, startoff, SEEK_DATA) < 0 && errno == ENXIO) {
		chunkbits = ilog2(inode->i_size - 1) + 1;
		if (chunkbits < sbi->blkszbits)
			chunkbits = sbi->blkszbits;
	}
#endif
	if (chunkbits - sbi->blkszbits > EROFS_CHUNK_FORMAT_BLKBITS_MASK)
		chunkbits = EROFS_CHUNK_FORMAT_BLKBITS_MASK + sbi->blkszbits;
	chunksize = 1ULL << chunkbits;
	count = DIV_ROUND_UP(inode->i_size, chunksize);

	if (sbi->extra_devices)
		inode->u.chunkformat |= EROFS_CHUNK_FORMAT_INDEXES;
	if (inode->u.chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(struct erofs_inode_chunk_index);
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;

	chunkdata = malloc(chunksize);
	if (!chunkdata)
		return -ENOMEM;

	inode->extent_isize = count * unit;
	inode->chunkindexes = malloc(count * max(sizeof(*idx), sizeof(void *)));
	if (!inode->chunkindexes) {
		ret = -ENOMEM;
		goto err;
	}
	idx = inode->chunkindexes;
	lastch = NULL;
	minextblks = BLK_ROUND_UP(sbi, inode->i_size);
	interval_start = 0;

	for (pos = 0; pos < inode->i_size; pos += len) {
#ifdef SEEK_DATA
		off_t offset = lseek(fd, pos + startoff, SEEK_DATA);

		if (offset < 0) {
			if (errno != ENXIO)
				offset = pos;
			else
				offset = ((pos >> chunkbits) + 1) << chunkbits;
		} else {
			offset -= startoff;

			if (offset != (offset & ~(chunksize - 1))) {
				offset &= ~(chunksize - 1);
				if (lseek(fd, offset + startoff, SEEK_SET) !=
					  startoff + offset) {
					ret = -EIO;
					goto err;
				}
			}
		}

		if (offset > pos) {
			if (!erofs_blob_can_merge(sbi, lastch,
						  &erofs_holechunk)) {
				erofs_update_minextblks(sbi, interval_start,
							pos, &minextblks);
				interval_start = pos;
			}
			do {
				*(void **)idx++ = &erofs_holechunk;
				pos += chunksize;
			} while (pos < offset);
			DBG_BUGON(pos != offset);
			lastch = &erofs_holechunk;
			len = 0;
			continue;
		}
#endif

		len = min_t(u64, inode->i_size - pos, chunksize);
		ret = read(fd, chunkdata, len);
		if (ret < len) {
			ret = -EIO;
			goto err;
		}

		chunk = erofs_blob_getchunk(sbi, chunkdata, len);
		if (IS_ERR(chunk)) {
			ret = PTR_ERR(chunk);
			goto err;
		}

		if (!erofs_blob_can_merge(sbi, lastch, chunk)) {
			erofs_update_minextblks(sbi, interval_start, pos,
						&minextblks);
			interval_start = pos;
		}
		*(void **)idx++ = chunk;
		lastch = chunk;
	}
	erofs_update_minextblks(sbi, interval_start, pos, &minextblks);
	inode->datalayout = EROFS_INODE_CHUNK_BASED;
	free(chunkdata);
	return erofs_blob_mergechunks(inode, chunkbits,
				      ilog2(minextblks) + sbi->blkszbits);
err:
	free(inode->chunkindexes);
	inode->chunkindexes = NULL;
	free(chunkdata);
	return ret;
}

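/*
 * Describe an entirely zeroed file without storing any data: every chunk
 * index points at an unhashed hole chunk (blkaddr == EROFS_NULL_ADDR),
 * using the largest chunk size that still covers i_size.
 */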
int erofs_write_zero_inode(struct erofs_inode *inode)
{
	struct erofs_sb_info *sbi = inode->sbi;
	unsigned int chunkbits = ilog2(inode->i_size - 1) + 1;
	unsigned int count;
	erofs_off_t chunksize, len, pos;
	struct erofs_inode_chunk_index *idx;

	if (chunkbits < sbi->blkszbits)
		chunkbits = sbi->blkszbits;
	if (chunkbits - sbi->blkszbits > EROFS_CHUNK_FORMAT_BLKBITS_MASK)
		chunkbits = EROFS_CHUNK_FORMAT_BLKBITS_MASK + sbi->blkszbits;

	inode->u.chunkformat |= chunkbits - sbi->blkszbits;

	chunksize = 1ULL << chunkbits;
	count = DIV_ROUND_UP(inode->i_size, chunksize);

	inode->extent_isize = count * EROFS_BLOCK_MAP_ENTRY_SIZE;
	idx = calloc(count, max(sizeof(*idx), sizeof(void *)));
	if (!idx)
		return -ENOMEM;
	inode->chunkindexes = idx;

	for (pos = 0; pos < inode->i_size; pos += len) {
		struct erofs_blobchunk *chunk;

		len = min_t(erofs_off_t, inode->i_size - pos, chunksize);
		chunk = erofs_get_unhashed_chunk(0, EROFS_NULL_ADDR, -1);
		if (IS_ERR(chunk)) {
			free(inode->chunkindexes);
			inode->chunkindexes = NULL;
			return PTR_ERR(chunk);
		}

		*(void **)idx++ = chunk;
	}
	inode->datalayout = EROFS_INODE_CHUNK_BASED;
	return 0;
}

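/*
 * Build chunk indexes for a file whose payload already sits in a tar data
 * blob: the chunks reference consecutive blocks starting at @data_offset
 * (on device 1 for multi-device images, otherwise appended to the single
 * data blob), so no data is copied or deduplicated here.
 */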
int tarerofs_write_chunkes(struct erofs_inode *inode, erofs_off_t data_offset)
{
	struct erofs_sb_info *sbi = inode->sbi;
	unsigned int chunkbits = ilog2(inode->i_size - 1) + 1;
	unsigned int count, unit, device_id;
	erofs_off_t chunksize, len, pos;
	erofs_blk_t blkaddr;
	struct erofs_inode_chunk_index *idx;

	if (chunkbits < sbi->blkszbits)
		chunkbits = sbi->blkszbits;
	if (chunkbits - sbi->blkszbits > EROFS_CHUNK_FORMAT_BLKBITS_MASK)
		chunkbits = EROFS_CHUNK_FORMAT_BLKBITS_MASK + sbi->blkszbits;

	inode->u.chunkformat |= chunkbits - sbi->blkszbits;
	if (sbi->extra_devices) {
		device_id = 1;
		inode->u.chunkformat |= EROFS_CHUNK_FORMAT_INDEXES;
		unit = sizeof(struct erofs_inode_chunk_index);
		DBG_BUGON(erofs_blkoff(sbi, data_offset));
		blkaddr = erofs_blknr(sbi, data_offset);
	} else {
		device_id = 0;
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;
		DBG_BUGON(erofs_blkoff(sbi, datablob_size));
		blkaddr = erofs_blknr(sbi, datablob_size);
		datablob_size += round_up(inode->i_size, erofs_blksiz(sbi));
	}
	chunksize = 1ULL << chunkbits;
	count = DIV_ROUND_UP(inode->i_size, chunksize);

	inode->extent_isize = count * unit;
	idx = calloc(count, max(sizeof(*idx), sizeof(void *)));
	if (!idx)
		return -ENOMEM;
	inode->chunkindexes = idx;

	for (pos = 0; pos < inode->i_size; pos += len) {
		struct erofs_blobchunk *chunk;

		len = min_t(erofs_off_t, inode->i_size - pos, chunksize);

		chunk = erofs_get_unhashed_chunk(device_id, blkaddr,
						 data_offset);
		if (IS_ERR(chunk)) {
			free(inode->chunkindexes);
			inode->chunkindexes = NULL;
			return PTR_ERR(chunk);
		}

		*(void **)idx++ = chunk;
		blkaddr += erofs_blknr(sbi, len);
		data_offset += len;
	}
	inode->datalayout = EROFS_INODE_CHUNK_BASED;
	return 0;
}

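/*
 * Finalize blob data at image build time: for multi-device images, fill in
 * the on-disk device table (per-device block counts and mapped addresses);
 * otherwise reserve space in the primary image and copy the temporary blob
 * file (or simply extend the image in the blobfile-less case) into place.
 */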
int erofs_mkfs_dump_blobs(struct erofs_sb_info *sbi)
{
	struct erofs_buffer_head *bh;
	ssize_t length, ret;
	u64 pos_in, pos_out;

	if (blobfile) {
		fflush(blobfile);
		length = ftell(blobfile);
		if (length < 0)
			return -errno;

		if (sbi->extra_devices)
			sbi->devs[0].blocks = erofs_blknr(sbi, length);
		else
			datablob_size = length;
	}

	if (sbi->extra_devices) {
		unsigned int i, ret;
		erofs_blk_t nblocks;

		nblocks = erofs_mapbh(sbi->bmgr, NULL);
		pos_out = erofs_btell(bh_devt, false);
		i = 0;
		do {
			struct erofs_deviceslot dis = {
				.mapped_blkaddr = cpu_to_le32(nblocks),
				.blocks = cpu_to_le32(sbi->devs[i].blocks),
			};

			memcpy(dis.tag, sbi->devs[i].tag, sizeof(dis.tag));
			ret = erofs_dev_write(sbi, &dis, pos_out, sizeof(dis));
			if (ret)
				return ret;
			pos_out += sizeof(dis);
			nblocks += sbi->devs[i].blocks;
		} while (++i < sbi->extra_devices);
		bh_devt->op = &erofs_drop_directly_bhops;
		erofs_bdrop(bh_devt, false);
		return 0;
	}

	bh = erofs_balloc(sbi->bmgr, DATA, datablob_size, 0, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	erofs_mapbh(NULL, bh->block);

	pos_out = erofs_btell(bh, false);
	remapped_base = erofs_blknr(sbi, pos_out);
	pos_out += sbi->bdev.offset;
	if (blobfile) {
		pos_in = 0;
		do {
			length = min_t(erofs_off_t, datablob_size, SSIZE_MAX);
			ret = erofs_copy_file_range(fileno(blobfile), &pos_in,
					sbi->bdev.fd, &pos_out, length);
		} while (ret > 0 && (datablob_size -= ret));

		if (ret >= 0) {
			if (datablob_size) {
				erofs_err("failed to append the remaining %llu-byte chunk data",
					  datablob_size);
				ret = -EIO;
			} else {
				ret = 0;
			}
		}
	} else {
		ret = erofs_io_ftruncate(&sbi->bdev, pos_out + datablob_size);
	}
	bh->op = &erofs_drop_directly_bhops;
	erofs_bdrop(bh, false);
	return ret;
}

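/*
 * Release every resource owned by this file: close the temporary blob file
 * and free both the deduplication hashmap entries and the unhashed chunks.
 */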
void erofs_blob_exit(void)
{
	struct hashmap_iter iter;
	struct hashmap_entry *e;
	struct erofs_blobchunk *bc, *n;

	if (blobfile)
		fclose(blobfile);

	/* Disable hashmap shrinking, which effectively disables rehashing.
	 * This way we can iterate over the entire hashmap efficiently
	 * and safely by using hashmap_iter_next(). */
	hashmap_disable_shrink(&blob_hashmap);
	e = hashmap_iter_first(&blob_hashmap, &iter);
	while (e) {
		bc = container_of((struct hashmap_entry *)e,
				  struct erofs_blobchunk, ent);
		DBG_BUGON(hashmap_remove(&blob_hashmap, e) != e);
		free(bc);
		e = hashmap_iter_next(&iter);
	}
	DBG_BUGON(hashmap_free(&blob_hashmap));

	list_for_each_entry_safe(bc, n, &unhashed_blobchunks, list) {
		list_del(&bc->list);
		free(bc);
	}
}

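/*
 * Pre-register the SHA-256 of an all-zero chunk so that later zero-filled
 * chunks deduplicate to erofs_holechunk instead of being written out.
 */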
static int erofs_insert_zerochunk(erofs_off_t chunksize)
{
	u8 *zeros;
	struct erofs_blobchunk *chunk;
	u8 sha256[32];
	unsigned int hash;
	int ret = 0;

	zeros = calloc(1, chunksize);
	if (!zeros)
		return -ENOMEM;

	erofs_sha256(zeros, chunksize, sha256);
	free(zeros);
	hash = memhash(sha256, sizeof(sha256));
	chunk = malloc(sizeof(struct erofs_blobchunk));
	if (!chunk)
		return -ENOMEM;

	chunk->chunksize = chunksize;
	/* treat a chunk filled with zeros as a hole */
	chunk->blkaddr = erofs_holechunk.blkaddr;
	memcpy(chunk->sha256, sha256, sizeof(sha256));

	hashmap_entry_init(&chunk->ent, hash);
	hashmap_add(&blob_hashmap, chunk);
	return ret;
}

619
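/*
 * Open the blob target: an anonymous temporary file when the chunk data goes
 * into the primary image, or the user-supplied path for a separate data blob
 * (multi-device layout), then seed the deduplication hashmap with the
 * all-zero chunk of the configured size.
 */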
int erofs_blob_init(const char *blobfile_path, erofs_off_t chunksize)
{
	if (!blobfile_path) {
#ifdef HAVE_TMPFILE64
		blobfile = tmpfile64();
#else
		blobfile = tmpfile();
#endif
		multidev = false;
	} else {
		blobfile = fopen(blobfile_path, "wb");
		multidev = true;
	}
	if (!blobfile)
		return -EACCES;

	hashmap_init(&blob_hashmap, erofs_blob_hashmap_cmp, 0);
	return erofs_insert_zerochunk(chunksize);
}

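/*
 * Allocate the in-memory device array and reserve the on-disk device table
 * (DEVT) area; the slots themselves are filled in by erofs_mkfs_dump_blobs().
 */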
int erofs_mkfs_init_devices(struct erofs_sb_info *sbi, unsigned int devices)
{
	if (!devices)
		return 0;

	sbi->devs = calloc(devices, sizeof(sbi->devs[0]));
	if (!sbi->devs)
		return -ENOMEM;

	bh_devt = erofs_balloc(sbi->bmgr, DEVT,
			       sizeof(struct erofs_deviceslot) * devices, 0, 0);
	if (IS_ERR(bh_devt)) {
		free(sbi->devs);
		return PTR_ERR(bh_devt);
	}
	erofs_mapbh(NULL, bh_devt->block);
	bh_devt->op = &erofs_skip_write_bhops;
	sbi->devt_slotoff = erofs_btell(bh_devt, false) / EROFS_DEVT_SLOT_SIZE;
	sbi->extra_devices = devices;
	erofs_sb_set_device_table(sbi);
	return 0;
}