// SPDX-License-Identifier: GPL-2.0+ OR Apache-2.0
/*
 * erofs-utils/lib/blobchunk.c
 *
 * Copyright (C) 2021, Alibaba Cloud
 */
#define _GNU_SOURCE
#include "erofs/hashmap.h"
#include "erofs/blobchunk.h"
#include "erofs/block_list.h"
#include "erofs/cache.h"
#include "erofs/io.h"
#include "sha256.h"
#include <unistd.h>

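/*
 * In-memory chunk descriptor. A chunk is tracked either in blob_hashmap
 * (keyed by the SHA-256 of its payload, via `ent`/`chunksize`) or on the
 * unhashed_blobchunks list (via `list`/`sourceoffset`), hence the unions.
 */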
struct erofs_blobchunk {
	union {
		struct hashmap_entry ent;
		struct list_head list;
	};
	char		sha256[32];
	unsigned int	device_id;
	union {
		erofs_off_t	chunksize;
		erofs_off_t	sourceoffset;
	};
	erofs_blk_t	blkaddr;
};

static struct hashmap blob_hashmap;
static FILE *blobfile;
static erofs_blk_t remapped_base;
static erofs_off_t datablob_size;
static bool multidev;
static struct erofs_buffer_head *bh_devt;
struct erofs_blobchunk erofs_holechunk = {
	.blkaddr = EROFS_NULL_ADDR,
};
static LIST_HEAD(unhashed_blobchunks);

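/*
 * Allocate a chunk that bypasses content deduplication: it is tracked on
 * the unhashed_blobchunks list and freed in erofs_blob_exit().
 */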
struct erofs_blobchunk *erofs_get_unhashed_chunk(unsigned int device_id,
		erofs_blk_t blkaddr, erofs_off_t sourceoffset)
{
	struct erofs_blobchunk *chunk;

	chunk = calloc(1, sizeof(struct erofs_blobchunk));
	if (!chunk)
		return ERR_PTR(-ENOMEM);

	chunk->device_id = device_id;
	chunk->blkaddr = blkaddr;
	chunk->sourceoffset = sourceoffset;
	list_add_tail(&chunk->list, &unhashed_blobchunks);
	return chunk;
}

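/*
 * Look up a chunk by the SHA-256 of its payload; on a miss, append the
 * data (padded to a full block) to the staging blobfile and index the
 * new chunk in blob_hashmap. Returns the (possibly shared) chunk or an
 * ERR_PTR() on failure.
 */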
static struct erofs_blobchunk *erofs_blob_getchunk(struct erofs_sb_info *sbi,
						u8 *buf, erofs_off_t chunksize)
{
	static u8 zeroed[EROFS_MAX_BLOCK_SIZE];
	struct erofs_blobchunk *chunk;
	unsigned int hash, padding;
	u8 sha256[32];
	erofs_off_t blkpos;
	int ret;

	erofs_sha256(buf, chunksize, sha256);
	hash = memhash(sha256, sizeof(sha256));
	chunk = hashmap_get_from_hash(&blob_hashmap, hash, sha256);
	if (chunk) {
		DBG_BUGON(chunksize != chunk->chunksize);

		sbi->saved_by_deduplication += chunksize;
		if (chunk->blkaddr == erofs_holechunk.blkaddr) {
			chunk = &erofs_holechunk;
			erofs_dbg("Found duplicated hole chunk");
		} else {
			erofs_dbg("Found duplicated chunk at %u",
				  chunk->blkaddr);
		}
		return chunk;
	}

	chunk = malloc(sizeof(struct erofs_blobchunk));
	if (!chunk)
		return ERR_PTR(-ENOMEM);

	chunk->chunksize = chunksize;
	memcpy(chunk->sha256, sha256, sizeof(sha256));
	blkpos = ftell(blobfile);
	DBG_BUGON(erofs_blkoff(sbi, blkpos));

	if (sbi->extra_devices)
		chunk->device_id = 1;
	else
		chunk->device_id = 0;
	chunk->blkaddr = erofs_blknr(sbi, blkpos);

	erofs_dbg("Writing chunk (%llu bytes) to %u",
		  (unsigned long long)chunksize, chunk->blkaddr);
	ret = fwrite(buf, chunksize, 1, blobfile);
	if (ret == 1) {
		padding = erofs_blkoff(sbi, chunksize);
		if (padding) {
			padding = erofs_blksiz(sbi) - padding;
			ret = fwrite(zeroed, padding, 1, blobfile);
		}
	}

	if (ret < 1) {
		free(chunk);
		return ERR_PTR(-ENOSPC);
	}

	hashmap_entry_init(&chunk->ent, hash);
	hashmap_add(&blob_hashmap, chunk);
	return chunk;
}

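/* Hashmap comparator: chunks are equal iff their SHA-256 digests match. */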
static int erofs_blob_hashmap_cmp(const void *a, const void *b,
				  const void *key)
{
	const struct erofs_blobchunk *ec1 =
			container_of((struct hashmap_entry *)a,
				     struct erofs_blobchunk, ent);
	const struct erofs_blobchunk *ec2 =
			container_of((struct hashmap_entry *)b,
				     struct erofs_blobchunk, ent);

	return memcmp(ec1->sha256, key ? key : ec2->sha256,
		      sizeof(ec1->sha256));
}

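/*
 * Translate the in-memory chunk pointers of @inode into on-disk chunk
 * indexes (or block-map entries), merging physically contiguous chunks
 * into extents for the block lists, and write them out at offset @off.
 */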
int erofs_blob_write_chunk_indexes(struct erofs_inode *inode,
				   erofs_off_t off)
{
	struct erofs_inode_chunk_index idx = {0};
	erofs_blk_t extent_start = EROFS_NULL_ADDR;
	erofs_blk_t extent_end, chunkblks;
	erofs_off_t source_offset;
	unsigned int dst, src, unit;
	bool first_extent = true;

	if (inode->u.chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(struct erofs_inode_chunk_index);
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;

	chunkblks = 1U << (inode->u.chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
	for (dst = src = 0; dst < inode->extent_isize;
	     src += sizeof(void *), dst += unit) {
		struct erofs_blobchunk *chunk;

		chunk = *(void **)(inode->chunkindexes + src);

		if (chunk->blkaddr == EROFS_NULL_ADDR) {
			idx.blkaddr = EROFS_NULL_ADDR;
		} else if (chunk->device_id) {
			DBG_BUGON(!(inode->u.chunkformat & EROFS_CHUNK_FORMAT_INDEXES));
			idx.blkaddr = chunk->blkaddr;
			extent_start = EROFS_NULL_ADDR;
		} else {
			idx.blkaddr = remapped_base + chunk->blkaddr;
		}

		if (extent_start == EROFS_NULL_ADDR ||
		    idx.blkaddr != extent_end) {
			if (extent_start != EROFS_NULL_ADDR) {
				tarerofs_blocklist_write(extent_start,
						extent_end - extent_start,
						source_offset);
				erofs_droid_blocklist_write_extent(inode,
					extent_start,
					extent_end - extent_start,
					first_extent, false);
				first_extent = false;
			}
			extent_start = idx.blkaddr;
			source_offset = chunk->sourceoffset;
		}
		extent_end = idx.blkaddr + chunkblks;
		idx.device_id = cpu_to_le16(chunk->device_id);
		idx.blkaddr = cpu_to_le32(idx.blkaddr);

		if (unit == EROFS_BLOCK_MAP_ENTRY_SIZE)
			memcpy(inode->chunkindexes + dst, &idx.blkaddr, unit);
		else
			memcpy(inode->chunkindexes + dst, &idx, sizeof(idx));
	}
	off = roundup(off, unit);
	if (extent_start != EROFS_NULL_ADDR)
		tarerofs_blocklist_write(extent_start, extent_end - extent_start,
					 source_offset);
	erofs_droid_blocklist_write_extent(inode, extent_start,
			extent_start == EROFS_NULL_ADDR ?
					0 : extent_end - extent_start,
			first_extent, true);

	return dev_write(inode->sbi, inode->chunkindexes, off, inode->extent_isize);
}

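/*
 * Shrink the chunk index array by merging every 2^(new_chunkbits -
 * chunkbits) adjacent entries into one larger chunk, then record the
 * resulting chunk size in chunkformat.
 */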
int erofs_blob_mergechunks(struct erofs_inode *inode, unsigned int chunkbits,
			   unsigned int new_chunkbits)
{
	struct erofs_sb_info *sbi = inode->sbi;
	unsigned int dst, src, unit, count;

	if (new_chunkbits - sbi->blkszbits > EROFS_CHUNK_FORMAT_BLKBITS_MASK)
		new_chunkbits = EROFS_CHUNK_FORMAT_BLKBITS_MASK + sbi->blkszbits;
	if (chunkbits >= new_chunkbits)		/* no need to merge */
		goto out;

	if (inode->u.chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(struct erofs_inode_chunk_index);
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;

	count = round_up(inode->i_size, 1ULL << new_chunkbits) >> new_chunkbits;
	for (dst = src = 0; dst < count; ++dst) {
		*((void **)inode->chunkindexes + dst) =
			*((void **)inode->chunkindexes + src);
		src += 1U << (new_chunkbits - chunkbits);
	}

	DBG_BUGON(count * unit >= inode->extent_isize);
	inode->extent_isize = count * unit;
	chunkbits = new_chunkbits;
out:
	inode->u.chunkformat = (chunkbits - sbi->blkszbits) |
		(inode->u.chunkformat & ~EROFS_CHUNK_FORMAT_BLKBITS_MASK);
	return 0;
}

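/*
 * lowbit() yields the largest power-of-two factor of the extent length
 * in blocks; the minimum across all extents becomes the merge target
 * (see erofs_blob_write_chunked_file()).
 */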
static void erofs_update_minextblks(struct erofs_sb_info *sbi,
		    erofs_off_t start, erofs_off_t end, erofs_blk_t *minextblks)
{
	erofs_blk_t lb;

	lb = lowbit((end - start) >> sbi->blkszbits);
	if (lb && lb < *minextblks)
		*minextblks = lb;
}
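
/*
 * Two adjacent chunks can form one extent if both are holes or if they
 * are physically consecutive on the same device.
 */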
static bool erofs_blob_can_merge(struct erofs_sb_info *sbi,
				 struct erofs_blobchunk *lastch,
				 struct erofs_blobchunk *chunk)
{
	if (!lastch)
		return true;
	if (lastch == &erofs_holechunk && chunk == &erofs_holechunk)
		return true;
	if (lastch->device_id == chunk->device_id &&
	    erofs_pos(sbi, lastch->blkaddr) + lastch->chunksize ==
	    erofs_pos(sbi, chunk->blkaddr))
		return true;

	return false;
}
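
/*
 * Split a regular file into fixed-size chunks, deduplicating them through
 * erofs_blob_getchunk() and representing holes (found via SEEK_DATA where
 * available) with the shared hole chunk; finally merge chunks up to the
 * largest extent alignment observed.
 */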
int erofs_blob_write_chunked_file(struct erofs_inode *inode, int fd,
				  erofs_off_t startoff)
{
	struct erofs_sb_info *sbi = inode->sbi;
	unsigned int chunkbits = cfg.c_chunkbits;
	unsigned int count, unit;
	struct erofs_blobchunk *chunk, *lastch;
	struct erofs_inode_chunk_index *idx;
	erofs_off_t pos, len, chunksize, interval_start;
	erofs_blk_t minextblks;
	u8 *chunkdata;
	int ret;

#ifdef SEEK_DATA
	/* if the file is fully sparsed, use one big chunk instead */
	if (lseek(fd, startoff, SEEK_DATA) < 0 && errno == ENXIO) {
		chunkbits = ilog2(inode->i_size - 1) + 1;
		if (chunkbits < sbi->blkszbits)
			chunkbits = sbi->blkszbits;
	}
#endif
	if (chunkbits - sbi->blkszbits > EROFS_CHUNK_FORMAT_BLKBITS_MASK)
		chunkbits = EROFS_CHUNK_FORMAT_BLKBITS_MASK + sbi->blkszbits;
	chunksize = 1ULL << chunkbits;
	count = DIV_ROUND_UP(inode->i_size, chunksize);

	if (sbi->extra_devices)
		inode->u.chunkformat |= EROFS_CHUNK_FORMAT_INDEXES;
	if (inode->u.chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(struct erofs_inode_chunk_index);
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;

	chunkdata = malloc(chunksize);
	if (!chunkdata)
		return -ENOMEM;

	inode->extent_isize = count * unit;
	inode->chunkindexes = malloc(count * max(sizeof(*idx), sizeof(void *)));
	if (!inode->chunkindexes) {
		ret = -ENOMEM;
		goto err;
	}
	idx = inode->chunkindexes;
	lastch = NULL;
	minextblks = BLK_ROUND_UP(sbi, inode->i_size);
	interval_start = 0;

	for (pos = 0; pos < inode->i_size; pos += len) {
#ifdef SEEK_DATA
		off_t offset = lseek(fd, pos + startoff, SEEK_DATA);

		if (offset < 0) {
			if (errno != ENXIO)
				offset = pos;
			else
				offset = ((pos >> chunkbits) + 1) << chunkbits;
		} else {
			offset -= startoff;

			if (offset != (offset & ~(chunksize - 1))) {
				offset &= ~(chunksize - 1);
				if (lseek(fd, offset + startoff, SEEK_SET) !=
					  startoff + offset) {
					ret = -EIO;
					goto err;
				}
			}
		}

		if (offset > pos) {
			if (!erofs_blob_can_merge(sbi, lastch,
						  &erofs_holechunk)) {
				erofs_update_minextblks(sbi, interval_start,
							pos, &minextblks);
				interval_start = pos;
			}
			do {
				*(void **)idx++ = &erofs_holechunk;
				pos += chunksize;
			} while (pos < offset);
			DBG_BUGON(pos != offset);
			lastch = &erofs_holechunk;
			len = 0;
			continue;
		}
#endif

		len = min_t(u64, inode->i_size - pos, chunksize);
		ret = read(fd, chunkdata, len);
		if (ret < 0 || ret < len) {
			ret = -EIO;
			goto err;
		}

		chunk = erofs_blob_getchunk(sbi, chunkdata, len);
		if (IS_ERR(chunk)) {
			ret = PTR_ERR(chunk);
			goto err;
		}

		if (!erofs_blob_can_merge(sbi, lastch, chunk)) {
			erofs_update_minextblks(sbi, interval_start, pos,
						&minextblks);
			interval_start = pos;
		}
		*(void **)idx++ = chunk;
		lastch = chunk;
	}
	erofs_update_minextblks(sbi, interval_start, pos, &minextblks);
	inode->datalayout = EROFS_INODE_CHUNK_BASED;
	free(chunkdata);
	return erofs_blob_mergechunks(inode, chunkbits,
				      ilog2(minextblks) + sbi->blkszbits);
err:
	free(inode->chunkindexes);
	inode->chunkindexes = NULL;
	free(chunkdata);
	return ret;
}

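/*
 * Build chunk indexes for file data that already resides in an external
 * blob at @data_offset (tarerofs); chunks are unhashed, so no
 * deduplication is performed here.
 */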
int tarerofs_write_chunkes(struct erofs_inode *inode, erofs_off_t data_offset)
{
	struct erofs_sb_info *sbi = inode->sbi;
	unsigned int chunkbits = ilog2(inode->i_size - 1) + 1;
	unsigned int count, unit, device_id;
	erofs_off_t chunksize, len, pos;
	erofs_blk_t blkaddr;
	struct erofs_inode_chunk_index *idx;

	if (chunkbits < sbi->blkszbits)
		chunkbits = sbi->blkszbits;
	if (chunkbits - sbi->blkszbits > EROFS_CHUNK_FORMAT_BLKBITS_MASK)
		chunkbits = EROFS_CHUNK_FORMAT_BLKBITS_MASK + sbi->blkszbits;

	inode->u.chunkformat |= chunkbits - sbi->blkszbits;
	if (sbi->extra_devices) {
		device_id = 1;
		inode->u.chunkformat |= EROFS_CHUNK_FORMAT_INDEXES;
		unit = sizeof(struct erofs_inode_chunk_index);
		DBG_BUGON(erofs_blkoff(sbi, data_offset));
		blkaddr = erofs_blknr(sbi, data_offset);
	} else {
		device_id = 0;
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;
		DBG_BUGON(erofs_blkoff(sbi, datablob_size));
		blkaddr = erofs_blknr(sbi, datablob_size);
		datablob_size += round_up(inode->i_size, erofs_blksiz(sbi));
	}
	chunksize = 1ULL << chunkbits;
	count = DIV_ROUND_UP(inode->i_size, chunksize);

	inode->extent_isize = count * unit;
	idx = calloc(count, max(sizeof(*idx), sizeof(void *)));
	if (!idx)
		return -ENOMEM;
	inode->chunkindexes = idx;

	for (pos = 0; pos < inode->i_size; pos += len) {
		struct erofs_blobchunk *chunk;

		len = min_t(erofs_off_t, inode->i_size - pos, chunksize);

		chunk = erofs_get_unhashed_chunk(device_id, blkaddr,
						 data_offset);
		if (IS_ERR(chunk)) {
			free(inode->chunkindexes);
			inode->chunkindexes = NULL;
			return PTR_ERR(chunk);
		}

		*(void **)idx++ = chunk;
		blkaddr += erofs_blknr(sbi, len);
		data_offset += len;
	}
	inode->datalayout = EROFS_INODE_CHUNK_BASED;
	return 0;
}

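/*
 * Finalize the data blob(s): either emit the device table for
 * multi-device images, or copy the staged blobfile into the primary
 * image and record remapped_base for chunk index remapping.
 */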
int erofs_mkfs_dump_blobs(struct erofs_sb_info *sbi)
{
	struct erofs_buffer_head *bh;
	ssize_t length;
	erofs_off_t pos_in, pos_out;
	ssize_t ret;

	if (blobfile) {
		fflush(blobfile);
		length = ftell(blobfile);
		if (length < 0)
			return -errno;

		if (sbi->extra_devices)
			sbi->devs[0].blocks = erofs_blknr(sbi, length);
		else
			datablob_size = length;
	}

	if (sbi->extra_devices) {
		unsigned int i, ret;
		erofs_blk_t nblocks;

		nblocks = erofs_mapbh(NULL);
		pos_out = erofs_btell(bh_devt, false);
		i = 0;
		do {
			struct erofs_deviceslot dis = {
				.mapped_blkaddr = cpu_to_le32(nblocks),
				.blocks = cpu_to_le32(sbi->devs[i].blocks),
			};

			memcpy(dis.tag, sbi->devs[i].tag, sizeof(dis.tag));
			ret = dev_write(sbi, &dis, pos_out, sizeof(dis));
			if (ret)
				return ret;
			pos_out += sizeof(dis);
			nblocks += sbi->devs[i].blocks;
		} while (++i < sbi->extra_devices);
		bh_devt->op = &erofs_drop_directly_bhops;
		erofs_bdrop(bh_devt, false);
		return 0;
	}

	bh = erofs_balloc(DATA, blobfile ? datablob_size : 0, 0, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	erofs_mapbh(bh->block);

	pos_out = erofs_btell(bh, false);
	remapped_base = erofs_blknr(sbi, pos_out);
	if (blobfile) {
		pos_in = 0;
		ret = erofs_copy_file_range(fileno(blobfile), &pos_in,
				sbi->devfd, &pos_out, datablob_size);
		ret = (ret < 0 || ret < datablob_size) ? -EIO : 0;
	} else {
		ret = 0;
	}
	bh->op = &erofs_drop_directly_bhops;
	erofs_bdrop(bh, false);
	return ret;
}

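/* Release all chunk bookkeeping and close the staging blobfile, if any. */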
void erofs_blob_exit(void)
{
	struct hashmap_iter iter;
	struct hashmap_entry *e;
	struct erofs_blobchunk *bc, *n;

	if (blobfile)
		fclose(blobfile);

	while ((e = hashmap_iter_first(&blob_hashmap, &iter))) {
		bc = container_of((struct hashmap_entry *)e,
				  struct erofs_blobchunk, ent);
		DBG_BUGON(hashmap_remove(&blob_hashmap, e) != e);
		free(bc);
	}
	DBG_BUGON(hashmap_free(&blob_hashmap));

	list_for_each_entry_safe(bc, n, &unhashed_blobchunks, list) {
		list_del(&bc->list);
		free(bc);
	}
}

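/*
 * Pre-register an all-zero chunk of @chunksize so that zero-filled
 * chunks deduplicate to the hole chunk instead of being written out.
 */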
static int erofs_insert_zerochunk(erofs_off_t chunksize)
{
	u8 *zeros;
	struct erofs_blobchunk *chunk;
	u8 sha256[32];
	unsigned int hash;

	zeros = calloc(1, chunksize);
	if (!zeros)
		return -ENOMEM;

	erofs_sha256(zeros, chunksize, sha256);
	free(zeros);
	hash = memhash(sha256, sizeof(sha256));
	chunk = malloc(sizeof(struct erofs_blobchunk));
	if (!chunk)
		return -ENOMEM;

	chunk->chunksize = chunksize;
	/* treat chunk filled with zeros as hole */
	chunk->blkaddr = erofs_holechunk.blkaddr;
	memcpy(chunk->sha256, sha256, sizeof(sha256));

	hashmap_entry_init(&chunk->ent, hash);
	hashmap_add(&blob_hashmap, chunk);
	return 0;
}

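/*
 * Open the chunk staging file (an anonymous temporary file unless an
 * explicit blob path is given for multi-device images) and set up the
 * deduplication hashmap with a zero chunk of @chunksize.
 */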
int erofs_blob_init(const char *blobfile_path, erofs_off_t chunksize)
{
	if (!blobfile_path) {
#ifdef HAVE_TMPFILE64
		blobfile = tmpfile64();
#else
		blobfile = tmpfile();
#endif
		multidev = false;
	} else {
		blobfile = fopen(blobfile_path, "wb");
		multidev = true;
	}
	if (!blobfile)
		return -EACCES;

	hashmap_init(&blob_hashmap, erofs_blob_hashmap_cmp, 0);
	return erofs_insert_zerochunk(chunksize);
}

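/*
 * Reserve an on-disk device table (DEVT) slot array for @devices extra
 * devices and record its slot offset in the superblock info.
 */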
int erofs_mkfs_init_devices(struct erofs_sb_info *sbi, unsigned int devices)
{
	if (!devices)
		return 0;

	sbi->devs = calloc(devices, sizeof(sbi->devs[0]));
	if (!sbi->devs)
		return -ENOMEM;

	bh_devt = erofs_balloc(DEVT,
		sizeof(struct erofs_deviceslot) * devices, 0, 0);
	if (IS_ERR(bh_devt)) {
		free(sbi->devs);
		return PTR_ERR(bh_devt);
	}
	erofs_mapbh(bh_devt->block);
	bh_devt->op = &erofs_skip_write_bhops;
	sbi->devt_slotoff = erofs_btell(bh_devt, false) / EROFS_DEVT_SLOT_SIZE;
	sbi->extra_devices = devices;
	erofs_sb_set_device_table(sbi);
	return 0;
}
594