// SPDX-License-Identifier: GPL-2.0+ OR Apache-2.0
/*
 * Copyright (C) 2020 Gao Xiang <hsiangkao@aol.com>
 * Compression support by Huang Jianan <huangjianan@oppo.com>
 */
#include <stdlib.h>
#include "erofs/print.h"
#include "erofs/internal.h"
#include "erofs/io.h"
#include "erofs/trace.h"
#include "erofs/decompress.h"

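/*
 * Map a logical offset to its on-disk location for flat (non-chunked)
 * layouts: plain data blocks come first, optionally followed by a
 * tail-packed inline part that must not cross a meta block boundary.
 */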
static int erofs_map_blocks_flatmode(struct erofs_inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	int err = 0;
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = inode;
	struct erofs_sb_info *sbi = inode->sbi;
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	trace_erofs_map_blocks_flatmode_enter(inode, map, flags);

	nblocks = BLK_ROUND_UP(sbi, inode->i_size);
	lastblk = nblocks - tailendpacking;

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;

	if (offset < erofs_pos(sbi, lastblk)) {
		map->m_pa = erofs_pos(sbi, vi->u.i_blkaddr) + map->m_la;
		map->m_plen = erofs_pos(sbi, lastblk) - offset;
	} else if (tailendpacking) {
		/* 2 - inode inline B: inode, [xattrs], inline last blk... */
		map->m_pa = erofs_iloc(vi) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(sbi, map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in the same meta block */
		if (erofs_blkoff(sbi, map->m_pa) + map->m_plen >
							erofs_blksiz(sbi)) {
			erofs_err("inline data crosses block boundary @ nid %" PRIu64,
				  vi->nid);
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto err_out;
		}

		map->m_flags |= EROFS_MAP_META;
	} else {
		erofs_err("internal error @ nid: %" PRIu64 " (size %llu), m_la 0x%" PRIx64,
			  vi->nid, (unsigned long long)inode->i_size, map->m_la);
		DBG_BUGON(1);
		err = -EIO;
		goto err_out;
	}

	map->m_llen = map->m_plen;
err_out:
	trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
	return err;
}

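/*
 * Generic extent mapping: out-of-bound accesses are left unmapped,
 * non-chunk-based layouts are delegated to flatmode mapping, and
 * chunk-based inodes are resolved through either a block map or
 * full chunk indexes (which can also carry a device id).
 */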
int erofs_map_blocks(struct erofs_inode *inode,
		     struct erofs_map_blocks *map, int flags)
{
	struct erofs_inode *vi = inode;
	struct erofs_sb_info *sbi = inode->sbi;
	struct erofs_inode_chunk_index *idx;
	u8 buf[EROFS_MAX_BLOCK_SIZE];
	u64 chunknr;
	unsigned int unit;
	erofs_off_t pos;
	int err = 0;

	map->m_deviceid = 0;
	if (map->m_la >= inode->i_size) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	if (vi->datalayout != EROFS_INODE_CHUNK_BASED)
		return erofs_map_blocks_flatmode(inode, map, flags);

	if (vi->u.chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(*idx);			/* chunk index */
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */

	chunknr = map->m_la >> vi->u.chunkbits;
	pos = roundup(erofs_iloc(vi) + vi->inode_isize +
		      vi->xattr_isize, unit) + unit * chunknr;

	err = blk_read(sbi, 0, buf, erofs_blknr(sbi, pos), 1);
	if (err < 0)
		return -EIO;

	map->m_la = chunknr << vi->u.chunkbits;
	map->m_plen = min_t(erofs_off_t, 1UL << vi->u.chunkbits,
			    roundup(inode->i_size - map->m_la, erofs_blksiz(sbi)));

	/* handle block map */
	if (!(vi->u.chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
		__le32 *blkaddr = (void *)buf + erofs_blkoff(sbi, pos);

		if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
			map->m_flags = 0;
		} else {
			map->m_pa = erofs_pos(sbi, le32_to_cpu(*blkaddr));
			map->m_flags = EROFS_MAP_MAPPED;
		}
		goto out;
	}
	/* parse chunk indexes */
	idx = (void *)buf + erofs_blkoff(sbi, pos);
	switch (le32_to_cpu(idx->blkaddr)) {
	case EROFS_NULL_ADDR:
		map->m_flags = 0;
		break;
	default:
		map->m_deviceid = le16_to_cpu(idx->device_id) &
			sbi->device_id_mask;
		map->m_pa = erofs_pos(sbi, le32_to_cpu(idx->blkaddr));
		map->m_flags = EROFS_MAP_MAPPED;
		break;
	}
out:
	map->m_llen = map->m_plen;
	return err;
}

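/*
 * Translate a physical address to its backing device: a non-zero
 * device id is just range-checked here, while addresses on a
 * multi-device image without an explicit id are matched against each
 * device's mapped block range and rebased onto that device.
 */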
int erofs_map_dev(struct erofs_sb_info *sbi, struct erofs_map_dev *map)
{
	struct erofs_device_info *dif;
	int id;

	if (map->m_deviceid) {
		if (sbi->extra_devices < map->m_deviceid)
			return -ENODEV;
	} else if (sbi->extra_devices) {
		for (id = 0; id < sbi->extra_devices; ++id) {
			erofs_off_t startoff, length;

			dif = sbi->devs + id;
			if (!dif->mapped_blkaddr)
				continue;
			startoff = erofs_pos(sbi, dif->mapped_blkaddr);
			length = erofs_pos(sbi, dif->blocks);

			if (map->m_pa >= startoff &&
			    map->m_pa < startoff + length) {
				map->m_pa -= startoff;
				break;
			}
		}
	}
	return 0;
}

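/*
 * Read one uncompressed extent that was previously resolved by
 * erofs_map_blocks(); @offset is relative to the start of the extent.
 */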
int erofs_read_one_data(struct erofs_inode *inode, struct erofs_map_blocks *map,
			char *buffer, u64 offset, size_t len)
{
	struct erofs_sb_info *sbi = inode->sbi;
	struct erofs_map_dev mdev;
	int ret;

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map->m_deviceid,
		.m_pa = map->m_pa,
	};
	ret = erofs_map_dev(sbi, &mdev);
	if (ret)
		return ret;

	ret = dev_read(sbi, mdev.m_deviceid, buffer, mdev.m_pa + offset, len);
	if (ret < 0)
		return -EIO;
	return 0;
}

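/*
 * Read plain (uncompressed) data by walking the extents covering
 * [offset, offset + size); unmapped holes and the post-EOF range are
 * zero-filled.
 */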
static int erofs_read_raw_data(struct erofs_inode *inode, char *buffer,
			       erofs_off_t size, erofs_off_t offset)
{
	struct erofs_map_blocks map = {
		.index = UINT_MAX,
	};
	int ret;
	erofs_off_t ptr = offset;

	while (ptr < offset + size) {
		char *const estart = buffer + ptr - offset;
		erofs_off_t eend, moff = 0;

		map.m_la = ptr;
		ret = erofs_map_blocks(inode, &map, 0);
		if (ret)
			return ret;

		DBG_BUGON(map.m_plen != map.m_llen);

		/* trim extent */
		eend = min(offset + size, map.m_la + map.m_llen);
		DBG_BUGON(ptr < map.m_la);

		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
			if (!map.m_llen) {
				/* reached EOF */
				memset(estart, 0, offset + size - ptr);
				ptr = offset + size;
				continue;
			}
			memset(estart, 0, eend - ptr);
			ptr = eend;
			continue;
		}

		if (ptr > map.m_la) {
			moff = ptr - map.m_la;
			map.m_la = ptr;
		}

		ret = erofs_read_one_data(inode, &map, estart, moff,
					  eend - map.m_la);
		if (ret)
			return ret;
		ptr = eend;
	}
	return 0;
}

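/*
 * Read and decompress a single compressed extent. Fragment extents
 * are redirected to the packed inode instead, and interlaced extents
 * pass the in-block offset of m_la through to the decompressor.
 */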
int z_erofs_read_one_data(struct erofs_inode *inode,
			  struct erofs_map_blocks *map, char *raw, char *buffer,
			  erofs_off_t skip, erofs_off_t length, bool trimmed)
{
	struct erofs_sb_info *sbi = inode->sbi;
	struct erofs_map_dev mdev;
	int ret = 0;

	if (map->m_flags & EROFS_MAP_FRAGMENT) {
		struct erofs_inode packed_inode = {
			.sbi = sbi,
			.nid = sbi->packed_nid,
		};

		ret = erofs_read_inode_from_disk(&packed_inode);
		if (ret) {
			erofs_err("failed to read packed inode from disk");
			return ret;
		}

		return erofs_pread(&packed_inode, buffer, length - skip,
				   inode->fragmentoff + skip);
	}

	/* no device id here, thus it will always succeed */
	mdev = (struct erofs_map_dev) {
		.m_pa = map->m_pa,
	};
	ret = erofs_map_dev(sbi, &mdev);
	if (ret) {
		DBG_BUGON(1);
		return ret;
	}

	ret = dev_read(sbi, mdev.m_deviceid, raw, mdev.m_pa, map->m_plen);
	if (ret < 0)
		return ret;

	ret = z_erofs_decompress(&(struct z_erofs_decompress_req) {
			.sbi = sbi,
			.in = raw,
			.out = buffer,
			.decodedskip = skip,
			.interlaced_offset =
				map->m_algorithmformat == Z_EROFS_COMPRESSION_INTERLACED ?
					erofs_blkoff(sbi, map->m_la) : 0,
			.inputsize = map->m_plen,
			.decodedlength = length,
			.alg = map->m_algorithmformat,
			.partial_decoding = trimmed ? true :
				!(map->m_flags & EROFS_MAP_FULL_MAPPED) ||
					(map->m_flags & EROFS_MAP_PARTIAL_REF),
		});
	if (ret < 0)
		return ret;
	return 0;
}

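/*
 * Read decompressed data backwards from the end of the requested
 * range, so that each z_erofs_map_blocks_iter() call resolves the
 * extent covering the last byte not yet read; a shared raw buffer is
 * grown on demand to hold the compressed input.
 */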
static int z_erofs_read_data(struct erofs_inode *inode, char *buffer,
			     erofs_off_t size, erofs_off_t offset)
{
	erofs_off_t end, length, skip;
	struct erofs_map_blocks map = {
		.index = UINT_MAX,
	};
	bool trimmed;
	unsigned int bufsize = 0;
	char *raw = NULL;
	int ret = 0;

	end = offset + size;
	while (end > offset) {
		map.m_la = end - 1;

		ret = z_erofs_map_blocks_iter(inode, &map, 0);
		if (ret)
			break;

		/*
		 * Trim to the needed size if the returned extent is
		 * larger than requested, and set the partial flag as
		 * well.
		 */
		if (end < map.m_la + map.m_llen) {
			length = end - map.m_la;
			trimmed = true;
		} else {
			DBG_BUGON(end != map.m_la + map.m_llen);
			length = map.m_llen;
			trimmed = false;
		}

		if (map.m_la < offset) {
			skip = offset - map.m_la;
			end = offset;
		} else {
			skip = 0;
			end = map.m_la;
		}

		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
			memset(buffer + end - offset, 0, length - skip);
			end = map.m_la;
			continue;
		}

		if (map.m_plen > bufsize) {
			char *newraw;

			bufsize = map.m_plen;
			/* use a temporary so @raw is not leaked if realloc() fails */
			newraw = realloc(raw, bufsize);
			if (!newraw) {
				ret = -ENOMEM;
				break;
			}
			raw = newraw;
		}

		ret = z_erofs_read_one_data(inode, &map, raw,
				buffer + end - offset, skip, length, trimmed);
		if (ret < 0)
			break;
	}
	free(raw);
	return ret < 0 ? ret : 0;
}

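/*
 * Read @count bytes of file data at @offset, dispatching on the inode
 * data layout. A minimal usage sketch (assuming @inode was already
 * filled in, e.g. by erofs_read_inode_from_disk()):
 *
 *	char buf[256];
 *	int err = erofs_pread(inode, buf, sizeof(buf), 0);
 */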
int erofs_pread(struct erofs_inode *inode, char *buf,
		erofs_off_t count, erofs_off_t offset)
{
	switch (inode->datalayout) {
	case EROFS_INODE_FLAT_PLAIN:
	case EROFS_INODE_FLAT_INLINE:
	case EROFS_INODE_CHUNK_BASED:
		return erofs_read_raw_data(inode, buf, count, offset);
	case EROFS_INODE_COMPRESSED_FULL:
	case EROFS_INODE_COMPRESSED_COMPACT:
		return z_erofs_read_data(inode, buf, count, offset);
	default:
		break;
	}
	return -EINVAL;
}

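/*
 * Inode-backed variant of the variable-sized metadata reader: each
 * record is a 4-byte-aligned __le16 length followed by the payload.
 */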
static void *erofs_read_metadata_nid(struct erofs_sb_info *sbi, erofs_nid_t nid,
				     erofs_off_t *offset, int *lengthp)
{
	struct erofs_inode vi = { .sbi = sbi, .nid = nid };
	__le16 __len;
	int ret, len;
	char *buffer;

	ret = erofs_read_inode_from_disk(&vi);
	if (ret)
		return ERR_PTR(ret);

	*offset = round_up(*offset, 4);
	ret = erofs_pread(&vi, (void *)&__len, sizeof(__le16), *offset);
	if (ret)
		return ERR_PTR(ret);

	len = le16_to_cpu(__len);
	if (!len)
		return ERR_PTR(-EFSCORRUPTED);

	buffer = malloc(len);
	if (!buffer)
		return ERR_PTR(-ENOMEM);
	*offset += sizeof(__le16);
	*lengthp = len;

	ret = erofs_pread(&vi, buffer, len, *offset);
	if (ret) {
		free(buffer);
		return ERR_PTR(ret);
	}
	*offset += len;
	return buffer;
}

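/*
 * Same record format as above, but read directly from the primary
 * block device, copying the payload piecewise since it may span
 * block boundaries.
 */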
static void *erofs_read_metadata_bdi(struct erofs_sb_info *sbi,
				     erofs_off_t *offset, int *lengthp)
{
	int ret, len, i, cnt;
	void *buffer;
	u8 data[EROFS_MAX_BLOCK_SIZE];

	*offset = round_up(*offset, 4);
	ret = blk_read(sbi, 0, data, erofs_blknr(sbi, *offset), 1);
	if (ret)
		return ERR_PTR(ret);
	len = le16_to_cpu(*(__le16 *)&data[erofs_blkoff(sbi, *offset)]);
	if (!len)
		return ERR_PTR(-EFSCORRUPTED);

	buffer = malloc(len);
	if (!buffer)
		return ERR_PTR(-ENOMEM);
	*offset += sizeof(__le16);
	*lengthp = len;

	for (i = 0; i < len; i += cnt) {
		cnt = min_t(int, erofs_blksiz(sbi) - erofs_blkoff(sbi, *offset),
			    len - i);
		ret = blk_read(sbi, 0, data, erofs_blknr(sbi, *offset), 1);
		if (ret) {
			free(buffer);
			return ERR_PTR(ret);
		}
		memcpy(buffer + i, data + erofs_blkoff(sbi, *offset), cnt);
		*offset += cnt;
	}
	return buffer;
}

/*
 * Read variable-sized metadata; the offset will be aligned to a
 * 4-byte boundary first.
 *
 * @nid is 0 if the metadata is in the meta inode.
 */
void *erofs_read_metadata(struct erofs_sb_info *sbi, erofs_nid_t nid,
			  erofs_off_t *offset, int *lengthp)
{
	if (nid)
		return erofs_read_metadata_nid(sbi, nid, offset, lengthp);
	return erofs_read_metadata_bdi(sbi, offset, lengthp);
}