/*
 * bmap.c --- logical to physical block mapping
 *
 * Copyright (C) 1997 Theodore Ts'o.
 *
 * %Begin-Header%
 * This file may be redistributed under the terms of the GNU Library
 * General Public License, version 2.
 * %End-Header%
 */

#include "config.h"
#include <stdio.h>
#include <string.h>
#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <errno.h>

#include "ext2_fs.h"
#include "ext2fsP.h"

#if defined(__GNUC__) && !defined(NO_INLINE_FUNCS)
#define _BMAP_INLINE_	__inline__
#else
#define _BMAP_INLINE_
#endif

extern errcode_t ext2fs_bmap(ext2_filsys fs, ext2_ino_t ino,
			     struct ext2_inode *inode,
			     char *block_buf, int bmap_flags,
			     blk_t block, blk_t *phys_blk);

#define inode_bmap(inode, nr) ((inode)->i_block[(nr)])

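/*
 * Look up (or, with BMAP_SET/BMAP_ALLOC, update) entry 'nr' in the
 * indirect block 'ind'.  The result is returned in *ret_blk; any newly
 * allocated block is counted in *blocks_alloc.
 */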
static _BMAP_INLINE_ errcode_t block_ind_bmap(ext2_filsys fs, int flags,
					      blk_t ind, char *block_buf,
					      int *blocks_alloc,
					      blk_t nr, blk_t *ret_blk)
{
	errcode_t	retval;
	blk_t		b;

	if (!ind) {
		if (flags & BMAP_SET)
			return EXT2_ET_SET_BMAP_NO_IND;
		*ret_blk = 0;
		return 0;
	}
	retval = io_channel_read_blk(fs->io, ind, 1, block_buf);
	if (retval)
		return retval;

	if (flags & BMAP_SET) {
		b = *ret_blk;
#ifdef WORDS_BIGENDIAN
		b = ext2fs_swab32(b);
#endif
		((blk_t *) block_buf)[nr] = b;
		return io_channel_write_blk(fs->io, ind, 1, block_buf);
	}

	b = ((blk_t *) block_buf)[nr];

#ifdef WORDS_BIGENDIAN
	b = ext2fs_swab32(b);
#endif

	if (!b && (flags & BMAP_ALLOC)) {
		b = nr ? ext2fs_le32_to_cpu(((blk_t *)block_buf)[nr - 1]) : ind;
		retval = ext2fs_alloc_block(fs, b,
					    block_buf + fs->blocksize, &b);
		if (retval)
			return retval;

#ifdef WORDS_BIGENDIAN
		((blk_t *) block_buf)[nr] = ext2fs_swab32(b);
#else
		((blk_t *) block_buf)[nr] = b;
#endif

		retval = io_channel_write_blk(fs->io, ind, 1, block_buf);
		if (retval)
			return retval;

		(*blocks_alloc)++;
	}

	*ret_blk = b;
	return 0;
}

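/*
 * Resolve entry 'nr' through a doubly indirect block: first map to the
 * correct indirect block, then to the final entry within it.
 */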
static _BMAP_INLINE_ errcode_t block_dind_bmap(ext2_filsys fs, int flags,
					       blk_t dind, char *block_buf,
					       int *blocks_alloc,
					       blk_t nr, blk_t *ret_blk)
{
	blk_t		b = 0;
	errcode_t	retval;
	blk_t		addr_per_block;

	addr_per_block = (blk_t) fs->blocksize >> 2;

	retval = block_ind_bmap(fs, flags & ~BMAP_SET, dind, block_buf,
				blocks_alloc, nr / addr_per_block, &b);
	if (retval)
		return retval;
	retval = block_ind_bmap(fs, flags, b, block_buf, blocks_alloc,
				nr % addr_per_block, ret_blk);
	return retval;
}

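/*
 * Resolve entry 'nr' through a triply indirect block: one doubly
 * indirect lookup followed by a final indirect lookup.
 */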
static _BMAP_INLINE_ errcode_t block_tind_bmap(ext2_filsys fs, int flags,
					       blk_t tind, char *block_buf,
					       int *blocks_alloc,
					       blk_t nr, blk_t *ret_blk)
{
	blk_t		b = 0;
	errcode_t	retval;
	blk_t		addr_per_block;

	addr_per_block = (blk_t) fs->blocksize >> 2;

	retval = block_dind_bmap(fs, flags & ~BMAP_SET, tind, block_buf,
				 blocks_alloc, nr / addr_per_block, &b);
	if (retval)
		return retval;
	retval = block_ind_bmap(fs, flags, b, block_buf, blocks_alloc,
				nr % addr_per_block, ret_blk);
	return retval;
}

static errcode_t extent_bmap(ext2_filsys fs, ext2_ino_t ino,
			     struct ext2_inode *inode,
			     ext2_extent_handle_t handle,
			     char *block_buf, int bmap_flags, blk64_t block,
			     int *ret_flags, int *blocks_alloc,
			     blk64_t *phys_blk);

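/*
 * On a bigalloc file system, check whether the logical cluster
 * containing 'lblk' is already backed by a physical cluster; if so,
 * derive the physical block for 'lblk' from that mapping.
 */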
static errcode_t implied_cluster_alloc(ext2_filsys fs, ext2_ino_t ino,
				       struct ext2_inode *inode,
				       ext2_extent_handle_t handle,
				       blk64_t lblk, blk64_t *phys_blk)
{
	blk64_t	base_block, pblock = 0;
	int i;

	if (!ext2fs_has_feature_bigalloc(fs->super))
		return 0;

	base_block = lblk & ~EXT2FS_CLUSTER_MASK(fs);
	/*
	 * Except for the logical block (lblk) that was passed in, search all
	 * blocks in this logical cluster for a mapping to a physical cluster.
	 * If any such map exists, calculate the physical block that maps to
	 * the logical block and return that.
	 *
	 * The old code wouldn't even look if (block % cluster_ratio) == 0;
	 * this is incorrect if we're allocating blocks in reverse order.
	 */
	for (i = 0; i < EXT2FS_CLUSTER_RATIO(fs); i++) {
		if (base_block + i == lblk)
			continue;
		extent_bmap(fs, ino, inode, handle, 0, 0,
			    base_block + i, 0, 0, &pblock);
		if (pblock)
			break;
	}
	if (pblock == 0)
		return 0;
	*phys_blk = pblock - i + (lblk - base_block);
	return 0;
}

/* Try to map a logical block to an already-allocated physical cluster. */
errcode_t ext2fs_map_cluster_block(ext2_filsys fs, ext2_ino_t ino,
				   struct ext2_inode *inode, blk64_t lblk,
				   blk64_t *pblk)
{
	ext2_extent_handle_t handle;
	errcode_t retval;

	/* Need bigalloc and extents to be enabled */
	*pblk = 0;
	if (!ext2fs_has_feature_bigalloc(fs->super) ||
	    !(inode->i_flags & EXT4_EXTENTS_FL))
		return 0;

	retval = ext2fs_extent_open2(fs, ino, inode, &handle);
	if (retval)
		goto out;

	retval = implied_cluster_alloc(fs, ino, inode, handle, lblk, pblk);
	if (retval)
		goto out2;

out2:
	ext2fs_extent_free(handle);
out:
	return retval;
}

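/*
 * Map (or, with BMAP_SET, set) the physical block for logical block
 * 'block' in an extent-mapped inode.  With BMAP_ALLOC, a missing
 * mapping is filled in by allocating a new block and inserting it
 * into the extent tree.
 */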
static errcode_t extent_bmap(ext2_filsys fs, ext2_ino_t ino,
			     struct ext2_inode *inode,
			     ext2_extent_handle_t handle,
			     char *block_buf, int bmap_flags, blk64_t block,
			     int *ret_flags, int *blocks_alloc,
			     blk64_t *phys_blk)
{
	struct blk_alloc_ctx	alloc_ctx;
	struct ext2fs_extent	extent;
	unsigned int		offset;
	errcode_t		retval = 0;
	blk64_t			blk64 = 0;
	int			alloc = 0;
	int			set_flags;

	set_flags = bmap_flags & BMAP_UNINIT ? EXT2_EXTENT_SET_BMAP_UNINIT : 0;

	if (bmap_flags & BMAP_SET) {
		retval = ext2fs_extent_set_bmap(handle, block,
						*phys_blk, set_flags);
		return retval;
	}
	retval = ext2fs_extent_goto(handle, block);
	if (retval) {
		/* If the extent is not found, return phys_blk = 0 */
		if (retval == EXT2_ET_EXTENT_NOT_FOUND)
			goto got_block;
		return retval;
	}
	retval = ext2fs_extent_get(handle, EXT2_EXTENT_CURRENT, &extent);
	if (retval)
		return retval;
	offset = block - extent.e_lblk;
	if (block >= extent.e_lblk && (offset <= extent.e_len)) {
		*phys_blk = extent.e_pblk + offset;
		if (ret_flags && extent.e_flags & EXT2_EXTENT_FLAGS_UNINIT)
			*ret_flags |= BMAP_RET_UNINIT;
	}
got_block:
	if ((*phys_blk == 0) && (bmap_flags & BMAP_ALLOC)) {
		implied_cluster_alloc(fs, ino, inode, handle, block, &blk64);
		if (blk64)
			goto set_extent;
		retval = extent_bmap(fs, ino, inode, handle, block_buf,
				     0, block-1, 0, blocks_alloc, &blk64);
		if (retval)
			blk64 = ext2fs_find_inode_goal(fs, ino, inode, block);
		alloc_ctx.ino = ino;
		alloc_ctx.inode = inode;
		alloc_ctx.lblk = extent.e_lblk;
		alloc_ctx.flags = BLOCK_ALLOC_DATA;
		retval = ext2fs_alloc_block3(fs, blk64, block_buf, &blk64,
					     &alloc_ctx);
		if (retval)
			return retval;
		blk64 &= ~EXT2FS_CLUSTER_MASK(fs);
		blk64 += EXT2FS_CLUSTER_MASK(fs) & block;
		alloc++;
	set_extent:
		retval = ext2fs_extent_set_bmap(handle, block,
						blk64, set_flags);
		if (retval) {
			ext2fs_block_alloc_stats2(fs, blk64, -1);
			return retval;
		}
		/* Update inode after setting extent */
		retval = ext2fs_read_inode(fs, ino, inode);
		if (retval)
			return retval;
		*blocks_alloc += alloc;
		*phys_blk = blk64;
	}
	return 0;
}

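/*
 * Return non-zero if logical block 'offset' lies beyond what this
 * inode's mapping scheme (extents or indirect blocks) can address.
 */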
int ext2fs_file_block_offset_too_big(ext2_filsys fs,
				     struct ext2_inode *inode,
				     blk64_t offset)
{
	blk64_t addr_per_block, max_map_block;

	/* Kernel seems to cut us off at 4294967294 blocks */
	if (offset >= (1ULL << 32) - 1)
		return 1;

	if (inode->i_flags & EXT4_EXTENTS_FL)
		return 0;

	addr_per_block = fs->blocksize >> 2;
	max_map_block = addr_per_block;
	max_map_block += addr_per_block * addr_per_block;
	max_map_block += addr_per_block * addr_per_block * addr_per_block;
	max_map_block += 12;

	return offset >= max_map_block;
}

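/*
 * Map logical block 'block' of inode 'ino' to a physical block in
 * *phys_blk.  Depending on bmap_flags this can also allocate a missing
 * block (BMAP_ALLOC), overwrite a mapping (BMAP_SET), or zero the
 * resulting block (BMAP_ZERO).  Handles both extent-mapped and
 * indirect-mapped inodes.
 */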
errcode_t ext2fs_bmap2(ext2_filsys fs, ext2_ino_t ino, struct ext2_inode *inode,
		       char *block_buf, int bmap_flags, blk64_t block,
		       int *ret_flags, blk64_t *phys_blk)
{
	struct ext2_inode inode_buf;
	ext2_extent_handle_t handle = 0;
	blk_t addr_per_block;
	blk_t b, blk32;
	blk64_t b64;
	char *buf = 0;
	errcode_t retval = 0;
	int blocks_alloc = 0, inode_dirty = 0;
	struct blk_alloc_ctx alloc_ctx = {
		.ino	= ino,
		.inode	= inode,
		.lblk	= 0,
		.flags	= BLOCK_ALLOC_DATA,
	};

	if (!(bmap_flags & BMAP_SET))
		*phys_blk = 0;

	if (ret_flags)
		*ret_flags = 0;

	/* Read inode structure if necessary */
	if (!inode) {
		retval = ext2fs_read_inode(fs, ino, &inode_buf);
		if (retval)
			return retval;
		inode = &inode_buf;
	}
	addr_per_block = (blk_t) fs->blocksize >> 2;

	if (ext2fs_file_block_offset_too_big(fs, inode, block))
		return EXT2_ET_FILE_TOO_BIG;

	/*
	 * If an inode has inline data, that means that it doesn't have
	 * any blocks and we shouldn't map any blocks for it.
	 */
	if (inode->i_flags & EXT4_INLINE_DATA_FL)
		return EXT2_ET_INLINE_DATA_NO_BLOCK;

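	/*
	 * If the caller did not supply a scratch buffer, allocate a
	 * two-block one; the second block is used as scratch space by
	 * the block allocator in the indirect-map helpers.
	 */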
	if (!block_buf) {
		retval = ext2fs_get_array(2, fs->blocksize, &buf);
		if (retval)
			return retval;
		block_buf = buf;
	}

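	/* Extent-mapped inode: delegate to the extent tree code */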
	if (inode->i_flags & EXT4_EXTENTS_FL) {
		retval = ext2fs_extent_open2(fs, ino, inode, &handle);
		if (retval)
			goto done;
		retval = extent_bmap(fs, ino, inode, handle, block_buf,
				     bmap_flags, block, ret_flags,
				     &blocks_alloc, phys_blk);
		goto done;
	}

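	/* Direct block: mapped directly from the inode's i_block array */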
	if (block < EXT2_NDIR_BLOCKS) {
		if (bmap_flags & BMAP_SET) {
			b = *phys_blk;
			inode_bmap(inode, block) = b;
			inode_dirty++;
			goto done;
		}

		*phys_blk = inode_bmap(inode, block);
		b = block ? inode_bmap(inode, block - 1) :
			    ext2fs_find_inode_goal(fs, ino, inode, block);

		if ((*phys_blk == 0) && (bmap_flags & BMAP_ALLOC)) {
			b64 = b;
			retval = ext2fs_alloc_block3(fs, b64, block_buf, &b64,
						     &alloc_ctx);
			b = b64;
			if (retval)
				goto done;
			inode_bmap(inode, block) = b;
			blocks_alloc++;
			*phys_blk = b;
		}
		goto done;
	}

	/* Indirect block */
	block -= EXT2_NDIR_BLOCKS;
	blk32 = *phys_blk;
	if (block < addr_per_block) {
		b = inode_bmap(inode, EXT2_IND_BLOCK);
		if (!b) {
			if (!(bmap_flags & BMAP_ALLOC)) {
				if (bmap_flags & BMAP_SET)
					retval = EXT2_ET_SET_BMAP_NO_IND;
				goto done;
			}

			b = inode_bmap(inode, EXT2_IND_BLOCK-1);
			b64 = b;
			retval = ext2fs_alloc_block3(fs, b64, block_buf, &b64,
						     &alloc_ctx);
			b = b64;
			if (retval)
				goto done;
			inode_bmap(inode, EXT2_IND_BLOCK) = b;
			blocks_alloc++;
		}
		retval = block_ind_bmap(fs, bmap_flags, b, block_buf,
					&blocks_alloc, block, &blk32);
		if (retval == 0)
			*phys_blk = blk32;
		goto done;
	}

	/* Doubly indirect block */
	block -= addr_per_block;
	if (block < addr_per_block * addr_per_block) {
		b = inode_bmap(inode, EXT2_DIND_BLOCK);
		if (!b) {
			if (!(bmap_flags & BMAP_ALLOC)) {
				if (bmap_flags & BMAP_SET)
					retval = EXT2_ET_SET_BMAP_NO_IND;
				goto done;
			}

			b = inode_bmap(inode, EXT2_IND_BLOCK);
			b64 = b;
			retval = ext2fs_alloc_block3(fs, b64, block_buf, &b64,
						     &alloc_ctx);
			b = b64;
			if (retval)
				goto done;
			inode_bmap(inode, EXT2_DIND_BLOCK) = b;
			blocks_alloc++;
		}
		retval = block_dind_bmap(fs, bmap_flags, b, block_buf,
					 &blocks_alloc, block, &blk32);
		if (retval == 0)
			*phys_blk = blk32;
		goto done;
	}

	/* Triply indirect block */
	block -= addr_per_block * addr_per_block;
	b = inode_bmap(inode, EXT2_TIND_BLOCK);
	if (!b) {
		if (!(bmap_flags & BMAP_ALLOC)) {
			if (bmap_flags & BMAP_SET)
				retval = EXT2_ET_SET_BMAP_NO_IND;
			goto done;
		}

		b = inode_bmap(inode, EXT2_DIND_BLOCK);
		b64 = b;
		retval = ext2fs_alloc_block3(fs, b64, block_buf, &b64,
					     &alloc_ctx);
		b = b64;
		if (retval)
			goto done;
		inode_bmap(inode, EXT2_TIND_BLOCK) = b;
		blocks_alloc++;
	}
	retval = block_tind_bmap(fs, bmap_flags, b, block_buf,
				 &blocks_alloc, block, &blk32);
	if (retval == 0)
		*phys_blk = blk32;
done:
	if (*phys_blk && retval == 0 && (bmap_flags & BMAP_ZERO))
		retval = ext2fs_zero_blocks2(fs, *phys_blk, 1, NULL, NULL);
	if (buf)
		ext2fs_free_mem(&buf);
	if (handle)
		ext2fs_extent_free(handle);
	if ((retval == 0) && (blocks_alloc || inode_dirty)) {
		ext2fs_iblk_add_blocks(fs, inode, blocks_alloc);
		retval = ext2fs_write_inode(fs, ino, inode);
	}
	return retval;
}

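/*
 * Legacy 32-bit interface: wraps ext2fs_bmap2() and fails with
 * EOVERFLOW if the resulting physical block does not fit in a blk_t.
 */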
errcode_t ext2fs_bmap(ext2_filsys fs, ext2_ino_t ino, struct ext2_inode *inode,
		      char *block_buf, int bmap_flags, blk_t block,
		      blk_t *phys_blk)
{
	errcode_t ret;
	blk64_t ret_blk = *phys_blk;

	ret = ext2fs_bmap2(fs, ino, inode, block_buf, bmap_flags, block,
			   0, &ret_blk);
	if (ret)
		return ret;
	if (ret_blk >= ((long long) 1 << 32))
		return EOVERFLOW;
	*phys_blk = ret_blk;
	return 0;
}