/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ext4_utils.h"
#include "ext4.h"
#include "ext4_extents.h"
#include "backed_block.h"
#include "indirect.h"
#include "allocate.h"

#include <stdlib.h>
#include <stdio.h>

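/* Block mapping for inodes that do not use extents. ext4 keeps the classic
   ext2/ext3 indirect scheme: i_block[0..11] point directly at data blocks,
   i_block[12] at a single indirect block, i_block[13] at a doubly indirect
   block and i_block[14] at a triply indirect block. Each indirect block holds
   block_size / sizeof(u32) block pointers, which is what
   aux_info.blocks_per_ind (and the derived blocks_per_dind/blocks_per_tind)
   are expected to describe. */
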
/* Creates data buffers for the first backing_len bytes of a block allocation
   and queues them to be written */
static u8 *create_backing(struct block_allocation *alloc,
	unsigned long backing_len)
{
	if (DIV_ROUND_UP(backing_len, info.block_size) > EXT4_NDIR_BLOCKS)
		critical_error("indirect backing larger than %d blocks", EXT4_NDIR_BLOCKS);

	u8 *data = calloc(backing_len, 1);
	if (!data)
		critical_error_errno("calloc");

	u8 *ptr = data;
	for (; alloc != NULL && backing_len > 0; get_next_region(alloc)) {
		u32 region_block;
		u32 region_len;
		u32 len;
		get_region(alloc, &region_block, &region_len);

		len = min(region_len * info.block_size, backing_len);

		queue_data_block(ptr, len, region_block);
		ptr += len;
		backing_len -= len;
	}

	return data;
}

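/* Reserves one out-of-band block to hold an indirect block, then advances
   past the len data blocks that the indirect block will map */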
static void reserve_indirect_block(struct block_allocation *alloc, int len)
{
	if (reserve_oob_blocks(alloc, 1)) {
		error("failed to reserve oob block");
		return;
	}

	if (advance_blocks(alloc, len)) {
		error("failed to advance %d blocks", len);
		return;
	}
}

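/* Reserves one out-of-band block for a doubly indirect block, plus one
   out-of-band block for each indirect block needed to map len data blocks */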
static void reserve_dindirect_block(struct block_allocation *alloc, int len)
{
	if (reserve_oob_blocks(alloc, 1)) {
		error("failed to reserve oob block");
		return;
	}

	while (len > 0) {
		int ind_block_len = min((int)aux_info.blocks_per_ind, len);

		reserve_indirect_block(alloc, ind_block_len);

		len -= ind_block_len;
	}
}

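/* Reserves one out-of-band block for a triply indirect block, plus the
   out-of-band blocks for the doubly indirect and indirect blocks beneath it */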
static void reserve_tindirect_block(struct block_allocation *alloc, int len)
{
	if (reserve_oob_blocks(alloc, 1)) {
		error("failed to reserve oob block");
		return;
	}

	while (len > 0) {
		int dind_block_len = min((int)aux_info.blocks_per_dind, len);

		reserve_dindirect_block(alloc, dind_block_len);

		len -= dind_block_len;
	}
}

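/* Fills an in-memory indirect block with the physical block numbers of the
   next len data blocks of the allocation (without advancing the allocation) */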
static void fill_indirect_block(u32 *ind_block, int len, struct block_allocation *alloc)
{
	int i;
	for (i = 0; i < len; i++) {
		ind_block[i] = get_block(alloc, i);
	}
}

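/* Fills a doubly indirect block: for each group of blocks_per_ind data
   blocks, takes one reserved out-of-band block, queues it as an indirect
   block and records it in the doubly indirect block */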
static void fill_dindirect_block(u32 *dind_block, int len, struct block_allocation *alloc)
{
	int i;
	u32 ind_block;

	for (i = 0; len > 0; i++) {
		ind_block = get_oob_block(alloc, 0);
		if (advance_oob_blocks(alloc, 1)) {
			error("failed to advance oob block");
			return;
		}

		dind_block[i] = ind_block;

		u32 *ind_block_data = calloc(info.block_size, 1);
		if (!ind_block_data)
			critical_error_errno("calloc");
		queue_data_block((u8 *)ind_block_data, info.block_size, ind_block);
		int ind_block_len = min((int)aux_info.blocks_per_ind, len);

		fill_indirect_block(ind_block_data, ind_block_len, alloc);

		if (advance_blocks(alloc, ind_block_len)) {
			error("failed to advance %d blocks", ind_block_len);
			return;
		}

		len -= ind_block_len;
	}
}

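/* Fills a triply indirect block: for each group of blocks_per_dind data
   blocks, takes one reserved out-of-band block, queues it as a doubly
   indirect block and fills it in turn */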
static void fill_tindirect_block(u32 *tind_block, int len, struct block_allocation *alloc)
{
	int i;
	u32 dind_block;

	for (i = 0; len > 0; i++) {
		dind_block = get_oob_block(alloc, 0);
		if (advance_oob_blocks(alloc, 1)) {
			error("failed to advance oob block");
			return;
		}

		tind_block[i] = dind_block;

		u32 *dind_block_data = calloc(info.block_size, 1);
		if (!dind_block_data)
			critical_error_errno("calloc");
		queue_data_block((u8 *)dind_block_data, info.block_size, dind_block);
		int dind_block_len = min((int)aux_info.blocks_per_dind, len);

		fill_dindirect_block(dind_block_data, dind_block_len, alloc);

		len -= dind_block_len;
	}
}

/* Given an allocation, attaches as many blocks as possible to the inode's
   direct block pointers; the number of blocks left to attach is passed back
   through block_len */
static int inode_attach_direct_blocks(struct ext4_inode *inode,
	struct block_allocation *alloc, u32 *block_len)
{
	int len = min(*block_len, EXT4_NDIR_BLOCKS);
	int i;

	for (i = 0; i < len; i++) {
		inode->i_block[i] = get_block(alloc, i);
	}

	if (advance_blocks(alloc, len)) {
		error("failed to advance %d blocks", len);
		return -1;
	}

	*block_len -= len;
	return 0;
}

/* Given an allocation, attaches as many blocks as possible to the single
   indirect block; the number of blocks left to attach is passed back through
   block_len.
   Assumes that the blocks necessary to hold the indirect block were included
   as part of the allocation */
static int inode_attach_indirect_blocks(struct ext4_inode *inode,
	struct block_allocation *alloc, u32 *block_len)
{
	int len = min(*block_len, aux_info.blocks_per_ind);

	int ind_block = get_oob_block(alloc, 0);
	inode->i_block[EXT4_IND_BLOCK] = ind_block;

	if (advance_oob_blocks(alloc, 1)) {
		error("failed to advance oob block");
		return -1;
	}

	u32 *ind_block_data = calloc(info.block_size, 1);
	if (!ind_block_data)
		critical_error_errno("calloc");
	queue_data_block((u8 *)ind_block_data, info.block_size, ind_block);

	fill_indirect_block(ind_block_data, len, alloc);

	if (advance_blocks(alloc, len)) {
		error("failed to advance %d blocks", len);
		return -1;
	}

	*block_len -= len;
	return 0;
}

/* Given an allocation, attaches as many blocks as possible to the doubly
   indirect block; the number of blocks left to attach is passed back through
   block_len.
   Assumes that the blocks necessary to hold the indirect and doubly indirect
   blocks were included as part of the allocation */
static int inode_attach_dindirect_blocks(struct ext4_inode *inode,
	struct block_allocation *alloc, u32 *block_len)
{
	int len = min(*block_len, aux_info.blocks_per_dind);

	int dind_block = get_oob_block(alloc, 0);
	inode->i_block[EXT4_DIND_BLOCK] = dind_block;

	if (advance_oob_blocks(alloc, 1)) {
		error("failed to advance oob block");
		return -1;
	}

	u32 *dind_block_data = calloc(info.block_size, 1);
	if (!dind_block_data)
		critical_error_errno("calloc");
	queue_data_block((u8 *)dind_block_data, info.block_size, dind_block);

	fill_dindirect_block(dind_block_data, len, alloc);

	if (advance_blocks(alloc, len)) {
		error("failed to advance %d blocks", len);
		return -1;
	}

	*block_len -= len;
	return 0;
}

/* Given an allocation, attaches as many blocks as possible to the triply
   indirect block; the number of blocks left to attach is passed back through
   block_len.
   Assumes that the blocks necessary to hold the indirect, doubly indirect and
   triply indirect blocks were included as part of the allocation */
static int inode_attach_tindirect_blocks(struct ext4_inode *inode,
	struct block_allocation *alloc, u32 *block_len)
{
	int len = min(*block_len, aux_info.blocks_per_tind);

	int tind_block = get_oob_block(alloc, 0);
	inode->i_block[EXT4_TIND_BLOCK] = tind_block;

	if (advance_oob_blocks(alloc, 1)) {
		error("failed to advance oob block");
		return -1;
	}

	u32 *tind_block_data = calloc(info.block_size, 1);
	if (!tind_block_data)
		critical_error_errno("calloc");
	queue_data_block((u8 *)tind_block_data, info.block_size, tind_block);

	fill_tindirect_block(tind_block_data, len, alloc);

	if (advance_blocks(alloc, len)) {
		error("failed to advance %d blocks", len);
		return -1;
	}

	*block_len -= len;
	return 0;
}

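/* Walks a fresh allocation and moves the blocks that will be needed for
   indirect, doubly indirect and triply indirect metadata out of band, so the
   remaining in-band blocks are exactly the data blocks */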
static void reserve_all_indirect_blocks(struct block_allocation *alloc, u32 len)
{
	if (len <= EXT4_NDIR_BLOCKS)
		return;

	len -= EXT4_NDIR_BLOCKS;
	advance_blocks(alloc, EXT4_NDIR_BLOCKS);

	u32 ind_block_len = min(aux_info.blocks_per_ind, len);
	reserve_indirect_block(alloc, ind_block_len);

	len -= ind_block_len;
	if (len == 0)
		return;

	u32 dind_block_len = min(aux_info.blocks_per_dind, len);
	reserve_dindirect_block(alloc, dind_block_len);

	len -= dind_block_len;
	if (len == 0)
		return;

	u32 tind_block_len = min(aux_info.blocks_per_tind, len);
	reserve_tindirect_block(alloc, tind_block_len);

	len -= tind_block_len;
	if (len == 0)
		return;

	error("%d blocks remaining", len);
}

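/* Returns the number of metadata blocks (indirect, doubly indirect and
   triply indirect) needed to map a file of len data blocks.
   For example, assuming 4096-byte blocks (1024 pointers per indirect block),
   a 2000-block file leaves 1988 blocks after the 12 direct pointers: it
   needs DIV_ROUND_UP(1988, 1024) = 2 indirect blocks plus 1 doubly indirect
   block, for 3 metadata blocks in total. */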
static u32 indirect_blocks_needed(u32 len)
{
	u32 ind = 0;

	if (len <= EXT4_NDIR_BLOCKS)
		return ind;

	len -= EXT4_NDIR_BLOCKS;

	/* We will need an indirect block for the rest of the blocks */
	ind += DIV_ROUND_UP(len, aux_info.blocks_per_ind);

	if (len <= aux_info.blocks_per_ind)
		return ind;

	len -= aux_info.blocks_per_ind;

	ind += DIV_ROUND_UP(len, aux_info.blocks_per_dind);

	if (len <= aux_info.blocks_per_dind)
		return ind;

	len -= aux_info.blocks_per_dind;

	ind += DIV_ROUND_UP(len, aux_info.blocks_per_tind);

	if (len <= aux_info.blocks_per_tind)
		return ind;

	critical_error("request too large");
	return 0;
}

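/* Attaches block_len blocks from an already-reserved allocation to an inode,
   cascading from the direct pointers through the indirect, doubly indirect
   and triply indirect blocks as needed, then rewinds the allocation */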
static int do_inode_attach_indirect(struct ext4_inode *inode,
	struct block_allocation *alloc, u32 block_len)
{
	u32 count = block_len;

	if (inode_attach_direct_blocks(inode, alloc, &count)) {
		error("failed to attach direct blocks to inode");
		return -1;
	}

	if (count > 0) {
		if (inode_attach_indirect_blocks(inode, alloc, &count)) {
			error("failed to attach indirect blocks to inode");
			return -1;
		}
	}

	if (count > 0) {
		if (inode_attach_dindirect_blocks(inode, alloc, &count)) {
			error("failed to attach dindirect blocks to inode");
			return -1;
		}
	}

	if (count > 0) {
		if (inode_attach_tindirect_blocks(inode, alloc, &count)) {
			error("failed to attach tindirect blocks to inode");
			return -1;
		}
	}

	if (count) {
		error("blocks left after triply-indirect allocation");
		return -1;
	}

	rewind_alloc(alloc);

	return 0;
}

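/* Allocates block_len data blocks plus the metadata blocks needed to map
   them; the inode itself is not modified here */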
static struct block_allocation *do_inode_allocate_indirect(
	struct ext4_inode *inode, u32 block_len)
{
	u32 indirect_len = indirect_blocks_needed(block_len);

	struct block_allocation *alloc = allocate_blocks(block_len + indirect_len);

	if (alloc == NULL) {
		error("Failed to allocate %d blocks", block_len + indirect_len);
		return NULL;
	}

	return alloc;
}

/* Allocates enough blocks to hold len bytes and connects them to an inode */
void inode_allocate_indirect(struct ext4_inode *inode, unsigned long len)
{
	struct block_allocation *alloc;
	u32 block_len = DIV_ROUND_UP(len, info.block_size);
	u32 indirect_len = indirect_blocks_needed(block_len);

	alloc = do_inode_allocate_indirect(inode, block_len);
	if (alloc == NULL) {
		error("failed to allocate blocks for %lu bytes", len);
		return;
	}

	reserve_all_indirect_blocks(alloc, block_len);
	rewind_alloc(alloc);

	if (do_inode_attach_indirect(inode, alloc, block_len))
		error("failed to attach blocks to indirect inode");

	inode->i_flags = 0;
	/* i_blocks is in 512-byte units and includes the indirect blocks */
	inode->i_blocks_lo = (block_len + indirect_len) * info.block_size / 512;
	inode->i_size_lo = len;

	free_alloc(alloc);
}

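/* Attaches an allocation of reserved group descriptor blocks to the resize
   inode. The blocks are linked through the inode's doubly indirect pointer,
   which is the layout the kernel's online-resize code expects */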
void inode_attach_resize(struct ext4_inode *inode,
	struct block_allocation *alloc)
{
	u32 block_len = block_allocation_len(alloc);
	u32 superblocks = block_len / info.bg_desc_reserve_blocks;
	u32 i, j;
	u64 blocks;
	u64 size;

	if (block_len % info.bg_desc_reserve_blocks)
		critical_error("reserved blocks not a multiple of %d",
				info.bg_desc_reserve_blocks);

	append_oob_allocation(alloc, 1);
	u32 dind_block = get_oob_block(alloc, 0);

	u32 *dind_block_data = calloc(info.block_size, 1);
	if (!dind_block_data)
		critical_error_errno("calloc");
	queue_data_block((u8 *)dind_block_data, info.block_size, dind_block);

	u32 *ind_block_data = calloc(info.block_size, info.bg_desc_reserve_blocks);
	if (!ind_block_data)
		critical_error_errno("calloc");
	queue_data_block((u8 *)ind_block_data,
			info.block_size * info.bg_desc_reserve_blocks,
			get_block(alloc, 0));

	for (i = 0; i < info.bg_desc_reserve_blocks; i++) {
		int r = (i - aux_info.bg_desc_blocks) % info.bg_desc_reserve_blocks;
		if (r < 0)
			r += info.bg_desc_reserve_blocks;

		dind_block_data[i] = get_block(alloc, r);

		for (j = 1; j < superblocks; j++) {
			u32 b = j * info.bg_desc_reserve_blocks + r;
			ind_block_data[r * aux_info.blocks_per_ind + j - 1] = get_block(alloc, b);
		}
	}

	u32 last_block = EXT4_NDIR_BLOCKS + aux_info.blocks_per_ind +
			aux_info.blocks_per_ind * (info.bg_desc_reserve_blocks - 1) +
			superblocks - 2;

	blocks = ((u64)block_len + 1) * info.block_size / 512;
	size = (u64)last_block * info.block_size;

	inode->i_block[EXT4_DIND_BLOCK] = dind_block;
	inode->i_flags = 0;
	inode->i_blocks_lo = blocks;
	inode->osd2.linux2.l_i_blocks_high = blocks >> 32;
	inode->i_size_lo = size;
	inode->i_size_high = size >> 32;
}

/* Allocates enough blocks to hold len bytes, with backing_len bytes in a data
   buffer, and connects them to an inode. Returns a pointer to the data
   buffer. */
u8 *inode_allocate_data_indirect(struct ext4_inode *inode, unsigned long len,
	unsigned long backing_len)
{
	struct block_allocation *alloc;
	u32 block_len = DIV_ROUND_UP(len, info.block_size);
	u32 indirect_len = indirect_blocks_needed(block_len);
	u8 *data = NULL;

	alloc = do_inode_allocate_indirect(inode, block_len);
	if (alloc == NULL) {
		error("failed to allocate blocks for %lu bytes", len);
		return NULL;
	}

	if (backing_len) {
		data = create_backing(alloc, backing_len);
		if (!data)
			error("failed to create backing for %lu bytes", backing_len);
	}

	/* Attach the allocated blocks to the inode, mirroring
	   inode_allocate_indirect */
	rewind_alloc(alloc);
	reserve_all_indirect_blocks(alloc, block_len);
	rewind_alloc(alloc);

	if (do_inode_attach_indirect(inode, alloc, block_len))
		error("failed to attach blocks to indirect inode");

	inode->i_flags = 0;
	inode->i_blocks_lo = (block_len + indirect_len) * info.block_size / 512;
	inode->i_size_lo = len;

	free_alloc(alloc);

	return data;
}