// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Google LLC
 */
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/kernel.h>

#include "format.h"
#include "data_mgmt.h"

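/*
 * Allocate a backing file context for the given backing file and take a
 * reference on it. The mount owner's credentials are cached so that later
 * backing file reads and writes run with the owner's permissions.
 */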
struct backing_file_context *incfs_alloc_bfc(struct mount_info *mi,
					     struct file *backing_file)
{
	struct backing_file_context *result = NULL;

	result = kzalloc(sizeof(*result), GFP_NOFS);
	if (!result)
		return ERR_PTR(-ENOMEM);

	result->bc_file = get_file(backing_file);
	result->bc_cred = mi->mi_owner;
	mutex_init(&result->bc_mutex);
	return result;
}

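/*
 * Free a backing file context: drop the reference on the backing file and
 * release the structure itself.
 */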
void incfs_free_bfc(struct backing_file_context *bfc)
{
	if (!bfc)
		return;

	if (bfc->bc_file)
		fput(bfc->bc_file);

	mutex_destroy(&bfc->bc_mutex);
	kfree(bfc);
}

static loff_t incfs_get_end_offset(struct file *f)
{
	/*
	 * This function assumes that file size and the end-offset
	 * are the same. This is not always true.
	 */
	return i_size_read(file_inode(f));
}

/*
 * Truncate the tail of the file to the given length.
 * Used to roll back partially successful multistep writes.
 */
static int truncate_backing_file(struct backing_file_context *bfc,
				loff_t new_end)
{
	struct inode *inode = NULL;
	struct dentry *dentry = NULL;
	loff_t old_end = 0;
	struct iattr attr;
	int result = 0;

	if (!bfc)
		return -EFAULT;

	LOCK_REQUIRED(bfc->bc_mutex);

	if (!bfc->bc_file)
		return -EFAULT;

	old_end = incfs_get_end_offset(bfc->bc_file);
	if (old_end == new_end)
		return 0;
	if (old_end < new_end)
		return -EINVAL;

	inode = bfc->bc_file->f_inode;
	dentry = bfc->bc_file->f_path.dentry;

	attr.ia_size = new_end;
	attr.ia_valid = ATTR_SIZE;

	inode_lock(inode);
	result = notify_change(dentry, &attr, NULL);
	inode_unlock(inode);

	return result;
}

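/*
 * Write a buffer to the backing file at the given offset with the mount
 * owner's credentials. A short write is reported as -EIO.
 */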
static int write_to_bf(struct backing_file_context *bfc, const void *buf,
			size_t count, loff_t pos)
{
	ssize_t res = incfs_kwrite(bfc, buf, count, pos);

	if (res < 0)
		return res;
	if (res != count)
		return -EIO;
	return 0;
}

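/*
 * Fallback used when fallocate is not supported by the backing filesystem:
 * extend the file by writing zero-filled buffers in 256-byte chunks.
 */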
static int append_zeros_no_fallocate(struct backing_file_context *bfc,
				     size_t file_size, size_t len)
{
	u8 buffer[256] = {};
	size_t i;

	for (i = 0; i < len; i += sizeof(buffer)) {
		int to_write = len - i > sizeof(buffer)
			? sizeof(buffer) : len - i;
		int err = write_to_bf(bfc, buffer, to_write, file_size + i);

		if (err)
			return err;
	}

	return 0;
}

/* Append a given number of zero bytes to the end of the backing file. */
static int append_zeros(struct backing_file_context *bfc, size_t len)
{
	loff_t file_size = 0;
	loff_t new_last_byte_offset = 0;
	int result;

	if (!bfc)
		return -EFAULT;

	if (len == 0)
		return 0;

	LOCK_REQUIRED(bfc->bc_mutex);

	/*
	 * Allocate only one byte at the new desired end of the file.
	 * It will increase the file size and create a zeroed area of
	 * the given size.
	 */
	file_size = incfs_get_end_offset(bfc->bc_file);
	new_last_byte_offset = file_size + len - 1;
	result = vfs_fallocate(bfc->bc_file, 0, new_last_byte_offset, 1);
	if (result != -EOPNOTSUPP)
		return result;

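	/* fallocate is not supported here; fall back to writing zeros. */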
	return append_zeros_no_fallocate(bfc, file_size, len);
}

/*
 * Append a given metadata record to the backing file and update the previous
 * record to add the new record to the metadata list.
 */
static int append_md_to_backing_file(struct backing_file_context *bfc,
				     struct incfs_md_header *record)
{
	int result = 0;
	loff_t record_offset;
	loff_t file_pos;
	__le64 new_md_offset;
	size_t record_size;

	if (!bfc || !record)
		return -EFAULT;

	if (bfc->bc_last_md_record_offset < 0)
		return -EINVAL;

	LOCK_REQUIRED(bfc->bc_mutex);

	record_size = le16_to_cpu(record->h_record_size);
	file_pos = incfs_get_end_offset(bfc->bc_file);
	record->h_next_md_offset = 0;

	/* Write the metadata record to the end of the backing file */
	record_offset = file_pos;
	new_md_offset = cpu_to_le64(record_offset);
	result = write_to_bf(bfc, record, record_size, file_pos);
	if (result)
		return result;

	/*
	 * Update the next-metadata offset in the previous record or in the
	 * file header.
	 */
	if (bfc->bc_last_md_record_offset) {
		/*
		 * Find the place in the previous md record where the new
		 * record's offset needs to be saved.
		 */
		file_pos = bfc->bc_last_md_record_offset +
			offsetof(struct incfs_md_header, h_next_md_offset);
	} else {
		/*
		 * No metadata yet, find the place to update in the
		 * file_header.
		 */
		file_pos = offsetof(struct incfs_file_header,
				    fh_first_md_offset);
	}
	result = write_to_bf(bfc, &new_md_offset, sizeof(new_md_offset),
			     file_pos);
	if (result)
		return result;

	bfc->bc_last_md_record_offset = record_offset;
	return result;
}

/*
 * Reserve 0-filled space for the blockmap body, and append an
 * incfs_blockmap metadata record pointing to it.
 */
int incfs_write_blockmap_to_backing_file(struct backing_file_context *bfc,
					 u32 block_count)
{
	struct incfs_blockmap blockmap = {};
	int result = 0;
	loff_t file_end = 0;
	size_t map_size = block_count * sizeof(struct incfs_blockmap_entry);

	if (!bfc)
		return -EFAULT;

	blockmap.m_header.h_md_entry_type = INCFS_MD_BLOCK_MAP;
	blockmap.m_header.h_record_size = cpu_to_le16(sizeof(blockmap));
	blockmap.m_header.h_next_md_offset = cpu_to_le64(0);
	blockmap.m_block_count = cpu_to_le32(block_count);

	LOCK_REQUIRED(bfc->bc_mutex);

	/* Reserve 0-filled space for the blockmap body in the backing file. */
	file_end = incfs_get_end_offset(bfc->bc_file);
	result = append_zeros(bfc, map_size);
	if (result)
		return result;

	/* Write a blockmap metadata record pointing to the body written above. */
	blockmap.m_base_offset = cpu_to_le64(file_end);
	result = append_md_to_backing_file(bfc, &blockmap.m_header);
	if (result)
		/* Error, roll back file changes */
		truncate_backing_file(bfc, file_end);

	return result;
}

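/*
 * Write the file signature blob and reserve 0-filled (optionally page-aligned)
 * space for the hash tree, then append a signature metadata record describing
 * both. On failure the backing file is truncated back to its original size.
 */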
int incfs_write_signature_to_backing_file(struct backing_file_context *bfc,
					  struct mem_range sig, u32 tree_size,
					  loff_t *tree_offset, loff_t *sig_offset)
{
	struct incfs_file_signature sg = {};
	int result = 0;
	loff_t rollback_pos = 0;
	loff_t tree_area_pos = 0;
	size_t alignment = 0;

	if (!bfc)
		return -EFAULT;

	LOCK_REQUIRED(bfc->bc_mutex);

	rollback_pos = incfs_get_end_offset(bfc->bc_file);

	sg.sg_header.h_md_entry_type = INCFS_MD_SIGNATURE;
	sg.sg_header.h_record_size = cpu_to_le16(sizeof(sg));
	sg.sg_header.h_next_md_offset = cpu_to_le64(0);
	if (sig.data != NULL && sig.len > 0) {
		sg.sg_sig_size = cpu_to_le32(sig.len);
		sg.sg_sig_offset = cpu_to_le64(rollback_pos);

		result = write_to_bf(bfc, sig.data, sig.len, rollback_pos);
		if (result)
			goto err;
	}

	tree_area_pos = incfs_get_end_offset(bfc->bc_file);
	if (tree_size > 0) {
		if (tree_size > 5 * INCFS_DATA_FILE_BLOCK_SIZE) {
			/*
			 * If the hash tree is big enough, it makes sense to
			 * align it in the backing file for faster access.
			 */
			loff_t offset = round_up(tree_area_pos, PAGE_SIZE);

			alignment = offset - tree_area_pos;
			tree_area_pos = offset;
		}

		/*
		 * If the root hash is not the only hash in the tree,
		 * reserve 0-filled space for the tree.
		 */
		result = append_zeros(bfc, tree_size + alignment);
		if (result)
			goto err;

		sg.sg_hash_tree_size = cpu_to_le32(tree_size);
		sg.sg_hash_tree_offset = cpu_to_le64(tree_area_pos);
	}

	/* Write a hash tree metadata record pointing to the hash tree above. */
	result = append_md_to_backing_file(bfc, &sg.sg_header);
err:
	if (result)
		/* Error, roll back file changes */
		truncate_backing_file(bfc, rollback_pos);
	else {
		if (tree_offset)
			*tree_offset = tree_area_pos;
		if (sig_offset)
			*sig_offset = rollback_pos;
	}

	return result;
}

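/*
 * Append a brand-new status metadata record with the given block counters.
 * On failure the partially written record is truncated away.
 */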
static int write_new_status_to_backing_file(struct backing_file_context *bfc,
					    u32 data_blocks_written,
					    u32 hash_blocks_written)
{
	int result;
	loff_t rollback_pos;
	struct incfs_status is = {
		.is_header = {
			.h_md_entry_type = INCFS_MD_STATUS,
			.h_record_size = cpu_to_le16(sizeof(is)),
		},
		.is_data_blocks_written = cpu_to_le32(data_blocks_written),
		.is_hash_blocks_written = cpu_to_le32(hash_blocks_written),
	};

	LOCK_REQUIRED(bfc->bc_mutex);
	rollback_pos = incfs_get_end_offset(bfc->bc_file);
	result = append_md_to_backing_file(bfc, &is.is_header);
	if (result)
		truncate_backing_file(bfc, rollback_pos);

	return result;
}

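/*
 * Update the block counters in an existing status record in place, or append
 * a new status record if status_offset is 0 (no record written yet).
 */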
int incfs_write_status_to_backing_file(struct backing_file_context *bfc,
				       loff_t status_offset,
				       u32 data_blocks_written,
				       u32 hash_blocks_written)
{
	struct incfs_status is;
	int result;

	if (!bfc)
		return -EFAULT;

	if (status_offset == 0)
		return write_new_status_to_backing_file(bfc,
				data_blocks_written, hash_blocks_written);

	result = incfs_kread(bfc, &is, sizeof(is), status_offset);
	if (result != sizeof(is))
		return -EIO;

	is.is_data_blocks_written = cpu_to_le32(data_blocks_written);
	is.is_hash_blocks_written = cpu_to_le32(hash_blocks_written);
	result = incfs_kwrite(bfc, &is, sizeof(is), status_offset);
	if (result != sizeof(is))
		return -EIO;

	return 0;
}

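/*
 * Append the fs-verity signature blob to the backing file, followed by a
 * verity-signature metadata record pointing at it. On success *offset is set
 * to where the blob was written; on failure the file is truncated back.
 */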
int incfs_write_verity_signature_to_backing_file(
		struct backing_file_context *bfc, struct mem_range signature,
		loff_t *offset)
{
	struct incfs_file_verity_signature vs = {};
	int result;
	loff_t pos;

	/* No verity signature section is equivalent to an empty section */
	if (signature.data == NULL || signature.len == 0)
		return 0;

	pos = incfs_get_end_offset(bfc->bc_file);

	vs = (struct incfs_file_verity_signature) {
		.vs_header = (struct incfs_md_header) {
			.h_md_entry_type = INCFS_MD_VERITY_SIGNATURE,
			.h_record_size = cpu_to_le16(sizeof(vs)),
			.h_next_md_offset = cpu_to_le64(0),
		},
		.vs_size = cpu_to_le32(signature.len),
		.vs_offset = cpu_to_le64(pos),
	};

	result = write_to_bf(bfc, signature.data, signature.len, pos);
	if (result)
		goto err;

	result = append_md_to_backing_file(bfc, &vs.vs_header);
	if (result)
		goto err;

	*offset = pos;
err:
	if (result)
		/* Error, roll back file changes */
		truncate_backing_file(bfc, pos);
	return result;
}

/*
 * Write a backing file header.
 * It should only be called on an empty file.
 * fh.fh_first_md_offset is 0 for now, but will be updated
 * once the first metadata record is added.
 */
int incfs_write_fh_to_backing_file(struct backing_file_context *bfc,
				   incfs_uuid_t *uuid, u64 file_size)
{
	struct incfs_file_header fh = {};
	loff_t file_pos = 0;

	if (!bfc)
		return -EFAULT;

	fh.fh_magic = cpu_to_le64(INCFS_MAGIC_NUMBER);
	fh.fh_version = cpu_to_le64(INCFS_FORMAT_CURRENT_VER);
	fh.fh_header_size = cpu_to_le16(sizeof(fh));
	fh.fh_first_md_offset = cpu_to_le64(0);
	fh.fh_data_block_size = cpu_to_le16(INCFS_DATA_FILE_BLOCK_SIZE);

	fh.fh_file_size = cpu_to_le64(file_size);
	fh.fh_uuid = *uuid;

	LOCK_REQUIRED(bfc->bc_mutex);

	file_pos = incfs_get_end_offset(bfc->bc_file);
	if (file_pos != 0)
		return -EEXIST;

	return write_to_bf(bfc, &fh, sizeof(fh), file_pos);
}

/*
 * Write a backing file header for a mapping file.
 * It should only be called on an empty file.
 */
int incfs_write_mapping_fh_to_backing_file(struct backing_file_context *bfc,
				incfs_uuid_t *uuid, u64 file_size, u64 offset)
{
	struct incfs_file_header fh = {};
	loff_t file_pos = 0;

	if (!bfc)
		return -EFAULT;

	fh.fh_magic = cpu_to_le64(INCFS_MAGIC_NUMBER);
	fh.fh_version = cpu_to_le64(INCFS_FORMAT_CURRENT_VER);
	fh.fh_header_size = cpu_to_le16(sizeof(fh));
	fh.fh_original_offset = cpu_to_le64(offset);
	fh.fh_data_block_size = cpu_to_le16(INCFS_DATA_FILE_BLOCK_SIZE);

	fh.fh_mapped_file_size = cpu_to_le64(file_size);
	fh.fh_original_uuid = *uuid;
	fh.fh_flags = cpu_to_le32(INCFS_FILE_MAPPED);

	LOCK_REQUIRED(bfc->bc_mutex);

	file_pos = incfs_get_end_offset(bfc->bc_file);
	if (file_pos != 0)
		return -EEXIST;

	return write_to_bf(bfc, &fh, sizeof(fh), file_pos);
}

/* Write a given data block and update the file's blockmap to point to it. */
int incfs_write_data_block_to_backing_file(struct backing_file_context *bfc,
				     struct mem_range block, int block_index,
				     loff_t bm_base_off, u16 flags)
{
	struct incfs_blockmap_entry bm_entry = {};
	int result = 0;
	loff_t data_offset = 0;
	loff_t bm_entry_off =
		bm_base_off + sizeof(struct incfs_blockmap_entry) * block_index;

	if (!bfc)
		return -EFAULT;

	if (block.len >= (1 << 16) || block_index < 0)
		return -EINVAL;

	LOCK_REQUIRED(bfc->bc_mutex);

	data_offset = incfs_get_end_offset(bfc->bc_file);
	if (data_offset <= bm_entry_off) {
		/* The blockmap entry is beyond the file's end. Not normal. */
		return -EINVAL;
	}

	/* Write the block data at the end of the backing file. */
	result = write_to_bf(bfc, block.data, block.len, data_offset);
	if (result)
		return result;

	/* Update the blockmap to point to the newly written data. */
	bm_entry.me_data_offset_lo = cpu_to_le32((u32)data_offset);
	bm_entry.me_data_offset_hi = cpu_to_le16((u16)(data_offset >> 32));
	bm_entry.me_data_size = cpu_to_le16((u16)block.len);
	bm_entry.me_flags = cpu_to_le16(flags);

	return write_to_bf(bfc, &bm_entry, sizeof(bm_entry),
			   bm_entry_off);
}

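/*
 * Write a hash tree block into the space reserved at hash_area_off and update
 * its blockmap entry. Hash block entries are stored in the blockmap after the
 * entries for all data blocks, hence the get_blocks_count_for_size() offset.
 */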
int incfs_write_hash_block_to_backing_file(struct backing_file_context *bfc,
					   struct mem_range block,
					   int block_index,
					   loff_t hash_area_off,
					   loff_t bm_base_off,
					   loff_t file_size)
{
	struct incfs_blockmap_entry bm_entry = {};
	int result;
	loff_t data_offset = 0;
	loff_t file_end = 0;
	loff_t bm_entry_off =
		bm_base_off +
		sizeof(struct incfs_blockmap_entry) *
			(block_index + get_blocks_count_for_size(file_size));

	if (!bfc)
		return -EFAULT;

	LOCK_REQUIRED(bfc->bc_mutex);

	data_offset = hash_area_off + block_index * INCFS_DATA_FILE_BLOCK_SIZE;
	file_end = incfs_get_end_offset(bfc->bc_file);
	if (data_offset + block.len > file_end) {
		/* The block is located beyond the file's end. Not normal. */
		return -EINVAL;
	}

	result = write_to_bf(bfc, block.data, block.len, data_offset);
	if (result)
		return result;

	bm_entry.me_data_offset_lo = cpu_to_le32((u32)data_offset);
	bm_entry.me_data_offset_hi = cpu_to_le16((u16)(data_offset >> 32));
	bm_entry.me_data_size = cpu_to_le16(INCFS_DATA_FILE_BLOCK_SIZE);

	return write_to_bf(bfc, &bm_entry, sizeof(bm_entry), bm_entry_off);
}

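/*
 * Read a single blockmap entry for the given block index. A partial or
 * failed read is reported as an error.
 */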
int incfs_read_blockmap_entry(struct backing_file_context *bfc, int block_index,
			      loff_t bm_base_off,
			      struct incfs_blockmap_entry *bm_entry)
{
	int error = incfs_read_blockmap_entries(bfc, bm_entry, block_index, 1,
						bm_base_off);

	if (error < 0)
		return error;

	if (error == 0)
		return -EIO;

	if (error != 1)
		return -EFAULT;

	return 0;
}

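/*
 * Read blocks_number consecutive blockmap entries starting at start_index.
 * Returns the number of entries actually read, or a negative error code.
 */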
int incfs_read_blockmap_entries(struct backing_file_context *bfc,
		struct incfs_blockmap_entry *entries,
		int start_index, int blocks_number,
		loff_t bm_base_off)
{
	loff_t bm_entry_off =
		bm_base_off + sizeof(struct incfs_blockmap_entry) * start_index;
	const size_t bytes_to_read = sizeof(struct incfs_blockmap_entry)
					* blocks_number;
	int result = 0;

	if (!bfc || !entries)
		return -EFAULT;

	if (start_index < 0 || bm_base_off <= 0)
		return -ENODATA;

	result = incfs_kread(bfc, entries, bytes_to_read, bm_entry_off);
	if (result < 0)
		return result;
	return result / sizeof(*entries);
}

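/*
 * Read and validate the backing file header (magic, version, block size and
 * header size), then return the requested fields through the out pointers.
 */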
int incfs_read_file_header(struct backing_file_context *bfc,
			   loff_t *first_md_off, incfs_uuid_t *uuid,
			   u64 *file_size, u32 *flags)
{
	ssize_t bytes_read = 0;
	struct incfs_file_header fh = {};

	if (!bfc || !first_md_off)
		return -EFAULT;

	bytes_read = incfs_kread(bfc, &fh, sizeof(fh), 0);
	if (bytes_read < 0)
		return bytes_read;

	if (bytes_read < sizeof(fh))
		return -EBADMSG;

	if (le64_to_cpu(fh.fh_magic) != INCFS_MAGIC_NUMBER)
		return -EILSEQ;

	if (le64_to_cpu(fh.fh_version) > INCFS_FORMAT_CURRENT_VER)
		return -EILSEQ;

	if (le16_to_cpu(fh.fh_data_block_size) != INCFS_DATA_FILE_BLOCK_SIZE)
		return -EILSEQ;

	if (le16_to_cpu(fh.fh_header_size) != sizeof(fh))
		return -EILSEQ;

	if (first_md_off)
		*first_md_off = le64_to_cpu(fh.fh_first_md_offset);
	if (uuid)
		*uuid = fh.fh_uuid;
	if (file_size)
		*file_size = le64_to_cpu(fh.fh_file_size);
	if (flags)
		*flags = le32_to_cpu(fh.fh_flags);
	return 0;
}

/*
 * Read through metadata records from the backing file one by one
 * and call the provided metadata handlers.
 */
int incfs_read_next_metadata_record(struct backing_file_context *bfc,
				    struct metadata_handler *handler)
{
	const ssize_t max_md_size = INCFS_MAX_METADATA_RECORD_SIZE;
	ssize_t bytes_read = 0;
	size_t md_record_size = 0;
	loff_t next_record = 0;
	int res = 0;
	struct incfs_md_header *md_hdr = NULL;

	if (!bfc || !handler)
		return -EFAULT;

	if (handler->md_record_offset == 0)
		return -EPERM;

	memset(&handler->md_buffer, 0, max_md_size);
	bytes_read = incfs_kread(bfc, &handler->md_buffer, max_md_size,
				 handler->md_record_offset);
	if (bytes_read < 0)
		return bytes_read;
	if (bytes_read < sizeof(*md_hdr))
		return -EBADMSG;

	md_hdr = &handler->md_buffer.md_header;
	next_record = le64_to_cpu(md_hdr->h_next_md_offset);
	md_record_size = le16_to_cpu(md_hdr->h_record_size);

	if (md_record_size > max_md_size) {
		pr_warn("incfs: The record is too large. Size: %zu",
			md_record_size);
		return -EBADMSG;
	}

	if (bytes_read < md_record_size) {
		pr_warn("incfs: The record hasn't been fully read.");
		return -EBADMSG;
	}

	if (next_record <= handler->md_record_offset && next_record != 0) {
		pr_warn("incfs: Next record (%lld) points back in file.",
			next_record);
		return -EBADMSG;
	}

	switch (md_hdr->h_md_entry_type) {
	case INCFS_MD_NONE:
		break;
	case INCFS_MD_BLOCK_MAP:
		if (handler->handle_blockmap)
			res = handler->handle_blockmap(
				&handler->md_buffer.blockmap, handler);
		break;
	case INCFS_MD_FILE_ATTR:
		/*
		 * File attrs are no longer supported; ignore this section
		 * for compatibility.
		 */
		break;
	case INCFS_MD_SIGNATURE:
		if (handler->handle_signature)
			res = handler->handle_signature(
				&handler->md_buffer.signature, handler);
		break;
	case INCFS_MD_STATUS:
		if (handler->handle_status)
			res = handler->handle_status(
				&handler->md_buffer.status, handler);
		break;
	case INCFS_MD_VERITY_SIGNATURE:
		if (handler->handle_verity_signature)
			res = handler->handle_verity_signature(
				&handler->md_buffer.verity_signature, handler);
		break;
	default:
		res = -ENOTSUPP;
		break;
	}

	if (!res) {
		if (next_record == 0) {
			/*
			 * Zero offset for the next record means that the last
			 * metadata record has just been processed.
			 */
			bfc->bc_last_md_record_offset =
				handler->md_record_offset;
		}
		handler->md_prev_record_offset = handler->md_record_offset;
		handler->md_record_offset = next_record;
	}
	return res;
}

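/*
 * Read from the backing file at the given position while temporarily
 * assuming the mount owner's credentials.
 */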
ssize_t incfs_kread(struct backing_file_context *bfc, void *buf, size_t size,
		    loff_t pos)
{
	const struct cred *old_cred = override_creds(bfc->bc_cred);
	int ret = kernel_read(bfc->bc_file, buf, size, &pos);

	revert_creds(old_cred);
	return ret;
}

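/*
 * Write to the backing file at the given position while temporarily
 * assuming the mount owner's credentials.
 */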
ssize_t incfs_kwrite(struct backing_file_context *bfc, const void *buf,
		     size_t size, loff_t pos)
{
	const struct cred *old_cred = override_creds(bfc->bc_cred);
	int ret = kernel_write(bfc->bc_file, buf, size, &pos);

	revert_creds(old_cred);
	return ret;
}