• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright 2018 Google LLC
4  */
5 #include <linux/fs.h>
6 #include <linux/file.h>
7 #include <linux/types.h>
8 #include <linux/mutex.h>
9 #include <linux/mm.h>
10 #include <linux/falloc.h>
11 #include <linux/slab.h>
12 #include <linux/crc32.h>
13 #include <linux/kernel.h>
14 
15 #include "format.h"
16 
incfs_alloc_bfc(struct file * backing_file)17 struct backing_file_context *incfs_alloc_bfc(struct file *backing_file)
18 {
19 	struct backing_file_context *result = NULL;
20 
21 	result = kzalloc(sizeof(*result), GFP_NOFS);
22 	if (!result)
23 		return ERR_PTR(-ENOMEM);
24 
25 	result->bc_file = get_file(backing_file);
26 	mutex_init(&result->bc_mutex);
27 	return result;
28 }
29 
/*
 * Tear down a backing file context created by incfs_alloc_bfc().
 * Drops the backing-file reference, destroys the mutex and frees
 * the context. Safe to call with NULL.
 */
void incfs_free_bfc(struct backing_file_context *bfc)
{
	if (!bfc)
		return;

	/* fput() does not accept NULL, so the guard is required. */
	if (bfc->bc_file)
		fput(bfc->bc_file);

	mutex_destroy(&bfc->bc_mutex);
	kfree(bfc);
}
41 
/* Return the current end offset of the backing file @f. */
loff_t incfs_get_end_offset(struct file *f)
{
	/*
	 * This function assumes that file size and the end-offset
	 * are the same. This is not always true.
	 */
	return i_size_read(file_inode(f));
}
50 
51 /*
52  * Truncate the tail of the file to the given length.
53  * Used to rollback partially successful multistep writes.
54  */
static int truncate_backing_file(struct backing_file_context *bfc,
				loff_t new_end)
{
	struct inode *inode = NULL;
	struct dentry *dentry = NULL;
	loff_t old_end = 0;
	struct iattr attr;
	int result = 0;

	if (!bfc)
		return -EFAULT;

	LOCK_REQUIRED(bfc->bc_mutex);

	if (!bfc->bc_file)
		return -EFAULT;

	old_end = incfs_get_end_offset(bfc->bc_file);
	if (old_end == new_end)
		return 0;
	/* Truncation only shrinks; growing the file is append_zeros()'s job. */
	if (old_end < new_end)
		return -EINVAL;

	inode = bfc->bc_file->f_inode;
	dentry = bfc->bc_file->f_path.dentry;

	/*
	 * Only ATTR_SIZE is flagged valid, so notify_change() ignores
	 * the remaining (uninitialized) iattr fields.
	 */
	attr.ia_size = new_end;
	attr.ia_valid = ATTR_SIZE;

	/* notify_change() must be called with the inode lock held. */
	inode_lock(inode);
	result = notify_change(dentry, &attr, NULL);
	inode_unlock(inode);

	return result;
}
90 
91 /* Append a given number of zero bytes to the end of the backing file. */
static int append_zeros(struct backing_file_context *bfc, size_t len)
{
	loff_t file_size = 0;
	loff_t new_last_byte_offset = 0;
	int res = 0;

	if (!bfc)
		return -EFAULT;

	if (len == 0)
		return 0;

	LOCK_REQUIRED(bfc->bc_mutex);

	/*
	 * Allocate only one byte at the new desired end of the file.
	 * It will increase file size and create a zeroed area of
	 * a given size.
	 */
	file_size = incfs_get_end_offset(bfc->bc_file);
	new_last_byte_offset = file_size + len - 1;
	res = vfs_fallocate(bfc->bc_file, 0, new_last_byte_offset, 1);
	if (res)
		return res;

	/* Persist the new region before callers rely on its extent. */
	res = vfs_fsync_range(bfc->bc_file, file_size, file_size + len, 1);
	return res;
}
120 
/*
 * Write @count bytes from @buf to the backing file at @pos.
 * A short write is reported as -EIO. When @sync is set, the written
 * range is fsync'ed before returning.
 */
static int write_to_bf(struct backing_file_context *bfc, const void *buf,
			size_t count, loff_t pos, bool sync)
{
	ssize_t written;

	written = incfs_kwrite(bfc->bc_file, buf, count, pos);
	if (written < 0)
		return written;
	if (written != count)
		return -EIO;
	if (!sync)
		return 0;

	return vfs_fsync_range(bfc->bc_file, pos, pos + count, 1);
}
137 
/*
 * Compute the CRC32 of a metadata record. The CRC field itself and
 * h_next_md_offset are excluded: the next-offset is patched in place
 * after the record has been written (see append_md_to_backing_file),
 * so it cannot be covered by the checksum. The record is temporarily
 * modified and then restored, hence the non-const parameter.
 */
static u32 calc_md_crc(struct incfs_md_header *record)
{
	u32 result = 0;
	__le32 saved_crc = record->h_record_crc;
	__le64 saved_md_offset = record->h_next_md_offset;
	/* Clamp to the buffer size in case h_record_size is corrupted. */
	size_t record_size = min_t(size_t, le16_to_cpu(record->h_record_size),
				INCFS_MAX_METADATA_RECORD_SIZE);

	/* Zero fields that need to be excluded from CRC calculation. */
	record->h_record_crc = 0;
	record->h_next_md_offset = 0;
	result = crc32(0, record, record_size);

	/* Restore excluded fields. */
	record->h_record_crc = saved_crc;
	record->h_next_md_offset = saved_md_offset;

	return result;
}
157 
/*
 * Append a given metadata record to the backing file and update a previous
 * record to add the new record to the metadata list.
 */
static int append_md_to_backing_file(struct backing_file_context *bfc,
			      struct incfs_md_header *record)
{
	int result = 0;
	loff_t record_offset;
	loff_t file_pos;
	__le64 new_md_offset;
	size_t record_size;

	if (!bfc || !record)
		return -EFAULT;

	if (bfc->bc_last_md_record_offset < 0)
		return -EINVAL;

	LOCK_REQUIRED(bfc->bc_mutex);

	record_size = le16_to_cpu(record->h_record_size);
	file_pos = incfs_get_end_offset(bfc->bc_file);
	/* Link back to the current tail of the metadata chain. */
	record->h_prev_md_offset = cpu_to_le64(bfc->bc_last_md_record_offset);
	record->h_next_md_offset = 0;
	/* CRC is computed with crc/next-offset fields zeroed; see calc_md_crc. */
	record->h_record_crc = cpu_to_le32(calc_md_crc(record));

	/* Write the metadata record to the end of the backing file */
	record_offset = file_pos;
	new_md_offset = cpu_to_le64(record_offset);
	result = write_to_bf(bfc, record, record_size, file_pos, true);
	if (result)
		return result;

	/* Update next metadata offset in a previous record or a superblock. */
	if (bfc->bc_last_md_record_offset) {
		/*
		 * Find a place in the previous md record where new record's
		 * offset needs to be saved.
		 */
		file_pos = bfc->bc_last_md_record_offset +
			offsetof(struct incfs_md_header, h_next_md_offset);
	} else {
		/*
		 * No metadata yet, find the place to update in the
		 * file_header.
		 */
		file_pos = offsetof(struct incfs_file_header,
				    fh_first_md_offset);
	}
	result = write_to_bf(bfc, &new_md_offset, sizeof(new_md_offset),
				file_pos, true);
	if (result)
		return result;

	/* The new record is now the tail of the chain. */
	bfc->bc_last_md_record_offset = record_offset;
	return result;
}
216 
217 /*
218  * Reserve 0-filled space for the blockmap body, and append
219  * incfs_blockmap metadata record pointing to it.
220  */
int incfs_write_blockmap_to_backing_file(struct backing_file_context *bfc,
				u32 block_count, loff_t *map_base_off)
{
	struct incfs_blockmap blockmap = {};
	int result = 0;
	loff_t file_end = 0;
	/*
	 * NOTE(review): block_count * sizeof(entry) is computed in
	 * u32/size_t; presumably callers bound block_count well below
	 * overflow — confirm against callers.
	 */
	size_t map_size = block_count * sizeof(struct incfs_blockmap_entry);

	if (!bfc)
		return -EFAULT;

	blockmap.m_header.h_md_entry_type = INCFS_MD_BLOCK_MAP;
	blockmap.m_header.h_record_size = cpu_to_le16(sizeof(blockmap));
	blockmap.m_header.h_next_md_offset = cpu_to_le64(0);
	blockmap.m_block_count = cpu_to_le32(block_count);

	LOCK_REQUIRED(bfc->bc_mutex);

	/* Reserve 0-filled space for the blockmap body in the backing file. */
	file_end = incfs_get_end_offset(bfc->bc_file);
	result = append_zeros(bfc, map_size);
	if (result)
		return result;

	/* Write blockmap metadata record pointing to the body written above. */
	blockmap.m_base_offset = cpu_to_le64(file_end);
	result = append_md_to_backing_file(bfc, &blockmap.m_header);
	if (result) {
		/* Error, rollback file changes */
		truncate_backing_file(bfc, file_end);
	} else if (map_base_off) {
		*map_base_off = file_end;
	}

	return result;
}
257 
258 /*
259  * Write file attribute data and metadata record to the backing file.
260  */
int incfs_write_file_attr_to_backing_file(struct backing_file_context *bfc,
		struct mem_range value, struct incfs_file_attr *attr)
{
	struct incfs_file_attr file_attr = {};
	int result = 0;
	u32 crc = 0;
	loff_t value_offset = 0;

	if (!bfc)
		return -EFAULT;

	if (value.len > INCFS_MAX_FILE_ATTR_SIZE)
		return -ENOSPC;

	LOCK_REQUIRED(bfc->bc_mutex);

	/* CRC of the attribute payload, stored for later verification. */
	crc = crc32(0, value.data, value.len);
	/* The payload goes at the current end of the backing file. */
	value_offset = incfs_get_end_offset(bfc->bc_file);
	file_attr.fa_header.h_md_entry_type = INCFS_MD_FILE_ATTR;
	file_attr.fa_header.h_record_size = cpu_to_le16(sizeof(file_attr));
	file_attr.fa_header.h_next_md_offset = cpu_to_le64(0);
	/* len <= INCFS_MAX_FILE_ATTR_SIZE, so the u16 cast is safe here. */
	file_attr.fa_size = cpu_to_le16((u16)value.len);
	file_attr.fa_offset = cpu_to_le64(value_offset);
	file_attr.fa_crc = cpu_to_le32(crc);

	/* Write the attribute value first, then the record pointing at it. */
	result = write_to_bf(bfc, value.data, value.len, value_offset, true);
	if (result)
		return result;

	result = append_md_to_backing_file(bfc, &file_attr.fa_header);
	if (result) {
		/* Error, rollback file changes */
		truncate_backing_file(bfc, value_offset);
	} else if (attr) {
		*attr = file_attr;
	}

	return result;
}
300 
/*
 * Append signature data, additional data and a 0-filled hash tree area
 * to the backing file, then append a signature metadata record pointing
 * at them. On any failure all partial writes are rolled back by
 * truncating the file to its original end offset.
 */
int incfs_write_signature_to_backing_file(struct backing_file_context *bfc,
		u8 hash_alg, u32 tree_size,
		struct mem_range root_hash, struct mem_range add_data,
		struct mem_range sig)
{
	struct incfs_file_signature sg = {};
	int result = 0;
	loff_t rollback_pos = 0;
	loff_t tree_area_pos = 0;
	size_t alignment = 0;

	if (!bfc)
		return -EFAULT;
	if (root_hash.len > sizeof(sg.sg_root_hash))
		return -E2BIG;

	LOCK_REQUIRED(bfc->bc_mutex);

	/* Remember the original end so failures can be rolled back. */
	rollback_pos = incfs_get_end_offset(bfc->bc_file);

	sg.sg_header.h_md_entry_type = INCFS_MD_SIGNATURE;
	sg.sg_header.h_record_size = cpu_to_le16(sizeof(sg));
	sg.sg_header.h_next_md_offset = cpu_to_le64(0);
	sg.sg_hash_alg = hash_alg;
	if (sig.data != NULL && sig.len > 0) {
		loff_t pos = incfs_get_end_offset(bfc->bc_file);

		sg.sg_sig_size = cpu_to_le32(sig.len);
		sg.sg_sig_offset = cpu_to_le64(pos);

		result = write_to_bf(bfc, sig.data, sig.len, pos, false);
		if (result)
			goto err;
	}

	if (add_data.len > 0) {
		loff_t pos = incfs_get_end_offset(bfc->bc_file);

		sg.sg_add_data_size = cpu_to_le32(add_data.len);
		sg.sg_add_data_offset = cpu_to_le64(pos);

		result = write_to_bf(bfc, add_data.data,
			add_data.len, pos, false);
		if (result)
			goto err;
	}

	tree_area_pos = incfs_get_end_offset(bfc->bc_file);
	if (hash_alg && tree_size > 0) {
		if (tree_size > 5 * INCFS_DATA_FILE_BLOCK_SIZE) {
			/*
			 * If hash tree is big enough, it makes sense to
			 * align in the backing file for faster access.
			 */
			loff_t offset = round_up(tree_area_pos, PAGE_SIZE);

			alignment = offset - tree_area_pos;
			tree_area_pos = offset;
		}

		/*
		 * If root hash is not the only hash in the tree,
		 * reserve 0-filled space for the tree.
		 */
		result = append_zeros(bfc, tree_size + alignment);
		if (result)
			goto err;

		sg.sg_hash_tree_size = cpu_to_le32(tree_size);
		sg.sg_hash_tree_offset = cpu_to_le64(tree_area_pos);
	}
	/* root_hash.len was bounds-checked against sg_root_hash above. */
	memcpy(sg.sg_root_hash, root_hash.data, root_hash.len);

	/* Write a hash tree metadata record pointing to the hash tree above. */
	result = append_md_to_backing_file(bfc, &sg.sg_header);
err:
	if (result) {
		/* Error, rollback file changes */
		truncate_backing_file(bfc, rollback_pos);
	}
	return result;
}
383 
/*
 * Write a backing file header.
 * It should always be called only on an empty file.
 * incfs_file_header.fh_first_md_offset is 0 for now, but will be updated
 * once the first metadata record is added.
 */
/*
 * Write the incfs file header at offset 0 of the backing file.
 * Must only be called on an empty file; fh_first_md_offset starts
 * at 0 and is updated when the first metadata record is appended.
 */
int incfs_write_fh_to_backing_file(struct backing_file_context *bfc,
				   incfs_uuid_t *uuid, u64 file_size)
{
	loff_t pos = 0;
	struct incfs_file_header fh = {
		.fh_magic = cpu_to_le64(INCFS_MAGIC_NUMBER),
		.fh_version = cpu_to_le64(INCFS_FORMAT_CURRENT_VER),
		.fh_header_size = cpu_to_le16(sizeof(fh)),
		.fh_first_md_offset = cpu_to_le64(0),
		.fh_data_block_size = cpu_to_le16(INCFS_DATA_FILE_BLOCK_SIZE),
		.fh_file_size = cpu_to_le64(file_size),
	};

	if (!bfc)
		return -EFAULT;

	fh.fh_uuid = *uuid;

	LOCK_REQUIRED(bfc->bc_mutex);

	/* Refuse to clobber a file that already has content. */
	pos = incfs_get_end_offset(bfc->bc_file);
	if (pos != 0)
		return -EEXIST;

	return write_to_bf(bfc, &fh, sizeof(fh), pos, true);
}
416 
/* Write a given data block and update the file's blockmap to point to it. */
int incfs_write_data_block_to_backing_file(struct backing_file_context *bfc,
				     struct mem_range block, int block_index,
				     loff_t bm_base_off, u16 flags)
{
	struct incfs_blockmap_entry bm_entry = {};
	int result = 0;
	loff_t data_offset = 0;
	loff_t bm_entry_off =
		bm_base_off + sizeof(struct incfs_blockmap_entry) * block_index;

	if (!bfc)
		return -EFAULT;

	/* me_data_size is 16 bits, so the block must fit in a u16. */
	if (block.len >= (1 << 16) || block_index < 0)
		return -EINVAL;

	LOCK_REQUIRED(bfc->bc_mutex);

	data_offset = incfs_get_end_offset(bfc->bc_file);
	if (data_offset <= bm_entry_off) {
		/* Blockmap entry is beyond the file's end. It is not normal. */
		return -EINVAL;
	}

	/* Write the block data at the end of the backing file. */
	result = write_to_bf(bfc, block.data, block.len, data_offset, false);
	if (result)
		return result;

	/* Update the blockmap to point to the newly written data. */
	/* The 48-bit data offset is split into low 32 and high 16 bits. */
	bm_entry.me_data_offset_lo = cpu_to_le32((u32)data_offset);
	bm_entry.me_data_offset_hi = cpu_to_le16((u16)(data_offset >> 32));
	bm_entry.me_data_size = cpu_to_le16((u16)block.len);
	bm_entry.me_flags = cpu_to_le16(flags);

	result = write_to_bf(bfc, &bm_entry, sizeof(bm_entry),
				bm_entry_off, false);
	return result;
}
457 
/*
 * Write one hash block into the previously reserved hash-tree area.
 * The target range must lie entirely within the current file extent.
 */
int incfs_write_hash_block_to_backing_file(struct backing_file_context *bfc,
					struct mem_range block,
					int block_index, loff_t hash_area_off)
{
	loff_t pos;

	if (!bfc)
		return -EFAULT;

	LOCK_REQUIRED(bfc->bc_mutex);

	pos = hash_area_off + block_index * INCFS_DATA_FILE_BLOCK_SIZE;
	if (pos + block.len > incfs_get_end_offset(bfc->bc_file)) {
		/* Block is located beyond the file's end. It is not normal. */
		return -EINVAL;
	}

	return write_to_bf(bfc, block.data, block.len, pos, false);
}
480 
481 /* Initialize a new image in a given backing file. */
/* Initialize a new image in a given backing file. */
int incfs_make_empty_backing_file(struct backing_file_context *bfc,
				  incfs_uuid_t *uuid, u64 file_size)
{
	int result = 0;

	if (!bfc || !bfc->bc_file)
		return -EFAULT;

	/*
	 * Return directly if the lock was interrupted: the original code
	 * jumped to the common exit path and called mutex_unlock() on a
	 * mutex that was never acquired, which corrupts lock state.
	 */
	result = mutex_lock_interruptible(&bfc->bc_mutex);
	if (result)
		return result;

	/* Discard any previous content before writing the header. */
	result = truncate_backing_file(bfc, 0);
	if (result)
		goto out;

	result = incfs_write_fh_to_backing_file(bfc, uuid, file_size);
out:
	mutex_unlock(&bfc->bc_mutex);
	return result;
}
503 
/* Read a single blockmap entry; convenience wrapper around the batch API. */
int incfs_read_blockmap_entry(struct backing_file_context *bfc, int block_index,
			loff_t bm_base_off,
			struct incfs_blockmap_entry *bm_entry)
{
	return incfs_read_blockmap_entries(bfc, bm_entry, block_index, 1,
		bm_base_off);
}
511 
/*
 * Read @blocks_number consecutive blockmap entries starting at
 * @start_index from the blockmap located at @bm_base_off into @entries.
 * A short read is reported as -EIO.
 */
int incfs_read_blockmap_entries(struct backing_file_context *bfc,
		struct incfs_blockmap_entry *entries,
		int start_index, int blocks_number,
		loff_t bm_base_off)
{
	loff_t bm_entry_off =
		bm_base_off + sizeof(struct incfs_blockmap_entry) * start_index;
	const size_t bytes_to_read = sizeof(struct incfs_blockmap_entry)
					* blocks_number;
	/* ssize_t (not int): matches incfs_kread() and avoids truncation. */
	ssize_t result = 0;

	if (!bfc || !entries)
		return -EFAULT;

	/*
	 * Guard negative blocks_number too: it would wrap bytes_to_read
	 * around to a huge size_t in the multiplication above.
	 */
	if (start_index < 0 || blocks_number < 0 || bm_base_off <= 0)
		return -ENODATA;

	result = incfs_kread(bfc->bc_file, entries, bytes_to_read,
			     bm_entry_off);
	if (result < 0)
		return result;
	if (result < bytes_to_read)
		return -EIO;
	return 0;
}
537 
538 
/*
 * Read and validate the incfs file header at offset 0 of the backing
 * file. On success, optionally returns the first metadata record
 * offset, the file UUID and the logical file size via out-pointers.
 */
int incfs_read_file_header(struct backing_file_context *bfc,
			   loff_t *first_md_off, incfs_uuid_t *uuid,
			   u64 *file_size)
{
	ssize_t bytes_read = 0;
	struct incfs_file_header fh = {};

	if (!bfc || !first_md_off)
		return -EFAULT;

	LOCK_REQUIRED(bfc->bc_mutex);
	bytes_read = incfs_kread(bfc->bc_file, &fh, sizeof(fh), 0);
	if (bytes_read < 0)
		return bytes_read;

	if (bytes_read < sizeof(fh))
		return -EBADMSG;

	if (le64_to_cpu(fh.fh_magic) != INCFS_MAGIC_NUMBER)
		return -EILSEQ;

	/* Newer on-disk versions than this driver knows are rejected. */
	if (le64_to_cpu(fh.fh_version) > INCFS_FORMAT_CURRENT_VER)
		return -EILSEQ;

	if (le16_to_cpu(fh.fh_data_block_size) != INCFS_DATA_FILE_BLOCK_SIZE)
		return -EILSEQ;

	if (le16_to_cpu(fh.fh_header_size) != sizeof(fh))
		return -EILSEQ;

	/* first_md_off is non-NULL here (checked above); test kept as-is. */
	if (first_md_off)
		*first_md_off = le64_to_cpu(fh.fh_first_md_offset);
	if (uuid)
		*uuid = fh.fh_uuid;
	if (file_size)
		*file_size = le64_to_cpu(fh.fh_file_size);
	return 0;
}
577 
578 /*
579  * Read through metadata records from the backing file one by one
580  * and call provided metadata handlers.
581  */
incfs_read_next_metadata_record(struct backing_file_context * bfc,struct metadata_handler * handler)582 int incfs_read_next_metadata_record(struct backing_file_context *bfc,
583 			      struct metadata_handler *handler)
584 {
585 	const ssize_t max_md_size = INCFS_MAX_METADATA_RECORD_SIZE;
586 	ssize_t bytes_read = 0;
587 	size_t md_record_size = 0;
588 	loff_t next_record = 0;
589 	loff_t prev_record = 0;
590 	int res = 0;
591 	struct incfs_md_header *md_hdr = NULL;
592 
593 	if (!bfc || !handler)
594 		return -EFAULT;
595 
596 	LOCK_REQUIRED(bfc->bc_mutex);
597 
598 	if (handler->md_record_offset == 0)
599 		return -EPERM;
600 
601 	memset(&handler->md_buffer, 0, max_md_size);
602 	bytes_read = incfs_kread(bfc->bc_file, &handler->md_buffer,
603 				 max_md_size, handler->md_record_offset);
604 	if (bytes_read < 0)
605 		return bytes_read;
606 	if (bytes_read < sizeof(*md_hdr))
607 		return -EBADMSG;
608 
609 	md_hdr = &handler->md_buffer.md_header;
610 	next_record = le64_to_cpu(md_hdr->h_next_md_offset);
611 	prev_record = le64_to_cpu(md_hdr->h_prev_md_offset);
612 	md_record_size = le16_to_cpu(md_hdr->h_record_size);
613 
614 	if (md_record_size > max_md_size) {
615 		pr_warn("incfs: The record is too large. Size: %ld",
616 				md_record_size);
617 		return -EBADMSG;
618 	}
619 
620 	if (bytes_read < md_record_size) {
621 		pr_warn("incfs: The record hasn't been fully read.");
622 		return -EBADMSG;
623 	}
624 
625 	if (next_record <= handler->md_record_offset && next_record != 0) {
626 		pr_warn("incfs: Next record (%lld) points back in file.",
627 			next_record);
628 		return -EBADMSG;
629 	}
630 
631 	if (prev_record != handler->md_prev_record_offset) {
632 		pr_warn("incfs: Metadata chain has been corrupted.");
633 		return -EBADMSG;
634 	}
635 
636 	if (le32_to_cpu(md_hdr->h_record_crc) != calc_md_crc(md_hdr)) {
637 		pr_warn("incfs: Metadata CRC mismatch.");
638 		return -EBADMSG;
639 	}
640 
641 	switch (md_hdr->h_md_entry_type) {
642 	case INCFS_MD_NONE:
643 		break;
644 	case INCFS_MD_BLOCK_MAP:
645 		if (handler->handle_blockmap)
646 			res = handler->handle_blockmap(
647 				&handler->md_buffer.blockmap, handler);
648 		break;
649 	case INCFS_MD_FILE_ATTR:
650 		if (handler->handle_file_attr)
651 			res = handler->handle_file_attr(
652 				&handler->md_buffer.file_attr, handler);
653 		break;
654 	case INCFS_MD_SIGNATURE:
655 		if (handler->handle_signature)
656 			res = handler->handle_signature(
657 				&handler->md_buffer.signature, handler);
658 		break;
659 	default:
660 		res = -ENOTSUPP;
661 		break;
662 	}
663 
664 	if (!res) {
665 		if (next_record == 0) {
666 			/*
667 			 * Zero offset for the next record means that the last
668 			 * metadata record has just been processed.
669 			 */
670 			bfc->bc_last_md_record_offset =
671 				handler->md_record_offset;
672 		}
673 		handler->md_prev_record_offset = handler->md_record_offset;
674 		handler->md_record_offset = next_record;
675 	}
676 	return res;
677 }
678 
/*
 * Positional read helper: reads at the explicit offset @pos. The
 * by-value parameter absorbs kernel_read()'s position update, so the
 * file's own f_pos is not relied upon here.
 */
ssize_t incfs_kread(struct file *f, void *buf, size_t size, loff_t pos)
{
	return kernel_read(f, buf, size, &pos);
}
683 
/*
 * Positional write helper: writes at the explicit offset @pos. The
 * by-value parameter absorbs kernel_write()'s position update, so the
 * file's own f_pos is not relied upon here.
 */
ssize_t incfs_kwrite(struct file *f, const void *buf, size_t size, loff_t pos)
{
	return kernel_write(f, buf, size, &pos);
}
688