// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Google LLC
 */
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/kernel.h>

#include "format.h"
#include "data_mgmt.h"

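/*
 * Allocate a backing_file_context for the given backing file, taking an
 * extra reference on the file and recording the mount owner's credentials
 * for later reads and writes.
 */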
struct backing_file_context *incfs_alloc_bfc(struct mount_info *mi,
					     struct file *backing_file)
{
	struct backing_file_context *result = NULL;

	result = kzalloc(sizeof(*result), GFP_NOFS);
	if (!result)
		return ERR_PTR(-ENOMEM);

	result->bc_file = get_file(backing_file);
	result->bc_cred = mi->mi_owner;
	mutex_init(&result->bc_mutex);
	return result;
}

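/* Release the backing file reference and free the context. */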
void incfs_free_bfc(struct backing_file_context *bfc)
{
	if (!bfc)
		return;

	if (bfc->bc_file)
		fput(bfc->bc_file);

	mutex_destroy(&bfc->bc_mutex);
	kfree(bfc);
}

loff_t incfs_get_end_offset(struct file *f)
{
	/*
	 * This function assumes that file size and the end-offset
	 * are the same. This is not always true.
	 */
	return i_size_read(file_inode(f));
}

/*
 * Truncate the tail of the file to the given length.
 * Used to roll back partially successful multistep writes.
 */
static int truncate_backing_file(struct backing_file_context *bfc,
				loff_t new_end)
{
	struct inode *inode = NULL;
	struct dentry *dentry = NULL;
	loff_t old_end = 0;
	struct iattr attr;
	int result = 0;

	if (!bfc)
		return -EFAULT;

	LOCK_REQUIRED(bfc->bc_mutex);

	if (!bfc->bc_file)
		return -EFAULT;

	old_end = incfs_get_end_offset(bfc->bc_file);
	if (old_end == new_end)
		return 0;
	if (old_end < new_end)
		return -EINVAL;

	inode = bfc->bc_file->f_inode;
	dentry = bfc->bc_file->f_path.dentry;

	attr.ia_size = new_end;
	attr.ia_valid = ATTR_SIZE;

	inode_lock(inode);
	result = notify_change(dentry, &attr, NULL);
	inode_unlock(inode);

	return result;
}

/* Append a given number of zero bytes to the end of the backing file. */
static int append_zeros(struct backing_file_context *bfc, size_t len)
{
	loff_t file_size = 0;
	loff_t new_last_byte_offset = 0;

	if (!bfc)
		return -EFAULT;

	if (len == 0)
		return 0;

	LOCK_REQUIRED(bfc->bc_mutex);

	/*
	 * Allocate only one byte at the new desired end of the file.
	 * It will increase the file size and create a zeroed area of
	 * the given size.
	 */
	file_size = incfs_get_end_offset(bfc->bc_file);
	new_last_byte_offset = file_size + len - 1;
	return vfs_fallocate(bfc->bc_file, 0, new_last_byte_offset, 1);
}

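/*
 * Write a buffer to the backing file at the given position.
 * Returns 0 on a complete write, -EIO on a short write, or a
 * negative error code from the underlying write.
 */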
static int write_to_bf(struct backing_file_context *bfc, const void *buf,
			size_t count, loff_t pos)
{
	ssize_t res = incfs_kwrite(bfc, buf, count, pos);

	if (res < 0)
		return res;
	if (res != count)
		return -EIO;
	return 0;
}

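/*
 * Calculate the CRC32 of a metadata record, excluding the CRC and
 * next-record-offset fields, which are zeroed for the calculation and
 * then restored.
 */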
static u32 calc_md_crc(struct incfs_md_header *record)
{
	u32 result = 0;
	__le32 saved_crc = record->h_record_crc;
	__le64 saved_md_offset = record->h_next_md_offset;
	size_t record_size = min_t(size_t, le16_to_cpu(record->h_record_size),
				INCFS_MAX_METADATA_RECORD_SIZE);

	/* Zero fields that need to be excluded from the CRC calculation. */
	record->h_record_crc = 0;
	record->h_next_md_offset = 0;
	result = crc32(0, record, record_size);

	/* Restore the excluded fields. */
	record->h_record_crc = saved_crc;
	record->h_next_md_offset = saved_md_offset;

	return result;
}

/*
 * Append a given metadata record to the backing file and update the previous
 * record to add the new record to the metadata list.
 */
static int append_md_to_backing_file(struct backing_file_context *bfc,
			      struct incfs_md_header *record)
{
	int result = 0;
	loff_t record_offset;
	loff_t file_pos;
	__le64 new_md_offset;
	size_t record_size;

	if (!bfc || !record)
		return -EFAULT;

	if (bfc->bc_last_md_record_offset < 0)
		return -EINVAL;

	LOCK_REQUIRED(bfc->bc_mutex);

	record_size = le16_to_cpu(record->h_record_size);
	file_pos = incfs_get_end_offset(bfc->bc_file);
	record->h_prev_md_offset = cpu_to_le64(bfc->bc_last_md_record_offset);
	record->h_next_md_offset = 0;
	record->h_record_crc = cpu_to_le32(calc_md_crc(record));

	/* Write the metadata record to the end of the backing file. */
	record_offset = file_pos;
	new_md_offset = cpu_to_le64(record_offset);
	result = write_to_bf(bfc, record, record_size, file_pos);
	if (result)
		return result;

	/* Update the next metadata offset in the previous record or file header. */
	if (bfc->bc_last_md_record_offset) {
		/*
		 * Find the place in the previous md record where the new
		 * record's offset needs to be saved.
		 */
		file_pos = bfc->bc_last_md_record_offset +
			offsetof(struct incfs_md_header, h_next_md_offset);
	} else {
		/*
		 * No metadata yet, find the place to update in the
		 * file header.
		 */
		file_pos = offsetof(struct incfs_file_header,
				    fh_first_md_offset);
	}
	result = write_to_bf(bfc, &new_md_offset, sizeof(new_md_offset),
			     file_pos);
	if (result)
		return result;

	bfc->bc_last_md_record_offset = record_offset;
	return result;
}

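/* Overwrite the flags field of the file header in the backing file. */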
int incfs_write_file_header_flags(struct backing_file_context *bfc, u32 flags)
{
	if (!bfc)
		return -EFAULT;

	return write_to_bf(bfc, &flags, sizeof(flags),
			   offsetof(struct incfs_file_header,
				    fh_file_header_flags));
}

/*
 * Reserve 0-filled space for the blockmap body, and append an
 * incfs_blockmap metadata record pointing to it.
 */
int incfs_write_blockmap_to_backing_file(struct backing_file_context *bfc,
					 u32 block_count)
{
	struct incfs_blockmap blockmap = {};
	int result = 0;
	loff_t file_end = 0;
	size_t map_size = block_count * sizeof(struct incfs_blockmap_entry);

	if (!bfc)
		return -EFAULT;

	blockmap.m_header.h_md_entry_type = INCFS_MD_BLOCK_MAP;
	blockmap.m_header.h_record_size = cpu_to_le16(sizeof(blockmap));
	blockmap.m_header.h_next_md_offset = cpu_to_le64(0);
	blockmap.m_block_count = cpu_to_le32(block_count);

	LOCK_REQUIRED(bfc->bc_mutex);

	/* Reserve 0-filled space for the blockmap body in the backing file. */
	file_end = incfs_get_end_offset(bfc->bc_file);
	result = append_zeros(bfc, map_size);
	if (result)
		return result;

	/* Write a blockmap metadata record pointing to the body written above. */
	blockmap.m_base_offset = cpu_to_le64(file_end);
	result = append_md_to_backing_file(bfc, &blockmap.m_header);
	if (result)
		/* Error, roll back file changes. */
		truncate_backing_file(bfc, file_end);

	return result;
}

/*
 * Write file attribute data and a metadata record to the backing file.
 */
int incfs_write_file_attr_to_backing_file(struct backing_file_context *bfc,
		struct mem_range value, struct incfs_file_attr *attr)
{
	struct incfs_file_attr file_attr = {};
	int result = 0;
	u32 crc = 0;
	loff_t value_offset = 0;

	if (!bfc)
		return -EFAULT;

	if (value.len > INCFS_MAX_FILE_ATTR_SIZE)
		return -ENOSPC;

	LOCK_REQUIRED(bfc->bc_mutex);

	crc = crc32(0, value.data, value.len);
	value_offset = incfs_get_end_offset(bfc->bc_file);
	file_attr.fa_header.h_md_entry_type = INCFS_MD_FILE_ATTR;
	file_attr.fa_header.h_record_size = cpu_to_le16(sizeof(file_attr));
	file_attr.fa_header.h_next_md_offset = cpu_to_le64(0);
	file_attr.fa_size = cpu_to_le16((u16)value.len);
	file_attr.fa_offset = cpu_to_le64(value_offset);
	file_attr.fa_crc = cpu_to_le32(crc);

	result = write_to_bf(bfc, value.data, value.len, value_offset);
	if (result)
		return result;

	result = append_md_to_backing_file(bfc, &file_attr.fa_header);
	if (result) {
		/* Error, roll back file changes. */
		truncate_backing_file(bfc, value_offset);
	} else if (attr) {
		*attr = file_attr;
	}

	return result;
}

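/*
 * Write the signature blob (if any) and reserve 0-filled space for the hash
 * tree, then append an incfs_file_signature metadata record describing them.
 */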
int incfs_write_signature_to_backing_file(struct backing_file_context *bfc,
					  struct mem_range sig, u32 tree_size)
{
	struct incfs_file_signature sg = {};
	int result = 0;
	loff_t rollback_pos = 0;
	loff_t tree_area_pos = 0;
	size_t alignment = 0;

	if (!bfc)
		return -EFAULT;

	LOCK_REQUIRED(bfc->bc_mutex);

	rollback_pos = incfs_get_end_offset(bfc->bc_file);

	sg.sg_header.h_md_entry_type = INCFS_MD_SIGNATURE;
	sg.sg_header.h_record_size = cpu_to_le16(sizeof(sg));
	sg.sg_header.h_next_md_offset = cpu_to_le64(0);
	if (sig.data != NULL && sig.len > 0) {
		loff_t pos = incfs_get_end_offset(bfc->bc_file);

		sg.sg_sig_size = cpu_to_le32(sig.len);
		sg.sg_sig_offset = cpu_to_le64(pos);

		result = write_to_bf(bfc, sig.data, sig.len, pos);
		if (result)
			goto err;
	}

	tree_area_pos = incfs_get_end_offset(bfc->bc_file);
	if (tree_size > 0) {
		if (tree_size > 5 * INCFS_DATA_FILE_BLOCK_SIZE) {
			/*
			 * If the hash tree is big enough, it makes sense to
			 * align it in the backing file for faster access.
			 */
			loff_t offset = round_up(tree_area_pos, PAGE_SIZE);

			alignment = offset - tree_area_pos;
			tree_area_pos = offset;
		}

		/*
		 * If the root hash is not the only hash in the tree,
		 * reserve 0-filled space for the tree.
		 */
		result = append_zeros(bfc, tree_size + alignment);
		if (result)
			goto err;

		sg.sg_hash_tree_size = cpu_to_le32(tree_size);
		sg.sg_hash_tree_offset = cpu_to_le64(tree_area_pos);
	}

	/* Write a hash tree metadata record pointing to the hash tree above. */
	result = append_md_to_backing_file(bfc, &sg.sg_header);
err:
	if (result)
		/* Error, roll back file changes. */
		truncate_backing_file(bfc, rollback_pos);
	return result;
}

/*
 * Write a backing file header.
 * It should always be called only on an empty file.
 * incfs_file_header.fh_first_md_offset is 0 for now, but will be updated
 * once the first metadata record is added.
 */
int incfs_write_fh_to_backing_file(struct backing_file_context *bfc,
				   incfs_uuid_t *uuid, u64 file_size)
{
	struct incfs_file_header fh = {};
	loff_t file_pos = 0;

	if (!bfc)
		return -EFAULT;

	fh.fh_magic = cpu_to_le64(INCFS_MAGIC_NUMBER);
	fh.fh_version = cpu_to_le64(INCFS_FORMAT_CURRENT_VER);
	fh.fh_header_size = cpu_to_le16(sizeof(fh));
	fh.fh_first_md_offset = cpu_to_le64(0);
	fh.fh_data_block_size = cpu_to_le16(INCFS_DATA_FILE_BLOCK_SIZE);

	fh.fh_file_size = cpu_to_le64(file_size);
	fh.fh_uuid = *uuid;

	LOCK_REQUIRED(bfc->bc_mutex);

	file_pos = incfs_get_end_offset(bfc->bc_file);
	if (file_pos != 0)
		return -EEXIST;

	return write_to_bf(bfc, &fh, sizeof(fh), file_pos);
}

/* Write a given data block and update the file's blockmap to point to it. */
int incfs_write_data_block_to_backing_file(struct backing_file_context *bfc,
				     struct mem_range block, int block_index,
				     loff_t bm_base_off, u16 flags)
{
	struct incfs_blockmap_entry bm_entry = {};
	int result = 0;
	loff_t data_offset = 0;
	loff_t bm_entry_off =
		bm_base_off + sizeof(struct incfs_blockmap_entry) * block_index;

	if (!bfc)
		return -EFAULT;

	if (block.len >= (1 << 16) || block_index < 0)
		return -EINVAL;

	LOCK_REQUIRED(bfc->bc_mutex);

	data_offset = incfs_get_end_offset(bfc->bc_file);
	if (data_offset <= bm_entry_off) {
		/* Blockmap entry is beyond the file's end. This is not normal. */
		return -EINVAL;
	}

	/* Write the block data at the end of the backing file. */
	result = write_to_bf(bfc, block.data, block.len, data_offset);
	if (result)
		return result;

	/* Update the blockmap to point to the newly written data. */
	bm_entry.me_data_offset_lo = cpu_to_le32((u32)data_offset);
	bm_entry.me_data_offset_hi = cpu_to_le16((u16)(data_offset >> 32));
	bm_entry.me_data_size = cpu_to_le16((u16)block.len);
	bm_entry.me_flags = cpu_to_le16(flags);

	return write_to_bf(bfc, &bm_entry, sizeof(bm_entry),
				bm_entry_off);
}

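/*
 * Write a hash block into the hash tree area reserved earlier and update
 * the corresponding blockmap entry. Hash block entries are placed in the
 * blockmap after the entries for the file's data blocks.
 */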
int incfs_write_hash_block_to_backing_file(struct backing_file_context *bfc,
					   struct mem_range block,
					   int block_index,
					   loff_t hash_area_off,
					   loff_t bm_base_off,
					   loff_t file_size)
{
	struct incfs_blockmap_entry bm_entry = {};
	int result;
	loff_t data_offset = 0;
	loff_t file_end = 0;
	loff_t bm_entry_off =
		bm_base_off +
		sizeof(struct incfs_blockmap_entry) *
			(block_index + get_blocks_count_for_size(file_size));

	if (!bfc)
		return -EFAULT;

	LOCK_REQUIRED(bfc->bc_mutex);

	data_offset = hash_area_off + block_index * INCFS_DATA_FILE_BLOCK_SIZE;
	file_end = incfs_get_end_offset(bfc->bc_file);
	if (data_offset + block.len > file_end) {
		/* Block is located beyond the file's end. This is not normal. */
		return -EINVAL;
	}

	result = write_to_bf(bfc, block.data, block.len, data_offset);
	if (result)
		return result;

	bm_entry.me_data_offset_lo = cpu_to_le32((u32)data_offset);
	bm_entry.me_data_offset_hi = cpu_to_le16((u16)(data_offset >> 32));
	bm_entry.me_data_size = cpu_to_le16(INCFS_DATA_FILE_BLOCK_SIZE);
	bm_entry.me_flags = cpu_to_le16(INCFS_BLOCK_HASH);

	return write_to_bf(bfc, &bm_entry, sizeof(bm_entry), bm_entry_off);
}

/* Initialize a new image in a given backing file. */
int incfs_make_empty_backing_file(struct backing_file_context *bfc,
				  incfs_uuid_t *uuid, u64 file_size)
{
	int result = 0;

	if (!bfc || !bfc->bc_file)
		return -EFAULT;

	result = mutex_lock_interruptible(&bfc->bc_mutex);
	if (result)
		return result;

	result = truncate_backing_file(bfc, 0);
	if (result)
		goto out;

	result = incfs_write_fh_to_backing_file(bfc, uuid, file_size);
out:
	mutex_unlock(&bfc->bc_mutex);
	return result;
}

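/* Read a single blockmap entry for the given block index. */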
int incfs_read_blockmap_entry(struct backing_file_context *bfc, int block_index,
			loff_t bm_base_off,
			struct incfs_blockmap_entry *bm_entry)
{
	int error = incfs_read_blockmap_entries(bfc, bm_entry, block_index, 1,
						bm_base_off);

	if (error < 0)
		return error;

	if (error == 0)
		return -EIO;

	if (error != 1)
		return -EFAULT;

	return 0;
}

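/*
 * Read blocks_number consecutive blockmap entries starting at start_index.
 * Returns the number of entries actually read or a negative error code.
 */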
int incfs_read_blockmap_entries(struct backing_file_context *bfc,
		struct incfs_blockmap_entry *entries,
		int start_index, int blocks_number,
		loff_t bm_base_off)
{
	loff_t bm_entry_off =
		bm_base_off + sizeof(struct incfs_blockmap_entry) * start_index;
	const size_t bytes_to_read = sizeof(struct incfs_blockmap_entry)
					* blocks_number;
	int result = 0;

	if (!bfc || !entries)
		return -EFAULT;

	if (start_index < 0 || bm_base_off <= 0)
		return -ENODATA;

	result = incfs_kread(bfc, entries, bytes_to_read, bm_entry_off);
	if (result < 0)
		return result;
	return result / sizeof(*entries);
}

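/*
 * Read and validate the file header of the backing file, returning the
 * first metadata record offset and, optionally, the UUID, file size and
 * header flags.
 */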
int incfs_read_file_header(struct backing_file_context *bfc,
			   loff_t *first_md_off, incfs_uuid_t *uuid,
			   u64 *file_size, u32 *flags)
{
	ssize_t bytes_read = 0;
	struct incfs_file_header fh = {};

	if (!bfc || !first_md_off)
		return -EFAULT;

	bytes_read = incfs_kread(bfc, &fh, sizeof(fh), 0);
	if (bytes_read < 0)
		return bytes_read;

	if (bytes_read < sizeof(fh))
		return -EBADMSG;

	if (le64_to_cpu(fh.fh_magic) != INCFS_MAGIC_NUMBER)
		return -EILSEQ;

	if (le64_to_cpu(fh.fh_version) > INCFS_FORMAT_CURRENT_VER)
		return -EILSEQ;

	if (le16_to_cpu(fh.fh_data_block_size) != INCFS_DATA_FILE_BLOCK_SIZE)
		return -EILSEQ;

	if (le16_to_cpu(fh.fh_header_size) != sizeof(fh))
		return -EILSEQ;

	if (first_md_off)
		*first_md_off = le64_to_cpu(fh.fh_first_md_offset);
	if (uuid)
		*uuid = fh.fh_uuid;
	if (file_size)
		*file_size = le64_to_cpu(fh.fh_file_size);
	if (flags)
		*flags = le32_to_cpu(fh.fh_file_header_flags);
	return 0;
}

/*
 * Read metadata records from the backing file one by one
 * and call the provided metadata handlers.
 */
int incfs_read_next_metadata_record(struct backing_file_context *bfc,
			      struct metadata_handler *handler)
{
	const ssize_t max_md_size = INCFS_MAX_METADATA_RECORD_SIZE;
	ssize_t bytes_read = 0;
	size_t md_record_size = 0;
	loff_t next_record = 0;
	loff_t prev_record = 0;
	int res = 0;
	struct incfs_md_header *md_hdr = NULL;

	if (!bfc || !handler)
		return -EFAULT;

	LOCK_REQUIRED(bfc->bc_mutex);

	if (handler->md_record_offset == 0)
		return -EPERM;

	memset(&handler->md_buffer, 0, max_md_size);
	bytes_read = incfs_kread(bfc, &handler->md_buffer, max_md_size,
				 handler->md_record_offset);
	if (bytes_read < 0)
		return bytes_read;
	if (bytes_read < sizeof(*md_hdr))
		return -EBADMSG;

	md_hdr = &handler->md_buffer.md_header;
	next_record = le64_to_cpu(md_hdr->h_next_md_offset);
	prev_record = le64_to_cpu(md_hdr->h_prev_md_offset);
	md_record_size = le16_to_cpu(md_hdr->h_record_size);

	if (md_record_size > max_md_size) {
		pr_warn("incfs: The record is too large. Size: %zu",
				md_record_size);
		return -EBADMSG;
	}

	if (bytes_read < md_record_size) {
		pr_warn("incfs: The record hasn't been fully read.");
		return -EBADMSG;
	}

	if (next_record <= handler->md_record_offset && next_record != 0) {
		pr_warn("incfs: Next record (%lld) points back in file.",
			next_record);
		return -EBADMSG;
	}

	if (prev_record != handler->md_prev_record_offset) {
		pr_warn("incfs: Metadata chain has been corrupted.");
		return -EBADMSG;
	}

	if (le32_to_cpu(md_hdr->h_record_crc) != calc_md_crc(md_hdr)) {
		pr_warn("incfs: Metadata CRC mismatch.");
		return -EBADMSG;
	}

	switch (md_hdr->h_md_entry_type) {
	case INCFS_MD_NONE:
		break;
	case INCFS_MD_BLOCK_MAP:
		if (handler->handle_blockmap)
			res = handler->handle_blockmap(
				&handler->md_buffer.blockmap, handler);
		break;
	case INCFS_MD_FILE_ATTR:
		if (handler->handle_file_attr)
			res = handler->handle_file_attr(
				&handler->md_buffer.file_attr, handler);
		break;
	case INCFS_MD_SIGNATURE:
		if (handler->handle_signature)
			res = handler->handle_signature(
				&handler->md_buffer.signature, handler);
		break;
	default:
		res = -ENOTSUPP;
		break;
	}

	if (!res) {
		if (next_record == 0) {
			/*
			 * A zero offset for the next record means that the last
			 * metadata record has just been processed.
			 */
			bfc->bc_last_md_record_offset =
				handler->md_record_offset;
		}
		handler->md_prev_record_offset = handler->md_record_offset;
		handler->md_record_offset = next_record;
	}
	return res;
}

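/* Read from the backing file with the mount owner's credentials. */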
ssize_t incfs_kread(struct backing_file_context *bfc, void *buf, size_t size,
		    loff_t pos)
{
	const struct cred *old_cred = override_creds(bfc->bc_cred);
	int ret = kernel_read(bfc->bc_file, buf, size, &pos);

	revert_creds(old_cred);
	return ret;
}

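/* Write to the backing file with the mount owner's credentials. */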
ssize_t incfs_kwrite(struct backing_file_context *bfc, const void *buf,
		     size_t size, loff_t pos)
{
	const struct cred *old_cred = override_creds(bfc->bc_cred);
	int ret = kernel_write(bfc->bc_file, buf, size, &pos);

	revert_creds(old_cred);
	return ret;
}