1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *
4  * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
5  *
6  */
7 
8 #include <linux/blkdev.h>
9 #include <linux/buffer_head.h>
10 #include <linux/fs.h>
11 #include <linux/kernel.h>
12 
13 #include "debug.h"
14 #include "ntfs.h"
15 #include "ntfs_fs.h"
16 
17 // clang-format off
18 const struct cpu_str NAME_MFT = {
19 	4, 0, { '$', 'M', 'F', 'T' },
20 };
21 const struct cpu_str NAME_MIRROR = {
22 	8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
23 };
24 const struct cpu_str NAME_LOGFILE = {
25 	8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
26 };
27 const struct cpu_str NAME_VOLUME = {
28 	7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
29 };
30 const struct cpu_str NAME_ATTRDEF = {
31 	8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
32 };
33 const struct cpu_str NAME_ROOT = {
34 	1, 0, { '.' },
35 };
36 const struct cpu_str NAME_BITMAP = {
37 	7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
38 };
39 const struct cpu_str NAME_BOOT = {
40 	5, 0, { '$', 'B', 'o', 'o', 't' },
41 };
42 const struct cpu_str NAME_BADCLUS = {
43 	8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
44 };
45 const struct cpu_str NAME_QUOTA = {
46 	6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
47 };
48 const struct cpu_str NAME_SECURE = {
49 	7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
50 };
51 const struct cpu_str NAME_UPCASE = {
52 	7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
53 };
54 const struct cpu_str NAME_EXTEND = {
55 	7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
56 };
57 const struct cpu_str NAME_OBJID = {
58 	6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
59 };
60 const struct cpu_str NAME_REPARSE = {
61 	8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
62 };
63 const struct cpu_str NAME_USNJRNL = {
64 	8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
65 };
66 const __le16 BAD_NAME[4] = {
67 	cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
68 };
69 const __le16 I30_NAME[4] = {
70 	cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
71 };
72 const __le16 SII_NAME[4] = {
73 	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
74 };
75 const __le16 SDH_NAME[4] = {
76 	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
77 };
78 const __le16 SDS_NAME[4] = {
79 	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
80 };
81 const __le16 SO_NAME[2] = {
82 	cpu_to_le16('$'), cpu_to_le16('O'),
83 };
84 const __le16 SQ_NAME[2] = {
85 	cpu_to_le16('$'), cpu_to_le16('Q'),
86 };
87 const __le16 SR_NAME[2] = {
88 	cpu_to_le16('$'), cpu_to_le16('R'),
89 };
90 
91 #ifdef CONFIG_NTFS3_LZX_XPRESS
92 const __le16 WOF_NAME[17] = {
93 	cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
94 	cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
95 	cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
96 	cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
97 	cpu_to_le16('a'),
98 };
99 #endif
100 
101 // clang-format on
102 
103 /*
104  * ntfs_fix_pre_write - Insert fixups into @rhdr before writing to disk.
105  */
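/*
 * A sketch of the update sequence ("fixup") scheme used by this helper and
 * by ntfs_fix_post_read() below (an illustration with simplified names,
 * not extra on-disk state):
 *
 *	Every 512-byte sector of a record ends in a 2-byte slot. Before a
 *	write, each slot's contents are saved into the fixup array at
 *	@fix_off and replaced by the current update sequence number:
 *
 *		fixup[0]              = usn;         // sequence number
 *		fixup[1 .. fn-1]      = saved words; // one per sector
 *		sector[i][510 .. 511] = usn;         // stamped everywhere
 *
 *	After a read, a sector whose tail no longer equals 'usn' reveals a
 *	torn multi-sector write; otherwise the saved words are restored.
 */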
106 bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
107 {
108 	u16 *fixup, *ptr;
109 	u16 sample;
110 	u16 fo = le16_to_cpu(rhdr->fix_off);
111 	u16 fn = le16_to_cpu(rhdr->fix_num);
112 
113 	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
114 	    fn * SECTOR_SIZE > bytes) {
115 		return false;
116 	}
117 
118 	/* Get fixup pointer. */
119 	fixup = Add2Ptr(rhdr, fo);
120 
121 	if (*fixup >= 0x7FFF)
122 		*fixup = 1;
123 	else
124 		*fixup += 1;
125 
126 	sample = *fixup;
127 
128 	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
129 
130 	while (fn--) {
131 		*++fixup = *ptr;
132 		*ptr = sample;
133 		ptr += SECTOR_SIZE / sizeof(short);
134 	}
135 	return true;
136 }
137 
138 /*
139  * ntfs_fix_post_read - Remove fixups after reading from disk.
140  *
141  * Return: < 0 if error, 0 if ok, 1 if need to update fixups.
142  */
143 int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
144 		       bool simple)
145 {
146 	int ret;
147 	u16 *fixup, *ptr;
148 	u16 sample, fo, fn;
149 
150 	fo = le16_to_cpu(rhdr->fix_off);
151 	fn = simple ? ((bytes >> SECTOR_SHIFT) + 1)
152 		    : le16_to_cpu(rhdr->fix_num);
153 
154 	/* Check errors. */
155 	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
156 	    fn * SECTOR_SIZE > bytes) {
157 		return -E_NTFS_CORRUPT;
158 	}
159 
160 	/* Get fixup pointer. */
161 	fixup = Add2Ptr(rhdr, fo);
162 	sample = *fixup;
163 	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
164 	ret = 0;
165 
166 	while (fn--) {
167 		/* Test current word. */
168 		if (*ptr != sample) {
169 			/* Fixup does not match! Is it a serious error? */
170 			ret = -E_NTFS_FIXUP;
171 		}
172 
173 		/* Replace fixup. */
174 		*ptr = *++fixup;
175 		ptr += SECTOR_SIZE / sizeof(short);
176 	}
177 
178 	return ret;
179 }
180 
181 /*
182  * ntfs_extend_init - Load $Extend file.
183  */
184 int ntfs_extend_init(struct ntfs_sb_info *sbi)
185 {
186 	int err;
187 	struct super_block *sb = sbi->sb;
188 	struct inode *inode, *inode2;
189 	struct MFT_REF ref;
190 
191 	if (sbi->volume.major_ver < 3) {
192 		ntfs_notice(sb, "Skip $Extend 'cause NTFS version");
193 		return 0;
194 	}
195 
196 	ref.low = cpu_to_le32(MFT_REC_EXTEND);
197 	ref.high = 0;
198 	ref.seq = cpu_to_le16(MFT_REC_EXTEND);
199 	inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
200 	if (IS_ERR(inode)) {
201 		err = PTR_ERR(inode);
202 		ntfs_err(sb, "Failed to load $Extend.");
203 		inode = NULL;
204 		goto out;
205 	}
206 
207 	/* If ntfs_iget5() reads from disk, it never returns a bad inode. */
208 	if (!S_ISDIR(inode->i_mode)) {
209 		err = -EINVAL;
210 		goto out;
211 	}
212 
213 	/* Try to find $ObjId */
214 	inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
215 	if (inode2 && !IS_ERR(inode2)) {
216 		if (is_bad_inode(inode2)) {
217 			iput(inode2);
218 		} else {
219 			sbi->objid.ni = ntfs_i(inode2);
220 			sbi->objid_no = inode2->i_ino;
221 		}
222 	}
223 
224 	/* Try to find $Quota */
225 	inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
226 	if (inode2 && !IS_ERR(inode2)) {
227 		sbi->quota_no = inode2->i_ino;
228 		iput(inode2);
229 	}
230 
231 	/* Try to find $Reparse */
232 	inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
233 	if (inode2 && !IS_ERR(inode2)) {
234 		sbi->reparse.ni = ntfs_i(inode2);
235 		sbi->reparse_no = inode2->i_ino;
236 	}
237 
238 	/* Try to find $UsnJrnl */
239 	inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
240 	if (inode2 && !IS_ERR(inode2)) {
241 		sbi->usn_jrnl_no = inode2->i_ino;
242 		iput(inode2);
243 	}
244 
245 	err = 0;
246 out:
247 	iput(inode);
248 	return err;
249 }
250 
251 int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
252 {
253 	int err = 0;
254 	struct super_block *sb = sbi->sb;
255 	bool initialized = false;
256 	struct MFT_REF ref;
257 	struct inode *inode;
258 
259 	/* Check for 4GB. */
260 	if (ni->vfs_inode.i_size >= 0x100000000ull) {
261 		ntfs_err(sb, "\x24LogFile is too big");
262 		err = -EINVAL;
263 		goto out;
264 	}
265 
266 	sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;
267 
268 	ref.low = cpu_to_le32(MFT_REC_MFT);
269 	ref.high = 0;
270 	ref.seq = cpu_to_le16(1);
271 
272 	inode = ntfs_iget5(sb, &ref, NULL);
273 
274 	if (IS_ERR(inode))
275 		inode = NULL;
276 
277 	if (!inode) {
278 		/* Try to use MFT copy. */
279 		u64 t64 = sbi->mft.lbo;
280 
281 		sbi->mft.lbo = sbi->mft.lbo2;
282 		inode = ntfs_iget5(sb, &ref, NULL);
283 		sbi->mft.lbo = t64;
284 		if (IS_ERR(inode))
285 			inode = NULL;
286 	}
287 
288 	if (!inode) {
289 		err = -EINVAL;
290 		ntfs_err(sb, "Failed to load $MFT.");
291 		goto out;
292 	}
293 
294 	sbi->mft.ni = ntfs_i(inode);
295 
296 	/* LogFile should not contain an attribute list. */
297 	err = ni_load_all_mi(sbi->mft.ni);
298 	if (!err)
299 		err = log_replay(ni, &initialized);
300 
301 	iput(inode);
302 	sbi->mft.ni = NULL;
303 
304 	sync_blockdev(sb->s_bdev);
305 	invalidate_bdev(sb->s_bdev);
306 
307 	if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
308 		err = 0;
309 		goto out;
310 	}
311 
312 	if (sb_rdonly(sb) || !initialized)
313 		goto out;
314 
315 	/* Fill LogFile with -1 if it is initialized. */
316 	err = ntfs_bio_fill_1(sbi, &ni->file.run);
317 
318 out:
319 	sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;
320 
321 	return err;
322 }
323 
324 /*
325  * ntfs_query_def
326  *
327  * Return: Current ATTR_DEF_ENTRY for given attribute type.
328  */
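/*
 * Note: this is a binary search, so sbi->def_table is assumed to be sorted
 * by ascending attribute type (it is built from $AttrDef at mount time).
 * The 'else if (i)' branch keeps the unsigned 'max_idx' from wrapping
 * around when the probe lands on index 0.
 */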
329 const struct ATTR_DEF_ENTRY *ntfs_query_def(struct ntfs_sb_info *sbi,
330 					    enum ATTR_TYPE type)
331 {
332 	int type_in = le32_to_cpu(type);
333 	size_t min_idx = 0;
334 	size_t max_idx = sbi->def_entries - 1;
335 
336 	while (min_idx <= max_idx) {
337 		size_t i = min_idx + ((max_idx - min_idx) >> 1);
338 		const struct ATTR_DEF_ENTRY *entry = sbi->def_table + i;
339 		int diff = le32_to_cpu(entry->type) - type_in;
340 
341 		if (!diff)
342 			return entry;
343 		if (diff < 0)
344 			min_idx = i + 1;
345 		else if (i)
346 			max_idx = i - 1;
347 		else
348 			return NULL;
349 	}
350 	return NULL;
351 }
352 
353 /*
354  * ntfs_look_for_free_space - Look for a free space in bitmap.
355  */
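/*
 * A summary of the allocation policy implemented below (derived from the
 * code, not a separate specification):
 *
 *	ALLOCATE_MFT: carve clusters from the head of the reserved MFT zone
 *	so $MFT stays contiguous, refreshing the zone first if it is empty.
 *
 *	Regular data: start from the cached 'next_free_lcn' hint; if the
 *	bitmap search fails, shrink the MFT zone (never below
 *	NTFS_MIN_MFT_ZONE) and retry across the whole volume.
 */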
356 int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
357 			     CLST *new_lcn, CLST *new_len,
358 			     enum ALLOCATE_OPT opt)
359 {
360 	int err;
361 	CLST alen;
362 	struct super_block *sb = sbi->sb;
363 	size_t alcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
364 	struct wnd_bitmap *wnd = &sbi->used.bitmap;
365 
366 	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
367 	if (opt & ALLOCATE_MFT) {
368 		zlen = wnd_zone_len(wnd);
369 
370 		if (!zlen) {
371 			err = ntfs_refresh_zone(sbi);
372 			if (err)
373 				goto up_write;
374 
375 			zlen = wnd_zone_len(wnd);
376 		}
377 
378 		if (!zlen) {
379 			ntfs_err(sbi->sb, "no free space to extend mft");
380 			err = -ENOSPC;
381 			goto up_write;
382 		}
383 
384 		lcn = wnd_zone_bit(wnd);
385 		alen = min_t(CLST, len, zlen);
386 
387 		wnd_zone_set(wnd, lcn + alen, zlen - alen);
388 
389 		err = wnd_set_used(wnd, lcn, alen);
390 		if (err)
391 			goto up_write;
392 
393 		alcn = lcn;
394 		goto space_found;
395 	}
396 	/*
397 	 * Since cluster 0 is always in use, lcn == 0 means that we should
398 	 * use the cached value of 'next_free_lcn' to improve performance.
399 	 */
400 	if (!lcn)
401 		lcn = sbi->used.next_free_lcn;
402 
403 	if (lcn >= wnd->nbits)
404 		lcn = 0;
405 
406 	alen = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &alcn);
407 	if (alen)
408 		goto space_found;
409 
410 	/* Try to use clusters from MftZone. */
411 	zlen = wnd_zone_len(wnd);
412 	zeroes = wnd_zeroes(wnd);
413 
414 	/* Check for too big a request. */
415 	if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE) {
416 		err = -ENOSPC;
417 		goto up_write;
418 	}
419 
420 	/* How many clusters to cut from the zone. */
421 	zlcn = wnd_zone_bit(wnd);
422 	zlen2 = zlen >> 1;
423 	ztrim = clamp_val(len, zlen2, zlen);
424 	new_zlen = max_t(size_t, zlen - ztrim, NTFS_MIN_MFT_ZONE);
425 
426 	wnd_zone_set(wnd, zlcn, new_zlen);
427 
428 	/* Allocate contiguous clusters. */
429 	alen = wnd_find(wnd, len, 0,
430 			BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &alcn);
431 	if (!alen) {
432 		err = -ENOSPC;
433 		goto up_write;
434 	}
435 
436 space_found:
437 	err = 0;
438 	*new_len = alen;
439 	*new_lcn = alcn;
440 
441 	ntfs_unmap_meta(sb, alcn, alen);
442 
443 	/* Set hint for next requests. */
444 	if (!(opt & ALLOCATE_MFT))
445 		sbi->used.next_free_lcn = alcn + alen;
446 up_write:
447 	up_write(&wnd->rw_lock);
448 	return err;
449 }
450 
451 /*
452  * ntfs_extend_mft - Allocate additional MFT records.
453  *
454  * sbi->mft.bitmap is locked for write.
455  *
456  * NOTE: recursive:
457  *	ntfs_look_free_mft ->
458  *	ntfs_extend_mft ->
459  *	attr_set_size ->
460  *	ni_insert_nonresident ->
461  *	ni_insert_attr ->
462  *	ni_ins_attr_ext ->
463  *	ntfs_look_free_mft ->
464  *	ntfs_extend_mft
465  *
466  * To avoid recursion, always allocate space for two new MFT records
467  * (see attrib.c: "at least two MFT to avoid recursive loop").
468  */
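/*
 * The first statement below rounds the new record count up to a multiple
 * of 128. A worked example, assuming MFT_INCREASE_CHUNK == 1024 records:
 *
 *	wnd->nbits = 1000
 *	new_mft_total = (1000 + 1024 + 127) & ~127 = 2048
 *	new_mft_bytes = 2048 << sbi->record_bits
 */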
469 static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
470 {
471 	int err;
472 	struct ntfs_inode *ni = sbi->mft.ni;
473 	size_t new_mft_total;
474 	u64 new_mft_bytes, new_bitmap_bytes;
475 	struct ATTRIB *attr;
476 	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
477 
478 	new_mft_total = (wnd->nbits + MFT_INCREASE_CHUNK + 127) & (CLST)~127;
479 	new_mft_bytes = (u64)new_mft_total << sbi->record_bits;
480 
481 	/* Step 1: Resize $MFT::DATA. */
482 	down_write(&ni->file.run_lock);
483 	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
484 			    new_mft_bytes, NULL, false, &attr);
485 
486 	if (err) {
487 		up_write(&ni->file.run_lock);
488 		goto out;
489 	}
490 
491 	attr->nres.valid_size = attr->nres.data_size;
492 	new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;
493 	ni->mi.dirty = true;
494 
495 	/* Step 2: Resize $MFT::BITMAP. */
496 	new_bitmap_bytes = bitmap_size(new_mft_total);
497 
498 	err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
499 			    new_bitmap_bytes, &new_bitmap_bytes, true, NULL);
500 
501 	/* Refresh MFT Zone if necessary. */
502 	down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);
503 
504 	ntfs_refresh_zone(sbi);
505 
506 	up_write(&sbi->used.bitmap.rw_lock);
507 	up_write(&ni->file.run_lock);
508 
509 	if (err)
510 		goto out;
511 
512 	err = wnd_extend(wnd, new_mft_total);
513 
514 	if (err)
515 		goto out;
516 
517 	ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);
518 
519 	err = _ni_write_inode(&ni->vfs_inode, 0);
520 out:
521 	return err;
522 }
523 
524 /*
525  * ntfs_look_free_mft - Look for a free MFT record.
526  */
527 int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
528 		       struct ntfs_inode *ni, struct mft_inode **mi)
529 {
530 	int err = 0;
531 	size_t zbit, zlen, from, to, fr;
532 	size_t mft_total;
533 	struct MFT_REF ref;
534 	struct super_block *sb = sbi->sb;
535 	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
536 	u32 ir;
537 
538 	static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
539 		      MFT_REC_FREE - MFT_REC_RESERVED);
540 
541 	if (!mft)
542 		down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
543 
544 	zlen = wnd_zone_len(wnd);
545 
546 	/* Always reserve space for MFT. */
547 	if (zlen) {
548 		if (mft) {
549 			zbit = wnd_zone_bit(wnd);
550 			*rno = zbit;
551 			wnd_zone_set(wnd, zbit + 1, zlen - 1);
552 		}
553 		goto found;
554 	}
555 
556 	/* No MFT zone. Find the free MFT record nearest to '0'. */
557 	if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
558 		/* Resize MFT */
559 		mft_total = wnd->nbits;
560 
561 		err = ntfs_extend_mft(sbi);
562 		if (!err) {
563 			zbit = mft_total;
564 			goto reserve_mft;
565 		}
566 
567 		if (!mft || MFT_REC_FREE == sbi->mft.next_reserved)
568 			goto out;
569 
570 		err = 0;
571 
572 		/*
573 		 * Look for a free record in the reserved area
574 		 * [11-16) == [MFT_REC_RESERVED, MFT_REC_FREE).
575 		 * The MFT bitmap always marks this area as used.
576 		 */
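		/*
		 * A reserved record is reclaimed below only if it is a base
		 * record with no hard links that still has a standard
		 * information attribute and no file name - i.e. it looks
		 * allocated but unused (a summary of the checks that follow).
		 */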
577 		if (!sbi->mft.reserved_bitmap) {
578 			/* Once per session, create an internal bitmap for these 5 records. */
579 			sbi->mft.reserved_bitmap = 0xFF;
580 
581 			ref.high = 0;
582 			for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
583 				struct inode *i;
584 				struct ntfs_inode *ni;
585 				struct MFT_REC *mrec;
586 
587 				ref.low = cpu_to_le32(ir);
588 				ref.seq = cpu_to_le16(ir);
589 
590 				i = ntfs_iget5(sb, &ref, NULL);
591 				if (IS_ERR(i)) {
592 next:
593 					ntfs_notice(
594 						sb,
595 						"Invalid reserved record %x",
596 						le32_to_cpu(ref.low));
597 					continue;
598 				}
599 				if (is_bad_inode(i)) {
600 					iput(i);
601 					goto next;
602 				}
603 
604 				ni = ntfs_i(i);
605 
606 				mrec = ni->mi.mrec;
607 
608 				if (!is_rec_base(mrec))
609 					goto next;
610 
611 				if (mrec->hard_links)
612 					goto next;
613 
614 				if (!ni_std(ni))
615 					goto next;
616 
617 				if (ni_find_attr(ni, NULL, NULL, ATTR_NAME,
618 						 NULL, 0, NULL, NULL))
619 					goto next;
620 
621 				__clear_bit(ir - MFT_REC_RESERVED,
622 					    &sbi->mft.reserved_bitmap);
623 			}
624 		}
625 
626 		/* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED */
627 		zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap,
628 					  MFT_REC_FREE, MFT_REC_RESERVED);
629 		if (zbit >= MFT_REC_FREE) {
630 			sbi->mft.next_reserved = MFT_REC_FREE;
631 			goto out;
632 		}
633 
634 		zlen = 1;
635 		sbi->mft.next_reserved = zbit;
636 	} else {
637 reserve_mft:
638 		zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
639 		if (zbit + zlen > wnd->nbits)
640 			zlen = wnd->nbits - zbit;
641 
642 		while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
643 			zlen -= 1;
644 
645 		/* [zbit, zbit + zlen) will be used for MFT itself. */
646 		from = sbi->mft.used;
647 		if (from < zbit)
648 			from = zbit;
649 		to = zbit + zlen;
650 		if (from < to) {
651 			ntfs_clear_mft_tail(sbi, from, to);
652 			sbi->mft.used = to;
653 		}
654 	}
655 
656 	if (mft) {
657 		*rno = zbit;
658 		zbit += 1;
659 		zlen -= 1;
660 	}
661 
662 	wnd_zone_set(wnd, zbit, zlen);
663 
664 found:
665 	if (!mft) {
666 		/* The request is to get a record for general purposes. */
667 		if (sbi->mft.next_free < MFT_REC_USER)
668 			sbi->mft.next_free = MFT_REC_USER;
669 
670 		for (;;) {
671 			if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
672 			} else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
673 				sbi->mft.next_free = sbi->mft.bitmap.nbits;
674 			} else {
675 				*rno = fr;
676 				sbi->mft.next_free = *rno + 1;
677 				break;
678 			}
679 
680 			err = ntfs_extend_mft(sbi);
681 			if (err)
682 				goto out;
683 		}
684 	}
685 
686 	if (ni && !ni_add_subrecord(ni, *rno, mi)) {
687 		err = -ENOMEM;
688 		goto out;
689 	}
690 
691 	/* We have found a record that is not reserved for the next MFT. */
692 	if (*rno >= MFT_REC_FREE)
693 		wnd_set_used(wnd, *rno, 1);
694 	else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
695 		__set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
696 
697 out:
698 	if (!mft)
699 		up_write(&wnd->rw_lock);
700 
701 	return err;
702 }
703 
704 /*
705  * ntfs_mark_rec_free - Mark record as free.
706  * is_mft - true if we are changing MFT
707  */
708 void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno, bool is_mft)
709 {
710 	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
711 
712 	if (!is_mft)
713 		down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
714 	if (rno >= wnd->nbits)
715 		goto out;
716 
717 	if (rno >= MFT_REC_FREE) {
718 		if (!wnd_is_used(wnd, rno, 1))
719 			ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
720 		else
721 			wnd_set_free(wnd, rno, 1);
722 	} else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
723 		__clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
724 	}
725 
726 	if (rno < wnd_zone_bit(wnd))
727 		wnd_zone_set(wnd, rno, 1);
728 	else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
729 		sbi->mft.next_free = rno;
730 
731 out:
732 	if (!is_mft)
733 		up_write(&wnd->rw_lock);
734 }
735 
736 /*
737  * ntfs_clear_mft_tail - Format empty records [from, to).
738  *
739  * sbi->mft.bitmap is locked for write.
740  */
741 int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
742 {
743 	int err;
744 	u32 rs;
745 	u64 vbo;
746 	struct runs_tree *run;
747 	struct ntfs_inode *ni;
748 
749 	if (from >= to)
750 		return 0;
751 
752 	rs = sbi->record_size;
753 	ni = sbi->mft.ni;
754 	run = &ni->file.run;
755 
756 	down_read(&ni->file.run_lock);
757 	vbo = (u64)from * rs;
758 	for (; from < to; from++, vbo += rs) {
759 		struct ntfs_buffers nb;
760 
761 		err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
762 		if (err)
763 			goto out;
764 
765 		err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
766 		nb_put(&nb);
767 		if (err)
768 			goto out;
769 	}
770 
771 out:
772 	sbi->mft.used = from;
773 	up_read(&ni->file.run_lock);
774 	return err;
775 }
776 
777 /*
778  * ntfs_refresh_zone - Refresh MFT zone.
779  *
780  * sbi->used.bitmap is locked for rw.
781  * sbi->mft.bitmap is locked for write.
782  * sbi->mft.ni->file.run_lock for write.
783  */
784 int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
785 {
786 	CLST lcn, vcn, len;
787 	size_t lcn_s, zlen;
788 	struct wnd_bitmap *wnd = &sbi->used.bitmap;
789 	struct ntfs_inode *ni = sbi->mft.ni;
790 
791 	/* Do not change anything unless the MFT zone is empty. */
792 	if (wnd_zone_len(wnd))
793 		return 0;
794 
795 	vcn = bytes_to_cluster(sbi,
796 			       (u64)sbi->mft.bitmap.nbits << sbi->record_bits);
797 
798 	if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
799 		lcn = SPARSE_LCN;
800 
801 	/* We should always find Last Lcn for MFT. */
802 	if (lcn == SPARSE_LCN)
803 		return -EINVAL;
804 
805 	lcn_s = lcn + 1;
806 
807 	/* Try to allocate clusters after last MFT run. */
808 	zlen = wnd_find(wnd, sbi->zone_max, lcn_s, 0, &lcn_s);
809 	wnd_zone_set(wnd, lcn_s, zlen);
810 
811 	return 0;
812 }
813 
814 /*
815  * ntfs_update_mftmirr - Update $MFTMirr data.
816  */
817 void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
818 {
819 	int err;
820 	struct super_block *sb = sbi->sb;
821 	u32 blocksize;
822 	sector_t block1, block2;
823 	u32 bytes;
824 
825 	if (!sb)
826 		return;
827 
828 	blocksize = sb->s_blocksize;
829 
830 	if (!(sbi->flags & NTFS_FLAGS_MFTMIRR))
831 		return;
832 
833 	err = 0;
834 	bytes = sbi->mft.recs_mirr << sbi->record_bits;
835 	block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
836 	block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;
837 
838 	for (; bytes >= blocksize; bytes -= blocksize) {
839 		struct buffer_head *bh1, *bh2;
840 
841 		bh1 = sb_bread(sb, block1++);
842 		if (!bh1)
843 			return;
844 
845 		bh2 = sb_getblk(sb, block2++);
846 		if (!bh2) {
847 			put_bh(bh1);
848 			return;
849 		}
850 
851 		if (buffer_locked(bh2))
852 			__wait_on_buffer(bh2);
853 
854 		lock_buffer(bh2);
855 		memcpy(bh2->b_data, bh1->b_data, blocksize);
856 		set_buffer_uptodate(bh2);
857 		mark_buffer_dirty(bh2);
858 		unlock_buffer(bh2);
859 
860 		put_bh(bh1);
861 		bh1 = NULL;
862 
863 		if (wait)
864 			err = sync_dirty_buffer(bh2);
865 
866 		put_bh(bh2);
867 		if (err)
868 			return;
869 	}
870 
871 	sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
872 }
873 
874 /*
875  * ntfs_bad_inode
876  *
877  * Mark the inode as bad and mark the fs as 'dirty'.
878  */
879 void ntfs_bad_inode(struct inode *inode, const char *hint)
880 {
881 	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
882 
883 	ntfs_inode_err(inode, "%s", hint);
884 	make_bad_inode(inode);
885 	ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
886 }
887 
888 /*
889  * ntfs_set_state
890  *
891  * Mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
892  * Umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
893  * NTFS error: ntfs_set_state(NTFS_DIRTY_ERROR)
894  */
895 int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
896 {
897 	int err;
898 	struct ATTRIB *attr;
899 	struct VOLUME_INFO *info;
900 	struct mft_inode *mi;
901 	struct ntfs_inode *ni;
902 
903 	/*
904 	 * Do not change the state if the fs was marked real_dirty.
905 	 * Do not change the state if the fs is already dirty (or clear).
906 	 * Do not change anything if mounted read-only.
907 	 */
908 	if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
909 		return 0;
910 
911 	/* Check cached value. */
912 	if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
913 	    (sbi->volume.flags & VOLUME_FLAG_DIRTY))
914 		return 0;
915 
916 	ni = sbi->volume.ni;
917 	if (!ni)
918 		return -EINVAL;
919 
920 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);
921 
922 	attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);
923 	if (!attr) {
924 		err = -EINVAL;
925 		goto out;
926 	}
927 
928 	info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
929 	if (!info) {
930 		err = -EINVAL;
931 		goto out;
932 	}
933 
934 	switch (dirty) {
935 	case NTFS_DIRTY_ERROR:
936 		ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
937 		sbi->volume.real_dirty = true;
938 		fallthrough;
939 	case NTFS_DIRTY_DIRTY:
940 		info->flags |= VOLUME_FLAG_DIRTY;
941 		break;
942 	case NTFS_DIRTY_CLEAR:
943 		info->flags &= ~VOLUME_FLAG_DIRTY;
944 		break;
945 	}
946 	/* Cache current volume flags. */
947 	sbi->volume.flags = info->flags;
948 	mi->dirty = true;
949 	err = 0;
950 
951 out:
952 	ni_unlock(ni);
953 	if (err)
954 		return err;
955 
956 	mark_inode_dirty_sync(&ni->vfs_inode);
957 	/* verify(!ntfs_update_mftmirr()); */
958 
959 	/* write mft record on disk. */
960 	err = _ni_write_inode(&ni->vfs_inode, 1);
961 
962 	return err;
963 }
964 
965 /*
966  * security_hash - Calculates a hash of security descriptor.
967  */
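/*
 * The loop folds the descriptor as little-endian dwords:
 * hash = rol32(hash, 3) + dword (a right shift by 0x1D equals a left
 * rotate by 3). 'bytes >>= 2' silently drops a tail smaller than 4
 * bytes, so sizes are assumed to be multiples of 4.
 */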
968 static inline __le32 security_hash(const void *sd, size_t bytes)
969 {
970 	u32 hash = 0;
971 	const __le32 *ptr = sd;
972 
973 	bytes >>= 2;
974 	while (bytes--)
975 		hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
976 	return cpu_to_le32(hash);
977 }
978 
979 /*
980  * simple wrapper for sb_bread_unmovable.
981  */
982 struct buffer_head *ntfs_bread(struct super_block *sb, sector_t block)
983 {
984 	struct ntfs_sb_info *sbi = sb->s_fs_info;
985 	struct buffer_head *bh;
986 
987 	if (unlikely(block >= sbi->volume.blocks)) {
988 		/* prevent generic message "attempt to access beyond end of device" */
989 		ntfs_err(sb, "try to read out of volume at offset 0x%llx",
990 			 (u64)block << sb->s_blocksize_bits);
991 		return NULL;
992 	}
993 
994 	bh = sb_bread_unmovable(sb, block);
995 	if (bh)
996 		return bh;
997 
998 	ntfs_err(sb, "failed to read volume at offset 0x%llx",
999 		 (u64)block << sb->s_blocksize_bits);
1000 	return NULL;
1001 }
1002 
1003 int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
1004 {
1005 	struct block_device *bdev = sb->s_bdev;
1006 	u32 blocksize = sb->s_blocksize;
1007 	u64 block = lbo >> sb->s_blocksize_bits;
1008 	u32 off = lbo & (blocksize - 1);
1009 	u32 op = blocksize - off;
1010 
1011 	for (; bytes; block += 1, off = 0, op = blocksize) {
1012 		struct buffer_head *bh = __bread(bdev, block, blocksize);
1013 
1014 		if (!bh)
1015 			return -EIO;
1016 
1017 		if (op > bytes)
1018 			op = bytes;
1019 
1020 		memcpy(buffer, bh->b_data + off, op);
1021 
1022 		put_bh(bh);
1023 
1024 		bytes -= op;
1025 		buffer = Add2Ptr(buffer, op);
1026 	}
1027 
1028 	return 0;
1029 }
1030 
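/*
 * ntfs_sb_write - Write a byte range at an absolute volume offset.
 *
 * A partial first/last block is read first (__bread) so the bytes around
 * the range survive the read-modify-write; whole blocks are only grabbed
 * (__getblk) since they are overwritten entirely. A NULL @buf fills the
 * range with 0xFF instead of copying.
 */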
1031 int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
1032 		  const void *buf, int wait)
1033 {
1034 	u32 blocksize = sb->s_blocksize;
1035 	struct block_device *bdev = sb->s_bdev;
1036 	sector_t block = lbo >> sb->s_blocksize_bits;
1037 	u32 off = lbo & (blocksize - 1);
1038 	u32 op = blocksize - off;
1039 	struct buffer_head *bh;
1040 
1041 	if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
1042 		wait = 1;
1043 
1044 	for (; bytes; block += 1, off = 0, op = blocksize) {
1045 		if (op > bytes)
1046 			op = bytes;
1047 
1048 		if (op < blocksize) {
1049 			bh = __bread(bdev, block, blocksize);
1050 			if (!bh) {
1051 				ntfs_err(sb, "failed to read block %llx",
1052 					 (u64)block);
1053 				return -EIO;
1054 			}
1055 		} else {
1056 			bh = __getblk(bdev, block, blocksize);
1057 			if (!bh)
1058 				return -ENOMEM;
1059 		}
1060 
1061 		if (buffer_locked(bh))
1062 			__wait_on_buffer(bh);
1063 
1064 		lock_buffer(bh);
1065 		if (buf) {
1066 			memcpy(bh->b_data + off, buf, op);
1067 			buf = Add2Ptr(buf, op);
1068 		} else {
1069 			memset(bh->b_data + off, -1, op);
1070 		}
1071 
1072 		set_buffer_uptodate(bh);
1073 		mark_buffer_dirty(bh);
1074 		unlock_buffer(bh);
1075 
1076 		if (wait) {
1077 			int err = sync_dirty_buffer(bh);
1078 
1079 			if (err) {
1080 				ntfs_err(
1081 					sb,
1082 					"failed to sync buffer at block %llx, error %d",
1083 					(u64)block, err);
1084 				put_bh(bh);
1085 				return err;
1086 			}
1087 		}
1088 
1089 		put_bh(bh);
1090 
1091 		bytes -= op;
1092 	}
1093 	return 0;
1094 }
1095 
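/*
 * ntfs_sb_write_run - Write a buffer through a run list.
 *
 * This is the run-walking pattern used throughout this file: map the
 * starting vcn to an (lcn, clen) fragment, consume what fits, then
 * require the next run entry to continue exactly at vcn + clen. Sparse
 * fragments (SPARSE_LCN) are rejected since they have no disk location.
 */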
1096 int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1097 		      u64 vbo, const void *buf, size_t bytes, int sync)
1098 {
1099 	struct super_block *sb = sbi->sb;
1100 	u8 cluster_bits = sbi->cluster_bits;
1101 	u32 off = vbo & sbi->cluster_mask;
1102 	CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
1103 	u64 lbo, len;
1104 	size_t idx;
1105 
1106 	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1107 		return -ENOENT;
1108 
1109 	if (lcn == SPARSE_LCN)
1110 		return -EINVAL;
1111 
1112 	lbo = ((u64)lcn << cluster_bits) + off;
1113 	len = ((u64)clen << cluster_bits) - off;
1114 
1115 	for (;;) {
1116 		u32 op = min_t(u64, len, bytes);
1117 		int err = ntfs_sb_write(sb, lbo, op, buf, sync);
1118 
1119 		if (err)
1120 			return err;
1121 
1122 		bytes -= op;
1123 		if (!bytes)
1124 			break;
1125 
1126 		vcn_next = vcn + clen;
1127 		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1128 		    vcn != vcn_next)
1129 			return -ENOENT;
1130 
1131 		if (lcn == SPARSE_LCN)
1132 			return -EINVAL;
1133 
1134 		if (buf)
1135 			buf = Add2Ptr(buf, op);
1136 
1137 		lbo = ((u64)lcn << cluster_bits);
1138 		len = ((u64)clen << cluster_bits);
1139 	}
1140 
1141 	return 0;
1142 }
1143 
1144 struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
1145 				   const struct runs_tree *run, u64 vbo)
1146 {
1147 	struct super_block *sb = sbi->sb;
1148 	u8 cluster_bits = sbi->cluster_bits;
1149 	CLST lcn;
1150 	u64 lbo;
1151 
1152 	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
1153 		return ERR_PTR(-ENOENT);
1154 
1155 	lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);
1156 
1157 	return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
1158 }
1159 
1160 int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1161 		     u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)
1162 {
1163 	int err;
1164 	struct super_block *sb = sbi->sb;
1165 	u32 blocksize = sb->s_blocksize;
1166 	u8 cluster_bits = sbi->cluster_bits;
1167 	u32 off = vbo & sbi->cluster_mask;
1168 	u32 nbh = 0;
1169 	CLST vcn_next, vcn = vbo >> cluster_bits;
1170 	CLST lcn, clen;
1171 	u64 lbo, len;
1172 	size_t idx;
1173 	struct buffer_head *bh;
1174 
1175 	if (!run) {
1176 		/* First reading of $Volume + $MFTMirr + $LogFile goes here. */
1177 		if (vbo > MFT_REC_VOL * sbi->record_size) {
1178 			err = -ENOENT;
1179 			goto out;
1180 		}
1181 
1182 		/* Use the boot sector's absolute 'MFTCluster' to read the record. */
1183 		lbo = vbo + sbi->mft.lbo;
1184 		len = sbi->record_size;
1185 	} else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1186 		err = -ENOENT;
1187 		goto out;
1188 	} else {
1189 		if (lcn == SPARSE_LCN) {
1190 			err = -EINVAL;
1191 			goto out;
1192 		}
1193 
1194 		lbo = ((u64)lcn << cluster_bits) + off;
1195 		len = ((u64)clen << cluster_bits) - off;
1196 	}
1197 
1198 	off = lbo & (blocksize - 1);
1199 	if (nb) {
1200 		nb->off = off;
1201 		nb->bytes = bytes;
1202 	}
1203 
1204 	for (;;) {
1205 		u32 len32 = len >= bytes ? bytes : len;
1206 		sector_t block = lbo >> sb->s_blocksize_bits;
1207 
1208 		do {
1209 			u32 op = blocksize - off;
1210 
1211 			if (op > len32)
1212 				op = len32;
1213 
1214 			bh = ntfs_bread(sb, block);
1215 			if (!bh) {
1216 				err = -EIO;
1217 				goto out;
1218 			}
1219 
1220 			if (buf) {
1221 				memcpy(buf, bh->b_data + off, op);
1222 				buf = Add2Ptr(buf, op);
1223 			}
1224 
1225 			if (!nb) {
1226 				put_bh(bh);
1227 			} else if (nbh >= ARRAY_SIZE(nb->bh)) {
1228 				err = -EINVAL;
1229 				goto out;
1230 			} else {
1231 				nb->bh[nbh++] = bh;
1232 				nb->nbufs = nbh;
1233 			}
1234 
1235 			bytes -= op;
1236 			if (!bytes)
1237 				return 0;
1238 			len32 -= op;
1239 			block += 1;
1240 			off = 0;
1241 
1242 		} while (len32);
1243 
1244 		vcn_next = vcn + clen;
1245 		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1246 		    vcn != vcn_next) {
1247 			err = -ENOENT;
1248 			goto out;
1249 		}
1250 
1251 		if (lcn == SPARSE_LCN) {
1252 			err = -EINVAL;
1253 			goto out;
1254 		}
1255 
1256 		lbo = ((u64)lcn << cluster_bits);
1257 		len = ((u64)clen << cluster_bits);
1258 	}
1259 
1260 out:
1261 	if (!nbh)
1262 		return err;
1263 
1264 	while (nbh) {
1265 		put_bh(nb->bh[--nbh]);
1266 		nb->bh[nbh] = NULL;
1267 	}
1268 
1269 	nb->nbufs = 0;
1270 	return err;
1271 }
1272 
1273 /*
1274  * ntfs_read_bh
1275  *
1276  * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if need to update fixups.
1277  */
1278 int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
1279 		 struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
1280 		 struct ntfs_buffers *nb)
1281 {
1282 	int err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);
1283 
1284 	if (err)
1285 		return err;
1286 	return ntfs_fix_post_read(rhdr, nb->bytes, true);
1287 }
1288 
1289 int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
1290 		u32 bytes, struct ntfs_buffers *nb)
1291 {
1292 	int err = 0;
1293 	struct super_block *sb = sbi->sb;
1294 	u32 blocksize = sb->s_blocksize;
1295 	u8 cluster_bits = sbi->cluster_bits;
1296 	CLST vcn_next, vcn = vbo >> cluster_bits;
1297 	u32 off;
1298 	u32 nbh = 0;
1299 	CLST lcn, clen;
1300 	u64 lbo, len;
1301 	size_t idx;
1302 
1303 	nb->bytes = bytes;
1304 
1305 	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1306 		err = -ENOENT;
1307 		goto out;
1308 	}
1309 
1310 	off = vbo & sbi->cluster_mask;
1311 	lbo = ((u64)lcn << cluster_bits) + off;
1312 	len = ((u64)clen << cluster_bits) - off;
1313 
1314 	nb->off = off = lbo & (blocksize - 1);
1315 
1316 	for (;;) {
1317 		u32 len32 = min_t(u64, len, bytes);
1318 		sector_t block = lbo >> sb->s_blocksize_bits;
1319 
1320 		do {
1321 			u32 op;
1322 			struct buffer_head *bh;
1323 
1324 			if (nbh >= ARRAY_SIZE(nb->bh)) {
1325 				err = -EINVAL;
1326 				goto out;
1327 			}
1328 
1329 			op = blocksize - off;
1330 			if (op > len32)
1331 				op = len32;
1332 
1333 			if (op == blocksize) {
1334 				bh = sb_getblk(sb, block);
1335 				if (!bh) {
1336 					err = -ENOMEM;
1337 					goto out;
1338 				}
1339 				if (buffer_locked(bh))
1340 					__wait_on_buffer(bh);
1341 				set_buffer_uptodate(bh);
1342 			} else {
1343 				bh = ntfs_bread(sb, block);
1344 				if (!bh) {
1345 					err = -EIO;
1346 					goto out;
1347 				}
1348 			}
1349 
1350 			nb->bh[nbh++] = bh;
1351 			bytes -= op;
1352 			if (!bytes) {
1353 				nb->nbufs = nbh;
1354 				return 0;
1355 			}
1356 
1357 			block += 1;
1358 			len32 -= op;
1359 			off = 0;
1360 		} while (len32);
1361 
1362 		vcn_next = vcn + clen;
1363 		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1364 		    vcn != vcn_next) {
1365 			err = -ENOENT;
1366 			goto out;
1367 		}
1368 
1369 		lbo = ((u64)lcn << cluster_bits);
1370 		len = ((u64)clen << cluster_bits);
1371 	}
1372 
1373 out:
1374 	while (nbh) {
1375 		put_bh(nb->bh[--nbh]);
1376 		nb->bh[nbh] = NULL;
1377 	}
1378 
1379 	nb->nbufs = 0;
1380 
1381 	return err;
1382 }
1383 
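/*
 * ntfs_write_bh - Copy a record into its buffers, applying fixups on the fly.
 *
 * The write-side mirror of ntfs_fix_post_read(): the update sequence
 * number is bumped (and written back into @rhdr's fixup slot), the record
 * is copied into the preloaded buffers of @nb, and the tail of every
 * 512-byte sector is stamped with the new number while the overwritten
 * words are saved into the on-disk fixup array.
 */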
1384 int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
1385 		  struct ntfs_buffers *nb, int sync)
1386 {
1387 	int err = 0;
1388 	struct super_block *sb = sbi->sb;
1389 	u32 block_size = sb->s_blocksize;
1390 	u32 bytes = nb->bytes;
1391 	u32 off = nb->off;
1392 	u16 fo = le16_to_cpu(rhdr->fix_off);
1393 	u16 fn = le16_to_cpu(rhdr->fix_num);
1394 	u32 idx;
1395 	__le16 *fixup;
1396 	__le16 sample;
1397 
1398 	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
1399 	    fn * SECTOR_SIZE > bytes) {
1400 		return -EINVAL;
1401 	}
1402 
1403 	for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
1404 		u32 op = block_size - off;
1405 		char *bh_data;
1406 		struct buffer_head *bh = nb->bh[idx];
1407 		__le16 *ptr, *end_data;
1408 
1409 		if (op > bytes)
1410 			op = bytes;
1411 
1412 		if (buffer_locked(bh))
1413 			__wait_on_buffer(bh);
1414 
1415 		lock_buffer(bh);
1416 
1417 		bh_data = bh->b_data + off;
1418 		end_data = Add2Ptr(bh_data, op);
1419 		memcpy(bh_data, rhdr, op);
1420 
1421 		if (!idx) {
1422 			u16 t16;
1423 
1424 			fixup = Add2Ptr(bh_data, fo);
1425 			sample = *fixup;
1426 			t16 = le16_to_cpu(sample);
1427 			if (t16 >= 0x7FFF) {
1428 				sample = *fixup = cpu_to_le16(1);
1429 			} else {
1430 				sample = cpu_to_le16(t16 + 1);
1431 				*fixup = sample;
1432 			}
1433 
1434 			*(__le16 *)Add2Ptr(rhdr, fo) = sample;
1435 		}
1436 
1437 		ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));
1438 
1439 		do {
1440 			*++fixup = *ptr;
1441 			*ptr = sample;
1442 			ptr += SECTOR_SIZE / sizeof(short);
1443 		} while (ptr < end_data);
1444 
1445 		set_buffer_uptodate(bh);
1446 		mark_buffer_dirty(bh);
1447 		unlock_buffer(bh);
1448 
1449 		if (sync) {
1450 			int err2 = sync_dirty_buffer(bh);
1451 
1452 			if (!err && err2)
1453 				err = err2;
1454 		}
1455 
1456 		bytes -= op;
1457 		rhdr = Add2Ptr(rhdr, op);
1458 	}
1459 
1460 	return err;
1461 }
1462 
1463 /*
1464  * ntfs_bio_pages - Read/write pages from/to disk.
1465  */
1466 int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1467 		   struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
1468 		   enum req_op op)
1469 {
1470 	int err = 0;
1471 	struct bio *new, *bio = NULL;
1472 	struct super_block *sb = sbi->sb;
1473 	struct block_device *bdev = sb->s_bdev;
1474 	struct page *page;
1475 	u8 cluster_bits = sbi->cluster_bits;
1476 	CLST lcn, clen, vcn, vcn_next;
1477 	u32 add, off, page_idx;
1478 	u64 lbo, len;
1479 	size_t run_idx;
1480 	struct blk_plug plug;
1481 
1482 	if (!bytes)
1483 		return 0;
1484 
1485 	blk_start_plug(&plug);
1486 
1487 	/* Round vbo and bytes out to 512-byte boundaries. */
1488 	lbo = (vbo + bytes + 511) & ~511ull;
1489 	vbo = vbo & ~511ull;
1490 	bytes = lbo - vbo;
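	/*
	 * Example of the rounding above: vbo = 0x3f0, bytes = 0x30 gives
	 * lbo = 0x61f & ~0x1ff = 0x600, vbo = 0x200, bytes = 0x400 - the
	 * request grows to whole 512-byte sectors on both ends.
	 */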
1491 
1492 	vcn = vbo >> cluster_bits;
1493 	if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
1494 		err = -ENOENT;
1495 		goto out;
1496 	}
1497 	off = vbo & sbi->cluster_mask;
1498 	page_idx = 0;
1499 	page = pages[0];
1500 
1501 	for (;;) {
1502 		lbo = ((u64)lcn << cluster_bits) + off;
1503 		len = ((u64)clen << cluster_bits) - off;
1504 new_bio:
1505 		new = bio_alloc(bdev, nr_pages - page_idx, op, GFP_NOFS);
1506 		if (bio) {
1507 			bio_chain(bio, new);
1508 			submit_bio(bio);
1509 		}
1510 		bio = new;
1511 		bio->bi_iter.bi_sector = lbo >> 9;
1512 
1513 		while (len) {
1514 			off = vbo & (PAGE_SIZE - 1);
1515 			add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;
1516 
1517 			if (bio_add_page(bio, page, add, off) < add)
1518 				goto new_bio;
1519 
1520 			if (bytes <= add)
1521 				goto out;
1522 			bytes -= add;
1523 			vbo += add;
1524 
1525 			if (add + off == PAGE_SIZE) {
1526 				page_idx += 1;
1527 				if (WARN_ON(page_idx >= nr_pages)) {
1528 					err = -EINVAL;
1529 					goto out;
1530 				}
1531 				page = pages[page_idx];
1532 			}
1533 
1534 			if (len <= add)
1535 				break;
1536 			len -= add;
1537 			lbo += add;
1538 		}
1539 
1540 		vcn_next = vcn + clen;
1541 		if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
1542 		    vcn != vcn_next) {
1543 			err = -ENOENT;
1544 			goto out;
1545 		}
1546 		off = 0;
1547 	}
1548 out:
1549 	if (bio) {
1550 		if (!err)
1551 			err = submit_bio_wait(bio);
1552 		bio_put(bio);
1553 	}
1554 	blk_finish_plug(&plug);
1555 
1556 	return err;
1557 }
1558 
1559 /*
1560  * ntfs_bio_fill_1 - Helper for ntfs_loadlog_and_replay().
1561  *
1562  * Fill the on-disk logfile range with -1;
1563  * this marks the logfile as empty.
1564  */
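/*
 * Implementation note: one page is filled with 0xFF and then added to the
 * bio over and over, so the whole range is written without allocating a
 * buffer of the logfile's size.
 */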
1565 int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
1566 {
1567 	int err = 0;
1568 	struct super_block *sb = sbi->sb;
1569 	struct block_device *bdev = sb->s_bdev;
1570 	u8 cluster_bits = sbi->cluster_bits;
1571 	struct bio *new, *bio = NULL;
1572 	CLST lcn, clen;
1573 	u64 lbo, len;
1574 	size_t run_idx;
1575 	struct page *fill;
1576 	void *kaddr;
1577 	struct blk_plug plug;
1578 
1579 	fill = alloc_page(GFP_KERNEL);
1580 	if (!fill)
1581 		return -ENOMEM;
1582 
1583 	kaddr = kmap_atomic(fill);
1584 	memset(kaddr, -1, PAGE_SIZE);
1585 	kunmap_atomic(kaddr);
1586 	flush_dcache_page(fill);
1587 	lock_page(fill);
1588 
1589 	if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
1590 		err = -ENOENT;
1591 		goto out;
1592 	}
1593 
1594 	/*
1595 	 * TODO: Try blkdev_issue_write_same.
1596 	 */
1597 	blk_start_plug(&plug);
1598 	do {
1599 		lbo = (u64)lcn << cluster_bits;
1600 		len = (u64)clen << cluster_bits;
1601 new_bio:
1602 		new = bio_alloc(bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOFS);
1603 		if (bio) {
1604 			bio_chain(bio, new);
1605 			submit_bio(bio);
1606 		}
1607 		bio = new;
1608 		bio->bi_iter.bi_sector = lbo >> 9;
1609 
1610 		for (;;) {
1611 			u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;
1612 
1613 			if (bio_add_page(bio, fill, add, 0) < add)
1614 				goto new_bio;
1615 
1616 			lbo += add;
1617 			if (len <= add)
1618 				break;
1619 			len -= add;
1620 		}
1621 	} while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));
1622 
1623 	if (!err)
1624 		err = submit_bio_wait(bio);
1625 	bio_put(bio);
1626 
1627 	blk_finish_plug(&plug);
1628 out:
1629 	unlock_page(fill);
1630 	put_page(fill);
1631 
1632 	return err;
1633 }
1634 
1635 int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1636 		    u64 vbo, u64 *lbo, u64 *bytes)
1637 {
1638 	u32 off;
1639 	CLST lcn, len;
1640 	u8 cluster_bits = sbi->cluster_bits;
1641 
1642 	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
1643 		return -ENOENT;
1644 
1645 	off = vbo & sbi->cluster_mask;
1646 	*lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
1647 	*bytes = ((u64)len << cluster_bits) - off;
1648 
1649 	return 0;
1650 }
1651 
1652 struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno, bool dir)
1653 {
1654 	int err = 0;
1655 	struct super_block *sb = sbi->sb;
1656 	struct inode *inode = new_inode(sb);
1657 	struct ntfs_inode *ni;
1658 
1659 	if (!inode)
1660 		return ERR_PTR(-ENOMEM);
1661 
1662 	ni = ntfs_i(inode);
1663 
1664 	err = mi_format_new(&ni->mi, sbi, rno, dir ? RECORD_FLAG_DIR : 0,
1665 			    false);
1666 	if (err)
1667 		goto out;
1668 
1669 	inode->i_ino = rno;
1670 	if (insert_inode_locked(inode) < 0) {
1671 		err = -EIO;
1672 		goto out;
1673 	}
1674 
1675 out:
1676 	if (err) {
1677 		iput(inode);
1678 		ni = ERR_PTR(err);
1679 	}
1680 	return ni;
1681 }
1682 
1683 /*
1684  * O:BAG:BAD:(A;OICI;FA;;;WD)
1685  * Owner S-1-5-32-544 (Administrators)
1686  * Group S-1-5-32-544 (Administrators)
1687  * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
1688  */
1689 const u8 s_default_security[] __aligned(8) = {
1690 	0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
1691 	0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
1692 	0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
1693 	0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
1694 	0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
1695 	0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
1696 	0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
1697 };
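/*
 * A decoding of the blob above (offsets into s_default_security; read
 * from the bytes, not an independent definition):
 *
 *	0x00 revision 1, control 0x8004 (SE_DACL_PRESENT | SE_SELF_RELATIVE)
 *	0x04 Owner at 0x30, Group at 0x40, no SACL, DACL at 0x14
 *	0x14 DACL: revision 2, size 0x1c, one ACE
 *	0x1c ACE: ACCESS_ALLOWED, flags OI|CI, mask 0x1f01ff
 *	     (FILE_ALL_ACCESS), SID S-1-1-0 (Everyone)
 *	0x30 Owner S-1-5-32-544, 0x40 Group S-1-5-32-544
 */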
1698 
1699 static_assert(sizeof(s_default_security) == 0x50);
1700 
1701 static inline u32 sid_length(const struct SID *sid)
1702 {
1703 	return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
1704 }
1705 
1706 /*
1707  * is_acl_valid
1708  *
1709  * Thanks to Mark Harmstone for the idea.
1710  */
1711 static bool is_acl_valid(const struct ACL *acl, u32 len)
1712 {
1713 	const struct ACE_HEADER *ace;
1714 	u32 i;
1715 	u16 ace_count, ace_size;
1716 
1717 	if (acl->AclRevision != ACL_REVISION &&
1718 	    acl->AclRevision != ACL_REVISION_DS) {
1719 		/*
1720 		 * This value should be ACL_REVISION, unless the ACL contains an
1721 		 * object-specific ACE, in which case this value must be ACL_REVISION_DS.
1722 		 * All ACEs in an ACL must be at the same revision level.
1723 		 */
1724 		return false;
1725 	}
1726 
1727 	if (acl->Sbz1)
1728 		return false;
1729 
1730 	if (le16_to_cpu(acl->AclSize) > len)
1731 		return false;
1732 
1733 	if (acl->Sbz2)
1734 		return false;
1735 
1736 	len -= sizeof(struct ACL);
1737 	ace = (struct ACE_HEADER *)&acl[1];
1738 	ace_count = le16_to_cpu(acl->AceCount);
1739 
1740 	for (i = 0; i < ace_count; i++) {
1741 		if (len < sizeof(struct ACE_HEADER))
1742 			return false;
1743 
1744 		ace_size = le16_to_cpu(ace->AceSize);
1745 		if (len < ace_size)
1746 			return false;
1747 
1748 		len -= ace_size;
1749 		ace = Add2Ptr(ace, ace_size);
1750 	}
1751 
1752 	return true;
1753 }
1754 
1755 bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
1756 {
1757 	u32 sd_owner, sd_group, sd_sacl, sd_dacl;
1758 
1759 	if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
1760 		return false;
1761 
1762 	if (sd->Revision != 1)
1763 		return false;
1764 
1765 	if (sd->Sbz1)
1766 		return false;
1767 
1768 	if (!(sd->Control & SE_SELF_RELATIVE))
1769 		return false;
1770 
1771 	sd_owner = le32_to_cpu(sd->Owner);
1772 	if (sd_owner) {
1773 		const struct SID *owner = Add2Ptr(sd, sd_owner);
1774 
1775 		if (sd_owner + offsetof(struct SID, SubAuthority) > len)
1776 			return false;
1777 
1778 		if (owner->Revision != 1)
1779 			return false;
1780 
1781 		if (sd_owner + sid_length(owner) > len)
1782 			return false;
1783 	}
1784 
1785 	sd_group = le32_to_cpu(sd->Group);
1786 	if (sd_group) {
1787 		const struct SID *group = Add2Ptr(sd, sd_group);
1788 
1789 		if (sd_group + offsetof(struct SID, SubAuthority) > len)
1790 			return false;
1791 
1792 		if (group->Revision != 1)
1793 			return false;
1794 
1795 		if (sd_group + sid_length(group) > len)
1796 			return false;
1797 	}
1798 
1799 	sd_sacl = le32_to_cpu(sd->Sacl);
1800 	if (sd_sacl) {
1801 		const struct ACL *sacl = Add2Ptr(sd, sd_sacl);
1802 
1803 		if (sd_sacl + sizeof(struct ACL) > len)
1804 			return false;
1805 
1806 		if (!is_acl_valid(sacl, len - sd_sacl))
1807 			return false;
1808 	}
1809 
1810 	sd_dacl = le32_to_cpu(sd->Dacl);
1811 	if (sd_dacl) {
1812 		const struct ACL *dacl = Add2Ptr(sd, sd_dacl);
1813 
1814 		if (sd_dacl + sizeof(struct ACL) > len)
1815 			return false;
1816 
1817 		if (!is_acl_valid(dacl, len - sd_dacl))
1818 			return false;
1819 	}
1820 
1821 	return true;
1822 }
1823 
1824 /*
1825  * ntfs_security_init - Load and parse $Secure.
1826  */
1827 int ntfs_security_init(struct ntfs_sb_info *sbi)
1828 {
1829 	int err;
1830 	struct super_block *sb = sbi->sb;
1831 	struct inode *inode;
1832 	struct ntfs_inode *ni;
1833 	struct MFT_REF ref;
1834 	struct ATTRIB *attr;
1835 	struct ATTR_LIST_ENTRY *le;
1836 	u64 sds_size;
1837 	size_t off;
1838 	struct NTFS_DE *ne;
1839 	struct NTFS_DE_SII *sii_e;
1840 	struct ntfs_fnd *fnd_sii = NULL;
1841 	const struct INDEX_ROOT *root_sii;
1842 	const struct INDEX_ROOT *root_sdh;
1843 	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
1844 	struct ntfs_index *indx_sii = &sbi->security.index_sii;
1845 
1846 	ref.low = cpu_to_le32(MFT_REC_SECURE);
1847 	ref.high = 0;
1848 	ref.seq = cpu_to_le16(MFT_REC_SECURE);
1849 
1850 	inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
1851 	if (IS_ERR(inode)) {
1852 		err = PTR_ERR(inode);
1853 		ntfs_err(sb, "Failed to load $Secure.");
1854 		inode = NULL;
1855 		goto out;
1856 	}
1857 
1858 	ni = ntfs_i(inode);
1859 
1860 	le = NULL;
1861 
1862 	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
1863 			    ARRAY_SIZE(SDH_NAME), NULL, NULL);
1864 	if (!attr) {
1865 		err = -EINVAL;
1866 		goto out;
1867 	}
1868 
1869 	if (!(root_sdh = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
1870 	    root_sdh->type != ATTR_ZERO ||
1871 	    root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH ||
1872 	    offsetof(struct INDEX_ROOT, ihdr) +
1873 			le32_to_cpu(root_sdh->ihdr.used) >
1874 			le32_to_cpu(attr->res.data_size)) {
1875 		err = -EINVAL;
1876 		goto out;
1877 	}
1878 
1879 	err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
1880 	if (err)
1881 		goto out;
1882 
1883 	attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
1884 			    ARRAY_SIZE(SII_NAME), NULL, NULL);
1885 	if (!attr) {
1886 		err = -EINVAL;
1887 		goto out;
1888 	}
1889 
1890 	if (!(root_sii = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
1891 	    root_sii->type != ATTR_ZERO ||
1892 	    root_sii->rule != NTFS_COLLATION_TYPE_UINT ||
1893 	    offsetof(struct INDEX_ROOT, ihdr) +
1894 			le32_to_cpu(root_sii->ihdr.used) >
1895 			le32_to_cpu(attr->res.data_size)) {
1896 		err = -EINVAL;
1897 		goto out;
1898 	}
1899 
1900 	err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
1901 	if (err)
1902 		goto out;
1903 
1904 	fnd_sii = fnd_get();
1905 	if (!fnd_sii) {
1906 		err = -ENOMEM;
1907 		goto out;
1908 	}
1909 
1910 	sds_size = inode->i_size;
1911 
1912 	/* Find the last valid Id. */
1913 	sbi->security.next_id = SECURITY_ID_FIRST;
1914 	/* Always write new security at the end of bucket. */
1915 	sbi->security.next_off =
1916 		ALIGN(sds_size - SecurityDescriptorsBlockSize, 16);
1917 
1918 	off = 0;
1919 	ne = NULL;
1920 
1921 	for (;;) {
1922 		u32 next_id;
1923 
1924 		err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
1925 		if (err || !ne)
1926 			break;
1927 
1928 		sii_e = (struct NTFS_DE_SII *)ne;
1929 		if (le16_to_cpu(ne->view.data_size) < SIZEOF_SECURITY_HDR)
1930 			continue;
1931 
1932 		next_id = le32_to_cpu(sii_e->sec_id) + 1;
1933 		if (next_id >= sbi->security.next_id)
1934 			sbi->security.next_id = next_id;
1935 	}
1936 
1937 	sbi->security.ni = ni;
1938 	inode = NULL;
1939 out:
1940 	iput(inode);
1941 	fnd_put(fnd_sii);
1942 
1943 	return err;
1944 }
1945 
1946 /*
1947  * ntfs_get_security_by_id - Read security descriptor by id.
1948  */
1949 int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
1950 			    struct SECURITY_DESCRIPTOR_RELATIVE **sd,
1951 			    size_t *size)
1952 {
1953 	int err;
1954 	int diff;
1955 	struct ntfs_inode *ni = sbi->security.ni;
1956 	struct ntfs_index *indx = &sbi->security.index_sii;
1957 	void *p = NULL;
1958 	struct NTFS_DE_SII *sii_e;
1959 	struct ntfs_fnd *fnd_sii;
1960 	struct SECURITY_HDR d_security;
1961 	const struct INDEX_ROOT *root_sii;
1962 	u32 t32;
1963 
1964 	*sd = NULL;
1965 
1966 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
1967 
1968 	fnd_sii = fnd_get();
1969 	if (!fnd_sii) {
1970 		err = -ENOMEM;
1971 		goto out;
1972 	}
1973 
1974 	root_sii = indx_get_root(indx, ni, NULL, NULL);
1975 	if (!root_sii) {
1976 		err = -EINVAL;
1977 		goto out;
1978 	}
1979 
1980 	/* Try to find this SECURITY descriptor in SII indexes. */
1981 	err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
1982 			NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
1983 	if (err)
1984 		goto out;
1985 
1986 	if (diff)
1987 		goto out;
1988 
1989 	t32 = le32_to_cpu(sii_e->sec_hdr.size);
1990 	if (t32 < SIZEOF_SECURITY_HDR) {
1991 		err = -EINVAL;
1992 		goto out;
1993 	}
1994 
1995 	if (t32 > SIZEOF_SECURITY_HDR + 0x10000) {
1996 		/* Security descriptor looks too big; 0x10000 is an arbitrary large limit. */
1997 		err = -EFBIG;
1998 		goto out;
1999 	}
2000 
2001 	*size = t32 - SIZEOF_SECURITY_HDR;
2002 
2003 	p = kmalloc(*size, GFP_NOFS);
2004 	if (!p) {
2005 		err = -ENOMEM;
2006 		goto out;
2007 	}
2008 
2009 	err = ntfs_read_run_nb(sbi, &ni->file.run,
2010 			       le64_to_cpu(sii_e->sec_hdr.off), &d_security,
2011 			       sizeof(d_security), NULL);
2012 	if (err)
2013 		goto out;
2014 
2015 	if (memcmp(&d_security, &sii_e->sec_hdr, SIZEOF_SECURITY_HDR)) {
2016 		err = -EINVAL;
2017 		goto out;
2018 	}
2019 
2020 	err = ntfs_read_run_nb(sbi, &ni->file.run,
2021 			       le64_to_cpu(sii_e->sec_hdr.off) +
2022 				       SIZEOF_SECURITY_HDR,
2023 			       p, *size, NULL);
2024 	if (err)
2025 		goto out;
2026 
2027 	*sd = p;
2028 	p = NULL;
2029 
2030 out:
2031 	kfree(p);
2032 	fnd_put(fnd_sii);
2033 	ni_unlock(ni);
2034 
2035 	return err;
2036 }
2037 
2038 /*
2039  * ntfs_insert_security - Insert security descriptor into $Secure::SDS.
2040  *
2041  * SECURITY Descriptor Stream data is organized into chunks of 256K bytes
2042  * and it contains a mirror copy of each security descriptor.  When writing
2043  * to a security descriptor at location X, another copy will be written at
2044  * location (X+256K).
2045  * When writing a security descriptor that will cross the 256K boundary,
2046  * the pointer will be advanced by 256K to skip
2047  * over the mirror portion.
2048  */
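/*
 * A layout sketch to illustrate (256K == SecurityDescriptorsBlockSize):
 *
 *	[0 .. 256K)    descriptors      [256K .. 512K) mirror of those
 *	[512K .. 768K) descriptors      [768K .. 1M)   mirror of those
 *
 * A descriptor written at X is duplicated at X + 256K, and
 * sbi->security.next_off only ever points into the non-mirror chunks.
 */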
2049 int ntfs_insert_security(struct ntfs_sb_info *sbi,
2050 			 const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
2051 			 u32 size_sd, __le32 *security_id, bool *inserted)
2052 {
2053 	int err, diff;
2054 	struct ntfs_inode *ni = sbi->security.ni;
2055 	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
2056 	struct ntfs_index *indx_sii = &sbi->security.index_sii;
2057 	struct NTFS_DE_SDH *e;
2058 	struct NTFS_DE_SDH sdh_e;
2059 	struct NTFS_DE_SII sii_e;
2060 	struct SECURITY_HDR *d_security;
2061 	u32 new_sec_size = size_sd + SIZEOF_SECURITY_HDR;
2062 	u32 aligned_sec_size = ALIGN(new_sec_size, 16);
2063 	struct SECURITY_KEY hash_key;
2064 	struct ntfs_fnd *fnd_sdh = NULL;
2065 	const struct INDEX_ROOT *root_sdh;
2066 	const struct INDEX_ROOT *root_sii;
2067 	u64 mirr_off, new_sds_size;
2068 	u32 next, left;
2069 
2070 	static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
2071 		      SecurityDescriptorsBlockSize);
2072 
2073 	hash_key.hash = security_hash(sd, size_sd);
2074 	hash_key.sec_id = SECURITY_ID_INVALID;
2075 
2076 	if (inserted)
2077 		*inserted = false;
2078 	*security_id = SECURITY_ID_INVALID;
2079 
2080 	/* Allocate a temporary buffer. */
	d_security = kzalloc(aligned_sec_size, GFP_NOFS);
	if (!d_security)
		return -ENOMEM;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);

	fnd_sdh = fnd_get();
	if (!fnd_sdh) {
		err = -ENOMEM;
		goto out;
	}

	root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);
	if (!root_sdh) {
		err = -EINVAL;
		goto out;
	}

	root_sii = indx_get_root(indx_sii, ni, NULL, NULL);
	if (!root_sii) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * Check whether an identical security descriptor already exists.
	 * Look it up in the SDH index by hash to get its offset in SDS.
	 */
	err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
			&d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
			fnd_sdh);
	if (err)
		goto out;

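	/*
	 * Hash collisions are possible, so walk every SDH entry with this
	 * hash and compare the full descriptor bodies before reusing an id.
	 */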
	while (e) {
		if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
			err = ntfs_read_run_nb(sbi, &ni->file.run,
					       le64_to_cpu(e->sec_hdr.off),
					       d_security, new_sec_size, NULL);
			if (err)
				goto out;

			if (le32_to_cpu(d_security->size) == new_sec_size &&
			    d_security->key.hash == hash_key.hash &&
			    !memcmp(d_security + 1, sd, size_sd)) {
				/* Such security already exists. */
				*security_id = d_security->key.sec_id;
				err = 0;
				goto out;
			}
		}

		err = indx_find_sort(indx_sdh, ni, root_sdh,
				     (struct NTFS_DE **)&e, fnd_sdh);
		if (err)
			goto out;

		if (!e || e->key.hash != hash_key.hash)
			break;
	}

	/* Compute the unused space left in the current 256K block. */
	next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
	left = SecurityDescriptorsBlockSize - next;

	/* A descriptor never crosses a SecurityDescriptorsBlockSize boundary. */
	if (left < new_sec_size) {
		/* Skip the 'left' gap bytes plus the mirror block. */
		sbi->security.next_off += SecurityDescriptorsBlockSize + left;
	}
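	/*
	 * Worked example of the skip above, with hypothetical numbers:
	 * next_off == 0x3fff0 and new_sec_size == 0x28 give
	 * next == 0x3fff0 and left == 0x10 < 0x28, so
	 * next_off += 0x40000 + 0x10, i.e. next_off == 0x80000,
	 * the start of the next main (non-mirror) block.
	 */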

	/*
	 * Zeroing the tail of the previous security (not implemented):
	 * used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);
	 *
	 * Example:
	 * 0x40438 == ni->vfs_inode.i_size
	 * 0x00440 == sbi->security.next_off
	 * need to zero [0x438-0x440)
	 * if (next > used) {
	 *	u32 tozero = next - used;
	 *	zero "tozero" bytes from sbi->security.next_off - tozero;
	 * }
	 */

	/* Format new security descriptor. */
	d_security->key.hash = hash_key.hash;
	d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
	d_security->off = cpu_to_le64(sbi->security.next_off);
	d_security->size = cpu_to_le32(new_sec_size);
	memcpy(d_security + 1, sd, size_sd);

	/* Write main SDS bucket. */
	err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
				d_security, aligned_sec_size, 0);
	if (err)
		goto out;

	mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
	new_sds_size = mirr_off + aligned_sec_size;

	if (new_sds_size > ni->vfs_inode.i_size) {
		err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
				    ARRAY_SIZE(SDS_NAME), &ni->file.run,
				    new_sds_size, &new_sds_size, false, NULL);
		if (err)
			goto out;
	}

	/* Write copy SDS bucket. */
	err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
				aligned_sec_size, 0);
	if (err)
		goto out;

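	/*
	 * Layout note for the two entries filled below: both start with a
	 * generic NTFS_DE header. The SII entry is keyed by the 32-bit
	 * security id and the SDH entry by the (hash, security id) pair;
	 * each carries a copy of the SECURITY_HDR as its data, which is
	 * why view.data_off points at the sec_hdr member.
	 */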
	/* Fill SII entry. */
	sii_e.de.view.data_off =
		cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
	sii_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
	sii_e.de.view.res = 0;
	sii_e.de.size = cpu_to_le16(SIZEOF_SII_DIRENTRY);
	sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
	sii_e.de.flags = 0;
	sii_e.de.res = 0;
	sii_e.sec_id = d_security->key.sec_id;
	memcpy(&sii_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);

	err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL, 0);
	if (err)
		goto out;

	/* Fill SDH entry. */
	sdh_e.de.view.data_off =
		cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
	sdh_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
	sdh_e.de.view.res = 0;
	sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
	sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
	sdh_e.de.flags = 0;
	sdh_e.de.res = 0;
	sdh_e.key.hash = d_security->key.hash;
	sdh_e.key.sec_id = d_security->key.sec_id;
	memcpy(&sdh_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
	sdh_e.magic[0] = cpu_to_le16('I');
	sdh_e.magic[1] = cpu_to_le16('I');

	fnd_clear(fnd_sdh);
	err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
				fnd_sdh, 0);
	if (err)
		goto out;

	*security_id = d_security->key.sec_id;
	if (inserted)
		*inserted = true;

	/* Update id and offset for the next descriptor. */
	sbi->security.next_id += 1;
	sbi->security.next_off += aligned_sec_size;

out:
	fnd_put(fnd_sdh);
	mark_inode_dirty(&ni->vfs_inode);
	ni_unlock(ni);
	kfree(d_security);

	return err;
}
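
/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 * deduplicate a descriptor and report whether a new SDS record was
 * created. The function name is an assumption for illustration.
 */
#if 0
static int example_store_sd(struct ntfs_sb_info *sbi,
			    const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
			    u32 sd_size, __le32 *out_id)
{
	bool inserted;
	int err = ntfs_insert_security(sbi, sd, sd_size, out_id, &inserted);

	/* An existing identical descriptor is reused: inserted == false. */
	if (!err && inserted)
		pr_debug("new security id %u\n", le32_to_cpu(*out_id));
	return err;
}
#endif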

/*
 * ntfs_reparse_init - Load and parse $Extend/$Reparse.
 */
int ntfs_reparse_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct ntfs_inode *ni = sbi->reparse.ni;
	struct ntfs_index *indx = &sbi->reparse.index_r;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	const struct INDEX_ROOT *root_r;

	if (!ni)
		return 0;

	le = NULL;
	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
			    ARRAY_SIZE(SR_NAME), NULL, NULL);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

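	/*
	 * The $R index does not index an attribute: its root must carry a
	 * zero attribute type and collate keys as a run of unsigned ints.
	 */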
	root_r = resident_data(attr);
	if (root_r->type != ATTR_ZERO ||
	    root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);
	if (err)
		goto out;

out:
	return err;
}

/*
 * ntfs_objid_init - Load and parse $Extend/$ObjId.
 */
int ntfs_objid_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct ntfs_inode *ni = sbi->objid.ni;
	struct ntfs_index *indx = &sbi->objid.index_o;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	const struct INDEX_ROOT *root;

	if (!ni)
		return 0;

	le = NULL;
	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
			    ARRAY_SIZE(SO_NAME), NULL, NULL);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

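	/*
	 * Like $R, the $O index indexes plain keys (object ids rather than
	 * attributes), so the root type must be zero with uint collation.
	 */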
	root = resident_data(attr);
	if (root->type != ATTR_ZERO ||
	    root->rule != NTFS_COLLATION_TYPE_UINTS) {
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);
	if (err)
		goto out;

out:
	return err;
}

int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
{
	int err;
	struct ntfs_inode *ni = sbi->objid.ni;
	struct ntfs_index *indx = &sbi->objid.index_o;

	if (!ni)
		return -EINVAL;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);

	err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);

	mark_inode_dirty(&ni->vfs_inode);
	ni_unlock(ni);

	return err;
}

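/*
 * The $R entry built in ntfs_insert_reparse() below is keyed by the
 * { ReparseTag, MFT_REF } pair and carries no data payload: view.data_off
 * points at the trailing 'zero' member of struct NTFS_DE_R, past the key.
 */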
int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
			const struct MFT_REF *ref)
{
	int err;
	struct ntfs_inode *ni = sbi->reparse.ni;
	struct ntfs_index *indx = &sbi->reparse.index_r;
	struct NTFS_DE_R re;

	if (!ni)
		return -EINVAL;

	memset(&re, 0, sizeof(re));

	re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
	re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
	re.de.key_size = cpu_to_le16(sizeof(re.key));

	re.key.ReparseTag = rtag;
	memcpy(&re.key.ref, ref, sizeof(*ref));

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);

	err = indx_insert_entry(indx, ni, &re.de, NULL, NULL, 0);

	mark_inode_dirty(&ni->vfs_inode);
	ni_unlock(ni);

	return err;
}

int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
			const struct MFT_REF *ref)
{
	int err, diff;
	struct ntfs_inode *ni = sbi->reparse.ni;
	struct ntfs_index *indx = &sbi->reparse.index_r;
	struct ntfs_fnd *fnd = NULL;
	struct REPARSE_KEY rkey;
	struct NTFS_DE_R *re;
	struct INDEX_ROOT *root_r;

	if (!ni)
		return -EINVAL;

	rkey.ReparseTag = rtag;
	rkey.ref = *ref;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);

	if (rtag) {
		err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
		goto out1;
	}

	fnd = fnd_get();
	if (!fnd) {
		err = -ENOMEM;
		goto out1;
	}

	root_r = indx_get_root(indx, ni, NULL, NULL);
	if (!root_r) {
		err = -EINVAL;
		goto out;
	}

	/* Pass 1 as 'ctx' to force the key compare to ignore rkey.ReparseTag. */
	err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
			(struct NTFS_DE **)&re, fnd);
	if (err)
		goto out;

	if (memcmp(&re->key.ref, ref, sizeof(*ref))) {
		/* Should be impossible; the volume is probably corrupt. */
		goto out;
	}

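	/*
	 * Copy the on-disk key: it holds the real ReparseTag, which the
	 * caller did not know (rtag == 0), so the exact delete below can
	 * match the full key.
	 */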
	memcpy(&rkey, &re->key, sizeof(rkey));

	fnd_put(fnd);
	fnd = NULL;

	err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
	if (err)
		goto out;

out:
	fnd_put(fnd);

out1:
	mark_inode_dirty(&ni->vfs_inode);
	ni_unlock(ni);

	return err;
}

static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
					  CLST len)
{
	ntfs_unmap_meta(sbi->sb, lcn, len);
	ntfs_discard(sbi, lcn, len);
}

void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
{
	CLST end, i, zone_len, zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	bool dirty = false;

	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	if (!wnd_is_used(wnd, lcn, len)) {
		/* Mark the volume dirty after wnd->rw_lock is released. */
		dirty = true;

		end = lcn + len;
		len = 0;
		for (i = lcn; i < end; i++) {
			if (wnd_is_used(wnd, i, 1)) {
				if (!len)
					lcn = i;
				len += 1;
				continue;
			}

			if (!len)
				continue;

			if (trim)
				ntfs_unmap_and_discard(sbi, lcn, len);

			wnd_set_free(wnd, lcn, len);
			len = 0;
		}

		if (!len)
			goto out;
	}

	if (trim)
		ntfs_unmap_and_discard(sbi, lcn, len);
	wnd_set_free(wnd, lcn, len);

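	/*
	 * Worked example with hypothetical numbers: if the zone is
	 * [0x100, 0x180) (zone_len == 0x80) and we freed [0x180, 0x1a0),
	 * the freed run borders the zone tail, so the zone grows to
	 * [0x100, 0x1a0), capped at sbi->zone_max.
	 */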
	/* Append to the MFT zone, if possible. */
	zone_len = wnd_zone_len(wnd);
	zlen = min(zone_len + len, sbi->zone_max);

	if (zlen == zone_len) {
		/* MFT zone already has maximum size. */
	} else if (!zone_len) {
		/* Create MFT zone only if 'zlen' is large enough. */
		if (zlen == sbi->zone_max)
			wnd_zone_set(wnd, lcn, zlen);
	} else {
		CLST zone_lcn = wnd_zone_bit(wnd);

		if (lcn + len == zone_lcn) {
			/* Append into the head of the MFT zone. */
			wnd_zone_set(wnd, lcn, zlen);
		} else if (zone_lcn + zone_len == lcn) {
			/* Append into the tail of the MFT zone. */
			wnd_zone_set(wnd, zone_lcn, zlen);
		}
	}

out:
	up_write(&wnd->rw_lock);
	if (dirty)
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
}

/*
 * run_deallocate - Deallocate clusters.
 */
int run_deallocate(struct ntfs_sb_info *sbi, struct runs_tree *run, bool trim)
{
	CLST lcn, len;
	size_t idx = 0;

	while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
		if (lcn == SPARSE_LCN)
			continue;

		mark_as_free_ex(sbi, lcn, len, trim);
	}

	return 0;
}
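
/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 * release every cluster an attribute's run still maps, then drop the
 * mapping itself with the driver's run_close() helper.
 */
#if 0
static void example_free_attr_clusters(struct ntfs_sb_info *sbi,
				       struct runs_tree *run)
{
	/* trim == true also issues discard/TRIM for the freed clusters. */
	run_deallocate(sbi, run, true);
	run_close(run);
}
#endif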