1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 *
4 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
5 *
6 */
7
8 #include <linux/blkdev.h>
9 #include <linux/buffer_head.h>
10 #include <linux/fs.h>
11 #include <linux/kernel.h>
12
13 #include "debug.h"
14 #include "ntfs.h"
15 #include "ntfs_fs.h"
16
17 // clang-format off
18 const struct cpu_str NAME_MFT = {
19 4, 0, { '$', 'M', 'F', 'T' },
20 };
21 const struct cpu_str NAME_MIRROR = {
22 8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
23 };
24 const struct cpu_str NAME_LOGFILE = {
25 8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
26 };
27 const struct cpu_str NAME_VOLUME = {
28 7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
29 };
30 const struct cpu_str NAME_ATTRDEF = {
31 8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
32 };
33 const struct cpu_str NAME_ROOT = {
34 1, 0, { '.' },
35 };
36 const struct cpu_str NAME_BITMAP = {
37 7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
38 };
39 const struct cpu_str NAME_BOOT = {
40 5, 0, { '$', 'B', 'o', 'o', 't' },
41 };
42 const struct cpu_str NAME_BADCLUS = {
43 8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
44 };
45 const struct cpu_str NAME_QUOTA = {
46 6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
47 };
48 const struct cpu_str NAME_SECURE = {
49 7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
50 };
51 const struct cpu_str NAME_UPCASE = {
52 7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
53 };
54 const struct cpu_str NAME_EXTEND = {
55 7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
56 };
57 const struct cpu_str NAME_OBJID = {
58 6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
59 };
60 const struct cpu_str NAME_REPARSE = {
61 8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
62 };
63 const struct cpu_str NAME_USNJRNL = {
64 8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
65 };
66 const __le16 BAD_NAME[4] = {
67 cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
68 };
69 const __le16 I30_NAME[4] = {
70 cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
71 };
72 const __le16 SII_NAME[4] = {
73 cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
74 };
75 const __le16 SDH_NAME[4] = {
76 cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
77 };
78 const __le16 SDS_NAME[4] = {
79 cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
80 };
81 const __le16 SO_NAME[2] = {
82 cpu_to_le16('$'), cpu_to_le16('O'),
83 };
84 const __le16 SQ_NAME[2] = {
85 cpu_to_le16('$'), cpu_to_le16('Q'),
86 };
87 const __le16 SR_NAME[2] = {
88 cpu_to_le16('$'), cpu_to_le16('R'),
89 };
90
91 #ifdef CONFIG_NTFS3_LZX_XPRESS
92 const __le16 WOF_NAME[17] = {
93 cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
94 cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
95 cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
96 cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
97 cpu_to_le16('a'),
98 };
99 #endif
100
101 // clang-format on
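/*
 * Editorial note: each cpu_str initializer above is { character count,
 * padding byte, characters }, e.g. NAME_MFT is the 4-character name "$MFT":
 *
 *	{ 4, 0, { '$', 'M', 'F', 'T' } }
 *
 * The exact struct cpu_str layout is defined in ntfs_fs.h; this is only a
 * reading aid for the tables above.
 */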
102
103 /*
104 * ntfs_fix_pre_write - Insert fixups into @rhdr before writing to disk.
105 */
106 bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
107 {
108 u16 *fixup, *ptr;
109 u16 sample;
110 u16 fo = le16_to_cpu(rhdr->fix_off);
111 u16 fn = le16_to_cpu(rhdr->fix_num);
112
113 if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
114 fn * SECTOR_SIZE > bytes) {
115 return false;
116 }
117
118 /* Get fixup pointer. */
119 fixup = Add2Ptr(rhdr, fo);
120
121 if (*fixup >= 0x7FFF)
122 *fixup = 1;
123 else
124 *fixup += 1;
125
126 sample = *fixup;
127
128 ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
129
130 while (fn--) {
131 *++fixup = *ptr;
132 *ptr = sample;
133 ptr += SECTOR_SIZE / sizeof(short);
134 }
135 return true;
136 }
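/*
 * Worked example (editorial, assuming 1024-byte records and 512-byte
 * sectors, i.e. fix_num == 3): ntfs_fix_pre_write() bumps the sequence
 * counter at fix_off (a value of 1 becomes 2; 0x7FFF wraps back to 1),
 * saves the last u16 of each 512-byte sector into the fixup array and
 * stamps the counter over each sector tail:
 *
 *	fixup[0] = 2;                           // new sequence number
 *	fixup[1] = old tail of sector 0;
 *	fixup[2] = old tail of sector 1;
 *	tail of sector 0 = tail of sector 1 = 2;
 *
 * ntfs_fix_post_read() below undoes this and detects torn writes.
 */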
137
138 /*
139 * ntfs_fix_post_read - Remove fixups after reading from disk.
140 *
141 * Return: < 0 if error, 0 if ok, 1 if need to update fixups.
142 */
143 int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
144 bool simple)
145 {
146 int ret;
147 u16 *fixup, *ptr;
148 u16 sample, fo, fn;
149
150 fo = le16_to_cpu(rhdr->fix_off);
151 fn = simple ? ((bytes >> SECTOR_SHIFT) + 1)
152 : le16_to_cpu(rhdr->fix_num);
153
154 /* Check errors. */
155 if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
156 fn * SECTOR_SIZE > bytes) {
157 return -E_NTFS_CORRUPT;
158 }
159
160 /* Get fixup pointer. */
161 fixup = Add2Ptr(rhdr, fo);
162 sample = *fixup;
163 ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
164 ret = 0;
165
166 while (fn--) {
167 /* Test current word. */
168 if (*ptr != sample) {
169 /* Fixup does not match! Is it serious error? */
170 ret = -E_NTFS_FIXUP;
171 }
172
173 /* Replace fixup. */
174 *ptr = *++fixup;
175 ptr += SECTOR_SIZE / sizeof(short);
176 }
177
178 return ret;
179 }
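/*
 * Usage sketch (editorial): a typical read path runs the raw read and then
 * strips the fixups, exactly as ntfs_read_bh() does further down:
 *
 *	err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);
 *	if (!err)
 *		err = ntfs_fix_post_read(rhdr, nb->bytes, true);
 *
 * A return of -E_NTFS_FIXUP means the payload was read but its fixups did
 * not match; callers may treat it as "record needs to be rewritten".
 */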
180
181 /*
182 * ntfs_extend_init - Load $Extend file.
183 */
184 int ntfs_extend_init(struct ntfs_sb_info *sbi)
185 {
186 int err;
187 struct super_block *sb = sbi->sb;
188 struct inode *inode, *inode2;
189 struct MFT_REF ref;
190
191 if (sbi->volume.major_ver < 3) {
192 ntfs_notice(sb, "Skip $Extend 'cause NTFS version");
193 return 0;
194 }
195
196 ref.low = cpu_to_le32(MFT_REC_EXTEND);
197 ref.high = 0;
198 ref.seq = cpu_to_le16(MFT_REC_EXTEND);
199 inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
200 if (IS_ERR(inode)) {
201 err = PTR_ERR(inode);
202 ntfs_err(sb, "Failed to load $Extend.");
203 inode = NULL;
204 goto out;
205 }
206
207 /* If ntfs_iget5() reads from disk, it never returns a bad inode. */
208 if (!S_ISDIR(inode->i_mode)) {
209 err = -EINVAL;
210 goto out;
211 }
212
213 /* Try to find $ObjId */
214 inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
215 if (inode2 && !IS_ERR(inode2)) {
216 if (is_bad_inode(inode2)) {
217 iput(inode2);
218 } else {
219 sbi->objid.ni = ntfs_i(inode2);
220 sbi->objid_no = inode2->i_ino;
221 }
222 }
223
224 /* Try to find $Quota */
225 inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
226 if (inode2 && !IS_ERR(inode2)) {
227 sbi->quota_no = inode2->i_ino;
228 iput(inode2);
229 }
230
231 /* Try to find $Reparse */
232 inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
233 if (inode2 && !IS_ERR(inode2)) {
234 sbi->reparse.ni = ntfs_i(inode2);
235 sbi->reparse_no = inode2->i_ino;
236 }
237
238 /* Try to find $UsnJrnl */
239 inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
240 if (inode2 && !IS_ERR(inode2)) {
241 sbi->usn_jrnl_no = inode2->i_ino;
242 iput(inode2);
243 }
244
245 err = 0;
246 out:
247 iput(inode);
248 return err;
249 }
250
251 int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
252 {
253 int err = 0;
254 struct super_block *sb = sbi->sb;
255 bool initialized = false;
256 struct MFT_REF ref;
257 struct inode *inode;
258
259 /* Check for 4GB. */
260 if (ni->vfs_inode.i_size >= 0x100000000ull) {
261 ntfs_err(sb, "\x24LogFile is too big");
262 err = -EINVAL;
263 goto out;
264 }
265
266 sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;
267
268 ref.low = cpu_to_le32(MFT_REC_MFT);
269 ref.high = 0;
270 ref.seq = cpu_to_le16(1);
271
272 inode = ntfs_iget5(sb, &ref, NULL);
273
274 if (IS_ERR(inode))
275 inode = NULL;
276
277 if (!inode) {
278 /* Try to use MFT copy. */
279 u64 t64 = sbi->mft.lbo;
280
281 sbi->mft.lbo = sbi->mft.lbo2;
282 inode = ntfs_iget5(sb, &ref, NULL);
283 sbi->mft.lbo = t64;
284 if (IS_ERR(inode))
285 inode = NULL;
286 }
287
288 if (!inode) {
289 err = -EINVAL;
290 ntfs_err(sb, "Failed to load $MFT.");
291 goto out;
292 }
293
294 sbi->mft.ni = ntfs_i(inode);
295
296 /* $LogFile should not contain an attribute list. */
297 err = ni_load_all_mi(sbi->mft.ni);
298 if (!err)
299 err = log_replay(ni, &initialized);
300
301 iput(inode);
302 sbi->mft.ni = NULL;
303
304 sync_blockdev(sb->s_bdev);
305 invalidate_bdev(sb->s_bdev);
306
307 if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
308 err = 0;
309 goto out;
310 }
311
312 if (sb_rdonly(sb) || !initialized)
313 goto out;
314
315 /* Fill $LogFile with -1 if it is initialized. */
316 err = ntfs_bio_fill_1(sbi, &ni->file.run);
317
318 out:
319 sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;
320
321 return err;
322 }
323
324 /*
325 * ntfs_query_def
326 *
327 * Return: Current ATTR_DEF_ENTRY for given attribute type.
328 */
329 const struct ATTR_DEF_ENTRY *ntfs_query_def(struct ntfs_sb_info *sbi,
330 enum ATTR_TYPE type)
331 {
332 int type_in = le32_to_cpu(type);
333 size_t min_idx = 0;
334 size_t max_idx = sbi->def_entries - 1;
335
336 while (min_idx <= max_idx) {
337 size_t i = min_idx + ((max_idx - min_idx) >> 1);
338 const struct ATTR_DEF_ENTRY *entry = sbi->def_table + i;
339 int diff = le32_to_cpu(entry->type) - type_in;
340
341 if (!diff)
342 return entry;
343 if (diff < 0)
344 min_idx = i + 1;
345 else if (i)
346 max_idx = i - 1;
347 else
348 return NULL;
349 }
350 return NULL;
351 }
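/*
 * Usage sketch (editorial, hypothetical caller): sbi->def_table is the
 * $AttrDef table sorted by attribute type, hence the binary search above.
 * A caller validating an attribute type might do:
 *
 *	const struct ATTR_DEF_ENTRY *de = ntfs_query_def(sbi, ATTR_DATA);
 *
 *	if (!de)
 *		return -EINVAL;	// type not defined in $AttrDef
 */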
352
353 /*
354 * ntfs_look_for_free_space - Look for a free space in bitmap.
355 */
356 int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
357 CLST *new_lcn, CLST *new_len,
358 enum ALLOCATE_OPT opt)
359 {
360 int err;
361 CLST alen;
362 struct super_block *sb = sbi->sb;
363 size_t alcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
364 struct wnd_bitmap *wnd = &sbi->used.bitmap;
365
366 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
367 if (opt & ALLOCATE_MFT) {
368 zlen = wnd_zone_len(wnd);
369
370 if (!zlen) {
371 err = ntfs_refresh_zone(sbi);
372 if (err)
373 goto up_write;
374
375 zlen = wnd_zone_len(wnd);
376 }
377
378 if (!zlen) {
379 ntfs_err(sbi->sb, "no free space to extend mft");
380 err = -ENOSPC;
381 goto up_write;
382 }
383
384 lcn = wnd_zone_bit(wnd);
385 alen = min_t(CLST, len, zlen);
386
387 wnd_zone_set(wnd, lcn + alen, zlen - alen);
388
389 err = wnd_set_used(wnd, lcn, alen);
390 if (err)
391 goto up_write;
392
393 alcn = lcn;
394 goto space_found;
395 }
396 /*
397 * Because cluster 0 is always in use, lcn == 0 means that we should use
398 * the cached value of 'next_free_lcn' to improve performance.
399 */
400 if (!lcn)
401 lcn = sbi->used.next_free_lcn;
402
403 if (lcn >= wnd->nbits)
404 lcn = 0;
405
406 alen = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &alcn);
407 if (alen)
408 goto space_found;
409
410 /* Try to use clusters from MftZone. */
411 zlen = wnd_zone_len(wnd);
412 zeroes = wnd_zeroes(wnd);
413
414 /* Check for a too-big request. */
415 if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE) {
416 err = -ENOSPC;
417 goto up_write;
418 }
419
420 /* How many clusters to cut from the zone. */
421 zlcn = wnd_zone_bit(wnd);
422 zlen2 = zlen >> 1;
423 ztrim = clamp_val(len, zlen2, zlen);
424 new_zlen = max_t(size_t, zlen - ztrim, NTFS_MIN_MFT_ZONE);
425
426 wnd_zone_set(wnd, zlcn, new_zlen);
427
428 /* Allocate contiguous clusters. */
429 alen = wnd_find(wnd, len, 0,
430 BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &alcn);
431 if (!alen) {
432 err = -ENOSPC;
433 goto up_write;
434 }
435
436 space_found:
437 err = 0;
438 *new_len = alen;
439 *new_lcn = alcn;
440
441 ntfs_unmap_meta(sb, alcn, alen);
442
443 /* Set hint for next requests. */
444 if (!(opt & ALLOCATE_MFT))
445 sbi->used.next_free_lcn = alcn + alen;
446 up_write:
447 up_write(&wnd->rw_lock);
448 return err;
449 }
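/*
 * Usage sketch (editorial, hypothetical caller; ALLOCATE_DEF is assumed to
 * be the non-MFT option from ntfs_fs.h): allocate @len clusters with no
 * position hint; pass ALLOCATE_MFT only when extending the MFT so that the
 * reserved MFT zone is consumed:
 *
 *	CLST lcn, alen;
 *	int err = ntfs_look_for_free_space(sbi, 0, len, &lcn, &alen,
 *					   ALLOCATE_DEF);
 *	// On success, [lcn, lcn + alen) is marked used in the volume bitmap.
 */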
450
451 /*
452 * ntfs_extend_mft - Allocate additional MFT records.
453 *
454 * sbi->mft.bitmap is locked for write.
455 *
456 * NOTE: recursive:
457 * ntfs_look_free_mft ->
458 * ntfs_extend_mft ->
459 * attr_set_size ->
460 * ni_insert_nonresident ->
461 * ni_insert_attr ->
462 * ni_ins_attr_ext ->
463 * ntfs_look_free_mft ->
464 * ntfs_extend_mft
465 *
466 * To avoid this recursion, always allocate space for two new MFT records;
467 * see attrib.c: "at least two MFT to avoid recursive loop".
468 */
469 static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
470 {
471 int err;
472 struct ntfs_inode *ni = sbi->mft.ni;
473 size_t new_mft_total;
474 u64 new_mft_bytes, new_bitmap_bytes;
475 struct ATTRIB *attr;
476 struct wnd_bitmap *wnd = &sbi->mft.bitmap;
477
478 new_mft_total = (wnd->nbits + MFT_INCREASE_CHUNK + 127) & (CLST)~127;
479 new_mft_bytes = (u64)new_mft_total << sbi->record_bits;
480
481 /* Step 1: Resize $MFT::DATA. */
482 down_write(&ni->file.run_lock);
483 err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
484 new_mft_bytes, NULL, false, &attr);
485
486 if (err) {
487 up_write(&ni->file.run_lock);
488 goto out;
489 }
490
491 attr->nres.valid_size = attr->nres.data_size;
492 new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;
493 ni->mi.dirty = true;
494
495 /* Step 2: Resize $MFT::BITMAP. */
496 new_bitmap_bytes = bitmap_size(new_mft_total);
497
498 err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
499 new_bitmap_bytes, &new_bitmap_bytes, true, NULL);
500
501 /* Refresh MFT Zone if necessary. */
502 down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);
503
504 ntfs_refresh_zone(sbi);
505
506 up_write(&sbi->used.bitmap.rw_lock);
507 up_write(&ni->file.run_lock);
508
509 if (err)
510 goto out;
511
512 err = wnd_extend(wnd, new_mft_total);
513
514 if (err)
515 goto out;
516
517 ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);
518
519 err = _ni_write_inode(&ni->vfs_inode, 0);
520 out:
521 return err;
522 }
523
524 /*
525 * ntfs_look_free_mft - Look for a free MFT record.
526 */
527 int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
528 struct ntfs_inode *ni, struct mft_inode **mi)
529 {
530 int err = 0;
531 size_t zbit, zlen, from, to, fr;
532 size_t mft_total;
533 struct MFT_REF ref;
534 struct super_block *sb = sbi->sb;
535 struct wnd_bitmap *wnd = &sbi->mft.bitmap;
536 u32 ir;
537
538 static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
539 MFT_REC_FREE - MFT_REC_RESERVED);
540
541 if (!mft)
542 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
543
544 zlen = wnd_zone_len(wnd);
545
546 /* Always reserve space for MFT. */
547 if (zlen) {
548 if (mft) {
549 zbit = wnd_zone_bit(wnd);
550 *rno = zbit;
551 wnd_zone_set(wnd, zbit + 1, zlen - 1);
552 }
553 goto found;
554 }
555
556 /* No MFT zone. Find the free MFT record nearest to 0. */
557 if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
558 /* Resize MFT */
559 mft_total = wnd->nbits;
560
561 err = ntfs_extend_mft(sbi);
562 if (!err) {
563 zbit = mft_total;
564 goto reserve_mft;
565 }
566
567 if (!mft || MFT_REC_FREE == sbi->mft.next_reserved)
568 goto out;
569
570 err = 0;
571
572 /*
573 * Look for a free record in the reserved area [11-16) ==
574 * [MFT_REC_RESERVED, MFT_REC_FREE). The MFT bitmap always
575 * marks this area as used.
576 */
577 if (!sbi->mft.reserved_bitmap) {
578 /* Once per session, create an internal bitmap of 5 bits. */
579 sbi->mft.reserved_bitmap = 0xFF;
580
581 ref.high = 0;
582 for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
583 struct inode *i;
584 struct ntfs_inode *ni;
585 struct MFT_REC *mrec;
586
587 ref.low = cpu_to_le32(ir);
588 ref.seq = cpu_to_le16(ir);
589
590 i = ntfs_iget5(sb, &ref, NULL);
591 if (IS_ERR(i)) {
592 next:
593 ntfs_notice(
594 sb,
595 "Invalid reserved record %x",
596 ref.low);
597 continue;
598 }
599 if (is_bad_inode(i)) {
600 iput(i);
601 goto next;
602 }
603
604 ni = ntfs_i(i);
605
606 mrec = ni->mi.mrec;
607
608 if (!is_rec_base(mrec))
609 goto next;
610
611 if (mrec->hard_links)
612 goto next;
613
614 if (!ni_std(ni))
615 goto next;
616
617 if (ni_find_attr(ni, NULL, NULL, ATTR_NAME,
618 NULL, 0, NULL, NULL))
619 goto next;
620
621 __clear_bit(ir - MFT_REC_RESERVED,
622 &sbi->mft.reserved_bitmap);
623 }
624 }
625
626 /* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED */
627 zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap,
628 MFT_REC_FREE, MFT_REC_RESERVED);
629 if (zbit >= MFT_REC_FREE) {
630 sbi->mft.next_reserved = MFT_REC_FREE;
631 goto out;
632 }
633
634 zlen = 1;
635 sbi->mft.next_reserved = zbit;
636 } else {
637 reserve_mft:
638 zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
639 if (zbit + zlen > wnd->nbits)
640 zlen = wnd->nbits - zbit;
641
642 while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
643 zlen -= 1;
644
645 /* [zbit, zbit + zlen) will be used for MFT itself. */
646 from = sbi->mft.used;
647 if (from < zbit)
648 from = zbit;
649 to = zbit + zlen;
650 if (from < to) {
651 ntfs_clear_mft_tail(sbi, from, to);
652 sbi->mft.used = to;
653 }
654 }
655
656 if (mft) {
657 *rno = zbit;
658 zbit += 1;
659 zlen -= 1;
660 }
661
662 wnd_zone_set(wnd, zbit, zlen);
663
664 found:
665 if (!mft) {
666 /* The request is for a general-purpose record. */
667 if (sbi->mft.next_free < MFT_REC_USER)
668 sbi->mft.next_free = MFT_REC_USER;
669
670 for (;;) {
671 if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
672 } else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
673 sbi->mft.next_free = sbi->mft.bitmap.nbits;
674 } else {
675 *rno = fr;
676 sbi->mft.next_free = *rno + 1;
677 break;
678 }
679
680 err = ntfs_extend_mft(sbi);
681 if (err)
682 goto out;
683 }
684 }
685
686 if (ni && !ni_add_subrecord(ni, *rno, mi)) {
687 err = -ENOMEM;
688 goto out;
689 }
690
691 /* We have found a record that is not reserved for the next MFT. */
692 if (*rno >= MFT_REC_FREE)
693 wnd_set_used(wnd, *rno, 1);
694 else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
695 __set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
696
697 out:
698 if (!mft)
699 up_write(&wnd->rw_lock);
700
701 return err;
702 }
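/*
 * Usage sketch (editorial, hypothetical caller): a general-purpose record
 * is obtained with mft == false and handed back with ntfs_mark_rec_free()
 * if the caller fails later on:
 *
 *	err = ntfs_look_free_mft(sbi, &rno, false, NULL, NULL);
 *	if (!err && something_else_failed)
 *		ntfs_mark_rec_free(sbi, rno);
 */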
703
704 /*
705 * ntfs_mark_rec_free - Mark record as free.
706 */
707 void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno)
708 {
709 struct wnd_bitmap *wnd = &sbi->mft.bitmap;
710
711 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
712 if (rno >= wnd->nbits)
713 goto out;
714
715 if (rno >= MFT_REC_FREE) {
716 if (!wnd_is_used(wnd, rno, 1))
717 ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
718 else
719 wnd_set_free(wnd, rno, 1);
720 } else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
721 __clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
722 }
723
724 if (rno < wnd_zone_bit(wnd))
725 wnd_zone_set(wnd, rno, 1);
726 else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
727 sbi->mft.next_free = rno;
728
729 out:
730 up_write(&wnd->rw_lock);
731 }
732
733 /*
734 * ntfs_clear_mft_tail - Format empty records [from, to).
735 *
736 * sbi->mft.bitmap is locked for write.
737 */
738 int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
739 {
740 int err;
741 u32 rs;
742 u64 vbo;
743 struct runs_tree *run;
744 struct ntfs_inode *ni;
745
746 if (from >= to)
747 return 0;
748
749 rs = sbi->record_size;
750 ni = sbi->mft.ni;
751 run = &ni->file.run;
752
753 down_read(&ni->file.run_lock);
754 vbo = (u64)from * rs;
755 for (; from < to; from++, vbo += rs) {
756 struct ntfs_buffers nb;
757
758 err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
759 if (err)
760 goto out;
761
762 err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
763 nb_put(&nb);
764 if (err)
765 goto out;
766 }
767
768 out:
769 sbi->mft.used = from;
770 up_read(&ni->file.run_lock);
771 return err;
772 }
773
774 /*
775 * ntfs_refresh_zone - Refresh MFT zone.
776 *
777 * sbi->used.bitmap is locked for rw.
778 * sbi->mft.bitmap is locked for write.
779 * sbi->mft.ni->file.run_lock for write.
780 */
781 int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
782 {
783 CLST zone_limit, zone_max, lcn, vcn, len;
784 size_t lcn_s, zlen;
785 struct wnd_bitmap *wnd = &sbi->used.bitmap;
786 struct ntfs_inode *ni = sbi->mft.ni;
787
788 /* Do not change anything while the MFT zone is still non-empty. */
789 if (wnd_zone_len(wnd))
790 return 0;
791
792 /*
793 * Compute the MFT zone in two steps.
794 * Ideally we would allocate 1/8 of the
795 * total clusters for the MFT, but not more than 512 MB.
796 */
797 zone_limit = (512 * 1024 * 1024) >> sbi->cluster_bits;
798 zone_max = wnd->nbits >> 3;
799 if (zone_max > zone_limit)
800 zone_max = zone_limit;
801
802 vcn = bytes_to_cluster(sbi,
803 (u64)sbi->mft.bitmap.nbits << sbi->record_bits);
804
805 if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
806 lcn = SPARSE_LCN;
807
808 /* We should always find the last LCN of the MFT. */
809 if (lcn == SPARSE_LCN)
810 return -EINVAL;
811
812 lcn_s = lcn + 1;
813
814 /* Try to allocate clusters after last MFT run. */
815 zlen = wnd_find(wnd, zone_max, lcn_s, 0, &lcn_s);
816 if (!zlen) {
817 ntfs_notice(sbi->sb, "MftZone: unavailable");
818 return 0;
819 }
820
821 /* Truncate too large zone. */
822 wnd_zone_set(wnd, lcn_s, zlen);
823
824 return 0;
825 }
826
827 /*
828 * ntfs_update_mftmirr - Update $MFTMirr data.
829 */
830 int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
831 {
832 int err;
833 struct super_block *sb = sbi->sb;
834 u32 blocksize;
835 sector_t block1, block2;
836 u32 bytes;
837
838 if (!sb)
839 return -EINVAL;
840
841 blocksize = sb->s_blocksize;
842
843 if (!(sbi->flags & NTFS_FLAGS_MFTMIRR))
844 return 0;
845
846 err = 0;
847 bytes = sbi->mft.recs_mirr << sbi->record_bits;
848 block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
849 block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;
850
851 for (; bytes >= blocksize; bytes -= blocksize) {
852 struct buffer_head *bh1, *bh2;
853
854 bh1 = sb_bread(sb, block1++);
855 if (!bh1) {
856 err = -EIO;
857 goto out;
858 }
859
860 bh2 = sb_getblk(sb, block2++);
861 if (!bh2) {
862 put_bh(bh1);
863 err = -EIO;
864 goto out;
865 }
866
867 if (buffer_locked(bh2))
868 __wait_on_buffer(bh2);
869
870 lock_buffer(bh2);
871 memcpy(bh2->b_data, bh1->b_data, blocksize);
872 set_buffer_uptodate(bh2);
873 mark_buffer_dirty(bh2);
874 unlock_buffer(bh2);
875
876 put_bh(bh1);
877 bh1 = NULL;
878
879 if (wait)
880 err = sync_dirty_buffer(bh2);
881
882 put_bh(bh2);
883 if (err)
884 goto out;
885 }
886
887 sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
888
889 out:
890 return err;
891 }
892
893 /*
894 * ntfs_set_state
895 *
896 * Mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
897 * Umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
898 * NTFS error: ntfs_set_state(NTFS_DIRTY_ERROR)
899 */
900 int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
901 {
902 int err;
903 struct ATTRIB *attr;
904 struct VOLUME_INFO *info;
905 struct mft_inode *mi;
906 struct ntfs_inode *ni;
907
908 /*
909 * Do not change state if the fs was real_dirty.
910 * Do not change state if the fs is already dirty (or clear).
911 * Do not change anything if mounted read-only.
912 */
913 if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
914 return 0;
915
916 /* Check cached value. */
917 if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
918 (sbi->volume.flags & VOLUME_FLAG_DIRTY))
919 return 0;
920
921 ni = sbi->volume.ni;
922 if (!ni)
923 return -EINVAL;
924
925 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);
926
927 attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);
928 if (!attr) {
929 err = -EINVAL;
930 goto out;
931 }
932
933 info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
934 if (!info) {
935 err = -EINVAL;
936 goto out;
937 }
938
939 switch (dirty) {
940 case NTFS_DIRTY_ERROR:
941 ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
942 sbi->volume.real_dirty = true;
943 fallthrough;
944 case NTFS_DIRTY_DIRTY:
945 info->flags |= VOLUME_FLAG_DIRTY;
946 break;
947 case NTFS_DIRTY_CLEAR:
948 info->flags &= ~VOLUME_FLAG_DIRTY;
949 break;
950 }
951 /* Cache current volume flags. */
952 sbi->volume.flags = info->flags;
953 mi->dirty = true;
954 err = 0;
955
956 out:
957 ni_unlock(ni);
958 if (err)
959 return err;
960
961 mark_inode_dirty_sync(&ni->vfs_inode);
962 /* verify(!ntfs_update_mftmirr()); */
963
964 /* Write the MFT record to disk. */
965 err = _ni_write_inode(&ni->vfs_inode, 1);
966
967 return err;
968 }
969
970 /*
971 * security_hash - Calculates a hash of security descriptor.
972 */
973 static inline __le32 security_hash(const void *sd, size_t bytes)
974 {
975 u32 hash = 0;
976 const __le32 *ptr = sd;
977
978 bytes >>= 2;
979 while (bytes--)
980 hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
981 return cpu_to_le32(hash);
982 }
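/*
 * Editorial note: this is the hash stored in the $SDH index key. Only whole
 * 32-bit little-endian words are folded in, each with a 3-bit left rotate,
 * i.e. per word:
 *
 *	hash = rol32(hash, 3) + le32_to_cpu(*ptr++);
 *
 * so identical descriptor bytes always produce the same sec_hdr.hash.
 */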
983
984 /*
985 * Simple wrapper around sb_bread_unmovable().
986 */
987 struct buffer_head *ntfs_bread(struct super_block *sb, sector_t block)
988 {
989 struct ntfs_sb_info *sbi = sb->s_fs_info;
990 struct buffer_head *bh;
991
992 if (unlikely(block >= sbi->volume.blocks)) {
993 /* prevent generic message "attempt to access beyond end of device" */
994 ntfs_err(sb, "try to read out of volume at offset 0x%llx",
995 (u64)block << sb->s_blocksize_bits);
996 return NULL;
997 }
998
999 bh = sb_bread_unmovable(sb, block);
1000 if (bh)
1001 return bh;
1002
1003 ntfs_err(sb, "failed to read volume at offset 0x%llx",
1004 (u64)block << sb->s_blocksize_bits);
1005 return NULL;
1006 }
1007
1008 int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
1009 {
1010 struct block_device *bdev = sb->s_bdev;
1011 u32 blocksize = sb->s_blocksize;
1012 u64 block = lbo >> sb->s_blocksize_bits;
1013 u32 off = lbo & (blocksize - 1);
1014 u32 op = blocksize - off;
1015
1016 for (; bytes; block += 1, off = 0, op = blocksize) {
1017 struct buffer_head *bh = __bread(bdev, block, blocksize);
1018
1019 if (!bh)
1020 return -EIO;
1021
1022 if (op > bytes)
1023 op = bytes;
1024
1025 memcpy(buffer, bh->b_data + off, op);
1026
1027 put_bh(bh);
1028
1029 bytes -= op;
1030 buffer = Add2Ptr(buffer, op);
1031 }
1032
1033 return 0;
1034 }
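/*
 * Usage sketch (editorial, hypothetical offset): ntfs_sb_read() takes an
 * absolute byte offset and hides the block-size split, so a header that
 * straddles a block boundary can be read in one call:
 *
 *	struct NTFS_RECORD_HEADER hdr;
 *
 *	err = ntfs_sb_read(sb, lbo, sizeof(hdr), &hdr);
 */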
1035
1036 int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
1037 const void *buf, int wait)
1038 {
1039 u32 blocksize = sb->s_blocksize;
1040 struct block_device *bdev = sb->s_bdev;
1041 sector_t block = lbo >> sb->s_blocksize_bits;
1042 u32 off = lbo & (blocksize - 1);
1043 u32 op = blocksize - off;
1044 struct buffer_head *bh;
1045
1046 if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
1047 wait = 1;
1048
1049 for (; bytes; block += 1, off = 0, op = blocksize) {
1050 if (op > bytes)
1051 op = bytes;
1052
1053 if (op < blocksize) {
1054 bh = __bread(bdev, block, blocksize);
1055 if (!bh) {
1056 ntfs_err(sb, "failed to read block %llx",
1057 (u64)block);
1058 return -EIO;
1059 }
1060 } else {
1061 bh = __getblk(bdev, block, blocksize);
1062 if (!bh)
1063 return -ENOMEM;
1064 }
1065
1066 if (buffer_locked(bh))
1067 __wait_on_buffer(bh);
1068
1069 lock_buffer(bh);
1070 if (buf) {
1071 memcpy(bh->b_data + off, buf, op);
1072 buf = Add2Ptr(buf, op);
1073 } else {
1074 memset(bh->b_data + off, -1, op);
1075 }
1076
1077 set_buffer_uptodate(bh);
1078 mark_buffer_dirty(bh);
1079 unlock_buffer(bh);
1080
1081 if (wait) {
1082 int err = sync_dirty_buffer(bh);
1083
1084 if (err) {
1085 ntfs_err(
1086 sb,
1087 "failed to sync buffer at block %llx, error %d",
1088 (u64)block, err);
1089 put_bh(bh);
1090 return err;
1091 }
1092 }
1093
1094 put_bh(bh);
1095
1096 bytes -= op;
1097 }
1098 return 0;
1099 }
1100
1101 int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1102 u64 vbo, const void *buf, size_t bytes, int sync)
1103 {
1104 struct super_block *sb = sbi->sb;
1105 u8 cluster_bits = sbi->cluster_bits;
1106 u32 off = vbo & sbi->cluster_mask;
1107 CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
1108 u64 lbo, len;
1109 size_t idx;
1110
1111 if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1112 return -ENOENT;
1113
1114 if (lcn == SPARSE_LCN)
1115 return -EINVAL;
1116
1117 lbo = ((u64)lcn << cluster_bits) + off;
1118 len = ((u64)clen << cluster_bits) - off;
1119
1120 for (;;) {
1121 u32 op = min_t(u64, len, bytes);
1122 int err = ntfs_sb_write(sb, lbo, op, buf, sync);
1123
1124 if (err)
1125 return err;
1126
1127 bytes -= op;
1128 if (!bytes)
1129 break;
1130
1131 vcn_next = vcn + clen;
1132 if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1133 vcn != vcn_next)
1134 return -ENOENT;
1135
1136 if (lcn == SPARSE_LCN)
1137 return -EINVAL;
1138
1139 if (buf)
1140 buf = Add2Ptr(buf, op);
1141
1142 lbo = ((u64)lcn << cluster_bits);
1143 len = ((u64)clen << cluster_bits);
1144 }
1145
1146 return 0;
1147 }
1148
1149 struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
1150 const struct runs_tree *run, u64 vbo)
1151 {
1152 struct super_block *sb = sbi->sb;
1153 u8 cluster_bits = sbi->cluster_bits;
1154 CLST lcn;
1155 u64 lbo;
1156
1157 if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
1158 return ERR_PTR(-ENOENT);
1159
1160 lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);
1161
1162 return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
1163 }
1164
1165 int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1166 u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)
1167 {
1168 int err;
1169 struct super_block *sb = sbi->sb;
1170 u32 blocksize = sb->s_blocksize;
1171 u8 cluster_bits = sbi->cluster_bits;
1172 u32 off = vbo & sbi->cluster_mask;
1173 u32 nbh = 0;
1174 CLST vcn_next, vcn = vbo >> cluster_bits;
1175 CLST lcn, clen;
1176 u64 lbo, len;
1177 size_t idx;
1178 struct buffer_head *bh;
1179
1180 if (!run) {
1181 /* First reading of $Volume + $MFTMirr + $LogFile goes here. */
1182 if (vbo > MFT_REC_VOL * sbi->record_size) {
1183 err = -ENOENT;
1184 goto out;
1185 }
1186
1187 /* Use the absolute 'MFTCluster' from the boot sector to read the record. */
1188 lbo = vbo + sbi->mft.lbo;
1189 len = sbi->record_size;
1190 } else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1191 err = -ENOENT;
1192 goto out;
1193 } else {
1194 if (lcn == SPARSE_LCN) {
1195 err = -EINVAL;
1196 goto out;
1197 }
1198
1199 lbo = ((u64)lcn << cluster_bits) + off;
1200 len = ((u64)clen << cluster_bits) - off;
1201 }
1202
1203 off = lbo & (blocksize - 1);
1204 if (nb) {
1205 nb->off = off;
1206 nb->bytes = bytes;
1207 }
1208
1209 for (;;) {
1210 u32 len32 = len >= bytes ? bytes : len;
1211 sector_t block = lbo >> sb->s_blocksize_bits;
1212
1213 do {
1214 u32 op = blocksize - off;
1215
1216 if (op > len32)
1217 op = len32;
1218
1219 bh = ntfs_bread(sb, block);
1220 if (!bh) {
1221 err = -EIO;
1222 goto out;
1223 }
1224
1225 if (buf) {
1226 memcpy(buf, bh->b_data + off, op);
1227 buf = Add2Ptr(buf, op);
1228 }
1229
1230 if (!nb) {
1231 put_bh(bh);
1232 } else if (nbh >= ARRAY_SIZE(nb->bh)) {
1233 err = -EINVAL;
1234 goto out;
1235 } else {
1236 nb->bh[nbh++] = bh;
1237 nb->nbufs = nbh;
1238 }
1239
1240 bytes -= op;
1241 if (!bytes)
1242 return 0;
1243 len32 -= op;
1244 block += 1;
1245 off = 0;
1246
1247 } while (len32);
1248
1249 vcn_next = vcn + clen;
1250 if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1251 vcn != vcn_next) {
1252 err = -ENOENT;
1253 goto out;
1254 }
1255
1256 if (lcn == SPARSE_LCN) {
1257 err = -EINVAL;
1258 goto out;
1259 }
1260
1261 lbo = ((u64)lcn << cluster_bits);
1262 len = ((u64)clen << cluster_bits);
1263 }
1264
1265 out:
1266 if (!nbh)
1267 return err;
1268
1269 while (nbh) {
1270 put_bh(nb->bh[--nbh]);
1271 nb->bh[nbh] = NULL;
1272 }
1273
1274 nb->nbufs = 0;
1275 return err;
1276 }
1277
1278 /*
1279 * ntfs_read_bh
1280 *
1281 * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if need to update fixups.
1282 */
1283 int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
1284 struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
1285 struct ntfs_buffers *nb)
1286 {
1287 int err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);
1288
1289 if (err)
1290 return err;
1291 return ntfs_fix_post_read(rhdr, nb->bytes, true);
1292 }
1293
1294 int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
1295 u32 bytes, struct ntfs_buffers *nb)
1296 {
1297 int err = 0;
1298 struct super_block *sb = sbi->sb;
1299 u32 blocksize = sb->s_blocksize;
1300 u8 cluster_bits = sbi->cluster_bits;
1301 CLST vcn_next, vcn = vbo >> cluster_bits;
1302 u32 off;
1303 u32 nbh = 0;
1304 CLST lcn, clen;
1305 u64 lbo, len;
1306 size_t idx;
1307
1308 nb->bytes = bytes;
1309
1310 if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1311 err = -ENOENT;
1312 goto out;
1313 }
1314
1315 off = vbo & sbi->cluster_mask;
1316 lbo = ((u64)lcn << cluster_bits) + off;
1317 len = ((u64)clen << cluster_bits) - off;
1318
1319 nb->off = off = lbo & (blocksize - 1);
1320
1321 for (;;) {
1322 u32 len32 = min_t(u64, len, bytes);
1323 sector_t block = lbo >> sb->s_blocksize_bits;
1324
1325 do {
1326 u32 op;
1327 struct buffer_head *bh;
1328
1329 if (nbh >= ARRAY_SIZE(nb->bh)) {
1330 err = -EINVAL;
1331 goto out;
1332 }
1333
1334 op = blocksize - off;
1335 if (op > len32)
1336 op = len32;
1337
1338 if (op == blocksize) {
1339 bh = sb_getblk(sb, block);
1340 if (!bh) {
1341 err = -ENOMEM;
1342 goto out;
1343 }
1344 if (buffer_locked(bh))
1345 __wait_on_buffer(bh);
1346 set_buffer_uptodate(bh);
1347 } else {
1348 bh = ntfs_bread(sb, block);
1349 if (!bh) {
1350 err = -EIO;
1351 goto out;
1352 }
1353 }
1354
1355 nb->bh[nbh++] = bh;
1356 bytes -= op;
1357 if (!bytes) {
1358 nb->nbufs = nbh;
1359 return 0;
1360 }
1361
1362 block += 1;
1363 len32 -= op;
1364 off = 0;
1365 } while (len32);
1366
1367 vcn_next = vcn + clen;
1368 if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1369 vcn != vcn_next) {
1370 err = -ENOENT;
1371 goto out;
1372 }
1373
1374 lbo = ((u64)lcn << cluster_bits);
1375 len = ((u64)clen << cluster_bits);
1376 }
1377
1378 out:
1379 while (nbh) {
1380 put_bh(nb->bh[--nbh]);
1381 nb->bh[nbh] = NULL;
1382 }
1383
1384 nb->nbufs = 0;
1385
1386 return err;
1387 }
1388
1389 int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
1390 struct ntfs_buffers *nb, int sync)
1391 {
1392 int err = 0;
1393 struct super_block *sb = sbi->sb;
1394 u32 block_size = sb->s_blocksize;
1395 u32 bytes = nb->bytes;
1396 u32 off = nb->off;
1397 u16 fo = le16_to_cpu(rhdr->fix_off);
1398 u16 fn = le16_to_cpu(rhdr->fix_num);
1399 u32 idx;
1400 __le16 *fixup;
1401 __le16 sample;
1402
1403 if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
1404 fn * SECTOR_SIZE > bytes) {
1405 return -EINVAL;
1406 }
1407
1408 for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
1409 u32 op = block_size - off;
1410 char *bh_data;
1411 struct buffer_head *bh = nb->bh[idx];
1412 __le16 *ptr, *end_data;
1413
1414 if (op > bytes)
1415 op = bytes;
1416
1417 if (buffer_locked(bh))
1418 __wait_on_buffer(bh);
1419
1420 lock_buffer(nb->bh[idx]);
1421
1422 bh_data = bh->b_data + off;
1423 end_data = Add2Ptr(bh_data, op);
1424 memcpy(bh_data, rhdr, op);
1425
1426 if (!idx) {
1427 u16 t16;
1428
1429 fixup = Add2Ptr(bh_data, fo);
1430 sample = *fixup;
1431 t16 = le16_to_cpu(sample);
1432 if (t16 >= 0x7FFF) {
1433 sample = *fixup = cpu_to_le16(1);
1434 } else {
1435 sample = cpu_to_le16(t16 + 1);
1436 *fixup = sample;
1437 }
1438
1439 *(__le16 *)Add2Ptr(rhdr, fo) = sample;
1440 }
1441
1442 ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));
1443
1444 do {
1445 *++fixup = *ptr;
1446 *ptr = sample;
1447 ptr += SECTOR_SIZE / sizeof(short);
1448 } while (ptr < end_data);
1449
1450 set_buffer_uptodate(bh);
1451 mark_buffer_dirty(bh);
1452 unlock_buffer(bh);
1453
1454 if (sync) {
1455 int err2 = sync_dirty_buffer(bh);
1456
1457 if (!err && err2)
1458 err = err2;
1459 }
1460
1461 bytes -= op;
1462 rhdr = Add2Ptr(rhdr, op);
1463 }
1464
1465 return err;
1466 }
1467
1468 static inline struct bio *ntfs_alloc_bio(u32 nr_vecs)
1469 {
1470 struct bio *bio = bio_alloc(GFP_NOFS | __GFP_HIGH, nr_vecs);
1471
1472 if (!bio && (current->flags & PF_MEMALLOC)) {
1473 while (!bio && (nr_vecs /= 2))
1474 bio = bio_alloc(GFP_NOFS | __GFP_HIGH, nr_vecs);
1475 }
1476 return bio;
1477 }
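/*
 * Editorial note: bio_alloc() can fail under memory pressure. When the
 * current task is itself reclaiming memory (PF_MEMALLOC), the vector count
 * is halved repeatedly (e.g. 64 -> 32 -> 16 ...) instead of failing
 * outright, so a smaller bio can still be built.
 */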
1478
1479 /*
1480 * ntfs_bio_pages - Read/write pages from/to disk.
1481 */
1482 int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1483 struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
1484 u32 op)
1485 {
1486 int err = 0;
1487 struct bio *new, *bio = NULL;
1488 struct super_block *sb = sbi->sb;
1489 struct block_device *bdev = sb->s_bdev;
1490 struct page *page;
1491 u8 cluster_bits = sbi->cluster_bits;
1492 CLST lcn, clen, vcn, vcn_next;
1493 u32 add, off, page_idx;
1494 u64 lbo, len;
1495 size_t run_idx;
1496 struct blk_plug plug;
1497
1498 if (!bytes)
1499 return 0;
1500
1501 blk_start_plug(&plug);
1502
1503 /* Align vbo and bytes to a 512-byte boundary. */
1504 lbo = (vbo + bytes + 511) & ~511ull;
1505 vbo = vbo & ~511ull;
1506 bytes = lbo - vbo;
1507
1508 vcn = vbo >> cluster_bits;
1509 if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
1510 err = -ENOENT;
1511 goto out;
1512 }
1513 off = vbo & sbi->cluster_mask;
1514 page_idx = 0;
1515 page = pages[0];
1516
1517 for (;;) {
1518 lbo = ((u64)lcn << cluster_bits) + off;
1519 len = ((u64)clen << cluster_bits) - off;
1520 new_bio:
1521 new = ntfs_alloc_bio(nr_pages - page_idx);
1522 if (!new) {
1523 err = -ENOMEM;
1524 goto out;
1525 }
1526 if (bio) {
1527 bio_chain(bio, new);
1528 submit_bio(bio);
1529 }
1530 bio = new;
1531 bio_set_dev(bio, bdev);
1532 bio->bi_iter.bi_sector = lbo >> 9;
1533 bio->bi_opf = op;
1534
1535 while (len) {
1536 off = vbo & (PAGE_SIZE - 1);
1537 add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;
1538
1539 if (bio_add_page(bio, page, add, off) < add)
1540 goto new_bio;
1541
1542 if (bytes <= add)
1543 goto out;
1544 bytes -= add;
1545 vbo += add;
1546
1547 if (add + off == PAGE_SIZE) {
1548 page_idx += 1;
1549 if (WARN_ON(page_idx >= nr_pages)) {
1550 err = -EINVAL;
1551 goto out;
1552 }
1553 page = pages[page_idx];
1554 }
1555
1556 if (len <= add)
1557 break;
1558 len -= add;
1559 lbo += add;
1560 }
1561
1562 vcn_next = vcn + clen;
1563 if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
1564 vcn != vcn_next) {
1565 err = -ENOENT;
1566 goto out;
1567 }
1568 off = 0;
1569 }
1570 out:
1571 if (bio) {
1572 if (!err)
1573 err = submit_bio_wait(bio);
1574 bio_put(bio);
1575 }
1576 blk_finish_plug(&plug);
1577
1578 return err;
1579 }
1580
1581 /*
1582 * ntfs_bio_fill_1 - Helper for ntfs_loadlog_and_replay().
1583 *
1584 * Fill the on-disk logfile range with -1;
1585 * this marks the logfile as empty.
1586 */
1587 int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
1588 {
1589 int err = 0;
1590 struct super_block *sb = sbi->sb;
1591 struct block_device *bdev = sb->s_bdev;
1592 u8 cluster_bits = sbi->cluster_bits;
1593 struct bio *new, *bio = NULL;
1594 CLST lcn, clen;
1595 u64 lbo, len;
1596 size_t run_idx;
1597 struct page *fill;
1598 void *kaddr;
1599 struct blk_plug plug;
1600
1601 fill = alloc_page(GFP_KERNEL);
1602 if (!fill)
1603 return -ENOMEM;
1604
1605 kaddr = kmap_atomic(fill);
1606 memset(kaddr, -1, PAGE_SIZE);
1607 kunmap_atomic(kaddr);
1608 flush_dcache_page(fill);
1609 lock_page(fill);
1610
1611 if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
1612 err = -ENOENT;
1613 goto out;
1614 }
1615
1616 /*
1617 * TODO: Try blkdev_issue_write_same.
1618 */
1619 blk_start_plug(&plug);
1620 do {
1621 lbo = (u64)lcn << cluster_bits;
1622 len = (u64)clen << cluster_bits;
1623 new_bio:
1624 new = ntfs_alloc_bio(BIO_MAX_VECS);
1625 if (!new) {
1626 err = -ENOMEM;
1627 break;
1628 }
1629 if (bio) {
1630 bio_chain(bio, new);
1631 submit_bio(bio);
1632 }
1633 bio = new;
1634 bio_set_dev(bio, bdev);
1635 bio->bi_opf = REQ_OP_WRITE;
1636 bio->bi_iter.bi_sector = lbo >> 9;
1637
1638 for (;;) {
1639 u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;
1640
1641 if (bio_add_page(bio, fill, add, 0) < add)
1642 goto new_bio;
1643
1644 lbo += add;
1645 if (len <= add)
1646 break;
1647 len -= add;
1648 }
1649 } while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));
1650
1651 if (bio) {
1652 if (!err)
1653 err = submit_bio_wait(bio);
1654 bio_put(bio);
1655 }
1656 blk_finish_plug(&plug);
1657 out:
1658 unlock_page(fill);
1659 put_page(fill);
1660
1661 return err;
1662 }
1663
1664 int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1665 u64 vbo, u64 *lbo, u64 *bytes)
1666 {
1667 u32 off;
1668 CLST lcn, len;
1669 u8 cluster_bits = sbi->cluster_bits;
1670
1671 if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
1672 return -ENOENT;
1673
1674 off = vbo & sbi->cluster_mask;
1675 *lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
1676 *bytes = ((u64)len << cluster_bits) - off;
1677
1678 return 0;
1679 }
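/*
 * Usage sketch (editorial, hypothetical caller): map a file byte offset to
 * a device byte offset; a sparse run comes back as *lbo == -1:
 *
 *	u64 lbo, left;
 *
 *	err = ntfs_vbo_to_lbo(sbi, &ni->file.run, vbo, &lbo, &left);
 *	if (!err && lbo != (u64)-1)
 *		; // [lbo, lbo + left) is contiguous on the device
 */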
1680
1681 struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno, bool dir)
1682 {
1683 int err = 0;
1684 struct super_block *sb = sbi->sb;
1685 struct inode *inode = new_inode(sb);
1686 struct ntfs_inode *ni;
1687
1688 if (!inode)
1689 return ERR_PTR(-ENOMEM);
1690
1691 ni = ntfs_i(inode);
1692
1693 err = mi_format_new(&ni->mi, sbi, rno, dir ? RECORD_FLAG_DIR : 0,
1694 false);
1695 if (err)
1696 goto out;
1697
1698 inode->i_ino = rno;
1699 if (insert_inode_locked(inode) < 0) {
1700 err = -EIO;
1701 goto out;
1702 }
1703
1704 out:
1705 if (err) {
1706 make_bad_inode(inode);
1707 iput(inode);
1708 ni = ERR_PTR(err);
1709 }
1710 return ni;
1711 }
1712
1713 /*
1714 * O:BAG:BAD:(A;OICI;FA;;;WD)
1715 * Owner S-1-5-32-544 (Administrators)
1716 * Group S-1-5-32-544 (Administrators)
1717 * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
1718 */
1719 const u8 s_default_security[] __aligned(8) = {
1720 0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
1721 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
1722 0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
1723 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
1724 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
1725 0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
1726 0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
1727 };
1728
1729 static_assert(sizeof(s_default_security) == 0x50);
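/*
 * Byte-level breakdown (editorial) of s_default_security:
 *	0x00: revision 1, control 0x8004 (SE_DACL_PRESENT | SE_SELF_RELATIVE)
 *	0x04: Owner @ 0x30, Group @ 0x40, Sacl absent, Dacl @ 0x14
 *	0x14: ACL revision 2, size 0x1C, one ACE
 *	0x1C: ACCESS_ALLOWED ACE, flags OICI, mask 0x001F01FF (FILE_ALL_ACCESS),
 *	      SID S-1-1-0 (Everyone)
 *	0x30: owner SID S-1-5-32-544, 0x40: group SID S-1-5-32-544
 */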
1730
1731 static inline u32 sid_length(const struct SID *sid)
1732 {
1733 return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
1734 }
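/*
 * Worked example (editorial): a SID is 8 header bytes plus 4 bytes per
 * sub-authority, so S-1-5-32-544 (two sub-authorities) gives
 * struct_size() == 8 + 2 * 4 == 16 bytes, exactly the size of the
 * owner/group SIDs embedded in s_default_security above.
 */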
1735
1736 /*
1737 * is_acl_valid
1738 *
1739 * Thanks to Mark Harmstone for the idea.
1740 */
1741 static bool is_acl_valid(const struct ACL *acl, u32 len)
1742 {
1743 const struct ACE_HEADER *ace;
1744 u32 i;
1745 u16 ace_count, ace_size;
1746
1747 if (acl->AclRevision != ACL_REVISION &&
1748 acl->AclRevision != ACL_REVISION_DS) {
1749 /*
1750 * This value should be ACL_REVISION, unless the ACL contains an
1751 * object-specific ACE, in which case this value must be ACL_REVISION_DS.
1752 * All ACEs in an ACL must be at the same revision level.
1753 */
1754 return false;
1755 }
1756
1757 if (acl->Sbz1)
1758 return false;
1759
1760 if (le16_to_cpu(acl->AclSize) > len)
1761 return false;
1762
1763 if (acl->Sbz2)
1764 return false;
1765
1766 len -= sizeof(struct ACL);
1767 ace = (struct ACE_HEADER *)&acl[1];
1768 ace_count = le16_to_cpu(acl->AceCount);
1769
1770 for (i = 0; i < ace_count; i++) {
1771 if (len < sizeof(struct ACE_HEADER))
1772 return false;
1773
1774 ace_size = le16_to_cpu(ace->AceSize);
1775 if (len < ace_size)
1776 return false;
1777
1778 len -= ace_size;
1779 ace = Add2Ptr(ace, ace_size);
1780 }
1781
1782 return true;
1783 }
1784
1785 bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
1786 {
1787 u32 sd_owner, sd_group, sd_sacl, sd_dacl;
1788
1789 if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
1790 return false;
1791
1792 if (sd->Revision != 1)
1793 return false;
1794
1795 if (sd->Sbz1)
1796 return false;
1797
1798 if (!(sd->Control & SE_SELF_RELATIVE))
1799 return false;
1800
1801 sd_owner = le32_to_cpu(sd->Owner);
1802 if (sd_owner) {
1803 const struct SID *owner = Add2Ptr(sd, sd_owner);
1804
1805 if (sd_owner + offsetof(struct SID, SubAuthority) > len)
1806 return false;
1807
1808 if (owner->Revision != 1)
1809 return false;
1810
1811 if (sd_owner + sid_length(owner) > len)
1812 return false;
1813 }
1814
1815 sd_group = le32_to_cpu(sd->Group);
1816 if (sd_group) {
1817 const struct SID *group = Add2Ptr(sd, sd_group);
1818
1819 if (sd_group + offsetof(struct SID, SubAuthority) > len)
1820 return false;
1821
1822 if (group->Revision != 1)
1823 return false;
1824
1825 if (sd_group + sid_length(group) > len)
1826 return false;
1827 }
1828
1829 sd_sacl = le32_to_cpu(sd->Sacl);
1830 if (sd_sacl) {
1831 const struct ACL *sacl = Add2Ptr(sd, sd_sacl);
1832
1833 if (sd_sacl + sizeof(struct ACL) > len)
1834 return false;
1835
1836 if (!is_acl_valid(sacl, len - sd_sacl))
1837 return false;
1838 }
1839
1840 sd_dacl = le32_to_cpu(sd->Dacl);
1841 if (sd_dacl) {
1842 const struct ACL *dacl = Add2Ptr(sd, sd_dacl);
1843
1844 if (sd_dacl + sizeof(struct ACL) > len)
1845 return false;
1846
1847 if (!is_acl_valid(dacl, len - sd_dacl))
1848 return false;
1849 }
1850
1851 return true;
1852 }
1853
1854 /*
1855 * ntfs_security_init - Load and parse $Secure.
1856 */
1857 int ntfs_security_init(struct ntfs_sb_info *sbi)
1858 {
1859 int err;
1860 struct super_block *sb = sbi->sb;
1861 struct inode *inode;
1862 struct ntfs_inode *ni;
1863 struct MFT_REF ref;
1864 struct ATTRIB *attr;
1865 struct ATTR_LIST_ENTRY *le;
1866 u64 sds_size;
1867 size_t off;
1868 struct NTFS_DE *ne;
1869 struct NTFS_DE_SII *sii_e;
1870 struct ntfs_fnd *fnd_sii = NULL;
1871 const struct INDEX_ROOT *root_sii;
1872 const struct INDEX_ROOT *root_sdh;
1873 struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
1874 struct ntfs_index *indx_sii = &sbi->security.index_sii;
1875
1876 ref.low = cpu_to_le32(MFT_REC_SECURE);
1877 ref.high = 0;
1878 ref.seq = cpu_to_le16(MFT_REC_SECURE);
1879
1880 inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
1881 if (IS_ERR(inode)) {
1882 err = PTR_ERR(inode);
1883 ntfs_err(sb, "Failed to load $Secure.");
1884 inode = NULL;
1885 goto out;
1886 }
1887
1888 ni = ntfs_i(inode);
1889
1890 le = NULL;
1891
1892 attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
1893 ARRAY_SIZE(SDH_NAME), NULL, NULL);
1894 if (!attr) {
1895 err = -EINVAL;
1896 goto out;
1897 }
1898
1899 if(!(root_sdh = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
1900 root_sdh->type != ATTR_ZERO ||
1901 root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH ||
1902 offsetof(struct INDEX_ROOT, ihdr) +
1903 le32_to_cpu(root_sdh->ihdr.used) >
1904 le32_to_cpu(attr->res.data_size)) {
1905 err = -EINVAL;
1906 goto out;
1907 }
1908
1909 err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
1910 if (err)
1911 goto out;
1912
1913 attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
1914 ARRAY_SIZE(SII_NAME), NULL, NULL);
1915 if (!attr) {
1916 err = -EINVAL;
1917 goto out;
1918 }
1919
1920 if(!(root_sii = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
1921 root_sii->type != ATTR_ZERO ||
1922 root_sii->rule != NTFS_COLLATION_TYPE_UINT ||
1923 offsetof(struct INDEX_ROOT, ihdr) +
1924 le32_to_cpu(root_sii->ihdr.used) >
1925 le32_to_cpu(attr->res.data_size)) {
1926 err = -EINVAL;
1927 goto out;
1928 }
1929
1930 err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
1931 if (err)
1932 goto out;
1933
1934 fnd_sii = fnd_get();
1935 if (!fnd_sii) {
1936 err = -ENOMEM;
1937 goto out;
1938 }
1939
1940 sds_size = inode->i_size;
1941
1942 /* Find the last valid Id. */
1943 sbi->security.next_id = SECURITY_ID_FIRST;
1944 /* Always write new security at the end of bucket. */
1945 sbi->security.next_off =
1946 ALIGN(sds_size - SecurityDescriptorsBlockSize, 16);
1947
1948 off = 0;
1949 ne = NULL;
1950
1951 for (;;) {
1952 u32 next_id;
1953
1954 err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
1955 if (err || !ne)
1956 break;
1957
1958 sii_e = (struct NTFS_DE_SII *)ne;
1959 if (le16_to_cpu(ne->view.data_size) < SIZEOF_SECURITY_HDR)
1960 continue;
1961
1962 next_id = le32_to_cpu(sii_e->sec_id) + 1;
1963 if (next_id >= sbi->security.next_id)
1964 sbi->security.next_id = next_id;
1965 }
1966
1967 sbi->security.ni = ni;
1968 inode = NULL;
1969 out:
1970 iput(inode);
1971 fnd_put(fnd_sii);
1972
1973 return err;
1974 }
1975
1976 /*
1977 * ntfs_get_security_by_id - Read security descriptor by id.
1978 */
1979 int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
1980 struct SECURITY_DESCRIPTOR_RELATIVE **sd,
1981 size_t *size)
1982 {
1983 int err;
1984 int diff;
1985 struct ntfs_inode *ni = sbi->security.ni;
1986 struct ntfs_index *indx = &sbi->security.index_sii;
1987 void *p = NULL;
1988 struct NTFS_DE_SII *sii_e;
1989 struct ntfs_fnd *fnd_sii;
1990 struct SECURITY_HDR d_security;
1991 const struct INDEX_ROOT *root_sii;
1992 u32 t32;
1993
1994 *sd = NULL;
1995
1996 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
1997
1998 fnd_sii = fnd_get();
1999 if (!fnd_sii) {
2000 err = -ENOMEM;
2001 goto out;
2002 }
2003
2004 root_sii = indx_get_root(indx, ni, NULL, NULL);
2005 if (!root_sii) {
2006 err = -EINVAL;
2007 goto out;
2008 }
2009
2010 /* Try to find this SECURITY descriptor in SII indexes. */
2011 err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
2012 NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
2013 if (err)
2014 goto out;
2015
2016 if (diff)
2017 goto out;
2018
2019 t32 = le32_to_cpu(sii_e->sec_hdr.size);
2020 if (t32 < SIZEOF_SECURITY_HDR) {
2021 err = -EINVAL;
2022 goto out;
2023 }
2024
2025 if (t32 > SIZEOF_SECURITY_HDR + 0x10000) {
2026 /* Security descriptor looks too big; 0x10000 is an arbitrary upper bound. */
2027 err = -EFBIG;
2028 goto out;
2029 }
2030
2031 *size = t32 - SIZEOF_SECURITY_HDR;
2032
2033 p = kmalloc(*size, GFP_NOFS);
2034 if (!p) {
2035 err = -ENOMEM;
2036 goto out;
2037 }
2038
2039 err = ntfs_read_run_nb(sbi, &ni->file.run,
2040 le64_to_cpu(sii_e->sec_hdr.off), &d_security,
2041 sizeof(d_security), NULL);
2042 if (err)
2043 goto out;
2044
2045 if (memcmp(&d_security, &sii_e->sec_hdr, SIZEOF_SECURITY_HDR)) {
2046 err = -EINVAL;
2047 goto out;
2048 }
2049
2050 err = ntfs_read_run_nb(sbi, &ni->file.run,
2051 le64_to_cpu(sii_e->sec_hdr.off) +
2052 SIZEOF_SECURITY_HDR,
2053 p, *size, NULL);
2054 if (err)
2055 goto out;
2056
2057 *sd = p;
2058 p = NULL;
2059
2060 out:
2061 kfree(p);
2062 fnd_put(fnd_sii);
2063 ni_unlock(ni);
2064
2065 return err;
2066 }
2067
2068 /*
2069 * ntfs_insert_security - Insert security descriptor into $Secure::SDS.
2070 *
2071 * SECURITY Descriptor Stream data is organized into chunks of 256K bytes
2072 * and it contains a mirror copy of each security descriptor. When writing
2073 * to a security descriptor at location X, another copy will be written at
2074 * location (X+256K).
2075 * When writing a security descriptor that will cross the 256K boundary,
2076 * the pointer will be advanced by 256K to skip
2077 * over the mirror portion.
2078 */
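/*
 * Worked example (editorial, hypothetical offsets): with 256K blocks, a
 * descriptor written at next_off == 0x10040 gets its mirror copy at
 * 0x10040 + 0x40000 == 0x50040, and next_off then advances by the
 * 16-byte-aligned size of the new entry.
 */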
2079 int ntfs_insert_security(struct ntfs_sb_info *sbi,
2080 const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
2081 u32 size_sd, __le32 *security_id, bool *inserted)
2082 {
2083 int err, diff;
2084 struct ntfs_inode *ni = sbi->security.ni;
2085 struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
2086 struct ntfs_index *indx_sii = &sbi->security.index_sii;
2087 struct NTFS_DE_SDH *e;
2088 struct NTFS_DE_SDH sdh_e;
2089 struct NTFS_DE_SII sii_e;
2090 struct SECURITY_HDR *d_security;
2091 u32 new_sec_size = size_sd + SIZEOF_SECURITY_HDR;
2092 u32 aligned_sec_size = ALIGN(new_sec_size, 16);
2093 struct SECURITY_KEY hash_key;
2094 struct ntfs_fnd *fnd_sdh = NULL;
2095 const struct INDEX_ROOT *root_sdh;
2096 const struct INDEX_ROOT *root_sii;
2097 u64 mirr_off, new_sds_size;
2098 u32 next, left;
2099
2100 static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
2101 SecurityDescriptorsBlockSize);
2102
2103 hash_key.hash = security_hash(sd, size_sd);
2104 hash_key.sec_id = SECURITY_ID_INVALID;
2105
2106 if (inserted)
2107 *inserted = false;
2108 *security_id = SECURITY_ID_INVALID;
2109
2110 /* Allocate a temporary buffer. */
2111 d_security = kzalloc(aligned_sec_size, GFP_NOFS);
2112 if (!d_security)
2113 return -ENOMEM;
2114
2115 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
2116
2117 fnd_sdh = fnd_get();
2118 if (!fnd_sdh) {
2119 err = -ENOMEM;
2120 goto out;
2121 }
2122
2123 root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);
2124 if (!root_sdh) {
2125 err = -EINVAL;
2126 goto out;
2127 }
2128
2129 root_sii = indx_get_root(indx_sii, ni, NULL, NULL);
2130 if (!root_sii) {
2131 err = -EINVAL;
2132 goto out;
2133 }
2134
2135 /*
2136 * Check if such security already exists.
2137 * Use "SDH" and hash -> to get the offset in "SDS".
2138 */
2139 err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
2140 &d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
2141 fnd_sdh);
2142 if (err)
2143 goto out;
2144
2145 while (e) {
2146 if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
2147 err = ntfs_read_run_nb(sbi, &ni->file.run,
2148 le64_to_cpu(e->sec_hdr.off),
2149 d_security, new_sec_size, NULL);
2150 if (err)
2151 goto out;
2152
2153 if (le32_to_cpu(d_security->size) == new_sec_size &&
2154 d_security->key.hash == hash_key.hash &&
2155 !memcmp(d_security + 1, sd, size_sd)) {
2156 *security_id = d_security->key.sec_id;
2157 /* Such security already exists. */
2158 err = 0;
2159 goto out;
2160 }
2161 }
2162
2163 err = indx_find_sort(indx_sdh, ni, root_sdh,
2164 (struct NTFS_DE **)&e, fnd_sdh);
2165 if (err)
2166 goto out;
2167
2168 if (!e || e->key.hash != hash_key.hash)
2169 break;
2170 }
2171
2172 /* Zero unused space. */
2173 next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
2174 left = SecurityDescriptorsBlockSize - next;
2175
2176 /* Zero gap until SecurityDescriptorsBlockSize. */
2177 if (left < new_sec_size) {
2178 /* Zero "left" bytes from sbi->security.next_off. */
2179 sbi->security.next_off += SecurityDescriptorsBlockSize + left;
2180 }
2181
2182 /* Zero tail of previous security. */
2183 //used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);
2184
2185 /*
2186 * Example:
2187 * 0x40438 == ni->vfs_inode.i_size
2188 * 0x00440 == sbi->security.next_off
2189 * need to zero [0x438-0x440)
2190 * if (next > used) {
2191 * u32 tozero = next - used;
2192 * zero "tozero" bytes from sbi->security.next_off - tozero
2193 */
2194
2195 /* Format new security descriptor. */
2196 d_security->key.hash = hash_key.hash;
2197 d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
2198 d_security->off = cpu_to_le64(sbi->security.next_off);
2199 d_security->size = cpu_to_le32(new_sec_size);
2200 memcpy(d_security + 1, sd, size_sd);
2201
2202 /* Write main SDS bucket. */
2203 err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
2204 d_security, aligned_sec_size, 0);
2205
2206 if (err)
2207 goto out;
2208
2209 mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
2210 new_sds_size = mirr_off + aligned_sec_size;
2211
2212 if (new_sds_size > ni->vfs_inode.i_size) {
2213 err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
2214 ARRAY_SIZE(SDS_NAME), &ni->file.run,
2215 new_sds_size, &new_sds_size, false, NULL);
2216 if (err)
2217 goto out;
2218 }
2219
2220 /* Write copy SDS bucket. */
2221 err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
2222 aligned_sec_size, 0);
2223 if (err)
2224 goto out;
2225
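/*
 * $SII is keyed by the security id alone; $SDH is keyed by the
 * {hash, security id} pair. Both entries embed a copy of the on-disk
 * security header.
 */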
2226 /* Fill SII entry. */
2227 sii_e.de.view.data_off =
2228 cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
2229 sii_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
2230 sii_e.de.view.res = 0;
2231 sii_e.de.size = cpu_to_le16(SIZEOF_SII_DIRENTRY);
2232 sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
2233 sii_e.de.flags = 0;
2234 sii_e.de.res = 0;
2235 sii_e.sec_id = d_security->key.sec_id;
2236 memcpy(&sii_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
2237
2238 err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL, 0);
2239 if (err)
2240 goto out;
2241
2242 /* Fill SDH entry. */
2243 sdh_e.de.view.data_off =
2244 cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
2245 sdh_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
2246 sdh_e.de.view.res = 0;
2247 sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
2248 sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
2249 sdh_e.de.flags = 0;
2250 sdh_e.de.res = 0;
2251 sdh_e.key.hash = d_security->key.hash;
2252 sdh_e.key.sec_id = d_security->key.sec_id;
2253 memcpy(&sdh_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
2254 sdh_e.magic[0] = cpu_to_le16('I');
2255 sdh_e.magic[1] = cpu_to_le16('I');
2256
2257 fnd_clear(fnd_sdh);
2258 err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
2259 fnd_sdh, 0);
2260 if (err)
2261 goto out;
2262
2263 *security_id = d_security->key.sec_id;
2264 if (inserted)
2265 *inserted = true;
2266
2267 /* Update Id and offset for next descriptor. */
2268 sbi->security.next_id += 1;
2269 sbi->security.next_off += aligned_sec_size;
2270
2271 out:
2272 fnd_put(fnd_sdh);
2273 mark_inode_dirty(&ni->vfs_inode);
2274 ni_unlock(ni);
2275 kfree(d_security);
2276
2277 return err;
2278 }
2279
2280 /*
2281 * ntfs_reparse_init - Load and parse $Extend/$Reparse.
2282 */
2283 int ntfs_reparse_init(struct ntfs_sb_info *sbi)
2284 {
2285 int err;
2286 struct ntfs_inode *ni = sbi->reparse.ni;
2287 struct ntfs_index *indx = &sbi->reparse.index_r;
2288 struct ATTRIB *attr;
2289 struct ATTR_LIST_ENTRY *le;
2290 const struct INDEX_ROOT *root_r;
2291
2292 if (!ni)
2293 return 0;
2294
2295 le = NULL;
2296 attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
2297 ARRAY_SIZE(SR_NAME), NULL, NULL);
2298 if (!attr) {
2299 err = -EINVAL;
2300 goto out;
2301 }
2302
2303 root_r = resident_data(attr);
2304 if (root_r->type != ATTR_ZERO ||
2305 root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
2306 err = -EINVAL;
2307 goto out;
2308 }
2309
2310 err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);
2311 if (err)
2312 goto out;
2313
2314 out:
2315 return err;
2316 }
2317
2318 /*
2319 * ntfs_objid_init - Load and parse $Extend/$ObjId.
2320 */
2321 int ntfs_objid_init(struct ntfs_sb_info *sbi)
2322 {
2323 int err;
2324 struct ntfs_inode *ni = sbi->objid.ni;
2325 struct ntfs_index *indx = &sbi->objid.index_o;
2326 struct ATTRIB *attr;
2327 struct ATTR_LIST_ENTRY *le;
2328 const struct INDEX_ROOT *root;
2329
2330 if (!ni)
2331 return 0;
2332
2333 le = NULL;
2334 attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
2335 ARRAY_SIZE(SO_NAME), NULL, NULL);
2336 if (!attr) {
2337 err = -EINVAL;
2338 goto out;
2339 }
2340
2341 root = resident_data(attr);
2342 if (root->type != ATTR_ZERO ||
2343 root->rule != NTFS_COLLATION_TYPE_UINTS) {
2344 err = -EINVAL;
2345 goto out;
2346 }
2347
2348 err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);
2349 if (err)
2350 goto out;
2351
2352 out:
2353 return err;
2354 }
2355
2356 int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
2357 {
2358 int err;
2359 struct ntfs_inode *ni = sbi->objid.ni;
2360 struct ntfs_index *indx = &sbi->objid.index_o;
2361
2362 if (!ni)
2363 return -EINVAL;
2364
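/* The $ObjId index ($O) is keyed directly by the GUID. */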
2365 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);
2366
2367 err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);
2368
2369 mark_inode_dirty(&ni->vfs_inode);
2370 ni_unlock(ni);
2371
2372 return err;
2373 }
2374
2375 int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2376 const struct MFT_REF *ref)
2377 {
2378 int err;
2379 struct ntfs_inode *ni = sbi->reparse.ni;
2380 struct ntfs_index *indx = &sbi->reparse.index_r;
2381 struct NTFS_DE_R re;
2382
2383 if (!ni)
2384 return -EINVAL;
2385
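/*
 * Entries in the $Reparse index ($R) are keyed by the pair
 * {reparse tag, MFT reference}, so many files may share one tag.
 */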
2386 memset(&re, 0, sizeof(re));
2387
2388 re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
2389 re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
2390 re.de.key_size = cpu_to_le16(sizeof(re.key));
2391
2392 re.key.ReparseTag = rtag;
2393 memcpy(&re.key.ref, ref, sizeof(*ref));
2394
2395 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2396
2397 err = indx_insert_entry(indx, ni, &re.de, NULL, NULL, 0);
2398
2399 mark_inode_dirty(&ni->vfs_inode);
2400 ni_unlock(ni);
2401
2402 return err;
2403 }
2404
2405 int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2406 const struct MFT_REF *ref)
2407 {
2408 int err, diff;
2409 struct ntfs_inode *ni = sbi->reparse.ni;
2410 struct ntfs_index *indx = &sbi->reparse.index_r;
2411 struct ntfs_fnd *fnd = NULL;
2412 struct REPARSE_KEY rkey;
2413 struct NTFS_DE_R *re;
2414 struct INDEX_ROOT *root_r;
2415
2416 if (!ni)
2417 return -EINVAL;
2418
2419 rkey.ReparseTag = rtag;
2420 rkey.ref = *ref;
2421
2422 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2423
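/*
 * If the caller knows the reparse tag, the key can be built directly
 * and the entry deleted in one step. Otherwise (rtag == 0) look the
 * entry up by MFT reference first, ignoring the tag, to recover the
 * full key before deleting it.
 */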
2424 if (rtag) {
2425 err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2426 goto out1;
2427 }
2428
2429 fnd = fnd_get();
2430 if (!fnd) {
2431 err = -ENOMEM;
2432 goto out1;
2433 }
2434
2435 root_r = indx_get_root(indx, ni, NULL, NULL);
2436 if (!root_r) {
2437 err = -EINVAL;
2438 goto out;
2439 }
2440
2441 /* Passing 1 forces the key compare to ignore rkey.ReparseTag. */
2442 err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
2443 (struct NTFS_DE **)&re, fnd);
2444 if (err)
2445 goto out;
2446
2447 if (memcmp(&re->key.ref, ref, sizeof(*ref))) {
2448 /* Should be impossible; the volume looks corrupt. */
2449 goto out;
2450 }
2451
2452 memcpy(&rkey, &re->key, sizeof(rkey));
2453
2454 fnd_put(fnd);
2455 fnd = NULL;
2456
2457 err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2458 if (err)
2459 goto out;
2460
2461 out:
2462 fnd_put(fnd);
2463
2464 out1:
2465 mark_inode_dirty(&ni->vfs_inode);
2466 ni_unlock(ni);
2467
2468 return err;
2469 }
2470
2471 static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
2472 CLST len)
2473 {
2474 ntfs_unmap_meta(sbi->sb, lcn, len);
2475 ntfs_discard(sbi, lcn, len);
2476 }
2477
2478 void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
2479 {
2480 CLST end, i;
2481 struct wnd_bitmap *wnd = &sbi->used.bitmap;
2482 bool dirty = false;
2483
2484 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
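/*
 * If part of the range is already marked free, the bitmap disagrees
 * with the caller's view: free only the fragments that are still in
 * use and flag the volume as dirty.
 */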
2485 if (!wnd_is_used(wnd, lcn, len)) {
2486 /* Mark the volume dirty outside of wnd->rw_lock. */
2487 dirty = true;
2488
2489 end = lcn + len;
2490 len = 0;
2491 for (i = lcn; i < end; i++) {
2492 if (wnd_is_used(wnd, i, 1)) {
2493 if (!len)
2494 lcn = i;
2495 len += 1;
2496 continue;
2497 }
2498
2499 if (!len)
2500 continue;
2501
2502 if (trim)
2503 ntfs_unmap_and_discard(sbi, lcn, len);
2504
2505 wnd_set_free(wnd, lcn, len);
2506 len = 0;
2507 }
2508
2509 if (!len)
2510 goto out;
2511 }
2512
2513 if (trim)
2514 ntfs_unmap_and_discard(sbi, lcn, len);
2515 wnd_set_free(wnd, lcn, len);
2516
2517 out:
2518 up_write(&wnd->rw_lock);
2519 if (dirty)
2520 ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
2521 }
2522
2523 /*
2524 * run_deallocate - Deallocate clusters.
2525 */
2526 int run_deallocate(struct ntfs_sb_info *sbi, struct runs_tree *run, bool trim)
2527 {
2528 CLST lcn, len;
2529 size_t idx = 0;
2530
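/* Sparse fragments (SPARSE_LCN) own no clusters and are skipped. */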
2531 while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
2532 if (lcn == SPARSE_LCN)
2533 continue;
2534
2535 mark_as_free_ex(sbi, lcn, len, trim);
2536 }
2537
2538 return 0;
2539 }
2540