// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 */

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/kernel.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"
// clang-format off
const struct cpu_str NAME_MFT = {
	4, 0, { '$', 'M', 'F', 'T' },
};
const struct cpu_str NAME_MIRROR = {
	8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
};
const struct cpu_str NAME_LOGFILE = {
	8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
};
const struct cpu_str NAME_VOLUME = {
	7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
};
const struct cpu_str NAME_ATTRDEF = {
	8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
};
const struct cpu_str NAME_ROOT = {
	1, 0, { '.' },
};
const struct cpu_str NAME_BITMAP = {
	7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
};
const struct cpu_str NAME_BOOT = {
	5, 0, { '$', 'B', 'o', 'o', 't' },
};
const struct cpu_str NAME_BADCLUS = {
	8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
};
const struct cpu_str NAME_QUOTA = {
	6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
};
const struct cpu_str NAME_SECURE = {
	7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
};
const struct cpu_str NAME_UPCASE = {
	7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
};
const struct cpu_str NAME_EXTEND = {
	7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
};
const struct cpu_str NAME_OBJID = {
	6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
};
const struct cpu_str NAME_REPARSE = {
	8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
};
const struct cpu_str NAME_USNJRNL = {
	8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
};
const __le16 BAD_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
};
const __le16 I30_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
};
const __le16 SII_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
};
const __le16 SDH_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
};
const __le16 SDS_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
};
const __le16 SO_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('O'),
};
const __le16 SQ_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('Q'),
};
const __le16 SR_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('R'),
};

#ifdef CONFIG_NTFS3_LZX_XPRESS
const __le16 WOF_NAME[17] = {
	cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
	cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
	cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
	cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
	cpu_to_le16('a'),
};
#endif

// clang-format on

/*
 * ntfs_fix_pre_write - Insert fixups into @rhdr before writing to disk.
 */
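/*
 * Worked example of the update-sequence ("fixup") scheme, with illustrative
 * values (not taken from a real volume): a 1K record covers two 512-byte
 * sectors, so fix_num == 3 (one update sequence number followed by one saved
 * word per sector). With a current sequence number of 4, pre-write bumps it
 * to 5, saves the last word of each sector into the array, then stamps 5
 * over both last words:
 *
 *	before:	usa = { 4, a, b },	sec0.last = a, sec1.last = b
 *	after:	usa = { 5, a, b },	sec0.last = 5, sec1.last = 5
 *
 * ntfs_fix_post_read() restores the saved words; a torn multi-sector write
 * is detected when some sector's last word no longer equals the sequence
 * number.
 */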
bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
{
	u16 *fixup, *ptr;
	u16 sample;
	u16 fo = le16_to_cpu(rhdr->fix_off);
	u16 fn = le16_to_cpu(rhdr->fix_num);

	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return false;
	}

	/* Get fixup pointer. */
	fixup = Add2Ptr(rhdr, fo);

	if (*fixup >= 0x7FFF)
		*fixup = 1;
	else
		*fixup += 1;

	sample = *fixup;

	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));

	while (fn--) {
		*++fixup = *ptr;
		*ptr = sample;
		ptr += SECTOR_SIZE / sizeof(short);
	}
	return true;
}

/*
 * ntfs_fix_post_read - Remove fixups after reading from disk.
 *
 * Return: < 0 on error, 0 if ok, -E_NTFS_FIXUP if fixups need to be updated.
 */
int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
		       bool simple)
{
	int ret;
	u16 *fixup, *ptr;
	u16 sample, fo, fn;

	fo = le16_to_cpu(rhdr->fix_off);
	fn = simple ? ((bytes >> SECTOR_SHIFT) + 1)
		    : le16_to_cpu(rhdr->fix_num);

	/* Check errors. */
	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return -E_NTFS_CORRUPT;
	}

	/* Get fixup pointer. */
	fixup = Add2Ptr(rhdr, fo);
	sample = *fixup;
	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
	ret = 0;

	while (fn--) {
		/* Test current word. */
		if (*ptr != sample) {
			/* Fixup does not match! Is it a serious error? */
			ret = -E_NTFS_FIXUP;
		}

		/* Replace fixup. */
		*ptr = *++fixup;
		ptr += SECTOR_SIZE / sizeof(short);
	}

	return ret;
}

/*
 * ntfs_extend_init - Load $Extend file.
 */
int ntfs_extend_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct super_block *sb = sbi->sb;
	struct inode *inode, *inode2;
	struct MFT_REF ref;

	if (sbi->volume.major_ver < 3) {
		ntfs_notice(sb, "Skipping $Extend: NTFS version is less than 3.0");
		return 0;
	}

	ref.low = cpu_to_le32(MFT_REC_EXTEND);
	ref.high = 0;
	ref.seq = cpu_to_le16(MFT_REC_EXTEND);
	inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ntfs_err(sb, "Failed to load $Extend.");
		inode = NULL;
		goto out;
	}

	/* If ntfs_iget5() reads from disk, it never returns a bad inode. */
	if (!S_ISDIR(inode->i_mode)) {
		err = -EINVAL;
		goto out;
	}

	/* Try to find $ObjId. */
	inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		if (is_bad_inode(inode2)) {
			iput(inode2);
		} else {
			sbi->objid.ni = ntfs_i(inode2);
			sbi->objid_no = inode2->i_ino;
		}
	}

	/* Try to find $Quota. */
	inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->quota_no = inode2->i_ino;
		iput(inode2);
	}

	/* Try to find $Reparse. */
	inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->reparse.ni = ntfs_i(inode2);
		sbi->reparse_no = inode2->i_ino;
	}

	/* Try to find $UsnJrnl. */
	inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->usn_jrnl_no = inode2->i_ino;
		iput(inode2);
	}

	err = 0;
out:
	iput(inode);
	return err;
}

int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	bool initialized = false;
	struct MFT_REF ref;
	struct inode *inode;

	/* Reject $LogFile of 4GB or larger. */
	if (ni->vfs_inode.i_size >= 0x100000000ull) {
		ntfs_err(sb, "\x24LogFile is too big");
		err = -EINVAL;
		goto out;
	}

	sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;

	ref.low = cpu_to_le32(MFT_REC_MFT);
	ref.high = 0;
	ref.seq = cpu_to_le16(1);

	inode = ntfs_iget5(sb, &ref, NULL);

	if (IS_ERR(inode))
		inode = NULL;

	if (!inode) {
		/* Try to use the MFT copy. */
		u64 t64 = sbi->mft.lbo;

		sbi->mft.lbo = sbi->mft.lbo2;
		inode = ntfs_iget5(sb, &ref, NULL);
		sbi->mft.lbo = t64;
		if (IS_ERR(inode))
			inode = NULL;
	}

	if (!inode) {
		err = -EINVAL;
		ntfs_err(sb, "Failed to load $MFT.");
		goto out;
	}

	sbi->mft.ni = ntfs_i(inode);

	/* $LogFile should not contain an attribute list. */
	err = ni_load_all_mi(sbi->mft.ni);
	if (!err)
		err = log_replay(ni, &initialized);

	iput(inode);
	sbi->mft.ni = NULL;

	sync_blockdev(sb->s_bdev);
	invalidate_bdev(sb->s_bdev);

	if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
		err = 0;
		goto out;
	}

	if (sb_rdonly(sb) || !initialized)
		goto out;

	/* Fill $LogFile with -1 if it was initialized. */
	err = ntfs_bio_fill_1(sbi, &ni->file.run);

out:
	sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;

	return err;
}

/*
 * ntfs_query_def
 *
 * Return: Current ATTR_DEF_ENTRY for the given attribute type.
 */
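/*
 * The lookup below is a binary search over sbi->def_table, which is loaded
 * from $AttrDef and sorted by attribute type. A hypothetical caller-side
 * sketch, assuming the min_sz/max_sz size-bound fields of
 * struct ATTR_DEF_ENTRY (shown for illustration only):
 *
 *	const struct ATTR_DEF_ENTRY *e = ntfs_query_def(sbi, ATTR_NAME);
 *
 *	if (e && asize > le64_to_cpu(e->max_sz))
 *		return -EINVAL; // larger than $AttrDef allows for this type
 */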
const struct ATTR_DEF_ENTRY *ntfs_query_def(struct ntfs_sb_info *sbi,
					    enum ATTR_TYPE type)
{
	int type_in = le32_to_cpu(type);
	size_t min_idx = 0;
	size_t max_idx = sbi->def_entries - 1;

	while (min_idx <= max_idx) {
		size_t i = min_idx + ((max_idx - min_idx) >> 1);
		const struct ATTR_DEF_ENTRY *entry = sbi->def_table + i;
		int diff = le32_to_cpu(entry->type) - type_in;

		if (!diff)
			return entry;
		if (diff < 0)
			min_idx = i + 1;
		else if (i)
			max_idx = i - 1;
		else
			return NULL;
	}
	return NULL;
}

/*
 * ntfs_look_for_free_space - Look for a free space in bitmap.
 */
int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
			     CLST *new_lcn, CLST *new_len,
			     enum ALLOCATE_OPT opt)
{
	int err;
	CLST alen;
	struct super_block *sb = sbi->sb;
	size_t alcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;

	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	if (opt & ALLOCATE_MFT) {
		zlen = wnd_zone_len(wnd);

		if (!zlen) {
			err = ntfs_refresh_zone(sbi);
			if (err)
				goto up_write;

			zlen = wnd_zone_len(wnd);
		}

		if (!zlen) {
			ntfs_err(sbi->sb, "no free space to extend mft");
			err = -ENOSPC;
			goto up_write;
		}

		lcn = wnd_zone_bit(wnd);
		alen = min_t(CLST, len, zlen);

		wnd_zone_set(wnd, lcn + alen, zlen - alen);

		err = wnd_set_used(wnd, lcn, alen);
		if (err)
			goto up_write;

		alcn = lcn;
		goto space_found;
	}
	/*
	 * Because cluster 0 is always in use, an @lcn of 0 means that we
	 * should use the cached value of 'next_free_lcn' to improve
	 * performance.
	 */
	if (!lcn)
		lcn = sbi->used.next_free_lcn;

	if (lcn >= wnd->nbits)
		lcn = 0;

	alen = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &alcn);
	if (alen)
		goto space_found;

	/* Try to use clusters from MftZone. */
	zlen = wnd_zone_len(wnd);
	zeroes = wnd_zeroes(wnd);

	/* Reject requests that are too big. */
	if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE) {
		err = -ENOSPC;
		goto up_write;
	}

	/* How many clusters to cut from the zone. */
	zlcn = wnd_zone_bit(wnd);
	zlen2 = zlen >> 1;
	ztrim = clamp_val(len, zlen2, zlen);
	new_zlen = max_t(size_t, zlen - ztrim, NTFS_MIN_MFT_ZONE);
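	/*
	 * Illustrative numbers (not taken from a real volume): with
	 * zlen == 0x1000 clusters left in the zone and a request of
	 * len == 0x200, ztrim = clamp(0x200, 0x800, 0x1000) = 0x800,
	 * so at least half of the zone is released and
	 * new_zlen == max(0x800, NTFS_MIN_MFT_ZONE).
	 */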

	wnd_zone_set(wnd, zlcn, new_zlen);

	/* Allocate contiguous clusters. */
	alen = wnd_find(wnd, len, 0,
			BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &alcn);
	if (!alen) {
		err = -ENOSPC;
		goto up_write;
	}

space_found:
	err = 0;
	*new_len = alen;
	*new_lcn = alcn;

	ntfs_unmap_meta(sb, alcn, alen);

	/* Set hint for next requests. */
	if (!(opt & ALLOCATE_MFT))
		sbi->used.next_free_lcn = alcn + alen;
up_write:
	up_write(&wnd->rw_lock);
	return err;
}

/*
 * ntfs_extend_mft - Allocate additional MFT records.
 *
 * sbi->mft.bitmap is locked for write.
 *
 * NOTE: recursive:
 *	ntfs_look_free_mft ->
 *	ntfs_extend_mft ->
 *	attr_set_size ->
 *	ni_insert_nonresident ->
 *	ni_insert_attr ->
 *	ni_ins_attr_ext ->
 *	ntfs_look_free_mft ->
 *	ntfs_extend_mft
 *
 * To avoid this recursion, always allocate space for two new MFT records
 * (see attrib.c: "at least two MFT to avoid recursive loop").
 */
static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
{
	int err;
	struct ntfs_inode *ni = sbi->mft.ni;
	size_t new_mft_total;
	u64 new_mft_bytes, new_bitmap_bytes;
	struct ATTRIB *attr;
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;

	new_mft_total = (wnd->nbits + MFT_INCREASE_CHUNK + 127) & (CLST)~127;
	new_mft_bytes = (u64)new_mft_total << sbi->record_bits;
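	/*
	 * The & ~127 mask rounds the new total up to a multiple of 128
	 * records: the current count grows by MFT_INCREASE_CHUNK, is rounded
	 * to the next 128-record boundary, and is then converted to bytes by
	 * shifting by record_bits (log2 of the MFT record size).
	 */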

	/* Step 1: Resize $MFT::DATA. */
	down_write(&ni->file.run_lock);
	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
			    new_mft_bytes, NULL, false, &attr);

	if (err) {
		up_write(&ni->file.run_lock);
		goto out;
	}

	attr->nres.valid_size = attr->nres.data_size;
	new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;
	ni->mi.dirty = true;

	/* Step 2: Resize $MFT::BITMAP. */
	new_bitmap_bytes = bitmap_size(new_mft_total);

	err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
			    new_bitmap_bytes, &new_bitmap_bytes, true, NULL);

	/* Refresh MFT Zone if necessary. */
	down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);

	ntfs_refresh_zone(sbi);

	up_write(&sbi->used.bitmap.rw_lock);
	up_write(&ni->file.run_lock);

	if (err)
		goto out;

	err = wnd_extend(wnd, new_mft_total);

	if (err)
		goto out;

	ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);

	err = _ni_write_inode(&ni->vfs_inode, 0);
out:
	return err;
}

/*
 * ntfs_look_free_mft - Look for a free MFT record.
 */
int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
		       struct ntfs_inode *ni, struct mft_inode **mi)
{
	int err = 0;
	size_t zbit, zlen, from, to, fr;
	size_t mft_total;
	struct MFT_REF ref;
	struct super_block *sb = sbi->sb;
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
	u32 ir;

	static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
		      MFT_REC_FREE - MFT_REC_RESERVED);

	if (!mft)
		down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);

	zlen = wnd_zone_len(wnd);

	/* Always reserve space for MFT. */
	if (zlen) {
		if (mft) {
			zbit = wnd_zone_bit(wnd);
			*rno = zbit;
			wnd_zone_set(wnd, zbit + 1, zlen - 1);
		}
		goto found;
	}

	/* No MFT zone. Find the free MFT record nearest to 0. */
	if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
		/* Resize MFT. */
		mft_total = wnd->nbits;

		err = ntfs_extend_mft(sbi);
		if (!err) {
			zbit = mft_total;
			goto reserve_mft;
		}

		if (!mft || MFT_REC_FREE == sbi->mft.next_reserved)
			goto out;

		err = 0;

		/*
		 * Look for a free record in the reserved area
		 * [11-16) == [MFT_REC_RESERVED, MFT_REC_FREE).
		 * The MFT bitmap always marks it as used.
		 */
		if (!sbi->mft.reserved_bitmap) {
			/* Once per session create internal bitmap for 5 bits. */
			sbi->mft.reserved_bitmap = 0xFF;

			ref.high = 0;
			for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
				struct inode *i;
				struct ntfs_inode *ni;
				struct MFT_REC *mrec;

				ref.low = cpu_to_le32(ir);
				ref.seq = cpu_to_le16(ir);

				i = ntfs_iget5(sb, &ref, NULL);
				if (IS_ERR(i)) {
next:
					ntfs_notice(
						sb,
						"Invalid reserved record %x",
						le32_to_cpu(ref.low));
					continue;
				}
				if (is_bad_inode(i)) {
					iput(i);
					goto next;
				}

				ni = ntfs_i(i);

				mrec = ni->mi.mrec;

				if (!is_rec_base(mrec))
					goto next;

				if (mrec->hard_links)
					goto next;

				if (!ni_std(ni))
					goto next;

				if (ni_find_attr(ni, NULL, NULL, ATTR_NAME,
						 NULL, 0, NULL, NULL))
					goto next;

				__clear_bit(ir - MFT_REC_RESERVED,
					    &sbi->mft.reserved_bitmap);
			}
		}

		/* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED. */
		zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap,
					  MFT_REC_FREE, MFT_REC_RESERVED);
		if (zbit >= MFT_REC_FREE) {
			sbi->mft.next_reserved = MFT_REC_FREE;
			goto out;
		}

		zlen = 1;
		sbi->mft.next_reserved = zbit;
	} else {
reserve_mft:
		zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
		if (zbit + zlen > wnd->nbits)
			zlen = wnd->nbits - zbit;

		while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
			zlen -= 1;

		/* [zbit, zbit + zlen) will be used for MFT itself. */
		from = sbi->mft.used;
		if (from < zbit)
			from = zbit;
		to = zbit + zlen;
		if (from < to) {
			ntfs_clear_mft_tail(sbi, from, to);
			sbi->mft.used = to;
		}
	}

	if (mft) {
		*rno = zbit;
		zbit += 1;
		zlen -= 1;
	}

	wnd_zone_set(wnd, zbit, zlen);

found:
	if (!mft) {
		/* The request is to get a record for general purpose. */
		if (sbi->mft.next_free < MFT_REC_USER)
			sbi->mft.next_free = MFT_REC_USER;

		for (;;) {
			if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
			} else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
				sbi->mft.next_free = sbi->mft.bitmap.nbits;
			} else {
				*rno = fr;
				sbi->mft.next_free = *rno + 1;
				break;
			}

			err = ntfs_extend_mft(sbi);
			if (err)
				goto out;
		}
	}

	if (ni && !ni_add_subrecord(ni, *rno, mi)) {
		err = -ENOMEM;
		goto out;
	}

	/* We have found a record that is not reserved for the next MFT. */
	if (*rno >= MFT_REC_FREE)
		wnd_set_used(wnd, *rno, 1);
	else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
		__set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);

out:
	if (!mft)
		up_write(&wnd->rw_lock);

	return err;
}

/*
 * ntfs_mark_rec_free - Mark record as free.
 */
void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno)
{
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;

	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
	if (rno >= wnd->nbits)
		goto out;

	if (rno >= MFT_REC_FREE) {
		if (!wnd_is_used(wnd, rno, 1))
			ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
		else
			wnd_set_free(wnd, rno, 1);
	} else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
		__clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
	}

	if (rno < wnd_zone_bit(wnd))
		wnd_zone_set(wnd, rno, 1);
	else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
		sbi->mft.next_free = rno;

out:
	up_write(&wnd->rw_lock);
}

/*
 * ntfs_clear_mft_tail - Format empty records [from, to).
 *
 * sbi->mft.bitmap is locked for write.
 */
int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
{
	int err;
	u32 rs;
	u64 vbo;
	struct runs_tree *run;
	struct ntfs_inode *ni;

	if (from >= to)
		return 0;

	rs = sbi->record_size;
	ni = sbi->mft.ni;
	run = &ni->file.run;

	down_read(&ni->file.run_lock);
	vbo = (u64)from * rs;
	for (; from < to; from++, vbo += rs) {
		struct ntfs_buffers nb;

		err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
		if (err)
			goto out;

		err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
		nb_put(&nb);
		if (err)
			goto out;
	}

out:
	sbi->mft.used = from;
	up_read(&ni->file.run_lock);
	return err;
}

/*
 * ntfs_refresh_zone - Refresh MFT zone.
 *
 * sbi->used.bitmap is locked for rw.
 * sbi->mft.bitmap is locked for write.
 * sbi->mft.ni->file.run_lock for write.
 */
int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
{
	CLST zone_limit, zone_max, lcn, vcn, len;
	size_t lcn_s, zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	struct ntfs_inode *ni = sbi->mft.ni;

	/* Do not change anything if the MFT zone is already non-empty. */
	if (wnd_zone_len(wnd))
		return 0;

	/*
	 * Compute the MFT zone in two steps.
	 * It would be nice if we were able to allocate 1/8 of
	 * total clusters for MFT, but no more than 512 MB.
	 */
	zone_limit = (512 * 1024 * 1024) >> sbi->cluster_bits;
	zone_max = wnd->nbits >> 3;
	if (zone_max > zone_limit)
		zone_max = zone_limit;

	vcn = bytes_to_cluster(sbi,
			       (u64)sbi->mft.bitmap.nbits << sbi->record_bits);

	if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
		lcn = SPARSE_LCN;

	/* We should always find the last LCN for MFT. */
	if (lcn == SPARSE_LCN)
		return -EINVAL;

	lcn_s = lcn + 1;

	/* Try to allocate clusters after the last MFT run. */
	zlen = wnd_find(wnd, zone_max, lcn_s, 0, &lcn_s);
	if (!zlen) {
		ntfs_notice(sbi->sb, "MftZone: unavailable");
		return 0;
	}

	/* Truncate too large zone. */
	wnd_zone_set(wnd, lcn_s, zlen);

	return 0;
}

/*
 * ntfs_update_mftmirr - Update $MFTMirr data.
 */
int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
{
	int err;
	struct super_block *sb = sbi->sb;
	u32 blocksize;
	sector_t block1, block2;
	u32 bytes;

	if (!sb)
		return -EINVAL;

	blocksize = sb->s_blocksize;

	if (!(sbi->flags & NTFS_FLAGS_MFTMIRR))
		return 0;

	err = 0;
	bytes = sbi->mft.recs_mirr << sbi->record_bits;
	block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
	block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;

	for (; bytes >= blocksize; bytes -= blocksize) {
		struct buffer_head *bh1, *bh2;

		bh1 = sb_bread(sb, block1++);
		if (!bh1) {
			err = -EIO;
			goto out;
		}

		bh2 = sb_getblk(sb, block2++);
		if (!bh2) {
			put_bh(bh1);
			err = -EIO;
			goto out;
		}

		if (buffer_locked(bh2))
			__wait_on_buffer(bh2);

		lock_buffer(bh2);
		memcpy(bh2->b_data, bh1->b_data, blocksize);
		set_buffer_uptodate(bh2);
		mark_buffer_dirty(bh2);
		unlock_buffer(bh2);

		put_bh(bh1);
		bh1 = NULL;

		if (wait)
			err = sync_dirty_buffer(bh2);

		put_bh(bh2);
		if (err)
			goto out;
	}

	sbi->flags &= ~NTFS_FLAGS_MFTMIRR;

out:
	return err;
}

/*
 * ntfs_set_state
 *
 * Mount:	ntfs_set_state(NTFS_DIRTY_DIRTY)
 * Umount:	ntfs_set_state(NTFS_DIRTY_CLEAR)
 * NTFS error:	ntfs_set_state(NTFS_DIRTY_ERROR)
 */
int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
{
	int err;
	struct ATTRIB *attr;
	struct VOLUME_INFO *info;
	struct mft_inode *mi;
	struct ntfs_inode *ni;

	/*
	 * Do not change state if fs was real_dirty.
	 * Do not change state if fs is already dirty (or clear).
	 * Do not change anything if mounted read-only.
	 */
	if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
		return 0;

	/* Check cached value. */
	if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
	    (sbi->volume.flags & VOLUME_FLAG_DIRTY))
		return 0;

	ni = sbi->volume.ni;
	if (!ni)
		return -EINVAL;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);

	attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
	if (!info) {
		err = -EINVAL;
		goto out;
	}

	switch (dirty) {
	case NTFS_DIRTY_ERROR:
		ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
		sbi->volume.real_dirty = true;
		fallthrough;
	case NTFS_DIRTY_DIRTY:
		info->flags |= VOLUME_FLAG_DIRTY;
		break;
	case NTFS_DIRTY_CLEAR:
		info->flags &= ~VOLUME_FLAG_DIRTY;
		break;
	}
	/* Cache current volume flags. */
	sbi->volume.flags = info->flags;
	mi->dirty = true;
	err = 0;

out:
	ni_unlock(ni);
	if (err)
		return err;

	mark_inode_dirty_sync(&ni->vfs_inode);
	/* verify(!ntfs_update_mftmirr()); */

	/* Write MFT record on disk. */
	err = _ni_write_inode(&ni->vfs_inode, 1);

	return err;
}

/*
 * security_hash - Calculates a hash of security descriptor.
 */
static inline __le32 security_hash(const void *sd, size_t bytes)
{
	u32 hash = 0;
	const __le32 *ptr = sd;

	bytes >>= 2;
	while (bytes--)
		hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
	return cpu_to_le32(hash);
}
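
/*
 * Equivalent closed form of the hash above, for reference: each step computes
 * hash = rol32(hash, 3) + dword over the 32-bit little-endian words of the
 * descriptor ((hash >> 0x1D) | (hash << 3) is a rotate-left by 3).
 * Illustrative check: the two words 0x00000001, 0x00000002 hash to
 * rol32(rol32(0, 3) + 1, 3) + 2 == 0x0000000A.
 */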

int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
{
	struct block_device *bdev = sb->s_bdev;
	u32 blocksize = sb->s_blocksize;
	u64 block = lbo >> sb->s_blocksize_bits;
	u32 off = lbo & (blocksize - 1);
	u32 op = blocksize - off;

	for (; bytes; block += 1, off = 0, op = blocksize) {
		struct buffer_head *bh = __bread(bdev, block, blocksize);

		if (!bh)
			return -EIO;

		if (op > bytes)
			op = bytes;

		memcpy(buffer, bh->b_data + off, op);

		put_bh(bh);

		bytes -= op;
		buffer = Add2Ptr(buffer, op);
	}

	return 0;
}

int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
		  const void *buf, int wait)
{
	u32 blocksize = sb->s_blocksize;
	struct block_device *bdev = sb->s_bdev;
	sector_t block = lbo >> sb->s_blocksize_bits;
	u32 off = lbo & (blocksize - 1);
	u32 op = blocksize - off;
	struct buffer_head *bh;

	if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
		wait = 1;

	for (; bytes; block += 1, off = 0, op = blocksize) {
		if (op > bytes)
			op = bytes;

		if (op < blocksize) {
			bh = __bread(bdev, block, blocksize);
			if (!bh) {
				ntfs_err(sb, "failed to read block %llx",
					 (u64)block);
				return -EIO;
			}
		} else {
			bh = __getblk(bdev, block, blocksize);
			if (!bh)
				return -ENOMEM;
		}

		if (buffer_locked(bh))
			__wait_on_buffer(bh);

		lock_buffer(bh);
		if (buf) {
			memcpy(bh->b_data + off, buf, op);
			buf = Add2Ptr(buf, op);
		} else {
			memset(bh->b_data + off, -1, op);
		}

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);

		if (wait) {
			int err = sync_dirty_buffer(bh);

			if (err) {
				ntfs_err(
					sb,
					"failed to sync buffer at block %llx, error %d",
					(u64)block, err);
				put_bh(bh);
				return err;
			}
		}

		put_bh(bh);

		bytes -= op;
	}
	return 0;
}

int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		      u64 vbo, const void *buf, size_t bytes, int sync)
{
	struct super_block *sb = sbi->sb;
	u8 cluster_bits = sbi->cluster_bits;
	u32 off = vbo & sbi->cluster_mask;
	CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
	u64 lbo, len;
	size_t idx;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
		return -ENOENT;

	if (lcn == SPARSE_LCN)
		return -EINVAL;

	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;

	for (;;) {
		u32 op = min_t(u64, len, bytes);
		int err = ntfs_sb_write(sb, lbo, op, buf, sync);

		if (err)
			return err;

		bytes -= op;
		if (!bytes)
			break;

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next)
			return -ENOENT;

		if (lcn == SPARSE_LCN)
			return -EINVAL;

		if (buf)
			buf = Add2Ptr(buf, op);

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

	return 0;
}

struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
				   const struct runs_tree *run, u64 vbo)
{
	struct super_block *sb = sbi->sb;
	u8 cluster_bits = sbi->cluster_bits;
	CLST lcn;
	u64 lbo;

	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
		return ERR_PTR(-ENOENT);

	lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);

	return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
}

int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		     u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)
{
	int err;
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	u32 off = vbo & sbi->cluster_mask;
	u32 nbh = 0;
	CLST vcn_next, vcn = vbo >> cluster_bits;
	CLST lcn, clen;
	u64 lbo, len;
	size_t idx;
	struct buffer_head *bh;

	if (!run) {
		/* First reading of $Volume + $MFTMirr + $LogFile goes here. */
		if (vbo > MFT_REC_VOL * sbi->record_size) {
			err = -ENOENT;
			goto out;
		}

		/* Use the boot sector's absolute 'MFTCluster' to read the record. */
		lbo = vbo + sbi->mft.lbo;
		len = sbi->record_size;
	} else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = -ENOENT;
		goto out;
	} else {
		if (lcn == SPARSE_LCN) {
			err = -EINVAL;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits) + off;
		len = ((u64)clen << cluster_bits) - off;
	}

	off = lbo & (blocksize - 1);
	if (nb) {
		nb->off = off;
		nb->bytes = bytes;
	}

	for (;;) {
		u32 len32 = len >= bytes ? bytes : len;
		sector_t block = lbo >> sb->s_blocksize_bits;

		do {
			u32 op = blocksize - off;

			if (op > len32)
				op = len32;

			bh = ntfs_bread(sb, block);
			if (!bh) {
				err = -EIO;
				goto out;
			}

			if (buf) {
				memcpy(buf, bh->b_data + off, op);
				buf = Add2Ptr(buf, op);
			}

			if (!nb) {
				put_bh(bh);
			} else if (nbh >= ARRAY_SIZE(nb->bh)) {
				err = -EINVAL;
				goto out;
			} else {
				nb->bh[nbh++] = bh;
				nb->nbufs = nbh;
			}

			bytes -= op;
			if (!bytes)
				return 0;
			len32 -= op;
			block += 1;
			off = 0;

		} while (len32);

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}

		if (lcn == SPARSE_LCN) {
			err = -EINVAL;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

out:
	if (!nbh)
		return err;

	while (nbh) {
		put_bh(nb->bh[--nbh]);
		nb->bh[nbh] = NULL;
	}

	nb->nbufs = 0;
	return err;
}

/*
 * ntfs_read_bh
 *
 * Return: < 0 on error, 0 if ok, -E_NTFS_FIXUP if fixups need to be updated.
 */
int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
		 struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
		 struct ntfs_buffers *nb)
{
	int err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);

	if (err)
		return err;
	return ntfs_fix_post_read(rhdr, nb->bytes, true);
}

int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
		u32 bytes, struct ntfs_buffers *nb)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	CLST vcn_next, vcn = vbo >> cluster_bits;
	u32 off;
	u32 nbh = 0;
	CLST lcn, clen;
	u64 lbo, len;
	size_t idx;

	nb->bytes = bytes;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = -ENOENT;
		goto out;
	}

	off = vbo & sbi->cluster_mask;
	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;

	nb->off = off = lbo & (blocksize - 1);

	for (;;) {
		u32 len32 = min_t(u64, len, bytes);
		sector_t block = lbo >> sb->s_blocksize_bits;

		do {
			u32 op;
			struct buffer_head *bh;

			if (nbh >= ARRAY_SIZE(nb->bh)) {
				err = -EINVAL;
				goto out;
			}

			op = blocksize - off;
			if (op > len32)
				op = len32;

			if (op == blocksize) {
				bh = sb_getblk(sb, block);
				if (!bh) {
					err = -ENOMEM;
					goto out;
				}
				if (buffer_locked(bh))
					__wait_on_buffer(bh);
				set_buffer_uptodate(bh);
			} else {
				bh = ntfs_bread(sb, block);
				if (!bh) {
					err = -EIO;
					goto out;
				}
			}

			nb->bh[nbh++] = bh;
			bytes -= op;
			if (!bytes) {
				nb->nbufs = nbh;
				return 0;
			}

			block += 1;
			len32 -= op;
			off = 0;
		} while (len32);

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

out:
	while (nbh) {
		put_bh(nb->bh[--nbh]);
		nb->bh[nbh] = NULL;
	}

	nb->nbufs = 0;

	return err;
}

int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
		  struct ntfs_buffers *nb, int sync)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	u32 block_size = sb->s_blocksize;
	u32 bytes = nb->bytes;
	u32 off = nb->off;
	u16 fo = le16_to_cpu(rhdr->fix_off);
	u16 fn = le16_to_cpu(rhdr->fix_num);
	u32 idx;
	__le16 *fixup;
	__le16 sample;

	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return -EINVAL;
	}

	for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
		u32 op = block_size - off;
		char *bh_data;
		struct buffer_head *bh = nb->bh[idx];
		__le16 *ptr, *end_data;

		if (op > bytes)
			op = bytes;

		if (buffer_locked(bh))
			__wait_on_buffer(bh);

		lock_buffer(nb->bh[idx]);

		bh_data = bh->b_data + off;
		end_data = Add2Ptr(bh_data, op);
		memcpy(bh_data, rhdr, op);

		if (!idx) {
			u16 t16;

			fixup = Add2Ptr(bh_data, fo);
			sample = *fixup;
			t16 = le16_to_cpu(sample);
			if (t16 >= 0x7FFF) {
				sample = *fixup = cpu_to_le16(1);
			} else {
				sample = cpu_to_le16(t16 + 1);
				*fixup = sample;
			}

			*(__le16 *)Add2Ptr(rhdr, fo) = sample;
		}

		ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));

		do {
			*++fixup = *ptr;
			*ptr = sample;
			ptr += SECTOR_SIZE / sizeof(short);
		} while (ptr < end_data);

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);

		if (sync) {
			int err2 = sync_dirty_buffer(bh);

			if (!err && err2)
				err = err2;
		}

		bytes -= op;
		rhdr = Add2Ptr(rhdr, op);
	}

	return err;
}

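/*
 * ntfs_alloc_bio - Allocate a bio with up to @nr_vecs vectors.
 *
 * If the allocation fails while the caller is in memory-reclaim context
 * (PF_MEMALLOC), retry with progressively fewer vectors so that writeback
 * can still make forward progress under memory pressure.
 */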
static inline struct bio *ntfs_alloc_bio(u32 nr_vecs)
{
	struct bio *bio = bio_alloc(GFP_NOFS | __GFP_HIGH, nr_vecs);

	if (!bio && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(GFP_NOFS | __GFP_HIGH, nr_vecs);
	}
	return bio;
}

/*
 * ntfs_bio_pages - Read/write pages from/to disk.
 */
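/*
 * Submission pattern used below: bios are chained with bio_chain(), so every
 * bio except the last is submitted as soon as it fills up, and a single
 * submit_bio_wait() on the tail bio completes only after all of its chained
 * predecessors have completed.
 */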
int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		   struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
		   u32 op)
{
	int err = 0;
	struct bio *new, *bio = NULL;
	struct super_block *sb = sbi->sb;
	struct block_device *bdev = sb->s_bdev;
	struct page *page;
	u8 cluster_bits = sbi->cluster_bits;
	CLST lcn, clen, vcn, vcn_next;
	u32 add, off, page_idx;
	u64 lbo, len;
	size_t run_idx;
	struct blk_plug plug;

	if (!bytes)
		return 0;

	blk_start_plug(&plug);

	/* Align vbo and bytes to 512-byte boundaries. */
	lbo = (vbo + bytes + 511) & ~511ull;
	vbo = vbo & ~511ull;
	bytes = lbo - vbo;

	vcn = vbo >> cluster_bits;
	if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
		err = -ENOENT;
		goto out;
	}
	off = vbo & sbi->cluster_mask;
	page_idx = 0;
	page = pages[0];

	for (;;) {
		lbo = ((u64)lcn << cluster_bits) + off;
		len = ((u64)clen << cluster_bits) - off;
new_bio:
		new = ntfs_alloc_bio(nr_pages - page_idx);
		if (!new) {
			err = -ENOMEM;
			goto out;
		}
		if (bio) {
			bio_chain(bio, new);
			submit_bio(bio);
		}
		bio = new;
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = lbo >> 9;
		bio->bi_opf = op;

		while (len) {
			off = vbo & (PAGE_SIZE - 1);
			add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;

			if (bio_add_page(bio, page, add, off) < add)
				goto new_bio;

			if (bytes <= add)
				goto out;
			bytes -= add;
			vbo += add;

			if (add + off == PAGE_SIZE) {
				page_idx += 1;
				if (WARN_ON(page_idx >= nr_pages)) {
					err = -EINVAL;
					goto out;
				}
				page = pages[page_idx];
			}

			if (len <= add)
				break;
			len -= add;
			lbo += add;
		}

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}
		off = 0;
	}
out:
	if (bio) {
		if (!err)
			err = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return err;
}

/*
 * ntfs_bio_fill_1 - Helper for ntfs_loadlog_and_replay().
 *
 * Fill the on-disk logfile range with -1;
 * this marks the logfile as empty.
 */
int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	struct block_device *bdev = sb->s_bdev;
	u8 cluster_bits = sbi->cluster_bits;
	struct bio *new, *bio = NULL;
	CLST lcn, clen;
	u64 lbo, len;
	size_t run_idx;
	struct page *fill;
	void *kaddr;
	struct blk_plug plug;

	fill = alloc_page(GFP_KERNEL);
	if (!fill)
		return -ENOMEM;

	kaddr = kmap_atomic(fill);
	memset(kaddr, -1, PAGE_SIZE);
	kunmap_atomic(kaddr);
	flush_dcache_page(fill);
	lock_page(fill);

	if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
		err = -ENOENT;
		goto out;
	}

	/*
	 * TODO: Try blkdev_issue_write_same.
	 */
	blk_start_plug(&plug);
	do {
		lbo = (u64)lcn << cluster_bits;
		len = (u64)clen << cluster_bits;
new_bio:
		new = ntfs_alloc_bio(BIO_MAX_VECS);
		if (!new) {
			err = -ENOMEM;
			break;
		}
		if (bio) {
			bio_chain(bio, new);
			submit_bio(bio);
		}
		bio = new;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE;
		bio->bi_iter.bi_sector = lbo >> 9;

		for (;;) {
			u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;

			if (bio_add_page(bio, fill, add, 0) < add)
				goto new_bio;

			lbo += add;
			if (len <= add)
				break;
			len -= add;
		}
	} while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));

	if (bio) {
		if (!err)
			err = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
out:
	unlock_page(fill);
	put_page(fill);

	return err;
}

int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		    u64 vbo, u64 *lbo, u64 *bytes)
{
	u32 off;
	CLST lcn, len;
	u8 cluster_bits = sbi->cluster_bits;

	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
		return -ENOENT;

	off = vbo & sbi->cluster_mask;
	*lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
	*bytes = ((u64)len << cluster_bits) - off;

	return 0;
}

struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno, bool dir)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	struct inode *inode = new_inode(sb);
	struct ntfs_inode *ni;

	if (!inode)
		return ERR_PTR(-ENOMEM);

	ni = ntfs_i(inode);

	err = mi_format_new(&ni->mi, sbi, rno, dir ? RECORD_FLAG_DIR : 0,
			    false);
	if (err)
		goto out;

	inode->i_ino = rno;
	if (insert_inode_locked(inode) < 0) {
		err = -EIO;
		goto out;
	}

out:
	if (err) {
		make_bad_inode(inode);
		iput(inode);
		ni = ERR_PTR(err);
	}
	return ni;
}

/*
 * O:BAG:BAD:(A;OICI;FA;;;WD)
 * Owner S-1-5-32-544 (Administrators)
 * Group S-1-5-32-544 (Administrators)
 * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
 */
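/*
 * Decoded layout of the blob below (byte offsets; the values follow from the
 * hex dump itself):
 *	0x00 SECURITY_DESCRIPTOR_RELATIVE: Revision 1,
 *	     Control 0x8004 (SE_SELF_RELATIVE | SE_DACL_PRESENT),
 *	     Owner @ 0x30, Group @ 0x40, Sacl 0, Dacl @ 0x14
 *	0x14 ACL: revision 2, size 0x1C, one ACE
 *	0x1C ACCESS_ALLOWED ACE: flags OICI, mask 0x001F01FF
 *	     (FILE_ALL_ACCESS), SID S-1-1-0 (Everyone)
 *	0x30 owner SID and 0x40 group SID: both S-1-5-32-544
 */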
const u8 s_default_security[] __aligned(8) = {
	0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
	0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
	0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
	0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
	0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
};

static_assert(sizeof(s_default_security) == 0x50);

static inline u32 sid_length(const struct SID *sid)
{
	return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
}

/*
 * is_acl_valid
 *
 * Thanks to Mark Harmstone for the idea.
 */
static bool is_acl_valid(const struct ACL *acl, u32 len)
{
	const struct ACE_HEADER *ace;
	u32 i;
	u16 ace_count, ace_size;

	if (acl->AclRevision != ACL_REVISION &&
	    acl->AclRevision != ACL_REVISION_DS) {
		/*
		 * This value should be ACL_REVISION, unless the ACL contains an
		 * object-specific ACE, in which case this value must be ACL_REVISION_DS.
		 * All ACEs in an ACL must be at the same revision level.
		 */
		return false;
	}

	if (acl->Sbz1)
		return false;

	if (le16_to_cpu(acl->AclSize) > len)
		return false;

	if (acl->Sbz2)
		return false;

	len -= sizeof(struct ACL);
	ace = (struct ACE_HEADER *)&acl[1];
	ace_count = le16_to_cpu(acl->AceCount);

	for (i = 0; i < ace_count; i++) {
		if (len < sizeof(struct ACE_HEADER))
			return false;

		ace_size = le16_to_cpu(ace->AceSize);
		if (len < ace_size)
			return false;

		len -= ace_size;
		ace = Add2Ptr(ace, ace_size);
	}

	return true;
}

bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
{
	u32 sd_owner, sd_group, sd_sacl, sd_dacl;

	if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
		return false;

	if (sd->Revision != 1)
		return false;

	if (sd->Sbz1)
		return false;

	if (!(sd->Control & SE_SELF_RELATIVE))
		return false;

	sd_owner = le32_to_cpu(sd->Owner);
	if (sd_owner) {
		const struct SID *owner = Add2Ptr(sd, sd_owner);

		if (sd_owner + offsetof(struct SID, SubAuthority) > len)
			return false;

		if (owner->Revision != 1)
			return false;

		if (sd_owner + sid_length(owner) > len)
			return false;
	}

	sd_group = le32_to_cpu(sd->Group);
	if (sd_group) {
		const struct SID *group = Add2Ptr(sd, sd_group);

		if (sd_group + offsetof(struct SID, SubAuthority) > len)
			return false;

		if (group->Revision != 1)
			return false;

		if (sd_group + sid_length(group) > len)
			return false;
	}

	sd_sacl = le32_to_cpu(sd->Sacl);
	if (sd_sacl) {
		const struct ACL *sacl = Add2Ptr(sd, sd_sacl);

		if (sd_sacl + sizeof(struct ACL) > len)
			return false;

		if (!is_acl_valid(sacl, len - sd_sacl))
			return false;
	}

	sd_dacl = le32_to_cpu(sd->Dacl);
	if (sd_dacl) {
		const struct ACL *dacl = Add2Ptr(sd, sd_dacl);

		if (sd_dacl + sizeof(struct ACL) > len)
			return false;

		if (!is_acl_valid(dacl, len - sd_dacl))
			return false;
	}

	return true;
}

/*
 * ntfs_security_init - Load and parse $Secure.
 */
int ntfs_security_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct super_block *sb = sbi->sb;
	struct inode *inode;
	struct ntfs_inode *ni;
	struct MFT_REF ref;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	u64 sds_size;
	size_t off;
	struct NTFS_DE *ne;
	struct NTFS_DE_SII *sii_e;
	struct ntfs_fnd *fnd_sii = NULL;
	const struct INDEX_ROOT *root_sii;
	const struct INDEX_ROOT *root_sdh;
	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
	struct ntfs_index *indx_sii = &sbi->security.index_sii;

	ref.low = cpu_to_le32(MFT_REC_SECURE);
	ref.high = 0;
	ref.seq = cpu_to_le16(MFT_REC_SECURE);

	inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ntfs_err(sb, "Failed to load $Secure.");
		inode = NULL;
		goto out;
	}

	ni = ntfs_i(inode);

	le = NULL;

	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
			    ARRAY_SIZE(SDH_NAME), NULL, NULL);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	if (!(root_sdh = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
	    root_sdh->type != ATTR_ZERO ||
	    root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH ||
	    offsetof(struct INDEX_ROOT, ihdr) +
			    le32_to_cpu(root_sdh->ihdr.used) >
		    le32_to_cpu(attr->res.data_size)) {
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
	if (err)
		goto out;

	attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
			    ARRAY_SIZE(SII_NAME), NULL, NULL);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	if (!(root_sii = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
	    root_sii->type != ATTR_ZERO ||
	    root_sii->rule != NTFS_COLLATION_TYPE_UINT ||
	    offsetof(struct INDEX_ROOT, ihdr) +
			    le32_to_cpu(root_sii->ihdr.used) >
		    le32_to_cpu(attr->res.data_size)) {
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
	if (err)
		goto out;

	fnd_sii = fnd_get();
	if (!fnd_sii) {
		err = -ENOMEM;
		goto out;
	}

	sds_size = inode->i_size;

	/* Find the last valid Id. */
	sbi->security.next_id = SECURITY_ID_FIRST;
	/* Always write a new security descriptor at the end of the bucket. */
	sbi->security.next_off =
		ALIGN(sds_size - SecurityDescriptorsBlockSize, 16);
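	/*
	 * Offset math, illustratively: the final SecurityDescriptorsBlockSize
	 * (256K) of the stream is the mirror copy, so with i_size == 0x80100
	 * the next descriptor goes to ALIGN(0x80100 - 0x40000, 16) == 0x40100
	 * in the main region (values invented for the example).
	 */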

	off = 0;
	ne = NULL;

	for (;;) {
		u32 next_id;

		err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
		if (err || !ne)
			break;

		sii_e = (struct NTFS_DE_SII *)ne;
		if (le16_to_cpu(ne->view.data_size) < SIZEOF_SECURITY_HDR)
			continue;

		next_id = le32_to_cpu(sii_e->sec_id) + 1;
		if (next_id >= sbi->security.next_id)
			sbi->security.next_id = next_id;
	}

	sbi->security.ni = ni;
	inode = NULL;
out:
	iput(inode);
	fnd_put(fnd_sii);

	return err;
}

/*
 * ntfs_get_security_by_id - Read security descriptor by id.
 */
int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
			    struct SECURITY_DESCRIPTOR_RELATIVE **sd,
			    size_t *size)
{
	int err;
	int diff;
	struct ntfs_inode *ni = sbi->security.ni;
	struct ntfs_index *indx = &sbi->security.index_sii;
	void *p = NULL;
	struct NTFS_DE_SII *sii_e;
	struct ntfs_fnd *fnd_sii;
	struct SECURITY_HDR d_security;
	const struct INDEX_ROOT *root_sii;
	u32 t32;

	*sd = NULL;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);

	fnd_sii = fnd_get();
	if (!fnd_sii) {
		err = -ENOMEM;
		goto out;
	}

	root_sii = indx_get_root(indx, ni, NULL, NULL);
	if (!root_sii) {
		err = -EINVAL;
		goto out;
	}

	/* Try to find this SECURITY descriptor in SII indexes. */
	err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
			NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
	if (err)
		goto out;

	if (diff)
		goto out;

	t32 = le32_to_cpu(sii_e->sec_hdr.size);
	if (t32 < SIZEOF_SECURITY_HDR) {
		err = -EINVAL;
		goto out;
	}

	if (t32 > SIZEOF_SECURITY_HDR + 0x10000) {
		/* Security descriptor looks too big; 0x10000 is an arbitrary upper bound. */
		err = -EFBIG;
		goto out;
	}

	*size = t32 - SIZEOF_SECURITY_HDR;

	p = kmalloc(*size, GFP_NOFS);
	if (!p) {
		err = -ENOMEM;
		goto out;
	}

	err = ntfs_read_run_nb(sbi, &ni->file.run,
			       le64_to_cpu(sii_e->sec_hdr.off), &d_security,
			       sizeof(d_security), NULL);
	if (err)
		goto out;

	if (memcmp(&d_security, &sii_e->sec_hdr, SIZEOF_SECURITY_HDR)) {
		err = -EINVAL;
		goto out;
	}

	err = ntfs_read_run_nb(sbi, &ni->file.run,
			       le64_to_cpu(sii_e->sec_hdr.off) +
				       SIZEOF_SECURITY_HDR,
			       p, *size, NULL);
	if (err)
		goto out;

	*sd = p;
	p = NULL;

out:
	kfree(p);
	fnd_put(fnd_sii);
	ni_unlock(ni);

	return err;
}

/*
 * ntfs_insert_security - Insert security descriptor into $Secure::SDS.
 *
 * SECURITY Descriptor Stream data is organized into chunks of 256K bytes
 * and it contains a mirror copy of each security descriptor. When writing
 * to a security descriptor at location X, another copy will be written at
 * location (X+256K).
 * When writing a security descriptor that will cross the 256K boundary,
 * the pointer will be advanced by 256K to skip over the mirror portion.
 */
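/*
 * Worked example of the gap logic inside this function (invented values):
 * with next_off == 0x3F000, only left == 0x1000 bytes remain before the
 * 256K boundary. A descriptor of aligned size 0x2000 does not fit, so
 * next_off is advanced by SecurityDescriptorsBlockSize + left to 0x80000:
 * the start of the next main chunk, whose mirror copy then goes to
 * 0x80000 + 0x40000.
 */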
int ntfs_insert_security(struct ntfs_sb_info *sbi,
			 const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
			 u32 size_sd, __le32 *security_id, bool *inserted)
{
	int err, diff;
	struct ntfs_inode *ni = sbi->security.ni;
	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
	struct ntfs_index *indx_sii = &sbi->security.index_sii;
	struct NTFS_DE_SDH *e;
	struct NTFS_DE_SDH sdh_e;
	struct NTFS_DE_SII sii_e;
	struct SECURITY_HDR *d_security;
	u32 new_sec_size = size_sd + SIZEOF_SECURITY_HDR;
	u32 aligned_sec_size = ALIGN(new_sec_size, 16);
	struct SECURITY_KEY hash_key;
	struct ntfs_fnd *fnd_sdh = NULL;
	const struct INDEX_ROOT *root_sdh;
	const struct INDEX_ROOT *root_sii;
	u64 mirr_off, new_sds_size;
	u32 next, left;

	static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
		      SecurityDescriptorsBlockSize);

	hash_key.hash = security_hash(sd, size_sd);
	hash_key.sec_id = SECURITY_ID_INVALID;

	if (inserted)
		*inserted = false;
	*security_id = SECURITY_ID_INVALID;

	/* Allocate a temporary buffer. */
	d_security = kzalloc(aligned_sec_size, GFP_NOFS);
	if (!d_security)
		return -ENOMEM;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);

	fnd_sdh = fnd_get();
	if (!fnd_sdh) {
		err = -ENOMEM;
		goto out;
	}

	root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);
	if (!root_sdh) {
		err = -EINVAL;
		goto out;
	}

	root_sii = indx_get_root(indx_sii, ni, NULL, NULL);
	if (!root_sii) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * Check if such security already exists.
	 * Use "SDH" and hash -> to get the offset in "SDS".
	 */
	err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
			&d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
			fnd_sdh);
	if (err)
		goto out;

	while (e) {
		if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
			err = ntfs_read_run_nb(sbi, &ni->file.run,
					       le64_to_cpu(e->sec_hdr.off),
					       d_security, new_sec_size, NULL);
			if (err)
				goto out;

			if (le32_to_cpu(d_security->size) == new_sec_size &&
			    d_security->key.hash == hash_key.hash &&
			    !memcmp(d_security + 1, sd, size_sd)) {
				*security_id = d_security->key.sec_id;
				/* Such security already exists. */
				err = 0;
				goto out;
			}
		}

		err = indx_find_sort(indx_sdh, ni, root_sdh,
				     (struct NTFS_DE **)&e, fnd_sdh);
		if (err)
			goto out;

		if (!e || e->key.hash != hash_key.hash)
			break;
	}

	/* Zero unused space. */
	next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
	left = SecurityDescriptorsBlockSize - next;

	/* Zero gap until SecurityDescriptorsBlockSize. */
	if (left < new_sec_size) {
		/* Zero "left" bytes from sbi->security.next_off. */
		sbi->security.next_off += SecurityDescriptorsBlockSize + left;
	}

	/* Zero tail of previous security. */
	//used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);

	/*
	 * Example:
	 * 0x40438 == ni->vfs_inode.i_size
	 * 0x00440 == sbi->security.next_off
	 * need to zero [0x438-0x440)
	 * if (next > used) {
	 *	u32 tozero = next - used;
	 *	zero "tozero" bytes from sbi->security.next_off - tozero
	 * }
	 */

	/* Format new security descriptor. */
	d_security->key.hash = hash_key.hash;
	d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
	d_security->off = cpu_to_le64(sbi->security.next_off);
	d_security->size = cpu_to_le32(new_sec_size);
	memcpy(d_security + 1, sd, size_sd);

	/* Write main SDS bucket. */
	err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
				d_security, aligned_sec_size, 0);

	if (err)
		goto out;

	mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
	new_sds_size = mirr_off + aligned_sec_size;

	if (new_sds_size > ni->vfs_inode.i_size) {
		err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
				    ARRAY_SIZE(SDS_NAME), &ni->file.run,
				    new_sds_size, &new_sds_size, false, NULL);
		if (err)
			goto out;
	}

	/* Write copy SDS bucket. */
	err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
				aligned_sec_size, 0);
	if (err)
		goto out;

	/* Fill SII entry. */
	sii_e.de.view.data_off =
		cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
	sii_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
	sii_e.de.view.res = 0;
	sii_e.de.size = cpu_to_le16(SIZEOF_SII_DIRENTRY);
	sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
	sii_e.de.flags = 0;
	sii_e.de.res = 0;
	sii_e.sec_id = d_security->key.sec_id;
	memcpy(&sii_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);

	err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL, 0);
	if (err)
		goto out;

	/* Fill SDH entry. */
	sdh_e.de.view.data_off =
		cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
	sdh_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
	sdh_e.de.view.res = 0;
	sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
	sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
	sdh_e.de.flags = 0;
	sdh_e.de.res = 0;
	sdh_e.key.hash = d_security->key.hash;
	sdh_e.key.sec_id = d_security->key.sec_id;
	memcpy(&sdh_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
	sdh_e.magic[0] = cpu_to_le16('I');
	sdh_e.magic[1] = cpu_to_le16('I');

	fnd_clear(fnd_sdh);
	err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
				fnd_sdh, 0);
	if (err)
		goto out;

	*security_id = d_security->key.sec_id;
	if (inserted)
		*inserted = true;

	/* Update Id and offset for the next descriptor. */
	sbi->security.next_id += 1;
	sbi->security.next_off += aligned_sec_size;

out:
	fnd_put(fnd_sdh);
	mark_inode_dirty(&ni->vfs_inode);
	ni_unlock(ni);
	kfree(d_security);

	return err;
}
2255
/*
 * ntfs_reparse_init - Load and parse $Extend/$Reparse.
 */
int ntfs_reparse_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct ntfs_inode *ni = sbi->reparse.ni;
	struct ntfs_index *indx = &sbi->reparse.index_r;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	const struct INDEX_ROOT *root_r;

	if (!ni)
		return 0;

	le = NULL;
	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
			    ARRAY_SIZE(SR_NAME), NULL, NULL);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	root_r = resident_data(attr);
	if (root_r->type != ATTR_ZERO ||
	    root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);

out:
	return err;
}

/*
 * ntfs_objid_init - Load and parse $Extend/$ObjId.
 */
int ntfs_objid_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct ntfs_inode *ni = sbi->objid.ni;
	struct ntfs_index *indx = &sbi->objid.index_o;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	const struct INDEX_ROOT *root;

	if (!ni)
		return 0;

	le = NULL;
	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
			    ARRAY_SIZE(SO_NAME), NULL, NULL);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	root = resident_data(attr);
	if (root->type != ATTR_ZERO ||
	    root->rule != NTFS_COLLATION_TYPE_UINTS) {
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);

out:
	return err;
}

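/*
 * ntfs_objid_remove - Remove an object id from $Extend/$ObjId.
 */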
int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
{
	int err;
	struct ntfs_inode *ni = sbi->objid.ni;
	struct ntfs_index *indx = &sbi->objid.index_o;

	if (!ni)
		return -EINVAL;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);

	err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);

	mark_inode_dirty(&ni->vfs_inode);
	ni_unlock(ni);

	return err;
}

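/*
 * ntfs_insert_reparse - Insert a reparse tag/MFT reference pair
 * into $Extend/$Reparse.
 */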
int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
			const struct MFT_REF *ref)
{
	int err;
	struct ntfs_inode *ni = sbi->reparse.ni;
	struct ntfs_index *indx = &sbi->reparse.index_r;
	struct NTFS_DE_R re;

	if (!ni)
		return -EINVAL;

	memset(&re, 0, sizeof(re));

	re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
	re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
	re.de.key_size = cpu_to_le16(sizeof(re.key));

	re.key.ReparseTag = rtag;
	memcpy(&re.key.ref, ref, sizeof(*ref));

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);

	err = indx_insert_entry(indx, ni, &re.de, NULL, NULL, 0);

	mark_inode_dirty(&ni->vfs_inode);
	ni_unlock(ni);

	return err;
}

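/*
 * ntfs_remove_reparse - Remove a reparse tag/MFT reference pair
 * from $Extend/$Reparse.
 *
 * If @rtag is zero, the entry is looked up by @ref alone.
 */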
int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
			const struct MFT_REF *ref)
{
	int err, diff;
	struct ntfs_inode *ni = sbi->reparse.ni;
	struct ntfs_index *indx = &sbi->reparse.index_r;
	struct ntfs_fnd *fnd = NULL;
	struct REPARSE_KEY rkey;
	struct NTFS_DE_R *re;
	struct INDEX_ROOT *root_r;

	if (!ni)
		return -EINVAL;

	rkey.ReparseTag = rtag;
	rkey.ref = *ref;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);

	if (rtag) {
		err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
		goto out1;
	}

	fnd = fnd_get();
	if (!fnd) {
		err = -ENOMEM;
		goto out1;
	}

	root_r = indx_get_root(indx, ni, NULL, NULL);
	if (!root_r) {
		err = -EINVAL;
		goto out;
	}

	/* 1 - forces to ignore rkey.ReparseTag when comparing keys. */
	err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
			(struct NTFS_DE **)&re, fnd);
	if (err)
		goto out;

	/* Defensive: indx_find may return success with no entry found. */
	if (!re) {
		err = -ENOENT;
		goto out;
	}

	if (memcmp(&re->key.ref, ref, sizeof(*ref))) {
		/* Impossible. Looks like volume corrupt? */
		goto out;
	}

	memcpy(&rkey, &re->key, sizeof(rkey));

	fnd_put(fnd);
	fnd = NULL;

	err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);

out:
	fnd_put(fnd);

out1:
	mark_inode_dirty(&ni->vfs_inode);
	ni_unlock(ni);

	return err;
}

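/*
 * ntfs_unmap_and_discard - Unmap cached metadata and discard a run
 * of clusters on the underlying device.
 */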
static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
					  CLST len)
{
	ntfs_unmap_meta(sbi->sb, lcn, len);
	ntfs_discard(sbi, lcn, len);
}

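/*
 * mark_as_free_ex - Mark a run of clusters as free in the volume bitmap.
 *
 * If part of the run is already free, the volume is marked dirty and
 * only the still-used fragments are freed (and optionally discarded).
 */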
void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
{
	CLST end, i;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	bool dirty = false;

	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	if (!wnd_is_used(wnd, lcn, len)) {
		/* Mark volume as dirty out of wnd->rw_lock. */
		dirty = true;

		/*
		 * The run is only partially in use. Scan it cluster by
		 * cluster and free each contiguous used fragment.
		 */
		end = lcn + len;
		len = 0;
		for (i = lcn; i < end; i++) {
			if (wnd_is_used(wnd, i, 1)) {
				if (!len)
					lcn = i;
				len += 1;
				continue;
			}

			if (!len)
				continue;

			if (trim)
				ntfs_unmap_and_discard(sbi, lcn, len);

			wnd_set_free(wnd, lcn, len);
			len = 0;
		}

		if (!len)
			goto out;
	}

	/* Free the last (or the whole) fragment. */
	if (trim)
		ntfs_unmap_and_discard(sbi, lcn, len);
	wnd_set_free(wnd, lcn, len);

out:
	up_write(&wnd->rw_lock);
	if (dirty)
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
}

/*
 * run_deallocate - Deallocate all clusters described by @run.
 */
int run_deallocate(struct ntfs_sb_info *sbi, struct runs_tree *run, bool trim)
{
	CLST lcn, len;
	size_t idx = 0;

	while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
		/* Skip sparse fragments - they have no backing clusters. */
		if (lcn == SPARSE_LCN)
			continue;

		mark_as_free_ex(sbi, lcn, len, trim);
	}

	return 0;
}
