1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 *
4 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
5 *
6 */
7
8 #include <linux/buffer_head.h>
9 #include <linux/fs.h>
10 #include <linux/mpage.h>
11 #include <linux/namei.h>
12 #include <linux/nls.h>
13 #include <linux/uio.h>
14 #include <linux/writeback.h>
15
16 #include "debug.h"
17 #include "ntfs.h"
18 #include "ntfs_fs.h"
19
20 /*
21 * ntfs_read_mft - Read an MFT record and parse it into an inode.
22 */
23 static struct inode *ntfs_read_mft(struct inode *inode,
24 const struct cpu_str *name,
25 const struct MFT_REF *ref)
26 {
27 int err = 0;
28 struct ntfs_inode *ni = ntfs_i(inode);
29 struct super_block *sb = inode->i_sb;
30 struct ntfs_sb_info *sbi = sb->s_fs_info;
31 mode_t mode = 0;
32 struct ATTR_STD_INFO5 *std5 = NULL;
33 struct ATTR_LIST_ENTRY *le;
34 struct ATTRIB *attr;
35 bool is_match = false;
36 bool is_root = false;
37 bool is_dir;
38 unsigned long ino = inode->i_ino;
39 u32 rp_fa = 0, asize, t32;
40 u16 roff, rsize, names = 0;
41 const struct ATTR_FILE_NAME *fname = NULL;
42 const struct INDEX_ROOT *root;
43 struct REPARSE_DATA_BUFFER rp; // 0x18 bytes
44 u64 t64;
45 struct MFT_REC *rec;
46 struct runs_tree *run;
47
48 inode->i_op = NULL;
49 /* Setup 'uid' and 'gid' */
50 inode->i_uid = sbi->options->fs_uid;
51 inode->i_gid = sbi->options->fs_gid;
52
53 err = mi_init(&ni->mi, sbi, ino);
54 if (err)
55 goto out;
56
57 if (!sbi->mft.ni && ino == MFT_REC_MFT && !sb->s_root) {
58 t64 = sbi->mft.lbo >> sbi->cluster_bits;
59 t32 = bytes_to_cluster(sbi, MFT_REC_VOL * sbi->record_size);
60 sbi->mft.ni = ni;
61 init_rwsem(&ni->file.run_lock);
62
63 if (!run_add_entry(&ni->file.run, 0, t64, t32, true)) {
64 err = -ENOMEM;
65 goto out;
66 }
67 }
68
69 err = mi_read(&ni->mi, ino == MFT_REC_MFT);
70
71 if (err)
72 goto out;
73
74 rec = ni->mi.mrec;
75
76 if (sbi->flags & NTFS_FLAGS_LOG_REPLAYING) {
77 ;
78 } else if (ref->seq != rec->seq) {
79 err = -EINVAL;
80 ntfs_err(sb, "MFT: r=%lx, expect seq=%x instead of %x!", ino,
81 le16_to_cpu(ref->seq), le16_to_cpu(rec->seq));
82 goto out;
83 } else if (!is_rec_inuse(rec)) {
84 err = -ESTALE;
85 ntfs_err(sb, "Inode r=%x is not in use!", (u32)ino);
86 goto out;
87 }
88
89 if (le32_to_cpu(rec->total) != sbi->record_size) {
90 /* Bad inode? */
91 err = -EINVAL;
92 goto out;
93 }
94
95 if (!is_rec_base(rec)) {
96 err = -EINVAL;
97 goto out;
98 }
99
100 /* Record should contain $I30 root. */
101 is_dir = rec->flags & RECORD_FLAG_DIR;
102
103 /* MFT_REC_MFT is not a dir */
104 if (is_dir && ino == MFT_REC_MFT) {
105 err = -EINVAL;
106 goto out;
107 }
108
109 inode->i_generation = le16_to_cpu(rec->seq);
110
111 /* Enumerate all attributes of the MFT record. */
112 le = NULL;
113 attr = NULL;
114
115 /*
116 * To reduce indentation depth, use goto instead of
117 * while( (attr = ni_enum_attr_ex(ni, attr, &le, NULL) ))
118 */
119 next_attr:
120 run = NULL;
121 err = -EINVAL;
122 attr = ni_enum_attr_ex(ni, attr, &le, NULL);
123 if (!attr)
124 goto end_enum;
125
126 if (le && le->vcn) {
127 /* This is a non-primary attribute segment. Ignore it unless this is the MFT. */
128 if (ino != MFT_REC_MFT || attr->type != ATTR_DATA)
129 goto next_attr;
130
131 run = &ni->file.run;
132 asize = le32_to_cpu(attr->size);
133 goto attr_unpack_run;
134 }
135
136 roff = attr->non_res ? 0 : le16_to_cpu(attr->res.data_off);
137 rsize = attr->non_res ? 0 : le32_to_cpu(attr->res.data_size);
138 asize = le32_to_cpu(attr->size);
139
140 if (le16_to_cpu(attr->name_off) + attr->name_len > asize)
141 goto out;
142
143 if (attr->non_res) {
144 t64 = le64_to_cpu(attr->nres.alloc_size);
145 if (le64_to_cpu(attr->nres.data_size) > t64 ||
146 le64_to_cpu(attr->nres.valid_size) > t64)
147 goto out;
148 }
149
150 switch (attr->type) {
151 case ATTR_STD:
152 if (attr->non_res ||
153 asize < sizeof(struct ATTR_STD_INFO) + roff ||
154 rsize < sizeof(struct ATTR_STD_INFO))
155 goto out;
156
157 if (std5)
158 goto next_attr;
159
160 std5 = Add2Ptr(attr, roff);
161
162 #ifdef STATX_BTIME
163 nt2kernel(std5->cr_time, &ni->i_crtime);
164 #endif
165 nt2kernel(std5->a_time, &inode->i_atime);
166 nt2kernel(std5->c_time, &inode->i_ctime);
167 nt2kernel(std5->m_time, &inode->i_mtime);
168
169 ni->std_fa = std5->fa;
170
171 if (asize >= sizeof(struct ATTR_STD_INFO5) + roff &&
172 rsize >= sizeof(struct ATTR_STD_INFO5))
173 ni->std_security_id = std5->security_id;
174 goto next_attr;
175
176 case ATTR_LIST:
177 if (attr->name_len || le || ino == MFT_REC_LOG)
178 goto out;
179
180 err = ntfs_load_attr_list(ni, attr);
181 if (err)
182 goto out;
183
184 le = NULL;
185 attr = NULL;
186 goto next_attr;
187
188 case ATTR_NAME:
189 if (attr->non_res || asize < SIZEOF_ATTRIBUTE_FILENAME + roff ||
190 rsize < SIZEOF_ATTRIBUTE_FILENAME)
191 goto out;
192
193 fname = Add2Ptr(attr, roff);
194 if (fname->type == FILE_NAME_DOS)
195 goto next_attr;
196
197 names += 1;
198 if (name && name->len == fname->name_len &&
199 !ntfs_cmp_names_cpu(name, (struct le_str *)&fname->name_len,
200 NULL, false))
201 is_match = true;
202
203 goto next_attr;
204
205 case ATTR_DATA:
206 if (is_dir) {
207 /* Ignore data attribute in dir record. */
208 goto next_attr;
209 }
210
211 if (ino == MFT_REC_BADCLUST && !attr->non_res)
212 goto next_attr;
213
214 if (attr->name_len &&
215 ((ino != MFT_REC_BADCLUST || !attr->non_res ||
216 attr->name_len != ARRAY_SIZE(BAD_NAME) ||
217 memcmp(attr_name(attr), BAD_NAME, sizeof(BAD_NAME))) &&
218 (ino != MFT_REC_SECURE || !attr->non_res ||
219 attr->name_len != ARRAY_SIZE(SDS_NAME) ||
220 memcmp(attr_name(attr), SDS_NAME, sizeof(SDS_NAME))))) {
221 /* File contains stream attribute. Ignore it. */
222 goto next_attr;
223 }
224
225 if (is_attr_sparsed(attr))
226 ni->std_fa |= FILE_ATTRIBUTE_SPARSE_FILE;
227 else
228 ni->std_fa &= ~FILE_ATTRIBUTE_SPARSE_FILE;
229
230 if (is_attr_compressed(attr))
231 ni->std_fa |= FILE_ATTRIBUTE_COMPRESSED;
232 else
233 ni->std_fa &= ~FILE_ATTRIBUTE_COMPRESSED;
234
235 if (is_attr_encrypted(attr))
236 ni->std_fa |= FILE_ATTRIBUTE_ENCRYPTED;
237 else
238 ni->std_fa &= ~FILE_ATTRIBUTE_ENCRYPTED;
239
240 if (!attr->non_res) {
241 ni->i_valid = inode->i_size = rsize;
242 inode_set_bytes(inode, rsize);
243 }
244
245 mode = S_IFREG | (0777 & sbi->options->fs_fmask_inv);
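/*
 * Illustrative note (not in the original source): fs_fmask_inv holds the
 * inverted 'fmask' mount option, so with fmask=022 the resulting mode is
 * S_IFREG | (0777 & ~022) == S_IFREG | 0755.
 */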
246
247 if (!attr->non_res) {
248 ni->ni_flags |= NI_FLAG_RESIDENT;
249 goto next_attr;
250 }
251
252 inode_set_bytes(inode, attr_ondisk_size(attr));
253
254 ni->i_valid = le64_to_cpu(attr->nres.valid_size);
255 inode->i_size = le64_to_cpu(attr->nres.data_size);
256 if (!attr->nres.alloc_size)
257 goto next_attr;
258
259 run = ino == MFT_REC_BITMAP ? &sbi->used.bitmap.run
260 : &ni->file.run;
261 break;
262
263 case ATTR_ROOT:
264 if (attr->non_res)
265 goto out;
266
267 root = Add2Ptr(attr, roff);
268
269 if (attr->name_len != ARRAY_SIZE(I30_NAME) ||
270 memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME)))
271 goto next_attr;
272
273 if (root->type != ATTR_NAME ||
274 root->rule != NTFS_COLLATION_TYPE_FILENAME)
275 goto out;
276
277 if (!is_dir)
278 goto next_attr;
279
280 is_root = true;
281 ni->ni_flags |= NI_FLAG_DIR;
282
283 err = indx_init(&ni->dir, sbi, attr, INDEX_MUTEX_I30);
284 if (err)
285 goto out;
286
287 mode = sb->s_root
288 ? (S_IFDIR | (0777 & sbi->options->fs_dmask_inv))
289 : (S_IFDIR | 0777);
290 goto next_attr;
291
292 case ATTR_ALLOC:
293 if (!is_root || attr->name_len != ARRAY_SIZE(I30_NAME) ||
294 memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME)))
295 goto next_attr;
296
297 inode->i_size = le64_to_cpu(attr->nres.data_size);
298 ni->i_valid = le64_to_cpu(attr->nres.valid_size);
299 inode_set_bytes(inode, le64_to_cpu(attr->nres.alloc_size));
300
301 run = &ni->dir.alloc_run;
302 break;
303
304 case ATTR_BITMAP:
305 if (ino == MFT_REC_MFT) {
306 if (!attr->non_res)
307 goto out;
308 #ifndef CONFIG_NTFS3_64BIT_CLUSTER
309 /* 0x20000000 = 2^32 / 8: one bitmap bit per record, 32-bit record numbers. */
310 if (le64_to_cpu(attr->nres.alloc_size) >= 0x20000000)
311 goto out;
312 #endif
313 run = &sbi->mft.bitmap.run;
314 break;
315 } else if (is_dir && attr->name_len == ARRAY_SIZE(I30_NAME) &&
316 !memcmp(attr_name(attr), I30_NAME,
317 sizeof(I30_NAME)) &&
318 attr->non_res) {
319 run = &ni->dir.bitmap_run;
320 break;
321 }
322 goto next_attr;
323
324 case ATTR_REPARSE:
325 if (attr->name_len)
326 goto next_attr;
327
328 rp_fa = ni_parse_reparse(ni, attr, &rp);
329 switch (rp_fa) {
330 case REPARSE_LINK:
331 /*
332 * Normal symlink.
333 * Assume one UTF-16 code unit maps to one UTF-8 byte.
334 */
335 inode->i_size = le16_to_cpu(rp.SymbolicLinkReparseBuffer
336 .PrintNameLength) /
337 sizeof(u16);
338
339 ni->i_valid = inode->i_size;
340
341 /* Clear directory bit. */
342 if (ni->ni_flags & NI_FLAG_DIR) {
343 indx_clear(&ni->dir);
344 memset(&ni->dir, 0, sizeof(ni->dir));
345 ni->ni_flags &= ~NI_FLAG_DIR;
346 } else {
347 run_close(&ni->file.run);
348 }
349 mode = S_IFLNK | 0777;
350 is_dir = false;
351 if (attr->non_res) {
352 run = &ni->file.run;
353 goto attr_unpack_run; // Double break.
354 }
355 break;
356
357 case REPARSE_COMPRESSED:
358 break;
359
360 case REPARSE_DEDUPLICATED:
361 break;
362 }
363 goto next_attr;
364
365 case ATTR_EA_INFO:
366 if (!attr->name_len &&
367 resident_data_ex(attr, sizeof(struct EA_INFO))) {
368 ni->ni_flags |= NI_FLAG_EA;
369 /*
370 * ntfs_get_wsl_perm updates inode->i_uid, inode->i_gid, inode->i_mode
371 */
372 inode->i_mode = mode;
373 ntfs_get_wsl_perm(inode);
374 mode = inode->i_mode;
375 }
376 goto next_attr;
377
378 default:
379 goto next_attr;
380 }
381
382 attr_unpack_run:
383 roff = le16_to_cpu(attr->nres.run_off);
384
385 if (roff > asize) {
386 err = -EINVAL;
387 goto out;
388 }
389
390 t64 = le64_to_cpu(attr->nres.svcn);
391
392 err = run_unpack_ex(run, sbi, ino, t64, le64_to_cpu(attr->nres.evcn),
393 t64, Add2Ptr(attr, roff), asize - roff);
394 if (err < 0)
395 goto out;
396 err = 0;
397 goto next_attr;
398
399 end_enum:
400
401 if (!std5)
402 goto out;
403
404 if (!is_match && name) {
405 err = -ENOENT;
406 goto out;
407 }
408
409 if (std5->fa & FILE_ATTRIBUTE_READONLY)
410 mode &= ~0222;
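/* E.g. an 0644 mode becomes 0444 when FILE_ATTRIBUTE_READONLY is set. */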
411
412 if (!names) {
413 err = -EINVAL;
414 goto out;
415 }
416
417 if (names != le16_to_cpu(rec->hard_links)) {
418 /* Correct minor error on the fly. Do not mark inode as dirty. */
419 ntfs_inode_warn(inode, "Correct links count -> %u.", names);
420 rec->hard_links = cpu_to_le16(names);
421 ni->mi.dirty = true;
422 }
423
424 set_nlink(inode, names);
425
426 if (S_ISDIR(mode)) {
427 ni->std_fa |= FILE_ATTRIBUTE_DIRECTORY;
428
429 /*
430 * Dot and dot-dot should be included in the count but were not
431 * included in the enumeration.
432 * Hard links to directories are usually disabled.
433 */
434 inode->i_op = &ntfs_dir_inode_operations;
435 inode->i_fop = &ntfs_dir_operations;
436 ni->i_valid = 0;
437 } else if (S_ISLNK(mode)) {
438 ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
439 inode->i_op = &ntfs_link_inode_operations;
440 inode->i_fop = NULL;
441 inode_nohighmem(inode);
442 } else if (S_ISREG(mode)) {
443 ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
444 inode->i_op = &ntfs_file_inode_operations;
445 inode->i_fop = &ntfs_file_operations;
446 inode->i_mapping->a_ops =
447 is_compressed(ni) ? &ntfs_aops_cmpr : &ntfs_aops;
448 if (ino != MFT_REC_MFT)
449 init_rwsem(&ni->file.run_lock);
450 } else if (S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode) ||
451 S_ISSOCK(mode)) {
452 inode->i_op = &ntfs_special_inode_operations;
453 init_special_inode(inode, mode, inode->i_rdev);
454 } else if (fname && fname->home.low == cpu_to_le32(MFT_REC_EXTEND) &&
455 fname->home.seq == cpu_to_le16(MFT_REC_EXTEND)) {
456 /* Records in $Extend are not regular files or directories. */
457 inode->i_op = &ntfs_file_inode_operations;
458 } else {
459 err = -EINVAL;
460 goto out;
461 }
462
463 if ((sbi->options->sys_immutable &&
464 (std5->fa & FILE_ATTRIBUTE_SYSTEM)) &&
465 !S_ISFIFO(mode) && !S_ISSOCK(mode) && !S_ISLNK(mode)) {
466 inode->i_flags |= S_IMMUTABLE;
467 } else {
468 inode->i_flags &= ~S_IMMUTABLE;
469 }
470
471 inode->i_mode = mode;
472 if (!(ni->ni_flags & NI_FLAG_EA)) {
473 /* If no xattr then no security (stored in xattr). */
474 inode->i_flags |= S_NOSEC;
475 }
476
477 if (ino == MFT_REC_MFT && !sb->s_root)
478 sbi->mft.ni = NULL;
479
480 unlock_new_inode(inode);
481
482 return inode;
483
484 out:
485 if (ino == MFT_REC_MFT && !sb->s_root)
486 sbi->mft.ni = NULL;
487
488 iget_failed(inode);
489 return ERR_PTR(err);
490 }
491
492 /*
493 * ntfs_test_inode
494 *
495 * Return: 1 if match.
496 */
497 static int ntfs_test_inode(struct inode *inode, void *data)
498 {
499 struct MFT_REF *ref = data;
500
501 return ino_get(ref) == inode->i_ino;
502 }
503
504 static int ntfs_set_inode(struct inode *inode, void *data)
505 {
506 const struct MFT_REF *ref = data;
507
508 inode->i_ino = ino_get(ref);
509 return 0;
510 }
511
512 struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref,
513 const struct cpu_str *name)
514 {
515 struct inode *inode;
516
517 inode = iget5_locked(sb, ino_get(ref), ntfs_test_inode, ntfs_set_inode,
518 (void *)ref);
519 if (unlikely(!inode))
520 return ERR_PTR(-ENOMEM);
521
522 /* If this is a freshly allocated inode, need to read it now. */
523 if (inode->i_state & I_NEW)
524 inode = ntfs_read_mft(inode, name, ref);
525 else if (ref->seq != ntfs_i(inode)->mi.mrec->seq) {
526 /* Inode overlaps? */
527 _ntfs_bad_inode(inode);
528 }
529
530 if (IS_ERR(inode) && name)
531 ntfs_set_state(sb->s_fs_info, NTFS_DIRTY_ERROR);
532
533 return inode;
534 }
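/*
 * Usage sketch (illustrative only, not part of the original file): a caller
 * holding a directory entry 'e' (struct NTFS_DE) typically looks the inode up
 * through the embedded MFT reference, e.g.:
 *
 *	struct inode *i = ntfs_iget5(sb, &e->ref, uni_name);
 *
 *	if (IS_ERR(i))
 *		return PTR_ERR(i);
 *
 * 'uni_name' is an optional struct cpu_str; when non-NULL, ntfs_read_mft
 * verifies that the record still carries that name (see 'is_match' above).
 */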
535
536 enum get_block_ctx {
537 GET_BLOCK_GENERAL = 0,
538 GET_BLOCK_WRITE_BEGIN = 1,
539 GET_BLOCK_DIRECT_IO_R = 2,
540 GET_BLOCK_DIRECT_IO_W = 3,
541 GET_BLOCK_BMAP = 4,
542 };
543
544 static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
545 struct buffer_head *bh, int create,
546 enum get_block_ctx ctx)
547 {
548 struct super_block *sb = inode->i_sb;
549 struct ntfs_sb_info *sbi = sb->s_fs_info;
550 struct ntfs_inode *ni = ntfs_i(inode);
551 struct page *page = bh->b_page;
552 u8 cluster_bits = sbi->cluster_bits;
553 u32 block_size = sb->s_blocksize;
554 u64 bytes, lbo, valid;
555 u32 off;
556 int err;
557 CLST vcn, lcn, len;
558 bool new;
559
560 /* Clear previous state. */
561 clear_buffer_new(bh);
562 clear_buffer_uptodate(bh);
563
564 /* Direct write uses 'create=0'. */
565 if (!create && vbo >= ni->i_valid) {
566 /* Out of valid. */
567 return 0;
568 }
569
570 if (vbo >= inode->i_size) {
571 /* Out of size. */
572 return 0;
573 }
574
575 if (is_resident(ni)) {
576 ni_lock(ni);
577 err = attr_data_read_resident(ni, page);
578 ni_unlock(ni);
579
580 if (!err)
581 set_buffer_uptodate(bh);
582 bh->b_size = block_size;
583 return err;
584 }
585
586 vcn = vbo >> cluster_bits;
587 off = vbo & sbi->cluster_mask;
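/*
 * Worked example (assuming 4K clusters, cluster_bits == 12): vbo == 0x5234
 * gives vcn == 5 and off == 0x234; once the cluster is mapped below,
 * lbo == ((u64)lcn << 12) + 0x234.
 */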
588 new = false;
589
590 err = attr_data_get_block(ni, vcn, 1, &lcn, &len, create ? &new : NULL);
591 if (err)
592 goto out;
593
594 if (!len)
595 return 0;
596
597 bytes = ((u64)len << cluster_bits) - off;
598
599 if (lcn == SPARSE_LCN) {
600 if (!create) {
601 if (bh->b_size > bytes)
602 bh->b_size = bytes;
603 return 0;
604 }
605 WARN_ON(1);
606 }
607
608 if (new) {
609 set_buffer_new(bh);
610 if ((len << cluster_bits) > block_size)
611 ntfs_sparse_cluster(inode, page, vcn, len);
612 }
613
614 lbo = ((u64)lcn << cluster_bits) + off;
615
616 set_buffer_mapped(bh);
617 bh->b_bdev = sb->s_bdev;
618 bh->b_blocknr = lbo >> sb->s_blocksize_bits;
619
620 valid = ni->i_valid;
621
622 if (ctx == GET_BLOCK_DIRECT_IO_W) {
623 /* ntfs_direct_IO will update ni->i_valid. */
624 if (vbo >= valid)
625 set_buffer_new(bh);
626 } else if (create) {
627 /* Normal write. */
628 if (bytes > bh->b_size)
629 bytes = bh->b_size;
630
631 if (vbo >= valid)
632 set_buffer_new(bh);
633
634 if (vbo + bytes > valid) {
635 ni->i_valid = vbo + bytes;
636 mark_inode_dirty(inode);
637 }
638 } else if (vbo >= valid) {
639 /* Read out of valid data. */
640 /* Should never get here because this was already checked above. */
641 clear_buffer_mapped(bh);
642 } else if (vbo + bytes <= valid) {
643 /* Normal read. */
644 } else if (vbo + block_size <= valid) {
645 /* Normal short read. */
646 bytes = block_size;
647 } else {
648 /*
649 * Read across valid size: vbo < valid && valid < vbo + block_size
650 */
651 bytes = block_size;
652
653 if (page) {
654 u32 voff = valid - vbo;
655
656 bh->b_size = block_size;
657 off = vbo & (PAGE_SIZE - 1);
658 set_bh_page(bh, page, off);
659 err = bh_read(bh, 0);
660 if (err < 0)
661 goto out;
662 zero_user_segment(page, off + voff, off + block_size);
663 }
664 }
665
666 if (bh->b_size > bytes)
667 bh->b_size = bytes;
668
669 #ifndef __LP64__
670 if (ctx == GET_BLOCK_DIRECT_IO_W || ctx == GET_BLOCK_DIRECT_IO_R) {
671 static_assert(sizeof(size_t) < sizeof(loff_t));
672 if (bytes > 0x40000000u)
673 bh->b_size = 0x40000000u;
674 }
675 #endif
676
677 return 0;
678
679 out:
680 return err;
681 }
682
683 int ntfs_get_block(struct inode *inode, sector_t vbn,
684 struct buffer_head *bh_result, int create)
685 {
686 return ntfs_get_block_vbo(inode, (u64)vbn << inode->i_blkbits,
687 bh_result, create, GET_BLOCK_GENERAL);
688 }
689
690 static int ntfs_get_block_bmap(struct inode *inode, sector_t vsn,
691 struct buffer_head *bh_result, int create)
692 {
693 return ntfs_get_block_vbo(inode,
694 (u64)vsn << inode->i_sb->s_blocksize_bits,
695 bh_result, create, GET_BLOCK_BMAP);
696 }
697
698 static sector_t ntfs_bmap(struct address_space *mapping, sector_t block)
699 {
700 return generic_block_bmap(mapping, block, ntfs_get_block_bmap);
701 }
702
703 static int ntfs_read_folio(struct file *file, struct folio *folio)
704 {
705 struct page *page = &folio->page;
706 int err;
707 struct address_space *mapping = page->mapping;
708 struct inode *inode = mapping->host;
709 struct ntfs_inode *ni = ntfs_i(inode);
710
711 if (is_resident(ni)) {
712 ni_lock(ni);
713 err = attr_data_read_resident(ni, page);
714 ni_unlock(ni);
715 if (err != E_NTFS_NONRESIDENT) {
716 unlock_page(page);
717 return err;
718 }
719 }
720
721 if (is_compressed(ni)) {
722 ni_lock(ni);
723 err = ni_readpage_cmpr(ni, page);
724 ni_unlock(ni);
725 return err;
726 }
727
728 /* Normal + sparse files. */
729 return mpage_read_folio(folio, ntfs_get_block);
730 }
731
732 static void ntfs_readahead(struct readahead_control *rac)
733 {
734 struct address_space *mapping = rac->mapping;
735 struct inode *inode = mapping->host;
736 struct ntfs_inode *ni = ntfs_i(inode);
737 u64 valid;
738 loff_t pos;
739
740 if (is_resident(ni)) {
741 /* No readahead for resident. */
742 return;
743 }
744
745 if (is_compressed(ni)) {
746 /* No readahead for compressed. */
747 return;
748 }
749
750 valid = ni->i_valid;
751 pos = readahead_pos(rac);
752
753 if (valid < i_size_read(inode) && pos <= valid &&
754 valid < pos + readahead_length(rac)) {
755 /* Range crosses 'valid'. Read it page by page. */
756 return;
757 }
758
759 mpage_readahead(rac, ntfs_get_block);
760 }
761
762 static int ntfs_get_block_direct_IO_R(struct inode *inode, sector_t iblock,
763 struct buffer_head *bh_result, int create)
764 {
765 return ntfs_get_block_vbo(inode, (u64)iblock << inode->i_blkbits,
766 bh_result, create, GET_BLOCK_DIRECT_IO_R);
767 }
768
769 static int ntfs_get_block_direct_IO_W(struct inode *inode, sector_t iblock,
770 struct buffer_head *bh_result, int create)
771 {
772 return ntfs_get_block_vbo(inode, (u64)iblock << inode->i_blkbits,
773 bh_result, create, GET_BLOCK_DIRECT_IO_W);
774 }
775
776 static ssize_t ntfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
777 {
778 struct file *file = iocb->ki_filp;
779 struct address_space *mapping = file->f_mapping;
780 struct inode *inode = mapping->host;
781 struct ntfs_inode *ni = ntfs_i(inode);
782 loff_t vbo = iocb->ki_pos;
783 loff_t end;
784 int wr = iov_iter_rw(iter) & WRITE;
785 size_t iter_count = iov_iter_count(iter);
786 loff_t valid;
787 ssize_t ret;
788
789 if (is_resident(ni)) {
790 /* Switch to buffered write. */
791 ret = 0;
792 goto out;
793 }
794
795 ret = blockdev_direct_IO(iocb, inode, iter,
796 wr ? ntfs_get_block_direct_IO_W
797 : ntfs_get_block_direct_IO_R);
798
799 if (ret > 0)
800 end = vbo + ret;
801 else if (wr && ret == -EIOCBQUEUED)
802 end = vbo + iter_count;
803 else
804 goto out;
805
806 valid = ni->i_valid;
807 if (wr) {
808 if (end > valid && !S_ISBLK(inode->i_mode)) {
809 ni->i_valid = end;
810 mark_inode_dirty(inode);
811 }
812 } else if (vbo < valid && valid < end) {
813 /* Zero the part of the read buffer that lies beyond valid data. */
814 iov_iter_revert(iter, end - valid);
815 iov_iter_zero(end - valid, iter);
816 }
817
818 out:
819 return ret;
820 }
821
822 int ntfs_set_size(struct inode *inode, u64 new_size)
823 {
824 struct super_block *sb = inode->i_sb;
825 struct ntfs_sb_info *sbi = sb->s_fs_info;
826 struct ntfs_inode *ni = ntfs_i(inode);
827 int err;
828
829 /* Check for maximum file size. */
830 if (is_sparsed(ni) || is_compressed(ni)) {
831 if (new_size > sbi->maxbytes_sparse) {
832 err = -EFBIG;
833 goto out;
834 }
835 } else if (new_size > sbi->maxbytes) {
836 err = -EFBIG;
837 goto out;
838 }
839
840 ni_lock(ni);
841 down_write(&ni->file.run_lock);
842
843 err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
844 &ni->i_valid, true, NULL);
845
846 up_write(&ni->file.run_lock);
847 ni_unlock(ni);
848
849 mark_inode_dirty(inode);
850
851 out:
852 return err;
853 }
854
855 static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
856 {
857 struct address_space *mapping = page->mapping;
858 struct inode *inode = mapping->host;
859 struct ntfs_inode *ni = ntfs_i(inode);
860 int err;
861
862 if (is_resident(ni)) {
863 ni_lock(ni);
864 err = attr_data_write_resident(ni, page);
865 ni_unlock(ni);
866 if (err != E_NTFS_NONRESIDENT) {
867 unlock_page(page);
868 return err;
869 }
870 }
871
872 return block_write_full_page(page, ntfs_get_block, wbc);
873 }
874
875 static int ntfs_writepages(struct address_space *mapping,
876 struct writeback_control *wbc)
877 {
878 /* Redirect call to 'ntfs_writepage' for resident files. */
879 if (is_resident(ntfs_i(mapping->host)))
880 return generic_writepages(mapping, wbc);
881 return mpage_writepages(mapping, wbc, ntfs_get_block);
882 }
883
884 static int ntfs_get_block_write_begin(struct inode *inode, sector_t vbn,
885 struct buffer_head *bh_result, int create)
886 {
887 return ntfs_get_block_vbo(inode, (u64)vbn << inode->i_blkbits,
888 bh_result, create, GET_BLOCK_WRITE_BEGIN);
889 }
890
891 int ntfs_write_begin(struct file *file, struct address_space *mapping,
892 loff_t pos, u32 len, struct page **pagep, void **fsdata)
893 {
894 int err;
895 struct inode *inode = mapping->host;
896 struct ntfs_inode *ni = ntfs_i(inode);
897
898 *pagep = NULL;
899 if (is_resident(ni)) {
900 struct page *page = grab_cache_page_write_begin(
901 mapping, pos >> PAGE_SHIFT);
902
903 if (!page) {
904 err = -ENOMEM;
905 goto out;
906 }
907
908 ni_lock(ni);
909 err = attr_data_read_resident(ni, page);
910 ni_unlock(ni);
911
912 if (!err) {
913 *pagep = page;
914 goto out;
915 }
916 unlock_page(page);
917 put_page(page);
918
919 if (err != E_NTFS_NONRESIDENT)
920 goto out;
921 }
922
923 err = block_write_begin(mapping, pos, len, pagep,
924 ntfs_get_block_write_begin);
925
926 out:
927 return err;
928 }
929
930 /*
931 * ntfs_write_end - Address_space_operations::write_end.
932 */
933 int ntfs_write_end(struct file *file, struct address_space *mapping,
934 loff_t pos, u32 len, u32 copied, struct page *page,
935 void *fsdata)
936 {
937 struct inode *inode = mapping->host;
938 struct ntfs_inode *ni = ntfs_i(inode);
939 u64 valid = ni->i_valid;
940 bool dirty = false;
941 int err;
942
943 if (is_resident(ni)) {
944 ni_lock(ni);
945 err = attr_data_write_resident(ni, page);
946 ni_unlock(ni);
947 if (!err) {
948 dirty = true;
949 /* Clear any buffers in page. */
950 if (page_has_buffers(page)) {
951 struct buffer_head *head, *bh;
952
953 bh = head = page_buffers(page);
954 do {
955 clear_buffer_dirty(bh);
956 clear_buffer_mapped(bh);
957 set_buffer_uptodate(bh);
958 } while (head != (bh = bh->b_this_page));
959 }
960 SetPageUptodate(page);
961 err = copied;
962 }
963 unlock_page(page);
964 put_page(page);
965 } else {
966 err = generic_write_end(file, mapping, pos, len, copied, page,
967 fsdata);
968 }
969
970 if (err >= 0) {
971 if (!(ni->std_fa & FILE_ATTRIBUTE_ARCHIVE)) {
972 inode->i_ctime = inode->i_mtime = current_time(inode);
973 ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
974 dirty = true;
975 }
976
977 if (valid != ni->i_valid) {
978 /* ni->i_valid is changed in ntfs_get_block_vbo. */
979 dirty = true;
980 }
981
982 if (dirty)
983 mark_inode_dirty(inode);
984 }
985
986 return err;
987 }
988
989 int reset_log_file(struct inode *inode)
990 {
991 int err;
992 loff_t pos = 0;
993 u32 log_size = inode->i_size;
994 struct address_space *mapping = inode->i_mapping;
995
996 for (;;) {
997 u32 len;
998 void *kaddr;
999 struct page *page;
1000
1001 len = pos + PAGE_SIZE > log_size ? (log_size - pos) : PAGE_SIZE;
1002
1003 err = block_write_begin(mapping, pos, len, &page,
1004 ntfs_get_block_write_begin);
1005 if (err)
1006 goto out;
1007
1008 kaddr = kmap_atomic(page);
1009 memset(kaddr, -1, len);
1010 kunmap_atomic(kaddr);
1011 flush_dcache_page(page);
1012
1013 err = block_write_end(NULL, mapping, pos, len, len, page, NULL);
1014 if (err < 0)
1015 goto out;
1016 pos += len;
1017
1018 if (pos >= log_size)
1019 break;
1020 balance_dirty_pages_ratelimited(mapping);
1021 }
1022 out:
1023 mark_inode_dirty_sync(inode);
1024
1025 return err;
1026 }
1027
1028 int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc)
1029 {
1030 return _ni_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
1031 }
1032
1033 int ntfs_sync_inode(struct inode *inode)
1034 {
1035 return _ni_write_inode(inode, 1);
1036 }
1037
1038 /*
1039 * writeback_inode - Helper function for ntfs_flush_inodes().
1040 *
1041 * This writes both the inode and the file data blocks, waiting
1042 * for in flight data blocks before the start of the call. It
1043 * does not wait for any io started during the call.
1044 */
1045 static int writeback_inode(struct inode *inode)
1046 {
1047 int ret = sync_inode_metadata(inode, 0);
1048
1049 if (!ret)
1050 ret = filemap_fdatawrite(inode->i_mapping);
1051 return ret;
1052 }
1053
1054 /*
1055 * ntfs_flush_inodes
1056 *
1057 * Write data and metadata corresponding to i1 and i2. The io is
1058 * started but we do not wait for any of it to finish.
1059 *
1060 * filemap_flush() is used for the block device, so if there is a dirty
1061 * page for a block already in flight, we will not wait and start the
1062 * io over again.
1063 */
1064 int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
1065 struct inode *i2)
1066 {
1067 int ret = 0;
1068
1069 if (i1)
1070 ret = writeback_inode(i1);
1071 if (!ret && i2)
1072 ret = writeback_inode(i2);
1073 if (!ret)
1074 ret = sync_blockdev_nowait(sb->s_bdev);
1075 return ret;
1076 }
1077
1078 int inode_write_data(struct inode *inode, const void *data, size_t bytes)
1079 {
1080 pgoff_t idx;
1081
1082 /* Write non resident data. */
1083 for (idx = 0; bytes; idx++) {
1084 size_t op = bytes > PAGE_SIZE ? PAGE_SIZE : bytes;
1085 struct page *page = ntfs_map_page(inode->i_mapping, idx);
1086
1087 if (IS_ERR(page))
1088 return PTR_ERR(page);
1089
1090 lock_page(page);
1091 WARN_ON(!PageUptodate(page));
1092 ClearPageUptodate(page);
1093
1094 memcpy(page_address(page), data, op);
1095
1096 flush_dcache_page(page);
1097 SetPageUptodate(page);
1098 unlock_page(page);
1099
1100 ntfs_unmap_page(page);
1101
1102 bytes -= op;
1103 data = Add2Ptr(data, PAGE_SIZE);
1104 }
1105 return 0;
1106 }
1107
1108 /*
1109 * ntfs_reparse_bytes
1110 *
1111 * Number of bytes for REPARSE_DATA_BUFFER(IO_REPARSE_TAG_SYMLINK)
1112 * for unicode string of @uni_len length.
1113 */
1114 static inline u32 ntfs_reparse_bytes(u32 uni_len)
1115 {
1116 /* Header + unicode string + decorated unicode string. */
1117 return sizeof(short) * (2 * uni_len + 4) +
1118 offsetof(struct REPARSE_DATA_BUFFER,
1119 SymbolicLinkReparseBuffer.PathBuffer);
1120 }
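/*
 * Worked example (illustrative): for uni_len == 10 this is
 * 2 * (2 * 10 + 4) == 48 bytes of PathBuffer (PrintName + "\??\" prefix +
 * SubstituteName) plus the header bytes up to PathBuffer.
 */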
1121
1122 static struct REPARSE_DATA_BUFFER *
1123 ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname,
1124 u32 size, u16 *nsize)
1125 {
1126 int i, err;
1127 struct REPARSE_DATA_BUFFER *rp;
1128 __le16 *rp_name;
1129 typeof(rp->SymbolicLinkReparseBuffer) *rs;
1130
1131 rp = kzalloc(ntfs_reparse_bytes(2 * size + 2), GFP_NOFS);
1132 if (!rp)
1133 return ERR_PTR(-ENOMEM);
1134
1135 rs = &rp->SymbolicLinkReparseBuffer;
1136 rp_name = rs->PathBuffer;
1137
1138 /* Convert link name to UTF-16. */
1139 err = ntfs_nls_to_utf16(sbi, symname, size,
1140 (struct cpu_str *)(rp_name - 1), 2 * size,
1141 UTF16_LITTLE_ENDIAN);
1142 if (err < 0)
1143 goto out;
1144
1145 /* err is now the length of the symlink's Unicode name, in UTF-16 code units. */
1146 *nsize = ntfs_reparse_bytes(err);
1147
1148 if (*nsize > sbi->reparse.max_size) {
1149 err = -EFBIG;
1150 goto out;
1151 }
1152
1153 /* Translate Linux '/' into Windows '\'. */
1154 for (i = 0; i < err; i++) {
1155 if (rp_name[i] == cpu_to_le16('/'))
1156 rp_name[i] = cpu_to_le16('\\');
1157 }
1158
1159 rp->ReparseTag = IO_REPARSE_TAG_SYMLINK;
1160 rp->ReparseDataLength =
1161 cpu_to_le16(*nsize - offsetof(struct REPARSE_DATA_BUFFER,
1162 SymbolicLinkReparseBuffer));
1163
1164 /* PrintName + SubstituteName. */
1165 rs->SubstituteNameOffset = cpu_to_le16(sizeof(short) * err);
1166 rs->SubstituteNameLength = cpu_to_le16(sizeof(short) * err + 8);
1167 rs->PrintNameLength = rs->SubstituteNameOffset;
1168
1169 /*
1170 * TODO: Use relative path if possible to allow Windows to
1171 * parse this path.
1172 * 0 - absolute path, 1 - relative path (SYMLINK_FLAG_RELATIVE).
1173 */
1174 rs->Flags = 0;
1175
1176 memmove(rp_name + err + 4, rp_name, sizeof(short) * err);
1177
1178 /* Decorate SubstituteName. */
1179 rp_name += err;
1180 rp_name[0] = cpu_to_le16('\\');
1181 rp_name[1] = cpu_to_le16('?');
1182 rp_name[2] = cpu_to_le16('?');
1183 rp_name[3] = cpu_to_le16('\\');
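/*
 * Resulting PathBuffer layout (sketch), with 'err' UTF-16 code units in the
 * target name:
 *   [0 .. err-1]         PrintName: the target as given
 *   [err .. err+3]       "\??\" prefix
 *   [err+4 .. 2*err+3]   SubstituteName body: a copy of the target
 */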
1184
1185 return rp;
1186 out:
1187 kfree(rp);
1188 return ERR_PTR(err);
1189 }
1190
1191 struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
1192 struct inode *dir, struct dentry *dentry,
1193 const struct cpu_str *uni, umode_t mode,
1194 dev_t dev, const char *symname, u32 size,
1195 struct ntfs_fnd *fnd)
1196 {
1197 int err;
1198 struct super_block *sb = dir->i_sb;
1199 struct ntfs_sb_info *sbi = sb->s_fs_info;
1200 const struct qstr *name = &dentry->d_name;
1201 CLST ino = 0;
1202 struct ntfs_inode *dir_ni = ntfs_i(dir);
1203 struct ntfs_inode *ni = NULL;
1204 struct inode *inode = NULL;
1205 struct ATTRIB *attr;
1206 struct ATTR_STD_INFO5 *std5;
1207 struct ATTR_FILE_NAME *fname;
1208 struct MFT_REC *rec;
1209 u32 asize, dsize, sd_size;
1210 enum FILE_ATTRIBUTE fa;
1211 __le32 security_id = SECURITY_ID_INVALID;
1212 CLST vcn;
1213 const void *sd;
1214 u16 t16, nsize = 0, aid = 0;
1215 struct INDEX_ROOT *root, *dir_root;
1216 struct NTFS_DE *e, *new_de = NULL;
1217 struct REPARSE_DATA_BUFFER *rp = NULL;
1218 bool rp_inserted = false;
1219
1220 ni_lock_dir(dir_ni);
1221
1222 dir_root = indx_get_root(&dir_ni->dir, dir_ni, NULL, NULL);
1223 if (!dir_root) {
1224 err = -EINVAL;
1225 goto out1;
1226 }
1227
1228 if (S_ISDIR(mode)) {
1229 /* Use parent's directory attributes. */
1230 fa = dir_ni->std_fa | FILE_ATTRIBUTE_DIRECTORY |
1231 FILE_ATTRIBUTE_ARCHIVE;
1232 /*
1233 * By default child directory inherits parent attributes.
1234 * Root directory is hidden + system.
1235 * Make an exception for children in root.
1236 */
1237 if (dir->i_ino == MFT_REC_ROOT)
1238 fa &= ~(FILE_ATTRIBUTE_HIDDEN | FILE_ATTRIBUTE_SYSTEM);
1239 } else if (S_ISLNK(mode)) {
1240 /* Ideally a link should be the same type (file/dir) as its target. */
1241 fa = FILE_ATTRIBUTE_REPARSE_POINT;
1242
1243 /*
1244 * Linux: there are dir/file/symlink and so on.
1245 * NTFS: symlinks are "dir + reparse" or "file + reparse"
1246 * It would be good to create:
1247 * dir + reparse if 'symname' points to directory
1248 * or
1249 * file + reparse if 'symname' points to file
1250 * Unfortunately kern_path hangs if symname contains 'dir'.
1251 */
1252
1253 /*
1254 * struct path path;
1255 *
1256 * if (!kern_path(symname, LOOKUP_FOLLOW, &path)){
1257 * struct inode *target = d_inode(path.dentry);
1258 *
1259 * if (S_ISDIR(target->i_mode))
1260 * fa |= FILE_ATTRIBUTE_DIRECTORY;
1261 * // if ( target->i_sb == sb ){
1262 * // use relative path?
1263 * // }
1264 * path_put(&path);
1265 * }
1266 */
1267 } else if (S_ISREG(mode)) {
1268 if (sbi->options->sparse) {
1269 /* Sparse regular file, because of the 'sparse' mount option. */
1270 fa = FILE_ATTRIBUTE_SPARSE_FILE |
1271 FILE_ATTRIBUTE_ARCHIVE;
1272 } else if (dir_ni->std_fa & FILE_ATTRIBUTE_COMPRESSED) {
1273 /* Compressed regular file, if parent is compressed. */
1274 fa = FILE_ATTRIBUTE_COMPRESSED | FILE_ATTRIBUTE_ARCHIVE;
1275 } else {
1276 /* Regular file, default attributes. */
1277 fa = FILE_ATTRIBUTE_ARCHIVE;
1278 }
1279 } else {
1280 fa = FILE_ATTRIBUTE_ARCHIVE;
1281 }
1282
1283 if (!(mode & 0222))
1284 fa |= FILE_ATTRIBUTE_READONLY;
1285
1286 /* Allocate PATH_MAX bytes. */
1287 new_de = __getname();
1288 if (!new_de) {
1289 err = -ENOMEM;
1290 goto out1;
1291 }
1292
1293 /* Mark rw ntfs as dirty. It will be cleared at umount. */
1294 ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
1295
1296 /* Step 1: allocate and fill new mft record. */
1297 err = ntfs_look_free_mft(sbi, &ino, false, NULL, NULL);
1298 if (err)
1299 goto out2;
1300
1301 ni = ntfs_new_inode(sbi, ino, fa & FILE_ATTRIBUTE_DIRECTORY);
1302 if (IS_ERR(ni)) {
1303 err = PTR_ERR(ni);
1304 ni = NULL;
1305 goto out3;
1306 }
1307 inode = &ni->vfs_inode;
1308 inode_init_owner(mnt_userns, inode, dir, mode);
1309 mode = inode->i_mode;
1310
1311 inode->i_atime = inode->i_mtime = inode->i_ctime = ni->i_crtime =
1312 current_time(inode);
1313
1314 rec = ni->mi.mrec;
1315 rec->hard_links = cpu_to_le16(1);
1316 attr = Add2Ptr(rec, le16_to_cpu(rec->attr_off));
1317
1318 /* Get default security id. */
1319 sd = s_default_security;
1320 sd_size = sizeof(s_default_security);
1321
1322 if (is_ntfs3(sbi)) {
1323 security_id = dir_ni->std_security_id;
1324 if (le32_to_cpu(security_id) < SECURITY_ID_FIRST) {
1325 security_id = sbi->security.def_security_id;
1326
1327 if (security_id == SECURITY_ID_INVALID &&
1328 !ntfs_insert_security(sbi, sd, sd_size,
1329 &security_id, NULL))
1330 sbi->security.def_security_id = security_id;
1331 }
1332 }
1333
1334 /* Insert standard info. */
1335 std5 = Add2Ptr(attr, SIZEOF_RESIDENT);
1336
1337 if (security_id == SECURITY_ID_INVALID) {
1338 dsize = sizeof(struct ATTR_STD_INFO);
1339 } else {
1340 dsize = sizeof(struct ATTR_STD_INFO5);
1341 std5->security_id = security_id;
1342 ni->std_security_id = security_id;
1343 }
1344 asize = SIZEOF_RESIDENT + dsize;
1345
1346 attr->type = ATTR_STD;
1347 attr->size = cpu_to_le32(asize);
1348 attr->id = cpu_to_le16(aid++);
1349 attr->res.data_off = SIZEOF_RESIDENT_LE;
1350 attr->res.data_size = cpu_to_le32(dsize);
1351
1352 std5->cr_time = std5->m_time = std5->c_time = std5->a_time =
1353 kernel2nt(&inode->i_atime);
1354
1355 ni->std_fa = fa;
1356 std5->fa = fa;
1357
1358 attr = Add2Ptr(attr, asize);
1359
1360 /* Insert file name. */
1361 err = fill_name_de(sbi, new_de, name, uni);
1362 if (err)
1363 goto out4;
1364
1365 mi_get_ref(&ni->mi, &new_de->ref);
1366
1367 fname = (struct ATTR_FILE_NAME *)(new_de + 1);
1368 mi_get_ref(&dir_ni->mi, &fname->home);
1369 fname->dup.cr_time = fname->dup.m_time = fname->dup.c_time =
1370 fname->dup.a_time = std5->cr_time;
1371 fname->dup.alloc_size = fname->dup.data_size = 0;
1372 fname->dup.fa = std5->fa;
1373 fname->dup.ea_size = fname->dup.reparse = 0;
1374
1375 dsize = le16_to_cpu(new_de->key_size);
1376 asize = ALIGN(SIZEOF_RESIDENT + dsize, 8);
1377
1378 attr->type = ATTR_NAME;
1379 attr->size = cpu_to_le32(asize);
1380 attr->res.data_off = SIZEOF_RESIDENT_LE;
1381 attr->res.flags = RESIDENT_FLAG_INDEXED;
1382 attr->id = cpu_to_le16(aid++);
1383 attr->res.data_size = cpu_to_le32(dsize);
1384 memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), fname, dsize);
1385
1386 attr = Add2Ptr(attr, asize);
1387
1388 if (security_id == SECURITY_ID_INVALID) {
1389 /* Insert security attribute. */
1390 asize = SIZEOF_RESIDENT + ALIGN(sd_size, 8);
1391
1392 attr->type = ATTR_SECURE;
1393 attr->size = cpu_to_le32(asize);
1394 attr->id = cpu_to_le16(aid++);
1395 attr->res.data_off = SIZEOF_RESIDENT_LE;
1396 attr->res.data_size = cpu_to_le32(sd_size);
1397 memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), sd, sd_size);
1398
1399 attr = Add2Ptr(attr, asize);
1400 }
1401
1402 attr->id = cpu_to_le16(aid++);
1403 if (fa & FILE_ATTRIBUTE_DIRECTORY) {
1404 /*
1405 * Regular directory or symlink to directory.
1406 * Create root attribute.
1407 */
1408 dsize = sizeof(struct INDEX_ROOT) + sizeof(struct NTFS_DE);
1409 asize = sizeof(I30_NAME) + SIZEOF_RESIDENT + dsize;
1410
1411 attr->type = ATTR_ROOT;
1412 attr->size = cpu_to_le32(asize);
1413
1414 attr->name_len = ARRAY_SIZE(I30_NAME);
1415 attr->name_off = SIZEOF_RESIDENT_LE;
1416 attr->res.data_off =
1417 cpu_to_le16(sizeof(I30_NAME) + SIZEOF_RESIDENT);
1418 attr->res.data_size = cpu_to_le32(dsize);
1419 memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), I30_NAME,
1420 sizeof(I30_NAME));
1421
1422 root = Add2Ptr(attr, sizeof(I30_NAME) + SIZEOF_RESIDENT);
1423 memcpy(root, dir_root, offsetof(struct INDEX_ROOT, ihdr));
1424 root->ihdr.de_off =
1425 cpu_to_le32(sizeof(struct INDEX_HDR)); // 0x10
1426 root->ihdr.used = cpu_to_le32(sizeof(struct INDEX_HDR) +
1427 sizeof(struct NTFS_DE));
1428 root->ihdr.total = root->ihdr.used;
1429
1430 e = Add2Ptr(root, sizeof(struct INDEX_ROOT));
1431 e->size = cpu_to_le16(sizeof(struct NTFS_DE));
1432 e->flags = NTFS_IE_LAST;
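/*
 * Layout sketch of the resident ATTR_ROOT built above:
 *   [resident header][I30_NAME][struct INDEX_ROOT (ends with ihdr)]
 *   [one struct NTFS_DE marked NTFS_IE_LAST]
 * i.e. an empty directory index containing only the terminating entry.
 */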
1433 } else if (S_ISLNK(mode)) {
1434 /*
1435 * Symlink to file.
1436 * Create empty resident data attribute.
1437 */
1438 asize = SIZEOF_RESIDENT;
1439
1440 /* Insert empty ATTR_DATA */
1441 attr->type = ATTR_DATA;
1442 attr->size = cpu_to_le32(SIZEOF_RESIDENT);
1443 attr->name_off = SIZEOF_RESIDENT_LE;
1444 attr->res.data_off = SIZEOF_RESIDENT_LE;
1445 } else if (S_ISREG(mode)) {
1446 /*
1447 * Regular file. Create empty non resident data attribute.
1448 */
1449 attr->type = ATTR_DATA;
1450 attr->non_res = 1;
1451 attr->nres.evcn = cpu_to_le64(-1ll);
1452 if (fa & FILE_ATTRIBUTE_SPARSE_FILE) {
1453 attr->size = cpu_to_le32(SIZEOF_NONRESIDENT_EX + 8);
1454 attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
1455 attr->flags = ATTR_FLAG_SPARSED;
1456 asize = SIZEOF_NONRESIDENT_EX + 8;
1457 } else if (fa & FILE_ATTRIBUTE_COMPRESSED) {
1458 attr->size = cpu_to_le32(SIZEOF_NONRESIDENT_EX + 8);
1459 attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
1460 attr->flags = ATTR_FLAG_COMPRESSED;
1461 attr->nres.c_unit = COMPRESSION_UNIT;
1462 asize = SIZEOF_NONRESIDENT_EX + 8;
1463 } else {
1464 attr->size = cpu_to_le32(SIZEOF_NONRESIDENT + 8);
1465 attr->name_off = SIZEOF_NONRESIDENT_LE;
1466 asize = SIZEOF_NONRESIDENT + 8;
1467 }
1468 attr->nres.run_off = attr->name_off;
1469 } else {
1470 /*
1471 * Node. Create empty resident data attribute.
1472 */
1473 attr->type = ATTR_DATA;
1474 attr->size = cpu_to_le32(SIZEOF_RESIDENT);
1475 attr->name_off = SIZEOF_RESIDENT_LE;
1476 if (fa & FILE_ATTRIBUTE_SPARSE_FILE)
1477 attr->flags = ATTR_FLAG_SPARSED;
1478 else if (fa & FILE_ATTRIBUTE_COMPRESSED)
1479 attr->flags = ATTR_FLAG_COMPRESSED;
1480 attr->res.data_off = SIZEOF_RESIDENT_LE;
1481 asize = SIZEOF_RESIDENT;
1482 ni->ni_flags |= NI_FLAG_RESIDENT;
1483 }
1484
1485 if (S_ISDIR(mode)) {
1486 ni->ni_flags |= NI_FLAG_DIR;
1487 err = indx_init(&ni->dir, sbi, attr, INDEX_MUTEX_I30);
1488 if (err)
1489 goto out4;
1490 } else if (S_ISLNK(mode)) {
1491 rp = ntfs_create_reparse_buffer(sbi, symname, size, &nsize);
1492
1493 if (IS_ERR(rp)) {
1494 err = PTR_ERR(rp);
1495 rp = NULL;
1496 goto out4;
1497 }
1498
1499 /*
1500 * Insert ATTR_REPARSE.
1501 */
1502 attr = Add2Ptr(attr, asize);
1503 attr->type = ATTR_REPARSE;
1504 attr->id = cpu_to_le16(aid++);
1505
1506 /* Resident or non resident? */
1507 asize = ALIGN(SIZEOF_RESIDENT + nsize, 8);
1508 t16 = PtrOffset(rec, attr);
1509
1510 /*
1511 * The function 'ntfs_save_wsl_perm' below requires 0x78 bytes.
1512 * It is a good idea to keep extended attributes resident.
1513 */
1514 if (asize + t16 + 0x78 + 8 > sbi->record_size) {
1515 CLST alen;
1516 CLST clst = bytes_to_cluster(sbi, nsize);
1517
1518 /* Bytes available for the packed run list. */
1519 t16 = sbi->record_size - t16 - SIZEOF_NONRESIDENT;
1520
1521 attr->non_res = 1;
1522 attr->nres.evcn = cpu_to_le64(clst - 1);
1523 attr->name_off = SIZEOF_NONRESIDENT_LE;
1524 attr->nres.run_off = attr->name_off;
1525 attr->nres.data_size = cpu_to_le64(nsize);
1526 attr->nres.valid_size = attr->nres.data_size;
1527 attr->nres.alloc_size =
1528 cpu_to_le64(ntfs_up_cluster(sbi, nsize));
1529
1530 err = attr_allocate_clusters(sbi, &ni->file.run, 0, 0,
1531 clst, NULL, 0, &alen, 0,
1532 NULL);
1533 if (err)
1534 goto out5;
1535
1536 err = run_pack(&ni->file.run, 0, clst,
1537 Add2Ptr(attr, SIZEOF_NONRESIDENT), t16,
1538 &vcn);
1539 if (err < 0)
1540 goto out5;
1541
1542 if (vcn != clst) {
1543 err = -EINVAL;
1544 goto out5;
1545 }
1546
1547 asize = SIZEOF_NONRESIDENT + ALIGN(err, 8);
1548 } else {
1549 attr->res.data_off = SIZEOF_RESIDENT_LE;
1550 attr->res.data_size = cpu_to_le32(nsize);
1551 memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), rp, nsize);
1552 nsize = 0;
1553 }
1554 /* Size of symlink equals the length of input string. */
1555 inode->i_size = size;
1556
1557 attr->size = cpu_to_le32(asize);
1558
1559 err = ntfs_insert_reparse(sbi, IO_REPARSE_TAG_SYMLINK,
1560 &new_de->ref);
1561 if (err)
1562 goto out5;
1563
1564 rp_inserted = true;
1565 }
1566
1567 attr = Add2Ptr(attr, asize);
1568 attr->type = ATTR_END;
1569
1570 rec->used = cpu_to_le32(PtrOffset(rec, attr) + 8);
1571 rec->next_attr_id = cpu_to_le16(aid);
1572
1573 /* Step 2: Add new name in index. */
1574 err = indx_insert_entry(&dir_ni->dir, dir_ni, new_de, sbi, fnd, 0);
1575 if (err)
1576 goto out6;
1577
1578 /* Unlock parent directory before ntfs_init_acl. */
1579 ni_unlock(dir_ni);
1580
1581 inode->i_generation = le16_to_cpu(rec->seq);
1582
1583 dir->i_mtime = dir->i_ctime = inode->i_atime;
1584
1585 if (S_ISDIR(mode)) {
1586 inode->i_op = &ntfs_dir_inode_operations;
1587 inode->i_fop = &ntfs_dir_operations;
1588 } else if (S_ISLNK(mode)) {
1589 inode->i_op = &ntfs_link_inode_operations;
1590 inode->i_fop = NULL;
1591 inode->i_mapping->a_ops = &ntfs_aops;
1592 inode->i_size = size;
1593 inode_nohighmem(inode);
1594 } else if (S_ISREG(mode)) {
1595 inode->i_op = &ntfs_file_inode_operations;
1596 inode->i_fop = &ntfs_file_operations;
1597 inode->i_mapping->a_ops =
1598 is_compressed(ni) ? &ntfs_aops_cmpr : &ntfs_aops;
1599 init_rwsem(&ni->file.run_lock);
1600 } else {
1601 inode->i_op = &ntfs_special_inode_operations;
1602 init_special_inode(inode, mode, dev);
1603 }
1604
1605 #ifdef CONFIG_NTFS3_FS_POSIX_ACL
1606 if (!S_ISLNK(mode) && (sb->s_flags & SB_POSIXACL)) {
1607 err = ntfs_init_acl(mnt_userns, inode, dir);
1608 if (err)
1609 goto out7;
1610 } else
1611 #endif
1612 {
1613 inode->i_flags |= S_NOSEC;
1614 }
1615
1616 /* Write non resident data. */
1617 if (nsize) {
1618 err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rp, nsize, 0);
1619 if (err)
1620 goto out7;
1621 }
1622
1623 /*
1624 * Call 'd_instantiate' after inode->i_op is set
1625 * but before finish_open.
1626 */
1627 d_instantiate(dentry, inode);
1628
1629 ntfs_save_wsl_perm(inode);
1630 mark_inode_dirty(dir);
1631 mark_inode_dirty(inode);
1632
1633 /* Normal exit. */
1634 goto out2;
1635
1636 out7:
1637
1638 /* Undo 'indx_insert_entry'. */
1639 ni_lock_dir(dir_ni);
1640 indx_delete_entry(&dir_ni->dir, dir_ni, new_de + 1,
1641 le16_to_cpu(new_de->key_size), sbi);
1642 /* ni_unlock(dir_ni); will be called later. */
1643 out6:
1644 if (rp_inserted)
1645 ntfs_remove_reparse(sbi, IO_REPARSE_TAG_SYMLINK, &new_de->ref);
1646
1647 out5:
1648 if (!S_ISDIR(mode))
1649 run_deallocate(sbi, &ni->file.run, false);
1650
1651 out4:
1652 clear_rec_inuse(rec);
1653 clear_nlink(inode);
1654 ni->mi.dirty = false;
1655 discard_new_inode(inode);
1656 out3:
1657 ntfs_mark_rec_free(sbi, ino, false);
1658
1659 out2:
1660 __putname(new_de);
1661 kfree(rp);
1662
1663 out1:
1664 if (err) {
1665 ni_unlock(dir_ni);
1666 return ERR_PTR(err);
1667 }
1668
1669 unlock_new_inode(inode);
1670
1671 return inode;
1672 }
1673
1674 int ntfs_link_inode(struct inode *inode, struct dentry *dentry)
1675 {
1676 int err;
1677 struct ntfs_inode *ni = ntfs_i(inode);
1678 struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
1679 struct NTFS_DE *de;
1680
1681 /* Allocate PATH_MAX bytes. */
1682 de = __getname();
1683 if (!de)
1684 return -ENOMEM;
1685
1686 /* Mark rw ntfs as dirty. It will be cleared at umount. */
1687 ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
1688
1689 /* Construct 'de'. */
1690 err = fill_name_de(sbi, de, &dentry->d_name, NULL);
1691 if (err)
1692 goto out;
1693
1694 err = ni_add_name(ntfs_i(d_inode(dentry->d_parent)), ni, de);
1695 out:
1696 __putname(de);
1697 return err;
1698 }
1699
1700 /*
1701 * ntfs_unlink_inode
1702 *
1703 * inode_operations::unlink
1704 * inode_operations::rmdir
1705 */
1706 int ntfs_unlink_inode(struct inode *dir, const struct dentry *dentry)
1707 {
1708 int err;
1709 struct ntfs_sb_info *sbi = dir->i_sb->s_fs_info;
1710 struct inode *inode = d_inode(dentry);
1711 struct ntfs_inode *ni = ntfs_i(inode);
1712 struct ntfs_inode *dir_ni = ntfs_i(dir);
1713 struct NTFS_DE *de, *de2 = NULL;
1714 int undo_remove;
1715
1716 if (ntfs_is_meta_file(sbi, ni->mi.rno))
1717 return -EINVAL;
1718
1719 /* Allocate PATH_MAX bytes. */
1720 de = __getname();
1721 if (!de)
1722 return -ENOMEM;
1723
1724 ni_lock(ni);
1725
1726 if (S_ISDIR(inode->i_mode) && !dir_is_empty(inode)) {
1727 err = -ENOTEMPTY;
1728 goto out;
1729 }
1730
1731 err = fill_name_de(sbi, de, &dentry->d_name, NULL);
1732 if (err < 0)
1733 goto out;
1734
1735 undo_remove = 0;
1736 err = ni_remove_name(dir_ni, ni, de, &de2, &undo_remove);
1737
1738 if (!err) {
1739 drop_nlink(inode);
1740 dir->i_mtime = dir->i_ctime = current_time(dir);
1741 mark_inode_dirty(dir);
1742 inode->i_ctime = dir->i_ctime;
1743 if (inode->i_nlink)
1744 mark_inode_dirty(inode);
1745 } else if (!ni_remove_name_undo(dir_ni, ni, de, de2, undo_remove)) {
1746 _ntfs_bad_inode(inode);
1747 } else {
1748 if (ni_is_dirty(dir))
1749 mark_inode_dirty(dir);
1750 if (ni_is_dirty(inode))
1751 mark_inode_dirty(inode);
1752 }
1753
1754 out:
1755 ni_unlock(ni);
1756 __putname(de);
1757 return err;
1758 }
1759
1760 void ntfs_evict_inode(struct inode *inode)
1761 {
1762 truncate_inode_pages_final(&inode->i_data);
1763
1764 if (inode->i_nlink)
1765 _ni_write_inode(inode, inode_needs_sync(inode));
1766
1767 invalidate_inode_buffers(inode);
1768 clear_inode(inode);
1769
1770 ni_clear(ntfs_i(inode));
1771 }
1772
1773 static noinline int ntfs_readlink_hlp(struct inode *inode, char *buffer,
1774 int buflen)
1775 {
1776 int i, err = -EINVAL;
1777 struct ntfs_inode *ni = ntfs_i(inode);
1778 struct super_block *sb = inode->i_sb;
1779 struct ntfs_sb_info *sbi = sb->s_fs_info;
1780 u64 size;
1781 u16 ulen = 0;
1782 void *to_free = NULL;
1783 struct REPARSE_DATA_BUFFER *rp;
1784 const __le16 *uname;
1785 struct ATTRIB *attr;
1786
1787 /* Reparse data present. Try to parse it. */
1788 static_assert(!offsetof(struct REPARSE_DATA_BUFFER, ReparseTag));
1789 static_assert(sizeof(u32) == sizeof(rp->ReparseTag));
1790
1791 *buffer = 0;
1792
1793 attr = ni_find_attr(ni, NULL, NULL, ATTR_REPARSE, NULL, 0, NULL, NULL);
1794 if (!attr)
1795 goto out;
1796
1797 if (!attr->non_res) {
1798 rp = resident_data_ex(attr, sizeof(struct REPARSE_DATA_BUFFER));
1799 if (!rp)
1800 goto out;
1801 size = le32_to_cpu(attr->res.data_size);
1802 } else {
1803 size = le64_to_cpu(attr->nres.data_size);
1804 rp = NULL;
1805 }
1806
1807 if (size > sbi->reparse.max_size || size <= sizeof(u32))
1808 goto out;
1809
1810 if (!rp) {
1811 rp = kmalloc(size, GFP_NOFS);
1812 if (!rp) {
1813 err = -ENOMEM;
1814 goto out;
1815 }
1816 to_free = rp;
1817 /* Read into a temporary buffer. */
1818 err = ntfs_read_run_nb(sbi, &ni->file.run, 0, rp, size, NULL);
1819 if (err)
1820 goto out;
1821 }
1822
1823 /* Microsoft Tag. */
1824 switch (rp->ReparseTag) {
1825 case IO_REPARSE_TAG_MOUNT_POINT:
1826 /* Mount points and junctions. */
1827 /* Can we use 'Rp->MountPointReparseBuffer.PrintNameLength'? */
1828 if (size <= offsetof(struct REPARSE_DATA_BUFFER,
1829 MountPointReparseBuffer.PathBuffer))
1830 goto out;
1831 uname = Add2Ptr(rp,
1832 offsetof(struct REPARSE_DATA_BUFFER,
1833 MountPointReparseBuffer.PathBuffer) +
1834 le16_to_cpu(rp->MountPointReparseBuffer
1835 .PrintNameOffset));
1836 ulen = le16_to_cpu(rp->MountPointReparseBuffer.PrintNameLength);
1837 break;
1838
1839 case IO_REPARSE_TAG_SYMLINK:
1840 /* FolderSymbolicLink */
1841 /* Can we use 'Rp->SymbolicLinkReparseBuffer.PrintNameLength'? */
1842 if (size <= offsetof(struct REPARSE_DATA_BUFFER,
1843 SymbolicLinkReparseBuffer.PathBuffer))
1844 goto out;
1845 uname = Add2Ptr(
1846 rp, offsetof(struct REPARSE_DATA_BUFFER,
1847 SymbolicLinkReparseBuffer.PathBuffer) +
1848 le16_to_cpu(rp->SymbolicLinkReparseBuffer
1849 .PrintNameOffset));
1850 ulen = le16_to_cpu(
1851 rp->SymbolicLinkReparseBuffer.PrintNameLength);
1852 break;
1853
1854 case IO_REPARSE_TAG_CLOUD:
1855 case IO_REPARSE_TAG_CLOUD_1:
1856 case IO_REPARSE_TAG_CLOUD_2:
1857 case IO_REPARSE_TAG_CLOUD_3:
1858 case IO_REPARSE_TAG_CLOUD_4:
1859 case IO_REPARSE_TAG_CLOUD_5:
1860 case IO_REPARSE_TAG_CLOUD_6:
1861 case IO_REPARSE_TAG_CLOUD_7:
1862 case IO_REPARSE_TAG_CLOUD_8:
1863 case IO_REPARSE_TAG_CLOUD_9:
1864 case IO_REPARSE_TAG_CLOUD_A:
1865 case IO_REPARSE_TAG_CLOUD_B:
1866 case IO_REPARSE_TAG_CLOUD_C:
1867 case IO_REPARSE_TAG_CLOUD_D:
1868 case IO_REPARSE_TAG_CLOUD_E:
1869 case IO_REPARSE_TAG_CLOUD_F:
1870 err = sizeof("OneDrive") - 1;
1871 if (err > buflen)
1872 err = buflen;
1873 memcpy(buffer, "OneDrive", err);
1874 goto out;
1875
1876 default:
1877 if (IsReparseTagMicrosoft(rp->ReparseTag)) {
1878 /* Unknown Microsoft Tag. */
1879 goto out;
1880 }
1881 if (!IsReparseTagNameSurrogate(rp->ReparseTag) ||
1882 size <= sizeof(struct REPARSE_POINT)) {
1883 goto out;
1884 }
1885
1886 /* Users tag. */
1887 uname = Add2Ptr(rp, sizeof(struct REPARSE_POINT));
1888 ulen = le16_to_cpu(rp->ReparseDataLength) -
1889 sizeof(struct REPARSE_POINT);
1890 }
1891
1892 /* Convert ulen from bytes to UTF-16 code units. */
1893 ulen >>= 1;
1894
1895 /* Check that name is available. */
1896 if (!ulen || uname + ulen > (__le16 *)Add2Ptr(rp, size))
1897 goto out;
1898
1899 /* If name is already zero terminated then truncate it now. */
1900 if (!uname[ulen - 1])
1901 ulen -= 1;
1902
1903 err = ntfs_utf16_to_nls(sbi, uname, ulen, buffer, buflen);
1904
1905 if (err < 0)
1906 goto out;
1907
1908 /* Translate Windows '\' into Linux '/'. */
1909 for (i = 0; i < err; i++) {
1910 if (buffer[i] == '\\')
1911 buffer[i] = '/';
1912 }
1913
1914 /* Always set last zero. */
1915 buffer[err] = 0;
1916 out:
1917 kfree(to_free);
1918 return err;
1919 }
1920
1921 static const char *ntfs_get_link(struct dentry *de, struct inode *inode,
1922 struct delayed_call *done)
1923 {
1924 int err;
1925 char *ret;
1926
1927 if (!de)
1928 return ERR_PTR(-ECHILD);
1929
1930 ret = kmalloc(PAGE_SIZE, GFP_NOFS);
1931 if (!ret)
1932 return ERR_PTR(-ENOMEM);
1933
1934 err = ntfs_readlink_hlp(inode, ret, PAGE_SIZE);
1935 if (err < 0) {
1936 kfree(ret);
1937 return ERR_PTR(err);
1938 }
1939
1940 set_delayed_call(done, kfree_link, ret);
1941
1942 return ret;
1943 }
1944
1945 // clang-format off
1946 const struct inode_operations ntfs_link_inode_operations = {
1947 .get_link = ntfs_get_link,
1948 .setattr = ntfs3_setattr,
1949 .listxattr = ntfs_listxattr,
1950 .permission = ntfs_permission,
1951 };
1952
1953 const struct address_space_operations ntfs_aops = {
1954 .read_folio = ntfs_read_folio,
1955 .readahead = ntfs_readahead,
1956 .writepage = ntfs_writepage,
1957 .writepages = ntfs_writepages,
1958 .write_begin = ntfs_write_begin,
1959 .write_end = ntfs_write_end,
1960 .direct_IO = ntfs_direct_IO,
1961 .bmap = ntfs_bmap,
1962 .dirty_folio = block_dirty_folio,
1963 .invalidate_folio = block_invalidate_folio,
1964 };
1965
1966 const struct address_space_operations ntfs_aops_cmpr = {
1967 .read_folio = ntfs_read_folio,
1968 .readahead = ntfs_readahead,
1969 };
1970 // clang-format on
1971