1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *
4  * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
5  *
6  */
7 
8 #include <linux/buffer_head.h>
9 #include <linux/fs.h>
10 #include <linux/mpage.h>
11 #include <linux/namei.h>
12 #include <linux/nls.h>
13 #include <linux/uio.h>
14 #include <linux/writeback.h>
15 
16 #include "debug.h"
17 #include "ntfs.h"
18 #include "ntfs_fs.h"
19 
20 /*
21  * ntfs_read_mft - Read and parse an MFT record.
22  */
23 static struct inode *ntfs_read_mft(struct inode *inode,
24 				   const struct cpu_str *name,
25 				   const struct MFT_REF *ref)
26 {
27 	int err = 0;
28 	struct ntfs_inode *ni = ntfs_i(inode);
29 	struct super_block *sb = inode->i_sb;
30 	struct ntfs_sb_info *sbi = sb->s_fs_info;
31 	mode_t mode = 0;
32 	struct ATTR_STD_INFO5 *std5 = NULL;
33 	struct ATTR_LIST_ENTRY *le;
34 	struct ATTRIB *attr;
35 	bool is_match = false;
36 	bool is_root = false;
37 	bool is_dir;
38 	unsigned long ino = inode->i_ino;
39 	u32 rp_fa = 0, asize, t32;
40 	u16 roff, rsize, names = 0;
41 	const struct ATTR_FILE_NAME *fname = NULL;
42 	const struct INDEX_ROOT *root;
43 	struct REPARSE_DATA_BUFFER rp; // 0x18 bytes
44 	u64 t64;
45 	struct MFT_REC *rec;
46 	struct runs_tree *run;
47 
48 	inode->i_op = NULL;
49 	/* Setup 'uid' and 'gid' */
50 	inode->i_uid = sbi->options->fs_uid;
51 	inode->i_gid = sbi->options->fs_gid;
52 
53 	err = mi_init(&ni->mi, sbi, ino);
54 	if (err)
55 		goto out;
56 
57 	if (!sbi->mft.ni && ino == MFT_REC_MFT && !sb->s_root) {
58 		t64 = sbi->mft.lbo >> sbi->cluster_bits;
59 		t32 = bytes_to_cluster(sbi, MFT_REC_VOL * sbi->record_size);
60 		sbi->mft.ni = ni;
61 		init_rwsem(&ni->file.run_lock);
62 
63 		if (!run_add_entry(&ni->file.run, 0, t64, t32, true)) {
64 			err = -ENOMEM;
65 			goto out;
66 		}
67 	}
68 
69 	err = mi_read(&ni->mi, ino == MFT_REC_MFT);
70 
71 	if (err)
72 		goto out;
73 
74 	rec = ni->mi.mrec;
75 
76 	if (sbi->flags & NTFS_FLAGS_LOG_REPLAYING) {
77 		;
78 	} else if (ref->seq != rec->seq) {
79 		err = -EINVAL;
80 		ntfs_err(sb, "MFT: r=%lx, expect seq=%x instead of %x!", ino,
81 			 le16_to_cpu(ref->seq), le16_to_cpu(rec->seq));
82 		goto out;
83 	} else if (!is_rec_inuse(rec)) {
84 		err = -ESTALE;
85 		ntfs_err(sb, "Inode r=%x is not in use!", (u32)ino);
86 		goto out;
87 	}
88 
89 	if (le32_to_cpu(rec->total) != sbi->record_size) {
90 		/* Bad inode? */
91 		err = -EINVAL;
92 		goto out;
93 	}
94 
95 	if (!is_rec_base(rec)) {
96 		err = -EINVAL;
97 		goto out;
98 	}
99 
100 	/* Record should contain $I30 root. */
101 	is_dir = rec->flags & RECORD_FLAG_DIR;
102 
103 	/* MFT_REC_MFT is not a dir */
104 	if (is_dir && ino == MFT_REC_MFT) {
105 		err = -EINVAL;
106 		goto out;
107 	}
108 
109 	inode->i_generation = le16_to_cpu(rec->seq);
110 
111 	/* Enumerate all attributes of the MFT record. */
112 	le = NULL;
113 	attr = NULL;
114 
115 	/*
116 	 * To reduce tab pressure use goto instead of
117 	 * while( (attr = ni_enum_attr_ex(ni, attr, &le, NULL) ))
118 	 */
119 next_attr:
120 	run = NULL;
121 	err = -EINVAL;
122 	attr = ni_enum_attr_ex(ni, attr, &le, NULL);
123 	if (!attr)
124 		goto end_enum;
125 
126 	if (le && le->vcn) {
127 		/* This is a non-primary attribute segment. Ignore it unless it is the MFT's data attribute. */
128 		if (ino != MFT_REC_MFT || attr->type != ATTR_DATA)
129 			goto next_attr;
130 
131 		run = &ni->file.run;
132 		asize = le32_to_cpu(attr->size);
133 		goto attr_unpack_run;
134 	}
135 
136 	roff = attr->non_res ? 0 : le16_to_cpu(attr->res.data_off);
137 	rsize = attr->non_res ? 0 : le32_to_cpu(attr->res.data_size);
138 	asize = le32_to_cpu(attr->size);
139 
140 	if (le16_to_cpu(attr->name_off) + attr->name_len > asize)
141 		goto out;
142 
143 	if (attr->non_res) {
144 		t64 = le64_to_cpu(attr->nres.alloc_size);
145 		if (le64_to_cpu(attr->nres.data_size) > t64 ||
146 		    le64_to_cpu(attr->nres.valid_size) > t64)
147 			goto out;
148 	}
149 
150 	switch (attr->type) {
151 	case ATTR_STD:
152 		if (attr->non_res ||
153 		    asize < sizeof(struct ATTR_STD_INFO) + roff ||
154 		    rsize < sizeof(struct ATTR_STD_INFO))
155 			goto out;
156 
157 		if (std5)
158 			goto next_attr;
159 
160 		std5 = Add2Ptr(attr, roff);
161 
162 #ifdef STATX_BTIME
163 		nt2kernel(std5->cr_time, &ni->i_crtime);
164 #endif
165 		nt2kernel(std5->a_time, &inode->i_atime);
166 		nt2kernel(std5->c_time, &inode->i_ctime);
167 		nt2kernel(std5->m_time, &inode->i_mtime);
168 
169 		ni->std_fa = std5->fa;
170 
171 		if (asize >= sizeof(struct ATTR_STD_INFO5) + roff &&
172 		    rsize >= sizeof(struct ATTR_STD_INFO5))
173 			ni->std_security_id = std5->security_id;
174 		goto next_attr;
175 
176 	case ATTR_LIST:
177 		if (attr->name_len || le || ino == MFT_REC_LOG)
178 			goto out;
179 
180 		err = ntfs_load_attr_list(ni, attr);
181 		if (err)
182 			goto out;
183 
184 		le = NULL;
185 		attr = NULL;
186 		goto next_attr;
187 
188 	case ATTR_NAME:
189 		if (attr->non_res || asize < SIZEOF_ATTRIBUTE_FILENAME + roff ||
190 		    rsize < SIZEOF_ATTRIBUTE_FILENAME)
191 			goto out;
192 
193 		fname = Add2Ptr(attr, roff);
194 		if (fname->type == FILE_NAME_DOS)
195 			goto next_attr;
196 
197 		names += 1;
198 		if (name && name->len == fname->name_len &&
199 		    !ntfs_cmp_names_cpu(name, (struct le_str *)&fname->name_len,
200 					NULL, false))
201 			is_match = true;
202 
203 		goto next_attr;
204 
205 	case ATTR_DATA:
206 		if (is_dir) {
207 			/* Ignore data attribute in dir record. */
208 			goto next_attr;
209 		}
210 
211 		if (ino == MFT_REC_BADCLUST && !attr->non_res)
212 			goto next_attr;
213 
214 		if (attr->name_len &&
215 		    ((ino != MFT_REC_BADCLUST || !attr->non_res ||
216 		      attr->name_len != ARRAY_SIZE(BAD_NAME) ||
217 		      memcmp(attr_name(attr), BAD_NAME, sizeof(BAD_NAME))) &&
218 		     (ino != MFT_REC_SECURE || !attr->non_res ||
219 		      attr->name_len != ARRAY_SIZE(SDS_NAME) ||
220 		      memcmp(attr_name(attr), SDS_NAME, sizeof(SDS_NAME))))) {
221 			/* File contains stream attribute. Ignore it. */
222 			goto next_attr;
223 		}
224 
225 		if (is_attr_sparsed(attr))
226 			ni->std_fa |= FILE_ATTRIBUTE_SPARSE_FILE;
227 		else
228 			ni->std_fa &= ~FILE_ATTRIBUTE_SPARSE_FILE;
229 
230 		if (is_attr_compressed(attr))
231 			ni->std_fa |= FILE_ATTRIBUTE_COMPRESSED;
232 		else
233 			ni->std_fa &= ~FILE_ATTRIBUTE_COMPRESSED;
234 
235 		if (is_attr_encrypted(attr))
236 			ni->std_fa |= FILE_ATTRIBUTE_ENCRYPTED;
237 		else
238 			ni->std_fa &= ~FILE_ATTRIBUTE_ENCRYPTED;
239 
240 		if (!attr->non_res) {
241 			ni->i_valid = inode->i_size = rsize;
242 			inode_set_bytes(inode, rsize);
243 		}
244 
245 		mode = S_IFREG | (0777 & sbi->options->fs_fmask_inv);
246 
247 		if (!attr->non_res) {
248 			ni->ni_flags |= NI_FLAG_RESIDENT;
249 			goto next_attr;
250 		}
251 
252 		inode_set_bytes(inode, attr_ondisk_size(attr));
253 
254 		ni->i_valid = le64_to_cpu(attr->nres.valid_size);
255 		inode->i_size = le64_to_cpu(attr->nres.data_size);
256 		if (!attr->nres.alloc_size)
257 			goto next_attr;
258 
259 		run = ino == MFT_REC_BITMAP ? &sbi->used.bitmap.run
260 					    : &ni->file.run;
261 		break;
262 
263 	case ATTR_ROOT:
264 		if (attr->non_res)
265 			goto out;
266 
267 		root = Add2Ptr(attr, roff);
268 
269 		if (attr->name_len != ARRAY_SIZE(I30_NAME) ||
270 		    memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME)))
271 			goto next_attr;
272 
273 		if (root->type != ATTR_NAME ||
274 		    root->rule != NTFS_COLLATION_TYPE_FILENAME)
275 			goto out;
276 
277 		if (!is_dir)
278 			goto next_attr;
279 
280 		is_root = true;
281 		ni->ni_flags |= NI_FLAG_DIR;
282 
283 		err = indx_init(&ni->dir, sbi, attr, INDEX_MUTEX_I30);
284 		if (err)
285 			goto out;
286 
287 		mode = sb->s_root
288 			       ? (S_IFDIR | (0777 & sbi->options->fs_dmask_inv))
289 			       : (S_IFDIR | 0777);
290 		goto next_attr;
291 
292 	case ATTR_ALLOC:
293 		if (!is_root || attr->name_len != ARRAY_SIZE(I30_NAME) ||
294 		    memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME)))
295 			goto next_attr;
296 
297 		inode->i_size = le64_to_cpu(attr->nres.data_size);
298 		ni->i_valid = le64_to_cpu(attr->nres.valid_size);
299 		inode_set_bytes(inode, le64_to_cpu(attr->nres.alloc_size));
300 
301 		run = &ni->dir.alloc_run;
302 		break;
303 
304 	case ATTR_BITMAP:
305 		if (ino == MFT_REC_MFT) {
306 			if (!attr->non_res)
307 				goto out;
308 #ifndef CONFIG_NTFS3_64BIT_CLUSTER
309 			/* 0x20000000 = 2^32 / 8 */
310 			if (le64_to_cpu(attr->nres.alloc_size) >= 0x20000000)
311 				goto out;
312 #endif
313 			run = &sbi->mft.bitmap.run;
314 			break;
315 		} else if (is_dir && attr->name_len == ARRAY_SIZE(I30_NAME) &&
316 			   !memcmp(attr_name(attr), I30_NAME,
317 				   sizeof(I30_NAME)) &&
318 			   attr->non_res) {
319 			run = &ni->dir.bitmap_run;
320 			break;
321 		}
322 		goto next_attr;
323 
324 	case ATTR_REPARSE:
325 		if (attr->name_len)
326 			goto next_attr;
327 
328 		rp_fa = ni_parse_reparse(ni, attr, &rp);
329 		switch (rp_fa) {
330 		case REPARSE_LINK:
331 			/*
332 			 * Normal symlink.
333 			 * Assume one unicode symbol == one utf8.
334 			 */
335 			inode->i_size = le16_to_cpu(rp.SymbolicLinkReparseBuffer
336 							    .PrintNameLength) /
337 					sizeof(u16);
338 
339 			ni->i_valid = inode->i_size;
340 
341 			/* Clear directory bit. */
342 			if (ni->ni_flags & NI_FLAG_DIR) {
343 				indx_clear(&ni->dir);
344 				memset(&ni->dir, 0, sizeof(ni->dir));
345 				ni->ni_flags &= ~NI_FLAG_DIR;
346 			} else {
347 				run_close(&ni->file.run);
348 			}
349 			mode = S_IFLNK | 0777;
350 			is_dir = false;
351 			if (attr->non_res) {
352 				run = &ni->file.run;
353 				goto attr_unpack_run; // Double break.
354 			}
355 			break;
356 
357 		case REPARSE_COMPRESSED:
358 			break;
359 
360 		case REPARSE_DEDUPLICATED:
361 			break;
362 		}
363 		goto next_attr;
364 
365 	case ATTR_EA_INFO:
366 		if (!attr->name_len &&
367 		    resident_data_ex(attr, sizeof(struct EA_INFO))) {
368 			ni->ni_flags |= NI_FLAG_EA;
369 			/*
370 			 * ntfs_get_wsl_perm updates inode->i_uid, inode->i_gid, inode->i_mode
371 			 */
372 			inode->i_mode = mode;
373 			ntfs_get_wsl_perm(inode);
374 			mode = inode->i_mode;
375 		}
376 		goto next_attr;
377 
378 	default:
379 		goto next_attr;
380 	}
381 
382 attr_unpack_run:
383 	roff = le16_to_cpu(attr->nres.run_off);
384 
385 	if (roff > asize) {
386 		err = -EINVAL;
387 		goto out;
388 	}
389 
390 	t64 = le64_to_cpu(attr->nres.svcn);
391 
392 	err = run_unpack_ex(run, sbi, ino, t64, le64_to_cpu(attr->nres.evcn),
393 			    t64, Add2Ptr(attr, roff), asize - roff);
394 	if (err < 0)
395 		goto out;
396 	err = 0;
397 	goto next_attr;
398 
399 end_enum:
400 
401 	if (!std5)
402 		goto out;
403 
404 	if (!is_match && name) {
405 		/* Reuse rec as buffer for ascii name. */
406 		err = -ENOENT;
407 		goto out;
408 	}
409 
410 	if (std5->fa & FILE_ATTRIBUTE_READONLY)
411 		mode &= ~0222;
412 
413 	if (!names) {
414 		err = -EINVAL;
415 		goto out;
416 	}
417 
418 	if (names != le16_to_cpu(rec->hard_links)) {
419 		/* Correct minor error on the fly. Do not mark inode as dirty. */
420 		rec->hard_links = cpu_to_le16(names);
421 		ni->mi.dirty = true;
422 	}
423 
424 	set_nlink(inode, names);
425 
426 	if (S_ISDIR(mode)) {
427 		ni->std_fa |= FILE_ATTRIBUTE_DIRECTORY;
428 
429 		/*
430 		 * Dot and dot-dot should be included in the count but were
431 		 * not included in the enumeration.
432 		 * Hard links to directories are usually disabled.
433 		 */
434 		inode->i_op = &ntfs_dir_inode_operations;
435 		inode->i_fop = &ntfs_dir_operations;
436 		ni->i_valid = 0;
437 	} else if (S_ISLNK(mode)) {
438 		ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
439 		inode->i_op = &ntfs_link_inode_operations;
440 		inode->i_fop = NULL;
441 		inode_nohighmem(inode);
442 	} else if (S_ISREG(mode)) {
443 		ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
444 		inode->i_op = &ntfs_file_inode_operations;
445 		inode->i_fop = &ntfs_file_operations;
446 		inode->i_mapping->a_ops =
447 			is_compressed(ni) ? &ntfs_aops_cmpr : &ntfs_aops;
448 		if (ino != MFT_REC_MFT)
449 			init_rwsem(&ni->file.run_lock);
450 	} else if (S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode) ||
451 		   S_ISSOCK(mode)) {
452 		inode->i_op = &ntfs_special_inode_operations;
453 		init_special_inode(inode, mode, inode->i_rdev);
454 	} else if (fname && fname->home.low == cpu_to_le32(MFT_REC_EXTEND) &&
455 		   fname->home.seq == cpu_to_le16(MFT_REC_EXTEND)) {
456 		/* Records in $Extend are not regular files or directories. */
457 		inode->i_op = &ntfs_file_inode_operations;
458 	} else {
459 		err = -EINVAL;
460 		goto out;
461 	}
462 
463 	if ((sbi->options->sys_immutable &&
464 	     (std5->fa & FILE_ATTRIBUTE_SYSTEM)) &&
465 	    !S_ISFIFO(mode) && !S_ISSOCK(mode) && !S_ISLNK(mode)) {
466 		inode->i_flags |= S_IMMUTABLE;
467 	} else {
468 		inode->i_flags &= ~S_IMMUTABLE;
469 	}
470 
471 	inode->i_mode = mode;
472 	if (!(ni->ni_flags & NI_FLAG_EA)) {
473 		/* If no xattr then no security (stored in xattr). */
474 		inode->i_flags |= S_NOSEC;
475 	}
476 
477 	if (ino == MFT_REC_MFT && !sb->s_root)
478 		sbi->mft.ni = NULL;
479 
480 	unlock_new_inode(inode);
481 
482 	return inode;
483 
484 out:
485 	if (ino == MFT_REC_MFT && !sb->s_root)
486 		sbi->mft.ni = NULL;
487 
488 	iget_failed(inode);
489 	return ERR_PTR(err);
490 }
491 
492 /*
493  * ntfs_test_inode
494  *
495  * Return: 1 if match.
496  */
497 static int ntfs_test_inode(struct inode *inode, void *data)
498 {
499 	struct MFT_REF *ref = data;
500 
501 	return ino_get(ref) == inode->i_ino;
502 }
503 
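/*
 * ntfs_set_inode - iget5_locked 'set' callback: store the MFT record
 * number from @ref as the inode number.
 */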
504 static int ntfs_set_inode(struct inode *inode, void *data)
505 {
506 	const struct MFT_REF *ref = data;
507 
508 	inode->i_ino = ino_get(ref);
509 	return 0;
510 }
511 
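/*
 * ntfs_iget5 - Look up an inode by MFT reference, loading it from the
 * MFT if it is not cached yet. A cached inode whose sequence number
 * does not match @ref is marked bad.
 */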
512 struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref,
513 			 const struct cpu_str *name)
514 {
515 	struct inode *inode;
516 
517 	inode = iget5_locked(sb, ino_get(ref), ntfs_test_inode, ntfs_set_inode,
518 			     (void *)ref);
519 	if (unlikely(!inode))
520 		return ERR_PTR(-ENOMEM);
521 
522 	/* If this is a freshly allocated inode, need to read it now. */
523 	if (inode->i_state & I_NEW)
524 		inode = ntfs_read_mft(inode, name, ref);
525 	else if (ref->seq != ntfs_i(inode)->mi.mrec->seq) {
526 		/* Inode overlaps? */
527 		make_bad_inode(inode);
528 	}
529 
530 	if (IS_ERR(inode) && name)
531 		ntfs_set_state(sb->s_fs_info, NTFS_DIRTY_ERROR);
532 
533 	return inode;
534 }
535 
536 enum get_block_ctx {
537 	GET_BLOCK_GENERAL = 0,
538 	GET_BLOCK_WRITE_BEGIN = 1,
539 	GET_BLOCK_DIRECT_IO_R = 2,
540 	GET_BLOCK_DIRECT_IO_W = 3,
541 	GET_BLOCK_BMAP = 4,
542 };
543 
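/*
 * ntfs_get_block_vbo - Map the virtual byte offset @vbo of a file to an
 * on-disk block in @bh. Handles resident attributes, sparse runs, newly
 * allocated clusters and reads that cross the valid data size.
 */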
544 static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
545 				       struct buffer_head *bh, int create,
546 				       enum get_block_ctx ctx)
547 {
548 	struct super_block *sb = inode->i_sb;
549 	struct ntfs_sb_info *sbi = sb->s_fs_info;
550 	struct ntfs_inode *ni = ntfs_i(inode);
551 	struct page *page = bh->b_page;
552 	u8 cluster_bits = sbi->cluster_bits;
553 	u32 block_size = sb->s_blocksize;
554 	u64 bytes, lbo, valid;
555 	u32 off;
556 	int err;
557 	CLST vcn, lcn, len;
558 	bool new;
559 
560 	/* Clear previous state. */
561 	clear_buffer_new(bh);
562 	clear_buffer_uptodate(bh);
563 
564 	/* Direct write uses 'create=0'. */
565 	if (!create && vbo >= ni->i_valid) {
566 		/* Out of valid. */
567 		return 0;
568 	}
569 
570 	if (vbo >= inode->i_size) {
571 		/* Out of size. */
572 		return 0;
573 	}
574 
575 	if (is_resident(ni)) {
576 		ni_lock(ni);
577 		err = attr_data_read_resident(ni, page);
578 		ni_unlock(ni);
579 
580 		if (!err)
581 			set_buffer_uptodate(bh);
582 		bh->b_size = block_size;
583 		return err;
584 	}
585 
586 	vcn = vbo >> cluster_bits;
587 	off = vbo & sbi->cluster_mask;
588 	new = false;
589 
590 	err = attr_data_get_block(ni, vcn, 1, &lcn, &len, create ? &new : NULL);
591 	if (err)
592 		goto out;
593 
594 	if (!len)
595 		return 0;
596 
597 	bytes = ((u64)len << cluster_bits) - off;
598 
599 	if (lcn == SPARSE_LCN) {
600 		if (!create) {
601 			if (bh->b_size > bytes)
602 				bh->b_size = bytes;
603 			return 0;
604 		}
605 		WARN_ON(1);
606 	}
607 
608 	if (new) {
609 		set_buffer_new(bh);
610 		if ((len << cluster_bits) > block_size)
611 			ntfs_sparse_cluster(inode, page, vcn, len);
612 	}
613 
614 	lbo = ((u64)lcn << cluster_bits) + off;
615 
616 	set_buffer_mapped(bh);
617 	bh->b_bdev = sb->s_bdev;
618 	bh->b_blocknr = lbo >> sb->s_blocksize_bits;
619 
620 	valid = ni->i_valid;
621 
622 	if (ctx == GET_BLOCK_DIRECT_IO_W) {
623 		/* ntfs_direct_IO will update ni->i_valid. */
624 		if (vbo >= valid)
625 			set_buffer_new(bh);
626 	} else if (create) {
627 		/* Normal write. */
628 		if (bytes > bh->b_size)
629 			bytes = bh->b_size;
630 
631 		if (vbo >= valid)
632 			set_buffer_new(bh);
633 
634 		if (vbo + bytes > valid) {
635 			ni->i_valid = vbo + bytes;
636 			mark_inode_dirty(inode);
637 		}
638 	} else if (vbo >= valid) {
639 		/* Read out of valid data. */
640 		/* Should never get here: this case was already checked above. */
641 		clear_buffer_mapped(bh);
642 	} else if (vbo + bytes <= valid) {
643 		/* Normal read. */
644 	} else if (vbo + block_size <= valid) {
645 		/* Normal short read. */
646 		bytes = block_size;
647 	} else {
648 		/*
649 		 * Read across valid size: vbo < valid && valid < vbo + block_size
650 		 */
651 		bytes = block_size;
652 
653 		if (page) {
654 			u32 voff = valid - vbo;
655 
656 			bh->b_size = block_size;
657 			off = vbo & (PAGE_SIZE - 1);
658 			set_bh_page(bh, page, off);
659 			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
660 			wait_on_buffer(bh);
661 			if (!buffer_uptodate(bh)) {
662 				err = -EIO;
663 				goto out;
664 			}
665 			zero_user_segment(page, off + voff, off + block_size);
666 		}
667 	}
668 
669 	if (bh->b_size > bytes)
670 		bh->b_size = bytes;
671 
672 #ifndef __LP64__
673 	if (ctx == GET_BLOCK_DIRECT_IO_W || ctx == GET_BLOCK_DIRECT_IO_R) {
674 		static_assert(sizeof(size_t) < sizeof(loff_t));
675 		if (bytes > 0x40000000u)
676 			bh->b_size = 0x40000000u;
677 	}
678 #endif
679 
680 	return 0;
681 
682 out:
683 	return err;
684 }
685 
686 int ntfs_get_block(struct inode *inode, sector_t vbn,
687 		   struct buffer_head *bh_result, int create)
688 {
689 	return ntfs_get_block_vbo(inode, (u64)vbn << inode->i_blkbits,
690 				  bh_result, create, GET_BLOCK_GENERAL);
691 }
692 
693 static int ntfs_get_block_bmap(struct inode *inode, sector_t vsn,
694 			       struct buffer_head *bh_result, int create)
695 {
696 	return ntfs_get_block_vbo(inode,
697 				  (u64)vsn << inode->i_sb->s_blocksize_bits,
698 				  bh_result, create, GET_BLOCK_BMAP);
699 }
700 
701 static sector_t ntfs_bmap(struct address_space *mapping, sector_t block)
702 {
703 	return generic_block_bmap(mapping, block, ntfs_get_block_bmap);
704 }
705 
706 static int ntfs_readpage(struct file *file, struct page *page)
707 {
708 	int err;
709 	struct address_space *mapping = page->mapping;
710 	struct inode *inode = mapping->host;
711 	struct ntfs_inode *ni = ntfs_i(inode);
712 
713 	if (is_resident(ni)) {
714 		ni_lock(ni);
715 		err = attr_data_read_resident(ni, page);
716 		ni_unlock(ni);
717 		if (err != E_NTFS_NONRESIDENT) {
718 			unlock_page(page);
719 			return err;
720 		}
721 	}
722 
723 	if (is_compressed(ni)) {
724 		ni_lock(ni);
725 		err = ni_readpage_cmpr(ni, page);
726 		ni_unlock(ni);
727 		return err;
728 	}
729 
730 	/* Normal + sparse files. */
731 	return mpage_readpage(page, ntfs_get_block);
732 }
733 
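/*
 * ntfs_readahead - Address_space_operations::readahead.
 *
 * Skipped for resident and compressed files and for ranges that cross
 * the valid size; those are read page by page instead.
 */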
734 static void ntfs_readahead(struct readahead_control *rac)
735 {
736 	struct address_space *mapping = rac->mapping;
737 	struct inode *inode = mapping->host;
738 	struct ntfs_inode *ni = ntfs_i(inode);
739 	u64 valid;
740 	loff_t pos;
741 
742 	if (is_resident(ni)) {
743 		/* No readahead for resident. */
744 		return;
745 	}
746 
747 	if (is_compressed(ni)) {
748 		/* No readahead for compressed. */
749 		return;
750 	}
751 
752 	valid = ni->i_valid;
753 	pos = readahead_pos(rac);
754 
755 	if (valid < i_size_read(inode) && pos <= valid &&
756 	    valid < pos + readahead_length(rac)) {
757 		/* Range crosses 'valid'. Read it page by page. */
758 		return;
759 	}
760 
761 	mpage_readahead(rac, ntfs_get_block);
762 }
763 
764 static int ntfs_get_block_direct_IO_R(struct inode *inode, sector_t iblock,
765 				      struct buffer_head *bh_result, int create)
766 {
767 	return ntfs_get_block_vbo(inode, (u64)iblock << inode->i_blkbits,
768 				  bh_result, create, GET_BLOCK_DIRECT_IO_R);
769 }
770 
771 static int ntfs_get_block_direct_IO_W(struct inode *inode, sector_t iblock,
772 				      struct buffer_head *bh_result, int create)
773 {
774 	return ntfs_get_block_vbo(inode, (u64)iblock << inode->i_blkbits,
775 				  bh_result, create, GET_BLOCK_DIRECT_IO_W);
776 }
777 
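/*
 * ntfs_direct_IO - Address_space_operations::direct_IO.
 *
 * Resident files fall back to buffered I/O (return 0). After a write the
 * valid size is advanced; after a read the tail beyond the valid size is
 * zeroed.
 */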
778 static ssize_t ntfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
779 {
780 	struct file *file = iocb->ki_filp;
781 	struct address_space *mapping = file->f_mapping;
782 	struct inode *inode = mapping->host;
783 	struct ntfs_inode *ni = ntfs_i(inode);
784 	loff_t vbo = iocb->ki_pos;
785 	loff_t end;
786 	int wr = iov_iter_rw(iter) & WRITE;
787 	size_t iter_count = iov_iter_count(iter);
788 	loff_t valid;
789 	ssize_t ret;
790 
791 	if (is_resident(ni)) {
792 		/* Switch to buffered write. */
793 		ret = 0;
794 		goto out;
795 	}
796 
797 	ret = blockdev_direct_IO(iocb, inode, iter,
798 				 wr ? ntfs_get_block_direct_IO_W
799 				    : ntfs_get_block_direct_IO_R);
800 
801 	if (ret > 0)
802 		end = vbo + ret;
803 	else if (wr && ret == -EIOCBQUEUED)
804 		end = vbo + iter_count;
805 	else
806 		goto out;
807 
808 	valid = ni->i_valid;
809 	if (wr) {
810 		if (end > valid && !S_ISBLK(inode->i_mode)) {
811 			ni->i_valid = end;
812 			mark_inode_dirty(inode);
813 		}
814 	} else if (vbo < valid && valid < end) {
815 		/* Fix page. */
816 		iov_iter_revert(iter, end - valid);
817 		iov_iter_zero(end - valid, iter);
818 	}
819 
820 out:
821 	return ret;
822 }
823 
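/*
 * ntfs_set_size - Resize the data attribute of @inode to @new_size,
 * rejecting sizes above the volume limits.
 */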
824 int ntfs_set_size(struct inode *inode, u64 new_size)
825 {
826 	struct super_block *sb = inode->i_sb;
827 	struct ntfs_sb_info *sbi = sb->s_fs_info;
828 	struct ntfs_inode *ni = ntfs_i(inode);
829 	int err;
830 
831 	/* Check for maximum file size. */
832 	if (is_sparsed(ni) || is_compressed(ni)) {
833 		if (new_size > sbi->maxbytes_sparse) {
834 			err = -EFBIG;
835 			goto out;
836 		}
837 	} else if (new_size > sbi->maxbytes) {
838 		err = -EFBIG;
839 		goto out;
840 	}
841 
842 	ni_lock(ni);
843 	down_write(&ni->file.run_lock);
844 
845 	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
846 			    &ni->i_valid, true, NULL);
847 
848 	up_write(&ni->file.run_lock);
849 	ni_unlock(ni);
850 
851 	mark_inode_dirty(inode);
852 
853 out:
854 	return err;
855 }
856 
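/*
 * ntfs_writepage - Address_space_operations::writepage.
 *
 * For resident files the data is written back into the MFT record;
 * otherwise the page goes through block_write_full_page.
 */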
857 static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
858 {
859 	struct address_space *mapping = page->mapping;
860 	struct inode *inode = mapping->host;
861 	struct ntfs_inode *ni = ntfs_i(inode);
862 	int err;
863 
864 	if (is_resident(ni)) {
865 		ni_lock(ni);
866 		err = attr_data_write_resident(ni, page);
867 		ni_unlock(ni);
868 		if (err != E_NTFS_NONRESIDENT) {
869 			unlock_page(page);
870 			return err;
871 		}
872 	}
873 
874 	return block_write_full_page(page, ntfs_get_block, wbc);
875 }
876 
877 static int ntfs_writepages(struct address_space *mapping,
878 			   struct writeback_control *wbc)
879 {
880 	struct inode *inode = mapping->host;
881 	struct ntfs_inode *ni = ntfs_i(inode);
882 	/* Redirect call to 'ntfs_writepage' for resident files. */
883 	get_block_t *get_block = is_resident(ni) ? NULL : &ntfs_get_block;
884 
885 	return mpage_writepages(mapping, wbc, get_block);
886 }
887 
888 static int ntfs_get_block_write_begin(struct inode *inode, sector_t vbn,
889 				      struct buffer_head *bh_result, int create)
890 {
891 	return ntfs_get_block_vbo(inode, (u64)vbn << inode->i_blkbits,
892 				  bh_result, create, GET_BLOCK_WRITE_BEGIN);
893 }
894 
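/*
 * ntfs_write_begin - Address_space_operations::write_begin.
 *
 * For resident files the page is filled from the MFT record; otherwise
 * block_write_begin is used.
 */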
895 static int ntfs_write_begin(struct file *file, struct address_space *mapping,
896 			    loff_t pos, u32 len, u32 flags, struct page **pagep,
897 			    void **fsdata)
898 {
899 	int err;
900 	struct inode *inode = mapping->host;
901 	struct ntfs_inode *ni = ntfs_i(inode);
902 
903 	*pagep = NULL;
904 	if (is_resident(ni)) {
905 		struct page *page = grab_cache_page_write_begin(
906 			mapping, pos >> PAGE_SHIFT, flags);
907 
908 		if (!page) {
909 			err = -ENOMEM;
910 			goto out;
911 		}
912 
913 		ni_lock(ni);
914 		err = attr_data_read_resident(ni, page);
915 		ni_unlock(ni);
916 
917 		if (!err) {
918 			*pagep = page;
919 			goto out;
920 		}
921 		unlock_page(page);
922 		put_page(page);
923 
924 		if (err != E_NTFS_NONRESIDENT)
925 			goto out;
926 	}
927 
928 	err = block_write_begin(mapping, pos, len, flags, pagep,
929 				ntfs_get_block_write_begin);
930 
931 out:
932 	return err;
933 }
934 
935 /*
936  * ntfs_write_end - Address_space_operations::write_end.
937  */
938 static int ntfs_write_end(struct file *file, struct address_space *mapping,
939 			  loff_t pos, u32 len, u32 copied, struct page *page,
940 			  void *fsdata)
941 
942 {
943 	struct inode *inode = mapping->host;
944 	struct ntfs_inode *ni = ntfs_i(inode);
945 	u64 valid = ni->i_valid;
946 	bool dirty = false;
947 	int err;
948 
949 	if (is_resident(ni)) {
950 		ni_lock(ni);
951 		err = attr_data_write_resident(ni, page);
952 		ni_unlock(ni);
953 		if (!err) {
954 			dirty = true;
955 			/* Clear any buffers in page. */
956 			if (page_has_buffers(page)) {
957 				struct buffer_head *head, *bh;
958 
959 				bh = head = page_buffers(page);
960 				do {
961 					clear_buffer_dirty(bh);
962 					clear_buffer_mapped(bh);
963 					set_buffer_uptodate(bh);
964 				} while (head != (bh = bh->b_this_page));
965 			}
966 			SetPageUptodate(page);
967 			err = copied;
968 		}
969 		unlock_page(page);
970 		put_page(page);
971 	} else {
972 		err = generic_write_end(file, mapping, pos, len, copied, page,
973 					fsdata);
974 	}
975 
976 	if (err >= 0) {
977 		if (!(ni->std_fa & FILE_ATTRIBUTE_ARCHIVE)) {
978 			inode->i_ctime = inode->i_mtime = current_time(inode);
979 			ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
980 			dirty = true;
981 		}
982 
983 		if (valid != ni->i_valid) {
984 			/* ni->i_valid is changed in ntfs_get_block_vbo. */
985 			dirty = true;
986 		}
987 
988 		if (dirty)
989 			mark_inode_dirty(inode);
990 	}
991 
992 	return err;
993 }
994 
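/*
 * reset_log_file - Fill the log file data with 0xff bytes, page by page.
 */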
995 int reset_log_file(struct inode *inode)
996 {
997 	int err;
998 	loff_t pos = 0;
999 	u32 log_size = inode->i_size;
1000 	struct address_space *mapping = inode->i_mapping;
1001 
1002 	for (;;) {
1003 		u32 len;
1004 		void *kaddr;
1005 		struct page *page;
1006 
1007 		len = pos + PAGE_SIZE > log_size ? (log_size - pos) : PAGE_SIZE;
1008 
1009 		err = block_write_begin(mapping, pos, len, 0, &page,
1010 					ntfs_get_block_write_begin);
1011 		if (err)
1012 			goto out;
1013 
1014 		kaddr = kmap_atomic(page);
1015 		memset(kaddr, -1, len);
1016 		kunmap_atomic(kaddr);
1017 		flush_dcache_page(page);
1018 
1019 		err = block_write_end(NULL, mapping, pos, len, len, page, NULL);
1020 		if (err < 0)
1021 			goto out;
1022 		pos += len;
1023 
1024 		if (pos >= log_size)
1025 			break;
1026 		balance_dirty_pages_ratelimited(mapping);
1027 	}
1028 out:
1029 	mark_inode_dirty_sync(inode);
1030 
1031 	return err;
1032 }
1033 
1034 int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc)
1035 {
1036 	return _ni_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
1037 }
1038 
1039 int ntfs_sync_inode(struct inode *inode)
1040 {
1041 	return _ni_write_inode(inode, 1);
1042 }
1043 
1044 /*
1045  * writeback_inode - Helper function for ntfs_flush_inodes().
1046  *
1047  * This writes both the inode and the file data blocks, waiting
1048  * for in flight data blocks before the start of the call.  It
1049  * does not wait for any io started during the call.
1050  */
1051 static int writeback_inode(struct inode *inode)
1052 {
1053 	int ret = sync_inode_metadata(inode, 0);
1054 
1055 	if (!ret)
1056 		ret = filemap_fdatawrite(inode->i_mapping);
1057 	return ret;
1058 }
1059 
1060 /*
1061  * ntfs_flush_inodes
1062  *
1063  * Write data and metadata corresponding to i1 and i2.  The io is
1064  * started but we do not wait for any of it to finish.
1065  *
1066  * filemap_flush() is used for the block device, so if there is a dirty
1067  * page for a block already in flight, we will not wait and start the
1068  * io over again.
1069  */
1070 int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
1071 		      struct inode *i2)
1072 {
1073 	int ret = 0;
1074 
1075 	if (i1)
1076 		ret = writeback_inode(i1);
1077 	if (!ret && i2)
1078 		ret = writeback_inode(i2);
1079 	if (!ret)
1080 		ret = filemap_flush(sb->s_bdev->bd_inode->i_mapping);
1081 	return ret;
1082 }
1083 
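/*
 * inode_write_data - Copy @bytes bytes from @data into the page cache of
 * @inode, one page at a time.
 */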
1084 int inode_write_data(struct inode *inode, const void *data, size_t bytes)
1085 {
1086 	pgoff_t idx;
1087 
1088 	/* Write non resident data. */
1089 	for (idx = 0; bytes; idx++) {
1090 		size_t op = bytes > PAGE_SIZE ? PAGE_SIZE : bytes;
1091 		struct page *page = ntfs_map_page(inode->i_mapping, idx);
1092 
1093 		if (IS_ERR(page))
1094 			return PTR_ERR(page);
1095 
1096 		lock_page(page);
1097 		WARN_ON(!PageUptodate(page));
1098 		ClearPageUptodate(page);
1099 
1100 		memcpy(page_address(page), data, op);
1101 
1102 		flush_dcache_page(page);
1103 		SetPageUptodate(page);
1104 		unlock_page(page);
1105 
1106 		ntfs_unmap_page(page);
1107 
1108 		bytes -= op;
1109 		data = Add2Ptr(data, PAGE_SIZE);
1110 	}
1111 	return 0;
1112 }
1113 
1114 /*
1115  * ntfs_reparse_bytes
1116  *
1117  * Number of bytes for REPARSE_DATA_BUFFER(IO_REPARSE_TAG_SYMLINK)
1118  * for unicode string of @uni_len length.
1119  */
1120 static inline u32 ntfs_reparse_bytes(u32 uni_len)
1121 {
1122 	/* Header + unicode string + decorated unicode string. */
1123 	return sizeof(short) * (2 * uni_len + 4) +
1124 	       offsetof(struct REPARSE_DATA_BUFFER,
1125 			SymbolicLinkReparseBuffer.PathBuffer);
1126 }
1127 
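/*
 * ntfs_create_reparse_buffer - Build an IO_REPARSE_TAG_SYMLINK reparse
 * buffer from the utf8 path @symname; *nsize receives the buffer size.
 */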
1128 static struct REPARSE_DATA_BUFFER *
1129 ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname,
1130 			   u32 size, u16 *nsize)
1131 {
1132 	int i, err;
1133 	struct REPARSE_DATA_BUFFER *rp;
1134 	__le16 *rp_name;
1135 	typeof(rp->SymbolicLinkReparseBuffer) *rs;
1136 
1137 	rp = kzalloc(ntfs_reparse_bytes(2 * size + 2), GFP_NOFS);
1138 	if (!rp)
1139 		return ERR_PTR(-ENOMEM);
1140 
1141 	rs = &rp->SymbolicLinkReparseBuffer;
1142 	rp_name = rs->PathBuffer;
1143 
1144 	/* Convert link name to UTF-16. */
1145 	err = ntfs_nls_to_utf16(sbi, symname, size,
1146 				(struct cpu_str *)(rp_name - 1), 2 * size,
1147 				UTF16_LITTLE_ENDIAN);
1148 	if (err < 0)
1149 		goto out;
1150 
1151 	/* err = the length of unicode name of symlink. */
1152 	*nsize = ntfs_reparse_bytes(err);
1153 
1154 	if (*nsize > sbi->reparse.max_size) {
1155 		err = -EFBIG;
1156 		goto out;
1157 	}
1158 
1159 	/* Translate Linux '/' into Windows '\'. */
1160 	for (i = 0; i < err; i++) {
1161 		if (rp_name[i] == cpu_to_le16('/'))
1162 			rp_name[i] = cpu_to_le16('\\');
1163 	}
1164 
1165 	rp->ReparseTag = IO_REPARSE_TAG_SYMLINK;
1166 	rp->ReparseDataLength =
1167 		cpu_to_le16(*nsize - offsetof(struct REPARSE_DATA_BUFFER,
1168 					      SymbolicLinkReparseBuffer));
1169 
1170 	/* PrintName + SubstituteName. */
1171 	rs->SubstituteNameOffset = cpu_to_le16(sizeof(short) * err);
1172 	rs->SubstituteNameLength = cpu_to_le16(sizeof(short) * err + 8);
1173 	rs->PrintNameLength = rs->SubstituteNameOffset;
1174 
1175 	/*
1176 	 * TODO: Use relative path if possible to allow Windows to
1177 	 * parse this path.
1178 	 * 0-absolute path 1- relative path (SYMLINK_FLAG_RELATIVE).
1179 	 */
1180 	rs->Flags = 0;
1181 
1182 	memmove(rp_name + err + 4, rp_name, sizeof(short) * err);
1183 
1184 	/* Decorate SubstituteName. */
1185 	rp_name += err;
1186 	rp_name[0] = cpu_to_le16('\\');
1187 	rp_name[1] = cpu_to_le16('?');
1188 	rp_name[2] = cpu_to_le16('?');
1189 	rp_name[3] = cpu_to_le16('\\');
1190 
1191 	return rp;
1192 out:
1193 	kfree(rp);
1194 	return ERR_PTR(err);
1195 }
1196 
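/*
 * ntfs_create_inode - Allocate a new MFT record for a file, directory,
 * symlink or special node, fill its standard info, file name, security
 * and data/root/reparse attributes, and insert the new name into the
 * parent directory index.
 */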
1197 struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
1198 				struct inode *dir, struct dentry *dentry,
1199 				const struct cpu_str *uni, umode_t mode,
1200 				dev_t dev, const char *symname, u32 size,
1201 				struct ntfs_fnd *fnd)
1202 {
1203 	int err;
1204 	struct super_block *sb = dir->i_sb;
1205 	struct ntfs_sb_info *sbi = sb->s_fs_info;
1206 	const struct qstr *name = &dentry->d_name;
1207 	CLST ino = 0;
1208 	struct ntfs_inode *dir_ni = ntfs_i(dir);
1209 	struct ntfs_inode *ni = NULL;
1210 	struct inode *inode = NULL;
1211 	struct ATTRIB *attr;
1212 	struct ATTR_STD_INFO5 *std5;
1213 	struct ATTR_FILE_NAME *fname;
1214 	struct MFT_REC *rec;
1215 	u32 asize, dsize, sd_size;
1216 	enum FILE_ATTRIBUTE fa;
1217 	__le32 security_id = SECURITY_ID_INVALID;
1218 	CLST vcn;
1219 	const void *sd;
1220 	u16 t16, nsize = 0, aid = 0;
1221 	struct INDEX_ROOT *root, *dir_root;
1222 	struct NTFS_DE *e, *new_de = NULL;
1223 	struct REPARSE_DATA_BUFFER *rp = NULL;
1224 	bool rp_inserted = false;
1225 
1226 	ni_lock_dir(dir_ni);
1227 
1228 	dir_root = indx_get_root(&dir_ni->dir, dir_ni, NULL, NULL);
1229 	if (!dir_root) {
1230 		err = -EINVAL;
1231 		goto out1;
1232 	}
1233 
1234 	if (S_ISDIR(mode)) {
1235 		/* Use parent's directory attributes. */
1236 		fa = dir_ni->std_fa | FILE_ATTRIBUTE_DIRECTORY |
1237 		     FILE_ATTRIBUTE_ARCHIVE;
1238 		/*
1239 		 * By default child directory inherits parent attributes.
1240 		 * Root directory is hidden + system.
1241 		 * Make an exception for children in root.
1242 		 */
1243 		if (dir->i_ino == MFT_REC_ROOT)
1244 			fa &= ~(FILE_ATTRIBUTE_HIDDEN | FILE_ATTRIBUTE_SYSTEM);
1245 	} else if (S_ISLNK(mode)) {
1246 		/* It is a good idea for the link to be the same type (file/dir) as its target. */
1247 		fa = FILE_ATTRIBUTE_REPARSE_POINT;
1248 
1249 		/*
1250 		 * Linux: there are dir/file/symlink and so on.
1251 		 * NTFS: symlinks are "dir + reparse" or "file + reparse"
1252 		 * It is a good idea to create:
1253 		 * dir + reparse if 'symname' points to directory
1254 		 * or
1255 		 * file + reparse if 'symname' points to file
1256 		 * Unfortunately kern_path hangs if symname contains 'dir'.
1257 		 */
1258 
1259 		/*
1260 		 *	struct path path;
1261 		 *
1262 		 *	if (!kern_path(symname, LOOKUP_FOLLOW, &path)){
1263 		 *		struct inode *target = d_inode(path.dentry);
1264 		 *
1265 		 *		if (S_ISDIR(target->i_mode))
1266 		 *			fa |= FILE_ATTRIBUTE_DIRECTORY;
1267 		 *		// if ( target->i_sb == sb ){
1268 		 *		//	use relative path?
1269 		 *		// }
1270 		 *		path_put(&path);
1271 		 *	}
1272 		 */
1273 	} else if (S_ISREG(mode)) {
1274 		if (sbi->options->sparse) {
1275 			/* Sparse regular file, because of the 'sparse' mount option. */
1276 			fa = FILE_ATTRIBUTE_SPARSE_FILE |
1277 			     FILE_ATTRIBUTE_ARCHIVE;
1278 		} else if (dir_ni->std_fa & FILE_ATTRIBUTE_COMPRESSED) {
1279 			/* Compressed regular file, if parent is compressed. */
1280 			fa = FILE_ATTRIBUTE_COMPRESSED | FILE_ATTRIBUTE_ARCHIVE;
1281 		} else {
1282 			/* Regular file, default attributes. */
1283 			fa = FILE_ATTRIBUTE_ARCHIVE;
1284 		}
1285 	} else {
1286 		fa = FILE_ATTRIBUTE_ARCHIVE;
1287 	}
1288 
1289 	if (!(mode & 0222))
1290 		fa |= FILE_ATTRIBUTE_READONLY;
1291 
1292 	/* Allocate PATH_MAX bytes. */
1293 	new_de = __getname();
1294 	if (!new_de) {
1295 		err = -ENOMEM;
1296 		goto out1;
1297 	}
1298 
1299 	/* Mark rw ntfs as dirty. It will be cleared at umount. */
1300 	ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
1301 
1302 	/* Step 1: allocate and fill new mft record. */
1303 	err = ntfs_look_free_mft(sbi, &ino, false, NULL, NULL);
1304 	if (err)
1305 		goto out2;
1306 
1307 	ni = ntfs_new_inode(sbi, ino, fa & FILE_ATTRIBUTE_DIRECTORY);
1308 	if (IS_ERR(ni)) {
1309 		err = PTR_ERR(ni);
1310 		ni = NULL;
1311 		goto out3;
1312 	}
1313 	inode = &ni->vfs_inode;
1314 	inode_init_owner(mnt_userns, inode, dir, mode);
1315 	mode = inode->i_mode;
1316 
1317 	inode->i_atime = inode->i_mtime = inode->i_ctime = ni->i_crtime =
1318 		current_time(inode);
1319 
1320 	rec = ni->mi.mrec;
1321 	rec->hard_links = cpu_to_le16(1);
1322 	attr = Add2Ptr(rec, le16_to_cpu(rec->attr_off));
1323 
1324 	/* Get default security id. */
1325 	sd = s_default_security;
1326 	sd_size = sizeof(s_default_security);
1327 
1328 	if (is_ntfs3(sbi)) {
1329 		security_id = dir_ni->std_security_id;
1330 		if (le32_to_cpu(security_id) < SECURITY_ID_FIRST) {
1331 			security_id = sbi->security.def_security_id;
1332 
1333 			if (security_id == SECURITY_ID_INVALID &&
1334 			    !ntfs_insert_security(sbi, sd, sd_size,
1335 						  &security_id, NULL))
1336 				sbi->security.def_security_id = security_id;
1337 		}
1338 	}
1339 
1340 	/* Insert standard info. */
1341 	std5 = Add2Ptr(attr, SIZEOF_RESIDENT);
1342 
1343 	if (security_id == SECURITY_ID_INVALID) {
1344 		dsize = sizeof(struct ATTR_STD_INFO);
1345 	} else {
1346 		dsize = sizeof(struct ATTR_STD_INFO5);
1347 		std5->security_id = security_id;
1348 		ni->std_security_id = security_id;
1349 	}
1350 	asize = SIZEOF_RESIDENT + dsize;
1351 
1352 	attr->type = ATTR_STD;
1353 	attr->size = cpu_to_le32(asize);
1354 	attr->id = cpu_to_le16(aid++);
1355 	attr->res.data_off = SIZEOF_RESIDENT_LE;
1356 	attr->res.data_size = cpu_to_le32(dsize);
1357 
1358 	std5->cr_time = std5->m_time = std5->c_time = std5->a_time =
1359 		kernel2nt(&inode->i_atime);
1360 
1361 	ni->std_fa = fa;
1362 	std5->fa = fa;
1363 
1364 	attr = Add2Ptr(attr, asize);
1365 
1366 	/* Insert file name. */
1367 	err = fill_name_de(sbi, new_de, name, uni);
1368 	if (err)
1369 		goto out4;
1370 
1371 	mi_get_ref(&ni->mi, &new_de->ref);
1372 
1373 	fname = (struct ATTR_FILE_NAME *)(new_de + 1);
1374 	mi_get_ref(&dir_ni->mi, &fname->home);
1375 	fname->dup.cr_time = fname->dup.m_time = fname->dup.c_time =
1376 		fname->dup.a_time = std5->cr_time;
1377 	fname->dup.alloc_size = fname->dup.data_size = 0;
1378 	fname->dup.fa = std5->fa;
1379 	fname->dup.ea_size = fname->dup.reparse = 0;
1380 
1381 	dsize = le16_to_cpu(new_de->key_size);
1382 	asize = ALIGN(SIZEOF_RESIDENT + dsize, 8);
1383 
1384 	attr->type = ATTR_NAME;
1385 	attr->size = cpu_to_le32(asize);
1386 	attr->res.data_off = SIZEOF_RESIDENT_LE;
1387 	attr->res.flags = RESIDENT_FLAG_INDEXED;
1388 	attr->id = cpu_to_le16(aid++);
1389 	attr->res.data_size = cpu_to_le32(dsize);
1390 	memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), fname, dsize);
1391 
1392 	attr = Add2Ptr(attr, asize);
1393 
1394 	if (security_id == SECURITY_ID_INVALID) {
1395 		/* Insert security attribute. */
1396 		asize = SIZEOF_RESIDENT + ALIGN(sd_size, 8);
1397 
1398 		attr->type = ATTR_SECURE;
1399 		attr->size = cpu_to_le32(asize);
1400 		attr->id = cpu_to_le16(aid++);
1401 		attr->res.data_off = SIZEOF_RESIDENT_LE;
1402 		attr->res.data_size = cpu_to_le32(sd_size);
1403 		memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), sd, sd_size);
1404 
1405 		attr = Add2Ptr(attr, asize);
1406 	}
1407 
1408 	attr->id = cpu_to_le16(aid++);
1409 	if (fa & FILE_ATTRIBUTE_DIRECTORY) {
1410 		/*
1411 		 * Regular directory or symlink to directory.
1412 		 * Create root attribute.
1413 		 */
1414 		dsize = sizeof(struct INDEX_ROOT) + sizeof(struct NTFS_DE);
1415 		asize = sizeof(I30_NAME) + SIZEOF_RESIDENT + dsize;
1416 
1417 		attr->type = ATTR_ROOT;
1418 		attr->size = cpu_to_le32(asize);
1419 
1420 		attr->name_len = ARRAY_SIZE(I30_NAME);
1421 		attr->name_off = SIZEOF_RESIDENT_LE;
1422 		attr->res.data_off =
1423 			cpu_to_le16(sizeof(I30_NAME) + SIZEOF_RESIDENT);
1424 		attr->res.data_size = cpu_to_le32(dsize);
1425 		memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), I30_NAME,
1426 		       sizeof(I30_NAME));
1427 
1428 		root = Add2Ptr(attr, sizeof(I30_NAME) + SIZEOF_RESIDENT);
1429 		memcpy(root, dir_root, offsetof(struct INDEX_ROOT, ihdr));
1430 		root->ihdr.de_off =
1431 			cpu_to_le32(sizeof(struct INDEX_HDR)); // 0x10
1432 		root->ihdr.used = cpu_to_le32(sizeof(struct INDEX_HDR) +
1433 					      sizeof(struct NTFS_DE));
1434 		root->ihdr.total = root->ihdr.used;
1435 
1436 		e = Add2Ptr(root, sizeof(struct INDEX_ROOT));
1437 		e->size = cpu_to_le16(sizeof(struct NTFS_DE));
1438 		e->flags = NTFS_IE_LAST;
1439 	} else if (S_ISLNK(mode)) {
1440 		/*
1441 		 * Symlink to file.
1442 		 * Create empty resident data attribute.
1443 		 */
1444 		asize = SIZEOF_RESIDENT;
1445 
1446 		/* Insert empty ATTR_DATA */
1447 		attr->type = ATTR_DATA;
1448 		attr->size = cpu_to_le32(SIZEOF_RESIDENT);
1449 		attr->name_off = SIZEOF_RESIDENT_LE;
1450 		attr->res.data_off = SIZEOF_RESIDENT_LE;
1451 	} else if (S_ISREG(mode)) {
1452 		/*
1453 		 * Regular file. Create empty non resident data attribute.
1454 		 */
1455 		attr->type = ATTR_DATA;
1456 		attr->non_res = 1;
1457 		attr->nres.evcn = cpu_to_le64(-1ll);
1458 		if (fa & FILE_ATTRIBUTE_SPARSE_FILE) {
1459 			attr->size = cpu_to_le32(SIZEOF_NONRESIDENT_EX + 8);
1460 			attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
1461 			attr->flags = ATTR_FLAG_SPARSED;
1462 			asize = SIZEOF_NONRESIDENT_EX + 8;
1463 		} else if (fa & FILE_ATTRIBUTE_COMPRESSED) {
1464 			attr->size = cpu_to_le32(SIZEOF_NONRESIDENT_EX + 8);
1465 			attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
1466 			attr->flags = ATTR_FLAG_COMPRESSED;
1467 			attr->nres.c_unit = COMPRESSION_UNIT;
1468 			asize = SIZEOF_NONRESIDENT_EX + 8;
1469 		} else {
1470 			attr->size = cpu_to_le32(SIZEOF_NONRESIDENT + 8);
1471 			attr->name_off = SIZEOF_NONRESIDENT_LE;
1472 			asize = SIZEOF_NONRESIDENT + 8;
1473 		}
1474 		attr->nres.run_off = attr->name_off;
1475 	} else {
1476 		/*
1477 		 * Node. Create empty resident data attribute.
1478 		 */
1479 		attr->type = ATTR_DATA;
1480 		attr->size = cpu_to_le32(SIZEOF_RESIDENT);
1481 		attr->name_off = SIZEOF_RESIDENT_LE;
1482 		if (fa & FILE_ATTRIBUTE_SPARSE_FILE)
1483 			attr->flags = ATTR_FLAG_SPARSED;
1484 		else if (fa & FILE_ATTRIBUTE_COMPRESSED)
1485 			attr->flags = ATTR_FLAG_COMPRESSED;
1486 		attr->res.data_off = SIZEOF_RESIDENT_LE;
1487 		asize = SIZEOF_RESIDENT;
1488 		ni->ni_flags |= NI_FLAG_RESIDENT;
1489 	}
1490 
1491 	if (S_ISDIR(mode)) {
1492 		ni->ni_flags |= NI_FLAG_DIR;
1493 		err = indx_init(&ni->dir, sbi, attr, INDEX_MUTEX_I30);
1494 		if (err)
1495 			goto out4;
1496 	} else if (S_ISLNK(mode)) {
1497 		rp = ntfs_create_reparse_buffer(sbi, symname, size, &nsize);
1498 
1499 		if (IS_ERR(rp)) {
1500 			err = PTR_ERR(rp);
1501 			rp = NULL;
1502 			goto out4;
1503 		}
1504 
1505 		/*
1506 		 * Insert ATTR_REPARSE.
1507 		 */
1508 		attr = Add2Ptr(attr, asize);
1509 		attr->type = ATTR_REPARSE;
1510 		attr->id = cpu_to_le16(aid++);
1511 
1512 		/* Resident or non resident? */
1513 		asize = ALIGN(SIZEOF_RESIDENT + nsize, 8);
1514 		t16 = PtrOffset(rec, attr);
1515 
1516 		/*
1517 		 * Below function 'ntfs_save_wsl_perm' requires 0x78 bytes.
1518 		 * It is a good idea to keep extended attributes resident.
1519 		 */
1520 		if (asize + t16 + 0x78 + 8 > sbi->record_size) {
1521 			CLST alen;
1522 			CLST clst = bytes_to_cluster(sbi, nsize);
1523 
1524 			/* Bytes per runs. */
1525 			t16 = sbi->record_size - t16 - SIZEOF_NONRESIDENT;
1526 
1527 			attr->non_res = 1;
1528 			attr->nres.evcn = cpu_to_le64(clst - 1);
1529 			attr->name_off = SIZEOF_NONRESIDENT_LE;
1530 			attr->nres.run_off = attr->name_off;
1531 			attr->nres.data_size = cpu_to_le64(nsize);
1532 			attr->nres.valid_size = attr->nres.data_size;
1533 			attr->nres.alloc_size =
1534 				cpu_to_le64(ntfs_up_cluster(sbi, nsize));
1535 
1536 			err = attr_allocate_clusters(sbi, &ni->file.run, 0, 0,
1537 						     clst, NULL, 0, &alen, 0,
1538 						     NULL);
1539 			if (err)
1540 				goto out5;
1541 
1542 			err = run_pack(&ni->file.run, 0, clst,
1543 				       Add2Ptr(attr, SIZEOF_NONRESIDENT), t16,
1544 				       &vcn);
1545 			if (err < 0)
1546 				goto out5;
1547 
1548 			if (vcn != clst) {
1549 				err = -EINVAL;
1550 				goto out5;
1551 			}
1552 
1553 			asize = SIZEOF_NONRESIDENT + ALIGN(err, 8);
1554 		} else {
1555 			attr->res.data_off = SIZEOF_RESIDENT_LE;
1556 			attr->res.data_size = cpu_to_le32(nsize);
1557 			memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), rp, nsize);
1558 			nsize = 0;
1559 		}
1560 		/* Size of symlink equals the length of input string. */
1561 		inode->i_size = size;
1562 
1563 		attr->size = cpu_to_le32(asize);
1564 
1565 		err = ntfs_insert_reparse(sbi, IO_REPARSE_TAG_SYMLINK,
1566 					  &new_de->ref);
1567 		if (err)
1568 			goto out5;
1569 
1570 		rp_inserted = true;
1571 	}
1572 
1573 	attr = Add2Ptr(attr, asize);
1574 	attr->type = ATTR_END;
1575 
1576 	rec->used = cpu_to_le32(PtrOffset(rec, attr) + 8);
1577 	rec->next_attr_id = cpu_to_le16(aid);
1578 
1579 	/* Step 2: Add new name in index. */
1580 	err = indx_insert_entry(&dir_ni->dir, dir_ni, new_de, sbi, fnd, 0);
1581 	if (err)
1582 		goto out6;
1583 
1584 	/* Unlock parent directory before ntfs_init_acl. */
1585 	ni_unlock(dir_ni);
1586 
1587 	inode->i_generation = le16_to_cpu(rec->seq);
1588 
1589 	dir->i_mtime = dir->i_ctime = inode->i_atime;
1590 
1591 	if (S_ISDIR(mode)) {
1592 		inode->i_op = &ntfs_dir_inode_operations;
1593 		inode->i_fop = &ntfs_dir_operations;
1594 	} else if (S_ISLNK(mode)) {
1595 		inode->i_op = &ntfs_link_inode_operations;
1596 		inode->i_fop = NULL;
1597 		inode->i_mapping->a_ops = &ntfs_aops;
1598 		inode->i_size = size;
1599 		inode_nohighmem(inode);
1600 	} else if (S_ISREG(mode)) {
1601 		inode->i_op = &ntfs_file_inode_operations;
1602 		inode->i_fop = &ntfs_file_operations;
1603 		inode->i_mapping->a_ops =
1604 			is_compressed(ni) ? &ntfs_aops_cmpr : &ntfs_aops;
1605 		init_rwsem(&ni->file.run_lock);
1606 	} else {
1607 		inode->i_op = &ntfs_special_inode_operations;
1608 		init_special_inode(inode, mode, dev);
1609 	}
1610 
1611 #ifdef CONFIG_NTFS3_FS_POSIX_ACL
1612 	if (!S_ISLNK(mode) && (sb->s_flags & SB_POSIXACL)) {
1613 		err = ntfs_init_acl(mnt_userns, inode, dir);
1614 		if (err)
1615 			goto out7;
1616 	} else
1617 #endif
1618 	{
1619 		inode->i_flags |= S_NOSEC;
1620 	}
1621 
1622 	/* Write non resident data. */
1623 	if (nsize) {
1624 		err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rp, nsize, 0);
1625 		if (err)
1626 			goto out7;
1627 	}
1628 
1629 	/*
1630 	 * Call 'd_instantiate' after inode->i_op is set
1631 	 * but before finish_open.
1632 	 */
1633 	d_instantiate(dentry, inode);
1634 
1635 	ntfs_save_wsl_perm(inode);
1636 	mark_inode_dirty(dir);
1637 	mark_inode_dirty(inode);
1638 
1639 	/* Normal exit. */
1640 	goto out2;
1641 
1642 out7:
1643 
1644 	/* Undo 'indx_insert_entry'. */
1645 	ni_lock_dir(dir_ni);
1646 	indx_delete_entry(&dir_ni->dir, dir_ni, new_de + 1,
1647 			  le16_to_cpu(new_de->key_size), sbi);
1648 	/* ni_unlock(dir_ni); will be called later. */
1649 out6:
1650 	if (rp_inserted)
1651 		ntfs_remove_reparse(sbi, IO_REPARSE_TAG_SYMLINK, &new_de->ref);
1652 
1653 out5:
1654 	if (!S_ISDIR(mode))
1655 		run_deallocate(sbi, &ni->file.run, false);
1656 
1657 out4:
1658 	clear_rec_inuse(rec);
1659 	clear_nlink(inode);
1660 	ni->mi.dirty = false;
1661 	discard_new_inode(inode);
1662 out3:
1663 	ntfs_mark_rec_free(sbi, ino);
1664 
1665 out2:
1666 	__putname(new_de);
1667 	kfree(rp);
1668 
1669 out1:
1670 	if (err) {
1671 		ni_unlock(dir_ni);
1672 		return ERR_PTR(err);
1673 	}
1674 
1675 	unlock_new_inode(inode);
1676 
1677 	return inode;
1678 }
1679 
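/*
 * ntfs_link_inode - Add a hard link: insert a new name for @inode into
 * the parent directory of @dentry.
 */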
1680 int ntfs_link_inode(struct inode *inode, struct dentry *dentry)
1681 {
1682 	int err;
1683 	struct ntfs_inode *ni = ntfs_i(inode);
1684 	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
1685 	struct NTFS_DE *de;
1686 	struct ATTR_FILE_NAME *de_name;
1687 
1688 	/* Allocate PATH_MAX bytes. */
1689 	de = __getname();
1690 	if (!de)
1691 		return -ENOMEM;
1692 
1693 	/* Mark rw ntfs as dirty. It will be cleared at umount. */
1694 	ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
1695 
1696 	/* Construct 'de'. */
1697 	err = fill_name_de(sbi, de, &dentry->d_name, NULL);
1698 	if (err)
1699 		goto out;
1700 
1701 	de_name = (struct ATTR_FILE_NAME *)(de + 1);
1702 	/* Fill duplicate info. */
1703 	de_name->dup.cr_time = de_name->dup.m_time = de_name->dup.c_time =
1704 		de_name->dup.a_time = kernel2nt(&inode->i_ctime);
1705 	de_name->dup.alloc_size = de_name->dup.data_size =
1706 		cpu_to_le64(inode->i_size);
1707 	de_name->dup.fa = ni->std_fa;
1708 	de_name->dup.ea_size = de_name->dup.reparse = 0;
1709 
1710 	err = ni_add_name(ntfs_i(d_inode(dentry->d_parent)), ni, de);
1711 out:
1712 	__putname(de);
1713 	return err;
1714 }
1715 
1716 /*
1717  * ntfs_unlink_inode
1718  *
1719  * inode_operations::unlink
1720  * inode_operations::rmdir
1721  */
1722 int ntfs_unlink_inode(struct inode *dir, const struct dentry *dentry)
1723 {
1724 	int err;
1725 	struct ntfs_sb_info *sbi = dir->i_sb->s_fs_info;
1726 	struct inode *inode = d_inode(dentry);
1727 	struct ntfs_inode *ni = ntfs_i(inode);
1728 	struct ntfs_inode *dir_ni = ntfs_i(dir);
1729 	struct NTFS_DE *de, *de2 = NULL;
1730 	int undo_remove;
1731 
1732 	if (ntfs_is_meta_file(sbi, ni->mi.rno))
1733 		return -EINVAL;
1734 
1735 	/* Allocate PATH_MAX bytes. */
1736 	de = __getname();
1737 	if (!de)
1738 		return -ENOMEM;
1739 
1740 	ni_lock(ni);
1741 
1742 	if (S_ISDIR(inode->i_mode) && !dir_is_empty(inode)) {
1743 		err = -ENOTEMPTY;
1744 		goto out;
1745 	}
1746 
1747 	err = fill_name_de(sbi, de, &dentry->d_name, NULL);
1748 	if (err < 0)
1749 		goto out;
1750 
1751 	undo_remove = 0;
1752 	err = ni_remove_name(dir_ni, ni, de, &de2, &undo_remove);
1753 
1754 	if (!err) {
1755 		drop_nlink(inode);
1756 		dir->i_mtime = dir->i_ctime = current_time(dir);
1757 		mark_inode_dirty(dir);
1758 		inode->i_ctime = dir->i_ctime;
1759 		if (inode->i_nlink)
1760 			mark_inode_dirty(inode);
1761 	} else if (!ni_remove_name_undo(dir_ni, ni, de, de2, undo_remove)) {
1762 		make_bad_inode(inode);
1763 		ntfs_inode_err(inode, "failed to undo unlink");
1764 		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
1765 	} else {
1766 		if (ni_is_dirty(dir))
1767 			mark_inode_dirty(dir);
1768 		if (ni_is_dirty(inode))
1769 			mark_inode_dirty(inode);
1770 	}
1771 
1772 out:
1773 	ni_unlock(ni);
1774 	__putname(de);
1775 	return err;
1776 }
1777 
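/*
 * ntfs_evict_inode - Write back a still-linked inode and release its
 * in-core state when it is evicted from the inode cache.
 */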
1778 void ntfs_evict_inode(struct inode *inode)
1779 {
1780 	truncate_inode_pages_final(&inode->i_data);
1781 
1782 	if (inode->i_nlink)
1783 		_ni_write_inode(inode, inode_needs_sync(inode));
1784 
1785 	invalidate_inode_buffers(inode);
1786 	clear_inode(inode);
1787 
1788 	ni_clear(ntfs_i(inode));
1789 }
1790 
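/*
 * ntfs_readlink_hlp - Read the reparse point of @inode and convert the
 * target path to a nul-terminated utf8 string in @buffer, translating
 * '\' into '/'. Returns the string length or an error code.
 */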
1791 static noinline int ntfs_readlink_hlp(struct inode *inode, char *buffer,
1792 				      int buflen)
1793 {
1794 	int i, err = -EINVAL;
1795 	struct ntfs_inode *ni = ntfs_i(inode);
1796 	struct super_block *sb = inode->i_sb;
1797 	struct ntfs_sb_info *sbi = sb->s_fs_info;
1798 	u64 size;
1799 	u16 ulen = 0;
1800 	void *to_free = NULL;
1801 	struct REPARSE_DATA_BUFFER *rp;
1802 	const __le16 *uname;
1803 	struct ATTRIB *attr;
1804 
1805 	/* Reparse data present. Try to parse it. */
1806 	static_assert(!offsetof(struct REPARSE_DATA_BUFFER, ReparseTag));
1807 	static_assert(sizeof(u32) == sizeof(rp->ReparseTag));
1808 
1809 	*buffer = 0;
1810 
1811 	attr = ni_find_attr(ni, NULL, NULL, ATTR_REPARSE, NULL, 0, NULL, NULL);
1812 	if (!attr)
1813 		goto out;
1814 
1815 	if (!attr->non_res) {
1816 		rp = resident_data_ex(attr, sizeof(struct REPARSE_DATA_BUFFER));
1817 		if (!rp)
1818 			goto out;
1819 		size = le32_to_cpu(attr->res.data_size);
1820 	} else {
1821 		size = le64_to_cpu(attr->nres.data_size);
1822 		rp = NULL;
1823 	}
1824 
1825 	if (size > sbi->reparse.max_size || size <= sizeof(u32))
1826 		goto out;
1827 
1828 	if (!rp) {
1829 		rp = kmalloc(size, GFP_NOFS);
1830 		if (!rp) {
1831 			err = -ENOMEM;
1832 			goto out;
1833 		}
1834 		to_free = rp;
1835 		/* Read into a temporary buffer. */
1836 		err = ntfs_read_run_nb(sbi, &ni->file.run, 0, rp, size, NULL);
1837 		if (err)
1838 			goto out;
1839 	}
1840 
1841 	/* Microsoft Tag. */
1842 	switch (rp->ReparseTag) {
1843 	case IO_REPARSE_TAG_MOUNT_POINT:
1844 		/* Mount points and junctions. */
1845 		/* Can we use 'Rp->MountPointReparseBuffer.PrintNameLength'? */
1846 		if (size <= offsetof(struct REPARSE_DATA_BUFFER,
1847 				     MountPointReparseBuffer.PathBuffer))
1848 			goto out;
1849 		uname = Add2Ptr(rp,
1850 				offsetof(struct REPARSE_DATA_BUFFER,
1851 					 MountPointReparseBuffer.PathBuffer) +
1852 					le16_to_cpu(rp->MountPointReparseBuffer
1853 							    .PrintNameOffset));
1854 		ulen = le16_to_cpu(rp->MountPointReparseBuffer.PrintNameLength);
1855 		break;
1856 
1857 	case IO_REPARSE_TAG_SYMLINK:
1858 		/* FolderSymbolicLink */
1859 		/* Can we use 'Rp->SymbolicLinkReparseBuffer.PrintNameLength'? */
1860 		if (size <= offsetof(struct REPARSE_DATA_BUFFER,
1861 				     SymbolicLinkReparseBuffer.PathBuffer))
1862 			goto out;
1863 		uname = Add2Ptr(
1864 			rp, offsetof(struct REPARSE_DATA_BUFFER,
1865 				     SymbolicLinkReparseBuffer.PathBuffer) +
1866 				    le16_to_cpu(rp->SymbolicLinkReparseBuffer
1867 							.PrintNameOffset));
1868 		ulen = le16_to_cpu(
1869 			rp->SymbolicLinkReparseBuffer.PrintNameLength);
1870 		break;
1871 
1872 	case IO_REPARSE_TAG_CLOUD:
1873 	case IO_REPARSE_TAG_CLOUD_1:
1874 	case IO_REPARSE_TAG_CLOUD_2:
1875 	case IO_REPARSE_TAG_CLOUD_3:
1876 	case IO_REPARSE_TAG_CLOUD_4:
1877 	case IO_REPARSE_TAG_CLOUD_5:
1878 	case IO_REPARSE_TAG_CLOUD_6:
1879 	case IO_REPARSE_TAG_CLOUD_7:
1880 	case IO_REPARSE_TAG_CLOUD_8:
1881 	case IO_REPARSE_TAG_CLOUD_9:
1882 	case IO_REPARSE_TAG_CLOUD_A:
1883 	case IO_REPARSE_TAG_CLOUD_B:
1884 	case IO_REPARSE_TAG_CLOUD_C:
1885 	case IO_REPARSE_TAG_CLOUD_D:
1886 	case IO_REPARSE_TAG_CLOUD_E:
1887 	case IO_REPARSE_TAG_CLOUD_F:
1888 		err = sizeof("OneDrive") - 1;
1889 		if (err > buflen)
1890 			err = buflen;
1891 		memcpy(buffer, "OneDrive", err);
1892 		goto out;
1893 
1894 	default:
1895 		if (IsReparseTagMicrosoft(rp->ReparseTag)) {
1896 			/* Unknown Microsoft Tag. */
1897 			goto out;
1898 		}
1899 		if (!IsReparseTagNameSurrogate(rp->ReparseTag) ||
1900 		    size <= sizeof(struct REPARSE_POINT)) {
1901 			goto out;
1902 		}
1903 
1904 		/* Users tag. */
1905 		uname = Add2Ptr(rp, sizeof(struct REPARSE_POINT));
1906 		ulen = le16_to_cpu(rp->ReparseDataLength) -
1907 		       sizeof(struct REPARSE_POINT);
1908 	}
1909 
1910 	/* Convert ulen from bytes to UNICODE chars. */
1911 	ulen >>= 1;
1912 
1913 	/* Check that name is available. */
1914 	if (!ulen || uname + ulen > (__le16 *)Add2Ptr(rp, size))
1915 		goto out;
1916 
1917 	/* If name is already zero terminated then truncate it now. */
1918 	if (!uname[ulen - 1])
1919 		ulen -= 1;
1920 
1921 	err = ntfs_utf16_to_nls(sbi, uname, ulen, buffer, buflen);
1922 
1923 	if (err < 0)
1924 		goto out;
1925 
1926 	/* Translate Windows '\' into Linux '/'. */
1927 	for (i = 0; i < err; i++) {
1928 		if (buffer[i] == '\\')
1929 			buffer[i] = '/';
1930 	}
1931 
1932 	/* Always set last zero. */
1933 	buffer[err] = 0;
1934 out:
1935 	kfree(to_free);
1936 	return err;
1937 }
1938 
1939 static const char *ntfs_get_link(struct dentry *de, struct inode *inode,
1940 				 struct delayed_call *done)
1941 {
1942 	int err;
1943 	char *ret;
1944 
1945 	if (!de)
1946 		return ERR_PTR(-ECHILD);
1947 
1948 	ret = kmalloc(PAGE_SIZE, GFP_NOFS);
1949 	if (!ret)
1950 		return ERR_PTR(-ENOMEM);
1951 
1952 	err = ntfs_readlink_hlp(inode, ret, PAGE_SIZE);
1953 	if (err < 0) {
1954 		kfree(ret);
1955 		return ERR_PTR(err);
1956 	}
1957 
1958 	set_delayed_call(done, kfree_link, ret);
1959 
1960 	return ret;
1961 }
1962 
1963 // clang-format off
1964 const struct inode_operations ntfs_link_inode_operations = {
1965 	.get_link	= ntfs_get_link,
1966 	.setattr	= ntfs3_setattr,
1967 	.listxattr	= ntfs_listxattr,
1968 	.permission	= ntfs_permission,
1969 };
1970 
1971 const struct address_space_operations ntfs_aops = {
1972 	.readpage	= ntfs_readpage,
1973 	.readahead	= ntfs_readahead,
1974 	.writepage	= ntfs_writepage,
1975 	.writepages	= ntfs_writepages,
1976 	.write_begin	= ntfs_write_begin,
1977 	.write_end	= ntfs_write_end,
1978 	.direct_IO	= ntfs_direct_IO,
1979 	.bmap		= ntfs_bmap,
1980 	.set_page_dirty = __set_page_dirty_buffers,
1981 };
1982 
1983 const struct address_space_operations ntfs_aops_cmpr = {
1984 	.readpage	= ntfs_readpage,
1985 	.readahead	= ntfs_readahead,
1986 };
1987 // clang-format on
1988