1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *
4  * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
5  *
6  * TODO: Merge attr_set_size/attr_data_get_block/attr_allocate_frame?
7  */
8 
9 #include <linux/fs.h>
10 #include <linux/slab.h>
11 #include <linux/kernel.h>
12 
13 #include "debug.h"
14 #include "ntfs.h"
15 #include "ntfs_fs.h"
16 
17 /*
18  * External NTFS_MIN_LOG2_OF_CLUMP/NTFS_MAX_LOG2_OF_CLUMP can be defined to tune
19  * the preallocation algorithm.
20  */
21 #ifndef NTFS_MIN_LOG2_OF_CLUMP
22 #define NTFS_MIN_LOG2_OF_CLUMP 16
23 #endif
24 
25 #ifndef NTFS_MAX_LOG2_OF_CLUMP
26 #define NTFS_MAX_LOG2_OF_CLUMP 26
27 #endif
28 
29 // 16M
30 #define NTFS_CLUMP_MIN (1 << (NTFS_MIN_LOG2_OF_CLUMP + 8))
31 // 16G
32 #define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8))
33 
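/*
 * Example with the default clump limits above: a size of at most 16 MiB is
 * rounded up to the next 64 KiB boundary, a size of at least 16 GiB is
 * rounded up to the next 64 MiB boundary, and sizes in between are rounded
 * up to a power-of-two step derived from the size itself.
 */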
34 static inline u64 get_pre_allocated(u64 size)
35 {
36 	u32 clump;
37 	u8 align_shift;
38 	u64 ret;
39 
40 	if (size <= NTFS_CLUMP_MIN) {
41 		clump = 1 << NTFS_MIN_LOG2_OF_CLUMP;
42 		align_shift = NTFS_MIN_LOG2_OF_CLUMP;
43 	} else if (size >= NTFS_CLUMP_MAX) {
44 		clump = 1 << NTFS_MAX_LOG2_OF_CLUMP;
45 		align_shift = NTFS_MAX_LOG2_OF_CLUMP;
46 	} else {
47 		align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 +
48 			      __ffs(size >> (8 + NTFS_MIN_LOG2_OF_CLUMP));
49 		clump = 1u << align_shift;
50 	}
51 
52 	ret = (((size + clump - 1) >> align_shift)) << align_shift;
53 
54 	return ret;
55 }
56 
57 /*
58  * attr_must_be_resident
59  *
60  * Return: True if attribute must be resident.
61  */
62 static inline bool attr_must_be_resident(struct ntfs_sb_info *sbi,
63 					 enum ATTR_TYPE type)
64 {
65 	const struct ATTR_DEF_ENTRY *de;
66 
67 	switch (type) {
68 	case ATTR_STD:
69 	case ATTR_NAME:
70 	case ATTR_ID:
71 	case ATTR_LABEL:
72 	case ATTR_VOL_INFO:
73 	case ATTR_ROOT:
74 	case ATTR_EA_INFO:
75 		return true;
76 	default:
77 		de = ntfs_query_def(sbi, type);
78 		if (de && (de->flags & NTFS_ATTR_MUST_BE_RESIDENT))
79 			return true;
80 		return false;
81 	}
82 }
83 
84 /*
85  * attr_load_runs - Load all runs stored in @attr.
86  */
87 int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
88 		   struct runs_tree *run, const CLST *vcn)
89 {
90 	int err;
91 	CLST svcn = le64_to_cpu(attr->nres.svcn);
92 	CLST evcn = le64_to_cpu(attr->nres.evcn);
93 	u32 asize;
94 	u16 run_off;
95 
96 	if (svcn >= evcn + 1 || run_is_mapped_full(run, svcn, evcn))
97 		return 0;
98 
99 	if (vcn && (evcn < *vcn || *vcn < svcn))
100 		return -EINVAL;
101 
102 	asize = le32_to_cpu(attr->size);
103 	run_off = le16_to_cpu(attr->nres.run_off);
104 
105 	if (run_off > asize)
106 		return -EINVAL;
107 
108 	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
109 			    vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
110 			    asize - run_off);
111 	if (err < 0)
112 		return err;
113 
114 	return 0;
115 }
116 
117 /*
118  * run_deallocate_ex - Deallocate clusters.
119  */
120 static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
121 			     CLST vcn, CLST len, CLST *done, bool trim)
122 {
123 	int err = 0;
124 	CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0;
125 	size_t idx;
126 
127 	if (!len)
128 		goto out;
129 
130 	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
131 failed:
132 		run_truncate(run, vcn0);
133 		err = -EINVAL;
134 		goto out;
135 	}
136 
137 	for (;;) {
138 		if (clen > len)
139 			clen = len;
140 
141 		if (!clen) {
142 			err = -EINVAL;
143 			goto out;
144 		}
145 
146 		if (lcn != SPARSE_LCN) {
147 			mark_as_free_ex(sbi, lcn, clen, trim);
148 			dn += clen;
149 		}
150 
151 		len -= clen;
152 		if (!len)
153 			break;
154 
155 		vcn_next = vcn + clen;
156 		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
157 		    vcn != vcn_next) {
158 			/* Save memory - don't load entire run. */
159 			goto failed;
160 		}
161 	}
162 
163 out:
164 	if (done)
165 		*done += dn;
166 
167 	return err;
168 }
169 
170 /*
171  * attr_allocate_clusters - Find free space, mark it as used and store in @run.
172  */
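/*
 * Note on the parameters below: @pre_alloc extra clusters are requested
 * opportunistically and silently dropped on ENOSPC, and a non-zero @fr stops
 * the loop once that many new fragments have been added (callers typically
 * size @fr so that the packed run still fits into the MFT record).
 */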
173 int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
174 			   CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
175 			   enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
176 			   CLST *new_lcn)
177 {
178 	int err;
179 	CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
180 	struct wnd_bitmap *wnd = &sbi->used.bitmap;
181 	size_t cnt = run->count;
182 
183 	for (;;) {
184 		err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,
185 					       opt);
186 
187 		if (err == -ENOSPC && pre) {
188 			pre = 0;
189 			if (*pre_alloc)
190 				*pre_alloc = 0;
191 			continue;
192 		}
193 
194 		if (err)
195 			goto out;
196 
197 		if (new_lcn && vcn == vcn0)
198 			*new_lcn = lcn;
199 
200 		/* Add new fragment into run storage. */
201 		if (!run_add_entry(run, vcn, lcn, flen, opt == ALLOCATE_MFT)) {
202 			/* Undo last 'ntfs_look_for_free_space' */
203 			down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
204 			wnd_set_free(wnd, lcn, flen);
205 			up_write(&wnd->rw_lock);
206 			err = -ENOMEM;
207 			goto out;
208 		}
209 
210 		vcn += flen;
211 
212 		if (flen >= len || opt == ALLOCATE_MFT ||
213 		    (fr && run->count - cnt >= fr)) {
214 			*alen = vcn - vcn0;
215 			return 0;
216 		}
217 
218 		len -= flen;
219 	}
220 
221 out:
222 	/* Undo 'ntfs_look_for_free_space' */
223 	if (vcn - vcn0) {
224 		run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
225 		run_truncate(run, vcn0);
226 	}
227 
228 	return err;
229 }
230 
231 /*
232  * attr_make_nonresident
233  *
234  * If page is not NULL, it already contains resident data
235  * and locked (called from ni_write_frame()).
236  */
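/*
 * Rough outline of the conversion below: allocate clusters for the resident
 * payload, copy the payload either directly to disk (non-data attributes) or
 * into the page cache (the unnamed data attribute), then replace the resident
 * attribute in the MFT record with a non-resident one pointing at the new run.
 */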
237 int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
238 			  struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
239 			  u64 new_size, struct runs_tree *run,
240 			  struct ATTRIB **ins_attr, struct page *page)
241 {
242 	struct ntfs_sb_info *sbi;
243 	struct ATTRIB *attr_s;
244 	struct MFT_REC *rec;
245 	u32 used, asize, rsize, aoff, align;
246 	bool is_data;
247 	CLST len, alen;
248 	char *next;
249 	int err;
250 
251 	if (attr->non_res) {
252 		*ins_attr = attr;
253 		return 0;
254 	}
255 
256 	sbi = mi->sbi;
257 	rec = mi->mrec;
258 	attr_s = NULL;
259 	used = le32_to_cpu(rec->used);
260 	asize = le32_to_cpu(attr->size);
261 	next = Add2Ptr(attr, asize);
262 	aoff = PtrOffset(rec, attr);
263 	rsize = le32_to_cpu(attr->res.data_size);
264 	is_data = attr->type == ATTR_DATA && !attr->name_len;
265 
266 	align = sbi->cluster_size;
267 	if (is_attr_compressed(attr))
268 		align <<= COMPRESSION_UNIT;
269 	len = (rsize + align - 1) >> sbi->cluster_bits;
270 
271 	run_init(run);
272 
273 	/* Make a copy of original attribute. */
274 	attr_s = kmemdup(attr, asize, GFP_NOFS);
275 	if (!attr_s) {
276 		err = -ENOMEM;
277 		goto out;
278 	}
279 
280 	if (!len) {
281 		/* Empty resident -> Empty nonresident. */
282 		alen = 0;
283 	} else {
284 		const char *data = resident_data(attr);
285 
286 		err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
287 					     ALLOCATE_DEF, &alen, 0, NULL);
288 		if (err)
289 			goto out1;
290 
291 		if (!rsize) {
292 			/* Empty resident -> Non-empty non-resident. */
293 		} else if (!is_data) {
294 			err = ntfs_sb_write_run(sbi, run, 0, data, rsize, 0);
295 			if (err)
296 				goto out2;
297 		} else if (!page) {
298 			char *kaddr;
299 
300 			page = grab_cache_page(ni->vfs_inode.i_mapping, 0);
301 			if (!page) {
302 				err = -ENOMEM;
303 				goto out2;
304 			}
305 			kaddr = kmap_atomic(page);
306 			memcpy(kaddr, data, rsize);
307 			memset(kaddr + rsize, 0, PAGE_SIZE - rsize);
308 			kunmap_atomic(kaddr);
309 			flush_dcache_page(page);
310 			SetPageUptodate(page);
311 			set_page_dirty(page);
312 			unlock_page(page);
313 			put_page(page);
314 		}
315 	}
316 
317 	/* Remove original attribute. */
318 	used -= asize;
319 	memmove(attr, Add2Ptr(attr, asize), used - aoff);
320 	rec->used = cpu_to_le32(used);
321 	mi->dirty = true;
322 	if (le)
323 		al_remove_le(ni, le);
324 
325 	err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
326 				    attr_s->name_len, run, 0, alen,
327 				    attr_s->flags, &attr, NULL);
328 	if (err)
329 		goto out3;
330 
331 	kfree(attr_s);
332 	attr->nres.data_size = cpu_to_le64(rsize);
333 	attr->nres.valid_size = attr->nres.data_size;
334 
335 	*ins_attr = attr;
336 
337 	if (is_data)
338 		ni->ni_flags &= ~NI_FLAG_RESIDENT;
339 
340 	/* Resident attribute becomes non resident. */
341 	return 0;
342 
343 out3:
344 	attr = Add2Ptr(rec, aoff);
345 	memmove(next, attr, used - aoff);
346 	memcpy(attr, attr_s, asize);
347 	rec->used = cpu_to_le32(used + asize);
348 	mi->dirty = true;
349 out2:
350 	/* Undo: do not trim newly allocated clusters. */
351 	run_deallocate(sbi, run, false);
352 	run_close(run);
353 out1:
354 	kfree(attr_s);
355 out:
356 	return err;
357 }
358 
359 /*
360  * attr_set_size_res - Helper for attr_set_size().
361  */
362 static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
363 			     struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
364 			     u64 new_size, struct runs_tree *run,
365 			     struct ATTRIB **ins_attr)
366 {
367 	struct ntfs_sb_info *sbi = mi->sbi;
368 	struct MFT_REC *rec = mi->mrec;
369 	u32 used = le32_to_cpu(rec->used);
370 	u32 asize = le32_to_cpu(attr->size);
371 	u32 aoff = PtrOffset(rec, attr);
372 	u32 rsize = le32_to_cpu(attr->res.data_size);
373 	u32 tail = used - aoff - asize;
374 	char *next = Add2Ptr(attr, asize);
375 	s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8);
376 
377 	if (dsize < 0) {
378 		memmove(next + dsize, next, tail);
379 	} else if (dsize > 0) {
380 		if (used + dsize > sbi->max_bytes_per_attr)
381 			return attr_make_nonresident(ni, attr, le, mi, new_size,
382 						     run, ins_attr, NULL);
383 
384 		memmove(next + dsize, next, tail);
385 		memset(next, 0, dsize);
386 	}
387 
388 	if (new_size > rsize)
389 		memset(Add2Ptr(resident_data(attr), rsize), 0,
390 		       new_size - rsize);
391 
392 	rec->used = cpu_to_le32(used + dsize);
393 	attr->size = cpu_to_le32(asize + dsize);
394 	attr->res.data_size = cpu_to_le32(new_size);
395 	mi->dirty = true;
396 	*ins_attr = attr;
397 
398 	return 0;
399 }
400 
401 /*
402  * attr_set_size - Change the size of attribute.
403  *
404  * Extend:
405  *   - Sparse/compressed: No clusters are allocated.
406  *   - Normal: Append newly allocated and preallocated clusters.
407  * Shrink:
408  *   - Nothing is deallocated if @keep_prealloc is set.
409  */
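/*
 * For the $MFT data attribute below, clusters are taken from the MFT zone
 * with no preallocation, and an extension may be accepted early once at
 * least two records' worth of clusters has been added, to avoid a recursive
 * loop while the run is being packed.
 */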
410 int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
411 		  const __le16 *name, u8 name_len, struct runs_tree *run,
412 		  u64 new_size, const u64 *new_valid, bool keep_prealloc,
413 		  struct ATTRIB **ret)
414 {
415 	int err = 0;
416 	struct ntfs_sb_info *sbi = ni->mi.sbi;
417 	u8 cluster_bits = sbi->cluster_bits;
418 	bool is_mft =
419 		ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA && !name_len;
420 	u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
421 	struct ATTRIB *attr = NULL, *attr_b;
422 	struct ATTR_LIST_ENTRY *le, *le_b;
423 	struct mft_inode *mi, *mi_b;
424 	CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
425 	CLST next_svcn, pre_alloc = -1, done = 0;
426 	bool is_ext;
427 	u32 align;
428 	struct MFT_REC *rec;
429 
430 again:
431 	le_b = NULL;
432 	attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
433 			      &mi_b);
434 	if (!attr_b) {
435 		err = -ENOENT;
436 		goto out;
437 	}
438 
439 	if (!attr_b->non_res) {
440 		err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
441 					&attr_b);
442 		if (err || !attr_b->non_res)
443 			goto out;
444 
445 		/* Layout of records may be changed, so do a full search. */
446 		goto again;
447 	}
448 
449 	is_ext = is_attr_ext(attr_b);
450 
451 again_1:
452 	align = sbi->cluster_size;
453 
454 	if (is_ext)
455 		align <<= attr_b->nres.c_unit;
456 
457 	old_valid = le64_to_cpu(attr_b->nres.valid_size);
458 	old_size = le64_to_cpu(attr_b->nres.data_size);
459 	old_alloc = le64_to_cpu(attr_b->nres.alloc_size);
460 	old_alen = old_alloc >> cluster_bits;
461 
462 	new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
463 	new_alen = new_alloc >> cluster_bits;
464 
465 	if (keep_prealloc && new_size < old_size) {
466 		attr_b->nres.data_size = cpu_to_le64(new_size);
467 		mi_b->dirty = true;
468 		goto ok;
469 	}
470 
471 	vcn = old_alen - 1;
472 
473 	svcn = le64_to_cpu(attr_b->nres.svcn);
474 	evcn = le64_to_cpu(attr_b->nres.evcn);
475 
476 	if (svcn <= vcn && vcn <= evcn) {
477 		attr = attr_b;
478 		le = le_b;
479 		mi = mi_b;
480 	} else if (!le_b) {
481 		err = -EINVAL;
482 		goto out;
483 	} else {
484 		le = le_b;
485 		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
486 				    &mi);
487 		if (!attr) {
488 			err = -EINVAL;
489 			goto out;
490 		}
491 
492 next_le_1:
493 		svcn = le64_to_cpu(attr->nres.svcn);
494 		evcn = le64_to_cpu(attr->nres.evcn);
495 	}
496 
497 next_le:
498 	rec = mi->mrec;
499 
500 	err = attr_load_runs(attr, ni, run, NULL);
501 	if (err)
502 		goto out;
503 
504 	if (new_size > old_size) {
505 		CLST to_allocate;
506 		size_t free;
507 
508 		if (new_alloc <= old_alloc) {
509 			attr_b->nres.data_size = cpu_to_le64(new_size);
510 			mi_b->dirty = true;
511 			goto ok;
512 		}
513 
514 		to_allocate = new_alen - old_alen;
515 add_alloc_in_same_attr_seg:
516 		lcn = 0;
517 		if (is_mft) {
518 			/* MFT allocates clusters from MFT zone. */
519 			pre_alloc = 0;
520 		} else if (is_ext) {
521 			/* No preallocation for sparse/compressed. */
522 			pre_alloc = 0;
523 		} else if (pre_alloc == -1) {
524 			pre_alloc = 0;
525 			if (type == ATTR_DATA && !name_len &&
526 			    sbi->options->prealloc) {
527 				CLST new_alen2 = bytes_to_cluster(
528 					sbi, get_pre_allocated(new_size));
529 				pre_alloc = new_alen2 - new_alen;
530 			}
531 
532 			/* Get the last LCN to allocate from. */
533 			if (old_alen &&
534 			    !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) {
535 				lcn = SPARSE_LCN;
536 			}
537 
538 			if (lcn == SPARSE_LCN)
539 				lcn = 0;
540 			else if (lcn)
541 				lcn += 1;
542 
543 			free = wnd_zeroes(&sbi->used.bitmap);
544 			if (to_allocate > free) {
545 				err = -ENOSPC;
546 				goto out;
547 			}
548 
549 			if (pre_alloc && to_allocate + pre_alloc > free)
550 				pre_alloc = 0;
551 		}
552 
553 		vcn = old_alen;
554 
555 		if (is_ext) {
556 			if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,
557 					   false)) {
558 				err = -ENOMEM;
559 				goto out;
560 			}
561 			alen = to_allocate;
562 		} else {
563 			/* ~3 bytes per fragment. */
564 			err = attr_allocate_clusters(
565 				sbi, run, vcn, lcn, to_allocate, &pre_alloc,
566 				is_mft ? ALLOCATE_MFT : 0, &alen,
567 				is_mft ? 0
568 				       : (sbi->record_size -
569 					  le32_to_cpu(rec->used) + 8) /
570 							 3 +
571 						 1,
572 				NULL);
573 			if (err)
574 				goto out;
575 		}
576 
577 		done += alen;
578 		vcn += alen;
579 		if (to_allocate > alen)
580 			to_allocate -= alen;
581 		else
582 			to_allocate = 0;
583 
584 pack_runs:
585 		err = mi_pack_runs(mi, attr, run, vcn - svcn);
586 		if (err)
587 			goto out;
588 
589 		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
590 		new_alloc_tmp = (u64)next_svcn << cluster_bits;
591 		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
592 		mi_b->dirty = true;
593 
594 		if (next_svcn >= vcn && !to_allocate) {
595 			/* Normal way. Update attribute and exit. */
596 			attr_b->nres.data_size = cpu_to_le64(new_size);
597 			goto ok;
598 		}
599 
600 		/* At least two MFT records to avoid a recursive loop. */
601 		if (is_mft && next_svcn == vcn &&
602 		    ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
603 			new_size = new_alloc_tmp;
604 			attr_b->nres.data_size = attr_b->nres.alloc_size;
605 			goto ok;
606 		}
607 
608 		if (le32_to_cpu(rec->used) < sbi->record_size) {
609 			old_alen = next_svcn;
610 			evcn = old_alen - 1;
611 			goto add_alloc_in_same_attr_seg;
612 		}
613 
614 		attr_b->nres.data_size = attr_b->nres.alloc_size;
615 		if (new_alloc_tmp < old_valid)
616 			attr_b->nres.valid_size = attr_b->nres.data_size;
617 
618 		if (type == ATTR_LIST) {
619 			err = ni_expand_list(ni);
620 			if (err)
621 				goto out;
622 			if (next_svcn < vcn)
623 				goto pack_runs;
624 
625 			/* Layout of records is changed. */
626 			goto again;
627 		}
628 
629 		if (!ni->attr_list.size) {
630 			err = ni_create_attr_list(ni);
631 			if (err)
632 				goto out;
633 			/* Layout of records is changed. */
634 		}
635 
636 		if (next_svcn >= vcn) {
637 			/* This is MFT data, repeat. */
638 			goto again;
639 		}
640 
641 		/* Insert new attribute segment. */
642 		err = ni_insert_nonresident(ni, type, name, name_len, run,
643 					    next_svcn, vcn - next_svcn,
644 					    attr_b->flags, &attr, &mi);
645 		if (err)
646 			goto out;
647 
648 		if (!is_mft)
649 			run_truncate_head(run, evcn + 1);
650 
651 		svcn = le64_to_cpu(attr->nres.svcn);
652 		evcn = le64_to_cpu(attr->nres.evcn);
653 
654 		le_b = NULL;
655 		/*
656 		 * Layout of records may have changed.
657 		 * Find base attribute to update.
658 		 */
659 		attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
660 				      NULL, &mi_b);
661 		if (!attr_b) {
662 			err = -ENOENT;
663 			goto out;
664 		}
665 
666 		attr_b->nres.alloc_size = cpu_to_le64((u64)vcn << cluster_bits);
667 		attr_b->nres.data_size = attr_b->nres.alloc_size;
668 		attr_b->nres.valid_size = attr_b->nres.alloc_size;
669 		mi_b->dirty = true;
670 		goto again_1;
671 	}
672 
673 	if (new_size != old_size ||
674 	    (new_alloc != old_alloc && !keep_prealloc)) {
675 		vcn = max(svcn, new_alen);
676 		new_alloc_tmp = (u64)vcn << cluster_bits;
677 
678 		alen = 0;
679 		err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &alen,
680 					true);
681 		if (err)
682 			goto out;
683 
684 		run_truncate(run, vcn);
685 
686 		if (vcn > svcn) {
687 			err = mi_pack_runs(mi, attr, run, vcn - svcn);
688 			if (err)
689 				goto out;
690 		} else if (le && le->vcn) {
691 			u16 le_sz = le16_to_cpu(le->size);
692 
693 			/*
694 			 * NOTE: List entries for one attribute are always
695 			 * the same size. We are dealing with the last entry
696 			 * (vcn == 0), and it is not the first one in the entries
697 			 * array (the list entry for the std attribute is always first).
698 			 * So it is safe to step back.
699 			 */
700 			mi_remove_attr(NULL, mi, attr);
701 
702 			if (!al_remove_le(ni, le)) {
703 				err = -EINVAL;
704 				goto out;
705 			}
706 
707 			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
708 		} else {
709 			attr->nres.evcn = cpu_to_le64((u64)vcn - 1);
710 			mi->dirty = true;
711 		}
712 
713 		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
714 
715 		if (vcn == new_alen) {
716 			attr_b->nres.data_size = cpu_to_le64(new_size);
717 			if (new_size < old_valid)
718 				attr_b->nres.valid_size =
719 					attr_b->nres.data_size;
720 		} else {
721 			if (new_alloc_tmp <=
722 			    le64_to_cpu(attr_b->nres.data_size))
723 				attr_b->nres.data_size =
724 					attr_b->nres.alloc_size;
725 			if (new_alloc_tmp <
726 			    le64_to_cpu(attr_b->nres.valid_size))
727 				attr_b->nres.valid_size =
728 					attr_b->nres.alloc_size;
729 		}
730 
731 		if (is_ext)
732 			le64_sub_cpu(&attr_b->nres.total_size,
733 				     ((u64)alen << cluster_bits));
734 
735 		mi_b->dirty = true;
736 
737 		if (new_alloc_tmp <= new_alloc)
738 			goto ok;
739 
740 		old_size = new_alloc_tmp;
741 		vcn = svcn - 1;
742 
743 		if (le == le_b) {
744 			attr = attr_b;
745 			mi = mi_b;
746 			evcn = svcn - 1;
747 			svcn = 0;
748 			goto next_le;
749 		}
750 
751 		if (le->type != type || le->name_len != name_len ||
752 		    memcmp(le_name(le), name, name_len * sizeof(short))) {
753 			err = -EINVAL;
754 			goto out;
755 		}
756 
757 		err = ni_load_mi(ni, le, &mi);
758 		if (err)
759 			goto out;
760 
761 		attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
762 		if (!attr) {
763 			err = -EINVAL;
764 			goto out;
765 		}
766 		goto next_le_1;
767 	}
768 
769 ok:
770 	if (new_valid) {
771 		__le64 valid = cpu_to_le64(min(*new_valid, new_size));
772 
773 		if (attr_b->nres.valid_size != valid) {
774 			attr_b->nres.valid_size = valid;
775 			mi_b->dirty = true;
776 		}
777 	}
778 
779 out:
780 	if (!err && attr_b && ret)
781 		*ret = attr_b;
782 
783 	/* Update inode_set_bytes. */
784 	if (!err && ((type == ATTR_DATA && !name_len) ||
785 		     (type == ATTR_ALLOC && name == I30_NAME))) {
786 		bool dirty = false;
787 
788 		if (ni->vfs_inode.i_size != new_size) {
789 			ni->vfs_inode.i_size = new_size;
790 			dirty = true;
791 		}
792 
793 		if (attr_b && attr_b->non_res) {
794 			new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
795 			if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
796 				inode_set_bytes(&ni->vfs_inode, new_alloc);
797 				dirty = true;
798 			}
799 		}
800 
801 		if (dirty) {
802 			ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
803 			mark_inode_dirty(&ni->vfs_inode);
804 		}
805 	}
806 
807 	return err;
808 }
809 
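/*
 * attr_data_get_block - Look up (and optionally allocate) clusters for @vcn.
 *
 * Returns the LCN and run length for @vcn of the unnamed data attribute.
 * If @new is NULL no clusters are allocated; otherwise a missing or sparse
 * range of up to @clen clusters (rounded up to the compression unit) is
 * allocated and *new is set to true.
 */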
810 int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
811 			CLST *len, bool *new)
812 {
813 	int err = 0;
814 	struct runs_tree *run = &ni->file.run;
815 	struct ntfs_sb_info *sbi;
816 	u8 cluster_bits;
817 	struct ATTRIB *attr = NULL, *attr_b;
818 	struct ATTR_LIST_ENTRY *le, *le_b;
819 	struct mft_inode *mi, *mi_b;
820 	CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end;
821 	u64 total_size;
822 	u32 clst_per_frame;
823 	bool ok;
824 
825 	if (new)
826 		*new = false;
827 
828 	down_read(&ni->file.run_lock);
829 	ok = run_lookup_entry(run, vcn, lcn, len, NULL);
830 	up_read(&ni->file.run_lock);
831 
832 	if (ok && (*lcn != SPARSE_LCN || !new)) {
833 		/* Normal way. */
834 		return 0;
835 	}
836 
837 	if (!clen)
838 		clen = 1;
839 
840 	if (ok && clen > *len)
841 		clen = *len;
842 
843 	sbi = ni->mi.sbi;
844 	cluster_bits = sbi->cluster_bits;
845 
846 	ni_lock(ni);
847 	down_write(&ni->file.run_lock);
848 
849 	le_b = NULL;
850 	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
851 	if (!attr_b) {
852 		err = -ENOENT;
853 		goto out;
854 	}
855 
856 	if (!attr_b->non_res) {
857 		*lcn = RESIDENT_LCN;
858 		*len = 1;
859 		goto out;
860 	}
861 
862 	asize = le64_to_cpu(attr_b->nres.alloc_size) >> sbi->cluster_bits;
863 	if (vcn >= asize) {
864 		err = -EINVAL;
865 		goto out;
866 	}
867 
868 	clst_per_frame = 1u << attr_b->nres.c_unit;
869 	to_alloc = (clen + clst_per_frame - 1) & ~(clst_per_frame - 1);
870 
871 	if (vcn + to_alloc > asize)
872 		to_alloc = asize - vcn;
873 
874 	svcn = le64_to_cpu(attr_b->nres.svcn);
875 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
876 
877 	attr = attr_b;
878 	le = le_b;
879 	mi = mi_b;
880 
881 	if (le_b && (vcn < svcn || evcn1 <= vcn)) {
882 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
883 				    &mi);
884 		if (!attr) {
885 			err = -EINVAL;
886 			goto out;
887 		}
888 		svcn = le64_to_cpu(attr->nres.svcn);
889 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
890 	}
891 
892 	err = attr_load_runs(attr, ni, run, NULL);
893 	if (err)
894 		goto out;
895 
896 	if (!ok) {
897 		ok = run_lookup_entry(run, vcn, lcn, len, NULL);
898 		if (ok && (*lcn != SPARSE_LCN || !new)) {
899 			/* Normal way. */
900 			err = 0;
901 			goto ok;
902 		}
903 
904 		if (!ok && !new) {
905 			*len = 0;
906 			err = 0;
907 			goto ok;
908 		}
909 
910 		if (ok && clen > *len) {
911 			clen = *len;
912 			to_alloc = (clen + clst_per_frame - 1) &
913 				   ~(clst_per_frame - 1);
914 		}
915 	}
916 
917 	if (!is_attr_ext(attr_b)) {
918 		err = -EINVAL;
919 		goto out;
920 	}
921 
922 	/* Get the last LCN to allocate from. */
923 	hint = 0;
924 
925 	if (vcn > evcn1) {
926 		if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
927 				   false)) {
928 			err = -ENOMEM;
929 			goto out;
930 		}
931 	} else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {
932 		hint = -1;
933 	}
934 
935 	err = attr_allocate_clusters(
936 		sbi, run, vcn, hint + 1, to_alloc, NULL, 0, len,
937 		(sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1,
938 		lcn);
939 	if (err)
940 		goto out;
941 	*new = true;
942 
943 	end = vcn + *len;
944 
945 	total_size = le64_to_cpu(attr_b->nres.total_size) +
946 		     ((u64)*len << cluster_bits);
947 
948 repack:
949 	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
950 	if (err)
951 		goto out;
952 
953 	attr_b->nres.total_size = cpu_to_le64(total_size);
954 	inode_set_bytes(&ni->vfs_inode, total_size);
955 	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
956 
957 	mi_b->dirty = true;
958 	mark_inode_dirty(&ni->vfs_inode);
959 
960 	/* Stored [vcn : next_svcn) from [vcn : end). */
961 	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
962 
963 	if (end <= evcn1) {
964 		if (next_svcn == evcn1) {
965 			/* Normal way. Update attribute and exit. */
966 			goto ok;
967 		}
968 		/* Add new segment [next_svcn : evcn1). */
969 		if (!ni->attr_list.size) {
970 			err = ni_create_attr_list(ni);
971 			if (err)
972 				goto out;
973 			/* Layout of records is changed. */
974 			le_b = NULL;
975 			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
976 					      0, NULL, &mi_b);
977 			if (!attr_b) {
978 				err = -ENOENT;
979 				goto out;
980 			}
981 
982 			attr = attr_b;
983 			le = le_b;
984 			mi = mi_b;
985 			goto repack;
986 		}
987 	}
988 
989 	svcn = evcn1;
990 
991 	/* Estimate next attribute. */
992 	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
993 
994 	if (attr) {
995 		CLST alloc = bytes_to_cluster(
996 			sbi, le64_to_cpu(attr_b->nres.alloc_size));
997 		CLST evcn = le64_to_cpu(attr->nres.evcn);
998 
999 		if (end < next_svcn)
1000 			end = next_svcn;
1001 		while (end > evcn) {
1002 			/* Remove segment [svcn : evcn). */
1003 			mi_remove_attr(NULL, mi, attr);
1004 
1005 			if (!al_remove_le(ni, le)) {
1006 				err = -EINVAL;
1007 				goto out;
1008 			}
1009 
1010 			if (evcn + 1 >= alloc) {
1011 				/* Last attribute segment. */
1012 				evcn1 = evcn + 1;
1013 				goto ins_ext;
1014 			}
1015 
1016 			if (ni_load_mi(ni, le, &mi)) {
1017 				attr = NULL;
1018 				goto out;
1019 			}
1020 
1021 			attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
1022 					    &le->id);
1023 			if (!attr) {
1024 				err = -EINVAL;
1025 				goto out;
1026 			}
1027 			svcn = le64_to_cpu(attr->nres.svcn);
1028 			evcn = le64_to_cpu(attr->nres.evcn);
1029 		}
1030 
1031 		if (end < svcn)
1032 			end = svcn;
1033 
1034 		err = attr_load_runs(attr, ni, run, &end);
1035 		if (err)
1036 			goto out;
1037 
1038 		evcn1 = evcn + 1;
1039 		attr->nres.svcn = cpu_to_le64(next_svcn);
1040 		err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
1041 		if (err)
1042 			goto out;
1043 
1044 		le->vcn = cpu_to_le64(next_svcn);
1045 		ni->attr_list.dirty = true;
1046 		mi->dirty = true;
1047 
1048 		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1049 	}
1050 ins_ext:
1051 	if (evcn1 > next_svcn) {
1052 		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
1053 					    next_svcn, evcn1 - next_svcn,
1054 					    attr_b->flags, &attr, &mi);
1055 		if (err)
1056 			goto out;
1057 	}
1058 ok:
1059 	run_truncate_around(run, vcn);
1060 out:
1061 	up_write(&ni->file.run_lock);
1062 	ni_unlock(ni);
1063 
1064 	return err;
1065 }
1066 
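/*
 * attr_data_read_resident - Copy resident data into @page, zero-filling the tail.
 */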
1067 int attr_data_read_resident(struct ntfs_inode *ni, struct page *page)
1068 {
1069 	u64 vbo;
1070 	struct ATTRIB *attr;
1071 	u32 data_size;
1072 
1073 	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
1074 	if (!attr)
1075 		return -EINVAL;
1076 
1077 	if (attr->non_res)
1078 		return E_NTFS_NONRESIDENT;
1079 
1080 	vbo = page->index << PAGE_SHIFT;
1081 	data_size = le32_to_cpu(attr->res.data_size);
1082 	if (vbo < data_size) {
1083 		const char *data = resident_data(attr);
1084 		char *kaddr = kmap_atomic(page);
1085 		u32 use = data_size - vbo;
1086 
1087 		if (use > PAGE_SIZE)
1088 			use = PAGE_SIZE;
1089 
1090 		memcpy(kaddr, data + vbo, use);
1091 		memset(kaddr + use, 0, PAGE_SIZE - use);
1092 		kunmap_atomic(kaddr);
1093 		flush_dcache_page(page);
1094 		SetPageUptodate(page);
1095 	} else if (!PageUptodate(page)) {
1096 		zero_user_segment(page, 0, PAGE_SIZE);
1097 		SetPageUptodate(page);
1098 	}
1099 
1100 	return 0;
1101 }
1102 
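/*
 * attr_data_write_resident - Copy @page back into the resident data attribute
 * and update the valid size.
 */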
1103 int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
1104 {
1105 	u64 vbo;
1106 	struct mft_inode *mi;
1107 	struct ATTRIB *attr;
1108 	u32 data_size;
1109 
1110 	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
1111 	if (!attr)
1112 		return -EINVAL;
1113 
1114 	if (attr->non_res) {
1115 		/* Return special error code to check this case. */
1116 		return E_NTFS_NONRESIDENT;
1117 	}
1118 
1119 	vbo = page->index << PAGE_SHIFT;
1120 	data_size = le32_to_cpu(attr->res.data_size);
1121 	if (vbo < data_size) {
1122 		char *data = resident_data(attr);
1123 		char *kaddr = kmap_atomic(page);
1124 		u32 use = data_size - vbo;
1125 
1126 		if (use > PAGE_SIZE)
1127 			use = PAGE_SIZE;
1128 		memcpy(data + vbo, kaddr, use);
1129 		kunmap_atomic(kaddr);
1130 		mi->dirty = true;
1131 	}
1132 	ni->i_valid = data_size;
1133 
1134 	return 0;
1135 }
1136 
1137 /*
1138  * attr_load_runs_vcn - Load the attribute segment that contains @vcn.
1139  */
1140 int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
1141 		       const __le16 *name, u8 name_len, struct runs_tree *run,
1142 		       CLST vcn)
1143 {
1144 	struct ATTRIB *attr;
1145 	int err;
1146 	CLST svcn, evcn;
1147 	u16 ro;
1148 
1149 	if (!ni) {
1150 		/* Is record corrupted? */
1151 		return -ENOENT;
1152 	}
1153 
1154 	attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
1155 	if (!attr) {
1156 		/* Is record corrupted? */
1157 		return -ENOENT;
1158 	}
1159 
1160 	svcn = le64_to_cpu(attr->nres.svcn);
1161 	evcn = le64_to_cpu(attr->nres.evcn);
1162 
1163 	if (evcn < vcn || vcn < svcn) {
1164 		/* Is record corrupted? */
1165 		return -EINVAL;
1166 	}
1167 
1168 	ro = le16_to_cpu(attr->nres.run_off);
1169 
1170 	if (ro > le32_to_cpu(attr->size))
1171 		return -EINVAL;
1172 
1173 	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
1174 			    Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
1175 	if (err < 0)
1176 		return err;
1177 	return 0;
1178 }
1179 
1180 /*
1181  * attr_load_runs_range - Load runs for given range [from to).
1182  */
1183 int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
1184 			 const __le16 *name, u8 name_len, struct runs_tree *run,
1185 			 u64 from, u64 to)
1186 {
1187 	struct ntfs_sb_info *sbi = ni->mi.sbi;
1188 	u8 cluster_bits = sbi->cluster_bits;
1189 	CLST vcn = from >> cluster_bits;
1190 	CLST vcn_last = (to - 1) >> cluster_bits;
1191 	CLST lcn, clen;
1192 	int err;
1193 
1194 	for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) {
1195 		if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) {
1196 			err = attr_load_runs_vcn(ni, type, name, name_len, run,
1197 						 vcn);
1198 			if (err)
1199 				return err;
1200 			clen = 0; /* Next run_lookup_entry(vcn) must succeed. */
1201 		}
1202 	}
1203 
1204 	return 0;
1205 }
1206 
1207 #ifdef CONFIG_NTFS3_LZX_XPRESS
1208 /*
1209  * attr_wof_frame_info
1210  *
1211  * Read header of Xpress/LZX file to get info about frame.
1212  */
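/*
 * Layout assumed below: the WOF stream starts with an array of little-endian
 * frame end offsets (32-bit while the file is below 4 GiB, 64-bit otherwise);
 * frame @frame occupies the bytes between entry frame - 1 (or 0) and entry
 * frame, counted from the end of that offset array.
 */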
1213 int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
1214 			struct runs_tree *run, u64 frame, u64 frames,
1215 			u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
1216 {
1217 	struct ntfs_sb_info *sbi = ni->mi.sbi;
1218 	u64 vbo[2], off[2], wof_size;
1219 	u32 voff;
1220 	u8 bytes_per_off;
1221 	char *addr;
1222 	struct page *page;
1223 	int i, err;
1224 	__le32 *off32;
1225 	__le64 *off64;
1226 
1227 	if (ni->vfs_inode.i_size < 0x100000000ull) {
1228 		/* File starts with array of 32 bit offsets. */
1229 		bytes_per_off = sizeof(__le32);
1230 		vbo[1] = frame << 2;
1231 		*vbo_data = frames << 2;
1232 	} else {
1233 		/* File starts with array of 64 bit offsets. */
1234 		bytes_per_off = sizeof(__le64);
1235 		vbo[1] = frame << 3;
1236 		*vbo_data = frames << 3;
1237 	}
1238 
1239 	/*
1240 	 * Read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts.
1241 	 * Read 4/8 bytes at [vbo] == offset where compressed frame ends.
1242 	 */
1243 	if (!attr->non_res) {
1244 		if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
1245 			ntfs_inode_err(&ni->vfs_inode, "is corrupted");
1246 			return -EINVAL;
1247 		}
1248 		addr = resident_data(attr);
1249 
1250 		if (bytes_per_off == sizeof(__le32)) {
1251 			off32 = Add2Ptr(addr, vbo[1]);
1252 			off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
1253 			off[1] = le32_to_cpu(off32[0]);
1254 		} else {
1255 			off64 = Add2Ptr(addr, vbo[1]);
1256 			off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
1257 			off[1] = le64_to_cpu(off64[0]);
1258 		}
1259 
1260 		*vbo_data += off[0];
1261 		*ondisk_size = off[1] - off[0];
1262 		return 0;
1263 	}
1264 
1265 	wof_size = le64_to_cpu(attr->nres.data_size);
1266 	down_write(&ni->file.run_lock);
1267 	page = ni->file.offs_page;
1268 	if (!page) {
1269 		page = alloc_page(GFP_KERNEL);
1270 		if (!page) {
1271 			err = -ENOMEM;
1272 			goto out;
1273 		}
1274 		page->index = -1;
1275 		ni->file.offs_page = page;
1276 	}
1277 	lock_page(page);
1278 	addr = page_address(page);
1279 
1280 	if (vbo[1]) {
1281 		voff = vbo[1] & (PAGE_SIZE - 1);
1282 		vbo[0] = vbo[1] - bytes_per_off;
1283 		i = 0;
1284 	} else {
1285 		voff = 0;
1286 		vbo[0] = 0;
1287 		off[0] = 0;
1288 		i = 1;
1289 	}
1290 
1291 	do {
1292 		pgoff_t index = vbo[i] >> PAGE_SHIFT;
1293 
1294 		if (index != page->index) {
1295 			u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
1296 			u64 to = min(from + PAGE_SIZE, wof_size);
1297 
1298 			err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
1299 						   ARRAY_SIZE(WOF_NAME), run,
1300 						   from, to);
1301 			if (err)
1302 				goto out1;
1303 
1304 			err = ntfs_bio_pages(sbi, run, &page, 1, from,
1305 					     to - from, REQ_OP_READ);
1306 			if (err) {
1307 				page->index = -1;
1308 				goto out1;
1309 			}
1310 			page->index = index;
1311 		}
1312 
1313 		if (i) {
1314 			if (bytes_per_off == sizeof(__le32)) {
1315 				off32 = Add2Ptr(addr, voff);
1316 				off[1] = le32_to_cpu(*off32);
1317 			} else {
1318 				off64 = Add2Ptr(addr, voff);
1319 				off[1] = le64_to_cpu(*off64);
1320 			}
1321 		} else if (!voff) {
1322 			if (bytes_per_off == sizeof(__le32)) {
1323 				off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
1324 				off[0] = le32_to_cpu(*off32);
1325 			} else {
1326 				off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
1327 				off[0] = le64_to_cpu(*off64);
1328 			}
1329 		} else {
1330 			/* Two values in one page. */
1331 			if (bytes_per_off == sizeof(__le32)) {
1332 				off32 = Add2Ptr(addr, voff);
1333 				off[0] = le32_to_cpu(off32[-1]);
1334 				off[1] = le32_to_cpu(off32[0]);
1335 			} else {
1336 				off64 = Add2Ptr(addr, voff);
1337 				off[0] = le64_to_cpu(off64[-1]);
1338 				off[1] = le64_to_cpu(off64[0]);
1339 			}
1340 			break;
1341 		}
1342 	} while (++i < 2);
1343 
1344 	*vbo_data += off[0];
1345 	*ondisk_size = off[1] - off[0];
1346 
1347 out1:
1348 	unlock_page(page);
1349 out:
1350 	up_write(&ni->file.run_lock);
1351 	return err;
1352 }
1353 #endif
1354 
1355 /*
1356  * attr_is_frame_compressed - Used to detect compressed frame.
1357  */
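/*
 * The result is reported through *clst_data: a fully sparse frame yields 0,
 * an uncompressed frame yields the full frame size, and a compressed frame
 * yields the number of clusters that actually hold data (the rest of the
 * frame being sparse).
 */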
1358 int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
1359 			     CLST frame, CLST *clst_data)
1360 {
1361 	int err;
1362 	u32 clst_frame;
1363 	CLST clen, lcn, vcn, alen, slen, vcn_next;
1364 	size_t idx;
1365 	struct runs_tree *run;
1366 
1367 	*clst_data = 0;
1368 
1369 	if (!is_attr_compressed(attr))
1370 		return 0;
1371 
1372 	if (!attr->non_res)
1373 		return 0;
1374 
1375 	clst_frame = 1u << attr->nres.c_unit;
1376 	vcn = frame * clst_frame;
1377 	run = &ni->file.run;
1378 
1379 	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1380 		err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
1381 					 attr->name_len, run, vcn);
1382 		if (err)
1383 			return err;
1384 
1385 		if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1386 			return -EINVAL;
1387 	}
1388 
1389 	if (lcn == SPARSE_LCN) {
1390 		/* Sparse frame. */
1391 		return 0;
1392 	}
1393 
1394 	if (clen >= clst_frame) {
1395 		/*
1396 		 * The frame is not compressed because
1397 		 * it does not contain any sparse clusters.
1398 		 */
1399 		*clst_data = clst_frame;
1400 		return 0;
1401 	}
1402 
1403 	alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));
1404 	slen = 0;
1405 	*clst_data = clen;
1406 
1407 	/*
1408 	 * The frame is compressed if *clst_data + slen >= clst_frame.
1409 	 * Check next fragments.
1410 	 */
1411 	while ((vcn += clen) < alen) {
1412 		vcn_next = vcn;
1413 
1414 		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1415 		    vcn_next != vcn) {
1416 			err = attr_load_runs_vcn(ni, attr->type,
1417 						 attr_name(attr),
1418 						 attr->name_len, run, vcn_next);
1419 			if (err)
1420 				return err;
1421 			vcn = vcn_next;
1422 
1423 			if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1424 				return -EINVAL;
1425 		}
1426 
1427 		if (lcn == SPARSE_LCN) {
1428 			slen += clen;
1429 		} else {
1430 			if (slen) {
1431 				/*
1432 				 * Data clusters + sparse clusters are
1433 				 * not enough to fill the frame.
1434 				 */
1435 				return -EINVAL;
1436 			}
1437 			*clst_data += clen;
1438 		}
1439 
1440 		if (*clst_data + slen >= clst_frame) {
1441 			if (!slen) {
1442 				 * There are no sparse clusters in this frame,
1443 				 * so it is not compressed.
1444 				 * so it is not compressed.
1445 				 */
1446 				*clst_data = clst_frame;
1447 			} else {
1448 				/* Frame is compressed. */
1449 			}
1450 			break;
1451 		}
1452 	}
1453 
1454 	return 0;
1455 }
1456 
1457 /*
1458  * attr_allocate_frame - Allocate/free clusters for @frame.
1459  *
1460  * Assumed: down_write(&ni->file.run_lock);
1461  */
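/*
 * The frame is resized to hold @compr_size bytes: when it shrinks, the extra
 * clusters are freed and replaced with sparse entries; when it grows, new
 * clusters are allocated right after the existing ones.
 */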
1462 int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
1463 			u64 new_valid)
1464 {
1465 	int err = 0;
1466 	struct runs_tree *run = &ni->file.run;
1467 	struct ntfs_sb_info *sbi = ni->mi.sbi;
1468 	struct ATTRIB *attr = NULL, *attr_b;
1469 	struct ATTR_LIST_ENTRY *le, *le_b;
1470 	struct mft_inode *mi, *mi_b;
1471 	CLST svcn, evcn1, next_svcn, lcn, len;
1472 	CLST vcn, end, clst_data;
1473 	u64 total_size, valid_size, data_size;
1474 
1475 	le_b = NULL;
1476 	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1477 	if (!attr_b)
1478 		return -ENOENT;
1479 
1480 	if (!is_attr_ext(attr_b))
1481 		return -EINVAL;
1482 
1483 	vcn = frame << NTFS_LZNT_CUNIT;
1484 	total_size = le64_to_cpu(attr_b->nres.total_size);
1485 
1486 	svcn = le64_to_cpu(attr_b->nres.svcn);
1487 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1488 	data_size = le64_to_cpu(attr_b->nres.data_size);
1489 
1490 	if (svcn <= vcn && vcn < evcn1) {
1491 		attr = attr_b;
1492 		le = le_b;
1493 		mi = mi_b;
1494 	} else if (!le_b) {
1495 		err = -EINVAL;
1496 		goto out;
1497 	} else {
1498 		le = le_b;
1499 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1500 				    &mi);
1501 		if (!attr) {
1502 			err = -EINVAL;
1503 			goto out;
1504 		}
1505 		svcn = le64_to_cpu(attr->nres.svcn);
1506 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1507 	}
1508 
1509 	err = attr_load_runs(attr, ni, run, NULL);
1510 	if (err)
1511 		goto out;
1512 
1513 	err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data);
1514 	if (err)
1515 		goto out;
1516 
1517 	total_size -= (u64)clst_data << sbi->cluster_bits;
1518 
1519 	len = bytes_to_cluster(sbi, compr_size);
1520 
1521 	if (len == clst_data)
1522 		goto out;
1523 
1524 	if (len < clst_data) {
1525 		err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,
1526 					NULL, true);
1527 		if (err)
1528 			goto out;
1529 
1530 		if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,
1531 				   false)) {
1532 			err = -ENOMEM;
1533 			goto out;
1534 		}
1535 		end = vcn + clst_data;
1536 		/* Run contains updated range [vcn + len : end). */
1537 	} else {
1538 		CLST alen, hint = 0;
1539 		/* Get the last LCN to allocate from. */
1540 		if (vcn + clst_data &&
1541 		    !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
1542 				      NULL)) {
1543 			hint = -1;
1544 		}
1545 
1546 		err = attr_allocate_clusters(sbi, run, vcn + clst_data,
1547 					     hint + 1, len - clst_data, NULL, 0,
1548 					     &alen, 0, &lcn);
1549 		if (err)
1550 			goto out;
1551 
1552 		end = vcn + len;
1553 		/* Run contains updated range [vcn + clst_data : end). */
1554 	}
1555 
1556 	total_size += (u64)len << sbi->cluster_bits;
1557 
1558 repack:
1559 	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
1560 	if (err)
1561 		goto out;
1562 
1563 	attr_b->nres.total_size = cpu_to_le64(total_size);
1564 	inode_set_bytes(&ni->vfs_inode, total_size);
1565 
1566 	mi_b->dirty = true;
1567 	mark_inode_dirty(&ni->vfs_inode);
1568 
1569 	/* Stored [vcn : next_svcn) from [vcn : end). */
1570 	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1571 
1572 	if (end <= evcn1) {
1573 		if (next_svcn == evcn1) {
1574 			/* Normal way. Update attribute and exit. */
1575 			goto ok;
1576 		}
1577 		/* Add new segment [next_svcn : evcn1). */
1578 		if (!ni->attr_list.size) {
1579 			err = ni_create_attr_list(ni);
1580 			if (err)
1581 				goto out;
1582 			/* Layout of records is changed. */
1583 			le_b = NULL;
1584 			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
1585 					      0, NULL, &mi_b);
1586 			if (!attr_b)
1587 				return -ENOENT;
1588 
1589 			attr = attr_b;
1590 			le = le_b;
1591 			mi = mi_b;
1592 			goto repack;
1593 		}
1594 	}
1595 
1596 	svcn = evcn1;
1597 
1598 	/* Estimate next attribute. */
1599 	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
1600 
1601 	if (attr) {
1602 		CLST alloc = bytes_to_cluster(
1603 			sbi, le64_to_cpu(attr_b->nres.alloc_size));
1604 		CLST evcn = le64_to_cpu(attr->nres.evcn);
1605 
1606 		if (end < next_svcn)
1607 			end = next_svcn;
1608 		while (end > evcn) {
1609 			/* Remove segment [svcn : evcn). */
1610 			mi_remove_attr(NULL, mi, attr);
1611 
1612 			if (!al_remove_le(ni, le)) {
1613 				err = -EINVAL;
1614 				goto out;
1615 			}
1616 
1617 			if (evcn + 1 >= alloc) {
1618 				/* Last attribute segment. */
1619 				evcn1 = evcn + 1;
1620 				goto ins_ext;
1621 			}
1622 
1623 			if (ni_load_mi(ni, le, &mi)) {
1624 				attr = NULL;
1625 				goto out;
1626 			}
1627 
1628 			attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
1629 					    &le->id);
1630 			if (!attr) {
1631 				err = -EINVAL;
1632 				goto out;
1633 			}
1634 			svcn = le64_to_cpu(attr->nres.svcn);
1635 			evcn = le64_to_cpu(attr->nres.evcn);
1636 		}
1637 
1638 		if (end < svcn)
1639 			end = svcn;
1640 
1641 		err = attr_load_runs(attr, ni, run, &end);
1642 		if (err)
1643 			goto out;
1644 
1645 		evcn1 = evcn + 1;
1646 		attr->nres.svcn = cpu_to_le64(next_svcn);
1647 		err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
1648 		if (err)
1649 			goto out;
1650 
1651 		le->vcn = cpu_to_le64(next_svcn);
1652 		ni->attr_list.dirty = true;
1653 		mi->dirty = true;
1654 
1655 		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1656 	}
1657 ins_ext:
1658 	if (evcn1 > next_svcn) {
1659 		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
1660 					    next_svcn, evcn1 - next_svcn,
1661 					    attr_b->flags, &attr, &mi);
1662 		if (err)
1663 			goto out;
1664 	}
1665 ok:
1666 	run_truncate_around(run, vcn);
1667 out:
1668 	if (new_valid > data_size)
1669 		new_valid = data_size;
1670 
1671 	valid_size = le64_to_cpu(attr_b->nres.valid_size);
1672 	if (new_valid != valid_size) {
1673 		attr_b->nres.valid_size = cpu_to_le64(new_valid);
1674 		mi_b->dirty = true;
1675 	}
1676 
1677 	return err;
1678 }
1679 
1680 /*
1681  * attr_collapse_range - Collapse range in file.
1682  */
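/*
 * Only ranges aligned to the cluster size (or to the compression unit for
 * sparse/compressed attributes) can be collapsed; affected segments are
 * shrunk or removed and all following VCNs are shifted down by the collapsed
 * length.
 */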
1683 int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
1684 {
1685 	int err = 0;
1686 	struct runs_tree *run = &ni->file.run;
1687 	struct ntfs_sb_info *sbi = ni->mi.sbi;
1688 	struct ATTRIB *attr = NULL, *attr_b;
1689 	struct ATTR_LIST_ENTRY *le, *le_b;
1690 	struct mft_inode *mi, *mi_b;
1691 	CLST svcn, evcn1, len, dealloc, alen;
1692 	CLST vcn, end;
1693 	u64 valid_size, data_size, alloc_size, total_size;
1694 	u32 mask;
1695 	__le16 a_flags;
1696 
1697 	if (!bytes)
1698 		return 0;
1699 
1700 	le_b = NULL;
1701 	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1702 	if (!attr_b)
1703 		return -ENOENT;
1704 
1705 	if (!attr_b->non_res) {
1706 		/* Attribute is resident. Nothing to do? */
1707 		return 0;
1708 	}
1709 
1710 	data_size = le64_to_cpu(attr_b->nres.data_size);
1711 	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
1712 	a_flags = attr_b->flags;
1713 
1714 	if (is_attr_ext(attr_b)) {
1715 		total_size = le64_to_cpu(attr_b->nres.total_size);
1716 		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
1717 	} else {
1718 		total_size = alloc_size;
1719 		mask = sbi->cluster_mask;
1720 	}
1721 
1722 	if ((vbo & mask) || (bytes & mask)) {
1723 		/* Only cluster aligned ranges can be collapsed. */
1724 		return -EINVAL;
1725 	}
1726 
1727 	if (vbo > data_size)
1728 		return -EINVAL;
1729 
1730 	down_write(&ni->file.run_lock);
1731 
1732 	if (vbo + bytes >= data_size) {
1733 		u64 new_valid = min(ni->i_valid, vbo);
1734 
1735 		/* Simple truncate file at 'vbo'. */
1736 		truncate_setsize(&ni->vfs_inode, vbo);
1737 		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
1738 				    &new_valid, true, NULL);
1739 
1740 		if (!err && new_valid < ni->i_valid)
1741 			ni->i_valid = new_valid;
1742 
1743 		goto out;
1744 	}
1745 
1746 	/*
1747 	 * Enumerate all attribute segments and collapse.
1748 	 */
1749 	alen = alloc_size >> sbi->cluster_bits;
1750 	vcn = vbo >> sbi->cluster_bits;
1751 	len = bytes >> sbi->cluster_bits;
1752 	end = vcn + len;
1753 	dealloc = 0;
1754 
1755 	svcn = le64_to_cpu(attr_b->nres.svcn);
1756 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1757 
1758 	if (svcn <= vcn && vcn < evcn1) {
1759 		attr = attr_b;
1760 		le = le_b;
1761 		mi = mi_b;
1762 	} else if (!le_b) {
1763 		err = -EINVAL;
1764 		goto out;
1765 	} else {
1766 		le = le_b;
1767 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1768 				    &mi);
1769 		if (!attr) {
1770 			err = -EINVAL;
1771 			goto out;
1772 		}
1773 
1774 		svcn = le64_to_cpu(attr->nres.svcn);
1775 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1776 	}
1777 
1778 	for (;;) {
1779 		if (svcn >= end) {
1780 			/* Shift VCN. */
1781 			attr->nres.svcn = cpu_to_le64(svcn - len);
1782 			attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
1783 			if (le) {
1784 				le->vcn = attr->nres.svcn;
1785 				ni->attr_list.dirty = true;
1786 			}
1787 			mi->dirty = true;
1788 		} else if (svcn < vcn || end < evcn1) {
1789 			CLST vcn1, eat, next_svcn;
1790 
1791 			/* Collapse a part of this attribute segment. */
1792 			err = attr_load_runs(attr, ni, run, &svcn);
1793 			if (err)
1794 				goto out;
1795 			vcn1 = max(vcn, svcn);
1796 			eat = min(end, evcn1) - vcn1;
1797 
1798 			err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
1799 						true);
1800 			if (err)
1801 				goto out;
1802 
1803 			if (!run_collapse_range(run, vcn1, eat)) {
1804 				err = -ENOMEM;
1805 				goto out;
1806 			}
1807 
1808 			if (svcn >= vcn) {
1809 				/* Shift VCN */
1810 				attr->nres.svcn = cpu_to_le64(vcn);
1811 				if (le) {
1812 					le->vcn = attr->nres.svcn;
1813 					ni->attr_list.dirty = true;
1814 				}
1815 			}
1816 
1817 			err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);
1818 			if (err)
1819 				goto out;
1820 
1821 			next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1822 			if (next_svcn + eat < evcn1) {
1823 				err = ni_insert_nonresident(
1824 					ni, ATTR_DATA, NULL, 0, run, next_svcn,
1825 					evcn1 - eat - next_svcn, a_flags, &attr,
1826 					&mi);
1827 				if (err)
1828 					goto out;
1829 
1830 				/* Layout of records may have changed. */
1831 				attr_b = NULL;
1832 				le = al_find_ex(ni, NULL, ATTR_DATA, NULL, 0,
1833 						&next_svcn);
1834 				if (!le) {
1835 					err = -EINVAL;
1836 					goto out;
1837 				}
1838 			}
1839 
1840 			/* Free all allocated memory. */
1841 			run_truncate(run, 0);
1842 		} else {
1843 			u16 le_sz;
1844 			u16 roff = le16_to_cpu(attr->nres.run_off);
1845 
1846 			if (roff > le32_to_cpu(attr->size)) {
1847 				err = -EINVAL;
1848 				goto out;
1849 			}
1850 
1851 			run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
1852 				      evcn1 - 1, svcn, Add2Ptr(attr, roff),
1853 				      le32_to_cpu(attr->size) - roff);
1854 
1855 			/* Delete this attribute segment. */
1856 			mi_remove_attr(NULL, mi, attr);
1857 			if (!le)
1858 				break;
1859 
1860 			le_sz = le16_to_cpu(le->size);
1861 			if (!al_remove_le(ni, le)) {
1862 				err = -EINVAL;
1863 				goto out;
1864 			}
1865 
1866 			if (evcn1 >= alen)
1867 				break;
1868 
1869 			if (!svcn) {
1870 				/* Load next record that contains this attribute. */
1871 				if (ni_load_mi(ni, le, &mi)) {
1872 					err = -EINVAL;
1873 					goto out;
1874 				}
1875 
1876 				/* Look for required attribute. */
1877 				attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,
1878 						    0, &le->id);
1879 				if (!attr) {
1880 					err = -EINVAL;
1881 					goto out;
1882 				}
1883 				goto next_attr;
1884 			}
1885 			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
1886 		}
1887 
1888 		if (evcn1 >= alen)
1889 			break;
1890 
1891 		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
1892 		if (!attr) {
1893 			err = -EINVAL;
1894 			goto out;
1895 		}
1896 
1897 next_attr:
1898 		svcn = le64_to_cpu(attr->nres.svcn);
1899 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1900 	}
1901 
1902 	if (!attr_b) {
1903 		le_b = NULL;
1904 		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
1905 				      &mi_b);
1906 		if (!attr_b) {
1907 			err = -ENOENT;
1908 			goto out;
1909 		}
1910 	}
1911 
1912 	data_size -= bytes;
1913 	valid_size = ni->i_valid;
1914 	if (vbo + bytes <= valid_size)
1915 		valid_size -= bytes;
1916 	else if (vbo < valid_size)
1917 		valid_size = vbo;
1918 
1919 	attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
1920 	attr_b->nres.data_size = cpu_to_le64(data_size);
1921 	attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
1922 	total_size -= (u64)dealloc << sbi->cluster_bits;
1923 	if (is_attr_ext(attr_b))
1924 		attr_b->nres.total_size = cpu_to_le64(total_size);
1925 	mi_b->dirty = true;
1926 
1927 	/* Update inode size. */
1928 	ni->i_valid = valid_size;
1929 	ni->vfs_inode.i_size = data_size;
1930 	inode_set_bytes(&ni->vfs_inode, total_size);
1931 	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
1932 	mark_inode_dirty(&ni->vfs_inode);
1933 
1934 out:
1935 	up_write(&ni->file.run_lock);
1936 	if (err)
1937 		make_bad_inode(&ni->vfs_inode);
1938 
1939 	return err;
1940 }
1941 
1942 /*
1943  * attr_punch_hole
1944  *
1945  * Not for normal (non-sparse, non-compressed) files.
1946  */
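/*
 * If the range is not aligned to the compression unit, E_NTFS_NOTALIGNED is
 * returned together with *frame_size so the caller can zero the partial
 * frames itself.
 */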
1947 int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
1948 {
1949 	int err = 0;
1950 	struct runs_tree *run = &ni->file.run;
1951 	struct ntfs_sb_info *sbi = ni->mi.sbi;
1952 	struct ATTRIB *attr = NULL, *attr_b;
1953 	struct ATTR_LIST_ENTRY *le, *le_b;
1954 	struct mft_inode *mi, *mi_b;
1955 	CLST svcn, evcn1, vcn, len, end, alen, dealloc;
1956 	u64 total_size, alloc_size;
1957 	u32 mask;
1958 
1959 	if (!bytes)
1960 		return 0;
1961 
1962 	le_b = NULL;
1963 	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1964 	if (!attr_b)
1965 		return -ENOENT;
1966 
1967 	if (!attr_b->non_res) {
1968 		u32 data_size = le32_to_cpu(attr_b->res.data_size);
1969 		u32 from, to;
1970 
1971 		if (vbo > data_size)
1972 			return 0;
1973 
1974 		from = vbo;
1975 		to = min_t(u64, vbo + bytes, data_size);
1976 		memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
1977 		return 0;
1978 	}
1979 
1980 	if (!is_attr_ext(attr_b))
1981 		return -EOPNOTSUPP;
1982 
1983 	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
1984 	total_size = le64_to_cpu(attr_b->nres.total_size);
1985 
1986 	if (vbo >= alloc_size) {
1987 		/* NOTE: Punching a hole beyond the allocated size is allowed. */
1988 		return 0;
1989 	}
1990 
1991 	mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
1992 
1993 	bytes += vbo;
1994 	if (bytes > alloc_size)
1995 		bytes = alloc_size;
1996 	bytes -= vbo;
1997 
1998 	if ((vbo & mask) || (bytes & mask)) {
1999 		/* We have to zero one or more ranges. */
2000 		if (frame_size == NULL) {
2001 			/* Caller insists range is aligned. */
2002 			return -EINVAL;
2003 		}
2004 		*frame_size = mask + 1;
2005 		return E_NTFS_NOTALIGNED;
2006 	}
2007 
2008 	down_write(&ni->file.run_lock);
2009 	/*
2010 	 * Enumerate all attribute segments and punch hole where necessary.
2011 	 */
2012 	alen = alloc_size >> sbi->cluster_bits;
2013 	vcn = vbo >> sbi->cluster_bits;
2014 	len = bytes >> sbi->cluster_bits;
2015 	end = vcn + len;
2016 	dealloc = 0;
2017 
2018 	svcn = le64_to_cpu(attr_b->nres.svcn);
2019 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2020 
2021 	if (svcn <= vcn && vcn < evcn1) {
2022 		attr = attr_b;
2023 		le = le_b;
2024 		mi = mi_b;
2025 	} else if (!le_b) {
2026 		err = -EINVAL;
2027 		goto out;
2028 	} else {
2029 		le = le_b;
2030 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2031 				    &mi);
2032 		if (!attr) {
2033 			err = -EINVAL;
2034 			goto out;
2035 		}
2036 
2037 		svcn = le64_to_cpu(attr->nres.svcn);
2038 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2039 	}
2040 
2041 	while (svcn < end) {
2042 		CLST vcn1, zero, dealloc2;
2043 
2044 		err = attr_load_runs(attr, ni, run, &svcn);
2045 		if (err)
2046 			goto out;
2047 		vcn1 = max(vcn, svcn);
2048 		zero = min(end, evcn1) - vcn1;
2049 
2050 		dealloc2 = dealloc;
2051 		err = run_deallocate_ex(sbi, run, vcn1, zero, &dealloc, true);
2052 		if (err)
2053 			goto out;
2054 
2055 		if (dealloc2 == dealloc) {
2056 			/* Looks like the required range is already sparse. */
2057 		} else {
2058 			if (!run_add_entry(run, vcn1, SPARSE_LCN, zero,
2059 					   false)) {
2060 				err = -ENOMEM;
2061 				goto out;
2062 			}
2063 
2064 			err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
2065 			if (err)
2066 				goto out;
2067 		}
2068 		/* Free all allocated memory. */
2069 		run_truncate(run, 0);
2070 
2071 		if (evcn1 >= alen)
2072 			break;
2073 
2074 		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
2075 		if (!attr) {
2076 			err = -EINVAL;
2077 			goto out;
2078 		}
2079 
2080 		svcn = le64_to_cpu(attr->nres.svcn);
2081 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2082 	}
2083 
2084 	total_size -= (u64)dealloc << sbi->cluster_bits;
2085 	attr_b->nres.total_size = cpu_to_le64(total_size);
2086 	mi_b->dirty = true;
2087 
2088 	/* Update inode size. */
2089 	inode_set_bytes(&ni->vfs_inode, total_size);
2090 	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2091 	mark_inode_dirty(&ni->vfs_inode);
2092 
2093 out:
2094 	up_write(&ni->file.run_lock);
2095 	if (err)
2096 		make_bad_inode(&ni->vfs_inode);
2097 
2098 	return err;
2099 }
2100