/*
 *  linux/fs/ufs/truncate.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 *  from
 *
 *  linux/fs/ext2/truncate.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/truncate.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

/*
 * Real random numbers for secure rm added 94/02/18
 * Idea from Pierre del Perugia <delperug@gla.ecoledoc.ibp.fr>
 */

/*
 * Adaptation to use page cache and UFS2 write support by
 * Evgeniy Dushistov <dushistov@mail.ru>, 2006-2007
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/sched.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

/*
 * Secure deletion currently doesn't work. It interacts very badly
 * with buffers shared with memory mappings, and for that reason
 * can't be done in the truncate() routines. It should instead be
 * done separately in "release()" before calling the truncate routines
 * that will release the actual file blocks.
 *
 *		Linus
 */

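/*
 * DIRECT_BLOCK/DIRECT_FRAGMENT give the number of whole blocks and
 * fragments covered by the current i_size (rounded up); anything the
 * inode addresses at or beyond these indices is past the new end of
 * file and may be freed.
 */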
#define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift)
#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)

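/*
 * Free the data addressed by the direct pointers that lies beyond the
 * new i_size: the tail fragments of the first affected block, the whole
 * direct blocks that follow, and the leading fragments of the last
 * allocated block (up to i_lastfrag).
 */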
static int ufs_trunc_direct(struct inode *inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	void *p;
	u64 frag1, frag2, frag3, frag4, block1, block2;
	unsigned frag_to_free, free_count;
	unsigned i, tmp;
	int retry;

	UFSD("ENTER: ino %lu\n", inode->i_ino);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	frag_to_free = 0;
	free_count = 0;
	retry = 0;

	frag1 = DIRECT_FRAGMENT;
	frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
	frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
	frag3 = frag4 & ~uspi->s_fpbmask;
	block1 = block2 = 0;
	if (frag2 > frag3) {
		frag2 = frag4;
		frag3 = frag4 = 0;
	} else if (frag2 < frag3) {
		block1 = ufs_fragstoblks (frag2);
		block2 = ufs_fragstoblks (frag3);
	}

	UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu,"
	     " frag3 %llu, frag4 %llu\n", inode->i_ino,
	     (unsigned long long)frag1, (unsigned long long)frag2,
	     (unsigned long long)block1, (unsigned long long)block2,
	     (unsigned long long)frag3, (unsigned long long)frag4);

	if (frag1 >= frag2)
		goto next1;

	/*
	 * Free first free fragments
	 */
	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1));
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp)
		ufs_panic (sb, "ufs_trunc_direct", "internal error");
	frag2 -= frag1;
	frag1 = ufs_fragnum (frag1);

	ufs_free_fragments(inode, tmp + frag1, frag2);
	mark_inode_dirty(inode);
	frag_to_free = tmp + frag1;

next1:
	/*
	 * Free whole blocks
	 */
	for (i = block1 ; i < block2; i++) {
		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
		tmp = ufs_data_ptr_to_cpu(sb, p);
		if (!tmp)
			continue;
		ufs_data_ptr_clear(uspi, p);

		if (free_count == 0) {
			frag_to_free = tmp;
			free_count = uspi->s_fpb;
		} else if (free_count > 0 && frag_to_free == tmp - free_count)
			free_count += uspi->s_fpb;
		else {
			ufs_free_blocks (inode, frag_to_free, free_count);
			frag_to_free = tmp;
			free_count = uspi->s_fpb;
		}
		mark_inode_dirty(inode);
	}

	if (free_count > 0)
		ufs_free_blocks (inode, frag_to_free, free_count);

	if (frag3 >= frag4)
		goto next3;

	/*
	 * Free last free fragments
	 */
	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3));
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp)
		ufs_panic(sb, "ufs_trunc_direct", "internal error");
	frag4 = ufs_fragnum (frag4);
	ufs_data_ptr_clear(uspi, p);

	ufs_free_fragments (inode, tmp, frag4);
	mark_inode_dirty(inode);
next3:

	UFSD("EXIT: ino %lu\n", inode->i_ino);
	return retry;
}

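/*
 * Release the data blocks referenced by a single indirect block.  @p
 * points at the slot holding the indirect block's address and @offset
 * is the file block number at which the indirect block starts.  Once
 * every pointer in it has been cleared, the indirect block itself is
 * freed and the slot at @p zeroed.  Returns non-zero if the pointer
 * changed while we were reading it and the caller should retry.
 */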
static int ufs_trunc_indirect(struct inode *inode, u64 offset, void *p)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_buffer_head * ind_ubh;
	void *ind;
	u64 tmp, indirect_block, i, frag_to_free;
	unsigned free_count;
	int retry;

	UFSD("ENTER: ino %lu, offset %llu, p: %p\n",
	     inode->i_ino, (unsigned long long)offset, p);

	BUG_ON(!p);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	frag_to_free = 0;
	free_count = 0;
	retry = 0;

	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp)
		return 0;
	ind_ubh = ubh_bread(sb, tmp, uspi->s_bsize);
	if (tmp != ufs_data_ptr_to_cpu(sb, p)) {
		ubh_brelse (ind_ubh);
		return 1;
	}
	if (!ind_ubh) {
		ufs_data_ptr_clear(uspi, p);
		return 0;
	}

	indirect_block = (DIRECT_BLOCK > offset) ? (DIRECT_BLOCK - offset) : 0;
	for (i = indirect_block; i < uspi->s_apb; i++) {
		ind = ubh_get_data_ptr(uspi, ind_ubh, i);
		tmp = ufs_data_ptr_to_cpu(sb, ind);
		if (!tmp)
			continue;

		ufs_data_ptr_clear(uspi, ind);
		ubh_mark_buffer_dirty(ind_ubh);
		if (free_count == 0) {
			frag_to_free = tmp;
			free_count = uspi->s_fpb;
		} else if (free_count > 0 && frag_to_free == tmp - free_count)
			free_count += uspi->s_fpb;
		else {
			ufs_free_blocks (inode, frag_to_free, free_count);
			frag_to_free = tmp;
			free_count = uspi->s_fpb;
		}

		mark_inode_dirty(inode);
	}

	if (free_count > 0) {
		ufs_free_blocks (inode, frag_to_free, free_count);
	}
	for (i = 0; i < uspi->s_apb; i++)
		if (!ufs_is_data_ptr_zero(uspi,
					  ubh_get_data_ptr(uspi, ind_ubh, i)))
			break;
	if (i >= uspi->s_apb) {
		tmp = ufs_data_ptr_to_cpu(sb, p);
		ufs_data_ptr_clear(uspi, p);

		ufs_free_blocks (inode, tmp, uspi->s_fpb);
		mark_inode_dirty(inode);
		ubh_bforget(ind_ubh);
		ind_ubh = NULL;
	}
	if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh))
		ubh_sync_block(ind_ubh);
	ubh_brelse (ind_ubh);

	UFSD("EXIT: ino %lu\n", inode->i_ino);

	return retry;
}

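/*
 * Same as above, one level up: walk a doubly indirect block and call
 * ufs_trunc_indirect() on each remaining singly indirect block, then
 * free the doubly indirect block itself once it is empty.
 */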
static int ufs_trunc_dindirect(struct inode *inode, u64 offset, void *p)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_buffer_head *dind_bh;
	u64 i, tmp, dindirect_block;
	void *dind;
	int retry = 0;

	UFSD("ENTER: ino %lu\n", inode->i_ino);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	dindirect_block = (DIRECT_BLOCK > offset)
		? ((DIRECT_BLOCK - offset) >> uspi->s_apbshift) : 0;
	retry = 0;

	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp)
		return 0;
	dind_bh = ubh_bread(sb, tmp, uspi->s_bsize);
	if (tmp != ufs_data_ptr_to_cpu(sb, p)) {
		ubh_brelse (dind_bh);
		return 1;
	}
	if (!dind_bh) {
		ufs_data_ptr_clear(uspi, p);
		return 0;
	}

	for (i = dindirect_block ; i < uspi->s_apb ; i++) {
		dind = ubh_get_data_ptr(uspi, dind_bh, i);
		tmp = ufs_data_ptr_to_cpu(sb, dind);
		if (!tmp)
			continue;
		retry |= ufs_trunc_indirect (inode, offset + (i << uspi->s_apbshift), dind);
		ubh_mark_buffer_dirty(dind_bh);
	}

	for (i = 0; i < uspi->s_apb; i++)
		if (!ufs_is_data_ptr_zero(uspi,
					  ubh_get_data_ptr(uspi, dind_bh, i)))
			break;
	if (i >= uspi->s_apb) {
		tmp = ufs_data_ptr_to_cpu(sb, p);
		ufs_data_ptr_clear(uspi, p);

		ufs_free_blocks(inode, tmp, uspi->s_fpb);
		mark_inode_dirty(inode);
		ubh_bforget(dind_bh);
		dind_bh = NULL;
	}
	if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh))
		ubh_sync_block(dind_bh);
	ubh_brelse (dind_bh);

	UFSD("EXIT: ino %lu\n", inode->i_ino);

	return retry;
}

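/*
 * Top level of the indirect chain: walk the triply indirect block and
 * truncate each doubly indirect block that still reaches past the new
 * size, freeing the triply indirect block itself when it becomes empty.
 */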
static int ufs_trunc_tindirect(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct ufs_buffer_head * tind_bh;
	u64 tindirect_block, tmp, i;
	void *tind, *p;
	int retry;

	UFSD("ENTER: ino %lu\n", inode->i_ino);

	retry = 0;

	tindirect_block = (DIRECT_BLOCK > (UFS_NDADDR + uspi->s_apb + uspi->s_2apb))
		? ((DIRECT_BLOCK - UFS_NDADDR - uspi->s_apb - uspi->s_2apb) >> uspi->s_2apbshift) : 0;

	p = ufs_get_direct_data_ptr(uspi, ufsi, UFS_TIND_BLOCK);
	if (!(tmp = ufs_data_ptr_to_cpu(sb, p)))
		return 0;
	tind_bh = ubh_bread (sb, tmp, uspi->s_bsize);
	if (tmp != ufs_data_ptr_to_cpu(sb, p)) {
		ubh_brelse (tind_bh);
		return 1;
	}
	if (!tind_bh) {
		ufs_data_ptr_clear(uspi, p);
		return 0;
	}

	for (i = tindirect_block ; i < uspi->s_apb ; i++) {
		tind = ubh_get_data_ptr(uspi, tind_bh, i);
		retry |= ufs_trunc_dindirect(inode, UFS_NDADDR +
			uspi->s_apb + ((i + 1) << uspi->s_2apbshift), tind);
		ubh_mark_buffer_dirty(tind_bh);
	}
	for (i = 0; i < uspi->s_apb; i++)
		if (!ufs_is_data_ptr_zero(uspi,
					  ubh_get_data_ptr(uspi, tind_bh, i)))
			break;
	if (i >= uspi->s_apb) {
		tmp = ufs_data_ptr_to_cpu(sb, p);
		ufs_data_ptr_clear(uspi, p);

		ufs_free_blocks(inode, tmp, uspi->s_fpb);
		mark_inode_dirty(inode);
		ubh_bforget(tind_bh);
		tind_bh = NULL;
	}
	if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh))
		ubh_sync_block(tind_bh);
	ubh_brelse (tind_bh);

	UFSD("EXIT: ino %lu\n", inode->i_ino);
	return retry;
}

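/*
 * Make sure the fragment containing the new end of file is allocated
 * and marked dirty in the page cache; for files that reach into the
 * indirect area, also zero the remaining fragments of that last block
 * directly on disk.
 */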
static int ufs_alloc_lastblock(struct inode *inode)
{
	int err = 0;
	struct super_block *sb = inode->i_sb;
	struct address_space *mapping = inode->i_mapping;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned i, end;
	sector_t lastfrag;
	struct page *lastpage;
	struct buffer_head *bh;
	u64 phys64;

	lastfrag = (i_size_read(inode) + uspi->s_fsize - 1) >> uspi->s_fshift;

	if (!lastfrag)
		goto out;

	lastfrag--;

	lastpage = ufs_get_locked_page(mapping, lastfrag >>
				       (PAGE_CACHE_SHIFT - inode->i_blkbits));
	if (IS_ERR(lastpage)) {
		err = -EIO;
		goto out;
	}

	end = lastfrag & ((1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1);
	bh = page_buffers(lastpage);
	for (i = 0; i < end; ++i)
		bh = bh->b_this_page;

	err = ufs_getfrag_block(inode, lastfrag, bh, 1);

	if (unlikely(err))
		goto out_unlock;

	if (buffer_new(bh)) {
		clear_buffer_new(bh);
		unmap_underlying_metadata(bh->b_bdev,
					  bh->b_blocknr);
		/*
		 * We do not zero the fragment: if it was mapped to a
		 * hole, it already contains zeroes.
		 */
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		set_page_dirty(lastpage);
	}

	if (lastfrag >= UFS_IND_FRAGMENT) {
		end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1;
		phys64 = bh->b_blocknr + 1;
		for (i = 0; i < end; ++i) {
			bh = sb_getblk(sb, i + phys64);
			lock_buffer(bh);
			memset(bh->b_data, 0, sb->s_blocksize);
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
			unlock_buffer(bh);
			sync_dirty_buffer(bh);
			brelse(bh);
		}
	}
out_unlock:
	ufs_put_locked_page(lastpage);
out:
	return err;
}

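/*
 * Truncate the inode to the current i_size: fix up the last block,
 * zero the partial tail page, then prune the direct, indirect, doubly
 * and triply indirect trees, retrying until no level reports a
 * concurrent change.  old_i_size is restored if the last block cannot
 * be allocated.
 */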
int ufs_truncate(struct inode *inode, loff_t old_i_size)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	int retry, err = 0;

	UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n",
	     inode->i_ino, (unsigned long long)i_size_read(inode),
	     (unsigned long long)old_i_size);

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	err = ufs_alloc_lastblock(inode);

	if (err) {
		i_size_write(inode, old_i_size);
		goto out;
	}

	block_truncate_page(inode->i_mapping, inode->i_size, ufs_getfrag_block);

	while (1) {
		retry = ufs_trunc_direct(inode);
		retry |= ufs_trunc_indirect(inode, UFS_IND_BLOCK,
					    ufs_get_direct_data_ptr(uspi, ufsi,
								    UFS_IND_BLOCK));
		retry |= ufs_trunc_dindirect(inode, UFS_IND_BLOCK + uspi->s_apb,
					     ufs_get_direct_data_ptr(uspi, ufsi,
								     UFS_DIND_BLOCK));
		retry |= ufs_trunc_tindirect (inode);
		if (!retry)
			break;
		if (IS_SYNC(inode) && (inode->i_state & I_DIRTY))
			ufs_sync_inode (inode);
		yield();
	}

	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
	ufsi->i_lastfrag = DIRECT_FRAGMENT;
	mark_inode_dirty(inode);
out:
	UFSD("EXIT: err %d\n", err);
	return err;
}

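/*
 * ->setattr: on a size change, update i_size and run ufs_truncate()
 * under the per-superblock lock before copying the remaining
 * attributes.
 */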
int ufs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	unsigned int ia_valid = attr->ia_valid;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
		loff_t old_i_size = inode->i_size;

		/* XXX(truncate): truncate_setsize should be called last */
		truncate_setsize(inode, attr->ia_size);

		lock_ufs(inode->i_sb);
		error = ufs_truncate(inode, old_i_size);
		unlock_ufs(inode->i_sb);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

const struct inode_operations ufs_file_inode_operations = {
	.setattr = ufs_setattr,
};