1 /*
2  *  linux/fs/ufs/inode.c
3  *
4  * Copyright (C) 1998
5  * Daniel Pirkl <daniel.pirkl@email.cz>
6  * Charles University, Faculty of Mathematics and Physics
7  *
8  *  from
9  *
10  *  linux/fs/ext2/inode.c
11  *
12  * Copyright (C) 1992, 1993, 1994, 1995
13  * Remy Card (card@masi.ibp.fr)
14  * Laboratoire MASI - Institut Blaise Pascal
15  * Universite Pierre et Marie Curie (Paris VI)
16  *
17  *  from
18  *
19  *  linux/fs/minix/inode.c
20  *
21  *  Copyright (C) 1991, 1992  Linus Torvalds
22  *
23  *  Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
24  *  Big-endian to little-endian byte-swapping/bitmaps by
25  *        David S. Miller (davem@caip.rutgers.edu), 1995
26  */
27 
28 #include <asm/uaccess.h>
29 
30 #include <linux/errno.h>
31 #include <linux/fs.h>
32 #include <linux/time.h>
33 #include <linux/stat.h>
34 #include <linux/string.h>
35 #include <linux/mm.h>
36 #include <linux/buffer_head.h>
37 #include <linux/writeback.h>
38 
39 #include "ufs_fs.h"
40 #include "ufs.h"
41 #include "swab.h"
42 #include "util.h"
43 
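/*
 * Split a logical block number into a chain of array indices: the slot in
 * the inode's direct block array, then the index at each level of
 * indirection (single, double, triple).  Returns the depth of that chain,
 * or 0 if the block lies beyond the triple-indirect limit.
 */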
44 static int ufs_block_to_path(struct inode *inode, sector_t i_block, unsigned offsets[4])
45 {
46 	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
47 	int ptrs = uspi->s_apb;
48 	int ptrs_bits = uspi->s_apbshift;
49 	const long direct_blocks = UFS_NDADDR,
50 		indirect_blocks = ptrs,
51 		double_blocks = (1 << (ptrs_bits * 2));
52 	int n = 0;
53 
54 
55 	UFSD("ptrs=uspi->s_apb = %d,double_blocks=%ld \n",ptrs,double_blocks);
56 	if (i_block < direct_blocks) {
57 		offsets[n++] = i_block;
58 	} else if ((i_block -= direct_blocks) < indirect_blocks) {
59 		offsets[n++] = UFS_IND_BLOCK;
60 		offsets[n++] = i_block;
61 	} else if ((i_block -= indirect_blocks) < double_blocks) {
62 		offsets[n++] = UFS_DIND_BLOCK;
63 		offsets[n++] = i_block >> ptrs_bits;
64 		offsets[n++] = i_block & (ptrs - 1);
65 	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
66 		offsets[n++] = UFS_TIND_BLOCK;
67 		offsets[n++] = i_block >> (ptrs_bits * 2);
68 		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
69 		offsets[n++] = i_block & (ptrs - 1);
70 	} else {
71 		ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
72 	}
73 	return n;
74 }
75 
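/*
 * An Indirect entry records one step of the block pointer chain: the
 * location of the pointer (p), a snapshot of its value (key32/key64) and
 * the buffer it lives in.  The grow_chain32()/grow_chain64() helpers below
 * append an entry and, under ufsi->meta_lock, re-check that every cached
 * key still matches the pointer it was read from; on a mismatch the caller
 * restarts the whole walk.
 */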
76 typedef struct {
77 	void	*p;
78 	union {
79 		__fs32	key32;
80 		__fs64	key64;
81 	};
82 	struct buffer_head *bh;
83 } Indirect;
84 
85 static inline int grow_chain32(struct ufs_inode_info *ufsi,
86 			       struct buffer_head *bh, __fs32 *v,
87 			       Indirect *from, Indirect *to)
88 {
89 	Indirect *p;
90 	unsigned seq;
91 	to->bh = bh;
92 	do {
93 		seq = read_seqbegin(&ufsi->meta_lock);
94 		to->key32 = *(__fs32 *)(to->p = v);
95 		for (p = from; p <= to && p->key32 == *(__fs32 *)p->p; p++)
96 			;
97 	} while (read_seqretry(&ufsi->meta_lock, seq));
98 	return (p > to);
99 }
100 
101 static inline int grow_chain64(struct ufs_inode_info *ufsi,
102 			       struct buffer_head *bh, __fs64 *v,
103 			       Indirect *from, Indirect *to)
104 {
105 	Indirect *p;
106 	unsigned seq;
107 	to->bh = bh;
108 	do {
109 		seq = read_seqbegin(&ufsi->meta_lock);
110 		to->key64 = *(__fs64 *)(to->p = v);
111 		for (p = from; p <= to && p->key64 == *(__fs64 *)p->p; p++)
112 			;
113 	} while (read_seqretry(&ufsi->meta_lock, seq));
114 	return (p > to);
115 }
116 
117 /*
118  * Returns the location of the fragment from
119  * the beginning of the filesystem.
120  */
121 
122 static u64 ufs_frag_map(struct inode *inode, unsigned offsets[4], int depth)
123 {
124 	struct ufs_inode_info *ufsi = UFS_I(inode);
125 	struct super_block *sb = inode->i_sb;
126 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
127 	u64 mask = (u64) uspi->s_apbmask>>uspi->s_fpbshift;
128 	int shift = uspi->s_apbshift-uspi->s_fpbshift;
129 	Indirect chain[4], *q = chain;
130 	unsigned *p;
131 	unsigned flags = UFS_SB(sb)->s_flags;
132 	u64 res = 0;
133 
134 	UFSD(": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n",
135 		uspi->s_fpbshift, uspi->s_apbmask,
136 		(unsigned long long)mask);
137 
138 	if (depth == 0)
139 		goto no_block;
140 
141 again:
142 	p = offsets;
143 
144 	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
145 		goto ufs2;
146 
147 	if (!grow_chain32(ufsi, NULL, &ufsi->i_u1.i_data[*p++], chain, q))
148 		goto changed;
149 	if (!q->key32)
150 		goto no_block;
151 	while (--depth) {
152 		__fs32 *ptr;
153 		struct buffer_head *bh;
154 		unsigned n = *p++;
155 
156 		bh = sb_bread(sb, uspi->s_sbbase +
157 				  fs32_to_cpu(sb, q->key32) + (n>>shift));
158 		if (!bh)
159 			goto no_block;
160 		ptr = (__fs32 *)bh->b_data + (n & mask);
161 		if (!grow_chain32(ufsi, bh, ptr, chain, ++q))
162 			goto changed;
163 		if (!q->key32)
164 			goto no_block;
165 	}
166 	res = fs32_to_cpu(sb, q->key32);
167 	goto found;
168 
169 ufs2:
170 	if (!grow_chain64(ufsi, NULL, &ufsi->i_u1.u2_i_data[*p++], chain, q))
171 		goto changed;
172 	if (!q->key64)
173 		goto no_block;
174 
175 	while (--depth) {
176 		__fs64 *ptr;
177 		struct buffer_head *bh;
178 		unsigned n = *p++;
179 
180 		bh = sb_bread(sb, uspi->s_sbbase +
181 				  fs64_to_cpu(sb, q->key64) + (n>>shift));
182 		if (!bh)
183 			goto no_block;
184 		ptr = (__fs64 *)bh->b_data + (n & mask);
185 		if (!grow_chain64(ufsi, bh, ptr, chain, ++q))
186 			goto changed;
187 		if (!q->key64)
188 			goto no_block;
189 	}
190 	res = fs64_to_cpu(sb, q->key64);
191 found:
192 	res += uspi->s_sbbase;
193 no_block:
194 	while (q > chain) {
195 		brelse(q->bh);
196 		q--;
197 	}
198 	return res;
199 
200 changed:
201 	while (q > chain) {
202 		brelse(q->bh);
203 		q--;
204 	}
205 	goto again;
206 }
207 
208 /*
209  * Unpacking tails: we have a file with partial final block and
210  * we had been asked to extend it.  If the fragment being written
211  * is within the same block, we need to extend the tail just to cover
212  * that fragment.  Otherwise the tail is extended to full block.
213  *
214  * Note that we might need to create a _new_ tail, but that will
215  * be handled elsewhere; this is strictly for resizing old
216  * ones.
217  */
218 static bool
219 ufs_extend_tail(struct inode *inode, u64 writes_to,
220 		  int *err, struct page *locked_page)
221 {
222 	struct ufs_inode_info *ufsi = UFS_I(inode);
223 	struct super_block *sb = inode->i_sb;
224 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
225 	unsigned lastfrag = ufsi->i_lastfrag;	/* it's a short file, so unsigned is enough */
226 	unsigned block = ufs_fragstoblks(lastfrag);
227 	unsigned new_size;
228 	void *p;
229 	u64 tmp;
230 
231 	if (writes_to < (lastfrag | uspi->s_fpbmask))
232 		new_size = (writes_to & uspi->s_fpbmask) + 1;
233 	else
234 		new_size = uspi->s_fpb;
235 
236 	p = ufs_get_direct_data_ptr(uspi, ufsi, block);
237 	tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
238 				new_size - (lastfrag & uspi->s_fpbmask), err,
239 				locked_page);
240 	return tmp != 0;
241 }
242 
243 /**
244  * ufs_inode_getfrag() - allocate new fragment(s)
245  * @inode: pointer to inode
246  * @index: number of block pointer within the inode's array.
247  * @new_fragment: number of the newly allocated fragment(s)
248  * @err: we set it if something goes wrong
249  * @new: we set it if we allocate new block
250  * @locked_page: for ufs_new_fragments()
251  */
252 static u64
253 ufs_inode_getfrag(struct inode *inode, unsigned index,
254 		  sector_t new_fragment, int *err,
255 		  int *new, struct page *locked_page)
256 {
257 	struct ufs_inode_info *ufsi = UFS_I(inode);
258 	struct super_block *sb = inode->i_sb;
259 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
260 	u64 tmp, goal, lastfrag;
261 	unsigned nfrags = uspi->s_fpb;
262 	void *p;
263 
264         /* TODO : to be done for write support
265         if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
266              goto ufs2;
267          */
268 
269 	p = ufs_get_direct_data_ptr(uspi, ufsi, index);
270 	tmp = ufs_data_ptr_to_cpu(sb, p);
271 	if (tmp)
272 		goto out;
273 
274 	lastfrag = ufsi->i_lastfrag;
275 
276 	/* will that be a new tail? */
277 	if (new_fragment < UFS_NDIR_FRAGMENT && new_fragment >= lastfrag)
278 		nfrags = (new_fragment & uspi->s_fpbmask) + 1;
279 
280 	goal = 0;
281 	if (index) {
282 		goal = ufs_data_ptr_to_cpu(sb,
283 				 ufs_get_direct_data_ptr(uspi, ufsi, index - 1));
284 		if (goal)
285 			goal += uspi->s_fpb;
286 	}
287 	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
288 				goal, nfrags, err, locked_page);
289 
290 	if (!tmp) {
291 		*err = -ENOSPC;
292 		return 0;
293 	}
294 
295 	if (new)
296 		*new = 1;
297 	inode->i_ctime = CURRENT_TIME_SEC;
298 	if (IS_SYNC(inode))
299 		ufs_sync_inode (inode);
300 	mark_inode_dirty(inode);
301 out:
302 	return tmp + uspi->s_sbbase;
303 
304      /* This part : To be implemented ....
305         Required only for writing, not required for READ-ONLY.
306 ufs2:
307 
308 	u2_block = ufs_fragstoblks(fragment);
309 	u2_blockoff = ufs_fragnum(fragment);
310 	p = ufsi->i_u1.u2_i_data + block;
311 	goal = 0;
312 
313 repeat2:
314 	tmp = fs32_to_cpu(sb, *p);
315 	lastfrag = ufsi->i_lastfrag;
316 
317      */
318 }
319 
320 /**
321  * ufs_inode_getblock() - allocate new block
322  * @inode: pointer to inode
323  * @ind_block: block number of the indirect block
324  * @index: number of pointer within the indirect block
325  * @new_fragment: number of new allocated fragment
326  *  (block will hold this fragment and also uspi->s_fpb-1)
327  * @err: see ufs_inode_getfrag()
328  * @new: see ufs_inode_getfrag()
329  * @locked_page: see ufs_inode_getfrag()
330  */
331 static u64
332 ufs_inode_getblock(struct inode *inode, u64 ind_block,
333 		  unsigned index, sector_t new_fragment, int *err,
334 		  int *new, struct page *locked_page)
335 {
336 	struct super_block *sb = inode->i_sb;
337 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
338 	int shift = uspi->s_apbshift - uspi->s_fpbshift;
339 	u64 tmp = 0, goal;
340 	struct buffer_head *bh;
341 	void *p;
342 
343 	if (!ind_block)
344 		return 0;
345 
346 	bh = sb_bread(sb, ind_block + (index >> shift));
347 	if (unlikely(!bh)) {
348 		*err = -EIO;
349 		return 0;
350 	}
351 
352 	index &= uspi->s_apbmask >> uspi->s_fpbshift;
353 	if (uspi->fs_magic == UFS2_MAGIC)
354 		p = (__fs64 *)bh->b_data + index;
355 	else
356 		p = (__fs32 *)bh->b_data + index;
357 
358 	tmp = ufs_data_ptr_to_cpu(sb, p);
359 	if (tmp)
360 		goto out;
361 
362 	if (index && (uspi->fs_magic == UFS2_MAGIC ?
363 		      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[index-1])) :
364 		      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[index-1]))))
365 		goal = tmp + uspi->s_fpb;
366 	else
367 		goal = bh->b_blocknr + uspi->s_fpb;
368 	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
369 				uspi->s_fpb, err, locked_page);
370 	if (!tmp)
371 		goto out;
372 
373 	if (new)
374 		*new = 1;
375 
376 	mark_buffer_dirty(bh);
377 	if (IS_SYNC(inode))
378 		sync_dirty_buffer(bh);
379 	inode->i_ctime = CURRENT_TIME_SEC;
380 	mark_inode_dirty(inode);
381 out:
382 	brelse (bh);
383 	UFSD("EXIT\n");
384 	if (tmp)
385 		tmp += uspi->s_sbbase;
386 	return tmp;
387 }
388 
389 /**
390  * ufs_getfrag_block() - `get_block_t' function, interface between UFS and
391  * readpage, writepage and so on
392  */
393 
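/*
 * Read-only lookups simply map the existing fragment via ufs_frag_map().
 * With create != 0, the missing parts of the chain are allocated under
 * truncate_mutex, extending a partial tail block first when needed.
 */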
394 static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
395 {
396 	struct super_block *sb = inode->i_sb;
397 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
398 	int err = 0, new = 0;
399 	unsigned offsets[4];
400 	int depth = ufs_block_to_path(inode, fragment >> uspi->s_fpbshift, offsets);
401 	u64 phys64 = 0;
402 	unsigned frag = fragment & uspi->s_fpbmask;
403 
404 	if (!create) {
405 		phys64 = ufs_frag_map(inode, offsets, depth);
406 		if (phys64)
407 			map_bh(bh_result, sb, phys64 + frag);
408 		return 0;
409 	}
410 
411         /* This code is entered only while writing ....? */
412 
413 	mutex_lock(&UFS_I(inode)->truncate_mutex);
414 
415 	UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
416 	if (unlikely(!depth)) {
417 		ufs_warning(sb, "ufs_get_block", "block > big");
418 		err = -EIO;
419 		goto out;
420 	}
421 
422 	if (UFS_I(inode)->i_lastfrag < UFS_NDIR_FRAGMENT) {
423 		unsigned lastfrag = UFS_I(inode)->i_lastfrag;
424 		unsigned tailfrags = lastfrag & uspi->s_fpbmask;
425 		if (tailfrags && fragment >= lastfrag) {
426 			if (!ufs_extend_tail(inode, fragment,
427 					     &err, bh_result->b_page))
428 				goto out;
429 		}
430 	}
431 
432 	if (depth == 1) {
433 		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
434 					   &err, &new, bh_result->b_page);
435 	} else {
436 		int i;
437 		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
438 					   &err, NULL, NULL);
439 		for (i = 1; i < depth - 1; i++)
440 			phys64 = ufs_inode_getblock(inode, phys64, offsets[i],
441 						fragment, &err, NULL, NULL);
442 		phys64 = ufs_inode_getblock(inode, phys64, offsets[depth - 1],
443 					fragment, &err, &new, bh_result->b_page);
444 	}
445 out:
446 	if (phys64) {
447 		phys64 += frag;
448 		map_bh(bh_result, sb, phys64);
449 		if (new)
450 			set_buffer_new(bh_result);
451 	}
452 	mutex_unlock(&UFS_I(inode)->truncate_mutex);
453 	return err;
454 }
455 
456 static int ufs_writepage(struct page *page, struct writeback_control *wbc)
457 {
458 	return block_write_full_page(page,ufs_getfrag_block,wbc);
459 }
460 
461 static int ufs_readpage(struct file *file, struct page *page)
462 {
463 	return block_read_full_page(page,ufs_getfrag_block);
464 }
465 
466 int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len)
467 {
468 	return __block_write_begin(page, pos, len, ufs_getfrag_block);
469 }
470 
471 static void ufs_truncate_blocks(struct inode *);
472 
473 static void ufs_write_failed(struct address_space *mapping, loff_t to)
474 {
475 	struct inode *inode = mapping->host;
476 
477 	if (to > inode->i_size) {
478 		truncate_pagecache(inode, inode->i_size);
479 		ufs_truncate_blocks(inode);
480 	}
481 }
482 
483 static int ufs_write_begin(struct file *file, struct address_space *mapping,
484 			loff_t pos, unsigned len, unsigned flags,
485 			struct page **pagep, void **fsdata)
486 {
487 	int ret;
488 
489 	ret = block_write_begin(mapping, pos, len, flags, pagep,
490 				ufs_getfrag_block);
491 	if (unlikely(ret))
492 		ufs_write_failed(mapping, pos + len);
493 
494 	return ret;
495 }
496 
497 static int ufs_write_end(struct file *file, struct address_space *mapping,
498 			loff_t pos, unsigned len, unsigned copied,
499 			struct page *page, void *fsdata)
500 {
501 	int ret;
502 
503 	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
504 	if (ret < len)
505 		ufs_write_failed(mapping, pos + len);
506 	return ret;
507 }
508 
509 static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
510 {
511 	return generic_block_bmap(mapping,block,ufs_getfrag_block);
512 }
513 
514 const struct address_space_operations ufs_aops = {
515 	.readpage = ufs_readpage,
516 	.writepage = ufs_writepage,
517 	.write_begin = ufs_write_begin,
518 	.write_end = ufs_write_end,
519 	.bmap = ufs_bmap
520 };
521 
522 static void ufs_set_inode_ops(struct inode *inode)
523 {
524 	if (S_ISREG(inode->i_mode)) {
525 		inode->i_op = &ufs_file_inode_operations;
526 		inode->i_fop = &ufs_file_operations;
527 		inode->i_mapping->a_ops = &ufs_aops;
528 	} else if (S_ISDIR(inode->i_mode)) {
529 		inode->i_op = &ufs_dir_inode_operations;
530 		inode->i_fop = &ufs_dir_operations;
531 		inode->i_mapping->a_ops = &ufs_aops;
532 	} else if (S_ISLNK(inode->i_mode)) {
533 		if (!inode->i_blocks) {
534 			inode->i_op = &ufs_fast_symlink_inode_operations;
535 			inode->i_link = (char *)UFS_I(inode)->i_u1.i_symlink;
536 		} else {
537 			inode->i_op = &ufs_symlink_inode_operations;
538 			inode->i_mapping->a_ops = &ufs_aops;
539 		}
540 	} else
541 		init_special_inode(inode, inode->i_mode,
542 				   ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
543 }
544 
545 static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
546 {
547 	struct ufs_inode_info *ufsi = UFS_I(inode);
548 	struct super_block *sb = inode->i_sb;
549 	umode_t mode;
550 
551 	/*
552 	 * Copy data to the in-core inode.
553 	 */
554 	inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
555 	set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
556 	if (inode->i_nlink == 0) {
557 		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
558 		return -1;
559 	}
560 
561 	/*
562 	 * Linux now has 32-bit uid and gid, so we can support EFT.
563 	 */
564 	i_uid_write(inode, ufs_get_inode_uid(sb, ufs_inode));
565 	i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));
566 
567 	inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
568 	inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
569 	inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
570 	inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
571 	inode->i_mtime.tv_nsec = 0;
572 	inode->i_atime.tv_nsec = 0;
573 	inode->i_ctime.tv_nsec = 0;
574 	inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
575 	inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen);
576 	ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
577 	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
578 	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
579 
580 
581 	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
582 		memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
583 		       sizeof(ufs_inode->ui_u2.ui_addr));
584 	} else {
585 		memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink,
586 		       sizeof(ufs_inode->ui_u2.ui_symlink) - 1);
587 		ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0;
588 	}
589 	return 0;
590 }
591 
592 static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
593 {
594 	struct ufs_inode_info *ufsi = UFS_I(inode);
595 	struct super_block *sb = inode->i_sb;
596 	umode_t mode;
597 
598 	UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
599 	/*
600 	 * Copy data to the in-core inode.
601 	 */
602 	inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
603 	set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink));
604 	if (inode->i_nlink == 0) {
605 		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
606 		return -1;
607 	}
608 
609         /*
610          * Linux now has 32-bit uid and gid, so we can support EFT.
611          */
612 	i_uid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_uid));
613 	i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid));
614 
615 	inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
616 	inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime);
617 	inode->i_ctime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_ctime);
618 	inode->i_mtime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_mtime);
619 	inode->i_atime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_atimensec);
620 	inode->i_ctime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_ctimensec);
621 	inode->i_mtime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_mtimensec);
622 	inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
623 	inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
624 	ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
625 	/*
626 	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
627 	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
628 	*/
629 
630 	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
631 		memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr,
632 		       sizeof(ufs2_inode->ui_u2.ui_addr));
633 	} else {
634 		memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink,
635 		       sizeof(ufs2_inode->ui_u2.ui_symlink) - 1);
636 		ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0;
637 	}
638 	return 0;
639 }
640 
641 struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
642 {
643 	struct ufs_inode_info *ufsi;
644 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
645 	struct buffer_head * bh;
646 	struct inode *inode;
647 	int err;
648 
649 	UFSD("ENTER, ino %lu\n", ino);
650 
651 	if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) {
652 		ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n",
653 			    ino);
654 		return ERR_PTR(-EIO);
655 	}
656 
657 	inode = iget_locked(sb, ino);
658 	if (!inode)
659 		return ERR_PTR(-ENOMEM);
660 	if (!(inode->i_state & I_NEW))
661 		return inode;
662 
663 	ufsi = UFS_I(inode);
664 
665 	bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
666 	if (!bh) {
667 		ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n",
668 			    inode->i_ino);
669 		goto bad_inode;
670 	}
671 	if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
672 		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;
673 
674 		err = ufs2_read_inode(inode,
675 				      ufs2_inode + ufs_inotofsbo(inode->i_ino));
676 	} else {
677 		struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;
678 
679 		err = ufs1_read_inode(inode,
680 				      ufs_inode + ufs_inotofsbo(inode->i_ino));
681 	}
682 
683 	if (err)
684 		goto bad_inode;
685 	inode->i_version++;
686 	ufsi->i_lastfrag =
687 		(inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
688 	ufsi->i_dir_start_lookup = 0;
689 	ufsi->i_osync = 0;
690 
691 	ufs_set_inode_ops(inode);
692 
693 	brelse(bh);
694 
695 	UFSD("EXIT\n");
696 	unlock_new_inode(inode);
697 	return inode;
698 
699 bad_inode:
700 	iget_failed(inode);
701 	return ERR_PTR(-EIO);
702 }
703 
704 static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
705 {
706 	struct super_block *sb = inode->i_sb;
707  	struct ufs_inode_info *ufsi = UFS_I(inode);
708 
709 	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
710 	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);
711 
712 	ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode));
713 	ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode));
714 
715 	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
716 	ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
717 	ufs_inode->ui_atime.tv_usec = 0;
718 	ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec);
719 	ufs_inode->ui_ctime.tv_usec = 0;
720 	ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
721 	ufs_inode->ui_mtime.tv_usec = 0;
722 	ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
723 	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
724 	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);
725 
726 	if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) {
727 		ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
728 		ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
729 	}
730 
731 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
732 		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
733 		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
734 	} else if (inode->i_blocks) {
735 		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
736 		       sizeof(ufs_inode->ui_u2.ui_addr));
737 	}
738 	else {
739 		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
740 		       sizeof(ufs_inode->ui_u2.ui_symlink));
741 	}
742 
743 	if (!inode->i_nlink)
744 		memset (ufs_inode, 0, sizeof(struct ufs_inode));
745 }
746 
747 static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
748 {
749 	struct super_block *sb = inode->i_sb;
750  	struct ufs_inode_info *ufsi = UFS_I(inode);
751 
752 	UFSD("ENTER\n");
753 	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
754 	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);
755 
756 	ufs_inode->ui_uid = cpu_to_fs32(sb, i_uid_read(inode));
757 	ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode));
758 
759 	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
760 	ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec);
761 	ufs_inode->ui_atimensec = cpu_to_fs32(sb, inode->i_atime.tv_nsec);
762 	ufs_inode->ui_ctime = cpu_to_fs64(sb, inode->i_ctime.tv_sec);
763 	ufs_inode->ui_ctimensec = cpu_to_fs32(sb, inode->i_ctime.tv_nsec);
764 	ufs_inode->ui_mtime = cpu_to_fs64(sb, inode->i_mtime.tv_sec);
765 	ufs_inode->ui_mtimensec = cpu_to_fs32(sb, inode->i_mtime.tv_nsec);
766 
767 	ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
768 	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
769 	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);
770 
771 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
772 		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
773 		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
774 	} else if (inode->i_blocks) {
775 		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
776 		       sizeof(ufs_inode->ui_u2.ui_addr));
777 	} else {
778 		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
779 		       sizeof(ufs_inode->ui_u2.ui_symlink));
780  	}
781 
782 	if (!inode->i_nlink)
783 		memset (ufs_inode, 0, sizeof(struct ufs2_inode));
784 	UFSD("EXIT\n");
785 }
786 
787 static int ufs_update_inode(struct inode * inode, int do_sync)
788 {
789 	struct super_block *sb = inode->i_sb;
790 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
791 	struct buffer_head * bh;
792 
793 	UFSD("ENTER, ino %lu\n", inode->i_ino);
794 
795 	if (inode->i_ino < UFS_ROOTINO ||
796 	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
797 		ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino);
798 		return -1;
799 	}
800 
801 	bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
802 	if (!bh) {
803 		ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
804 		return -1;
805 	}
806 	if (uspi->fs_magic == UFS2_MAGIC) {
807 		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;
808 
809 		ufs2_update_inode(inode,
810 				  ufs2_inode + ufs_inotofsbo(inode->i_ino));
811 	} else {
812 		struct ufs_inode *ufs_inode = (struct ufs_inode *) bh->b_data;
813 
814 		ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
815 	}
816 
817 	mark_buffer_dirty(bh);
818 	if (do_sync)
819 		sync_dirty_buffer(bh);
820 	brelse (bh);
821 
822 	UFSD("EXIT\n");
823 	return 0;
824 }
825 
826 int ufs_write_inode(struct inode *inode, struct writeback_control *wbc)
827 {
828 	return ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
829 }
830 
831 int ufs_sync_inode (struct inode *inode)
832 {
833 	return ufs_update_inode (inode, 1);
834 }
835 
836 void ufs_evict_inode(struct inode * inode)
837 {
838 	int want_delete = 0;
839 
840 	if (!inode->i_nlink && !is_bad_inode(inode))
841 		want_delete = 1;
842 
843 	truncate_inode_pages_final(&inode->i_data);
844 	if (want_delete) {
845 		inode->i_size = 0;
846 		if (inode->i_blocks)
847 			ufs_truncate_blocks(inode);
848 	}
849 
850 	invalidate_inode_buffers(inode);
851 	clear_inode(inode);
852 
853 	if (want_delete)
854 		ufs_free_inode(inode);
855 }
856 
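/*
 * struct to_free batches contiguous runs of blocks being released:
 * free_data() only calls ufs_free_blocks() once the next range is no
 * longer adjacent to the accumulated one, and a final free_data(ctx, 0, 0)
 * flushes whatever is still pending.
 */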
857 struct to_free {
858 	struct inode *inode;
859 	u64 to;
860 	unsigned count;
861 };
862 
863 static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
864 {
865 	if (ctx->count && ctx->to != from) {
866 		ufs_free_blocks(ctx->inode, ctx->to - ctx->count, ctx->count);
867 		ctx->count = 0;
868 	}
869 	ctx->count += count;
870 	ctx->to = from + count;
871 }
872 
873 #define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift)
874 #define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)
875 
876 static void ufs_trunc_direct(struct inode *inode)
877 {
878 	struct ufs_inode_info *ufsi = UFS_I(inode);
879 	struct super_block * sb;
880 	struct ufs_sb_private_info * uspi;
881 	void *p;
882 	u64 frag1, frag2, frag3, frag4, block1, block2;
883 	struct to_free ctx = {.inode = inode};
884 	unsigned i, tmp;
885 
886 	UFSD("ENTER: ino %lu\n", inode->i_ino);
887 
888 	sb = inode->i_sb;
889 	uspi = UFS_SB(sb)->s_uspi;
890 
891 	frag1 = DIRECT_FRAGMENT;
892 	frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
893 	frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
894 	frag3 = frag4 & ~uspi->s_fpbmask;
895 	block1 = block2 = 0;
896 	if (frag2 > frag3) {
897 		frag2 = frag4;
898 		frag3 = frag4 = 0;
899 	} else if (frag2 < frag3) {
900 		block1 = ufs_fragstoblks (frag2);
901 		block2 = ufs_fragstoblks (frag3);
902 	}
903 
904 	UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu,"
905 	     " frag3 %llu, frag4 %llu\n", inode->i_ino,
906 	     (unsigned long long)frag1, (unsigned long long)frag2,
907 	     (unsigned long long)block1, (unsigned long long)block2,
908 	     (unsigned long long)frag3, (unsigned long long)frag4);
909 
910 	if (frag1 >= frag2)
911 		goto next1;
912 
913 	/*
914 	 * Free first free fragments
915 	 */
916 	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1));
917 	tmp = ufs_data_ptr_to_cpu(sb, p);
918 	if (!tmp )
919 		ufs_panic (sb, "ufs_trunc_direct", "internal error");
920 	frag2 -= frag1;
921 	frag1 = ufs_fragnum (frag1);
922 
923 	ufs_free_fragments(inode, tmp + frag1, frag2);
924 
925 next1:
926 	/*
927 	 * Free whole blocks
928 	 */
929 	for (i = block1 ; i < block2; i++) {
930 		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
931 		tmp = ufs_data_ptr_to_cpu(sb, p);
932 		if (!tmp)
933 			continue;
934 		write_seqlock(&ufsi->meta_lock);
935 		ufs_data_ptr_clear(uspi, p);
936 		write_sequnlock(&ufsi->meta_lock);
937 
938 		free_data(&ctx, tmp, uspi->s_fpb);
939 	}
940 
941 	free_data(&ctx, 0, 0);
942 
943 	if (frag3 >= frag4)
944 		goto next3;
945 
946 	/*
947 	 * Free last free fragments
948 	 */
949 	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3));
950 	tmp = ufs_data_ptr_to_cpu(sb, p);
951 	if (!tmp )
952 		ufs_panic(sb, "ufs_truncate_direct", "internal error");
953 	frag4 = ufs_fragnum (frag4);
954 	write_seqlock(&ufsi->meta_lock);
955 	ufs_data_ptr_clear(uspi, p);
956 	write_sequnlock(&ufsi->meta_lock);
957 
958 	ufs_free_fragments (inode, tmp, frag4);
959  next3:
960 
961 	UFSD("EXIT: ino %lu\n", inode->i_ino);
962 }
963 
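/*
 * Free an indirect block and everything below it: interior levels recurse
 * into each live pointer, the last level releases the data blocks through
 * the to_free batching above, and finally the indirect block itself is
 * returned to the free list.
 */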
964 static void free_full_branch(struct inode *inode, u64 ind_block, int depth)
965 {
966 	struct super_block *sb = inode->i_sb;
967 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
968 	struct ufs_buffer_head *ubh = ubh_bread(sb, ind_block, uspi->s_bsize);
969 	unsigned i;
970 
971 	if (!ubh)
972 		return;
973 
974 	if (--depth) {
975 		for (i = 0; i < uspi->s_apb; i++) {
976 			void *p = ubh_get_data_ptr(uspi, ubh, i);
977 			u64 block = ufs_data_ptr_to_cpu(sb, p);
978 			if (block)
979 				free_full_branch(inode, block, depth);
980 		}
981 	} else {
982 		struct to_free ctx = {.inode = inode};
983 
984 		for (i = 0; i < uspi->s_apb; i++) {
985 			void *p = ubh_get_data_ptr(uspi, ubh, i);
986 			u64 block = ufs_data_ptr_to_cpu(sb, p);
987 			if (block)
988 				free_data(&ctx, block, uspi->s_fpb);
989 		}
990 		free_data(&ctx, 0, 0);
991 	}
992 
993 	ubh_bforget(ubh);
994 	ufs_free_blocks(inode, ind_block, uspi->s_fpb);
995 }
996 
997 static void free_branch_tail(struct inode *inode, unsigned from, struct ufs_buffer_head *ubh, int depth)
998 {
999 	struct super_block *sb = inode->i_sb;
1000 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
1001 	unsigned i;
1002 
1003 	if (--depth) {
1004 		for (i = from; i < uspi->s_apb ; i++) {
1005 			void *p = ubh_get_data_ptr(uspi, ubh, i);
1006 			u64 block = ufs_data_ptr_to_cpu(sb, p);
1007 			if (block) {
1008 				write_seqlock(&UFS_I(inode)->meta_lock);
1009 				ufs_data_ptr_clear(uspi, p);
1010 				write_sequnlock(&UFS_I(inode)->meta_lock);
1011 				ubh_mark_buffer_dirty(ubh);
1012 				free_full_branch(inode, block, depth);
1013 			}
1014 		}
1015 	} else {
1016 		struct to_free ctx = {.inode = inode};
1017 
1018 		for (i = from; i < uspi->s_apb; i++) {
1019 			void *p = ubh_get_data_ptr(uspi, ubh, i);
1020 			u64 block = ufs_data_ptr_to_cpu(sb, p);
1021 			if (block) {
1022 				write_seqlock(&UFS_I(inode)->meta_lock);
1023 				ufs_data_ptr_clear(uspi, p);
1024 				write_sequnlock(&UFS_I(inode)->meta_lock);
1025 				ubh_mark_buffer_dirty(ubh);
1026 				free_data(&ctx, block, uspi->s_fpb);
1027 			}
1028 		}
1029 		free_data(&ctx, 0, 0);
1030 	}
1031 	if (IS_SYNC(inode) && ubh_buffer_dirty(ubh))
1032 		ubh_sync_block(ubh);
1033 	ubh_brelse(ubh);
1034 }
1035 
1036 static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
1037 {
1038 	int err = 0;
1039 	struct super_block *sb = inode->i_sb;
1040 	struct address_space *mapping = inode->i_mapping;
1041 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
1042 	unsigned i, end;
1043 	sector_t lastfrag;
1044 	struct page *lastpage;
1045 	struct buffer_head *bh;
1046 	u64 phys64;
1047 
1048 	lastfrag = (size + uspi->s_fsize - 1) >> uspi->s_fshift;
1049 
1050 	if (!lastfrag)
1051 		goto out;
1052 
1053 	lastfrag--;
1054 
1055 	lastpage = ufs_get_locked_page(mapping, lastfrag >>
1056 				       (PAGE_CACHE_SHIFT - inode->i_blkbits));
1057        if (IS_ERR(lastpage)) {
1058                err = -EIO;
1059                goto out;
1060        }
1061 
1062        end = lastfrag & ((1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1);
1063        bh = page_buffers(lastpage);
1064        for (i = 0; i < end; ++i)
1065                bh = bh->b_this_page;
1066 
1067 
1068        err = ufs_getfrag_block(inode, lastfrag, bh, 1);
1069 
1070        if (unlikely(err))
1071 	       goto out_unlock;
1072 
1073        if (buffer_new(bh)) {
1074 	       clear_buffer_new(bh);
1075 	       unmap_underlying_metadata(bh->b_bdev,
1076 					 bh->b_blocknr);
1077 	       /*
1078 		* we do not zero out the fragment, because
1079 		* if it was mapped to a hole, it already contains zeroes
1080 		*/
1081 	       set_buffer_uptodate(bh);
1082 	       mark_buffer_dirty(bh);
1083 	       set_page_dirty(lastpage);
1084        }
1085 
1086        if (lastfrag >= UFS_IND_FRAGMENT) {
1087 	       end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1;
1088 	       phys64 = bh->b_blocknr + 1;
1089 	       for (i = 0; i < end; ++i) {
1090 		       bh = sb_getblk(sb, i + phys64);
1091 		       lock_buffer(bh);
1092 		       memset(bh->b_data, 0, sb->s_blocksize);
1093 		       set_buffer_uptodate(bh);
1094 		       mark_buffer_dirty(bh);
1095 		       unlock_buffer(bh);
1096 		       sync_dirty_buffer(bh);
1097 		       brelse(bh);
1098 	       }
1099        }
1100 out_unlock:
1101        ufs_put_locked_page(lastpage);
1102 out:
1103        return err;
1104 }
1105 
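/*
 * Core of truncate: free the direct blocks past the new size, then the
 * partially used indirect branches (free_branch_tail) and every branch
 * that is now entirely unused (free_full_branch), all under truncate_mutex.
 */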
1106 static void __ufs_truncate_blocks(struct inode *inode)
1107 {
1108 	struct ufs_inode_info *ufsi = UFS_I(inode);
1109 	struct super_block *sb = inode->i_sb;
1110 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
1111 	unsigned offsets[4];
1112 	int depth = ufs_block_to_path(inode, DIRECT_BLOCK, offsets);
1113 	int depth2;
1114 	unsigned i;
1115 	struct ufs_buffer_head *ubh[3];
1116 	void *p;
1117 	u64 block;
1118 
1119 	if (!depth)
1120 		return;
1121 
1122 	/* find the last non-zero in offsets[] */
1123 	for (depth2 = depth - 1; depth2; depth2--)
1124 		if (offsets[depth2])
1125 			break;
1126 
1127 	mutex_lock(&ufsi->truncate_mutex);
1128 	if (depth == 1) {
1129 		ufs_trunc_direct(inode);
1130 		offsets[0] = UFS_IND_BLOCK;
1131 	} else {
1132 		/* get the blocks that should be partially emptied */
1133 		p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]);
1134 		for (i = 0; i < depth2; i++) {
1135 			offsets[i]++;	/* next branch is fully freed */
1136 			block = ufs_data_ptr_to_cpu(sb, p);
1137 			if (!block)
1138 				break;
1139 			ubh[i] = ubh_bread(sb, block, uspi->s_bsize);
1140 			if (!ubh[i]) {
1141 				write_seqlock(&ufsi->meta_lock);
1142 				ufs_data_ptr_clear(uspi, p);
1143 				write_sequnlock(&ufsi->meta_lock);
1144 				break;
1145 			}
1146 			p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]);
1147 		}
1148 		while (i--)
1149 			free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1);
1150 	}
1151 	for (i = offsets[0]; i <= UFS_TIND_BLOCK; i++) {
1152 		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
1153 		block = ufs_data_ptr_to_cpu(sb, p);
1154 		if (block) {
1155 			write_seqlock(&ufsi->meta_lock);
1156 			ufs_data_ptr_clear(uspi, p);
1157 			write_sequnlock(&ufsi->meta_lock);
1158 			free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
1159 		}
1160 	}
1161 	ufsi->i_lastfrag = DIRECT_FRAGMENT;
1162 	mark_inode_dirty(inode);
1163 	mutex_unlock(&ufsi->truncate_mutex);
1164 }
1165 
1166 static int ufs_truncate(struct inode *inode, loff_t size)
1167 {
1168 	int err = 0;
1169 
1170 	UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n",
1171 	     inode->i_ino, (unsigned long long)size,
1172 	     (unsigned long long)i_size_read(inode));
1173 
1174 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1175 	      S_ISLNK(inode->i_mode)))
1176 		return -EINVAL;
1177 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1178 		return -EPERM;
1179 
1180 	err = ufs_alloc_lastblock(inode, size);
1181 
1182 	if (err)
1183 		goto out;
1184 
1185 	block_truncate_page(inode->i_mapping, size, ufs_getfrag_block);
1186 
1187 	truncate_setsize(inode, size);
1188 
1189 	__ufs_truncate_blocks(inode);
1190 	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
1191 	mark_inode_dirty(inode);
1192 out:
1193 	UFSD("EXIT: err %d\n", err);
1194 	return err;
1195 }
1196 
1197 void ufs_truncate_blocks(struct inode *inode)
1198 {
1199 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1200 	      S_ISLNK(inode->i_mode)))
1201 		return;
1202 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1203 		return;
1204 	__ufs_truncate_blocks(inode);
1205 }
1206 
1207 int ufs_setattr(struct dentry *dentry, struct iattr *attr)
1208 {
1209 	struct inode *inode = d_inode(dentry);
1210 	unsigned int ia_valid = attr->ia_valid;
1211 	int error;
1212 
1213 	error = inode_change_ok(inode, attr);
1214 	if (error)
1215 		return error;
1216 
1217 	if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
1218 		error = ufs_truncate(inode, attr->ia_size);
1219 		if (error)
1220 			return error;
1221 	}
1222 
1223 	setattr_copy(inode, attr);
1224 	mark_inode_dirty(inode);
1225 	return 0;
1226 }
1227 
1228 const struct inode_operations ufs_file_inode_operations = {
1229 	.setattr = ufs_setattr,
1230 };
1231