/*
 *  linux/fs/hfsplus/inode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Inode handling routines
 */

#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mpage.h>
#include <linux/sched.h>
#include <linux/aio.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
#include "xattr.h"

static int hfsplus_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, hfsplus_get_block);
}

static int hfsplus_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, hfsplus_get_block, wbc);
}

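/*
 * Called after a failed or short write: drop any page cache pages and
 * on-disk blocks that were instantiated beyond the current i_size.
 */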
static void hfsplus_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, to, inode->i_size);
		hfsplus_file_truncate(inode);
	}
}

static int hfsplus_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret;

	*pagep = NULL;
	ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				hfsplus_get_block,
				&HFSPLUS_I(mapping->host)->phys_size);
	if (unlikely(ret))
		hfsplus_write_failed(mapping, pos + len);

	return ret;
}

static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, hfsplus_get_block);
}

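/*
 * Before a metadata page (extents, catalog or attributes tree) can be
 * released, drop any cached b-tree nodes that live in it.  If a node is
 * still referenced the page must stay; otherwise the nodes are unhashed
 * and freed and the page's buffers may be released.
 */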
static int hfsplus_releasepage(struct page *page, gfp_t mask)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct hfs_btree *tree;
	struct hfs_bnode *node;
	u32 nidx;
	int i, res = 1;

	switch (inode->i_ino) {
	case HFSPLUS_EXT_CNID:
		tree = HFSPLUS_SB(sb)->ext_tree;
		break;
	case HFSPLUS_CAT_CNID:
		tree = HFSPLUS_SB(sb)->cat_tree;
		break;
	case HFSPLUS_ATTR_CNID:
		tree = HFSPLUS_SB(sb)->attr_tree;
		break;
	default:
		BUG();
		return 0;
	}
	if (!tree)
		return 0;
	if (tree->node_size >= PAGE_CACHE_SIZE) {
		nidx = page->index >>
			(tree->node_size_shift - PAGE_CACHE_SHIFT);
		spin_lock(&tree->hash_lock);
		node = hfs_bnode_findhash(tree, nidx);
		if (!node)
			;
		else if (atomic_read(&node->refcnt))
			res = 0;
		if (res && node) {
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		}
		spin_unlock(&tree->hash_lock);
	} else {
		nidx = page->index <<
			(PAGE_CACHE_SHIFT - tree->node_size_shift);
		i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift);
		spin_lock(&tree->hash_lock);
		do {
			node = hfs_bnode_findhash(tree, nidx++);
			if (!node)
				continue;
			if (atomic_read(&node->refcnt)) {
				res = 0;
				break;
			}
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		} while (--i && nidx < tree->node_count);
		spin_unlock(&tree->hash_lock);
	}
	return res ? try_to_free_buffers(page) : 0;
}

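/*
 * Direct I/O path: map blocks with hfsplus_get_block and hand the
 * request to the generic block-device helper.
 */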
static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = file_inode(file)->i_mapping->host;
	ssize_t ret;

	ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
				 hfsplus_get_block);

	/*
	 * In case of error extending write may have instantiated a few
	 * blocks outside i_size. Trim these off again.
	 */
	if (unlikely((rw & WRITE) && ret < 0)) {
		loff_t isize = i_size_read(inode);
		loff_t end = offset + iov_length(iov, nr_segs);

		if (end > isize)
			hfsplus_write_failed(mapping, end);
	}

	return ret;
}

static int hfsplus_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, hfsplus_get_block);
}

const struct address_space_operations hfsplus_btree_aops = {
	.readpage	= hfsplus_readpage,
	.writepage	= hfsplus_writepage,
	.write_begin	= hfsplus_write_begin,
	.write_end	= generic_write_end,
	.bmap		= hfsplus_bmap,
	.releasepage	= hfsplus_releasepage,
};

const struct address_space_operations hfsplus_aops = {
	.readpage	= hfsplus_readpage,
	.writepage	= hfsplus_writepage,
	.write_begin	= hfsplus_write_begin,
	.write_end	= generic_write_end,
	.bmap		= hfsplus_bmap,
	.direct_IO	= hfsplus_direct_IO,
	.writepages	= hfsplus_writepages,
};

const struct dentry_operations hfsplus_dentry_operations = {
	.d_hash       = hfsplus_hash_dentry,
	.d_compare    = hfsplus_compare_dentry,
};

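/*
 * Lookup inside a file for the magic "rsrc" name: create (or reuse) the
 * in-memory inode that represents the file's resource fork.  Any other
 * name simply gets a negative dentry.
 */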
static struct dentry *hfsplus_file_lookup(struct inode *dir,
		struct dentry *dentry, unsigned int flags)
{
	struct hfs_find_data fd;
	struct super_block *sb = dir->i_sb;
	struct inode *inode = NULL;
	struct hfsplus_inode_info *hip;
	int err;

	if (HFSPLUS_IS_RSRC(dir) || strcmp(dentry->d_name.name, "rsrc"))
		goto out;

	inode = HFSPLUS_I(dir)->rsrc_inode;
	if (inode)
		goto out;

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	hip = HFSPLUS_I(inode);
	inode->i_ino = dir->i_ino;
	INIT_LIST_HEAD(&hip->open_dir_list);
	mutex_init(&hip->extents_lock);
	hip->extent_state = 0;
	hip->flags = 0;
	hip->userflags = 0;
	set_bit(HFSPLUS_I_RSRC, &hip->flags);

	err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
	if (!err) {
		err = hfsplus_find_cat(sb, dir->i_ino, &fd);
		if (!err)
			err = hfsplus_cat_read_inode(inode, &fd);
		hfs_find_exit(&fd);
	}
	if (err) {
		iput(inode);
		return ERR_PTR(err);
	}
	hip->rsrc_inode = dir;
	HFSPLUS_I(dir)->rsrc_inode = inode;
	igrab(dir);

	/*
	 * __mark_inode_dirty expects inodes to be hashed.  Since we don't
	 * want resource fork inodes in the regular inode space, we make them
	 * appear hashed, but do not put on any lists.  hlist_del()
	 * will work fine and require no locking.
	 */
	hlist_add_fake(&inode->i_hash);

	mark_inode_dirty(inode);
out:
	d_add(dentry, inode);
	return NULL;
}

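/*
 * Translate the on-disk permission structure into the VFS inode:
 * owner, group and mode (falling back to the mount's uid/gid/umask
 * when the record carries none) plus the immutable and append-only
 * flags.
 */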
static void hfsplus_get_perms(struct inode *inode,
		struct hfsplus_perm *perms, int dir)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
	u16 mode;

	mode = be16_to_cpu(perms->mode);

	i_uid_write(inode, be32_to_cpu(perms->owner));
	if (!i_uid_read(inode) && !mode)
		inode->i_uid = sbi->uid;

	i_gid_write(inode, be32_to_cpu(perms->group));
	if (!i_gid_read(inode) && !mode)
		inode->i_gid = sbi->gid;

	if (dir) {
		mode = mode ? (mode & S_IALLUGO) : (S_IRWXUGO & ~(sbi->umask));
		mode |= S_IFDIR;
	} else if (!mode)
		mode = S_IFREG | ((S_IRUGO|S_IWUGO) & ~(sbi->umask));
	inode->i_mode = mode;

	HFSPLUS_I(inode)->userflags = perms->userflags;
	if (perms->rootflags & HFSPLUS_FLG_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	else
		inode->i_flags &= ~S_IMMUTABLE;
	if (perms->rootflags & HFSPLUS_FLG_APPEND)
		inode->i_flags |= S_APPEND;
	else
		inode->i_flags &= ~S_APPEND;
}

static int hfsplus_file_open(struct inode *inode, struct file *file)
{
	if (HFSPLUS_IS_RSRC(inode))
		inode = HFSPLUS_I(inode)->rsrc_inode;
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EOVERFLOW;
	atomic_inc(&HFSPLUS_I(inode)->opencnt);
	return 0;
}

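/*
 * On last close drop preallocated space beyond i_size and, if the
 * catalog entry was already removed (S_DEAD), delete the file from the
 * hidden directory and free its blocks.
 */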
static int hfsplus_file_release(struct inode *inode, struct file *file)
{
	struct super_block *sb = inode->i_sb;

	if (HFSPLUS_IS_RSRC(inode))
		inode = HFSPLUS_I(inode)->rsrc_inode;
	if (atomic_dec_and_test(&HFSPLUS_I(inode)->opencnt)) {
		mutex_lock(&inode->i_mutex);
		hfsplus_file_truncate(inode);
		if (inode->i_flags & S_DEAD) {
			hfsplus_delete_cat(inode->i_ino,
					   HFSPLUS_SB(sb)->hidden_dir, NULL);
			hfsplus_delete_inode(inode);
		}
		mutex_unlock(&inode->i_mutex);
	}
	return 0;
}

static int hfsplus_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);
		truncate_setsize(inode, attr->ia_size);
		hfsplus_file_truncate(inode);
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
		       int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
	int error = 0, error2;

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;
	mutex_lock(&inode->i_mutex);

	/*
	 * Sync inode metadata into the catalog and extent trees.
	 */
	sync_inode_metadata(inode, 1);

	/*
	 * And explicitly write out the btrees.
	 */
	if (test_and_clear_bit(HFSPLUS_I_CAT_DIRTY, &hip->flags))
		error = filemap_write_and_wait(sbi->cat_tree->inode->i_mapping);

	if (test_and_clear_bit(HFSPLUS_I_EXT_DIRTY, &hip->flags)) {
		error2 =
			filemap_write_and_wait(sbi->ext_tree->inode->i_mapping);
		if (!error)
			error = error2;
	}

	if (test_and_clear_bit(HFSPLUS_I_ATTR_DIRTY, &hip->flags)) {
		if (sbi->attr_tree) {
			error2 =
				filemap_write_and_wait(
					    sbi->attr_tree->inode->i_mapping);
			if (!error)
				error = error2;
		} else {
			pr_err("sync non-existent attributes tree\n");
		}
	}

	if (test_and_clear_bit(HFSPLUS_I_ALLOC_DIRTY, &hip->flags)) {
		error2 = filemap_write_and_wait(sbi->alloc_file->i_mapping);
		if (!error)
			error = error2;
	}

	if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
		blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);

	mutex_unlock(&inode->i_mutex);

	return error;
}

static const struct inode_operations hfsplus_file_inode_operations = {
	.lookup		= hfsplus_file_lookup,
	.setattr	= hfsplus_setattr,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= hfsplus_listxattr,
	.removexattr	= hfsplus_removexattr,
};

static const struct file_operations hfsplus_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
	.splice_read	= generic_file_splice_read,
	.fsync		= hfsplus_file_fsync,
	.open		= hfsplus_file_open,
	.release	= hfsplus_file_release,
	.unlocked_ioctl = hfsplus_ioctl,
};

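/*
 * Allocate and initialise an in-core inode for a newly created catalog
 * object, assigning it the next available CNID and the operations that
 * match its type (directory, regular file, symlink or special file).
 */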
struct inode *hfsplus_new_inode(struct super_block *sb, umode_t mode)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct inode *inode = new_inode(sb);
	struct hfsplus_inode_info *hip;

	if (!inode)
		return NULL;

	inode->i_ino = sbi->next_cnid++;
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	set_nlink(inode, 1);
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;

	hip = HFSPLUS_I(inode);
	INIT_LIST_HEAD(&hip->open_dir_list);
	mutex_init(&hip->extents_lock);
	atomic_set(&hip->opencnt, 0);
	hip->extent_state = 0;
	hip->flags = 0;
	hip->userflags = 0;
	memset(hip->first_extents, 0, sizeof(hfsplus_extent_rec));
	memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
	hip->alloc_blocks = 0;
	hip->first_blocks = 0;
	hip->cached_start = 0;
	hip->cached_blocks = 0;
	hip->phys_size = 0;
	hip->fs_blocks = 0;
	hip->rsrc_inode = NULL;
	if (S_ISDIR(inode->i_mode)) {
		inode->i_size = 2;
		sbi->folder_count++;
		inode->i_op = &hfsplus_dir_inode_operations;
		inode->i_fop = &hfsplus_dir_operations;
	} else if (S_ISREG(inode->i_mode)) {
		sbi->file_count++;
		inode->i_op = &hfsplus_file_inode_operations;
		inode->i_fop = &hfsplus_file_operations;
		inode->i_mapping->a_ops = &hfsplus_aops;
		hip->clump_blocks = sbi->data_clump_blocks;
	} else if (S_ISLNK(inode->i_mode)) {
		sbi->file_count++;
		inode->i_op = &page_symlink_inode_operations;
		inode->i_mapping->a_ops = &hfsplus_aops;
		hip->clump_blocks = 1;
	} else
		sbi->file_count++;
	insert_inode_hash(inode);
	mark_inode_dirty(inode);
	hfsplus_mark_mdb_dirty(sb);

	return inode;
}

void hfsplus_delete_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (S_ISDIR(inode->i_mode)) {
		HFSPLUS_SB(sb)->folder_count--;
		hfsplus_mark_mdb_dirty(sb);
		return;
	}
	HFSPLUS_SB(sb)->file_count--;
	if (S_ISREG(inode->i_mode)) {
		if (!inode->i_nlink) {
			inode->i_size = 0;
			hfsplus_file_truncate(inode);
		}
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_size = 0;
		hfsplus_file_truncate(inode);
	}
	hfsplus_mark_mdb_dirty(sb);
}

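/*
 * Load the sizes, clump hint and first extent record of an on-disk
 * fork into the in-core inode.
 */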
void hfsplus_inode_read_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	u32 count;
	int i;

	memcpy(&hip->first_extents, &fork->extents, sizeof(hfsplus_extent_rec));
	for (count = 0, i = 0; i < 8; i++)
		count += be32_to_cpu(fork->extents[i].block_count);
	hip->first_blocks = count;
	memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
	hip->cached_start = 0;
	hip->cached_blocks = 0;

	hip->alloc_blocks = be32_to_cpu(fork->total_blocks);
	hip->phys_size = inode->i_size = be64_to_cpu(fork->total_size);
	hip->fs_blocks =
		(inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	inode_set_bytes(inode, hip->fs_blocks << sb->s_blocksize_bits);
	hip->clump_blocks =
		be32_to_cpu(fork->clump_size) >> sbi->alloc_blksz_shift;
	if (!hip->clump_blocks) {
		hip->clump_blocks = HFSPLUS_IS_RSRC(inode) ?
			sbi->rsrc_clump_blocks :
			sbi->data_clump_blocks;
	}
}

void hfsplus_inode_write_fork(struct inode *inode,
		struct hfsplus_fork_raw *fork)
{
	memcpy(&fork->extents, &HFSPLUS_I(inode)->first_extents,
	       sizeof(hfsplus_extent_rec));
	fork->total_size = cpu_to_be64(inode->i_size);
	fork->total_blocks = cpu_to_be32(HFSPLUS_I(inode)->alloc_blocks);
}

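/*
 * Fill an inode from the catalog record that the find data currently
 * points at; handles both folder and file records.
 */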
int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
{
	hfsplus_cat_entry entry;
	int res = 0;
	u16 type;

	type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);

	HFSPLUS_I(inode)->linkid = 0;
	if (type == HFSPLUS_FOLDER) {
		struct hfsplus_cat_folder *folder = &entry.folder;

		if (fd->entrylength < sizeof(struct hfsplus_cat_folder))
			/* panic? */;
		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
					sizeof(struct hfsplus_cat_folder));
		hfsplus_get_perms(inode, &folder->permissions, 1);
		set_nlink(inode, 1);
		inode->i_size = 2 + be32_to_cpu(folder->valence);
		inode->i_atime = hfsp_mt2ut(folder->access_date);
		inode->i_mtime = hfsp_mt2ut(folder->content_mod_date);
		inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date);
		HFSPLUS_I(inode)->create_date = folder->create_date;
		HFSPLUS_I(inode)->fs_blocks = 0;
		inode->i_op = &hfsplus_dir_inode_operations;
		inode->i_fop = &hfsplus_dir_operations;
	} else if (type == HFSPLUS_FILE) {
		struct hfsplus_cat_file *file = &entry.file;

		if (fd->entrylength < sizeof(struct hfsplus_cat_file))
			/* panic? */;
		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
					sizeof(struct hfsplus_cat_file));

		hfsplus_inode_read_fork(inode, HFSPLUS_IS_RSRC(inode) ?
					&file->rsrc_fork : &file->data_fork);
		hfsplus_get_perms(inode, &file->permissions, 0);
		set_nlink(inode, 1);
		if (S_ISREG(inode->i_mode)) {
			if (file->permissions.dev)
				set_nlink(inode,
					  be32_to_cpu(file->permissions.dev));
			inode->i_op = &hfsplus_file_inode_operations;
			inode->i_fop = &hfsplus_file_operations;
			inode->i_mapping->a_ops = &hfsplus_aops;
		} else if (S_ISLNK(inode->i_mode)) {
			inode->i_op = &page_symlink_inode_operations;
			inode->i_mapping->a_ops = &hfsplus_aops;
		} else {
			init_special_inode(inode, inode->i_mode,
					   be32_to_cpu(file->permissions.dev));
		}
		inode->i_atime = hfsp_mt2ut(file->access_date);
		inode->i_mtime = hfsp_mt2ut(file->content_mod_date);
		inode->i_ctime = hfsp_mt2ut(file->attribute_mod_date);
		HFSPLUS_I(inode)->create_date = file->create_date;
	} else {
		pr_err("bad catalog entry used to create inode\n");
		res = -EIO;
	}
	return res;
}

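/*
 * Write the in-core inode back into its catalog record: folders update
 * permissions, dates and valence; resource-fork inodes update only the
 * resource fork; data-fork inodes update the data fork, permissions,
 * dates and the locked flag.
 */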
int hfsplus_cat_write_inode(struct inode *inode)
{
	struct inode *main_inode = inode;
	struct hfs_find_data fd;
	hfsplus_cat_entry entry;

	if (HFSPLUS_IS_RSRC(inode))
		main_inode = HFSPLUS_I(inode)->rsrc_inode;

	if (!main_inode->i_nlink)
		return 0;

	if (hfs_find_init(HFSPLUS_SB(main_inode->i_sb)->cat_tree, &fd))
		/* panic? */
		return -EIO;

	if (hfsplus_find_cat(main_inode->i_sb, main_inode->i_ino, &fd))
		/* panic? */
		goto out;

	if (S_ISDIR(main_inode->i_mode)) {
		struct hfsplus_cat_folder *folder = &entry.folder;

		if (fd.entrylength < sizeof(struct hfsplus_cat_folder))
			/* panic? */;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
					sizeof(struct hfsplus_cat_folder));
		/* simple node checks? */
		hfsplus_cat_set_perms(inode, &folder->permissions);
		folder->access_date = hfsp_ut2mt(inode->i_atime);
		folder->content_mod_date = hfsp_ut2mt(inode->i_mtime);
		folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
		folder->valence = cpu_to_be32(inode->i_size - 2);
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
					 sizeof(struct hfsplus_cat_folder));
	} else if (HFSPLUS_IS_RSRC(inode)) {
		struct hfsplus_cat_file *file = &entry.file;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
			       sizeof(struct hfsplus_cat_file));
		hfsplus_inode_write_fork(inode, &file->rsrc_fork);
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
				sizeof(struct hfsplus_cat_file));
	} else {
		struct hfsplus_cat_file *file = &entry.file;

		if (fd.entrylength < sizeof(struct hfsplus_cat_file))
			/* panic? */;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
					sizeof(struct hfsplus_cat_file));
		hfsplus_inode_write_fork(inode, &file->data_fork);
		hfsplus_cat_set_perms(inode, &file->permissions);
		if (HFSPLUS_FLG_IMMUTABLE &
				(file->permissions.rootflags |
					file->permissions.userflags))
			file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
		else
			file->flags &= cpu_to_be16(~HFSPLUS_FILE_LOCKED);
		file->access_date = hfsp_ut2mt(inode->i_atime);
		file->content_mod_date = hfsp_ut2mt(inode->i_mtime);
		file->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
					 sizeof(struct hfsplus_cat_file));
	}

	set_bit(HFSPLUS_I_CAT_DIRTY, &HFSPLUS_I(inode)->flags);
out:
	hfs_find_exit(&fd);
	return 0;
}
652