/*
 *  linux/fs/hfsplus/super.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/vfs.h>
#include <linux/nls.h>

static struct inode *hfsplus_alloc_inode(struct super_block *sb);
static void hfsplus_destroy_inode(struct inode *inode);

#include "hfsplus_fs.h"
#include "xattr.h"

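/*
 * Read one of the special system-file inodes (extents, catalog,
 * allocation, startup or attributes file) directly from the fork
 * data stored in the volume header.
 */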
static int hfsplus_system_read_inode(struct inode *inode)
{
        struct hfsplus_vh *vhdr = HFSPLUS_SB(inode->i_sb)->s_vhdr;

        switch (inode->i_ino) {
        case HFSPLUS_EXT_CNID:
                hfsplus_inode_read_fork(inode, &vhdr->ext_file);
                inode->i_mapping->a_ops = &hfsplus_btree_aops;
                break;
        case HFSPLUS_CAT_CNID:
                hfsplus_inode_read_fork(inode, &vhdr->cat_file);
                inode->i_mapping->a_ops = &hfsplus_btree_aops;
                break;
        case HFSPLUS_ALLOC_CNID:
                hfsplus_inode_read_fork(inode, &vhdr->alloc_file);
                inode->i_mapping->a_ops = &hfsplus_aops;
                break;
        case HFSPLUS_START_CNID:
                hfsplus_inode_read_fork(inode, &vhdr->start_file);
                break;
        case HFSPLUS_ATTR_CNID:
                hfsplus_inode_read_fork(inode, &vhdr->attr_file);
                inode->i_mapping->a_ops = &hfsplus_btree_aops;
                break;
        default:
                return -EIO;
        }

        return 0;
}

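/*
 * Look up an inode by catalog node ID.  User files and the root
 * directory are read from the catalog tree; the fixed-CNID system
 * files are read from the volume header instead.
 */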
struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino)
{
        struct hfs_find_data fd;
        struct inode *inode;
        int err;

        inode = iget_locked(sb, ino);
        if (!inode)
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;

        INIT_LIST_HEAD(&HFSPLUS_I(inode)->open_dir_list);
        mutex_init(&HFSPLUS_I(inode)->extents_lock);
        HFSPLUS_I(inode)->flags = 0;
        HFSPLUS_I(inode)->extent_state = 0;
        HFSPLUS_I(inode)->rsrc_inode = NULL;
        atomic_set(&HFSPLUS_I(inode)->opencnt, 0);

        if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID ||
            inode->i_ino == HFSPLUS_ROOT_CNID) {
                err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
                if (!err) {
                        err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
                        if (!err)
                                err = hfsplus_cat_read_inode(inode, &fd);
                        hfs_find_exit(&fd);
                }
        } else {
                err = hfsplus_system_read_inode(inode);
        }

        if (err) {
                iget_failed(inode);
                return ERR_PTR(err);
        }

        unlock_new_inode(inode);
        return inode;
}

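/*
 * Write back a special system-file inode: update its fork data in the
 * volume header and, if the inode backs a B-tree, write out that tree.
 */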
static int hfsplus_system_write_inode(struct inode *inode)
{
        struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
        struct hfsplus_vh *vhdr = sbi->s_vhdr;
        struct hfsplus_fork_raw *fork;
        struct hfs_btree *tree = NULL;

        switch (inode->i_ino) {
        case HFSPLUS_EXT_CNID:
                fork = &vhdr->ext_file;
                tree = sbi->ext_tree;
                break;
        case HFSPLUS_CAT_CNID:
                fork = &vhdr->cat_file;
                tree = sbi->cat_tree;
                break;
        case HFSPLUS_ALLOC_CNID:
                fork = &vhdr->alloc_file;
                break;
        case HFSPLUS_START_CNID:
                fork = &vhdr->start_file;
                break;
        case HFSPLUS_ATTR_CNID:
                fork = &vhdr->attr_file;
                tree = sbi->attr_tree;
                break;
        default:
                return -EIO;
        }

        if (fork->total_size != cpu_to_be64(inode->i_size)) {
                set_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags);
                hfsplus_mark_mdb_dirty(inode->i_sb);
        }
        hfsplus_inode_write_fork(inode, fork);
        if (tree) {
                int err = hfs_btree_write(tree);

                if (err) {
                        pr_err("b-tree write err: %d, ino %lu\n",
                               err, inode->i_ino);
                        return err;
                }
        }
        return 0;
}

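/*
 * ->write_inode callback: write back the cached extent record, then the
 * catalog record (user files) or the volume header fork (system files).
 */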
static int hfsplus_write_inode(struct inode *inode,
                               struct writeback_control *wbc)
{
        int err;

        hfs_dbg(INODE, "hfsplus_write_inode: %lu\n", inode->i_ino);

        err = hfsplus_ext_write_extent(inode);
        if (err)
                return err;

        if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID ||
            inode->i_ino == HFSPLUS_ROOT_CNID)
                return hfsplus_cat_write_inode(inode);
        else
                return hfsplus_system_write_inode(inode);
}

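/*
 * ->evict_inode callback: drop the page cache and, for a resource-fork
 * inode, detach it from its owning data-fork inode.
 */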
static void hfsplus_evict_inode(struct inode *inode)
{
        hfs_dbg(INODE, "hfsplus_evict_inode: %lu\n", inode->i_ino);
        truncate_inode_pages(&inode->i_data, 0);
        clear_inode(inode);
        if (HFSPLUS_IS_RSRC(inode)) {
                HFSPLUS_I(HFSPLUS_I(inode)->rsrc_inode)->rsrc_inode = NULL;
                iput(HFSPLUS_I(inode)->rsrc_inode);
        }
}

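/*
 * ->sync_fs callback: write back the metadata inodes and the volume
 * header and, when the write-backup flag is set, the backup header
 * stored two sectors before the end of the volume, then flush the
 * block device unless barriers are disabled.
 */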
static int hfsplus_sync_fs(struct super_block *sb, int wait)
{
        struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
        struct hfsplus_vh *vhdr = sbi->s_vhdr;
        int write_backup = 0;
        int error, error2;

        if (!wait)
                return 0;

        hfs_dbg(SUPER, "hfsplus_sync_fs\n");

        /*
         * Explicitly write out the special metadata inodes.
         *
         * While these special inodes are marked as hashed and written
         * out periodically by the flusher threads, we redirty them
         * during writeout of normal inodes, and thus the resulting
         * livelock prevents us from getting the latest state to disk.
         */
        error = filemap_write_and_wait(sbi->cat_tree->inode->i_mapping);
        error2 = filemap_write_and_wait(sbi->ext_tree->inode->i_mapping);
        if (!error)
                error = error2;
        if (sbi->attr_tree) {
                error2 =
                    filemap_write_and_wait(sbi->attr_tree->inode->i_mapping);
                if (!error)
                        error = error2;
        }
        error2 = filemap_write_and_wait(sbi->alloc_file->i_mapping);
        if (!error)
                error = error2;

        mutex_lock(&sbi->vh_mutex);
        mutex_lock(&sbi->alloc_mutex);
        vhdr->free_blocks = cpu_to_be32(sbi->free_blocks);
        vhdr->next_cnid = cpu_to_be32(sbi->next_cnid);
        vhdr->folder_count = cpu_to_be32(sbi->folder_count);
        vhdr->file_count = cpu_to_be32(sbi->file_count);

        if (test_and_clear_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags)) {
                memcpy(sbi->s_backup_vhdr, sbi->s_vhdr, sizeof(*sbi->s_vhdr));
                write_backup = 1;
        }

        error2 = hfsplus_submit_bio(sb,
                                    sbi->part_start + HFSPLUS_VOLHEAD_SECTOR,
                                    sbi->s_vhdr_buf, NULL, WRITE_SYNC);
        if (!error)
                error = error2;
        if (!write_backup)
                goto out;

        error2 = hfsplus_submit_bio(sb,
                                    sbi->part_start + sbi->sect_count - 2,
                                    sbi->s_backup_vhdr_buf, NULL, WRITE_SYNC);
        if (!error)
                error = error2;
out:
        mutex_unlock(&sbi->alloc_mutex);
        mutex_unlock(&sbi->vh_mutex);

        if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
                blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);

        return error;
}

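/*
 * Delayed work callback: clear the queued flag and sync the filesystem
 * that owns this sb_info.
 */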
static void delayed_sync_fs(struct work_struct *work)
{
        int err;
        struct hfsplus_sb_info *sbi;

        sbi = container_of(work, struct hfsplus_sb_info, sync_work.work);

        spin_lock(&sbi->work_lock);
        sbi->work_queued = 0;
        spin_unlock(&sbi->work_lock);

        err = hfsplus_sync_fs(sbi->alloc_file->i_sb, 1);
        if (err)
                pr_err("delayed sync fs err %d\n", err);
}

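/*
 * Note that the volume header needs writing out and, unless a sync is
 * already pending, schedule delayed_sync_fs() on the long workqueue.
 */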
void hfsplus_mark_mdb_dirty(struct super_block *sb)
{
        struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
        unsigned long delay;

        if (sb->s_flags & MS_RDONLY)
                return;

        spin_lock(&sbi->work_lock);
        if (!sbi->work_queued) {
                delay = msecs_to_jiffies(dirty_writeback_interval * 10);
                queue_delayed_work(system_long_wq, &sbi->sync_work, delay);
                sbi->work_queued = 1;
        }
        spin_unlock(&sbi->work_lock);
}

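/*
 * ->put_super callback: stop the delayed sync, mark the volume as
 * cleanly unmounted on a read-write mount, and release the B-trees,
 * special inodes, volume header buffers and NLS table.
 */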
static void hfsplus_put_super(struct super_block *sb)
{
        struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);

        hfs_dbg(SUPER, "hfsplus_put_super\n");

        cancel_delayed_work_sync(&sbi->sync_work);

        if (!(sb->s_flags & MS_RDONLY) && sbi->s_vhdr) {
                struct hfsplus_vh *vhdr = sbi->s_vhdr;

                vhdr->modify_date = hfsp_now2mt();
                vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_UNMNT);
                vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_INCNSTNT);

                hfsplus_sync_fs(sb, 1);
        }

        hfs_btree_close(sbi->attr_tree);
        hfs_btree_close(sbi->cat_tree);
        hfs_btree_close(sbi->ext_tree);
        iput(sbi->alloc_file);
        iput(sbi->hidden_dir);
        kfree(sbi->s_vhdr_buf);
        kfree(sbi->s_backup_vhdr_buf);
        unload_nls(sbi->nls);
        kfree(sb->s_fs_info);
        sb->s_fs_info = NULL;
}

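/*
 * ->statfs callback: block counts are reported in units of
 * sb->s_blocksize, hence the fs_shift conversion from allocation
 * blocks.
 */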
static int hfsplus_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct super_block *sb = dentry->d_sb;
        struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
        u64 id = huge_encode_dev(sb->s_bdev->bd_dev);

        buf->f_type = HFSPLUS_SUPER_MAGIC;
        buf->f_bsize = sb->s_blocksize;
        buf->f_blocks = sbi->total_blocks << sbi->fs_shift;
        buf->f_bfree = sbi->free_blocks << sbi->fs_shift;
        buf->f_bavail = buf->f_bfree;
        buf->f_files = 0xFFFFFFFF;
        buf->f_ffree = 0xFFFFFFFF - sbi->next_cnid;
        buf->f_fsid.val[0] = (u32)id;
        buf->f_fsid.val[1] = (u32)(id >> 32);
        buf->f_namelen = HFSPLUS_MAX_STRLEN;

        return 0;
}

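/*
 * ->remount_fs callback: refuse to go read-write if the volume was not
 * cleanly unmounted, or (unless forced) if it is software-locked or
 * journaled.
 */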
static int hfsplus_remount(struct super_block *sb, int *flags, char *data)
{
        sync_filesystem(sb);
        if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
                return 0;
        if (!(*flags & MS_RDONLY)) {
                struct hfsplus_vh *vhdr = HFSPLUS_SB(sb)->s_vhdr;
                int force = 0;

                if (!hfsplus_parse_options_remount(data, &force))
                        return -EINVAL;

                if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
                        pr_warn("filesystem was not cleanly unmounted, running fsck.hfsplus is recommended. leaving read-only.\n");
                        sb->s_flags |= MS_RDONLY;
                        *flags |= MS_RDONLY;
                } else if (force) {
                        /* nothing */
                } else if (vhdr->attributes &
                           cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
                        pr_warn("filesystem is marked locked, leaving read-only.\n");
                        sb->s_flags |= MS_RDONLY;
                        *flags |= MS_RDONLY;
                } else if (vhdr->attributes &
                           cpu_to_be32(HFSPLUS_VOL_JOURNALED)) {
                        pr_warn("filesystem is marked journaled, leaving read-only.\n");
                        sb->s_flags |= MS_RDONLY;
                        *flags |= MS_RDONLY;
                }
        }
        return 0;
}

static const struct super_operations hfsplus_sops = {
        .alloc_inode    = hfsplus_alloc_inode,
        .destroy_inode  = hfsplus_destroy_inode,
        .write_inode    = hfsplus_write_inode,
        .evict_inode    = hfsplus_evict_inode,
        .put_super      = hfsplus_put_super,
        .sync_fs        = hfsplus_sync_fs,
        .statfs         = hfsplus_statfs,
        .remount_fs     = hfsplus_remount,
        .show_options   = hfsplus_show_options,
};

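/*
 * Fill in the superblock at mount time: read the volume header, open
 * the extents, catalog and (optional) attributes B-trees, load the
 * allocation file and root directory, and find or create the hidden
 * HFSP_HIDDENDIR_NAME directory below the root.
 */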
static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
{
        struct hfsplus_vh *vhdr;
        struct hfsplus_sb_info *sbi;
        hfsplus_cat_entry entry;
        struct hfs_find_data fd;
        struct inode *root, *inode;
        struct qstr str;
        struct nls_table *nls = NULL;
        u64 last_fs_block, last_fs_page;
        int err;

        err = -ENOMEM;
        sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
        if (!sbi)
                goto out;

        sb->s_fs_info = sbi;
        mutex_init(&sbi->alloc_mutex);
        mutex_init(&sbi->vh_mutex);
        spin_lock_init(&sbi->work_lock);
        INIT_DELAYED_WORK(&sbi->sync_work, delayed_sync_fs);
        hfsplus_fill_defaults(sbi);

        err = -EINVAL;
        if (!hfsplus_parse_options(data, sbi)) {
                pr_err("unable to parse mount options\n");
                goto out_unload_nls;
        }

        /* temporarily use utf8 to correctly find the hidden dir below */
        nls = sbi->nls;
        sbi->nls = load_nls("utf8");
        if (!sbi->nls) {
                pr_err("unable to load nls for utf8\n");
                goto out_unload_nls;
        }

        /* Grab the volume header */
        if (hfsplus_read_wrapper(sb)) {
                if (!silent)
                        pr_warn("unable to find HFS+ superblock\n");
                goto out_unload_nls;
        }
        vhdr = sbi->s_vhdr;

        /* Copy parts of the volume header into the superblock */
        sb->s_magic = HFSPLUS_VOLHEAD_SIG;
        if (be16_to_cpu(vhdr->version) < HFSPLUS_MIN_VERSION ||
            be16_to_cpu(vhdr->version) > HFSPLUS_CURRENT_VERSION) {
                pr_err("wrong filesystem version\n");
                goto out_free_vhdr;
        }
        sbi->total_blocks = be32_to_cpu(vhdr->total_blocks);
        sbi->free_blocks = be32_to_cpu(vhdr->free_blocks);
        sbi->next_cnid = be32_to_cpu(vhdr->next_cnid);
        sbi->file_count = be32_to_cpu(vhdr->file_count);
        sbi->folder_count = be32_to_cpu(vhdr->folder_count);
        sbi->data_clump_blocks =
                be32_to_cpu(vhdr->data_clump_sz) >> sbi->alloc_blksz_shift;
        if (!sbi->data_clump_blocks)
                sbi->data_clump_blocks = 1;
        sbi->rsrc_clump_blocks =
                be32_to_cpu(vhdr->rsrc_clump_sz) >> sbi->alloc_blksz_shift;
        if (!sbi->rsrc_clump_blocks)
                sbi->rsrc_clump_blocks = 1;

        err = -EFBIG;
        last_fs_block = sbi->total_blocks - 1;
        last_fs_page = (last_fs_block << sbi->alloc_blksz_shift) >>
                        PAGE_CACHE_SHIFT;

        if ((last_fs_block > (sector_t)(~0ULL) >> (sbi->alloc_blksz_shift - 9)) ||
            (last_fs_page > (pgoff_t)(~0ULL))) {
                pr_err("filesystem size too large\n");
                goto out_free_vhdr;
        }

        /* Set up operations so we can load metadata */
        sb->s_op = &hfsplus_sops;
        sb->s_maxbytes = MAX_LFS_FILESIZE;

        if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
                pr_warn("Filesystem was not cleanly unmounted, running fsck.hfsplus is recommended. mounting read-only.\n");
                sb->s_flags |= MS_RDONLY;
        } else if (test_and_clear_bit(HFSPLUS_SB_FORCE, &sbi->flags)) {
                /* nothing */
        } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
                pr_warn("Filesystem is marked locked, mounting read-only.\n");
                sb->s_flags |= MS_RDONLY;
        } else if ((vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_JOURNALED)) &&
                   !(sb->s_flags & MS_RDONLY)) {
                pr_warn("write access to a journaled filesystem is not supported, use the force option at your own risk, mounting read-only.\n");
                sb->s_flags |= MS_RDONLY;
        }

        err = -EINVAL;

        /* Load metadata objects (B*Trees) */
        sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
        if (!sbi->ext_tree) {
                pr_err("failed to load extents file\n");
                goto out_free_vhdr;
        }
        sbi->cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID);
        if (!sbi->cat_tree) {
                pr_err("failed to load catalog file\n");
                goto out_close_ext_tree;
        }
        if (vhdr->attr_file.total_blocks != 0) {
                sbi->attr_tree = hfs_btree_open(sb, HFSPLUS_ATTR_CNID);
                if (!sbi->attr_tree) {
                        pr_err("failed to load attributes file\n");
                        goto out_close_cat_tree;
                }
        }
        sb->s_xattr = hfsplus_xattr_handlers;

        inode = hfsplus_iget(sb, HFSPLUS_ALLOC_CNID);
        if (IS_ERR(inode)) {
                pr_err("failed to load allocation file\n");
                err = PTR_ERR(inode);
                goto out_close_attr_tree;
        }
        sbi->alloc_file = inode;

        /* Load the root directory */
        root = hfsplus_iget(sb, HFSPLUS_ROOT_CNID);
        if (IS_ERR(root)) {
                pr_err("failed to load root directory\n");
                err = PTR_ERR(root);
                goto out_put_alloc_file;
        }

        sb->s_d_op = &hfsplus_dentry_operations;
        sb->s_root = d_make_root(root);
        if (!sb->s_root) {
                err = -ENOMEM;
                goto out_put_alloc_file;
        }

        str.len = sizeof(HFSP_HIDDENDIR_NAME) - 1;
        str.name = HFSP_HIDDENDIR_NAME;
        err = hfs_find_init(sbi->cat_tree, &fd);
        if (err)
                goto out_put_root;
        hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, &str);
        if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
                hfs_find_exit(&fd);
                if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) {
                        err = -EINVAL;
                        goto out_put_root;
                }
                inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id));
                if (IS_ERR(inode)) {
                        err = PTR_ERR(inode);
                        goto out_put_root;
                }
                sbi->hidden_dir = inode;
        } else
                hfs_find_exit(&fd);

        if (!(sb->s_flags & MS_RDONLY)) {
                /*
                 * H+LX == hfsplusutils, H+Lx == this driver, H+lx is unused
                 * all three are registered with Apple for our use
                 */
                vhdr->last_mount_vers = cpu_to_be32(HFSP_MOUNT_VERSION);
                vhdr->modify_date = hfsp_now2mt();
                be32_add_cpu(&vhdr->write_count, 1);
                vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT);
                vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT);
                hfsplus_sync_fs(sb, 1);

                if (!sbi->hidden_dir) {
                        mutex_lock(&sbi->vh_mutex);
                        sbi->hidden_dir = hfsplus_new_inode(sb, S_IFDIR);
                        if (!sbi->hidden_dir) {
                                mutex_unlock(&sbi->vh_mutex);
                                err = -ENOMEM;
                                goto out_put_root;
                        }
                        err = hfsplus_create_cat(sbi->hidden_dir->i_ino, root,
                                                 &str, sbi->hidden_dir);
                        if (err) {
                                mutex_unlock(&sbi->vh_mutex);
                                goto out_put_hidden_dir;
                        }

                        err = hfsplus_init_inode_security(sbi->hidden_dir,
                                                          root, &str);
                        if (err == -EOPNOTSUPP)
                                err = 0; /* Operation is not supported. */
                        else if (err) {
                                /*
                                 * Try to delete anyway without
                                 * error analysis.
                                 */
                                hfsplus_delete_cat(sbi->hidden_dir->i_ino,
                                                   root, &str);
                                mutex_unlock(&sbi->vh_mutex);
                                goto out_put_hidden_dir;
                        }

                        mutex_unlock(&sbi->vh_mutex);
                        hfsplus_mark_inode_dirty(sbi->hidden_dir,
                                                 HFSPLUS_I_CAT_DIRTY);
                }
        }

        unload_nls(sbi->nls);
        sbi->nls = nls;
        return 0;

out_put_hidden_dir:
        iput(sbi->hidden_dir);
out_put_root:
        dput(sb->s_root);
        sb->s_root = NULL;
out_put_alloc_file:
        iput(sbi->alloc_file);
out_close_attr_tree:
        hfs_btree_close(sbi->attr_tree);
out_close_cat_tree:
        hfs_btree_close(sbi->cat_tree);
out_close_ext_tree:
        hfs_btree_close(sbi->ext_tree);
out_free_vhdr:
        kfree(sbi->s_vhdr_buf);
        kfree(sbi->s_backup_vhdr_buf);
out_unload_nls:
        unload_nls(sbi->nls);
        unload_nls(nls);
        kfree(sbi);
out:
        return err;
}

MODULE_AUTHOR("Brad Boyer");
MODULE_DESCRIPTION("Extended Macintosh Filesystem");
MODULE_LICENSE("GPL");

static struct kmem_cache *hfsplus_inode_cachep;

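/*
 * Inode allocation and RCU-delayed freeing, backed by the
 * hfsplus_icache slab.
 */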
static struct inode *hfsplus_alloc_inode(struct super_block *sb)
{
        struct hfsplus_inode_info *i;

        i = kmem_cache_alloc(hfsplus_inode_cachep, GFP_KERNEL);
        return i ? &i->vfs_inode : NULL;
}

static void hfsplus_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);

        kmem_cache_free(hfsplus_inode_cachep, HFSPLUS_I(inode));
}

static void hfsplus_destroy_inode(struct inode *inode)
{
        call_rcu(&inode->i_rcu, hfsplus_i_callback);
}

#define HFSPLUS_INODE_SIZE      sizeof(struct hfsplus_inode_info)

static struct dentry *hfsplus_mount(struct file_system_type *fs_type,
                                    int flags, const char *dev_name, void *data)
{
        return mount_bdev(fs_type, flags, dev_name, data, hfsplus_fill_super);
}

static struct file_system_type hfsplus_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "hfsplus",
        .mount          = hfsplus_mount,
        .kill_sb        = kill_block_super,
        .fs_flags       = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("hfsplus");

static void hfsplus_init_once(void *p)
{
        struct hfsplus_inode_info *i = p;

        inode_init_once(&i->vfs_inode);
}

static int __init init_hfsplus_fs(void)
{
        int err;

        hfsplus_inode_cachep = kmem_cache_create("hfsplus_icache",
                HFSPLUS_INODE_SIZE, 0, SLAB_HWCACHE_ALIGN,
                hfsplus_init_once);
        if (!hfsplus_inode_cachep)
                return -ENOMEM;
        err = hfsplus_create_attr_tree_cache();
        if (err)
                goto destroy_inode_cache;
        err = register_filesystem(&hfsplus_fs_type);
        if (err)
                goto destroy_attr_tree_cache;
        return 0;

destroy_attr_tree_cache:
        hfsplus_destroy_attr_tree_cache();

destroy_inode_cache:
        kmem_cache_destroy(hfsplus_inode_cachep);

        return err;
}

static void __exit exit_hfsplus_fs(void)
{
        unregister_filesystem(&hfsplus_fs_type);

        /*
         * Make sure all delayed rcu free inodes are flushed before we
         * destroy cache.
         */
        rcu_barrier();
        hfsplus_destroy_attr_tree_cache();
        kmem_cache_destroy(hfsplus_inode_cachep);
}

module_init(init_hfsplus_fs)
module_exit(exit_hfsplus_fs)