// SPDX-License-Identifier: GPL-2.0-or-later

/*
 * SPU file system
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/fsnotify.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/slab.h>

#include <asm/prom.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <linux/uaccess.h>

#include "spufs.h"

struct spufs_sb_info {
	bool debug;
};

static struct kmem_cache *spufs_inode_cache;
char *isolated_loader;
static int isolated_loader_size;

static struct spufs_sb_info *spufs_get_sb_info(struct super_block *sb)
{
	return sb->s_fs_info;
}

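/*
 * Inode management: spufs inodes come from a dedicated slab cache so
 * that the embedded spufs_inode_info (gang and context pointers, open
 * count) is allocated together with each VFS inode.
 */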
static struct inode *
spufs_alloc_inode(struct super_block *sb)
{
	struct spufs_inode_info *ei;

	ei = kmem_cache_alloc(spufs_inode_cache, GFP_KERNEL);
	if (!ei)
		return NULL;

	ei->i_gang = NULL;
	ei->i_ctx = NULL;
	ei->i_openers = 0;

	return &ei->vfs_inode;
}

static void spufs_free_inode(struct inode *inode)
{
	kmem_cache_free(spufs_inode_cache, SPUFS_I(inode));
}

static void
spufs_init_once(void *p)
{
	struct spufs_inode_info *ei = p;

	inode_init_once(&ei->vfs_inode);
}

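/*
 * Allocate a bare spufs inode, stamped with the calling task's
 * fsuid/fsgid and the current time; callers fill in i_op and i_fop.
 */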
static struct inode *
spufs_new_inode(struct super_block *sb, umode_t mode)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (!inode)
		goto out;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
out:
	return inode;
}

static int
spufs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
	      struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);

	if ((attr->ia_valid & ATTR_SIZE) &&
	    (attr->ia_size != inode->i_size))
		return -EINVAL;
	setattr_copy(&init_user_ns, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}


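/*
 * Create one regular file inside a context directory.  The file takes
 * a reference on the SPU context, so the context stays alive as long
 * as any of its files does.
 */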
static int
spufs_new_file(struct super_block *sb, struct dentry *dentry,
	       const struct file_operations *fops, umode_t mode,
	       size_t size, struct spu_context *ctx)
{
	static const struct inode_operations spufs_file_iops = {
		.setattr = spufs_setattr,
	};
	struct inode *inode;
	int ret;

	ret = -ENOSPC;
	inode = spufs_new_inode(sb, S_IFREG | mode);
	if (!inode)
		goto out;

	ret = 0;
	inode->i_op = &spufs_file_iops;
	inode->i_fop = fops;
	inode->i_size = size;
	inode->i_private = SPUFS_I(inode)->i_ctx = get_spu_context(ctx);
	d_add(dentry, inode);
out:
	return ret;
}

static void
spufs_evict_inode(struct inode *inode)
{
	struct spufs_inode_info *ei = SPUFS_I(inode);
	clear_inode(inode);
	if (ei->i_ctx)
		put_spu_context(ei->i_ctx);
	if (ei->i_gang)
		put_spu_gang(ei->i_gang);
}

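/*
 * Unlink every positive child dentry of a context directory.  Used by
 * spufs_rmdir() to empty the directory before removing it.
 */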
static void spufs_prune_dir(struct dentry *dir)
{
	struct dentry *dentry, *tmp;

	inode_lock(d_inode(dir));
	list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_child) {
		spin_lock(&dentry->d_lock);
		if (simple_positive(dentry)) {
			dget_dlock(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			simple_unlink(d_inode(dir), dentry);
			/* XXX: what was dcache_lock protecting here? Other
			 * filesystems (IB, configfs) release dcache_lock
			 * before unlink */
			dput(dentry);
		} else {
			spin_unlock(&dentry->d_lock);
		}
	}
	shrink_dcache_parent(dir);
	inode_unlock(d_inode(dir));
}

/* Caller must hold parent->i_mutex */
static int spufs_rmdir(struct inode *parent, struct dentry *dir)
{
	/* remove all entries */
	int res;
	spufs_prune_dir(dir);
	d_drop(dir);
	res = simple_rmdir(parent, dir);
	/* We have to give up the mm_struct */
	spu_forget(SPUFS_I(d_inode(dir))->i_ctx);
	return res;
}

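/*
 * Populate a freshly created directory from a spufs_tree_descr table,
 * masking each entry's mode with the mode requested at creation time.
 */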
static int spufs_fill_dir(struct dentry *dir,
		const struct spufs_tree_descr *files, umode_t mode,
		struct spu_context *ctx)
{
	while (files->name && files->name[0]) {
		int ret;
		struct dentry *dentry = d_alloc_name(dir, files->name);
		if (!dentry)
			return -ENOMEM;
		ret = spufs_new_file(dir->d_sb, dentry, files->ops,
					files->mode & mode, files->size, ctx);
		if (ret)
			return ret;
		files++;
	}
	return 0;
}

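/*
 * ->release for the directory fd handed out by spu_create(): dropping
 * that descriptor removes the context directory and gives up the mm
 * reference via spufs_rmdir().
 */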
static int spufs_dir_close(struct inode *inode, struct file *file)
{
	struct inode *parent;
	struct dentry *dir;
	int ret;

	dir = file->f_path.dentry;
	parent = d_inode(dir->d_parent);

	inode_lock_nested(parent, I_MUTEX_PARENT);
	ret = spufs_rmdir(parent, dir);
	inode_unlock(parent);
	WARN_ON(ret);

	return dcache_dir_close(inode, file);
}

const struct file_operations spufs_context_fops = {
	.open = dcache_dir_open,
	.release = spufs_dir_close,
	.llseek = dcache_dir_lseek,
	.read = generic_read_dir,
	.iterate_shared = dcache_readdir,
	.fsync = noop_fsync,
};
EXPORT_SYMBOL_GPL(spufs_context_fops);

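/*
 * Create a context directory: allocate the SPU context, instantiate
 * the dentry and fill the directory with the per-context files (plus
 * the debug files when the filesystem is mounted with "debug").
 */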
static int
spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
		umode_t mode)
{
	int ret;
	struct inode *inode;
	struct spu_context *ctx;

	inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
	if (!inode)
		return -ENOSPC;

	inode_init_owner(&init_user_ns, inode, dir, mode | S_IFDIR);
	ctx = alloc_spu_context(SPUFS_I(dir)->i_gang); /* XXX gang */
	SPUFS_I(inode)->i_ctx = ctx;
	if (!ctx) {
		iput(inode);
		return -ENOSPC;
	}

	ctx->flags = flags;
	inode->i_op = &simple_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;

	inode_lock(inode);

	dget(dentry);
	inc_nlink(dir);
	inc_nlink(inode);

	d_instantiate(dentry, inode);

	if (flags & SPU_CREATE_NOSCHED)
		ret = spufs_fill_dir(dentry, spufs_dir_nosched_contents,
					 mode, ctx);
	else
		ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);

	if (!ret && spufs_get_sb_info(dir->i_sb)->debug)
		ret = spufs_fill_dir(dentry, spufs_dir_debug_contents,
				mode, ctx);

	if (ret)
		spufs_rmdir(dir, dentry);

	inode_unlock(inode);

	return ret;
}

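/*
 * Hand the new context directory back to user space as an open file
 * descriptor, using the context directory operations.
 */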
static int spufs_context_open(struct path *path)
{
	int ret;
	struct file *filp;

	ret = get_unused_fd_flags(0);
	if (ret < 0)
		return ret;

	filp = dentry_open(path, O_RDONLY, current_cred());
	if (IS_ERR(filp)) {
		put_unused_fd(ret);
		return PTR_ERR(filp);
	}

	filp->f_op = &spufs_context_fops;
	fd_install(ret, filp);
	return ret;
}

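/*
 * Validate the affinity arguments of spu_create(): affinity must be
 * supported by the hardware, requested inside a gang that has not been
 * merged yet, and some node must have enough free SPUs for the whole
 * affinity chain.  Returns the neighbor context (with a reference
 * held) for SPU_CREATE_AFFINITY_SPU, NULL otherwise, or an ERR_PTR.
 */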
static struct spu_context *
spufs_assert_affinity(unsigned int flags, struct spu_gang *gang,
						struct file *filp)
{
	struct spu_context *tmp, *neighbor, *err;
	int count, node;
	int aff_supp;

	aff_supp = !list_empty(&(list_entry(cbe_spu_info[0].spus.next,
					struct spu, cbe_list))->aff_list);

	if (!aff_supp)
		return ERR_PTR(-EINVAL);

	if (flags & SPU_CREATE_GANG)
		return ERR_PTR(-EINVAL);

	if (flags & SPU_CREATE_AFFINITY_MEM &&
	    gang->aff_ref_ctx &&
	    gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM)
		return ERR_PTR(-EEXIST);

	if (gang->aff_flags & AFF_MERGED)
		return ERR_PTR(-EBUSY);

	neighbor = NULL;
	if (flags & SPU_CREATE_AFFINITY_SPU) {
		if (!filp || filp->f_op != &spufs_context_fops)
			return ERR_PTR(-EINVAL);

		neighbor = get_spu_context(
				SPUFS_I(file_inode(filp))->i_ctx);

		if (!list_empty(&neighbor->aff_list) && !(neighbor->aff_head) &&
		    !list_is_last(&neighbor->aff_list, &gang->aff_list_head) &&
		    !list_entry(neighbor->aff_list.next, struct spu_context,
		    aff_list)->aff_head) {
			err = ERR_PTR(-EEXIST);
			goto out_put_neighbor;
		}

		if (gang != neighbor->gang) {
			err = ERR_PTR(-EINVAL);
			goto out_put_neighbor;
		}

		count = 1;
		list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
			count++;
		if (list_empty(&neighbor->aff_list))
			count++;

		for (node = 0; node < MAX_NUMNODES; node++) {
			if ((cbe_spu_info[node].n_spus - atomic_read(
				&cbe_spu_info[node].reserved_spus)) >= count)
				break;
		}

		if (node == MAX_NUMNODES) {
			err = ERR_PTR(-EEXIST);
			goto out_put_neighbor;
		}
	}

	return neighbor;

out_put_neighbor:
	put_spu_context(neighbor);
	return err;
}

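/*
 * Link the new context into the gang's affinity list next to the
 * neighbor chosen by the caller, and record the memory-affinity
 * reference context when requested.
 */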
static void
spufs_set_affinity(unsigned int flags, struct spu_context *ctx,
		struct spu_context *neighbor)
{
	if (flags & SPU_CREATE_AFFINITY_MEM)
		ctx->gang->aff_ref_ctx = ctx;

	if (flags & SPU_CREATE_AFFINITY_SPU) {
		if (list_empty(&neighbor->aff_list)) {
			list_add_tail(&neighbor->aff_list,
				&ctx->gang->aff_list_head);
			neighbor->aff_head = 1;
		}

		if (list_is_last(&neighbor->aff_list, &ctx->gang->aff_list_head)
		    || list_entry(neighbor->aff_list.next, struct spu_context,
							aff_list)->aff_head) {
			list_add(&ctx->aff_list, &neighbor->aff_list);
		} else {
			list_add_tail(&ctx->aff_list, &neighbor->aff_list);
			if (neighbor->aff_head) {
				neighbor->aff_head = 0;
				ctx->aff_head = 1;
			}
		}

		if (!ctx->gang->aff_ref_ctx)
			ctx->gang->aff_ref_ctx = ctx;
	}
}

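/*
 * Back end of spu_create() for a plain context: check the creation
 * flags, set up affinity under the gang's aff_mutex, create the
 * directory tree and return an open fd for it.
 */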
static int
spufs_create_context(struct inode *inode, struct dentry *dentry,
			struct vfsmount *mnt, int flags, umode_t mode,
			struct file *aff_filp)
{
	int ret;
	int affinity;
	struct spu_gang *gang;
	struct spu_context *neighbor;
	struct path path = {.mnt = mnt, .dentry = dentry};

	if ((flags & SPU_CREATE_NOSCHED) &&
	    !capable(CAP_SYS_NICE))
		return -EPERM;

	if ((flags & (SPU_CREATE_NOSCHED | SPU_CREATE_ISOLATE))
	    == SPU_CREATE_ISOLATE)
		return -EINVAL;

	if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader)
		return -ENODEV;

	gang = NULL;
	neighbor = NULL;
	affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU);
	if (affinity) {
		gang = SPUFS_I(inode)->i_gang;
		if (!gang)
			return -EINVAL;
		mutex_lock(&gang->aff_mutex);
		neighbor = spufs_assert_affinity(flags, gang, aff_filp);
		if (IS_ERR(neighbor)) {
			ret = PTR_ERR(neighbor);
			goto out_aff_unlock;
		}
	}

	ret = spufs_mkdir(inode, dentry, flags, mode & 0777);
	if (ret)
		goto out_aff_unlock;

	if (affinity) {
		spufs_set_affinity(flags, SPUFS_I(d_inode(dentry))->i_ctx,
								neighbor);
		if (neighbor)
			put_spu_context(neighbor);
	}

	ret = spufs_context_open(&path);
	if (ret < 0)
		WARN_ON(spufs_rmdir(inode, dentry));

out_aff_unlock:
	if (affinity)
		mutex_unlock(&gang->aff_mutex);
	return ret;
}

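/*
 * Gangs are directories directly below the spufs root; a gang inode
 * carries a struct spu_gang instead of a context.
 */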
static int
spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int ret;
	struct inode *inode;
	struct spu_gang *gang;

	ret = -ENOSPC;
	inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
	if (!inode)
		goto out;

	ret = 0;
	inode_init_owner(&init_user_ns, inode, dir, mode | S_IFDIR);
	gang = alloc_spu_gang();
	SPUFS_I(inode)->i_ctx = NULL;
	SPUFS_I(inode)->i_gang = gang;
	if (!gang) {
		ret = -ENOMEM;
		goto out_iput;
	}

	inode->i_op = &simple_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;

	d_instantiate(dentry, inode);
	inc_nlink(dir);
	inc_nlink(d_inode(dentry));
	return ret;

out_iput:
	iput(inode);
out:
	return ret;
}

static int spufs_gang_open(struct path *path)
{
	int ret;
	struct file *filp;

	ret = get_unused_fd_flags(0);
	if (ret < 0)
		return ret;

	/*
	 * get references for dget and mntget, will be released
	 * in error path of *_open().
	 */
	filp = dentry_open(path, O_RDONLY, current_cred());
	if (IS_ERR(filp)) {
		put_unused_fd(ret);
		return PTR_ERR(filp);
	}

	filp->f_op = &simple_dir_operations;
	fd_install(ret, filp);
	return ret;
}

static int spufs_create_gang(struct inode *inode,
			struct dentry *dentry,
			struct vfsmount *mnt, umode_t mode)
{
	struct path path = {.mnt = mnt, .dentry = dentry};
	int ret;

	ret = spufs_mkgang(inode, dentry, mode & 0777);
	if (!ret) {
		ret = spufs_gang_open(&path);
		if (ret < 0) {
			int err = simple_rmdir(inode, dentry);
			WARN_ON(err);
		}
	}
	return ret;
}


static struct file_system_type spufs_type;

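/*
 * Entry point for the spu_create(2) system call, called from the
 * powerpc syscall glue.  A rough user-space sketch, illustrative only
 * and assuming spufs is mounted on /spu (see the spu_create(2) man
 * page for the real interface):
 *
 *	int ctx = spu_create("/spu/myctx", 0, 0755);
 *	... mmap the context files, run the SPU via spu_run() ...
 *	close(ctx);	// dropping the fd tears the context down
 *
 * The path must point into a mounted spufs; gangs may only be created
 * directly below the root, and contexts below the root or a gang.
 */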
long spufs_create(struct path *path, struct dentry *dentry,
		unsigned int flags, umode_t mode, struct file *filp)
{
	struct inode *dir = d_inode(path->dentry);
	int ret;

	/* check if we are on spufs */
	if (path->dentry->d_sb->s_type != &spufs_type)
		return -EINVAL;

	/* don't accept undefined flags */
	if (flags & (~SPU_CREATE_FLAG_ALL))
		return -EINVAL;

	/* only threads can be underneath a gang */
	if (path->dentry != path->dentry->d_sb->s_root)
		if ((flags & SPU_CREATE_GANG) || !SPUFS_I(dir)->i_gang)
			return -EINVAL;

	mode &= ~current_umask();

	if (flags & SPU_CREATE_GANG)
		ret = spufs_create_gang(dir, dentry, path->mnt, mode);
	else
		ret = spufs_create_context(dir, dentry, path->mnt, flags, mode,
					    filp);
	if (ret >= 0)
		fsnotify_mkdir(dir, dentry);

	return ret;
}

/* File system initialization */
struct spufs_fs_context {
	kuid_t uid;
	kgid_t gid;
	umode_t mode;
};

enum {
	Opt_uid, Opt_gid, Opt_mode, Opt_debug,
};

static const struct fs_parameter_spec spufs_fs_parameters[] = {
	fsparam_u32 ("gid", Opt_gid),
	fsparam_u32oct ("mode", Opt_mode),
	fsparam_u32 ("uid", Opt_uid),
	fsparam_flag ("debug", Opt_debug),
	{}
};

static int spufs_show_options(struct seq_file *m, struct dentry *root)
{
	struct spufs_sb_info *sbi = spufs_get_sb_info(root->d_sb);
	struct inode *inode = root->d_inode;

	if (!uid_eq(inode->i_uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, inode->i_uid));
	if (!gid_eq(inode->i_gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, inode->i_gid));
	if ((inode->i_mode & S_IALLUGO) != 0775)
		seq_printf(m, ",mode=%o", inode->i_mode);
	if (sbi->debug)
		seq_puts(m, ",debug");
	return 0;
}

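/*
 * Mount options: uid=, gid= and mode= set the ownership and
 * permissions of the root directory; "debug" exposes the additional
 * debug files in every context directory.
 */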
static int spufs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct spufs_fs_context *ctx = fc->fs_private;
	struct spufs_sb_info *sbi = fc->s_fs_info;
	struct fs_parse_result result;
	kuid_t uid;
	kgid_t gid;
	int opt;

	opt = fs_parse(fc, spufs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_uid:
		uid = make_kuid(current_user_ns(), result.uint_32);
		if (!uid_valid(uid))
			return invalf(fc, "Unknown uid");
		ctx->uid = uid;
		break;
	case Opt_gid:
		gid = make_kgid(current_user_ns(), result.uint_32);
		if (!gid_valid(gid))
			return invalf(fc, "Unknown gid");
		ctx->gid = gid;
		break;
	case Opt_mode:
		ctx->mode = result.uint_32 & S_IALLUGO;
		break;
	case Opt_debug:
		sbi->debug = true;
		break;
	}

	return 0;
}

static void spufs_exit_isolated_loader(void)
{
	free_pages((unsigned long) isolated_loader,
			get_order(isolated_loader_size));
}

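/*
 * The isolated-mode loader is provided by the firmware through the
 * "loader" property of the /spu-isolation device tree node; copy it
 * into page-aligned kernel memory so SPU_CREATE_ISOLATE can use it.
 */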
static void
spufs_init_isolated_loader(void)
{
	struct device_node *dn;
	const char *loader;
	int size;

	dn = of_find_node_by_path("/spu-isolation");
	if (!dn)
		return;

	loader = of_get_property(dn, "loader", &size);
	of_node_put(dn);
	if (!loader)
		return;

	/* the loader must be aligned on a 16 byte boundary */
	isolated_loader = (char *)__get_free_pages(GFP_KERNEL, get_order(size));
	if (!isolated_loader)
		return;

	isolated_loader_size = size;
	memcpy(isolated_loader, loader, size);
	printk(KERN_INFO "spufs: SPU isolation mode enabled\n");
}

static int spufs_create_root(struct super_block *sb, struct fs_context *fc)
{
	struct spufs_fs_context *ctx = fc->fs_private;
	struct inode *inode;

	if (!spu_management_ops)
		return -ENODEV;

	inode = spufs_new_inode(sb, S_IFDIR | ctx->mode);
	if (!inode)
		return -ENOMEM;

	inode->i_uid = ctx->uid;
	inode->i_gid = ctx->gid;
	inode->i_op = &simple_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;
	SPUFS_I(inode)->i_ctx = NULL;
	inc_nlink(inode);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}

static const struct super_operations spufs_ops = {
	.alloc_inode = spufs_alloc_inode,
	.free_inode = spufs_free_inode,
	.statfs = simple_statfs,
	.evict_inode = spufs_evict_inode,
	.show_options = spufs_show_options,
};

static int spufs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = SPUFS_MAGIC;
	sb->s_op = &spufs_ops;

	return spufs_create_root(sb, fc);
}

static int spufs_get_tree(struct fs_context *fc)
{
	return get_tree_single(fc, spufs_fill_super);
}

static void spufs_free_fc(struct fs_context *fc)
{
	kfree(fc->s_fs_info);
}

static const struct fs_context_operations spufs_context_ops = {
	.free = spufs_free_fc,
	.parse_param = spufs_parse_param,
	.get_tree = spufs_get_tree,
};

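/*
 * Per-mount setup: allocate the fs_context private data and the
 * superblock info, defaulting to the mounting task's uid/gid and
 * mode 0755 for the root directory.
 */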
static int spufs_init_fs_context(struct fs_context *fc)
{
	struct spufs_fs_context *ctx;
	struct spufs_sb_info *sbi;

	ctx = kzalloc(sizeof(struct spufs_fs_context), GFP_KERNEL);
	if (!ctx)
		goto nomem;

	sbi = kzalloc(sizeof(struct spufs_sb_info), GFP_KERNEL);
	if (!sbi)
		goto nomem_ctx;

	ctx->uid = current_uid();
	ctx->gid = current_gid();
	ctx->mode = 0755;

	fc->fs_private = ctx;
	fc->s_fs_info = sbi;
	fc->ops = &spufs_context_ops;
	return 0;

nomem_ctx:
	kfree(ctx);
nomem:
	return -ENOMEM;
}

static struct file_system_type spufs_type = {
	.owner = THIS_MODULE,
	.name = "spufs",
	.init_fs_context = spufs_init_fs_context,
	.parameters = spufs_fs_parameters,
	.kill_sb = kill_litter_super,
};
MODULE_ALIAS_FS("spufs");

static int __init spufs_init(void)
{
	int ret;

	ret = -ENODEV;
	if (!spu_management_ops)
		goto out;

	ret = -ENOMEM;
	spufs_inode_cache = kmem_cache_create("spufs_inode_cache",
			sizeof(struct spufs_inode_info), 0,
			SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, spufs_init_once);

	if (!spufs_inode_cache)
		goto out;
	ret = spu_sched_init();
	if (ret)
		goto out_cache;
	ret = register_spu_syscalls(&spufs_calls);
	if (ret)
		goto out_sched;
	ret = register_filesystem(&spufs_type);
	if (ret)
		goto out_syscalls;

	spufs_init_isolated_loader();

	return 0;

out_syscalls:
	unregister_spu_syscalls(&spufs_calls);
out_sched:
	spu_sched_exit();
out_cache:
	kmem_cache_destroy(spufs_inode_cache);
out:
	return ret;
}
module_init(spufs_init);

static void __exit spufs_exit(void)
{
	spu_sched_exit();
	spufs_exit_isolated_loader();
	unregister_spu_syscalls(&spufs_calls);
	unregister_filesystem(&spufs_type);
	kmem_cache_destroy(spufs_inode_cache);
}
module_exit(spufs_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");