// SPDX-License-Identifier: GPL-2.0-only

#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/inet.h>
#include <linux/in6.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/statfs.h>
#include <linux/string.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>

static DEFINE_SPINLOCK(ceph_fsc_lock);
static LIST_HEAD(ceph_fsc_list);

/*
 * Ceph superblock operations
 *
 * Handle the basics of mounting, unmounting.
 */

/*
 * super ops
 */
static void ceph_put_super(struct super_block *s)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(s);

        dout("put_super\n");
        ceph_mdsc_close_sessions(fsc->mdsc);
}

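/*
 * Fill in statfs for the VFS: report cluster-wide usage, or the root
 * directory quota when quotadf is in effect and a max_bytes quota is set.
 */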
static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct ceph_fs_client *fsc = ceph_inode_to_client(d_inode(dentry));
        struct ceph_mon_client *monc = &fsc->client->monc;
        struct ceph_statfs st;
        int i, err;
        u64 data_pool;

        if (fsc->mdsc->mdsmap->m_num_data_pg_pools == 1) {
                data_pool = fsc->mdsc->mdsmap->m_data_pg_pools[0];
        } else {
                data_pool = CEPH_NOPOOL;
        }

        dout("statfs\n");
        err = ceph_monc_do_statfs(monc, data_pool, &st);
        if (err < 0)
                return err;

        /* fill in kstatfs */
        buf->f_type = CEPH_SUPER_MAGIC;  /* ?? */

        /*
         * express utilization in terms of large blocks to avoid
         * overflow on 32-bit machines.
         *
         * NOTE: for the time being, we make bsize == frsize to humor
         * not-yet-ancient versions of glibc that are broken.
         * Someday, we will probably want to report a real block
         * size...  whatever that may mean for a network file system!
         */
        buf->f_bsize = 1 << CEPH_BLOCK_SHIFT;
        buf->f_frsize = 1 << CEPH_BLOCK_SHIFT;

        /*
         * By default use root quota for stats; fallback to overall filesystem
         * usage if using 'noquotadf' mount option or if the root dir doesn't
         * have max_bytes quota set.
         */
        if (ceph_test_mount_opt(fsc, NOQUOTADF) ||
            !ceph_quota_update_statfs(fsc, buf)) {
                buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10);
                buf->f_bfree = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
                buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
        }

        buf->f_files = le64_to_cpu(st.num_objects);
        buf->f_ffree = -1;
        buf->f_namelen = NAME_MAX;

        /* Must convert the fsid, for consistent values across arches */
        buf->f_fsid.val[0] = 0;
        mutex_lock(&monc->mutex);
        for (i = 0 ; i < sizeof(monc->monmap->fsid) / sizeof(__le32) ; ++i)
                buf->f_fsid.val[0] ^= le32_to_cpu(((__le32 *)&monc->monmap->fsid)[i]);
        mutex_unlock(&monc->mutex);

        /* fold the fs_cluster_id into the upper bits */
        buf->f_fsid.val[1] = monc->fs_cluster_id;

        return 0;
}

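/*
 * Flush dirty data and metadata.  A non-blocking sync only kicks off
 * cap flushes; a blocking sync also waits for in-flight OSD and MDS
 * requests to complete.
 */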
static int ceph_sync_fs(struct super_block *sb, int wait)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(sb);

        if (!wait) {
                dout("sync_fs (non-blocking)\n");
                ceph_flush_dirty_caps(fsc->mdsc);
                dout("sync_fs (non-blocking) done\n");
                return 0;
        }

        dout("sync_fs (blocking)\n");
        ceph_osdc_sync(&fsc->client->osdc);
        ceph_mdsc_sync(fsc->mdsc);
        dout("sync_fs (blocking) done\n");
        return 0;
}

/*
 * mount options
 */
enum {
        Opt_wsize,
        Opt_rsize,
        Opt_rasize,
        Opt_caps_wanted_delay_min,
        Opt_caps_wanted_delay_max,
        Opt_caps_max,
        Opt_readdir_max_entries,
        Opt_readdir_max_bytes,
        Opt_congestion_kb,
        /* int args above */
        Opt_snapdirname,
        Opt_mds_namespace,
        Opt_recover_session,
        Opt_source,
        /* string args above */
        Opt_dirstat,
        Opt_rbytes,
        Opt_asyncreaddir,
        Opt_dcache,
        Opt_ino32,
        Opt_fscache,
        Opt_poolperm,
        Opt_require_active_mds,
        Opt_acl,
        Opt_quotadf,
        Opt_copyfrom,
        Opt_wsync,
};

enum ceph_recover_session_mode {
        ceph_recover_session_no,
        ceph_recover_session_clean
};

static const struct constant_table ceph_param_recover[] = {
        { "no", ceph_recover_session_no },
        { "clean", ceph_recover_session_clean },
        {}
};

static const struct fs_parameter_spec ceph_mount_parameters[] = {
        fsparam_flag_no ("acl", Opt_acl),
        fsparam_flag_no ("asyncreaddir", Opt_asyncreaddir),
        fsparam_s32 ("caps_max", Opt_caps_max),
        fsparam_u32 ("caps_wanted_delay_max", Opt_caps_wanted_delay_max),
        fsparam_u32 ("caps_wanted_delay_min", Opt_caps_wanted_delay_min),
        fsparam_u32 ("write_congestion_kb", Opt_congestion_kb),
        fsparam_flag_no ("copyfrom", Opt_copyfrom),
        fsparam_flag_no ("dcache", Opt_dcache),
        fsparam_flag_no ("dirstat", Opt_dirstat),
        fsparam_flag_no ("fsc", Opt_fscache), // fsc|nofsc
        fsparam_string ("fsc", Opt_fscache), // fsc=...
        fsparam_flag_no ("ino32", Opt_ino32),
        fsparam_string ("mds_namespace", Opt_mds_namespace),
        fsparam_flag_no ("poolperm", Opt_poolperm),
        fsparam_flag_no ("quotadf", Opt_quotadf),
        fsparam_u32 ("rasize", Opt_rasize),
        fsparam_flag_no ("rbytes", Opt_rbytes),
        fsparam_u32 ("readdir_max_bytes", Opt_readdir_max_bytes),
        fsparam_u32 ("readdir_max_entries", Opt_readdir_max_entries),
        fsparam_enum ("recover_session", Opt_recover_session, ceph_param_recover),
        fsparam_flag_no ("require_active_mds", Opt_require_active_mds),
        fsparam_u32 ("rsize", Opt_rsize),
        fsparam_string ("snapdirname", Opt_snapdirname),
        fsparam_string ("source", Opt_source),
        fsparam_u32 ("wsize", Opt_wsize),
        fsparam_flag_no ("wsync", Opt_wsync),
        {}
};

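/*
 * Example mount invocation exercising a few of the options above
 * (device string and option values are illustrative only):
 *
 *   mount -t ceph 192.168.0.1:6789:/ /mnt/cephfs \
 *         -o name=admin,rasize=8388608,noasyncreaddir
 */
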
struct ceph_parse_opts_ctx {
        struct ceph_options *copts;
        struct ceph_mount_options *opts;
};

/*
 * Remove adjacent slashes and then the trailing slash, unless it is
 * the only remaining character.
 *
 * E.g. "//dir1////dir2///" --> "/dir1/dir2", "///" --> "/".
 */
static void canonicalize_path(char *path)
{
        int i, j = 0;

        for (i = 0; path[i] != '\0'; i++) {
                if (path[i] != '/' || j < 1 || path[j - 1] != '/')
                        path[j++] = path[i];
        }

        if (j > 1 && path[j - 1] == '/')
                j--;
        path[j] = '\0';
}

/*
 * Parse the source parameter.  Distinguish the server list from the path.
 *
 * The source will look like:
 *     <server_spec>[,<server_spec>...]:[<path>]
 * where
 *     <server_spec> is <ip>[:<port>]
 *     <path> is optional, but if present must begin with '/'
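 *
 * E.g. "192.168.0.1:6789,192.168.0.2:6789:/mydir" (the addresses here
 * are purely illustrative).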
 */
static int ceph_parse_source(struct fs_parameter *param, struct fs_context *fc)
{
        struct ceph_parse_opts_ctx *pctx = fc->fs_private;
        struct ceph_mount_options *fsopt = pctx->opts;
        char *dev_name = param->string, *dev_name_end;
        int ret;

        dout("%s '%s'\n", __func__, dev_name);
        if (!dev_name || !*dev_name)
                return invalfc(fc, "Empty source");

        dev_name_end = strchr(dev_name, '/');
        if (dev_name_end) {
                /*
                 * The server_path will include the entire string from
                 * userland, including the leading '/'.
                 */
                kfree(fsopt->server_path);
                fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
                if (!fsopt->server_path)
                        return -ENOMEM;

                canonicalize_path(fsopt->server_path);
        } else {
                dev_name_end = dev_name + strlen(dev_name);
        }

        dev_name_end--;         /* back up to ':' separator */
        if (dev_name_end < dev_name || *dev_name_end != ':')
                return invalfc(fc, "No path or : separator in source");

        dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);
        if (fsopt->server_path)
                dout("server path '%s'\n", fsopt->server_path);

        ret = ceph_parse_mon_ips(param->string, dev_name_end - dev_name,
                                 pctx->copts, fc->log.log);
        if (ret)
                return ret;

        fc->source = param->string;
        param->string = NULL;
        return 0;
}

static int ceph_parse_mount_param(struct fs_context *fc,
                                  struct fs_parameter *param)
{
        struct ceph_parse_opts_ctx *pctx = fc->fs_private;
        struct ceph_mount_options *fsopt = pctx->opts;
        struct fs_parse_result result;
        unsigned int mode;
        int token, ret;

        ret = ceph_parse_param(param, pctx->copts, fc->log.log);
        if (ret != -ENOPARAM)
                return ret;

        token = fs_parse(fc, ceph_mount_parameters, param, &result);
        dout("%s fs_parse '%s' token %d\n", __func__, param->key, token);
        if (token < 0)
                return token;

        switch (token) {
        case Opt_snapdirname:
                kfree(fsopt->snapdir_name);
                fsopt->snapdir_name = param->string;
                param->string = NULL;
                break;
        case Opt_mds_namespace:
                kfree(fsopt->mds_namespace);
                fsopt->mds_namespace = param->string;
                param->string = NULL;
                break;
        case Opt_recover_session:
                mode = result.uint_32;
                if (mode == ceph_recover_session_no)
                        fsopt->flags &= ~CEPH_MOUNT_OPT_CLEANRECOVER;
                else if (mode == ceph_recover_session_clean)
                        fsopt->flags |= CEPH_MOUNT_OPT_CLEANRECOVER;
                else
                        BUG();
                break;
        case Opt_source:
                if (fc->source)
                        return invalfc(fc, "Multiple sources specified");
                return ceph_parse_source(param, fc);
        case Opt_wsize:
                if (result.uint_32 < PAGE_SIZE ||
                    result.uint_32 > CEPH_MAX_WRITE_SIZE)
                        goto out_of_range;
                fsopt->wsize = ALIGN(result.uint_32, PAGE_SIZE);
                break;
        case Opt_rsize:
                if (result.uint_32 < PAGE_SIZE ||
                    result.uint_32 > CEPH_MAX_READ_SIZE)
                        goto out_of_range;
                fsopt->rsize = ALIGN(result.uint_32, PAGE_SIZE);
                break;
        case Opt_rasize:
                fsopt->rasize = ALIGN(result.uint_32, PAGE_SIZE);
                break;
        case Opt_caps_wanted_delay_min:
                if (result.uint_32 < 1)
                        goto out_of_range;
                fsopt->caps_wanted_delay_min = result.uint_32;
                break;
        case Opt_caps_wanted_delay_max:
                if (result.uint_32 < 1)
                        goto out_of_range;
                fsopt->caps_wanted_delay_max = result.uint_32;
                break;
        case Opt_caps_max:
                if (result.int_32 < 0)
                        goto out_of_range;
                fsopt->caps_max = result.int_32;
                break;
        case Opt_readdir_max_entries:
                if (result.uint_32 < 1)
                        goto out_of_range;
                fsopt->max_readdir = result.uint_32;
                break;
        case Opt_readdir_max_bytes:
                if (result.uint_32 < PAGE_SIZE && result.uint_32 != 0)
                        goto out_of_range;
                fsopt->max_readdir_bytes = result.uint_32;
                break;
        case Opt_congestion_kb:
                if (result.uint_32 < 1024) /* at least 1M */
                        goto out_of_range;
                fsopt->congestion_kb = result.uint_32;
                break;
        case Opt_dirstat:
                if (!result.negated)
                        fsopt->flags |= CEPH_MOUNT_OPT_DIRSTAT;
                else
                        fsopt->flags &= ~CEPH_MOUNT_OPT_DIRSTAT;
                break;
        case Opt_rbytes:
                if (!result.negated)
                        fsopt->flags |= CEPH_MOUNT_OPT_RBYTES;
                else
                        fsopt->flags &= ~CEPH_MOUNT_OPT_RBYTES;
                break;
        case Opt_asyncreaddir:
                if (!result.negated)
                        fsopt->flags &= ~CEPH_MOUNT_OPT_NOASYNCREADDIR;
                else
                        fsopt->flags |= CEPH_MOUNT_OPT_NOASYNCREADDIR;
                break;
        case Opt_dcache:
                if (!result.negated)
                        fsopt->flags |= CEPH_MOUNT_OPT_DCACHE;
                else
                        fsopt->flags &= ~CEPH_MOUNT_OPT_DCACHE;
                break;
        case Opt_ino32:
                if (!result.negated)
                        fsopt->flags |= CEPH_MOUNT_OPT_INO32;
                else
                        fsopt->flags &= ~CEPH_MOUNT_OPT_INO32;
                break;

        case Opt_fscache:
#ifdef CONFIG_CEPH_FSCACHE
                kfree(fsopt->fscache_uniq);
                fsopt->fscache_uniq = NULL;
                if (result.negated) {
                        fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE;
                } else {
                        fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
                        fsopt->fscache_uniq = param->string;
                        param->string = NULL;
                }
                break;
#else
                return invalfc(fc, "fscache support is disabled");
#endif
        case Opt_poolperm:
                if (!result.negated)
                        fsopt->flags &= ~CEPH_MOUNT_OPT_NOPOOLPERM;
                else
                        fsopt->flags |= CEPH_MOUNT_OPT_NOPOOLPERM;
                break;
        case Opt_require_active_mds:
                if (!result.negated)
                        fsopt->flags &= ~CEPH_MOUNT_OPT_MOUNTWAIT;
                else
                        fsopt->flags |= CEPH_MOUNT_OPT_MOUNTWAIT;
                break;
        case Opt_quotadf:
                if (!result.negated)
                        fsopt->flags &= ~CEPH_MOUNT_OPT_NOQUOTADF;
                else
                        fsopt->flags |= CEPH_MOUNT_OPT_NOQUOTADF;
                break;
        case Opt_copyfrom:
                if (!result.negated)
                        fsopt->flags &= ~CEPH_MOUNT_OPT_NOCOPYFROM;
                else
                        fsopt->flags |= CEPH_MOUNT_OPT_NOCOPYFROM;
                break;
        case Opt_acl:
                if (!result.negated) {
#ifdef CONFIG_CEPH_FS_POSIX_ACL
                        fc->sb_flags |= SB_POSIXACL;
#else
                        return invalfc(fc, "POSIX ACL support is disabled");
#endif
                } else {
                        fc->sb_flags &= ~SB_POSIXACL;
                }
                break;
        case Opt_wsync:
                if (!result.negated)
                        fsopt->flags &= ~CEPH_MOUNT_OPT_ASYNC_DIROPS;
                else
                        fsopt->flags |= CEPH_MOUNT_OPT_ASYNC_DIROPS;
                break;
        default:
                BUG();
        }
        return 0;

out_of_range:
        return invalfc(fc, "%s out of range", param->key);
}

static void destroy_mount_options(struct ceph_mount_options *args)
{
        dout("destroy_mount_options %p\n", args);
        if (!args)
                return;

        kfree(args->snapdir_name);
        kfree(args->mds_namespace);
        kfree(args->server_path);
        kfree(args->fscache_uniq);
        kfree(args);
}

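/*
 * A NULL-tolerant strcmp(): two NULL pointers compare equal, and a NULL
 * never matches a non-NULL string.
 */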
static int strcmp_null(const char *s1, const char *s2)
{
        if (!s1 && !s2)
                return 0;
        if (s1 && !s2)
                return -1;
        if (!s1 && s2)
                return 1;
        return strcmp(s1, s2);
}

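/*
 * Compare two sets of mount options: the flag/integer fields that
 * precede snapdir_name are compared as one memcmp() block, the string
 * options individually, and finally the libceph-level options.
 */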
static int compare_mount_options(struct ceph_mount_options *new_fsopt,
                                 struct ceph_options *new_opt,
                                 struct ceph_fs_client *fsc)
{
        struct ceph_mount_options *fsopt1 = new_fsopt;
        struct ceph_mount_options *fsopt2 = fsc->mount_options;
        int ofs = offsetof(struct ceph_mount_options, snapdir_name);
        int ret;

        ret = memcmp(fsopt1, fsopt2, ofs);
        if (ret)
                return ret;

        ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
        if (ret)
                return ret;

        ret = strcmp_null(fsopt1->mds_namespace, fsopt2->mds_namespace);
        if (ret)
                return ret;

        ret = strcmp_null(fsopt1->server_path, fsopt2->server_path);
        if (ret)
                return ret;

        ret = strcmp_null(fsopt1->fscache_uniq, fsopt2->fscache_uniq);
        if (ret)
                return ret;

        return ceph_compare_options(new_opt, fsc->client);
}

/**
 * ceph_show_options - Show mount options in /proc/mounts
 * @m: seq_file to write to
 * @root: root of that (sub)tree
 */
static int ceph_show_options(struct seq_file *m, struct dentry *root)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(root->d_sb);
        struct ceph_mount_options *fsopt = fsc->mount_options;
        size_t pos;
        int ret;

        /* a comma between MNT/MS and client options */
        seq_putc(m, ',');
        pos = m->count;

        ret = ceph_print_client_options(m, fsc->client, false);
        if (ret)
                return ret;

        /* retract our comma if no client options */
        if (m->count == pos)
                m->count--;

        if (fsopt->flags & CEPH_MOUNT_OPT_DIRSTAT)
                seq_puts(m, ",dirstat");
        if ((fsopt->flags & CEPH_MOUNT_OPT_RBYTES))
                seq_puts(m, ",rbytes");
        if (fsopt->flags & CEPH_MOUNT_OPT_NOASYNCREADDIR)
                seq_puts(m, ",noasyncreaddir");
        if ((fsopt->flags & CEPH_MOUNT_OPT_DCACHE) == 0)
                seq_puts(m, ",nodcache");
        if (fsopt->flags & CEPH_MOUNT_OPT_INO32)
                seq_puts(m, ",ino32");
        if (fsopt->flags & CEPH_MOUNT_OPT_FSCACHE) {
                seq_show_option(m, "fsc", fsopt->fscache_uniq);
        }
        if (fsopt->flags & CEPH_MOUNT_OPT_NOPOOLPERM)
                seq_puts(m, ",nopoolperm");
        if (fsopt->flags & CEPH_MOUNT_OPT_NOQUOTADF)
                seq_puts(m, ",noquotadf");

#ifdef CONFIG_CEPH_FS_POSIX_ACL
        if (root->d_sb->s_flags & SB_POSIXACL)
                seq_puts(m, ",acl");
        else
                seq_puts(m, ",noacl");
#endif

        if ((fsopt->flags & CEPH_MOUNT_OPT_NOCOPYFROM) == 0)
                seq_puts(m, ",copyfrom");

        if (fsopt->mds_namespace)
                seq_show_option(m, "mds_namespace", fsopt->mds_namespace);

        if (fsopt->flags & CEPH_MOUNT_OPT_CLEANRECOVER)
                seq_show_option(m, "recover_session", "clean");

        if (fsopt->flags & CEPH_MOUNT_OPT_ASYNC_DIROPS)
                seq_puts(m, ",nowsync");

        if (fsopt->wsize != CEPH_MAX_WRITE_SIZE)
                seq_printf(m, ",wsize=%u", fsopt->wsize);
        if (fsopt->rsize != CEPH_MAX_READ_SIZE)
                seq_printf(m, ",rsize=%u", fsopt->rsize);
        if (fsopt->rasize != CEPH_RASIZE_DEFAULT)
                seq_printf(m, ",rasize=%u", fsopt->rasize);
        if (fsopt->congestion_kb != default_congestion_kb())
                seq_printf(m, ",write_congestion_kb=%u", fsopt->congestion_kb);
        if (fsopt->caps_max)
                seq_printf(m, ",caps_max=%d", fsopt->caps_max);
        if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
                seq_printf(m, ",caps_wanted_delay_min=%u",
                           fsopt->caps_wanted_delay_min);
        if (fsopt->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT)
                seq_printf(m, ",caps_wanted_delay_max=%u",
                           fsopt->caps_wanted_delay_max);
        if (fsopt->max_readdir != CEPH_MAX_READDIR_DEFAULT)
                seq_printf(m, ",readdir_max_entries=%u", fsopt->max_readdir);
        if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
                seq_printf(m, ",readdir_max_bytes=%u", fsopt->max_readdir_bytes);
        if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
                seq_show_option(m, "snapdirname", fsopt->snapdir_name);

        return 0;
}

/*
 * handle any mon messages the standard library doesn't understand.
 * return error if we don't either.
 */
static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg)
{
        struct ceph_fs_client *fsc = client->private;
        int type = le16_to_cpu(msg->hdr.type);

        switch (type) {
        case CEPH_MSG_MDS_MAP:
                ceph_mdsc_handle_mdsmap(fsc->mdsc, msg);
                return 0;
        case CEPH_MSG_FS_MAP_USER:
                ceph_mdsc_handle_fsmap(fsc->mdsc, msg);
                return 0;
        default:
                return -1;
        }
}

/*
 * create a new fs client
 *
 * Success or not, this function consumes @fsopt and @opt.
 */
static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
                                               struct ceph_options *opt)
{
        struct ceph_fs_client *fsc;
        int err;

        fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
        if (!fsc) {
                err = -ENOMEM;
                goto fail;
        }

        fsc->client = ceph_create_client(opt, fsc);
        if (IS_ERR(fsc->client)) {
                err = PTR_ERR(fsc->client);
                goto fail;
        }
        opt = NULL; /* fsc->client now owns this */

        fsc->client->extra_mon_dispatch = extra_mon_dispatch;
        ceph_set_opt(fsc->client, ABORT_ON_FULL);

        if (!fsopt->mds_namespace) {
                ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
                                   0, true);
        } else {
                ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_FSMAP,
                                   0, false);
        }

        fsc->mount_options = fsopt;

        fsc->sb = NULL;
        fsc->mount_state = CEPH_MOUNT_MOUNTING;
        fsc->filp_gen = 1;
        fsc->have_copy_from2 = true;

        atomic_long_set(&fsc->writeback_count, 0);

        err = -ENOMEM;
        /*
         * The number of concurrent works can be high but they don't need
         * to be processed in parallel, limit concurrency.
         */
        fsc->inode_wq = alloc_workqueue("ceph-inode", WQ_UNBOUND, 0);
        if (!fsc->inode_wq)
                goto fail_client;
        fsc->cap_wq = alloc_workqueue("ceph-cap", 0, 1);
        if (!fsc->cap_wq)
                goto fail_inode_wq;

        spin_lock(&ceph_fsc_lock);
        list_add_tail(&fsc->metric_wakeup, &ceph_fsc_list);
        spin_unlock(&ceph_fsc_lock);

        return fsc;

fail_inode_wq:
        destroy_workqueue(fsc->inode_wq);
fail_client:
        ceph_destroy_client(fsc->client);
fail:
        kfree(fsc);
        if (opt)
                ceph_destroy_options(opt);
        destroy_mount_options(fsopt);
        return ERR_PTR(err);
}

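/*
 * Flush any pending work on the per-fs inode and cap workqueues.
 */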
static void flush_fs_workqueues(struct ceph_fs_client *fsc)
{
        flush_workqueue(fsc->inode_wq);
        flush_workqueue(fsc->cap_wq);
}

static void destroy_fs_client(struct ceph_fs_client *fsc)
{
        dout("destroy_fs_client %p\n", fsc);

        spin_lock(&ceph_fsc_lock);
        list_del(&fsc->metric_wakeup);
        spin_unlock(&ceph_fsc_lock);

        ceph_mdsc_destroy(fsc);
        destroy_workqueue(fsc->inode_wq);
        destroy_workqueue(fsc->cap_wq);

        destroy_mount_options(fsc->mount_options);

        ceph_destroy_client(fsc->client);

        kfree(fsc);
        dout("destroy_fs_client %p done\n", fsc);
}

/*
 * caches
 */
struct kmem_cache *ceph_inode_cachep;
struct kmem_cache *ceph_cap_cachep;
struct kmem_cache *ceph_cap_flush_cachep;
struct kmem_cache *ceph_dentry_cachep;
struct kmem_cache *ceph_file_cachep;
struct kmem_cache *ceph_dir_file_cachep;
struct kmem_cache *ceph_mds_request_cachep;
mempool_t *ceph_wb_pagevec_pool;

static void ceph_inode_init_once(void *foo)
{
        struct ceph_inode_info *ci = foo;
        inode_init_once(&ci->vfs_inode);
}

static int __init init_caches(void)
{
        int error = -ENOMEM;

        ceph_inode_cachep = kmem_cache_create("ceph_inode_info",
                                              sizeof(struct ceph_inode_info),
                                              __alignof__(struct ceph_inode_info),
                                              SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
                                              SLAB_ACCOUNT, ceph_inode_init_once);
        if (!ceph_inode_cachep)
                return -ENOMEM;

        ceph_cap_cachep = KMEM_CACHE(ceph_cap, SLAB_MEM_SPREAD);
        if (!ceph_cap_cachep)
                goto bad_cap;
        ceph_cap_flush_cachep = KMEM_CACHE(ceph_cap_flush,
                                           SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
        if (!ceph_cap_flush_cachep)
                goto bad_cap_flush;

        ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info,
                                        SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
        if (!ceph_dentry_cachep)
                goto bad_dentry;

        ceph_file_cachep = KMEM_CACHE(ceph_file_info, SLAB_MEM_SPREAD);
        if (!ceph_file_cachep)
                goto bad_file;

        ceph_dir_file_cachep = KMEM_CACHE(ceph_dir_file_info, SLAB_MEM_SPREAD);
        if (!ceph_dir_file_cachep)
                goto bad_dir_file;

        ceph_mds_request_cachep = KMEM_CACHE(ceph_mds_request, SLAB_MEM_SPREAD);
        if (!ceph_mds_request_cachep)
                goto bad_mds_req;

        ceph_wb_pagevec_pool = mempool_create_kmalloc_pool(10, CEPH_MAX_WRITE_SIZE >> PAGE_SHIFT);
        if (!ceph_wb_pagevec_pool)
                goto bad_pagevec_pool;

        error = ceph_fscache_register();
        if (error)
                goto bad_fscache;

        return 0;

bad_fscache:
        mempool_destroy(ceph_wb_pagevec_pool);
bad_pagevec_pool:
        kmem_cache_destroy(ceph_mds_request_cachep);
bad_mds_req:
        kmem_cache_destroy(ceph_dir_file_cachep);
bad_dir_file:
        kmem_cache_destroy(ceph_file_cachep);
bad_file:
        kmem_cache_destroy(ceph_dentry_cachep);
bad_dentry:
        kmem_cache_destroy(ceph_cap_flush_cachep);
bad_cap_flush:
        kmem_cache_destroy(ceph_cap_cachep);
bad_cap:
        kmem_cache_destroy(ceph_inode_cachep);
        return error;
}

static void destroy_caches(void)
{
        /*
         * Make sure all delayed rcu free inodes are flushed before we
         * destroy cache.
         */
        rcu_barrier();

        kmem_cache_destroy(ceph_inode_cachep);
        kmem_cache_destroy(ceph_cap_cachep);
        kmem_cache_destroy(ceph_cap_flush_cachep);
        kmem_cache_destroy(ceph_dentry_cachep);
        kmem_cache_destroy(ceph_file_cachep);
        kmem_cache_destroy(ceph_dir_file_cachep);
        kmem_cache_destroy(ceph_mds_request_cachep);
        mempool_destroy(ceph_wb_pagevec_pool);

        ceph_fscache_unregister();
}

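/*
 * Abort in-flight OSD requests and force-close the MDS sessions, then
 * bump filp_gen so that already-open files are invalidated.
 */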
static void __ceph_umount_begin(struct ceph_fs_client *fsc)
{
        ceph_osdc_abort_requests(&fsc->client->osdc, -EIO);
        ceph_mdsc_force_umount(fsc->mdsc);
        fsc->filp_gen++; // invalidate open files
}

/*
 * ceph_umount_begin - initiate forced umount.  Tear down the
 * mount, skipping steps that may hang while waiting for server(s).
 */
static void ceph_umount_begin(struct super_block *sb)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(sb);

        dout("ceph_umount_begin - starting forced umount\n");
        if (!fsc)
                return;
        fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
        __ceph_umount_begin(fsc);
}

static const struct super_operations ceph_super_ops = {
        .alloc_inode    = ceph_alloc_inode,
        .free_inode     = ceph_free_inode,
        .write_inode    = ceph_write_inode,
        .drop_inode     = generic_delete_inode,
        .evict_inode    = ceph_evict_inode,
        .sync_fs        = ceph_sync_fs,
        .put_super      = ceph_put_super,
        .show_options   = ceph_show_options,
        .statfs         = ceph_statfs,
        .umount_begin   = ceph_umount_begin,
};

/*
 * Bootstrap mount by opening the root directory.  Note the mount
 * @started time from caller, and time out if this takes too long.
 */
static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
                                       const char *path,
                                       unsigned long started)
{
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req = NULL;
        int err;
        struct dentry *root;

        /* open dir */
        dout("open_root_inode opening '%s'\n", path);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
        if (IS_ERR(req))
                return ERR_CAST(req);
        req->r_path1 = kstrdup(path, GFP_NOFS);
        if (!req->r_path1) {
                root = ERR_PTR(-ENOMEM);
                goto out;
        }

        req->r_ino1.ino = CEPH_INO_ROOT;
        req->r_ino1.snap = CEPH_NOSNAP;
        req->r_started = started;
        req->r_timeout = fsc->client->options->mount_timeout;
        req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
        req->r_num_caps = 2;
        err = ceph_mdsc_do_request(mdsc, NULL, req);
        if (err == 0) {
                struct inode *inode = req->r_target_inode;
                req->r_target_inode = NULL;
                dout("open_root_inode success\n");
                root = d_make_root(inode);
                if (!root) {
                        root = ERR_PTR(-ENOMEM);
                        goto out;
                }
                dout("open_root_inode success, root dentry is %p\n", root);
        } else {
                root = ERR_PTR(err);
        }
out:
        ceph_mdsc_put_request(req);
        return root;
}

/*
 * mount: join the ceph cluster, and open root directory.
 */
static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
                                      struct fs_context *fc)
{
        int err;
        unsigned long started = jiffies;  /* note the start time */
        struct dentry *root;

        dout("mount start %p\n", fsc);
        mutex_lock(&fsc->client->mount_mutex);

        if (!fsc->sb->s_root) {
                const char *path = fsc->mount_options->server_path ?
                                   fsc->mount_options->server_path + 1 : "";

                err = __ceph_open_session(fsc->client, started);
                if (err < 0)
                        goto out;

                /* setup fscache */
                if (fsc->mount_options->flags & CEPH_MOUNT_OPT_FSCACHE) {
                        err = ceph_fscache_register_fs(fsc, fc);
                        if (err < 0)
                                goto out;
                }

                dout("mount opening path '%s'\n", path);

                ceph_fs_debugfs_init(fsc);

                root = open_root_dentry(fsc, path, started);
                if (IS_ERR(root)) {
                        err = PTR_ERR(root);
                        goto out;
                }
                fsc->sb->s_root = dget(root);
        } else {
                root = dget(fsc->sb->s_root);
        }

        fsc->mount_state = CEPH_MOUNT_MOUNTED;
        dout("mount success\n");
        mutex_unlock(&fsc->client->mount_mutex);
        return root;

out:
        mutex_unlock(&fsc->client->mount_mutex);
        return ERR_PTR(err);
}

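/*
 * Initialize a freshly allocated superblock: set limits and the
 * super/dentry/export operations, then bind it to the fs client.
 */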
static int ceph_set_super(struct super_block *s, struct fs_context *fc)
{
        struct ceph_fs_client *fsc = s->s_fs_info;
        int ret;

        dout("set_super %p\n", s);

        s->s_maxbytes = MAX_LFS_FILESIZE;

        s->s_xattr = ceph_xattr_handlers;
        fsc->sb = s;
        fsc->max_file_size = 1ULL << 40; /* temp value until we get mdsmap */

        s->s_op = &ceph_super_ops;
        s->s_d_op = &ceph_dentry_ops;
        s->s_export_op = &ceph_export_ops;

        s->s_time_gran = 1;
        s->s_time_min = 0;
        s->s_time_max = U32_MAX;

        ret = set_anon_super_fc(s, fc);
        if (ret != 0)
                fsc->sb = NULL;
        return ret;
}

/*
 * share superblock if same fs AND options
 */
static int ceph_compare_super(struct super_block *sb, struct fs_context *fc)
{
        struct ceph_fs_client *new = fc->s_fs_info;
        struct ceph_mount_options *fsopt = new->mount_options;
        struct ceph_options *opt = new->client->options;
        struct ceph_fs_client *fsc = ceph_sb_to_client(sb);

        dout("ceph_compare_super %p\n", sb);

        if (compare_mount_options(fsopt, opt, fsc)) {
                dout("monitor(s)/mount options don't match\n");
                return 0;
        }
        if ((opt->flags & CEPH_OPT_FSID) &&
            ceph_fsid_compare(&opt->fsid, &fsc->client->fsid)) {
                dout("fsid doesn't match\n");
                return 0;
        }
        if (fc->sb_flags != (sb->s_flags & ~SB_BORN)) {
                dout("flags differ\n");
                return 0;
        }

        if (fsc->blocklisted && !ceph_test_mount_opt(fsc, CLEANRECOVER)) {
                dout("client is blocklisted (and CLEANRECOVER is not set)\n");
                return 0;
        }

        if (fsc->mount_state == CEPH_MOUNT_SHUTDOWN) {
                dout("client has been forcibly unmounted\n");
                return 0;
        }

        return 1;
}

/*
 * construct our own bdi so we can control readahead, etc.
 */
static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

static int ceph_setup_bdi(struct super_block *sb, struct ceph_fs_client *fsc)
{
        int err;

        err = super_setup_bdi_name(sb, "ceph-%ld",
                                   atomic_long_inc_return(&bdi_seq));
        if (err)
                return err;

        /* set ra_pages based on rasize mount option? */
        sb->s_bdi->ra_pages = fsc->mount_options->rasize >> PAGE_SHIFT;

        /* set io_pages based on max osd read size */
        sb->s_bdi->io_pages = fsc->mount_options->rsize >> PAGE_SHIFT;

        return 0;
}

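/*
 * Create the fs client, find or allocate a matching superblock, and
 * mount the root (or the subtree named by the source path).
 */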
static int ceph_get_tree(struct fs_context *fc)
{
        struct ceph_parse_opts_ctx *pctx = fc->fs_private;
        struct super_block *sb;
        struct ceph_fs_client *fsc;
        struct dentry *res;
        int (*compare_super)(struct super_block *, struct fs_context *) =
                ceph_compare_super;
        int err;

        dout("ceph_get_tree\n");

        if (!fc->source)
                return invalfc(fc, "No source");

        /* create client (which we may/may not use) */
        fsc = create_fs_client(pctx->opts, pctx->copts);
        pctx->opts = NULL;
        pctx->copts = NULL;
        if (IS_ERR(fsc)) {
                err = PTR_ERR(fsc);
                goto out_final;
        }

        err = ceph_mdsc_init(fsc);
        if (err < 0)
                goto out;

        if (ceph_test_opt(fsc->client, NOSHARE))
                compare_super = NULL;

        fc->s_fs_info = fsc;
        sb = sget_fc(fc, compare_super, ceph_set_super);
        fc->s_fs_info = NULL;
        if (IS_ERR(sb)) {
                err = PTR_ERR(sb);
                goto out;
        }

        if (ceph_sb_to_client(sb) != fsc) {
                destroy_fs_client(fsc);
                fsc = ceph_sb_to_client(sb);
                dout("get_sb got existing client %p\n", fsc);
        } else {
                dout("get_sb using new client %p\n", fsc);
                err = ceph_setup_bdi(sb, fsc);
                if (err < 0)
                        goto out_splat;
        }

        res = ceph_real_mount(fsc, fc);
        if (IS_ERR(res)) {
                err = PTR_ERR(res);
                goto out_splat;
        }
        dout("root %p inode %p ino %llx.%llx\n", res,
             d_inode(res), ceph_vinop(d_inode(res)));
        fc->root = fsc->sb->s_root;
        return 0;

out_splat:
        if (!ceph_mdsmap_is_cluster_available(fsc->mdsc->mdsmap)) {
                pr_info("No mds server is up or the cluster is laggy\n");
                err = -EHOSTUNREACH;
        }

        ceph_mdsc_close_sessions(fsc->mdsc);
        deactivate_locked_super(sb);
        goto out_final;

out:
        destroy_fs_client(fsc);
out_final:
        dout("ceph_get_tree fail %d\n", err);
        return err;
}

static void ceph_free_fc(struct fs_context *fc)
{
        struct ceph_parse_opts_ctx *pctx = fc->fs_private;

        if (pctx) {
                destroy_mount_options(pctx->opts);
                ceph_destroy_options(pctx->copts);
                kfree(pctx);
        }
}

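/*
 * Handle remount.  Only the wsync/nowsync toggle is applied here; the
 * remaining options keep their original values.
 */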
static int ceph_reconfigure_fc(struct fs_context *fc)
{
        struct ceph_parse_opts_ctx *pctx = fc->fs_private;
        struct ceph_mount_options *fsopt = pctx->opts;
        struct ceph_fs_client *fsc = ceph_sb_to_client(fc->root->d_sb);

        if (fsopt->flags & CEPH_MOUNT_OPT_ASYNC_DIROPS)
                ceph_set_mount_opt(fsc, ASYNC_DIROPS);
        else
                ceph_clear_mount_opt(fsc, ASYNC_DIROPS);

        sync_filesystem(fc->root->d_sb);
        return 0;
}

static const struct fs_context_operations ceph_context_ops = {
        .free           = ceph_free_fc,
        .parse_param    = ceph_parse_mount_param,
        .get_tree       = ceph_get_tree,
        .reconfigure    = ceph_reconfigure_fc,
};

/*
 * Set up the filesystem mount context.
 */
static int ceph_init_fs_context(struct fs_context *fc)
{
        struct ceph_parse_opts_ctx *pctx;
        struct ceph_mount_options *fsopt;

        pctx = kzalloc(sizeof(*pctx), GFP_KERNEL);
        if (!pctx)
                return -ENOMEM;

        pctx->copts = ceph_alloc_options();
        if (!pctx->copts)
                goto nomem;

        pctx->opts = kzalloc(sizeof(*pctx->opts), GFP_KERNEL);
        if (!pctx->opts)
                goto nomem;

        fsopt = pctx->opts;
        fsopt->flags = CEPH_MOUNT_OPT_DEFAULT;

        fsopt->wsize = CEPH_MAX_WRITE_SIZE;
        fsopt->rsize = CEPH_MAX_READ_SIZE;
        fsopt->rasize = CEPH_RASIZE_DEFAULT;
        fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
        if (!fsopt->snapdir_name)
                goto nomem;

        fsopt->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
        fsopt->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
        fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
        fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
        fsopt->congestion_kb = default_congestion_kb();

#ifdef CONFIG_CEPH_FS_POSIX_ACL
        fc->sb_flags |= SB_POSIXACL;
#endif

        fc->fs_private = pctx;
        fc->ops = &ceph_context_ops;
        return 0;

nomem:
        destroy_mount_options(pctx->opts);
        ceph_destroy_options(pctx->copts);
        kfree(pctx);
        return -ENOMEM;
}

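/*
 * Tear down the superblock: stop MDS work, flush the workqueues, sync
 * and kill the anonymous super, then release debugfs/fscache state and
 * the fs client itself.
 */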
static void ceph_kill_sb(struct super_block *s)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(s);

        dout("kill_sb %p\n", s);

        ceph_mdsc_pre_umount(fsc->mdsc);
        flush_fs_workqueues(fsc);

        /*
         * Though the kill_anon_super() will finally trigger the
         * sync_filesystem() anyway, we still need to do it here and
         * then bump the stage of shutdown to stop the work queue as
         * early as possible.
         */
        sync_filesystem(s);

        fsc->mdsc->stopping = CEPH_MDSC_STOPPING_FLUSHED;

        kill_anon_super(s);

        fsc->client->extra_mon_dispatch = NULL;
        ceph_fs_debugfs_cleanup(fsc);

        ceph_fscache_unregister_fs(fsc);

        destroy_fs_client(fsc);
}

static struct file_system_type ceph_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "ceph",
        .init_fs_context = ceph_init_fs_context,
        .kill_sb        = ceph_kill_sb,
        .fs_flags       = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("ceph");

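/*
 * Recover a mount after the client has been blocklisted: abort
 * outstanding requests, reset the client address (re-establishing the
 * mon/osd connections) and verify that the root inode is still
 * reachable.
 */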
int ceph_force_reconnect(struct super_block *sb)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
        int err = 0;

        fsc->mount_state = CEPH_MOUNT_RECOVER;
        __ceph_umount_begin(fsc);

        /* Make sure all page caches get invalidated.
         * see remove_session_caps_cb() */
        flush_workqueue(fsc->inode_wq);

        /* In case we were blocklisted.  This also resets
         * all mon/osd connections. */
        ceph_reset_client_addr(fsc->client);

        ceph_osdc_clear_abort_err(&fsc->client->osdc);

        fsc->blocklisted = false;
        fsc->mount_state = CEPH_MOUNT_MOUNTED;

        if (sb->s_root) {
                err = __ceph_do_getattr(d_inode(sb->s_root), NULL,
                                        CEPH_STAT_CAP_INODE, true);
        }
        return err;
}

static int __init init_ceph(void)
{
        int ret = init_caches();
        if (ret)
                goto out;

        ceph_flock_init();
        ret = register_filesystem(&ceph_fs_type);
        if (ret)
                goto out_caches;

        pr_info("loaded (mds proto %d)\n", CEPH_MDSC_PROTOCOL);

        return 0;

out_caches:
        destroy_caches();
out:
        return ret;
}

static void __exit exit_ceph(void)
{
        dout("exit_ceph\n");
        unregister_filesystem(&ceph_fs_type);
        destroy_caches();
}

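/*
 * Setter for the disable_send_metrics module parameter: when metric
 * sending is (re-)enabled, kick the metric work on every mounted fs.
 */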
static int param_set_metrics(const char *val, const struct kernel_param *kp)
{
        struct ceph_fs_client *fsc;
        int ret;

        ret = param_set_bool(val, kp);
        if (ret) {
                pr_err("Failed to parse sending metrics switch value '%s'\n",
                       val);
                return ret;
        } else if (!disable_send_metrics) {
                // wake up all the mds clients
                spin_lock(&ceph_fsc_lock);
                list_for_each_entry(fsc, &ceph_fsc_list, metric_wakeup) {
                        metric_schedule_delayed(&fsc->mdsc->metric);
                }
                spin_unlock(&ceph_fsc_lock);
        }

        return 0;
}

static const struct kernel_param_ops param_ops_metrics = {
        .set = param_set_metrics,
        .get = param_get_bool,
};

bool disable_send_metrics = false;
module_param_cb(disable_send_metrics, &param_ops_metrics, &disable_send_metrics, 0644);
MODULE_PARM_DESC(disable_send_metrics, "Enable sending perf metrics to ceph cluster (default: on)");

module_init(init_ceph);
module_exit(exit_ceph);

MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_AUTHOR("Patience Warnick <patience@newdream.net>");
MODULE_DESCRIPTION("Ceph filesystem for Linux");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(ANDROID_GKI_VFS_EXPORT_ONLY);