// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/fs.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/bits.h>
#include <linux/ktime.h>
#include <linux/bitmap.h>

#include "super.h"
#include "mds_client.h"

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>

#define RECONNECT_MAX_SIZE (INT_MAX - PAGE_SIZE)

/*
 * A cluster of MDS (metadata server) daemons is responsible for
 * managing the file system namespace (the directory hierarchy and
 * inodes) and for coordinating shared access to storage.  Metadata is
 * partitioned hierarchically across a number of servers, and that
 * partition varies over time as the cluster adjusts the distribution
 * in order to balance load.
 *
 * The MDS client is primarily responsible for managing synchronous
 * metadata requests for operations like open, unlink, and so forth.
 * If there is an MDS failure, we find out about it when we (possibly
 * request and) receive a new MDS map, and can resubmit affected
 * requests.
 *
 * For the most part, though, we take advantage of a lossless
 * communications channel to the MDS, and do not need to worry about
 * timing out or resubmitting requests.
 *
 * We maintain a stateful "session" with each MDS we interact with.
 * Within each session, we send periodic heartbeat messages to ensure
 * any capabilities or leases we have been issued remain valid.  If
 * the session times out and goes stale, our leases and capabilities
 * are no longer valid.
 */
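
/*
 * Roughly, a session walks the states reported by
 * ceph_session_state_name() below:
 *
 *	NEW -> OPENING -> OPEN -> CLOSING -> CLOSED
 *	                   |
 *	                   +-> HUNG (renewal not acked; caps go stale)
 *	                   +-> RESTARTING/RECONNECTING (MDS failover)
 *	                   +-> REJECTED (MDS refused the session)
 *
 * This sketch is only illustrative; the authoritative transitions
 * live in the message handlers further down.
 */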

struct ceph_reconnect_state {
	struct ceph_mds_session *session;
	int nr_caps, nr_realms;
	struct ceph_pagelist *pagelist;
	unsigned msg_version;
	bool allow_multi;
};

static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head);
static void ceph_cap_release_work(struct work_struct *work);
static void ceph_cap_reclaim_work(struct work_struct *work);

static const struct ceph_connection_operations mds_con_ops;


/*
 * mds reply parsing
 */

static int parse_reply_info_quota(void **p, void *end,
				  struct ceph_mds_reply_info_in *info)
{
	u8 struct_v, struct_compat;
	u32 struct_len;

	ceph_decode_8_safe(p, end, struct_v, bad);
	ceph_decode_8_safe(p, end, struct_compat, bad);
	/* struct_v is expected to be >= 1. we only
	 * understand encoding with struct_compat == 1. */
	if (!struct_v || struct_compat != 1)
		goto bad;
	ceph_decode_32_safe(p, end, struct_len, bad);
	ceph_decode_need(p, end, struct_len, bad);
	end = *p + struct_len;
	ceph_decode_64_safe(p, end, info->max_bytes, bad);
	ceph_decode_64_safe(p, end, info->max_files, bad);
	*p = end;
	return 0;
bad:
	return -EIO;
}
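
/*
 * All of the parse_* helpers lean on the bounds-checked decode macros
 * from <linux/ceph/decode.h>: each *_safe() macro verifies that the
 * requested bytes fit between *p and end before advancing *p, and
 * jumps to the given label on a short buffer.  In effect,
 *
 *	ceph_decode_32_safe(p, end, len, bad);
 *
 * behaves roughly like (a sketch, not the actual macro expansion):
 *
 *	if (end - *p < sizeof(u32))
 *		goto bad;
 *	len = le32_to_cpu(*(__le32 *)*p);
 *	*p += sizeof(u32);
 */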

/*
 * parse individual inode info
 */
static int parse_reply_info_in(void **p, void *end,
			       struct ceph_mds_reply_info_in *info,
			       u64 features)
{
	int err = 0;
	u8 struct_v = 0;

	if (features == (u64)-1) {
		u32 struct_len;
		u8 struct_compat;
		ceph_decode_8_safe(p, end, struct_v, bad);
		ceph_decode_8_safe(p, end, struct_compat, bad);
		/* struct_v is expected to be >= 1. we only understand
		 * encoding with struct_compat == 1. */
		if (!struct_v || struct_compat != 1)
			goto bad;
		ceph_decode_32_safe(p, end, struct_len, bad);
		ceph_decode_need(p, end, struct_len, bad);
		end = *p + struct_len;
	}

	ceph_decode_need(p, end, sizeof(struct ceph_mds_reply_inode), bad);
	info->in = *p;
	*p += sizeof(struct ceph_mds_reply_inode) +
		sizeof(*info->in->fragtree.splits) *
		le32_to_cpu(info->in->fragtree.nsplits);

	ceph_decode_32_safe(p, end, info->symlink_len, bad);
	ceph_decode_need(p, end, info->symlink_len, bad);
	info->symlink = *p;
	*p += info->symlink_len;

	ceph_decode_copy_safe(p, end, &info->dir_layout,
			      sizeof(info->dir_layout), bad);
	ceph_decode_32_safe(p, end, info->xattr_len, bad);
	ceph_decode_need(p, end, info->xattr_len, bad);
	info->xattr_data = *p;
	*p += info->xattr_len;

	if (features == (u64)-1) {
		/* inline data */
		ceph_decode_64_safe(p, end, info->inline_version, bad);
		ceph_decode_32_safe(p, end, info->inline_len, bad);
		ceph_decode_need(p, end, info->inline_len, bad);
		info->inline_data = *p;
		*p += info->inline_len;
		/* quota */
		err = parse_reply_info_quota(p, end, info);
		if (err < 0)
			goto out_bad;
		/* pool namespace */
		ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
		if (info->pool_ns_len > 0) {
			ceph_decode_need(p, end, info->pool_ns_len, bad);
			info->pool_ns_data = *p;
			*p += info->pool_ns_len;
		}

		/* btime */
		ceph_decode_need(p, end, sizeof(info->btime), bad);
		ceph_decode_copy(p, &info->btime, sizeof(info->btime));

		/* change attribute */
		ceph_decode_64_safe(p, end, info->change_attr, bad);

		/* dir pin */
		if (struct_v >= 2) {
			ceph_decode_32_safe(p, end, info->dir_pin, bad);
		} else {
			info->dir_pin = -ENODATA;
		}

		/* snapshot birth time, remains zero for v<=2 */
		if (struct_v >= 3) {
			ceph_decode_need(p, end, sizeof(info->snap_btime), bad);
			ceph_decode_copy(p, &info->snap_btime,
					 sizeof(info->snap_btime));
		} else {
			memset(&info->snap_btime, 0, sizeof(info->snap_btime));
		}

		/* snapshot count, remains zero for v<=3 */
		if (struct_v >= 4) {
			ceph_decode_64_safe(p, end, info->rsnaps, bad);
		} else {
			info->rsnaps = 0;
		}

		*p = end;
	} else {
		if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
			ceph_decode_64_safe(p, end, info->inline_version, bad);
			ceph_decode_32_safe(p, end, info->inline_len, bad);
			ceph_decode_need(p, end, info->inline_len, bad);
			info->inline_data = *p;
			*p += info->inline_len;
		} else
			info->inline_version = CEPH_INLINE_NONE;

		if (features & CEPH_FEATURE_MDS_QUOTA) {
			err = parse_reply_info_quota(p, end, info);
			if (err < 0)
				goto out_bad;
		} else {
			info->max_bytes = 0;
			info->max_files = 0;
		}

		info->pool_ns_len = 0;
		info->pool_ns_data = NULL;
		if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
			ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
			if (info->pool_ns_len > 0) {
				ceph_decode_need(p, end, info->pool_ns_len, bad);
				info->pool_ns_data = *p;
				*p += info->pool_ns_len;
			}
		}

		if (features & CEPH_FEATURE_FS_BTIME) {
			ceph_decode_need(p, end, sizeof(info->btime), bad);
			ceph_decode_copy(p, &info->btime, sizeof(info->btime));
			ceph_decode_64_safe(p, end, info->change_attr, bad);
		}

		info->dir_pin = -ENODATA;
		/* info->snap_btime and info->rsnaps remain zero */
	}
	return 0;
bad:
	err = -EIO;
out_bad:
	return err;
}
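
/*
 * Note the two decode regimes above: features == (u64)-1 marks a peer
 * that speaks the newer, self-versioned encoding (struct_v/struct_compat
 * plus an explicit length, so unknown trailing fields can be skipped),
 * while any other value is a legacy feature bitmask that gates each
 * optional field individually.  The same split recurs in the other
 * parse_* helpers below.
 */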

static int parse_reply_info_dir(void **p, void *end,
				struct ceph_mds_reply_dirfrag **dirfrag,
				u64 features)
{
	if (features == (u64)-1) {
		u8 struct_v, struct_compat;
		u32 struct_len;
		ceph_decode_8_safe(p, end, struct_v, bad);
		ceph_decode_8_safe(p, end, struct_compat, bad);
		/* struct_v is expected to be >= 1. we only understand
		 * encoding whose struct_compat == 1. */
		if (!struct_v || struct_compat != 1)
			goto bad;
		ceph_decode_32_safe(p, end, struct_len, bad);
		ceph_decode_need(p, end, struct_len, bad);
		end = *p + struct_len;
	}

	ceph_decode_need(p, end, sizeof(**dirfrag), bad);
	*dirfrag = *p;
	*p += sizeof(**dirfrag) + sizeof(u32) * le32_to_cpu((*dirfrag)->ndist);
	if (unlikely(*p > end))
		goto bad;
	if (features == (u64)-1)
		*p = end;
	return 0;
bad:
	return -EIO;
}

static int parse_reply_info_lease(void **p, void *end,
				  struct ceph_mds_reply_lease **lease,
				  u64 features)
{
	if (features == (u64)-1) {
		u8 struct_v, struct_compat;
		u32 struct_len;
		ceph_decode_8_safe(p, end, struct_v, bad);
		ceph_decode_8_safe(p, end, struct_compat, bad);
		/* struct_v is expected to be >= 1. we only understand
		 * encoding whose struct_compat == 1. */
		if (!struct_v || struct_compat != 1)
			goto bad;
		ceph_decode_32_safe(p, end, struct_len, bad);
		ceph_decode_need(p, end, struct_len, bad);
		end = *p + struct_len;
	}

	ceph_decode_need(p, end, sizeof(**lease), bad);
	*lease = *p;
	*p += sizeof(**lease);
	if (features == (u64)-1)
		*p = end;
	return 0;
bad:
	return -EIO;
}

/*
 * parse a normal reply, which may contain a (dir+)dentry and/or a
 * target inode.
 */
static int parse_reply_info_trace(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  u64 features)
{
	int err;

	if (info->head->is_dentry) {
		err = parse_reply_info_in(p, end, &info->diri, features);
		if (err < 0)
			goto out_bad;

		err = parse_reply_info_dir(p, end, &info->dirfrag, features);
		if (err < 0)
			goto out_bad;

		ceph_decode_32_safe(p, end, info->dname_len, bad);
		ceph_decode_need(p, end, info->dname_len, bad);
		info->dname = *p;
		*p += info->dname_len;

		err = parse_reply_info_lease(p, end, &info->dlease, features);
		if (err < 0)
			goto out_bad;
	}

	if (info->head->is_target) {
		err = parse_reply_info_in(p, end, &info->targeti, features);
		if (err < 0)
			goto out_bad;
	}

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing mds trace %d\n", err);
	return err;
}

/*
 * parse readdir results
 */
static int parse_reply_info_readdir(void **p, void *end,
				    struct ceph_mds_reply_info_parsed *info,
				    u64 features)
{
	u32 num, i = 0;
	int err;

	err = parse_reply_info_dir(p, end, &info->dir_dir, features);
	if (err < 0)
		goto out_bad;

	ceph_decode_need(p, end, sizeof(num) + 2, bad);
	num = ceph_decode_32(p);
	{
		u16 flags = ceph_decode_16(p);
		info->dir_end = !!(flags & CEPH_READDIR_FRAG_END);
		info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE);
		info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER);
		info->offset_hash = !!(flags & CEPH_READDIR_OFFSET_HASH);
	}
	if (num == 0)
		goto done;

	BUG_ON(!info->dir_entries);
	if ((unsigned long)(info->dir_entries + num) >
	    (unsigned long)info->dir_entries + info->dir_buf_size) {
		pr_err("dir contents are larger than expected\n");
		WARN_ON(1);
		goto bad;
	}

	info->dir_nr = num;
	while (num) {
		struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
		/* dentry */
		ceph_decode_32_safe(p, end, rde->name_len, bad);
		ceph_decode_need(p, end, rde->name_len, bad);
		rde->name = *p;
		*p += rde->name_len;
		dout("parsed dir dname '%.*s'\n", rde->name_len, rde->name);

		/* dentry lease */
		err = parse_reply_info_lease(p, end, &rde->lease, features);
		if (err)
			goto out_bad;
		/* inode */
		err = parse_reply_info_in(p, end, &rde->inode, features);
		if (err < 0)
			goto out_bad;
		/* ceph_readdir_prepopulate() will update it */
		rde->offset = 0;
		i++;
		num--;
	}

done:
	/* Skip over any unrecognized fields */
	*p = end;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing dir contents %d\n", err);
	return err;
}

/*
 * parse fcntl F_GETLK results
 */
static int parse_reply_info_filelock(void **p, void *end,
				     struct ceph_mds_reply_info_parsed *info,
				     u64 features)
{
	if (*p + sizeof(*info->filelock_reply) > end)
		goto bad;

	info->filelock_reply = *p;

	/* Skip over any unrecognized fields */
	*p = end;
	return 0;
bad:
	return -EIO;
}


#if BITS_PER_LONG == 64

#define DELEGATED_INO_AVAILABLE xa_mk_value(1)

static int ceph_parse_deleg_inos(void **p, void *end,
				 struct ceph_mds_session *s)
{
	u32 sets;

	ceph_decode_32_safe(p, end, sets, bad);
	dout("got %u sets of delegated inodes\n", sets);
	while (sets--) {
		u64 start, len, ino;

		ceph_decode_64_safe(p, end, start, bad);
		ceph_decode_64_safe(p, end, len, bad);

		/* Don't accept a delegation of system inodes */
		if (start < CEPH_INO_SYSTEM_BASE) {
			pr_warn_ratelimited("ceph: ignoring reserved inode range delegation (start=0x%llx len=0x%llx)\n",
					    start, len);
			continue;
		}
		while (len--) {
			int err = xa_insert(&s->s_delegated_inos, ino = start++,
					    DELEGATED_INO_AVAILABLE,
					    GFP_KERNEL);
			if (!err) {
				dout("added delegated inode 0x%llx\n",
				     start - 1);
			} else if (err == -EBUSY) {
				pr_warn("ceph: MDS delegated inode 0x%llx more than once.\n",
					start - 1);
			} else {
				return err;
			}
		}
	}
	return 0;
bad:
	return -EIO;
}

u64 ceph_get_deleg_ino(struct ceph_mds_session *s)
{
	unsigned long ino;
	void *val;

	xa_for_each(&s->s_delegated_inos, ino, val) {
		val = xa_erase(&s->s_delegated_inos, ino);
		if (val == DELEGATED_INO_AVAILABLE)
			return ino;
	}
	return 0;
}

int ceph_restore_deleg_ino(struct ceph_mds_session *s, u64 ino)
{
	return xa_insert(&s->s_delegated_inos, ino, DELEGATED_INO_AVAILABLE,
			 GFP_KERNEL);
}
#else /* BITS_PER_LONG == 64 */
/*
 * FIXME: xarrays can't handle 64-bit indexes on a 32-bit arch. For now, just
 * ignore delegated_inos on 32 bit arch. Maybe eventually add xarrays for top
 * and bottom words?
 */
static int ceph_parse_deleg_inos(void **p, void *end,
				 struct ceph_mds_session *s)
{
	u32 sets;

	ceph_decode_32_safe(p, end, sets, bad);
	if (sets)
		ceph_decode_skip_n(p, end, sets * 2 * sizeof(__le64), bad);
	return 0;
bad:
	return -EIO;
}

u64 ceph_get_deleg_ino(struct ceph_mds_session *s)
{
	return 0;
}

int ceph_restore_deleg_ino(struct ceph_mds_session *s, u64 ino)
{
	return 0;
}
#endif /* BITS_PER_LONG == 64 */
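
/*
 * The delegated-inode set is an xarray keyed by inode number, with
 * xa_mk_value(1) as an "available" sentinel.  A minimal usage sketch
 * (assuming an open session 's' on a 64-bit kernel; use_ino_somehow()
 * is a hypothetical consumer, not a real function):
 *
 *	u64 ino = ceph_get_deleg_ino(s);	// claim one, or 0 if none
 *	if (ino && !use_ino_somehow(ino))
 *		ceph_restore_deleg_ino(s, ino);	// put it back on failure
 */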

/*
 * parse create results
 */
static int parse_reply_info_create(void **p, void *end,
				   struct ceph_mds_reply_info_parsed *info,
				   u64 features, struct ceph_mds_session *s)
{
	int ret;

	if (features == (u64)-1 ||
	    (features & CEPH_FEATURE_REPLY_CREATE_INODE)) {
		if (*p == end) {
			/* Malformed reply? */
			info->has_create_ino = false;
		} else if (test_bit(CEPHFS_FEATURE_DELEG_INO, &s->s_features)) {
			info->has_create_ino = true;
			/* struct_v, struct_compat, and len */
			ceph_decode_skip_n(p, end, 2 + sizeof(u32), bad);
			ceph_decode_64_safe(p, end, info->ino, bad);
			ret = ceph_parse_deleg_inos(p, end, s);
			if (ret)
				return ret;
		} else {
			/* legacy */
			ceph_decode_64_safe(p, end, info->ino, bad);
			info->has_create_ino = true;
		}
	} else {
		if (*p != end)
			goto bad;
	}

	/* Skip over any unrecognized fields */
	*p = end;
	return 0;
bad:
	return -EIO;
}

/*
 * parse extra results
 */
static int parse_reply_info_extra(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  u64 features, struct ceph_mds_session *s)
{
	u32 op = le32_to_cpu(info->head->op);

	if (op == CEPH_MDS_OP_GETFILELOCK)
		return parse_reply_info_filelock(p, end, info, features);
	else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
		return parse_reply_info_readdir(p, end, info, features);
	else if (op == CEPH_MDS_OP_CREATE)
		return parse_reply_info_create(p, end, info, features, s);
	else
		return -EIO;
}

/*
 * parse entire mds reply
 */
static int parse_reply_info(struct ceph_mds_session *s, struct ceph_msg *msg,
			    struct ceph_mds_reply_info_parsed *info,
			    u64 features)
{
	void *p, *end;
	u32 len;
	int err;

	info->head = msg->front.iov_base;
	p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
	end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);

	/* trace */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		ceph_decode_need(&p, end, len, bad);
		err = parse_reply_info_trace(&p, p+len, info, features);
		if (err < 0)
			goto out_bad;
	}

	/* extra */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		ceph_decode_need(&p, end, len, bad);
		err = parse_reply_info_extra(&p, p+len, info, features, s);
		if (err < 0)
			goto out_bad;
	}

	/* snap blob */
	ceph_decode_32_safe(&p, end, len, bad);
	info->snapblob_len = len;
	info->snapblob = p;
	p += len;

	if (p != end)
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("mds parse_reply err %d\n", err);
	return err;
}
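
/*
 * So the overall reply payload is three length-prefixed blobs after
 * the fixed header; in effect:
 *
 *	struct ceph_mds_reply_head
 *	__le32 trace_len;  u8 trace[trace_len];	// dentry/inode trace
 *	__le32 extra_len;  u8 extra[extra_len];	// op-specific results
 *	__le32 snap_len;   u8 snap[snap_len];	// snap realm blob
 *
 * This layout sketch follows directly from the decode sequence above.
 */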

static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
{
	if (!info->dir_entries)
		return;
	free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size));
}


/*
 * sessions
 */
const char *ceph_session_state_name(int s)
{
	switch (s) {
	case CEPH_MDS_SESSION_NEW: return "new";
	case CEPH_MDS_SESSION_OPENING: return "opening";
	case CEPH_MDS_SESSION_OPEN: return "open";
	case CEPH_MDS_SESSION_HUNG: return "hung";
	case CEPH_MDS_SESSION_CLOSING: return "closing";
	case CEPH_MDS_SESSION_CLOSED: return "closed";
	case CEPH_MDS_SESSION_RESTARTING: return "restarting";
	case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
	case CEPH_MDS_SESSION_REJECTED: return "rejected";
	default: return "???";
	}
}

struct ceph_mds_session *ceph_get_mds_session(struct ceph_mds_session *s)
{
	if (refcount_inc_not_zero(&s->s_ref))
		return s;
	return NULL;
}

void ceph_put_mds_session(struct ceph_mds_session *s)
{
	if (IS_ERR_OR_NULL(s))
		return;

	if (refcount_dec_and_test(&s->s_ref)) {
		if (s->s_auth.authorizer)
			ceph_auth_destroy_authorizer(s->s_auth.authorizer);
		WARN_ON(mutex_is_locked(&s->s_mutex));
		xa_destroy(&s->s_delegated_inos);
		kfree(s);
	}
}
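
/*
 * Session lifetime follows the usual get/put refcount pattern;
 * ceph_get_mds_session() can fail (returns NULL) once the count has
 * already hit zero.  A typical caller looks roughly like:
 *
 *	s = ceph_get_mds_session(session);
 *	if (s) {
 *		... use s ...
 *		ceph_put_mds_session(s);
 *	}
 */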

/*
 * called under mdsc->mutex
 */
struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
						   int mds)
{
	if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
		return NULL;
	return ceph_get_mds_session(mdsc->sessions[mds]);
}

static bool __have_session(struct ceph_mds_client *mdsc, int mds)
{
	if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
		return false;
	else
		return true;
}

static int __verify_registered_session(struct ceph_mds_client *mdsc,
				       struct ceph_mds_session *s)
{
	if (s->s_mds >= mdsc->max_sessions ||
	    mdsc->sessions[s->s_mds] != s)
		return -ENOENT;
	return 0;
}

/*
 * create+register a new session for given mds.
 * called under mdsc->mutex.
 */
static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
						 int mds)
{
	struct ceph_mds_session *s;

	if (mds >= mdsc->mdsmap->possible_max_rank)
		return ERR_PTR(-EINVAL);

	s = kzalloc(sizeof(*s), GFP_NOFS);
	if (!s)
		return ERR_PTR(-ENOMEM);

	if (mds >= mdsc->max_sessions) {
		int newmax = 1 << get_count_order(mds + 1);
		struct ceph_mds_session **sa;

		dout("%s: realloc to %d\n", __func__, newmax);
		sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
		if (!sa)
			goto fail_realloc;
		if (mdsc->sessions) {
			memcpy(sa, mdsc->sessions,
			       mdsc->max_sessions * sizeof(void *));
			kfree(mdsc->sessions);
		}
		mdsc->sessions = sa;
		mdsc->max_sessions = newmax;
	}

	dout("%s: mds%d\n", __func__, mds);
	s->s_mdsc = mdsc;
	s->s_mds = mds;
	s->s_state = CEPH_MDS_SESSION_NEW;
	mutex_init(&s->s_mutex);

	ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);

	atomic_set(&s->s_cap_gen, 1);
	s->s_cap_ttl = jiffies - 1;

	spin_lock_init(&s->s_cap_lock);
	INIT_LIST_HEAD(&s->s_caps);
	refcount_set(&s->s_ref, 1);
	INIT_LIST_HEAD(&s->s_waiting);
	INIT_LIST_HEAD(&s->s_unsafe);
	xa_init(&s->s_delegated_inos);
	INIT_LIST_HEAD(&s->s_cap_releases);
	INIT_WORK(&s->s_cap_release_work, ceph_cap_release_work);

	INIT_LIST_HEAD(&s->s_cap_dirty);
	INIT_LIST_HEAD(&s->s_cap_flushing);

	mdsc->sessions[mds] = s;
	atomic_inc(&mdsc->num_sessions);
	refcount_inc(&s->s_ref);  /* one ref to sessions[], one to caller */

	ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
		      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

	return s;

fail_realloc:
	kfree(s);
	return ERR_PTR(-ENOMEM);
}
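
/*
 * The sessions[] array grows to the next power of two that can hold
 * index 'mds'.  get_count_order(n) is ceil(log2(n)), so for example:
 *
 *	mds = 0  ->  newmax = 1 << get_count_order(1) = 1
 *	mds = 4  ->  newmax = 1 << get_count_order(5) = 8
 *	mds = 7  ->  newmax = 1 << get_count_order(8) = 8
 *
 * which keeps reallocations logarithmic in the highest rank seen.
 */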

/*
 * called under mdsc->mutex
 */
static void __unregister_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *s)
{
	dout("__unregister_session mds%d %p\n", s->s_mds, s);
	BUG_ON(mdsc->sessions[s->s_mds] != s);
	mdsc->sessions[s->s_mds] = NULL;
	ceph_con_close(&s->s_con);
	ceph_put_mds_session(s);
	atomic_dec(&mdsc->num_sessions);
}

/*
 * drop session refs in request.
 *
 * should be last request ref, or hold mdsc->mutex
 */
static void put_request_session(struct ceph_mds_request *req)
{
	if (req->r_session) {
		ceph_put_mds_session(req->r_session);
		req->r_session = NULL;
	}
}

void ceph_mdsc_iterate_sessions(struct ceph_mds_client *mdsc,
				void (*cb)(struct ceph_mds_session *),
				bool check_state)
{
	int mds;

	mutex_lock(&mdsc->mutex);
	for (mds = 0; mds < mdsc->max_sessions; ++mds) {
		struct ceph_mds_session *s;

		s = __ceph_lookup_mds_session(mdsc, mds);
		if (!s)
			continue;

		if (check_state && !check_session_state(s)) {
			ceph_put_mds_session(s);
			continue;
		}

		mutex_unlock(&mdsc->mutex);
		cb(s);
		ceph_put_mds_session(s);
		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);
}
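
/*
 * Note the lock juggling above: the callback runs with mdsc->mutex
 * dropped (callbacks may sleep or take it themselves), which is safe
 * only because the iterator holds its own session reference across
 * the unlocked window.  Sessions registered or torn down during that
 * window may be skipped or visited; callers are expected to tolerate
 * either outcome.
 */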

void ceph_mdsc_release_request(struct kref *kref)
{
	struct ceph_mds_request *req = container_of(kref,
						    struct ceph_mds_request,
						    r_kref);
	ceph_mdsc_release_dir_caps_no_check(req);
	destroy_reply_info(&req->r_reply_info);
	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);
	if (req->r_inode) {
		ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
		iput(req->r_inode);
	}
	if (req->r_parent) {
		ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
		iput(req->r_parent);
	}
	iput(req->r_target_inode);
	if (req->r_dentry)
		dput(req->r_dentry);
	if (req->r_old_dentry)
		dput(req->r_old_dentry);
	if (req->r_old_dentry_dir) {
		/*
		 * track (and drop pins for) r_old_dentry_dir
		 * separately, since r_old_dentry's d_parent may have
		 * changed between the dir mutex being dropped and
		 * this request being freed.
		 */
		ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
				  CEPH_CAP_PIN);
		iput(req->r_old_dentry_dir);
	}
	kfree(req->r_path1);
	kfree(req->r_path2);
	put_cred(req->r_cred);
	if (req->r_pagelist)
		ceph_pagelist_release(req->r_pagelist);
	put_request_session(req);
	ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
	WARN_ON_ONCE(!list_empty(&req->r_wait));
	kmem_cache_free(ceph_mds_request_cachep, req);
}

DEFINE_RB_FUNCS(request, struct ceph_mds_request, r_tid, r_node)

/*
 * lookup request, bump ref if found.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *
lookup_get_request(struct ceph_mds_client *mdsc, u64 tid)
{
	struct ceph_mds_request *req;

	req = lookup_request(&mdsc->request_tree, tid);
	if (req)
		ceph_mdsc_get_request(req);

	return req;
}

/*
 * Register an in-flight request, and assign a tid.  Link to the
 * directory we are modifying (if any).
 *
 * Called under mdsc->mutex.
 */
static void __register_request(struct ceph_mds_client *mdsc,
			       struct ceph_mds_request *req,
			       struct inode *dir)
{
	int ret = 0;

	req->r_tid = ++mdsc->last_tid;
	if (req->r_num_caps) {
		ret = ceph_reserve_caps(mdsc, &req->r_caps_reservation,
					req->r_num_caps);
		if (ret < 0) {
			pr_err("__register_request %p "
			       "failed to reserve caps: %d\n", req, ret);
			/* set req->r_err to fail early from __do_request */
			req->r_err = ret;
			return;
		}
	}
	dout("__register_request %p tid %lld\n", req, req->r_tid);
	ceph_mdsc_get_request(req);
	insert_request(&mdsc->request_tree, req);

	req->r_cred = get_current_cred();

	if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
		mdsc->oldest_tid = req->r_tid;

	if (dir) {
		struct ceph_inode_info *ci = ceph_inode(dir);

		ihold(dir);
		req->r_unsafe_dir = dir;
		spin_lock(&ci->i_unsafe_lock);
		list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops);
		spin_unlock(&ci->i_unsafe_lock);
	}
}

static void __unregister_request(struct ceph_mds_client *mdsc,
				 struct ceph_mds_request *req)
{
	dout("__unregister_request %p tid %lld\n", req, req->r_tid);

	/* Never leave an unregistered request on an unsafe list! */
	list_del_init(&req->r_unsafe_item);

	if (req->r_tid == mdsc->oldest_tid) {
		struct rb_node *p = rb_next(&req->r_node);
		mdsc->oldest_tid = 0;
		while (p) {
			struct ceph_mds_request *next_req =
				rb_entry(p, struct ceph_mds_request, r_node);
			if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) {
				mdsc->oldest_tid = next_req->r_tid;
				break;
			}
			p = rb_next(p);
		}
	}

	erase_request(&mdsc->request_tree, req);

	if (req->r_unsafe_dir) {
		struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_dir_item);
		spin_unlock(&ci->i_unsafe_lock);
	}
	if (req->r_target_inode &&
	    test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
		struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_target_item);
		spin_unlock(&ci->i_unsafe_lock);
	}

	if (req->r_unsafe_dir) {
		iput(req->r_unsafe_dir);
		req->r_unsafe_dir = NULL;
	}

	complete_all(&req->r_safe_completion);

	ceph_mdsc_put_request(req);
}

/*
 * Walk back up the dentry tree until we hit a dentry representing a
 * non-snapshot inode. We do this using the rcu_read_lock (which must be held
 * when calling this) to ensure that the objects won't disappear while we're
 * working with them. Once we hit a candidate dentry, we attempt to take a
 * reference to it, and return that as the result.
 */
static struct inode *get_nonsnap_parent(struct dentry *dentry)
{
	struct inode *inode = NULL;

	while (dentry && !IS_ROOT(dentry)) {
		inode = d_inode_rcu(dentry);
		if (!inode || ceph_snap(inode) == CEPH_NOSNAP)
			break;
		dentry = dentry->d_parent;
	}
	if (inode)
		inode = igrab(inode);
	return inode;
}

/*
 * Choose mds to send request to next.  If there is a hint set in the
 * request (e.g., due to a prior forward hint from the mds), use that.
 * Otherwise, consult frag tree and/or caps to identify the
 * appropriate mds.  If all else fails, choose randomly.
 *
 * Called under mdsc->mutex.
 */
static int __choose_mds(struct ceph_mds_client *mdsc,
			struct ceph_mds_request *req,
			bool *random)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_cap *cap;
	int mode = req->r_direct_mode;
	int mds = -1;
	u32 hash = req->r_direct_hash;
	bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);

	if (random)
		*random = false;

	/*
	 * is there a specific mds we should try?  ignore hint if we have
	 * no session and the mds is not up (active or recovering).
	 */
	if (req->r_resend_mds >= 0 &&
	    (__have_session(mdsc, req->r_resend_mds) ||
	     ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
		dout("%s using resend_mds mds%d\n", __func__,
		     req->r_resend_mds);
		return req->r_resend_mds;
	}

	if (mode == USE_RANDOM_MDS)
		goto random;

	inode = NULL;
	if (req->r_inode) {
		if (ceph_snap(req->r_inode) != CEPH_SNAPDIR) {
			inode = req->r_inode;
			ihold(inode);
		} else {
			/* req->r_dentry is non-null for LSSNAP request */
			rcu_read_lock();
			inode = get_nonsnap_parent(req->r_dentry);
			rcu_read_unlock();
			dout("%s using snapdir's parent %p\n", __func__, inode);
		}
	} else if (req->r_dentry) {
		/* ignore race with rename; old or new d_parent is okay */
		struct dentry *parent;
		struct inode *dir;

		rcu_read_lock();
		parent = READ_ONCE(req->r_dentry->d_parent);
		dir = req->r_parent ? : d_inode_rcu(parent);

		if (!dir || dir->i_sb != mdsc->fsc->sb) {
			/* not this fs or parent went negative */
			inode = d_inode(req->r_dentry);
			if (inode)
				ihold(inode);
		} else if (ceph_snap(dir) != CEPH_NOSNAP) {
			/* direct snapped/virtual snapdir requests
			 * based on parent dir inode */
			inode = get_nonsnap_parent(parent);
			dout("%s using nonsnap parent %p\n", __func__, inode);
		} else {
			/* dentry target */
			inode = d_inode(req->r_dentry);
			if (!inode || mode == USE_AUTH_MDS) {
				/* dir + name */
				inode = igrab(dir);
				hash = ceph_dentry_hash(dir, req->r_dentry);
				is_hash = true;
			} else {
				ihold(inode);
			}
		}
		rcu_read_unlock();
	}

	dout("%s %p is_hash=%d (0x%x) mode %d\n", __func__, inode, (int)is_hash,
	     hash, mode);
	if (!inode)
		goto random;
	ci = ceph_inode(inode);

	if (is_hash && S_ISDIR(inode->i_mode)) {
		struct ceph_inode_frag frag;
		int found;

		ceph_choose_frag(ci, hash, &frag, &found);
		if (found) {
			if (mode == USE_ANY_MDS && frag.ndist > 0) {
				u8 r;

				/* choose a random replica */
				get_random_bytes(&r, 1);
				r %= frag.ndist;
				mds = frag.dist[r];
				dout("%s %p %llx.%llx frag %u mds%d (%d/%d)\n",
				     __func__, inode, ceph_vinop(inode),
				     frag.frag, mds, (int)r, frag.ndist);
				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
				    CEPH_MDS_STATE_ACTIVE &&
				    !ceph_mdsmap_is_laggy(mdsc->mdsmap, mds))
					goto out;
			}

			/* since this file/dir wasn't known to be
			 * replicated, then we want to look for the
			 * authoritative mds. */
			if (frag.mds >= 0) {
				/* choose auth mds */
				mds = frag.mds;
				dout("%s %p %llx.%llx frag %u mds%d (auth)\n",
				     __func__, inode, ceph_vinop(inode),
				     frag.frag, mds);
				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
				    CEPH_MDS_STATE_ACTIVE) {
					if (!ceph_mdsmap_is_laggy(mdsc->mdsmap,
								  mds))
						goto out;
				}
			}
			mode = USE_AUTH_MDS;
		}
	}

	spin_lock(&ci->i_ceph_lock);
	cap = NULL;
	if (mode == USE_AUTH_MDS)
		cap = ci->i_auth_cap;
	if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
		cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
	if (!cap) {
		spin_unlock(&ci->i_ceph_lock);
		iput(inode);
		goto random;
	}
	mds = cap->session->s_mds;
	dout("%s %p %llx.%llx mds%d (%scap %p)\n", __func__,
	     inode, ceph_vinop(inode), mds,
	     cap == ci->i_auth_cap ? "auth " : "", cap);
	spin_unlock(&ci->i_ceph_lock);
out:
	iput(inode);
	return mds;

random:
	if (random)
		*random = true;

	mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
	dout("%s chose random mds%d\n", __func__, mds);
	return mds;
}


/*
 * session messages
 */
struct ceph_msg *ceph_create_session_msg(u32 op, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
			   false);
	if (!msg) {
		pr_err("ENOMEM creating session %s msg\n",
		       ceph_session_op_name(op));
		return NULL;
	}
	h = msg->front.iov_base;
	h->op = cpu_to_le32(op);
	h->seq = cpu_to_le64(seq);

	return msg;
}

static const unsigned char feature_bits[] = CEPHFS_FEATURES_CLIENT_SUPPORTED;
#define FEATURE_BYTES(c) (DIV_ROUND_UP((size_t)feature_bits[c - 1] + 1, 64) * 8)
static int encode_supported_features(void **p, void *end)
{
	static const size_t count = ARRAY_SIZE(feature_bits);

	if (count > 0) {
		size_t i;
		size_t size = FEATURE_BYTES(count);
		unsigned long bit;

		if (WARN_ON_ONCE(*p + 4 + size > end))
			return -ERANGE;

		ceph_encode_32(p, size);
		memset(*p, 0, size);
		for (i = 0; i < count; i++) {
			bit = feature_bits[i];
			((unsigned char *)(*p))[bit / 8] |= BIT(bit % 8);
		}
		*p += size;
	} else {
		if (WARN_ON_ONCE(*p + 4 > end))
			return -ERANGE;

		ceph_encode_32(p, 0);
	}

	return 0;
}
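
/*
 * FEATURE_BYTES() sizes the bitmap from the highest feature bit,
 * rounded up to whole 64-bit words.  For instance, if the largest
 * supported feature bit were 17 (an example value, not the real
 * feature list):
 *
 *	DIV_ROUND_UP(17 + 1, 64) * 8 = 1 * 8 = 8 bytes
 *
 * while a highest bit of 64 would need DIV_ROUND_UP(65, 64) * 8 = 16.
 */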

static const unsigned char metric_bits[] = CEPHFS_METRIC_SPEC_CLIENT_SUPPORTED;
#define METRIC_BYTES(cnt) (DIV_ROUND_UP((size_t)metric_bits[cnt - 1] + 1, 64) * 8)
static int encode_metric_spec(void **p, void *end)
{
	static const size_t count = ARRAY_SIZE(metric_bits);

	/* header */
	if (WARN_ON_ONCE(*p + 2 > end))
		return -ERANGE;

	ceph_encode_8(p, 1); /* version */
	ceph_encode_8(p, 1); /* compat */

	if (count > 0) {
		size_t i;
		size_t size = METRIC_BYTES(count);

		if (WARN_ON_ONCE(*p + 4 + 4 + size > end))
			return -ERANGE;

		/* metric spec info length */
		ceph_encode_32(p, 4 + size);

		/* metric spec */
		ceph_encode_32(p, size);
		memset(*p, 0, size);
		for (i = 0; i < count; i++)
			((unsigned char *)(*p))[i / 8] |= BIT(metric_bits[i] % 8);
		*p += size;
	} else {
		if (WARN_ON_ONCE(*p + 4 + 4 > end))
			return -ERANGE;

		/* metric spec info length */
		ceph_encode_32(p, 4);
		/* metric spec */
		ceph_encode_32(p, 0);
	}

	return 0;
}

/*
 * session message, specialization for CEPH_SESSION_REQUEST_OPEN
 * to include additional client metadata fields.
 */
static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;
	int i;
	int extra_bytes = 0;
	int metadata_key_count = 0;
	struct ceph_options *opt = mdsc->fsc->client->options;
	struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
	size_t size, count;
	void *p, *end;
	int ret;

	const char *metadata[][2] = {
		{"hostname", mdsc->nodename},
		{"kernel_version", init_utsname()->release},
		{"entity_id", opt->name ? : ""},
		{"root", fsopt->server_path ? : "/"},
		{NULL, NULL}
	};

	/* Calculate serialized length of metadata */
	extra_bytes = 4;  /* map length */
	for (i = 0; metadata[i][0]; ++i) {
		extra_bytes += 8 + strlen(metadata[i][0]) +
			strlen(metadata[i][1]);
		metadata_key_count++;
	}

	/* supported feature */
	size = 0;
	count = ARRAY_SIZE(feature_bits);
	if (count > 0)
		size = FEATURE_BYTES(count);
	extra_bytes += 4 + size;

	/* metric spec */
	size = 0;
	count = ARRAY_SIZE(metric_bits);
	if (count > 0)
		size = METRIC_BYTES(count);
	extra_bytes += 2 + 4 + 4 + size;

	/* Allocate the message */
	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + extra_bytes,
			   GFP_NOFS, false);
	if (!msg) {
		pr_err("ENOMEM creating session open msg\n");
		return ERR_PTR(-ENOMEM);
	}
	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	h = p;
	h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN);
	h->seq = cpu_to_le64(seq);

	/*
	 * Serialize client metadata into waiting buffer space, using
	 * the format that userspace expects for map<string, string>
	 *
	 * ClientSession messages with metadata are v4
	 */
	msg->hdr.version = cpu_to_le16(4);
	msg->hdr.compat_version = cpu_to_le16(1);

	/* The write pointer, following the session_head structure */
	p += sizeof(*h);

	/* Number of entries in the map */
	ceph_encode_32(&p, metadata_key_count);

	/* Two length-prefixed strings for each entry in the map */
	for (i = 0; metadata[i][0]; ++i) {
		size_t const key_len = strlen(metadata[i][0]);
		size_t const val_len = strlen(metadata[i][1]);

		ceph_encode_32(&p, key_len);
		memcpy(p, metadata[i][0], key_len);
		p += key_len;
		ceph_encode_32(&p, val_len);
		memcpy(p, metadata[i][1], val_len);
		p += val_len;
	}

	ret = encode_supported_features(&p, end);
	if (ret) {
		pr_err("encode_supported_features failed!\n");
		ceph_msg_put(msg);
		return ERR_PTR(ret);
	}

	ret = encode_metric_spec(&p, end);
	if (ret) {
		pr_err("encode_metric_spec failed!\n");
		ceph_msg_put(msg);
		return ERR_PTR(ret);
	}

	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	return msg;
}
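
/*
 * The metadata map uses the usual Ceph map<string,string> wire form:
 * a 32-bit entry count, then length-prefixed key and value strings.
 * A two-entry map would serialize roughly as:
 *
 *	le32 2
 *	le32 8  "hostname"   le32 4  "node"
 *	le32 4  "root"       le32 1  "/"
 *
 * ("node" is a made-up hostname, purely for illustration.)
 */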

/*
 * send session open request.
 *
 * called under mdsc->mutex
 */
static int __open_session(struct ceph_mds_client *mdsc,
			  struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int mstate;
	int mds = session->s_mds;

	/* wait for mds to go active? */
	mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
	dout("open_session to mds%d (%s)\n", mds,
	     ceph_mds_state_name(mstate));
	session->s_state = CEPH_MDS_SESSION_OPENING;
	session->s_renew_requested = jiffies;

	/* send connect message */
	msg = create_session_open_msg(mdsc, session->s_seq);
	if (IS_ERR(msg))
		return PTR_ERR(msg);
	ceph_con_send(&session->s_con, msg);
	return 0;
}

/*
 * open sessions for any export targets for the given mds
 *
 * called under mdsc->mutex
 */
static struct ceph_mds_session *
__open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
	struct ceph_mds_session *session;
	int ret;

	session = __ceph_lookup_mds_session(mdsc, target);
	if (!session) {
		session = register_session(mdsc, target);
		if (IS_ERR(session))
			return session;
	}
	if (session->s_state == CEPH_MDS_SESSION_NEW ||
	    session->s_state == CEPH_MDS_SESSION_CLOSING) {
		ret = __open_session(mdsc, session);
		if (ret)
			return ERR_PTR(ret);
	}

	return session;
}

struct ceph_mds_session *
ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
	struct ceph_mds_session *session;

	dout("open_export_target_session to mds%d\n", target);

	mutex_lock(&mdsc->mutex);
	session = __open_export_target_session(mdsc, target);
	mutex_unlock(&mdsc->mutex);

	return session;
}

static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
					  struct ceph_mds_session *session)
{
	struct ceph_mds_info *mi;
	struct ceph_mds_session *ts;
	int i, mds = session->s_mds;

	if (mds >= mdsc->mdsmap->possible_max_rank)
		return;

	mi = &mdsc->mdsmap->m_info[mds];
	dout("open_export_target_sessions for mds%d (%d targets)\n",
	     session->s_mds, mi->num_export_targets);

	for (i = 0; i < mi->num_export_targets; i++) {
		ts = __open_export_target_session(mdsc, mi->export_targets[i]);
		ceph_put_mds_session(ts);
	}
}

void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
					   struct ceph_mds_session *session)
{
	mutex_lock(&mdsc->mutex);
	__open_export_target_sessions(mdsc, session);
	mutex_unlock(&mdsc->mutex);
}

/*
 * session caps
 */

static void detach_cap_releases(struct ceph_mds_session *session,
				struct list_head *target)
{
	lockdep_assert_held(&session->s_cap_lock);

	list_splice_init(&session->s_cap_releases, target);
	session->s_num_cap_releases = 0;
	dout("dispose_cap_releases mds%d\n", session->s_mds);
}

static void dispose_cap_releases(struct ceph_mds_client *mdsc,
				 struct list_head *dispose)
{
	while (!list_empty(dispose)) {
		struct ceph_cap *cap;
		/* zero out the in-progress message */
		cap = list_first_entry(dispose, struct ceph_cap, session_caps);
		list_del(&cap->session_caps);
		ceph_put_cap(mdsc, cap);
	}
}

static void cleanup_session_requests(struct ceph_mds_client *mdsc,
				     struct ceph_mds_session *session)
{
	struct ceph_mds_request *req;
	struct rb_node *p;

	dout("cleanup_session_requests mds%d\n", session->s_mds);
	mutex_lock(&mdsc->mutex);
	while (!list_empty(&session->s_unsafe)) {
		req = list_first_entry(&session->s_unsafe,
				       struct ceph_mds_request, r_unsafe_item);
		pr_warn_ratelimited(" dropping unsafe request %llu\n",
				    req->r_tid);
		if (req->r_target_inode)
			mapping_set_error(req->r_target_inode->i_mapping, -EIO);
		if (req->r_unsafe_dir)
			mapping_set_error(req->r_unsafe_dir->i_mapping, -EIO);
		__unregister_request(mdsc, req);
	}
	/* zero r_attempts, so kick_requests() will re-send requests */
	p = rb_first(&mdsc->request_tree);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (req->r_session &&
		    req->r_session->s_mds == session->s_mds)
			req->r_attempts = 0;
	}
	mutex_unlock(&mdsc->mutex);
}

/*
 * Helper to safely iterate over all caps associated with a session, with
 * special care taken to handle a racing __ceph_remove_cap().
 *
 * Caller must hold session s_mutex.
 */
int ceph_iterate_session_caps(struct ceph_mds_session *session,
			      int (*cb)(struct inode *, struct ceph_cap *,
					void *), void *arg)
{
	struct list_head *p;
	struct ceph_cap *cap;
	struct inode *inode, *last_inode = NULL;
	struct ceph_cap *old_cap = NULL;
	int ret;

	dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
	spin_lock(&session->s_cap_lock);
	p = session->s_caps.next;
	while (p != &session->s_caps) {
		cap = list_entry(p, struct ceph_cap, session_caps);
		inode = igrab(&cap->ci->vfs_inode);
		if (!inode) {
			p = p->next;
			continue;
		}
		session->s_cap_iterator = cap;
		spin_unlock(&session->s_cap_lock);

		if (last_inode) {
			iput(last_inode);
			last_inode = NULL;
		}
		if (old_cap) {
			ceph_put_cap(session->s_mdsc, old_cap);
			old_cap = NULL;
		}

		ret = cb(inode, cap, arg);
		last_inode = inode;

		spin_lock(&session->s_cap_lock);
		p = p->next;
		if (!cap->ci) {
			dout("iterate_session_caps finishing cap %p removal\n",
			     cap);
			BUG_ON(cap->session != session);
			cap->session = NULL;
			list_del_init(&cap->session_caps);
			session->s_nr_caps--;
			atomic64_dec(&session->s_mdsc->metric.total_caps);
			if (cap->queue_release)
				__ceph_queue_cap_release(session, cap);
			else
				old_cap = cap;  /* put_cap it w/o locks held */
		}
		if (ret < 0)
			goto out;
	}
	ret = 0;
out:
	session->s_cap_iterator = NULL;
	spin_unlock(&session->s_cap_lock);

	iput(last_inode);
	if (old_cap)
		ceph_put_cap(session->s_mdsc, old_cap);

	return ret;
}
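
/*
 * The deferred iput()/ceph_put_cap() of the *previous* inode and cap
 * above is deliberate: both puts can sleep (iput may even drop the
 * final inode reference), so they must happen only after s_cap_lock
 * has been released.  Holding an inode reference for the current
 * entry is also what keeps the list cursor 'p' valid across the
 * unlocked callback window.
 */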

static int remove_capsnaps(struct ceph_mds_client *mdsc, struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap_snap *capsnap;
	int capsnap_release = 0;

	lockdep_assert_held(&ci->i_ceph_lock);

	dout("removing capsnaps, ci is %p, inode is %p\n", ci, inode);

	while (!list_empty(&ci->i_cap_snaps)) {
		capsnap = list_first_entry(&ci->i_cap_snaps,
					   struct ceph_cap_snap, ci_item);
		__ceph_remove_capsnap(inode, capsnap, NULL, NULL);
		ceph_put_snap_context(capsnap->context);
		ceph_put_cap_snap(capsnap);
		capsnap_release++;
	}
	wake_up_all(&ci->i_cap_wq);
	wake_up_all(&mdsc->cap_flushing_wq);
	return capsnap_release;
}

static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
				  void *arg)
{
	struct ceph_fs_client *fsc = (struct ceph_fs_client *)arg;
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	LIST_HEAD(to_remove);
	bool dirty_dropped = false;
	bool invalidate = false;
	int capsnap_release = 0;

	dout("removing cap %p, ci is %p, inode is %p\n",
	     cap, ci, &ci->vfs_inode);
	spin_lock(&ci->i_ceph_lock);
	__ceph_remove_cap(cap, false);
	if (!ci->i_auth_cap) {
		struct ceph_cap_flush *cf;

		if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
			if (inode->i_data.nrpages > 0)
				invalidate = true;
			if (ci->i_wrbuffer_ref > 0)
				mapping_set_error(&inode->i_data, -EIO);
		}

		while (!list_empty(&ci->i_cap_flush_list)) {
			cf = list_first_entry(&ci->i_cap_flush_list,
					      struct ceph_cap_flush, i_list);
			list_move(&cf->i_list, &to_remove);
		}

		spin_lock(&mdsc->cap_dirty_lock);

		list_for_each_entry(cf, &to_remove, i_list)
			list_del_init(&cf->g_list);

		if (!list_empty(&ci->i_dirty_item)) {
			pr_warn_ratelimited(
				" dropping dirty %s state for %p %lld\n",
				ceph_cap_string(ci->i_dirty_caps),
				inode, ceph_ino(inode));
			ci->i_dirty_caps = 0;
			list_del_init(&ci->i_dirty_item);
			dirty_dropped = true;
		}
		if (!list_empty(&ci->i_flushing_item)) {
			pr_warn_ratelimited(
				" dropping dirty+flushing %s state for %p %lld\n",
				ceph_cap_string(ci->i_flushing_caps),
				inode, ceph_ino(inode));
			ci->i_flushing_caps = 0;
			list_del_init(&ci->i_flushing_item);
			mdsc->num_cap_flushing--;
			dirty_dropped = true;
		}
		spin_unlock(&mdsc->cap_dirty_lock);

		if (dirty_dropped) {
			mapping_set_error(inode->i_mapping, -EIO);

			if (ci->i_wrbuffer_ref_head == 0 &&
			    ci->i_wr_ref == 0 &&
			    ci->i_dirty_caps == 0 &&
			    ci->i_flushing_caps == 0) {
				ceph_put_snap_context(ci->i_head_snapc);
				ci->i_head_snapc = NULL;
			}
		}

		if (atomic_read(&ci->i_filelock_ref) > 0) {
			/* make further file lock syscall return -EIO */
			ci->i_ceph_flags |= CEPH_I_ERROR_FILELOCK;
			pr_warn_ratelimited(" dropping file locks for %p %lld\n",
					    inode, ceph_ino(inode));
		}

		if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
			list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
			ci->i_prealloc_cap_flush = NULL;
		}

		if (!list_empty(&ci->i_cap_snaps))
			capsnap_release = remove_capsnaps(mdsc, inode);
	}
	spin_unlock(&ci->i_ceph_lock);
	while (!list_empty(&to_remove)) {
		struct ceph_cap_flush *cf;
		cf = list_first_entry(&to_remove,
				      struct ceph_cap_flush, i_list);
		list_del_init(&cf->i_list);
		if (!cf->is_capsnap)
			ceph_free_cap_flush(cf);
	}

	wake_up_all(&ci->i_cap_wq);
	if (invalidate)
		ceph_queue_invalidate(inode);
	if (dirty_dropped)
		iput(inode);
	while (capsnap_release--)
		iput(inode);
	return 0;
}

/*
 * caller must hold session s_mutex
 */
static void remove_session_caps(struct ceph_mds_session *session)
{
	struct ceph_fs_client *fsc = session->s_mdsc->fsc;
	struct super_block *sb = fsc->sb;
	LIST_HEAD(dispose);

	dout("remove_session_caps on %p\n", session);
	ceph_iterate_session_caps(session, remove_session_caps_cb, fsc);

	wake_up_all(&fsc->mdsc->cap_flushing_wq);

	spin_lock(&session->s_cap_lock);
	if (session->s_nr_caps > 0) {
		struct inode *inode;
		struct ceph_cap *cap, *prev = NULL;
		struct ceph_vino vino;
		/*
		 * iterate_session_caps() skips inodes that are being
		 * deleted, we need to wait until deletions are complete.
		 * __wait_on_freeing_inode() is designed for the job,
		 * but it is not exported, so use lookup inode function
		 * to access it.
		 */
		while (!list_empty(&session->s_caps)) {
			cap = list_entry(session->s_caps.next,
					 struct ceph_cap, session_caps);
			if (cap == prev)
				break;
			prev = cap;
			vino = cap->ci->i_vino;
			spin_unlock(&session->s_cap_lock);

			inode = ceph_find_inode(sb, vino);
			iput(inode);

			spin_lock(&session->s_cap_lock);
		}
	}

	// drop cap expires and unlock s_cap_lock
	detach_cap_releases(session, &dispose);

	BUG_ON(session->s_nr_caps > 0);
	BUG_ON(!list_empty(&session->s_cap_flushing));
	spin_unlock(&session->s_cap_lock);
	dispose_cap_releases(session->s_mdsc, &dispose);
}

enum {
	RECONNECT,
	RENEWCAPS,
	FORCE_RO,
};

/*
 * wake up any threads waiting on this session's caps.  if the cap is
 * old (didn't get renewed on the client reconnect), remove it now.
 *
 * caller must hold s_mutex.
 */
static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
			      void *arg)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	unsigned long ev = (unsigned long)arg;

	if (ev == RECONNECT) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_wanted_max_size = 0;
		ci->i_requested_max_size = 0;
		spin_unlock(&ci->i_ceph_lock);
	} else if (ev == RENEWCAPS) {
		if (cap->cap_gen < atomic_read(&cap->session->s_cap_gen)) {
			/* mds did not re-issue stale cap */
			spin_lock(&ci->i_ceph_lock);
			cap->issued = cap->implemented = CEPH_CAP_PIN;
			spin_unlock(&ci->i_ceph_lock);
		}
	} else if (ev == FORCE_RO) {
	}
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

static void wake_up_session_caps(struct ceph_mds_session *session, int ev)
{
	dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
	ceph_iterate_session_caps(session, wake_up_session_cb,
				  (void *)(unsigned long)ev);
}

/*
 * Send periodic message to MDS renewing all currently held caps.  The
 * ack will reset the expiration for all caps from this session.
 *
 * caller holds s_mutex
 */
static int send_renew_caps(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int state;

	if (time_after_eq(jiffies, session->s_cap_ttl) &&
	    time_after_eq(session->s_cap_ttl, session->s_renew_requested))
		pr_info("mds%d caps stale\n", session->s_mds);
	session->s_renew_requested = jiffies;

	/* do not try to renew caps until a recovering mds has reconnected
	 * with its clients. */
	state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
	if (state < CEPH_MDS_STATE_RECONNECT) {
		dout("send_renew_caps ignoring mds%d (%s)\n",
		     session->s_mds, ceph_mds_state_name(state));
		return 0;
	}

	dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
	     ceph_mds_state_name(state));
	msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
				      ++session->s_renew_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}

static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
			     struct ceph_mds_session *session, u64 seq)
{
	struct ceph_msg *msg;

	dout("send_flushmsg_ack to mds%d (%s) seq %lld\n",
	     session->s_mds, ceph_session_state_name(session->s_state), seq);
	msg = ceph_create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}
1866
1867
1868 /*
1869 * Note new cap ttl, and any transition from stale -> not stale (fresh?).
1870 *
1871 * Called under session->s_mutex
1872 */
1873 static void renewed_caps(struct ceph_mds_client *mdsc,
1874 struct ceph_mds_session *session, int is_renew)
1875 {
1876 int was_stale;
1877 int wake = 0;
1878
1879 spin_lock(&session->s_cap_lock);
1880 was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);
1881
1882 session->s_cap_ttl = session->s_renew_requested +
1883 mdsc->mdsmap->m_session_timeout*HZ;
1884
1885 if (was_stale) {
1886 if (time_before(jiffies, session->s_cap_ttl)) {
1887 pr_info("mds%d caps renewed\n", session->s_mds);
1888 wake = 1;
1889 } else {
1890 pr_info("mds%d caps still stale\n", session->s_mds);
1891 }
1892 }
1893 dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
1894 session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
1895 time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
1896 spin_unlock(&session->s_cap_lock);
1897
1898 if (wake)
1899 wake_up_session_caps(session, RENEWCAPS);
1900 }
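
/*
 * Timing sketch (illustrative numbers, not from the source): with
 * m_session_timeout = 60 and a renew requested at jiffies J, a
 * successful ack moves s_cap_ttl to J + 60*HZ.  If that deadline has
 * already passed by the time the ack arrives, the caps remain stale,
 * only "caps still stale" is logged, and no waiters are woken.
 */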
1901
1902 /*
1903 * send a session close request
1904 */
1905 static int request_close_session(struct ceph_mds_session *session)
1906 {
1907 struct ceph_msg *msg;
1908
1909 dout("request_close_session mds%d state %s seq %lld\n",
1910 session->s_mds, ceph_session_state_name(session->s_state),
1911 session->s_seq);
1912 msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_CLOSE,
1913 session->s_seq);
1914 if (!msg)
1915 return -ENOMEM;
1916 ceph_con_send(&session->s_con, msg);
1917 return 1;
1918 }
1919
1920 /*
1921 * Called with s_mutex held.
1922 */
1923 static int __close_session(struct ceph_mds_client *mdsc,
1924 struct ceph_mds_session *session)
1925 {
1926 if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
1927 return 0;
1928 session->s_state = CEPH_MDS_SESSION_CLOSING;
1929 return request_close_session(session);
1930 }
1931
1932 static bool drop_negative_children(struct dentry *dentry)
1933 {
1934 struct dentry *child;
1935 bool all_negative = true;
1936
1937 if (!d_is_dir(dentry))
1938 goto out;
1939
1940 spin_lock(&dentry->d_lock);
1941 list_for_each_entry(child, &dentry->d_subdirs, d_child) {
1942 if (d_really_is_positive(child)) {
1943 all_negative = false;
1944 break;
1945 }
1946 }
1947 spin_unlock(&dentry->d_lock);
1948
1949 if (all_negative)
1950 shrink_dcache_parent(dentry);
1951 out:
1952 return all_negative;
1953 }
1954
1955 /*
1956 * Trim old(er) caps.
1957 *
1958 * Because we can't cache an inode without one or more caps, we do
1959 * this indirectly: if a cap is unused, we prune its aliases, at which
1960 * point the inode will hopefully get dropped too.
1961 *
1962 * Yes, this is a bit sloppy. Our only real goal here is to respond to
1963 * memory pressure from the MDS, though, so it needn't be perfect.
1964 */
1965 static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
1966 {
1967 int *remaining = arg;
1968 struct ceph_inode_info *ci = ceph_inode(inode);
1969 int used, wanted, oissued, mine;
1970
1971 if (*remaining <= 0)
1972 return -1;
1973
1974 spin_lock(&ci->i_ceph_lock);
1975 mine = cap->issued | cap->implemented;
1976 used = __ceph_caps_used(ci);
1977 wanted = __ceph_caps_file_wanted(ci);
1978 oissued = __ceph_caps_issued_other(ci, cap);
1979
1980 dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n",
1981 inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
1982 ceph_cap_string(used), ceph_cap_string(wanted));
1983 if (cap == ci->i_auth_cap) {
1984 if (ci->i_dirty_caps || ci->i_flushing_caps ||
1985 !list_empty(&ci->i_cap_snaps))
1986 goto out;
1987 if ((used | wanted) & CEPH_CAP_ANY_WR)
1988 goto out;
1989 /* Note: it's possible that i_filelock_ref becomes non-zero
1990 * after dropping auth caps. It doesn't hurt because reply
1991 * of lock mds request will re-add auth caps. */
1992 if (atomic_read(&ci->i_filelock_ref) > 0)
1993 goto out;
1994 }
1995 /* The inode has cached pages, but it's no longer used.
1996 * We can safely drop it. */
1997 if (S_ISREG(inode->i_mode) &&
1998 wanted == 0 && used == CEPH_CAP_FILE_CACHE &&
1999 !(oissued & CEPH_CAP_FILE_CACHE)) {
2000 used = 0;
2001 oissued = 0;
2002 }
2003 if ((used | wanted) & ~oissued & mine)
2004 goto out; /* we need these caps */
2005
2006 if (oissued) {
2007 /* we aren't the only cap.. just remove us */
2008 ceph_remove_cap(cap, true);
2009 (*remaining)--;
2010 } else {
2011 struct dentry *dentry;
2012 /* try dropping referring dentries */
2013 spin_unlock(&ci->i_ceph_lock);
2014 dentry = d_find_any_alias(inode);
2015 if (dentry && drop_negative_children(dentry)) {
2016 int count;
2017 dput(dentry);
2018 d_prune_aliases(inode);
2019 count = atomic_read(&inode->i_count);
2020 if (count == 1)
2021 (*remaining)--;
2022 dout("trim_caps_cb %p cap %p pruned, count now %d\n",
2023 inode, cap, count);
2024 } else {
2025 dput(dentry);
2026 }
2027 return 0;
2028 }
2029
2030 out:
2031 spin_unlock(&ci->i_ceph_lock);
2032 return 0;
2033 }
2034
2035 /*
2036 * Trim session cap count down to some max number.
2037 */
2038 int ceph_trim_caps(struct ceph_mds_client *mdsc,
2039 struct ceph_mds_session *session,
2040 int max_caps)
2041 {
2042 int trim_caps = session->s_nr_caps - max_caps;
2043
2044 dout("trim_caps mds%d start: %d / %d, trim %d\n",
2045 session->s_mds, session->s_nr_caps, max_caps, trim_caps);
2046 if (trim_caps > 0) {
2047 int remaining = trim_caps;
2048
2049 ceph_iterate_session_caps(session, trim_caps_cb, &remaining);
2050 dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
2051 session->s_mds, session->s_nr_caps, max_caps,
2052 trim_caps - remaining);
2053 }
2054
2055 ceph_flush_cap_releases(mdsc, session);
2056 return 0;
2057 }
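
/*
 * Example trigger (a sketch based on the RECALL_STATE handler further
 * down in this file): under memory pressure the MDS sends
 * CEPH_SESSION_RECALL_STATE with a cap limit, and the session handler
 * simply calls
 *
 *	ceph_trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
 *
 * so a session holding 8000 caps against max_caps = 5000 walks
 * trim_caps_cb until roughly 3000 caps have been dropped (or the walk
 * runs out of trimmable caps).
 */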
2058
2059 static int check_caps_flush(struct ceph_mds_client *mdsc,
2060 u64 want_flush_tid)
2061 {
2062 int ret = 1;
2063
2064 spin_lock(&mdsc->cap_dirty_lock);
2065 if (!list_empty(&mdsc->cap_flush_list)) {
2066 struct ceph_cap_flush *cf =
2067 list_first_entry(&mdsc->cap_flush_list,
2068 struct ceph_cap_flush, g_list);
2069 if (cf->tid <= want_flush_tid) {
2070 dout("check_caps_flush still flushing tid "
2071 "%llu <= %llu\n", cf->tid, want_flush_tid);
2072 ret = 0;
2073 }
2074 }
2075 spin_unlock(&mdsc->cap_dirty_lock);
2076 return ret;
2077 }
2078
2079 /*
2080 * wait until dirty cap data has been flushed back to the MDS.
2081 *
2082 * returns once we've flushed through want_flush_tid
2083 */
2084 static void wait_caps_flush(struct ceph_mds_client *mdsc,
2085 u64 want_flush_tid)
2086 {
2087 dout("check_caps_flush want %llu\n", want_flush_tid);
2088
2089 wait_event(mdsc->cap_flushing_wq,
2090 check_caps_flush(mdsc, want_flush_tid));
2091
2092 dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid);
2093 }
2094
2095 /*
2096 * called under s_mutex
2097 */
2098 static void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
2099 struct ceph_mds_session *session)
2100 {
2101 struct ceph_msg *msg = NULL;
2102 struct ceph_mds_cap_release *head;
2103 struct ceph_mds_cap_item *item;
2104 struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc;
2105 struct ceph_cap *cap;
2106 LIST_HEAD(tmp_list);
2107 int num_cap_releases;
2108 __le32 barrier, *cap_barrier;
2109
2110 down_read(&osdc->lock);
2111 barrier = cpu_to_le32(osdc->epoch_barrier);
2112 up_read(&osdc->lock);
2113
2114 spin_lock(&session->s_cap_lock);
2115 again:
2116 list_splice_init(&session->s_cap_releases, &tmp_list);
2117 num_cap_releases = session->s_num_cap_releases;
2118 session->s_num_cap_releases = 0;
2119 spin_unlock(&session->s_cap_lock);
2120
2121 while (!list_empty(&tmp_list)) {
2122 if (!msg) {
2123 msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
2124 PAGE_SIZE, GFP_NOFS, false);
2125 if (!msg)
2126 goto out_err;
2127 head = msg->front.iov_base;
2128 head->num = cpu_to_le32(0);
2129 msg->front.iov_len = sizeof(*head);
2130
2131 msg->hdr.version = cpu_to_le16(2);
2132 msg->hdr.compat_version = cpu_to_le16(1);
2133 }
2134
2135 cap = list_first_entry(&tmp_list, struct ceph_cap,
2136 session_caps);
2137 list_del(&cap->session_caps);
2138 num_cap_releases--;
2139
2140 head = msg->front.iov_base;
2141 put_unaligned_le32(get_unaligned_le32(&head->num) + 1,
2142 &head->num);
2143 item = msg->front.iov_base + msg->front.iov_len;
2144 item->ino = cpu_to_le64(cap->cap_ino);
2145 item->cap_id = cpu_to_le64(cap->cap_id);
2146 item->migrate_seq = cpu_to_le32(cap->mseq);
2147 item->seq = cpu_to_le32(cap->issue_seq);
2148 msg->front.iov_len += sizeof(*item);
2149
2150 ceph_put_cap(mdsc, cap);
2151
2152 if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
2153 // Append cap_barrier field
2154 cap_barrier = msg->front.iov_base + msg->front.iov_len;
2155 *cap_barrier = barrier;
2156 msg->front.iov_len += sizeof(*cap_barrier);
2157
2158 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2159 dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
2160 ceph_con_send(&session->s_con, msg);
2161 msg = NULL;
2162 }
2163 }
2164
2165 BUG_ON(num_cap_releases != 0);
2166
2167 spin_lock(&session->s_cap_lock);
2168 if (!list_empty(&session->s_cap_releases))
2169 goto again;
2170 spin_unlock(&session->s_cap_lock);
2171
2172 if (msg) {
2173 // Append cap_barrier field
2174 cap_barrier = msg->front.iov_base + msg->front.iov_len;
2175 *cap_barrier = barrier;
2176 msg->front.iov_len += sizeof(*cap_barrier);
2177
2178 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2179 dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
2180 ceph_con_send(&session->s_con, msg);
2181 }
2182 return;
2183 out_err:
2184 pr_err("send_cap_releases mds%d, failed to allocate message\n",
2185 session->s_mds);
2186 spin_lock(&session->s_cap_lock);
2187 list_splice(&tmp_list, &session->s_cap_releases);
2188 session->s_num_cap_releases += num_cap_releases;
2189 spin_unlock(&session->s_cap_lock);
2190 }
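
/*
 * Message layout sketch (illustrative): each CAPRELEASE message built
 * above is
 *
 *	struct ceph_mds_cap_release head;    head.num = item count
 *	struct ceph_mds_cap_item item[num];  ino/cap_id/migrate_seq/seq
 *	__le32 cap_barrier;                  osdc epoch barrier (v2)
 *
 * and a message is flushed every CEPH_CAPS_PER_RELEASE items, with one
 * final message for the remainder.
 */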
2191
2192 static void ceph_cap_release_work(struct work_struct *work)
2193 {
2194 struct ceph_mds_session *session =
2195 container_of(work, struct ceph_mds_session, s_cap_release_work);
2196
2197 mutex_lock(&session->s_mutex);
2198 if (session->s_state == CEPH_MDS_SESSION_OPEN ||
2199 session->s_state == CEPH_MDS_SESSION_HUNG)
2200 ceph_send_cap_releases(session->s_mdsc, session);
2201 mutex_unlock(&session->s_mutex);
2202 ceph_put_mds_session(session);
2203 }
2204
2205 void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
2206 struct ceph_mds_session *session)
2207 {
2208 if (mdsc->stopping)
2209 return;
2210
2211 ceph_get_mds_session(session);
2212 if (queue_work(mdsc->fsc->cap_wq,
2213 &session->s_cap_release_work)) {
2214 dout("cap release work queued\n");
2215 } else {
2216 ceph_put_mds_session(session);
2217 dout("failed to queue cap release work\n");
2218 }
2219 }
2220
2221 /*
2222 * caller holds session->s_cap_lock
2223 */
2224 void __ceph_queue_cap_release(struct ceph_mds_session *session,
2225 struct ceph_cap *cap)
2226 {
2227 list_add_tail(&cap->session_caps, &session->s_cap_releases);
2228 session->s_num_cap_releases++;
2229
2230 if (!(session->s_num_cap_releases % CEPH_CAPS_PER_RELEASE))
2231 ceph_flush_cap_releases(session->s_mdsc, session);
2232 }
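
/*
 * Usage sketch (illustrative; mirrors the locking comment above):
 *
 *	spin_lock(&session->s_cap_lock);
 *	__ceph_queue_cap_release(session, cap);
 *	spin_unlock(&session->s_cap_lock);
 *
 * The queued caps are drained later by ceph_cap_release_work().
 */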
2233
2234 static void ceph_cap_reclaim_work(struct work_struct *work)
2235 {
2236 struct ceph_mds_client *mdsc =
2237 container_of(work, struct ceph_mds_client, cap_reclaim_work);
2238 int ret = ceph_trim_dentries(mdsc);
2239 if (ret == -EAGAIN)
2240 ceph_queue_cap_reclaim_work(mdsc);
2241 }
2242
2243 void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc)
2244 {
2245 if (mdsc->stopping)
2246 return;
2247
2248 if (queue_work(mdsc->fsc->cap_wq, &mdsc->cap_reclaim_work)) {
2249 dout("caps reclaim work queued\n");
2250 } else {
2251 dout("failed to queue caps release work\n");
2252 }
2253 }
2254
2255 void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr)
2256 {
2257 int val;
2258 if (!nr)
2259 return;
2260 val = atomic_add_return(nr, &mdsc->cap_reclaim_pending);
2261 if ((val % CEPH_CAPS_PER_RELEASE) < nr) {
2262 atomic_set(&mdsc->cap_reclaim_pending, 0);
2263 ceph_queue_cap_reclaim_work(mdsc);
2264 }
2265 }
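
/*
 * Worked example (illustrative numbers; assuming CEPH_CAPS_PER_RELEASE
 * is 128): a pending count of 120 plus nr = 10 gives val = 130, and
 * 130 % 128 = 2 < 10 means the counter just wrapped a release boundary,
 * so the reclaim work is queued and the counter reset.  The same call
 * at pending = 20 (val = 30, 30 % 128 = 30 >= 10) merely accumulates.
 */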
2266
2267 /*
2268 * requests
2269 */
2270
2271 int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
2272 struct inode *dir)
2273 {
2274 struct ceph_inode_info *ci = ceph_inode(dir);
2275 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
2276 struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
2277 size_t size = sizeof(struct ceph_mds_reply_dir_entry);
2278 unsigned int num_entries;
2279 int order;
2280
2281 spin_lock(&ci->i_ceph_lock);
2282 num_entries = ci->i_files + ci->i_subdirs;
2283 spin_unlock(&ci->i_ceph_lock);
2284 num_entries = max(num_entries, 1U);
2285 num_entries = min(num_entries, opt->max_readdir);
2286
2287 order = get_order(size * num_entries);
2288 while (order >= 0) {
2289 rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL |
2290 __GFP_NOWARN,
2291 order);
2292 if (rinfo->dir_entries)
2293 break;
2294 order--;
2295 }
2296 if (!rinfo->dir_entries)
2297 return -ENOMEM;
2298
2299 num_entries = (PAGE_SIZE << order) / size;
2300 num_entries = min(num_entries, opt->max_readdir);
2301
2302 rinfo->dir_buf_size = PAGE_SIZE << order;
2303 req->r_num_caps = num_entries + 1;
2304 req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
2305 req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
2306 return 0;
2307 }
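
/*
 * Sizing example (illustrative; the entry size is an assumption chosen
 * for round numbers): with 4K pages and a 64-byte
 * ceph_mds_reply_dir_entry, a directory reporting 500 entries asks
 * get_order(32000), i.e. order 3 (32K).  That buffer then holds
 * 32768 / 64 = 512 entries, which is clamped to opt->max_readdir before
 * being advertised to the MDS in max_entries.
 */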
2308
2309 /*
2310 * Create an mds request.
2311 */
2312 struct ceph_mds_request *
2313 ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
2314 {
2315 struct ceph_mds_request *req;
2316
2317 req = kmem_cache_zalloc(ceph_mds_request_cachep, GFP_NOFS);
2318 if (!req)
2319 return ERR_PTR(-ENOMEM);
2320
2321 mutex_init(&req->r_fill_mutex);
2322 req->r_mdsc = mdsc;
2323 req->r_started = jiffies;
2324 req->r_start_latency = ktime_get();
2325 req->r_resend_mds = -1;
2326 INIT_LIST_HEAD(&req->r_unsafe_dir_item);
2327 INIT_LIST_HEAD(&req->r_unsafe_target_item);
2328 req->r_fmode = -1;
2329 kref_init(&req->r_kref);
2330 RB_CLEAR_NODE(&req->r_node);
2331 INIT_LIST_HEAD(&req->r_wait);
2332 init_completion(&req->r_completion);
2333 init_completion(&req->r_safe_completion);
2334 INIT_LIST_HEAD(&req->r_unsafe_item);
2335
2336 ktime_get_coarse_real_ts64(&req->r_stamp);
2337
2338 req->r_op = op;
2339 req->r_direct_mode = mode;
2340 return req;
2341 }
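
/*
 * Typical caller pattern (a trimmed sketch of how fs/ceph callers such
 * as getattr drive this; error handling and op-specific args elided):
 *
 *	struct ceph_mds_request *req;
 *
 *	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR,
 *				       USE_AUTH_MDS);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	req->r_inode = inode;
 *	ihold(inode);
 *	req->r_num_caps = 1;
 *	err = ceph_mdsc_do_request(mdsc, NULL, req);
 *	ceph_mdsc_put_request(req);
 *
 * The request destructor drops the inode reference taken here.
 */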
2342
2343 /*
2344 * return the oldest (lowest-tid) request in the request tree, or NULL if none.
2345 *
2346 * called under mdsc->mutex.
2347 */
2348 static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
2349 {
2350 if (RB_EMPTY_ROOT(&mdsc->request_tree))
2351 return NULL;
2352 return rb_entry(rb_first(&mdsc->request_tree),
2353 struct ceph_mds_request, r_node);
2354 }
2355
2356 static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
2357 {
2358 return mdsc->oldest_tid;
2359 }
2360
2361 /*
2362 * Build a dentry's path. Allocated via __getname(); the caller must
2363 * free it with ceph_mdsc_free_path(). Based on build_path_from_dentry in fs/cifs/dir.c.
2364 *
2365 * If @stop_on_nosnap, generate path relative to the first non-snapped
2366 * inode.
2367 *
2368 * Encode hidden .snap dirs as a double /, i.e.
2369 * foo/.snap/bar -> foo//bar
2370 */
2371 char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *pbase,
2372 int stop_on_nosnap)
2373 {
2374 struct dentry *temp;
2375 char *path;
2376 int pos;
2377 unsigned seq;
2378 u64 base;
2379
2380 if (!dentry)
2381 return ERR_PTR(-EINVAL);
2382
2383 path = __getname();
2384 if (!path)
2385 return ERR_PTR(-ENOMEM);
2386 retry:
2387 pos = PATH_MAX - 1;
2388 path[pos] = '\0';
2389
2390 seq = read_seqbegin(&rename_lock);
2391 rcu_read_lock();
2392 temp = dentry;
2393 for (;;) {
2394 struct inode *inode;
2395
2396 spin_lock(&temp->d_lock);
2397 inode = d_inode(temp);
2398 if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
2399 dout("build_path path+%d: %p SNAPDIR\n",
2400 pos, temp);
2401 } else if (stop_on_nosnap && inode && dentry != temp &&
2402 ceph_snap(inode) == CEPH_NOSNAP) {
2403 spin_unlock(&temp->d_lock);
2404 pos++; /* get rid of any prepended '/' */
2405 break;
2406 } else {
2407 pos -= temp->d_name.len;
2408 if (pos < 0) {
2409 spin_unlock(&temp->d_lock);
2410 break;
2411 }
2412 memcpy(path + pos, temp->d_name.name, temp->d_name.len);
2413 }
2414 spin_unlock(&temp->d_lock);
2415 temp = READ_ONCE(temp->d_parent);
2416
2417 /* Are we at the root? */
2418 if (IS_ROOT(temp))
2419 break;
2420
2421 /* Are we out of buffer? */
2422 if (--pos < 0)
2423 break;
2424
2425 path[pos] = '/';
2426 }
2427 base = ceph_ino(d_inode(temp));
2428 rcu_read_unlock();
2429
2430 if (read_seqretry(&rename_lock, seq))
2431 goto retry;
2432
2433 if (pos < 0) {
2434 /*
2435 * A rename didn't occur, but somehow we didn't end up where
2436 * we thought we would. Throw a warning and try again.
2437 */
2438 pr_warn("build_path did not end path lookup where "
2439 "expected, pos is %d\n", pos);
2440 goto retry;
2441 }
2442
2443 *pbase = base;
2444 *plen = PATH_MAX - 1 - pos;
2445 dout("build_path on %p %d built %llx '%.*s'\n",
2446 dentry, d_count(dentry), base, *plen, path + pos);
2447 return path + pos;
2448 }
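
/*
 * Caller sketch (illustrative): the returned pointer aliases into a
 * __getname() buffer, so it must be released with ceph_mdsc_free_path()
 * rather than kfree():
 *
 *	path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0);
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	...
 *	ceph_mdsc_free_path(path, pathlen);
 */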
2449
2450 static int build_dentry_path(struct dentry *dentry, struct inode *dir,
2451 const char **ppath, int *ppathlen, u64 *pino,
2452 bool *pfreepath, bool parent_locked)
2453 {
2454 char *path;
2455
2456 rcu_read_lock();
2457 if (!dir)
2458 dir = d_inode_rcu(dentry->d_parent);
2459 if (dir && parent_locked && ceph_snap(dir) == CEPH_NOSNAP) {
2460 *pino = ceph_ino(dir);
2461 rcu_read_unlock();
2462 *ppath = dentry->d_name.name;
2463 *ppathlen = dentry->d_name.len;
2464 return 0;
2465 }
2466 rcu_read_unlock();
2467 path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
2468 if (IS_ERR(path))
2469 return PTR_ERR(path);
2470 *ppath = path;
2471 *pfreepath = true;
2472 return 0;
2473 }
2474
2475 static int build_inode_path(struct inode *inode,
2476 const char **ppath, int *ppathlen, u64 *pino,
2477 bool *pfreepath)
2478 {
2479 struct dentry *dentry;
2480 char *path;
2481
2482 if (ceph_snap(inode) == CEPH_NOSNAP) {
2483 *pino = ceph_ino(inode);
2484 *ppathlen = 0;
2485 return 0;
2486 }
2487 dentry = d_find_alias(inode);
2488 path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
2489 dput(dentry);
2490 if (IS_ERR(path))
2491 return PTR_ERR(path);
2492 *ppath = path;
2493 *pfreepath = true;
2494 return 0;
2495 }
2496
2497 /*
2498 * request arguments may be specified via an inode *, a dentry *, or
2499 * an explicit ino+path.
2500 */
2501 static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
2502 struct inode *rdiri, const char *rpath,
2503 u64 rino, const char **ppath, int *pathlen,
2504 u64 *ino, bool *freepath, bool parent_locked)
2505 {
2506 int r = 0;
2507
2508 if (rinode) {
2509 r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
2510 dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
2511 ceph_snap(rinode));
2512 } else if (rdentry) {
2513 r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
2514 freepath, parent_locked);
2515 dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
2516 *ppath);
2517 } else if (rpath || rino) {
2518 *ino = rino;
2519 *ppath = rpath;
2520 *pathlen = rpath ? strlen(rpath) : 0;
2521 dout(" path %.*s\n", *pathlen, rpath);
2522 }
2523
2524 return r;
2525 }
2526
2527 static void encode_timestamp_and_gids(void **p,
2528 const struct ceph_mds_request *req)
2529 {
2530 struct ceph_timespec ts;
2531 int i;
2532
2533 ceph_encode_timespec64(&ts, &req->r_stamp);
2534 ceph_encode_copy(p, &ts, sizeof(ts));
2535
2536 /* gid_list */
2537 ceph_encode_32(p, req->r_cred->group_info->ngroups);
2538 for (i = 0; i < req->r_cred->group_info->ngroups; i++)
2539 ceph_encode_64(p, from_kgid(&init_user_ns,
2540 req->r_cred->group_info->gid[i]));
2541 }
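
/*
 * Wire sketch (illustrative): this helper appends
 *
 *	struct ceph_timespec ts;    r_stamp
 *	u32 ngroups;
 *	u64 gid[ngroups];           from req->r_cred
 *
 * It is also re-run on replay: __prepare_send_request() rewinds to
 * r_request_release_offset and encodes it again after dropping the
 * cap/dentry releases.
 */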
2542
2543 /*
2544 * called under mdsc->mutex
2545 */
2546 static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
2547 struct ceph_mds_request *req,
2548 bool drop_cap_releases)
2549 {
2550 int mds = session->s_mds;
2551 struct ceph_mds_client *mdsc = session->s_mdsc;
2552 struct ceph_msg *msg;
2553 struct ceph_mds_request_head_old *head;
2554 const char *path1 = NULL;
2555 const char *path2 = NULL;
2556 u64 ino1 = 0, ino2 = 0;
2557 int pathlen1 = 0, pathlen2 = 0;
2558 bool freepath1 = false, freepath2 = false;
2559 int len;
2560 u16 releases;
2561 void *p, *end;
2562 int ret;
2563 bool legacy = !(session->s_con.peer_features & CEPH_FEATURE_FS_BTIME);
2564
2565 ret = set_request_path_attr(req->r_inode, req->r_dentry,
2566 req->r_parent, req->r_path1, req->r_ino1.ino,
2567 &path1, &pathlen1, &ino1, &freepath1,
2568 test_bit(CEPH_MDS_R_PARENT_LOCKED,
2569 &req->r_req_flags));
2570 if (ret < 0) {
2571 msg = ERR_PTR(ret);
2572 goto out;
2573 }
2574
2575 /* If r_old_dentry is set, then assume that its parent is locked */
2576 ret = set_request_path_attr(NULL, req->r_old_dentry,
2577 req->r_old_dentry_dir,
2578 req->r_path2, req->r_ino2.ino,
2579 &path2, &pathlen2, &ino2, &freepath2, true);
2580 if (ret < 0) {
2581 msg = ERR_PTR(ret);
2582 goto out_free1;
2583 }
2584
2585 len = legacy ? sizeof(*head) : sizeof(struct ceph_mds_request_head);
2586 len += pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
2587 sizeof(struct ceph_timespec);
2588 len += sizeof(u32) + (sizeof(u64) * req->r_cred->group_info->ngroups);
2589
2590 /* calculate (max) length for cap releases */
2591 len += sizeof(struct ceph_mds_request_release) *
2592 (!!req->r_inode_drop + !!req->r_dentry_drop +
2593 !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
2594
2595 if (req->r_dentry_drop)
2596 len += pathlen1;
2597 if (req->r_old_dentry_drop)
2598 len += pathlen2;
2599
2600 msg = ceph_msg_new2(CEPH_MSG_CLIENT_REQUEST, len, 1, GFP_NOFS, false);
2601 if (!msg) {
2602 msg = ERR_PTR(-ENOMEM);
2603 goto out_free2;
2604 }
2605
2606 msg->hdr.tid = cpu_to_le64(req->r_tid);
2607
2608 /*
2609 * The old ceph_mds_request_head didn't contain a version field, and
2610 * one was added when we moved the message version from 3->4.
2611 */
2612 if (legacy) {
2613 msg->hdr.version = cpu_to_le16(3);
2614 head = msg->front.iov_base;
2615 p = msg->front.iov_base + sizeof(*head);
2616 } else {
2617 struct ceph_mds_request_head *new_head = msg->front.iov_base;
2618
2619 msg->hdr.version = cpu_to_le16(4);
2620 new_head->version = cpu_to_le16(CEPH_MDS_REQUEST_HEAD_VERSION);
2621 head = (struct ceph_mds_request_head_old *)&new_head->oldest_client_tid;
2622 p = msg->front.iov_base + sizeof(*new_head);
2623 }
2624
2625 end = msg->front.iov_base + msg->front.iov_len;
2626
2627 head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
2628 head->op = cpu_to_le32(req->r_op);
2629 head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns,
2630 req->r_cred->fsuid));
2631 head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns,
2632 req->r_cred->fsgid));
2633 head->ino = cpu_to_le64(req->r_deleg_ino);
2634 head->args = req->r_args;
2635
2636 ceph_encode_filepath(&p, end, ino1, path1);
2637 ceph_encode_filepath(&p, end, ino2, path2);
2638
2639 /* make note of release offset, in case we need to replay */
2640 req->r_request_release_offset = p - msg->front.iov_base;
2641
2642 /* cap releases */
2643 releases = 0;
2644 if (req->r_inode_drop)
2645 releases += ceph_encode_inode_release(&p,
2646 req->r_inode ? req->r_inode : d_inode(req->r_dentry),
2647 mds, req->r_inode_drop, req->r_inode_unless,
2648 req->r_op == CEPH_MDS_OP_READDIR);
2649 if (req->r_dentry_drop)
2650 releases += ceph_encode_dentry_release(&p, req->r_dentry,
2651 req->r_parent, mds, req->r_dentry_drop,
2652 req->r_dentry_unless);
2653 if (req->r_old_dentry_drop)
2654 releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
2655 req->r_old_dentry_dir, mds,
2656 req->r_old_dentry_drop,
2657 req->r_old_dentry_unless);
2658 if (req->r_old_inode_drop)
2659 releases += ceph_encode_inode_release(&p,
2660 d_inode(req->r_old_dentry),
2661 mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
2662
2663 if (drop_cap_releases) {
2664 releases = 0;
2665 p = msg->front.iov_base + req->r_request_release_offset;
2666 }
2667
2668 head->num_releases = cpu_to_le16(releases);
2669
2670 encode_timestamp_and_gids(&p, req);
2671
2672 if (WARN_ON_ONCE(p > end)) {
2673 ceph_msg_put(msg);
2674 msg = ERR_PTR(-ERANGE);
2675 goto out_free2;
2676 }
2677
2678 msg->front.iov_len = p - msg->front.iov_base;
2679 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2680
2681 if (req->r_pagelist) {
2682 struct ceph_pagelist *pagelist = req->r_pagelist;
2683 ceph_msg_data_add_pagelist(msg, pagelist);
2684 msg->hdr.data_len = cpu_to_le32(pagelist->length);
2685 } else {
2686 msg->hdr.data_len = 0;
2687 }
2688
2689 msg->hdr.data_off = cpu_to_le16(0);
2690
2691 out_free2:
2692 if (freepath2)
2693 ceph_mdsc_free_path((char *)path2, pathlen2);
2694 out_free1:
2695 if (freepath1)
2696 ceph_mdsc_free_path((char *)path1, pathlen1);
2697 out:
2698 return msg;
2699 }
2700
2701 /*
2702 * called under mdsc->mutex if error, under no mutex if
2703 * success.
2704 */
2705 static void complete_request(struct ceph_mds_client *mdsc,
2706 struct ceph_mds_request *req)
2707 {
2708 req->r_end_latency = ktime_get();
2709
2710 if (req->r_callback)
2711 req->r_callback(mdsc, req);
2712 complete_all(&req->r_completion);
2713 }
2714
2715 static struct ceph_mds_request_head_old *
2716 find_old_request_head(void *p, u64 features)
2717 {
2718 bool legacy = !(features & CEPH_FEATURE_FS_BTIME);
2719 struct ceph_mds_request_head *new_head;
2720
2721 if (legacy)
2722 return (struct ceph_mds_request_head_old *)p;
2723 new_head = (struct ceph_mds_request_head *)p;
2724 return (struct ceph_mds_request_head_old *)&new_head->oldest_client_tid;
2725 }
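
/*
 * Layout sketch (illustrative): struct ceph_mds_request_head is the old
 * head with a version field prepended, so the two layouts share a tail
 * starting at oldest_client_tid:
 *
 *	new: [ version | oldest_client_tid | mdsmap_epoch | ... ]
 *	old:           [ oldest_client_tid | mdsmap_epoch | ... ]
 *
 * which is why the cast through &new_head->oldest_client_tid is safe.
 */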
2726
2727 /*
2728 * called under mdsc->mutex
2729 */
2730 static int __prepare_send_request(struct ceph_mds_session *session,
2731 struct ceph_mds_request *req,
2732 bool drop_cap_releases)
2733 {
2734 int mds = session->s_mds;
2735 struct ceph_mds_client *mdsc = session->s_mdsc;
2736 struct ceph_mds_request_head_old *rhead;
2737 struct ceph_msg *msg;
2738 int flags = 0;
2739
2740 req->r_attempts++;
2741 if (req->r_inode) {
2742 struct ceph_cap *cap =
2743 ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);
2744
2745 if (cap)
2746 req->r_sent_on_mseq = cap->mseq;
2747 else
2748 req->r_sent_on_mseq = -1;
2749 }
2750 dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
2751 req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);
2752
2753 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
2754 void *p;
2755
2756 /*
2757 * Replay. Do not regenerate message (and rebuild
2758 * paths, etc.); just use the original message.
2759 * Rebuilding paths will break for renames because
2760 * d_move mangles the src name.
2761 */
2762 msg = req->r_request;
2763 rhead = find_old_request_head(msg->front.iov_base,
2764 session->s_con.peer_features);
2765
2766 flags = le32_to_cpu(rhead->flags);
2767 flags |= CEPH_MDS_FLAG_REPLAY;
2768 rhead->flags = cpu_to_le32(flags);
2769
2770 if (req->r_target_inode)
2771 rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
2772
2773 rhead->num_retry = req->r_attempts - 1;
2774
2775 /* remove cap/dentry releases from message */
2776 rhead->num_releases = 0;
2777
2778 p = msg->front.iov_base + req->r_request_release_offset;
2779 encode_timestamp_and_gids(&p, req);
2780
2781 msg->front.iov_len = p - msg->front.iov_base;
2782 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2783 return 0;
2784 }
2785
2786 if (req->r_request) {
2787 ceph_msg_put(req->r_request);
2788 req->r_request = NULL;
2789 }
2790 msg = create_request_message(session, req, drop_cap_releases);
2791 if (IS_ERR(msg)) {
2792 req->r_err = PTR_ERR(msg);
2793 return PTR_ERR(msg);
2794 }
2795 req->r_request = msg;
2796
2797 rhead = find_old_request_head(msg->front.iov_base,
2798 session->s_con.peer_features);
2799 rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
2800 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
2801 flags |= CEPH_MDS_FLAG_REPLAY;
2802 if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags))
2803 flags |= CEPH_MDS_FLAG_ASYNC;
2804 if (req->r_parent)
2805 flags |= CEPH_MDS_FLAG_WANT_DENTRY;
2806 rhead->flags = cpu_to_le32(flags);
2807 rhead->num_fwd = req->r_num_fwd;
2808 rhead->num_retry = req->r_attempts - 1;
2809
2810 dout(" r_parent = %p\n", req->r_parent);
2811 return 0;
2812 }
2813
2814 /*
2815 * called under mdsc->mutex
2816 */
2817 static int __send_request(struct ceph_mds_session *session,
2818 struct ceph_mds_request *req,
2819 bool drop_cap_releases)
2820 {
2821 int err;
2822
2823 err = __prepare_send_request(session, req, drop_cap_releases);
2824 if (!err) {
2825 ceph_msg_get(req->r_request);
2826 ceph_con_send(&session->s_con, req->r_request);
2827 }
2828
2829 return err;
2830 }
2831
2832 /*
2833 * send request, or put it on the appropriate wait list.
2834 */
2835 static void __do_request(struct ceph_mds_client *mdsc,
2836 struct ceph_mds_request *req)
2837 {
2838 struct ceph_mds_session *session = NULL;
2839 int mds = -1;
2840 int err = 0;
2841 bool random;
2842
2843 if (req->r_err || test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
2844 if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
2845 __unregister_request(mdsc, req);
2846 return;
2847 }
2848
2849 if (req->r_timeout &&
2850 time_after_eq(jiffies, req->r_started + req->r_timeout)) {
2851 dout("do_request timed out\n");
2852 err = -ETIMEDOUT;
2853 goto finish;
2854 }
2855 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
2856 dout("do_request forced umount\n");
2857 err = -EIO;
2858 goto finish;
2859 }
2860 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) {
2861 if (mdsc->mdsmap_err) {
2862 err = mdsc->mdsmap_err;
2863 dout("do_request mdsmap err %d\n", err);
2864 goto finish;
2865 }
2866 if (mdsc->mdsmap->m_epoch == 0) {
2867 dout("do_request no mdsmap, waiting for map\n");
2868 list_add(&req->r_wait, &mdsc->waiting_for_map);
2869 return;
2870 }
2871 if (!(mdsc->fsc->mount_options->flags &
2872 CEPH_MOUNT_OPT_MOUNTWAIT) &&
2873 !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) {
2874 err = -EHOSTUNREACH;
2875 goto finish;
2876 }
2877 }
2878
2879 put_request_session(req);
2880
2881 mds = __choose_mds(mdsc, req, &random);
2882 if (mds < 0 ||
2883 ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
2884 if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) {
2885 err = -EJUKEBOX;
2886 goto finish;
2887 }
2888 dout("do_request no mds or not active, waiting for map\n");
2889 list_add(&req->r_wait, &mdsc->waiting_for_map);
2890 return;
2891 }
2892
2893 /* get, open session */
2894 session = __ceph_lookup_mds_session(mdsc, mds);
2895 if (!session) {
2896 session = register_session(mdsc, mds);
2897 if (IS_ERR(session)) {
2898 err = PTR_ERR(session);
2899 goto finish;
2900 }
2901 }
2902 req->r_session = ceph_get_mds_session(session);
2903
2904 dout("do_request mds%d session %p state %s\n", mds, session,
2905 ceph_session_state_name(session->s_state));
2906 if (session->s_state != CEPH_MDS_SESSION_OPEN &&
2907 session->s_state != CEPH_MDS_SESSION_HUNG) {
2908 /*
2909 * We cannot queue async requests since the caps and delegated
2910 * inodes are bound to the session. Just return -EJUKEBOX and
2911 * let the caller retry a sync request in that case.
2912 */
2913 if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) {
2914 err = -EJUKEBOX;
2915 goto out_session;
2916 }
2917
2918 /*
2919 * If the session has been REJECTED, then return a hard error,
2920 * unless it's a CLEANRECOVER mount, in which case we'll queue
2921 * it to the mdsc queue.
2922 */
2923 if (session->s_state == CEPH_MDS_SESSION_REJECTED) {
2924 if (ceph_test_mount_opt(mdsc->fsc, CLEANRECOVER))
2925 list_add(&req->r_wait, &mdsc->waiting_for_map);
2926 else
2927 err = -EACCES;
2928 goto out_session;
2929 }
2930
2931 if (session->s_state == CEPH_MDS_SESSION_NEW ||
2932 session->s_state == CEPH_MDS_SESSION_CLOSING) {
2933 err = __open_session(mdsc, session);
2934 if (err)
2935 goto out_session;
2936 /* retry the same mds later */
2937 if (random)
2938 req->r_resend_mds = mds;
2939 }
2940 list_add(&req->r_wait, &session->s_waiting);
2941 goto out_session;
2942 }
2943
2944 /* send request */
2945 req->r_resend_mds = -1; /* forget any previous mds hint */
2946
2947 if (req->r_request_started == 0) /* note request start time */
2948 req->r_request_started = jiffies;
2949
2950 err = __send_request(session, req, false);
2951
2952 out_session:
2953 ceph_put_mds_session(session);
2954 finish:
2955 if (err) {
2956 dout("__do_request early error %d\n", err);
2957 req->r_err = err;
2958 complete_request(mdsc, req);
2959 __unregister_request(mdsc, req);
2960 }
2961 return;
2962 }
2963
2964 /*
2965 * called under mdsc->mutex
2966 */
2967 static void __wake_requests(struct ceph_mds_client *mdsc,
2968 struct list_head *head)
2969 {
2970 struct ceph_mds_request *req;
2971 LIST_HEAD(tmp_list);
2972
2973 list_splice_init(head, &tmp_list);
2974
2975 while (!list_empty(&tmp_list)) {
2976 req = list_entry(tmp_list.next,
2977 struct ceph_mds_request, r_wait);
2978 list_del_init(&req->r_wait);
2979 dout(" wake request %p tid %llu\n", req, req->r_tid);
2980 __do_request(mdsc, req);
2981 }
2982 }
2983
2984 /*
2985 * Wake up threads with requests pending for @mds, so that they can
2986 * resubmit their requests to a possibly different mds.
2987 */
2988 static void kick_requests(struct ceph_mds_client *mdsc, int mds)
2989 {
2990 struct ceph_mds_request *req;
2991 struct rb_node *p = rb_first(&mdsc->request_tree);
2992
2993 dout("kick_requests mds%d\n", mds);
2994 while (p) {
2995 req = rb_entry(p, struct ceph_mds_request, r_node);
2996 p = rb_next(p);
2997 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
2998 continue;
2999 if (req->r_attempts > 0)
3000 continue; /* only new requests */
3001 if (req->r_session &&
3002 req->r_session->s_mds == mds) {
3003 dout(" kicking tid %llu\n", req->r_tid);
3004 list_del_init(&req->r_wait);
3005 __do_request(mdsc, req);
3006 }
3007 }
3008 }
3009
3010 int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
3011 struct ceph_mds_request *req)
3012 {
3013 int err = 0;
3014
3015 /* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */
3016 if (req->r_inode)
3017 ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
3018 if (req->r_parent) {
3019 struct ceph_inode_info *ci = ceph_inode(req->r_parent);
3020 int fmode = (req->r_op & CEPH_MDS_OP_WRITE) ?
3021 CEPH_FILE_MODE_WR : CEPH_FILE_MODE_RD;
3022 spin_lock(&ci->i_ceph_lock);
3023 ceph_take_cap_refs(ci, CEPH_CAP_PIN, false);
3024 __ceph_touch_fmode(ci, mdsc, fmode);
3025 spin_unlock(&ci->i_ceph_lock);
3026 }
3027 if (req->r_old_dentry_dir)
3028 ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
3029 CEPH_CAP_PIN);
3030
3031 if (req->r_inode) {
3032 err = ceph_wait_on_async_create(req->r_inode);
3033 if (err) {
3034 dout("%s: wait for async create returned: %d\n",
3035 __func__, err);
3036 return err;
3037 }
3038 }
3039
3040 if (!err && req->r_old_inode) {
3041 err = ceph_wait_on_async_create(req->r_old_inode);
3042 if (err) {
3043 dout("%s: wait for async create returned: %d\n",
3044 __func__, err);
3045 return err;
3046 }
3047 }
3048
3049 dout("submit_request on %p for inode %p\n", req, dir);
3050 mutex_lock(&mdsc->mutex);
3051 __register_request(mdsc, req, dir);
3052 __do_request(mdsc, req);
3053 err = req->r_err;
3054 mutex_unlock(&mdsc->mutex);
3055 return err;
3056 }
3057
3058 static int ceph_mdsc_wait_request(struct ceph_mds_client *mdsc,
3059 struct ceph_mds_request *req)
3060 {
3061 int err;
3062
3063 /* wait */
3064 dout("do_request waiting\n");
3065 if (!req->r_timeout && req->r_wait_for_completion) {
3066 err = req->r_wait_for_completion(mdsc, req);
3067 } else {
3068 long timeleft = wait_for_completion_killable_timeout(
3069 &req->r_completion,
3070 ceph_timeout_jiffies(req->r_timeout));
3071 if (timeleft > 0)
3072 err = 0;
3073 else if (!timeleft)
3074 err = -ETIMEDOUT; /* timed out */
3075 else
3076 err = timeleft; /* killed */
3077 }
3078 dout("do_request waited, got %d\n", err);
3079 mutex_lock(&mdsc->mutex);
3080
3081 /* only abort if we didn't race with a real reply */
3082 if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
3083 err = le32_to_cpu(req->r_reply_info.head->result);
3084 } else if (err < 0) {
3085 dout("aborted request %lld with %d\n", req->r_tid, err);
3086
3087 /*
3088 * ensure we aren't running concurrently with
3089 * ceph_fill_trace or ceph_readdir_prepopulate, which
3090 * rely on locks (dir mutex) held by our caller.
3091 */
3092 mutex_lock(&req->r_fill_mutex);
3093 req->r_err = err;
3094 set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
3095 mutex_unlock(&req->r_fill_mutex);
3096
3097 if (req->r_parent &&
3098 (req->r_op & CEPH_MDS_OP_WRITE))
3099 ceph_invalidate_dir_request(req);
3100 } else {
3101 err = req->r_err;
3102 }
3103
3104 mutex_unlock(&mdsc->mutex);
3105 return err;
3106 }
3107
3108 /*
3109 * Synchronously perform an mds request. Take care of all of the
3110 * session setup, forwarding, and retry details.
3111 */
3112 int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
3113 struct inode *dir,
3114 struct ceph_mds_request *req)
3115 {
3116 int err;
3117
3118 dout("do_request on %p\n", req);
3119
3120 /* issue */
3121 err = ceph_mdsc_submit_request(mdsc, dir, req);
3122 if (!err)
3123 err = ceph_mdsc_wait_request(mdsc, req);
3124 dout("do_request %p done, result %d\n", req, err);
3125 return err;
3126 }
3127
3128 /*
3129 * Invalidate dir's completeness, dentry lease state on an aborted MDS
3130 * namespace request.
3131 */
3132 void ceph_invalidate_dir_request(struct ceph_mds_request *req)
3133 {
3134 struct inode *dir = req->r_parent;
3135 struct inode *old_dir = req->r_old_dentry_dir;
3136
3137 dout("invalidate_dir_request %p %p (complete, lease(s))\n", dir, old_dir);
3138
3139 ceph_dir_clear_complete(dir);
3140 if (old_dir)
3141 ceph_dir_clear_complete(old_dir);
3142 if (req->r_dentry)
3143 ceph_invalidate_dentry_lease(req->r_dentry);
3144 if (req->r_old_dentry)
3145 ceph_invalidate_dentry_lease(req->r_old_dentry);
3146 }
3147
3148 /*
3149 * Handle mds reply.
3150 *
3151 * We take the session mutex and parse and process the reply immediately.
3152 * This preserves the logical ordering of replies, capabilities, etc., sent
3153 * by the MDS as they are applied to our local cache.
3154 */
3155 static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
3156 {
3157 struct ceph_mds_client *mdsc = session->s_mdsc;
3158 struct ceph_mds_request *req;
3159 struct ceph_mds_reply_head *head = msg->front.iov_base;
3160 struct ceph_mds_reply_info_parsed *rinfo; /* parsed reply info */
3161 struct ceph_snap_realm *realm;
3162 u64 tid;
3163 int err, result;
3164 int mds = session->s_mds;
3165
3166 if (msg->front.iov_len < sizeof(*head)) {
3167 pr_err("mdsc_handle_reply got corrupt (short) reply\n");
3168 ceph_msg_dump(msg);
3169 return;
3170 }
3171
3172 /* get request, session */
3173 tid = le64_to_cpu(msg->hdr.tid);
3174 mutex_lock(&mdsc->mutex);
3175 req = lookup_get_request(mdsc, tid);
3176 if (!req) {
3177 dout("handle_reply on unknown tid %llu\n", tid);
3178 mutex_unlock(&mdsc->mutex);
3179 return;
3180 }
3181 dout("handle_reply %p\n", req);
3182
3183 /* correct session? */
3184 if (req->r_session != session) {
3185 pr_err("mdsc_handle_reply got %llu on session mds%d"
3186 " not mds%d\n", tid, session->s_mds,
3187 req->r_session ? req->r_session->s_mds : -1);
3188 mutex_unlock(&mdsc->mutex);
3189 goto out;
3190 }
3191
3192 /* dup? */
3193 if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) ||
3194 (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) {
3195 pr_warn("got a dup %s reply on %llu from mds%d\n",
3196 head->safe ? "safe" : "unsafe", tid, mds);
3197 mutex_unlock(&mdsc->mutex);
3198 goto out;
3199 }
3200 if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags)) {
3201 pr_warn("got unsafe after safe on %llu from mds%d\n",
3202 tid, mds);
3203 mutex_unlock(&mdsc->mutex);
3204 goto out;
3205 }
3206
3207 result = le32_to_cpu(head->result);
3208
3209 /*
3210 * Handle an ESTALE
3211 * if we're not talking to the authority, send to them
3212 * if the authority has changed while we weren't looking,
3213 * send to new authority
3214 * Otherwise we just have to return an ESTALE
3215 */
3216 if (result == -ESTALE) {
3217 dout("got ESTALE on request %llu\n", req->r_tid);
3218 req->r_resend_mds = -1;
3219 if (req->r_direct_mode != USE_AUTH_MDS) {
3220 dout("not using auth, setting for that now\n");
3221 req->r_direct_mode = USE_AUTH_MDS;
3222 __do_request(mdsc, req);
3223 mutex_unlock(&mdsc->mutex);
3224 goto out;
3225 } else {
3226 int mds = __choose_mds(mdsc, req, NULL);
3227 if (mds >= 0 && mds != req->r_session->s_mds) {
3228 dout("but auth changed, so resending\n");
3229 __do_request(mdsc, req);
3230 mutex_unlock(&mdsc->mutex);
3231 goto out;
3232 }
3233 }
3234 dout("have to return ESTALE on request %llu\n", req->r_tid);
3235 }
3236
3237
3238 if (head->safe) {
3239 set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags);
3240 __unregister_request(mdsc, req);
3241
3242 /* last request during umount? */
3243 if (mdsc->stopping && !__get_oldest_req(mdsc))
3244 complete_all(&mdsc->safe_umount_waiters);
3245
3246 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
3247 /*
3248 * We already handled the unsafe response, now do the
3249 * cleanup. No need to examine the response; the MDS
3250 * doesn't include any result info in the safe
3251 * response. And even if it did, there is nothing
3252 * useful we could do with a revised return value.
3253 */
3254 dout("got safe reply %llu, mds%d\n", tid, mds);
3255
3256 mutex_unlock(&mdsc->mutex);
3257 goto out;
3258 }
3259 } else {
3260 set_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags);
3261 list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
3262 }
3263
3264 dout("handle_reply tid %lld result %d\n", tid, result);
3265 rinfo = &req->r_reply_info;
3266 if (test_bit(CEPHFS_FEATURE_REPLY_ENCODING, &session->s_features))
3267 err = parse_reply_info(session, msg, rinfo, (u64)-1);
3268 else
3269 err = parse_reply_info(session, msg, rinfo, session->s_con.peer_features);
3270 mutex_unlock(&mdsc->mutex);
3271
3272 /* Must find target inode outside of mutexes to avoid deadlocks */
3273 if ((err >= 0) && rinfo->head->is_target) {
3274 struct inode *in;
3275 struct ceph_vino tvino = {
3276 .ino = le64_to_cpu(rinfo->targeti.in->ino),
3277 .snap = le64_to_cpu(rinfo->targeti.in->snapid)
3278 };
3279
3280 in = ceph_get_inode(mdsc->fsc->sb, tvino);
3281 if (IS_ERR(in)) {
3282 err = PTR_ERR(in);
3283 mutex_lock(&session->s_mutex);
3284 goto out_err;
3285 }
3286 req->r_target_inode = in;
3287 }
3288
3289 mutex_lock(&session->s_mutex);
3290 if (err < 0) {
3291 pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid);
3292 ceph_msg_dump(msg);
3293 goto out_err;
3294 }
3295
3296 /* snap trace */
3297 realm = NULL;
3298 if (rinfo->snapblob_len) {
3299 down_write(&mdsc->snap_rwsem);
3300 ceph_update_snap_trace(mdsc, rinfo->snapblob,
3301 rinfo->snapblob + rinfo->snapblob_len,
3302 le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
3303 &realm);
3304 downgrade_write(&mdsc->snap_rwsem);
3305 } else {
3306 down_read(&mdsc->snap_rwsem);
3307 }
3308
3309 /* insert trace into our cache */
3310 mutex_lock(&req->r_fill_mutex);
3311 current->journal_info = req;
3312 err = ceph_fill_trace(mdsc->fsc->sb, req);
3313 if (err == 0) {
3314 if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
3315 req->r_op == CEPH_MDS_OP_LSSNAP))
3316 ceph_readdir_prepopulate(req, req->r_session);
3317 }
3318 current->journal_info = NULL;
3319 mutex_unlock(&req->r_fill_mutex);
3320
3321 up_read(&mdsc->snap_rwsem);
3322 if (realm)
3323 ceph_put_snap_realm(mdsc, realm);
3324
3325 if (err == 0) {
3326 if (req->r_target_inode &&
3327 test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
3328 struct ceph_inode_info *ci =
3329 ceph_inode(req->r_target_inode);
3330 spin_lock(&ci->i_unsafe_lock);
3331 list_add_tail(&req->r_unsafe_target_item,
3332 &ci->i_unsafe_iops);
3333 spin_unlock(&ci->i_unsafe_lock);
3334 }
3335
3336 ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
3337 }
3338 out_err:
3339 mutex_lock(&mdsc->mutex);
3340 if (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
3341 if (err) {
3342 req->r_err = err;
3343 } else {
3344 req->r_reply = ceph_msg_get(msg);
3345 set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags);
3346 }
3347 } else {
3348 dout("reply arrived after request %lld was aborted\n", tid);
3349 }
3350 mutex_unlock(&mdsc->mutex);
3351
3352 mutex_unlock(&session->s_mutex);
3353
3354 /* kick calling process */
3355 complete_request(mdsc, req);
3356
3357 ceph_update_metadata_metrics(&mdsc->metric, req->r_start_latency,
3358 req->r_end_latency, err);
3359 out:
3360 ceph_mdsc_put_request(req);
3361 return;
3362 }
3363
3364
3365
3366 /*
3367 * handle mds notification that our request has been forwarded.
3368 */
3369 static void handle_forward(struct ceph_mds_client *mdsc,
3370 struct ceph_mds_session *session,
3371 struct ceph_msg *msg)
3372 {
3373 struct ceph_mds_request *req;
3374 u64 tid = le64_to_cpu(msg->hdr.tid);
3375 u32 next_mds;
3376 u32 fwd_seq;
3377 int err = -EINVAL;
3378 void *p = msg->front.iov_base;
3379 void *end = p + msg->front.iov_len;
3380
3381 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3382 next_mds = ceph_decode_32(&p);
3383 fwd_seq = ceph_decode_32(&p);
3384
3385 mutex_lock(&mdsc->mutex);
3386 req = lookup_get_request(mdsc, tid);
3387 if (!req) {
3388 dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
3389 goto out; /* dup reply? */
3390 }
3391
3392 if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
3393 dout("forward tid %llu aborted, unregistering\n", tid);
3394 __unregister_request(mdsc, req);
3395 } else if (fwd_seq <= req->r_num_fwd) {
3396 dout("forward tid %llu to mds%d - old seq %d <= %d\n",
3397 tid, next_mds, req->r_num_fwd, fwd_seq);
3398 } else {
3399 /* resend. forward race not possible; mds would drop */
3400 dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
3401 BUG_ON(req->r_err);
3402 BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags));
3403 req->r_attempts = 0;
3404 req->r_num_fwd = fwd_seq;
3405 req->r_resend_mds = next_mds;
3406 put_request_session(req);
3407 __do_request(mdsc, req);
3408 }
3409 ceph_mdsc_put_request(req);
3410 out:
3411 mutex_unlock(&mdsc->mutex);
3412 return;
3413
3414 bad:
3415 pr_err("mdsc_handle_forward decode error err=%d\n", err);
3416 }
3417
3418 static int __decode_session_metadata(void **p, void *end,
3419 bool *blocklisted)
3420 {
3421 /* map<string,string> */
3422 u32 n;
3423 bool err_str;
3424 ceph_decode_32_safe(p, end, n, bad);
3425 while (n-- > 0) {
3426 u32 len;
3427 ceph_decode_32_safe(p, end, len, bad);
3428 ceph_decode_need(p, end, len, bad);
3429 err_str = !strncmp(*p, "error_string", len);
3430 *p += len;
3431 ceph_decode_32_safe(p, end, len, bad);
3432 ceph_decode_need(p, end, len, bad);
3433 /*
3434 * Match "blocklisted (blacklisted)" from newer MDSes,
3435 * or "blacklisted" from older MDSes.
3436 */
3437 if (err_str && strnstr(*p, "blacklisted", len))
3438 *blocklisted = true;
3439 *p += len;
3440 }
3441 return 0;
3442 bad:
3443 return -1;
3444 }
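
/*
 * Wire sketch (illustrative): the metadata is a ceph-encoded
 * map<string,string>:
 *
 *	u32 n;                        entry count
 *	repeat n times:
 *	    u32 klen; char key[klen];
 *	    u32 vlen; char val[vlen];
 *
 * A rejecting MDS typically includes an "error_string" entry; if its
 * value mentions blocklisting/blacklisting, *blocklisted is set for the
 * REJECT handler below.
 */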
3445
3446 /*
3447 * handle an mds session control message
3448 */
3449 static void handle_session(struct ceph_mds_session *session,
3450 struct ceph_msg *msg)
3451 {
3452 struct ceph_mds_client *mdsc = session->s_mdsc;
3453 int mds = session->s_mds;
3454 int msg_version = le16_to_cpu(msg->hdr.version);
3455 void *p = msg->front.iov_base;
3456 void *end = p + msg->front.iov_len;
3457 struct ceph_mds_session_head *h;
3458 u32 op;
3459 u64 seq, features = 0;
3460 int wake = 0;
3461 bool blocklisted = false;
3462
3463 /* decode */
3464 ceph_decode_need(&p, end, sizeof(*h), bad);
3465 h = p;
3466 p += sizeof(*h);
3467
3468 op = le32_to_cpu(h->op);
3469 seq = le64_to_cpu(h->seq);
3470
3471 if (msg_version >= 3) {
3472 u32 len;
3473 /* version >= 2, metadata */
3474 if (__decode_session_metadata(&p, end, &blocklisted) < 0)
3475 goto bad;
3476 /* version >= 3, feature bits */
3477 ceph_decode_32_safe(&p, end, len, bad);
3478 if (len) {
3479 ceph_decode_64_safe(&p, end, features, bad);
3480 p += len - sizeof(features);
3481 }
3482 }
3483
3484 mutex_lock(&mdsc->mutex);
3485 if (op == CEPH_SESSION_CLOSE) {
3486 ceph_get_mds_session(session);
3487 __unregister_session(mdsc, session);
3488 }
3489 /* FIXME: this ttl calculation is generous */
3490 session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
3491 mutex_unlock(&mdsc->mutex);
3492
3493 mutex_lock(&session->s_mutex);
3494
3495 dout("handle_session mds%d %s %p state %s seq %llu\n",
3496 mds, ceph_session_op_name(op), session,
3497 ceph_session_state_name(session->s_state), seq);
3498
3499 if (session->s_state == CEPH_MDS_SESSION_HUNG) {
3500 session->s_state = CEPH_MDS_SESSION_OPEN;
3501 pr_info("mds%d came back\n", session->s_mds);
3502 }
3503
3504 switch (op) {
3505 case CEPH_SESSION_OPEN:
3506 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
3507 pr_info("mds%d reconnect success\n", session->s_mds);
3508 session->s_state = CEPH_MDS_SESSION_OPEN;
3509 session->s_features = features;
3510 renewed_caps(mdsc, session, 0);
3511 if (test_bit(CEPHFS_FEATURE_METRIC_COLLECT, &session->s_features))
3512 metric_schedule_delayed(&mdsc->metric);
3513 wake = 1;
3514 if (mdsc->stopping)
3515 __close_session(mdsc, session);
3516 break;
3517
3518 case CEPH_SESSION_RENEWCAPS:
3519 if (session->s_renew_seq == seq)
3520 renewed_caps(mdsc, session, 1);
3521 break;
3522
3523 case CEPH_SESSION_CLOSE:
3524 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
3525 pr_info("mds%d reconnect denied\n", session->s_mds);
3526 session->s_state = CEPH_MDS_SESSION_CLOSED;
3527 cleanup_session_requests(mdsc, session);
3528 remove_session_caps(session);
3529 wake = 2; /* for good measure */
3530 wake_up_all(&mdsc->session_close_wq);
3531 break;
3532
3533 case CEPH_SESSION_STALE:
3534 pr_info("mds%d caps went stale, renewing\n",
3535 session->s_mds);
3536 atomic_inc(&session->s_cap_gen);
3537 session->s_cap_ttl = jiffies - 1;
3538 send_renew_caps(mdsc, session);
3539 break;
3540
3541 case CEPH_SESSION_RECALL_STATE:
3542 ceph_trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
3543 break;
3544
3545 case CEPH_SESSION_FLUSHMSG:
3546 /* flush cap releases */
3547 spin_lock(&session->s_cap_lock);
3548 if (session->s_num_cap_releases)
3549 ceph_flush_cap_releases(mdsc, session);
3550 spin_unlock(&session->s_cap_lock);
3551
3552 send_flushmsg_ack(mdsc, session, seq);
3553 break;
3554
3555 case CEPH_SESSION_FORCE_RO:
3556 dout("force_session_readonly %p\n", session);
3557 spin_lock(&session->s_cap_lock);
3558 session->s_readonly = true;
3559 spin_unlock(&session->s_cap_lock);
3560 wake_up_session_caps(session, FORCE_RO);
3561 break;
3562
3563 case CEPH_SESSION_REJECT:
3564 WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING);
3565 pr_info("mds%d rejected session\n", session->s_mds);
3566 session->s_state = CEPH_MDS_SESSION_REJECTED;
3567 cleanup_session_requests(mdsc, session);
3568 remove_session_caps(session);
3569 if (blocklisted)
3570 mdsc->fsc->blocklisted = true;
3571 wake = 2; /* for good measure */
3572 break;
3573
3574 default:
3575 pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
3576 WARN_ON(1);
3577 }
3578
3579 mutex_unlock(&session->s_mutex);
3580 if (wake) {
3581 mutex_lock(&mdsc->mutex);
3582 __wake_requests(mdsc, &session->s_waiting);
3583 if (wake == 2)
3584 kick_requests(mdsc, mds);
3585 mutex_unlock(&mdsc->mutex);
3586 }
3587 if (op == CEPH_SESSION_CLOSE)
3588 ceph_put_mds_session(session);
3589 return;
3590
3591 bad:
3592 pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
3593 (int)msg->front.iov_len);
3594 ceph_msg_dump(msg);
3595 return;
3596 }
3597
3598 void ceph_mdsc_release_dir_caps(struct ceph_mds_request *req)
3599 {
3600 int dcaps;
3601
3602 dcaps = xchg(&req->r_dir_caps, 0);
3603 if (dcaps) {
3604 dout("releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
3605 ceph_put_cap_refs(ceph_inode(req->r_parent), dcaps);
3606 }
3607 }
3608
3609 void ceph_mdsc_release_dir_caps_no_check(struct ceph_mds_request *req)
3610 {
3611 int dcaps;
3612
3613 dcaps = xchg(&req->r_dir_caps, 0);
3614 if (dcaps) {
3615 dout("releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
3616 ceph_put_cap_refs_no_check_caps(ceph_inode(req->r_parent),
3617 dcaps);
3618 }
3619 }
3620
3621 /*
3622 * called under session->s_mutex.
3623 */
3624 static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
3625 struct ceph_mds_session *session)
3626 {
3627 struct ceph_mds_request *req, *nreq;
3628 struct rb_node *p;
3629
3630 dout("replay_unsafe_requests mds%d\n", session->s_mds);
3631
3632 mutex_lock(&mdsc->mutex);
3633 list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item)
3634 __send_request(session, req, true);
3635
3636 /*
3637 * also re-send old requests when the MDS enters the reconnect stage,
3638 * so that the MDS can process completed requests in its clientreplay stage.
3639 */
3640 p = rb_first(&mdsc->request_tree);
3641 while (p) {
3642 req = rb_entry(p, struct ceph_mds_request, r_node);
3643 p = rb_next(p);
3644 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
3645 continue;
3646 if (req->r_attempts == 0)
3647 continue; /* only old requests */
3648 if (!req->r_session)
3649 continue;
3650 if (req->r_session->s_mds != session->s_mds)
3651 continue;
3652
3653 ceph_mdsc_release_dir_caps_no_check(req);
3654
3655 __send_request(session, req, true);
3656 }
3657 mutex_unlock(&mdsc->mutex);
3658 }
3659
3660 static int send_reconnect_partial(struct ceph_reconnect_state *recon_state)
3661 {
3662 struct ceph_msg *reply;
3663 struct ceph_pagelist *_pagelist;
3664 struct page *page;
3665 __le32 *addr;
3666 int err = -ENOMEM;
3667
3668 if (!recon_state->allow_multi)
3669 return -ENOSPC;
3670
3671 /* can't handle a message with both caps and realms (exactly one count must be nonzero) */
3672 BUG_ON(!recon_state->nr_caps == !recon_state->nr_realms);
3673
3674 /* pre-allocate new pagelist */
3675 _pagelist = ceph_pagelist_alloc(GFP_NOFS);
3676 if (!_pagelist)
3677 return -ENOMEM;
3678
3679 reply = ceph_msg_new2(CEPH_MSG_CLIENT_RECONNECT, 0, 1, GFP_NOFS, false);
3680 if (!reply)
3681 goto fail_msg;
3682
3683 /* placeholder for nr_caps in the follow-on message */
3684 err = ceph_pagelist_encode_32(_pagelist, 0);
3685 if (err < 0)
3686 goto fail;
3687
3688 if (recon_state->nr_caps) {
3689 /* currently encoding caps: append nr_realms (0) to finish this message */
3690 err = ceph_pagelist_encode_32(recon_state->pagelist, 0);
3691 if (err)
3692 goto fail;
3693 } else {
3694 /* placeholder for nr_realms (currently encoding realms) */
3695 err = ceph_pagelist_encode_32(_pagelist, 0);
3696 if (err < 0)
3697 goto fail;
3698 }
3699
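/*
 * v5 encoding: a trailing flag byte of 1 marks this message as
 * partial, i.e. more reconnect messages will follow; the final
 * message encodes 0 instead (see send_mds_reconnect()).
 */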
3700 err = ceph_pagelist_encode_8(recon_state->pagelist, 1);
3701 if (err)
3702 goto fail;
3703
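/*
 * The u32 count placeholder was written at the very front of the
 * pagelist, so it sits in the first page; map it and patch in the
 * real cap/realm count before the message goes out.
 */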
3704 page = list_first_entry(&recon_state->pagelist->head, struct page, lru);
3705 addr = kmap_atomic(page);
3706 if (recon_state->nr_caps) {
3707 /* currently encoding caps */
3708 *addr = cpu_to_le32(recon_state->nr_caps);
3709 } else {
3710 /* currently encoding realms */
3711 *(addr + 1) = cpu_to_le32(recon_state->nr_realms);
3712 }
3713 kunmap_atomic(addr);
3714
3715 reply->hdr.version = cpu_to_le16(5);
3716 reply->hdr.compat_version = cpu_to_le16(4);
3717
3718 reply->hdr.data_len = cpu_to_le32(recon_state->pagelist->length);
3719 ceph_msg_data_add_pagelist(reply, recon_state->pagelist);
3720
3721 ceph_con_send(&recon_state->session->s_con, reply);
3722 ceph_pagelist_release(recon_state->pagelist);
3723
3724 recon_state->pagelist = _pagelist;
3725 recon_state->nr_caps = 0;
3726 recon_state->nr_realms = 0;
3727 recon_state->msg_version = 5;
3728 return 0;
3729 fail:
3730 ceph_msg_put(reply);
3731 fail_msg:
3732 ceph_pagelist_release(_pagelist);
3733 return err;
3734 }
3735
3736 static struct dentry* d_find_primary(struct inode *inode)
3737 {
3738 struct dentry *alias, *dn = NULL;
3739
3740 if (hlist_empty(&inode->i_dentry))
3741 return NULL;
3742
3743 spin_lock(&inode->i_lock);
3744 if (hlist_empty(&inode->i_dentry))
3745 goto out_unlock;
3746
3747 if (S_ISDIR(inode->i_mode)) {
3748 alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
3749 if (!IS_ROOT(alias))
3750 dn = dget(alias);
3751 goto out_unlock;
3752 }
3753
3754 hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
3755 spin_lock(&alias->d_lock);
3756 if (!d_unhashed(alias) &&
3757 (ceph_dentry(alias)->flags & CEPH_DENTRY_PRIMARY_LINK)) {
3758 dn = dget_dlock(alias);
3759 }
3760 spin_unlock(&alias->d_lock);
3761 if (dn)
3762 break;
3763 }
3764 out_unlock:
3765 spin_unlock(&inode->i_lock);
3766 return dn;
3767 }
3768
3769 /*
3770 * Encode information about a cap for a reconnect with the MDS.
3771 */
3772 static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
3773 void *arg)
3774 {
3775 union {
3776 struct ceph_mds_cap_reconnect v2;
3777 struct ceph_mds_cap_reconnect_v1 v1;
3778 } rec;
3779 struct ceph_inode_info *ci = cap->ci;
3780 struct ceph_reconnect_state *recon_state = arg;
3781 struct ceph_pagelist *pagelist = recon_state->pagelist;
3782 struct dentry *dentry;
3783 char *path;
3784 int pathlen = 0, err;
3785 u64 pathbase;
3786 u64 snap_follows;
3787
3788 dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
3789 inode, ceph_vinop(inode), cap, cap->cap_id,
3790 ceph_cap_string(cap->issued));
3791
3792 dentry = d_find_primary(inode);
3793 if (dentry) {
3794 /* set pathbase to parent dir when msg_version >= 2 */
3795 path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase,
3796 recon_state->msg_version >= 2);
3797 dput(dentry);
3798 if (IS_ERR(path)) {
3799 err = PTR_ERR(path);
3800 goto out_err;
3801 }
3802 } else {
3803 path = NULL;
3804 pathbase = 0;
3805 }
3806
3807 spin_lock(&ci->i_ceph_lock);
3808 cap->seq = 0; /* reset cap seq */
3809 cap->issue_seq = 0; /* and issue_seq */
3810 cap->mseq = 0; /* and migrate_seq */
3811 cap->cap_gen = atomic_read(&cap->session->s_cap_gen);
3812
3813 /* These are lost when the session goes away */
3814 if (S_ISDIR(inode->i_mode)) {
3815 if (cap->issued & CEPH_CAP_DIR_CREATE) {
3816 ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
3817 memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
3818 }
3819 cap->issued &= ~CEPH_CAP_ANY_DIR_OPS;
3820 }
3821
3822 if (recon_state->msg_version >= 2) {
3823 rec.v2.cap_id = cpu_to_le64(cap->cap_id);
3824 rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
3825 rec.v2.issued = cpu_to_le32(cap->issued);
3826 rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
3827 rec.v2.pathbase = cpu_to_le64(pathbase);
3828 rec.v2.flock_len = (__force __le32)
3829 ((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1);
3830 } else {
3831 rec.v1.cap_id = cpu_to_le64(cap->cap_id);
3832 rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
3833 rec.v1.issued = cpu_to_le32(cap->issued);
3834 rec.v1.size = cpu_to_le64(i_size_read(inode));
3835 ceph_encode_timespec64(&rec.v1.mtime, &inode->i_mtime);
3836 ceph_encode_timespec64(&rec.v1.atime, &inode->i_atime);
3837 rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
3838 rec.v1.pathbase = cpu_to_le64(pathbase);
3839 }
3840
3841 if (list_empty(&ci->i_cap_snaps)) {
3842 snap_follows = ci->i_head_snapc ? ci->i_head_snapc->seq : 0;
3843 } else {
3844 struct ceph_cap_snap *capsnap =
3845 list_first_entry(&ci->i_cap_snaps,
3846 struct ceph_cap_snap, ci_item);
3847 snap_follows = capsnap->follows;
3848 }
3849 spin_unlock(&ci->i_ceph_lock);
3850
3851 if (recon_state->msg_version >= 2) {
3852 int num_fcntl_locks, num_flock_locks;
3853 struct ceph_filelock *flocks = NULL;
3854 size_t struct_len, total_len = sizeof(u64);
3855 u8 struct_v = 0;
3856
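/*
 * The set of file locks can change between ceph_count_locks() and
 * ceph_encode_locks_to_buffer(); if the buffer turns out to be too
 * small (-ENOSPC), recount and retry from here.
 */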
3857 encode_again:
3858 if (rec.v2.flock_len) {
3859 ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
3860 } else {
3861 num_fcntl_locks = 0;
3862 num_flock_locks = 0;
3863 }
3864 if (num_fcntl_locks + num_flock_locks > 0) {
3865 flocks = kmalloc_array(num_fcntl_locks + num_flock_locks,
3866 sizeof(struct ceph_filelock),
3867 GFP_NOFS);
3868 if (!flocks) {
3869 err = -ENOMEM;
3870 goto out_err;
3871 }
3872 err = ceph_encode_locks_to_buffer(inode, flocks,
3873 num_fcntl_locks,
3874 num_flock_locks);
3875 if (err) {
3876 kfree(flocks);
3877 flocks = NULL;
3878 if (err == -ENOSPC)
3879 goto encode_again;
3880 goto out_err;
3881 }
3882 } else {
3883 kfree(flocks);
3884 flocks = NULL;
3885 }
3886
3887 if (recon_state->msg_version >= 3) {
3888 /* version, compat_version and struct_len */
3889 total_len += 2 * sizeof(u8) + sizeof(u32);
3890 struct_v = 2;
3891 }
3892 /*
3893 * number of encoded locks is stable, so copy to pagelist
3894 */
3895 struct_len = 2 * sizeof(u32) +
3896 (num_fcntl_locks + num_flock_locks) *
3897 sizeof(struct ceph_filelock);
3898 rec.v2.flock_len = cpu_to_le32(struct_len);
3899
3900 struct_len += sizeof(u32) + pathlen + sizeof(rec.v2);
3901
3902 if (struct_v >= 2)
3903 struct_len += sizeof(u64); /* snap_follows */
3904
3905 total_len += struct_len;
3906
3907 if (pagelist->length + total_len > RECONNECT_MAX_SIZE) {
3908 err = send_reconnect_partial(recon_state);
3909 if (err)
3910 goto out_freeflocks;
3911 pagelist = recon_state->pagelist;
3912 }
3913
3914 err = ceph_pagelist_reserve(pagelist, total_len);
3915 if (err)
3916 goto out_freeflocks;
3917
3918 ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
3919 if (recon_state->msg_version >= 3) {
3920 ceph_pagelist_encode_8(pagelist, struct_v);
3921 ceph_pagelist_encode_8(pagelist, 1);
3922 ceph_pagelist_encode_32(pagelist, struct_len);
3923 }
3924 ceph_pagelist_encode_string(pagelist, path, pathlen);
3925 ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
3926 ceph_locks_to_pagelist(flocks, pagelist,
3927 num_fcntl_locks, num_flock_locks);
3928 if (struct_v >= 2)
3929 ceph_pagelist_encode_64(pagelist, snap_follows);
3930 out_freeflocks:
3931 kfree(flocks);
3932 } else {
3933 err = ceph_pagelist_reserve(pagelist,
3934 sizeof(u64) + sizeof(u32) +
3935 pathlen + sizeof(rec.v1));
3936 if (err)
3937 goto out_err;
3938
3939 ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
3940 ceph_pagelist_encode_string(pagelist, path, pathlen);
3941 ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
3942 }
3943
3944 out_err:
3945 ceph_mdsc_free_path(path, pathlen);
3946 if (!err)
3947 recon_state->nr_caps++;
3948 return err;
3949 }
3950
3951 static int encode_snap_realms(struct ceph_mds_client *mdsc,
3952 struct ceph_reconnect_state *recon_state)
3953 {
3954 struct rb_node *p;
3955 struct ceph_pagelist *pagelist = recon_state->pagelist;
3956 int err = 0;
3957
3958 if (recon_state->msg_version >= 4) {
3959 err = ceph_pagelist_encode_32(pagelist, mdsc->num_snap_realms);
3960 if (err < 0)
3961 goto fail;
3962 }
3963
3964 /*
3965 * snaprealms. we provide mds with the ino, seq (version), and
3966 * parent for all of our realms. If the mds has any newer info,
3967 * it will tell us.
3968 */
3969 for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
3970 struct ceph_snap_realm *realm =
3971 rb_entry(p, struct ceph_snap_realm, node);
3972 struct ceph_mds_snaprealm_reconnect sr_rec;
3973
3974 if (recon_state->msg_version >= 4) {
3975 size_t need = sizeof(u8) * 2 + sizeof(u32) +
3976 sizeof(sr_rec);
3977
3978 if (pagelist->length + need > RECONNECT_MAX_SIZE) {
3979 err = send_reconnect_partial(recon_state);
3980 if (err)
3981 goto fail;
3982 pagelist = recon_state->pagelist;
3983 }
3984
3985 err = ceph_pagelist_reserve(pagelist, need);
3986 if (err)
3987 goto fail;
3988
3989 ceph_pagelist_encode_8(pagelist, 1);
3990 ceph_pagelist_encode_8(pagelist, 1);
3991 ceph_pagelist_encode_32(pagelist, sizeof(sr_rec));
3992 }
3993
3994 dout(" adding snap realm %llx seq %lld parent %llx\n",
3995 realm->ino, realm->seq, realm->parent_ino);
3996 sr_rec.ino = cpu_to_le64(realm->ino);
3997 sr_rec.seq = cpu_to_le64(realm->seq);
3998 sr_rec.parent = cpu_to_le64(realm->parent_ino);
3999
4000 err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
4001 if (err)
4002 goto fail;
4003
4004 recon_state->nr_realms++;
4005 }
4006 fail:
4007 return err;
4008 }
4009
4010
4011 /*
4012 * If an MDS fails and recovers, clients need to reconnect in order to
4013 * reestablish shared state. This includes all caps issued through
4014 * this session _and_ the snap_realm hierarchy. Because it's not
4015 * clear which snap realms the mds cares about, we send everything we
4016 * know about; that ensures we'll then get any new info the
4017 * recovering MDS might have.
4018 *
4019 * This is a relatively heavyweight operation, but it's rare.
4020 */
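/*
 * For illustration, the reconnect payload assembled below looks roughly
 * like this (pre-v2 cap records are just ino + path + rec.v1):
 *
 *   u32 nr_caps                          (placeholder, patched later)
 *   for each cap:
 *     u64 ino
 *     [v3+] u8 struct_v, u8 compat, u32 struct_len
 *     u32 path_len, char path[path_len]
 *     struct ceph_mds_cap_reconnect rec
 *     u32 num_fcntl_locks, u32 num_flock_locks, lock entries
 *     [struct_v >= 2] u64 snap_follows
 *   [v4+] u32 nr_realms
 *   for each snap realm:
 *     [v4+] u8 struct_v, u8 compat, u32 struct_len
 *     struct ceph_mds_snaprealm_reconnect sr_rec
 *   [v5]  u8 more-to-follow flag (1 on partial messages, 0 on the last)
 */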
4021 static void send_mds_reconnect(struct ceph_mds_client *mdsc,
4022 struct ceph_mds_session *session)
4023 {
4024 struct ceph_msg *reply;
4025 int mds = session->s_mds;
4026 int err = -ENOMEM;
4027 struct ceph_reconnect_state recon_state = {
4028 .session = session,
4029 };
4030 LIST_HEAD(dispose);
4031
4032 pr_info("mds%d reconnect start\n", mds);
4033
4034 recon_state.pagelist = ceph_pagelist_alloc(GFP_NOFS);
4035 if (!recon_state.pagelist)
4036 goto fail_nopagelist;
4037
4038 reply = ceph_msg_new2(CEPH_MSG_CLIENT_RECONNECT, 0, 1, GFP_NOFS, false);
4039 if (!reply)
4040 goto fail_nomsg;
4041
4042 xa_destroy(&session->s_delegated_inos);
4043
4044 mutex_lock(&session->s_mutex);
4045 session->s_state = CEPH_MDS_SESSION_RECONNECTING;
4046 session->s_seq = 0;
4047
4048 dout("session %p state %s\n", session,
4049 ceph_session_state_name(session->s_state));
4050
4051 atomic_inc(&session->s_cap_gen);
4052
4053 spin_lock(&session->s_cap_lock);
4054 /* don't know if session is readonly */
4055 session->s_readonly = 0;
4056 /*
4057 * notify __ceph_remove_cap() that we are composing cap reconnect.
4058 * If a cap gets released before being added to the cap reconnect,
4059 * __ceph_remove_cap() should skip queuing cap release.
4060 */
4061 session->s_cap_reconnect = 1;
4062 /* drop old cap expires; we're about to reestablish that state */
4063 detach_cap_releases(session, &dispose);
4064 spin_unlock(&session->s_cap_lock);
4065 dispose_cap_releases(mdsc, &dispose);
4066
4067 /* trim unused caps to reduce MDS's cache rejoin time */
4068 if (mdsc->fsc->sb->s_root)
4069 shrink_dcache_parent(mdsc->fsc->sb->s_root);
4070
4071 ceph_con_close(&session->s_con);
4072 ceph_con_open(&session->s_con,
4073 CEPH_ENTITY_TYPE_MDS, mds,
4074 ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
4075
4076 /* replay unsafe requests */
4077 replay_unsafe_requests(mdsc, session);
4078
4079 ceph_early_kick_flushing_caps(mdsc, session);
4080
4081 down_read(&mdsc->snap_rwsem);
4082
4083 /* placeholder for nr_caps */
4084 err = ceph_pagelist_encode_32(recon_state.pagelist, 0);
4085 if (err)
4086 goto fail;
4087
4088 if (test_bit(CEPHFS_FEATURE_MULTI_RECONNECT, &session->s_features)) {
4089 recon_state.msg_version = 3;
4090 recon_state.allow_multi = true;
4091 } else if (session->s_con.peer_features & CEPH_FEATURE_MDSENC) {
4092 recon_state.msg_version = 3;
4093 } else {
4094 recon_state.msg_version = 2;
4095 }
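/*
 * Roughly: msg_version 2 sends bare cap records; 3 wraps each record
 * in a struct_v/compat/struct_len header; 4 adds a versioned snap
 * realm section; 5 additionally allows splitting the reconnect
 * across multiple messages (see send_reconnect_partial()).
 */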
4096 /* traverse this session's caps */
4097 err = ceph_iterate_session_caps(session, reconnect_caps_cb, &recon_state);
4098
4099 spin_lock(&session->s_cap_lock);
4100 session->s_cap_reconnect = 0;
4101 spin_unlock(&session->s_cap_lock);
4102
4103 if (err < 0)
4104 goto fail;
4105
4106 /* check if all realms can be encoded into current message */
4107 if (mdsc->num_snap_realms) {
4108 size_t total_len =
4109 recon_state.pagelist->length +
4110 mdsc->num_snap_realms *
4111 sizeof(struct ceph_mds_snaprealm_reconnect);
4112 if (recon_state.msg_version >= 4) {
4113 /* number of realms */
4114 total_len += sizeof(u32);
4115 /* version, compat_version and struct_len */
4116 total_len += mdsc->num_snap_realms *
4117 (2 * sizeof(u8) + sizeof(u32));
4118 }
4119 if (total_len > RECONNECT_MAX_SIZE) {
4120 if (!recon_state.allow_multi) {
4121 err = -ENOSPC;
4122 goto fail;
4123 }
4124 if (recon_state.nr_caps) {
4125 err = send_reconnect_partial(&recon_state);
4126 if (err)
4127 goto fail;
4128 }
4129 recon_state.msg_version = 5;
4130 }
4131 }
4132
4133 err = encode_snap_realms(mdsc, &recon_state);
4134 if (err < 0)
4135 goto fail;
4136
4137 if (recon_state.msg_version >= 5) {
4138 err = ceph_pagelist_encode_8(recon_state.pagelist, 0);
4139 if (err < 0)
4140 goto fail;
4141 }
4142
4143 if (recon_state.nr_caps || recon_state.nr_realms) {
4144 struct page *page =
4145 list_first_entry(&recon_state.pagelist->head,
4146 struct page, lru);
4147 __le32 *addr = kmap_atomic(page);
4148 if (recon_state.nr_caps) {
4149 WARN_ON(recon_state.nr_realms != mdsc->num_snap_realms);
4150 *addr = cpu_to_le32(recon_state.nr_caps);
4151 } else if (recon_state.msg_version >= 4) {
4152 *(addr + 1) = cpu_to_le32(recon_state.nr_realms);
4153 }
4154 kunmap_atomic(addr);
4155 }
4156
4157 reply->hdr.version = cpu_to_le16(recon_state.msg_version);
4158 if (recon_state.msg_version >= 4)
4159 reply->hdr.compat_version = cpu_to_le16(4);
4160
4161 reply->hdr.data_len = cpu_to_le32(recon_state.pagelist->length);
4162 ceph_msg_data_add_pagelist(reply, recon_state.pagelist);
4163
4164 ceph_con_send(&session->s_con, reply);
4165
4166 mutex_unlock(&session->s_mutex);
4167
4168 mutex_lock(&mdsc->mutex);
4169 __wake_requests(mdsc, &session->s_waiting);
4170 mutex_unlock(&mdsc->mutex);
4171
4172 up_read(&mdsc->snap_rwsem);
4173 ceph_pagelist_release(recon_state.pagelist);
4174 return;
4175
4176 fail:
4177 ceph_msg_put(reply);
4178 up_read(&mdsc->snap_rwsem);
4179 mutex_unlock(&session->s_mutex);
4180 fail_nomsg:
4181 ceph_pagelist_release(recon_state.pagelist);
4182 fail_nopagelist:
4183 pr_err("error %d preparing reconnect for mds%d\n", err, mds);
4184 return;
4185 }
4186
4187
4188 /*
4189 * compare old and new mdsmaps, kicking requests
4190 * and closing out old connections as necessary
4191 *
4192 * called under mdsc->mutex.
4193 */
4194 static void check_new_map(struct ceph_mds_client *mdsc,
4195 struct ceph_mdsmap *newmap,
4196 struct ceph_mdsmap *oldmap)
4197 {
4198 int i, j, err;
4199 int oldstate, newstate;
4200 struct ceph_mds_session *s;
4201 unsigned long targets[DIV_ROUND_UP(CEPH_MAX_MDS, sizeof(unsigned long))] = {0};
4202
4203 dout("check_new_map new %u old %u\n",
4204 newmap->m_epoch, oldmap->m_epoch);
4205
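/* collect the export targets of every MDS rank in the new map */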
4206 if (newmap->m_info) {
4207 for (i = 0; i < newmap->possible_max_rank; i++) {
4208 for (j = 0; j < newmap->m_info[i].num_export_targets; j++)
4209 set_bit(newmap->m_info[i].export_targets[j], targets);
4210 }
4211 }
4212
4213 for (i = 0; i < oldmap->possible_max_rank && i < mdsc->max_sessions; i++) {
4214 if (!mdsc->sessions[i])
4215 continue;
4216 s = mdsc->sessions[i];
4217 oldstate = ceph_mdsmap_get_state(oldmap, i);
4218 newstate = ceph_mdsmap_get_state(newmap, i);
4219
4220 dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
4221 i, ceph_mds_state_name(oldstate),
4222 ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
4223 ceph_mds_state_name(newstate),
4224 ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
4225 ceph_session_state_name(s->s_state));
4226
4227 if (i >= newmap->possible_max_rank) {
4228 /* force close session for stopped mds */
4229 ceph_get_mds_session(s);
4230 __unregister_session(mdsc, s);
4231 __wake_requests(mdsc, &s->s_waiting);
4232 mutex_unlock(&mdsc->mutex);
4233
4234 mutex_lock(&s->s_mutex);
4235 cleanup_session_requests(mdsc, s);
4236 remove_session_caps(s);
4237 mutex_unlock(&s->s_mutex);
4238
4239 ceph_put_mds_session(s);
4240
4241 mutex_lock(&mdsc->mutex);
4242 kick_requests(mdsc, i);
4243 continue;
4244 }
4245
4246 if (memcmp(ceph_mdsmap_get_addr(oldmap, i),
4247 ceph_mdsmap_get_addr(newmap, i),
4248 sizeof(struct ceph_entity_addr))) {
4249 /* just close it */
4250 mutex_unlock(&mdsc->mutex);
4251 mutex_lock(&s->s_mutex);
4252 mutex_lock(&mdsc->mutex);
4253 ceph_con_close(&s->s_con);
4254 mutex_unlock(&s->s_mutex);
4255 s->s_state = CEPH_MDS_SESSION_RESTARTING;
4256 } else if (oldstate == newstate) {
4257 continue; /* nothing new with this mds */
4258 }
4259
4260 /*
4261 * send reconnect?
4262 */
4263 if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
4264 newstate >= CEPH_MDS_STATE_RECONNECT) {
4265 mutex_unlock(&mdsc->mutex);
4266 clear_bit(i, targets);
4267 send_mds_reconnect(mdsc, s);
4268 mutex_lock(&mdsc->mutex);
4269 }
4270
4271 /*
4272 * kick requests on any mds that has gone active.
4273 */
4274 if (oldstate < CEPH_MDS_STATE_ACTIVE &&
4275 newstate >= CEPH_MDS_STATE_ACTIVE) {
4276 if (oldstate != CEPH_MDS_STATE_CREATING &&
4277 oldstate != CEPH_MDS_STATE_STARTING)
4278 pr_info("mds%d recovery completed\n", s->s_mds);
4279 kick_requests(mdsc, i);
4280 mutex_unlock(&mdsc->mutex);
4281 mutex_lock(&s->s_mutex);
4282 mutex_lock(&mdsc->mutex);
4283 ceph_kick_flushing_caps(mdsc, s);
4284 mutex_unlock(&s->s_mutex);
4285 wake_up_session_caps(s, RECONNECT);
4286 }
4287 }
4288
4289 /*
4290 * Only open and reconnect sessions that don't exist yet.
4291 */
4292 for (i = 0; i < newmap->possible_max_rank; i++) {
4293 /*
4294 * If the importing MDS crashed just after flushing
4295 * the EImportStart journal, the standby MDS that
4296 * takes over and replays that journal will wait for
4297 * the client to reconnect to it, but the client may
4298 * never have registered or opened a session with it
4299 * yet.
4300 *
4301 * Try to reconnect to that MDS daemon if its rank
4302 * number is in the export targets bitmap and it is in
4303 * the up:reconnect state.
4304 */
4305 newstate = ceph_mdsmap_get_state(newmap, i);
4306 if (!test_bit(i, targets) || newstate != CEPH_MDS_STATE_RECONNECT)
4307 continue;
4308
4309 /*
4310 * In rare cases the session may already have been
4311 * registered and opened by requests that picked a
4312 * random MDS during the mdsc->mutex unlock/lock gap
4313 * below. The related MDS daemon will just queue those
4314 * requests and keep waiting for the client's
4315 * reconnection request in the up:reconnect state.
4316 */
4317 s = __ceph_lookup_mds_session(mdsc, i);
4318 if (likely(!s)) {
4319 s = __open_export_target_session(mdsc, i);
4320 if (IS_ERR(s)) {
4321 err = PTR_ERR(s);
4322 pr_err("failed to open export target session, err %d\n",
4323 err);
4324 continue;
4325 }
4326 }
4327 dout("send reconnect to export target mds.%d\n", i);
4328 mutex_unlock(&mdsc->mutex);
4329 send_mds_reconnect(mdsc, s);
4330 ceph_put_mds_session(s);
4331 mutex_lock(&mdsc->mutex);
4332 }
4333
4334 for (i = 0; i < newmap->possible_max_rank && i < mdsc->max_sessions; i++) {
4335 s = mdsc->sessions[i];
4336 if (!s)
4337 continue;
4338 if (!ceph_mdsmap_is_laggy(newmap, i))
4339 continue;
4340 if (s->s_state == CEPH_MDS_SESSION_OPEN ||
4341 s->s_state == CEPH_MDS_SESSION_HUNG ||
4342 s->s_state == CEPH_MDS_SESSION_CLOSING) {
4343 dout(" connecting to export targets of laggy mds%d\n",
4344 i);
4345 __open_export_target_sessions(mdsc, s);
4346 }
4347 }
4348 }
4349
4350
4351
4352 /*
4353 * leases
4354 */
4355
4356 /*
4357 * caller must hold session s_mutex, dentry->d_lock
4358 */
4359 void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
4360 {
4361 struct ceph_dentry_info *di = ceph_dentry(dentry);
4362
4363 ceph_put_mds_session(di->lease_session);
4364 di->lease_session = NULL;
4365 }
4366
4367 static void handle_lease(struct ceph_mds_client *mdsc,
4368 struct ceph_mds_session *session,
4369 struct ceph_msg *msg)
4370 {
4371 struct super_block *sb = mdsc->fsc->sb;
4372 struct inode *inode;
4373 struct dentry *parent, *dentry;
4374 struct ceph_dentry_info *di;
4375 int mds = session->s_mds;
4376 struct ceph_mds_lease *h = msg->front.iov_base;
4377 u32 seq;
4378 struct ceph_vino vino;
4379 struct qstr dname;
4380 int release = 0;
4381
4382 dout("handle_lease from mds%d\n", mds);
4383
4384 /* decode */
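/*
 * Wire format, as decoded below:
 *   struct ceph_mds_lease h;
 *   u32 dname_len;
 *   char dname[dname_len];
 */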
4385 if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
4386 goto bad;
4387 vino.ino = le64_to_cpu(h->ino);
4388 vino.snap = CEPH_NOSNAP;
4389 seq = le32_to_cpu(h->seq);
4390 dname.len = get_unaligned_le32(h + 1);
4391 if (msg->front.iov_len < sizeof(*h) + sizeof(u32) + dname.len)
4392 goto bad;
4393 dname.name = (void *)(h + 1) + sizeof(u32);
4394
4395 /* lookup inode */
4396 inode = ceph_find_inode(sb, vino);
4397 dout("handle_lease %s, ino %llx %p %.*s\n",
4398 ceph_lease_op_name(h->action), vino.ino, inode,
4399 dname.len, dname.name);
4400
4401 mutex_lock(&session->s_mutex);
4402 inc_session_sequence(session);
4403
4404 if (!inode) {
4405 dout("handle_lease no inode %llx\n", vino.ino);
4406 goto release;
4407 }
4408
4409 /* dentry */
4410 parent = d_find_alias(inode);
4411 if (!parent) {
4412 dout("no parent dentry on inode %p\n", inode);
4413 WARN_ON(1);
4414 goto release; /* hrm... */
4415 }
4416 dname.hash = full_name_hash(parent, dname.name, dname.len);
4417 dentry = d_lookup(parent, &dname);
4418 dput(parent);
4419 if (!dentry)
4420 goto release;
4421
4422 spin_lock(&dentry->d_lock);
4423 di = ceph_dentry(dentry);
4424 switch (h->action) {
4425 case CEPH_MDS_LEASE_REVOKE:
4426 if (di->lease_session == session) {
4427 if (ceph_seq_cmp(di->lease_seq, seq) > 0)
4428 h->seq = cpu_to_le32(di->lease_seq);
4429 __ceph_mdsc_drop_dentry_lease(dentry);
4430 }
4431 release = 1;
4432 break;
4433
4434 case CEPH_MDS_LEASE_RENEW:
4435 if (di->lease_session == session &&
4436 di->lease_gen == atomic_read(&session->s_cap_gen) &&
4437 di->lease_renew_from &&
4438 di->lease_renew_after == 0) {
4439 unsigned long duration =
4440 msecs_to_jiffies(le32_to_cpu(h->duration_ms));
4441
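/*
 * The lease is valid until lease_renew_from + duration; allow a
 * renewal attempt once half of that duration has elapsed.
 */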
4442 di->lease_seq = seq;
4443 di->time = di->lease_renew_from + duration;
4444 di->lease_renew_after = di->lease_renew_from +
4445 (duration >> 1);
4446 di->lease_renew_from = 0;
4447 }
4448 break;
4449 }
4450 spin_unlock(&dentry->d_lock);
4451 dput(dentry);
4452
4453 if (!release)
4454 goto out;
4455
4456 release:
4457 /* let's just reuse the same message */
4458 h->action = CEPH_MDS_LEASE_REVOKE_ACK;
4459 ceph_msg_get(msg);
4460 ceph_con_send(&session->s_con, msg);
4461
4462 out:
4463 mutex_unlock(&session->s_mutex);
4464 iput(inode);
4465 return;
4466
4467 bad:
4468 pr_err("corrupt lease message\n");
4469 ceph_msg_dump(msg);
4470 }
4471
4472 void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
4473 struct dentry *dentry, char action,
4474 u32 seq)
4475 {
4476 struct ceph_msg *msg;
4477 struct ceph_mds_lease *lease;
4478 struct inode *dir;
4479 int len = sizeof(*lease) + sizeof(u32) + NAME_MAX;
4480
4481 dout("lease_send_msg identry %p %s to mds%d\n",
4482 dentry, ceph_lease_op_name(action), session->s_mds);
4483
4484 msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
4485 if (!msg)
4486 return;
4487 lease = msg->front.iov_base;
4488 lease->action = action;
4489 lease->seq = cpu_to_le32(seq);
4490
4491 spin_lock(&dentry->d_lock);
4492 dir = d_inode(dentry->d_parent);
4493 lease->ino = cpu_to_le64(ceph_ino(dir));
4494 lease->first = lease->last = cpu_to_le64(ceph_snap(dir));
4495
4496 put_unaligned_le32(dentry->d_name.len, lease + 1);
4497 memcpy((void *)(lease + 1) + 4,
4498 dentry->d_name.name, dentry->d_name.len);
4499 spin_unlock(&dentry->d_lock);
4500 /*
4501 * if this is a preemptive lease RELEASE, no need to
4502 * flush request stream, since the actual request will
4503 * soon follow.
4504 */
4505 msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);
4506
4507 ceph_con_send(&session->s_con, msg);
4508 }
4509
4510 /*
4511 * lock and then unlock the session, to wait for ongoing session activity to finish
4512 */
4513 static void lock_unlock_session(struct ceph_mds_session *s)
4514 {
4515 mutex_lock(&s->s_mutex);
4516 mutex_unlock(&s->s_mutex);
4517 }
4518
4519 static void maybe_recover_session(struct ceph_mds_client *mdsc)
4520 {
4521 struct ceph_fs_client *fsc = mdsc->fsc;
4522
4523 if (!ceph_test_mount_opt(fsc, CLEANRECOVER))
4524 return;
4525
4526 if (READ_ONCE(fsc->mount_state) != CEPH_MOUNT_MOUNTED)
4527 return;
4528
4529 if (!READ_ONCE(fsc->blocklisted))
4530 return;
4531
4532 pr_info("auto reconnect after blocklisted\n");
4533 ceph_force_reconnect(fsc->sb);
4534 }
4535
4536 bool check_session_state(struct ceph_mds_session *s)
4537 {
4538 struct ceph_fs_client *fsc = s->s_mdsc->fsc;
4539
4540 switch (s->s_state) {
4541 case CEPH_MDS_SESSION_OPEN:
4542 if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
4543 s->s_state = CEPH_MDS_SESSION_HUNG;
4544 pr_info("mds%d hung\n", s->s_mds);
4545 }
4546 break;
4547 case CEPH_MDS_SESSION_CLOSING:
4548 /* Should never reach this when not force unmounting */
4549 WARN_ON_ONCE(s->s_ttl &&
4550 READ_ONCE(fsc->mount_state) != CEPH_MOUNT_SHUTDOWN);
4551 fallthrough;
4552 case CEPH_MDS_SESSION_NEW:
4553 case CEPH_MDS_SESSION_RESTARTING:
4554 case CEPH_MDS_SESSION_CLOSED:
4555 case CEPH_MDS_SESSION_REJECTED:
4556 return false;
4557 }
4558
4559 return true;
4560 }
4561
4562 /*
4563 * If the sequence is incremented while we're waiting on a REQUEST_CLOSE reply,
4564 * then we need to retransmit that request.
4565 */
4566 void inc_session_sequence(struct ceph_mds_session *s)
4567 {
4568 lockdep_assert_held(&s->s_mutex);
4569
4570 s->s_seq++;
4571
4572 if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
4573 int ret;
4574
4575 dout("resending session close request for mds%d\n", s->s_mds);
4576 ret = request_close_session(s);
4577 if (ret < 0)
4578 pr_err("unable to close session to mds%d: %d\n",
4579 s->s_mds, ret);
4580 }
4581 }
4582
4583 /*
4584 * delayed work -- periodically trim expired leases, renew caps with mds. If
4585 * the @delay parameter is set to 0 or if it's more than 5 secs, the default
4586 * workqueue delay value of 5 secs will be used.
4587 */
4588 static void schedule_delayed(struct ceph_mds_client *mdsc, unsigned long delay)
4589 {
4590 unsigned long max_delay = HZ * 5;
4591
4592 /* 5 secs default delay */
4593 if (!delay || (delay > max_delay))
4594 delay = max_delay;
4595 schedule_delayed_work(&mdsc->delayed_work,
4596 round_jiffies_relative(delay));
4597 }
4598
4599 static void delayed_work(struct work_struct *work)
4600 {
4601 struct ceph_mds_client *mdsc =
4602 container_of(work, struct ceph_mds_client, delayed_work.work);
4603 unsigned long delay;
4604 int renew_interval;
4605 int renew_caps;
4606 int i;
4607
4608 dout("mdsc delayed_work\n");
4609
4610 if (mdsc->stopping >= CEPH_MDSC_STOPPING_FLUSHED)
4611 return;
4612
4613 mutex_lock(&mdsc->mutex);
4614 renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
4615 renew_caps = time_after_eq(jiffies, HZ*renew_interval +
4616 mdsc->last_renew_caps);
4617 if (renew_caps)
4618 mdsc->last_renew_caps = jiffies;
4619
4620 for (i = 0; i < mdsc->max_sessions; i++) {
4621 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
4622 if (!s)
4623 continue;
4624
4625 if (!check_session_state(s)) {
4626 ceph_put_mds_session(s);
4627 continue;
4628 }
4629 mutex_unlock(&mdsc->mutex);
4630
4631 mutex_lock(&s->s_mutex);
4632 if (renew_caps)
4633 send_renew_caps(mdsc, s);
4634 else
4635 ceph_con_keepalive(&s->s_con);
4636 if (s->s_state == CEPH_MDS_SESSION_OPEN ||
4637 s->s_state == CEPH_MDS_SESSION_HUNG)
4638 ceph_send_cap_releases(mdsc, s);
4639 mutex_unlock(&s->s_mutex);
4640 ceph_put_mds_session(s);
4641
4642 mutex_lock(&mdsc->mutex);
4643 }
4644 mutex_unlock(&mdsc->mutex);
4645
4646 delay = ceph_check_delayed_caps(mdsc);
4647
4648 ceph_queue_cap_reclaim_work(mdsc);
4649
4650 ceph_trim_snapid_map(mdsc);
4651
4652 maybe_recover_session(mdsc);
4653
4654 schedule_delayed(mdsc, delay);
4655 }
4656
4657 int ceph_mdsc_init(struct ceph_fs_client *fsc)
4658
4659 {
4660 struct ceph_mds_client *mdsc;
4661 int err;
4662
4663 mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
4664 if (!mdsc)
4665 return -ENOMEM;
4666 mdsc->fsc = fsc;
4667 mutex_init(&mdsc->mutex);
4668 mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
4669 if (!mdsc->mdsmap) {
4670 err = -ENOMEM;
4671 goto err_mdsc;
4672 }
4673
4674 init_completion(&mdsc->safe_umount_waiters);
4675 init_waitqueue_head(&mdsc->session_close_wq);
4676 INIT_LIST_HEAD(&mdsc->waiting_for_map);
4677 mdsc->quotarealms_inodes = RB_ROOT;
4678 mutex_init(&mdsc->quotarealms_inodes_mutex);
4679 init_rwsem(&mdsc->snap_rwsem);
4680 mdsc->snap_realms = RB_ROOT;
4681 INIT_LIST_HEAD(&mdsc->snap_empty);
4682 spin_lock_init(&mdsc->snap_empty_lock);
4683 mdsc->request_tree = RB_ROOT;
4684 INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
4685 mdsc->last_renew_caps = jiffies;
4686 INIT_LIST_HEAD(&mdsc->cap_delay_list);
4687 INIT_LIST_HEAD(&mdsc->cap_wait_list);
4688 spin_lock_init(&mdsc->cap_delay_lock);
4689 INIT_LIST_HEAD(&mdsc->snap_flush_list);
4690 spin_lock_init(&mdsc->snap_flush_lock);
4691 mdsc->last_cap_flush_tid = 1;
4692 INIT_LIST_HEAD(&mdsc->cap_flush_list);
4693 INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
4694 spin_lock_init(&mdsc->cap_dirty_lock);
4695 init_waitqueue_head(&mdsc->cap_flushing_wq);
4696 INIT_WORK(&mdsc->cap_reclaim_work, ceph_cap_reclaim_work);
4697 err = ceph_metric_init(&mdsc->metric);
4698 if (err)
4699 goto err_mdsmap;
4700
4701 spin_lock_init(&mdsc->dentry_list_lock);
4702 INIT_LIST_HEAD(&mdsc->dentry_leases);
4703 INIT_LIST_HEAD(&mdsc->dentry_dir_leases);
4704
4705 ceph_caps_init(mdsc);
4706 ceph_adjust_caps_max_min(mdsc, fsc->mount_options);
4707
4708 spin_lock_init(&mdsc->snapid_map_lock);
4709 mdsc->snapid_map_tree = RB_ROOT;
4710 INIT_LIST_HEAD(&mdsc->snapid_map_lru);
4711
4712 init_rwsem(&mdsc->pool_perm_rwsem);
4713 mdsc->pool_perm_tree = RB_ROOT;
4714
4715 strscpy(mdsc->nodename, utsname()->nodename,
4716 sizeof(mdsc->nodename));
4717
4718 fsc->mdsc = mdsc;
4719 return 0;
4720
4721 err_mdsmap:
4722 kfree(mdsc->mdsmap);
4723 err_mdsc:
4724 kfree(mdsc);
4725 return err;
4726 }
4727
4728 /*
4729 * Wait for safe replies on open mds requests. If we time out, drop
4730 * all requests from the tree to avoid dangling dentry refs.
4731 */
4732 static void wait_requests(struct ceph_mds_client *mdsc)
4733 {
4734 struct ceph_options *opts = mdsc->fsc->client->options;
4735 struct ceph_mds_request *req;
4736
4737 mutex_lock(&mdsc->mutex);
4738 if (__get_oldest_req(mdsc)) {
4739 mutex_unlock(&mdsc->mutex);
4740
4741 dout("wait_requests waiting for requests\n");
4742 wait_for_completion_timeout(&mdsc->safe_umount_waiters,
4743 ceph_timeout_jiffies(opts->mount_timeout));
4744
4745 /* tear down remaining requests */
4746 mutex_lock(&mdsc->mutex);
4747 while ((req = __get_oldest_req(mdsc))) {
4748 dout("wait_requests timed out on tid %llu\n",
4749 req->r_tid);
4750 list_del_init(&req->r_wait);
4751 __unregister_request(mdsc, req);
4752 }
4753 }
4754 mutex_unlock(&mdsc->mutex);
4755 dout("wait_requests done\n");
4756 }
4757
4758 void send_flush_mdlog(struct ceph_mds_session *s)
4759 {
4760 struct ceph_msg *msg;
4761
4762 /*
4763 * Pre-luminous MDS crashes when it sees an unknown session request
4764 */
4765 if (!CEPH_HAVE_FEATURE(s->s_con.peer_features, SERVER_LUMINOUS))
4766 return;
4767
4768 mutex_lock(&s->s_mutex);
4769 dout("request mdlog flush to mds%d (%s)s seq %lld\n", s->s_mds,
4770 ceph_session_state_name(s->s_state), s->s_seq);
4771 msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_FLUSH_MDLOG,
4772 s->s_seq);
4773 if (!msg) {
4774 pr_err("failed to request mdlog flush to mds%d (%s) seq %lld\n",
4775 s->s_mds, ceph_session_state_name(s->s_state), s->s_seq);
4776 } else {
4777 ceph_con_send(&s->s_con, msg);
4778 }
4779 mutex_unlock(&s->s_mutex);
4780 }
4781
4782 /*
4783 * called before mount is ro, and before dentries are torn down.
4784 * (hmm, does this still race with new lookups?)
4785 */
4786 void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
4787 {
4788 dout("pre_umount\n");
4789 mdsc->stopping = CEPH_MDSC_STOPPING_BEGIN;
4790
4791 ceph_mdsc_iterate_sessions(mdsc, send_flush_mdlog, true);
4792 ceph_mdsc_iterate_sessions(mdsc, lock_unlock_session, false);
4793 ceph_flush_dirty_caps(mdsc);
4794 wait_requests(mdsc);
4795
4796 /*
4797 * wait for reply handlers to drop their request refs and
4798 * their inode/dcache refs
4799 */
4800 ceph_msgr_flush();
4801
4802 ceph_cleanup_quotarealms_inodes(mdsc);
4803 }
4804
4805 /*
4806 * flush the mdlog and wait for all write mds requests to flush.
4807 */
4808 static void flush_mdlog_and_wait_mdsc_unsafe_requests(struct ceph_mds_client *mdsc,
4809 u64 want_tid)
4810 {
4811 struct ceph_mds_request *req = NULL, *nextreq;
4812 struct ceph_mds_session *last_session = NULL;
4813 struct rb_node *n;
4814
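/*
 * Walk the requests in tid order. For each write request (setfilelock
 * excluded) with tid <= want_tid, ask its MDS to flush the mdlog so
 * the unsafe request gets committed sooner, then wait for the safe
 * reply. References on req and nextreq keep the tree walk valid while
 * mdsc->mutex is dropped.
 */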
4815 mutex_lock(&mdsc->mutex);
4816 dout("%s want %lld\n", __func__, want_tid);
4817 restart:
4818 req = __get_oldest_req(mdsc);
4819 while (req && req->r_tid <= want_tid) {
4820 /* find next request */
4821 n = rb_next(&req->r_node);
4822 if (n)
4823 nextreq = rb_entry(n, struct ceph_mds_request, r_node);
4824 else
4825 nextreq = NULL;
4826 if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
4827 (req->r_op & CEPH_MDS_OP_WRITE)) {
4828 struct ceph_mds_session *s = req->r_session;
4829
4830 if (!s) {
4831 req = nextreq;
4832 continue;
4833 }
4834
4835 /* write op */
4836 ceph_mdsc_get_request(req);
4837 if (nextreq)
4838 ceph_mdsc_get_request(nextreq);
4839 s = ceph_get_mds_session(s);
4840 mutex_unlock(&mdsc->mutex);
4841
4842 /* send flush mdlog request to MDS */
4843 if (last_session != s) {
4844 send_flush_mdlog(s);
4845 ceph_put_mds_session(last_session);
4846 last_session = s;
4847 } else {
4848 ceph_put_mds_session(s);
4849 }
4850 dout("%s wait on %llu (want %llu)\n", __func__,
4851 req->r_tid, want_tid);
4852 wait_for_completion(&req->r_safe_completion);
4853
4854 mutex_lock(&mdsc->mutex);
4855 ceph_mdsc_put_request(req);
4856 if (!nextreq)
4857 break; /* next request didn't exist, so we're done! */
4858 if (RB_EMPTY_NODE(&nextreq->r_node)) {
4859 /* next request was removed from tree */
4860 ceph_mdsc_put_request(nextreq);
4861 goto restart;
4862 }
4863 ceph_mdsc_put_request(nextreq); /* won't go away */
4864 }
4865 req = nextreq;
4866 }
4867 mutex_unlock(&mdsc->mutex);
4868 ceph_put_mds_session(last_session);
4869 dout("%s done\n", __func__);
4870 }
4871
4872 void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
4873 {
4874 u64 want_tid, want_flush;
4875
4876 if (READ_ONCE(mdsc->fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN)
4877 return;
4878
4879 dout("sync\n");
4880 mutex_lock(&mdsc->mutex);
4881 want_tid = mdsc->last_tid;
4882 mutex_unlock(&mdsc->mutex);
4883
4884 ceph_flush_dirty_caps(mdsc);
4885 spin_lock(&mdsc->cap_dirty_lock);
4886 want_flush = mdsc->last_cap_flush_tid;
4887 if (!list_empty(&mdsc->cap_flush_list)) {
4888 struct ceph_cap_flush *cf =
4889 list_last_entry(&mdsc->cap_flush_list,
4890 struct ceph_cap_flush, g_list);
4891 cf->wake = true;
4892 }
4893 spin_unlock(&mdsc->cap_dirty_lock);
4894
4895 dout("sync want tid %lld flush_seq %lld\n",
4896 want_tid, want_flush);
4897
4898 flush_mdlog_and_wait_mdsc_unsafe_requests(mdsc, want_tid);
4899 wait_caps_flush(mdsc, want_flush);
4900 }
4901
4902 /*
4903 * true if all sessions are closed, or we force unmount
4904 */
4905 static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped)
4906 {
4907 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
4908 return true;
4909 return atomic_read(&mdsc->num_sessions) <= skipped;
4910 }
4911
4912 /*
4913 * called after sb is ro.
4914 */
4915 void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
4916 {
4917 struct ceph_options *opts = mdsc->fsc->client->options;
4918 struct ceph_mds_session *session;
4919 int i;
4920 int skipped = 0;
4921
4922 dout("close_sessions\n");
4923
4924 /* close sessions */
4925 mutex_lock(&mdsc->mutex);
4926 for (i = 0; i < mdsc->max_sessions; i++) {
4927 session = __ceph_lookup_mds_session(mdsc, i);
4928 if (!session)
4929 continue;
4930 mutex_unlock(&mdsc->mutex);
4931 mutex_lock(&session->s_mutex);
4932 if (__close_session(mdsc, session) <= 0)
4933 skipped++;
4934 mutex_unlock(&session->s_mutex);
4935 ceph_put_mds_session(session);
4936 mutex_lock(&mdsc->mutex);
4937 }
4938 mutex_unlock(&mdsc->mutex);
4939
4940 dout("waiting for sessions to close\n");
4941 wait_event_timeout(mdsc->session_close_wq,
4942 done_closing_sessions(mdsc, skipped),
4943 ceph_timeout_jiffies(opts->mount_timeout));
4944
4945 /* tear down remaining sessions */
4946 mutex_lock(&mdsc->mutex);
4947 for (i = 0; i < mdsc->max_sessions; i++) {
4948 if (mdsc->sessions[i]) {
4949 session = ceph_get_mds_session(mdsc->sessions[i]);
4950 __unregister_session(mdsc, session);
4951 mutex_unlock(&mdsc->mutex);
4952 mutex_lock(&session->s_mutex);
4953 remove_session_caps(session);
4954 mutex_unlock(&session->s_mutex);
4955 ceph_put_mds_session(session);
4956 mutex_lock(&mdsc->mutex);
4957 }
4958 }
4959 WARN_ON(!list_empty(&mdsc->cap_delay_list));
4960 mutex_unlock(&mdsc->mutex);
4961
4962 ceph_cleanup_snapid_map(mdsc);
4963 ceph_cleanup_empty_realms(mdsc);
4964
4965 cancel_work_sync(&mdsc->cap_reclaim_work);
4966 cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
4967
4968 dout("stopped\n");
4969 }
4970
4971 void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
4972 {
4973 struct ceph_mds_session *session;
4974 int mds;
4975
4976 dout("force umount\n");
4977
4978 mutex_lock(&mdsc->mutex);
4979 for (mds = 0; mds < mdsc->max_sessions; mds++) {
4980 session = __ceph_lookup_mds_session(mdsc, mds);
4981 if (!session)
4982 continue;
4983
4984 if (session->s_state == CEPH_MDS_SESSION_REJECTED)
4985 __unregister_session(mdsc, session);
4986 __wake_requests(mdsc, &session->s_waiting);
4987 mutex_unlock(&mdsc->mutex);
4988
4989 mutex_lock(&session->s_mutex);
4990 __close_session(mdsc, session);
4991 if (session->s_state == CEPH_MDS_SESSION_CLOSING) {
4992 cleanup_session_requests(mdsc, session);
4993 remove_session_caps(session);
4994 }
4995 mutex_unlock(&session->s_mutex);
4996 ceph_put_mds_session(session);
4997
4998 mutex_lock(&mdsc->mutex);
4999 kick_requests(mdsc, mds);
5000 }
5001 __wake_requests(mdsc, &mdsc->waiting_for_map);
5002 mutex_unlock(&mdsc->mutex);
5003 }
5004
5005 static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
5006 {
5007 dout("stop\n");
5008 /*
5009 * Make sure the delayed work has stopped before releasing
5010 * the resources.
5011 *
5012 * cancel_delayed_work_sync() only guarantees that the work
5013 * finishes executing, but the delayed work can re-arm
5014 * itself again after that, so flush it instead.
5015 */
5016 flush_delayed_work(&mdsc->delayed_work);
5017
5018 if (mdsc->mdsmap)
5019 ceph_mdsmap_destroy(mdsc->mdsmap);
5020 kfree(mdsc->sessions);
5021 ceph_caps_finalize(mdsc);
5022 ceph_pool_perm_destroy(mdsc);
5023 }
5024
5025 void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
5026 {
5027 struct ceph_mds_client *mdsc = fsc->mdsc;
5028 dout("mdsc_destroy %p\n", mdsc);
5029
5030 if (!mdsc)
5031 return;
5032
5033 /* flush out any connection work with references to us */
5034 ceph_msgr_flush();
5035
5036 ceph_mdsc_stop(mdsc);
5037
5038 ceph_metric_destroy(&mdsc->metric);
5039
5040 fsc->mdsc = NULL;
5041 kfree(mdsc);
5042 dout("mdsc_destroy %p done\n", mdsc);
5043 }
5044
5045 void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
5046 {
5047 struct ceph_fs_client *fsc = mdsc->fsc;
5048 const char *mds_namespace = fsc->mount_options->mds_namespace;
5049 void *p = msg->front.iov_base;
5050 void *end = p + msg->front.iov_len;
5051 u32 epoch;
5052 u32 num_fs;
5053 u32 mount_fscid = (u32)-1;
5054 int err = -EINVAL;
5055
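/*
 * A sketch of the FSMap wire format as consumed below (only the
 * fields we need are fully decoded):
 *   u32 epoch
 *   u8 struct_v, u8 struct_cv, u32 map_len
 *   u32 epoch, u32 legacy_client_fscid
 *   u32 num_fs
 *   for each fs: u8 info_v, u8 info_cv, u32 info_len, then within:
 *                u32 fscid, u32 namelen, char name[namelen], ...
 */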
5056 ceph_decode_need(&p, end, sizeof(u32), bad);
5057 epoch = ceph_decode_32(&p);
5058
5059 dout("handle_fsmap epoch %u\n", epoch);
5060
5061 /* struct_v, struct_cv, map_len, epoch, legacy_client_fscid */
5062 ceph_decode_skip_n(&p, end, 2 + sizeof(u32) * 3, bad);
5063
5064 ceph_decode_32_safe(&p, end, num_fs, bad);
5065 while (num_fs-- > 0) {
5066 void *info_p, *info_end;
5067 u32 info_len;
5068 u32 fscid, namelen;
5069
5070 ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
5071 p += 2; // info_v, info_cv
5072 info_len = ceph_decode_32(&p);
5073 ceph_decode_need(&p, end, info_len, bad);
5074 info_p = p;
5075 info_end = p + info_len;
5076 p = info_end;
5077
5078 ceph_decode_need(&info_p, info_end, sizeof(u32) * 2, bad);
5079 fscid = ceph_decode_32(&info_p);
5080 namelen = ceph_decode_32(&info_p);
5081 ceph_decode_need(&info_p, info_end, namelen, bad);
5082
5083 if (mds_namespace &&
5084 strlen(mds_namespace) == namelen &&
5085 !strncmp(mds_namespace, (char *)info_p, namelen)) {
5086 mount_fscid = fscid;
5087 break;
5088 }
5089 }
5090
5091 ceph_monc_got_map(&fsc->client->monc, CEPH_SUB_FSMAP, epoch);
5092 if (mount_fscid != (u32)-1) {
5093 fsc->client->monc.fs_cluster_id = mount_fscid;
5094 ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
5095 0, true);
5096 ceph_monc_renew_subs(&fsc->client->monc);
5097 } else {
5098 err = -ENOENT;
5099 goto err_out;
5100 }
5101 return;
5102
5103 bad:
5104 pr_err("error decoding fsmap\n");
5105 err_out:
5106 mutex_lock(&mdsc->mutex);
5107 mdsc->mdsmap_err = err;
5108 __wake_requests(mdsc, &mdsc->waiting_for_map);
5109 mutex_unlock(&mdsc->mutex);
5110 }
5111
5112 /*
5113 * handle mds map update.
5114 */
5115 void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
5116 {
5117 u32 epoch;
5118 u32 maplen;
5119 void *p = msg->front.iov_base;
5120 void *end = p + msg->front.iov_len;
5121 struct ceph_mdsmap *newmap, *oldmap;
5122 struct ceph_fsid fsid;
5123 int err = -EINVAL;
5124
5125 ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
5126 ceph_decode_copy(&p, &fsid, sizeof(fsid));
5127 if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
5128 return;
5129 epoch = ceph_decode_32(&p);
5130 maplen = ceph_decode_32(&p);
5131 dout("handle_map epoch %u len %d\n", epoch, (int)maplen);
5132
5133 /* do we need it? */
5134 mutex_lock(&mdsc->mutex);
5135 if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
5136 dout("handle_map epoch %u <= our %u\n",
5137 epoch, mdsc->mdsmap->m_epoch);
5138 mutex_unlock(&mdsc->mutex);
5139 return;
5140 }
5141
5142 newmap = ceph_mdsmap_decode(&p, end, ceph_msgr2(mdsc->fsc->client));
5143 if (IS_ERR(newmap)) {
5144 err = PTR_ERR(newmap);
5145 goto bad_unlock;
5146 }
5147
5148 /* swap into place */
5149 if (mdsc->mdsmap) {
5150 oldmap = mdsc->mdsmap;
5151 mdsc->mdsmap = newmap;
5152 check_new_map(mdsc, newmap, oldmap);
5153 ceph_mdsmap_destroy(oldmap);
5154 } else {
5155 mdsc->mdsmap = newmap; /* first mds map */
5156 }
5157 mdsc->fsc->max_file_size = min((loff_t)mdsc->mdsmap->m_max_file_size,
5158 MAX_LFS_FILESIZE);
5159
5160 __wake_requests(mdsc, &mdsc->waiting_for_map);
5161 ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP,
5162 mdsc->mdsmap->m_epoch);
5163
5164 mutex_unlock(&mdsc->mutex);
5165 schedule_delayed(mdsc, 0);
5166 return;
5167
5168 bad_unlock:
5169 mutex_unlock(&mdsc->mutex);
5170 bad:
5171 pr_err("error decoding mdsmap %d\n", err);
5172 return;
5173 }
5174
5175 static struct ceph_connection *mds_get_con(struct ceph_connection *con)
5176 {
5177 struct ceph_mds_session *s = con->private;
5178
5179 if (ceph_get_mds_session(s))
5180 return con;
5181 return NULL;
5182 }
5183
5184 static void mds_put_con(struct ceph_connection *con)
5185 {
5186 struct ceph_mds_session *s = con->private;
5187
5188 ceph_put_mds_session(s);
5189 }
5190
5191 /*
5192 * if the client is unresponsive for long enough, the mds will kill
5193 * the session entirely.
5194 */
5195 static void mds_peer_reset(struct ceph_connection *con)
5196 {
5197 struct ceph_mds_session *s = con->private;
5198 struct ceph_mds_client *mdsc = s->s_mdsc;
5199
5200 pr_warn("mds%d closed our session\n", s->s_mds);
5201 send_mds_reconnect(mdsc, s);
5202 }
5203
5204 static void mds_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
5205 {
5206 struct ceph_mds_session *s = con->private;
5207 struct ceph_mds_client *mdsc = s->s_mdsc;
5208 int type = le16_to_cpu(msg->hdr.type);
5209
5210 mutex_lock(&mdsc->mutex);
5211 if (__verify_registered_session(mdsc, s) < 0) {
5212 mutex_unlock(&mdsc->mutex);
5213 goto out;
5214 }
5215 mutex_unlock(&mdsc->mutex);
5216
5217 switch (type) {
5218 case CEPH_MSG_MDS_MAP:
5219 ceph_mdsc_handle_mdsmap(mdsc, msg);
5220 break;
5221 case CEPH_MSG_FS_MAP_USER:
5222 ceph_mdsc_handle_fsmap(mdsc, msg);
5223 break;
5224 case CEPH_MSG_CLIENT_SESSION:
5225 handle_session(s, msg);
5226 break;
5227 case CEPH_MSG_CLIENT_REPLY:
5228 handle_reply(s, msg);
5229 break;
5230 case CEPH_MSG_CLIENT_REQUEST_FORWARD:
5231 handle_forward(mdsc, s, msg);
5232 break;
5233 case CEPH_MSG_CLIENT_CAPS:
5234 ceph_handle_caps(s, msg);
5235 break;
5236 case CEPH_MSG_CLIENT_SNAP:
5237 ceph_handle_snap(mdsc, s, msg);
5238 break;
5239 case CEPH_MSG_CLIENT_LEASE:
5240 handle_lease(mdsc, s, msg);
5241 break;
5242 case CEPH_MSG_CLIENT_QUOTA:
5243 ceph_handle_quota(mdsc, s, msg);
5244 break;
5245
5246 default:
5247 pr_err("received unknown message type %d %s\n", type,
5248 ceph_msg_type_name(type));
5249 }
5250 out:
5251 ceph_msg_put(msg);
5252 }
5253
5254 /*
5255 * authentication
5256 */
5257
5258 /*
5259 * Note: returned pointer is the address of a structure that's
5260 * managed separately. Caller must *not* attempt to free it.
5261 */
5262 static struct ceph_auth_handshake *
5263 mds_get_authorizer(struct ceph_connection *con, int *proto, int force_new)
5264 {
5265 struct ceph_mds_session *s = con->private;
5266 struct ceph_mds_client *mdsc = s->s_mdsc;
5267 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
5268 struct ceph_auth_handshake *auth = &s->s_auth;
5269 int ret;
5270
5271 ret = __ceph_auth_get_authorizer(ac, auth, CEPH_ENTITY_TYPE_MDS,
5272 force_new, proto, NULL, NULL);
5273 if (ret)
5274 return ERR_PTR(ret);
5275
5276 return auth;
5277 }
5278
5279 static int mds_add_authorizer_challenge(struct ceph_connection *con,
5280 void *challenge_buf, int challenge_buf_len)
5281 {
5282 struct ceph_mds_session *s = con->private;
5283 struct ceph_mds_client *mdsc = s->s_mdsc;
5284 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
5285
5286 return ceph_auth_add_authorizer_challenge(ac, s->s_auth.authorizer,
5287 challenge_buf, challenge_buf_len);
5288 }
5289
5290 static int mds_verify_authorizer_reply(struct ceph_connection *con)
5291 {
5292 struct ceph_mds_session *s = con->private;
5293 struct ceph_mds_client *mdsc = s->s_mdsc;
5294 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
5295 struct ceph_auth_handshake *auth = &s->s_auth;
5296
5297 return ceph_auth_verify_authorizer_reply(ac, auth->authorizer,
5298 auth->authorizer_reply_buf, auth->authorizer_reply_buf_len,
5299 NULL, NULL, NULL, NULL);
5300 }
5301
5302 static int mds_invalidate_authorizer(struct ceph_connection *con)
5303 {
5304 struct ceph_mds_session *s = con->private;
5305 struct ceph_mds_client *mdsc = s->s_mdsc;
5306 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
5307
5308 ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);
5309
5310 return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
5311 }
5312
5313 static int mds_get_auth_request(struct ceph_connection *con,
5314 void *buf, int *buf_len,
5315 void **authorizer, int *authorizer_len)
5316 {
5317 struct ceph_mds_session *s = con->private;
5318 struct ceph_auth_client *ac = s->s_mdsc->fsc->client->monc.auth;
5319 struct ceph_auth_handshake *auth = &s->s_auth;
5320 int ret;
5321
5322 ret = ceph_auth_get_authorizer(ac, auth, CEPH_ENTITY_TYPE_MDS,
5323 buf, buf_len);
5324 if (ret)
5325 return ret;
5326
5327 *authorizer = auth->authorizer_buf;
5328 *authorizer_len = auth->authorizer_buf_len;
5329 return 0;
5330 }
5331
5332 static int mds_handle_auth_reply_more(struct ceph_connection *con,
5333 void *reply, int reply_len,
5334 void *buf, int *buf_len,
5335 void **authorizer, int *authorizer_len)
5336 {
5337 struct ceph_mds_session *s = con->private;
5338 struct ceph_auth_client *ac = s->s_mdsc->fsc->client->monc.auth;
5339 struct ceph_auth_handshake *auth = &s->s_auth;
5340 int ret;
5341
5342 ret = ceph_auth_handle_svc_reply_more(ac, auth, reply, reply_len,
5343 buf, buf_len);
5344 if (ret)
5345 return ret;
5346
5347 *authorizer = auth->authorizer_buf;
5348 *authorizer_len = auth->authorizer_buf_len;
5349 return 0;
5350 }
5351
5352 static int mds_handle_auth_done(struct ceph_connection *con,
5353 u64 global_id, void *reply, int reply_len,
5354 u8 *session_key, int *session_key_len,
5355 u8 *con_secret, int *con_secret_len)
5356 {
5357 struct ceph_mds_session *s = con->private;
5358 struct ceph_auth_client *ac = s->s_mdsc->fsc->client->monc.auth;
5359 struct ceph_auth_handshake *auth = &s->s_auth;
5360
5361 return ceph_auth_handle_svc_reply_done(ac, auth, reply, reply_len,
5362 session_key, session_key_len,
5363 con_secret, con_secret_len);
5364 }
5365
5366 static int mds_handle_auth_bad_method(struct ceph_connection *con,
5367 int used_proto, int result,
5368 const int *allowed_protos, int proto_cnt,
5369 const int *allowed_modes, int mode_cnt)
5370 {
5371 struct ceph_mds_session *s = con->private;
5372 struct ceph_mon_client *monc = &s->s_mdsc->fsc->client->monc;
5373 int ret;
5374
5375 if (ceph_auth_handle_bad_authorizer(monc->auth, CEPH_ENTITY_TYPE_MDS,
5376 used_proto, result,
5377 allowed_protos, proto_cnt,
5378 allowed_modes, mode_cnt)) {
5379 ret = ceph_monc_validate_auth(monc);
5380 if (ret)
5381 return ret;
5382 }
5383
5384 return -EACCES;
5385 }
5386
5387 static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
5388 struct ceph_msg_header *hdr, int *skip)
5389 {
5390 struct ceph_msg *msg;
5391 int type = (int) le16_to_cpu(hdr->type);
5392 int front_len = (int) le32_to_cpu(hdr->front_len);
5393
5394 if (con->in_msg)
5395 return con->in_msg;
5396
5397 *skip = 0;
5398 msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
5399 if (!msg) {
5400 pr_err("unable to allocate msg type %d len %d\n",
5401 type, front_len);
5402 return NULL;
5403 }
5404
5405 return msg;
5406 }
5407
5408 static int mds_sign_message(struct ceph_msg *msg)
5409 {
5410 struct ceph_mds_session *s = msg->con->private;
5411 struct ceph_auth_handshake *auth = &s->s_auth;
5412
5413 return ceph_auth_sign_message(auth, msg);
5414 }
5415
5416 static int mds_check_message_signature(struct ceph_msg *msg)
5417 {
5418 struct ceph_mds_session *s = msg->con->private;
5419 struct ceph_auth_handshake *auth = &s->s_auth;
5420
5421 return ceph_auth_check_message_signature(auth, msg);
5422 }
5423
5424 static const struct ceph_connection_operations mds_con_ops = {
5425 .get = mds_get_con,
5426 .put = mds_put_con,
5427 .alloc_msg = mds_alloc_msg,
5428 .dispatch = mds_dispatch,
5429 .peer_reset = mds_peer_reset,
5430 .get_authorizer = mds_get_authorizer,
5431 .add_authorizer_challenge = mds_add_authorizer_challenge,
5432 .verify_authorizer_reply = mds_verify_authorizer_reply,
5433 .invalidate_authorizer = mds_invalidate_authorizer,
5434 .sign_message = mds_sign_message,
5435 .check_message_signature = mds_check_message_signature,
5436 .get_auth_request = mds_get_auth_request,
5437 .handle_auth_reply_more = mds_handle_auth_reply_more,
5438 .handle_auth_done = mds_handle_auth_done,
5439 .handle_auth_bad_method = mds_handle_auth_bad_method,
5440 };
5441
5442 /* eof */
5443