1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/ceph/ceph_debug.h>
3
4 #include <linux/module.h>
5 #include <linux/fs.h>
6 #include <linux/slab.h>
7 #include <linux/string.h>
8 #include <linux/uaccess.h>
9 #include <linux/kernel.h>
10 #include <linux/writeback.h>
11 #include <linux/vmalloc.h>
12 #include <linux/xattr.h>
13 #include <linux/posix_acl.h>
14 #include <linux/random.h>
15 #include <linux/sort.h>
16 #include <linux/iversion.h>
17 #include <linux/fscrypt.h>
18
19 #include "super.h"
20 #include "mds_client.h"
21 #include "cache.h"
22 #include "crypto.h"
23 #include <linux/ceph/decode.h>
24
25 /*
26 * Ceph inode operations
27 *
28 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
29 * setattr, etc.), xattr helpers, and helpers for assimilating
30 * metadata returned by the MDS into our cache.
31 *
32 * Also define helpers for doing asynchronous writeback, invalidation,
33 * and truncation for the benefit of those who can't afford to block
34 * (typically because they are in the message handler path).
35 */
36
37 static const struct inode_operations ceph_symlink_iops;
38 static const struct inode_operations ceph_encrypted_symlink_iops;
39
40 static void ceph_inode_work(struct work_struct *work);
41
42 /*
43 * find or create an inode, given the ceph ino number
44 */
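
/*
 * Inode initializer callback passed to iget5_locked()/inode_insert5() in
 * ceph_get_inode() below: record the vino, derive i_ino from it, and
 * account the new inode in the client metrics.
 */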
static int ceph_set_ino_cb(struct inode *inode, void *data)
46 {
47 struct ceph_inode_info *ci = ceph_inode(inode);
48 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
49
50 ci->i_vino = *(struct ceph_vino *)data;
51 inode->i_ino = ceph_vino_to_ino_t(ci->i_vino);
52 inode_set_iversion_raw(inode, 0);
53 percpu_counter_inc(&mdsc->metric.total_inodes);
54
55 return 0;
56 }
57
58 /*
59 * Check if the parent inode matches the vino from directory reply info
60 */
static inline bool ceph_vino_matches_parent(struct inode *parent,
					    struct ceph_vino vino)
63 {
64 return ceph_ino(parent) == vino.ino && ceph_snap(parent) == vino.snap;
65 }
66
67 /*
68 * Validate that the directory inode referenced by @req->r_parent matches the
69 * inode number and snapshot id contained in the reply's directory record. If
70 * they do not match – which can theoretically happen if the parent dentry was
71 * moved between the time the request was issued and the reply arrived – fall
72 * back to looking up the correct inode in the inode cache.
73 *
74 * A reference is *always* returned. Callers that receive a different inode
75 * than the original @parent are responsible for dropping the extra reference
76 * once the reply has been processed.
77 */
static struct inode *ceph_get_reply_dir(struct super_block *sb,
					struct inode *parent,
					struct ceph_mds_reply_info_parsed *rinfo)
81 {
82 struct ceph_vino vino;
83
84 if (unlikely(!rinfo->diri.in))
85 return parent; /* nothing to compare against */
86
87 /* If we didn't have a cached parent inode to begin with, just bail out. */
88 if (!parent)
89 return NULL;
90
91 vino.ino = le64_to_cpu(rinfo->diri.in->ino);
92 vino.snap = le64_to_cpu(rinfo->diri.in->snapid);
93
94 if (likely(ceph_vino_matches_parent(parent, vino)))
95 return parent; /* matches – use the original reference */
96
97 /* Mismatch – this should be rare. Emit a WARN and obtain the correct inode. */
98 WARN_ONCE(1, "ceph: reply dir mismatch (parent valid %llx.%llx reply %llx.%llx)\n",
99 ceph_ino(parent), ceph_snap(parent), vino.ino, vino.snap);
100
101 return ceph_get_inode(sb, vino, NULL);
102 }
103
104 /**
105 * ceph_new_inode - allocate a new inode in advance of an expected create
106 * @dir: parent directory for new inode
107 * @dentry: dentry that may eventually point to new inode
108 * @mode: mode of new inode
109 * @as_ctx: pointer to inherited security context
110 *
111 * Allocate a new inode in advance of an operation to create a new inode.
112 * This allocates the inode and sets up the acl_sec_ctx with appropriate
113 * info for the new inode.
114 *
115 * Returns a pointer to the new inode or an ERR_PTR.
116 */
struct inode *ceph_new_inode(struct inode *dir, struct dentry *dentry,
			     umode_t *mode, struct ceph_acl_sec_ctx *as_ctx)
119 {
120 int err;
121 struct inode *inode;
122
123 inode = new_inode(dir->i_sb);
124 if (!inode)
125 return ERR_PTR(-ENOMEM);
126
127 inode->i_blkbits = CEPH_FSCRYPT_BLOCK_SHIFT;
128
129 if (!S_ISLNK(*mode)) {
130 err = ceph_pre_init_acls(dir, mode, as_ctx);
131 if (err < 0)
132 goto out_err;
133 }
134
135 inode->i_state = 0;
136 inode->i_mode = *mode;
137
138 err = ceph_security_init_secctx(dentry, *mode, as_ctx);
139 if (err < 0)
140 goto out_err;
141
142 /*
143 * We'll skip setting fscrypt context for snapshots, leaving that for
144 * the handle_reply().
145 */
146 if (ceph_snap(dir) != CEPH_SNAPDIR) {
147 err = ceph_fscrypt_prepare_context(dir, inode, as_ctx);
148 if (err)
149 goto out_err;
150 }
151
152 return inode;
153 out_err:
154 iput(inode);
155 return ERR_PTR(err);
156 }
157
void ceph_as_ctx_to_req(struct ceph_mds_request *req,
			struct ceph_acl_sec_ctx *as_ctx)
160 {
161 if (as_ctx->pagelist) {
162 req->r_pagelist = as_ctx->pagelist;
163 as_ctx->pagelist = NULL;
164 }
165 ceph_fscrypt_as_ctx_to_req(req, as_ctx);
166 }
167
168 /**
169 * ceph_get_inode - find or create/hash a new inode
170 * @sb: superblock to search and allocate in
171 * @vino: vino to search for
172 * @newino: optional new inode to insert if one isn't found (may be NULL)
173 *
174 * Search for or insert a new inode into the hash for the given vino, and
175 * return a reference to it. If new is non-NULL, its reference is consumed.
176 */
struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino,
			     struct inode *newino)
179 {
180 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(sb);
181 struct ceph_client *cl = mdsc->fsc->client;
182 struct inode *inode;
183
184 if (ceph_vino_is_reserved(vino))
185 return ERR_PTR(-EREMOTEIO);
186
187 if (newino) {
188 inode = inode_insert5(newino, (unsigned long)vino.ino,
189 ceph_ino_compare, ceph_set_ino_cb, &vino);
190 if (inode != newino)
191 iput(newino);
192 } else {
193 inode = iget5_locked(sb, (unsigned long)vino.ino,
194 ceph_ino_compare, ceph_set_ino_cb, &vino);
195 }
196
197 if (!inode) {
198 doutc(cl, "no inode found for %llx.%llx\n", vino.ino, vino.snap);
199 return ERR_PTR(-ENOMEM);
200 }
201
202 doutc(cl, "on %llx=%llx.%llx got %p new %d\n",
203 ceph_present_inode(inode), ceph_vinop(inode), inode,
204 !!(inode->i_state & I_NEW));
205 return inode;
206 }
207
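/*
 * Minimal usage sketch (see ceph_get_snapdir() below and ceph_fill_trace()
 * for real callers):
 *
 *	vino.ino = le64_to_cpu(info->ino);
 *	vino.snap = le64_to_cpu(info->snapid);
 *	inode = ceph_get_inode(sb, vino, NULL);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *	... fill the inode from the MDS reply ...
 *	if (inode->i_state & I_NEW)
 *		unlock_new_inode(inode);
 */
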
208 /*
 * get/construct snapdir inode for a given directory
210 */
struct inode *ceph_get_snapdir(struct inode *parent)
212 {
213 struct ceph_client *cl = ceph_inode_to_client(parent);
214 struct ceph_vino vino = {
215 .ino = ceph_ino(parent),
216 .snap = CEPH_SNAPDIR,
217 };
218 struct inode *inode = ceph_get_inode(parent->i_sb, vino, NULL);
219 struct ceph_inode_info *ci = ceph_inode(inode);
220 int ret = -ENOTDIR;
221
222 if (IS_ERR(inode))
223 return inode;
224
225 if (!S_ISDIR(parent->i_mode)) {
226 pr_warn_once_client(cl, "bad snapdir parent type (mode=0%o)\n",
227 parent->i_mode);
228 goto err;
229 }
230
231 if (!(inode->i_state & I_NEW) && !S_ISDIR(inode->i_mode)) {
232 pr_warn_once_client(cl, "bad snapdir inode type (mode=0%o)\n",
233 inode->i_mode);
234 goto err;
235 }
236
237 inode->i_mode = parent->i_mode;
238 inode->i_uid = parent->i_uid;
239 inode->i_gid = parent->i_gid;
240 inode_set_mtime_to_ts(inode, inode_get_mtime(parent));
241 inode_set_ctime_to_ts(inode, inode_get_ctime(parent));
242 inode_set_atime_to_ts(inode, inode_get_atime(parent));
243 ci->i_rbytes = 0;
244 ci->i_btime = ceph_inode(parent)->i_btime;
245
246 #ifdef CONFIG_FS_ENCRYPTION
247 /* if encrypted, just borrow fscrypt_auth from parent */
248 if (IS_ENCRYPTED(parent)) {
249 struct ceph_inode_info *pci = ceph_inode(parent);
250
251 ci->fscrypt_auth = kmemdup(pci->fscrypt_auth,
252 pci->fscrypt_auth_len,
253 GFP_KERNEL);
254 if (ci->fscrypt_auth) {
255 inode->i_flags |= S_ENCRYPTED;
256 ci->fscrypt_auth_len = pci->fscrypt_auth_len;
257 } else {
258 doutc(cl, "Failed to alloc snapdir fscrypt_auth\n");
259 ret = -ENOMEM;
260 goto err;
261 }
262 }
263 #endif
264 if (inode->i_state & I_NEW) {
265 inode->i_op = &ceph_snapdir_iops;
266 inode->i_fop = &ceph_snapdir_fops;
267 ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
268 unlock_new_inode(inode);
269 }
270
271 return inode;
272 err:
273 if ((inode->i_state & I_NEW))
274 discard_new_inode(inode);
275 else
276 iput(inode);
277 return ERR_PTR(ret);
278 }
279
280 const struct inode_operations ceph_file_iops = {
281 .permission = ceph_permission,
282 .setattr = ceph_setattr,
283 .getattr = ceph_getattr,
284 .listxattr = ceph_listxattr,
285 .get_inode_acl = ceph_get_acl,
286 .set_acl = ceph_set_acl,
287 };
288
289
290 /*
291 * We use a 'frag tree' to keep track of the MDS's directory fragments
292 * for a given inode (usually there is just a single fragment). We
293 * need to know when a child frag is delegated to a new MDS, or when
294 * it is flagged as replicated, so we can direct our requests
295 * accordingly.
296 */
297
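/*
 * Frag encoding in brief (a sketch; see ceph_frag.h for the details): a frag
 * is a 32-bit value whose high bits give the number of leading hash bits it
 * covers and whose low bits hold the value of those bits.  ceph_frag_make(0, 0)
 * is the root frag covering the whole hash space; splitting it by 1 produces
 * the children ceph_frag_make_child(root, 1, 0) and
 * ceph_frag_make_child(root, 1, 1), each covering half of the space.
 */
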
298 /*
299 * find/create a frag in the tree
300 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
						    u32 f)
303 {
304 struct inode *inode = &ci->netfs.inode;
305 struct ceph_client *cl = ceph_inode_to_client(inode);
306 struct rb_node **p;
307 struct rb_node *parent = NULL;
308 struct ceph_inode_frag *frag;
309 int c;
310
311 p = &ci->i_fragtree.rb_node;
312 while (*p) {
313 parent = *p;
314 frag = rb_entry(parent, struct ceph_inode_frag, node);
315 c = ceph_frag_compare(f, frag->frag);
316 if (c < 0)
317 p = &(*p)->rb_left;
318 else if (c > 0)
319 p = &(*p)->rb_right;
320 else
321 return frag;
322 }
323
324 frag = kmalloc(sizeof(*frag), GFP_NOFS);
325 if (!frag)
326 return ERR_PTR(-ENOMEM);
327
328 frag->frag = f;
329 frag->split_by = 0;
330 frag->mds = -1;
331 frag->ndist = 0;
332
333 rb_link_node(&frag->node, parent, p);
334 rb_insert_color(&frag->node, &ci->i_fragtree);
335
336 doutc(cl, "added %p %llx.%llx frag %x\n", inode, ceph_vinop(inode), f);
337 return frag;
338 }
339
340 /*
341 * find a specific frag @f
342 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
344 {
345 struct rb_node *n = ci->i_fragtree.rb_node;
346
347 while (n) {
348 struct ceph_inode_frag *frag =
349 rb_entry(n, struct ceph_inode_frag, node);
350 int c = ceph_frag_compare(f, frag->frag);
351 if (c < 0)
352 n = n->rb_left;
353 else if (c > 0)
354 n = n->rb_right;
355 else
356 return frag;
357 }
358 return NULL;
359 }
360
361 /*
362 * Choose frag containing the given value @v. If @pfrag is
363 * specified, copy the frag delegation info to the caller if
364 * it is present.
365 */
static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
			      struct ceph_inode_frag *pfrag, int *found)
368 {
369 struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode);
370 u32 t = ceph_frag_make(0, 0);
371 struct ceph_inode_frag *frag;
372 unsigned nway, i;
373 u32 n;
374
375 if (found)
376 *found = 0;
377
378 while (1) {
379 WARN_ON(!ceph_frag_contains_value(t, v));
380 frag = __ceph_find_frag(ci, t);
381 if (!frag)
382 break; /* t is a leaf */
383 if (frag->split_by == 0) {
384 if (pfrag)
385 memcpy(pfrag, frag, sizeof(*pfrag));
386 if (found)
387 *found = 1;
388 break;
389 }
390
391 /* choose child */
392 nway = 1 << frag->split_by;
393 doutc(cl, "frag(%x) %x splits by %d (%d ways)\n", v, t,
394 frag->split_by, nway);
395 for (i = 0; i < nway; i++) {
396 n = ceph_frag_make_child(t, frag->split_by, i);
397 if (ceph_frag_contains_value(n, v)) {
398 t = n;
399 break;
400 }
401 }
402 BUG_ON(i == nway);
403 }
404 doutc(cl, "frag(%x) = %x\n", v, t);
405
406 return t;
407 }
408
u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
		     struct ceph_inode_frag *pfrag, int *found)
411 {
412 u32 ret;
413 mutex_lock(&ci->i_fragtree_mutex);
414 ret = __ceph_choose_frag(ci, v, pfrag, found);
415 mutex_unlock(&ci->i_fragtree_mutex);
416 return ret;
417 }
418
419 /*
420 * Process dirfrag (delegation) info from the mds. Include leaf
421 * fragment in tree ONLY if ndist > 0. Otherwise, only
 * branches/splits are included in i_fragtree.
423 */
static int ceph_fill_dirfrag(struct inode *inode,
			     struct ceph_mds_reply_dirfrag *dirinfo)
426 {
427 struct ceph_inode_info *ci = ceph_inode(inode);
428 struct ceph_client *cl = ceph_inode_to_client(inode);
429 struct ceph_inode_frag *frag;
430 u32 id = le32_to_cpu(dirinfo->frag);
431 int mds = le32_to_cpu(dirinfo->auth);
432 int ndist = le32_to_cpu(dirinfo->ndist);
433 int diri_auth = -1;
434 int i;
435 int err = 0;
436
437 spin_lock(&ci->i_ceph_lock);
438 if (ci->i_auth_cap)
439 diri_auth = ci->i_auth_cap->mds;
440 spin_unlock(&ci->i_ceph_lock);
441
442 if (mds == -1) /* CDIR_AUTH_PARENT */
443 mds = diri_auth;
444
445 mutex_lock(&ci->i_fragtree_mutex);
446 if (ndist == 0 && mds == diri_auth) {
447 /* no delegation info needed. */
448 frag = __ceph_find_frag(ci, id);
449 if (!frag)
450 goto out;
451 if (frag->split_by == 0) {
452 /* tree leaf, remove */
453 doutc(cl, "removed %p %llx.%llx frag %x (no ref)\n",
454 inode, ceph_vinop(inode), id);
455 rb_erase(&frag->node, &ci->i_fragtree);
456 kfree(frag);
457 } else {
458 /* tree branch, keep and clear */
459 doutc(cl, "cleared %p %llx.%llx frag %x referral\n",
460 inode, ceph_vinop(inode), id);
461 frag->mds = -1;
462 frag->ndist = 0;
463 }
464 goto out;
465 }
466
467
468 /* find/add this frag to store mds delegation info */
469 frag = __get_or_create_frag(ci, id);
470 if (IS_ERR(frag)) {
471 /* this is not the end of the world; we can continue
472 with bad/inaccurate delegation info */
473 pr_err_client(cl, "ENOMEM on mds ref %p %llx.%llx fg %x\n",
474 inode, ceph_vinop(inode),
475 le32_to_cpu(dirinfo->frag));
476 err = -ENOMEM;
477 goto out;
478 }
479
480 frag->mds = mds;
481 frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
482 for (i = 0; i < frag->ndist; i++)
483 frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
484 doutc(cl, "%p %llx.%llx frag %x ndist=%d\n", inode,
485 ceph_vinop(inode), frag->frag, frag->ndist);
486
487 out:
488 mutex_unlock(&ci->i_fragtree_mutex);
489 return err;
490 }
491
static int frag_tree_split_cmp(const void *l, const void *r)
493 {
494 struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l;
495 struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r;
496 return ceph_frag_compare(le32_to_cpu(ls->frag),
497 le32_to_cpu(rs->frag));
498 }
499
static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
501 {
502 if (!frag)
503 return f == ceph_frag_make(0, 0);
504 if (ceph_frag_bits(f) != ceph_frag_bits(frag->frag) + frag->split_by)
505 return false;
506 return ceph_frag_contains_value(frag->frag, ceph_frag_value(f));
507 }
508
static int ceph_fill_fragtree(struct inode *inode,
			      struct ceph_frag_tree_head *fragtree,
			      struct ceph_mds_reply_dirfrag *dirinfo)
512 {
513 struct ceph_client *cl = ceph_inode_to_client(inode);
514 struct ceph_inode_info *ci = ceph_inode(inode);
515 struct ceph_inode_frag *frag, *prev_frag = NULL;
516 struct rb_node *rb_node;
517 unsigned i, split_by, nsplits;
518 u32 id;
519 bool update = false;
520
521 mutex_lock(&ci->i_fragtree_mutex);
522 nsplits = le32_to_cpu(fragtree->nsplits);
523 if (nsplits != ci->i_fragtree_nsplits) {
524 update = true;
525 } else if (nsplits) {
526 i = get_random_u32_below(nsplits);
527 id = le32_to_cpu(fragtree->splits[i].frag);
528 if (!__ceph_find_frag(ci, id))
529 update = true;
530 } else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) {
531 rb_node = rb_first(&ci->i_fragtree);
532 frag = rb_entry(rb_node, struct ceph_inode_frag, node);
533 if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node))
534 update = true;
535 }
536 if (!update && dirinfo) {
537 id = le32_to_cpu(dirinfo->frag);
538 if (id != __ceph_choose_frag(ci, id, NULL, NULL))
539 update = true;
540 }
541 if (!update)
542 goto out_unlock;
543
544 if (nsplits > 1) {
545 sort(fragtree->splits, nsplits, sizeof(fragtree->splits[0]),
546 frag_tree_split_cmp, NULL);
547 }
548
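	/*
	 * Walk the sorted split list and the cached fragtree in parallel:
	 * cached entries the MDS no longer reports are dropped (unless they
	 * are leaves under the previous split), and each reported split is
	 * inserted or refreshed.
	 */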
549 doutc(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode));
550 rb_node = rb_first(&ci->i_fragtree);
551 for (i = 0; i < nsplits; i++) {
552 id = le32_to_cpu(fragtree->splits[i].frag);
553 split_by = le32_to_cpu(fragtree->splits[i].by);
554 if (split_by == 0 || ceph_frag_bits(id) + split_by > 24) {
555 pr_err_client(cl, "%p %llx.%llx invalid split %d/%u, "
556 "frag %x split by %d\n", inode,
557 ceph_vinop(inode), i, nsplits, id, split_by);
558 continue;
559 }
560 frag = NULL;
561 while (rb_node) {
562 frag = rb_entry(rb_node, struct ceph_inode_frag, node);
563 if (ceph_frag_compare(frag->frag, id) >= 0) {
564 if (frag->frag != id)
565 frag = NULL;
566 else
567 rb_node = rb_next(rb_node);
568 break;
569 }
570 rb_node = rb_next(rb_node);
571 /* delete stale split/leaf node */
572 if (frag->split_by > 0 ||
573 !is_frag_child(frag->frag, prev_frag)) {
574 rb_erase(&frag->node, &ci->i_fragtree);
575 if (frag->split_by > 0)
576 ci->i_fragtree_nsplits--;
577 kfree(frag);
578 }
579 frag = NULL;
580 }
581 if (!frag) {
582 frag = __get_or_create_frag(ci, id);
583 if (IS_ERR(frag))
584 continue;
585 }
586 if (frag->split_by == 0)
587 ci->i_fragtree_nsplits++;
588 frag->split_by = split_by;
589 doutc(cl, " frag %x split by %d\n", frag->frag, frag->split_by);
590 prev_frag = frag;
591 }
592 while (rb_node) {
593 frag = rb_entry(rb_node, struct ceph_inode_frag, node);
594 rb_node = rb_next(rb_node);
595 /* delete stale split/leaf node */
596 if (frag->split_by > 0 ||
597 !is_frag_child(frag->frag, prev_frag)) {
598 rb_erase(&frag->node, &ci->i_fragtree);
599 if (frag->split_by > 0)
600 ci->i_fragtree_nsplits--;
601 kfree(frag);
602 }
603 }
604 out_unlock:
605 mutex_unlock(&ci->i_fragtree_mutex);
606 return 0;
607 }
608
609 /*
610 * initialize a newly allocated inode.
611 */
struct inode *ceph_alloc_inode(struct super_block *sb)
613 {
614 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
615 struct ceph_inode_info *ci;
616 int i;
617
618 ci = alloc_inode_sb(sb, ceph_inode_cachep, GFP_NOFS);
619 if (!ci)
620 return NULL;
621
622 doutc(fsc->client, "%p\n", &ci->netfs.inode);
623
624 /* Set parameters for the netfs library */
625 netfs_inode_init(&ci->netfs, &ceph_netfs_ops, false);
626
627 spin_lock_init(&ci->i_ceph_lock);
628
629 ci->i_version = 0;
630 ci->i_inline_version = 0;
631 ci->i_time_warp_seq = 0;
632 ci->i_ceph_flags = 0;
633 atomic64_set(&ci->i_ordered_count, 1);
634 atomic64_set(&ci->i_release_count, 1);
635 atomic64_set(&ci->i_complete_seq[0], 0);
636 atomic64_set(&ci->i_complete_seq[1], 0);
637 ci->i_symlink = NULL;
638
639 ci->i_max_bytes = 0;
640 ci->i_max_files = 0;
641
642 memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
643 memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
644 RCU_INIT_POINTER(ci->i_layout.pool_ns, NULL);
645
646 ci->i_fragtree = RB_ROOT;
647 mutex_init(&ci->i_fragtree_mutex);
648
649 ci->i_xattrs.blob = NULL;
650 ci->i_xattrs.prealloc_blob = NULL;
651 ci->i_xattrs.dirty = false;
652 ci->i_xattrs.index = RB_ROOT;
653 ci->i_xattrs.count = 0;
654 ci->i_xattrs.names_size = 0;
655 ci->i_xattrs.vals_size = 0;
656 ci->i_xattrs.version = 0;
657 ci->i_xattrs.index_version = 0;
658
659 ci->i_caps = RB_ROOT;
660 ci->i_auth_cap = NULL;
661 ci->i_dirty_caps = 0;
662 ci->i_flushing_caps = 0;
663 INIT_LIST_HEAD(&ci->i_dirty_item);
664 INIT_LIST_HEAD(&ci->i_flushing_item);
665 ci->i_prealloc_cap_flush = NULL;
666 INIT_LIST_HEAD(&ci->i_cap_flush_list);
667 init_waitqueue_head(&ci->i_cap_wq);
668 ci->i_hold_caps_max = 0;
669 INIT_LIST_HEAD(&ci->i_cap_delay_list);
670 INIT_LIST_HEAD(&ci->i_cap_snaps);
671 ci->i_head_snapc = NULL;
672 ci->i_snap_caps = 0;
673
674 ci->i_last_rd = ci->i_last_wr = jiffies - 3600 * HZ;
675 for (i = 0; i < CEPH_FILE_MODE_BITS; i++)
676 ci->i_nr_by_mode[i] = 0;
677
678 mutex_init(&ci->i_truncate_mutex);
679 ci->i_truncate_seq = 0;
680 ci->i_truncate_size = 0;
681 ci->i_truncate_pending = 0;
682 ci->i_truncate_pagecache_size = 0;
683
684 ci->i_max_size = 0;
685 ci->i_reported_size = 0;
686 ci->i_wanted_max_size = 0;
687 ci->i_requested_max_size = 0;
688
689 ci->i_pin_ref = 0;
690 ci->i_rd_ref = 0;
691 ci->i_rdcache_ref = 0;
692 ci->i_wr_ref = 0;
693 ci->i_wb_ref = 0;
694 ci->i_fx_ref = 0;
695 ci->i_wrbuffer_ref = 0;
696 ci->i_wrbuffer_ref_head = 0;
697 atomic_set(&ci->i_filelock_ref, 0);
698 atomic_set(&ci->i_shared_gen, 1);
699 ci->i_rdcache_gen = 0;
700 ci->i_rdcache_revoking = 0;
701
702 INIT_LIST_HEAD(&ci->i_unsafe_dirops);
703 INIT_LIST_HEAD(&ci->i_unsafe_iops);
704 spin_lock_init(&ci->i_unsafe_lock);
705
706 ci->i_snap_realm = NULL;
707 INIT_LIST_HEAD(&ci->i_snap_realm_item);
708 INIT_LIST_HEAD(&ci->i_snap_flush_item);
709
710 INIT_WORK(&ci->i_work, ceph_inode_work);
711 ci->i_work_mask = 0;
712 memset(&ci->i_btime, '\0', sizeof(ci->i_btime));
713 #ifdef CONFIG_FS_ENCRYPTION
714 ci->fscrypt_auth = NULL;
715 ci->fscrypt_auth_len = 0;
716 #endif
717 return &ci->netfs.inode;
718 }
719
void ceph_free_inode(struct inode *inode)
721 {
722 struct ceph_inode_info *ci = ceph_inode(inode);
723
724 kfree(ci->i_symlink);
725 #ifdef CONFIG_FS_ENCRYPTION
726 kfree(ci->fscrypt_auth);
727 #endif
728 fscrypt_free_inode(inode);
729 kmem_cache_free(ceph_inode_cachep, ci);
730 }
731
void ceph_evict_inode(struct inode *inode)
733 {
734 struct ceph_inode_info *ci = ceph_inode(inode);
735 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
736 struct ceph_client *cl = ceph_inode_to_client(inode);
737 struct ceph_inode_frag *frag;
738 struct rb_node *n;
739
740 doutc(cl, "%p ino %llx.%llx\n", inode, ceph_vinop(inode));
741
742 percpu_counter_dec(&mdsc->metric.total_inodes);
743
744 netfs_wait_for_outstanding_io(inode);
745 truncate_inode_pages_final(&inode->i_data);
746 if (inode->i_state & I_PINNING_NETFS_WB)
747 ceph_fscache_unuse_cookie(inode, true);
748 clear_inode(inode);
749
750 ceph_fscache_unregister_inode_cookie(ci);
751 fscrypt_put_encryption_info(inode);
752
753 __ceph_remove_caps(ci);
754
755 if (__ceph_has_quota(ci, QUOTA_GET_ANY))
756 ceph_adjust_quota_realms_count(inode, false);
757
758 /*
759 * we may still have a snap_realm reference if there are stray
760 * caps in i_snap_caps.
761 */
762 if (ci->i_snap_realm) {
763 if (ceph_snap(inode) == CEPH_NOSNAP) {
764 doutc(cl, " dropping residual ref to snap realm %p\n",
765 ci->i_snap_realm);
766 ceph_change_snap_realm(inode, NULL);
767 } else {
768 ceph_put_snapid_map(mdsc, ci->i_snapid_map);
769 ci->i_snap_realm = NULL;
770 }
771 }
772
773 while ((n = rb_first(&ci->i_fragtree)) != NULL) {
774 frag = rb_entry(n, struct ceph_inode_frag, node);
775 rb_erase(n, &ci->i_fragtree);
776 kfree(frag);
777 }
778 ci->i_fragtree_nsplits = 0;
779
780 __ceph_destroy_xattrs(ci);
781 if (ci->i_xattrs.blob)
782 ceph_buffer_put(ci->i_xattrs.blob);
783 if (ci->i_xattrs.prealloc_blob)
784 ceph_buffer_put(ci->i_xattrs.prealloc_blob);
785
786 ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns));
787 ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
788 }
789
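/* i_blocks is accounted in 512-byte units; round the byte size up. */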
static inline blkcnt_t calc_inode_blocks(u64 size)
791 {
792 return (size + (1<<9) - 1) >> 9;
793 }
794
795 /*
796 * Helpers to fill in size, ctime, mtime, and atime. We have to be
797 * careful because either the client or MDS may have more up to date
798 * info, depending on which capabilities are held, and whether
799 * time_warp_seq or truncate_seq have increased. (Ordinarily, mtime
800 * and size are monotonically increasing, except when utimes() or
801 * truncate() increments the corresponding _seq values.)
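 *
 * For example, a client holding write caps that extends a file knows a
 * newer size than the MDS until its caps are flushed, so a smaller MDS
 * size with an unchanged truncate_seq must not shrink the inode; a real
 * truncate bumps truncate_seq, telling us to accept the smaller size and
 * invalidate the affected page cache.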
802 */
int ceph_fill_file_size(struct inode *inode, int issued,
			u32 truncate_seq, u64 truncate_size, u64 size)
805 {
806 struct ceph_client *cl = ceph_inode_to_client(inode);
807 struct ceph_inode_info *ci = ceph_inode(inode);
808 int queue_trunc = 0;
809 loff_t isize = i_size_read(inode);
810
811 if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
812 (truncate_seq == ci->i_truncate_seq && size > isize)) {
813 doutc(cl, "size %lld -> %llu\n", isize, size);
814 if (size > 0 && S_ISDIR(inode->i_mode)) {
815 pr_err_client(cl, "non-zero size for directory\n");
816 size = 0;
817 }
818 i_size_write(inode, size);
819 inode->i_blocks = calc_inode_blocks(size);
820 /*
821 * If we're expanding, then we should be able to just update
822 * the existing cookie.
823 */
824 if (size > isize)
825 ceph_fscache_update(inode);
826 ci->i_reported_size = size;
827 if (truncate_seq != ci->i_truncate_seq) {
828 doutc(cl, "truncate_seq %u -> %u\n",
829 ci->i_truncate_seq, truncate_seq);
830 ci->i_truncate_seq = truncate_seq;
831
832 /* the MDS should have revoked these caps */
833 WARN_ON_ONCE(issued & (CEPH_CAP_FILE_RD |
834 CEPH_CAP_FILE_LAZYIO));
835 /*
836 * If we hold relevant caps, or in the case where we're
837 * not the only client referencing this file and we
838 * don't hold those caps, then we need to check whether
839 * the file is either opened or mmaped
840 */
841 if ((issued & (CEPH_CAP_FILE_CACHE|
842 CEPH_CAP_FILE_BUFFER)) ||
843 mapping_mapped(inode->i_mapping) ||
844 __ceph_is_file_opened(ci)) {
845 ci->i_truncate_pending++;
846 queue_trunc = 1;
847 }
848 }
849 }
850
851 /*
	 * Even when two consecutive truncations land in the same fscrypt
	 * last block, the corresponding page cache still needs to be
	 * truncated.
856 */
857 if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0) {
858 doutc(cl, "truncate_size %lld -> %llu, encrypted %d\n",
859 ci->i_truncate_size, truncate_size,
860 !!IS_ENCRYPTED(inode));
861
862 ci->i_truncate_size = truncate_size;
863
864 if (IS_ENCRYPTED(inode)) {
865 doutc(cl, "truncate_pagecache_size %lld -> %llu\n",
866 ci->i_truncate_pagecache_size, size);
867 ci->i_truncate_pagecache_size = size;
868 } else {
869 ci->i_truncate_pagecache_size = truncate_size;
870 }
871 }
872 return queue_trunc;
873 }
874
void ceph_fill_file_time(struct inode *inode, int issued,
			 u64 time_warp_seq, struct timespec64 *ctime,
			 struct timespec64 *mtime, struct timespec64 *atime)
878 {
879 struct ceph_client *cl = ceph_inode_to_client(inode);
880 struct ceph_inode_info *ci = ceph_inode(inode);
881 struct timespec64 ictime = inode_get_ctime(inode);
882 int warn = 0;
883
884 if (issued & (CEPH_CAP_FILE_EXCL|
885 CEPH_CAP_FILE_WR|
886 CEPH_CAP_FILE_BUFFER|
887 CEPH_CAP_AUTH_EXCL|
888 CEPH_CAP_XATTR_EXCL)) {
889 if (ci->i_version == 0 ||
890 timespec64_compare(ctime, &ictime) > 0) {
891 doutc(cl, "ctime %lld.%09ld -> %lld.%09ld inc w/ cap\n",
892 ictime.tv_sec, ictime.tv_nsec,
893 ctime->tv_sec, ctime->tv_nsec);
894 inode_set_ctime_to_ts(inode, *ctime);
895 }
896 if (ci->i_version == 0 ||
897 ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
898 /* the MDS did a utimes() */
899 doutc(cl, "mtime %lld.%09ld -> %lld.%09ld tw %d -> %d\n",
900 inode_get_mtime_sec(inode),
901 inode_get_mtime_nsec(inode),
902 mtime->tv_sec, mtime->tv_nsec,
903 ci->i_time_warp_seq, (int)time_warp_seq);
904
905 inode_set_mtime_to_ts(inode, *mtime);
906 inode_set_atime_to_ts(inode, *atime);
907 ci->i_time_warp_seq = time_warp_seq;
908 } else if (time_warp_seq == ci->i_time_warp_seq) {
909 struct timespec64 ts;
910
911 /* nobody did utimes(); take the max */
912 ts = inode_get_mtime(inode);
913 if (timespec64_compare(mtime, &ts) > 0) {
914 doutc(cl, "mtime %lld.%09ld -> %lld.%09ld inc\n",
915 ts.tv_sec, ts.tv_nsec,
916 mtime->tv_sec, mtime->tv_nsec);
917 inode_set_mtime_to_ts(inode, *mtime);
918 }
919 ts = inode_get_atime(inode);
920 if (timespec64_compare(atime, &ts) > 0) {
921 doutc(cl, "atime %lld.%09ld -> %lld.%09ld inc\n",
922 ts.tv_sec, ts.tv_nsec,
923 atime->tv_sec, atime->tv_nsec);
924 inode_set_atime_to_ts(inode, *atime);
925 }
926 } else if (issued & CEPH_CAP_FILE_EXCL) {
927 /* we did a utimes(); ignore mds values */
928 } else {
929 warn = 1;
930 }
931 } else {
932 /* we have no write|excl caps; whatever the MDS says is true */
933 if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
934 inode_set_ctime_to_ts(inode, *ctime);
935 inode_set_mtime_to_ts(inode, *mtime);
936 inode_set_atime_to_ts(inode, *atime);
937 ci->i_time_warp_seq = time_warp_seq;
938 } else {
939 warn = 1;
940 }
941 }
942 if (warn) /* time_warp_seq shouldn't go backwards */
943 doutc(cl, "%p mds time_warp_seq %llu < %u\n", inode,
944 time_warp_seq, ci->i_time_warp_seq);
945 }
946
947 #if IS_ENABLED(CONFIG_FS_ENCRYPTION)
static int decode_encrypted_symlink(struct ceph_mds_client *mdsc,
				    const char *encsym,
				    int enclen, u8 **decsym)
951 {
952 struct ceph_client *cl = mdsc->fsc->client;
953 int declen;
954 u8 *sym;
955
956 sym = kmalloc(enclen + 1, GFP_NOFS);
957 if (!sym)
958 return -ENOMEM;
959
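	/*
	 * base64 output is always smaller than its input, so the enclen + 1
	 * buffer is large enough for the decoded bytes plus a trailing NUL.
	 */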
960 declen = ceph_base64_decode(encsym, enclen, sym);
961 if (declen < 0) {
962 pr_err_client(cl,
963 "can't decode symlink (%d). Content: %.*s\n",
964 declen, enclen, encsym);
965 kfree(sym);
966 return -EIO;
967 }
	sym[declen] = '\0';
969 *decsym = sym;
970 return declen;
971 }
972 #else
static int decode_encrypted_symlink(struct ceph_mds_client *mdsc,
				    const char *encsym,
				    int symlen, u8 **decsym)
976 {
977 return -EOPNOTSUPP;
978 }
979 #endif
980
981 /*
982 * Populate an inode based on info from mds. May be called on new or
983 * existing inodes.
984 */
int ceph_fill_inode(struct inode *inode, struct page *locked_page,
		    struct ceph_mds_reply_info_in *iinfo,
		    struct ceph_mds_reply_dirfrag *dirinfo,
		    struct ceph_mds_session *session, int cap_fmode,
		    struct ceph_cap_reservation *caps_reservation)
990 {
991 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
992 struct ceph_client *cl = mdsc->fsc->client;
993 struct ceph_mds_reply_inode *info = iinfo->in;
994 struct ceph_inode_info *ci = ceph_inode(inode);
995 int issued, new_issued, info_caps;
996 struct timespec64 mtime, atime, ctime;
997 struct ceph_buffer *xattr_blob = NULL;
998 struct ceph_buffer *old_blob = NULL;
999 struct ceph_string *pool_ns = NULL;
1000 struct ceph_cap *new_cap = NULL;
1001 int err = 0;
1002 bool wake = false;
1003 bool queue_trunc = false;
1004 bool new_version = false;
1005 bool fill_inline = false;
1006 umode_t mode = le32_to_cpu(info->mode);
1007 dev_t rdev = le32_to_cpu(info->rdev);
1008
1009 lockdep_assert_held(&mdsc->snap_rwsem);
1010
1011 doutc(cl, "%p ino %llx.%llx v %llu had %llu\n", inode, ceph_vinop(inode),
1012 le64_to_cpu(info->version), ci->i_version);
1013
1014 /* Once I_NEW is cleared, we can't change type or dev numbers */
1015 if (inode->i_state & I_NEW) {
1016 inode->i_mode = mode;
1017 } else {
1018 if (inode_wrong_type(inode, mode)) {
1019 pr_warn_once_client(cl,
1020 "inode type changed! (ino %llx.%llx is 0%o, mds says 0%o)\n",
1021 ceph_vinop(inode), inode->i_mode, mode);
1022 return -ESTALE;
1023 }
1024
1025 if ((S_ISCHR(mode) || S_ISBLK(mode)) && inode->i_rdev != rdev) {
1026 pr_warn_once_client(cl,
1027 "dev inode rdev changed! (ino %llx.%llx is %u:%u, mds says %u:%u)\n",
1028 ceph_vinop(inode), MAJOR(inode->i_rdev),
1029 MINOR(inode->i_rdev), MAJOR(rdev),
1030 MINOR(rdev));
1031 return -ESTALE;
1032 }
1033 }
1034
1035 info_caps = le32_to_cpu(info->cap.caps);
1036
1037 /* prealloc new cap struct */
1038 if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) {
1039 new_cap = ceph_get_cap(mdsc, caps_reservation);
1040 if (!new_cap)
1041 return -ENOMEM;
1042 }
1043
1044 /*
1045 * prealloc xattr data, if it looks like we'll need it. only
1046 * if len > 4 (meaning there are actually xattrs; the first 4
1047 * bytes are the xattr count).
1048 */
1049 if (iinfo->xattr_len > 4) {
1050 xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
1051 if (!xattr_blob)
1052 pr_err_client(cl, "ENOMEM xattr blob %d bytes\n",
1053 iinfo->xattr_len);
1054 }
1055
1056 if (iinfo->pool_ns_len > 0)
1057 pool_ns = ceph_find_or_create_string(iinfo->pool_ns_data,
1058 iinfo->pool_ns_len);
1059
1060 if (ceph_snap(inode) != CEPH_NOSNAP && !ci->i_snapid_map)
1061 ci->i_snapid_map = ceph_get_snapid_map(mdsc, ceph_snap(inode));
1062
1063 spin_lock(&ci->i_ceph_lock);
1064
1065 /*
1066 * provided version will be odd if inode value is projected,
1067 * even if stable. skip the update if we have newer stable
1068 * info (ours>=theirs, e.g. due to racing mds replies), unless
1069 * we are getting projected (unstable) info (in which case the
1070 * version is odd, and we want ours>theirs).
1071 * us them
1072 * 2 2 skip
1073 * 3 2 skip
1074 * 3 3 update
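	 *
	 * Masking off the low bit of our version below ("ci->i_version & ~1")
	 * implements this table: an incoming version equal to our projected
	 * (odd) version still wins, while an equal or older stable version
	 * is skipped.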
1075 */
1076 if (ci->i_version == 0 ||
1077 ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
1078 le64_to_cpu(info->version) > (ci->i_version & ~1)))
1079 new_version = true;
1080
1081 /* Update change_attribute */
1082 inode_set_max_iversion_raw(inode, iinfo->change_attr);
1083
1084 __ceph_caps_issued(ci, &issued);
1085 issued |= __ceph_caps_dirty(ci);
1086 new_issued = ~issued & info_caps;
1087
1088 __ceph_update_quota(ci, iinfo->max_bytes, iinfo->max_files);
1089
1090 #ifdef CONFIG_FS_ENCRYPTION
1091 if (iinfo->fscrypt_auth_len &&
1092 ((inode->i_state & I_NEW) || (ci->fscrypt_auth_len == 0))) {
1093 kfree(ci->fscrypt_auth);
1094 ci->fscrypt_auth_len = iinfo->fscrypt_auth_len;
1095 ci->fscrypt_auth = iinfo->fscrypt_auth;
1096 iinfo->fscrypt_auth = NULL;
1097 iinfo->fscrypt_auth_len = 0;
1098 inode_set_flags(inode, S_ENCRYPTED, S_ENCRYPTED);
1099 }
1100 #endif
1101
1102 if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
1103 (issued & CEPH_CAP_AUTH_EXCL) == 0) {
1104 inode->i_mode = mode;
1105 inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
1106 inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
1107 doutc(cl, "%p %llx.%llx mode 0%o uid.gid %d.%d\n", inode,
1108 ceph_vinop(inode), inode->i_mode,
1109 from_kuid(&init_user_ns, inode->i_uid),
1110 from_kgid(&init_user_ns, inode->i_gid));
1111 ceph_decode_timespec64(&ci->i_btime, &iinfo->btime);
1112 ceph_decode_timespec64(&ci->i_snap_btime, &iinfo->snap_btime);
1113 }
1114
1115 /* directories have fl_stripe_unit set to zero */
1116 if (IS_ENCRYPTED(inode))
1117 inode->i_blkbits = CEPH_FSCRYPT_BLOCK_SHIFT;
1118 else if (le32_to_cpu(info->layout.fl_stripe_unit))
1119 inode->i_blkbits =
1120 fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
1121 else
1122 inode->i_blkbits = CEPH_BLOCK_SHIFT;
1123
1124 if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
1125 (issued & CEPH_CAP_LINK_EXCL) == 0)
1126 set_nlink(inode, le32_to_cpu(info->nlink));
1127
1128 if (new_version || (new_issued & CEPH_CAP_ANY_RD)) {
1129 /* be careful with mtime, atime, size */
1130 ceph_decode_timespec64(&atime, &info->atime);
1131 ceph_decode_timespec64(&mtime, &info->mtime);
1132 ceph_decode_timespec64(&ctime, &info->ctime);
1133 ceph_fill_file_time(inode, issued,
1134 le32_to_cpu(info->time_warp_seq),
1135 &ctime, &mtime, &atime);
1136 }
1137
1138 if (new_version || (info_caps & CEPH_CAP_FILE_SHARED)) {
1139 ci->i_files = le64_to_cpu(info->files);
1140 ci->i_subdirs = le64_to_cpu(info->subdirs);
1141 }
1142
1143 if (new_version ||
1144 (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
1145 u64 size = le64_to_cpu(info->size);
1146 s64 old_pool = ci->i_layout.pool_id;
1147 struct ceph_string *old_ns;
1148
1149 ceph_file_layout_from_legacy(&ci->i_layout, &info->layout);
1150 old_ns = rcu_dereference_protected(ci->i_layout.pool_ns,
1151 lockdep_is_held(&ci->i_ceph_lock));
1152 rcu_assign_pointer(ci->i_layout.pool_ns, pool_ns);
1153
1154 if (ci->i_layout.pool_id != old_pool || pool_ns != old_ns)
1155 ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;
1156
1157 pool_ns = old_ns;
1158
1159 if (IS_ENCRYPTED(inode) && size &&
1160 iinfo->fscrypt_file_len == sizeof(__le64)) {
1161 u64 fsize = __le64_to_cpu(*(__le64 *)iinfo->fscrypt_file);
1162
1163 if (size == round_up(fsize, CEPH_FSCRYPT_BLOCK_SIZE)) {
1164 size = fsize;
1165 } else {
1166 pr_warn_client(cl,
1167 "fscrypt size mismatch: size=%llu fscrypt_file=%llu, discarding fscrypt_file size.\n",
1168 info->size, size);
1169 }
1170 }
1171
1172 queue_trunc = ceph_fill_file_size(inode, issued,
1173 le32_to_cpu(info->truncate_seq),
1174 le64_to_cpu(info->truncate_size),
1175 size);
1176 /* only update max_size on auth cap */
1177 if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
1178 ci->i_max_size != le64_to_cpu(info->max_size)) {
1179 doutc(cl, "max_size %lld -> %llu\n",
1180 ci->i_max_size, le64_to_cpu(info->max_size));
1181 ci->i_max_size = le64_to_cpu(info->max_size);
1182 }
1183 }
1184
1185 /* layout and rstat are not tracked by capability, update them if
1186 * the inode info is from auth mds */
1187 if (new_version || (info->cap.flags & CEPH_CAP_FLAG_AUTH)) {
1188 if (S_ISDIR(inode->i_mode)) {
1189 ci->i_dir_layout = iinfo->dir_layout;
1190 ci->i_rbytes = le64_to_cpu(info->rbytes);
1191 ci->i_rfiles = le64_to_cpu(info->rfiles);
1192 ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
1193 ci->i_dir_pin = iinfo->dir_pin;
1194 ci->i_rsnaps = iinfo->rsnaps;
1195 ceph_decode_timespec64(&ci->i_rctime, &info->rctime);
1196 }
1197 }
1198
1199 /* xattrs */
1200 /* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
1201 if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) &&
1202 le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
1203 if (ci->i_xattrs.blob)
1204 old_blob = ci->i_xattrs.blob;
1205 ci->i_xattrs.blob = xattr_blob;
1206 if (xattr_blob)
1207 memcpy(ci->i_xattrs.blob->vec.iov_base,
1208 iinfo->xattr_data, iinfo->xattr_len);
1209 ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
1210 ceph_forget_all_cached_acls(inode);
1211 ceph_security_invalidate_secctx(inode);
1212 xattr_blob = NULL;
1213 }
1214
1215 /* finally update i_version */
1216 if (le64_to_cpu(info->version) > ci->i_version)
1217 ci->i_version = le64_to_cpu(info->version);
1218
1219 inode->i_mapping->a_ops = &ceph_aops;
1220
1221 switch (inode->i_mode & S_IFMT) {
1222 case S_IFIFO:
1223 case S_IFBLK:
1224 case S_IFCHR:
1225 case S_IFSOCK:
1226 inode->i_blkbits = PAGE_SHIFT;
1227 init_special_inode(inode, inode->i_mode, rdev);
1228 inode->i_op = &ceph_file_iops;
1229 break;
1230 case S_IFREG:
1231 inode->i_op = &ceph_file_iops;
1232 inode->i_fop = &ceph_file_fops;
1233 break;
1234 case S_IFLNK:
1235 if (!ci->i_symlink) {
1236 u32 symlen = iinfo->symlink_len;
1237 char *sym;
1238
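			/* drop i_ceph_lock: the allocations below may sleep */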
1239 spin_unlock(&ci->i_ceph_lock);
1240
1241 if (IS_ENCRYPTED(inode)) {
1242 if (symlen != i_size_read(inode))
1243 pr_err_client(cl,
1244 "%p %llx.%llx BAD symlink size %lld\n",
1245 inode, ceph_vinop(inode),
1246 i_size_read(inode));
1247
1248 err = decode_encrypted_symlink(mdsc, iinfo->symlink,
1249 symlen, (u8 **)&sym);
1250 if (err < 0) {
1251 pr_err_client(cl,
1252 "decoding encrypted symlink failed: %d\n",
1253 err);
1254 goto out;
1255 }
1256 symlen = err;
1257 i_size_write(inode, symlen);
1258 inode->i_blocks = calc_inode_blocks(symlen);
1259 } else {
1260 if (symlen != i_size_read(inode)) {
1261 pr_err_client(cl,
1262 "%p %llx.%llx BAD symlink size %lld\n",
1263 inode, ceph_vinop(inode),
1264 i_size_read(inode));
1265 i_size_write(inode, symlen);
1266 inode->i_blocks = calc_inode_blocks(symlen);
1267 }
1268
1269 err = -ENOMEM;
1270 sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
1271 if (!sym)
1272 goto out;
1273 }
1274
1275 spin_lock(&ci->i_ceph_lock);
1276 if (!ci->i_symlink)
1277 ci->i_symlink = sym;
1278 else
1279 kfree(sym); /* lost a race */
1280 }
1281
1282 if (IS_ENCRYPTED(inode)) {
1283 /*
1284 * Encrypted symlinks need to be decrypted before we can
1285 * cache their targets in i_link. Don't touch it here.
1286 */
1287 inode->i_op = &ceph_encrypted_symlink_iops;
1288 } else {
1289 inode->i_link = ci->i_symlink;
1290 inode->i_op = &ceph_symlink_iops;
1291 }
1292 break;
1293 case S_IFDIR:
1294 inode->i_op = &ceph_dir_iops;
1295 inode->i_fop = &ceph_dir_fops;
1296 break;
1297 default:
1298 pr_err_client(cl, "%p %llx.%llx BAD mode 0%o\n", inode,
1299 ceph_vinop(inode), inode->i_mode);
1300 }
1301
1302 /* were we issued a capability? */
1303 if (info_caps) {
1304 if (ceph_snap(inode) == CEPH_NOSNAP) {
1305 ceph_add_cap(inode, session,
1306 le64_to_cpu(info->cap.cap_id),
1307 info_caps,
1308 le32_to_cpu(info->cap.wanted),
1309 le32_to_cpu(info->cap.seq),
1310 le32_to_cpu(info->cap.mseq),
1311 le64_to_cpu(info->cap.realm),
1312 info->cap.flags, &new_cap);
1313
1314 /* set dir completion flag? */
1315 if (S_ISDIR(inode->i_mode) &&
1316 ci->i_files == 0 && ci->i_subdirs == 0 &&
1317 (info_caps & CEPH_CAP_FILE_SHARED) &&
1318 (issued & CEPH_CAP_FILE_EXCL) == 0 &&
1319 !__ceph_dir_is_complete(ci)) {
1320 doutc(cl, " marking %p complete (empty)\n",
1321 inode);
1322 i_size_write(inode, 0);
1323 __ceph_dir_set_complete(ci,
1324 atomic64_read(&ci->i_release_count),
1325 atomic64_read(&ci->i_ordered_count));
1326 }
1327
1328 wake = true;
1329 } else {
1330 doutc(cl, " %p got snap_caps %s\n", inode,
1331 ceph_cap_string(info_caps));
1332 ci->i_snap_caps |= info_caps;
1333 }
1334 }
1335
1336 if (iinfo->inline_version > 0 &&
1337 iinfo->inline_version >= ci->i_inline_version) {
1338 int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
1339 ci->i_inline_version = iinfo->inline_version;
1340 if (ceph_has_inline_data(ci) &&
1341 (locked_page || (info_caps & cache_caps)))
1342 fill_inline = true;
1343 }
1344
1345 if (cap_fmode >= 0) {
1346 if (!info_caps)
1347 pr_warn_client(cl, "mds issued no caps on %llx.%llx\n",
1348 ceph_vinop(inode));
1349 __ceph_touch_fmode(ci, mdsc, cap_fmode);
1350 }
1351
1352 spin_unlock(&ci->i_ceph_lock);
1353
1354 ceph_fscache_register_inode_cookie(inode);
1355
1356 if (fill_inline)
1357 ceph_fill_inline_data(inode, locked_page,
1358 iinfo->inline_data, iinfo->inline_len);
1359
1360 if (wake)
1361 wake_up_all(&ci->i_cap_wq);
1362
1363 /* queue truncate if we saw i_size decrease */
1364 if (queue_trunc)
1365 ceph_queue_vmtruncate(inode);
1366
1367 /* populate frag tree */
1368 if (S_ISDIR(inode->i_mode))
1369 ceph_fill_fragtree(inode, &info->fragtree, dirinfo);
1370
1371 /* update delegation info? */
1372 if (dirinfo)
1373 ceph_fill_dirfrag(inode, dirinfo);
1374
1375 err = 0;
1376 out:
1377 if (new_cap)
1378 ceph_put_cap(mdsc, new_cap);
1379 ceph_buffer_put(old_blob);
1380 ceph_buffer_put(xattr_blob);
1381 ceph_put_string(pool_ns);
1382 return err;
1383 }
1384
1385 /*
1386 * caller should hold session s_mutex and dentry->d_lock.
1387 */
static void __update_dentry_lease(struct inode *dir, struct dentry *dentry,
				  struct ceph_mds_reply_lease *lease,
				  struct ceph_mds_session *session,
				  unsigned long from_time,
				  struct ceph_mds_session **old_lease_session)
1393 {
1394 struct ceph_client *cl = ceph_inode_to_client(dir);
1395 struct ceph_dentry_info *di = ceph_dentry(dentry);
1396 unsigned mask = le16_to_cpu(lease->mask);
1397 long unsigned duration = le32_to_cpu(lease->duration_ms);
1398 long unsigned ttl = from_time + (duration * HZ) / 1000;
1399 long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
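	/* lease duration comes in ms; ttl and the half-way renew point are jiffies */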
1400
1401 doutc(cl, "%p duration %lu ms ttl %lu\n", dentry, duration, ttl);
1402
1403 /* only track leases on regular dentries */
1404 if (ceph_snap(dir) != CEPH_NOSNAP)
1405 return;
1406
1407 if (mask & CEPH_LEASE_PRIMARY_LINK)
1408 di->flags |= CEPH_DENTRY_PRIMARY_LINK;
1409 else
1410 di->flags &= ~CEPH_DENTRY_PRIMARY_LINK;
1411
1412 di->lease_shared_gen = atomic_read(&ceph_inode(dir)->i_shared_gen);
1413 if (!(mask & CEPH_LEASE_VALID)) {
1414 __ceph_dentry_dir_lease_touch(di);
1415 return;
1416 }
1417
1418 if (di->lease_gen == atomic_read(&session->s_cap_gen) &&
1419 time_before(ttl, di->time))
1420 return; /* we already have a newer lease. */
1421
1422 if (di->lease_session && di->lease_session != session) {
1423 *old_lease_session = di->lease_session;
1424 di->lease_session = NULL;
1425 }
1426
1427 if (!di->lease_session)
1428 di->lease_session = ceph_get_mds_session(session);
1429 di->lease_gen = atomic_read(&session->s_cap_gen);
1430 di->lease_seq = le32_to_cpu(lease->seq);
1431 di->lease_renew_after = half_ttl;
1432 di->lease_renew_from = 0;
1433 di->time = ttl;
1434
1435 __ceph_dentry_lease_touch(di);
1436 }
1437
static inline void update_dentry_lease(struct inode *dir, struct dentry *dentry,
				       struct ceph_mds_reply_lease *lease,
				       struct ceph_mds_session *session,
				       unsigned long from_time)
1442 {
1443 struct ceph_mds_session *old_lease_session = NULL;
1444 spin_lock(&dentry->d_lock);
1445 __update_dentry_lease(dir, dentry, lease, session, from_time,
1446 &old_lease_session);
1447 spin_unlock(&dentry->d_lock);
1448 ceph_put_mds_session(old_lease_session);
1449 }
1450
1451 /*
1452 * update dentry lease without having parent inode locked
1453 */
static void update_dentry_lease_careful(struct dentry *dentry,
					struct ceph_mds_reply_lease *lease,
					struct ceph_mds_session *session,
					unsigned long from_time,
					char *dname, u32 dname_len,
					struct ceph_vino *pdvino,
					struct ceph_vino *ptvino)
1461
1462 {
1463 struct inode *dir;
1464 struct ceph_mds_session *old_lease_session = NULL;
1465
1466 spin_lock(&dentry->d_lock);
1467 /* make sure dentry's name matches target */
1468 if (dentry->d_name.len != dname_len ||
1469 memcmp(dentry->d_name.name, dname, dname_len))
1470 goto out_unlock;
1471
1472 dir = d_inode(dentry->d_parent);
1473 /* make sure parent matches dvino */
1474 if (!ceph_ino_compare(dir, pdvino))
1475 goto out_unlock;
1476
1477 /* make sure dentry's inode matches target. NULL ptvino means that
1478 * we expect a negative dentry */
1479 if (ptvino) {
1480 if (d_really_is_negative(dentry))
1481 goto out_unlock;
1482 if (!ceph_ino_compare(d_inode(dentry), ptvino))
1483 goto out_unlock;
1484 } else {
1485 if (d_really_is_positive(dentry))
1486 goto out_unlock;
1487 }
1488
1489 __update_dentry_lease(dir, dentry, lease, session,
1490 from_time, &old_lease_session);
1491 out_unlock:
1492 spin_unlock(&dentry->d_lock);
1493 ceph_put_mds_session(old_lease_session);
1494 }
1495
1496 /*
1497 * splice a dentry to an inode.
1498 * caller must hold directory i_rwsem for this to be safe.
1499 */
static int splice_dentry(struct dentry **pdn, struct inode *in)
1501 {
1502 struct ceph_client *cl = ceph_inode_to_client(in);
1503 struct dentry *dn = *pdn;
1504 struct dentry *realdn;
1505
1506 BUG_ON(d_inode(dn));
1507
1508 if (S_ISDIR(in->i_mode)) {
1509 /* If inode is directory, d_splice_alias() below will remove
1510 * 'realdn' from its origin parent. We need to ensure that
1511 * origin parent's readdir cache will not reference 'realdn'
1512 */
1513 realdn = d_find_any_alias(in);
1514 if (realdn) {
1515 struct ceph_dentry_info *di = ceph_dentry(realdn);
1516 spin_lock(&realdn->d_lock);
1517
1518 realdn->d_op->d_prune(realdn);
1519
1520 di->time = jiffies;
1521 di->lease_shared_gen = 0;
1522 di->offset = 0;
1523
1524 spin_unlock(&realdn->d_lock);
1525 dput(realdn);
1526 }
1527 }
1528
1529 /* dn must be unhashed */
1530 if (!d_unhashed(dn))
1531 d_drop(dn);
1532 realdn = d_splice_alias(in, dn);
1533 if (IS_ERR(realdn)) {
1534 pr_err_client(cl, "error %ld %p inode %p ino %llx.%llx\n",
1535 PTR_ERR(realdn), dn, in, ceph_vinop(in));
1536 return PTR_ERR(realdn);
1537 }
1538
1539 if (realdn) {
1540 doutc(cl, "dn %p (%d) spliced with %p (%d) inode %p ino %llx.%llx\n",
1541 dn, d_count(dn), realdn, d_count(realdn),
1542 d_inode(realdn), ceph_vinop(d_inode(realdn)));
1543 dput(dn);
1544 *pdn = realdn;
1545 } else {
1546 BUG_ON(!ceph_dentry(dn));
1547 doutc(cl, "dn %p attached to %p ino %llx.%llx\n", dn,
1548 d_inode(dn), ceph_vinop(d_inode(dn)));
1549 }
1550 return 0;
1551 }
1552
1553 /*
1554 * Incorporate results into the local cache. This is either just
1555 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
1556 * after a lookup).
1557 *
1558 * A reply may contain
1559 * a directory inode along with a dentry.
1560 * and/or a target inode
1561 *
1562 * Called with snap_rwsem (read).
1563 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
1565 {
1566 struct ceph_mds_session *session = req->r_session;
1567 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1568 struct inode *in = NULL;
1569 struct ceph_vino tvino, dvino;
1570 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
1571 struct ceph_client *cl = fsc->client;
1572 struct inode *parent_dir = NULL;
1573 int err = 0;
1574
1575 doutc(cl, "%p is_dentry %d is_target %d\n", req,
1576 rinfo->head->is_dentry, rinfo->head->is_target);
1577
1578 if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
1579 doutc(cl, "reply is empty!\n");
1580 if (rinfo->head->result == 0 && req->r_parent)
1581 ceph_invalidate_dir_request(req);
1582 return 0;
1583 }
1584
1585 if (rinfo->head->is_dentry) {
1586 /*
		 * r_parent may be stale when R_PARENT_LOCKED is not set, so
		 * we need to look up the correct inode
1589 */
1590 parent_dir = ceph_get_reply_dir(sb, req->r_parent, rinfo);
1591 if (unlikely(IS_ERR(parent_dir))) {
1592 err = PTR_ERR(parent_dir);
1593 goto done;
1594 }
1595 if (parent_dir) {
1596 err = ceph_fill_inode(parent_dir, NULL, &rinfo->diri,
1597 rinfo->dirfrag, session, -1,
1598 &req->r_caps_reservation);
1599 if (err < 0)
1600 goto done;
1601 } else {
1602 WARN_ON_ONCE(1);
1603 }
1604
1605 if (parent_dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME &&
1606 test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1607 !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
1608 bool is_nokey = false;
1609 struct qstr dname;
1610 struct dentry *dn, *parent;
1611 struct fscrypt_str oname = FSTR_INIT(NULL, 0);
1612 struct ceph_fname fname = { .dir = parent_dir,
1613 .name = rinfo->dname,
1614 .ctext = rinfo->altname,
1615 .name_len = rinfo->dname_len,
1616 .ctext_len = rinfo->altname_len };
1617
1618 BUG_ON(!rinfo->head->is_target);
1619 BUG_ON(req->r_dentry);
1620
1621 parent = d_find_any_alias(parent_dir);
1622 BUG_ON(!parent);
1623
1624 err = ceph_fname_alloc_buffer(parent_dir, &oname);
1625 if (err < 0) {
1626 dput(parent);
1627 goto done;
1628 }
1629
1630 err = ceph_fname_to_usr(&fname, NULL, &oname, &is_nokey);
1631 if (err < 0) {
1632 dput(parent);
1633 ceph_fname_free_buffer(parent_dir, &oname);
1634 goto done;
1635 }
1636 dname.name = oname.name;
1637 dname.len = oname.len;
1638 dname.hash = full_name_hash(parent, dname.name, dname.len);
1639 tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1640 tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1641 retry_lookup:
1642 dn = d_lookup(parent, &dname);
1643 doutc(cl, "d_lookup on parent=%p name=%.*s got %p\n",
1644 parent, dname.len, dname.name, dn);
1645
1646 if (!dn) {
1647 dn = d_alloc(parent, &dname);
1648 doutc(cl, "d_alloc %p '%.*s' = %p\n", parent,
1649 dname.len, dname.name, dn);
1650 if (!dn) {
1651 dput(parent);
1652 ceph_fname_free_buffer(parent_dir, &oname);
1653 err = -ENOMEM;
1654 goto done;
1655 }
1656 if (is_nokey) {
1657 spin_lock(&dn->d_lock);
1658 dn->d_flags |= DCACHE_NOKEY_NAME;
1659 spin_unlock(&dn->d_lock);
1660 }
1661 err = 0;
1662 } else if (d_really_is_positive(dn) &&
1663 (ceph_ino(d_inode(dn)) != tvino.ino ||
1664 ceph_snap(d_inode(dn)) != tvino.snap)) {
1665 doutc(cl, " dn %p points to wrong inode %p\n",
1666 dn, d_inode(dn));
1667 ceph_dir_clear_ordered(parent_dir);
1668 d_delete(dn);
1669 dput(dn);
1670 goto retry_lookup;
1671 }
1672 ceph_fname_free_buffer(parent_dir, &oname);
1673
1674 req->r_dentry = dn;
1675 dput(parent);
1676 }
1677 }
1678
1679 if (rinfo->head->is_target) {
1680 /* Should be filled in by handle_reply */
1681 BUG_ON(!req->r_target_inode);
1682
1683 in = req->r_target_inode;
1684 err = ceph_fill_inode(in, req->r_locked_page, &rinfo->targeti,
1685 NULL, session,
1686 (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
1687 !test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags) &&
1688 rinfo->head->result == 0) ? req->r_fmode : -1,
1689 &req->r_caps_reservation);
1690 if (err < 0) {
1691 pr_err_client(cl, "badness %p %llx.%llx\n", in,
1692 ceph_vinop(in));
1693 req->r_target_inode = NULL;
1694 if (in->i_state & I_NEW)
1695 discard_new_inode(in);
1696 else
1697 iput(in);
1698 goto done;
1699 }
1700 if (in->i_state & I_NEW)
1701 unlock_new_inode(in);
1702 }
1703
1704 /*
1705 * ignore null lease/binding on snapdir ENOENT, or else we
1706 * will have trouble splicing in the virtual snapdir later
1707 */
1708 if (rinfo->head->is_dentry &&
1709 !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
1710 test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1711 (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
1712 fsc->mount_options->snapdir_name,
1713 req->r_dentry->d_name.len))) {
1714 /*
1715 * lookup link rename : null -> possibly existing inode
1716 * mknod symlink mkdir : null -> new inode
1717 * unlink : linked -> null
1718 */
1719 struct inode *dir = req->r_parent;
1720 struct dentry *dn = req->r_dentry;
1721 bool have_dir_cap, have_lease;
1722
1723 BUG_ON(!dn);
1724 BUG_ON(!dir);
1725 BUG_ON(d_inode(dn->d_parent) != dir);
1726
1727 dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
1728 dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);
1729
1730 BUG_ON(ceph_ino(dir) != dvino.ino);
1731 BUG_ON(ceph_snap(dir) != dvino.snap);
1732
1733 /* do we have a lease on the whole dir? */
1734 have_dir_cap =
1735 (le32_to_cpu(rinfo->diri.in->cap.caps) &
1736 CEPH_CAP_FILE_SHARED);
1737
1738 /* do we have a dn lease? */
1739 have_lease = have_dir_cap ||
1740 le32_to_cpu(rinfo->dlease->duration_ms);
1741 if (!have_lease)
1742 doutc(cl, "no dentry lease or dir cap\n");
1743
1744 /* rename? */
1745 if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
1746 struct inode *olddir = req->r_old_dentry_dir;
1747 BUG_ON(!olddir);
1748
1749 doutc(cl, " src %p '%pd' dst %p '%pd'\n",
1750 req->r_old_dentry, req->r_old_dentry, dn, dn);
1751 doutc(cl, "doing d_move %p -> %p\n", req->r_old_dentry, dn);
1752
1753 /* d_move screws up sibling dentries' offsets */
1754 ceph_dir_clear_ordered(dir);
1755 ceph_dir_clear_ordered(olddir);
1756
1757 d_move(req->r_old_dentry, dn);
1758 doutc(cl, " src %p '%pd' dst %p '%pd'\n",
1759 req->r_old_dentry, req->r_old_dentry, dn, dn);
1760
1761 /* ensure target dentry is invalidated, despite
1762 rehashing bug in vfs_rename_dir */
1763 ceph_invalidate_dentry_lease(dn);
1764
1765 doutc(cl, "dn %p gets new offset %lld\n",
1766 req->r_old_dentry,
1767 ceph_dentry(req->r_old_dentry)->offset);
1768
1769 /* swap r_dentry and r_old_dentry in case that
1770 * splice_dentry() gets called later. This is safe
1771 * because no other place will use them */
1772 req->r_dentry = req->r_old_dentry;
1773 req->r_old_dentry = dn;
1774 dn = req->r_dentry;
1775 }
1776
1777 /* null dentry? */
1778 if (!rinfo->head->is_target) {
1779 doutc(cl, "null dentry\n");
1780 if (d_really_is_positive(dn)) {
1781 doutc(cl, "d_delete %p\n", dn);
1782 ceph_dir_clear_ordered(dir);
1783 d_delete(dn);
1784 } else if (have_lease) {
1785 if (d_unhashed(dn))
1786 d_add(dn, NULL);
1787 }
1788
1789 if (!d_unhashed(dn) && have_lease)
1790 update_dentry_lease(dir, dn,
1791 rinfo->dlease, session,
1792 req->r_request_started);
1793 goto done;
1794 }
1795
1796 /* attach proper inode */
1797 if (d_really_is_negative(dn)) {
1798 ceph_dir_clear_ordered(dir);
1799 ihold(in);
1800 err = splice_dentry(&req->r_dentry, in);
1801 if (err < 0)
1802 goto done;
1803 dn = req->r_dentry; /* may have spliced */
1804 } else if (d_really_is_positive(dn) && d_inode(dn) != in) {
1805 doutc(cl, " %p links to %p %llx.%llx, not %llx.%llx\n",
1806 dn, d_inode(dn), ceph_vinop(d_inode(dn)),
1807 ceph_vinop(in));
1808 d_invalidate(dn);
1809 have_lease = false;
1810 }
1811
1812 if (have_lease) {
1813 update_dentry_lease(dir, dn,
1814 rinfo->dlease, session,
1815 req->r_request_started);
1816 }
1817 doutc(cl, " final dn %p\n", dn);
1818 } else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
1819 req->r_op == CEPH_MDS_OP_MKSNAP) &&
1820 test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1821 !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
1822 struct inode *dir = req->r_parent;
1823
1824 /* fill out a snapdir LOOKUPSNAP dentry */
1825 BUG_ON(!dir);
1826 BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
1827 BUG_ON(!req->r_dentry);
1828 doutc(cl, " linking snapped dir %p to dn %p\n", in,
1829 req->r_dentry);
1830 ceph_dir_clear_ordered(dir);
1831 ihold(in);
1832 err = splice_dentry(&req->r_dentry, in);
1833 if (err < 0)
1834 goto done;
1835 } else if (rinfo->head->is_dentry && req->r_dentry) {
1836 /* parent inode is not locked, be careful */
1837 struct ceph_vino *ptvino = NULL;
1838 dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
1839 dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);
1840 if (rinfo->head->is_target) {
1841 tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1842 tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1843 ptvino = &tvino;
1844 }
1845 update_dentry_lease_careful(req->r_dentry, rinfo->dlease,
1846 session, req->r_request_started,
1847 rinfo->dname, rinfo->dname_len,
1848 &dvino, ptvino);
1849 }
1850 done:
1851 /* Drop extra ref from ceph_get_reply_dir() if it returned a new inode */
1852 if (unlikely(!IS_ERR_OR_NULL(parent_dir) && parent_dir != req->r_parent))
1853 iput(parent_dir);
1854 doutc(cl, "done err=%d\n", err);
1855 return err;
1856 }
1857
1858 /*
1859 * Prepopulate our cache with readdir results, leases, etc.
1860 */
1861 static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
1862 struct ceph_mds_session *session)
1863 {
1864 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1865 struct ceph_client *cl = session->s_mdsc->fsc->client;
1866 int i, err = 0;
1867
1868 for (i = 0; i < rinfo->dir_nr; i++) {
1869 struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
1870 struct ceph_vino vino;
1871 struct inode *in;
1872 int rc;
1873
1874 vino.ino = le64_to_cpu(rde->inode.in->ino);
1875 vino.snap = le64_to_cpu(rde->inode.in->snapid);
1876
1877 in = ceph_get_inode(req->r_dentry->d_sb, vino, NULL);
1878 if (IS_ERR(in)) {
1879 err = PTR_ERR(in);
1880 doutc(cl, "badness got %d\n", err);
1881 continue;
1882 }
1883 rc = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
1884 -1, &req->r_caps_reservation);
1885 if (rc < 0) {
1886 pr_err_client(cl, "inode badness on %p got %d\n", in,
1887 rc);
1888 err = rc;
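			/*
			 * ceph_get_inode() returned a referenced inode; take
			 * one extra ref before discard_new_inode() (which does
			 * its own iput) so the common iput() below stays
			 * balanced.
			 */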
1889 if (in->i_state & I_NEW) {
1890 ihold(in);
1891 discard_new_inode(in);
1892 }
1893 } else if (in->i_state & I_NEW) {
1894 unlock_new_inode(in);
1895 }
1896
1897 iput(in);
1898 }
1899
1900 return err;
1901 }
1902
1903 void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
1904 {
1905 if (ctl->page) {
1906 kunmap(ctl->page);
1907 put_page(ctl->page);
1908 ctl->page = NULL;
1909 }
1910 }
1911
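/*
 * The readdir cache stores dentry pointers in the directory inode's page
 * cache: each page holds PAGE_SIZE / sizeof(struct dentry *) slots and
 * ctl->index is the global slot number, so idx/pgoff below locate the slot
 * within its page.  Entries are only cached while the directory contents
 * are known to be stable, i.e. while the release/ordered counts still match
 * the values sampled when the readdir request was issued.
 */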
1912 static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
1913 struct ceph_readdir_cache_control *ctl,
1914 struct ceph_mds_request *req)
1915 {
1916 struct ceph_client *cl = ceph_inode_to_client(dir);
1917 struct ceph_inode_info *ci = ceph_inode(dir);
1918 unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
1919 unsigned idx = ctl->index % nsize;
1920 pgoff_t pgoff = ctl->index / nsize;
1921
1922 if (!ctl->page || pgoff != ctl->page->index) {
1923 ceph_readdir_cache_release(ctl);
1924 if (idx == 0)
1925 ctl->page = grab_cache_page(&dir->i_data, pgoff);
1926 else
1927 ctl->page = find_lock_page(&dir->i_data, pgoff);
1928 if (!ctl->page) {
1929 ctl->index = -1;
1930 return idx == 0 ? -ENOMEM : 0;
1931 }
1932 		/* reading/filling the cache are serialized by
1933 		 * i_rwsem, so there is no need to take the page lock */
1934 unlock_page(ctl->page);
1935 ctl->dentries = kmap(ctl->page);
1936 if (idx == 0)
1937 memset(ctl->dentries, 0, PAGE_SIZE);
1938 }
1939
1940 if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
1941 req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) {
1942 doutc(cl, "dn %p idx %d\n", dn, ctl->index);
1943 ctl->dentries[idx] = dn;
1944 ctl->index++;
1945 } else {
1946 doutc(cl, "disable readdir cache\n");
1947 ctl->index = -1;
1948 }
1949 return 0;
1950 }
1951
1952 int ceph_readdir_prepopulate(struct ceph_mds_request *req,
1953 struct ceph_mds_session *session)
1954 {
1955 struct dentry *parent = req->r_dentry;
1956 struct inode *inode = d_inode(parent);
1957 struct ceph_inode_info *ci = ceph_inode(inode);
1958 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1959 struct ceph_client *cl = session->s_mdsc->fsc->client;
1960 struct qstr dname;
1961 struct dentry *dn;
1962 struct inode *in;
1963 int err = 0, skipped = 0, ret, i;
1964 u32 frag = le32_to_cpu(req->r_args.readdir.frag);
1965 u32 last_hash = 0;
1966 u32 fpos_offset;
1967 struct ceph_readdir_cache_control cache_ctl = {};
1968
1969 if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
1970 return readdir_prepopulate_inodes_only(req, session);
1971
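	/*
	 * In hash-order readdirs the file position encodes the dentry name
	 * hash, so recover the hash of the last entry of the previous reply:
	 * either from the last name we asked to continue after (r_path2) or
	 * from the offset_hash the MDS echoed back.
	 */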
1972 if (rinfo->hash_order) {
1973 if (req->r_path2) {
1974 last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
1975 req->r_path2,
1976 strlen(req->r_path2));
1977 last_hash = ceph_frag_value(last_hash);
1978 } else if (rinfo->offset_hash) {
1979 /* mds understands offset_hash */
1980 WARN_ON_ONCE(req->r_readdir_offset != 2);
1981 last_hash = le32_to_cpu(req->r_args.readdir.offset_hash);
1982 }
1983 }
1984
1985 if (rinfo->dir_dir &&
1986 le32_to_cpu(rinfo->dir_dir->frag) != frag) {
1987 doutc(cl, "got new frag %x -> %x\n", frag,
1988 le32_to_cpu(rinfo->dir_dir->frag));
1989 frag = le32_to_cpu(rinfo->dir_dir->frag);
1990 if (!rinfo->hash_order)
1991 req->r_readdir_offset = 2;
1992 }
1993
1994 if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
1995 doutc(cl, "%d items under SNAPDIR dn %p\n",
1996 rinfo->dir_nr, parent);
1997 } else {
1998 doutc(cl, "%d items under dn %p\n", rinfo->dir_nr, parent);
1999 if (rinfo->dir_dir)
2000 ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
2001
2002 if (ceph_frag_is_leftmost(frag) &&
2003 req->r_readdir_offset == 2 &&
2004 !(rinfo->hash_order && last_hash)) {
2005 /* note dir version at start of readdir so we can
2006 * tell if any dentries get dropped */
2007 req->r_dir_release_cnt =
2008 atomic64_read(&ci->i_release_count);
2009 req->r_dir_ordered_cnt =
2010 atomic64_read(&ci->i_ordered_count);
2011 req->r_readdir_cache_idx = 0;
2012 }
2013 }
2014
2015 cache_ctl.index = req->r_readdir_cache_idx;
2016 fpos_offset = req->r_readdir_offset;
2017
2018 /* FIXME: release caps/leases if error occurs */
2019 for (i = 0; i < rinfo->dir_nr; i++) {
2020 struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
2021 struct ceph_vino tvino;
2022
2023 dname.name = rde->name;
2024 dname.len = rde->name_len;
2025 dname.hash = full_name_hash(parent, dname.name, dname.len);
2026
2027 tvino.ino = le64_to_cpu(rde->inode.in->ino);
2028 tvino.snap = le64_to_cpu(rde->inode.in->snapid);
2029
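		/*
		 * Compute the entry's file position: the high bits carry the
		 * name hash (hash-order readdir) or the dirfrag value, the
		 * low bits a per-frag/per-hash counter that starts at 2 to
		 * leave room for "." and "..".
		 */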
2030 if (rinfo->hash_order) {
2031 u32 hash = ceph_frag_value(rde->raw_hash);
2032 if (hash != last_hash)
2033 fpos_offset = 2;
2034 last_hash = hash;
2035 rde->offset = ceph_make_fpos(hash, fpos_offset++, true);
2036 } else {
2037 rde->offset = ceph_make_fpos(frag, fpos_offset++, false);
2038 }
2039
2040 retry_lookup:
2041 dn = d_lookup(parent, &dname);
2042 doutc(cl, "d_lookup on parent=%p name=%.*s got %p\n",
2043 parent, dname.len, dname.name, dn);
2044
2045 if (!dn) {
2046 dn = d_alloc(parent, &dname);
2047 doutc(cl, "d_alloc %p '%.*s' = %p\n", parent,
2048 dname.len, dname.name, dn);
2049 if (!dn) {
2050 doutc(cl, "d_alloc badness\n");
2051 err = -ENOMEM;
2052 goto out;
2053 }
2054 if (rde->is_nokey) {
2055 spin_lock(&dn->d_lock);
2056 dn->d_flags |= DCACHE_NOKEY_NAME;
2057 spin_unlock(&dn->d_lock);
2058 }
2059 } else if (d_really_is_positive(dn) &&
2060 (ceph_ino(d_inode(dn)) != tvino.ino ||
2061 ceph_snap(d_inode(dn)) != tvino.snap)) {
2062 struct ceph_dentry_info *di = ceph_dentry(dn);
2063 doutc(cl, " dn %p points to wrong inode %p\n",
2064 dn, d_inode(dn));
2065
2066 spin_lock(&dn->d_lock);
2067 if (di->offset > 0 &&
2068 di->lease_shared_gen ==
2069 atomic_read(&ci->i_shared_gen)) {
2070 __ceph_dir_clear_ordered(ci);
2071 di->offset = 0;
2072 }
2073 spin_unlock(&dn->d_lock);
2074
2075 d_delete(dn);
2076 dput(dn);
2077 goto retry_lookup;
2078 }
2079
2080 /* inode */
2081 if (d_really_is_positive(dn)) {
2082 in = d_inode(dn);
2083 } else {
2084 in = ceph_get_inode(parent->d_sb, tvino, NULL);
2085 if (IS_ERR(in)) {
2086 doutc(cl, "new_inode badness\n");
2087 d_drop(dn);
2088 dput(dn);
2089 err = PTR_ERR(in);
2090 goto out;
2091 }
2092 }
2093
2094 ret = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
2095 -1, &req->r_caps_reservation);
2096 if (ret < 0) {
2097 pr_err_client(cl, "badness on %p %llx.%llx\n", in,
2098 ceph_vinop(in));
2099 if (d_really_is_negative(dn)) {
2100 if (in->i_state & I_NEW) {
2101 ihold(in);
2102 discard_new_inode(in);
2103 }
2104 iput(in);
2105 }
2106 d_drop(dn);
2107 err = ret;
2108 goto next_item;
2109 }
2110 if (in->i_state & I_NEW)
2111 unlock_new_inode(in);
2112
2113 if (d_really_is_negative(dn)) {
2114 if (ceph_security_xattr_deadlock(in)) {
2115 doutc(cl, " skip splicing dn %p to inode %p"
2116 " (security xattr deadlock)\n", dn, in);
2117 iput(in);
2118 skipped++;
2119 goto next_item;
2120 }
2121
2122 err = splice_dentry(&dn, in);
2123 if (err < 0)
2124 goto next_item;
2125 }
2126
2127 ceph_dentry(dn)->offset = rde->offset;
2128
2129 update_dentry_lease(d_inode(parent), dn,
2130 rde->lease, req->r_session,
2131 req->r_request_started);
2132
2133 if (err == 0 && skipped == 0 && cache_ctl.index >= 0) {
2134 ret = fill_readdir_cache(d_inode(parent), dn,
2135 &cache_ctl, req);
2136 if (ret < 0)
2137 err = ret;
2138 }
2139 next_item:
2140 dput(dn);
2141 }
2142 out:
2143 if (err == 0 && skipped == 0) {
2144 set_bit(CEPH_MDS_R_DID_PREPOPULATE, &req->r_req_flags);
2145 req->r_readdir_cache_idx = cache_ctl.index;
2146 }
2147 ceph_readdir_cache_release(&cache_ctl);
2148 doutc(cl, "done\n");
2149 return err;
2150 }
2151
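/*
 * Update the locally cached inode size.  Returns true if the size change
 * should be reported to the MDS (the caller is then expected to trigger a
 * cap check), false otherwise.
 */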
2152 bool ceph_inode_set_size(struct inode *inode, loff_t size)
2153 {
2154 struct ceph_client *cl = ceph_inode_to_client(inode);
2155 struct ceph_inode_info *ci = ceph_inode(inode);
2156 bool ret;
2157
2158 spin_lock(&ci->i_ceph_lock);
2159 doutc(cl, "set_size %p %llu -> %llu\n", inode, i_size_read(inode), size);
2160 i_size_write(inode, size);
2161 ceph_fscache_update(inode);
2162 inode->i_blocks = calc_inode_blocks(size);
2163
2164 ret = __ceph_should_report_size(ci);
2165
2166 spin_unlock(&ci->i_ceph_lock);
2167
2168 return ret;
2169 }
2170
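/*
 * Queue deferred work for an inode on the fs client's inode workqueue.
 * A reference is taken for the queued work; if the work item was already
 * pending, queue_work() returns false and we drop the extra reference here
 * (ceph_inode_work() drops the reference held by the pending item).
 */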
2171 void ceph_queue_inode_work(struct inode *inode, int work_bit)
2172 {
2173 struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
2174 struct ceph_client *cl = fsc->client;
2175 struct ceph_inode_info *ci = ceph_inode(inode);
2176 set_bit(work_bit, &ci->i_work_mask);
2177
2178 ihold(inode);
2179 if (queue_work(fsc->inode_wq, &ci->i_work)) {
2180 doutc(cl, "%p %llx.%llx mask=%lx\n", inode,
2181 ceph_vinop(inode), ci->i_work_mask);
2182 } else {
2183 doutc(cl, "%p %llx.%llx already queued, mask=%lx\n",
2184 inode, ceph_vinop(inode), ci->i_work_mask);
2185 iput(inode);
2186 }
2187 }
2188
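/*
 * Invalidate clean pagecache pages because FILE_CACHE is being revoked.
 * If i_rdcache_gen no longer matches i_rdcache_revoking, the caps were
 * re-issued after this invalidation was queued, so the work is stale and
 * we only need to consider kicking off a cap check.
 */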
2189 static void ceph_do_invalidate_pages(struct inode *inode)
2190 {
2191 struct ceph_client *cl = ceph_inode_to_client(inode);
2192 struct ceph_inode_info *ci = ceph_inode(inode);
2193 u32 orig_gen;
2194 int check = 0;
2195
2196 ceph_fscache_invalidate(inode, false);
2197
2198 mutex_lock(&ci->i_truncate_mutex);
2199
2200 if (ceph_inode_is_shutdown(inode)) {
2201 pr_warn_ratelimited_client(cl,
2202 "%p %llx.%llx is shut down\n", inode,
2203 ceph_vinop(inode));
2204 mapping_set_error(inode->i_mapping, -EIO);
2205 truncate_pagecache(inode, 0);
2206 mutex_unlock(&ci->i_truncate_mutex);
2207 goto out;
2208 }
2209
2210 spin_lock(&ci->i_ceph_lock);
2211 doutc(cl, "%p %llx.%llx gen %d revoking %d\n", inode,
2212 ceph_vinop(inode), ci->i_rdcache_gen, ci->i_rdcache_revoking);
2213 if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
2214 if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
2215 check = 1;
2216 spin_unlock(&ci->i_ceph_lock);
2217 mutex_unlock(&ci->i_truncate_mutex);
2218 goto out;
2219 }
2220 orig_gen = ci->i_rdcache_gen;
2221 spin_unlock(&ci->i_ceph_lock);
2222
2223 if (invalidate_inode_pages2(inode->i_mapping) < 0) {
2224 pr_err_client(cl, "invalidate_inode_pages2 %llx.%llx failed\n",
2225 ceph_vinop(inode));
2226 }
2227
2228 spin_lock(&ci->i_ceph_lock);
2229 if (orig_gen == ci->i_rdcache_gen &&
2230 orig_gen == ci->i_rdcache_revoking) {
2231 doutc(cl, "%p %llx.%llx gen %d successful\n", inode,
2232 ceph_vinop(inode), ci->i_rdcache_gen);
2233 ci->i_rdcache_revoking--;
2234 check = 1;
2235 } else {
2236 doutc(cl, "%p %llx.%llx gen %d raced, now %d revoking %d\n",
2237 inode, ceph_vinop(inode), orig_gen, ci->i_rdcache_gen,
2238 ci->i_rdcache_revoking);
2239 if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
2240 check = 1;
2241 }
2242 spin_unlock(&ci->i_ceph_lock);
2243 mutex_unlock(&ci->i_truncate_mutex);
2244 out:
2245 if (check)
2246 ceph_check_caps(ci, 0);
2247 }
2248
2249 /*
2250 * Make sure any pending truncation is applied before doing anything
2251 * that may depend on it.
2252 */
2253 void __ceph_do_pending_vmtruncate(struct inode *inode)
2254 {
2255 struct ceph_client *cl = ceph_inode_to_client(inode);
2256 struct ceph_inode_info *ci = ceph_inode(inode);
2257 u64 to;
2258 int wrbuffer_refs, finish = 0;
2259
2260 mutex_lock(&ci->i_truncate_mutex);
2261 retry:
2262 spin_lock(&ci->i_ceph_lock);
2263 if (ci->i_truncate_pending == 0) {
2264 doutc(cl, "%p %llx.%llx none pending\n", inode,
2265 ceph_vinop(inode));
2266 spin_unlock(&ci->i_ceph_lock);
2267 mutex_unlock(&ci->i_truncate_mutex);
2268 return;
2269 }
2270
2271 /*
2272 * make sure any dirty snapped pages are flushed before we
2273 * possibly truncate them.. so write AND block!
2274 */
2275 if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
2276 spin_unlock(&ci->i_ceph_lock);
2277 doutc(cl, "%p %llx.%llx flushing snaps first\n", inode,
2278 ceph_vinop(inode));
2279 filemap_write_and_wait_range(&inode->i_data, 0,
2280 inode->i_sb->s_maxbytes);
2281 goto retry;
2282 }
2283
2284 /* there should be no reader or writer */
2285 WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);
2286
2287 to = ci->i_truncate_pagecache_size;
2288 wrbuffer_refs = ci->i_wrbuffer_ref;
2289 doutc(cl, "%p %llx.%llx (%d) to %lld\n", inode, ceph_vinop(inode),
2290 ci->i_truncate_pending, to);
2291 spin_unlock(&ci->i_ceph_lock);
2292
2293 ceph_fscache_resize(inode, to);
2294 truncate_pagecache(inode, to);
2295
2296 spin_lock(&ci->i_ceph_lock);
2297 if (to == ci->i_truncate_pagecache_size) {
2298 ci->i_truncate_pending = 0;
2299 finish = 1;
2300 }
2301 spin_unlock(&ci->i_ceph_lock);
2302 if (!finish)
2303 goto retry;
2304
2305 mutex_unlock(&ci->i_truncate_mutex);
2306
2307 if (wrbuffer_refs == 0)
2308 ceph_check_caps(ci, 0);
2309
2310 wake_up_all(&ci->i_cap_wq);
2311 }
2312
2313 static void ceph_inode_work(struct work_struct *work)
2314 {
2315 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
2316 i_work);
2317 struct inode *inode = &ci->netfs.inode;
2318 struct ceph_client *cl = ceph_inode_to_client(inode);
2319
2320 if (test_and_clear_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask)) {
2321 doutc(cl, "writeback %p %llx.%llx\n", inode, ceph_vinop(inode));
2322 filemap_fdatawrite(&inode->i_data);
2323 }
2324 if (test_and_clear_bit(CEPH_I_WORK_INVALIDATE_PAGES, &ci->i_work_mask))
2325 ceph_do_invalidate_pages(inode);
2326
2327 if (test_and_clear_bit(CEPH_I_WORK_VMTRUNCATE, &ci->i_work_mask))
2328 __ceph_do_pending_vmtruncate(inode);
2329
2330 if (test_and_clear_bit(CEPH_I_WORK_CHECK_CAPS, &ci->i_work_mask))
2331 ceph_check_caps(ci, 0);
2332
2333 if (test_and_clear_bit(CEPH_I_WORK_FLUSH_SNAPS, &ci->i_work_mask))
2334 ceph_flush_snaps(ci, NULL);
2335
2336 iput(inode);
2337 }
2338
2339 static const char *ceph_encrypted_get_link(struct dentry *dentry,
2340 struct inode *inode,
2341 struct delayed_call *done)
2342 {
2343 struct ceph_inode_info *ci = ceph_inode(inode);
2344
2345 if (!dentry)
2346 return ERR_PTR(-ECHILD);
2347
2348 return fscrypt_get_symlink(inode, ci->i_symlink, i_size_read(inode),
2349 done);
2350 }
2351
2352 static int ceph_encrypted_symlink_getattr(struct mnt_idmap *idmap,
2353 const struct path *path,
2354 struct kstat *stat, u32 request_mask,
2355 unsigned int query_flags)
2356 {
2357 int ret;
2358
2359 ret = ceph_getattr(idmap, path, stat, request_mask, query_flags);
2360 if (ret)
2361 return ret;
2362 return fscrypt_symlink_getattr(path, stat);
2363 }
2364
2365 /*
2366 * symlinks
2367 */
2368 static const struct inode_operations ceph_symlink_iops = {
2369 .get_link = simple_get_link,
2370 .setattr = ceph_setattr,
2371 .getattr = ceph_getattr,
2372 .listxattr = ceph_listxattr,
2373 };
2374
2375 static const struct inode_operations ceph_encrypted_symlink_iops = {
2376 .get_link = ceph_encrypted_get_link,
2377 .setattr = ceph_setattr,
2378 .getattr = ceph_encrypted_symlink_getattr,
2379 .listxattr = ceph_listxattr,
2380 };
2381
2382 /*
2383  * Transfer the encrypted last block to the MDS, which will
2384  * update it on our behalf when truncating to a smaller size.
2385 *
2386 * We don't support a PAGE_SIZE that is smaller than the
2387 * CEPH_FSCRYPT_BLOCK_SIZE.
2388 */
2389 static int fill_fscrypt_truncate(struct inode *inode,
2390 struct ceph_mds_request *req,
2391 struct iattr *attr)
2392 {
2393 struct ceph_client *cl = ceph_inode_to_client(inode);
2394 struct ceph_inode_info *ci = ceph_inode(inode);
2395 int boff = attr->ia_size % CEPH_FSCRYPT_BLOCK_SIZE;
2396 loff_t pos, orig_pos = round_down(attr->ia_size,
2397 CEPH_FSCRYPT_BLOCK_SIZE);
2398 u64 block = orig_pos >> CEPH_FSCRYPT_BLOCK_SHIFT;
2399 struct ceph_pagelist *pagelist = NULL;
2400 struct kvec iov = {0};
2401 struct iov_iter iter;
2402 struct page *page = NULL;
2403 struct ceph_fscrypt_truncate_size_header header;
2404 int retry_op = 0;
2405 int len = CEPH_FSCRYPT_BLOCK_SIZE;
2406 loff_t i_size = i_size_read(inode);
2407 int got, ret, issued;
2408 u64 objver;
2409
2410 ret = __ceph_get_caps(inode, NULL, CEPH_CAP_FILE_RD, 0, -1, &got);
2411 if (ret < 0)
2412 return ret;
2413
2414 issued = __ceph_caps_issued(ci, NULL);
2415
2416 doutc(cl, "size %lld -> %lld got cap refs on %s, issued %s\n",
2417 i_size, attr->ia_size, ceph_cap_string(got),
2418 ceph_cap_string(issued));
2419
2420 	/* Try to write back the dirty pagecache */
2421 if (issued & (CEPH_CAP_FILE_BUFFER)) {
2422 loff_t lend = orig_pos + CEPH_FSCRYPT_BLOCK_SIZE - 1;
2423
2424 ret = filemap_write_and_wait_range(inode->i_mapping,
2425 orig_pos, lend);
2426 if (ret < 0)
2427 goto out;
2428 }
2429
2430 page = __page_cache_alloc(GFP_KERNEL);
2431 if (page == NULL) {
2432 ret = -ENOMEM;
2433 goto out;
2434 }
2435
2436 pagelist = ceph_pagelist_alloc(GFP_KERNEL);
2437 if (!pagelist) {
2438 ret = -ENOMEM;
2439 goto out;
2440 }
2441
2442 iov.iov_base = kmap_local_page(page);
2443 iov.iov_len = len;
2444 iov_iter_kvec(&iter, READ, &iov, 1, len);
2445
2446 pos = orig_pos;
2447 ret = __ceph_sync_read(inode, &pos, &iter, &retry_op, &objver);
2448 if (ret < 0)
2449 goto out;
2450
2451 	/* Fill in the header first */
2452 header.ver = 1;
2453 header.compat = 1;
2454 header.change_attr = cpu_to_le64(inode_peek_iversion_raw(inode));
2455
2456 /*
2457 * Always set the block_size to CEPH_FSCRYPT_BLOCK_SIZE,
2458 	 * because the MDS may need it to do the truncate.
2459 */
2460 header.block_size = cpu_to_le32(CEPH_FSCRYPT_BLOCK_SIZE);
2461
2462 /*
2463 	 * If we hit a hole here, just skip filling the fscrypt
2464 	 * payload for the request, because once fscrypt is
2465 	 * enabled the file is split into blocks of size
2466 	 * CEPH_FSCRYPT_BLOCK_SIZE, so any hole must be a
2467 	 * multiple of the block size.
2468 	 *
2469 	 * If the RADOS object doesn't exist, objver is left
2470 	 * at 0.
2471 */
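	/*
	 * data_len counts the payload that follows it in the header:
	 * change_attr (8) + file_offset (8) + block_size (4), plus one
	 * CEPH_FSCRYPT_BLOCK_SIZE block of data when the last block is
	 * appended below.
	 */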
2472 if (!objver) {
2473 doutc(cl, "hit hole, ppos %lld < size %lld\n", pos, i_size);
2474
2475 header.data_len = cpu_to_le32(8 + 8 + 4);
2476 header.file_offset = 0;
2477 ret = 0;
2478 } else {
2479 header.data_len = cpu_to_le32(8 + 8 + 4 + CEPH_FSCRYPT_BLOCK_SIZE);
2480 header.file_offset = cpu_to_le64(orig_pos);
2481
2482 doutc(cl, "encrypt block boff/bsize %d/%lu\n", boff,
2483 CEPH_FSCRYPT_BLOCK_SIZE);
2484
2485 /* truncate and zero out the extra contents for the last block */
2486 memset(iov.iov_base + boff, 0, PAGE_SIZE - boff);
2487
2488 /* encrypt the last block */
2489 ret = ceph_fscrypt_encrypt_block_inplace(inode, page,
2490 CEPH_FSCRYPT_BLOCK_SIZE,
2491 0, block,
2492 GFP_KERNEL);
2493 if (ret)
2494 goto out;
2495 }
2496
2497 /* Insert the header */
2498 ret = ceph_pagelist_append(pagelist, &header, sizeof(header));
2499 if (ret)
2500 goto out;
2501
2502 if (header.block_size) {
2503 /* Append the last block contents to pagelist */
2504 ret = ceph_pagelist_append(pagelist, iov.iov_base,
2505 CEPH_FSCRYPT_BLOCK_SIZE);
2506 if (ret)
2507 goto out;
2508 }
2509 req->r_pagelist = pagelist;
2510 out:
2511 doutc(cl, "%p %llx.%llx size dropping cap refs on %s\n", inode,
2512 ceph_vinop(inode), ceph_cap_string(got));
2513 ceph_put_cap_refs(ci, got);
2514 if (iov.iov_base)
2515 kunmap_local(iov.iov_base);
2516 if (page)
2517 __free_pages(page, 0);
2518 if (ret && pagelist)
2519 ceph_pagelist_release(pagelist);
2520 return ret;
2521 }
2522
2523 int __ceph_setattr(struct mnt_idmap *idmap, struct inode *inode,
2524 struct iattr *attr, struct ceph_iattr *cia)
2525 {
2526 struct ceph_inode_info *ci = ceph_inode(inode);
2527 unsigned int ia_valid = attr->ia_valid;
2528 struct ceph_mds_request *req;
2529 struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
2530 struct ceph_client *cl = ceph_inode_to_client(inode);
2531 struct ceph_cap_flush *prealloc_cf;
2532 loff_t isize = i_size_read(inode);
2533 int issued;
2534 int release = 0, dirtied = 0;
2535 int mask = 0;
2536 int err = 0;
2537 int inode_dirty_flags = 0;
2538 bool lock_snap_rwsem = false;
2539 bool fill_fscrypt;
2540 int truncate_retry = 20; /* The RMW will take around 50ms */
2541 struct dentry *dentry;
2542 char *path;
2543 bool do_sync = false;
2544
2545 dentry = d_find_alias(inode);
2546 if (!dentry) {
2547 do_sync = true;
2548 } else {
2549 struct ceph_path_info path_info;
2550 path = ceph_mdsc_build_path(mdsc, dentry, &path_info, 0);
2551 if (IS_ERR(path)) {
2552 do_sync = true;
2553 err = 0;
2554 } else {
2555 err = ceph_mds_check_access(mdsc, path, MAY_WRITE);
2556 }
2557 ceph_mdsc_free_path_info(&path_info);
2558 dput(dentry);
2559
2560 		/* For non-EACCES errors, let the MDS do the auth check */
2561 if (err == -EACCES) {
2562 return err;
2563 } else if (err < 0) {
2564 do_sync = true;
2565 err = 0;
2566 }
2567 }
2568
2569 retry:
2570 prealloc_cf = ceph_alloc_cap_flush();
2571 if (!prealloc_cf)
2572 return -ENOMEM;
2573
2574 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
2575 USE_AUTH_MDS);
2576 if (IS_ERR(req)) {
2577 ceph_free_cap_flush(prealloc_cf);
2578 return PTR_ERR(req);
2579 }
2580
2581 fill_fscrypt = false;
2582 spin_lock(&ci->i_ceph_lock);
2583 issued = __ceph_caps_issued(ci, NULL);
2584
2585 if (!ci->i_head_snapc &&
2586 (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) {
2587 lock_snap_rwsem = true;
2588 if (!down_read_trylock(&mdsc->snap_rwsem)) {
2589 spin_unlock(&ci->i_ceph_lock);
2590 down_read(&mdsc->snap_rwsem);
2591 spin_lock(&ci->i_ceph_lock);
2592 issued = __ceph_caps_issued(ci, NULL);
2593 }
2594 }
2595
2596 doutc(cl, "%p %llx.%llx issued %s\n", inode, ceph_vinop(inode),
2597 ceph_cap_string(issued));
2598 #if IS_ENABLED(CONFIG_FS_ENCRYPTION)
2599 if (cia && cia->fscrypt_auth) {
2600 u32 len = ceph_fscrypt_auth_len(cia->fscrypt_auth);
2601
2602 if (len > sizeof(*cia->fscrypt_auth)) {
2603 err = -EINVAL;
2604 spin_unlock(&ci->i_ceph_lock);
2605 goto out;
2606 }
2607
2608 doutc(cl, "%p %llx.%llx fscrypt_auth len %u to %u)\n", inode,
2609 ceph_vinop(inode), ci->fscrypt_auth_len, len);
2610
2611 /* It should never be re-set once set */
2612 WARN_ON_ONCE(ci->fscrypt_auth);
2613
2614 if (!do_sync && (issued & CEPH_CAP_AUTH_EXCL)) {
2615 dirtied |= CEPH_CAP_AUTH_EXCL;
2616 kfree(ci->fscrypt_auth);
2617 ci->fscrypt_auth = (u8 *)cia->fscrypt_auth;
2618 ci->fscrypt_auth_len = len;
2619 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2620 ci->fscrypt_auth_len != len ||
2621 memcmp(ci->fscrypt_auth, cia->fscrypt_auth, len)) {
2622 req->r_fscrypt_auth = cia->fscrypt_auth;
2623 mask |= CEPH_SETATTR_FSCRYPT_AUTH;
2624 release |= CEPH_CAP_AUTH_SHARED;
2625 }
2626 cia->fscrypt_auth = NULL;
2627 }
2628 #else
2629 if (cia && cia->fscrypt_auth) {
2630 err = -EINVAL;
2631 spin_unlock(&ci->i_ceph_lock);
2632 goto out;
2633 }
2634 #endif /* CONFIG_FS_ENCRYPTION */
2635
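	/*
	 * For each attribute below: if we hold the relevant EXCL cap (and
	 * the access check above didn't force do_sync), apply the change
	 * locally and mark that cap dirty; otherwise, if the cached value
	 * differs or we lack the SHARED cap, add it to the setattr request
	 * for the MDS and drop the SHARED cap.
	 */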
2636 if (ia_valid & ATTR_UID) {
2637 kuid_t fsuid = from_vfsuid(idmap, i_user_ns(inode), attr->ia_vfsuid);
2638
2639 doutc(cl, "%p %llx.%llx uid %d -> %d\n", inode,
2640 ceph_vinop(inode),
2641 from_kuid(&init_user_ns, inode->i_uid),
2642 from_kuid(&init_user_ns, attr->ia_uid));
2643 if (!do_sync && (issued & CEPH_CAP_AUTH_EXCL)) {
2644 inode->i_uid = fsuid;
2645 dirtied |= CEPH_CAP_AUTH_EXCL;
2646 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2647 !uid_eq(fsuid, inode->i_uid)) {
2648 req->r_args.setattr.uid = cpu_to_le32(
2649 from_kuid(&init_user_ns, fsuid));
2650 mask |= CEPH_SETATTR_UID;
2651 release |= CEPH_CAP_AUTH_SHARED;
2652 }
2653 }
2654 if (ia_valid & ATTR_GID) {
2655 kgid_t fsgid = from_vfsgid(idmap, i_user_ns(inode), attr->ia_vfsgid);
2656
2657 doutc(cl, "%p %llx.%llx gid %d -> %d\n", inode,
2658 ceph_vinop(inode),
2659 from_kgid(&init_user_ns, inode->i_gid),
2660 from_kgid(&init_user_ns, attr->ia_gid));
2661 if (!do_sync && (issued & CEPH_CAP_AUTH_EXCL)) {
2662 inode->i_gid = fsgid;
2663 dirtied |= CEPH_CAP_AUTH_EXCL;
2664 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2665 !gid_eq(fsgid, inode->i_gid)) {
2666 req->r_args.setattr.gid = cpu_to_le32(
2667 from_kgid(&init_user_ns, fsgid));
2668 mask |= CEPH_SETATTR_GID;
2669 release |= CEPH_CAP_AUTH_SHARED;
2670 }
2671 }
2672 if (ia_valid & ATTR_MODE) {
2673 doutc(cl, "%p %llx.%llx mode 0%o -> 0%o\n", inode,
2674 ceph_vinop(inode), inode->i_mode, attr->ia_mode);
2675 if (!do_sync && (issued & CEPH_CAP_AUTH_EXCL)) {
2676 inode->i_mode = attr->ia_mode;
2677 dirtied |= CEPH_CAP_AUTH_EXCL;
2678 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2679 attr->ia_mode != inode->i_mode) {
2680 inode->i_mode = attr->ia_mode;
2681 req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
2682 mask |= CEPH_SETATTR_MODE;
2683 release |= CEPH_CAP_AUTH_SHARED;
2684 }
2685 }
2686
2687 if (ia_valid & ATTR_ATIME) {
2688 struct timespec64 atime = inode_get_atime(inode);
2689
2690 doutc(cl, "%p %llx.%llx atime %lld.%09ld -> %lld.%09ld\n",
2691 inode, ceph_vinop(inode),
2692 atime.tv_sec, atime.tv_nsec,
2693 attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
2694 if (!do_sync && (issued & CEPH_CAP_FILE_EXCL)) {
2695 ci->i_time_warp_seq++;
2696 inode_set_atime_to_ts(inode, attr->ia_atime);
2697 dirtied |= CEPH_CAP_FILE_EXCL;
2698 } else if (!do_sync && (issued & CEPH_CAP_FILE_WR) &&
2699 timespec64_compare(&atime,
2700 &attr->ia_atime) < 0) {
2701 inode_set_atime_to_ts(inode, attr->ia_atime);
2702 dirtied |= CEPH_CAP_FILE_WR;
2703 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2704 !timespec64_equal(&atime, &attr->ia_atime)) {
2705 ceph_encode_timespec64(&req->r_args.setattr.atime,
2706 &attr->ia_atime);
2707 mask |= CEPH_SETATTR_ATIME;
2708 release |= CEPH_CAP_FILE_SHARED |
2709 CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2710 }
2711 }
2712 if (ia_valid & ATTR_SIZE) {
2713 doutc(cl, "%p %llx.%llx size %lld -> %lld\n", inode,
2714 ceph_vinop(inode), isize, attr->ia_size);
2715 /*
2716 		 * The RMW is needed only when the new size is smaller and
2717 		 * not aligned to CEPH_FSCRYPT_BLOCK_SIZE.
2718 */
2719 if (IS_ENCRYPTED(inode) && attr->ia_size < isize &&
2720 (attr->ia_size % CEPH_FSCRYPT_BLOCK_SIZE)) {
2721 mask |= CEPH_SETATTR_SIZE;
2722 release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL |
2723 CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2724 set_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags);
2725 mask |= CEPH_SETATTR_FSCRYPT_FILE;
2726 req->r_args.setattr.size =
2727 cpu_to_le64(round_up(attr->ia_size,
2728 CEPH_FSCRYPT_BLOCK_SIZE));
2729 req->r_args.setattr.old_size =
2730 cpu_to_le64(round_up(isize,
2731 CEPH_FSCRYPT_BLOCK_SIZE));
2732 req->r_fscrypt_file = attr->ia_size;
2733 fill_fscrypt = true;
2734 } else if (!do_sync && (issued & CEPH_CAP_FILE_EXCL) && attr->ia_size >= isize) {
2735 if (attr->ia_size > isize) {
2736 i_size_write(inode, attr->ia_size);
2737 inode->i_blocks = calc_inode_blocks(attr->ia_size);
2738 ci->i_reported_size = attr->ia_size;
2739 dirtied |= CEPH_CAP_FILE_EXCL;
2740 ia_valid |= ATTR_MTIME;
2741 }
2742 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2743 attr->ia_size != isize) {
2744 mask |= CEPH_SETATTR_SIZE;
2745 release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL |
2746 CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2747 if (IS_ENCRYPTED(inode) && attr->ia_size) {
2748 set_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags);
2749 mask |= CEPH_SETATTR_FSCRYPT_FILE;
2750 req->r_args.setattr.size =
2751 cpu_to_le64(round_up(attr->ia_size,
2752 CEPH_FSCRYPT_BLOCK_SIZE));
2753 req->r_args.setattr.old_size =
2754 cpu_to_le64(round_up(isize,
2755 CEPH_FSCRYPT_BLOCK_SIZE));
2756 req->r_fscrypt_file = attr->ia_size;
2757 } else {
2758 req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
2759 req->r_args.setattr.old_size = cpu_to_le64(isize);
2760 req->r_fscrypt_file = 0;
2761 }
2762 }
2763 }
2764 if (ia_valid & ATTR_MTIME) {
2765 struct timespec64 mtime = inode_get_mtime(inode);
2766
2767 doutc(cl, "%p %llx.%llx mtime %lld.%09ld -> %lld.%09ld\n",
2768 inode, ceph_vinop(inode),
2769 mtime.tv_sec, mtime.tv_nsec,
2770 attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
2771 if (!do_sync && (issued & CEPH_CAP_FILE_EXCL)) {
2772 ci->i_time_warp_seq++;
2773 inode_set_mtime_to_ts(inode, attr->ia_mtime);
2774 dirtied |= CEPH_CAP_FILE_EXCL;
2775 } else if (!do_sync && (issued & CEPH_CAP_FILE_WR) &&
2776 timespec64_compare(&mtime, &attr->ia_mtime) < 0) {
2777 inode_set_mtime_to_ts(inode, attr->ia_mtime);
2778 dirtied |= CEPH_CAP_FILE_WR;
2779 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2780 !timespec64_equal(&mtime, &attr->ia_mtime)) {
2781 ceph_encode_timespec64(&req->r_args.setattr.mtime,
2782 &attr->ia_mtime);
2783 mask |= CEPH_SETATTR_MTIME;
2784 release |= CEPH_CAP_FILE_SHARED |
2785 CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2786 }
2787 }
2788
2789 /* these do nothing */
2790 if (ia_valid & ATTR_CTIME) {
2791 bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
2792 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
2793 doutc(cl, "%p %llx.%llx ctime %lld.%09ld -> %lld.%09ld (%s)\n",
2794 inode, ceph_vinop(inode),
2795 inode_get_ctime_sec(inode),
2796 inode_get_ctime_nsec(inode),
2797 attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
2798 only ? "ctime only" : "ignored");
2799 if (only) {
2800 /*
2801 			 * if the kernel wants to dirty ctime but nothing else,
2802 			 * we need to choose a cap to dirty under, or do
2803 			 * an almost-no-op setattr
2804 */
2805 if (issued & CEPH_CAP_AUTH_EXCL)
2806 dirtied |= CEPH_CAP_AUTH_EXCL;
2807 else if (issued & CEPH_CAP_FILE_EXCL)
2808 dirtied |= CEPH_CAP_FILE_EXCL;
2809 else if (issued & CEPH_CAP_XATTR_EXCL)
2810 dirtied |= CEPH_CAP_XATTR_EXCL;
2811 else
2812 mask |= CEPH_SETATTR_CTIME;
2813 }
2814 }
2815 if (ia_valid & ATTR_FILE)
2816 doutc(cl, "%p %llx.%llx ATTR_FILE ... hrm!\n", inode,
2817 ceph_vinop(inode));
2818
2819 if (dirtied) {
2820 inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
2821 &prealloc_cf);
2822 inode_set_ctime_to_ts(inode, attr->ia_ctime);
2823 inode_inc_iversion_raw(inode);
2824 }
2825
2826 release &= issued;
2827 spin_unlock(&ci->i_ceph_lock);
2828 if (lock_snap_rwsem) {
2829 up_read(&mdsc->snap_rwsem);
2830 lock_snap_rwsem = false;
2831 }
2832
2833 if (inode_dirty_flags)
2834 __mark_inode_dirty(inode, inode_dirty_flags);
2835
2836 if (mask) {
2837 req->r_inode = inode;
2838 ihold(inode);
2839 req->r_inode_drop = release;
2840 req->r_args.setattr.mask = cpu_to_le32(mask);
2841 req->r_num_caps = 1;
2842 req->r_stamp = attr->ia_ctime;
2843 if (fill_fscrypt) {
2844 err = fill_fscrypt_truncate(inode, req, attr);
2845 if (err)
2846 goto out;
2847 }
2848
2849 /*
2850 * The truncate request will return -EAGAIN when the
2851 * last block has been updated just before the MDS
2852 * successfully gets the xlock for the FILE lock. To
2853 * avoid corrupting the file contents we need to retry
2854 * it.
2855 */
2856 err = ceph_mdsc_do_request(mdsc, NULL, req);
2857 if (err == -EAGAIN && truncate_retry--) {
2858 doutc(cl, "%p %llx.%llx result=%d (%s locally, %d remote), retry it!\n",
2859 inode, ceph_vinop(inode), err,
2860 ceph_cap_string(dirtied), mask);
2861 ceph_mdsc_put_request(req);
2862 ceph_free_cap_flush(prealloc_cf);
2863 goto retry;
2864 }
2865 }
2866 out:
2867 doutc(cl, "%p %llx.%llx result=%d (%s locally, %d remote)\n", inode,
2868 ceph_vinop(inode), err, ceph_cap_string(dirtied), mask);
2869
2870 ceph_mdsc_put_request(req);
2871 ceph_free_cap_flush(prealloc_cf);
2872
2873 if (err >= 0 && (mask & CEPH_SETATTR_SIZE))
2874 __ceph_do_pending_vmtruncate(inode);
2875
2876 return err;
2877 }
2878
2879 /*
2880 * setattr
2881 */
2882 int ceph_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
2883 struct iattr *attr)
2884 {
2885 struct inode *inode = d_inode(dentry);
2886 struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
2887 int err;
2888
2889 if (ceph_snap(inode) != CEPH_NOSNAP)
2890 return -EROFS;
2891
2892 if (ceph_inode_is_shutdown(inode))
2893 return -ESTALE;
2894
2895 err = fscrypt_prepare_setattr(dentry, attr);
2896 if (err)
2897 return err;
2898
2899 err = setattr_prepare(idmap, dentry, attr);
2900 if (err != 0)
2901 return err;
2902
2903 if ((attr->ia_valid & ATTR_SIZE) &&
2904 attr->ia_size > max(i_size_read(inode), fsc->max_file_size))
2905 return -EFBIG;
2906
2907 if ((attr->ia_valid & ATTR_SIZE) &&
2908 ceph_quota_is_max_bytes_exceeded(inode, attr->ia_size))
2909 return -EDQUOT;
2910
2911 err = __ceph_setattr(idmap, inode, attr, NULL);
2912
2913 if (err >= 0 && (attr->ia_valid & ATTR_MODE))
2914 err = posix_acl_chmod(idmap, dentry, attr->ia_mode);
2915
2916 return err;
2917 }
2918
2919 int ceph_try_to_choose_auth_mds(struct inode *inode, int mask)
2920 {
2921 int issued = ceph_caps_issued(ceph_inode(inode));
2922
2923 /*
2924 	 * If any 'x' cap is issued we can just choose the auth MDS
2925 	 * instead of a random replica MDS, because the loner client
2926 	 * can only get the 'x' caps when the Locker is in the
2927 	 * LOCK_EXEC state. If we send the getattr request to a
2928 	 * replica MDS instead, it must auth-pin and try to rdlock
2929 	 * from the auth MDS, which forces the auth MDS to transition
2930 	 * the Locker state to LOCK_SYNC; afterwards the lock state
2931 	 * changes back again.
2932 	 *
2933 	 * This Locker state transition is expensive and usually
2934 	 * requires revoking caps from clients.
2935 	 *
2936 	 * For the 'Xs' caps needed by getxattr we also choose the
2937 	 * auth MDS, because the MDS-side code is buggy: setxattr
2938 	 * doesn't notify the replica MDSes when values change, so a
2939 	 * replica MDS may return stale values. This will be fixed in
2940 	 * the MDS, but choosing the auth MDS still helps with old ceph.
2941 */
2942 if (((mask & CEPH_CAP_ANY_SHARED) && (issued & CEPH_CAP_ANY_EXCL))
2943 || (mask & (CEPH_STAT_RSTAT | CEPH_STAT_CAP_XATTR)))
2944 return USE_AUTH_MDS;
2945 else
2946 return USE_ANY_MDS;
2947 }
2948
2949 /*
2950 * Verify that we have a lease on the given mask. If not,
2951 * do a getattr against an mds.
2952 */
2953 int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
2954 int mask, bool force)
2955 {
2956 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
2957 struct ceph_client *cl = fsc->client;
2958 struct ceph_mds_client *mdsc = fsc->mdsc;
2959 struct ceph_mds_request *req;
2960 int mode;
2961 int err;
2962
2963 if (ceph_snap(inode) == CEPH_SNAPDIR) {
2964 doutc(cl, "inode %p %llx.%llx SNAPDIR\n", inode,
2965 ceph_vinop(inode));
2966 return 0;
2967 }
2968
2969 doutc(cl, "inode %p %llx.%llx mask %s mode 0%o\n", inode,
2970 ceph_vinop(inode), ceph_cap_string(mask), inode->i_mode);
2971 if (!force && ceph_caps_issued_mask_metric(ceph_inode(inode), mask, 1))
2972 return 0;
2973
2974 mode = ceph_try_to_choose_auth_mds(inode, mask);
2975 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
2976 if (IS_ERR(req))
2977 return PTR_ERR(req);
2978 req->r_inode = inode;
2979 ihold(inode);
2980 req->r_num_caps = 1;
2981 req->r_args.getattr.mask = cpu_to_le32(mask);
2982 req->r_locked_page = locked_page;
2983 err = ceph_mdsc_do_request(mdsc, NULL, req);
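	/*
	 * If the caller passed a locked page it wants the inline data from
	 * the reply: inline_version 0 means the reply unexpectedly carried
	 * no inline data, CEPH_INLINE_NONE (or 1) means there is no inline
	 * data to return, otherwise hand back the inline data length.
	 */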
2984 if (locked_page && err == 0) {
2985 u64 inline_version = req->r_reply_info.targeti.inline_version;
2986 if (inline_version == 0) {
2987 /* the reply is supposed to contain inline data */
2988 err = -EINVAL;
2989 } else if (inline_version == CEPH_INLINE_NONE ||
2990 inline_version == 1) {
2991 err = -ENODATA;
2992 } else {
2993 err = req->r_reply_info.targeti.inline_len;
2994 }
2995 }
2996 ceph_mdsc_put_request(req);
2997 doutc(cl, "result=%d\n", err);
2998 return err;
2999 }
3000
3001 int ceph_do_getvxattr(struct inode *inode, const char *name, void *value,
3002 size_t size)
3003 {
3004 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
3005 struct ceph_client *cl = fsc->client;
3006 struct ceph_mds_client *mdsc = fsc->mdsc;
3007 struct ceph_mds_request *req;
3008 int mode = USE_AUTH_MDS;
3009 int err;
3010 char *xattr_value;
3011 size_t xattr_value_len;
3012
3013 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETVXATTR, mode);
3014 if (IS_ERR(req)) {
3015 err = -ENOMEM;
3016 goto out;
3017 }
3018
3019 req->r_feature_needed = CEPHFS_FEATURE_OP_GETVXATTR;
3020 req->r_path2 = kstrdup(name, GFP_NOFS);
3021 if (!req->r_path2) {
3022 err = -ENOMEM;
3023 goto put;
3024 }
3025
3026 ihold(inode);
3027 req->r_inode = inode;
3028 err = ceph_mdsc_do_request(mdsc, NULL, req);
3029 if (err < 0)
3030 goto put;
3031
3032 xattr_value = req->r_reply_info.xattr_info.xattr_value;
3033 xattr_value_len = req->r_reply_info.xattr_info.xattr_value_len;
3034
3035 doutc(cl, "xattr_value_len:%zu, size:%zu\n", xattr_value_len, size);
3036
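	/*
	 * Follow getxattr() conventions: with a zero-sized buffer just
	 * return the value length so the caller can allocate one, and
	 * return -ERANGE if the supplied buffer is too small.
	 */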
3037 err = (int)xattr_value_len;
3038 if (size == 0)
3039 goto put;
3040
3041 if (xattr_value_len > size) {
3042 err = -ERANGE;
3043 goto put;
3044 }
3045
3046 memcpy(value, xattr_value, xattr_value_len);
3047 put:
3048 ceph_mdsc_put_request(req);
3049 out:
3050 doutc(cl, "result=%d\n", err);
3051 return err;
3052 }
3053
3054
3055 /*
3056 * Check inode permissions. We verify we have a valid value for
3057 * the AUTH cap, then call the generic handler.
3058 */
3059 int ceph_permission(struct mnt_idmap *idmap, struct inode *inode,
3060 int mask)
3061 {
3062 int err;
3063
3064 if (mask & MAY_NOT_BLOCK)
3065 return -ECHILD;
3066
3067 err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);
3068
3069 if (!err)
3070 err = generic_permission(idmap, inode, mask);
3071 return err;
3072 }
3073
3074 /* Craft a mask of needed caps given a set of requested statx attrs. */
3075 static int statx_to_caps(u32 want, umode_t mode)
3076 {
3077 int mask = 0;
3078
3079 if (want & (STATX_MODE|STATX_UID|STATX_GID|STATX_CTIME|STATX_BTIME|STATX_CHANGE_COOKIE))
3080 mask |= CEPH_CAP_AUTH_SHARED;
3081
3082 if (want & (STATX_NLINK|STATX_CTIME|STATX_CHANGE_COOKIE)) {
3083 /*
3084 * The link count for directories depends on inode->i_subdirs,
3085 * and that is only updated when Fs caps are held.
3086 */
3087 if (S_ISDIR(mode))
3088 mask |= CEPH_CAP_FILE_SHARED;
3089 else
3090 mask |= CEPH_CAP_LINK_SHARED;
3091 }
3092
3093 if (want & (STATX_ATIME|STATX_MTIME|STATX_CTIME|STATX_SIZE|STATX_BLOCKS|STATX_CHANGE_COOKIE))
3094 mask |= CEPH_CAP_FILE_SHARED;
3095
3096 if (want & (STATX_CTIME|STATX_CHANGE_COOKIE))
3097 mask |= CEPH_CAP_XATTR_SHARED;
3098
3099 return mask;
3100 }
3101
3102 /*
3103 * Get all the attributes. If we have sufficient caps for the requested attrs,
3104 * then we can avoid talking to the MDS at all.
3105 */
3106 int ceph_getattr(struct mnt_idmap *idmap, const struct path *path,
3107 struct kstat *stat, u32 request_mask, unsigned int flags)
3108 {
3109 struct inode *inode = d_inode(path->dentry);
3110 struct super_block *sb = inode->i_sb;
3111 struct ceph_inode_info *ci = ceph_inode(inode);
3112 u32 valid_mask = STATX_BASIC_STATS;
3113 int err = 0;
3114
3115 if (ceph_inode_is_shutdown(inode))
3116 return -ESTALE;
3117
3118 /* Skip the getattr altogether if we're asked not to sync */
3119 if ((flags & AT_STATX_SYNC_TYPE) != AT_STATX_DONT_SYNC) {
3120 err = ceph_do_getattr(inode,
3121 statx_to_caps(request_mask, inode->i_mode),
3122 flags & AT_STATX_FORCE_SYNC);
3123 if (err)
3124 return err;
3125 }
3126
3127 generic_fillattr(idmap, request_mask, inode, stat);
3128 stat->ino = ceph_present_inode(inode);
3129
3130 /*
3131 * btime on newly-allocated inodes is 0, so if this is still set to
3132 * that, then assume that it's not valid.
3133 */
3134 if (ci->i_btime.tv_sec || ci->i_btime.tv_nsec) {
3135 stat->btime = ci->i_btime;
3136 valid_mask |= STATX_BTIME;
3137 }
3138
3139 if (request_mask & STATX_CHANGE_COOKIE) {
3140 stat->change_cookie = inode_peek_iversion_raw(inode);
3141 valid_mask |= STATX_CHANGE_COOKIE;
3142 }
3143
3144 if (ceph_snap(inode) == CEPH_NOSNAP)
3145 stat->dev = sb->s_dev;
3146 else
3147 stat->dev = ci->i_snapid_map ? ci->i_snapid_map->dev : 0;
3148
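	/*
	 * For directories st_size is synthetic: recursive bytes when the
	 * rbytes mount option is set, the number of snapshots for the
	 * snapdir, or files + subdirs otherwise.
	 */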
3149 if (S_ISDIR(inode->i_mode)) {
3150 if (ceph_test_mount_opt(ceph_sb_to_fs_client(sb), RBYTES)) {
3151 stat->size = ci->i_rbytes;
3152 } else if (ceph_snap(inode) == CEPH_SNAPDIR) {
3153 struct ceph_inode_info *pci;
3154 struct ceph_snap_realm *realm;
3155 struct inode *parent;
3156
3157 parent = ceph_lookup_inode(sb, ceph_ino(inode));
3158 if (IS_ERR(parent))
3159 return PTR_ERR(parent);
3160
3161 pci = ceph_inode(parent);
3162 spin_lock(&pci->i_ceph_lock);
3163 realm = pci->i_snap_realm;
3164 if (realm)
3165 stat->size = realm->num_snaps;
3166 else
3167 stat->size = 0;
3168 spin_unlock(&pci->i_ceph_lock);
3169 iput(parent);
3170 } else {
3171 stat->size = ci->i_files + ci->i_subdirs;
3172 }
3173 stat->blocks = 0;
3174 stat->blksize = 65536;
3175 /*
3176 		 * Some applications rely on the st_nlink value of
3177 		 * directories being either 0 (if unlinked) or
3178 		 * 2 + number of subdirectories.
3179 */
3180 if (stat->nlink == 1)
3181 /* '.' + '..' + subdirs */
3182 stat->nlink = 1 + 1 + ci->i_subdirs;
3183 }
3184
3185 stat->attributes |= STATX_ATTR_CHANGE_MONOTONIC;
3186 if (IS_ENCRYPTED(inode))
3187 stat->attributes |= STATX_ATTR_ENCRYPTED;
3188 stat->attributes_mask |= (STATX_ATTR_CHANGE_MONOTONIC |
3189 STATX_ATTR_ENCRYPTED);
3190
3191 stat->result_mask = request_mask & valid_mask;
3192 return err;
3193 }
3194
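/*
 * Mark an inode CEPH_I_SHUTDOWN, purge all of its caps, and drop the inode
 * references the purged caps were holding; queue pagecache invalidation if
 * the purge asked for it.
 */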
3195 void ceph_inode_shutdown(struct inode *inode)
3196 {
3197 struct ceph_inode_info *ci = ceph_inode(inode);
3198 struct rb_node *p;
3199 int iputs = 0;
3200 bool invalidate = false;
3201
3202 spin_lock(&ci->i_ceph_lock);
3203 ci->i_ceph_flags |= CEPH_I_SHUTDOWN;
3204 p = rb_first(&ci->i_caps);
3205 while (p) {
3206 struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
3207
3208 p = rb_next(p);
3209 iputs += ceph_purge_inode_cap(inode, cap, &invalidate);
3210 }
3211 spin_unlock(&ci->i_ceph_lock);
3212
3213 if (invalidate)
3214 ceph_queue_invalidate(inode);
3215 while (iputs--)
3216 iput(inode);
3217 }
3218