1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/ceph/ceph_debug.h>
3 
4 #include <linux/module.h>
5 #include <linux/fs.h>
6 #include <linux/slab.h>
7 #include <linux/string.h>
8 #include <linux/uaccess.h>
9 #include <linux/kernel.h>
10 #include <linux/writeback.h>
11 #include <linux/vmalloc.h>
12 #include <linux/xattr.h>
13 #include <linux/posix_acl.h>
14 #include <linux/random.h>
15 #include <linux/sort.h>
16 #include <linux/iversion.h>
17 
18 #include "super.h"
19 #include "mds_client.h"
20 #include "cache.h"
21 #include <linux/ceph/decode.h>
22 
23 /*
24  * Ceph inode operations
25  *
26  * Implement basic inode helpers (get, alloc) and inode ops (getattr,
27  * setattr, etc.), xattr helpers, and helpers for assimilating
28  * metadata returned by the MDS into our cache.
29  *
30  * Also define helpers for doing asynchronous writeback, invalidation,
31  * and truncation for the benefit of those who can't afford to block
32  * (typically because they are in the message handler path).
33  */
34 
35 static const struct inode_operations ceph_symlink_iops;
36 
37 static void ceph_inode_work(struct work_struct *work);
38 
39 /*
40  * find or create an inode, given the ceph ino number
41  */
42 static int ceph_set_ino_cb(struct inode *inode, void *data)
43 {
44 	struct ceph_inode_info *ci = ceph_inode(inode);
45 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
46 
47 	ci->i_vino = *(struct ceph_vino *)data;
48 	inode->i_ino = ceph_vino_to_ino_t(ci->i_vino);
49 	inode_set_iversion_raw(inode, 0);
50 	percpu_counter_inc(&mdsc->metric.total_inodes);
51 
52 	return 0;
53 }
54 
55 struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
56 {
57 	struct inode *inode;
58 
59 	if (ceph_vino_is_reserved(vino))
60 		return ERR_PTR(-EREMOTEIO);
61 
62 	inode = iget5_locked(sb, (unsigned long)vino.ino, ceph_ino_compare,
63 			     ceph_set_ino_cb, &vino);
64 	if (!inode)
65 		return ERR_PTR(-ENOMEM);
66 
67 	dout("get_inode on %llu=%llx.%llx got %p new %d\n", ceph_present_inode(inode),
68 	     ceph_vinop(inode), inode, !!(inode->i_state & I_NEW));
69 	return inode;
70 }
71 
72 /*
73  * get/construct snapdir inode for a given directory
74  */
75 struct inode *ceph_get_snapdir(struct inode *parent)
76 {
77 	struct ceph_vino vino = {
78 		.ino = ceph_ino(parent),
79 		.snap = CEPH_SNAPDIR,
80 	};
81 	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
82 	struct ceph_inode_info *ci = ceph_inode(inode);
83 
84 	if (IS_ERR(inode))
85 		return inode;
86 
87 	if (!S_ISDIR(parent->i_mode)) {
88 		pr_warn_once("bad snapdir parent type (mode=0%o)\n",
89 			     parent->i_mode);
90 		goto err;
91 	}
92 
93 	if (!(inode->i_state & I_NEW) && !S_ISDIR(inode->i_mode)) {
94 		pr_warn_once("bad snapdir inode type (mode=0%o)\n",
95 			     inode->i_mode);
96 		goto err;
97 	}
98 
99 	inode->i_mode = parent->i_mode;
100 	inode->i_uid = parent->i_uid;
101 	inode->i_gid = parent->i_gid;
102 	inode->i_mtime = parent->i_mtime;
103 	inode->i_ctime = parent->i_ctime;
104 	inode->i_atime = parent->i_atime;
105 	ci->i_rbytes = 0;
106 	ci->i_btime = ceph_inode(parent)->i_btime;
107 
108 	if (inode->i_state & I_NEW) {
109 		inode->i_op = &ceph_snapdir_iops;
110 		inode->i_fop = &ceph_snapdir_fops;
111 		ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
112 		unlock_new_inode(inode);
113 	}
114 
115 	return inode;
116 err:
117 	if ((inode->i_state & I_NEW))
118 		discard_new_inode(inode);
119 	else
120 		iput(inode);
121 	return ERR_PTR(-ENOTDIR);
122 }
123 
124 const struct inode_operations ceph_file_iops = {
125 	.permission = ceph_permission,
126 	.setattr = ceph_setattr,
127 	.getattr = ceph_getattr,
128 	.listxattr = ceph_listxattr,
129 	.get_acl = ceph_get_acl,
130 	.set_acl = ceph_set_acl,
131 };
132 
133 
134 /*
135  * We use a 'frag tree' to keep track of the MDS's directory fragments
136  * for a given inode (usually there is just a single fragment).  We
137  * need to know when a child frag is delegated to a new MDS, or when
138  * it is flagged as replicated, so we can direct our requests
139  * accordingly.
140  */
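/*
 * A quick illustration of the walk below: starting from the root
 * fragment 0/0, a fragment recorded as split by N bits routes a name
 * hash to one of its 1 << N children (via ceph_frag_make_child()), and
 * the descent repeats until it reaches a fragment with no recorded
 * split (a leaf).  __ceph_choose_frag() below implements this loop.
 */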
141 
142 /*
143  * find/create a frag in the tree
144  */
145 static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
146 						    u32 f)
147 {
148 	struct rb_node **p;
149 	struct rb_node *parent = NULL;
150 	struct ceph_inode_frag *frag;
151 	int c;
152 
153 	p = &ci->i_fragtree.rb_node;
154 	while (*p) {
155 		parent = *p;
156 		frag = rb_entry(parent, struct ceph_inode_frag, node);
157 		c = ceph_frag_compare(f, frag->frag);
158 		if (c < 0)
159 			p = &(*p)->rb_left;
160 		else if (c > 0)
161 			p = &(*p)->rb_right;
162 		else
163 			return frag;
164 	}
165 
166 	frag = kmalloc(sizeof(*frag), GFP_NOFS);
167 	if (!frag)
168 		return ERR_PTR(-ENOMEM);
169 
170 	frag->frag = f;
171 	frag->split_by = 0;
172 	frag->mds = -1;
173 	frag->ndist = 0;
174 
175 	rb_link_node(&frag->node, parent, p);
176 	rb_insert_color(&frag->node, &ci->i_fragtree);
177 
178 	dout("get_or_create_frag added %llx.%llx frag %x\n",
179 	     ceph_vinop(&ci->netfs.inode), f);
180 	return frag;
181 }
182 
183 /*
184  * find a specific frag @f
185  */
186 struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
187 {
188 	struct rb_node *n = ci->i_fragtree.rb_node;
189 
190 	while (n) {
191 		struct ceph_inode_frag *frag =
192 			rb_entry(n, struct ceph_inode_frag, node);
193 		int c = ceph_frag_compare(f, frag->frag);
194 		if (c < 0)
195 			n = n->rb_left;
196 		else if (c > 0)
197 			n = n->rb_right;
198 		else
199 			return frag;
200 	}
201 	return NULL;
202 }
203 
204 /*
205  * Choose frag containing the given value @v.  If @pfrag is
206  * specified, copy the frag delegation info to the caller if
207  * it is present.
208  */
209 static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
210 			      struct ceph_inode_frag *pfrag, int *found)
211 {
212 	u32 t = ceph_frag_make(0, 0);
213 	struct ceph_inode_frag *frag;
214 	unsigned nway, i;
215 	u32 n;
216 
217 	if (found)
218 		*found = 0;
219 
220 	while (1) {
221 		WARN_ON(!ceph_frag_contains_value(t, v));
222 		frag = __ceph_find_frag(ci, t);
223 		if (!frag)
224 			break; /* t is a leaf */
225 		if (frag->split_by == 0) {
226 			if (pfrag)
227 				memcpy(pfrag, frag, sizeof(*pfrag));
228 			if (found)
229 				*found = 1;
230 			break;
231 		}
232 
233 		/* choose child */
234 		nway = 1 << frag->split_by;
235 		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
236 		     frag->split_by, nway);
237 		for (i = 0; i < nway; i++) {
238 			n = ceph_frag_make_child(t, frag->split_by, i);
239 			if (ceph_frag_contains_value(n, v)) {
240 				t = n;
241 				break;
242 			}
243 		}
244 		BUG_ON(i == nway);
245 	}
246 	dout("choose_frag(%x) = %x\n", v, t);
247 
248 	return t;
249 }
250 
251 u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
252 		     struct ceph_inode_frag *pfrag, int *found)
253 {
254 	u32 ret;
255 	mutex_lock(&ci->i_fragtree_mutex);
256 	ret = __ceph_choose_frag(ci, v, pfrag, found);
257 	mutex_unlock(&ci->i_fragtree_mutex);
258 	return ret;
259 }
260 
261 /*
262  * Process dirfrag (delegation) info from the mds.  Include leaf
263  * fragment in tree ONLY if ndist > 0.  Otherwise, only
264  * branches/splits are included in i_fragtree.
265  */
266 static int ceph_fill_dirfrag(struct inode *inode,
267 			     struct ceph_mds_reply_dirfrag *dirinfo)
268 {
269 	struct ceph_inode_info *ci = ceph_inode(inode);
270 	struct ceph_inode_frag *frag;
271 	u32 id = le32_to_cpu(dirinfo->frag);
272 	int mds = le32_to_cpu(dirinfo->auth);
273 	int ndist = le32_to_cpu(dirinfo->ndist);
274 	int diri_auth = -1;
275 	int i;
276 	int err = 0;
277 
278 	spin_lock(&ci->i_ceph_lock);
279 	if (ci->i_auth_cap)
280 		diri_auth = ci->i_auth_cap->mds;
281 	spin_unlock(&ci->i_ceph_lock);
282 
283 	if (mds == -1) /* CDIR_AUTH_PARENT */
284 		mds = diri_auth;
285 
286 	mutex_lock(&ci->i_fragtree_mutex);
287 	if (ndist == 0 && mds == diri_auth) {
288 		/* no delegation info needed. */
289 		frag = __ceph_find_frag(ci, id);
290 		if (!frag)
291 			goto out;
292 		if (frag->split_by == 0) {
293 			/* tree leaf, remove */
294 			dout("fill_dirfrag removed %llx.%llx frag %x"
295 			     " (no ref)\n", ceph_vinop(inode), id);
296 			rb_erase(&frag->node, &ci->i_fragtree);
297 			kfree(frag);
298 		} else {
299 			/* tree branch, keep and clear */
300 			dout("fill_dirfrag cleared %llx.%llx frag %x"
301 			     " referral\n", ceph_vinop(inode), id);
302 			frag->mds = -1;
303 			frag->ndist = 0;
304 		}
305 		goto out;
306 	}
307 
308 
309 	/* find/add this frag to store mds delegation info */
310 	frag = __get_or_create_frag(ci, id);
311 	if (IS_ERR(frag)) {
312 		/* this is not the end of the world; we can continue
313 		   with bad/inaccurate delegation info */
314 		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
315 		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
316 		err = -ENOMEM;
317 		goto out;
318 	}
319 
320 	frag->mds = mds;
321 	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
322 	for (i = 0; i < frag->ndist; i++)
323 		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
324 	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
325 	     ceph_vinop(inode), frag->frag, frag->ndist);
326 
327 out:
328 	mutex_unlock(&ci->i_fragtree_mutex);
329 	return err;
330 }
331 
332 static int frag_tree_split_cmp(const void *l, const void *r)
333 {
334 	struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l;
335 	struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r;
336 	return ceph_frag_compare(le32_to_cpu(ls->frag),
337 				 le32_to_cpu(rs->frag));
338 }
339 
340 static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
341 {
342 	if (!frag)
343 		return f == ceph_frag_make(0, 0);
344 	if (ceph_frag_bits(f) != ceph_frag_bits(frag->frag) + frag->split_by)
345 		return false;
346 	return ceph_frag_contains_value(frag->frag, ceph_frag_value(f));
347 }
348 
349 static int ceph_fill_fragtree(struct inode *inode,
350 			      struct ceph_frag_tree_head *fragtree,
351 			      struct ceph_mds_reply_dirfrag *dirinfo)
352 {
353 	struct ceph_inode_info *ci = ceph_inode(inode);
354 	struct ceph_inode_frag *frag, *prev_frag = NULL;
355 	struct rb_node *rb_node;
356 	unsigned i, split_by, nsplits;
357 	u32 id;
358 	bool update = false;
359 
360 	mutex_lock(&ci->i_fragtree_mutex);
361 	nsplits = le32_to_cpu(fragtree->nsplits);
362 	if (nsplits != ci->i_fragtree_nsplits) {
363 		update = true;
364 	} else if (nsplits) {
365 		i = prandom_u32_max(nsplits);
366 		id = le32_to_cpu(fragtree->splits[i].frag);
367 		if (!__ceph_find_frag(ci, id))
368 			update = true;
369 	} else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) {
370 		rb_node = rb_first(&ci->i_fragtree);
371 		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
372 		if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node))
373 			update = true;
374 	}
375 	if (!update && dirinfo) {
376 		id = le32_to_cpu(dirinfo->frag);
377 		if (id != __ceph_choose_frag(ci, id, NULL, NULL))
378 			update = true;
379 	}
380 	if (!update)
381 		goto out_unlock;
382 
383 	if (nsplits > 1) {
384 		sort(fragtree->splits, nsplits, sizeof(fragtree->splits[0]),
385 		     frag_tree_split_cmp, NULL);
386 	}
387 
388 	dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode));
389 	rb_node = rb_first(&ci->i_fragtree);
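	/*
	 * Merge the (now sorted) split list into the existing rbtree in a
	 * single pass: stale nodes that sort before the next reported split
	 * and are no longer valid leaves are erased, matching nodes are
	 * updated in place, and missing split nodes are created.
	 */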
390 	for (i = 0; i < nsplits; i++) {
391 		id = le32_to_cpu(fragtree->splits[i].frag);
392 		split_by = le32_to_cpu(fragtree->splits[i].by);
393 		if (split_by == 0 || ceph_frag_bits(id) + split_by > 24) {
394 			pr_err("fill_fragtree %llx.%llx invalid split %d/%u, "
395 			       "frag %x split by %d\n", ceph_vinop(inode),
396 			       i, nsplits, id, split_by);
397 			continue;
398 		}
399 		frag = NULL;
400 		while (rb_node) {
401 			frag = rb_entry(rb_node, struct ceph_inode_frag, node);
402 			if (ceph_frag_compare(frag->frag, id) >= 0) {
403 				if (frag->frag != id)
404 					frag = NULL;
405 				else
406 					rb_node = rb_next(rb_node);
407 				break;
408 			}
409 			rb_node = rb_next(rb_node);
410 			/* delete stale split/leaf node */
411 			if (frag->split_by > 0 ||
412 			    !is_frag_child(frag->frag, prev_frag)) {
413 				rb_erase(&frag->node, &ci->i_fragtree);
414 				if (frag->split_by > 0)
415 					ci->i_fragtree_nsplits--;
416 				kfree(frag);
417 			}
418 			frag = NULL;
419 		}
420 		if (!frag) {
421 			frag = __get_or_create_frag(ci, id);
422 			if (IS_ERR(frag))
423 				continue;
424 		}
425 		if (frag->split_by == 0)
426 			ci->i_fragtree_nsplits++;
427 		frag->split_by = split_by;
428 		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
429 		prev_frag = frag;
430 	}
431 	while (rb_node) {
432 		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
433 		rb_node = rb_next(rb_node);
434 		/* delete stale split/leaf node */
435 		if (frag->split_by > 0 ||
436 		    !is_frag_child(frag->frag, prev_frag)) {
437 			rb_erase(&frag->node, &ci->i_fragtree);
438 			if (frag->split_by > 0)
439 				ci->i_fragtree_nsplits--;
440 			kfree(frag);
441 		}
442 	}
443 out_unlock:
444 	mutex_unlock(&ci->i_fragtree_mutex);
445 	return 0;
446 }
447 
448 /*
449  * initialize a newly allocated inode.
450  */
451 struct inode *ceph_alloc_inode(struct super_block *sb)
452 {
453 	struct ceph_inode_info *ci;
454 	int i;
455 
456 	ci = alloc_inode_sb(sb, ceph_inode_cachep, GFP_NOFS);
457 	if (!ci)
458 		return NULL;
459 
460 	dout("alloc_inode %p\n", &ci->netfs.inode);
461 
462 	/* Set parameters for the netfs library */
463 	netfs_inode_init(&ci->netfs, &ceph_netfs_ops);
464 
465 	spin_lock_init(&ci->i_ceph_lock);
466 
467 	ci->i_version = 0;
468 	ci->i_inline_version = 0;
469 	ci->i_time_warp_seq = 0;
470 	ci->i_ceph_flags = 0;
471 	atomic64_set(&ci->i_ordered_count, 1);
472 	atomic64_set(&ci->i_release_count, 1);
473 	atomic64_set(&ci->i_complete_seq[0], 0);
474 	atomic64_set(&ci->i_complete_seq[1], 0);
475 	ci->i_symlink = NULL;
476 
477 	ci->i_max_bytes = 0;
478 	ci->i_max_files = 0;
479 
480 	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
481 	memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
482 	RCU_INIT_POINTER(ci->i_layout.pool_ns, NULL);
483 
484 	ci->i_fragtree = RB_ROOT;
485 	mutex_init(&ci->i_fragtree_mutex);
486 
487 	ci->i_xattrs.blob = NULL;
488 	ci->i_xattrs.prealloc_blob = NULL;
489 	ci->i_xattrs.dirty = false;
490 	ci->i_xattrs.index = RB_ROOT;
491 	ci->i_xattrs.count = 0;
492 	ci->i_xattrs.names_size = 0;
493 	ci->i_xattrs.vals_size = 0;
494 	ci->i_xattrs.version = 0;
495 	ci->i_xattrs.index_version = 0;
496 
497 	ci->i_caps = RB_ROOT;
498 	ci->i_auth_cap = NULL;
499 	ci->i_dirty_caps = 0;
500 	ci->i_flushing_caps = 0;
501 	INIT_LIST_HEAD(&ci->i_dirty_item);
502 	INIT_LIST_HEAD(&ci->i_flushing_item);
503 	ci->i_prealloc_cap_flush = NULL;
504 	INIT_LIST_HEAD(&ci->i_cap_flush_list);
505 	init_waitqueue_head(&ci->i_cap_wq);
506 	ci->i_hold_caps_max = 0;
507 	INIT_LIST_HEAD(&ci->i_cap_delay_list);
508 	INIT_LIST_HEAD(&ci->i_cap_snaps);
509 	ci->i_head_snapc = NULL;
510 	ci->i_snap_caps = 0;
511 
512 	ci->i_last_rd = ci->i_last_wr = jiffies - 3600 * HZ;
513 	for (i = 0; i < CEPH_FILE_MODE_BITS; i++)
514 		ci->i_nr_by_mode[i] = 0;
515 
516 	mutex_init(&ci->i_truncate_mutex);
517 	ci->i_truncate_seq = 0;
518 	ci->i_truncate_size = 0;
519 	ci->i_truncate_pending = 0;
520 
521 	ci->i_max_size = 0;
522 	ci->i_reported_size = 0;
523 	ci->i_wanted_max_size = 0;
524 	ci->i_requested_max_size = 0;
525 
526 	ci->i_pin_ref = 0;
527 	ci->i_rd_ref = 0;
528 	ci->i_rdcache_ref = 0;
529 	ci->i_wr_ref = 0;
530 	ci->i_wb_ref = 0;
531 	ci->i_fx_ref = 0;
532 	ci->i_wrbuffer_ref = 0;
533 	ci->i_wrbuffer_ref_head = 0;
534 	atomic_set(&ci->i_filelock_ref, 0);
535 	atomic_set(&ci->i_shared_gen, 1);
536 	ci->i_rdcache_gen = 0;
537 	ci->i_rdcache_revoking = 0;
538 
539 	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
540 	INIT_LIST_HEAD(&ci->i_unsafe_iops);
541 	spin_lock_init(&ci->i_unsafe_lock);
542 
543 	ci->i_snap_realm = NULL;
544 	INIT_LIST_HEAD(&ci->i_snap_realm_item);
545 	INIT_LIST_HEAD(&ci->i_snap_flush_item);
546 
547 	INIT_WORK(&ci->i_work, ceph_inode_work);
548 	ci->i_work_mask = 0;
549 	memset(&ci->i_btime, '\0', sizeof(ci->i_btime));
550 	return &ci->netfs.inode;
551 }
552 
553 void ceph_free_inode(struct inode *inode)
554 {
555 	struct ceph_inode_info *ci = ceph_inode(inode);
556 
557 	kfree(ci->i_symlink);
558 	kmem_cache_free(ceph_inode_cachep, ci);
559 }
560 
561 void ceph_evict_inode(struct inode *inode)
562 {
563 	struct ceph_inode_info *ci = ceph_inode(inode);
564 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
565 	struct ceph_inode_frag *frag;
566 	struct rb_node *n;
567 
568 	dout("evict_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
569 
570 	percpu_counter_dec(&mdsc->metric.total_inodes);
571 
572 	truncate_inode_pages_final(&inode->i_data);
573 	if (inode->i_state & I_PINNING_FSCACHE_WB)
574 		ceph_fscache_unuse_cookie(inode, true);
575 	clear_inode(inode);
576 
577 	ceph_fscache_unregister_inode_cookie(ci);
578 
579 	__ceph_remove_caps(ci);
580 
581 	if (__ceph_has_quota(ci, QUOTA_GET_ANY))
582 		ceph_adjust_quota_realms_count(inode, false);
583 
584 	/*
585 	 * we may still have a snap_realm reference if there are stray
586 	 * caps in i_snap_caps.
587 	 */
588 	if (ci->i_snap_realm) {
589 		if (ceph_snap(inode) == CEPH_NOSNAP) {
590 			dout(" dropping residual ref to snap realm %p\n",
591 			     ci->i_snap_realm);
592 			ceph_change_snap_realm(inode, NULL);
593 		} else {
594 			ceph_put_snapid_map(mdsc, ci->i_snapid_map);
595 			ci->i_snap_realm = NULL;
596 		}
597 	}
598 
599 	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
600 		frag = rb_entry(n, struct ceph_inode_frag, node);
601 		rb_erase(n, &ci->i_fragtree);
602 		kfree(frag);
603 	}
604 	ci->i_fragtree_nsplits = 0;
605 
606 	__ceph_destroy_xattrs(ci);
607 	if (ci->i_xattrs.blob)
608 		ceph_buffer_put(ci->i_xattrs.blob);
609 	if (ci->i_xattrs.prealloc_blob)
610 		ceph_buffer_put(ci->i_xattrs.prealloc_blob);
611 
612 	ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns));
613 	ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
614 }
615 
616 static inline blkcnt_t calc_inode_blocks(u64 size)
617 {
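	/* i_blocks is counted in 512-byte units, so round the byte size up */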
618 	return (size + (1<<9) - 1) >> 9;
619 }
620 
621 /*
622  * Helpers to fill in size, ctime, mtime, and atime.  We have to be
623  * careful because either the client or MDS may have more up to date
624  * info, depending on which capabilities are held, and whether
625  * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
626  * and size are monotonically increasing, except when utimes() or
627  * truncate() increments the corresponding _seq values.)
628  */
629 int ceph_fill_file_size(struct inode *inode, int issued,
630 			u32 truncate_seq, u64 truncate_size, u64 size)
631 {
632 	struct ceph_inode_info *ci = ceph_inode(inode);
633 	int queue_trunc = 0;
634 	loff_t isize = i_size_read(inode);
635 
636 	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
637 	    (truncate_seq == ci->i_truncate_seq && size > isize)) {
638 		dout("size %lld -> %llu\n", isize, size);
639 		if (size > 0 && S_ISDIR(inode->i_mode)) {
640 			pr_err("fill_file_size non-zero size for directory\n");
641 			size = 0;
642 		}
643 		i_size_write(inode, size);
644 		inode->i_blocks = calc_inode_blocks(size);
645 		/*
646 		 * If we're expanding, then we should be able to just update
647 		 * the existing cookie.
648 		 */
649 		if (size > isize)
650 			ceph_fscache_update(inode);
651 		ci->i_reported_size = size;
652 		if (truncate_seq != ci->i_truncate_seq) {
653 			dout("truncate_seq %u -> %u\n",
654 			     ci->i_truncate_seq, truncate_seq);
655 			ci->i_truncate_seq = truncate_seq;
656 
657 			/* the MDS should have revoked these caps */
658 			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_RD |
659 					       CEPH_CAP_FILE_LAZYIO));
660 			/*
661 			 * If we hold relevant caps, or in the case where we're
662 			 * not the only client referencing this file and we
663 			 * don't hold those caps, then we need to check whether
664 			 * the file is either opened or mmaped
665 			 */
666 			if ((issued & (CEPH_CAP_FILE_CACHE|
667 				       CEPH_CAP_FILE_BUFFER)) ||
668 			    mapping_mapped(inode->i_mapping) ||
669 			    __ceph_is_file_opened(ci)) {
670 				ci->i_truncate_pending++;
671 				queue_trunc = 1;
672 			}
673 		}
674 	}
675 	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
676 	    ci->i_truncate_size != truncate_size) {
677 		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
678 		     truncate_size);
679 		ci->i_truncate_size = truncate_size;
680 	}
681 	return queue_trunc;
682 }
683 
684 void ceph_fill_file_time(struct inode *inode, int issued,
685 			 u64 time_warp_seq, struct timespec64 *ctime,
686 			 struct timespec64 *mtime, struct timespec64 *atime)
687 {
688 	struct ceph_inode_info *ci = ceph_inode(inode);
689 	int warn = 0;
690 
691 	if (issued & (CEPH_CAP_FILE_EXCL|
692 		      CEPH_CAP_FILE_WR|
693 		      CEPH_CAP_FILE_BUFFER|
694 		      CEPH_CAP_AUTH_EXCL|
695 		      CEPH_CAP_XATTR_EXCL)) {
696 		if (ci->i_version == 0 ||
697 		    timespec64_compare(ctime, &inode->i_ctime) > 0) {
698 			dout("ctime %lld.%09ld -> %lld.%09ld inc w/ cap\n",
699 			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
700 			     ctime->tv_sec, ctime->tv_nsec);
701 			inode->i_ctime = *ctime;
702 		}
703 		if (ci->i_version == 0 ||
704 		    ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
705 			/* the MDS did a utimes() */
706 			dout("mtime %lld.%09ld -> %lld.%09ld "
707 			     "tw %d -> %d\n",
708 			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
709 			     mtime->tv_sec, mtime->tv_nsec,
710 			     ci->i_time_warp_seq, (int)time_warp_seq);
711 
712 			inode->i_mtime = *mtime;
713 			inode->i_atime = *atime;
714 			ci->i_time_warp_seq = time_warp_seq;
715 		} else if (time_warp_seq == ci->i_time_warp_seq) {
716 			/* nobody did utimes(); take the max */
717 			if (timespec64_compare(mtime, &inode->i_mtime) > 0) {
718 				dout("mtime %lld.%09ld -> %lld.%09ld inc\n",
719 				     inode->i_mtime.tv_sec,
720 				     inode->i_mtime.tv_nsec,
721 				     mtime->tv_sec, mtime->tv_nsec);
722 				inode->i_mtime = *mtime;
723 			}
724 			if (timespec64_compare(atime, &inode->i_atime) > 0) {
725 				dout("atime %lld.%09ld -> %lld.%09ld inc\n",
726 				     inode->i_atime.tv_sec,
727 				     inode->i_atime.tv_nsec,
728 				     atime->tv_sec, atime->tv_nsec);
729 				inode->i_atime = *atime;
730 			}
731 		} else if (issued & CEPH_CAP_FILE_EXCL) {
732 			/* we did a utimes(); ignore mds values */
733 		} else {
734 			warn = 1;
735 		}
736 	} else {
737 		/* we have no write|excl caps; whatever the MDS says is true */
738 		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
739 			inode->i_ctime = *ctime;
740 			inode->i_mtime = *mtime;
741 			inode->i_atime = *atime;
742 			ci->i_time_warp_seq = time_warp_seq;
743 		} else {
744 			warn = 1;
745 		}
746 	}
747 	if (warn) /* time_warp_seq shouldn't go backwards */
748 		dout("%p mds time_warp_seq %llu < %u\n",
749 		     inode, time_warp_seq, ci->i_time_warp_seq);
750 }
751 
752 /*
753  * Populate an inode based on info from mds.  May be called on new or
754  * existing inodes.
755  */
756 int ceph_fill_inode(struct inode *inode, struct page *locked_page,
757 		    struct ceph_mds_reply_info_in *iinfo,
758 		    struct ceph_mds_reply_dirfrag *dirinfo,
759 		    struct ceph_mds_session *session, int cap_fmode,
760 		    struct ceph_cap_reservation *caps_reservation)
761 {
762 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
763 	struct ceph_mds_reply_inode *info = iinfo->in;
764 	struct ceph_inode_info *ci = ceph_inode(inode);
765 	int issued, new_issued, info_caps;
766 	struct timespec64 mtime, atime, ctime;
767 	struct ceph_buffer *xattr_blob = NULL;
768 	struct ceph_buffer *old_blob = NULL;
769 	struct ceph_string *pool_ns = NULL;
770 	struct ceph_cap *new_cap = NULL;
771 	int err = 0;
772 	bool wake = false;
773 	bool queue_trunc = false;
774 	bool new_version = false;
775 	bool fill_inline = false;
776 	umode_t mode = le32_to_cpu(info->mode);
777 	dev_t rdev = le32_to_cpu(info->rdev);
778 
779 	lockdep_assert_held(&mdsc->snap_rwsem);
780 
781 	dout("%s %p ino %llx.%llx v %llu had %llu\n", __func__,
782 	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
783 	     ci->i_version);
784 
785 	/* Once I_NEW is cleared, we can't change type or dev numbers */
786 	if (inode->i_state & I_NEW) {
787 		inode->i_mode = mode;
788 	} else {
789 		if (inode_wrong_type(inode, mode)) {
790 			pr_warn_once("inode type changed! (ino %llx.%llx is 0%o, mds says 0%o)\n",
791 				     ceph_vinop(inode), inode->i_mode, mode);
792 			return -ESTALE;
793 		}
794 
795 		if ((S_ISCHR(mode) || S_ISBLK(mode)) && inode->i_rdev != rdev) {
796 			pr_warn_once("dev inode rdev changed! (ino %llx.%llx is %u:%u, mds says %u:%u)\n",
797 				     ceph_vinop(inode), MAJOR(inode->i_rdev),
798 				     MINOR(inode->i_rdev), MAJOR(rdev),
799 				     MINOR(rdev));
800 			return -ESTALE;
801 		}
802 	}
803 
804 	info_caps = le32_to_cpu(info->cap.caps);
805 
806 	/* prealloc new cap struct */
807 	if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) {
808 		new_cap = ceph_get_cap(mdsc, caps_reservation);
809 		if (!new_cap)
810 			return -ENOMEM;
811 	}
812 
813 	/*
814 	 * prealloc xattr data, if it looks like we'll need it.  only
815 	 * if len > 4 (meaning there are actually xattrs; the first 4
816 	 * bytes are the xattr count).
817 	 */
818 	if (iinfo->xattr_len > 4) {
819 		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
820 		if (!xattr_blob)
821 			pr_err("%s ENOMEM xattr blob %d bytes\n", __func__,
822 			       iinfo->xattr_len);
823 	}
824 
825 	if (iinfo->pool_ns_len > 0)
826 		pool_ns = ceph_find_or_create_string(iinfo->pool_ns_data,
827 						     iinfo->pool_ns_len);
828 
829 	if (ceph_snap(inode) != CEPH_NOSNAP && !ci->i_snapid_map)
830 		ci->i_snapid_map = ceph_get_snapid_map(mdsc, ceph_snap(inode));
831 
832 	spin_lock(&ci->i_ceph_lock);
833 
834 	/*
835 	 * provided version will be odd if inode value is projected,
836 	 * even if stable.  skip the update if we have newer stable
837 	 * info (ours>=theirs, e.g. due to racing mds replies), unless
838 	 * we are getting projected (unstable) info (in which case the
839 	 * version is odd, and we want ours>theirs).
840 	 *   us   them
841 	 *   2    2     skip
842 	 *   3    2     skip
843 	 *   3    3     update
844 	 */
845 	if (ci->i_version == 0 ||
846 	    ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
847 	     le64_to_cpu(info->version) > (ci->i_version & ~1)))
848 		new_version = true;
849 
850 	/* Update change_attribute */
851 	inode_set_max_iversion_raw(inode, iinfo->change_attr);
852 
853 	__ceph_caps_issued(ci, &issued);
854 	issued |= __ceph_caps_dirty(ci);
855 	new_issued = ~issued & info_caps;
856 
857 	/* directories have fl_stripe_unit set to zero */
858 	if (le32_to_cpu(info->layout.fl_stripe_unit))
859 		inode->i_blkbits =
860 			fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
861 	else
862 		inode->i_blkbits = CEPH_BLOCK_SHIFT;
863 
864 	__ceph_update_quota(ci, iinfo->max_bytes, iinfo->max_files);
865 
866 	if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
867 	    (issued & CEPH_CAP_AUTH_EXCL) == 0) {
868 		inode->i_mode = mode;
869 		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
870 		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
871 		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
872 		     from_kuid(&init_user_ns, inode->i_uid),
873 		     from_kgid(&init_user_ns, inode->i_gid));
874 		ceph_decode_timespec64(&ci->i_btime, &iinfo->btime);
875 		ceph_decode_timespec64(&ci->i_snap_btime, &iinfo->snap_btime);
876 	}
877 
878 	if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
879 	    (issued & CEPH_CAP_LINK_EXCL) == 0)
880 		set_nlink(inode, le32_to_cpu(info->nlink));
881 
882 	if (new_version || (new_issued & CEPH_CAP_ANY_RD)) {
883 		/* be careful with mtime, atime, size */
884 		ceph_decode_timespec64(&atime, &info->atime);
885 		ceph_decode_timespec64(&mtime, &info->mtime);
886 		ceph_decode_timespec64(&ctime, &info->ctime);
887 		ceph_fill_file_time(inode, issued,
888 				le32_to_cpu(info->time_warp_seq),
889 				&ctime, &mtime, &atime);
890 	}
891 
892 	if (new_version || (info_caps & CEPH_CAP_FILE_SHARED)) {
893 		ci->i_files = le64_to_cpu(info->files);
894 		ci->i_subdirs = le64_to_cpu(info->subdirs);
895 	}
896 
897 	if (new_version ||
898 	    (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
899 		s64 old_pool = ci->i_layout.pool_id;
900 		struct ceph_string *old_ns;
901 
902 		ceph_file_layout_from_legacy(&ci->i_layout, &info->layout);
903 		old_ns = rcu_dereference_protected(ci->i_layout.pool_ns,
904 					lockdep_is_held(&ci->i_ceph_lock));
905 		rcu_assign_pointer(ci->i_layout.pool_ns, pool_ns);
906 
907 		if (ci->i_layout.pool_id != old_pool || pool_ns != old_ns)
908 			ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;
909 
910 		pool_ns = old_ns;
911 
912 		queue_trunc = ceph_fill_file_size(inode, issued,
913 					le32_to_cpu(info->truncate_seq),
914 					le64_to_cpu(info->truncate_size),
915 					le64_to_cpu(info->size));
916 		/* only update max_size on auth cap */
917 		if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
918 		    ci->i_max_size != le64_to_cpu(info->max_size)) {
919 			dout("max_size %lld -> %llu\n", ci->i_max_size,
920 					le64_to_cpu(info->max_size));
921 			ci->i_max_size = le64_to_cpu(info->max_size);
922 		}
923 	}
924 
925 	/* layout and rstat are not tracked by capability, update them if
926 	 * the inode info is from auth mds */
927 	if (new_version || (info->cap.flags & CEPH_CAP_FLAG_AUTH)) {
928 		if (S_ISDIR(inode->i_mode)) {
929 			ci->i_dir_layout = iinfo->dir_layout;
930 			ci->i_rbytes = le64_to_cpu(info->rbytes);
931 			ci->i_rfiles = le64_to_cpu(info->rfiles);
932 			ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
933 			ci->i_dir_pin = iinfo->dir_pin;
934 			ci->i_rsnaps = iinfo->rsnaps;
935 			ceph_decode_timespec64(&ci->i_rctime, &info->rctime);
936 		}
937 	}
938 
939 	/* xattrs */
940 	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
941 	if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL))  &&
942 	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
943 		if (ci->i_xattrs.blob)
944 			old_blob = ci->i_xattrs.blob;
945 		ci->i_xattrs.blob = xattr_blob;
946 		if (xattr_blob)
947 			memcpy(ci->i_xattrs.blob->vec.iov_base,
948 			       iinfo->xattr_data, iinfo->xattr_len);
949 		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
950 		ceph_forget_all_cached_acls(inode);
951 		ceph_security_invalidate_secctx(inode);
952 		xattr_blob = NULL;
953 	}
954 
955 	/* finally update i_version */
956 	if (le64_to_cpu(info->version) > ci->i_version)
957 		ci->i_version = le64_to_cpu(info->version);
958 
959 	inode->i_mapping->a_ops = &ceph_aops;
960 
961 	switch (inode->i_mode & S_IFMT) {
962 	case S_IFIFO:
963 	case S_IFBLK:
964 	case S_IFCHR:
965 	case S_IFSOCK:
966 		inode->i_blkbits = PAGE_SHIFT;
967 		init_special_inode(inode, inode->i_mode, rdev);
968 		inode->i_op = &ceph_file_iops;
969 		break;
970 	case S_IFREG:
971 		inode->i_op = &ceph_file_iops;
972 		inode->i_fop = &ceph_file_fops;
973 		break;
974 	case S_IFLNK:
975 		inode->i_op = &ceph_symlink_iops;
976 		if (!ci->i_symlink) {
977 			u32 symlen = iinfo->symlink_len;
978 			char *sym;
979 
980 			spin_unlock(&ci->i_ceph_lock);
981 
982 			if (symlen != i_size_read(inode)) {
983 				pr_err("%s %llx.%llx BAD symlink "
984 					"size %lld\n", __func__,
985 					ceph_vinop(inode),
986 					i_size_read(inode));
987 				i_size_write(inode, symlen);
988 				inode->i_blocks = calc_inode_blocks(symlen);
989 			}
990 
991 			err = -ENOMEM;
992 			sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
993 			if (!sym)
994 				goto out;
995 
996 			spin_lock(&ci->i_ceph_lock);
997 			if (!ci->i_symlink)
998 				ci->i_symlink = sym;
999 			else
1000 				kfree(sym); /* lost a race */
1001 		}
1002 		inode->i_link = ci->i_symlink;
1003 		break;
1004 	case S_IFDIR:
1005 		inode->i_op = &ceph_dir_iops;
1006 		inode->i_fop = &ceph_dir_fops;
1007 		break;
1008 	default:
1009 		pr_err("%s %llx.%llx BAD mode 0%o\n", __func__,
1010 		       ceph_vinop(inode), inode->i_mode);
1011 	}
1012 
1013 	/* were we issued a capability? */
1014 	if (info_caps) {
1015 		if (ceph_snap(inode) == CEPH_NOSNAP) {
1016 			ceph_add_cap(inode, session,
1017 				     le64_to_cpu(info->cap.cap_id),
1018 				     info_caps,
1019 				     le32_to_cpu(info->cap.wanted),
1020 				     le32_to_cpu(info->cap.seq),
1021 				     le32_to_cpu(info->cap.mseq),
1022 				     le64_to_cpu(info->cap.realm),
1023 				     info->cap.flags, &new_cap);
1024 
1025 			/* set dir completion flag? */
1026 			if (S_ISDIR(inode->i_mode) &&
1027 			    ci->i_files == 0 && ci->i_subdirs == 0 &&
1028 			    (info_caps & CEPH_CAP_FILE_SHARED) &&
1029 			    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
1030 			    !__ceph_dir_is_complete(ci)) {
1031 				dout(" marking %p complete (empty)\n", inode);
1032 				i_size_write(inode, 0);
1033 				__ceph_dir_set_complete(ci,
1034 					atomic64_read(&ci->i_release_count),
1035 					atomic64_read(&ci->i_ordered_count));
1036 			}
1037 
1038 			wake = true;
1039 		} else {
1040 			dout(" %p got snap_caps %s\n", inode,
1041 			     ceph_cap_string(info_caps));
1042 			ci->i_snap_caps |= info_caps;
1043 		}
1044 	}
1045 
1046 	if (iinfo->inline_version > 0 &&
1047 	    iinfo->inline_version >= ci->i_inline_version) {
1048 		int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
1049 		ci->i_inline_version = iinfo->inline_version;
1050 		if (ceph_has_inline_data(ci) &&
1051 		    (locked_page || (info_caps & cache_caps)))
1052 			fill_inline = true;
1053 	}
1054 
1055 	if (cap_fmode >= 0) {
1056 		if (!info_caps)
1057 			pr_warn("mds issued no caps on %llx.%llx\n",
1058 				ceph_vinop(inode));
1059 		__ceph_touch_fmode(ci, mdsc, cap_fmode);
1060 	}
1061 
1062 	spin_unlock(&ci->i_ceph_lock);
1063 
1064 	ceph_fscache_register_inode_cookie(inode);
1065 
1066 	if (fill_inline)
1067 		ceph_fill_inline_data(inode, locked_page,
1068 				      iinfo->inline_data, iinfo->inline_len);
1069 
1070 	if (wake)
1071 		wake_up_all(&ci->i_cap_wq);
1072 
1073 	/* queue truncate if we saw i_size decrease */
1074 	if (queue_trunc)
1075 		ceph_queue_vmtruncate(inode);
1076 
1077 	/* populate frag tree */
1078 	if (S_ISDIR(inode->i_mode))
1079 		ceph_fill_fragtree(inode, &info->fragtree, dirinfo);
1080 
1081 	/* update delegation info? */
1082 	if (dirinfo)
1083 		ceph_fill_dirfrag(inode, dirinfo);
1084 
1085 	err = 0;
1086 out:
1087 	if (new_cap)
1088 		ceph_put_cap(mdsc, new_cap);
1089 	ceph_buffer_put(old_blob);
1090 	ceph_buffer_put(xattr_blob);
1091 	ceph_put_string(pool_ns);
1092 	return err;
1093 }
1094 
1095 /*
1096  * caller should hold session s_mutex and dentry->d_lock.
1097  */
1098 static void __update_dentry_lease(struct inode *dir, struct dentry *dentry,
1099 				  struct ceph_mds_reply_lease *lease,
1100 				  struct ceph_mds_session *session,
1101 				  unsigned long from_time,
1102 				  struct ceph_mds_session **old_lease_session)
1103 {
1104 	struct ceph_dentry_info *di = ceph_dentry(dentry);
1105 	unsigned mask = le16_to_cpu(lease->mask);
1106 	long unsigned duration = le32_to_cpu(lease->duration_ms);
1107 	long unsigned ttl = from_time + (duration * HZ) / 1000;
1108 	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
1109 
1110 	dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
1111 	     dentry, duration, ttl);
1112 
1113 	/* only track leases on regular dentries */
1114 	if (ceph_snap(dir) != CEPH_NOSNAP)
1115 		return;
1116 
1117 	if (mask & CEPH_LEASE_PRIMARY_LINK)
1118 		di->flags |= CEPH_DENTRY_PRIMARY_LINK;
1119 	else
1120 		di->flags &= ~CEPH_DENTRY_PRIMARY_LINK;
1121 
1122 	di->lease_shared_gen = atomic_read(&ceph_inode(dir)->i_shared_gen);
1123 	if (!(mask & CEPH_LEASE_VALID)) {
1124 		__ceph_dentry_dir_lease_touch(di);
1125 		return;
1126 	}
1127 
1128 	if (di->lease_gen == atomic_read(&session->s_cap_gen) &&
1129 	    time_before(ttl, di->time))
1130 		return;  /* we already have a newer lease. */
1131 
1132 	if (di->lease_session && di->lease_session != session) {
1133 		*old_lease_session = di->lease_session;
1134 		di->lease_session = NULL;
1135 	}
1136 
1137 	if (!di->lease_session)
1138 		di->lease_session = ceph_get_mds_session(session);
1139 	di->lease_gen = atomic_read(&session->s_cap_gen);
1140 	di->lease_seq = le32_to_cpu(lease->seq);
1141 	di->lease_renew_after = half_ttl;
1142 	di->lease_renew_from = 0;
1143 	di->time = ttl;
1144 
1145 	__ceph_dentry_lease_touch(di);
1146 }
1147 
1148 static inline void update_dentry_lease(struct inode *dir, struct dentry *dentry,
1149 					struct ceph_mds_reply_lease *lease,
1150 					struct ceph_mds_session *session,
1151 					unsigned long from_time)
1152 {
1153 	struct ceph_mds_session *old_lease_session = NULL;
1154 	spin_lock(&dentry->d_lock);
1155 	__update_dentry_lease(dir, dentry, lease, session, from_time,
1156 			      &old_lease_session);
1157 	spin_unlock(&dentry->d_lock);
1158 	ceph_put_mds_session(old_lease_session);
1159 }
1160 
1161 /*
1162  * update dentry lease without having parent inode locked
1163  */
1164 static void update_dentry_lease_careful(struct dentry *dentry,
1165 					struct ceph_mds_reply_lease *lease,
1166 					struct ceph_mds_session *session,
1167 					unsigned long from_time,
1168 					char *dname, u32 dname_len,
1169 					struct ceph_vino *pdvino,
1170 					struct ceph_vino *ptvino)
1171 
1172 {
1173 	struct inode *dir;
1174 	struct ceph_mds_session *old_lease_session = NULL;
1175 
1176 	spin_lock(&dentry->d_lock);
1177 	/* make sure dentry's name matches target */
1178 	if (dentry->d_name.len != dname_len ||
1179 	    memcmp(dentry->d_name.name, dname, dname_len))
1180 		goto out_unlock;
1181 
1182 	dir = d_inode(dentry->d_parent);
1183 	/* make sure parent matches dvino */
1184 	if (!ceph_ino_compare(dir, pdvino))
1185 		goto out_unlock;
1186 
1187 	/* make sure dentry's inode matches target. NULL ptvino means that
1188 	 * we expect a negative dentry */
1189 	if (ptvino) {
1190 		if (d_really_is_negative(dentry))
1191 			goto out_unlock;
1192 		if (!ceph_ino_compare(d_inode(dentry), ptvino))
1193 			goto out_unlock;
1194 	} else {
1195 		if (d_really_is_positive(dentry))
1196 			goto out_unlock;
1197 	}
1198 
1199 	__update_dentry_lease(dir, dentry, lease, session,
1200 			      from_time, &old_lease_session);
1201 out_unlock:
1202 	spin_unlock(&dentry->d_lock);
1203 	ceph_put_mds_session(old_lease_session);
1204 }
1205 
1206 /*
1207  * splice a dentry to an inode.
1208  * caller must hold directory i_rwsem for this to be safe.
1209  */
1210 static int splice_dentry(struct dentry **pdn, struct inode *in)
1211 {
1212 	struct dentry *dn = *pdn;
1213 	struct dentry *realdn;
1214 
1215 	BUG_ON(d_inode(dn));
1216 
1217 	if (S_ISDIR(in->i_mode)) {
1218 		/* If inode is directory, d_splice_alias() below will remove
1219 		 * 'realdn' from its origin parent. We need to ensure that
1220 		 * origin parent's readdir cache will not reference 'realdn'
1221 		 */
1222 		realdn = d_find_any_alias(in);
1223 		if (realdn) {
1224 			struct ceph_dentry_info *di = ceph_dentry(realdn);
1225 			spin_lock(&realdn->d_lock);
1226 
1227 			realdn->d_op->d_prune(realdn);
1228 
1229 			di->time = jiffies;
1230 			di->lease_shared_gen = 0;
1231 			di->offset = 0;
1232 
1233 			spin_unlock(&realdn->d_lock);
1234 			dput(realdn);
1235 		}
1236 	}
1237 
1238 	/* dn must be unhashed */
1239 	if (!d_unhashed(dn))
1240 		d_drop(dn);
1241 	realdn = d_splice_alias(in, dn);
1242 	if (IS_ERR(realdn)) {
1243 		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
1244 		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
1245 		return PTR_ERR(realdn);
1246 	}
1247 
1248 	if (realdn) {
1249 		dout("dn %p (%d) spliced with %p (%d) "
1250 		     "inode %p ino %llx.%llx\n",
1251 		     dn, d_count(dn),
1252 		     realdn, d_count(realdn),
1253 		     d_inode(realdn), ceph_vinop(d_inode(realdn)));
1254 		dput(dn);
1255 		*pdn = realdn;
1256 	} else {
1257 		BUG_ON(!ceph_dentry(dn));
1258 		dout("dn %p attached to %p ino %llx.%llx\n",
1259 		     dn, d_inode(dn), ceph_vinop(d_inode(dn)));
1260 	}
1261 	return 0;
1262 }
1263 
1264 /*
1265  * Incorporate results into the local cache.  This is either just
1266  * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
1267  * after a lookup).
1268  *
1269  * A reply may contain
1270  *         a directory inode along with a dentry.
1271  *  and/or a target inode
1272  *
1273  * Called with snap_rwsem (read).
1274  */
1275 int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
1276 {
1277 	struct ceph_mds_session *session = req->r_session;
1278 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1279 	struct inode *in = NULL;
1280 	struct ceph_vino tvino, dvino;
1281 	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
1282 	int err = 0;
1283 
1284 	dout("fill_trace %p is_dentry %d is_target %d\n", req,
1285 	     rinfo->head->is_dentry, rinfo->head->is_target);
1286 
1287 	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
1288 		dout("fill_trace reply is empty!\n");
1289 		if (rinfo->head->result == 0 && req->r_parent)
1290 			ceph_invalidate_dir_request(req);
1291 		return 0;
1292 	}
1293 
1294 	if (rinfo->head->is_dentry) {
1295 		struct inode *dir = req->r_parent;
1296 
1297 		if (dir) {
1298 			err = ceph_fill_inode(dir, NULL, &rinfo->diri,
1299 					      rinfo->dirfrag, session, -1,
1300 					      &req->r_caps_reservation);
1301 			if (err < 0)
1302 				goto done;
1303 		} else {
1304 			WARN_ON_ONCE(1);
1305 		}
1306 
1307 		if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME &&
1308 		    test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1309 		    !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
1310 			struct qstr dname;
1311 			struct dentry *dn, *parent;
1312 
1313 			BUG_ON(!rinfo->head->is_target);
1314 			BUG_ON(req->r_dentry);
1315 
1316 			parent = d_find_any_alias(dir);
1317 			BUG_ON(!parent);
1318 
1319 			dname.name = rinfo->dname;
1320 			dname.len = rinfo->dname_len;
1321 			dname.hash = full_name_hash(parent, dname.name, dname.len);
1322 			tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1323 			tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1324 retry_lookup:
1325 			dn = d_lookup(parent, &dname);
1326 			dout("d_lookup on parent=%p name=%.*s got %p\n",
1327 			     parent, dname.len, dname.name, dn);
1328 
1329 			if (!dn) {
1330 				dn = d_alloc(parent, &dname);
1331 				dout("d_alloc %p '%.*s' = %p\n", parent,
1332 				     dname.len, dname.name, dn);
1333 				if (!dn) {
1334 					dput(parent);
1335 					err = -ENOMEM;
1336 					goto done;
1337 				}
1338 				err = 0;
1339 			} else if (d_really_is_positive(dn) &&
1340 				   (ceph_ino(d_inode(dn)) != tvino.ino ||
1341 				    ceph_snap(d_inode(dn)) != tvino.snap)) {
1342 				dout(" dn %p points to wrong inode %p\n",
1343 				     dn, d_inode(dn));
1344 				ceph_dir_clear_ordered(dir);
1345 				d_delete(dn);
1346 				dput(dn);
1347 				goto retry_lookup;
1348 			}
1349 
1350 			req->r_dentry = dn;
1351 			dput(parent);
1352 		}
1353 	}
1354 
1355 	if (rinfo->head->is_target) {
1356 		/* Should be filled in by handle_reply */
1357 		BUG_ON(!req->r_target_inode);
1358 
1359 		in = req->r_target_inode;
1360 		err = ceph_fill_inode(in, req->r_locked_page, &rinfo->targeti,
1361 				NULL, session,
1362 				(!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
1363 				 !test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags) &&
1364 				 rinfo->head->result == 0) ?  req->r_fmode : -1,
1365 				&req->r_caps_reservation);
1366 		if (err < 0) {
1367 			pr_err("ceph_fill_inode badness %p %llx.%llx\n",
1368 				in, ceph_vinop(in));
1369 			req->r_target_inode = NULL;
1370 			if (in->i_state & I_NEW)
1371 				discard_new_inode(in);
1372 			else
1373 				iput(in);
1374 			goto done;
1375 		}
1376 		if (in->i_state & I_NEW)
1377 			unlock_new_inode(in);
1378 	}
1379 
1380 	/*
1381 	 * ignore null lease/binding on snapdir ENOENT, or else we
1382 	 * will have trouble splicing in the virtual snapdir later
1383 	 */
1384 	if (rinfo->head->is_dentry &&
1385             !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
1386 	    test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1387 	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
1388 					       fsc->mount_options->snapdir_name,
1389 					       req->r_dentry->d_name.len))) {
1390 		/*
1391 		 * lookup link rename   : null -> possibly existing inode
1392 		 * mknod symlink mkdir  : null -> new inode
1393 		 * unlink               : linked -> null
1394 		 */
1395 		struct inode *dir = req->r_parent;
1396 		struct dentry *dn = req->r_dentry;
1397 		bool have_dir_cap, have_lease;
1398 
1399 		BUG_ON(!dn);
1400 		BUG_ON(!dir);
1401 		BUG_ON(d_inode(dn->d_parent) != dir);
1402 
1403 		dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
1404 		dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);
1405 
1406 		BUG_ON(ceph_ino(dir) != dvino.ino);
1407 		BUG_ON(ceph_snap(dir) != dvino.snap);
1408 
1409 		/* do we have a lease on the whole dir? */
1410 		have_dir_cap =
1411 			(le32_to_cpu(rinfo->diri.in->cap.caps) &
1412 			 CEPH_CAP_FILE_SHARED);
1413 
1414 		/* do we have a dn lease? */
1415 		have_lease = have_dir_cap ||
1416 			le32_to_cpu(rinfo->dlease->duration_ms);
1417 		if (!have_lease)
1418 			dout("fill_trace  no dentry lease or dir cap\n");
1419 
1420 		/* rename? */
1421 		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
1422 			struct inode *olddir = req->r_old_dentry_dir;
1423 			BUG_ON(!olddir);
1424 
1425 			dout(" src %p '%pd' dst %p '%pd'\n",
1426 			     req->r_old_dentry,
1427 			     req->r_old_dentry,
1428 			     dn, dn);
1429 			dout("fill_trace doing d_move %p -> %p\n",
1430 			     req->r_old_dentry, dn);
1431 
1432 			/* d_move screws up sibling dentries' offsets */
1433 			ceph_dir_clear_ordered(dir);
1434 			ceph_dir_clear_ordered(olddir);
1435 
1436 			d_move(req->r_old_dentry, dn);
1437 			dout(" src %p '%pd' dst %p '%pd'\n",
1438 			     req->r_old_dentry,
1439 			     req->r_old_dentry,
1440 			     dn, dn);
1441 
1442 			/* ensure target dentry is invalidated, despite
1443 			   rehashing bug in vfs_rename_dir */
1444 			ceph_invalidate_dentry_lease(dn);
1445 
1446 			dout("dn %p gets new offset %lld\n", req->r_old_dentry,
1447 			     ceph_dentry(req->r_old_dentry)->offset);
1448 
1449 			/* swap r_dentry and r_old_dentry in case that
1450 			 * splice_dentry() gets called later. This is safe
1451 			 * because no other place will use them */
1452 			req->r_dentry = req->r_old_dentry;
1453 			req->r_old_dentry = dn;
1454 			dn = req->r_dentry;
1455 		}
1456 
1457 		/* null dentry? */
1458 		if (!rinfo->head->is_target) {
1459 			dout("fill_trace null dentry\n");
1460 			if (d_really_is_positive(dn)) {
1461 				dout("d_delete %p\n", dn);
1462 				ceph_dir_clear_ordered(dir);
1463 				d_delete(dn);
1464 			} else if (have_lease) {
1465 				if (d_unhashed(dn))
1466 					d_add(dn, NULL);
1467 			}
1468 
1469 			if (!d_unhashed(dn) && have_lease)
1470 				update_dentry_lease(dir, dn,
1471 						    rinfo->dlease, session,
1472 						    req->r_request_started);
1473 			goto done;
1474 		}
1475 
1476 		/* attach proper inode */
1477 		if (d_really_is_negative(dn)) {
1478 			ceph_dir_clear_ordered(dir);
1479 			ihold(in);
1480 			err = splice_dentry(&req->r_dentry, in);
1481 			if (err < 0)
1482 				goto done;
1483 			dn = req->r_dentry;  /* may have spliced */
1484 		} else if (d_really_is_positive(dn) && d_inode(dn) != in) {
1485 			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
1486 			     dn, d_inode(dn), ceph_vinop(d_inode(dn)),
1487 			     ceph_vinop(in));
1488 			d_invalidate(dn);
1489 			have_lease = false;
1490 		}
1491 
1492 		if (have_lease) {
1493 			update_dentry_lease(dir, dn,
1494 					    rinfo->dlease, session,
1495 					    req->r_request_started);
1496 		}
1497 		dout(" final dn %p\n", dn);
1498 	} else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
1499 		    req->r_op == CEPH_MDS_OP_MKSNAP) &&
1500 	           test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1501 		   !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
1502 		struct inode *dir = req->r_parent;
1503 
1504 		/* fill out a snapdir LOOKUPSNAP dentry */
1505 		BUG_ON(!dir);
1506 		BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
1507 		BUG_ON(!req->r_dentry);
1508 		dout(" linking snapped dir %p to dn %p\n", in, req->r_dentry);
1509 		ceph_dir_clear_ordered(dir);
1510 		ihold(in);
1511 		err = splice_dentry(&req->r_dentry, in);
1512 		if (err < 0)
1513 			goto done;
1514 	} else if (rinfo->head->is_dentry && req->r_dentry) {
1515 		/* parent inode is not locked, be careful */
1516 		struct ceph_vino *ptvino = NULL;
1517 		dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
1518 		dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);
1519 		if (rinfo->head->is_target) {
1520 			tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1521 			tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1522 			ptvino = &tvino;
1523 		}
1524 		update_dentry_lease_careful(req->r_dentry, rinfo->dlease,
1525 					    session, req->r_request_started,
1526 					    rinfo->dname, rinfo->dname_len,
1527 					    &dvino, ptvino);
1528 	}
1529 done:
1530 	dout("fill_trace done err=%d\n", err);
1531 	return err;
1532 }
1533 
1534 /*
1535  * Prepopulate our cache with readdir results, leases, etc.
1536  */
1537 static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
1538 					   struct ceph_mds_session *session)
1539 {
1540 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1541 	int i, err = 0;
1542 
1543 	for (i = 0; i < rinfo->dir_nr; i++) {
1544 		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
1545 		struct ceph_vino vino;
1546 		struct inode *in;
1547 		int rc;
1548 
1549 		vino.ino = le64_to_cpu(rde->inode.in->ino);
1550 		vino.snap = le64_to_cpu(rde->inode.in->snapid);
1551 
1552 		in = ceph_get_inode(req->r_dentry->d_sb, vino);
1553 		if (IS_ERR(in)) {
1554 			err = PTR_ERR(in);
1555 			dout("new_inode badness got %d\n", err);
1556 			continue;
1557 		}
1558 		rc = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
1559 				     -1, &req->r_caps_reservation);
1560 		if (rc < 0) {
1561 			pr_err("ceph_fill_inode badness on %p got %d\n",
1562 			       in, rc);
1563 			err = rc;
1564 			if (in->i_state & I_NEW) {
1565 				ihold(in);
1566 				discard_new_inode(in);
1567 			}
1568 		} else if (in->i_state & I_NEW) {
1569 			unlock_new_inode(in);
1570 		}
1571 
1572 		iput(in);
1573 	}
1574 
1575 	return err;
1576 }
1577 
1578 void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
1579 {
1580 	if (ctl->page) {
1581 		kunmap(ctl->page);
1582 		put_page(ctl->page);
1583 		ctl->page = NULL;
1584 	}
1585 }
1586 
1587 static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
1588 			      struct ceph_readdir_cache_control *ctl,
1589 			      struct ceph_mds_request *req)
1590 {
1591 	struct ceph_inode_info *ci = ceph_inode(dir);
1592 	unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
1593 	unsigned idx = ctl->index % nsize;
1594 	pgoff_t pgoff = ctl->index / nsize;
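	/*
	 * The readdir cache is an array of dentry pointers kept in the
	 * directory's page cache: each page holds PAGE_SIZE / sizeof(struct
	 * dentry *) slots, so ctl->index maps to page 'pgoff', slot 'idx'.
	 */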
1595 
1596 	if (!ctl->page || pgoff != page_index(ctl->page)) {
1597 		ceph_readdir_cache_release(ctl);
1598 		if (idx == 0)
1599 			ctl->page = grab_cache_page(&dir->i_data, pgoff);
1600 		else
1601 			ctl->page = find_lock_page(&dir->i_data, pgoff);
1602 		if (!ctl->page) {
1603 			ctl->index = -1;
1604 			return idx == 0 ? -ENOMEM : 0;
1605 		}
1606 		/* reading/filling the cache are serialized by
1607 		 * i_rwsem, no need to use page lock */
1608 		unlock_page(ctl->page);
1609 		ctl->dentries = kmap(ctl->page);
1610 		if (idx == 0)
1611 			memset(ctl->dentries, 0, PAGE_SIZE);
1612 	}
1613 
1614 	if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
1615 	    req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) {
1616 		dout("readdir cache dn %p idx %d\n", dn, ctl->index);
1617 		ctl->dentries[idx] = dn;
1618 		ctl->index++;
1619 	} else {
1620 		dout("disable readdir cache\n");
1621 		ctl->index = -1;
1622 	}
1623 	return 0;
1624 }
1625 
1626 int ceph_readdir_prepopulate(struct ceph_mds_request *req,
1627 			     struct ceph_mds_session *session)
1628 {
1629 	struct dentry *parent = req->r_dentry;
1630 	struct ceph_inode_info *ci = ceph_inode(d_inode(parent));
1631 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1632 	struct qstr dname;
1633 	struct dentry *dn;
1634 	struct inode *in;
1635 	int err = 0, skipped = 0, ret, i;
1636 	u32 frag = le32_to_cpu(req->r_args.readdir.frag);
1637 	u32 last_hash = 0;
1638 	u32 fpos_offset;
1639 	struct ceph_readdir_cache_control cache_ctl = {};
1640 
1641 	if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
1642 		return readdir_prepopulate_inodes_only(req, session);
1643 
1644 	if (rinfo->hash_order) {
1645 		if (req->r_path2) {
1646 			last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
1647 						  req->r_path2,
1648 						  strlen(req->r_path2));
1649 			last_hash = ceph_frag_value(last_hash);
1650 		} else if (rinfo->offset_hash) {
1651 			/* mds understands offset_hash */
1652 			WARN_ON_ONCE(req->r_readdir_offset != 2);
1653 			last_hash = le32_to_cpu(req->r_args.readdir.offset_hash);
1654 		}
1655 	}
1656 
1657 	if (rinfo->dir_dir &&
1658 	    le32_to_cpu(rinfo->dir_dir->frag) != frag) {
1659 		dout("readdir_prepopulate got new frag %x -> %x\n",
1660 		     frag, le32_to_cpu(rinfo->dir_dir->frag));
1661 		frag = le32_to_cpu(rinfo->dir_dir->frag);
1662 		if (!rinfo->hash_order)
1663 			req->r_readdir_offset = 2;
1664 	}
1665 
1666 	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
1667 		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
1668 		     rinfo->dir_nr, parent);
1669 	} else {
1670 		dout("readdir_prepopulate %d items under dn %p\n",
1671 		     rinfo->dir_nr, parent);
1672 		if (rinfo->dir_dir)
1673 			ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
1674 
1675 		if (ceph_frag_is_leftmost(frag) &&
1676 		    req->r_readdir_offset == 2 &&
1677 		    !(rinfo->hash_order && last_hash)) {
1678 			/* note dir version at start of readdir so we can
1679 			 * tell if any dentries get dropped */
1680 			req->r_dir_release_cnt =
1681 				atomic64_read(&ci->i_release_count);
1682 			req->r_dir_ordered_cnt =
1683 				atomic64_read(&ci->i_ordered_count);
1684 			req->r_readdir_cache_idx = 0;
1685 		}
1686 	}
1687 
1688 	cache_ctl.index = req->r_readdir_cache_idx;
1689 	fpos_offset = req->r_readdir_offset;
1690 
1691 	/* FIXME: release caps/leases if error occurs */
1692 	for (i = 0; i < rinfo->dir_nr; i++) {
1693 		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
1694 		struct ceph_vino tvino;
1695 
1696 		dname.name = rde->name;
1697 		dname.len = rde->name_len;
1698 		dname.hash = full_name_hash(parent, dname.name, dname.len);
1699 
1700 		tvino.ino = le64_to_cpu(rde->inode.in->ino);
1701 		tvino.snap = le64_to_cpu(rde->inode.in->snapid);
1702 
1703 		if (rinfo->hash_order) {
1704 			u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
1705 						 rde->name, rde->name_len);
1706 			hash = ceph_frag_value(hash);
1707 			if (hash != last_hash)
1708 				fpos_offset = 2;
1709 			last_hash = hash;
1710 			rde->offset = ceph_make_fpos(hash, fpos_offset++, true);
1711 		} else {
1712 			rde->offset = ceph_make_fpos(frag, fpos_offset++, false);
1713 		}
1714 
1715 retry_lookup:
1716 		dn = d_lookup(parent, &dname);
1717 		dout("d_lookup on parent=%p name=%.*s got %p\n",
1718 		     parent, dname.len, dname.name, dn);
1719 
1720 		if (!dn) {
1721 			dn = d_alloc(parent, &dname);
1722 			dout("d_alloc %p '%.*s' = %p\n", parent,
1723 			     dname.len, dname.name, dn);
1724 			if (!dn) {
1725 				dout("d_alloc badness\n");
1726 				err = -ENOMEM;
1727 				goto out;
1728 			}
1729 		} else if (d_really_is_positive(dn) &&
1730 			   (ceph_ino(d_inode(dn)) != tvino.ino ||
1731 			    ceph_snap(d_inode(dn)) != tvino.snap)) {
1732 			struct ceph_dentry_info *di = ceph_dentry(dn);
1733 			dout(" dn %p points to wrong inode %p\n",
1734 			     dn, d_inode(dn));
1735 
1736 			spin_lock(&dn->d_lock);
1737 			if (di->offset > 0 &&
1738 			    di->lease_shared_gen ==
1739 			    atomic_read(&ci->i_shared_gen)) {
1740 				__ceph_dir_clear_ordered(ci);
1741 				di->offset = 0;
1742 			}
1743 			spin_unlock(&dn->d_lock);
1744 
1745 			d_delete(dn);
1746 			dput(dn);
1747 			goto retry_lookup;
1748 		}
1749 
1750 		/* inode */
1751 		if (d_really_is_positive(dn)) {
1752 			in = d_inode(dn);
1753 		} else {
1754 			in = ceph_get_inode(parent->d_sb, tvino);
1755 			if (IS_ERR(in)) {
1756 				dout("new_inode badness\n");
1757 				d_drop(dn);
1758 				dput(dn);
1759 				err = PTR_ERR(in);
1760 				goto out;
1761 			}
1762 		}
1763 
1764 		ret = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
1765 				      -1, &req->r_caps_reservation);
1766 		if (ret < 0) {
1767 			pr_err("ceph_fill_inode badness on %p\n", in);
1768 			if (d_really_is_negative(dn)) {
1769 				if (in->i_state & I_NEW) {
1770 					ihold(in);
1771 					discard_new_inode(in);
1772 				}
1773 				iput(in);
1774 			}
1775 			d_drop(dn);
1776 			err = ret;
1777 			goto next_item;
1778 		}
1779 		if (in->i_state & I_NEW)
1780 			unlock_new_inode(in);
1781 
1782 		if (d_really_is_negative(dn)) {
1783 			if (ceph_security_xattr_deadlock(in)) {
1784 				dout(" skip splicing dn %p to inode %p"
1785 				     " (security xattr deadlock)\n", dn, in);
1786 				iput(in);
1787 				skipped++;
1788 				goto next_item;
1789 			}
1790 
1791 			err = splice_dentry(&dn, in);
1792 			if (err < 0)
1793 				goto next_item;
1794 		}
1795 
1796 		ceph_dentry(dn)->offset = rde->offset;
1797 
1798 		update_dentry_lease(d_inode(parent), dn,
1799 				    rde->lease, req->r_session,
1800 				    req->r_request_started);
1801 
1802 		if (err == 0 && skipped == 0 && cache_ctl.index >= 0) {
1803 			ret = fill_readdir_cache(d_inode(parent), dn,
1804 						 &cache_ctl, req);
1805 			if (ret < 0)
1806 				err = ret;
1807 		}
1808 next_item:
1809 		dput(dn);
1810 	}
1811 out:
1812 	if (err == 0 && skipped == 0) {
1813 		set_bit(CEPH_MDS_R_DID_PREPOPULATE, &req->r_req_flags);
1814 		req->r_readdir_cache_idx = cache_ctl.index;
1815 	}
1816 	ceph_readdir_cache_release(&cache_ctl);
1817 	dout("readdir_prepopulate done\n");
1818 	return err;
1819 }
1820 
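/*
 * Update the locally cached i_size (and block count).  Returns true
 * if the new size should be reported back to the MDS, as decided by
 * __ceph_should_report_size().
 */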
1821 bool ceph_inode_set_size(struct inode *inode, loff_t size)
1822 {
1823 	struct ceph_inode_info *ci = ceph_inode(inode);
1824 	bool ret;
1825 
1826 	spin_lock(&ci->i_ceph_lock);
1827 	dout("set_size %p %llu -> %llu\n", inode, i_size_read(inode), size);
1828 	i_size_write(inode, size);
1829 	ceph_fscache_update(inode);
1830 	inode->i_blocks = calc_inode_blocks(size);
1831 
1832 	ret = __ceph_should_report_size(ci);
1833 
1834 	spin_unlock(&ci->i_ceph_lock);
1835 
1836 	return ret;
1837 }
1838 
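/*
 * Queue deferred work for this inode (see the CEPH_I_WORK_* bits) on
 * the fs client's inode workqueue.  An inode reference is taken here
 * and dropped by ceph_inode_work(), or immediately if the work was
 * already queued.
 */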
1839 void ceph_queue_inode_work(struct inode *inode, int work_bit)
1840 {
1841 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1842 	struct ceph_inode_info *ci = ceph_inode(inode);
1843 	set_bit(work_bit, &ci->i_work_mask);
1844 
1845 	ihold(inode);
1846 	if (queue_work(fsc->inode_wq, &ci->i_work)) {
1847 		dout("queue_inode_work %p, mask=%lx\n", inode, ci->i_work_mask);
1848 	} else {
1849 		dout("queue_inode_work %p already queued, mask=%lx\n",
1850 		     inode, ci->i_work_mask);
1851 		iput(inode);
1852 	}
1853 }
1854 
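/*
 * Invalidate clean pagecache pages for this inode, typically because
 * the MDS is revoking the FILE_CACHE cap.  Runs from the inode work
 * queue (see ceph_inode_work()).  Bails out early if i_rdcache_gen
 * and i_rdcache_revoking show the invalidation is no longer needed,
 * and re-checks caps afterwards where appropriate.
 */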
1855 static void ceph_do_invalidate_pages(struct inode *inode)
1856 {
1857 	struct ceph_inode_info *ci = ceph_inode(inode);
1858 	u32 orig_gen;
1859 	int check = 0;
1860 
1861 	ceph_fscache_invalidate(inode, false);
1862 
1863 	mutex_lock(&ci->i_truncate_mutex);
1864 
1865 	if (ceph_inode_is_shutdown(inode)) {
1866 		pr_warn_ratelimited("%s: inode %llx.%llx is shut down\n",
1867 				    __func__, ceph_vinop(inode));
1868 		mapping_set_error(inode->i_mapping, -EIO);
1869 		truncate_pagecache(inode, 0);
1870 		mutex_unlock(&ci->i_truncate_mutex);
1871 		goto out;
1872 	}
1873 
1874 	spin_lock(&ci->i_ceph_lock);
1875 	dout("invalidate_pages %p gen %d revoking %d\n", inode,
1876 	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
1877 	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
1878 		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
1879 			check = 1;
1880 		spin_unlock(&ci->i_ceph_lock);
1881 		mutex_unlock(&ci->i_truncate_mutex);
1882 		goto out;
1883 	}
1884 	orig_gen = ci->i_rdcache_gen;
1885 	spin_unlock(&ci->i_ceph_lock);
1886 
1887 	if (invalidate_inode_pages2(inode->i_mapping) < 0) {
1888 		pr_err("invalidate_inode_pages2 %llx.%llx failed\n",
1889 		       ceph_vinop(inode));
1890 	}
1891 
1892 	spin_lock(&ci->i_ceph_lock);
1893 	if (orig_gen == ci->i_rdcache_gen &&
1894 	    orig_gen == ci->i_rdcache_revoking) {
1895 		dout("invalidate_pages %p gen %d successful\n", inode,
1896 		     ci->i_rdcache_gen);
1897 		ci->i_rdcache_revoking--;
1898 		check = 1;
1899 	} else {
1900 		dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
1901 		     inode, orig_gen, ci->i_rdcache_gen,
1902 		     ci->i_rdcache_revoking);
1903 		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
1904 			check = 1;
1905 	}
1906 	spin_unlock(&ci->i_ceph_lock);
1907 	mutex_unlock(&ci->i_truncate_mutex);
1908 out:
1909 	if (check)
1910 		ceph_check_caps(ci, 0, NULL);
1911 }
1912 
1913 /*
1914  * Make sure any pending truncation is applied before doing anything
1915  * that may depend on it.
1916  */
1917 void __ceph_do_pending_vmtruncate(struct inode *inode)
1918 {
1919 	struct ceph_inode_info *ci = ceph_inode(inode);
1920 	u64 to;
1921 	int wrbuffer_refs, finish = 0;
1922 
1923 	mutex_lock(&ci->i_truncate_mutex);
1924 retry:
1925 	spin_lock(&ci->i_ceph_lock);
1926 	if (ci->i_truncate_pending == 0) {
1927 		dout("__do_pending_vmtruncate %p none pending\n", inode);
1928 		spin_unlock(&ci->i_ceph_lock);
1929 		mutex_unlock(&ci->i_truncate_mutex);
1930 		return;
1931 	}
1932 
1933 	/*
1934 	 * make sure any dirty snapped pages are flushed before we
1935 	 * possibly truncate them.. so write AND block!
1936 	 */
1937 	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
1938 		spin_unlock(&ci->i_ceph_lock);
1939 		dout("__do_pending_vmtruncate %p flushing snaps first\n",
1940 		     inode);
1941 		filemap_write_and_wait_range(&inode->i_data, 0,
1942 					     inode->i_sb->s_maxbytes);
1943 		goto retry;
1944 	}
1945 
1946 	/* there should be no reader or writer */
1947 	WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);
1948 
1949 	to = ci->i_truncate_size;
1950 	wrbuffer_refs = ci->i_wrbuffer_ref;
1951 	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
1952 	     ci->i_truncate_pending, to);
1953 	spin_unlock(&ci->i_ceph_lock);
1954 
1955 	ceph_fscache_resize(inode, to);
1956 	truncate_pagecache(inode, to);
1957 
1958 	spin_lock(&ci->i_ceph_lock);
1959 	if (to == ci->i_truncate_size) {
1960 		ci->i_truncate_pending = 0;
1961 		finish = 1;
1962 	}
1963 	spin_unlock(&ci->i_ceph_lock);
1964 	if (!finish)
1965 		goto retry;
1966 
1967 	mutex_unlock(&ci->i_truncate_mutex);
1968 
1969 	if (wrbuffer_refs == 0)
1970 		ceph_check_caps(ci, 0, NULL);
1971 
1972 	wake_up_all(&ci->i_cap_wq);
1973 }
1974 
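/*
 * Workqueue handler for deferred per-inode work: writeback, pagecache
 * invalidation, pending vmtruncate, cap checks and snap flushes, as
 * selected by the bits set in i_work_mask.  Drops the inode reference
 * taken in ceph_queue_inode_work().
 */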
1975 static void ceph_inode_work(struct work_struct *work)
1976 {
1977 	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1978 						 i_work);
1979 	struct inode *inode = &ci->netfs.inode;
1980 
1981 	if (test_and_clear_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask)) {
1982 		dout("writeback %p\n", inode);
1983 		filemap_fdatawrite(&inode->i_data);
1984 	}
1985 	if (test_and_clear_bit(CEPH_I_WORK_INVALIDATE_PAGES, &ci->i_work_mask))
1986 		ceph_do_invalidate_pages(inode);
1987 
1988 	if (test_and_clear_bit(CEPH_I_WORK_VMTRUNCATE, &ci->i_work_mask))
1989 		__ceph_do_pending_vmtruncate(inode);
1990 
1991 	if (test_and_clear_bit(CEPH_I_WORK_CHECK_CAPS, &ci->i_work_mask))
1992 		ceph_check_caps(ci, 0, NULL);
1993 
1994 	if (test_and_clear_bit(CEPH_I_WORK_FLUSH_SNAPS, &ci->i_work_mask))
1995 		ceph_flush_snaps(ci, NULL);
1996 
1997 	iput(inode);
1998 }
1999 
2000 /*
2001  * symlinks
2002  */
2003 static const struct inode_operations ceph_symlink_iops = {
2004 	.get_link = simple_get_link,
2005 	.setattr = ceph_setattr,
2006 	.getattr = ceph_getattr,
2007 	.listxattr = ceph_listxattr,
2008 };
2009 
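/*
 * Apply attribute changes to an inode.  Changes covered by caps we
 * hold exclusively are applied locally and the corresponding caps are
 * dirtied; anything else is encoded into a SETATTR request and sent
 * synchronously to the auth MDS.
 */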
2010 int __ceph_setattr(struct inode *inode, struct iattr *attr)
2011 {
2012 	struct ceph_inode_info *ci = ceph_inode(inode);
2013 	unsigned int ia_valid = attr->ia_valid;
2014 	struct ceph_mds_request *req;
2015 	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
2016 	struct ceph_cap_flush *prealloc_cf;
2017 	int issued;
2018 	int release = 0, dirtied = 0;
2019 	int mask = 0;
2020 	int err = 0;
2021 	int inode_dirty_flags = 0;
2022 	bool lock_snap_rwsem = false;
2023 
2024 	prealloc_cf = ceph_alloc_cap_flush();
2025 	if (!prealloc_cf)
2026 		return -ENOMEM;
2027 
2028 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
2029 				       USE_AUTH_MDS);
2030 	if (IS_ERR(req)) {
2031 		ceph_free_cap_flush(prealloc_cf);
2032 		return PTR_ERR(req);
2033 	}
2034 
2035 	spin_lock(&ci->i_ceph_lock);
2036 	issued = __ceph_caps_issued(ci, NULL);
2037 
2038 	if (!ci->i_head_snapc &&
2039 	    (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) {
2040 		lock_snap_rwsem = true;
2041 		if (!down_read_trylock(&mdsc->snap_rwsem)) {
2042 			spin_unlock(&ci->i_ceph_lock);
2043 			down_read(&mdsc->snap_rwsem);
2044 			spin_lock(&ci->i_ceph_lock);
2045 			issued = __ceph_caps_issued(ci, NULL);
2046 		}
2047 	}
2048 
2049 	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
2050 
2051 	if (ia_valid & ATTR_UID) {
2052 		dout("setattr %p uid %d -> %d\n", inode,
2053 		     from_kuid(&init_user_ns, inode->i_uid),
2054 		     from_kuid(&init_user_ns, attr->ia_uid));
2055 		if (issued & CEPH_CAP_AUTH_EXCL) {
2056 			inode->i_uid = attr->ia_uid;
2057 			dirtied |= CEPH_CAP_AUTH_EXCL;
2058 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2059 			   !uid_eq(attr->ia_uid, inode->i_uid)) {
2060 			req->r_args.setattr.uid = cpu_to_le32(
2061 				from_kuid(&init_user_ns, attr->ia_uid));
2062 			mask |= CEPH_SETATTR_UID;
2063 			release |= CEPH_CAP_AUTH_SHARED;
2064 		}
2065 	}
2066 	if (ia_valid & ATTR_GID) {
2067 		dout("setattr %p gid %d -> %d\n", inode,
2068 		     from_kgid(&init_user_ns, inode->i_gid),
2069 		     from_kgid(&init_user_ns, attr->ia_gid));
2070 		if (issued & CEPH_CAP_AUTH_EXCL) {
2071 			inode->i_gid = attr->ia_gid;
2072 			dirtied |= CEPH_CAP_AUTH_EXCL;
2073 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2074 			   !gid_eq(attr->ia_gid, inode->i_gid)) {
2075 			req->r_args.setattr.gid = cpu_to_le32(
2076 				from_kgid(&init_user_ns, attr->ia_gid));
2077 			mask |= CEPH_SETATTR_GID;
2078 			release |= CEPH_CAP_AUTH_SHARED;
2079 		}
2080 	}
2081 	if (ia_valid & ATTR_MODE) {
2082 		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
2083 		     attr->ia_mode);
2084 		if (issued & CEPH_CAP_AUTH_EXCL) {
2085 			inode->i_mode = attr->ia_mode;
2086 			dirtied |= CEPH_CAP_AUTH_EXCL;
2087 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2088 			   attr->ia_mode != inode->i_mode) {
2089 			inode->i_mode = attr->ia_mode;
2090 			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
2091 			mask |= CEPH_SETATTR_MODE;
2092 			release |= CEPH_CAP_AUTH_SHARED;
2093 		}
2094 	}
2095 
2096 	if (ia_valid & ATTR_ATIME) {
2097 		dout("setattr %p atime %lld.%ld -> %lld.%ld\n", inode,
2098 		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
2099 		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
2100 		if (issued & CEPH_CAP_FILE_EXCL) {
2101 			ci->i_time_warp_seq++;
2102 			inode->i_atime = attr->ia_atime;
2103 			dirtied |= CEPH_CAP_FILE_EXCL;
2104 		} else if ((issued & CEPH_CAP_FILE_WR) &&
2105 			   timespec64_compare(&inode->i_atime,
2106 					    &attr->ia_atime) < 0) {
2107 			inode->i_atime = attr->ia_atime;
2108 			dirtied |= CEPH_CAP_FILE_WR;
2109 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2110 			   !timespec64_equal(&inode->i_atime, &attr->ia_atime)) {
2111 			ceph_encode_timespec64(&req->r_args.setattr.atime,
2112 					       &attr->ia_atime);
2113 			mask |= CEPH_SETATTR_ATIME;
2114 			release |= CEPH_CAP_FILE_SHARED |
2115 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2116 		}
2117 	}
2118 	if (ia_valid & ATTR_SIZE) {
2119 		loff_t isize = i_size_read(inode);
2120 
2121 		dout("setattr %p size %lld -> %lld\n", inode, isize, attr->ia_size);
2122 		if ((issued & CEPH_CAP_FILE_EXCL) && attr->ia_size >= isize) {
2123 			if (attr->ia_size > isize) {
2124 				i_size_write(inode, attr->ia_size);
2125 				inode->i_blocks = calc_inode_blocks(attr->ia_size);
2126 				ci->i_reported_size = attr->ia_size;
2127 				dirtied |= CEPH_CAP_FILE_EXCL;
2128 				ia_valid |= ATTR_MTIME;
2129 			}
2130 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2131 			   attr->ia_size != isize) {
2132 			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
2133 			req->r_args.setattr.old_size = cpu_to_le64(isize);
2134 			mask |= CEPH_SETATTR_SIZE;
2135 			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL |
2136 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2137 		}
2138 	}
2139 	if (ia_valid & ATTR_MTIME) {
2140 		dout("setattr %p mtime %lld.%ld -> %lld.%ld\n", inode,
2141 		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
2142 		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
2143 		if (issued & CEPH_CAP_FILE_EXCL) {
2144 			ci->i_time_warp_seq++;
2145 			inode->i_mtime = attr->ia_mtime;
2146 			dirtied |= CEPH_CAP_FILE_EXCL;
2147 		} else if ((issued & CEPH_CAP_FILE_WR) &&
2148 			   timespec64_compare(&inode->i_mtime,
2149 					    &attr->ia_mtime) < 0) {
2150 			inode->i_mtime = attr->ia_mtime;
2151 			dirtied |= CEPH_CAP_FILE_WR;
2152 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2153 			   !timespec64_equal(&inode->i_mtime, &attr->ia_mtime)) {
2154 			ceph_encode_timespec64(&req->r_args.setattr.mtime,
2155 					       &attr->ia_mtime);
2156 			mask |= CEPH_SETATTR_MTIME;
2157 			release |= CEPH_CAP_FILE_SHARED |
2158 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2159 		}
2160 	}
2161 
2162 	/* these do nothing */
2163 	if (ia_valid & ATTR_CTIME) {
2164 		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
2165 					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
2166 		dout("setattr %p ctime %lld.%ld -> %lld.%ld (%s)\n", inode,
2167 		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
2168 		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
2169 		     only ? "ctime only" : "ignored");
2170 		if (only) {
2171 			/*
2172 			 * if the kernel wants to dirty ctime but nothing else,
2173 			 * we need to choose a cap to dirty under, or do
2174 			 * an almost-no-op setattr
2175 			 */
2176 			if (issued & CEPH_CAP_AUTH_EXCL)
2177 				dirtied |= CEPH_CAP_AUTH_EXCL;
2178 			else if (issued & CEPH_CAP_FILE_EXCL)
2179 				dirtied |= CEPH_CAP_FILE_EXCL;
2180 			else if (issued & CEPH_CAP_XATTR_EXCL)
2181 				dirtied |= CEPH_CAP_XATTR_EXCL;
2182 			else
2183 				mask |= CEPH_SETATTR_CTIME;
2184 		}
2185 	}
2186 	if (ia_valid & ATTR_FILE)
2187 		dout("setattr %p ATTR_FILE ... hrm!\n", inode);
2188 
2189 	if (dirtied) {
2190 		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
2191 							   &prealloc_cf);
2192 		inode->i_ctime = attr->ia_ctime;
2193 		inode_inc_iversion_raw(inode);
2194 	}
2195 
2196 	release &= issued;
2197 	spin_unlock(&ci->i_ceph_lock);
2198 	if (lock_snap_rwsem)
2199 		up_read(&mdsc->snap_rwsem);
2200 
2201 	if (inode_dirty_flags)
2202 		__mark_inode_dirty(inode, inode_dirty_flags);
2203 
2204 	if (mask) {
2205 		req->r_inode = inode;
2206 		ihold(inode);
2207 		req->r_inode_drop = release;
2208 		req->r_args.setattr.mask = cpu_to_le32(mask);
2209 		req->r_num_caps = 1;
2210 		req->r_stamp = attr->ia_ctime;
2211 		err = ceph_mdsc_do_request(mdsc, NULL, req);
2212 	}
2213 	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
2214 	     ceph_cap_string(dirtied), mask);
2215 
2216 	ceph_mdsc_put_request(req);
2217 	ceph_free_cap_flush(prealloc_cf);
2218 
2219 	if (err >= 0 && (mask & CEPH_SETATTR_SIZE))
2220 		__ceph_do_pending_vmtruncate(inode);
2221 
2222 	return err;
2223 }
2224 
2225 /*
2226  * setattr
2227  */
2228 int ceph_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
2229 		 struct iattr *attr)
2230 {
2231 	struct inode *inode = d_inode(dentry);
2232 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
2233 	int err;
2234 
2235 	if (ceph_snap(inode) != CEPH_NOSNAP)
2236 		return -EROFS;
2237 
2238 	if (ceph_inode_is_shutdown(inode))
2239 		return -ESTALE;
2240 
2241 	err = setattr_prepare(&init_user_ns, dentry, attr);
2242 	if (err != 0)
2243 		return err;
2244 
2245 	if ((attr->ia_valid & ATTR_SIZE) &&
2246 	    attr->ia_size > max(i_size_read(inode), fsc->max_file_size))
2247 		return -EFBIG;
2248 
2249 	if ((attr->ia_valid & ATTR_SIZE) &&
2250 	    ceph_quota_is_max_bytes_exceeded(inode, attr->ia_size))
2251 		return -EDQUOT;
2252 
2253 	err = __ceph_setattr(inode, attr);
2254 
2255 	if (err >= 0 && (attr->ia_valid & ATTR_MODE))
2256 		err = posix_acl_chmod(&init_user_ns, inode, attr->ia_mode);
2257 
2258 	return err;
2259 }
2260 
2261 int ceph_try_to_choose_auth_mds(struct inode *inode, int mask)
2262 {
2263 	int issued = ceph_caps_issued(ceph_inode(inode));
2264 
2265 	/*
2266 	 * If any 'x' caps are issued we can just choose the auth MDS
2267 	 * instead of a random replica MDS: only when the Locker is in
2268 	 * the LOCK_EXEC state can the loner client be granted 'x' caps.
2269 	 * If we instead send the getattr request to a replica MDS, it
2270 	 * must auth-pin and try to rdlock from the auth MDS, which
2271 	 * forces the auth MDS to transition the Locker state to
2272 	 * LOCK_SYNC, after which the lock state changes back again.
2273 	 *
2274 	 * These Locker state transitions are expensive and usually
2275 	 * require revoking caps from clients.
2276 	 *
2277 	 * For the 'Xs' caps needed by getxattr we also choose the
2278 	 * auth MDS, because the MDS side is buggy: setxattr does not
2279 	 * notify the replica MDSes when values change, so a replica
2280 	 * may return stale values. Though this will be fixed in the
2281 	 * MDS code, choosing the auth MDS still makes sense for
2282 	 * older ceph versions.
2283 	 */
2284 	if (((mask & CEPH_CAP_ANY_SHARED) && (issued & CEPH_CAP_ANY_EXCL))
2285 	    || (mask & (CEPH_STAT_RSTAT | CEPH_STAT_CAP_XATTR)))
2286 		return USE_AUTH_MDS;
2287 	else
2288 		return USE_ANY_MDS;
2289 }
2290 
2291 /*
2292  * Verify that we have a lease on the given mask.  If not,
2293  * do a getattr against an mds.
2294  */
2295 int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
2296 		      int mask, bool force)
2297 {
2298 	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
2299 	struct ceph_mds_client *mdsc = fsc->mdsc;
2300 	struct ceph_mds_request *req;
2301 	int mode;
2302 	int err;
2303 
2304 	if (ceph_snap(inode) == CEPH_SNAPDIR) {
2305 		dout("do_getattr inode %p SNAPDIR\n", inode);
2306 		return 0;
2307 	}
2308 
2309 	dout("do_getattr inode %p mask %s mode 0%o\n",
2310 	     inode, ceph_cap_string(mask), inode->i_mode);
2311 	if (!force && ceph_caps_issued_mask_metric(ceph_inode(inode), mask, 1))
2312 		return 0;
2313 
2314 	mode = ceph_try_to_choose_auth_mds(inode, mask);
2315 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
2316 	if (IS_ERR(req))
2317 		return PTR_ERR(req);
2318 	req->r_inode = inode;
2319 	ihold(inode);
2320 	req->r_num_caps = 1;
2321 	req->r_args.getattr.mask = cpu_to_le32(mask);
2322 	req->r_locked_page = locked_page;
2323 	err = ceph_mdsc_do_request(mdsc, NULL, req);
2324 	if (locked_page && err == 0) {
2325 		u64 inline_version = req->r_reply_info.targeti.inline_version;
2326 		if (inline_version == 0) {
2327 			/* the reply is supposed to contain inline data */
2328 			err = -EINVAL;
2329 		} else if (inline_version == CEPH_INLINE_NONE ||
2330 			   inline_version == 1) {
2331 			err = -ENODATA;
2332 		} else {
2333 			err = req->r_reply_info.targeti.inline_len;
2334 		}
2335 	}
2336 	ceph_mdsc_put_request(req);
2337 	dout("do_getattr result=%d\n", err);
2338 	return err;
2339 }
2340 
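/*
 * Fetch a ceph virtual xattr from the auth MDS using the GETVXATTR
 * op (which requires CEPHFS_FEATURE_OP_GETVXATTR on the MDS).  With
 * @size == 0 just return the length of the value; otherwise copy it
 * into @value, or return -ERANGE if the buffer is too small.
 */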
2341 int ceph_do_getvxattr(struct inode *inode, const char *name, void *value,
2342 		      size_t size)
2343 {
2344 	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
2345 	struct ceph_mds_client *mdsc = fsc->mdsc;
2346 	struct ceph_mds_request *req;
2347 	int mode = USE_AUTH_MDS;
2348 	int err;
2349 	char *xattr_value;
2350 	size_t xattr_value_len;
2351 
2352 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETVXATTR, mode);
2353 	if (IS_ERR(req)) {
2354 		err = -ENOMEM;
2355 		goto out;
2356 	}
2357 
2358 	req->r_feature_needed = CEPHFS_FEATURE_OP_GETVXATTR;
2359 	req->r_path2 = kstrdup(name, GFP_NOFS);
2360 	if (!req->r_path2) {
2361 		err = -ENOMEM;
2362 		goto put;
2363 	}
2364 
2365 	ihold(inode);
2366 	req->r_inode = inode;
2367 	err = ceph_mdsc_do_request(mdsc, NULL, req);
2368 	if (err < 0)
2369 		goto put;
2370 
2371 	xattr_value = req->r_reply_info.xattr_info.xattr_value;
2372 	xattr_value_len = req->r_reply_info.xattr_info.xattr_value_len;
2373 
2374 	dout("do_getvxattr xattr_value_len:%zu, size:%zu\n", xattr_value_len, size);
2375 
2376 	err = (int)xattr_value_len;
2377 	if (size == 0)
2378 		goto put;
2379 
2380 	if (xattr_value_len > size) {
2381 		err = -ERANGE;
2382 		goto put;
2383 	}
2384 
2385 	memcpy(value, xattr_value, xattr_value_len);
2386 put:
2387 	ceph_mdsc_put_request(req);
2388 out:
2389 	dout("do_getvxattr result=%d\n", err);
2390 	return err;
2391 }
2392 
2393 
2394 /*
2395  * Check inode permissions.  We verify we have a valid value for
2396  * the AUTH cap, then call the generic handler.
2397  */
2398 int ceph_permission(struct user_namespace *mnt_userns, struct inode *inode,
2399 		    int mask)
2400 {
2401 	int err;
2402 
2403 	if (mask & MAY_NOT_BLOCK)
2404 		return -ECHILD;
2405 
2406 	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);
2407 
2408 	if (!err)
2409 		err = generic_permission(&init_user_ns, inode, mask);
2410 	return err;
2411 }
2412 
2413 /* Craft a mask of needed caps given a set of requested statx attrs. */
2414 static int statx_to_caps(u32 want, umode_t mode)
2415 {
2416 	int mask = 0;
2417 
2418 	if (want & (STATX_MODE|STATX_UID|STATX_GID|STATX_CTIME|STATX_BTIME))
2419 		mask |= CEPH_CAP_AUTH_SHARED;
2420 
2421 	if (want & (STATX_NLINK|STATX_CTIME)) {
2422 		/*
2423 		 * The link count for directories depends on inode->i_subdirs,
2424 		 * and that is only updated when Fs caps are held.
2425 		 */
2426 		if (S_ISDIR(mode))
2427 			mask |= CEPH_CAP_FILE_SHARED;
2428 		else
2429 			mask |= CEPH_CAP_LINK_SHARED;
2430 	}
2431 
2432 	if (want & (STATX_ATIME|STATX_MTIME|STATX_CTIME|STATX_SIZE|
2433 		    STATX_BLOCKS))
2434 		mask |= CEPH_CAP_FILE_SHARED;
2435 
2436 	if (want & (STATX_CTIME))
2437 		mask |= CEPH_CAP_XATTR_SHARED;
2438 
2439 	return mask;
2440 }
2441 
2442 /*
2443  * Get all the attributes. If we have sufficient caps for the requested attrs,
2444  * then we can avoid talking to the MDS at all.
2445  */
2446 int ceph_getattr(struct user_namespace *mnt_userns, const struct path *path,
2447 		 struct kstat *stat, u32 request_mask, unsigned int flags)
2448 {
2449 	struct inode *inode = d_inode(path->dentry);
2450 	struct super_block *sb = inode->i_sb;
2451 	struct ceph_inode_info *ci = ceph_inode(inode);
2452 	u32 valid_mask = STATX_BASIC_STATS;
2453 	int err = 0;
2454 
2455 	if (ceph_inode_is_shutdown(inode))
2456 		return -ESTALE;
2457 
2458 	/* Skip the getattr altogether if we're asked not to sync */
2459 	if ((flags & AT_STATX_SYNC_TYPE) != AT_STATX_DONT_SYNC) {
2460 		err = ceph_do_getattr(inode,
2461 				statx_to_caps(request_mask, inode->i_mode),
2462 				flags & AT_STATX_FORCE_SYNC);
2463 		if (err)
2464 			return err;
2465 	}
2466 
2467 	generic_fillattr(&init_user_ns, inode, stat);
2468 	stat->ino = ceph_present_inode(inode);
2469 
2470 	/*
2471 	 * btime on newly-allocated inodes is 0, so if this is still set to
2472 	 * that, then assume that it's not valid.
2473 	 */
2474 	if (ci->i_btime.tv_sec || ci->i_btime.tv_nsec) {
2475 		stat->btime = ci->i_btime;
2476 		valid_mask |= STATX_BTIME;
2477 	}
2478 
2479 	if (ceph_snap(inode) == CEPH_NOSNAP)
2480 		stat->dev = sb->s_dev;
2481 	else
2482 		stat->dev = ci->i_snapid_map ? ci->i_snapid_map->dev : 0;
2483 
2484 	if (S_ISDIR(inode->i_mode)) {
2485 		if (ceph_test_mount_opt(ceph_sb_to_client(sb), RBYTES)) {
2486 			stat->size = ci->i_rbytes;
2487 		} else if (ceph_snap(inode) == CEPH_SNAPDIR) {
2488 			struct ceph_inode_info *pci;
2489 			struct ceph_snap_realm *realm;
2490 			struct inode *parent;
2491 
2492 			parent = ceph_lookup_inode(sb, ceph_ino(inode));
2493 			if (IS_ERR(parent))
2494 				return PTR_ERR(parent);
2495 
2496 			pci = ceph_inode(parent);
2497 			spin_lock(&pci->i_ceph_lock);
2498 			realm = pci->i_snap_realm;
2499 			if (realm)
2500 				stat->size = realm->num_snaps;
2501 			else
2502 				stat->size = 0;
2503 			spin_unlock(&pci->i_ceph_lock);
2504 			iput(parent);
2505 		} else {
2506 			stat->size = ci->i_files + ci->i_subdirs;
2507 		}
2508 		stat->blocks = 0;
2509 		stat->blksize = 65536;
2510 		/*
2511 		 * Some applications rely on the st_nlink value of
2512 		 * directories being either 0 (if unlinked) or
2513 		 * 2 + number of subdirectories.
2514 		 */
2515 		if (stat->nlink == 1)
2516 			/* '.' + '..' + subdirs */
2517 			stat->nlink = 1 + 1 + ci->i_subdirs;
2518 	}
2519 
2520 	stat->result_mask = request_mask & valid_mask;
2521 	return err;
2522 }
2523 
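/*
 * Mark an inode as shut down (CEPH_I_SHUTDOWN) and purge all of its
 * caps, e.g. when the client is being forcibly torn down.  Queues a
 * pagecache invalidation if needed and drops any inode references
 * that were pinned by the purged caps.
 */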
2524 void ceph_inode_shutdown(struct inode *inode)
2525 {
2526 	struct ceph_inode_info *ci = ceph_inode(inode);
2527 	struct rb_node *p;
2528 	int iputs = 0;
2529 	bool invalidate = false;
2530 
2531 	spin_lock(&ci->i_ceph_lock);
2532 	ci->i_ceph_flags |= CEPH_I_SHUTDOWN;
2533 	p = rb_first(&ci->i_caps);
2534 	while (p) {
2535 		struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
2536 
2537 		p = rb_next(p);
2538 		iputs += ceph_purge_inode_cap(inode, cap, &invalidate);
2539 	}
2540 	spin_unlock(&ci->i_ceph_lock);
2541 
2542 	if (invalidate)
2543 		ceph_queue_invalidate(inode);
2544 	while (iputs--)
2545 		iput(inode);
2546 }
2547