// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/random.h>
#include <linux/sort.h>
#include <linux/iversion.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/decode.h>

/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */

static const struct inode_operations ceph_symlink_iops;

static void ceph_inode_work(struct work_struct *work);

/*
 * find or create an inode, given the ceph ino number
 */
static int ceph_set_ino_cb(struct inode *inode, void *data)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);

	ci->i_vino = *(struct ceph_vino *)data;
	inode->i_ino = ceph_vino_to_ino_t(ci->i_vino);
	inode_set_iversion_raw(inode, 0);
	percpu_counter_inc(&mdsc->metric.total_inodes);

	return 0;
}

struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
	struct inode *inode;

	if (ceph_vino_is_reserved(vino))
		return ERR_PTR(-EREMOTEIO);

	inode = iget5_locked(sb, (unsigned long)vino.ino, ceph_ino_compare,
			     ceph_set_ino_cb, &vino);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	dout("get_inode on %llu=%llx.%llx got %p new %d\n", ceph_present_inode(inode),
	     ceph_vinop(inode), inode, !!(inode->i_state & I_NEW));
	return inode;
}
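
/*
 * Illustrative sketch, not part of the upstream file: a typical caller
 * builds a ceph_vino and checks for ERR_PTR results, since
 * ceph_get_inode() never returns NULL.
 */
#if 0	/* example only */
static struct inode *example_get_head_inode(struct super_block *sb, u64 ino)
{
	struct ceph_vino vino = {
		.ino = ino,
		.snap = CEPH_NOSNAP,	/* head (non-snapshot) inode */
	};
	struct inode *inode = ceph_get_inode(sb, vino);

	/* -ENOMEM, or -EREMOTEIO for reserved vinos */
	if (IS_ERR(inode))
		return inode;
	/* ... fill the inode, then unlock_new_inode() if I_NEW is set ... */
	return inode;
}
#endif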

/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
	struct ceph_vino vino = {
		.ino = ceph_ino(parent),
		.snap = CEPH_SNAPDIR,
	};
	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
	struct ceph_inode_info *ci = ceph_inode(inode);

	BUG_ON(!S_ISDIR(parent->i_mode));
	if (IS_ERR(inode))
		return inode;
	inode->i_mode = parent->i_mode;
	inode->i_uid = parent->i_uid;
	inode->i_gid = parent->i_gid;
	inode->i_mtime = parent->i_mtime;
	inode->i_ctime = parent->i_ctime;
	inode->i_atime = parent->i_atime;
	ci->i_rbytes = 0;
	ci->i_btime = ceph_inode(parent)->i_btime;

	if (inode->i_state & I_NEW) {
		inode->i_op = &ceph_snapdir_iops;
		inode->i_fop = &ceph_snapdir_fops;
		ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
		unlock_new_inode(inode);
	}

	return inode;
}

const struct inode_operations ceph_file_iops = {
	.permission = ceph_permission,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.listxattr = ceph_listxattr,
	.get_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
};


/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */
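
/*
 * Illustrative sketch, assuming the encoding in ceph_frag_make() and
 * friends (include/linux/ceph/ceph_frag.h): a frag id packs a bit
 * count in the top byte and a value in the low 24 bits, so the root
 * frag is 0x0 and splitting it by 1 yields children 0x1000000 and
 * 0x1800000.
 */
#if 0	/* example only, within some function */
	u32 root = ceph_frag_make(0, 0);		/* 0x0 */
	u32 left = ceph_frag_make_child(root, 1, 0);	/* 0x1000000 */
	u32 right = ceph_frag_make_child(root, 1, 1);	/* 0x1800000 */

	/* 0x123456 hashes into the left half, 0x876543 into the right */
	WARN_ON(!ceph_frag_contains_value(left, 0x123456));
	WARN_ON(!ceph_frag_contains_value(right, 0x876543));
#endif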

/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
						    u32 f)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_frag *frag;
	int c;

	p = &ci->i_fragtree.rb_node;
	while (*p) {
		parent = *p;
		frag = rb_entry(parent, struct ceph_inode_frag, node);
		c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return frag;
	}

	frag = kmalloc(sizeof(*frag), GFP_NOFS);
	if (!frag)
		return ERR_PTR(-ENOMEM);

	frag->frag = f;
	frag->split_by = 0;
	frag->mds = -1;
	frag->ndist = 0;

	rb_link_node(&frag->node, parent, p);
	rb_insert_color(&frag->node, &ci->i_fragtree);

	dout("get_or_create_frag added %llx.%llx frag %x\n",
	     ceph_vinop(&ci->vfs_inode), f);
	return frag;
}

/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
	struct rb_node *n = ci->i_fragtree.rb_node;

	while (n) {
		struct ceph_inode_frag *frag =
			rb_entry(n, struct ceph_inode_frag, node);
		int c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			n = n->rb_left;
		else if (c > 0)
			n = n->rb_right;
		else
			return frag;
	}
	return NULL;
}

/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
			      struct ceph_inode_frag *pfrag, int *found)
{
	u32 t = ceph_frag_make(0, 0);
	struct ceph_inode_frag *frag;
	unsigned nway, i;
	u32 n;

	if (found)
		*found = 0;

	while (1) {
		WARN_ON(!ceph_frag_contains_value(t, v));
		frag = __ceph_find_frag(ci, t);
		if (!frag)
			break; /* t is a leaf */
		if (frag->split_by == 0) {
			if (pfrag)
				memcpy(pfrag, frag, sizeof(*pfrag));
			if (found)
				*found = 1;
			break;
		}

		/* choose child */
		nway = 1 << frag->split_by;
		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
		     frag->split_by, nway);
		for (i = 0; i < nway; i++) {
			n = ceph_frag_make_child(t, frag->split_by, i);
			if (ceph_frag_contains_value(n, v)) {
				t = n;
				break;
			}
		}
		BUG_ON(i == nway);
	}
	dout("choose_frag(%x) = %x\n", v, t);

	return t;
}

u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
		     struct ceph_inode_frag *pfrag, int *found)
{
	u32 ret;
	mutex_lock(&ci->i_fragtree_mutex);
	ret = __ceph_choose_frag(ci, v, pfrag, found);
	mutex_unlock(&ci->i_fragtree_mutex);
	return ret;
}
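
/*
 * Illustrative sketch, not upstream code: a caller mapping a dentry
 * name to its dirfrag hashes the name and walks the tree under the
 * mutex via the wrapper above (dl_dir_hash selects the hash function).
 */
#if 0	/* example only; assumes ci and dentry are in scope */
	u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
				 dentry->d_name.name, dentry->d_name.len);
	u32 fg = ceph_choose_frag(ci, hash, NULL, NULL);
#endif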

/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragment in tree ONLY if ndist > 0.  Otherwise, only
 * branches/splits are included in i_fragtree.
 */
static int ceph_fill_dirfrag(struct inode *inode,
			     struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	u32 id = le32_to_cpu(dirinfo->frag);
	int mds = le32_to_cpu(dirinfo->auth);
	int ndist = le32_to_cpu(dirinfo->ndist);
	int diri_auth = -1;
	int i;
	int err = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_auth_cap)
		diri_auth = ci->i_auth_cap->mds;
	spin_unlock(&ci->i_ceph_lock);

	if (mds == -1) /* CDIR_AUTH_PARENT */
		mds = diri_auth;

	mutex_lock(&ci->i_fragtree_mutex);
	if (ndist == 0 && mds == diri_auth) {
		/* no delegation info needed. */
		frag = __ceph_find_frag(ci, id);
		if (!frag)
			goto out;
		if (frag->split_by == 0) {
			/* tree leaf, remove */
			dout("fill_dirfrag removed %llx.%llx frag %x"
			     " (no ref)\n", ceph_vinop(inode), id);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
		} else {
			/* tree branch, keep and clear */
			dout("fill_dirfrag cleared %llx.%llx frag %x"
			     " referral\n", ceph_vinop(inode), id);
			frag->mds = -1;
			frag->ndist = 0;
		}
		goto out;
	}


	/* find/add this frag to store mds delegation info */
	frag = __get_or_create_frag(ci, id);
	if (IS_ERR(frag)) {
		/* this is not the end of the world; we can continue
		   with bad/inaccurate delegation info */
		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
		err = -ENOMEM;
		goto out;
	}

	frag->mds = mds;
	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
	for (i = 0; i < frag->ndist; i++)
		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
	     ceph_vinop(inode), frag->frag, frag->ndist);

out:
	mutex_unlock(&ci->i_fragtree_mutex);
	return err;
}

static int frag_tree_split_cmp(const void *l, const void *r)
{
	struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l;
	struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r;
	return ceph_frag_compare(le32_to_cpu(ls->frag),
				 le32_to_cpu(rs->frag));
}

static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
{
	if (!frag)
		return f == ceph_frag_make(0, 0);
	if (ceph_frag_bits(f) != ceph_frag_bits(frag->frag) + frag->split_by)
		return false;
	return ceph_frag_contains_value(frag->frag, ceph_frag_value(f));
}

static int ceph_fill_fragtree(struct inode *inode,
			      struct ceph_frag_tree_head *fragtree,
			      struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag, *prev_frag = NULL;
	struct rb_node *rb_node;
	unsigned i, split_by, nsplits;
	u32 id;
	bool update = false;

	mutex_lock(&ci->i_fragtree_mutex);
	nsplits = le32_to_cpu(fragtree->nsplits);
	if (nsplits != ci->i_fragtree_nsplits) {
		update = true;
	} else if (nsplits) {
		i = prandom_u32() % nsplits;
		id = le32_to_cpu(fragtree->splits[i].frag);
		if (!__ceph_find_frag(ci, id))
			update = true;
	} else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) {
		rb_node = rb_first(&ci->i_fragtree);
		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
		if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node))
			update = true;
	}
	if (!update && dirinfo) {
		id = le32_to_cpu(dirinfo->frag);
		if (id != __ceph_choose_frag(ci, id, NULL, NULL))
			update = true;
	}
	if (!update)
		goto out_unlock;

	if (nsplits > 1) {
		sort(fragtree->splits, nsplits, sizeof(fragtree->splits[0]),
		     frag_tree_split_cmp, NULL);
	}

	dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode));
	rb_node = rb_first(&ci->i_fragtree);
	for (i = 0; i < nsplits; i++) {
		id = le32_to_cpu(fragtree->splits[i].frag);
		split_by = le32_to_cpu(fragtree->splits[i].by);
		if (split_by == 0 || ceph_frag_bits(id) + split_by > 24) {
			pr_err("fill_fragtree %llx.%llx invalid split %d/%u, "
			       "frag %x split by %d\n", ceph_vinop(inode),
			       i, nsplits, id, split_by);
			continue;
		}
		frag = NULL;
		while (rb_node) {
			frag = rb_entry(rb_node, struct ceph_inode_frag, node);
			if (ceph_frag_compare(frag->frag, id) >= 0) {
				if (frag->frag != id)
					frag = NULL;
				else
					rb_node = rb_next(rb_node);
				break;
			}
			rb_node = rb_next(rb_node);
			/* delete stale split/leaf node */
			if (frag->split_by > 0 ||
			    !is_frag_child(frag->frag, prev_frag)) {
				rb_erase(&frag->node, &ci->i_fragtree);
				if (frag->split_by > 0)
					ci->i_fragtree_nsplits--;
				kfree(frag);
			}
			frag = NULL;
		}
		if (!frag) {
			frag = __get_or_create_frag(ci, id);
			if (IS_ERR(frag))
				continue;
		}
		if (frag->split_by == 0)
			ci->i_fragtree_nsplits++;
		frag->split_by = split_by;
		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
		prev_frag = frag;
	}
	while (rb_node) {
		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
		rb_node = rb_next(rb_node);
		/* delete stale split/leaf node */
		if (frag->split_by > 0 ||
		    !is_frag_child(frag->frag, prev_frag)) {
			rb_erase(&frag->node, &ci->i_fragtree);
			if (frag->split_by > 0)
				ci->i_fragtree_nsplits--;
			kfree(frag);
		}
	}
out_unlock:
	mutex_unlock(&ci->i_fragtree_mutex);
	return 0;
}

/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
	struct ceph_inode_info *ci;
	int i;

	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
	if (!ci)
		return NULL;

	dout("alloc_inode %p\n", &ci->vfs_inode);

	spin_lock_init(&ci->i_ceph_lock);

	ci->i_version = 0;
	ci->i_inline_version = 0;
	ci->i_time_warp_seq = 0;
	ci->i_ceph_flags = 0;
	atomic64_set(&ci->i_ordered_count, 1);
	atomic64_set(&ci->i_release_count, 1);
	atomic64_set(&ci->i_complete_seq[0], 0);
	atomic64_set(&ci->i_complete_seq[1], 0);
	ci->i_symlink = NULL;

	ci->i_max_bytes = 0;
	ci->i_max_files = 0;

	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
	memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
	RCU_INIT_POINTER(ci->i_layout.pool_ns, NULL);

	ci->i_fragtree = RB_ROOT;
	mutex_init(&ci->i_fragtree_mutex);

	ci->i_xattrs.blob = NULL;
	ci->i_xattrs.prealloc_blob = NULL;
	ci->i_xattrs.dirty = false;
	ci->i_xattrs.index = RB_ROOT;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.version = 0;
	ci->i_xattrs.index_version = 0;

	ci->i_caps = RB_ROOT;
	ci->i_auth_cap = NULL;
	ci->i_dirty_caps = 0;
	ci->i_flushing_caps = 0;
	INIT_LIST_HEAD(&ci->i_dirty_item);
	INIT_LIST_HEAD(&ci->i_flushing_item);
	ci->i_prealloc_cap_flush = NULL;
	INIT_LIST_HEAD(&ci->i_cap_flush_list);
	init_waitqueue_head(&ci->i_cap_wq);
	ci->i_hold_caps_max = 0;
	INIT_LIST_HEAD(&ci->i_cap_delay_list);
	INIT_LIST_HEAD(&ci->i_cap_snaps);
	ci->i_head_snapc = NULL;
	ci->i_snap_caps = 0;

	ci->i_last_rd = ci->i_last_wr = jiffies - 3600 * HZ;
	for (i = 0; i < CEPH_FILE_MODE_BITS; i++)
		ci->i_nr_by_mode[i] = 0;

	mutex_init(&ci->i_truncate_mutex);
	ci->i_truncate_seq = 0;
	ci->i_truncate_size = 0;
	ci->i_truncate_pending = 0;

	ci->i_max_size = 0;
	ci->i_reported_size = 0;
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	ci->i_pin_ref = 0;
	ci->i_rd_ref = 0;
	ci->i_rdcache_ref = 0;
	ci->i_wr_ref = 0;
	ci->i_wb_ref = 0;
	ci->i_fx_ref = 0;
	ci->i_wrbuffer_ref = 0;
	ci->i_wrbuffer_ref_head = 0;
	atomic_set(&ci->i_filelock_ref, 0);
	atomic_set(&ci->i_shared_gen, 1);
	ci->i_rdcache_gen = 0;
	ci->i_rdcache_revoking = 0;

	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
	INIT_LIST_HEAD(&ci->i_unsafe_iops);
	spin_lock_init(&ci->i_unsafe_lock);

	ci->i_snap_realm = NULL;
	INIT_LIST_HEAD(&ci->i_snap_realm_item);
	INIT_LIST_HEAD(&ci->i_snap_flush_item);

	INIT_WORK(&ci->i_work, ceph_inode_work);
	ci->i_work_mask = 0;
	memset(&ci->i_btime, '\0', sizeof(ci->i_btime));

	ceph_fscache_inode_init(ci);

	return &ci->vfs_inode;
}

void ceph_free_inode(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	kfree(ci->i_symlink);
	kmem_cache_free(ceph_inode_cachep, ci);
}

void ceph_evict_inode(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
	struct ceph_inode_frag *frag;
	struct rb_node *n;

	dout("evict_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

	percpu_counter_dec(&mdsc->metric.total_inodes);

	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);

	ceph_fscache_unregister_inode_cookie(ci);

	__ceph_remove_caps(ci);

	if (__ceph_has_any_quota(ci))
		ceph_adjust_quota_realms_count(inode, false);

	/*
	 * we may still have a snap_realm reference if there are stray
	 * caps in i_snap_caps.
	 */
	if (ci->i_snap_realm) {
		if (ceph_snap(inode) == CEPH_NOSNAP) {
			struct ceph_snap_realm *realm = ci->i_snap_realm;
			dout(" dropping residual ref to snap realm %p\n",
			     realm);
			spin_lock(&realm->inodes_with_caps_lock);
			list_del_init(&ci->i_snap_realm_item);
			ci->i_snap_realm = NULL;
			if (realm->ino == ci->i_vino.ino)
				realm->inode = NULL;
			spin_unlock(&realm->inodes_with_caps_lock);
			ceph_put_snap_realm(mdsc, realm);
		} else {
			ceph_put_snapid_map(mdsc, ci->i_snapid_map);
			ci->i_snap_realm = NULL;
		}
	}

	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
		frag = rb_entry(n, struct ceph_inode_frag, node);
		rb_erase(n, &ci->i_fragtree);
		kfree(frag);
	}
	ci->i_fragtree_nsplits = 0;

	__ceph_destroy_xattrs(ci);
	if (ci->i_xattrs.blob)
		ceph_buffer_put(ci->i_xattrs.blob);
	if (ci->i_xattrs.prealloc_blob)
		ceph_buffer_put(ci->i_xattrs.prealloc_blob);

	ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns));
	ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
}

static inline blkcnt_t calc_inode_blocks(u64 size)
{
	return (size + (1<<9) - 1) >> 9;
}
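
/*
 * e.g. calc_inode_blocks(0) == 0, calc_inode_blocks(1..512) == 1 and
 * calc_inode_blocks(513) == 2: i_blocks counts 512-byte units, rounded up.
 */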

/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up to date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
int ceph_fill_file_size(struct inode *inode, int issued,
			u32 truncate_seq, u64 truncate_size, u64 size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int queue_trunc = 0;

	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
	    (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
		dout("size %lld -> %llu\n", inode->i_size, size);
		if (size > 0 && S_ISDIR(inode->i_mode)) {
			pr_err("fill_file_size non-zero size for directory\n");
			size = 0;
		}
		i_size_write(inode, size);
		inode->i_blocks = calc_inode_blocks(size);
		ci->i_reported_size = size;
		if (truncate_seq != ci->i_truncate_seq) {
			dout("truncate_seq %u -> %u\n",
			     ci->i_truncate_seq, truncate_seq);
			ci->i_truncate_seq = truncate_seq;

			/* the MDS should have revoked these caps */
			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
					       CEPH_CAP_FILE_RD |
					       CEPH_CAP_FILE_WR |
					       CEPH_CAP_FILE_LAZYIO));
			/*
			 * If we hold relevant caps, or in the case where we're
			 * not the only client referencing this file and we
			 * don't hold those caps, then we need to check whether
			 * the file is either opened or mmapped
			 */
			if ((issued & (CEPH_CAP_FILE_CACHE|
				       CEPH_CAP_FILE_BUFFER)) ||
			    mapping_mapped(inode->i_mapping) ||
			    __ceph_is_file_opened(ci)) {
				ci->i_truncate_pending++;
				queue_trunc = 1;
			}
		}
	}
	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
	    ci->i_truncate_size != truncate_size) {
		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
		     truncate_size);
		ci->i_truncate_size = truncate_size;
	}

	if (queue_trunc)
		ceph_fscache_invalidate(inode);

	return queue_trunc;
}
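
/*
 * For illustration: with a local i_truncate_seq of 2, a reply carrying
 * truncate_seq 3 wins even when its size is smaller (a truncate is then
 * queued if the file is open/mmapped or we hold Fc/Fb caps), while a
 * reply with truncate_seq 2 and a smaller size is ignored.
 */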

void ceph_fill_file_time(struct inode *inode, int issued,
			 u64 time_warp_seq, struct timespec64 *ctime,
			 struct timespec64 *mtime, struct timespec64 *atime)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int warn = 0;

	if (issued & (CEPH_CAP_FILE_EXCL|
		      CEPH_CAP_FILE_WR|
		      CEPH_CAP_FILE_BUFFER|
		      CEPH_CAP_AUTH_EXCL|
		      CEPH_CAP_XATTR_EXCL)) {
		if (ci->i_version == 0 ||
		    timespec64_compare(ctime, &inode->i_ctime) > 0) {
			dout("ctime %lld.%09ld -> %lld.%09ld inc w/ cap\n",
			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
			     ctime->tv_sec, ctime->tv_nsec);
			inode->i_ctime = *ctime;
		}
		if (ci->i_version == 0 ||
		    ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
			/* the MDS did a utimes() */
			dout("mtime %lld.%09ld -> %lld.%09ld "
			     "tw %d -> %d\n",
			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
			     mtime->tv_sec, mtime->tv_nsec,
			     ci->i_time_warp_seq, (int)time_warp_seq);

			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else if (time_warp_seq == ci->i_time_warp_seq) {
			/* nobody did utimes(); take the max */
			if (timespec64_compare(mtime, &inode->i_mtime) > 0) {
				dout("mtime %lld.%09ld -> %lld.%09ld inc\n",
				     inode->i_mtime.tv_sec,
				     inode->i_mtime.tv_nsec,
				     mtime->tv_sec, mtime->tv_nsec);
				inode->i_mtime = *mtime;
			}
			if (timespec64_compare(atime, &inode->i_atime) > 0) {
				dout("atime %lld.%09ld -> %lld.%09ld inc\n",
				     inode->i_atime.tv_sec,
				     inode->i_atime.tv_nsec,
				     atime->tv_sec, atime->tv_nsec);
				inode->i_atime = *atime;
			}
		} else if (issued & CEPH_CAP_FILE_EXCL) {
			/* we did a utimes(); ignore mds values */
		} else {
			warn = 1;
		}
	} else {
		/* we have no write|excl caps; whatever the MDS says is true */
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
			inode->i_ctime = *ctime;
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else {
			warn = 1;
		}
	}
	if (warn) /* time_warp_seq shouldn't go backwards */
		dout("%p mds time_warp_seq %llu < %u\n",
		     inode, time_warp_seq, ci->i_time_warp_seq);
}
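
/*
 * For illustration: holding Fw with an equal time_warp_seq, a later
 * mtime/atime from the MDS is folded in with max(); a greater
 * time_warp_seq means the MDS saw an explicit utimes() and its values
 * win outright.
 */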

/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
int ceph_fill_inode(struct inode *inode, struct page *locked_page,
		    struct ceph_mds_reply_info_in *iinfo,
		    struct ceph_mds_reply_dirfrag *dirinfo,
		    struct ceph_mds_session *session, int cap_fmode,
		    struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
	struct ceph_mds_reply_inode *info = iinfo->in;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int issued, new_issued, info_caps;
	struct timespec64 mtime, atime, ctime;
	struct ceph_buffer *xattr_blob = NULL;
	struct ceph_buffer *old_blob = NULL;
	struct ceph_string *pool_ns = NULL;
	struct ceph_cap *new_cap = NULL;
	int err = 0;
	bool wake = false;
	bool queue_trunc = false;
	bool new_version = false;
	bool fill_inline = false;

	lockdep_assert_held(&mdsc->snap_rwsem);

	dout("%s %p ino %llx.%llx v %llu had %llu\n", __func__,
	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
	     ci->i_version);

	info_caps = le32_to_cpu(info->cap.caps);

	/* prealloc new cap struct */
	if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) {
		new_cap = ceph_get_cap(mdsc, caps_reservation);
		if (!new_cap)
			return -ENOMEM;
	}

	/*
	 * prealloc xattr data, if it looks like we'll need it.  only
	 * if len > 4 (meaning there are actually xattrs; the first 4
	 * bytes are the xattr count).
	 */
	if (iinfo->xattr_len > 4) {
		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
		if (!xattr_blob)
			pr_err("%s ENOMEM xattr blob %d bytes\n", __func__,
			       iinfo->xattr_len);
	}

	if (iinfo->pool_ns_len > 0)
		pool_ns = ceph_find_or_create_string(iinfo->pool_ns_data,
						     iinfo->pool_ns_len);

	if (ceph_snap(inode) != CEPH_NOSNAP && !ci->i_snapid_map)
		ci->i_snapid_map = ceph_get_snapid_map(mdsc, ceph_snap(inode));

	spin_lock(&ci->i_ceph_lock);

	/*
	 * provided version will be odd if inode value is projected,
	 * even if stable.  skip the update if we have newer stable
	 * info (ours>=theirs, e.g. due to racing mds replies), unless
	 * we are getting projected (unstable) info (in which case the
	 * version is odd, and we want ours>theirs).
	 *   us   them
	 *   2    2     skip
	 *   3    2     skip
	 *   3    3     update
	 */
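	/*
	 * in the table above: with us=2 or us=3, them=2 gives
	 * 2 <= (us & ~1) == 2, so we skip; them=3 gives 3 > 2, so we
	 * update (provided CEPH_CAP_FLAG_AUTH is set).
	 */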
	if (ci->i_version == 0 ||
	    ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
	     le64_to_cpu(info->version) > (ci->i_version & ~1)))
		new_version = true;

	/* Update change_attribute */
	inode_set_max_iversion_raw(inode, iinfo->change_attr);

	__ceph_caps_issued(ci, &issued);
	issued |= __ceph_caps_dirty(ci);
	new_issued = ~issued & info_caps;

	/* update inode */
	inode->i_rdev = le32_to_cpu(info->rdev);
	/* directories have fl_stripe_unit set to zero */
	if (le32_to_cpu(info->layout.fl_stripe_unit))
		inode->i_blkbits =
			fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
	else
		inode->i_blkbits = CEPH_BLOCK_SHIFT;

	__ceph_update_quota(ci, iinfo->max_bytes, iinfo->max_files);

	if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
	    (issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = le32_to_cpu(info->mode);
		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kgid(&init_user_ns, inode->i_gid));
		ceph_decode_timespec64(&ci->i_btime, &iinfo->btime);
		ceph_decode_timespec64(&ci->i_snap_btime, &iinfo->snap_btime);
	}

	if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
	    (issued & CEPH_CAP_LINK_EXCL) == 0)
		set_nlink(inode, le32_to_cpu(info->nlink));

	if (new_version || (new_issued & CEPH_CAP_ANY_RD)) {
		/* be careful with mtime, atime, size */
		ceph_decode_timespec64(&atime, &info->atime);
		ceph_decode_timespec64(&mtime, &info->mtime);
		ceph_decode_timespec64(&ctime, &info->ctime);
		ceph_fill_file_time(inode, issued,
				le32_to_cpu(info->time_warp_seq),
				&ctime, &mtime, &atime);
	}

	if (new_version || (info_caps & CEPH_CAP_FILE_SHARED)) {
		ci->i_files = le64_to_cpu(info->files);
		ci->i_subdirs = le64_to_cpu(info->subdirs);
	}

	if (new_version ||
	    (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
		s64 old_pool = ci->i_layout.pool_id;
		struct ceph_string *old_ns;

		ceph_file_layout_from_legacy(&ci->i_layout, &info->layout);
		old_ns = rcu_dereference_protected(ci->i_layout.pool_ns,
					lockdep_is_held(&ci->i_ceph_lock));
		rcu_assign_pointer(ci->i_layout.pool_ns, pool_ns);

		if (ci->i_layout.pool_id != old_pool || pool_ns != old_ns)
			ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;

		pool_ns = old_ns;

		queue_trunc = ceph_fill_file_size(inode, issued,
					le32_to_cpu(info->truncate_seq),
					le64_to_cpu(info->truncate_size),
					le64_to_cpu(info->size));
		/* only update max_size on auth cap */
		if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
		    ci->i_max_size != le64_to_cpu(info->max_size)) {
			dout("max_size %lld -> %llu\n", ci->i_max_size,
					le64_to_cpu(info->max_size));
			ci->i_max_size = le64_to_cpu(info->max_size);
		}
	}

	/* layout and rstat are not tracked by capability, update them if
	 * the inode info is from auth mds */
	if (new_version || (info->cap.flags & CEPH_CAP_FLAG_AUTH)) {
		if (S_ISDIR(inode->i_mode)) {
			ci->i_dir_layout = iinfo->dir_layout;
			ci->i_rbytes = le64_to_cpu(info->rbytes);
			ci->i_rfiles = le64_to_cpu(info->rfiles);
			ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
			ci->i_dir_pin = iinfo->dir_pin;
			ceph_decode_timespec64(&ci->i_rctime, &info->rctime);
		}
	}

	/* xattrs */
	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
	if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL))  &&
	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
		if (ci->i_xattrs.blob)
			old_blob = ci->i_xattrs.blob;
		ci->i_xattrs.blob = xattr_blob;
		if (xattr_blob)
			memcpy(ci->i_xattrs.blob->vec.iov_base,
			       iinfo->xattr_data, iinfo->xattr_len);
		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
		ceph_forget_all_cached_acls(inode);
		ceph_security_invalidate_secctx(inode);
		xattr_blob = NULL;
	}

	/* finally update i_version */
	if (le64_to_cpu(info->version) > ci->i_version)
		ci->i_version = le64_to_cpu(info->version);

	inode->i_mapping->a_ops = &ceph_aops;

	switch (inode->i_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFBLK:
	case S_IFCHR:
	case S_IFSOCK:
		inode->i_blkbits = PAGE_SHIFT;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		inode->i_op = &ceph_file_iops;
		break;
	case S_IFREG:
		inode->i_op = &ceph_file_iops;
		inode->i_fop = &ceph_file_fops;
		break;
	case S_IFLNK:
		inode->i_op = &ceph_symlink_iops;
		if (!ci->i_symlink) {
			u32 symlen = iinfo->symlink_len;
			char *sym;

			spin_unlock(&ci->i_ceph_lock);

			if (symlen != i_size_read(inode)) {
				pr_err("%s %llx.%llx BAD symlink "
					"size %lld\n", __func__,
					ceph_vinop(inode),
					i_size_read(inode));
				i_size_write(inode, symlen);
				inode->i_blocks = calc_inode_blocks(symlen);
			}

			err = -ENOMEM;
			sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
			if (!sym)
				goto out;

			spin_lock(&ci->i_ceph_lock);
			if (!ci->i_symlink)
				ci->i_symlink = sym;
			else
				kfree(sym); /* lost a race */
		}
		inode->i_link = ci->i_symlink;
		break;
	case S_IFDIR:
		inode->i_op = &ceph_dir_iops;
		inode->i_fop = &ceph_dir_fops;
		break;
	default:
		pr_err("%s %llx.%llx BAD mode 0%o\n", __func__,
		       ceph_vinop(inode), inode->i_mode);
	}

	/* were we issued a capability? */
	if (info_caps) {
		if (ceph_snap(inode) == CEPH_NOSNAP) {
			ceph_add_cap(inode, session,
				     le64_to_cpu(info->cap.cap_id),
				     info_caps,
				     le32_to_cpu(info->cap.wanted),
				     le32_to_cpu(info->cap.seq),
				     le32_to_cpu(info->cap.mseq),
				     le64_to_cpu(info->cap.realm),
				     info->cap.flags, &new_cap);

			/* set dir completion flag? */
			if (S_ISDIR(inode->i_mode) &&
			    ci->i_files == 0 && ci->i_subdirs == 0 &&
			    (info_caps & CEPH_CAP_FILE_SHARED) &&
			    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
			    !__ceph_dir_is_complete(ci)) {
				dout(" marking %p complete (empty)\n", inode);
				i_size_write(inode, 0);
				__ceph_dir_set_complete(ci,
					atomic64_read(&ci->i_release_count),
					atomic64_read(&ci->i_ordered_count));
			}

			wake = true;
		} else {
			dout(" %p got snap_caps %s\n", inode,
			     ceph_cap_string(info_caps));
			ci->i_snap_caps |= info_caps;
		}
	}

	if (iinfo->inline_version > 0 &&
	    iinfo->inline_version >= ci->i_inline_version) {
		int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
		ci->i_inline_version = iinfo->inline_version;
		if (ci->i_inline_version != CEPH_INLINE_NONE &&
		    (locked_page || (info_caps & cache_caps)))
			fill_inline = true;
	}

	if (cap_fmode >= 0) {
		if (!info_caps)
			pr_warn("mds issued no caps on %llx.%llx\n",
				ceph_vinop(inode));
		__ceph_touch_fmode(ci, mdsc, cap_fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	if (fill_inline)
		ceph_fill_inline_data(inode, locked_page,
				      iinfo->inline_data, iinfo->inline_len);

	if (wake)
		wake_up_all(&ci->i_cap_wq);

	/* queue truncate if we saw i_size decrease */
	if (queue_trunc)
		ceph_queue_vmtruncate(inode);

	/* populate frag tree */
	if (S_ISDIR(inode->i_mode))
		ceph_fill_fragtree(inode, &info->fragtree, dirinfo);

	/* update delegation info? */
	if (dirinfo)
		ceph_fill_dirfrag(inode, dirinfo);

	err = 0;
out:
	if (new_cap)
		ceph_put_cap(mdsc, new_cap);
	ceph_buffer_put(old_blob);
	ceph_buffer_put(xattr_blob);
	ceph_put_string(pool_ns);
	return err;
}

/*
 * caller should hold session s_mutex and dentry->d_lock.
 */
static void __update_dentry_lease(struct inode *dir, struct dentry *dentry,
				  struct ceph_mds_reply_lease *lease,
				  struct ceph_mds_session *session,
				  unsigned long from_time,
				  struct ceph_mds_session **old_lease_session)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	unsigned mask = le16_to_cpu(lease->mask);
	long unsigned duration = le32_to_cpu(lease->duration_ms);
	long unsigned ttl = from_time + (duration * HZ) / 1000;
	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;

	dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
	     dentry, duration, ttl);

	/* only track leases on regular dentries */
	if (ceph_snap(dir) != CEPH_NOSNAP)
		return;

	if (mask & CEPH_LEASE_PRIMARY_LINK)
		di->flags |= CEPH_DENTRY_PRIMARY_LINK;
	else
		di->flags &= ~CEPH_DENTRY_PRIMARY_LINK;

	di->lease_shared_gen = atomic_read(&ceph_inode(dir)->i_shared_gen);
	if (!(mask & CEPH_LEASE_VALID)) {
		__ceph_dentry_dir_lease_touch(di);
		return;
	}

	if (di->lease_gen == session->s_cap_gen &&
	    time_before(ttl, di->time))
		return;  /* we already have a newer lease. */

	if (di->lease_session && di->lease_session != session) {
		*old_lease_session = di->lease_session;
		di->lease_session = NULL;
	}

	if (!di->lease_session)
		di->lease_session = ceph_get_mds_session(session);
	di->lease_gen = session->s_cap_gen;
	di->lease_seq = le32_to_cpu(lease->seq);
	di->lease_renew_after = half_ttl;
	di->lease_renew_from = 0;
	di->time = ttl;

	__ceph_dentry_lease_touch(di);
}

static inline void update_dentry_lease(struct inode *dir, struct dentry *dentry,
					struct ceph_mds_reply_lease *lease,
					struct ceph_mds_session *session,
					unsigned long from_time)
{
	struct ceph_mds_session *old_lease_session = NULL;
	spin_lock(&dentry->d_lock);
	__update_dentry_lease(dir, dentry, lease, session, from_time,
			      &old_lease_session);
	spin_unlock(&dentry->d_lock);
	ceph_put_mds_session(old_lease_session);
}

/*
 * update dentry lease without having parent inode locked
 */
static void update_dentry_lease_careful(struct dentry *dentry,
					struct ceph_mds_reply_lease *lease,
					struct ceph_mds_session *session,
					unsigned long from_time,
					char *dname, u32 dname_len,
					struct ceph_vino *pdvino,
					struct ceph_vino *ptvino)

{
	struct inode *dir;
	struct ceph_mds_session *old_lease_session = NULL;

	spin_lock(&dentry->d_lock);
	/* make sure dentry's name matches target */
	if (dentry->d_name.len != dname_len ||
	    memcmp(dentry->d_name.name, dname, dname_len))
		goto out_unlock;

	dir = d_inode(dentry->d_parent);
	/* make sure parent matches dvino */
	if (!ceph_ino_compare(dir, pdvino))
		goto out_unlock;

	/* make sure dentry's inode matches target. NULL ptvino means that
	 * we expect a negative dentry */
	if (ptvino) {
		if (d_really_is_negative(dentry))
			goto out_unlock;
		if (!ceph_ino_compare(d_inode(dentry), ptvino))
			goto out_unlock;
	} else {
		if (d_really_is_positive(dentry))
			goto out_unlock;
	}

	__update_dentry_lease(dir, dentry, lease, session,
			      from_time, &old_lease_session);
out_unlock:
	spin_unlock(&dentry->d_lock);
	ceph_put_mds_session(old_lease_session);
}

/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 */
static int splice_dentry(struct dentry **pdn, struct inode *in)
{
	struct dentry *dn = *pdn;
	struct dentry *realdn;

	BUG_ON(d_inode(dn));

	if (S_ISDIR(in->i_mode)) {
		/* If inode is directory, d_splice_alias() below will remove
		 * 'realdn' from its origin parent. We need to ensure that
		 * origin parent's readdir cache will not reference 'realdn'
		 */
		realdn = d_find_any_alias(in);
		if (realdn) {
			struct ceph_dentry_info *di = ceph_dentry(realdn);
			spin_lock(&realdn->d_lock);

			realdn->d_op->d_prune(realdn);

			di->time = jiffies;
			di->lease_shared_gen = 0;
			di->offset = 0;

			spin_unlock(&realdn->d_lock);
			dput(realdn);
		}
	}

	/* dn must be unhashed */
	if (!d_unhashed(dn))
		d_drop(dn);
	realdn = d_splice_alias(in, dn);
	if (IS_ERR(realdn)) {
		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
		return PTR_ERR(realdn);
	}

	if (realdn) {
		dout("dn %p (%d) spliced with %p (%d) "
		     "inode %p ino %llx.%llx\n",
		     dn, d_count(dn),
		     realdn, d_count(realdn),
		     d_inode(realdn), ceph_vinop(d_inode(realdn)));
		dput(dn);
		*pdn = realdn;
	} else {
		BUG_ON(!ceph_dentry(dn));
		dout("dn %p attached to %p ino %llx.%llx\n",
		     dn, d_inode(dn), ceph_vinop(d_inode(dn)));
	}
	return 0;
}
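
/*
 * Note on usage (see the call sites in ceph_fill_trace() and
 * ceph_readdir_prepopulate() below): d_splice_alias() consumes a
 * reference on @in, so callers take an extra one first, e.g.:
 *
 *	ihold(in);
 *	err = splice_dentry(&req->r_dentry, in);
 */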

/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain
 *         a directory inode along with a dentry.
 *  and/or a target inode
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
{
	struct ceph_mds_session *session = req->r_session;
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct inode *in = NULL;
	struct ceph_vino tvino, dvino;
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	int err = 0;

	dout("fill_trace %p is_dentry %d is_target %d\n", req,
	     rinfo->head->is_dentry, rinfo->head->is_target);

	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
		dout("fill_trace reply is empty!\n");
		if (rinfo->head->result == 0 && req->r_parent)
			ceph_invalidate_dir_request(req);
		return 0;
	}

	if (rinfo->head->is_dentry) {
		struct inode *dir = req->r_parent;

		if (dir) {
			err = ceph_fill_inode(dir, NULL, &rinfo->diri,
					      rinfo->dirfrag, session, -1,
					      &req->r_caps_reservation);
			if (err < 0)
				goto done;
		} else {
			WARN_ON_ONCE(1);
		}

		if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME &&
		    test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
		    !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
			struct qstr dname;
			struct dentry *dn, *parent;

			BUG_ON(!rinfo->head->is_target);
			BUG_ON(req->r_dentry);

			parent = d_find_any_alias(dir);
			BUG_ON(!parent);

			dname.name = rinfo->dname;
			dname.len = rinfo->dname_len;
			dname.hash = full_name_hash(parent, dname.name, dname.len);
			tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
			tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
retry_lookup:
			dn = d_lookup(parent, &dname);
			dout("d_lookup on parent=%p name=%.*s got %p\n",
			     parent, dname.len, dname.name, dn);

			if (!dn) {
				dn = d_alloc(parent, &dname);
				dout("d_alloc %p '%.*s' = %p\n", parent,
				     dname.len, dname.name, dn);
				if (!dn) {
					dput(parent);
					err = -ENOMEM;
					goto done;
				}
				err = 0;
			} else if (d_really_is_positive(dn) &&
				   (ceph_ino(d_inode(dn)) != tvino.ino ||
				    ceph_snap(d_inode(dn)) != tvino.snap)) {
				dout(" dn %p points to wrong inode %p\n",
				     dn, d_inode(dn));
				ceph_dir_clear_ordered(dir);
				d_delete(dn);
				dput(dn);
				goto retry_lookup;
			}

			req->r_dentry = dn;
			dput(parent);
		}
	}

	if (rinfo->head->is_target) {
		tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
		tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

		in = ceph_get_inode(sb, tvino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			goto done;
		}

		err = ceph_fill_inode(in, req->r_locked_page, &rinfo->targeti,
				NULL, session,
				(!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
				 !test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags) &&
				 rinfo->head->result == 0) ?  req->r_fmode : -1,
				&req->r_caps_reservation);
		if (err < 0) {
			pr_err("ceph_fill_inode badness %p %llx.%llx\n",
				in, ceph_vinop(in));
			if (in->i_state & I_NEW)
				discard_new_inode(in);
			else
				iput(in);
			goto done;
		}
		req->r_target_inode = in;
		if (in->i_state & I_NEW)
			unlock_new_inode(in);
	}

	/*
	 * ignore null lease/binding on snapdir ENOENT, or else we
	 * will have trouble splicing in the virtual snapdir later
	 */
	if (rinfo->head->is_dentry &&
	    !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
	    test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
					       fsc->mount_options->snapdir_name,
					       req->r_dentry->d_name.len))) {
		/*
		 * lookup link rename   : null -> possibly existing inode
		 * mknod symlink mkdir  : null -> new inode
		 * unlink               : linked -> null
		 */
		struct inode *dir = req->r_parent;
		struct dentry *dn = req->r_dentry;
		bool have_dir_cap, have_lease;

		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(d_inode(dn->d_parent) != dir);

		dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
		dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);

		BUG_ON(ceph_ino(dir) != dvino.ino);
		BUG_ON(ceph_snap(dir) != dvino.snap);

		/* do we have a lease on the whole dir? */
		have_dir_cap =
			(le32_to_cpu(rinfo->diri.in->cap.caps) &
			 CEPH_CAP_FILE_SHARED);

		/* do we have a dn lease? */
		have_lease = have_dir_cap ||
			le32_to_cpu(rinfo->dlease->duration_ms);
		if (!have_lease)
			dout("fill_trace  no dentry lease or dir cap\n");

		/* rename? */
		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
			struct inode *olddir = req->r_old_dentry_dir;
			BUG_ON(!olddir);

			dout(" src %p '%pd' dst %p '%pd'\n",
			     req->r_old_dentry,
			     req->r_old_dentry,
			     dn, dn);
			dout("fill_trace doing d_move %p -> %p\n",
			     req->r_old_dentry, dn);

			/* d_move screws up sibling dentries' offsets */
			ceph_dir_clear_ordered(dir);
			ceph_dir_clear_ordered(olddir);

			d_move(req->r_old_dentry, dn);
			dout(" src %p '%pd' dst %p '%pd'\n",
			     req->r_old_dentry,
			     req->r_old_dentry,
			     dn, dn);

			/* ensure target dentry is invalidated, despite
			   rehashing bug in vfs_rename_dir */
			ceph_invalidate_dentry_lease(dn);

			dout("dn %p gets new offset %lld\n", req->r_old_dentry,
			     ceph_dentry(req->r_old_dentry)->offset);

			/* swap r_dentry and r_old_dentry in case that
			 * splice_dentry() gets called later. This is safe
			 * because no other place will use them */
			req->r_dentry = req->r_old_dentry;
			req->r_old_dentry = dn;
			dn = req->r_dentry;
		}

		/* null dentry? */
		if (!rinfo->head->is_target) {
			dout("fill_trace null dentry\n");
			if (d_really_is_positive(dn)) {
				dout("d_delete %p\n", dn);
				ceph_dir_clear_ordered(dir);
				d_delete(dn);
			} else if (have_lease) {
				if (d_unhashed(dn))
					d_add(dn, NULL);
				update_dentry_lease(dir, dn,
						    rinfo->dlease, session,
						    req->r_request_started);
			}
			goto done;
		}

		/* attach proper inode */
		if (d_really_is_negative(dn)) {
			ceph_dir_clear_ordered(dir);
			ihold(in);
			err = splice_dentry(&req->r_dentry, in);
			if (err < 0)
				goto done;
			dn = req->r_dentry;  /* may have spliced */
		} else if (d_really_is_positive(dn) && d_inode(dn) != in) {
			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
			     dn, d_inode(dn), ceph_vinop(d_inode(dn)),
			     ceph_vinop(in));
			d_invalidate(dn);
			have_lease = false;
		}

		if (have_lease) {
			update_dentry_lease(dir, dn,
					    rinfo->dlease, session,
					    req->r_request_started);
		}
		dout(" final dn %p\n", dn);
	} else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
		    req->r_op == CEPH_MDS_OP_MKSNAP) &&
		   test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
		   !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
		struct inode *dir = req->r_parent;

		/* fill out a snapdir LOOKUPSNAP dentry */
		BUG_ON(!dir);
		BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
		BUG_ON(!req->r_dentry);
		dout(" linking snapped dir %p to dn %p\n", in, req->r_dentry);
		ceph_dir_clear_ordered(dir);
		ihold(in);
		err = splice_dentry(&req->r_dentry, in);
		if (err < 0)
			goto done;
	} else if (rinfo->head->is_dentry && req->r_dentry) {
		/* parent inode is not locked, be careful */
		struct ceph_vino *ptvino = NULL;
		dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
		dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);
		if (rinfo->head->is_target) {
			tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
			tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
			ptvino = &tvino;
		}
		update_dentry_lease_careful(req->r_dentry, rinfo->dlease,
					    session, req->r_request_started,
					    rinfo->dname, rinfo->dname_len,
					    &dvino, ptvino);
	}
done:
	dout("fill_trace done err=%d\n", err);
	return err;
}

/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
					   struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	int i, err = 0;

	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
		struct ceph_vino vino;
		struct inode *in;
		int rc;

		vino.ino = le64_to_cpu(rde->inode.in->ino);
		vino.snap = le64_to_cpu(rde->inode.in->snapid);

		in = ceph_get_inode(req->r_dentry->d_sb, vino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			dout("new_inode badness got %d\n", err);
			continue;
		}
		rc = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
				     -1, &req->r_caps_reservation);
		if (rc < 0) {
			pr_err("ceph_fill_inode badness on %p got %d\n",
			       in, rc);
			err = rc;
			if (in->i_state & I_NEW) {
				ihold(in);
				discard_new_inode(in);
			}
		} else if (in->i_state & I_NEW) {
			unlock_new_inode(in);
		}

		/* avoid calling iput_final() in mds dispatch threads */
		ceph_async_iput(in);
	}

	return err;
}

void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
{
	if (ctl->page) {
		kunmap(ctl->page);
		put_page(ctl->page);
		ctl->page = NULL;
	}
}

static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
			      struct ceph_readdir_cache_control *ctl,
			      struct ceph_mds_request *req)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
	unsigned idx = ctl->index % nsize;
	pgoff_t pgoff = ctl->index / nsize;

	if (!ctl->page || pgoff != page_index(ctl->page)) {
		ceph_readdir_cache_release(ctl);
		if (idx == 0)
			ctl->page = grab_cache_page(&dir->i_data, pgoff);
		else
			ctl->page = find_lock_page(&dir->i_data, pgoff);
		if (!ctl->page) {
			ctl->index = -1;
			return idx == 0 ? -ENOMEM : 0;
		}
		/* reading/filling the cache are serialized by
		 * i_mutex, no need to use page lock */
		unlock_page(ctl->page);
		ctl->dentries = kmap(ctl->page);
		if (idx == 0)
			memset(ctl->dentries, 0, PAGE_SIZE);
	}

	if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
	    req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) {
		dout("readdir cache dn %p idx %d\n", dn, ctl->index);
		ctl->dentries[idx] = dn;
		ctl->index++;
	} else {
		dout("disable readdir cache\n");
		ctl->index = -1;
	}
	return 0;
}
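
/*
 * For illustration: the readdir cache is an array of dentry pointers
 * packed into the directory's own page cache, so with 4K pages on a
 * 64-bit machine nsize is 4096 / 8 = 512, and cache index 1000 lands
 * at slot 1000 % 512 == 488 of page 1000 / 512 == 1.
 */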

int ceph_readdir_prepopulate(struct ceph_mds_request *req,
			     struct ceph_mds_session *session)
{
	struct dentry *parent = req->r_dentry;
	struct ceph_inode_info *ci = ceph_inode(d_inode(parent));
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct qstr dname;
	struct dentry *dn;
	struct inode *in;
	int err = 0, skipped = 0, ret, i;
	struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
	u32 frag = le32_to_cpu(rhead->args.readdir.frag);
	u32 last_hash = 0;
	u32 fpos_offset;
	struct ceph_readdir_cache_control cache_ctl = {};

	if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
		return readdir_prepopulate_inodes_only(req, session);

	if (rinfo->hash_order) {
		if (req->r_path2) {
			last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
						  req->r_path2,
						  strlen(req->r_path2));
			last_hash = ceph_frag_value(last_hash);
		} else if (rinfo->offset_hash) {
			/* mds understands offset_hash */
			WARN_ON_ONCE(req->r_readdir_offset != 2);
			last_hash = le32_to_cpu(rhead->args.readdir.offset_hash);
		}
	}

	if (rinfo->dir_dir &&
	    le32_to_cpu(rinfo->dir_dir->frag) != frag) {
		dout("readdir_prepopulate got new frag %x -> %x\n",
		     frag, le32_to_cpu(rinfo->dir_dir->frag));
		frag = le32_to_cpu(rinfo->dir_dir->frag);
		if (!rinfo->hash_order)
			req->r_readdir_offset = 2;
	}

	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
		     rinfo->dir_nr, parent);
	} else {
		dout("readdir_prepopulate %d items under dn %p\n",
		     rinfo->dir_nr, parent);
		if (rinfo->dir_dir)
			ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);

		if (ceph_frag_is_leftmost(frag) &&
		    req->r_readdir_offset == 2 &&
		    !(rinfo->hash_order && last_hash)) {
			/* note dir version at start of readdir so we can
			 * tell if any dentries get dropped */
			req->r_dir_release_cnt =
				atomic64_read(&ci->i_release_count);
			req->r_dir_ordered_cnt =
				atomic64_read(&ci->i_ordered_count);
			req->r_readdir_cache_idx = 0;
		}
	}

	cache_ctl.index = req->r_readdir_cache_idx;
	fpos_offset = req->r_readdir_offset;

	/* FIXME: release caps/leases if error occurs */
	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
		struct ceph_vino tvino;

		dname.name = rde->name;
		dname.len = rde->name_len;
		dname.hash = full_name_hash(parent, dname.name, dname.len);

		tvino.ino = le64_to_cpu(rde->inode.in->ino);
		tvino.snap = le64_to_cpu(rde->inode.in->snapid);

		if (rinfo->hash_order) {
			u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
						 rde->name, rde->name_len);
			hash = ceph_frag_value(hash);
			if (hash != last_hash)
				fpos_offset = 2;
			last_hash = hash;
			rde->offset = ceph_make_fpos(hash, fpos_offset++, true);
		} else {
			rde->offset = ceph_make_fpos(frag, fpos_offset++, false);
		}

retry_lookup:
		dn = d_lookup(parent, &dname);
		dout("d_lookup on parent=%p name=%.*s got %p\n",
		     parent, dname.len, dname.name, dn);

		if (!dn) {
			dn = d_alloc(parent, &dname);
			dout("d_alloc %p '%.*s' = %p\n", parent,
			     dname.len, dname.name, dn);
			if (!dn) {
				dout("d_alloc badness\n");
				err = -ENOMEM;
				goto out;
			}
		} else if (d_really_is_positive(dn) &&
			   (ceph_ino(d_inode(dn)) != tvino.ino ||
			    ceph_snap(d_inode(dn)) != tvino.snap)) {
			struct ceph_dentry_info *di = ceph_dentry(dn);
			dout(" dn %p points to wrong inode %p\n",
			     dn, d_inode(dn));

			spin_lock(&dn->d_lock);
			if (di->offset > 0 &&
			    di->lease_shared_gen ==
			    atomic_read(&ci->i_shared_gen)) {
				__ceph_dir_clear_ordered(ci);
				di->offset = 0;
			}
			spin_unlock(&dn->d_lock);

			d_delete(dn);
			dput(dn);
			goto retry_lookup;
		}

		/* inode */
		if (d_really_is_positive(dn)) {
			in = d_inode(dn);
		} else {
			in = ceph_get_inode(parent->d_sb, tvino);
			if (IS_ERR(in)) {
				dout("new_inode badness\n");
				d_drop(dn);
				dput(dn);
				err = PTR_ERR(in);
				goto out;
			}
		}

		ret = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
				      -1, &req->r_caps_reservation);
		if (ret < 0) {
			pr_err("ceph_fill_inode badness on %p\n", in);
			if (d_really_is_negative(dn)) {
				/* avoid calling iput_final() in mds
				 * dispatch threads */
				if (in->i_state & I_NEW) {
					ihold(in);
					discard_new_inode(in);
				}
				ceph_async_iput(in);
			}
			d_drop(dn);
			err = ret;
			goto next_item;
		}
		if (in->i_state & I_NEW)
			unlock_new_inode(in);

		if (d_really_is_negative(dn)) {
			if (ceph_security_xattr_deadlock(in)) {
				dout(" skip splicing dn %p to inode %p"
				     " (security xattr deadlock)\n", dn, in);
				ceph_async_iput(in);
				skipped++;
				goto next_item;
			}

			err = splice_dentry(&dn, in);
			if (err < 0)
				goto next_item;
		}

		ceph_dentry(dn)->offset = rde->offset;

		update_dentry_lease(d_inode(parent), dn,
				    rde->lease, req->r_session,
				    req->r_request_started);

		if (err == 0 && skipped == 0 && cache_ctl.index >= 0) {
			ret = fill_readdir_cache(d_inode(parent), dn,
						 &cache_ctl, req);
			if (ret < 0)
				err = ret;
		}
next_item:
		dput(dn);
	}
out:
	if (err == 0 && skipped == 0) {
		set_bit(CEPH_MDS_R_DID_PREPOPULATE, &req->r_req_flags);
		req->r_readdir_cache_idx = cache_ctl.index;
	}
	ceph_readdir_cache_release(&cache_ctl);
	dout("readdir_prepopulate done\n");
	return err;
}
1791 
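/*
 * Update the locally cached file size and block count.  Returns true if
 * the size change should be reported to the MDS, in which case the
 * caller is expected to follow up with a cap check.
 */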
1792 bool ceph_inode_set_size(struct inode *inode, loff_t size)
1793 {
1794 	struct ceph_inode_info *ci = ceph_inode(inode);
1795 	bool ret;
1796 
1797 	spin_lock(&ci->i_ceph_lock);
1798 	dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
1799 	i_size_write(inode, size);
1800 	inode->i_blocks = calc_inode_blocks(size);
1801 
1802 	ret = __ceph_should_report_size(ci);
1803 
1804 	spin_unlock(&ci->i_ceph_lock);
1805 	return ret;
1806 }
1807 
1808 /*
1809  * Put reference to inode, but avoid calling iput_final() in current thread.
1810  * iput_final() may wait for readahead pages. The wait can cause a deadlock in
1811  * some contexts.
1812  */
1813 void ceph_async_iput(struct inode *inode)
1814 {
1815 	if (!inode)
1816 		return;
1817 	for (;;) {
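		/* drop a reference as long as we are not the last holder;
		 * otherwise defer the final iput to the workqueue */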
1818 		if (atomic_add_unless(&inode->i_count, -1, 1))
1819 			break;
1820 		if (queue_work(ceph_inode_to_client(inode)->inode_wq,
1821 			       &ceph_inode(inode)->i_work))
1822 			break;
1823 		/* queue work failed, i_count must be at least 2 */
1824 	}
1825 }
1826 
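/*
 * Each of the ceph_queue_* helpers below sets a work bit, takes an
 * inode reference for the work item, and drops that reference again
 * if the work item was already queued.
 */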
1827 /*
1828  * Write back inode data in a worker thread.  (This can't be done
1829  * in the message handler context.)
1830  */
1831 void ceph_queue_writeback(struct inode *inode)
1832 {
1833 	struct ceph_inode_info *ci = ceph_inode(inode);
1834 	set_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask);
1835 
1836 	ihold(inode);
1837 	if (queue_work(ceph_inode_to_client(inode)->inode_wq,
1838 		       &ci->i_work)) {
1839 		dout("ceph_queue_writeback %p\n", inode);
1840 	} else {
1841 		dout("ceph_queue_writeback %p already queued, mask=%lx\n",
1842 		     inode, ci->i_work_mask);
1843 		iput(inode);
1844 	}
1845 }
1846 
1847 /*
1848  * queue an async invalidation
1849  */
1850 void ceph_queue_invalidate(struct inode *inode)
1851 {
1852 	struct ceph_inode_info *ci = ceph_inode(inode);
1853 	set_bit(CEPH_I_WORK_INVALIDATE_PAGES, &ci->i_work_mask);
1854 
1855 	ihold(inode);
1856 	if (queue_work(ceph_inode_to_client(inode)->inode_wq,
1857 		       &ci->i_work)) {
1858 		dout("ceph_queue_invalidate %p\n", inode);
1859 	} else {
1860 		dout("ceph_queue_invalidate %p already queued, mask=%lx\n",
1861 		     inode, ci->i_work_mask);
1862 		iput(inode);
1863 	}
1864 }
1865 
1866 /*
1867  * Queue an async vmtruncate.  If we fail to queue work, we will handle
1868  * the truncation the next time we call __ceph_do_pending_vmtruncate.
1869  */
1870 void ceph_queue_vmtruncate(struct inode *inode)
1871 {
1872 	struct ceph_inode_info *ci = ceph_inode(inode);
1873 	set_bit(CEPH_I_WORK_VMTRUNCATE, &ci->i_work_mask);
1874 
1875 	ihold(inode);
1876 	if (queue_work(ceph_inode_to_client(inode)->inode_wq,
1877 		       &ci->i_work)) {
1878 		dout("ceph_queue_vmtruncate %p\n", inode);
1879 	} else {
1880 		dout("ceph_queue_vmtruncate %p already queued, mask=%lx\n",
1881 		     inode, ci->i_work_mask);
1882 		iput(inode);
1883 	}
1884 }
1885 
1886 static void ceph_do_invalidate_pages(struct inode *inode)
1887 {
1888 	struct ceph_inode_info *ci = ceph_inode(inode);
1889 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1890 	u32 orig_gen;
1891 	int check = 0;
1892 
1893 	mutex_lock(&ci->i_truncate_mutex);
1894 
1895 	if (READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
1896 		pr_warn_ratelimited("invalidate_pages %p %lld forced umount\n",
1897 				    inode, ceph_ino(inode));
1898 		mapping_set_error(inode->i_mapping, -EIO);
1899 		truncate_pagecache(inode, 0);
1900 		mutex_unlock(&ci->i_truncate_mutex);
1901 		goto out;
1902 	}
1903 
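	/* if i_rdcache_revoking no longer matches i_rdcache_gen, the revoke
	 * raced with new caps being issued and this invalidation is stale */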
1904 	spin_lock(&ci->i_ceph_lock);
1905 	dout("invalidate_pages %p gen %d revoking %d\n", inode,
1906 	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
1907 	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
1908 		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
1909 			check = 1;
1910 		spin_unlock(&ci->i_ceph_lock);
1911 		mutex_unlock(&ci->i_truncate_mutex);
1912 		goto out;
1913 	}
1914 	orig_gen = ci->i_rdcache_gen;
1915 	spin_unlock(&ci->i_ceph_lock);
1916 
1917 	ceph_fscache_invalidate(inode);
1918 	if (invalidate_inode_pages2(inode->i_mapping) < 0) {
1919 		pr_err("invalidate_pages %p fails\n", inode);
1920 	}
1921 
1922 	spin_lock(&ci->i_ceph_lock);
1923 	if (orig_gen == ci->i_rdcache_gen &&
1924 	    orig_gen == ci->i_rdcache_revoking) {
1925 		dout("invalidate_pages %p gen %d successful\n", inode,
1926 		     ci->i_rdcache_gen);
1927 		ci->i_rdcache_revoking--;
1928 		check = 1;
1929 	} else {
1930 		dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
1931 		     inode, orig_gen, ci->i_rdcache_gen,
1932 		     ci->i_rdcache_revoking);
1933 		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
1934 			check = 1;
1935 	}
1936 	spin_unlock(&ci->i_ceph_lock);
1937 	mutex_unlock(&ci->i_truncate_mutex);
1938 out:
1939 	if (check)
1940 		ceph_check_caps(ci, 0, NULL);
1941 }
1942 
1943 /*
1944  * Make sure any pending truncation is applied before doing anything
1945  * that may depend on it.
1946  */
1947 void __ceph_do_pending_vmtruncate(struct inode *inode)
1948 {
1949 	struct ceph_inode_info *ci = ceph_inode(inode);
1950 	u64 to;
1951 	int wrbuffer_refs, finish = 0;
1952 
1953 	mutex_lock(&ci->i_truncate_mutex);
1954 retry:
1955 	spin_lock(&ci->i_ceph_lock);
1956 	if (ci->i_truncate_pending == 0) {
1957 		dout("__do_pending_vmtruncate %p none pending\n", inode);
1958 		spin_unlock(&ci->i_ceph_lock);
1959 		mutex_unlock(&ci->i_truncate_mutex);
1960 		return;
1961 	}
1962 
1963 	/*
1964 	 * make sure any dirty snapped pages are flushed before we
1965 	 * possibly truncate them... so write AND block!
1966 	 */
1967 	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
1968 		spin_unlock(&ci->i_ceph_lock);
1969 		dout("__do_pending_vmtruncate %p flushing snaps first\n",
1970 		     inode);
1971 		filemap_write_and_wait_range(&inode->i_data, 0,
1972 					     inode->i_sb->s_maxbytes);
1973 		goto retry;
1974 	}
1975 
1976 	/* there should be no reader or writer */
1977 	WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);
1978 
1979 	to = ci->i_truncate_size;
1980 	wrbuffer_refs = ci->i_wrbuffer_ref;
1981 	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
1982 	     ci->i_truncate_pending, to);
1983 	spin_unlock(&ci->i_ceph_lock);
1984 
1985 	truncate_pagecache(inode, to);
1986 
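	/* if the truncate size changed while we dropped i_ceph_lock,
	 * another truncation was queued; go around again */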
1987 	spin_lock(&ci->i_ceph_lock);
1988 	if (to == ci->i_truncate_size) {
1989 		ci->i_truncate_pending = 0;
1990 		finish = 1;
1991 	}
1992 	spin_unlock(&ci->i_ceph_lock);
1993 	if (!finish)
1994 		goto retry;
1995 
1996 	mutex_unlock(&ci->i_truncate_mutex);
1997 
1998 	if (wrbuffer_refs == 0)
1999 		ceph_check_caps(ci, 0, NULL);
2000 
2001 	wake_up_all(&ci->i_cap_wq);
2002 }
2003 
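/*
 * Worker for the deferred inode operations queued by the ceph_queue_*
 * helpers above.  Runs in workqueue context, outside the message
 * handler path, and drops the inode reference taken at queue time.
 */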
2004 static void ceph_inode_work(struct work_struct *work)
2005 {
2006 	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
2007 						 i_work);
2008 	struct inode *inode = &ci->vfs_inode;
2009 
2010 	if (test_and_clear_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask)) {
2011 		dout("writeback %p\n", inode);
2012 		filemap_fdatawrite(&inode->i_data);
2013 	}
2014 	if (test_and_clear_bit(CEPH_I_WORK_INVALIDATE_PAGES, &ci->i_work_mask))
2015 		ceph_do_invalidate_pages(inode);
2016 
2017 	if (test_and_clear_bit(CEPH_I_WORK_VMTRUNCATE, &ci->i_work_mask))
2018 		__ceph_do_pending_vmtruncate(inode);
2019 
2020 	iput(inode);
2021 }
2022 
2023 /*
2024  * symlinks
2025  */
2026 static const struct inode_operations ceph_symlink_iops = {
2027 	.get_link = simple_get_link,
2028 	.setattr = ceph_setattr,
2029 	.getattr = ceph_getattr,
2030 	.listxattr = ceph_listxattr,
2031 };
2032 
2033 int __ceph_setattr(struct inode *inode, struct iattr *attr)
2034 {
2035 	struct ceph_inode_info *ci = ceph_inode(inode);
2036 	unsigned int ia_valid = attr->ia_valid;
2037 	struct ceph_mds_request *req;
2038 	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
2039 	struct ceph_cap_flush *prealloc_cf;
2040 	int issued;
2041 	int release = 0, dirtied = 0;
2042 	int mask = 0;
2043 	int err = 0;
2044 	int inode_dirty_flags = 0;
2045 	bool lock_snap_rwsem = false;
2046 
2047 	prealloc_cf = ceph_alloc_cap_flush();
2048 	if (!prealloc_cf)
2049 		return -ENOMEM;
2050 
2051 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
2052 				       USE_AUTH_MDS);
2053 	if (IS_ERR(req)) {
2054 		ceph_free_cap_flush(prealloc_cf);
2055 		return PTR_ERR(req);
2056 	}
2057 
2058 	spin_lock(&ci->i_ceph_lock);
2059 	issued = __ceph_caps_issued(ci, NULL);
2060 
2061 	if (!ci->i_head_snapc &&
2062 	    (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) {
2063 		lock_snap_rwsem = true;
2064 		if (!down_read_trylock(&mdsc->snap_rwsem)) {
2065 			spin_unlock(&ci->i_ceph_lock);
2066 			down_read(&mdsc->snap_rwsem);
2067 			spin_lock(&ci->i_ceph_lock);
2068 			issued = __ceph_caps_issued(ci, NULL);
2069 		}
2070 	}
2071 
2072 	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
2073 
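	/* for each attribute: if we hold the relevant EXCL cap, apply the
	 * change locally and mark that cap dirty; otherwise encode the
	 * change into the MDS request and release the SHARED cap that
	 * covered it */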
2074 	if (ia_valid & ATTR_UID) {
2075 		dout("setattr %p uid %d -> %d\n", inode,
2076 		     from_kuid(&init_user_ns, inode->i_uid),
2077 		     from_kuid(&init_user_ns, attr->ia_uid));
2078 		if (issued & CEPH_CAP_AUTH_EXCL) {
2079 			inode->i_uid = attr->ia_uid;
2080 			dirtied |= CEPH_CAP_AUTH_EXCL;
2081 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2082 			   !uid_eq(attr->ia_uid, inode->i_uid)) {
2083 			req->r_args.setattr.uid = cpu_to_le32(
2084 				from_kuid(&init_user_ns, attr->ia_uid));
2085 			mask |= CEPH_SETATTR_UID;
2086 			release |= CEPH_CAP_AUTH_SHARED;
2087 		}
2088 	}
2089 	if (ia_valid & ATTR_GID) {
2090 		dout("setattr %p gid %d -> %d\n", inode,
2091 		     from_kgid(&init_user_ns, inode->i_gid),
2092 		     from_kgid(&init_user_ns, attr->ia_gid));
2093 		if (issued & CEPH_CAP_AUTH_EXCL) {
2094 			inode->i_gid = attr->ia_gid;
2095 			dirtied |= CEPH_CAP_AUTH_EXCL;
2096 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2097 			   !gid_eq(attr->ia_gid, inode->i_gid)) {
2098 			req->r_args.setattr.gid = cpu_to_le32(
2099 				from_kgid(&init_user_ns, attr->ia_gid));
2100 			mask |= CEPH_SETATTR_GID;
2101 			release |= CEPH_CAP_AUTH_SHARED;
2102 		}
2103 	}
2104 	if (ia_valid & ATTR_MODE) {
2105 		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
2106 		     attr->ia_mode);
2107 		if (issued & CEPH_CAP_AUTH_EXCL) {
2108 			inode->i_mode = attr->ia_mode;
2109 			dirtied |= CEPH_CAP_AUTH_EXCL;
2110 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2111 			   attr->ia_mode != inode->i_mode) {
2112 			inode->i_mode = attr->ia_mode;
2113 			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
2114 			mask |= CEPH_SETATTR_MODE;
2115 			release |= CEPH_CAP_AUTH_SHARED;
2116 		}
2117 	}
2118 
2119 	if (ia_valid & ATTR_ATIME) {
2120 		dout("setattr %p atime %lld.%ld -> %lld.%ld\n", inode,
2121 		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
2122 		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
2123 		if (issued & CEPH_CAP_FILE_EXCL) {
2124 			ci->i_time_warp_seq++;
2125 			inode->i_atime = attr->ia_atime;
2126 			dirtied |= CEPH_CAP_FILE_EXCL;
2127 		} else if ((issued & CEPH_CAP_FILE_WR) &&
2128 			   timespec64_compare(&inode->i_atime,
2129 					    &attr->ia_atime) < 0) {
2130 			inode->i_atime = attr->ia_atime;
2131 			dirtied |= CEPH_CAP_FILE_WR;
2132 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2133 			   !timespec64_equal(&inode->i_atime, &attr->ia_atime)) {
2134 			ceph_encode_timespec64(&req->r_args.setattr.atime,
2135 					       &attr->ia_atime);
2136 			mask |= CEPH_SETATTR_ATIME;
2137 			release |= CEPH_CAP_FILE_SHARED |
2138 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2139 		}
2140 	}
2141 	if (ia_valid & ATTR_SIZE) {
2142 		dout("setattr %p size %lld -> %lld\n", inode,
2143 		     inode->i_size, attr->ia_size);
2144 		if ((issued & CEPH_CAP_FILE_EXCL) &&
2145 		    attr->ia_size > inode->i_size) {
2146 			i_size_write(inode, attr->ia_size);
2147 			inode->i_blocks = calc_inode_blocks(attr->ia_size);
2148 			ci->i_reported_size = attr->ia_size;
2149 			dirtied |= CEPH_CAP_FILE_EXCL;
2150 			ia_valid |= ATTR_MTIME;
2151 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2152 			   attr->ia_size != inode->i_size) {
2153 			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
2154 			req->r_args.setattr.old_size =
2155 				cpu_to_le64(inode->i_size);
2156 			mask |= CEPH_SETATTR_SIZE;
2157 			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL |
2158 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2159 		}
2160 	}
2161 	if (ia_valid & ATTR_MTIME) {
2162 		dout("setattr %p mtime %lld.%ld -> %lld.%ld\n", inode,
2163 		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
2164 		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
2165 		if (issued & CEPH_CAP_FILE_EXCL) {
2166 			ci->i_time_warp_seq++;
2167 			inode->i_mtime = attr->ia_mtime;
2168 			dirtied |= CEPH_CAP_FILE_EXCL;
2169 		} else if ((issued & CEPH_CAP_FILE_WR) &&
2170 			   timespec64_compare(&inode->i_mtime,
2171 					    &attr->ia_mtime) < 0) {
2172 			inode->i_mtime = attr->ia_mtime;
2173 			dirtied |= CEPH_CAP_FILE_WR;
2174 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2175 			   !timespec64_equal(&inode->i_mtime, &attr->ia_mtime)) {
2176 			ceph_encode_timespec64(&req->r_args.setattr.mtime,
2177 					       &attr->ia_mtime);
2178 			mask |= CEPH_SETATTR_MTIME;
2179 			release |= CEPH_CAP_FILE_SHARED |
2180 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2181 		}
2182 	}
2183 
2184 	/* these do nothing */
2185 	if (ia_valid & ATTR_CTIME) {
2186 		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
2187 					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
2188 		dout("setattr %p ctime %lld.%ld -> %lld.%ld (%s)\n", inode,
2189 		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
2190 		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
2191 		     only ? "ctime only" : "ignored");
2192 		if (only) {
2193 			/*
2194 			 * if the kernel wants to dirty ctime but nothing else,
2195 			 * we need to choose a cap to dirty under, or do
2196 			 * an almost-no-op setattr
2197 			 */
2198 			if (issued & CEPH_CAP_AUTH_EXCL)
2199 				dirtied |= CEPH_CAP_AUTH_EXCL;
2200 			else if (issued & CEPH_CAP_FILE_EXCL)
2201 				dirtied |= CEPH_CAP_FILE_EXCL;
2202 			else if (issued & CEPH_CAP_XATTR_EXCL)
2203 				dirtied |= CEPH_CAP_XATTR_EXCL;
2204 			else
2205 				mask |= CEPH_SETATTR_CTIME;
2206 		}
2207 	}
2208 	if (ia_valid & ATTR_FILE)
2209 		dout("setattr %p ATTR_FILE ... hrm!\n", inode);
2210 
2211 	if (dirtied) {
2212 		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
2213 							   &prealloc_cf);
2214 		inode->i_ctime = attr->ia_ctime;
2215 	}
2216 
2217 	release &= issued;
2218 	spin_unlock(&ci->i_ceph_lock);
2219 	if (lock_snap_rwsem)
2220 		up_read(&mdsc->snap_rwsem);
2221 
2222 	if (inode_dirty_flags)
2223 		__mark_inode_dirty(inode, inode_dirty_flags);
2224 
2225 
2226 	if (mask) {
2227 		req->r_inode = inode;
2228 		ihold(inode);
2229 		req->r_inode_drop = release;
2230 		req->r_args.setattr.mask = cpu_to_le32(mask);
2231 		req->r_num_caps = 1;
2232 		req->r_stamp = attr->ia_ctime;
2233 		err = ceph_mdsc_do_request(mdsc, NULL, req);
2234 	}
2235 	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
2236 	     ceph_cap_string(dirtied), mask);
2237 
2238 	ceph_mdsc_put_request(req);
2239 	ceph_free_cap_flush(prealloc_cf);
2240 
2241 	if (err >= 0 && (mask & CEPH_SETATTR_SIZE))
2242 		__ceph_do_pending_vmtruncate(inode);
2243 
2244 	return err;
2245 }
2246 
2247 /*
2248  * setattr
2249  */
2250 int ceph_setattr(struct dentry *dentry, struct iattr *attr)
2251 {
2252 	struct inode *inode = d_inode(dentry);
2253 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
2254 	int err;
2255 
2256 	if (ceph_snap(inode) != CEPH_NOSNAP)
2257 		return -EROFS;
2258 
2259 	err = setattr_prepare(dentry, attr);
2260 	if (err != 0)
2261 		return err;
2262 
2263 	if ((attr->ia_valid & ATTR_SIZE) &&
2264 	    attr->ia_size > max(inode->i_size, fsc->max_file_size))
2265 		return -EFBIG;
2266 
2267 	if ((attr->ia_valid & ATTR_SIZE) &&
2268 	    ceph_quota_is_max_bytes_exceeded(inode, attr->ia_size))
2269 		return -EDQUOT;
2270 
2271 	err = __ceph_setattr(inode, attr);
2272 
2273 	if (err >= 0 && (attr->ia_valid & ATTR_MODE))
2274 		err = posix_acl_chmod(inode, attr->ia_mode);
2275 
2276 	return err;
2277 }
2278 
2279 /*
2280  * Verify that we have a lease on the given mask.  If not,
2281  * do a getattr against an mds.
2282  */
2283 int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
2284 		      int mask, bool force)
2285 {
2286 	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
2287 	struct ceph_mds_client *mdsc = fsc->mdsc;
2288 	struct ceph_mds_request *req;
2289 	int mode;
2290 	int err;
2291 
2292 	if (ceph_snap(inode) == CEPH_SNAPDIR) {
2293 		dout("do_getattr inode %p SNAPDIR\n", inode);
2294 		return 0;
2295 	}
2296 
2297 	dout("do_getattr inode %p mask %s mode 0%o\n",
2298 	     inode, ceph_cap_string(mask), inode->i_mode);
2299 	if (!force && ceph_caps_issued_mask_metric(ceph_inode(inode), mask, 1))
2300 		return 0;
2301 
2302 	mode = (mask & CEPH_STAT_RSTAT) ? USE_AUTH_MDS : USE_ANY_MDS;
2303 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
2304 	if (IS_ERR(req))
2305 		return PTR_ERR(req);
2306 	req->r_inode = inode;
2307 	ihold(inode);
2308 	req->r_num_caps = 1;
2309 	req->r_args.getattr.mask = cpu_to_le32(mask);
2310 	req->r_locked_page = locked_page;
2311 	err = ceph_mdsc_do_request(mdsc, NULL, req);
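	/* a non-NULL locked_page means the caller also wants the inline
	 * data; on success we return the inline data length */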
2312 	if (locked_page && err == 0) {
2313 		u64 inline_version = req->r_reply_info.targeti.inline_version;
2314 		if (inline_version == 0) {
2315 			/* the reply is supposed to contain inline data */
2316 			err = -EINVAL;
2317 		} else if (inline_version == CEPH_INLINE_NONE) {
2318 			err = -ENODATA;
2319 		} else {
2320 			err = req->r_reply_info.targeti.inline_len;
2321 		}
2322 	}
2323 	ceph_mdsc_put_request(req);
2324 	dout("do_getattr result=%d\n", err);
2325 	return err;
2326 }
2327 
2328 
2329 /*
2330  * Check inode permissions.  We verify we have a valid value for
2331  * the AUTH cap, then call the generic handler.
2332  */
2333 int ceph_permission(struct inode *inode, int mask)
2334 {
2335 	int err;
2336 
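	/* in RCU-walk mode we cannot block on a getattr, so bail out and
	 * let the VFS retry the permission check in ref-walk mode */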
2337 	if (mask & MAY_NOT_BLOCK)
2338 		return -ECHILD;
2339 
2340 	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);
2341 
2342 	if (!err)
2343 		err = generic_permission(inode, mask);
2344 	return err;
2345 }
2346 
2347 /* Craft a mask of needed caps given a set of requested statx attrs. */
2348 static int statx_to_caps(u32 want)
2349 {
2350 	int mask = 0;
2351 
2352 	if (want & (STATX_MODE|STATX_UID|STATX_GID|STATX_CTIME|STATX_BTIME))
2353 		mask |= CEPH_CAP_AUTH_SHARED;
2354 
2355 	if (want & (STATX_NLINK|STATX_CTIME))
2356 		mask |= CEPH_CAP_LINK_SHARED;
2357 
2358 	if (want & (STATX_ATIME|STATX_MTIME|STATX_CTIME|STATX_SIZE|
2359 		    STATX_BLOCKS))
2360 		mask |= CEPH_CAP_FILE_SHARED;
2361 
2362 	if (want & (STATX_CTIME))
2363 		mask |= CEPH_CAP_XATTR_SHARED;
2364 
2365 	return mask;
2366 }
2367 
2368 /*
2369  * Get all the attributes. If we have sufficient caps for the requested attrs,
2370  * then we can avoid talking to the MDS at all.
2371  */
2372 int ceph_getattr(const struct path *path, struct kstat *stat,
2373 		 u32 request_mask, unsigned int flags)
2374 {
2375 	struct inode *inode = d_inode(path->dentry);
2376 	struct ceph_inode_info *ci = ceph_inode(inode);
2377 	u32 valid_mask = STATX_BASIC_STATS;
2378 	int err = 0;
2379 
2380 	/* Skip the getattr altogether if we're asked not to sync */
2381 	if (!(flags & AT_STATX_DONT_SYNC)) {
2382 		err = ceph_do_getattr(inode, statx_to_caps(request_mask),
2383 				      flags & AT_STATX_FORCE_SYNC);
2384 		if (err)
2385 			return err;
2386 	}
2387 
2388 	generic_fillattr(inode, stat);
2389 	stat->ino = ceph_present_inode(inode);
2390 
2391 	/*
2392 	 * btime on newly-allocated inodes is 0, so if this is still set to
2393 	 * that, then assume that it's not valid.
2394 	 */
2395 	if (ci->i_btime.tv_sec || ci->i_btime.tv_nsec) {
2396 		stat->btime = ci->i_btime;
2397 		valid_mask |= STATX_BTIME;
2398 	}
2399 
2400 	if (ceph_snap(inode) == CEPH_NOSNAP)
2401 		stat->dev = inode->i_sb->s_dev;
2402 	else
2403 		stat->dev = ci->i_snapid_map ? ci->i_snapid_map->dev : 0;
2404 
2405 	if (S_ISDIR(inode->i_mode)) {
2406 		if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
2407 					RBYTES))
2408 			stat->size = ci->i_rbytes;
2409 		else
2410 			stat->size = ci->i_files + ci->i_subdirs;
2411 		stat->blocks = 0;
2412 		stat->blksize = 65536;
2413 		/*
2414 		 * Some applications rely on the st_nlink value of
2415 		 * directories being either 0 (if unlinked) or
2416 		 * 2 + number of subdirectories.
2417 		 */
2418 		if (stat->nlink == 1)
2419 			/* '.' + '..' + subdirs */
2420 			stat->nlink = 1 + 1 + ci->i_subdirs;
2421 	}
2422 
2423 	stat->result_mask = request_mask & valid_mask;
2424 	return err;
2425 }
2426