#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>
#include <linux/posix_acl.h>
#include <linux/random.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/decode.h>

/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */

static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);

/*
 * find or create an inode, given the ceph ino number
 */
static int ceph_set_ino_cb(struct inode *inode, void *data)
{
	ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
	inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
	return 0;
}

struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
	struct inode *inode;
	ino_t t = ceph_vino_to_ino(vino);

	inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
	if (inode == NULL)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		dout("get_inode created new inode %p %llx.%llx ino %llx\n",
		     inode, ceph_vinop(inode), (u64)inode->i_ino);
		unlock_new_inode(inode);
	}

	dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
	     vino.snap, inode);
	return inode;
}

/*
 * get/construct snapdir inode for a given directory
 */
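/*
 * The snapdir is a virtual directory (vino.snap == CEPH_SNAPDIR) that
 * exposes a directory's snapshots; it is reached via the snapdir_name
 * mount option (".snap" by default) and, as below, borrows its mode,
 * uid, and gid from the parent directory.
 */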
struct inode *ceph_get_snapdir(struct inode *parent)
{
	struct ceph_vino vino = {
		.ino = ceph_ino(parent),
		.snap = CEPH_SNAPDIR,
	};
	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
	struct ceph_inode_info *ci = ceph_inode(inode);

	BUG_ON(!S_ISDIR(parent->i_mode));
	if (IS_ERR(inode))
		return inode;
	inode->i_mode = parent->i_mode;
	inode->i_uid = parent->i_uid;
	inode->i_gid = parent->i_gid;
	inode->i_op = &ceph_snapdir_iops;
	inode->i_fop = &ceph_snapdir_fops;
	ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
	ci->i_rbytes = 0;
	return inode;
}

const struct inode_operations ceph_file_iops = {
	.permission = ceph_permission,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.get_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
};


/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */
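/*
 * A frag is encoded as (bits, value): ceph_frag_make(0, 0) is the root
 * fragment covering the entire 32-bit dentry hash space.  For example,
 * a root split by 2 yields four children,
 * ceph_frag_make_child(root, 2, 0..3), each covering one quarter of
 * the hash range; ceph_frag_contains_value() tests whether a given
 * hash falls inside a fragment.
 */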

/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
						    u32 f)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_frag *frag;
	int c;

	p = &ci->i_fragtree.rb_node;
	while (*p) {
		parent = *p;
		frag = rb_entry(parent, struct ceph_inode_frag, node);
		c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return frag;
	}

	frag = kmalloc(sizeof(*frag), GFP_NOFS);
	if (!frag) {
		pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
		       "frag %x\n", &ci->vfs_inode,
		       ceph_vinop(&ci->vfs_inode), f);
		return ERR_PTR(-ENOMEM);
	}
	frag->frag = f;
	frag->split_by = 0;
	frag->mds = -1;
	frag->ndist = 0;

	rb_link_node(&frag->node, parent, p);
	rb_insert_color(&frag->node, &ci->i_fragtree);

	dout("get_or_create_frag added %llx.%llx frag %x\n",
	     ceph_vinop(&ci->vfs_inode), f);
	return frag;
}

/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
	struct rb_node *n = ci->i_fragtree.rb_node;

	while (n) {
		struct ceph_inode_frag *frag =
			rb_entry(n, struct ceph_inode_frag, node);
		int c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			n = n->rb_left;
		else if (c > 0)
			n = n->rb_right;
		else
			return frag;
	}
	return NULL;
}

/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
			      struct ceph_inode_frag *pfrag, int *found)
{
	u32 t = ceph_frag_make(0, 0);
	struct ceph_inode_frag *frag;
	unsigned nway, i;
	u32 n;

	if (found)
		*found = 0;

	while (1) {
		WARN_ON(!ceph_frag_contains_value(t, v));
		frag = __ceph_find_frag(ci, t);
		if (!frag)
			break; /* t is a leaf */
		if (frag->split_by == 0) {
			if (pfrag)
				memcpy(pfrag, frag, sizeof(*pfrag));
			if (found)
				*found = 1;
			break;
		}

		/* choose child */
		nway = 1 << frag->split_by;
		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
		     frag->split_by, nway);
		for (i = 0; i < nway; i++) {
			n = ceph_frag_make_child(t, frag->split_by, i);
			if (ceph_frag_contains_value(n, v)) {
				t = n;
				break;
			}
		}
		BUG_ON(i == nway);
	}
	dout("choose_frag(%x) = %x\n", v, t);

	return t;
}

u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
		     struct ceph_inode_frag *pfrag, int *found)
{
	u32 ret;

	mutex_lock(&ci->i_fragtree_mutex);
	ret = __ceph_choose_frag(ci, v, pfrag, found);
	mutex_unlock(&ci->i_fragtree_mutex);
	return ret;
}

/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragment in tree ONLY if ndist > 0.  Otherwise, only
 * branches/splits are included in i_fragtree.
 */
static int ceph_fill_dirfrag(struct inode *inode,
			     struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	u32 id = le32_to_cpu(dirinfo->frag);
	int mds = le32_to_cpu(dirinfo->auth);
	int ndist = le32_to_cpu(dirinfo->ndist);
	int diri_auth = -1;
	int i;
	int err = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_auth_cap)
		diri_auth = ci->i_auth_cap->mds;
	spin_unlock(&ci->i_ceph_lock);

	mutex_lock(&ci->i_fragtree_mutex);
	if (ndist == 0 && mds == diri_auth) {
		/* no delegation info needed. */
		frag = __ceph_find_frag(ci, id);
		if (!frag)
			goto out;
		if (frag->split_by == 0) {
			/* tree leaf, remove */
			dout("fill_dirfrag removed %llx.%llx frag %x"
			     " (no ref)\n", ceph_vinop(inode), id);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
		} else {
			/* tree branch, keep and clear */
			dout("fill_dirfrag cleared %llx.%llx frag %x"
			     " referral\n", ceph_vinop(inode), id);
			frag->mds = -1;
			frag->ndist = 0;
		}
		goto out;
	}


	/* find/add this frag to store mds delegation info */
	frag = __get_or_create_frag(ci, id);
	if (IS_ERR(frag)) {
		/* this is not the end of the world; we can continue
		   with bad/inaccurate delegation info */
		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
		err = -ENOMEM;
		goto out;
	}

	frag->mds = mds;
	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
	for (i = 0; i < frag->ndist; i++)
		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
	     ceph_vinop(inode), frag->frag, frag->ndist);

out:
	mutex_unlock(&ci->i_fragtree_mutex);
	return err;
}

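/*
 * Replace the cached fragtree with the one in the MDS reply: walk the
 * reply's split list (assumed to arrive in frag order) and our sorted
 * rbtree in parallel, erasing stale entries, updating matches in
 * place, and inserting splits we don't have yet.
 */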
static int ceph_fill_fragtree(struct inode *inode,
			      struct ceph_frag_tree_head *fragtree,
			      struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	struct rb_node *rb_node;
	int i;
	u32 id, nsplits;
	bool update = false;

	mutex_lock(&ci->i_fragtree_mutex);
	nsplits = le32_to_cpu(fragtree->nsplits);
	if (nsplits) {
		i = prandom_u32() % nsplits;
		id = le32_to_cpu(fragtree->splits[i].frag);
		if (!__ceph_find_frag(ci, id))
			update = true;
	} else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) {
		rb_node = rb_first(&ci->i_fragtree);
		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
		if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node))
			update = true;
	}
	if (!update && dirinfo) {
		id = le32_to_cpu(dirinfo->frag);
		if (id != __ceph_choose_frag(ci, id, NULL, NULL))
			update = true;
	}
	if (!update)
		goto out_unlock;

	dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode));
	rb_node = rb_first(&ci->i_fragtree);
	for (i = 0; i < nsplits; i++) {
		id = le32_to_cpu(fragtree->splits[i].frag);
		frag = NULL;
		while (rb_node) {
			frag = rb_entry(rb_node, struct ceph_inode_frag, node);
			if (ceph_frag_compare(frag->frag, id) >= 0) {
				if (frag->frag != id)
					frag = NULL;
				else
					rb_node = rb_next(rb_node);
				break;
			}
			rb_node = rb_next(rb_node);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
			frag = NULL;
		}
		if (!frag) {
			frag = __get_or_create_frag(ci, id);
			if (IS_ERR(frag))
				continue;
		}
		frag->split_by = le32_to_cpu(fragtree->splits[i].by);
		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
	}
	while (rb_node) {
		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
		rb_node = rb_next(rb_node);
		rb_erase(&frag->node, &ci->i_fragtree);
		kfree(frag);
	}
out_unlock:
	mutex_unlock(&ci->i_fragtree_mutex);
	return 0;
}

/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
	struct ceph_inode_info *ci;
	int i;

	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
	if (!ci)
		return NULL;

	dout("alloc_inode %p\n", &ci->vfs_inode);

	spin_lock_init(&ci->i_ceph_lock);

	ci->i_version = 0;
	ci->i_inline_version = 0;
	ci->i_time_warp_seq = 0;
	ci->i_ceph_flags = 0;
	atomic64_set(&ci->i_ordered_count, 1);
	atomic64_set(&ci->i_release_count, 1);
	atomic64_set(&ci->i_complete_seq[0], 0);
	atomic64_set(&ci->i_complete_seq[1], 0);
	ci->i_symlink = NULL;

	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));

	ci->i_fragtree = RB_ROOT;
	mutex_init(&ci->i_fragtree_mutex);

	ci->i_xattrs.blob = NULL;
	ci->i_xattrs.prealloc_blob = NULL;
	ci->i_xattrs.dirty = false;
	ci->i_xattrs.index = RB_ROOT;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.version = 0;
	ci->i_xattrs.index_version = 0;

	ci->i_caps = RB_ROOT;
	ci->i_auth_cap = NULL;
	ci->i_dirty_caps = 0;
	ci->i_flushing_caps = 0;
	INIT_LIST_HEAD(&ci->i_dirty_item);
	INIT_LIST_HEAD(&ci->i_flushing_item);
	ci->i_prealloc_cap_flush = NULL;
	ci->i_cap_flush_tree = RB_ROOT;
	init_waitqueue_head(&ci->i_cap_wq);
	ci->i_hold_caps_min = 0;
	ci->i_hold_caps_max = 0;
	INIT_LIST_HEAD(&ci->i_cap_delay_list);
	INIT_LIST_HEAD(&ci->i_cap_snaps);
	ci->i_head_snapc = NULL;
	ci->i_snap_caps = 0;

	for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
		ci->i_nr_by_mode[i] = 0;

	mutex_init(&ci->i_truncate_mutex);
	ci->i_truncate_seq = 0;
	ci->i_truncate_size = 0;
	ci->i_truncate_pending = 0;

	ci->i_max_size = 0;
	ci->i_reported_size = 0;
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	ci->i_pin_ref = 0;
	ci->i_rd_ref = 0;
	ci->i_rdcache_ref = 0;
	ci->i_wr_ref = 0;
	ci->i_wb_ref = 0;
	ci->i_wrbuffer_ref = 0;
	ci->i_wrbuffer_ref_head = 0;
	ci->i_shared_gen = 0;
	ci->i_rdcache_gen = 0;
	ci->i_rdcache_revoking = 0;

	INIT_LIST_HEAD(&ci->i_unsafe_writes);
	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
	INIT_LIST_HEAD(&ci->i_unsafe_iops);
	spin_lock_init(&ci->i_unsafe_lock);

	ci->i_snap_realm = NULL;
	INIT_LIST_HEAD(&ci->i_snap_realm_item);
	INIT_LIST_HEAD(&ci->i_snap_flush_item);

	INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
	INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);

	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

	ceph_fscache_inode_init(ci);

	return &ci->vfs_inode;
}

static void ceph_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct ceph_inode_info *ci = ceph_inode(inode);

	kfree(ci->i_symlink);
	kmem_cache_free(ceph_inode_cachep, ci);
}

void ceph_destroy_inode(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	struct rb_node *n;

	dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

	ceph_fscache_unregister_inode_cookie(ci);

	ceph_queue_caps_release(inode);

	/*
	 * we may still have a snap_realm reference if there are stray
	 * caps in i_snap_caps.
	 */
	if (ci->i_snap_realm) {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
		struct ceph_snap_realm *realm = ci->i_snap_realm;

		dout(" dropping residual ref to snap realm %p\n", realm);
		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}

	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
		frag = rb_entry(n, struct ceph_inode_frag, node);
		rb_erase(n, &ci->i_fragtree);
		kfree(frag);
	}

	__ceph_destroy_xattrs(ci);
	if (ci->i_xattrs.blob)
		ceph_buffer_put(ci->i_xattrs.blob);
	if (ci->i_xattrs.prealloc_blob)
		ceph_buffer_put(ci->i_xattrs.prealloc_blob);

	call_rcu(&inode->i_rcu, ceph_i_callback);
}

int ceph_drop_inode(struct inode *inode)
{
	/*
	 * Positive dentry and corresponding inode are always accompanied
	 * in MDS reply.  So no need to keep inode in the cache after
	 * dropping all its aliases.
	 */
	return 1;
}

/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up to date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
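/*
 * For example: if we hold Fw and have written past the size the MDS
 * last saw, our (larger) local size wins; if another client calls
 * truncate(), the MDS bumps truncate_seq and its values win, and we
 * may need to queue an async truncate of our page cache.
 */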
int ceph_fill_file_size(struct inode *inode, int issued,
			u32 truncate_seq, u64 truncate_size, u64 size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int queue_trunc = 0;

	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
	    (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
		dout("size %lld -> %llu\n", inode->i_size, size);
		inode->i_size = size;
		inode->i_blocks = (size + (1<<9) - 1) >> 9;
		ci->i_reported_size = size;
		if (truncate_seq != ci->i_truncate_seq) {
			dout("truncate_seq %u -> %u\n",
			     ci->i_truncate_seq, truncate_seq);
			ci->i_truncate_seq = truncate_seq;

			/* the MDS should have revoked these caps */
			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
					       CEPH_CAP_FILE_RD |
					       CEPH_CAP_FILE_WR |
					       CEPH_CAP_FILE_LAZYIO));
			/*
			 * If we hold relevant caps, or in the case where we're
			 * not the only client referencing this file and we
			 * don't hold those caps, then we need to check whether
			 * the file is either opened or mmaped
			 */
			if ((issued & (CEPH_CAP_FILE_CACHE|
				       CEPH_CAP_FILE_BUFFER)) ||
			    mapping_mapped(inode->i_mapping) ||
			    __ceph_caps_file_wanted(ci)) {
				ci->i_truncate_pending++;
				queue_trunc = 1;
			}
		}
	}
	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
	    ci->i_truncate_size != truncate_size) {
		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
		     truncate_size);
		ci->i_truncate_size = truncate_size;
	}

	if (queue_trunc)
		ceph_fscache_invalidate(inode);

	return queue_trunc;
}

void ceph_fill_file_time(struct inode *inode, int issued,
			 u64 time_warp_seq, struct timespec *ctime,
			 struct timespec *mtime, struct timespec *atime)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int warn = 0;

	if (issued & (CEPH_CAP_FILE_EXCL|
		      CEPH_CAP_FILE_WR|
		      CEPH_CAP_FILE_BUFFER|
		      CEPH_CAP_AUTH_EXCL|
		      CEPH_CAP_XATTR_EXCL)) {
		if (timespec_compare(ctime, &inode->i_ctime) > 0) {
			dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
			     ctime->tv_sec, ctime->tv_nsec);
			inode->i_ctime = *ctime;
		}
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
			/* the MDS did a utimes() */
			dout("mtime %ld.%09ld -> %ld.%09ld "
			     "tw %d -> %d\n",
			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
			     mtime->tv_sec, mtime->tv_nsec,
			     ci->i_time_warp_seq, (int)time_warp_seq);

			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else if (time_warp_seq == ci->i_time_warp_seq) {
			/* nobody did utimes(); take the max */
			if (timespec_compare(mtime, &inode->i_mtime) > 0) {
				dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_mtime.tv_sec,
				     inode->i_mtime.tv_nsec,
				     mtime->tv_sec, mtime->tv_nsec);
				inode->i_mtime = *mtime;
			}
			if (timespec_compare(atime, &inode->i_atime) > 0) {
				dout("atime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_atime.tv_sec,
				     inode->i_atime.tv_nsec,
				     atime->tv_sec, atime->tv_nsec);
				inode->i_atime = *atime;
			}
		} else if (issued & CEPH_CAP_FILE_EXCL) {
			/* we did a utimes(); ignore mds values */
		} else {
			warn = 1;
		}
	} else {
		/* we have no write|excl caps; whatever the MDS says is true */
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
			inode->i_ctime = *ctime;
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else {
			warn = 1;
		}
	}
	if (warn) /* time_warp_seq shouldn't go backwards */
		dout("%p mds time_warp_seq %llu < %u\n",
		     inode, time_warp_seq, ci->i_time_warp_seq);
}

/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
static int fill_inode(struct inode *inode, struct page *locked_page,
		      struct ceph_mds_reply_info_in *iinfo,
		      struct ceph_mds_reply_dirfrag *dirinfo,
		      struct ceph_mds_session *session,
		      unsigned long ttl_from, int cap_fmode,
		      struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_mds_reply_inode *info = iinfo->in;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int issued = 0, implemented, new_issued;
	struct timespec mtime, atime, ctime;
	struct ceph_buffer *xattr_blob = NULL;
	struct ceph_cap *new_cap = NULL;
	int err = 0;
	bool wake = false;
	bool queue_trunc = false;
	bool new_version = false;
	bool fill_inline = false;

	dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
	     ci->i_version);

	/* prealloc new cap struct */
	if (info->cap.caps && ceph_snap(inode) == CEPH_NOSNAP)
		new_cap = ceph_get_cap(mdsc, caps_reservation);

	/*
	 * prealloc xattr data, if it looks like we'll need it.  only
	 * if len > 4 (meaning there are actually xattrs; the first 4
	 * bytes are the xattr count).
	 */
	if (iinfo->xattr_len > 4) {
		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
		if (!xattr_blob)
			pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
			       iinfo->xattr_len);
	}

	spin_lock(&ci->i_ceph_lock);

	/*
	 * provided version will be odd if inode value is projected,
	 * even if stable.  skip the update if we have newer stable
	 * info (ours>=theirs, e.g. due to racing mds replies), unless
	 * we are getting projected (unstable) info (in which case the
	 * version is odd, and we want ours>theirs).
	 *   us   them
	 *   2    2     skip
	 *   3    2     skip
	 *   3    3     update
	 */
	if (ci->i_version == 0 ||
	    ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
	     le64_to_cpu(info->version) > (ci->i_version & ~1)))
		new_version = true;

	issued = __ceph_caps_issued(ci, &implemented);
	issued |= implemented | __ceph_caps_dirty(ci);
	new_issued = ~issued & le32_to_cpu(info->cap.caps);

	/* update inode */
	ci->i_version = le64_to_cpu(info->version);
	inode->i_version++;
	inode->i_rdev = le32_to_cpu(info->rdev);
	/* directories have fl_stripe_unit set to zero */
	if (le32_to_cpu(info->layout.fl_stripe_unit))
		inode->i_blkbits =
			fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
	else
		inode->i_blkbits = CEPH_BLOCK_SHIFT;

	if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
	    (issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = le32_to_cpu(info->mode);
		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kgid(&init_user_ns, inode->i_gid));
	}

	if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
	    (issued & CEPH_CAP_LINK_EXCL) == 0)
		set_nlink(inode, le32_to_cpu(info->nlink));

	if (new_version || (new_issued & CEPH_CAP_ANY_RD)) {
		/* be careful with mtime, atime, size */
		ceph_decode_timespec(&atime, &info->atime);
		ceph_decode_timespec(&mtime, &info->mtime);
		ceph_decode_timespec(&ctime, &info->ctime);
		ceph_fill_file_time(inode, issued,
				    le32_to_cpu(info->time_warp_seq),
				    &ctime, &mtime, &atime);
	}

	if (new_version ||
	    (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
		if (ci->i_layout.fl_pg_pool != info->layout.fl_pg_pool)
			ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;
		ci->i_layout = info->layout;

		queue_trunc = ceph_fill_file_size(inode, issued,
					le32_to_cpu(info->truncate_seq),
					le64_to_cpu(info->truncate_size),
					le64_to_cpu(info->size));
		/* only update max_size on auth cap */
		if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
		    ci->i_max_size != le64_to_cpu(info->max_size)) {
			dout("max_size %lld -> %llu\n", ci->i_max_size,
			     le64_to_cpu(info->max_size));
			ci->i_max_size = le64_to_cpu(info->max_size);
		}
	}

	/* xattrs */
	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
	if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) &&
	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = xattr_blob;
		if (xattr_blob)
			memcpy(ci->i_xattrs.blob->vec.iov_base,
			       iinfo->xattr_data, iinfo->xattr_len);
		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
		ceph_forget_all_cached_acls(inode);
		xattr_blob = NULL;
	}

	inode->i_mapping->a_ops = &ceph_aops;

	switch (inode->i_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFBLK:
	case S_IFCHR:
	case S_IFSOCK:
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		inode->i_op = &ceph_file_iops;
		break;
	case S_IFREG:
		inode->i_op = &ceph_file_iops;
		inode->i_fop = &ceph_file_fops;
		break;
	case S_IFLNK:
		inode->i_op = &ceph_symlink_iops;
		if (!ci->i_symlink) {
			u32 symlen = iinfo->symlink_len;
			char *sym;

			spin_unlock(&ci->i_ceph_lock);

			err = -EINVAL;
			if (WARN_ON(symlen != inode->i_size))
				goto out;

			err = -ENOMEM;
			sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
			if (!sym)
				goto out;

			spin_lock(&ci->i_ceph_lock);
			if (!ci->i_symlink)
				ci->i_symlink = sym;
			else
				kfree(sym); /* lost a race */
		}
		inode->i_link = ci->i_symlink;
		break;
	case S_IFDIR:
		inode->i_op = &ceph_dir_iops;
		inode->i_fop = &ceph_dir_fops;

		ci->i_dir_layout = iinfo->dir_layout;

		ci->i_files = le64_to_cpu(info->files);
		ci->i_subdirs = le64_to_cpu(info->subdirs);
		ci->i_rbytes = le64_to_cpu(info->rbytes);
		ci->i_rfiles = le64_to_cpu(info->rfiles);
		ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
		ceph_decode_timespec(&ci->i_rctime, &info->rctime);
		break;
	default:
		pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
		       ceph_vinop(inode), inode->i_mode);
	}

	/* were we issued a capability? */
	if (info->cap.caps) {
		if (ceph_snap(inode) == CEPH_NOSNAP) {
			unsigned caps = le32_to_cpu(info->cap.caps);
			ceph_add_cap(inode, session,
				     le64_to_cpu(info->cap.cap_id),
				     cap_fmode, caps,
				     le32_to_cpu(info->cap.wanted),
				     le32_to_cpu(info->cap.seq),
				     le32_to_cpu(info->cap.mseq),
				     le64_to_cpu(info->cap.realm),
				     info->cap.flags, &new_cap);

			/* set dir completion flag? */
			if (S_ISDIR(inode->i_mode) &&
			    ci->i_files == 0 && ci->i_subdirs == 0 &&
			    (caps & CEPH_CAP_FILE_SHARED) &&
			    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
			    !__ceph_dir_is_complete(ci)) {
				dout(" marking %p complete (empty)\n", inode);
				i_size_write(inode, 0);
				__ceph_dir_set_complete(ci,
					atomic64_read(&ci->i_release_count),
					atomic64_read(&ci->i_ordered_count));
			}

			wake = true;
		} else {
			dout(" %p got snap_caps %s\n", inode,
			     ceph_cap_string(le32_to_cpu(info->cap.caps)));
			ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
			if (cap_fmode >= 0)
				__ceph_get_fmode(ci, cap_fmode);
		}
	} else if (cap_fmode >= 0) {
		pr_warn("mds issued no caps on %llx.%llx\n",
			ceph_vinop(inode));
		__ceph_get_fmode(ci, cap_fmode);
	}

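	/*
	 * If the reply carries a newer version of inlined file data and
	 * we can use it (we were given a locked page to fill, or hold
	 * caps that let us cache it), copy it into the page cache once
	 * i_ceph_lock is dropped; see the ceph_fill_inline_data() call
	 * below.
	 */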
	if (iinfo->inline_version > 0 &&
	    iinfo->inline_version >= ci->i_inline_version) {
		int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
		ci->i_inline_version = iinfo->inline_version;
		if (ci->i_inline_version != CEPH_INLINE_NONE &&
		    (locked_page ||
		     (le32_to_cpu(info->cap.caps) & cache_caps)))
			fill_inline = true;
	}

	spin_unlock(&ci->i_ceph_lock);

	if (fill_inline)
		ceph_fill_inline_data(inode, locked_page,
				      iinfo->inline_data, iinfo->inline_len);

	if (wake)
		wake_up_all(&ci->i_cap_wq);

	/* queue truncate if we saw i_size decrease */
	if (queue_trunc)
		ceph_queue_vmtruncate(inode);

	/* populate frag tree */
	if (S_ISDIR(inode->i_mode))
		ceph_fill_fragtree(inode, &info->fragtree, dirinfo);

	/* update delegation info? */
	if (dirinfo)
		ceph_fill_dirfrag(inode, dirinfo);

	err = 0;
out:
	if (new_cap)
		ceph_put_cap(mdsc, new_cap);
	if (xattr_blob)
		ceph_buffer_put(xattr_blob);
	return err;
}

/*
 * caller should hold session s_mutex.
 */
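/*
 * Lease durations arrive from the MDS in milliseconds; they are
 * converted to jiffies (duration * HZ / 1000) to compute d_time (the
 * absolute expiry) and the halfway point after which a renewal should
 * be requested.
 */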
static void update_dentry_lease(struct dentry *dentry,
				struct ceph_mds_reply_lease *lease,
				struct ceph_mds_session *session,
				unsigned long from_time)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	unsigned long duration = le32_to_cpu(lease->duration_ms);
	unsigned long ttl = from_time + (duration * HZ) / 1000;
	unsigned long half_ttl = from_time + (duration * HZ / 2) / 1000;
	struct inode *dir;

	/* only track leases on regular dentries */
	if (dentry->d_op != &ceph_dentry_ops)
		return;

	spin_lock(&dentry->d_lock);
	dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
	     dentry, duration, ttl);

	/* make lease_shared_gen match directory */
	dir = d_inode(dentry->d_parent);
	di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

	if (duration == 0)
		goto out_unlock;

	if (di->lease_gen == session->s_cap_gen &&
	    time_before(ttl, dentry->d_time))
		goto out_unlock;  /* we already have a newer lease. */

	if (di->lease_session && di->lease_session != session)
		goto out_unlock;

	ceph_dentry_lru_touch(dentry);

	if (!di->lease_session)
		di->lease_session = ceph_get_mds_session(session);
	di->lease_gen = session->s_cap_gen;
	di->lease_seq = le32_to_cpu(lease->seq);
	di->lease_renew_after = half_ttl;
	di->lease_renew_from = 0;
	dentry->d_time = ttl;
out_unlock:
	spin_unlock(&dentry->d_lock);
	return;
}

/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 *
 * we will only rehash the resulting dentry if @prehash is
 * true; @prehash will be set to false (for the benefit of
 * the caller) if we fail.
 */
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
				    bool *prehash)
{
	struct dentry *realdn;

	BUG_ON(d_inode(dn));

	/* dn must be unhashed */
	if (!d_unhashed(dn))
		d_drop(dn);
	realdn = d_splice_alias(in, dn);
	if (IS_ERR(realdn)) {
		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
		if (prehash)
			*prehash = false; /* don't rehash on error */
		dn = realdn; /* note realdn contains the error */
		goto out;
	} else if (realdn) {
		dout("dn %p (%d) spliced with %p (%d) "
		     "inode %p ino %llx.%llx\n",
		     dn, d_count(dn),
		     realdn, d_count(realdn),
		     d_inode(realdn), ceph_vinop(d_inode(realdn)));
		dput(dn);
		dn = realdn;
	} else {
		BUG_ON(!ceph_dentry(dn));
		dout("dn %p attached to %p ino %llx.%llx\n",
		     dn, d_inode(dn), ceph_vinop(d_inode(dn)));
	}
	if ((!prehash || *prehash) && d_unhashed(dn))
		d_rehash(dn);
out:
	return dn;
}

/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain
 *         a directory inode along with a dentry.
 *   and/or a target inode
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
		    struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct inode *in = NULL;
	struct ceph_vino vino;
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	int err = 0;

	dout("fill_trace %p is_dentry %d is_target %d\n", req,
	     rinfo->head->is_dentry, rinfo->head->is_target);

#if 0
	/*
	 * Debugging hook:
	 *
	 * If we resend completed ops to a recovering mds, we get no
	 * trace.  Since that is very rare, pretend this is the case
	 * to ensure the 'no trace' handlers in the callers behave.
	 *
	 * Fill in inodes unconditionally to avoid breaking cap
	 * invariants.
	 */
	if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
		pr_info("fill_trace faking empty trace on %lld %s\n",
			req->r_tid, ceph_mds_op_name(rinfo->head->op));
		if (rinfo->head->is_dentry) {
			rinfo->head->is_dentry = 0;
			err = fill_inode(req->r_locked_dir,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1);
		}
		if (rinfo->head->is_target) {
			rinfo->head->is_target = 0;
			ininfo = rinfo->targeti.in;
			vino.ino = le64_to_cpu(ininfo->ino);
			vino.snap = le64_to_cpu(ininfo->snapid);
			in = ceph_get_inode(sb, vino);
			err = fill_inode(in, &rinfo->targeti, NULL,
					 session, req->r_request_started,
					 req->r_fmode);
			iput(in);
		}
	}
#endif

	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
		dout("fill_trace reply is empty!\n");
		if (rinfo->head->result == 0 && req->r_locked_dir)
			ceph_invalidate_dir_request(req);
		return 0;
	}

	if (rinfo->head->is_dentry) {
		struct inode *dir = req->r_locked_dir;

		if (dir) {
			err = fill_inode(dir, NULL,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1,
					 &req->r_caps_reservation);
			if (err < 0)
				goto done;
		} else {
			WARN_ON_ONCE(1);
		}

		if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME) {
			struct qstr dname;
			struct dentry *dn, *parent;

			BUG_ON(!rinfo->head->is_target);
			BUG_ON(req->r_dentry);

			parent = d_find_any_alias(dir);
			BUG_ON(!parent);

			dname.name = rinfo->dname;
			dname.len = rinfo->dname_len;
			dname.hash = full_name_hash(dname.name, dname.len);
			vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
			vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
retry_lookup:
			dn = d_lookup(parent, &dname);
			dout("d_lookup on parent=%p name=%.*s got %p\n",
			     parent, dname.len, dname.name, dn);

			if (!dn) {
				dn = d_alloc(parent, &dname);
				dout("d_alloc %p '%.*s' = %p\n", parent,
				     dname.len, dname.name, dn);
				if (dn == NULL) {
					dput(parent);
					err = -ENOMEM;
					goto done;
				}
				err = ceph_init_dentry(dn);
				if (err < 0) {
					dput(dn);
					dput(parent);
					goto done;
				}
			} else if (d_really_is_positive(dn) &&
				   (ceph_ino(d_inode(dn)) != vino.ino ||
				    ceph_snap(d_inode(dn)) != vino.snap)) {
				dout(" dn %p points to wrong inode %p\n",
				     dn, d_inode(dn));
				d_delete(dn);
				dput(dn);
				goto retry_lookup;
			}

			req->r_dentry = dn;
			dput(parent);
		}
	}

	if (rinfo->head->is_target) {
		vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
		vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

		in = ceph_get_inode(sb, vino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			goto done;
		}
		req->r_target_inode = in;

		err = fill_inode(in, req->r_locked_page, &rinfo->targeti, NULL,
				 session, req->r_request_started,
				 (!req->r_aborted && rinfo->head->result == 0) ?
				 req->r_fmode : -1,
				 &req->r_caps_reservation);
		if (err < 0) {
			pr_err("fill_inode badness %p %llx.%llx\n",
			       in, ceph_vinop(in));
			goto done;
		}
	}

	/*
	 * ignore null lease/binding on snapdir ENOENT, or else we
	 * will have trouble splicing in the virtual snapdir later
	 */
	if (rinfo->head->is_dentry && !req->r_aborted &&
	    req->r_locked_dir &&
	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
					       fsc->mount_options->snapdir_name,
					       req->r_dentry->d_name.len))) {
		/*
		 * lookup link rename   : null -> possibly existing inode
		 * mknod symlink mkdir  : null -> new inode
		 * unlink               : linked -> null
		 */
		struct inode *dir = req->r_locked_dir;
		struct dentry *dn = req->r_dentry;
		bool have_dir_cap, have_lease;

		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(d_inode(dn->d_parent) != dir);
		BUG_ON(ceph_ino(dir) !=
		       le64_to_cpu(rinfo->diri.in->ino));
		BUG_ON(ceph_snap(dir) !=
		       le64_to_cpu(rinfo->diri.in->snapid));

		/* do we have a lease on the whole dir? */
		have_dir_cap =
			(le32_to_cpu(rinfo->diri.in->cap.caps) &
			 CEPH_CAP_FILE_SHARED);

		/* do we have a dn lease? */
		have_lease = have_dir_cap ||
			le32_to_cpu(rinfo->dlease->duration_ms);
		if (!have_lease)
			dout("fill_trace  no dentry lease or dir cap\n");

		/* rename? */
		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
			struct inode *olddir = req->r_old_dentry_dir;
			BUG_ON(!olddir);

			dout(" src %p '%pd' dst %p '%pd'\n",
			     req->r_old_dentry,
			     req->r_old_dentry,
			     dn, dn);
			dout("fill_trace doing d_move %p -> %p\n",
			     req->r_old_dentry, dn);

			/* d_move screws up sibling dentries' offsets */
			ceph_dir_clear_ordered(dir);
			ceph_dir_clear_ordered(olddir);

			d_move(req->r_old_dentry, dn);
			dout(" src %p '%pd' dst %p '%pd'\n",
			     req->r_old_dentry,
			     req->r_old_dentry,
			     dn, dn);

			/* ensure target dentry is invalidated, despite
			   rehashing bug in vfs_rename_dir */
			ceph_invalidate_dentry_lease(dn);

			dout("dn %p gets new offset %lld\n", req->r_old_dentry,
			     ceph_dentry(req->r_old_dentry)->offset);

			dn = req->r_old_dentry;  /* use old_dentry */
		}

		/* null dentry? */
		if (!rinfo->head->is_target) {
			dout("fill_trace null dentry\n");
			if (d_really_is_positive(dn)) {
				ceph_dir_clear_ordered(dir);
				dout("d_delete %p\n", dn);
				d_delete(dn);
			} else {
				dout("d_instantiate %p NULL\n", dn);
				d_instantiate(dn, NULL);
				if (have_lease && d_unhashed(dn))
					d_rehash(dn);
				update_dentry_lease(dn, rinfo->dlease,
						    session,
						    req->r_request_started);
			}
			goto done;
		}

		/* attach proper inode */
		if (d_really_is_negative(dn)) {
			ceph_dir_clear_ordered(dir);
			ihold(in);
			dn = splice_dentry(dn, in, &have_lease);
			if (IS_ERR(dn)) {
				err = PTR_ERR(dn);
				goto done;
			}
			req->r_dentry = dn;  /* may have spliced */
		} else if (d_really_is_positive(dn) && d_inode(dn) != in) {
			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
			     dn, d_inode(dn), ceph_vinop(d_inode(dn)),
			     ceph_vinop(in));
			have_lease = false;
		}

		if (have_lease)
			update_dentry_lease(dn, rinfo->dlease, session,
					    req->r_request_started);
		dout(" final dn %p\n", dn);
	} else if (!req->r_aborted &&
		   (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
		    req->r_op == CEPH_MDS_OP_MKSNAP)) {
		struct dentry *dn = req->r_dentry;
		struct inode *dir = req->r_locked_dir;

		/* fill out a snapdir LOOKUPSNAP dentry */
		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
		dout(" linking snapped dir %p to dn %p\n", in, dn);
		ceph_dir_clear_ordered(dir);
		ihold(in);
		dn = splice_dentry(dn, in, NULL);
		if (IS_ERR(dn)) {
			err = PTR_ERR(dn);
			goto done;
		}
		req->r_dentry = dn;  /* may have spliced */
	}
done:
	dout("fill_trace done err=%d\n", err);
	return err;
}

/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
					   struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	int i, err = 0;

	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_vino vino;
		struct inode *in;
		int rc;

		vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
		vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);

		in = ceph_get_inode(req->r_dentry->d_sb, vino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			dout("new_inode badness got %d\n", err);
			continue;
		}
		rc = fill_inode(in, NULL, &rinfo->dir_in[i], NULL, session,
				req->r_request_started, -1,
				&req->r_caps_reservation);
		if (rc < 0) {
			pr_err("fill_inode badness on %p got %d\n", in, rc);
			err = rc;
			continue;
		}
	}

	return err;
}

void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
{
	if (ctl->page) {
		kunmap(ctl->page);
		page_cache_release(ctl->page);
		ctl->page = NULL;
	}
}

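/*
 * The readdir cache stores dentry pointers in the directory's page
 * cache, nsize = PAGE_CACHE_SIZE / sizeof(struct dentry *) entries per
 * page.  With 4K pages and 8-byte pointers, for example, readdir
 * position i lands in page i / 512, slot i % 512.
 */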
static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
			      struct ceph_readdir_cache_control *ctl,
			      struct ceph_mds_request *req)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	unsigned nsize = PAGE_CACHE_SIZE / sizeof(struct dentry*);
	unsigned idx = ctl->index % nsize;
	pgoff_t pgoff = ctl->index / nsize;

	if (!ctl->page || pgoff != page_index(ctl->page)) {
		ceph_readdir_cache_release(ctl);
		if (idx == 0)
			ctl->page = grab_cache_page(&dir->i_data, pgoff);
		else
			ctl->page = find_lock_page(&dir->i_data, pgoff);
		if (!ctl->page) {
			ctl->index = -1;
			return idx == 0 ? -ENOMEM : 0;
		}
		/* reading/filling the cache are serialized by
		 * i_mutex, no need to use page lock */
		unlock_page(ctl->page);
		ctl->dentries = kmap(ctl->page);
		if (idx == 0)
			memset(ctl->dentries, 0, PAGE_CACHE_SIZE);
	}

	if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
	    req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) {
		dout("readdir cache dn %p idx %d\n", dn, ctl->index);
		ctl->dentries[idx] = dn;
		ctl->index++;
	} else {
		dout("disable readdir cache\n");
		ctl->index = -1;
	}
	return 0;
}

int ceph_readdir_prepopulate(struct ceph_mds_request *req,
			     struct ceph_mds_session *session)
{
	struct dentry *parent = req->r_dentry;
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct qstr dname;
	struct dentry *dn;
	struct inode *in;
	int err = 0, ret, i;
	struct inode *snapdir = NULL;
	struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
	struct ceph_dentry_info *di;
	u32 frag = le32_to_cpu(rhead->args.readdir.frag);
	struct ceph_readdir_cache_control cache_ctl = {};

	if (req->r_aborted)
		return readdir_prepopulate_inodes_only(req, session);

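	/*
	 * Within the leftmost fragment, readdir offsets 0 and 1 are
	 * reserved for the "." and ".." entries, so real entries start
	 * at offset 2; entries in any other fragment start at offset 0.
	 */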
	if (rinfo->dir_dir &&
	    le32_to_cpu(rinfo->dir_dir->frag) != frag) {
		dout("readdir_prepopulate got new frag %x -> %x\n",
		     frag, le32_to_cpu(rinfo->dir_dir->frag));
		frag = le32_to_cpu(rinfo->dir_dir->frag);
		if (ceph_frag_is_leftmost(frag))
			req->r_readdir_offset = 2;
		else
			req->r_readdir_offset = 0;
	}

	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
		snapdir = ceph_get_snapdir(d_inode(parent));
		parent = d_find_alias(snapdir);
		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
		     rinfo->dir_nr, parent);
	} else {
		dout("readdir_prepopulate %d items under dn %p\n",
		     rinfo->dir_nr, parent);
		if (rinfo->dir_dir)
			ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
	}

	if (ceph_frag_is_leftmost(frag) && req->r_readdir_offset == 2) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		struct ceph_inode_info *ci = ceph_inode(d_inode(parent));
		req->r_dir_release_cnt = atomic64_read(&ci->i_release_count);
		req->r_dir_ordered_cnt = atomic64_read(&ci->i_ordered_count);
		req->r_readdir_cache_idx = 0;
	}

	cache_ctl.index = req->r_readdir_cache_idx;

	/* FIXME: release caps/leases if error occurs */
	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_vino vino;

		dname.name = rinfo->dir_dname[i];
		dname.len = rinfo->dir_dname_len[i];
		dname.hash = full_name_hash(dname.name, dname.len);

		vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
		vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);

retry_lookup:
		dn = d_lookup(parent, &dname);
		dout("d_lookup on parent=%p name=%.*s got %p\n",
		     parent, dname.len, dname.name, dn);

		if (!dn) {
			dn = d_alloc(parent, &dname);
			dout("d_alloc %p '%.*s' = %p\n", parent,
			     dname.len, dname.name, dn);
			if (dn == NULL) {
				dout("d_alloc badness\n");
				err = -ENOMEM;
				goto out;
			}
			ret = ceph_init_dentry(dn);
			if (ret < 0) {
				dput(dn);
				err = ret;
				goto out;
			}
		} else if (d_really_is_positive(dn) &&
			   (ceph_ino(d_inode(dn)) != vino.ino ||
			    ceph_snap(d_inode(dn)) != vino.snap)) {
			dout(" dn %p points to wrong inode %p\n",
			     dn, d_inode(dn));
			d_delete(dn);
			dput(dn);
			goto retry_lookup;
		}

		/* inode */
		if (d_really_is_positive(dn)) {
			in = d_inode(dn);
		} else {
			in = ceph_get_inode(parent->d_sb, vino);
			if (IS_ERR(in)) {
				dout("new_inode badness\n");
				d_drop(dn);
				dput(dn);
				err = PTR_ERR(in);
				goto out;
			}
		}

		ret = fill_inode(in, NULL, &rinfo->dir_in[i], NULL, session,
				 req->r_request_started, -1,
				 &req->r_caps_reservation);
		if (ret < 0) {
			pr_err("fill_inode badness on %p\n", in);
			if (d_really_is_negative(dn))
				iput(in);
			d_drop(dn);
			err = ret;
			goto next_item;
		}

		if (d_really_is_negative(dn)) {
			struct dentry *realdn = splice_dentry(dn, in, NULL);
			if (IS_ERR(realdn)) {
				err = PTR_ERR(realdn);
				d_drop(dn);
				goto next_item;
			}
			dn = realdn;
		}

		di = dn->d_fsdata;
		di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset);

		update_dentry_lease(dn, rinfo->dir_dlease[i],
				    req->r_session,
				    req->r_request_started);

		if (err == 0 && cache_ctl.index >= 0) {
			ret = fill_readdir_cache(d_inode(parent), dn,
						 &cache_ctl, req);
			if (ret < 0)
				err = ret;
		}
next_item:
		if (dn)
			dput(dn);
	}
out:
	if (err == 0) {
		req->r_did_prepopulate = true;
		req->r_readdir_cache_idx = cache_ctl.index;
	}
	ceph_readdir_cache_release(&cache_ctl);
	if (snapdir) {
		iput(snapdir);
		dput(parent);
	}
	dout("readdir_prepopulate done\n");
	return err;
}

int ceph_inode_set_size(struct inode *inode, loff_t size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret = 0;

	spin_lock(&ci->i_ceph_lock);
	dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
	inode->i_size = size;
	inode->i_blocks = (size + (1 << 9) - 1) >> 9;

	/* tell the MDS if we are approaching max_size */
	if ((size << 1) >= ci->i_max_size &&
	    (ci->i_reported_size << 1) < ci->i_max_size)
		ret = 1;

	spin_unlock(&ci->i_ceph_lock);
	return ret;
}

/*
 * Write back inode data in a worker thread.  (This can't be done
 * in the message handler context.)
 */
void ceph_queue_writeback(struct inode *inode)
{
	ihold(inode);
	if (queue_work(ceph_inode_to_client(inode)->wb_wq,
		       &ceph_inode(inode)->i_wb_work)) {
		dout("ceph_queue_writeback %p\n", inode);
	} else {
		dout("ceph_queue_writeback %p failed\n", inode);
		iput(inode);
	}
}

static void ceph_writeback_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_wb_work);
	struct inode *inode = &ci->vfs_inode;

	dout("writeback %p\n", inode);
	filemap_fdatawrite(&inode->i_data);
	iput(inode);
}

/*
 * queue an async invalidation
 */
void ceph_queue_invalidate(struct inode *inode)
{
	ihold(inode);
	if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
		       &ceph_inode(inode)->i_pg_inv_work)) {
		dout("ceph_queue_invalidate %p\n", inode);
	} else {
		dout("ceph_queue_invalidate %p failed\n", inode);
		iput(inode);
	}
}

/*
 * Invalidate inode pages in a worker thread.  (This can't be done
 * in the message handler context.)
 */
static void ceph_invalidate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_pg_inv_work);
	struct inode *inode = &ci->vfs_inode;
	u32 orig_gen;
	int check = 0;

	mutex_lock(&ci->i_truncate_mutex);
	spin_lock(&ci->i_ceph_lock);
	dout("invalidate_pages %p gen %d revoking %d\n", inode,
	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
			check = 1;
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		goto out;
	}
	orig_gen = ci->i_rdcache_gen;
	spin_unlock(&ci->i_ceph_lock);

	truncate_pagecache(inode, 0);

	spin_lock(&ci->i_ceph_lock);
	if (orig_gen == ci->i_rdcache_gen &&
	    orig_gen == ci->i_rdcache_revoking) {
		dout("invalidate_pages %p gen %d successful\n", inode,
		     ci->i_rdcache_gen);
		ci->i_rdcache_revoking--;
		check = 1;
	} else {
		dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
		     inode, orig_gen, ci->i_rdcache_gen,
		     ci->i_rdcache_revoking);
		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
			check = 1;
	}
	spin_unlock(&ci->i_ceph_lock);
	mutex_unlock(&ci->i_truncate_mutex);
out:
	if (check)
		ceph_check_caps(ci, 0, NULL);
	iput(inode);
}


/*
 * called by trunc_wq;
 *
 * We truncate in a separate worker thread because the caller (the
 * message handler path) cannot afford to block.
 */
static void ceph_vmtruncate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_vmtruncate_work);
	struct inode *inode = &ci->vfs_inode;

	dout("vmtruncate_work %p\n", inode);
	__ceph_do_pending_vmtruncate(inode);
	iput(inode);
}

/*
 * Queue an async vmtruncate.  If we fail to queue work, we will handle
 * the truncation the next time we call __ceph_do_pending_vmtruncate.
 */
void ceph_queue_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	ihold(inode);

	if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
		       &ci->i_vmtruncate_work)) {
		dout("ceph_queue_vmtruncate %p\n", inode);
	} else {
		dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
		     inode, ci->i_truncate_pending);
		iput(inode);
	}
}

/*
 * Make sure any pending truncation is applied before doing anything
 * that may depend on it.
 */
void __ceph_do_pending_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 to;
	int wrbuffer_refs, finish = 0;

	mutex_lock(&ci->i_truncate_mutex);
retry:
	spin_lock(&ci->i_ceph_lock);
	if (ci->i_truncate_pending == 0) {
		dout("__do_pending_vmtruncate %p none pending\n", inode);
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		return;
	}

	/*
	 * make sure any dirty snapped pages are flushed before we
	 * possibly truncate them.. so write AND block!
	 */
	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
		dout("__do_pending_vmtruncate %p flushing snaps first\n",
		     inode);
		spin_unlock(&ci->i_ceph_lock);
		filemap_write_and_wait_range(&inode->i_data, 0,
					     inode->i_sb->s_maxbytes);
		goto retry;
	}

	/* there should be no reader or writer */
	WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);

	to = ci->i_truncate_size;
	wrbuffer_refs = ci->i_wrbuffer_ref;
	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
	     ci->i_truncate_pending, to);
	spin_unlock(&ci->i_ceph_lock);

	truncate_pagecache(inode, to);

	spin_lock(&ci->i_ceph_lock);
	if (to == ci->i_truncate_size) {
		ci->i_truncate_pending = 0;
		finish = 1;
	}
	spin_unlock(&ci->i_ceph_lock);
	if (!finish)
		goto retry;

	mutex_unlock(&ci->i_truncate_mutex);

	if (wrbuffer_refs == 0)
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);

	wake_up_all(&ci->i_cap_wq);
}

/*
 * symlinks
 */
static const struct inode_operations ceph_symlink_iops = {
	.readlink = generic_readlink,
	.follow_link = simple_follow_link,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
};
1776
1777 /*
1778 * setattr
1779 */
__ceph_setattr(struct dentry * dentry,struct iattr * attr)1780 int __ceph_setattr(struct dentry *dentry, struct iattr *attr)
1781 {
1782 struct inode *inode = d_inode(dentry);
1783 struct ceph_inode_info *ci = ceph_inode(inode);
1784 const unsigned int ia_valid = attr->ia_valid;
1785 struct ceph_mds_request *req;
1786 struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
1787 struct ceph_cap_flush *prealloc_cf;
1788 int issued;
1789 int release = 0, dirtied = 0;
1790 int mask = 0;
1791 int err = 0;
1792 int inode_dirty_flags = 0;
1793 bool lock_snap_rwsem = false;
1794
1795 if (ceph_snap(inode) != CEPH_NOSNAP)
1796 return -EROFS;
1797
1798 err = inode_change_ok(inode, attr);
1799 if (err != 0)
1800 return err;
1801
1802 prealloc_cf = ceph_alloc_cap_flush();
1803 if (!prealloc_cf)
1804 return -ENOMEM;
1805
1806 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
1807 USE_AUTH_MDS);
1808 if (IS_ERR(req)) {
1809 ceph_free_cap_flush(prealloc_cf);
1810 return PTR_ERR(req);
1811 }
1812
1813 spin_lock(&ci->i_ceph_lock);
1814 issued = __ceph_caps_issued(ci, NULL);
1815
	if (!ci->i_head_snapc &&
	    (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) {
		lock_snap_rwsem = true;
		if (!down_read_trylock(&mdsc->snap_rwsem)) {
			spin_unlock(&ci->i_ceph_lock);
			down_read(&mdsc->snap_rwsem);
			spin_lock(&ci->i_ceph_lock);
			issued = __ceph_caps_issued(ci, NULL);
		}
	}

	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));

	if (ia_valid & ATTR_UID) {
		dout("setattr %p uid %d -> %d\n", inode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kuid(&init_user_ns, attr->ia_uid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_uid = attr->ia_uid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !uid_eq(attr->ia_uid, inode->i_uid)) {
			req->r_args.setattr.uid = cpu_to_le32(
				from_kuid(&init_user_ns, attr->ia_uid));
			mask |= CEPH_SETATTR_UID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_GID) {
		dout("setattr %p gid %d -> %d\n", inode,
		     from_kgid(&init_user_ns, inode->i_gid),
		     from_kgid(&init_user_ns, attr->ia_gid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_gid = attr->ia_gid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !gid_eq(attr->ia_gid, inode->i_gid)) {
			req->r_args.setattr.gid = cpu_to_le32(
				from_kgid(&init_user_ns, attr->ia_gid));
			mask |= CEPH_SETATTR_GID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_MODE) {
		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
		     attr->ia_mode);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_mode = attr->ia_mode;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_mode != inode->i_mode) {
			inode->i_mode = attr->ia_mode;
			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
			mask |= CEPH_SETATTR_MODE;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}

	if (ia_valid & ATTR_ATIME) {
		dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_atime,
					    &attr->ia_atime) < 0) {
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
			ceph_encode_timespec(&req->r_args.setattr.atime,
					     &attr->ia_atime);
			mask |= CEPH_SETATTR_ATIME;
			release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_MTIME) {
		dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_mtime,
					    &attr->ia_mtime) < 0) {
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
			ceph_encode_timespec(&req->r_args.setattr.mtime,
					     &attr->ia_mtime);
			mask |= CEPH_SETATTR_MTIME;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_SIZE) {
		dout("setattr %p size %lld -> %lld\n", inode,
		     inode->i_size, attr->ia_size);
		if ((issued & CEPH_CAP_FILE_EXCL) &&
		    attr->ia_size > inode->i_size) {
			inode->i_size = attr->ia_size;
			inode->i_blocks =
				(attr->ia_size + (1 << 9) - 1) >> 9;
			inode->i_ctime = attr->ia_ctime;
			ci->i_reported_size = attr->ia_size;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   attr->ia_size != inode->i_size) {
			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
			req->r_args.setattr.old_size =
				cpu_to_le64(inode->i_size);
			mask |= CEPH_SETATTR_SIZE;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
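
	/*
	 * Note: extending the file under CEPH_CAP_FILE_EXCL is applied
	 * purely locally; any other size change (notably a truncate) is
	 * delegated to the MDS so it can be coordinated cluster-wide,
	 * and the local page cache is trimmed afterwards via
	 * __ceph_do_pending_vmtruncate().
	 */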

	/* these do nothing */
	if (ia_valid & ATTR_CTIME) {
		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
		dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
		     only ? "ctime only" : "ignored");
		inode->i_ctime = attr->ia_ctime;
		if (only) {
			/*
			 * if kernel wants to dirty ctime but nothing else,
			 * we need to choose a cap to dirty under, or do
			 * an almost-no-op setattr
			 */
			if (issued & CEPH_CAP_AUTH_EXCL)
				dirtied |= CEPH_CAP_AUTH_EXCL;
			else if (issued & CEPH_CAP_FILE_EXCL)
				dirtied |= CEPH_CAP_FILE_EXCL;
			else if (issued & CEPH_CAP_XATTR_EXCL)
				dirtied |= CEPH_CAP_XATTR_EXCL;
			else
				mask |= CEPH_SETATTR_CTIME;
		}
	}
	if (ia_valid & ATTR_FILE)
		dout("setattr %p ATTR_FILE ... hrm!\n", inode);

	if (dirtied) {
		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
							   &prealloc_cf);
		inode->i_ctime = CURRENT_TIME;
	}

	release &= issued;
	spin_unlock(&ci->i_ceph_lock);
	if (lock_snap_rwsem)
		up_read(&mdsc->snap_rwsem);

	if (inode_dirty_flags)
		__mark_inode_dirty(inode, inode_dirty_flags);

	if (mask) {
		req->r_inode = inode;
		ihold(inode);
		req->r_inode_drop = release;
		req->r_args.setattr.mask = cpu_to_le32(mask);
		req->r_num_caps = 1;
		err = ceph_mdsc_do_request(mdsc, NULL, req);
	}
	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
	     ceph_cap_string(dirtied), mask);

	ceph_mdsc_put_request(req);
	ceph_free_cap_flush(prealloc_cf);

	if (err >= 0 && (mask & CEPH_SETATTR_SIZE))
		__ceph_do_pending_vmtruncate(inode);

	return err;
}

int ceph_setattr(struct dentry *dentry, struct iattr *attr)
{
	int err;

	err = __ceph_setattr(dentry, attr);

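	/*
	 * A successful chmod changes which POSIX ACL entries are in
	 * effect, so refresh the ACLs to match the new mode bits.
	 */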
	if (err >= 0 && (attr->ia_valid & ATTR_MODE))
		err = posix_acl_chmod(d_inode(dentry), attr->ia_mode);

	return err;
}

/*
 * Verify that we have a lease on the given mask.  If not,
 * do a getattr against an MDS.
 */
int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
		      int mask, bool force)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		dout("do_getattr inode %p SNAPDIR\n", inode);
		return 0;
	}

	dout("do_getattr inode %p mask %s mode 0%o\n",
	     inode, ceph_cap_string(mask), inode->i_mode);
	if (!force && ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
		return 0;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_args.getattr.mask = cpu_to_le32(mask);
	req->r_locked_page = locked_page;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (locked_page && err == 0) {
		u64 inline_version = req->r_reply_info.targeti.inline_version;
		if (inline_version == 0) {
			/* the reply is supposed to contain inline data */
			err = -EINVAL;
		} else if (inline_version == CEPH_INLINE_NONE) {
			err = -ENODATA;
		} else {
			err = req->r_reply_info.targeti.inline_len;
		}
	}
	ceph_mdsc_put_request(req);
	dout("do_getattr result=%d\n", err);
	return err;
}
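
/*
 * Illustrative sketch (guarded out, never compiled): one plausible way
 * a caller might fetch inline file data through __ceph_do_getattr().
 * The 'page' variable here is hypothetical; in real callers it is a
 * page the caller already holds locked.
 */
#if 0
	int ret = __ceph_do_getattr(inode, page, CEPH_STAT_CAP_INLINE_DATA,
				    true);
	if (ret == -ENODATA) {
		/* file has no inline data; fall back to a normal read */
	} else if (ret >= 0) {
		/* 'ret' bytes of inline data were copied into 'page' */
	}
#endif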

/*
 * Check inode permissions.  We verify we have a valid value for
 * the AUTH cap, then call the generic handler.
 */
int ceph_permission(struct inode *inode, int mask)
{
	int err;

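	/*
	 * MAY_NOT_BLOCK means we are in RCU-walk mode and must not
	 * sleep; fetching caps from the MDS can block, so ask the VFS
	 * to retry in ref-walk mode.
	 */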
	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;

	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);

	if (!err)
		err = generic_permission(inode, mask);
	return err;
}

/*
 * Get all attributes.  Hopefully someday we'll have a statlite()
 * and can limit the fields we require to be accurate.
 */
int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode = d_inode(dentry);
	struct ceph_inode_info *ci = ceph_inode(inode);
	int err;

	err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL, false);
	if (!err) {
		generic_fillattr(inode, stat);
		stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
		if (ceph_snap(inode) != CEPH_NOSNAP)
			stat->dev = ceph_snap(inode);
		else
			stat->dev = 0;
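		/*
		 * A directory's st_size is synthetic: the recursive
		 * byte count when the "rbytes" mount option is set,
		 * otherwise the number of entries (files plus
		 * subdirectories).
		 */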
		if (S_ISDIR(inode->i_mode)) {
			if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
						RBYTES))
				stat->size = ci->i_rbytes;
			else
				stat->size = ci->i_files + ci->i_subdirs;
			stat->blocks = 0;
			stat->blksize = 65536;
		}
	}
	return err;
}