// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/ceph/striper.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/iversion.h>
#include <linux/ktime.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "io.h"
#include "metric.h"

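/*
 * Convert open flags from the local numeric values to the CEPH_O_* wire
 * encoding, since flag values vary between architectures.  Flags with no
 * wire equivalent are dropped (and logged via dout).
 */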
static __le32 ceph_flags_sys2wire(u32 flags)
{
        u32 wire_flags = 0;

        switch (flags & O_ACCMODE) {
        case O_RDONLY:
                wire_flags |= CEPH_O_RDONLY;
                break;
        case O_WRONLY:
                wire_flags |= CEPH_O_WRONLY;
                break;
        case O_RDWR:
                wire_flags |= CEPH_O_RDWR;
                break;
        }

        flags &= ~O_ACCMODE;

#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }

        ceph_sys2wire(O_CREAT);
        ceph_sys2wire(O_EXCL);
        ceph_sys2wire(O_TRUNC);
        ceph_sys2wire(O_DIRECTORY);
        ceph_sys2wire(O_NOFOLLOW);

#undef ceph_sys2wire

        if (flags)
                dout("unused open flags: %x\n", flags);

        return cpu_to_le32(wire_flags);
}

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */

/*
 * How many pages to get in one call to iov_iter_get_pages().  This
 * determines the size of the on-stack array used as a buffer.
 */
#define ITER_GET_BVECS_PAGES	64

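/*
 * Pin the user pages backing @iter and describe them in @bvecs, which
 * must have room for @maxsize bytes worth of pages.  Returns the number
 * of bytes covered, or a negative error if nothing could be pinned.
 */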
static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
                                struct bio_vec *bvecs)
{
        size_t size = 0;
        int bvec_idx = 0;

        if (maxsize > iov_iter_count(iter))
                maxsize = iov_iter_count(iter);

        while (size < maxsize) {
                struct page *pages[ITER_GET_BVECS_PAGES];
                ssize_t bytes;
                size_t start;
                int idx = 0;

                bytes = iov_iter_get_pages(iter, pages, maxsize - size,
                                           ITER_GET_BVECS_PAGES, &start);
                if (bytes < 0)
                        return size ?: bytes;

                iov_iter_advance(iter, bytes);
                size += bytes;

                for ( ; bytes; idx++, bvec_idx++) {
                        struct bio_vec bv = {
                                .bv_page = pages[idx],
                                .bv_len = min_t(int, bytes, PAGE_SIZE - start),
                                .bv_offset = start,
                        };

                        bvecs[bvec_idx] = bv;
                        bytes -= bv.bv_len;
                        start = 0;
                }
        }

        return size;
}

/*
 * iov_iter_get_pages() only considers one iov_iter segment, no matter
 * what maxsize or maxpages are given.  For ITER_BVEC that is a single
 * page.
 *
 * Attempt to get up to @maxsize bytes worth of pages from @iter.
 * Return the number of bytes in the created bio_vec array, or an error.
 */
static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
                                    struct bio_vec **bvecs, int *num_bvecs)
{
        struct bio_vec *bv;
        size_t orig_count = iov_iter_count(iter);
        ssize_t bytes;
        int npages;

        iov_iter_truncate(iter, maxsize);
        npages = iov_iter_npages(iter, INT_MAX);
        iov_iter_reexpand(iter, orig_count);

        /*
         * __iter_get_bvecs() may populate only part of the array -- zero it
         * out.
         */
        bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
        if (!bv)
                return -ENOMEM;

        bytes = __iter_get_bvecs(iter, maxsize, bv);
        if (bytes < 0) {
                /*
                 * No pages were pinned -- just free the array.
                 */
                kvfree(bv);
                return bytes;
        }

        *bvecs = bv;
        *num_bvecs = npages;
        return bytes;
}

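/*
 * Undo iter_get_bvecs_alloc(): unpin the pages, optionally marking them
 * dirty first (for reads into user buffers), and free the array.
 */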
static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
{
        int i;

        for (i = 0; i < num_bvecs; i++) {
                if (bvecs[i].bv_page) {
                        if (should_dirty)
                                set_page_dirty_lock(bvecs[i].bv_page);
                        put_page(bvecs[i].bv_page);
                }
        }
        kvfree(bvecs);
}

/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(sb);
        struct ceph_mds_request *req;
        int want_auth = USE_ANY_MDS;
        int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

        if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
                want_auth = USE_AUTH_MDS;

        req = ceph_mdsc_create_request(mdsc, op, want_auth);
        if (IS_ERR(req))
                goto out;
        req->r_fmode = ceph_flags_to_mode(flags);
        req->r_args.open.flags = ceph_flags_sys2wire(flags);
        req->r_args.open.mode = cpu_to_le32(create_mode);
out:
        return req;
}

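/*
 * Allocate the per-open private data hanging off file->private_data:
 * a ceph_dir_file_info for directories, a plain ceph_file_info for
 * regular files.  Takes an fmode reference on the inode, dropped later
 * by ceph_release().
 */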
static int ceph_init_file_info(struct inode *inode, struct file *file,
                               int fmode, bool isdir)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_file_info *fi;

        dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
             inode->i_mode, isdir ? "dir" : "regular");
        BUG_ON(inode->i_fop->release != ceph_release);

        if (isdir) {
                struct ceph_dir_file_info *dfi =
                        kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
                if (!dfi)
                        return -ENOMEM;

                file->private_data = dfi;
                fi = &dfi->file_info;
                dfi->next_offset = 2;
                dfi->readdir_cache_idx = -1;
        } else {
                fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
                if (!fi)
                        return -ENOMEM;

                file->private_data = fi;
        }

        ceph_get_fmode(ci, fmode, 1);
        fi->fmode = fmode;

        spin_lock_init(&fi->rw_contexts_lock);
        INIT_LIST_HEAD(&fi->rw_contexts);
        fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);

        return 0;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
        int ret = 0;

        switch (inode->i_mode & S_IFMT) {
        case S_IFREG:
                ceph_fscache_register_inode_cookie(inode);
                ceph_fscache_file_set_cookie(inode, file);
                fallthrough;
        case S_IFDIR:
                ret = ceph_init_file_info(inode, file, fmode,
                                          S_ISDIR(inode->i_mode));
                break;

        case S_IFLNK:
                dout("init_file %p %p 0%o (symlink)\n", inode, file,
                     inode->i_mode);
                break;

        default:
                dout("init_file %p %p 0%o (special)\n", inode, file,
                     inode->i_mode);
                /*
                 * we need to drop the open ref now, since we don't
                 * have .release set to ceph_release.
                 */
                BUG_ON(inode->i_fop->release == ceph_release);

                /* call the proper open fop */
                ret = inode->i_fop->open(inode, file);
        }
        return ret;
}

/*
 * Try to renew caps after the session gets killed.
 */
int ceph_renew_caps(struct inode *inode, int fmode)
{
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_mds_request *req;
        int err, flags, wanted;

        spin_lock(&ci->i_ceph_lock);
        __ceph_touch_fmode(ci, mdsc, fmode);
        wanted = __ceph_caps_file_wanted(ci);
        if (__ceph_is_any_real_caps(ci) &&
            (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
                int issued = __ceph_caps_issued(ci, NULL);
                spin_unlock(&ci->i_ceph_lock);
                dout("renew caps %p want %s issued %s updating mds_wanted\n",
                     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
                ceph_check_caps(ci, 0, NULL);
                return 0;
        }
        spin_unlock(&ci->i_ceph_lock);

        flags = 0;
        if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
                flags = O_RDWR;
        else if (wanted & CEPH_CAP_FILE_RD)
                flags = O_RDONLY;
        else if (wanted & CEPH_CAP_FILE_WR)
                flags = O_WRONLY;
#ifdef O_LAZY
        if (wanted & CEPH_CAP_FILE_LAZYIO)
                flags |= O_LAZY;
#endif

        req = prepare_open_request(inode->i_sb, flags, 0);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }

        req->r_inode = inode;
        ihold(inode);
        req->r_num_caps = 1;

        err = ceph_mdsc_do_request(mdsc, NULL, req);
        ceph_mdsc_put_request(req);
out:
        dout("renew caps %p open result=%d\n", inode, err);
        return err < 0 ? err : 0;
}

/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        struct ceph_file_info *fi = file->private_data;
        int err;
        int flags, fmode, wanted;

        if (fi) {
                dout("open file %p is already opened\n", file);
                return 0;
        }

        /* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
        flags = file->f_flags & ~(O_CREAT|O_EXCL);
        if (S_ISDIR(inode->i_mode))
                flags = O_DIRECTORY;  /* mds likes to know */

        dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
             ceph_vinop(inode), file, flags, file->f_flags);
        fmode = ceph_flags_to_mode(flags);
        wanted = ceph_caps_for_mode(fmode);

        /* snapped files are read-only */
        if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
                return -EROFS;

        /* trivially open snapdir */
        if (ceph_snap(inode) == CEPH_SNAPDIR) {
                return ceph_init_file(inode, file, fmode);
        }

        /*
         * No need to block if we have caps on the auth MDS (for
         * write) or any MDS (for read).  Update wanted set
         * asynchronously.
         */
        spin_lock(&ci->i_ceph_lock);
        if (__ceph_is_any_real_caps(ci) &&
            (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
                int mds_wanted = __ceph_caps_mds_wanted(ci, true);
                int issued = __ceph_caps_issued(ci, NULL);

                dout("open %p fmode %d want %s issued %s using existing\n",
                     inode, fmode, ceph_cap_string(wanted),
                     ceph_cap_string(issued));
                __ceph_touch_fmode(ci, mdsc, fmode);
                spin_unlock(&ci->i_ceph_lock);

                /* adjust wanted? */
                if ((issued & wanted) != wanted &&
                    (mds_wanted & wanted) != wanted &&
                    ceph_snap(inode) != CEPH_SNAPDIR)
                        ceph_check_caps(ci, 0, NULL);

                return ceph_init_file(inode, file, fmode);
        } else if (ceph_snap(inode) != CEPH_NOSNAP &&
                   (ci->i_snap_caps & wanted) == wanted) {
                __ceph_touch_fmode(ci, mdsc, fmode);
                spin_unlock(&ci->i_ceph_lock);
                return ceph_init_file(inode, file, fmode);
        }

        spin_unlock(&ci->i_ceph_lock);

        dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
        req = prepare_open_request(inode->i_sb, flags, 0);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }
        req->r_inode = inode;
        ihold(inode);

        req->r_num_caps = 1;
        err = ceph_mdsc_do_request(mdsc, NULL, req);
        if (!err)
                err = ceph_init_file(inode, file, req->r_fmode);
        ceph_mdsc_put_request(req);
        dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
        return err;
}

/* Clone the layout from a synchronous create, if the dir now has Dc caps */
static void
cache_file_layout(struct inode *dst, struct inode *src)
{
        struct ceph_inode_info *cdst = ceph_inode(dst);
        struct ceph_inode_info *csrc = ceph_inode(src);

        spin_lock(&cdst->i_ceph_lock);
        if ((__ceph_caps_issued(cdst, NULL) & CEPH_CAP_DIR_CREATE) &&
            !ceph_file_layout_is_valid(&cdst->i_cached_layout)) {
                memcpy(&cdst->i_cached_layout, &csrc->i_layout,
                       sizeof(cdst->i_cached_layout));
                rcu_assign_pointer(cdst->i_cached_layout.pool_ns,
                                   ceph_try_get_string(csrc->i_layout.pool_ns));
        }
        spin_unlock(&cdst->i_ceph_lock);
}

/*
 * Try to set up an async create.  We need caps, a file layout, an inode
 * number, and either a lease on the dentry or complete dir info.  If any
 * of those criteria are not satisfied, then return false and the caller
 * can go synchronous.
 */
static int try_prep_async_create(struct inode *dir, struct dentry *dentry,
                                 struct ceph_file_layout *lo, u64 *pino)
{
        struct ceph_inode_info *ci = ceph_inode(dir);
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        int got = 0, want = CEPH_CAP_FILE_EXCL | CEPH_CAP_DIR_CREATE;
        u64 ino;

        spin_lock(&ci->i_ceph_lock);
        /* No auth cap means no chance for Dc caps */
        if (!ci->i_auth_cap)
                goto no_async;

        /* Any delegated inos? */
        if (xa_empty(&ci->i_auth_cap->session->s_delegated_inos))
                goto no_async;

        if (!ceph_file_layout_is_valid(&ci->i_cached_layout))
                goto no_async;

        if ((__ceph_caps_issued(ci, NULL) & want) != want)
                goto no_async;

        if (d_in_lookup(dentry)) {
                if (!__ceph_dir_is_complete(ci))
                        goto no_async;
                spin_lock(&dentry->d_lock);
                di->lease_shared_gen = atomic_read(&ci->i_shared_gen);
                spin_unlock(&dentry->d_lock);
        } else if (atomic_read(&ci->i_shared_gen) !=
                   READ_ONCE(di->lease_shared_gen)) {
                goto no_async;
        }

        ino = ceph_get_deleg_ino(ci->i_auth_cap->session);
        if (!ino)
                goto no_async;

        *pino = ino;
        ceph_take_cap_refs(ci, want, false);
        memcpy(lo, &ci->i_cached_layout, sizeof(*lo));
        rcu_assign_pointer(lo->pool_ns,
                           ceph_try_get_string(ci->i_cached_layout.pool_ns));
        got = want;
no_async:
        spin_unlock(&ci->i_ceph_lock);
        return got;
}

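/*
 * Return a delegated inode number to the auth MDS session after a failed
 * async create attempt, so that it can be reused.
 */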
static void restore_deleg_ino(struct inode *dir, u64 ino)
{
        struct ceph_inode_info *ci = ceph_inode(dir);
        struct ceph_mds_session *s = NULL;

        spin_lock(&ci->i_ceph_lock);
        if (ci->i_auth_cap)
                s = ceph_get_mds_session(ci->i_auth_cap->session);
        spin_unlock(&ci->i_ceph_lock);
        if (s) {
                int err = ceph_restore_deleg_ino(s, ino);
                if (err)
                        pr_warn("ceph: unable to restore delegated ino 0x%llx to session: %d\n",
                                ino, err);
                ceph_put_mds_session(s);
        }
}

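/*
 * Completion callback for an async create.  -EJUKEBOX means the MDS wants
 * the create retried synchronously; any other failure is recorded on the
 * affected mappings and the dentry is dropped, since the locally
 * instantiated inode was never confirmed by the MDS.
 */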
static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
                                 struct ceph_mds_request *req)
{
        int result = req->r_err ? req->r_err :
                        le32_to_cpu(req->r_reply_info.head->result);

        if (result == -EJUKEBOX)
                goto out;

        mapping_set_error(req->r_parent->i_mapping, result);

        if (result) {
                struct dentry *dentry = req->r_dentry;
                int pathlen = 0;
                u64 base = 0;
                char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
                                                  &base, 0);

                ceph_dir_clear_complete(req->r_parent);
                if (!d_unhashed(dentry))
                        d_drop(dentry);

                /* FIXME: start returning I/O errors on all accesses? */
                pr_warn("ceph: async create failure path=(%llx)%s result=%d!\n",
                        base, IS_ERR(path) ? "<<bad>>" : path, result);
                ceph_mdsc_free_path(path, pathlen);
        }

        if (req->r_target_inode) {
                struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
                u64 ino = ceph_vino(req->r_target_inode).ino;

                if (req->r_deleg_ino != ino)
                        pr_warn("%s: inode number mismatch! err=%d deleg_ino=0x%llx target=0x%llx\n",
                                __func__, req->r_err, req->r_deleg_ino, ino);
                mapping_set_error(req->r_target_inode->i_mapping, result);

                spin_lock(&ci->i_ceph_lock);
                if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) {
                        ci->i_ceph_flags &= ~CEPH_I_ASYNC_CREATE;
                        wake_up_bit(&ci->i_ceph_flags, CEPH_ASYNC_CREATE_BIT);
                }
                ceph_kick_flushing_inode_caps(req->r_session, ci);
                spin_unlock(&ci->i_ceph_lock);
        } else {
                pr_warn("%s: no req->r_target_inode for 0x%llx\n", __func__,
                        req->r_deleg_ino);
        }
out:
        ceph_mdsc_release_dir_caps(req);
}

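/*
 * Instantiate the result of an async create locally instead of waiting
 * for the MDS reply: synthesize a reply (ceph_mds_reply_inode) from the
 * delegated inode number, the cached layout and the current time, fill
 * the in-core inode from it, and finish the open.
 */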
static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
                                    struct file *file, umode_t mode,
                                    struct ceph_mds_request *req,
                                    struct ceph_acl_sec_ctx *as_ctx,
                                    struct ceph_file_layout *lo)
{
        int ret;
        char xattr_buf[4];
        struct ceph_mds_reply_inode in = { };
        struct ceph_mds_reply_info_in iinfo = { .in = &in };
        struct ceph_inode_info *ci = ceph_inode(dir);
        struct inode *inode;
        struct timespec64 now;
        struct ceph_string *pool_ns;
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
        struct ceph_vino vino = { .ino = req->r_deleg_ino,
                                  .snap = CEPH_NOSNAP };

        ktime_get_real_ts64(&now);

        inode = ceph_get_inode(dentry->d_sb, vino);
        if (IS_ERR(inode))
                return PTR_ERR(inode);

        iinfo.inline_version = CEPH_INLINE_NONE;
        iinfo.change_attr = 1;
        ceph_encode_timespec64(&iinfo.btime, &now);

        if (req->r_pagelist) {
                iinfo.xattr_len = req->r_pagelist->length;
                iinfo.xattr_data = req->r_pagelist->mapped_tail;
        } else {
                /* fake it */
                iinfo.xattr_len = ARRAY_SIZE(xattr_buf);
                iinfo.xattr_data = xattr_buf;
                memset(iinfo.xattr_data, 0, iinfo.xattr_len);
        }

        in.ino = cpu_to_le64(vino.ino);
        in.snapid = cpu_to_le64(CEPH_NOSNAP);
        in.version = cpu_to_le64(1); // ???
        in.cap.caps = in.cap.wanted = cpu_to_le32(CEPH_CAP_ALL_FILE);
        in.cap.cap_id = cpu_to_le64(1);
        in.cap.realm = cpu_to_le64(ci->i_snap_realm->ino);
        in.cap.flags = CEPH_CAP_FLAG_AUTH;
        in.ctime = in.mtime = in.atime = iinfo.btime;
        in.truncate_seq = cpu_to_le32(1);
        in.truncate_size = cpu_to_le64(-1ULL);
        in.xattr_version = cpu_to_le64(1);
        in.uid = cpu_to_le32(from_kuid(&init_user_ns, current_fsuid()));
        if (dir->i_mode & S_ISGID) {
                in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_gid));

                /* Directories always inherit the setgid bit. */
                if (S_ISDIR(mode))
                        mode |= S_ISGID;
                else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
                         !in_group_p(dir->i_gid) &&
                         !capable_wrt_inode_uidgid(&init_user_ns, dir, CAP_FSETID))
                        mode &= ~S_ISGID;
        } else {
                in.gid = cpu_to_le32(from_kgid(&init_user_ns, current_fsgid()));
        }
        in.mode = cpu_to_le32((u32)mode);

        in.nlink = cpu_to_le32(1);
        in.max_size = cpu_to_le64(lo->stripe_unit);

        ceph_file_layout_to_legacy(lo, &in.layout);
        /* lo is private, so pool_ns can't change */
        pool_ns = rcu_dereference_raw(lo->pool_ns);
        if (pool_ns) {
                iinfo.pool_ns_len = pool_ns->len;
                iinfo.pool_ns_data = pool_ns->str;
        }

        down_read(&mdsc->snap_rwsem);
        ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
                              req->r_fmode, NULL);
        up_read(&mdsc->snap_rwsem);
        if (ret) {
                dout("%s failed to fill inode: %d\n", __func__, ret);
                ceph_dir_clear_complete(dir);
                if (!d_unhashed(dentry))
                        d_drop(dentry);
                if (inode->i_state & I_NEW)
                        discard_new_inode(inode);
        } else {
                struct dentry *dn;

                dout("%s d_adding new inode 0x%llx to 0x%llx/%s\n", __func__,
                     vino.ino, ceph_ino(dir), dentry->d_name.name);
                ceph_dir_clear_ordered(dir);
                ceph_init_inode_acls(inode, as_ctx);
                if (inode->i_state & I_NEW) {
                        /*
                         * If it's not I_NEW, then someone created this before
                         * we got here. Assume the server is aware of it at
                         * that point and don't worry about setting
                         * CEPH_I_ASYNC_CREATE.
                         */
                        ceph_inode(inode)->i_ceph_flags = CEPH_I_ASYNC_CREATE;
                        unlock_new_inode(inode);
                }
                if (d_in_lookup(dentry) || d_really_is_negative(dentry)) {
                        if (!d_unhashed(dentry))
                                d_drop(dentry);
                        dn = d_splice_alias(inode, dentry);
                        WARN_ON_ONCE(dn && dn != dentry);
                }
                file->f_mode |= FMODE_CREATED;
                ret = finish_open(file, dentry, ceph_open);
        }
        return ret;
}

/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
                     struct file *file, unsigned flags, umode_t mode)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        struct dentry *dn;
        struct ceph_acl_sec_ctx as_ctx = {};
        bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS);
        int mask;
        int err;

        dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
             dir, dentry, dentry,
             d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

        if (dentry->d_name.len > NAME_MAX)
                return -ENAMETOOLONG;

        /*
         * Do not truncate the file, since atomic_open is called before the
         * permission check.  The caller will do the truncation afterward.
         */
        flags &= ~O_TRUNC;

        if (flags & O_CREAT) {
                if (ceph_quota_is_max_files_exceeded(dir))
                        return -EDQUOT;
                err = ceph_pre_init_acls(dir, &mode, &as_ctx);
                if (err < 0)
                        return err;
                err = ceph_security_init_secctx(dentry, mode, &as_ctx);
                if (err < 0)
                        goto out_ctx;
                /* Async create can't handle more than a page of xattrs */
                if (as_ctx.pagelist &&
                    !list_is_singular(&as_ctx.pagelist->head))
                        try_async = false;
        } else if (!d_in_lookup(dentry)) {
                /* If it's not being looked up, it's negative */
                return -ENOENT;
        }
retry:
        /* do the open */
        req = prepare_open_request(dir->i_sb, flags, mode);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out_ctx;
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
        if (ceph_security_xattr_wanted(dir))
                mask |= CEPH_CAP_XATTR_SHARED;
        req->r_args.open.mask = cpu_to_le32(mask);
        req->r_parent = dir;
        ihold(dir);

        if (flags & O_CREAT) {
                struct ceph_file_layout lo;

                req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
                req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
                if (as_ctx.pagelist) {
                        req->r_pagelist = as_ctx.pagelist;
                        as_ctx.pagelist = NULL;
                }
                if (try_async &&
                    (req->r_dir_caps =
                      try_prep_async_create(dir, dentry, &lo,
                                            &req->r_deleg_ino))) {
                        set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags);
                        req->r_args.open.flags |= cpu_to_le32(CEPH_O_EXCL);
                        req->r_callback = ceph_async_create_cb;
                        err = ceph_mdsc_submit_request(mdsc, dir, req);
                        if (!err) {
                                err = ceph_finish_async_create(dir, dentry,
                                                               file, mode, req,
                                                               &as_ctx, &lo);
                        } else if (err == -EJUKEBOX) {
                                restore_deleg_ino(dir, req->r_deleg_ino);
                                ceph_mdsc_put_request(req);
                                try_async = false;
                                ceph_put_string(rcu_dereference_raw(lo.pool_ns));
                                goto retry;
                        }
                        ceph_put_string(rcu_dereference_raw(lo.pool_ns));
                        goto out_req;
                }
        }

        set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
        err = ceph_mdsc_do_request(mdsc, (flags & O_CREAT) ? dir : NULL, req);
        if (err == -ENOENT) {
                dentry = ceph_handle_snapdir(req, dentry);
                if (IS_ERR(dentry)) {
                        err = PTR_ERR(dentry);
                        goto out_req;
                }
                err = 0;
        }

        if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);

        if (d_in_lookup(dentry)) {
                dn = ceph_finish_lookup(req, dentry, err);
                if (IS_ERR(dn))
                        err = PTR_ERR(dn);
        } else {
                /* we were given a hashed negative dentry */
                dn = NULL;
        }
        if (err)
                goto out_req;
        if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
                /* make vfs retry on splice, ENOENT, or symlink */
                dout("atomic_open finish_no_open on dn %p\n", dn);
                err = finish_no_open(file, dn);
        } else {
                dout("atomic_open finish_open on dn %p\n", dn);
                if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
                        struct inode *newino = d_inode(dentry);

                        cache_file_layout(dir, newino);
                        ceph_init_inode_acls(newino, &as_ctx);
                        file->f_mode |= FMODE_CREATED;
                }
                err = finish_open(file, dentry, ceph_open);
        }
out_req:
        ceph_mdsc_put_request(req);
out_ctx:
        ceph_release_acl_sec_ctx(&as_ctx);
        dout("atomic_open result=%d\n", err);
        return err;
}

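/*
 * Drop the fmode reference taken at open time and free the per-open
 * private data.  Waiters on caps are woken, since releasing a file can
 * shrink the wanted caps set.
 */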
int ceph_release(struct inode *inode, struct file *file)
{
        struct ceph_inode_info *ci = ceph_inode(inode);

        if (S_ISDIR(inode->i_mode)) {
                struct ceph_dir_file_info *dfi = file->private_data;
                dout("release inode %p dir file %p\n", inode, file);
                WARN_ON(!list_empty(&dfi->file_info.rw_contexts));

                ceph_put_fmode(ci, dfi->file_info.fmode, 1);

                if (dfi->last_readdir)
                        ceph_mdsc_put_request(dfi->last_readdir);
                kfree(dfi->last_name);
                kfree(dfi->dir_info);
                kmem_cache_free(ceph_dir_file_cachep, dfi);
        } else {
                struct ceph_file_info *fi = file->private_data;
                dout("release inode %p regular file %p\n", inode, file);
                WARN_ON(!list_empty(&fi->rw_contexts));

                ceph_put_fmode(ci, fi->fmode, 1);

                kmem_cache_free(ceph_file_cachep, fi);
        }

        /* wake up anyone waiting for caps on this inode */
        wake_up_all(&ci->i_cap_wq);
        return 0;
}

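/* retry_op values for ceph_read_iter() and ceph_sync_read() */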
enum {
        HAVE_RETRIED = 1,
        CHECK_EOF = 2,
        READ_INLINE = 3,
};

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.  (That's not
 * atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
                              int *retry_op)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_osd_client *osdc = &fsc->client->osdc;
        ssize_t ret;
        u64 off = iocb->ki_pos;
        u64 len = iov_iter_count(to);

        dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
             (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

        if (!len)
                return 0;
        /*
         * flush any page cache pages in this range.  this
         * will make concurrent normal and sync io slow,
         * but it will at least behave sensibly when they are
         * in sequence.
         */
        ret = filemap_write_and_wait_range(inode->i_mapping,
                                           off, off + len - 1);
        if (ret < 0)
                return ret;

        ret = 0;
        while ((len = iov_iter_count(to)) > 0) {
                struct ceph_osd_request *req;
                struct page **pages;
                int num_pages;
                size_t page_off;
                u64 i_size;
                bool more;
                int idx;
                size_t left;

                req = ceph_osdc_new_request(osdc, &ci->i_layout,
                                            ci->i_vino, off, &len, 0, 1,
                                            CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
                                            NULL, ci->i_truncate_seq,
                                            ci->i_truncate_size, false);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
                        break;
                }

                more = len < iov_iter_count(to);

                num_pages = calc_pages_for(off, len);
                page_off = off & ~PAGE_MASK;
                pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
                if (IS_ERR(pages)) {
                        ceph_osdc_put_request(req);
                        ret = PTR_ERR(pages);
                        break;
                }

                osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off,
                                                 false, false);
                ret = ceph_osdc_start_request(osdc, req, false);
                if (!ret)
                        ret = ceph_osdc_wait_request(osdc, req);

                ceph_update_read_metrics(&fsc->mdsc->metric,
                                         req->r_start_latency,
                                         req->r_end_latency,
                                         len, ret);

                ceph_osdc_put_request(req);

                i_size = i_size_read(inode);
                dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
                     off, len, ret, i_size, (more ? " MORE" : ""));

                if (ret == -ENOENT)
                        ret = 0;
                if (ret >= 0 && ret < len && (off + ret < i_size)) {
                        int zlen = min(len - ret, i_size - off - ret);
                        int zoff = page_off + ret;
                        dout("sync_read zero gap %llu~%llu\n",
                             off + ret, off + ret + zlen);
                        ceph_zero_page_vector_range(zoff, zlen, pages);
                        ret += zlen;
                }

                idx = 0;
                left = ret > 0 ? ret : 0;
                while (left > 0) {
                        size_t len, copied;
                        page_off = off & ~PAGE_MASK;
                        len = min_t(size_t, left, PAGE_SIZE - page_off);
                        SetPageUptodate(pages[idx]);
                        copied = copy_page_to_iter(pages[idx++],
                                                   page_off, len, to);
                        off += copied;
                        left -= copied;
                        if (copied < len) {
                                ret = -EFAULT;
                                break;
                        }
                }
                ceph_release_page_vector(pages, num_pages);

                if (ret < 0) {
                        if (ret == -EBLOCKLISTED)
                                fsc->blocklisted = true;
                        break;
                }

                if (off >= i_size || !more)
                        break;
        }

        if (off > iocb->ki_pos) {
                if (ret >= 0 &&
                    iov_iter_count(to) > 0 && off >= i_size_read(inode))
                        *retry_op = CHECK_EOF;
                ret = off - iocb->ki_pos;
                iocb->ki_pos = off;
        }

        dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
        return ret;
}

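/*
 * State shared by the OSD requests making up one direct-I/O aio.  The
 * kiocb is completed only when pending_reqs drops to zero.
 */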
struct ceph_aio_request {
        struct kiocb *iocb;
        size_t total_len;
        bool write;
        bool should_dirty;
        int error;
        struct list_head osd_reqs;
        unsigned num_reqs;
        atomic_t pending_reqs;
        struct timespec64 mtime;
        struct ceph_cap_flush *prealloc_cf;
};

struct ceph_aio_work {
        struct work_struct work;
        struct ceph_osd_request *req;
};

static void ceph_aio_retry_work(struct work_struct *work);

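/*
 * Called as each OSD request finishes; the last one to complete updates
 * size/caps state (for writes) and calls ki_complete() on the original
 * kiocb.
 */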
static void ceph_aio_complete(struct inode *inode,
                              struct ceph_aio_request *aio_req)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int ret;

        if (!atomic_dec_and_test(&aio_req->pending_reqs))
                return;

        if (aio_req->iocb->ki_flags & IOCB_DIRECT)
                inode_dio_end(inode);

        ret = aio_req->error;
        if (!ret)
                ret = aio_req->total_len;

        dout("ceph_aio_complete %p rc %d\n", inode, ret);

        if (ret >= 0 && aio_req->write) {
                int dirty;

                loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
                if (endoff > i_size_read(inode)) {
                        if (ceph_inode_set_size(inode, endoff))
                                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
                }

                spin_lock(&ci->i_ceph_lock);
                ci->i_inline_version = CEPH_INLINE_NONE;
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
                                               &aio_req->prealloc_cf);
                spin_unlock(&ci->i_ceph_lock);
                if (dirty)
                        __mark_inode_dirty(inode, dirty);

        }

        ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
                                                CEPH_CAP_FILE_RD));

        aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);

        ceph_free_cap_flush(aio_req->prealloc_cf);
        kfree(aio_req);
}

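/*
 * Per-OSD-request completion: zero-fill short reads, update metrics and
 * release the pinned pages.  A write that raced with a snapshot
 * (-EOLDSNAPC) is bounced to the workqueue for resubmission.
 */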
static void ceph_aio_complete_req(struct ceph_osd_request *req)
{
        int rc = req->r_result;
        struct inode *inode = req->r_inode;
        struct ceph_aio_request *aio_req = req->r_priv;
        struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
        struct ceph_client_metric *metric = &ceph_sb_to_mdsc(inode->i_sb)->metric;
        unsigned int len = osd_data->bvec_pos.iter.bi_size;

        BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
        BUG_ON(!osd_data->num_bvecs);

        dout("ceph_aio_complete_req %p rc %d bytes %u\n", inode, rc, len);

        if (rc == -EOLDSNAPC) {
                struct ceph_aio_work *aio_work;
                BUG_ON(!aio_req->write);

                aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
                if (aio_work) {
                        INIT_WORK(&aio_work->work, ceph_aio_retry_work);
                        aio_work->req = req;
                        queue_work(ceph_inode_to_client(inode)->inode_wq,
                                   &aio_work->work);
                        return;
                }
                rc = -ENOMEM;
        } else if (!aio_req->write) {
                if (rc == -ENOENT)
                        rc = 0;
                if (rc >= 0 && len > rc) {
                        struct iov_iter i;
                        int zlen = len - rc;

                        /*
                         * If the read is satisfied by a single OSD request,
                         * it can pass EOF.  Otherwise the read is within
                         * i_size.
                         */
                        if (aio_req->num_reqs == 1) {
                                loff_t i_size = i_size_read(inode);
                                loff_t endoff = aio_req->iocb->ki_pos + rc;
                                if (endoff < i_size)
                                        zlen = min_t(size_t, zlen,
                                                     i_size - endoff);
                                aio_req->total_len = rc + zlen;
                        }

                        iov_iter_bvec(&i, READ, osd_data->bvec_pos.bvecs,
                                      osd_data->num_bvecs, len);
                        iov_iter_advance(&i, rc);
                        iov_iter_zero(zlen, &i);
                }
        }

        /* r_start_latency == 0 means the request was not submitted */
        if (req->r_start_latency) {
                if (aio_req->write)
                        ceph_update_write_metrics(metric, req->r_start_latency,
                                                  req->r_end_latency, len, rc);
                else
                        ceph_update_read_metrics(metric, req->r_start_latency,
                                                 req->r_end_latency, len, rc);
        }

        put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
                  aio_req->should_dirty);
        ceph_osdc_put_request(req);

        if (rc < 0)
                cmpxchg(&aio_req->error, 0, rc);

        ceph_aio_complete(inode, aio_req);
        return;
}

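/*
 * Resubmit, from workqueue context, a write that got -EOLDSNAPC: clone
 * the original request, attach the most recent snap context, and start
 * it again.
 */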
static void ceph_aio_retry_work(struct work_struct *work)
{
        struct ceph_aio_work *aio_work =
                container_of(work, struct ceph_aio_work, work);
        struct ceph_osd_request *orig_req = aio_work->req;
        struct ceph_aio_request *aio_req = orig_req->r_priv;
        struct inode *inode = orig_req->r_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_snap_context *snapc;
        struct ceph_osd_request *req;
        int ret;

        spin_lock(&ci->i_ceph_lock);
        if (__ceph_have_pending_cap_snap(ci)) {
                struct ceph_cap_snap *capsnap =
                        list_last_entry(&ci->i_cap_snaps,
                                        struct ceph_cap_snap,
                                        ci_item);
                snapc = ceph_get_snap_context(capsnap->context);
        } else {
                BUG_ON(!ci->i_head_snapc);
                snapc = ceph_get_snap_context(ci->i_head_snapc);
        }
        spin_unlock(&ci->i_ceph_lock);

        req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1,
                                      false, GFP_NOFS);
        if (!req) {
                ret = -ENOMEM;
                req = orig_req;
                goto out;
        }

        req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
        ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
        ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);

        req->r_ops[0] = orig_req->r_ops[0];

        req->r_mtime = aio_req->mtime;
        req->r_data_offset = req->r_ops[0].extent.offset;

        ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
        if (ret) {
                ceph_osdc_put_request(req);
                req = orig_req;
                goto out;
        }

        ceph_osdc_put_request(orig_req);

        req->r_callback = ceph_aio_complete_req;
        req->r_inode = inode;
        req->r_priv = aio_req;

        ret = ceph_osdc_start_request(req->r_osdc, req, false);
out:
        if (ret < 0) {
                req->r_result = ret;
                ceph_aio_complete_req(req);
        }

        ceph_put_snap_context(snapc);
        kfree(aio_work);
}

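/*
 * Common O_DIRECT read/write path.  The iter is split into per-object
 * OSD requests on the pinned user pages.  Synchronous kiocbs issue and
 * wait for one request at a time; for aio, all requests are built first
 * and then submitted together, with completion driven by
 * ceph_aio_complete_req().
 */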
static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
                       struct ceph_snap_context *snapc,
                       struct ceph_cap_flush **pcf)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_client_metric *metric = &fsc->mdsc->metric;
        struct ceph_vino vino;
        struct ceph_osd_request *req;
        struct bio_vec *bvecs;
        struct ceph_aio_request *aio_req = NULL;
        int num_pages = 0;
        int flags;
        int ret = 0;
        struct timespec64 mtime = current_time(inode);
        size_t count = iov_iter_count(iter);
        loff_t pos = iocb->ki_pos;
        bool write = iov_iter_rw(iter) == WRITE;
        bool should_dirty = !write && iter_is_iovec(iter);

        if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
                return -EROFS;

        dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
             (write ? "write" : "read"), file, pos, (unsigned)count,
             snapc, snapc ? snapc->seq : 0);

        if (write) {
                int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
                                        pos >> PAGE_SHIFT,
                                        (pos + count - 1) >> PAGE_SHIFT);
                if (ret2 < 0)
                        dout("invalidate_inode_pages2_range returned %d\n", ret2);

                flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
        } else {
                flags = CEPH_OSD_FLAG_READ;
        }

        while (iov_iter_count(iter) > 0) {
                u64 size = iov_iter_count(iter);
                ssize_t len;

                if (write)
                        size = min_t(u64, size, fsc->mount_options->wsize);
                else
                        size = min_t(u64, size, fsc->mount_options->rsize);

                vino = ceph_vino(inode);
                req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
                                            vino, pos, &size, 0,
                                            1,
                                            write ? CEPH_OSD_OP_WRITE :
                                                    CEPH_OSD_OP_READ,
                                            flags, snapc,
                                            ci->i_truncate_seq,
                                            ci->i_truncate_size,
                                            false);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
                        break;
                }

                len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
                if (len < 0) {
                        ceph_osdc_put_request(req);
                        ret = len;
                        break;
                }
                if (len != size)
                        osd_req_op_extent_update(req, 0, len);

                /*
                 * To simplify error handling, allow AIO only when the IO is
                 * within i_size or can be satisfied by a single OSD request.
                 */
                if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
                    (len == count || pos + count <= i_size_read(inode))) {
                        aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
                        if (aio_req) {
                                aio_req->iocb = iocb;
                                aio_req->write = write;
                                aio_req->should_dirty = should_dirty;
                                INIT_LIST_HEAD(&aio_req->osd_reqs);
                                if (write) {
                                        aio_req->mtime = mtime;
                                        swap(aio_req->prealloc_cf, *pcf);
                                }
                        }
                        /* ignore error */
                }

                if (write) {
                        /*
                         * throw out any page cache pages in this range.  this
                         * may block.
                         */
                        truncate_inode_pages_range(inode->i_mapping, pos,
                                                   PAGE_ALIGN(pos + len) - 1);

                        req->r_mtime = mtime;
                }

                osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);

                if (aio_req) {
                        aio_req->total_len += len;
                        aio_req->num_reqs++;
                        atomic_inc(&aio_req->pending_reqs);

                        req->r_callback = ceph_aio_complete_req;
                        req->r_inode = inode;
                        req->r_priv = aio_req;
                        list_add_tail(&req->r_private_item, &aio_req->osd_reqs);

                        pos += len;
                        continue;
                }

                ret = ceph_osdc_start_request(req->r_osdc, req, false);
                if (!ret)
                        ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

                if (write)
                        ceph_update_write_metrics(metric, req->r_start_latency,
                                                  req->r_end_latency, len, ret);
                else
                        ceph_update_read_metrics(metric, req->r_start_latency,
                                                 req->r_end_latency, len, ret);

                size = i_size_read(inode);
                if (!write) {
                        if (ret == -ENOENT)
                                ret = 0;
                        if (ret >= 0 && ret < len && pos + ret < size) {
                                struct iov_iter i;
                                int zlen = min_t(size_t, len - ret,
                                                 size - pos - ret);

                                iov_iter_bvec(&i, READ, bvecs, num_pages, len);
                                iov_iter_advance(&i, ret);
                                iov_iter_zero(zlen, &i);
                                ret += zlen;
                        }
                        if (ret >= 0)
                                len = ret;
                }

                put_bvecs(bvecs, num_pages, should_dirty);
                ceph_osdc_put_request(req);
                if (ret < 0)
                        break;

                pos += len;
                if (!write && pos >= size)
                        break;

                if (write && pos > size) {
                        if (ceph_inode_set_size(inode, pos))
                                ceph_check_caps(ceph_inode(inode),
                                                CHECK_CAPS_AUTHONLY,
                                                NULL);
                }
        }

        if (aio_req) {
                LIST_HEAD(osd_reqs);

                if (aio_req->num_reqs == 0) {
                        kfree(aio_req);
                        return ret;
                }

                ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
                                              CEPH_CAP_FILE_RD);

                list_splice(&aio_req->osd_reqs, &osd_reqs);
                inode_dio_begin(inode);
                while (!list_empty(&osd_reqs)) {
                        req = list_first_entry(&osd_reqs,
                                               struct ceph_osd_request,
                                               r_private_item);
                        list_del_init(&req->r_private_item);
                        if (ret >= 0)
                                ret = ceph_osdc_start_request(req->r_osdc,
                                                              req, false);
                        if (ret < 0) {
                                req->r_result = ret;
                                ceph_aio_complete_req(req);
                        }
                }
                return -EIOCBQUEUED;
        }

        if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
                ret = pos - iocb->ki_pos;
                iocb->ki_pos = pos;
        }
        return ret;
}

/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
                struct ceph_snap_context *snapc)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_vino vino;
        struct ceph_osd_request *req;
        struct page **pages;
        u64 len;
        int num_pages;
        int written = 0;
        int flags;
        int ret;
        bool check_caps = false;
        struct timespec64 mtime = current_time(inode);
        size_t count = iov_iter_count(from);

        if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
                return -EROFS;

        dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
             file, pos, (unsigned)count, snapc, snapc->seq);

        ret = filemap_write_and_wait_range(inode->i_mapping,
                                           pos, pos + count - 1);
        if (ret < 0)
                return ret;

        ret = invalidate_inode_pages2_range(inode->i_mapping,
                                            pos >> PAGE_SHIFT,
                                            (pos + count - 1) >> PAGE_SHIFT);
        if (ret < 0)
                dout("invalidate_inode_pages2_range returned %d\n", ret);

        flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;

        while ((len = iov_iter_count(from)) > 0) {
                size_t left;
                int n;

                vino = ceph_vino(inode);
                req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
                                            vino, pos, &len, 0, 1,
                                            CEPH_OSD_OP_WRITE, flags, snapc,
                                            ci->i_truncate_seq,
                                            ci->i_truncate_size,
                                            false);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
                        break;
                }

                /*
                 * write from beginning of first page,
                 * regardless of io alignment
                 */
                num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

                pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
                        goto out;
                }

                left = len;
                for (n = 0; n < num_pages; n++) {
                        size_t plen = min_t(size_t, left, PAGE_SIZE);
                        ret = copy_page_from_iter(pages[n], 0, plen, from);
                        if (ret != plen) {
                                ret = -EFAULT;
                                break;
                        }
                        left -= ret;
                }

                if (ret < 0) {
                        ceph_release_page_vector(pages, num_pages);
                        goto out;
                }

                req->r_inode = inode;

                osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
                                                 false, true);

                req->r_mtime = mtime;
                ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
                if (!ret)
                        ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

                ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
                                          req->r_end_latency, len, ret);
out:
                ceph_osdc_put_request(req);
                if (ret != 0) {
                        ceph_set_error_write(ci);
                        break;
                }

                ceph_clear_error_write(ci);
                pos += len;
                written += len;
                if (pos > i_size_read(inode)) {
                        check_caps = ceph_inode_set_size(inode, pos);
                        if (check_caps)
                                ceph_check_caps(ceph_inode(inode),
                                                CHECK_CAPS_AUTHONLY,
                                                NULL);
                }

        }

        if (ret != -EOLDSNAPC && written > 0) {
                ret = written;
                iocb->ki_pos = pos;
        }
        return ret;
}

/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct file *filp = iocb->ki_filp;
        struct ceph_file_info *fi = filp->private_data;
        size_t len = iov_iter_count(to);
        struct inode *inode = file_inode(filp);
        struct ceph_inode_info *ci = ceph_inode(inode);
        bool direct_lock = iocb->ki_flags & IOCB_DIRECT;
        ssize_t ret;
        int want, got = 0;
        int retry_op = 0, read = 0;

again:
        dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
             inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

        if (direct_lock)
                ceph_start_io_direct(inode);
        else
                ceph_start_io_read(inode);

        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
        else
                want = CEPH_CAP_FILE_CACHE;
        ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1, &got);
        if (ret < 0) {
                if (iocb->ki_flags & IOCB_DIRECT)
                        ceph_end_io_direct(inode);
                else
                        ceph_end_io_read(inode);
                return ret;
        }

        if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
            (iocb->ki_flags & IOCB_DIRECT) ||
            (fi->flags & CEPH_F_SYNC)) {

                dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
                     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
                     ceph_cap_string(got));

                if (ci->i_inline_version == CEPH_INLINE_NONE) {
                        if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
                                ret = ceph_direct_read_write(iocb, to,
                                                             NULL, NULL);
                                if (ret >= 0 && ret < len)
                                        retry_op = CHECK_EOF;
                        } else {
                                ret = ceph_sync_read(iocb, to, &retry_op);
                        }
                } else {
                        retry_op = READ_INLINE;
                }
        } else {
                CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
                dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
                     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
                     ceph_cap_string(got));
                ceph_add_rw_context(fi, &rw_ctx);
                ret = generic_file_read_iter(iocb, to);
                ceph_del_rw_context(fi, &rw_ctx);
        }

        dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
             inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
        ceph_put_cap_refs(ci, got);

        if (direct_lock)
                ceph_end_io_direct(inode);
        else
                ceph_end_io_read(inode);

        if (retry_op > HAVE_RETRIED && ret >= 0) {
                int statret;
                struct page *page = NULL;
                loff_t i_size;
                if (retry_op == READ_INLINE) {
                        page = __page_cache_alloc(GFP_KERNEL);
                        if (!page)
                                return -ENOMEM;
                }

                statret = __ceph_do_getattr(inode, page,
                                            CEPH_STAT_CAP_INLINE_DATA, !!page);
                if (statret < 0) {
                        if (page)
                                __free_page(page);
                        if (statret == -ENODATA) {
                                BUG_ON(retry_op != READ_INLINE);
                                goto again;
                        }
                        return statret;
                }

                i_size = i_size_read(inode);
                if (retry_op == READ_INLINE) {
                        BUG_ON(ret > 0 || read > 0);
                        if (iocb->ki_pos < i_size &&
                            iocb->ki_pos < PAGE_SIZE) {
                                loff_t end = min_t(loff_t, i_size,
                                                   iocb->ki_pos + len);
                                end = min_t(loff_t, end, PAGE_SIZE);
                                if (statret < end)
                                        zero_user_segment(page, statret, end);
                                ret = copy_page_to_iter(page,
                                                iocb->ki_pos & ~PAGE_MASK,
                                                end - iocb->ki_pos, to);
                                iocb->ki_pos += ret;
                                read += ret;
                        }
                        if (iocb->ki_pos < i_size && read < len) {
                                size_t zlen = min_t(size_t, len - read,
                                                    i_size - iocb->ki_pos);
                                ret = iov_iter_zero(zlen, to);
                                iocb->ki_pos += ret;
                                read += ret;
                        }
                        __free_pages(page, 0);
                        return read;
                }

                /* hit EOF or hole? */
                if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
                    ret < len) {
                        dout("sync_read hit hole, ppos %lld < size %lld"
                             ", reading more\n", iocb->ki_pos, i_size);

                        read += ret;
                        len -= ret;
                        retry_op = HAVE_RETRIED;
                        goto again;
                }
        }

        if (ret >= 0)
                ret += read;

        return ret;
}

/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write... _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct ceph_file_info *fi = file->private_data;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_osd_client *osdc = &fsc->client->osdc;
        struct ceph_cap_flush *prealloc_cf;
        ssize_t count, written = 0;
        int err, want, got;
        bool direct_lock = false;
        u32 map_flags;
        u64 pool_flags;
        loff_t pos;
        loff_t limit = max(i_size_read(inode), fsc->max_file_size);

        if (ceph_snap(inode) != CEPH_NOSNAP)
                return -EROFS;

        prealloc_cf = ceph_alloc_cap_flush();
        if (!prealloc_cf)
                return -ENOMEM;

        if ((iocb->ki_flags & (IOCB_DIRECT | IOCB_APPEND)) == IOCB_DIRECT)
                direct_lock = true;

retry_snap:
        if (direct_lock)
                ceph_start_io_direct(inode);
        else
                ceph_start_io_write(inode);

        /* We can write back this queue in page reclaim */
        current->backing_dev_info = inode_to_bdi(inode);

        if (iocb->ki_flags & IOCB_APPEND) {
                err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
                if (err < 0)
                        goto out;
        }

        err = generic_write_checks(iocb, from);
        if (err <= 0)
                goto out;

        pos = iocb->ki_pos;
        if (unlikely(pos >= limit)) {
                err = -EFBIG;
                goto out;
        } else {
                iov_iter_truncate(from, limit - pos);
        }

        count = iov_iter_count(from);
        if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
                err = -EDQUOT;
                goto out;
        }

        down_read(&osdc->lock);
        map_flags = osdc->osdmap->flags;
        pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
        up_read(&osdc->lock);
        if ((map_flags & CEPH_OSDMAP_FULL) ||
            (pool_flags & CEPH_POOL_FLAG_FULL)) {
                err = -ENOSPC;
                goto out;
        }

        err = file_remove_privs(file);
        if (err)
                goto out;

        if (ci->i_inline_version != CEPH_INLINE_NONE) {
                err = ceph_uninline_data(file, NULL);
                if (err < 0)
                        goto out;
        }

        dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
             inode, ceph_vinop(inode), pos, count, i_size_read(inode));
        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
        else
                want = CEPH_CAP_FILE_BUFFER;
        got = 0;
        err = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, pos + count, &got);
        if (err < 0)
                goto out;

        err = file_update_time(file);
        if (err)
                goto out_caps;

        inode_inc_iversion_raw(inode);

        dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
             inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

        if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
            (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
            (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
                struct ceph_snap_context *snapc;
                struct iov_iter data;

                spin_lock(&ci->i_ceph_lock);
                if (__ceph_have_pending_cap_snap(ci)) {
                        struct ceph_cap_snap *capsnap =
                                list_last_entry(&ci->i_cap_snaps,
                                                struct ceph_cap_snap,
                                                ci_item);
                        snapc = ceph_get_snap_context(capsnap->context);
                } else {
                        BUG_ON(!ci->i_head_snapc);
                        snapc = ceph_get_snap_context(ci->i_head_snapc);
                }
                spin_unlock(&ci->i_ceph_lock);

                /* we might need to revert back to that point */
                data = *from;
                if (iocb->ki_flags & IOCB_DIRECT)
                        written = ceph_direct_read_write(iocb, &data, snapc,
                                                         &prealloc_cf);
                else
                        written = ceph_sync_write(iocb, &data, pos, snapc);
                if (direct_lock)
                        ceph_end_io_direct(inode);
                else
                        ceph_end_io_write(inode);
                if (written > 0)
                        iov_iter_advance(from, written);
                ceph_put_snap_context(snapc);
        } else {
                /*
                 * No need to acquire the i_truncate_mutex, because the
                 * MDS revokes Fwb caps before sending a truncate message
                 * to us.  We can't get the Fwb cap while there is a
                 * pending vmtruncate, so write and vmtruncate cannot run
                 * at the same time.
                 */
                written = generic_perform_write(file, from, pos);
                if (likely(written >= 0))
                        iocb->ki_pos = pos + written;
                ceph_end_io_write(inode);
        }

        if (written >= 0) {
                int dirty;

                spin_lock(&ci->i_ceph_lock);
                ci->i_inline_version = CEPH_INLINE_NONE;
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
                                               &prealloc_cf);
                spin_unlock(&ci->i_ceph_lock);
                if (dirty)
                        __mark_inode_dirty(inode, dirty);
                if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
                        ceph_check_caps(ci, 0, NULL);
        }

        dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
             inode, ceph_vinop(inode), pos, (unsigned)count,
             ceph_cap_string(got));
        ceph_put_cap_refs(ci, got);

        if (written == -EOLDSNAPC) {
                dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
                     inode, ceph_vinop(inode), pos, (unsigned)count);
                goto retry_snap;
        }

        if (written >= 0) {
                if ((map_flags & CEPH_OSDMAP_NEARFULL) ||
                    (pool_flags & CEPH_POOL_FLAG_NEARFULL))
                        iocb->ki_flags |= IOCB_DSYNC;
                written = generic_write_sync(iocb, written);
        }

        goto out_unlocked;
out_caps:
        ceph_put_cap_refs(ci, got);
out:
        if (direct_lock)
                ceph_end_io_direct(inode);
        else
                ceph_end_io_write(inode);
out_unlocked:
        ceph_free_cap_flush(prealloc_cf);
        current->backing_dev_info = NULL;
        return written ? written : err;
}

1892 /*
1893 * llseek. be sure to verify file size on SEEK_END.
1894 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	loff_t i_size;
	loff_t ret;

	inode_lock(inode);

	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (ret < 0)
			goto out;
	}

	i_size = i_size_read(inode);
	switch (whence) {
	case SEEK_END:
		offset += i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0) {
			ret = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
		if (offset < 0 || offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		break;
	case SEEK_HOLE:
		if (offset < 0 || offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		offset = i_size;
		break;
	}

	ret = vfs_setpos(file, offset, max(i_size, fsc->max_file_size));

out:
	inode_unlock(inode);
	return ret;
}

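/*
 * Editorial note on ceph_llseek(): CephFS does not track holes, so the
 * SEEK_DATA/SEEK_HOLE cases above treat every in-file offset as data
 * and report a single virtual hole at EOF.  Illustrative userspace
 * sketch (not part of this file):
 *
 *	off_t hole = lseek(fd, 0, SEEK_HOLE);	// == file size
 *	off_t data = lseek(fd, 0, SEEK_DATA);	// == 0 for a non-empty file
 */
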
static inline void ceph_zero_partial_page(
	struct inode *inode, loff_t offset, unsigned size)
{
	struct page *page;
	pgoff_t index = offset >> PAGE_SHIFT;

	page = find_lock_page(inode->i_mapping, index);
	if (page) {
		wait_on_page_writeback(page);
		zero_user(page, offset & (PAGE_SIZE - 1), size);
		unlock_page(page);
		put_page(page);
	}
}

static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
				      loff_t length)
{
	loff_t nearly = round_up(offset, PAGE_SIZE);
	if (offset < nearly) {
		loff_t size = nearly - offset;
		if (length < size)
			size = length;
		ceph_zero_partial_page(inode, offset, size);
		offset += size;
		length -= size;
	}
	if (length >= PAGE_SIZE) {
		loff_t size = round_down(length, PAGE_SIZE);
		truncate_pagecache_range(inode, offset, offset + size - 1);
		offset += size;
		length -= size;
	}
	if (length)
		ceph_zero_partial_page(inode, offset, length);
}

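/*
 * Worked example for ceph_zero_pagecache_range() (editorial, assuming
 * PAGE_SIZE == 4096): offset = 1000, length = 10000.
 *
 *   1. head: zero bytes 1000..4095 of the first page (3096 bytes)
 *   2. middle: truncate whole pages covering 4096..8191 (4096 bytes)
 *   3. tail: zero bytes 8192..10999 of the last page (2808 bytes)
 */
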
static int ceph_zero_partial_object(struct inode *inode,
				    loff_t offset, loff_t *length)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	int ret = 0;
	loff_t zero = 0;
	int op;

	if (!length) {
		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
		length = &zero;
	} else {
		op = CEPH_OSD_OP_ZERO;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode),
				    offset, length,
				    0, 1, op,
				    CEPH_OSD_FLAG_WRITE,
				    NULL, 0, 0, false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	req->r_mtime = inode->i_mtime;
	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret) {
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
		if (ret == -ENOENT)
			ret = 0;
	}
	ceph_osdc_put_request(req);

out:
	return ret;
}

static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	s32 stripe_unit = ci->i_layout.stripe_unit;
	s32 stripe_count = ci->i_layout.stripe_count;
	s32 object_size = ci->i_layout.object_size;
	/* widen before multiplying so large layouts can't overflow s32 */
	u64 object_set_size = (u64)object_size * stripe_count;
	u64 nearly, t;

	/* round offset up to next period boundary */
	nearly = offset + object_set_size - 1;
	t = nearly;
	nearly -= do_div(t, object_set_size);
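	/*
	 * e.g. (editorial): with object_size = 4M and stripe_count = 2,
	 * object_set_size = 8M; an offset of 5M rounds up to nearly = 8M,
	 * so the first loop below zeroes 5M..8M object by object before
	 * the whole-set fast path takes over.
	 */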

	while (length && offset < nearly) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	while (length >= object_set_size) {
		int i;
		loff_t pos = offset;
		for (i = 0; i < stripe_count; ++i) {
			ret = ceph_zero_partial_object(inode, pos, NULL);
			if (ret < 0)
				return ret;
			pos += stripe_unit;
		}
		offset += object_set_size;
		length -= object_set_size;
	}
	while (length) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	return ret;
}

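/*
 * Summary of the three phases above (editorial): (1) partial zeroing up
 * to the next object-set boundary, (2) whole object sets dropped via
 * TRUNCATE/DELETE (NULL length), and (3) a partial tail.  E.g. zeroing
 * 6M..30M with an 8M object set: phase 1 covers 6M-8M, phase 2 covers
 * 8M-24M, phase 3 covers 24M-30M.
 */
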
static long ceph_fallocate(struct file *file, int mode,
			   loff_t offset, loff_t length)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap_flush *prealloc_cf;
	int want, got = 0;
	int dirty;
	int ret = 0;
	loff_t endoff = 0;
	loff_t size;

	dout("%s %p %llx.%llx mode %x, offset %llu length %llu\n", __func__,
	     inode, ceph_vinop(inode), mode, offset, length);

	if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	inode_lock(inode);

	if (ceph_snap(inode) != CEPH_NOSNAP) {
		ret = -EROFS;
		goto unlock;
	}

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		ret = ceph_uninline_data(file, NULL);
		if (ret < 0)
			goto unlock;
	}

	size = i_size_read(inode);

	/* Are we punching a hole beyond EOF? */
	if (offset >= size)
		goto unlock;
	if ((offset + length) > size)
		length = size - offset;

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	ret = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, endoff, &got);
	if (ret < 0)
		goto unlock;

	ret = file_modified(file);
	if (ret)
		goto put_caps;

	filemap_invalidate_lock(inode->i_mapping);
	ceph_zero_pagecache_range(inode, offset, length);
	ret = ceph_zero_objects(inode, offset, length);

	if (!ret) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}
	filemap_invalidate_unlock(inode->i_mapping);

put_caps:
	ceph_put_cap_refs(ci, got);
unlock:
	inode_unlock(inode);
	ceph_free_cap_flush(prealloc_cf);
	return ret;
}

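/*
 * Illustrative userspace sketch (editorial, not part of this file):
 * ceph_fallocate() only supports punching holes while keeping the file
 * size, so the single accepted mode combination is:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  offset, length);
 *
 * Anything else (including plain preallocation) fails with -EOPNOTSUPP.
 */
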
/*
 * This function tries to get FILE_WR capabilities for dst_ci and FILE_RD for
 * src_ci.  Two attempts are made to obtain both caps; an error is returned
 * if this fails, and zero is returned on success.
 */
static int get_rd_wr_caps(struct file *src_filp, int *src_got,
			  struct file *dst_filp,
			  loff_t dst_endoff, int *dst_got)
{
	int ret = 0;
	bool retrying = false;

retry_caps:
	ret = ceph_get_caps(dst_filp, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
			    dst_endoff, dst_got);
	if (ret < 0)
		return ret;

	/*
	 * Since we're already holding the FILE_WR capability for the dst file,
	 * we would risk a deadlock by using ceph_get_caps.  Thus, we'll do
	 * some retry dance instead to try to get both capabilities.
	 */
	ret = ceph_try_get_caps(file_inode(src_filp),
				CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
				false, src_got);
	if (ret <= 0) {
		/* Start by dropping dst_ci caps and getting src_ci caps */
		ceph_put_cap_refs(ceph_inode(file_inode(dst_filp)), *dst_got);
		if (retrying) {
			if (!ret)
				/* ceph_try_get_caps masks EAGAIN */
				ret = -EAGAIN;
			return ret;
		}
		ret = ceph_get_caps(src_filp, CEPH_CAP_FILE_RD,
				    CEPH_CAP_FILE_SHARED, -1, src_got);
		if (ret < 0)
			return ret;
		/* ... drop src_ci caps too, and retry */
		ceph_put_cap_refs(ceph_inode(file_inode(src_filp)), *src_got);
		retrying = true;
		goto retry_caps;
	}
	return ret;
}

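/*
 * Why the retry dance matters (editorial note): blocking for the src
 * caps while still holding the dst Fw cap could deadlock, e.g. against
 * another client performing the mirror-image copy (its dst is our src).
 * Dropping everything, acquiring src first, and then retrying dst
 * breaks that cycle at the cost of an extra round trip.
 */
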
static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
			   struct ceph_inode_info *dst_ci, int dst_got)
{
	ceph_put_cap_refs(src_ci, src_got);
	ceph_put_cap_refs(dst_ci, dst_got);
}

/*
 * This function does several size-related checks, returning an error if:
 * - the source file is smaller than off+len
 * - the destination file size is not OK (inode_newsize_ok())
 * - the max bytes quota is exceeded
 */
static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
			   loff_t src_off, loff_t dst_off, size_t len)
{
	loff_t size, endoff;

	size = i_size_read(src_inode);
	/*
	 * Don't copy beyond source file EOF.  Instead of simply setting
	 * length to (size - src_off), just drop to VFS default implementation,
	 * as the local i_size may be stale due to other clients writing to the
	 * source inode.
	 */
	if (src_off + len > size) {
		dout("Copy beyond EOF (%llu + %zu > %llu)\n",
		     src_off, len, size);
		return -EOPNOTSUPP;
	}
	size = i_size_read(dst_inode);

	endoff = dst_off + len;
	if (inode_newsize_ok(dst_inode, endoff))
		return -EOPNOTSUPP;

	if (ceph_quota_is_max_bytes_exceeded(dst_inode, endoff))
		return -EDQUOT;

	return 0;
}

static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off,
				    struct ceph_inode_info *dst_ci, u64 *dst_off,
				    struct ceph_fs_client *fsc,
				    size_t len, unsigned int flags)
{
	struct ceph_object_locator src_oloc, dst_oloc;
	struct ceph_object_id src_oid, dst_oid;
	size_t bytes = 0;
	u64 src_objnum, src_objoff, dst_objnum, dst_objoff;
	u32 src_objlen, dst_objlen;
	u32 object_size = src_ci->i_layout.object_size;
	int ret;

	src_oloc.pool = src_ci->i_layout.pool_id;
	src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
	dst_oloc.pool = dst_ci->i_layout.pool_id;
	dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);

	while (len >= object_size) {
		ceph_calc_file_object_mapping(&src_ci->i_layout, *src_off,
					      object_size, &src_objnum,
					      &src_objoff, &src_objlen);
		ceph_calc_file_object_mapping(&dst_ci->i_layout, *dst_off,
					      object_size, &dst_objnum,
					      &dst_objoff, &dst_objlen);
		ceph_oid_init(&src_oid);
		ceph_oid_printf(&src_oid, "%llx.%08llx",
				src_ci->i_vino.ino, src_objnum);
		ceph_oid_init(&dst_oid);
		ceph_oid_printf(&dst_oid, "%llx.%08llx",
				dst_ci->i_vino.ino, dst_objnum);
		/* Do an object remote copy */
		ret = ceph_osdc_copy_from(&fsc->client->osdc,
					  src_ci->i_vino.snap, 0,
					  &src_oid, &src_oloc,
					  CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
					  CEPH_OSD_OP_FLAG_FADVISE_NOCACHE,
					  &dst_oid, &dst_oloc,
					  CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
					  CEPH_OSD_OP_FLAG_FADVISE_DONTNEED,
					  dst_ci->i_truncate_seq,
					  dst_ci->i_truncate_size,
					  CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ);
		if (ret) {
			if (ret == -EOPNOTSUPP) {
				fsc->have_copy_from2 = false;
				pr_notice("OSDs don't support copy-from2; disabling copy offload\n");
			}
			dout("ceph_osdc_copy_from returned %d\n", ret);
			if (!bytes)
				bytes = ret;
			goto out;
		}
		len -= object_size;
		bytes += object_size;
		*src_off += object_size;
		*dst_off += object_size;
	}

out:
	ceph_oloc_destroy(&src_oloc);
	ceph_oloc_destroy(&dst_oloc);
	return bytes;
}

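/*
 * RADOS object naming used above (editorial example): data objects are
 * named "<inode number in hex>.<object number, zero-padded hex>", so
 * object 2 of inode 0x10000000000 is "10000000000.00000002".  The
 * copy-from2 operation copies one such object at a time, which is why
 * the loop above only handles whole, aligned objects.
 */
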
static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
				      struct file *dst_file, loff_t dst_off,
				      size_t len, unsigned int flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *dst_inode = file_inode(dst_file);
	struct ceph_inode_info *src_ci = ceph_inode(src_inode);
	struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
	struct ceph_cap_flush *prealloc_cf;
	struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode);
	loff_t size;
	ssize_t ret = -EIO, bytes;
	u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
	u32 src_objlen, dst_objlen;
	int src_got = 0, dst_got = 0, err, dirty;

	if (src_inode->i_sb != dst_inode->i_sb) {
		struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode);

		if (ceph_fsid_compare(&src_fsc->client->fsid,
				      &dst_fsc->client->fsid)) {
			dout("Copying files across clusters: src: %pU dst: %pU\n",
			     &src_fsc->client->fsid, &dst_fsc->client->fsid);
			return -EXDEV;
		}
	}
	if (ceph_snap(dst_inode) != CEPH_NOSNAP)
		return -EROFS;

	/*
	 * Some of the checks below will return -EOPNOTSUPP, which will force a
	 * fallback to the default VFS copy_file_range implementation.  This is
	 * desirable in several cases (for example, when 'len' is smaller than
	 * the size of the objects, or in cases where that would be more
	 * efficient).
	 */

	if (ceph_test_mount_opt(src_fsc, NOCOPYFROM))
		return -EOPNOTSUPP;

	if (!src_fsc->have_copy_from2)
		return -EOPNOTSUPP;

	/*
	 * Striped file layouts require that we copy partial objects, but the
	 * OSD copy-from operation only supports full-object copies.  Limit
	 * this to non-striped file layouts for now.
	 */
	if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
	    (src_ci->i_layout.stripe_count != 1) ||
	    (dst_ci->i_layout.stripe_count != 1) ||
	    (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) {
		dout("Invalid src/dst files layout\n");
		return -EOPNOTSUPP;
	}

	if (len < src_ci->i_layout.object_size)
		return -EOPNOTSUPP; /* no remote copy will be done */

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	/* Start by sync'ing the source and destination files */
	ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
	if (ret < 0) {
		dout("failed to write src file (%zd)\n", ret);
		goto out;
	}
	ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
	if (ret < 0) {
		dout("failed to write dst file (%zd)\n", ret);
		goto out;
	}

	/*
	 * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
	 * clients may have dirty data in their caches.  And OSDs know nothing
	 * about caps, so they can't safely do the remote object copies.
	 */
	err = get_rd_wr_caps(src_file, &src_got,
			     dst_file, (dst_off + len), &dst_got);
	if (err < 0) {
		dout("get_rd_wr_caps returned %d\n", err);
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = is_file_size_ok(src_inode, dst_inode, src_off, dst_off, len);
	if (ret < 0)
		goto out_caps;

	/* Drop dst file cached pages */
	ret = invalidate_inode_pages2_range(dst_inode->i_mapping,
					    dst_off >> PAGE_SHIFT,
					    (dst_off + len) >> PAGE_SHIFT);
	if (ret < 0) {
		dout("Failed to invalidate inode pages (%zd)\n", ret);
		ret = 0; /* XXX */
	}
	ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
				      src_ci->i_layout.object_size,
				      &src_objnum, &src_objoff, &src_objlen);
	ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
				      dst_ci->i_layout.object_size,
				      &dst_objnum, &dst_objoff, &dst_objlen);
	/* object-level offsets need to be the same */
	if (src_objoff != dst_objoff) {
		ret = -EOPNOTSUPP;
		goto out_caps;
	}

	/*
	 * Do a manual copy if the object offset isn't object aligned.
	 * 'src_objlen' contains the bytes left until the end of the object,
	 * starting at src_off.
	 */
	if (src_objoff) {
		dout("Initial partial copy of %u bytes\n", src_objlen);

		/*
		 * we need to temporarily drop all caps as we'll be calling
		 * {read,write}_iter, which will get caps again.
		 */
		put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
		ret = do_splice_direct(src_file, &src_off, dst_file,
				       &dst_off, src_objlen, flags);
		/* Abort on short copies or on error */
		if (ret < (long)src_objlen) {
			dout("Failed partial copy (%zd)\n", ret);
			goto out;
		}
		len -= ret;
		err = get_rd_wr_caps(src_file, &src_got,
				     dst_file, (dst_off + len), &dst_got);
		if (err < 0)
			goto out;
		err = is_file_size_ok(src_inode, dst_inode,
				      src_off, dst_off, len);
		if (err < 0)
			goto out_caps;
	}

	size = i_size_read(dst_inode);
	bytes = ceph_do_objects_copy(src_ci, &src_off, dst_ci, &dst_off,
				     src_fsc, len, flags);
	if (bytes <= 0) {
		if (!ret)
			ret = bytes;
		goto out_caps;
	}
	dout("Copied %zu bytes out of %zu\n", bytes, len);
	len -= bytes;
	ret += bytes;

	file_update_time(dst_file);
	inode_inc_iversion_raw(dst_inode);

	if (dst_off > size) {
		/* Let the MDS know about dst file size change */
		if (ceph_inode_set_size(dst_inode, dst_off) ||
		    ceph_quota_is_max_bytes_approaching(dst_inode, dst_off))
			ceph_check_caps(dst_ci, CHECK_CAPS_AUTHONLY, NULL);
	}
	/* Mark Fw dirty */
	spin_lock(&dst_ci->i_ceph_lock);
	dst_ci->i_inline_version = CEPH_INLINE_NONE;
	dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf);
	spin_unlock(&dst_ci->i_ceph_lock);
	if (dirty)
		__mark_inode_dirty(dst_inode, dirty);

out_caps:
	put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);

	/*
	 * Do the final manual copy if we still have some bytes left, unless
	 * there were errors in remote object copies (len >= object_size).
	 */
	if (len && (len < src_ci->i_layout.object_size)) {
		dout("Final partial copy of %zu bytes\n", len);
		bytes = do_splice_direct(src_file, &src_off, dst_file,
					 &dst_off, len, flags);
		if (bytes > 0)
			ret += bytes;
		else
			dout("Failed partial copy (%zd)\n", bytes);
	}

out:
	ceph_free_cap_flush(prealloc_cf);

	return ret;
}

static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
				    struct file *dst_file, loff_t dst_off,
				    size_t len, unsigned int flags)
{
	ssize_t ret;

	ret = __ceph_copy_file_range(src_file, src_off, dst_file, dst_off,
				     len, flags);

	if (ret == -EOPNOTSUPP || ret == -EXDEV)
		ret = generic_copy_file_range(src_file, src_off, dst_file,
					      dst_off, len, flags);
	return ret;
}

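/*
 * Illustrative userspace sketch (editorial, not part of this file):
 * copy_file_range(2) on CephFS offloads aligned, full-object spans to
 * the OSDs and falls back to the generic VFS copy for the unaligned
 * head/tail, or entirely when offload isn't possible:
 *
 *	ssize_t n = copy_file_range(src_fd, NULL, dst_fd, NULL, len, 0);
 *
 * From userspace the result is the same either way; only the data path
 * differs.
 */
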
const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read_iter = ceph_read_iter,
	.write_iter = ceph_write_iter,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.setlease = simple_nosetlease,
	.flock = ceph_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fallocate = ceph_fallocate,
	.copy_file_range = ceph_copy_file_range,
};
