#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */

/*
 * Calculate the length sum of direct io vectors that can
 * be combined into one page vector.
 */
static size_t dio_get_pagev_size(const struct iov_iter *it)
{
	const struct iovec *iov = it->iov;
	const struct iovec *iovend = iov + it->nr_segs;
	size_t size;

	size = iov->iov_len - it->iov_offset;
	/*
	 * An iov can be page vectored when both the current tail
	 * and the next base are page aligned.
	 */
	while (PAGE_ALIGNED((iov->iov_base + iov->iov_len)) &&
	       (++iov < iovend && PAGE_ALIGNED((iov->iov_base)))) {
		size += iov->iov_len;
	}
	dout("dio_get_pagevlen len = %zu\n", size);
	return size;
}
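
/*
 * Example: for segments { base page aligned, len = PAGE_SIZE } followed
 * by { base page aligned, len = 100 }, the two are merged and the
 * 100-byte tail ends the scan; a segment that ends mid-page is never
 * combined with its successor.
 */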

/*
 * Allocate a page vector based on (@it, @nbytes).
 * The return value is the tuple describing a page vector,
 * that is (@pages, @page_align, @num_pages).
 */
static struct page **
dio_get_pages_alloc(const struct iov_iter *it, size_t nbytes,
		    size_t *page_align, int *num_pages)
{
	struct iov_iter tmp_it = *it;
	size_t align;
	struct page **pages;
	int ret = 0, idx, npages;

	align = (unsigned long)(it->iov->iov_base + it->iov_offset) &
		(PAGE_SIZE - 1);
	npages = calc_pages_for(align, nbytes);
	pages = kmalloc(sizeof(*pages) * npages, GFP_KERNEL);
	if (!pages) {
		pages = vmalloc(sizeof(*pages) * npages);
		if (!pages)
			return ERR_PTR(-ENOMEM);
	}

	for (idx = 0; idx < npages; ) {
		size_t start;
		ret = iov_iter_get_pages(&tmp_it, pages + idx, nbytes,
					 npages - idx, &start);
		if (ret < 0)
			goto fail;

		iov_iter_advance(&tmp_it, ret);
		nbytes -= ret;
		idx += (ret + start + PAGE_SIZE - 1) / PAGE_SIZE;
	}

	BUG_ON(nbytes != 0);
	*num_pages = npages;
	*page_align = align;
	dout("dio_get_pages_alloc: got %d pages align %zu\n", npages, align);
	return pages;
fail:
	ceph_put_page_vector(pages, idx, false);
	return ERR_PTR(ret);
}
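
/*
 * Note: @page_align is the offset of the data within the first page, so
 * a buffer that starts mid-page yields a vector of calc_pages_for(align,
 * nbytes) pages with the payload beginning at that alignment.
 */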

/*
 * Prepare an open request. Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = cpu_to_le32(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
out:
	return req;
}
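
/*
 * Any open that may modify the file (write, create, truncate) is
 * directed at the authoritative MDS for the inode, since that is the
 * MDS that can issue write caps; read-only opens can go to any MDS.
 */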

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	struct ceph_file_info *cf;
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		ceph_fscache_register_inode_cookie(inode);
		ceph_fscache_file_set_cookie(inode, file);
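		/* fall through: regular files share the ceph_file_info setup below */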
	case S_IFDIR:
		dout("init_file %p %p 0%o (regular)\n", inode, file,
		     inode->i_mode);
		cf = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
		if (cf == NULL) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}
		cf->fmode = fmode;
		cf->next_offset = 2;
		cf->readdir_cache_idx = -1;
		file->private_data = cf;
		BUG_ON(inode->i_fop->release != ceph_release);
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}

/*
 * try renew caps after session gets killed.
 */
int ceph_renew_caps(struct inode *inode)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_request *req;
	int err, flags, wanted;

	spin_lock(&ci->i_ceph_lock);
	wanted = __ceph_caps_file_wanted(ci);
	if (__ceph_is_any_real_caps(ci) &&
	    (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
		int issued = __ceph_caps_issued(ci, NULL);
		spin_unlock(&ci->i_ceph_lock);
		dout("renew caps %p want %s issued %s updating mds_wanted\n",
		     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
		ceph_check_caps(ci, 0, NULL);
		return 0;
	}
	spin_unlock(&ci->i_ceph_lock);

	flags = 0;
	if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
		flags = O_RDWR;
	else if (wanted & CEPH_CAP_FILE_RD)
		flags = O_RDONLY;
	else if (wanted & CEPH_CAP_FILE_WR)
		flags = O_WRONLY;
#ifdef O_LAZY
	if (wanted & CEPH_CAP_FILE_LAZYIO)
		flags |= O_LAZY;
#endif

	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_fmode = -1;
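	/*
	 * r_fmode of -1 appears to mark this as a cap renewal rather
	 * than a real open, so the reply does not take a new fmode
	 * reference on the inode.
	 */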

	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
out:
	dout("renew caps %p open result=%d\n", inode, err);
	return err < 0 ? err : 0;
}

/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *cf = file->private_data;
	int err;
	int flags, fmode, wanted;

	if (cf) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		spin_lock(&ci->i_ceph_lock);
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);

	req->r_num_caps = 1;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}
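
/*
 * For example, a second O_RDONLY open of a file on which we already hold
 * real caps takes the fast path above: we bump the fmode refcount and
 * return without a round trip to the MDS, at most poking
 * ceph_check_caps() to widen the wanted set asynchronously.
 */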


/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
		     struct file *file, unsigned flags, umode_t mode,
		     int *opened)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct dentry *dn;
	struct ceph_acls_info acls = {};
	int mask;
	int err;

	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
	     dir, dentry, dentry,
	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

	if (dentry->d_name.len > NAME_MAX)
		return -ENAMETOOLONG;

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return err;

	if (flags & O_CREAT) {
		err = ceph_pre_init_acls(dir, &mode, &acls);
		if (err < 0)
			return err;
	}

	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_acl;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	if (flags & O_CREAT) {
		req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
		if (acls.pagelist) {
			req->r_pagelist = acls.pagelist;
			acls.pagelist = NULL;
		}
	}

	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
	if (ceph_security_xattr_wanted(dir))
		mask |= CEPH_CAP_XATTR_SHARED;
	req->r_args.open.mask = cpu_to_le32(mask);

	req->r_locked_dir = dir;           /* caller holds dir->i_mutex */
	err = ceph_mdsc_do_request(mdsc,
				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
				   req);
	err = ceph_handle_snapdir(req, dentry, err);
	if (err)
		goto out_req;

	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);

	if (d_in_lookup(dentry)) {
		dn = ceph_finish_lookup(req, dentry, err);
		if (IS_ERR(dn))
			err = PTR_ERR(dn);
	} else {
		/* we were given a hashed negative dentry */
		dn = NULL;
	}
	if (err)
		goto out_req;
	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
		/* make vfs retry on splice, ENOENT, or symlink */
		dout("atomic_open finish_no_open on dn %p\n", dn);
		err = finish_no_open(file, dn);
	} else {
		dout("atomic_open finish_open on dn %p\n", dn);
		if (req->r_op == CEPH_MDS_OP_CREATE &&
		    req->r_reply_info.has_create_ino) {
			ceph_init_inode_acls(d_inode(dentry), &acls);
			*opened |= FILE_CREATED;
		}
		err = finish_open(file, dentry, ceph_open, opened);
	}
out_req:
	if (!req->r_err && req->r_target_inode)
		ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
	ceph_mdsc_put_request(req);
out_acl:
	ceph_release_acls_info(&acls);
	dout("atomic_open result=%d\n", err);
	return err;
}

int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *cf = file->private_data;

	dout("release inode %p file %p\n", inode, file);
	ceph_put_fmode(ci, cf->fmode);
	if (cf->last_readdir)
		ceph_mdsc_put_request(cf->last_readdir);
	kfree(cf->last_name);
	kfree(cf->dir_info);
	kmem_cache_free(ceph_file_cachep, cf);

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

enum {
	HAVE_RETRIED = 1,
	CHECK_EOF = 2,
	READ_INLINE = 3,
};
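
/*
 * retry_op values for ceph_read_iter(): CHECK_EOF means a short read may
 * need a retry once i_size is revalidated; READ_INLINE means the data
 * lives inline in the MDS and must be fetched with getattr; HAVE_RETRIED
 * marks a read that has already gone around once.
 */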

/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static int striped_read(struct inode *inode,
			u64 off, u64 len,
			struct page **pages, int num_pages,
			int *checkeof)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 pos, this_len, left;
	loff_t i_size;
	int page_align, pages_left;
	int read, ret;
	struct page **page_pos;
	bool hit_stripe, was_short;

	/*
	 * we may need to do multiple reads.  not atomic, unfortunately.
	 */
	pos = off;
	left = len;
	page_pos = pages;
	pages_left = num_pages;
	read = 0;

more:
	page_align = pos & ~PAGE_MASK;
	this_len = left;
	ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
				  &ci->i_layout, pos, &this_len,
				  ci->i_truncate_seq,
				  ci->i_truncate_size,
				  page_pos, pages_left, page_align);
	if (ret == -ENOENT)
		ret = 0;
	hit_stripe = this_len < left;
	was_short = ret >= 0 && ret < this_len;
	dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, left, read,
	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

	i_size = i_size_read(inode);
	if (ret >= 0) {
		int didpages;
		if (was_short && (pos + ret < i_size)) {
			int zlen = min(this_len - ret, i_size - pos - ret);
			int zoff = (off & ~PAGE_MASK) + read + ret;
			dout(" zero gap %llu to %llu\n",
			     pos + ret, pos + ret + zlen);
			ceph_zero_page_vector_range(zoff, zlen, pages);
			ret += zlen;
		}

		didpages = (page_align + ret) >> PAGE_SHIFT;
		pos += ret;
		read = pos - off;
		left -= ret;
		page_pos += didpages;
		pages_left -= didpages;

		/* hit a stripe boundary and need to continue? */
		if (left && hit_stripe && pos < i_size)
			goto more;
	}

	if (read > 0) {
		ret = read;
		/* did we bounce off eof? */
		if (pos + left > i_size)
			*checkeof = CHECK_EOF;
	}

	dout("striped_read returns %d\n", ret);
	return ret;
}
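
/*
 * Example: with a typical layout of 4MB objects and stripe_count 1, a
 * 6MB read starting at offset 0 hits an object boundary at 4MB, so the
 * loop above issues two OSD reads and stitches the results together.
 */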

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i,
			      int *checkeof)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct page **pages;
	u64 off = iocb->ki_pos;
	int num_pages, ret;
	size_t len = iov_iter_count(i);

	dout("sync_read on file %p %llu~%u %s\n", file, off,
	     (unsigned)len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (!len)
		return 0;
	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping, off,
					   off + len);
	if (ret < 0)
		return ret;

	num_pages = calc_pages_for(off, len);
	pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);
	ret = striped_read(inode, off, len, pages,
			   num_pages, checkeof);
	if (ret > 0) {
		int l, k = 0;
		size_t left = ret;

		while (left) {
			size_t page_off = off & ~PAGE_MASK;
			size_t copy = min_t(size_t, left,
					    PAGE_SIZE - page_off);
			l = copy_page_to_iter(pages[k++], page_off, copy, i);
			off += l;
			left -= l;
			if (l < copy)
				break;
		}
	}
	ceph_release_page_vector(pages, num_pages);

	if (off > iocb->ki_pos) {
		ret = off - iocb->ki_pos;
		iocb->ki_pos = off;
	}

	dout("sync_read result %d\n", ret);
	return ret;
}

struct ceph_aio_request {
	struct kiocb *iocb;
	size_t total_len;
	bool write;
	bool should_dirty;
	int error;
	struct list_head osd_reqs;
	unsigned num_reqs;
	atomic_t pending_reqs;
	struct timespec mtime;
	struct ceph_cap_flush *prealloc_cf;
};
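
/*
 * One ceph_aio_request fans out into num_reqs OSD requests (one per
 * object touched); pending_reqs counts outstanding completions, and the
 * last one to finish calls ceph_aio_complete() to report the result via
 * ki_complete().
 */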

struct ceph_aio_work {
	struct work_struct work;
	struct ceph_osd_request *req;
};

static void ceph_aio_retry_work(struct work_struct *work);

static void ceph_aio_complete(struct inode *inode,
			      struct ceph_aio_request *aio_req)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!atomic_dec_and_test(&aio_req->pending_reqs))
		return;

	ret = aio_req->error;
	if (!ret)
		ret = aio_req->total_len;

	dout("ceph_aio_complete %p rc %d\n", inode, ret);

	if (ret >= 0 && aio_req->write) {
		int dirty;

		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
		if (endoff > i_size_read(inode)) {
			if (ceph_inode_set_size(inode, endoff))
				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
		}

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &aio_req->prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);

	}

	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
						CEPH_CAP_FILE_RD));

	aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);

	ceph_free_cap_flush(aio_req->prealloc_cf);
	kfree(aio_req);
}

static void ceph_aio_complete_req(struct ceph_osd_request *req)
{
	int rc = req->r_result;
	struct inode *inode = req->r_inode;
	struct ceph_aio_request *aio_req = req->r_priv;
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
	int num_pages = calc_pages_for((u64)osd_data->alignment,
				       osd_data->length);

	dout("ceph_aio_complete_req %p rc %d bytes %llu\n",
	     inode, rc, osd_data->length);

	if (rc == -EOLDSNAPC) {
		struct ceph_aio_work *aio_work;
		BUG_ON(!aio_req->write);

		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
		if (aio_work) {
			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
			aio_work->req = req;
			queue_work(ceph_inode_to_client(inode)->wb_wq,
				   &aio_work->work);
			return;
		}
		rc = -ENOMEM;
	} else if (!aio_req->write) {
		if (rc == -ENOENT)
			rc = 0;
		if (rc >= 0 && osd_data->length > rc) {
			int zoff = osd_data->alignment + rc;
			int zlen = osd_data->length - rc;
			/*
			 * If the read is satisfied by a single OSD
			 * request, it can pass EOF.  Otherwise the read
			 * is within i_size.
			 */
			if (aio_req->num_reqs == 1) {
				loff_t i_size = i_size_read(inode);
				loff_t endoff = aio_req->iocb->ki_pos + rc;
				if (endoff < i_size)
					zlen = min_t(size_t, zlen,
						     i_size - endoff);
				aio_req->total_len = rc + zlen;
			}

			if (zlen > 0)
				ceph_zero_page_vector_range(zoff, zlen,
							    osd_data->pages);
		}
	}

	ceph_put_page_vector(osd_data->pages, num_pages, aio_req->should_dirty);
	ceph_osdc_put_request(req);

	if (rc < 0)
		cmpxchg(&aio_req->error, 0, rc);

	ceph_aio_complete(inode, aio_req);
	return;
}
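
/*
 * Note: -EOLDSNAPC (the write raced with a snapshot) is not resubmitted
 * directly from the completion callback above; it is bounced to
 * ceph_aio_retry_work(), presumably to avoid re-entering the OSD client
 * from its own completion path while rebuilding the request with the
 * newest snap context.
 */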

static void ceph_aio_retry_work(struct work_struct *work)
{
	struct ceph_aio_work *aio_work =
		container_of(work, struct ceph_aio_work, work);
	struct ceph_osd_request *orig_req = aio_work->req;
	struct ceph_aio_request *aio_req = orig_req->r_priv;
	struct inode *inode = orig_req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct ceph_osd_request *req;
	int ret;

	spin_lock(&ci->i_ceph_lock);
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
			list_last_entry(&ci->i_cap_snaps,
					struct ceph_cap_snap,
					ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
	}
	spin_unlock(&ci->i_ceph_lock);

	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2,
				      false, GFP_NOFS);
	if (!req) {
		ret = -ENOMEM;
		req = orig_req;
		goto out;
	}

	req->r_flags = CEPH_OSD_FLAG_ORDERSNAP |
		       CEPH_OSD_FLAG_ONDISK |
		       CEPH_OSD_FLAG_WRITE;
	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);

	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (ret) {
		ceph_osdc_put_request(req);
		req = orig_req;
		goto out;
	}

	req->r_ops[0] = orig_req->r_ops[0];
	osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);

	req->r_mtime = aio_req->mtime;
	req->r_data_offset = req->r_ops[0].extent.offset;

	ceph_osdc_put_request(orig_req);

	req->r_callback = ceph_aio_complete_req;
	req->r_inode = inode;
	req->r_priv = aio_req;

	ret = ceph_osdc_start_request(req->r_osdc, req, false);
out:
	if (ret < 0) {
		req->r_result = ret;
		ceph_aio_complete_req(req);
	}

	ceph_put_snap_context(snapc);
	kfree(aio_work);
}

/*
 * Write commit request unsafe callback, called to tell us when a
 * request is unsafe (that is, in flight--has been handed to the
 * messenger to send to its target osd).  It is called again when
 * we've received a response message indicating the request is
 * "safe" (its CEPH_OSD_FLAG_ONDISK flag is set), or when a request
 * is completed early (and unsuccessfully) due to a timeout or
 * interrupt.
 *
 * This is used if we requested both an ACK and ONDISK commit reply
 * from the OSD.
 */
static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe)
{
	struct ceph_inode_info *ci = ceph_inode(req->r_inode);

	dout("%s %p tid %llu %ssafe\n", __func__, req, req->r_tid,
	     unsafe ? "un" : "");
	if (unsafe) {
		ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
		spin_lock(&ci->i_unsafe_lock);
		list_add_tail(&req->r_unsafe_item,
			      &ci->i_unsafe_writes);
		spin_unlock(&ci->i_unsafe_lock);

		complete_all(&req->r_completion);
	} else {
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_item);
		spin_unlock(&ci->i_unsafe_lock);
		ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
	}
}
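
/*
 * The cap reference taken above keeps Fw (file write) caps pinned while
 * a write is only "unsafe" (acked but not yet committed to disk), so the
 * MDS cannot revoke them until the data is durable.
 */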

/*
 * Wait on any unsafe replies for the given inode.  First wait on the
 * newest request, and make that the upper bound.  Then, if there are
 * more requests, keep waiting on the oldest as long as it is still older
 * than the original request.
 */
void ceph_sync_write_wait(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_writes;
	struct ceph_osd_request *req;
	u64 last_tid;

	if (!S_ISREG(inode->i_mode))
		return;

	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	/* set upper bound as _last_ entry in chain */

	req = list_last_entry(head, struct ceph_osd_request,
			      r_unsafe_item);
	last_tid = req->r_tid;

	do {
		ceph_osdc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);

		dout("sync_write_wait on tid %llu (until %llu)\n",
		     req->r_tid, last_tid);
		wait_for_completion(&req->r_safe_completion);
		ceph_osdc_put_request(req);

		spin_lock(&ci->i_unsafe_lock);
		/*
		 * from here on look at first entry in chain, since we
		 * only want to wait for anything older than last_tid
		 */
		if (list_empty(head))
			break;
		req = list_first_entry(head, struct ceph_osd_request,
				       r_unsafe_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
}

static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
		       struct ceph_snap_context *snapc,
		       struct ceph_cap_flush **pcf)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	struct ceph_aio_request *aio_req = NULL;
	int num_pages = 0;
	int flags;
	int ret;
	struct timespec mtime = current_time(inode);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos;
	bool write = iov_iter_rw(iter) == WRITE;
	bool should_dirty = !write && iter_is_iovec(iter);

	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_direct_read_write (%s) on file %p %lld~%u\n",
	     (write ? "write" : "read"), file, pos, (unsigned)count);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	if (write) {
		int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
					pos >> PAGE_SHIFT,
					(pos + count) >> PAGE_SHIFT);
		if (ret2 < 0)
			dout("invalidate_inode_pages2_range returned %d\n",
			     ret2);

		flags = CEPH_OSD_FLAG_ORDERSNAP |
			CEPH_OSD_FLAG_ONDISK |
			CEPH_OSD_FLAG_WRITE;
	} else {
		flags = CEPH_OSD_FLAG_READ;
	}

	while (iov_iter_count(iter) > 0) {
		u64 size = dio_get_pagev_size(iter);
		size_t start = 0;
		ssize_t len;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &size, 0,
					    /*include a 'startsync' command*/
					    write ? 2 : 1,
					    write ? CEPH_OSD_OP_WRITE :
						    CEPH_OSD_OP_READ,
					    flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		len = size;
		pages = dio_get_pages_alloc(iter, len, &start, &num_pages);
		if (IS_ERR(pages)) {
			ceph_osdc_put_request(req);
			ret = PTR_ERR(pages);
			break;
		}

		/*
		 * To simplify error handling, allow AIO only when the
		 * IO is within i_size or can be satisfied by a single
		 * OSD request.
		 */
		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
		    (len == count || pos + count <= i_size_read(inode))) {
			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
			if (aio_req) {
				aio_req->iocb = iocb;
				aio_req->write = write;
				aio_req->should_dirty = should_dirty;
				INIT_LIST_HEAD(&aio_req->osd_reqs);
				if (write) {
					aio_req->mtime = mtime;
					swap(aio_req->prealloc_cf, *pcf);
				}
			}
			/* ignore error */
		}

		if (write) {
			/*
			 * throw out any page cache pages in this range.  this
			 * may block.
			 */
			truncate_inode_pages_range(inode->i_mapping, pos,
					(pos+len) | (PAGE_SIZE - 1));

			osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
			req->r_mtime = mtime;
		}

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, start,
						 false, false);

		if (aio_req) {
			aio_req->total_len += len;
			aio_req->num_reqs++;
			atomic_inc(&aio_req->pending_reqs);

			req->r_callback = ceph_aio_complete_req;
			req->r_inode = inode;
			req->r_priv = aio_req;
			list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs);

			pos += len;
			iov_iter_advance(iter, len);
			continue;
		}

		ret = ceph_osdc_start_request(req->r_osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		size = i_size_read(inode);
		if (!write) {
			if (ret == -ENOENT)
				ret = 0;
			if (ret >= 0 && ret < len && pos + ret < size) {
				int zlen = min_t(size_t, len - ret,
						 size - pos - ret);
				ceph_zero_page_vector_range(start + ret, zlen,
							    pages);
				ret += zlen;
			}
			if (ret >= 0)
				len = ret;
		}

		ceph_put_page_vector(pages, num_pages, should_dirty);

		ceph_osdc_put_request(req);
		if (ret < 0)
			break;

		pos += len;
		iov_iter_advance(iter, len);

		if (!write && pos >= size)
			break;

		if (write && pos > size) {
			if (ceph_inode_set_size(inode, pos))
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

	if (aio_req) {
		LIST_HEAD(osd_reqs);

		if (aio_req->num_reqs == 0) {
			kfree(aio_req);
			return ret;
		}

		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
					      CEPH_CAP_FILE_RD);

		list_splice(&aio_req->osd_reqs, &osd_reqs);
		while (!list_empty(&osd_reqs)) {
			req = list_first_entry(&osd_reqs,
					       struct ceph_osd_request,
					       r_unsafe_item);
			list_del_init(&req->r_unsafe_item);
			if (ret >= 0)
				ret = ceph_osdc_start_request(req->r_osdc,
							      req, false);
			if (ret < 0) {
				req->r_result = ret;
				ceph_aio_complete_req(req);
			}
		}
		return -EIOCBQUEUED;
	}

	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
		ret = pos - iocb->ki_pos;
		iocb->ki_pos = pos;
	}
	return ret;
}
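
/*
 * Example: an O_DIRECT write spanning two objects is cut into two OSD
 * requests by the loop above; if it qualified for AIO, both are started
 * and -EIOCBQUEUED is returned, with ceph_aio_complete_req() finishing
 * the iocb once the last request comes back.
 */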

/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
		struct ceph_snap_context *snapc)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	u64 len;
	int num_pages;
	int written = 0;
	int flags;
	int check_caps = 0;
	int ret;
	struct timespec mtime = current_time(inode);
	size_t count = iov_iter_count(from);

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u\n", file, pos, (unsigned)count);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_SHIFT,
					    (pos + count) >> PAGE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = CEPH_OSD_FLAG_ORDERSNAP |
		CEPH_OSD_FLAG_ONDISK |
		CEPH_OSD_FLAG_WRITE |
		CEPH_OSD_FLAG_ACK;

	while ((len = iov_iter_count(from)) > 0) {
		size_t left;
		int n;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &len, 0, 1,
					    CEPH_OSD_OP_WRITE, flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		/*
		 * write from beginning of first page,
		 * regardless of io alignment
		 */
		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		left = len;
		for (n = 0; n < num_pages; n++) {
			size_t plen = min_t(size_t, left, PAGE_SIZE);
			ret = copy_page_from_iter(pages[n], 0, plen, from);
			if (ret != plen) {
				ret = -EFAULT;
				break;
			}
			left -= ret;
		}

		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		/* get a second commit callback */
		req->r_unsafe_callback = ceph_sync_write_unsafe;
		req->r_inode = inode;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
						 false, true);

		req->r_mtime = mtime;
		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

out:
		ceph_osdc_put_request(req);
		if (ret == 0) {
			pos += len;
			written += len;

			if (pos > i_size_read(inode)) {
				check_caps = ceph_inode_set_size(inode, pos);
				if (check_caps)
					ceph_check_caps(ceph_inode(inode),
							CHECK_CAPS_AUTHONLY,
							NULL);
			}
		} else
			break;
	}

	if (ret != -EOLDSNAPC && written > 0) {
		ret = written;
		iocb->ki_pos = pos;
	}
	return ret;
}

/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	size_t len = iov_iter_count(to);
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *pinned_page = NULL;
	ssize_t ret;
	int want, got = 0;
	int retry_op = 0, read = 0;

again:
	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
	if (ret < 0)
		return ret;

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) ||
	    (fi->flags & CEPH_F_SYNC)) {

		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));

		if (ci->i_inline_version == CEPH_INLINE_NONE) {
			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
				ret = ceph_direct_read_write(iocb, to,
							     NULL, NULL);
				if (ret >= 0 && ret < len)
					retry_op = CHECK_EOF;
			} else {
				ret = ceph_sync_read(iocb, to, &retry_op);
			}
		} else {
			retry_op = READ_INLINE;
		}
	} else {
		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));
		current->journal_info = filp;
		ret = generic_file_read_iter(iocb, to);
		current->journal_info = NULL;
	}
	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	if (pinned_page) {
		put_page(pinned_page);
		pinned_page = NULL;
	}
	ceph_put_cap_refs(ci, got);
	if (retry_op > HAVE_RETRIED && ret >= 0) {
		int statret;
		struct page *page = NULL;
		loff_t i_size;
		if (retry_op == READ_INLINE) {
			page = __page_cache_alloc(GFP_KERNEL);
			if (!page)
				return -ENOMEM;
		}

		statret = __ceph_do_getattr(inode, page,
					    CEPH_STAT_CAP_INLINE_DATA, !!page);
		if (statret < 0) {
			if (page)
				__free_page(page);
			if (statret == -ENODATA) {
				BUG_ON(retry_op != READ_INLINE);
				goto again;
			}
			return statret;
		}

		i_size = i_size_read(inode);
		if (retry_op == READ_INLINE) {
			BUG_ON(ret > 0 || read > 0);
			if (iocb->ki_pos < i_size &&
			    iocb->ki_pos < PAGE_SIZE) {
				loff_t end = min_t(loff_t, i_size,
						   iocb->ki_pos + len);
				end = min_t(loff_t, end, PAGE_SIZE);
				if (statret < end)
					zero_user_segment(page, statret, end);
				ret = copy_page_to_iter(page,
						iocb->ki_pos & ~PAGE_MASK,
						end - iocb->ki_pos, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			if (iocb->ki_pos < i_size && read < len) {
				size_t zlen = min_t(size_t, len - read,
						    i_size - iocb->ki_pos);
				ret = iov_iter_zero(zlen, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			__free_pages(page, 0);
			return read;
		}

		/* hit EOF or hole? */
		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
		    ret < len) {
			dout("sync_read hit hole, ppos %lld < size %lld"
			     ", reading more\n", iocb->ki_pos, i_size);

			read += ret;
			len -= ret;
			retry_op = HAVE_RETRIED;
			goto again;
		}
	}

	if (ret >= 0)
		ret += read;

	return ret;
}

/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_sb_to_client(inode->i_sb)->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	ssize_t count, written = 0;
	int err, want, got;
	loff_t pos;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	inode_lock(inode);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	if (iocb->ki_flags & IOCB_APPEND) {
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (err < 0)
			goto out;
	}

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	pos = iocb->ki_pos;
	count = iov_iter_count(from);
	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		err = ceph_uninline_data(file, NULL);
		if (err < 0)
			goto out;
	}

retry_snap:
	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) {
		err = -ENOSPC;
		goto out;
	}

	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, count, i_size_read(inode));
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
	got = 0;
	err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
			    &got, NULL);
	if (err < 0)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC)) {
		struct ceph_snap_context *snapc;
		struct iov_iter data;
		inode_unlock(inode);

		spin_lock(&ci->i_ceph_lock);
		if (__ceph_have_pending_cap_snap(ci)) {
			struct ceph_cap_snap *capsnap =
				list_last_entry(&ci->i_cap_snaps,
						struct ceph_cap_snap,
						ci_item);
			snapc = ceph_get_snap_context(capsnap->context);
		} else {
			BUG_ON(!ci->i_head_snapc);
			snapc = ceph_get_snap_context(ci->i_head_snapc);
		}
		spin_unlock(&ci->i_ceph_lock);

		/* we might need to revert back to that point */
		data = *from;
		if (iocb->ki_flags & IOCB_DIRECT)
			written = ceph_direct_read_write(iocb, &data, snapc,
							 &prealloc_cf);
		else
			written = ceph_sync_write(iocb, &data, pos, snapc);
		if (written == -EOLDSNAPC) {
			dout("aio_write %p %llx.%llx %llu~%u "
			     "got EOLDSNAPC, retrying\n",
			     inode, ceph_vinop(inode),
			     pos, (unsigned)count);
			inode_lock(inode);
			goto retry_snap;
		}
		if (written > 0)
			iov_iter_advance(from, written);
		ceph_put_snap_context(snapc);
	} else {
		/*
		 * No need to acquire the i_truncate_mutex.  Because
		 * the MDS revokes Fwb caps before sending a truncate
		 * message to us, we cannot hold the Fwb cap while there
		 * is a pending vmtruncate, so write and vmtruncate
		 * cannot run at the same time.
		 */
		written = generic_perform_write(file, from, pos);
		if (likely(written >= 0))
			iocb->ki_pos = pos + written;
		inode_unlock(inode);
	}

	if (written >= 0) {
		int dirty;
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)count,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (written >= 0) {
		if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_NEARFULL))
			iocb->ki_flags |= IOCB_DSYNC;

		written = generic_write_sync(iocb, written);
	}

	goto out_unlocked;

out:
	inode_unlock(inode);
out_unlocked:
	ceph_free_cap_flush(prealloc_cf);
	current->backing_dev_info = NULL;
	return written ? written : err;
}
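
/*
 * Note on the NEARFULL check above: when the cluster is close to full we
 * force O_DSYNC-style behavior so the data is committed (or fails) now,
 * presumably to avoid buffering writes that may be impossible to write
 * back once the cluster fills up completely.
 */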

/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t i_size;
	loff_t ret;

	inode_lock(inode);

	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (ret < 0)
			goto out;
	}

	i_size = i_size_read(inode);
	switch (whence) {
	case SEEK_END:
		offset += i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it.
		 */
		if (offset == 0) {
			ret = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
		if (offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		break;
	case SEEK_HOLE:
		if (offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		offset = i_size;
		break;
	}

	ret = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out:
	inode_unlock(inode);
	return ret;
}
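
/*
 * SEEK_DATA/SEEK_HOLE are implemented naively above: every offset below
 * i_size counts as data, and the only "hole" reported is at EOF.
 */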

static inline void ceph_zero_partial_page(
	struct inode *inode, loff_t offset, unsigned size)
{
	struct page *page;
	pgoff_t index = offset >> PAGE_SHIFT;

	page = find_lock_page(inode->i_mapping, index);
	if (page) {
		wait_on_page_writeback(page);
		zero_user(page, offset & (PAGE_SIZE - 1), size);
		unlock_page(page);
		put_page(page);
	}
}

static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
				      loff_t length)
{
	loff_t nearly = round_up(offset, PAGE_SIZE);
	if (offset < nearly) {
		loff_t size = nearly - offset;
		if (length < size)
			size = length;
		ceph_zero_partial_page(inode, offset, size);
		offset += size;
		length -= size;
	}
	if (length >= PAGE_SIZE) {
		loff_t size = round_down(length, PAGE_SIZE);
		truncate_pagecache_range(inode, offset, offset + size - 1);
		offset += size;
		length -= size;
	}
	if (length)
		ceph_zero_partial_page(inode, offset, length);
}
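
/*
 * Example: zeroing an unaligned range zeroes the partial head page in
 * place, drops any fully covered pages from the page cache via truncate,
 * and then zeroes the partial tail page in place.
 */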

static int ceph_zero_partial_object(struct inode *inode,
				    loff_t offset, loff_t *length)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	int ret = 0;
	loff_t zero = 0;
	int op;

	if (!length) {
		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
		length = &zero;
	} else {
		op = CEPH_OSD_OP_ZERO;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode),
				    offset, length,
				    0, 1, op,
				    CEPH_OSD_FLAG_WRITE |
				    CEPH_OSD_FLAG_ONDISK,
				    NULL, 0, 0, false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	req->r_mtime = inode->i_mtime;
	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret) {
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
		if (ret == -ENOENT)
			ret = 0;
	}
	ceph_osdc_put_request(req);

out:
	return ret;
}

static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	s32 stripe_unit = ci->i_layout.stripe_unit;
	s32 stripe_count = ci->i_layout.stripe_count;
	s32 object_size = ci->i_layout.object_size;
	u64 object_set_size = object_size * stripe_count;
	u64 nearly, t;

	/* round offset up to next period boundary */
	nearly = offset + object_set_size - 1;
	t = nearly;
	nearly -= do_div(t, object_set_size);

	while (length && offset < nearly) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	while (length >= object_set_size) {
		int i;
		loff_t pos = offset;
		for (i = 0; i < stripe_count; ++i) {
			ret = ceph_zero_partial_object(inode, pos, NULL);
			if (ret < 0)
				return ret;
			pos += stripe_unit;
		}
		offset += object_set_size;
		length -= object_set_size;
	}
	while (length) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	return ret;
}
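
/*
 * A "period" here is object_set_size = object_size * stripe_count bytes:
 * once offset is period aligned, whole objects can be punched with
 * delete/truncate ops (length == NULL) instead of zeroing byte ranges.
 */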

static long ceph_fallocate(struct file *file, int mode,
			   loff_t offset, loff_t length)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	int want, got = 0;
	int dirty;
	int ret = 0;
	loff_t endoff = 0;
	loff_t size;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	inode_lock(inode);

	if (ceph_snap(inode) != CEPH_NOSNAP) {
		ret = -EROFS;
		goto unlock;
	}

	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) &&
	    !(mode & FALLOC_FL_PUNCH_HOLE)) {
		ret = -ENOSPC;
		goto unlock;
	}

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		ret = ceph_uninline_data(file, NULL);
		if (ret < 0)
			goto unlock;
	}

	size = i_size_read(inode);
	if (!(mode & FALLOC_FL_KEEP_SIZE))
		endoff = offset + length;

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
	if (ret < 0)
		goto unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset < size)
			ceph_zero_pagecache_range(inode, offset, length);
		ret = ceph_zero_objects(inode, offset, length);
	} else if (endoff > size) {
		truncate_pagecache_range(inode, size, -1);
		if (ceph_inode_set_size(inode, endoff))
			ceph_check_caps(ceph_inode(inode),
					CHECK_CAPS_AUTHONLY, NULL);
	}

	if (!ret) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	ceph_put_cap_refs(ci, got);
unlock:
	inode_unlock(inode);
	ceph_free_cap_flush(prealloc_cf);
	return ret;
}

const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read_iter = ceph_read_iter,
	.write_iter = ceph_write_iter,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.flock = ceph_flock,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl = ceph_ioctl,
	.fallocate = ceph_fallocate,
};