// SPDX-License-Identifier: GPL-2.0
/*
 * (C) 2001 Clemson University and The University of Chicago
 * Copyright 2018 Omnibond Systems, L.L.C.
 *
 * See COPYING in top-level directory.
 */

/*
 * Linux VFS inode operations.
 */

#include <linux/bvec.h>
#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-bufmap.h"

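/*
 * Write a single locked page back to the server.  If the page carries an
 * orangefs_write_range in page_private, only that byte range is written,
 * with the credentials recorded there; otherwise the whole page (clamped
 * to i_size) is written.  The caller is responsible for unlocking the
 * page and for ending writeback.
 */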
static int orangefs_writepage_locked(struct page *page,
	struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct orangefs_write_range *wr = NULL;
	struct iov_iter iter;
	struct bio_vec bv;
	size_t len, wlen;
	ssize_t ret;
	loff_t off;

	set_page_writeback(page);

	len = i_size_read(inode);
	if (PagePrivate(page)) {
		wr = (struct orangefs_write_range *)page_private(page);
		WARN_ON(wr->pos >= len);
		off = wr->pos;
		if (off + wr->len > len)
			wlen = len - off;
		else
			wlen = wr->len;
	} else {
		WARN_ON(1);
		off = page_offset(page);
		if (off + PAGE_SIZE > len)
			wlen = len - off;
		else
			wlen = PAGE_SIZE;
	}
	/* Should've been handled in orangefs_invalidatepage. */
	WARN_ON(off == len || off + wlen > len);

	bv.bv_page = page;
	bv.bv_len = wlen;
	bv.bv_offset = off % PAGE_SIZE;
	WARN_ON(wlen == 0);
	iov_iter_bvec(&iter, WRITE, &bv, 1, wlen);

	ret = wait_for_direct_io(ORANGEFS_IO_WRITE, inode, &off, &iter, wlen,
	    len, wr, NULL);
	if (ret < 0) {
		SetPageError(page);
		mapping_set_error(page->mapping, ret);
	} else {
		ret = 0;
	}
	if (wr) {
		kfree(wr);
		set_page_private(page, 0);
		ClearPagePrivate(page);
		put_page(page);
	}
	return ret;
}

static int orangefs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;
	ret = orangefs_writepage_locked(page, wbc);
	unlock_page(page);
	end_page_writeback(page);
	return ret;
}

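/*
 * State carried through write_cache_pages: a run of contiguous dirty
 * pages whose pending write ranges share the same uid and gid, so that
 * the whole run can be sent to the server as a single write.
 */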
struct orangefs_writepages {
	loff_t off;
	size_t len;
	kuid_t uid;
	kgid_t gid;
	int maxpages;
	int npages;
	struct page **pages;
	struct bio_vec *bv;
};

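/*
 * Issue one server write covering the accumulated run.  On failure every
 * page in the run is marked in error; in either case the pending write
 * ranges are freed and the pages are unlocked with writeback ended.
 */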
static int orangefs_writepages_work(struct orangefs_writepages *ow,
	struct writeback_control *wbc)
{
	struct inode *inode = ow->pages[0]->mapping->host;
	struct orangefs_write_range *wrp, wr;
	struct iov_iter iter;
	ssize_t ret;
	size_t len;
	loff_t off;
	int i;

	len = i_size_read(inode);

	for (i = 0; i < ow->npages; i++) {
		set_page_writeback(ow->pages[i]);
		ow->bv[i].bv_page = ow->pages[i];
		ow->bv[i].bv_len = min(page_offset(ow->pages[i]) + PAGE_SIZE,
		    ow->off + ow->len) -
		    max(ow->off, page_offset(ow->pages[i]));
		if (i == 0)
			ow->bv[i].bv_offset = ow->off -
			    page_offset(ow->pages[i]);
		else
			ow->bv[i].bv_offset = 0;
	}
	iov_iter_bvec(&iter, WRITE, ow->bv, ow->npages, ow->len);

	WARN_ON(ow->off >= len);
	if (ow->off + ow->len > len)
		ow->len = len - ow->off;

	off = ow->off;
	wr.uid = ow->uid;
	wr.gid = ow->gid;
	ret = wait_for_direct_io(ORANGEFS_IO_WRITE, inode, &off, &iter, ow->len,
	    0, &wr, NULL);
	if (ret < 0) {
		for (i = 0; i < ow->npages; i++) {
			SetPageError(ow->pages[i]);
			mapping_set_error(ow->pages[i]->mapping, ret);
			if (PagePrivate(ow->pages[i])) {
				wrp = (struct orangefs_write_range *)
				    page_private(ow->pages[i]);
				ClearPagePrivate(ow->pages[i]);
				put_page(ow->pages[i]);
				kfree(wrp);
			}
			end_page_writeback(ow->pages[i]);
			unlock_page(ow->pages[i]);
		}
	} else {
		ret = 0;
		for (i = 0; i < ow->npages; i++) {
			if (PagePrivate(ow->pages[i])) {
				wrp = (struct orangefs_write_range *)
				    page_private(ow->pages[i]);
				ClearPagePrivate(ow->pages[i]);
				put_page(ow->pages[i]);
				kfree(wrp);
			}
			end_page_writeback(ow->pages[i]);
			unlock_page(ow->pages[i]);
		}
	}
	return ret;
}

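/*
 * Called by write_cache_pages for each dirty page.  A page either joins
 * the current run (same uid/gid and its write range is contiguous with
 * the run) or it forces the run to be flushed.  A local ret of -1 means
 * the page could not be merged; it is then written out on its own via
 * orangefs_writepage_locked.
 */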
static int orangefs_writepages_callback(struct page *page,
	struct writeback_control *wbc, void *data)
{
	struct orangefs_writepages *ow = data;
	struct orangefs_write_range *wr;
	int ret;

	if (!PagePrivate(page)) {
		unlock_page(page);
		/* It's not private so there's nothing to write, right? */
		printk("writepages_callback not private!\n");
		BUG();
		return 0;
	}
	wr = (struct orangefs_write_range *)page_private(page);

	ret = -1;
	if (ow->npages == 0) {
		ow->off = wr->pos;
		ow->len = wr->len;
		ow->uid = wr->uid;
		ow->gid = wr->gid;
		ow->pages[ow->npages++] = page;
		ret = 0;
		goto done;
	}
	if (!uid_eq(ow->uid, wr->uid) || !gid_eq(ow->gid, wr->gid)) {
		orangefs_writepages_work(ow, wbc);
		ow->npages = 0;
		ret = -1;
		goto done;
	}
	if (ow->off + ow->len == wr->pos) {
		ow->len += wr->len;
		ow->pages[ow->npages++] = page;
		ret = 0;
		goto done;
	}
done:
	if (ret == -1) {
		if (ow->npages) {
			orangefs_writepages_work(ow, wbc);
			ow->npages = 0;
		}
		ret = orangefs_writepage_locked(page, wbc);
		mapping_set_error(page->mapping, ret);
		unlock_page(page);
		end_page_writeback(page);
	} else {
		if (ow->npages == ow->maxpages) {
			orangefs_writepages_work(ow, wbc);
			ow->npages = 0;
		}
	}
	return ret;
}

static int orangefs_writepages(struct address_space *mapping,
	struct writeback_control *wbc)
{
	struct orangefs_writepages *ow;
	struct blk_plug plug;
	int ret;
	ow = kzalloc(sizeof(struct orangefs_writepages), GFP_KERNEL);
	if (!ow)
		return -ENOMEM;
	ow->maxpages = orangefs_bufmap_size_query() / PAGE_SIZE;
	ow->pages = kcalloc(ow->maxpages, sizeof(struct page *), GFP_KERNEL);
	if (!ow->pages) {
		kfree(ow);
		return -ENOMEM;
	}
	ow->bv = kcalloc(ow->maxpages, sizeof(struct bio_vec), GFP_KERNEL);
	if (!ow->bv) {
		kfree(ow->pages);
		kfree(ow);
		return -ENOMEM;
	}
	blk_start_plug(&plug);
	ret = write_cache_pages(mapping, wbc, orangefs_writepages_callback, ow);
	if (ow->npages)
		ret = orangefs_writepages_work(ow, wbc);
	blk_finish_plug(&plug);
	kfree(ow->pages);
	kfree(ow->bv);
	kfree(ow);
	return ret;
}

static int orangefs_launder_page(struct page *);

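/*
 * Read one page from the server.  The transfer lands in an orangefs
 * shared memory (bufmap) slot; after the target page is filled, any
 * extra data already sitting in the slot is copied into following pages
 * of the mapping so it does not have to be fetched again.
 */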
static int orangefs_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct iov_iter iter;
	struct bio_vec bv;
	ssize_t ret;
	loff_t off; /* offset into this page */
	pgoff_t index; /* which page */
	struct page *next_page;
	char *kaddr;
	struct orangefs_read_options *ro = file->private_data;
	loff_t read_size;
	loff_t roundedup;
	int buffer_index = -1; /* orangefs shared memory slot */
	int slot_index; /* index into slot */
	int remaining;

	/*
	 * If they set some minuscule size for "count" in read(2)
	 * (for example) then let's try to read a page, or the whole file
	 * if it is smaller than a page.  Once "count" goes over a page
	 * then let's round up to the lowest page size multiple that is
	 * greater than or equal to "count" and do that much orangefs IO
	 * and try to fill as many pages as we can from it.
	 *
	 * "count" should be represented in ro->blksiz.
	 *
	 * inode->i_size = file size.
	 */
	if (ro) {
		if (ro->blksiz < PAGE_SIZE) {
			if (inode->i_size < PAGE_SIZE)
				read_size = inode->i_size;
			else
				read_size = PAGE_SIZE;
		} else {
			roundedup = ((PAGE_SIZE - 1) & ro->blksiz) ?
			    ((ro->blksiz + PAGE_SIZE) & ~(PAGE_SIZE - 1)) :
			    ro->blksiz;
			if (roundedup > inode->i_size)
				read_size = inode->i_size;
			else
				read_size = roundedup;
		}
	} else {
		read_size = PAGE_SIZE;
	}
	if (!read_size)
		read_size = PAGE_SIZE;

	if (PageDirty(page))
		orangefs_launder_page(page);

	off = page_offset(page);
	index = off >> PAGE_SHIFT;
	bv.bv_page = page;
	bv.bv_len = PAGE_SIZE;
	bv.bv_offset = 0;
	iov_iter_bvec(&iter, READ, &bv, 1, PAGE_SIZE);

	ret = wait_for_direct_io(ORANGEFS_IO_READ, inode, &off, &iter,
	    read_size, inode->i_size, NULL, &buffer_index);
	remaining = ret;
	/* this will only zero remaining unread portions of the page data */
	iov_iter_zero(~0U, &iter);
	/* takes care of potential aliasing */
	flush_dcache_page(page);
	if (ret < 0) {
		SetPageError(page);
		unlock_page(page);
		goto out;
	} else {
		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);
		ret = 0;
	}
	/* unlock the page after the ->readpage() routine completes */
	unlock_page(page);

	if (remaining > PAGE_SIZE) {
		slot_index = 0;
		while ((remaining - PAGE_SIZE) >= PAGE_SIZE) {
			remaining -= PAGE_SIZE;
			/*
			 * It is an optimization to try and fill more than one
			 * page... by now we've already gotten the single
			 * page we were after, if stuff doesn't seem to
			 * be going our way at this point just return
			 * and hope for the best.
			 *
			 * If we look for pages and they're already there is
			 * one reason to give up, and if they're not there
			 * and we can't create them is another reason.
			 */

			index++;
			slot_index++;
			next_page = find_get_page(inode->i_mapping, index);
			if (next_page) {
				gossip_debug(GOSSIP_FILE_DEBUG,
				    "%s: found next page, quitting\n",
				    __func__);
				put_page(next_page);
				goto out;
			}
			next_page = find_or_create_page(inode->i_mapping,
			    index,
			    GFP_KERNEL);
			/*
			 * I've never hit this, leave it as a printk for
			 * now so it will be obvious.
			 */
			if (!next_page) {
				printk("%s: can't create next page, quitting\n",
				    __func__);
				goto out;
			}
			kaddr = kmap_atomic(next_page);
			orangefs_bufmap_page_fill(kaddr,
			    buffer_index,
			    slot_index);
			kunmap_atomic(kaddr);
			SetPageUptodate(next_page);
			unlock_page(next_page);
			put_page(next_page);
		}
	}

out:
	if (buffer_index != -1)
		orangefs_bufmap_put(buffer_index);
	return ret;
}

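/*
 * Record the byte range and credentials of a buffered write by attaching
 * (or extending) an orangefs_write_range in page_private.  Writeback
 * later uses this to write only the bytes that were actually dirtied, as
 * the user that dirtied them.  If an existing range cannot simply be
 * extended, the page is laundered first.
 */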
static int orangefs_write_begin(struct file *file,
	struct address_space *mapping,
	loff_t pos, unsigned len, unsigned flags, struct page **pagep,
	void **fsdata)
{
	struct orangefs_write_range *wr;
	struct page *page;
	pgoff_t index;
	int ret;

	index = pos >> PAGE_SHIFT;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	*pagep = page;

	if (PageDirty(page) && !PagePrivate(page)) {
		/*
		 * Should be impossible.  If it happens, launder the page
		 * since we don't know what's dirty.  This will WARN in
		 * orangefs_writepage_locked.
		 */
		ret = orangefs_launder_page(page);
		if (ret)
			return ret;
	}
	if (PagePrivate(page)) {
		struct orangefs_write_range *wr;
		wr = (struct orangefs_write_range *)page_private(page);
		if (wr->pos + wr->len == pos &&
		    uid_eq(wr->uid, current_fsuid()) &&
		    gid_eq(wr->gid, current_fsgid())) {
			wr->len += len;
			goto okay;
		} else {
			ret = orangefs_launder_page(page);
			if (ret)
				return ret;
		}
	}

	wr = kmalloc(sizeof *wr, GFP_KERNEL);
	if (!wr)
		return -ENOMEM;

	wr->pos = pos;
	wr->len = len;
	wr->uid = current_fsuid();
	wr->gid = current_fsgid();
	SetPagePrivate(page);
	set_page_private(page, (unsigned long)wr);
	get_page(page);
okay:
	return 0;
}

static int orangefs_write_end(struct file *file, struct address_space *mapping,
	loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	loff_t last_pos = pos + copied;

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold the i_mutex.
	 */
	if (last_pos > inode->i_size)
		i_size_write(inode, last_pos);

	/* zero the stale part of the page if we did a short copy */
	if (!PageUptodate(page)) {
		unsigned from = pos & (PAGE_SIZE - 1);
		if (copied < len) {
			zero_user(page, from + copied, len - copied);
		}
		/* Set fully written pages uptodate. */
		if (pos == page_offset(page) &&
		    (len == PAGE_SIZE || pos + len == inode->i_size)) {
			zero_user_segment(page, from + copied, PAGE_SIZE);
			SetPageUptodate(page);
		}
	}

	set_page_dirty(page);
	unlock_page(page);
	put_page(page);

	mark_inode_dirty_sync(file_inode(file));
	return copied;
}

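/*
 * When part of a page is invalidated, trim the recorded write range so
 * that only the bytes which remain valid are written back, or drop the
 * range entirely when the invalidation covers all of it.
 */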
static void orangefs_invalidatepage(struct page *page,
	unsigned int offset,
	unsigned int length)
{
	struct orangefs_write_range *wr;
	wr = (struct orangefs_write_range *)page_private(page);

	if (offset == 0 && length == PAGE_SIZE) {
		kfree((struct orangefs_write_range *)page_private(page));
		set_page_private(page, 0);
		ClearPagePrivate(page);
		put_page(page);
		return;
	/* write range entirely within invalidate range (or equal) */
	} else if (page_offset(page) + offset <= wr->pos &&
	    wr->pos + wr->len <= page_offset(page) + offset + length) {
		kfree((struct orangefs_write_range *)page_private(page));
		set_page_private(page, 0);
		ClearPagePrivate(page);
		put_page(page);
		/* XXX is this right? only caller in fs */
		cancel_dirty_page(page);
		return;
	/* invalidate range chops off end of write range */
	} else if (wr->pos < page_offset(page) + offset &&
	    wr->pos + wr->len <= page_offset(page) + offset + length &&
	    page_offset(page) + offset < wr->pos + wr->len) {
		size_t x;
		x = wr->pos + wr->len - (page_offset(page) + offset);
		WARN_ON(x > wr->len);
		wr->len -= x;
		wr->uid = current_fsuid();
		wr->gid = current_fsgid();
	/* invalidate range chops off beginning of write range */
	} else if (page_offset(page) + offset <= wr->pos &&
	    page_offset(page) + offset + length < wr->pos + wr->len &&
	    wr->pos < page_offset(page) + offset + length) {
		size_t x;
		x = page_offset(page) + offset + length - wr->pos;
		WARN_ON(x > wr->len);
		wr->pos += x;
		wr->len -= x;
		wr->uid = current_fsuid();
		wr->gid = current_fsgid();
	/* invalidate range entirely within write range (punch hole) */
	} else if (wr->pos < page_offset(page) + offset &&
	    page_offset(page) + offset + length < wr->pos + wr->len) {
		/* XXX what do we do here... should not WARN_ON */
		WARN_ON(1);
		/* punch hole */
		/*
		 * should we just ignore this and write it out anyway?
		 * it hardly makes sense
		 */
		return;
	/* non-overlapping ranges */
	} else {
		/* WARN if they do overlap */
		if (!((page_offset(page) + offset + length <= wr->pos) ^
		    (wr->pos + wr->len <= page_offset(page) + offset))) {
			WARN_ON(1);
			printk("invalidate range offset %llu length %u\n",
			    page_offset(page) + offset, length);
			printk("write range offset %llu length %zu\n",
			    wr->pos, wr->len);
		}
		return;
	}

	/*
	 * Above there are returns where wr is freed or where we WARN.
	 * Thus the following runs if wr was modified above.
	 */

	orangefs_launder_page(page);
}

static int orangefs_releasepage(struct page *page, gfp_t foo)
{
	return !PagePrivate(page);
}

static void orangefs_freepage(struct page *page)
{
	if (PagePrivate(page)) {
		kfree((struct orangefs_write_range *)page_private(page));
		set_page_private(page, 0);
		ClearPagePrivate(page);
		put_page(page);
	}
}

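/*
 * Synchronously write the page (WB_SYNC_ALL) so that a pending write
 * range can be safely discarded or replaced, for example before a read
 * of a dirty page or before a write by a different user.
 */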
static int orangefs_launder_page(struct page *page)
{
	int r = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
	};
	wait_on_page_writeback(page);
	if (clear_page_dirty_for_io(page)) {
		r = orangefs_writepage_locked(page, &wbc);
		end_page_writeback(page);
	}
	return r;
}

static ssize_t orangefs_direct_IO(struct kiocb *iocb,
	struct iov_iter *iter)
{
	/*
	 * Comment from original do_readv_writev:
	 * Common entry point for read/write/readv/writev
	 * This function will dispatch it to either the direct I/O
	 * or buffered I/O path depending on the mount options and/or
	 * augmented/extended metadata attached to the file.
	 * Note: File extended attributes override any mount options.
	 */
	struct file *file = iocb->ki_filp;
	loff_t pos = iocb->ki_pos;
	enum ORANGEFS_io_type type = iov_iter_rw(iter) == WRITE ?
	    ORANGEFS_IO_WRITE : ORANGEFS_IO_READ;
	loff_t *offset = &pos;
	struct inode *inode = file->f_mapping->host;
	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
	struct orangefs_khandle *handle = &orangefs_inode->refn.khandle;
	size_t count = iov_iter_count(iter);
	ssize_t total_count = 0;
	ssize_t ret = -EINVAL;
	int i = 0;

	gossip_debug(GOSSIP_FILE_DEBUG,
	    "%s-BEGIN(%pU): count(%d) after estimate_max_iovecs.\n",
	    __func__,
	    handle,
	    (int)count);

	if (type == ORANGEFS_IO_WRITE) {
		gossip_debug(GOSSIP_FILE_DEBUG,
		    "%s(%pU): proceeding with offset : %llu, "
		    "size %d\n",
		    __func__,
		    handle,
		    llu(*offset),
		    (int)count);
	}

	if (count == 0) {
		ret = 0;
		goto out;
	}

	while (iov_iter_count(iter)) {
		size_t each_count = iov_iter_count(iter);
		size_t amt_complete;
		i++;

		/* how much to transfer in this loop iteration */
		if (each_count > orangefs_bufmap_size_query())
			each_count = orangefs_bufmap_size_query();

		gossip_debug(GOSSIP_FILE_DEBUG,
		    "%s(%pU): size of each_count(%d)\n",
		    __func__,
		    handle,
		    (int)each_count);
		gossip_debug(GOSSIP_FILE_DEBUG,
		    "%s(%pU): BEFORE wait_for_io: offset is %d\n",
		    __func__,
		    handle,
		    (int)*offset);

		ret = wait_for_direct_io(type, inode, offset, iter,
		    each_count, 0, NULL, NULL);
		gossip_debug(GOSSIP_FILE_DEBUG,
		    "%s(%pU): return from wait_for_io:%d\n",
		    __func__,
		    handle,
		    (int)ret);

		if (ret < 0)
			goto out;

		*offset += ret;
		total_count += ret;
		amt_complete = ret;

		gossip_debug(GOSSIP_FILE_DEBUG,
		    "%s(%pU): AFTER wait_for_io: offset is %d\n",
		    __func__,
		    handle,
		    (int)*offset);

		/*
		 * if we got a short I/O operation,
		 * fall out and return what we got so far
		 */
		if (amt_complete < each_count)
			break;
	} /* end while */

out:
	if (total_count > 0)
		ret = total_count;
	if (ret > 0) {
		if (type == ORANGEFS_IO_READ) {
			file_accessed(file);
		} else {
			file_update_time(file);
			if (*offset > i_size_read(inode))
				i_size_write(inode, *offset);
		}
	}

	gossip_debug(GOSSIP_FILE_DEBUG,
	    "%s(%pU): Value(%d) returned.\n",
	    __func__,
	    handle,
	    (int)ret);

	return ret;
}

/** ORANGEFS2 implementation of address space operations */
static const struct address_space_operations orangefs_address_operations = {
	.writepage = orangefs_writepage,
	.readpage = orangefs_readpage,
	.writepages = orangefs_writepages,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.write_begin = orangefs_write_begin,
	.write_end = orangefs_write_end,
	.invalidatepage = orangefs_invalidatepage,
	.releasepage = orangefs_releasepage,
	.freepage = orangefs_freepage,
	.launder_page = orangefs_launder_page,
	.direct_IO = orangefs_direct_IO,
};

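/*
 * A writable mapping of the file is about to be written through: mark
 * the whole page as a pending write range owned by the faulting user,
 * laundering first if the page already carries a range from a different
 * user.
 */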
vm_fault_t orangefs_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
	unsigned long *bitlock = &orangefs_inode->bitlock;
	vm_fault_t ret;
	struct orangefs_write_range *wr;

	sb_start_pagefault(inode->i_sb);

	if (wait_on_bit(bitlock, 1, TASK_KILLABLE)) {
		ret = VM_FAULT_RETRY;
		goto out;
	}

	lock_page(page);
	if (PageDirty(page) && !PagePrivate(page)) {
		/*
		 * Should be impossible.  If it happens, launder the page
		 * since we don't know what's dirty.  This will WARN in
		 * orangefs_writepage_locked.
		 */
		if (orangefs_launder_page(page)) {
			ret = VM_FAULT_LOCKED|VM_FAULT_RETRY;
			goto out;
		}
	}
	if (PagePrivate(page)) {
		wr = (struct orangefs_write_range *)page_private(page);
		if (uid_eq(wr->uid, current_fsuid()) &&
		    gid_eq(wr->gid, current_fsgid())) {
			wr->pos = page_offset(page);
			wr->len = PAGE_SIZE;
			goto okay;
		} else {
			if (orangefs_launder_page(page)) {
				ret = VM_FAULT_LOCKED|VM_FAULT_RETRY;
				goto out;
			}
		}
	}
	wr = kmalloc(sizeof *wr, GFP_KERNEL);
	if (!wr) {
		ret = VM_FAULT_LOCKED|VM_FAULT_RETRY;
		goto out;
	}
	wr->pos = page_offset(page);
	wr->len = PAGE_SIZE;
	wr->uid = current_fsuid();
	wr->gid = current_fsgid();
	SetPagePrivate(page);
	set_page_private(page, (unsigned long)wr);
	get_page(page);
okay:

	file_update_time(vmf->vma->vm_file);
	if (page->mapping != inode->i_mapping) {
		unlock_page(page);
		ret = VM_FAULT_LOCKED|VM_FAULT_NOPAGE;
		goto out;
	}

	/*
	 * We mark the page dirty already here so that when freeze is in
	 * progress, we are guaranteed that writeback during freezing will
	 * see the dirty page and writeprotect it again.
	 */
	set_page_dirty(page);
	wait_for_stable_page(page);
	ret = VM_FAULT_LOCKED;
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}

static int orangefs_setattr_size(struct inode *inode, struct iattr *iattr)
{
	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
	struct orangefs_kernel_op_s *new_op;
	loff_t orig_size;
	int ret = -EINVAL;

	gossip_debug(GOSSIP_INODE_DEBUG,
	    "%s: %pU: Handle is %pU | fs_id %d | size is %llu\n",
	    __func__,
	    get_khandle_from_ino(inode),
	    &orangefs_inode->refn.khandle,
	    orangefs_inode->refn.fs_id,
	    iattr->ia_size);

	/* Ensure that we have an up to date size, so we know if it changed. */
	ret = orangefs_inode_getattr(inode, ORANGEFS_GETATTR_SIZE);
	if (ret == -ESTALE)
		ret = -EIO;
	if (ret) {
		gossip_err("%s: orangefs_inode_getattr failed, ret:%d:.\n",
		    __func__, ret);
		return ret;
	}
	orig_size = i_size_read(inode);

	/* This is truncate_setsize in a different order. */
	truncate_pagecache(inode, iattr->ia_size);
	i_size_write(inode, iattr->ia_size);
	if (iattr->ia_size > orig_size)
		pagecache_isize_extended(inode, orig_size, iattr->ia_size);

	new_op = op_alloc(ORANGEFS_VFS_OP_TRUNCATE);
	if (!new_op)
		return -ENOMEM;

	new_op->upcall.req.truncate.refn = orangefs_inode->refn;
	new_op->upcall.req.truncate.size = (__s64) iattr->ia_size;

	ret = service_operation(new_op,
	    __func__,
	    get_interruptible_flag(inode));

	/*
	 * the truncate has no downcall members to retrieve, but
	 * the status value tells us if it went through ok or not
	 */
	gossip_debug(GOSSIP_INODE_DEBUG, "%s: ret:%d:\n", __func__, ret);

	op_release(new_op);

	if (ret != 0)
		return ret;

	if (orig_size != i_size_read(inode))
		iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME;

	return ret;
}

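/*
 * Apply attribute changes to the in-core inode and record which
 * attributes are pending (attr_valid) along with the credentials that
 * set them; the update is pushed to the server later when the inode is
 * written back.  If attributes set by a different user are still
 * pending, they are flushed first with write_inode_now.
 */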
int __orangefs_setattr(struct inode *inode, struct iattr *iattr)
{
	int ret;

	if (iattr->ia_valid & ATTR_MODE) {
		if (iattr->ia_mode & (S_ISVTX)) {
			if (is_root_handle(inode)) {
				/*
				 * allow sticky bit to be set on root (since
				 * it shows up that way by default anyhow),
				 * but don't show it to the server
				 */
				iattr->ia_mode -= S_ISVTX;
			} else {
				gossip_debug(GOSSIP_UTILS_DEBUG,
				    "User attempted to set sticky bit on non-root directory; returning EINVAL.\n");
				ret = -EINVAL;
				goto out;
			}
		}
		if (iattr->ia_mode & (S_ISUID)) {
			gossip_debug(GOSSIP_UTILS_DEBUG,
			    "Attempting to set setuid bit (not supported); returning EINVAL.\n");
			ret = -EINVAL;
			goto out;
		}
	}

	if (iattr->ia_valid & ATTR_SIZE) {
		ret = orangefs_setattr_size(inode, iattr);
		if (ret)
			goto out;
	}

again:
	spin_lock(&inode->i_lock);
	if (ORANGEFS_I(inode)->attr_valid) {
		if (uid_eq(ORANGEFS_I(inode)->attr_uid, current_fsuid()) &&
		    gid_eq(ORANGEFS_I(inode)->attr_gid, current_fsgid())) {
			ORANGEFS_I(inode)->attr_valid = iattr->ia_valid;
		} else {
			spin_unlock(&inode->i_lock);
			write_inode_now(inode, 1);
			goto again;
		}
	} else {
		ORANGEFS_I(inode)->attr_valid = iattr->ia_valid;
		ORANGEFS_I(inode)->attr_uid = current_fsuid();
		ORANGEFS_I(inode)->attr_gid = current_fsgid();
	}
	setattr_copy(inode, iattr);
	spin_unlock(&inode->i_lock);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE)
		/* change mode on a file that has ACLs */
		ret = posix_acl_chmod(inode, inode->i_mode);

	ret = 0;
out:
	return ret;
}

/*
 * Change attributes of an object referenced by dentry.
 */
int orangefs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	int ret;
	gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_setattr: called on %pd\n",
	    dentry);
	ret = setattr_prepare(dentry, iattr);
	if (ret)
		goto out;
	ret = __orangefs_setattr(d_inode(dentry), iattr);
	sync_inode_metadata(d_inode(dentry), 1);
out:
	gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_setattr: returning %d\n",
	    ret);
	return ret;
}

/*
 * Obtain attributes of an object given a dentry
 */
int orangefs_getattr(const struct path *path, struct kstat *stat,
	u32 request_mask, unsigned int flags)
{
	int ret;
	struct inode *inode = path->dentry->d_inode;

	gossip_debug(GOSSIP_INODE_DEBUG,
	    "orangefs_getattr: called on %pd mask %u\n",
	    path->dentry, request_mask);

	ret = orangefs_inode_getattr(inode,
	    request_mask & STATX_SIZE ? ORANGEFS_GETATTR_SIZE : 0);
	if (ret == 0) {
		generic_fillattr(inode, stat);

		/* i_size is only refreshed when requested; drop it from the
		 * result otherwise */
		if (!(request_mask & STATX_SIZE))
			stat->result_mask &= ~STATX_SIZE;

		stat->attributes_mask = STATX_ATTR_IMMUTABLE |
		    STATX_ATTR_APPEND;
		if (inode->i_flags & S_IMMUTABLE)
			stat->attributes |= STATX_ATTR_IMMUTABLE;
		if (inode->i_flags & S_APPEND)
			stat->attributes |= STATX_ATTR_APPEND;
	}
	return ret;
}

int orangefs_permission(struct inode *inode, int mask)
{
	int ret;

	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;

	gossip_debug(GOSSIP_INODE_DEBUG, "%s: refreshing\n", __func__);

	/* Make sure the permission (and other common attrs) are up to date. */
	ret = orangefs_inode_getattr(inode, 0);
	if (ret < 0)
		return ret;

	return generic_permission(inode, mask);
}

int orangefs_update_time(struct inode *inode, struct timespec64 *time, int flags)
{
	struct iattr iattr;
	gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_update_time: %pU\n",
	    get_khandle_from_ino(inode));
	generic_update_time(inode, time, flags);
	memset(&iattr, 0, sizeof iattr);
	if (flags & S_ATIME)
		iattr.ia_valid |= ATTR_ATIME;
	if (flags & S_CTIME)
		iattr.ia_valid |= ATTR_CTIME;
	if (flags & S_MTIME)
		iattr.ia_valid |= ATTR_MTIME;
	return __orangefs_setattr(inode, &iattr);
}

/* ORANGEFS2 implementation of VFS inode operations for files */
static const struct inode_operations orangefs_file_inode_operations = {
	.get_acl = orangefs_get_acl,
	.set_acl = orangefs_set_acl,
	.setattr = orangefs_setattr,
	.getattr = orangefs_getattr,
	.listxattr = orangefs_listxattr,
	.permission = orangefs_permission,
	.update_time = orangefs_update_time,
};

static int orangefs_init_iops(struct inode *inode)
{
	inode->i_mapping->a_ops = &orangefs_address_operations;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &orangefs_file_inode_operations;
		inode->i_fop = &orangefs_file_operations;
		break;
	case S_IFLNK:
		inode->i_op = &orangefs_symlink_inode_operations;
		break;
	case S_IFDIR:
		inode->i_op = &orangefs_dir_inode_operations;
		inode->i_fop = &orangefs_dir_operations;
		break;
	default:
		gossip_debug(GOSSIP_INODE_DEBUG,
		    "%s: unsupported mode\n",
		    __func__);
		return -EINVAL;
	}

	return 0;
}

/*
 * Given an ORANGEFS object identifier (fsid, handle), convert it into
 * an ino_t type that will be used as a hash-index from where the handle will
 * be searched for in the VFS hash table of inodes.
 */
static inline ino_t orangefs_handle_hash(struct orangefs_object_kref *ref)
{
	if (!ref)
		return 0;
	return orangefs_khandle_to_ino(&(ref->khandle));
}

/*
 * Called to set up an inode from iget5_locked.
 */
static int orangefs_set_inode(struct inode *inode, void *data)
{
	struct orangefs_object_kref *ref = (struct orangefs_object_kref *) data;
	ORANGEFS_I(inode)->refn.fs_id = ref->fs_id;
	ORANGEFS_I(inode)->refn.khandle = ref->khandle;
	ORANGEFS_I(inode)->attr_valid = 0;
	hash_init(ORANGEFS_I(inode)->xattr_cache);
	ORANGEFS_I(inode)->mapping_time = jiffies - 1;
	ORANGEFS_I(inode)->bitlock = 0;
	return 0;
}

/*
 * Called to determine if handles match.
 */
static int orangefs_test_inode(struct inode *inode, void *data)
{
	struct orangefs_object_kref *ref = (struct orangefs_object_kref *) data;
	struct orangefs_inode_s *orangefs_inode = NULL;

	orangefs_inode = ORANGEFS_I(inode);
	/* test handles and fs_ids... */
	return (!ORANGEFS_khandle_cmp(&(orangefs_inode->refn.khandle),
	    &(ref->khandle)) &&
	    orangefs_inode->refn.fs_id == ref->fs_id);
}

/*
 * Front-end to lookup the inode-cache maintained by the VFS using the ORANGEFS
 * file handle.
 *
 * @sb: the file system super block instance.
 * @ref: The ORANGEFS object for which we are trying to locate an inode.
 */
struct inode *orangefs_iget(struct super_block *sb,
	struct orangefs_object_kref *ref)
{
	struct inode *inode = NULL;
	unsigned long hash;
	int error;

	hash = orangefs_handle_hash(ref);
	inode = iget5_locked(sb,
	    hash,
	    orangefs_test_inode,
	    orangefs_set_inode,
	    ref);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW))
		return inode;

	error = orangefs_inode_getattr(inode, ORANGEFS_GETATTR_NEW);
	if (error) {
		iget_failed(inode);
		return ERR_PTR(error);
	}

	inode->i_ino = hash;	/* needed for stat etc */
	orangefs_init_iops(inode);
	unlock_new_inode(inode);

	gossip_debug(GOSSIP_INODE_DEBUG,
	    "iget handle %pU, fsid %d hash %ld i_ino %lu\n",
	    &ref->khandle,
	    ref->fs_id,
	    hash,
	    inode->i_ino);

	return inode;
}

/*
 * Allocate an inode for a newly created file and insert it into the inode
 * hash.
 */
struct inode *orangefs_new_inode(struct super_block *sb, struct inode *dir,
	int mode, dev_t dev, struct orangefs_object_kref *ref)
{
	unsigned long hash = orangefs_handle_hash(ref);
	struct inode *inode;
	int error;

	gossip_debug(GOSSIP_INODE_DEBUG,
	    "%s:(sb is %p | MAJOR(dev)=%u | MINOR(dev)=%u mode=%o)\n",
	    __func__,
	    sb,
	    MAJOR(dev),
	    MINOR(dev),
	    mode);

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	orangefs_set_inode(inode, ref);
	inode->i_ino = hash;	/* needed for stat etc */

	error = orangefs_inode_getattr(inode, ORANGEFS_GETATTR_NEW);
	if (error)
		goto out_iput;

	orangefs_init_iops(inode);
	inode->i_rdev = dev;

	error = insert_inode_locked4(inode, hash, orangefs_test_inode, ref);
	if (error < 0)
		goto out_iput;

	gossip_debug(GOSSIP_INODE_DEBUG,
	    "Initializing ACL's for inode %pU\n",
	    get_khandle_from_ino(inode));
	orangefs_init_acl(inode, dir);
	return inode;

out_iput:
	iput(inode);
	return ERR_PTR(error);
}