// SPDX-License-Identifier: GPL-2.0-or-later
/* Storage object read/write
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "internal.h"

/*
 * detect wake up events generated by the unlocking of pages in which we're
 * interested
 * - we use this to detect read completion of backing pages
 * - the caller holds the waitqueue lock
 */
static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
				  int sync, void *_key)
{
	struct cachefiles_one_read *monitor =
		container_of(wait, struct cachefiles_one_read, monitor);
	struct cachefiles_object *object;
	struct fscache_retrieval *op = monitor->op;
	struct wait_page_key *key = _key;
	struct page *page = wait->private;

	ASSERT(key);

	_enter("{%lu},%u,%d,{%p,%u}",
	       monitor->netfs_page->index, mode, sync,
	       key->page, key->bit_nr);

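	/* ignore wakeups for pages other than the one we're monitoring and
	 * for bits other than PG_locked
	 */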
	if (key->page != page || key->bit_nr != PG_locked)
		return 0;

	_debug("--- monitor %p %lx ---", page, page->flags);

	if (!PageUptodate(page) && !PageError(page)) {
		/* unlocked, but not uptodate and not erroneous? */
		_debug("page probably truncated");
	}

	/* remove from the waitqueue */
	list_del(&wait->entry);

	/* move onto the action list and queue for FS-Cache thread pool */
	ASSERT(op);

	/* We need to temporarily bump the usage count as we don't own a ref
	 * here otherwise cachefiles_read_copier() may free the op between the
	 * monitor being enqueued on the op->to_do list and the op getting
	 * enqueued on the work queue.
	 */
	fscache_get_retrieval(op);

	object = container_of(op->op.object, struct cachefiles_object, fscache);
	spin_lock(&object->work_lock);
	list_add_tail(&monitor->op_link, &op->to_do);
	fscache_enqueue_retrieval(op);
	spin_unlock(&object->work_lock);

	fscache_put_retrieval(op);
	return 0;
}

/*
 * handle a probably truncated page
 * - check to see if the page is still relevant and reissue the read if
 *   possible
 * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
 *   must wait again and 0 if successful
 */
static int cachefiles_read_reissue(struct cachefiles_object *object,
				   struct cachefiles_one_read *monitor)
{
	struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping;
	struct page *backpage = monitor->back_page, *backpage2;
	int ret;

	_enter("{ino=%lx},{%lx,%lx}",
	       d_backing_inode(object->backer)->i_ino,
	       backpage->index, backpage->flags);

	/* skip if the page was truncated away completely */
	if (backpage->mapping != bmapping) {
		_leave(" = -ENODATA [mapping]");
		return -ENODATA;
	}

	backpage2 = find_get_page(bmapping, backpage->index);
	if (!backpage2) {
		_leave(" = -ENODATA [gone]");
		return -ENODATA;
	}

	if (backpage != backpage2) {
		put_page(backpage2);
		_leave(" = -ENODATA [different]");
		return -ENODATA;
	}

	/* the page is still there and we already have a ref on it, so we don't
	 * need a second */
	put_page(backpage2);

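	/* re-arm the monitor on the backing page's waitqueue before kicking
	 * off another read so that we see the unlock when it completes
	 */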
	INIT_LIST_HEAD(&monitor->op_link);
	add_page_wait_queue(backpage, &monitor->monitor);

	if (trylock_page(backpage)) {
		ret = -EIO;
		if (PageError(backpage))
			goto unlock_discard;
		ret = 0;
		if (PageUptodate(backpage))
			goto unlock_discard;

		_debug("reissue read");
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto discard;
	}

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}

	/* it'll reappear on the todo list */
	_leave(" = -EINPROGRESS");
	return -EINPROGRESS;

unlock_discard:
	unlock_page(backpage);
discard:
	spin_lock_irq(&object->work_lock);
	list_del(&monitor->op_link);
	spin_unlock_irq(&object->work_lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * copy data from backing pages to netfs pages to complete a read operation
 * - driven by FS-Cache's thread pool
 */
static void cachefiles_read_copier(struct fscache_operation *_op)
{
	struct cachefiles_one_read *monitor;
	struct cachefiles_object *object;
	struct fscache_retrieval *op;
	int error, max;

	op = container_of(_op, struct fscache_retrieval, op);
	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("{ino=%lu}", d_backing_inode(object->backer)->i_ino);

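	/* only transfer a limited batch of pages per invocation, then requeue
	 * the op so that other work on the thread pool gets a look in
	 */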
	max = 8;
	spin_lock_irq(&object->work_lock);

	while (!list_empty(&op->to_do)) {
		monitor = list_entry(op->to_do.next,
				     struct cachefiles_one_read, op_link);
		list_del(&monitor->op_link);

		spin_unlock_irq(&object->work_lock);

		_debug("- copy {%lu}", monitor->back_page->index);

	recheck:
		if (test_bit(FSCACHE_COOKIE_INVALIDATING,
			     &object->fscache.cookie->flags)) {
			error = -ESTALE;
		} else if (PageUptodate(monitor->back_page)) {
			copy_highpage(monitor->netfs_page, monitor->back_page);
			fscache_mark_page_cached(monitor->op,
						 monitor->netfs_page);
			error = 0;
		} else if (!PageError(monitor->back_page)) {
			/* the page has probably been truncated */
			error = cachefiles_read_reissue(object, monitor);
			if (error == -EINPROGRESS)
				goto next;
			goto recheck;
		} else {
			cachefiles_io_error_obj(
				object,
				"Readpage failed on backing file %lx",
				(unsigned long) monitor->back_page->flags);
			error = -EIO;
		}

		put_page(monitor->back_page);

		fscache_end_io(op, monitor->netfs_page, error);
		put_page(monitor->netfs_page);
		fscache_retrieval_complete(op, 1);
		fscache_put_retrieval(op);
		kfree(monitor);

	next:
		/* let the thread pool have some air occasionally */
		max--;
		if (max < 0 || need_resched()) {
			if (!list_empty(&op->to_do))
				fscache_enqueue_retrieval(op);
			_leave(" [maxed out]");
			return;
		}

		spin_lock_irq(&object->work_lock);
	}

	spin_unlock_irq(&object->work_lock);
	_leave("");
}

/*
 * read the corresponding page to the given set from the backing file
 * - an uncertain page is simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
					    struct fscache_retrieval *op,
					    struct page *netpage)
{
	struct cachefiles_one_read *monitor;
	struct address_space *bmapping;
	struct page *newpage, *backpage;
	int ret;

	_enter("");

	_debug("read back %p{%lu,%d}",
	       netpage, netpage->index, page_count(netpage));

	monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
	if (!monitor)
		goto nomem;

	monitor->netfs_page = netpage;
	monitor->op = fscache_get_retrieval(op);

	init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);

	/* attempt to get hold of the backing page */
	bmapping = d_backing_inode(object->backer)->i_mapping;
	newpage = NULL;

	for (;;) {
		backpage = find_get_page(bmapping, netpage->index);
		if (backpage)
			goto backing_page_already_present;

		if (!newpage) {
			newpage = __page_cache_alloc(cachefiles_gfp);
			if (!newpage)
				goto nomem_monitor;
		}

		ret = add_to_page_cache_lru(newpage, bmapping,
					    netpage->index, cachefiles_gfp);
		if (ret == 0)
			goto installed_new_backing_page;
		if (ret != -EEXIST)
			goto nomem_page;
	}

	/* we've installed a new backing page, so now we need to start
	 * it reading */
installed_new_backing_page:
	_debug("- new %p", newpage);

	backpage = newpage;
	newpage = NULL;

read_backing_page:
	ret = bmapping->a_ops->readpage(NULL, backpage);
	if (ret < 0)
		goto read_error;

	/* set the monitor to transfer the data across */
monitor_backing_page:
	_debug("- monitor add");

	/* install the monitor */
	get_page(monitor->netfs_page);
	get_page(backpage);
	monitor->back_page = backpage;
	monitor->monitor.private = backpage;
	add_page_wait_queue(backpage, &monitor->monitor);
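	/* the monitor is now hooked onto the backing page's waitqueue; it will
	 * be picked up and freed by cachefiles_read_copier()
	 */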
	monitor = NULL;

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}
	goto success;

	/* if the backing page is already present, it can be in one of
	 * three states: read in progress, read failed or read okay */
backing_page_already_present:
	_debug("- present");

	if (newpage) {
		put_page(newpage);
		newpage = NULL;
	}

	if (PageError(backpage))
		goto io_error;

	if (PageUptodate(backpage))
		goto backing_page_already_uptodate;

	if (!trylock_page(backpage))
		goto monitor_backing_page;
	_debug("read %p {%lx}", backpage, backpage->flags);
	goto read_backing_page;

	/* the backing page is already up to date, attach the netfs
	 * page to the pagecache and LRU and copy the data across */
backing_page_already_uptodate:
	_debug("- uptodate");

	fscache_mark_page_cached(op, netpage);

	copy_highpage(netpage, backpage);
	fscache_end_io(op, netpage, 0);
	fscache_retrieval_complete(op, 1);

success:
	_debug("success");
	ret = 0;

out:
	if (backpage)
		put_page(backpage);
	if (monitor) {
		fscache_put_retrieval(monitor->op);
		kfree(monitor);
	}
	_leave(" = %d", ret);
	return ret;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM) {
		fscache_retrieval_complete(op, 1);
		goto out;
	}
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	fscache_retrieval_complete(op, 1);
	ret = -ENOBUFS;
	goto out;

nomem_page:
	put_page(newpage);
nomem_monitor:
	fscache_put_retrieval(monitor->op);
	kfree(monitor);
nomem:
	fscache_retrieval_complete(op, 1);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - if the page is backed by a block in the cache:
 *   - a read will be started which will call the callback on completion
 *   - 0 will be returned
 * - else if the page is unbacked:
 *   - the metadata will be retained
 *   - -ENODATA will be returned
 */
int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
				  struct page *page,
				  gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct inode *inode;
	sector_t block;
	unsigned shift;
	int ret, ret2;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{%p},{%lx},,,", object, page->index);

	if (!object->backer)
		goto enobufs;

	inode = d_backing_inode(object->backer);
	ASSERT(S_ISREG(inode->i_mode));

	/* calculate the shift required to use bmap */
	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

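	/* make the operation asynchronous so that it's driven by the FS-Cache
	 * thread pool, with cachefiles_read_copier() doing the data transfer
	 */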
	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	/* we assume the absence or presence of the first block is a good
	 * enough indication for the page as a whole
	 * - TODO: don't use bmap() for this as it is _not_ actually good
	 *   enough for this as it doesn't indicate errors, but it's all we've
	 *   got for the moment
	 */
	block = page->index;
	block <<= shift;

	ret2 = bmap(inode, &block);
	ASSERT(ret2 == 0);

	_debug("%llx -> %llx",
	       (unsigned long long) (page->index << shift),
	       (unsigned long long) block);

	if (block) {
		/* submit the apparently valid page to the backing fs to be
		 * read from disk */
		ret = cachefiles_read_backing_file_one(object, op, page);
	} else if (cachefiles_has_space(cache, 0, 1) == 0) {
		/* there's space in the cache we can use */
		fscache_mark_page_cached(op, page);
		fscache_retrieval_complete(op, 1);
		ret = -ENODATA;
	} else {
		goto enobufs;
	}

	_leave(" = %d", ret);
	return ret;

enobufs:
	fscache_retrieval_complete(op, 1);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}

/*
 * read the corresponding pages to the given set from the backing file
 * - any uncertain pages are simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file(struct cachefiles_object *object,
					struct fscache_retrieval *op,
					struct list_head *list)
{
	struct cachefiles_one_read *monitor = NULL;
	struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping;
	struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
	int ret = 0;

	_enter("");

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);

		_debug("read back %p{%lu,%d}",
		       netpage, netpage->index, page_count(netpage));

		if (!monitor) {
			monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
			if (!monitor)
				goto nomem;

			monitor->op = fscache_get_retrieval(op);
			init_waitqueue_func_entry(&monitor->monitor,
						  cachefiles_read_waiter);
		}

		for (;;) {
			backpage = find_get_page(bmapping, netpage->index);
			if (backpage)
				goto backing_page_already_present;

			if (!newpage) {
				newpage = __page_cache_alloc(cachefiles_gfp);
				if (!newpage)
					goto nomem;
			}

			ret = add_to_page_cache_lru(newpage, bmapping,
						    netpage->index,
						    cachefiles_gfp);
			if (ret == 0)
				goto installed_new_backing_page;
			if (ret != -EEXIST)
				goto nomem;
		}

		/* we've installed a new backing page, so now we need
		 * to start it reading */
	installed_new_backing_page:
		_debug("- new %p", newpage);

		backpage = newpage;
		newpage = NULL;

	reread_backing_page:
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto read_error;

		/* add the netfs page to the pagecache and LRU, and set the
		 * monitor to transfer the data across */
	monitor_backing_page:
		_debug("- monitor add");

		ret = add_to_page_cache_lru(netpage, op->mapping,
					    netpage->index, cachefiles_gfp);
		if (ret < 0) {
			if (ret == -EEXIST) {
				put_page(backpage);
				backpage = NULL;
				put_page(netpage);
				netpage = NULL;
				fscache_retrieval_complete(op, 1);
				continue;
			}
			goto nomem;
		}

		/* install a monitor */
		get_page(netpage);
		monitor->netfs_page = netpage;

		get_page(backpage);
		monitor->back_page = backpage;
		monitor->monitor.private = backpage;
		add_page_wait_queue(backpage, &monitor->monitor);
		monitor = NULL;

		/* but the page may have been read before the monitor was
		 * installed, so the monitor may miss the event - so we have to
		 * ensure that we do get one in such a case */
		if (trylock_page(backpage)) {
			_debug("2unlock %p {%lx}", backpage, backpage->flags);
			unlock_page(backpage);
		}

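		/* the monitor now holds its own references on both pages, so
		 * drop the local ones
		 */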
		put_page(backpage);
		backpage = NULL;

		put_page(netpage);
		netpage = NULL;
		continue;

		/* if the backing page is already present, it can be in one of
		 * three states: read in progress, read failed or read okay */
	backing_page_already_present:
		_debug("- present %p", backpage);

		if (PageError(backpage))
			goto io_error;

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate;

		_debug("- not ready %p{%lx}", backpage, backpage->flags);

		if (!trylock_page(backpage))
			goto monitor_backing_page;

		if (PageError(backpage)) {
			_debug("error %lx", backpage->flags);
			unlock_page(backpage);
			goto io_error;
		}

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate_unlock;

		/* we've locked a page that's neither up to date nor erroneous,
		 * so we need to attempt to read it again */
		goto reread_backing_page;

		/* the backing page is already up to date, attach the netfs
		 * page to the pagecache and LRU and copy the data across */
	backing_page_already_uptodate_unlock:
		_debug("uptodate %lx", backpage->flags);
		unlock_page(backpage);
	backing_page_already_uptodate:
		_debug("- uptodate");

		ret = add_to_page_cache_lru(netpage, op->mapping,
					    netpage->index, cachefiles_gfp);
		if (ret < 0) {
			if (ret == -EEXIST) {
				put_page(backpage);
				backpage = NULL;
				put_page(netpage);
				netpage = NULL;
				fscache_retrieval_complete(op, 1);
				continue;
			}
			goto nomem;
		}

		copy_highpage(netpage, backpage);

		put_page(backpage);
		backpage = NULL;

		fscache_mark_page_cached(op, netpage);

		/* the netpage is unlocked and marked up to date here */
		fscache_end_io(op, netpage, 0);
		put_page(netpage);
		netpage = NULL;
		fscache_retrieval_complete(op, 1);
		continue;
	}

	netpage = NULL;

	_debug("out");

out:
	/* tidy up */
	if (newpage)
		put_page(newpage);
	if (netpage)
		put_page(netpage);
	if (backpage)
		put_page(backpage);
	if (monitor) {
		fscache_put_retrieval(op);
		kfree(monitor);
	}

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);
		put_page(netpage);
		fscache_retrieval_complete(op, 1);
	}

	_leave(" = %d", ret);
	return ret;

nomem:
	_debug("nomem");
	ret = -ENOMEM;
	goto record_page_complete;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM)
		goto record_page_complete;
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	ret = -ENOBUFS;
record_page_complete:
	fscache_retrieval_complete(op, 1);
	goto out;
}

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 */
int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
				   struct list_head *pages,
				   unsigned *nr_pages,
				   gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct list_head backpages;
	struct pagevec pagevec;
	struct inode *inode;
	struct page *page, *_n;
	unsigned shift, nrbackpages;
	int ret, ret2, space;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{OBJ%x,%d},,%d,,",
	       object->fscache.debug_id, atomic_read(&op->op.usage),
	       *nr_pages);

	if (!object->backer)
		goto all_enobufs;

	space = 1;
	if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
		space = 0;

	inode = d_backing_inode(object->backer);
	ASSERT(S_ISREG(inode->i_mode));

	/* calculate the shift required to use bmap */
	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

	pagevec_init(&pagevec);

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	INIT_LIST_HEAD(&backpages);
	nrbackpages = 0;

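	/* unbacked pages get -ENODATA if there's space to allocate blocks for
	 * them, or -ENOBUFS if the cache is too full
	 */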
	ret = space ? -ENODATA : -ENOBUFS;
	list_for_each_entry_safe(page, _n, pages, lru) {
		sector_t block;

		/* we assume the absence or presence of the first block is a
		 * good enough indication for the page as a whole
		 * - TODO: don't use bmap() for this as it is _not_ actually
		 *   good enough for this as it doesn't indicate errors, but
		 *   it's all we've got for the moment
		 */
		block = page->index;
		block <<= shift;

		ret2 = bmap(inode, &block);
		ASSERT(ret2 == 0);

		_debug("%llx -> %llx",
		       (unsigned long long) (page->index << shift),
		       (unsigned long long) block);

		if (block) {
			/* we have data - add it to the list to give to the
			 * backing fs */
			list_move(&page->lru, &backpages);
			(*nr_pages)--;
			nrbackpages++;
		} else if (space && pagevec_add(&pagevec, page) == 0) {
			fscache_mark_pages_cached(op, &pagevec);
			fscache_retrieval_complete(op, 1);
			ret = -ENODATA;
		} else {
			fscache_retrieval_complete(op, 1);
		}
	}

	if (pagevec_count(&pagevec) > 0)
		fscache_mark_pages_cached(op, &pagevec);

	if (list_empty(pages))
		ret = 0;

	/* submit the apparently valid pages to the backing fs to be read from
	 * disk */
	if (nrbackpages > 0) {
		ret2 = cachefiles_read_backing_file(object, op, &backpages);
		if (ret2 == -ENOMEM || ret2 == -EINTR)
			ret = ret2;
	}

	_leave(" = %d [nr=%u%s]",
	       ret, *nr_pages, list_empty(pages) ? " empty" : "");
	return ret;

all_enobufs:
	fscache_retrieval_complete(op, *nr_pages);
	return -ENOBUFS;
}

/*
 * allocate a block in the cache in which to store a page
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - otherwise:
 *   - the metadata will be retained
 *   - 0 will be returned
 */
int cachefiles_allocate_page(struct fscache_retrieval *op,
			     struct page *page,
			     gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,{%lx},", object, page->index);

	ret = cachefiles_has_space(cache, 0, 1);
	if (ret == 0)
		fscache_mark_page_cached(op, page);
	else
		ret = -ENOBUFS;

	fscache_retrieval_complete(op, 1);
	_leave(" = %d", ret);
	return ret;
}

/*
 * allocate blocks in the cache in which to store a set of pages
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if some buffers couldn't be made available
 * - returns -ENOBUFS if some pages are beyond EOF
 * - otherwise:
 *   - -ENODATA will be returned
 *   - metadata will be retained for any page marked
 */
int cachefiles_allocate_pages(struct fscache_retrieval *op,
			      struct list_head *pages,
			      unsigned *nr_pages,
			      gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct pagevec pagevec;
	struct page *page;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,,,%d,", object, *nr_pages);

	ret = cachefiles_has_space(cache, 0, *nr_pages);
	if (ret == 0) {
		pagevec_init(&pagevec);

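		/* batch the pages up in the pagevec and mark each batch as
		 * being cached whenever it fills up
		 */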
		list_for_each_entry(page, pages, lru) {
			if (pagevec_add(&pagevec, page) == 0)
				fscache_mark_pages_cached(op, &pagevec);
		}

		if (pagevec_count(&pagevec) > 0)
			fscache_mark_pages_cached(op, &pagevec);
		ret = -ENODATA;
	} else {
		ret = -ENOBUFS;
	}

	fscache_retrieval_complete(op, *nr_pages);
	_leave(" = %d", ret);
	return ret;
}

/*
 * request a page be stored in the cache
 * - cache withdrawal is prevented by the caller
 * - this request may be ignored if there's no cache block available, in which
 *   case -ENOBUFS will be returned
 * - if the op is in progress, 0 will be returned
 */
int cachefiles_write_page(struct fscache_storage *op, struct page *page)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct file *file;
	struct path path;
	loff_t pos, eof;
	size_t len;
	void *data;
	int ret = -ENOBUFS;

	ASSERT(op != NULL);
	ASSERT(page != NULL);

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("%p,%p{%lx},,,", object, page, page->index);

	if (!object->backer) {
		_leave(" = -ENOBUFS");
		return -ENOBUFS;
	}

	ASSERT(d_is_reg(object->backer));

	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	pos = (loff_t)page->index << PAGE_SHIFT;

	/* We mustn't write more data than we have, so we have to beware of a
	 * partial page at EOF.
	 */
	eof = object->fscache.store_limit_l;
	if (pos >= eof)
		goto error;

	/* write the page to the backing filesystem and let it store it in its
	 * own time */
	path.mnt = cache->mnt;
	path.dentry = object->backer;
	file = dentry_open(&path, O_RDWR | O_LARGEFILE, cache->cache_cred);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto error_2;
	}

	len = PAGE_SIZE;
	if (eof & ~PAGE_MASK) {
		if (eof - pos < PAGE_SIZE) {
			_debug("cut short %llx to %llx",
			       pos, eof);
			len = eof - pos;
			ASSERTCMP(pos + len, ==, eof);
		}
	}

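	/* write the data synchronously through the backing file; a short
	 * write is treated as an I/O error on the cache
	 */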
	data = kmap(page);
	ret = kernel_write(file, data, len, &pos);
	kunmap(page);
	fput(file);
	if (ret != len)
		goto error_eio;

	_leave(" = 0");
	return 0;

error_eio:
	ret = -EIO;
error_2:
	if (ret == -EIO)
		cachefiles_io_error_obj(object,
					"Write page to backing file failed");
error:
	_leave(" = -ENOBUFS [%d]", ret);
	return -ENOBUFS;
}

/*
 * detach a backing block from a page
 * - cache withdrawal is prevented by the caller
 */
void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
	__releases(&object->fscache.cookie->lock)
{
	struct cachefiles_object *object;

	object = container_of(_object, struct cachefiles_object, fscache);

	_enter("%p,{%lu}", object, page->index);

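	/* there's nothing to detach here; just drop the cookie lock that the
	 * caller took
	 */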
	spin_unlock(&object->fscache.cookie->lock);
}