/* Storage object read/write
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "internal.h"

/*
 * detect wake up events generated by the unlocking of pages in which we're
 * interested
 * - we use this to detect read completion of backing pages
 * - the caller holds the waitqueue lock
 */
static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
				  int sync, void *_key)
{
	struct cachefiles_one_read *monitor =
		container_of(wait, struct cachefiles_one_read, monitor);
	struct cachefiles_object *object;
	struct wait_bit_key *key = _key;
	struct page *page = wait->private;

	ASSERT(key);

	_enter("{%lu},%u,%d,{%p,%u}",
	       monitor->netfs_page->index, mode, sync,
	       key->flags, key->bit_nr);

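	/* the page waitqueue may carry wake-ups for other pages and bits, so
	 * only act on the unlock event for the page this monitor watches */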
	if (key->flags != &page->flags ||
	    key->bit_nr != PG_locked)
		return 0;

	_debug("--- monitor %p %lx ---", page, page->flags);

	if (!PageUptodate(page) && !PageError(page)) {
		/* unlocked, not uptodate and not erroneous? */
		_debug("page probably truncated");
	}

	/* remove from the waitqueue */
	list_del(&wait->task_list);

	/* move onto the action list and queue for FS-Cache thread pool */
	ASSERT(monitor->op);

	object = container_of(monitor->op->op.object,
			      struct cachefiles_object, fscache);

	spin_lock(&object->work_lock);
	list_add_tail(&monitor->op_link, &monitor->op->to_do);
	spin_unlock(&object->work_lock);

	fscache_enqueue_retrieval(monitor->op);
	return 0;
}

/*
 * handle a probably truncated page
 * - check to see if the page is still relevant and reissue the read if
 *   possible
 * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
 *   must wait again and 0 if successful
 */
static int cachefiles_read_reissue(struct cachefiles_object *object,
				   struct cachefiles_one_read *monitor)
{
	struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping;
	struct page *backpage = monitor->back_page, *backpage2;
	int ret;

	_enter("{ino=%lx},{%lx,%lx}",
	       d_backing_inode(object->backer)->i_ino,
	       backpage->index, backpage->flags);

	/* skip if the page was truncated away completely */
	if (backpage->mapping != bmapping) {
		_leave(" = -ENODATA [mapping]");
		return -ENODATA;
	}

	backpage2 = find_get_page(bmapping, backpage->index);
	if (!backpage2) {
		_leave(" = -ENODATA [gone]");
		return -ENODATA;
	}

	if (backpage != backpage2) {
		put_page(backpage2);
		_leave(" = -ENODATA [different]");
		return -ENODATA;
	}

	/* the page is still there and we already have a ref on it, so we don't
	 * need a second */
	put_page(backpage2);

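	/* re-arm the monitor on the backing page's waitqueue ready for the
	 * next unlock event */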
	INIT_LIST_HEAD(&monitor->op_link);
	add_page_wait_queue(backpage, &monitor->monitor);

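	/* if we can lock the page, no read is currently in progress, so
	 * decide from its flags whether to fail, succeed or reissue the
	 * read ourselves */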
	if (trylock_page(backpage)) {
		ret = -EIO;
		if (PageError(backpage))
			goto unlock_discard;
		ret = 0;
		if (PageUptodate(backpage))
			goto unlock_discard;

		_debug("reissue read");
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto unlock_discard;
	}

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}

	/* it'll reappear on the todo list */
	_leave(" = -EINPROGRESS");
	return -EINPROGRESS;

unlock_discard:
	unlock_page(backpage);
	spin_lock_irq(&object->work_lock);
	list_del(&monitor->op_link);
	spin_unlock_irq(&object->work_lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * copy data from backing pages to netfs pages to complete a read operation
 * - driven by FS-Cache's thread pool
 */
static void cachefiles_read_copier(struct fscache_operation *_op)
{
	struct cachefiles_one_read *monitor;
	struct cachefiles_object *object;
	struct fscache_retrieval *op;
	int error, max;

	op = container_of(_op, struct fscache_retrieval, op);
	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("{ino=%lu}", d_backing_inode(object->backer)->i_ino);

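	/* process a bounded batch of monitors per invocation so this worker
	 * doesn't monopolise the FS-Cache thread pool; any left over are
	 * requeued below */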
	max = 8;
	spin_lock_irq(&object->work_lock);

	while (!list_empty(&op->to_do)) {
		monitor = list_entry(op->to_do.next,
				     struct cachefiles_one_read, op_link);
		list_del(&monitor->op_link);

		spin_unlock_irq(&object->work_lock);

		_debug("- copy {%lu}", monitor->back_page->index);

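		/* the backing page may have been truncated or the cookie
		 * invalidated while we waited for it to unlock, so recheck
		 * its state before copying */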
	recheck:
		if (test_bit(FSCACHE_COOKIE_INVALIDATING,
			     &object->fscache.cookie->flags)) {
			error = -ESTALE;
		} else if (PageUptodate(monitor->back_page)) {
			copy_highpage(monitor->netfs_page, monitor->back_page);
			fscache_mark_page_cached(monitor->op,
						 monitor->netfs_page);
			error = 0;
		} else if (!PageError(monitor->back_page)) {
			/* the page has probably been truncated */
			error = cachefiles_read_reissue(object, monitor);
			if (error == -EINPROGRESS)
				goto next;
			goto recheck;
		} else {
			cachefiles_io_error_obj(
				object,
				"Readpage failed on backing file %lx",
				(unsigned long) monitor->back_page->flags);
			error = -EIO;
		}

		put_page(monitor->back_page);

		fscache_end_io(op, monitor->netfs_page, error);
		put_page(monitor->netfs_page);
		fscache_retrieval_complete(op, 1);
		fscache_put_retrieval(op);
		kfree(monitor);

	next:
		/* let the thread pool have some air occasionally */
		max--;
		if (max < 0 || need_resched()) {
			if (!list_empty(&op->to_do))
				fscache_enqueue_retrieval(op);
			_leave(" [maxed out]");
			return;
		}

		spin_lock_irq(&object->work_lock);
	}

	spin_unlock_irq(&object->work_lock);
	_leave("");
}

/*
 * read the corresponding page to the given set from the backing file
 * - an uncertain page is simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
					    struct fscache_retrieval *op,
					    struct page *netpage)
{
	struct cachefiles_one_read *monitor;
	struct address_space *bmapping;
	struct page *newpage, *backpage;
	int ret;

	_enter("");

	_debug("read back %p{%lu,%d}",
	       netpage, netpage->index, page_count(netpage));

	monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
	if (!monitor)
		goto nomem;

	monitor->netfs_page = netpage;
	monitor->op = fscache_get_retrieval(op);

	init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);

	/* attempt to get hold of the backing page */
	bmapping = d_backing_inode(object->backer)->i_mapping;
	newpage = NULL;

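	/* use any page already in the backing pagecache at this index,
	 * otherwise allocate one and race to install it */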
	for (;;) {
		backpage = find_get_page(bmapping, netpage->index);
		if (backpage)
			goto backing_page_already_present;

		if (!newpage) {
			newpage = __page_cache_alloc(cachefiles_gfp |
						     __GFP_COLD);
			if (!newpage)
				goto nomem_monitor;
		}

		ret = add_to_page_cache_lru(newpage, bmapping,
					    netpage->index, cachefiles_gfp);
		if (ret == 0)
			goto installed_new_backing_page;
		if (ret != -EEXIST)
			goto nomem_page;
	}

	/* we've installed a new backing page, so now we need to start
	 * it reading */
installed_new_backing_page:
	_debug("- new %p", newpage);

	backpage = newpage;
	newpage = NULL;

read_backing_page:
	ret = bmapping->a_ops->readpage(NULL, backpage);
	if (ret < 0)
		goto read_error;

	/* set the monitor to transfer the data across */
monitor_backing_page:
	_debug("- monitor add");

	/* install the monitor */
	get_page(monitor->netfs_page);
	get_page(backpage);
	monitor->back_page = backpage;
	monitor->monitor.private = backpage;
	add_page_wait_queue(backpage, &monitor->monitor);
	monitor = NULL;

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}
	goto success;

	/* if the backing page is already present, it can be in one of
	 * three states: read in progress, read failed or read okay */
backing_page_already_present:
	_debug("- present");

	if (newpage) {
		put_page(newpage);
		newpage = NULL;
	}

	if (PageError(backpage))
		goto io_error;

	if (PageUptodate(backpage))
		goto backing_page_already_uptodate;

	if (!trylock_page(backpage))
		goto monitor_backing_page;
	_debug("read %p {%lx}", backpage, backpage->flags);
	goto read_backing_page;

	/* the backing page is already up to date, attach the netfs
	 * page to the pagecache and LRU and copy the data across */
backing_page_already_uptodate:
	_debug("- uptodate");

	fscache_mark_page_cached(op, netpage);

	copy_highpage(netpage, backpage);
	fscache_end_io(op, netpage, 0);
	fscache_retrieval_complete(op, 1);

success:
	_debug("success");
	ret = 0;

out:
	if (backpage)
		put_page(backpage);
	if (monitor) {
		fscache_put_retrieval(monitor->op);
		kfree(monitor);
	}
	_leave(" = %d", ret);
	return ret;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM) {
		fscache_retrieval_complete(op, 1);
		goto out;
	}
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	fscache_retrieval_complete(op, 1);
	ret = -ENOBUFS;
	goto out;

nomem_page:
	put_page(newpage);
nomem_monitor:
	fscache_put_retrieval(monitor->op);
	kfree(monitor);
nomem:
	fscache_retrieval_complete(op, 1);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - if the page is backed by a block in the cache:
 *   - a read will be started which will call the callback on completion
 *   - 0 will be returned
 * - else if the page is unbacked:
 *   - the metadata will be retained
 *   - -ENODATA will be returned
 */
int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
				  struct page *page,
				  gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct inode *inode;
	sector_t block0, block;
	unsigned shift;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{%p},{%lx},,,", object, page->index);

	if (!object->backer)
		goto enobufs;

	inode = d_backing_inode(object->backer);
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT(inode->i_mapping->a_ops->bmap);
	ASSERT(inode->i_mapping->a_ops->readpages);

	/* calculate the shift required to use bmap */
	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

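	/* set the operation up to run asynchronously on the FS-Cache thread
	 * pool, with cachefiles_read_copier() doing the completion work */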
	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	/* we assume the absence or presence of the first block is a good
	 * enough indication for the page as a whole
	 * - TODO: don't use bmap() for this as it is _not_ actually good
	 *   enough: it doesn't indicate errors, but it's all we've got for
	 *   the moment
	 */
	block0 = page->index;
	block0 <<= shift;

	block = inode->i_mapping->a_ops->bmap(inode->i_mapping, block0);
	_debug("%llx -> %llx",
	       (unsigned long long) block0,
	       (unsigned long long) block);

	if (block) {
		/* submit the apparently valid page to the backing fs to be
		 * read from disk */
		ret = cachefiles_read_backing_file_one(object, op, page);
	} else if (cachefiles_has_space(cache, 0, 1) == 0) {
		/* there's space in the cache we can use */
		fscache_mark_page_cached(op, page);
		fscache_retrieval_complete(op, 1);
		ret = -ENODATA;
	} else {
		goto enobufs;
	}

	_leave(" = %d", ret);
	return ret;

enobufs:
	fscache_retrieval_complete(op, 1);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}

/*
 * read the corresponding pages to the given set from the backing file
 * - any uncertain pages are simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file(struct cachefiles_object *object,
					struct fscache_retrieval *op,
					struct list_head *list)
{
	struct cachefiles_one_read *monitor = NULL;
	struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping;
	struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
	int ret = 0;

	_enter("");

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);

		_debug("read back %p{%lu,%d}",
		       netpage, netpage->index, page_count(netpage));

		if (!monitor) {
			monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
			if (!monitor)
				goto nomem;

			monitor->op = fscache_get_retrieval(op);
			init_waitqueue_func_entry(&monitor->monitor,
						  cachefiles_read_waiter);
		}

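		/* as in the single-page case: take any existing backing page
		 * at this index, else allocate and install one */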
		for (;;) {
			backpage = find_get_page(bmapping, netpage->index);
			if (backpage)
				goto backing_page_already_present;

			if (!newpage) {
				newpage = __page_cache_alloc(cachefiles_gfp |
							     __GFP_COLD);
				if (!newpage)
					goto nomem;
			}

			ret = add_to_page_cache_lru(newpage, bmapping,
						    netpage->index,
						    cachefiles_gfp);
			if (ret == 0)
				goto installed_new_backing_page;
			if (ret != -EEXIST)
				goto nomem;
		}

		/* we've installed a new backing page, so now we need
		 * to start it reading */
	installed_new_backing_page:
		_debug("- new %p", newpage);

		backpage = newpage;
		newpage = NULL;

	reread_backing_page:
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto read_error;

		/* add the netfs page to the pagecache and LRU, and set the
		 * monitor to transfer the data across */
	monitor_backing_page:
		_debug("- monitor add");

		ret = add_to_page_cache_lru(netpage, op->mapping,
					    netpage->index, cachefiles_gfp);
		if (ret < 0) {
			if (ret == -EEXIST) {
				put_page(netpage);
				fscache_retrieval_complete(op, 1);
				continue;
			}
			goto nomem;
		}

		/* install a monitor */
		get_page(netpage);
		monitor->netfs_page = netpage;

		get_page(backpage);
		monitor->back_page = backpage;
		monitor->monitor.private = backpage;
		add_page_wait_queue(backpage, &monitor->monitor);
		monitor = NULL;

		/* but the page may have been read before the monitor was
		 * installed, so the monitor may miss the event - so we have to
		 * ensure that we do get one in such a case */
		if (trylock_page(backpage)) {
			_debug("2unlock %p {%lx}", backpage, backpage->flags);
			unlock_page(backpage);
		}

		put_page(backpage);
		backpage = NULL;

		put_page(netpage);
		netpage = NULL;
		continue;

		/* if the backing page is already present, it can be in one of
		 * three states: read in progress, read failed or read okay */
	backing_page_already_present:
		_debug("- present %p", backpage);

		if (PageError(backpage))
			goto io_error;

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate;

		_debug("- not ready %p{%lx}", backpage, backpage->flags);

		if (!trylock_page(backpage))
			goto monitor_backing_page;

		if (PageError(backpage)) {
			_debug("error %lx", backpage->flags);
			unlock_page(backpage);
			goto io_error;
		}

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate_unlock;

		/* we've locked a page that's neither up to date nor erroneous,
		 * so we need to attempt to read it again */
		goto reread_backing_page;

		/* the backing page is already up to date, attach the netfs
		 * page to the pagecache and LRU and copy the data across */
	backing_page_already_uptodate_unlock:
		_debug("uptodate %lx", backpage->flags);
		unlock_page(backpage);
	backing_page_already_uptodate:
		_debug("- uptodate");

		ret = add_to_page_cache_lru(netpage, op->mapping,
					    netpage->index, cachefiles_gfp);
		if (ret < 0) {
			if (ret == -EEXIST) {
				put_page(netpage);
				fscache_retrieval_complete(op, 1);
				continue;
			}
			goto nomem;
		}

		copy_highpage(netpage, backpage);

		put_page(backpage);
		backpage = NULL;

		fscache_mark_page_cached(op, netpage);

		/* the netpage is unlocked and marked up to date here */
		fscache_end_io(op, netpage, 0);
		put_page(netpage);
		netpage = NULL;
		fscache_retrieval_complete(op, 1);
		continue;
	}

	netpage = NULL;

	_debug("out");

out:
	/* tidy up */
	if (newpage)
		put_page(newpage);
	if (netpage)
		put_page(netpage);
	if (backpage)
		put_page(backpage);
	if (monitor) {
		fscache_put_retrieval(op);
		kfree(monitor);
	}

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);
		put_page(netpage);
		fscache_retrieval_complete(op, 1);
	}

	_leave(" = %d", ret);
	return ret;

nomem:
	_debug("nomem");
	ret = -ENOMEM;
	goto record_page_complete;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM)
		goto record_page_complete;
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	ret = -ENOBUFS;
record_page_complete:
	fscache_retrieval_complete(op, 1);
	goto out;
}

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 */
int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
				   struct list_head *pages,
				   unsigned *nr_pages,
				   gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct list_head backpages;
	struct pagevec pagevec;
	struct inode *inode;
	struct page *page, *_n;
	unsigned shift, nrbackpages;
	int ret, ret2, space;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{OBJ%x,%d},,%d,,",
	       object->fscache.debug_id, atomic_read(&op->op.usage),
	       *nr_pages);

	if (!object->backer)
		goto all_enobufs;

	space = 1;
	if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
		space = 0;

	inode = d_backing_inode(object->backer);
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT(inode->i_mapping->a_ops->bmap);
	ASSERT(inode->i_mapping->a_ops->readpages);

	/* calculate the shift required to use bmap */
	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

	pagevec_init(&pagevec, 0);

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	INIT_LIST_HEAD(&backpages);
	nrbackpages = 0;

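	/* assume -ENODATA (blocks allocated) if there's space in the cache
	 * and -ENOBUFS if not; this becomes 0 below if every page can be
	 * submitted for reading */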
	ret = space ? -ENODATA : -ENOBUFS;
	list_for_each_entry_safe(page, _n, pages, lru) {
		sector_t block0, block;

		/* we assume the absence or presence of the first block is a
		 * good enough indication for the page as a whole
		 * - TODO: don't use bmap() for this as it is _not_ actually
		 *   good enough: it doesn't indicate errors, but it's all
		 *   we've got for the moment
		 */
		block0 = page->index;
		block0 <<= shift;

		block = inode->i_mapping->a_ops->bmap(inode->i_mapping,
						      block0);
		_debug("%llx -> %llx",
		       (unsigned long long) block0,
		       (unsigned long long) block);

		if (block) {
			/* we have data - add it to the list to give to the
			 * backing fs */
			list_move(&page->lru, &backpages);
			(*nr_pages)--;
			nrbackpages++;
		} else if (space && pagevec_add(&pagevec, page) == 0) {
			fscache_mark_pages_cached(op, &pagevec);
			fscache_retrieval_complete(op, 1);
			ret = -ENODATA;
		} else {
			fscache_retrieval_complete(op, 1);
		}
	}

	if (pagevec_count(&pagevec) > 0)
		fscache_mark_pages_cached(op, &pagevec);

	if (list_empty(pages))
		ret = 0;

	/* submit the apparently valid pages to the backing fs to be read from
	 * disk */
	if (nrbackpages > 0) {
		ret2 = cachefiles_read_backing_file(object, op, &backpages);
		if (ret2 == -ENOMEM || ret2 == -EINTR)
			ret = ret2;
	}

	_leave(" = %d [nr=%u%s]",
	       ret, *nr_pages, list_empty(pages) ? " empty" : "");
	return ret;

all_enobufs:
	fscache_retrieval_complete(op, *nr_pages);
	return -ENOBUFS;
}

/*
 * allocate a block in the cache in which to store a page
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - otherwise:
 *   - the metadata will be retained
 *   - 0 will be returned
 */
int cachefiles_allocate_page(struct fscache_retrieval *op,
			     struct page *page,
			     gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,{%lx},", object, page->index);

	ret = cachefiles_has_space(cache, 0, 1);
	if (ret == 0)
		fscache_mark_page_cached(op, page);
	else
		ret = -ENOBUFS;

	fscache_retrieval_complete(op, 1);
	_leave(" = %d", ret);
	return ret;
}

/*
 * allocate blocks in the cache in which to store a set of pages
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if some buffers couldn't be made available
 * - returns -ENOBUFS if some pages are beyond EOF
 * - otherwise:
 *   - -ENODATA will be returned
 * - metadata will be retained for any page marked
 */
int cachefiles_allocate_pages(struct fscache_retrieval *op,
			      struct list_head *pages,
			      unsigned *nr_pages,
			      gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct pagevec pagevec;
	struct page *page;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,,,%d,", object, *nr_pages);

	ret = cachefiles_has_space(cache, 0, *nr_pages);
	if (ret == 0) {
		pagevec_init(&pagevec, 0);

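		/* mark the pages in batches, flushing the pagevec whenever
		 * it fills up (pagevec_add() returns the slots remaining) */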
		list_for_each_entry(page, pages, lru) {
			if (pagevec_add(&pagevec, page) == 0)
				fscache_mark_pages_cached(op, &pagevec);
		}

		if (pagevec_count(&pagevec) > 0)
			fscache_mark_pages_cached(op, &pagevec);
		ret = -ENODATA;
	} else {
		ret = -ENOBUFS;
	}

	fscache_retrieval_complete(op, *nr_pages);
	_leave(" = %d", ret);
	return ret;
}

/*
 * request a page be stored in the cache
 * - cache withdrawal is prevented by the caller
 * - this request may be ignored if there's no cache block available, in which
 *   case -ENOBUFS will be returned
 * - if the op is in progress, 0 will be returned
 */
int cachefiles_write_page(struct fscache_storage *op, struct page *page)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct file *file;
	struct path path;
	loff_t pos, eof;
	size_t len;
	void *data;
	int ret = -ENOBUFS;

	ASSERT(op != NULL);
	ASSERT(page != NULL);

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("%p,%p{%lx},,,", object, page, page->index);

	if (!object->backer) {
		_leave(" = -ENOBUFS");
		return -ENOBUFS;
	}

	ASSERT(d_is_reg(object->backer));

	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	pos = (loff_t)page->index << PAGE_SHIFT;

	/* We mustn't write more data than we have, so we have to beware of a
	 * partial page at EOF.
	 */
	eof = object->fscache.store_limit_l;
	if (pos >= eof)
		goto error;

	/* write the page to the backing filesystem and let it store it in its
	 * own time */
	path.mnt = cache->mnt;
	path.dentry = object->backer;
	file = dentry_open(&path, O_RDWR | O_LARGEFILE, cache->cache_cred);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto error_2;
	}

	len = PAGE_SIZE;
	if (eof & ~PAGE_MASK) {
		if (eof - pos < PAGE_SIZE) {
			_debug("cut short %llx to %llx",
			       pos, eof);
			len = eof - pos;
			ASSERTCMP(pos + len, ==, eof);
		}
	}

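	/* copy the data out of the page with a synchronous kernel-space
	 * write; a short write is treated as an I/O error below */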
	data = kmap(page);
	ret = __kernel_write(file, data, len, &pos);
	kunmap(page);
	fput(file);
	if (ret != len)
		goto error_eio;

	_leave(" = 0");
	return 0;

error_eio:
	ret = -EIO;
error_2:
	if (ret == -EIO)
		cachefiles_io_error_obj(object,
					"Write page to backing file failed");
error:
	_leave(" = -ENOBUFS [%d]", ret);
	return -ENOBUFS;
}

/*
 * detach a backing block from a page
 * - cache withdrawal is prevented by the caller
 */
void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;

	object = container_of(_object, struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,{%lu}", object, page->index);

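	/* our caller in FS-Cache is expected to hold the cookie lock when
	 * invoking this op; we release it on their behalf */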
	spin_unlock(&object->fscache.cookie->lock);
}