/* Storage object read/write
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "internal.h"

/*
 * detect wake-up events generated by the unlocking of pages in which we're
 * interested
 * - we use this to detect read completion of backing pages
 * - the caller holds the waitqueue lock
 */
static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
				  int sync, void *_key)
{
	struct cachefiles_one_read *monitor =
		container_of(wait, struct cachefiles_one_read, monitor);
	struct cachefiles_object *object;
	struct fscache_retrieval *op = monitor->op;
	struct wait_bit_key *key = _key;
	struct page *page = wait->private;

	ASSERT(key);

	_enter("{%lu},%u,%d,{%p,%u}",
	       monitor->netfs_page->index, mode, sync,
	       key->flags, key->bit_nr);

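	/* The page waitqueue is hashed and may be shared with other pages and
	 * other bits, so only act on the clearing of PG_locked on our page.
	 */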
	if (key->flags != &page->flags ||
	    key->bit_nr != PG_locked)
		return 0;

	_debug("--- monitor %p %lx ---", page, page->flags);

	if (!PageUptodate(page) && !PageError(page)) {
		/* unlocked, not uptodate and not erroneous? */
		_debug("page probably truncated");
	}

	/* remove from the waitqueue */
	list_del(&wait->task_list);

	/* move onto the action list and queue for FS-Cache thread pool */
	ASSERT(op);

	/* We need to temporarily bump the usage count as we don't own a ref
	 * here otherwise cachefiles_read_copier() may free the op between the
	 * monitor being enqueued on the op->to_do list and the op getting
	 * enqueued on the work queue.
	 */
	fscache_get_retrieval(op);

	object = container_of(op->op.object, struct cachefiles_object, fscache);
	spin_lock(&object->work_lock);
	list_add_tail(&monitor->op_link, &op->to_do);
	fscache_enqueue_retrieval(op);
	spin_unlock(&object->work_lock);

	fscache_put_retrieval(op);
	return 0;
}

/*
 * handle a probably truncated page
 * - check to see if the page is still relevant and reissue the read if
 *   possible
 * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
 *   must wait again and 0 if successful
 */
static int cachefiles_read_reissue(struct cachefiles_object *object,
				   struct cachefiles_one_read *monitor)
{
	struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping;
	struct page *backpage = monitor->back_page, *backpage2;
	int ret;

	_enter("{ino=%lx},{%lx,%lx}",
	       d_backing_inode(object->backer)->i_ino,
	       backpage->index, backpage->flags);

	/* skip if the page was truncated away completely */
	if (backpage->mapping != bmapping) {
		_leave(" = -ENODATA [mapping]");
		return -ENODATA;
	}

	backpage2 = find_get_page(bmapping, backpage->index);
	if (!backpage2) {
		_leave(" = -ENODATA [gone]");
		return -ENODATA;
	}

	if (backpage != backpage2) {
		put_page(backpage2);
		_leave(" = -ENODATA [different]");
		return -ENODATA;
	}

	/* the page is still there and we already have a ref on it, so we don't
	 * need a second */
	put_page(backpage2);

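	/* Re-attach the monitor before re-checking the page state so that a
	 * wake-up between the check and the attach cannot be missed.
	 */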
	INIT_LIST_HEAD(&monitor->op_link);
	add_page_wait_queue(backpage, &monitor->monitor);

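	/* If the page lock can be taken here, no read is in flight: the page
	 * is in error, became up to date, or needs the read reissuing.
	 */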
	if (trylock_page(backpage)) {
		ret = -EIO;
		if (PageError(backpage))
			goto unlock_discard;
		ret = 0;
		if (PageUptodate(backpage))
			goto unlock_discard;

		_debug("reissue read");
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto discard;
	}

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}

	/* it'll reappear on the todo list */
	_leave(" = -EINPROGRESS");
	return -EINPROGRESS;

unlock_discard:
	unlock_page(backpage);
discard:
	spin_lock_irq(&object->work_lock);
	list_del(&monitor->op_link);
	spin_unlock_irq(&object->work_lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * copy data from backing pages to netfs pages to complete a read operation
 * - driven by FS-Cache's thread pool
 */
static void cachefiles_read_copier(struct fscache_operation *_op)
{
	struct cachefiles_one_read *monitor;
	struct cachefiles_object *object;
	struct fscache_retrieval *op;
	int error, max;

	op = container_of(_op, struct fscache_retrieval, op);
	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("{ino=%lu}", d_backing_inode(object->backer)->i_ino);

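	/* only process a bounded batch of monitors per invocation; if more
	 * remain, the op is re-enqueued below so that the work queue and
	 * other ops get a turn */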
	max = 8;
	spin_lock_irq(&object->work_lock);

	while (!list_empty(&op->to_do)) {
		monitor = list_entry(op->to_do.next,
				     struct cachefiles_one_read, op_link);
		list_del(&monitor->op_link);

		spin_unlock_irq(&object->work_lock);

		_debug("- copy {%lu}", monitor->back_page->index);

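		/* Decide the monitor's fate: invalidation trumps everything;
		 * an up-to-date page is copied across; an unlocked page that
		 * is neither up to date nor in error was probably truncated
		 * and gets its read reissued; anything else is an I/O error.
		 */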
	recheck:
		if (test_bit(FSCACHE_COOKIE_INVALIDATING,
			     &object->fscache.cookie->flags)) {
			error = -ESTALE;
		} else if (PageUptodate(monitor->back_page)) {
			copy_highpage(monitor->netfs_page, monitor->back_page);
			fscache_mark_page_cached(monitor->op,
						 monitor->netfs_page);
			error = 0;
		} else if (!PageError(monitor->back_page)) {
			/* the page has probably been truncated */
			error = cachefiles_read_reissue(object, monitor);
			if (error == -EINPROGRESS)
				goto next;
			goto recheck;
		} else {
			cachefiles_io_error_obj(
				object,
				"Readpage failed on backing file %lx",
				(unsigned long) monitor->back_page->flags);
			error = -EIO;
		}

		page_cache_release(monitor->back_page);

		fscache_end_io(op, monitor->netfs_page, error);
		page_cache_release(monitor->netfs_page);
		fscache_retrieval_complete(op, 1);
		fscache_put_retrieval(op);
		kfree(monitor);

	next:
		/* let the thread pool have some air occasionally */
		max--;
		if (max < 0 || need_resched()) {
			if (!list_empty(&op->to_do))
				fscache_enqueue_retrieval(op);
			_leave(" [maxed out]");
			return;
		}

		spin_lock_irq(&object->work_lock);
	}

	spin_unlock_irq(&object->work_lock);
	_leave("");
}

/*
 * read the backing file page corresponding to the given netfs page
 * - an uncertain page is simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
					    struct fscache_retrieval *op,
					    struct page *netpage)
{
	struct cachefiles_one_read *monitor;
	struct address_space *bmapping;
	struct page *newpage, *backpage;
	int ret;

	_enter("");

	_debug("read back %p{%lu,%d}",
	       netpage, netpage->index, page_count(netpage));

	monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
	if (!monitor)
		goto nomem;

	monitor->netfs_page = netpage;
	monitor->op = fscache_get_retrieval(op);

	init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);

	/* attempt to get hold of the backing page */
	bmapping = d_backing_inode(object->backer)->i_mapping;
	newpage = NULL;

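	/* Look up an existing backing page; failing that, allocate one and
	 * try to insert it.  -EEXIST means another thread won the race to
	 * insert, so go back round and pick up their page instead.
	 */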
	for (;;) {
		backpage = find_get_page(bmapping, netpage->index);
		if (backpage)
			goto backing_page_already_present;

		if (!newpage) {
			newpage = __page_cache_alloc(cachefiles_gfp |
						     __GFP_COLD);
			if (!newpage)
				goto nomem_monitor;
		}

		ret = add_to_page_cache_lru(newpage, bmapping,
					    netpage->index, cachefiles_gfp);
		if (ret == 0)
			goto installed_new_backing_page;
		if (ret != -EEXIST)
			goto nomem_page;
	}

	/* we've installed a new backing page, so now we need to start
	 * it reading */
installed_new_backing_page:
	_debug("- new %p", newpage);

	backpage = newpage;
	newpage = NULL;

read_backing_page:
	ret = bmapping->a_ops->readpage(NULL, backpage);
	if (ret < 0)
		goto read_error;

	/* set the monitor to transfer the data across */
monitor_backing_page:
	_debug("- monitor add");

	/* install the monitor */
	page_cache_get(monitor->netfs_page);
	page_cache_get(backpage);
	monitor->back_page = backpage;
	monitor->monitor.private = backpage;
	add_page_wait_queue(backpage, &monitor->monitor);
	monitor = NULL;

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}
	goto success;

	/* if the backing page is already present, it can be in one of
	 * three states: read in progress, read failed or read okay */
backing_page_already_present:
	_debug("- present");

	if (newpage) {
		page_cache_release(newpage);
		newpage = NULL;
	}

	if (PageError(backpage))
		goto io_error;

	if (PageUptodate(backpage))
		goto backing_page_already_uptodate;

	if (!trylock_page(backpage))
		goto monitor_backing_page;
	_debug("read %p {%lx}", backpage, backpage->flags);
	goto read_backing_page;

	/* the backing page is already up to date, attach the netfs
	 * page to the pagecache and LRU and copy the data across */
backing_page_already_uptodate:
	_debug("- uptodate");

	fscache_mark_page_cached(op, netpage);

	copy_highpage(netpage, backpage);
	fscache_end_io(op, netpage, 0);
	fscache_retrieval_complete(op, 1);

success:
	_debug("success");
	ret = 0;

out:
	if (backpage)
		page_cache_release(backpage);
	if (monitor) {
		fscache_put_retrieval(monitor->op);
		kfree(monitor);
	}
	_leave(" = %d", ret);
	return ret;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM) {
		fscache_retrieval_complete(op, 1);
		goto out;
	}
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	fscache_retrieval_complete(op, 1);
	ret = -ENOBUFS;
	goto out;

nomem_page:
	page_cache_release(newpage);
nomem_monitor:
	fscache_put_retrieval(monitor->op);
	kfree(monitor);
nomem:
	fscache_retrieval_complete(op, 1);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if the page is beyond EOF
 * - if the page is backed by a block in the cache:
 *   - a read will be started which will call the callback on completion
 *   - 0 will be returned
 * - else if the page is unbacked:
 *   - the metadata will be retained
 *   - -ENODATA will be returned
 */
int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
				  struct page *page,
				  gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct inode *inode;
	sector_t block0, block;
	unsigned shift;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{%p},{%lx},,,", object, page->index);

	if (!object->backer)
		goto enobufs;

	inode = d_backing_inode(object->backer);
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT(inode->i_mapping->a_ops->bmap);
	ASSERT(inode->i_mapping->a_ops->readpages);

	/* calculate the shift required to use bmap */
	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
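
	/* a page covers 2^shift filesystem blocks, so shifting a page index
	 * by 'shift' gives the file block number of its first block */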

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	/* we assume the absence or presence of the first block is a good
	 * enough indication for the page as a whole
	 * - TODO: don't use bmap() for this as it is _not_ actually good
	 *   enough for this as it doesn't indicate errors, but it's all we've
	 *   got for the moment
	 */
	block0 = page->index;
	block0 <<= shift;

	block = inode->i_mapping->a_ops->bmap(inode->i_mapping, block0);
	_debug("%llx -> %llx",
	       (unsigned long long) block0,
	       (unsigned long long) block);

	if (block) {
		/* submit the apparently valid page to the backing fs to be
		 * read from disk */
		ret = cachefiles_read_backing_file_one(object, op, page);
	} else if (cachefiles_has_space(cache, 0, 1) == 0) {
		/* there's space in the cache we can use */
		fscache_mark_page_cached(op, page);
		fscache_retrieval_complete(op, 1);
		ret = -ENODATA;
	} else {
		goto enobufs;
	}

	_leave(" = %d", ret);
	return ret;

enobufs:
	fscache_retrieval_complete(op, 1);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}

/*
 * read the backing file pages corresponding to the given set of netfs pages
 * - any uncertain pages are simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file(struct cachefiles_object *object,
					struct fscache_retrieval *op,
					struct list_head *list)
{
	struct cachefiles_one_read *monitor = NULL;
	struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping;
	struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
	int ret = 0;

	_enter("");

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);

		_debug("read back %p{%lu,%d}",
		       netpage, netpage->index, page_count(netpage));

		if (!monitor) {
			monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
			if (!monitor)
				goto nomem;

			monitor->op = fscache_get_retrieval(op);
			init_waitqueue_func_entry(&monitor->monitor,
						  cachefiles_read_waiter);
		}

		for (;;) {
			backpage = find_get_page(bmapping, netpage->index);
			if (backpage)
				goto backing_page_already_present;

			if (!newpage) {
				newpage = __page_cache_alloc(cachefiles_gfp |
							     __GFP_COLD);
				if (!newpage)
					goto nomem;
			}

			ret = add_to_page_cache_lru(newpage, bmapping,
						    netpage->index,
						    cachefiles_gfp);
			if (ret == 0)
				goto installed_new_backing_page;
			if (ret != -EEXIST)
				goto nomem;
		}

		/* we've installed a new backing page, so now we need
		 * to start it reading */
	installed_new_backing_page:
		_debug("- new %p", newpage);

		backpage = newpage;
		newpage = NULL;

	reread_backing_page:
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto read_error;

		/* add the netfs page to the pagecache and LRU, and set the
		 * monitor to transfer the data across */
	monitor_backing_page:
		_debug("- monitor add");

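		/* the netfs may already have a copy of this page; -EEXIST
		 * means another read raced this one, so the page is simply
		 * dropped and counted as complete */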
		ret = add_to_page_cache_lru(netpage, op->mapping,
					    netpage->index, cachefiles_gfp);
		if (ret < 0) {
			if (ret == -EEXIST) {
				page_cache_release(netpage);
				fscache_retrieval_complete(op, 1);
				continue;
			}
			goto nomem;
		}

		/* install a monitor */
		page_cache_get(netpage);
		monitor->netfs_page = netpage;

		page_cache_get(backpage);
		monitor->back_page = backpage;
		monitor->monitor.private = backpage;
		add_page_wait_queue(backpage, &monitor->monitor);
		monitor = NULL;

		/* but the page may have been read before the monitor was
		 * installed, so the monitor may miss the event - so we have to
		 * ensure that we do get one in such a case */
		if (trylock_page(backpage)) {
			_debug("2unlock %p {%lx}", backpage, backpage->flags);
			unlock_page(backpage);
		}

		page_cache_release(backpage);
		backpage = NULL;

		page_cache_release(netpage);
		netpage = NULL;
		continue;

		/* if the backing page is already present, it can be in one of
		 * three states: read in progress, read failed or read okay */
	backing_page_already_present:
		_debug("- present %p", backpage);

		if (PageError(backpage))
			goto io_error;

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate;

		_debug("- not ready %p{%lx}", backpage, backpage->flags);

		if (!trylock_page(backpage))
			goto monitor_backing_page;

		if (PageError(backpage)) {
			_debug("error %lx", backpage->flags);
			unlock_page(backpage);
			goto io_error;
		}

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate_unlock;

		/* we've locked a page that's neither up to date nor erroneous,
		 * so we need to attempt to read it again */
		goto reread_backing_page;

		/* the backing page is already up to date, attach the netfs
		 * page to the pagecache and LRU and copy the data across */
	backing_page_already_uptodate_unlock:
		_debug("uptodate %lx", backpage->flags);
		unlock_page(backpage);
	backing_page_already_uptodate:
		_debug("- uptodate");

		ret = add_to_page_cache_lru(netpage, op->mapping,
					    netpage->index, cachefiles_gfp);
		if (ret < 0) {
			if (ret == -EEXIST) {
				page_cache_release(netpage);
				fscache_retrieval_complete(op, 1);
				continue;
			}
			goto nomem;
		}

		copy_highpage(netpage, backpage);

		page_cache_release(backpage);
		backpage = NULL;

		fscache_mark_page_cached(op, netpage);

		/* the netpage is unlocked and marked up to date here */
		fscache_end_io(op, netpage, 0);
		page_cache_release(netpage);
		netpage = NULL;
		fscache_retrieval_complete(op, 1);
		continue;
	}

	netpage = NULL;

	_debug("out");

out:
	/* tidy up */
	if (newpage)
		page_cache_release(newpage);
	if (netpage)
		page_cache_release(netpage);
	if (backpage)
		page_cache_release(backpage);
	if (monitor) {
		fscache_put_retrieval(op);
		kfree(monitor);
	}

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);
		page_cache_release(netpage);
		fscache_retrieval_complete(op, 1);
	}

	_leave(" = %d", ret);
	return ret;

nomem:
	_debug("nomem");
	ret = -ENOMEM;
	goto record_page_complete;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM)
		goto record_page_complete;
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	ret = -ENOBUFS;
record_page_complete:
	fscache_retrieval_complete(op, 1);
	goto out;
}

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 */
int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
				   struct list_head *pages,
				   unsigned *nr_pages,
				   gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct list_head backpages;
	struct pagevec pagevec;
	struct inode *inode;
	struct page *page, *_n;
	unsigned shift, nrbackpages;
	int ret, ret2, space;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{OBJ%x,%d},,%d,,",
	       object->fscache.debug_id, atomic_read(&op->op.usage),
	       *nr_pages);

	if (!object->backer)
		goto all_enobufs;

	space = 1;
	if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
		space = 0;

	inode = d_backing_inode(object->backer);
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT(inode->i_mapping->a_ops->bmap);
	ASSERT(inode->i_mapping->a_ops->readpages);

	/* calculate the shift required to use bmap */
	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

	pagevec_init(&pagevec, 0);

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	INIT_LIST_HEAD(&backpages);
	nrbackpages = 0;

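	/* assume -ENODATA (blocks can be allocated) when there's space in the
	 * cache and -ENOBUFS when there isn't; adjusted below once the pages
	 * have been classified */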
	ret = space ? -ENODATA : -ENOBUFS;
	list_for_each_entry_safe(page, _n, pages, lru) {
		sector_t block0, block;

		/* we assume the absence or presence of the first block is a
		 * good enough indication for the page as a whole
		 * - TODO: don't use bmap() for this as it is _not_ actually
		 *   good enough for this as it doesn't indicate errors, but
		 *   it's all we've got for the moment
		 */
		block0 = page->index;
		block0 <<= shift;

		block = inode->i_mapping->a_ops->bmap(inode->i_mapping,
						      block0);
		_debug("%llx -> %llx",
		       (unsigned long long) block0,
		       (unsigned long long) block);

		if (block) {
			/* we have data - add it to the list to give to the
			 * backing fs */
			list_move(&page->lru, &backpages);
			(*nr_pages)--;
			nrbackpages++;
		} else if (space && pagevec_add(&pagevec, page) == 0) {
			fscache_mark_pages_cached(op, &pagevec);
			fscache_retrieval_complete(op, 1);
			ret = -ENODATA;
		} else {
			fscache_retrieval_complete(op, 1);
		}
	}

	if (pagevec_count(&pagevec) > 0)
		fscache_mark_pages_cached(op, &pagevec);

	if (list_empty(pages))
		ret = 0;

	/* submit the apparently valid pages to the backing fs to be read from
	 * disk */
	if (nrbackpages > 0) {
		ret2 = cachefiles_read_backing_file(object, op, &backpages);
		if (ret2 == -ENOMEM || ret2 == -EINTR)
			ret = ret2;
	}

	_leave(" = %d [nr=%u%s]",
	       ret, *nr_pages, list_empty(pages) ? " empty" : "");
	return ret;

all_enobufs:
	fscache_retrieval_complete(op, *nr_pages);
	return -ENOBUFS;
}

/*
 * allocate a block in the cache in which to store a page
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if the page is beyond EOF
 * - otherwise:
 *   - the metadata will be retained
 *   - 0 will be returned
 */
int cachefiles_allocate_page(struct fscache_retrieval *op,
			     struct page *page,
			     gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,{%lx},", object, page->index);

	ret = cachefiles_has_space(cache, 0, 1);
	if (ret == 0)
		fscache_mark_page_cached(op, page);
	else
		ret = -ENOBUFS;

	fscache_retrieval_complete(op, 1);
	_leave(" = %d", ret);
	return ret;
}

/*
 * allocate blocks in the cache in which to store a set of pages
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if some buffers couldn't be made available
 * - returns -ENOBUFS if some pages are beyond EOF
 * - otherwise:
 *   - -ENODATA will be returned
 * - metadata will be retained for any page marked
 */
int cachefiles_allocate_pages(struct fscache_retrieval *op,
			      struct list_head *pages,
			      unsigned *nr_pages,
			      gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct pagevec pagevec;
	struct page *page;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,,,%d,", object, *nr_pages);

	ret = cachefiles_has_space(cache, 0, *nr_pages);
	if (ret == 0) {
		pagevec_init(&pagevec, 0);

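		/* batch the pages up; pagevec_add() returns 0 once the vec is
		 * full, at which point fscache_mark_pages_cached() marks the
		 * batch and is expected to reset the pagevec for reuse */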
		list_for_each_entry(page, pages, lru) {
			if (pagevec_add(&pagevec, page) == 0)
				fscache_mark_pages_cached(op, &pagevec);
		}

		if (pagevec_count(&pagevec) > 0)
			fscache_mark_pages_cached(op, &pagevec);
		ret = -ENODATA;
	} else {
		ret = -ENOBUFS;
	}

	fscache_retrieval_complete(op, *nr_pages);
	_leave(" = %d", ret);
	return ret;
}

/*
 * request a page be stored in the cache
 * - cache withdrawal is prevented by the caller
 * - this request may be ignored if there's no cache block available, in which
 *   case -ENOBUFS will be returned
 * - if the op is in progress, 0 will be returned
 */
int cachefiles_write_page(struct fscache_storage *op, struct page *page)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct file *file;
	struct path path;
	loff_t pos, eof;
	size_t len;
	void *data;
	int ret = -ENOBUFS;

	ASSERT(op != NULL);
	ASSERT(page != NULL);

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("%p,%p{%lx},,,", object, page, page->index);

	if (!object->backer) {
		_leave(" = -ENOBUFS");
		return -ENOBUFS;
	}

	ASSERT(d_is_reg(object->backer));

	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	pos = (loff_t)page->index << PAGE_SHIFT;

	/* We mustn't write more data than we have, so we have to beware of a
	 * partial page at EOF.
	 */
	eof = object->fscache.store_limit_l;
	if (pos >= eof)
		goto error;

	/* write the page to the backing filesystem and let it store it in its
	 * own time */
	path.mnt = cache->mnt;
	path.dentry = object->backer;
	file = dentry_open(&path, O_RDWR | O_LARGEFILE, cache->cache_cred);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto error_2;
	}

	len = PAGE_SIZE;
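	/* if the store limit isn't page-aligned, the final page is partial,
	 * so trim the write to avoid writing beyond the limit */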
	if (eof & ~PAGE_MASK) {
		if (eof - pos < PAGE_SIZE) {
			_debug("cut short %llx to %llx",
			       pos, eof);
			len = eof - pos;
			ASSERTCMP(pos + len, ==, eof);
		}
	}

	data = kmap(page);
	ret = __kernel_write(file, data, len, &pos);
	kunmap(page);
	fput(file);
	if (ret != len)
		goto error_eio;

	_leave(" = 0");
	return 0;

error_eio:
	ret = -EIO;
error_2:
	if (ret == -EIO)
		cachefiles_io_error_obj(object,
					"Write page to backing file failed");
error:
	_leave(" = -ENOBUFS [%d]", ret);
	return -ENOBUFS;
}

/*
 * detach a backing block from a page
 * - cache withdrawal is prevented by the caller
 */
void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
{
	struct cachefiles_object *object;

	object = container_of(_object, struct cachefiles_object, fscache);

	_enter("%p,{%lu}", object, page->index);

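	/* the unpaired unlock below implies the caller enters with the cookie
	 * lock held; releasing it here completes the detachment */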
	spin_unlock(&object->fscache.cookie->lock);
}
974